max_stars_repo_path
stringlengths
4
286
max_stars_repo_name
stringlengths
5
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.03M
content_cleaned
stringlengths
6
1.03M
language
stringclasses
111 values
language_score
float64
0.03
1
comments
stringlengths
0
556k
edu_score
float64
0.32
5.03
edu_int_score
int64
0
5
flytekit/types/schema/types_pandas.py
NotMatthewGriffin/flytekit
1
6628651
import os import typing from typing import Type import pandas from flytekit import FlyteContext from flytekit.configuration import sdk from flytekit.core.type_engine import T, TypeEngine, TypeTransformer from flytekit.models.literals import Literal, Scalar, Schema from flytekit.models.types import LiteralType, SchemaType from flytekit.types.schema import LocalIOSchemaReader, LocalIOSchemaWriter, SchemaEngine, SchemaFormat, SchemaHandler class ParquetIO(object): PARQUET_ENGINE = "pyarrow" def _read(self, chunk: os.PathLike, columns: typing.Optional[typing.List[str]], **kwargs) -> pandas.DataFrame: return pandas.read_parquet(chunk, columns=columns, engine=self.PARQUET_ENGINE, **kwargs) def read(self, *files: os.PathLike, columns: typing.List[str] = None, **kwargs) -> pandas.DataFrame: frames = [self._read(chunk=f, columns=columns, **kwargs) for f in files if os.path.getsize(f) > 0] if len(frames) == 1: return frames[0] elif len(frames) > 1: return pandas.concat(frames, copy=True) return pandas.DataFrame() def write( self, df: pandas.DataFrame, to_file: os.PathLike, coerce_timestamps: str = "us", allow_truncated_timestamps: bool = False, **kwargs, ): """ Writes data frame as a chunk to the local directory owned by the Schema object. Will later be uploaded to s3. :param df: data frame to write as parquet :param to_file: Sink file to write the dataframe to :param coerce_timestamps: format to store timestamp in parquet. 'us', 'ms', 's' are allowed values. Note: if your timestamps will lose data due to the coercion, your write will fail! Nanoseconds are problematic in the Parquet format and will not work. See allow_truncated_timestamps. :param allow_truncated_timestamps: default False. Allow truncation when coercing timestamps to a coarser resolution. """ # TODO @ketan validate and remove this comment, as python 3 all strings are unicode # Convert all columns to unicode as pyarrow's parquet reader can not handle mixed strings and unicode. 
# Since columns from Hive are returned as unicode, if a user wants to add a column to a dataframe returned from # Hive, then output the new data, the user would have to provide a unicode column name which is unnatural. df.to_parquet( to_file, coerce_timestamps=coerce_timestamps, allow_truncated_timestamps=allow_truncated_timestamps, **kwargs, ) class FastParquetIO(ParquetIO): PARQUET_ENGINE = "fastparquet" def _read(self, chunk: os.PathLike, columns: typing.Optional[typing.List[str]], **kwargs) -> pandas.DataFrame: from fastparquet import ParquetFile as _ParquetFile from fastparquet import thrift_structures as _ts # TODO Follow up to figure out if this is not needed anymore # https://github.com/dask/fastparquet/issues/414#issuecomment-478983811 df = pandas.read_parquet(chunk, columns=columns, engine=self.PARQUET_ENGINE, index=False) df_column_types = df.dtypes pf = _ParquetFile(chunk) schema_column_dtypes = {l.name: l.type for l in list(pf.schema.schema_elements)} for idx in df_column_types[df_column_types == "float16"].index.tolist(): # A hacky way to get the string representations of the column types of a parquet schema # Reference: # https://github.com/dask/fastparquet/blob/f4ecc67f50e7bf98b2d0099c9589c615ea4b06aa/fastparquet/schema.py if _ts.parquet_thrift.Type._VALUES_TO_NAMES[schema_column_dtypes[idx]] == "BOOLEAN": df[idx] = df[idx].astype("object") df[idx].replace({0: False, 1: True, pandas.np.nan: None}, inplace=True) return df _PARQUETIO_ENGINES: typing.Dict[str, ParquetIO] = { ParquetIO.PARQUET_ENGINE: ParquetIO(), FastParquetIO.PARQUET_ENGINE: FastParquetIO(), } class PandasSchemaReader(LocalIOSchemaReader[pandas.DataFrame]): def __init__(self, local_dir: os.PathLike, cols: typing.Optional[typing.Dict[str, type]], fmt: SchemaFormat): super().__init__(local_dir, cols, fmt) self._parquet_engine = _PARQUETIO_ENGINES[sdk.PARQUET_ENGINE.get()] def _read(self, *path: os.PathLike, **kwargs) -> pandas.DataFrame: return self._parquet_engine.read(*path, 
columns=self.column_names, **kwargs) class PandasSchemaWriter(LocalIOSchemaWriter[pandas.DataFrame]): def __init__(self, local_dir: os.PathLike, cols: typing.Optional[typing.Dict[str, type]], fmt: SchemaFormat): super().__init__(local_dir, cols, fmt) self._parquet_engine = _PARQUETIO_ENGINES[sdk.PARQUET_ENGINE.get()] def _write(self, df: T, path: os.PathLike, **kwargs): return self._parquet_engine.write(df, to_file=path, **kwargs) class PandasDataFrameTransformer(TypeTransformer[pandas.DataFrame]): """ Transforms a pd.DataFrame to Schema without column types. """ def __init__(self): super().__init__("PandasDataFrame<->GenericSchema", pandas.DataFrame) self._parquet_engine = _PARQUETIO_ENGINES[sdk.PARQUET_ENGINE.get()] @staticmethod def _get_schema_type() -> SchemaType: return SchemaType(columns=[]) def get_literal_type(self, t: Type[pandas.DataFrame]) -> LiteralType: return LiteralType(schema=self._get_schema_type()) def to_literal( self, ctx: FlyteContext, python_val: pandas.DataFrame, python_type: Type[pandas.DataFrame], expected: LiteralType, ) -> Literal: local_dir = ctx.file_access.get_random_local_directory() w = PandasSchemaWriter(local_dir=local_dir, cols=None, fmt=SchemaFormat.PARQUET) w.write(python_val) remote_path = ctx.file_access.get_random_remote_directory() ctx.file_access.put_data(local_dir, remote_path, is_multipart=True) return Literal(scalar=Scalar(schema=Schema(remote_path, self._get_schema_type()))) def to_python_value( self, ctx: FlyteContext, lv: Literal, expected_python_type: Type[pandas.DataFrame] ) -> pandas.DataFrame: if not (lv and lv.scalar and lv.scalar.schema): return pandas.DataFrame() local_dir = ctx.file_access.get_random_local_directory() ctx.file_access.get_data(lv.scalar.schema.uri, local_dir, is_multipart=True) r = PandasSchemaReader(local_dir=local_dir, cols=None, fmt=SchemaFormat.PARQUET) return r.all() SchemaEngine.register_handler( SchemaHandler("pandas-dataframe-schema", pandas.DataFrame, PandasSchemaReader, 
PandasSchemaWriter) ) TypeEngine.register(PandasDataFrameTransformer())
import os import typing from typing import Type import pandas from flytekit import FlyteContext from flytekit.configuration import sdk from flytekit.core.type_engine import T, TypeEngine, TypeTransformer from flytekit.models.literals import Literal, Scalar, Schema from flytekit.models.types import LiteralType, SchemaType from flytekit.types.schema import LocalIOSchemaReader, LocalIOSchemaWriter, SchemaEngine, SchemaFormat, SchemaHandler class ParquetIO(object): PARQUET_ENGINE = "pyarrow" def _read(self, chunk: os.PathLike, columns: typing.Optional[typing.List[str]], **kwargs) -> pandas.DataFrame: return pandas.read_parquet(chunk, columns=columns, engine=self.PARQUET_ENGINE, **kwargs) def read(self, *files: os.PathLike, columns: typing.List[str] = None, **kwargs) -> pandas.DataFrame: frames = [self._read(chunk=f, columns=columns, **kwargs) for f in files if os.path.getsize(f) > 0] if len(frames) == 1: return frames[0] elif len(frames) > 1: return pandas.concat(frames, copy=True) return pandas.DataFrame() def write( self, df: pandas.DataFrame, to_file: os.PathLike, coerce_timestamps: str = "us", allow_truncated_timestamps: bool = False, **kwargs, ): """ Writes data frame as a chunk to the local directory owned by the Schema object. Will later be uploaded to s3. :param df: data frame to write as parquet :param to_file: Sink file to write the dataframe to :param coerce_timestamps: format to store timestamp in parquet. 'us', 'ms', 's' are allowed values. Note: if your timestamps will lose data due to the coercion, your write will fail! Nanoseconds are problematic in the Parquet format and will not work. See allow_truncated_timestamps. :param allow_truncated_timestamps: default False. Allow truncation when coercing timestamps to a coarser resolution. """ # TODO @ketan validate and remove this comment, as python 3 all strings are unicode # Convert all columns to unicode as pyarrow's parquet reader can not handle mixed strings and unicode. 
# Since columns from Hive are returned as unicode, if a user wants to add a column to a dataframe returned from # Hive, then output the new data, the user would have to provide a unicode column name which is unnatural. df.to_parquet( to_file, coerce_timestamps=coerce_timestamps, allow_truncated_timestamps=allow_truncated_timestamps, **kwargs, ) class FastParquetIO(ParquetIO): PARQUET_ENGINE = "fastparquet" def _read(self, chunk: os.PathLike, columns: typing.Optional[typing.List[str]], **kwargs) -> pandas.DataFrame: from fastparquet import ParquetFile as _ParquetFile from fastparquet import thrift_structures as _ts # TODO Follow up to figure out if this is not needed anymore # https://github.com/dask/fastparquet/issues/414#issuecomment-478983811 df = pandas.read_parquet(chunk, columns=columns, engine=self.PARQUET_ENGINE, index=False) df_column_types = df.dtypes pf = _ParquetFile(chunk) schema_column_dtypes = {l.name: l.type for l in list(pf.schema.schema_elements)} for idx in df_column_types[df_column_types == "float16"].index.tolist(): # A hacky way to get the string representations of the column types of a parquet schema # Reference: # https://github.com/dask/fastparquet/blob/f4ecc67f50e7bf98b2d0099c9589c615ea4b06aa/fastparquet/schema.py if _ts.parquet_thrift.Type._VALUES_TO_NAMES[schema_column_dtypes[idx]] == "BOOLEAN": df[idx] = df[idx].astype("object") df[idx].replace({0: False, 1: True, pandas.np.nan: None}, inplace=True) return df _PARQUETIO_ENGINES: typing.Dict[str, ParquetIO] = { ParquetIO.PARQUET_ENGINE: ParquetIO(), FastParquetIO.PARQUET_ENGINE: FastParquetIO(), } class PandasSchemaReader(LocalIOSchemaReader[pandas.DataFrame]): def __init__(self, local_dir: os.PathLike, cols: typing.Optional[typing.Dict[str, type]], fmt: SchemaFormat): super().__init__(local_dir, cols, fmt) self._parquet_engine = _PARQUETIO_ENGINES[sdk.PARQUET_ENGINE.get()] def _read(self, *path: os.PathLike, **kwargs) -> pandas.DataFrame: return self._parquet_engine.read(*path, 
columns=self.column_names, **kwargs) class PandasSchemaWriter(LocalIOSchemaWriter[pandas.DataFrame]): def __init__(self, local_dir: os.PathLike, cols: typing.Optional[typing.Dict[str, type]], fmt: SchemaFormat): super().__init__(local_dir, cols, fmt) self._parquet_engine = _PARQUETIO_ENGINES[sdk.PARQUET_ENGINE.get()] def _write(self, df: T, path: os.PathLike, **kwargs): return self._parquet_engine.write(df, to_file=path, **kwargs) class PandasDataFrameTransformer(TypeTransformer[pandas.DataFrame]): """ Transforms a pd.DataFrame to Schema without column types. """ def __init__(self): super().__init__("PandasDataFrame<->GenericSchema", pandas.DataFrame) self._parquet_engine = _PARQUETIO_ENGINES[sdk.PARQUET_ENGINE.get()] @staticmethod def _get_schema_type() -> SchemaType: return SchemaType(columns=[]) def get_literal_type(self, t: Type[pandas.DataFrame]) -> LiteralType: return LiteralType(schema=self._get_schema_type()) def to_literal( self, ctx: FlyteContext, python_val: pandas.DataFrame, python_type: Type[pandas.DataFrame], expected: LiteralType, ) -> Literal: local_dir = ctx.file_access.get_random_local_directory() w = PandasSchemaWriter(local_dir=local_dir, cols=None, fmt=SchemaFormat.PARQUET) w.write(python_val) remote_path = ctx.file_access.get_random_remote_directory() ctx.file_access.put_data(local_dir, remote_path, is_multipart=True) return Literal(scalar=Scalar(schema=Schema(remote_path, self._get_schema_type()))) def to_python_value( self, ctx: FlyteContext, lv: Literal, expected_python_type: Type[pandas.DataFrame] ) -> pandas.DataFrame: if not (lv and lv.scalar and lv.scalar.schema): return pandas.DataFrame() local_dir = ctx.file_access.get_random_local_directory() ctx.file_access.get_data(lv.scalar.schema.uri, local_dir, is_multipart=True) r = PandasSchemaReader(local_dir=local_dir, cols=None, fmt=SchemaFormat.PARQUET) return r.all() SchemaEngine.register_handler( SchemaHandler("pandas-dataframe-schema", pandas.DataFrame, PandasSchemaReader, 
PandasSchemaWriter) ) TypeEngine.register(PandasDataFrameTransformer())
en
0.813421
Writes data frame as a chunk to the local directory owned by the Schema object. Will later be uploaded to s3. :param df: data frame to write as parquet :param to_file: Sink file to write the dataframe to :param coerce_timestamps: format to store timestamp in parquet. 'us', 'ms', 's' are allowed values. Note: if your timestamps will lose data due to the coercion, your write will fail! Nanoseconds are problematic in the Parquet format and will not work. See allow_truncated_timestamps. :param allow_truncated_timestamps: default False. Allow truncation when coercing timestamps to a coarser resolution. # TODO @ketan validate and remove this comment, as python 3 all strings are unicode # Convert all columns to unicode as pyarrow's parquet reader can not handle mixed strings and unicode. # Since columns from Hive are returned as unicode, if a user wants to add a column to a dataframe returned from # Hive, then output the new data, the user would have to provide a unicode column name which is unnatural. # TODO Follow up to figure out if this is not needed anymore # https://github.com/dask/fastparquet/issues/414#issuecomment-478983811 # A hacky way to get the string representations of the column types of a parquet schema # Reference: # https://github.com/dask/fastparquet/blob/f4ecc67f50e7bf98b2d0099c9589c615ea4b06aa/fastparquet/schema.py Transforms a pd.DataFrame to Schema without column types.
2.190967
2
helper.py
mlwatkins/cs3240-labdemo
0
6628652
<reponame>mlwatkins/cs3240-labdemo<gh_stars>0 __author__ = 'mlw5ea' def greeting(msg): print(msg)
__author__ = 'mlw5ea' def greeting(msg): print(msg)
none
1
1.300327
1
betzbeyond.py
powerthecoder/BetzBeyond
0
6628653
# Created By: <NAME> # Discord: -{ Power1482 }-#0101 # https://powerthecoder.xyz/ import os import sys import time import random from random import randrange import discord from discord.ext import commands from discord.ext import tasks from discord import Member from discord.ext.commands import has_permissions from discord.ext.commands import MissingPermissions from discord.utils import find from discord.utils import get import asyncio import requests from requests import get import logging from datetime import datetime client = commands.Bot(command_prefix="$") Token = "TOKEN HERE" Verison = "1.1" client.remove_command("help") # BetzBeyond ID: 210260301696729088 # Powerlt1482 ID: 255876083918831616 # TwilightLogs: 791852159519686666 # Betz Logs: THIS # General: 529838468063559687 #betzbeyond_console_log = client.get_channel(THIS) # Bot Events # @client.event async def on_ready(): print() print("-"*70) print("Bot Online") print(f"Logged In As: {client.user.name}") print(f"ID: {client.user.id}") print(f"Bot Version: 1.0") print(f"Discord Version {discord.__version__}") print("-"*70) print() print() await client.change_presence(status=discord.Status.online, activity=discord.Game("mixer.com/BetzBeyond")) #await client.change_presence(status=discord.Status.dnd, activity=discord.Game('Testing')) StartTime = datetime.now() @client.event async def on_guild_join(guild): print(f"Bot Joined {guild} ") twilight_console_log = client.get_channel(791852159519686666) general = client.get_channel(529838468063559687) staff = client.get_channel(664523965871685654) await twilight_console_log.send(f"Bot Joined **{guild}**") msg = ">>> Thankyou for adding me. 
My prefix is `$`" embed=discord.Embed(title="My prefix is `$", description="here are all of the commands available", color=0xfea730) embed.set_author(name="Thankyou For Adding Me") embed.add_field(name="I am a bot to assist Betz's Server", value="$about", inline=False) embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await general.send(embed=embed) await staff.send(embed=embed) @client.event async def on_guild_remove(guild): print(f"The Bot Has Been Removed From {guild}") twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**BetzBeyond Bot** Has been removed from **{guild}**") @client.event async def on_member_join(member): twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{member}** joined **BetzBeyond**") print(f"**{member}** joined **BetzBeyond**") Viewer = discord.utils.get(member.guild.roles, id=664757537488502804) await Member.add_roles(member, Viewer) @client.event async def on_member_remove(member): print(f"{member} left BetzBeyond") twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{member}** has left a server") @client.event async def on_message(message): if 'nigga' in message.content.lower(): print(" PowerBot found bad word") await message.channel.purge(limit=1) powerbot_console_log = client.get_channel(791852159519686666) author = message.author await powerbot_console_log.send(f"PowerBot found a bad word from **{author}**") if 'rape' in message.content.lower(): print(" PowerBot found bad word") await message.channel.purge(limit=1) powerbot_console_log = client.get_channel(791852159519686666) author = message.author await powerbot_console_log.send(f"PowerBot found a bad word from **{author}**") if 'nigger' in message.content.lower(): print(" PowerBot found bad word") await message.channel.purge(limit=1) powerbot_console_log = client.get_channel(791852159519686666) author = 
message.author await powerbot_console_log.send(f"PowerBot found a bad word from **{author}**") if 'niger' in message.content.lower(): print(" PowerBot found bad word") await message.channel.purge(limit=1) powerbot_console_log = client.get_channel(791852159519686666) author = message.author await powerbot_console_log.send(f"PowerBot found a bad word from **{author}**") if 'n1gg3r' in message.content.lower(): print(" PowerBot found bad word") await message.channel.purge(limit=1) powerbot_console_log = client.get_channel(791852159519686666) author = message.author await powerbot_console_log.send(f"PowerBot found a bad word from **{author}**") if 'nigg3r' in message.content.lower(): print(" PowerBot found bad word") await message.channel.purge(limit=1) powerbot_console_log = client.get_channel(791852159519686666) author = message.author await powerbot_console_log.send(f"PowerBot found a bad word from **{author}**") if 'n1gger' in message.content.lower(): print(" PowerBot found bad word") await message.channel.purge(limit=1) powerbot_console_log = client.get_channel(791852159519686666) author = message.author await powerbot_console_log.send(f"PowerBot found a bad word from **{author}**") await client.process_commands(message) <EMAIL> #async def betz_advertisement(): # await client.wait_until_ready() # betz_ad = client.get_channel(529838468063559687) # while not client.is_closed(): # await asyncio.sleep(86400) #86400 = 24 hours # await betz_ad.send("Come join **Betz Beyond** on Mixer https://mixer.com/BetzBeyond and **Minecraft Server** IP: **172.16.58.3:25582**") # powerbot_console_log = client.get_channel(791852159519686666) # await powerbot_console_log.send("Advertised Server For **Betz Beyond**") # Client Commands # @client.command() async def help(ctx): author = ctx.message.author.id twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{author}** did the **Help** Command") print(f"**{author}** did the **Help** Command") 
embed=discord.Embed(title="HELP", description="Help Menu For BetzBeyond", color=0x00ffff) embed.add_field(name="$about", value="About The Bot", inline=False) embed.add_field(name="$ping", value="Check Ping Status", inline=False) embed.add_field(name="$status", value="Check Bot Server Status", inline=False) embed.add_field(name="$stream", value="Get the BetzBeyond Stream Link", inline=False) embed.add_field(name="$ban", value="Ban Members", inline=False) embed.add_field(name="$kick", value="Kick Members", inline=False) embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) @client.command() async def ping(ctx): author = ctx.message.author.name print(f"{author} entered ping command") twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{author}** entered **Ping** Command") embed=discord.Embed(title="PING", description=f"Testing Network Latency For **{author}**", color=0xfea730) embed.add_field(name=f"Your Ping Is", value=f"**{round(client.latency * 1000)}**", inline=False) embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) @client.command() async def about(ctx): author = ctx.message.author.name print(f"{author} Entered About Command") twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{author}** Entered **About** Command") embed=discord.Embed(title="Created By: -{ Power1482 }-#0101 for BetzBeyond", description="Version: **1.0**", color=0xfea730) embed.add_field(name="Website: ", value="https://powerthecoder.xyz", inline=False) embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) @client.command() async def status(ctx): author = ctx.message.author.name print(f"Status command ran by {author}") twilight_console_log = client.get_channel(791852159519686666) await 
twilight_console_log.send(f"**{author}** ran the **status** command") amm_server = 0 for guild in client.guilds: amm_server += 1 T2 = datetime.now() time_total = T2 - StartTime embed=discord.Embed(title="Server Status: **online**", color=0xfea730) embed.set_author(name="Server Status") embed.add_field(name="Ammount Of Servers In ", value=amm_server, inline=False) embed.add_field(name="Server Version ", value="0.0", inline=False) embed.add_field(name="Uptime", value=time_total, inline=False) embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) @client.command() async def stream(ctx): author = ctx.message.author.name print(f"{author} ran the stream command") twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{author}** ran the **Stream** command") embed=discord.Embed(title="Stream", url="https://mixer.com/BetzBeyond", description="**Mixer Stream Link**", color=0x00ffff) embed.set_author(name="Find BetzBeyond On Mixer") embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) # ADMIN COMMANDS # @client.command(pass_context=True) @has_permissions(ban_members=True) async def ban(ctx, user_name: discord.Member, *, reason=None): author = ctx.message.author.name print(f'The Ban Command Ran by {author}') embed=discord.Embed(title="BANNED", discription=f"**{user_name}** got **banned** as they have done something wrong", color=0xff0000) embed.add_field(name="Member", value=f"**{user_name}** got **Banned**") embed.add_field(name="Opperator", value=f"{author}") embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) await user_name.ban(reason=reason) print(f'{user_name} got Banned') twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{user_name}** got **banned**") @ban.error async def ban_error(ctx, 
error): if isinstance(error, commands.BadArgument): embed=discord.Embed(title="You are not an Admin", description="Please Contact Administrators If you think this is incorrect", color=0xff0000) embed.set_author(name="ERROR Command") embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) else: await ctx.send(">>> command is `$ban <client>` Replace `client` with the user you want to ban ") @client.command(pass_context=True) @has_permissions(kick_members=True) async def kick(ctx, *, user_name: discord.Member, reason=None): author = ctx.message.author.name print(f'The Kick Command Ran by {author}') embed=discord.Embed(title="KICKED", discription=f"**{user_name}** got **Kicked** as they have done something wrong", color=0xff0000) embed.add_field(name="Member", value=f"**{user_name}** got **Kicked**") embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) await user_name.kick() asyncio.sleep(2) print(f"{user_name} got Kicked") twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{user_name}** got **kicked**") @kick.error async def kick_error(ctx, error): if isinstance(error, commands.BadArgument): embed=discord.Embed(title="You are not an Admin", description="Please Contact Administrators If you think this is incorrect", color=0xff0000) embed.set_author(name="ERROR Command") embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) else: await ctx.send(">>> command is `$kick <client>` Replace `client` with the user you want to kick ") @client.command(pass_context=True) async def say(ctx, *, args): if (ctx.author.id == 2<PASSWORD>) or (ctx.author.id == 2<PASSWORD>): await asyncio.sleep(0.5) await ctx.channel.purge(limit=1) await asyncio.sleep(0.5) embed=discord.Embed(title=args, description="\u200b", color=0xfea730) embed.set_footer(text="Bot 
Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) twilight_console_log = client.get_channel(791852159519686666) author = ctx.message.author.name await twilight_console_log.send(f"**{author}** ran the **Say** command") else: embed=discord.Embed(title="**-{ Power1482 }-#0101** and **Betz Beyond#2225** are my owner", color=0xff0000) embed.set_author(name="You Are Not My Owner") embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) print(f"{author} tried to use Say command and Failed") @client.command(pass_context=True) async def mod(ctx, user_name: discord.Member): author = ctx.message.author.id mod_role = discord.utils.get(Member.guild.roles, id=0) await user_name.add_roles(user_name, mod_role) twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{author}** added **Moderator** to **{user_name}**") print(f"**{author}** added **Moderator** to **{user_name}**") @client.command(pass_context=True) async def admin(ctx, user_name: discord.Member): author = ctx.message.author.id mod_role = discord.utils.get(Member.guild.roles, id=0) await user_name.add_roles(user_name, mod_role) twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{author}** added **Admin** to **{user_name}**") print(f"**{author}** added **Admin** to **{user_name}**") @client.command(pass_context=True) async def mode(ctx, arg1): author = ctx.message.author.id if(ctx.author.id == <PASSWORD>): if(arg1 == "testing".lower()) or (arg1 == "test".lower()) or (arg1 == "dev".lower()): await client.change_presence(status=discord.Status.dnd, activity=discord.Game('Testing')) else: await client.change_presence(status=discord.Status.online, activity=discord.Game("mixer.com/BetzBeyond")) else: embed=discord.Embed(title="-{ Power1482 }-#0101 is my owner", color=0xff0000) embed.set_author(name="You Are Not My Owner") 
embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101") await ctx.send(embed=embed) print(f"{author} tried to update my Presence and FAILED") twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{author}** Tried To Change Status @everyone") #client.loop.create_task(betz_advertisement()) client.run(Token)
# Created By: <NAME> # Discord: -{ Power1482 }-#0101 # https://powerthecoder.xyz/ import os import sys import time import random from random import randrange import discord from discord.ext import commands from discord.ext import tasks from discord import Member from discord.ext.commands import has_permissions from discord.ext.commands import MissingPermissions from discord.utils import find from discord.utils import get import asyncio import requests from requests import get import logging from datetime import datetime client = commands.Bot(command_prefix="$") Token = "TOKEN HERE" Verison = "1.1" client.remove_command("help") # BetzBeyond ID: 210260301696729088 # Powerlt1482 ID: 255876083918831616 # TwilightLogs: 791852159519686666 # Betz Logs: THIS # General: 529838468063559687 #betzbeyond_console_log = client.get_channel(THIS) # Bot Events # @client.event async def on_ready(): print() print("-"*70) print("Bot Online") print(f"Logged In As: {client.user.name}") print(f"ID: {client.user.id}") print(f"Bot Version: 1.0") print(f"Discord Version {discord.__version__}") print("-"*70) print() print() await client.change_presence(status=discord.Status.online, activity=discord.Game("mixer.com/BetzBeyond")) #await client.change_presence(status=discord.Status.dnd, activity=discord.Game('Testing')) StartTime = datetime.now() @client.event async def on_guild_join(guild): print(f"Bot Joined {guild} ") twilight_console_log = client.get_channel(791852159519686666) general = client.get_channel(529838468063559687) staff = client.get_channel(664523965871685654) await twilight_console_log.send(f"Bot Joined **{guild}**") msg = ">>> Thankyou for adding me. 
My prefix is `$`" embed=discord.Embed(title="My prefix is `$", description="here are all of the commands available", color=0xfea730) embed.set_author(name="Thankyou For Adding Me") embed.add_field(name="I am a bot to assist Betz's Server", value="$about", inline=False) embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await general.send(embed=embed) await staff.send(embed=embed) @client.event async def on_guild_remove(guild): print(f"The Bot Has Been Removed From {guild}") twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**BetzBeyond Bot** Has been removed from **{guild}**") @client.event async def on_member_join(member): twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{member}** joined **BetzBeyond**") print(f"**{member}** joined **BetzBeyond**") Viewer = discord.utils.get(member.guild.roles, id=664757537488502804) await Member.add_roles(member, Viewer) @client.event async def on_member_remove(member): print(f"{member} left BetzBeyond") twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{member}** has left a server") @client.event async def on_message(message): if 'nigga' in message.content.lower(): print(" PowerBot found bad word") await message.channel.purge(limit=1) powerbot_console_log = client.get_channel(791852159519686666) author = message.author await powerbot_console_log.send(f"PowerBot found a bad word from **{author}**") if 'rape' in message.content.lower(): print(" PowerBot found bad word") await message.channel.purge(limit=1) powerbot_console_log = client.get_channel(791852159519686666) author = message.author await powerbot_console_log.send(f"PowerBot found a bad word from **{author}**") if 'nigger' in message.content.lower(): print(" PowerBot found bad word") await message.channel.purge(limit=1) powerbot_console_log = client.get_channel(791852159519686666) author = 
message.author await powerbot_console_log.send(f"PowerBot found a bad word from **{author}**") if 'niger' in message.content.lower(): print(" PowerBot found bad word") await message.channel.purge(limit=1) powerbot_console_log = client.get_channel(791852159519686666) author = message.author await powerbot_console_log.send(f"PowerBot found a bad word from **{author}**") if 'n1gg3r' in message.content.lower(): print(" PowerBot found bad word") await message.channel.purge(limit=1) powerbot_console_log = client.get_channel(791852159519686666) author = message.author await powerbot_console_log.send(f"PowerBot found a bad word from **{author}**") if 'nigg3r' in message.content.lower(): print(" PowerBot found bad word") await message.channel.purge(limit=1) powerbot_console_log = client.get_channel(791852159519686666) author = message.author await powerbot_console_log.send(f"PowerBot found a bad word from **{author}**") if 'n1gger' in message.content.lower(): print(" PowerBot found bad word") await message.channel.purge(limit=1) powerbot_console_log = client.get_channel(791852159519686666) author = message.author await powerbot_console_log.send(f"PowerBot found a bad word from **{author}**") await client.process_commands(message) <EMAIL> #async def betz_advertisement(): # await client.wait_until_ready() # betz_ad = client.get_channel(529838468063559687) # while not client.is_closed(): # await asyncio.sleep(86400) #86400 = 24 hours # await betz_ad.send("Come join **Betz Beyond** on Mixer https://mixer.com/BetzBeyond and **Minecraft Server** IP: **172.16.58.3:25582**") # powerbot_console_log = client.get_channel(791852159519686666) # await powerbot_console_log.send("Advertised Server For **Betz Beyond**") # Client Commands # @client.command() async def help(ctx): author = ctx.message.author.id twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{author}** did the **Help** Command") print(f"**{author}** did the **Help** Command") 
embed=discord.Embed(title="HELP", description="Help Menu For BetzBeyond", color=0x00ffff) embed.add_field(name="$about", value="About The Bot", inline=False) embed.add_field(name="$ping", value="Check Ping Status", inline=False) embed.add_field(name="$status", value="Check Bot Server Status", inline=False) embed.add_field(name="$stream", value="Get the BetzBeyond Stream Link", inline=False) embed.add_field(name="$ban", value="Ban Members", inline=False) embed.add_field(name="$kick", value="Kick Members", inline=False) embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) @client.command() async def ping(ctx): author = ctx.message.author.name print(f"{author} entered ping command") twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{author}** entered **Ping** Command") embed=discord.Embed(title="PING", description=f"Testing Network Latency For **{author}**", color=0xfea730) embed.add_field(name=f"Your Ping Is", value=f"**{round(client.latency * 1000)}**", inline=False) embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) @client.command() async def about(ctx): author = ctx.message.author.name print(f"{author} Entered About Command") twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{author}** Entered **About** Command") embed=discord.Embed(title="Created By: -{ Power1482 }-#0101 for BetzBeyond", description="Version: **1.0**", color=0xfea730) embed.add_field(name="Website: ", value="https://powerthecoder.xyz", inline=False) embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) @client.command() async def status(ctx): author = ctx.message.author.name print(f"Status command ran by {author}") twilight_console_log = client.get_channel(791852159519686666) await 
twilight_console_log.send(f"**{author}** ran the **status** command") amm_server = 0 for guild in client.guilds: amm_server += 1 T2 = datetime.now() time_total = T2 - StartTime embed=discord.Embed(title="Server Status: **online**", color=0xfea730) embed.set_author(name="Server Status") embed.add_field(name="Ammount Of Servers In ", value=amm_server, inline=False) embed.add_field(name="Server Version ", value="0.0", inline=False) embed.add_field(name="Uptime", value=time_total, inline=False) embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) @client.command() async def stream(ctx): author = ctx.message.author.name print(f"{author} ran the stream command") twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{author}** ran the **Stream** command") embed=discord.Embed(title="Stream", url="https://mixer.com/BetzBeyond", description="**Mixer Stream Link**", color=0x00ffff) embed.set_author(name="Find BetzBeyond On Mixer") embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) # ADMIN COMMANDS # @client.command(pass_context=True) @has_permissions(ban_members=True) async def ban(ctx, user_name: discord.Member, *, reason=None): author = ctx.message.author.name print(f'The Ban Command Ran by {author}') embed=discord.Embed(title="BANNED", discription=f"**{user_name}** got **banned** as they have done something wrong", color=0xff0000) embed.add_field(name="Member", value=f"**{user_name}** got **Banned**") embed.add_field(name="Opperator", value=f"{author}") embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) await user_name.ban(reason=reason) print(f'{user_name} got Banned') twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{user_name}** got **banned**") @ban.error async def ban_error(ctx, 
error): if isinstance(error, commands.BadArgument): embed=discord.Embed(title="You are not an Admin", description="Please Contact Administrators If you think this is incorrect", color=0xff0000) embed.set_author(name="ERROR Command") embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) else: await ctx.send(">>> command is `$ban <client>` Replace `client` with the user you want to ban ") @client.command(pass_context=True) @has_permissions(kick_members=True) async def kick(ctx, *, user_name: discord.Member, reason=None): author = ctx.message.author.name print(f'The Kick Command Ran by {author}') embed=discord.Embed(title="KICKED", discription=f"**{user_name}** got **Kicked** as they have done something wrong", color=0xff0000) embed.add_field(name="Member", value=f"**{user_name}** got **Kicked**") embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) await user_name.kick() asyncio.sleep(2) print(f"{user_name} got Kicked") twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{user_name}** got **kicked**") @kick.error async def kick_error(ctx, error): if isinstance(error, commands.BadArgument): embed=discord.Embed(title="You are not an Admin", description="Please Contact Administrators If you think this is incorrect", color=0xff0000) embed.set_author(name="ERROR Command") embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) else: await ctx.send(">>> command is `$kick <client>` Replace `client` with the user you want to kick ") @client.command(pass_context=True) async def say(ctx, *, args): if (ctx.author.id == 2<PASSWORD>) or (ctx.author.id == 2<PASSWORD>): await asyncio.sleep(0.5) await ctx.channel.purge(limit=1) await asyncio.sleep(0.5) embed=discord.Embed(title=args, description="\u200b", color=0xfea730) embed.set_footer(text="Bot 
Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) twilight_console_log = client.get_channel(791852159519686666) author = ctx.message.author.name await twilight_console_log.send(f"**{author}** ran the **Say** command") else: embed=discord.Embed(title="**-{ Power1482 }-#0101** and **Betz Beyond#2225** are my owner", color=0xff0000) embed.set_author(name="You Are Not My Owner") embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101 \nhttps://powerthecoder.xyz") await ctx.send(embed=embed) print(f"{author} tried to use Say command and Failed") @client.command(pass_context=True) async def mod(ctx, user_name: discord.Member): author = ctx.message.author.id mod_role = discord.utils.get(Member.guild.roles, id=0) await user_name.add_roles(user_name, mod_role) twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{author}** added **Moderator** to **{user_name}**") print(f"**{author}** added **Moderator** to **{user_name}**") @client.command(pass_context=True) async def admin(ctx, user_name: discord.Member): author = ctx.message.author.id mod_role = discord.utils.get(Member.guild.roles, id=0) await user_name.add_roles(user_name, mod_role) twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{author}** added **Admin** to **{user_name}**") print(f"**{author}** added **Admin** to **{user_name}**") @client.command(pass_context=True) async def mode(ctx, arg1): author = ctx.message.author.id if(ctx.author.id == <PASSWORD>): if(arg1 == "testing".lower()) or (arg1 == "test".lower()) or (arg1 == "dev".lower()): await client.change_presence(status=discord.Status.dnd, activity=discord.Game('Testing')) else: await client.change_presence(status=discord.Status.online, activity=discord.Game("mixer.com/BetzBeyond")) else: embed=discord.Embed(title="-{ Power1482 }-#0101 is my owner", color=0xff0000) embed.set_author(name="You Are Not My Owner") 
embed.set_footer(text="Bot Created By: -{ Power1482 }-#0101") await ctx.send(embed=embed) print(f"{author} tried to update my Presence and FAILED") twilight_console_log = client.get_channel(791852159519686666) await twilight_console_log.send(f"**{author}** Tried To Change Status @everyone") #client.loop.create_task(betz_advertisement()) client.run(Token)
en
0.351845
# Created By: <NAME> # Discord: -{ Power1482 }-#0101 # https://powerthecoder.xyz/ # BetzBeyond ID: 210260301696729088 # Powerlt1482 ID: 255876083918831616 # TwilightLogs: 791852159519686666 # Betz Logs: THIS # General: 529838468063559687 #betzbeyond_console_log = client.get_channel(THIS) # Bot Events # #await client.change_presence(status=discord.Status.dnd, activity=discord.Game('Testing')) #0101 \nhttps://powerthecoder.xyz") #async def betz_advertisement(): # await client.wait_until_ready() # betz_ad = client.get_channel(529838468063559687) # while not client.is_closed(): # await asyncio.sleep(86400) #86400 = 24 hours # await betz_ad.send("Come join **Betz Beyond** on Mixer https://mixer.com/BetzBeyond and **Minecraft Server** IP: **172.16.58.3:25582**") # powerbot_console_log = client.get_channel(791852159519686666) # await powerbot_console_log.send("Advertised Server For **Betz Beyond**") # Client Commands # #0101 \nhttps://powerthecoder.xyz") #0101 \nhttps://powerthecoder.xyz") #0101 for BetzBeyond", description="Version: **1.0**", color=0xfea730) #0101 \nhttps://powerthecoder.xyz") #0101 \nhttps://powerthecoder.xyz") #0101 \nhttps://powerthecoder.xyz") # ADMIN COMMANDS # #0101 \nhttps://powerthecoder.xyz") #0101 \nhttps://powerthecoder.xyz") #0101 \nhttps://powerthecoder.xyz") #0101 \nhttps://powerthecoder.xyz") #0101 \nhttps://powerthecoder.xyz") #0101** and **Betz Beyond#2225** are my owner", color=0xff0000) #0101 \nhttps://powerthecoder.xyz") #0101 is my owner", color=0xff0000) #0101") #client.loop.create_task(betz_advertisement())
2.354393
2
project/migrations/versions/90821cd8db49_add_daya_output_selorejo_to_sms1_sms2.py
Firdaus212/pjb
0
6628654
"""Add daya_output_selorejo to SMS1 & SMS2 Revision ID: <KEY> Revises: <PASSWORD> Create Date: 2021-01-24 21:37:45.132094 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '<KEY>' down_revision = '9a35d11d8f12' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('sm_s1', sa.Column('daya_output_selorejo', sa.Float(), nullable=True)) op.add_column('sm_s2', sa.Column('daya_output_selorejo', sa.Float(), nullable=True)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('sm_s2', 'daya_output_selorejo') op.drop_column('sm_s1', 'daya_output_selorejo') # ### end Alembic commands ###
"""Add daya_output_selorejo to SMS1 & SMS2 Revision ID: <KEY> Revises: <PASSWORD> Create Date: 2021-01-24 21:37:45.132094 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '<KEY>' down_revision = '9a35d11d8f12' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('sm_s1', sa.Column('daya_output_selorejo', sa.Float(), nullable=True)) op.add_column('sm_s2', sa.Column('daya_output_selorejo', sa.Float(), nullable=True)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('sm_s2', 'daya_output_selorejo') op.drop_column('sm_s1', 'daya_output_selorejo') # ### end Alembic commands ###
en
0.458677
Add daya_output_selorejo to SMS1 & SMS2 Revision ID: <KEY> Revises: <PASSWORD> Create Date: 2021-01-24 21:37:45.132094 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ###
1.198672
1
pandas-profiling-master/pandas-profiling-master/pandas_profiling/plot.py
PedroLeonel17/praticaempesquisa
3
6628655
<filename>pandas-profiling-master/pandas-profiling-master/pandas_profiling/plot.py # -*- coding: utf-8 -*- """Plot distribution of datasets""" import base64 from distutils.version import LooseVersion import pandas_profiling.base as base import matplotlib import numpy as np from matplotlib.colors import ListedColormap import missingno as msno # Fix #68, this call is not needed and brings side effects in some use cases # Backend name specifications are not case-sensitive; e.g., ‘GTKAgg’ and ‘gtkagg’ are equivalent. # See https://matplotlib.org/faq/usage_faq.html#what-is-a-backend BACKEND = matplotlib.get_backend() if matplotlib.get_backend().lower() != BACKEND.lower(): # If backend is not set properly a call to describe will hang matplotlib.use(BACKEND) from matplotlib import pyplot as plt try: from StringIO import BytesIO except ImportError: from io import BytesIO try: from urllib import quote except ImportError: from urllib.parse import quote def _plot_histogram(series, bins=10, figsize=(6, 4), facecolor='#337ab7'): """Plot an histogram from the data and return the AxesSubplot object. Parameters ---------- series : Series The data to plot figsize : tuple The size of the figure (width, height) in inches, default (6,4) facecolor : str The color code. Returns ------- matplotlib.AxesSubplot The plot. """ if base.get_vartype(series) == base.TYPE_DATE: # TODO: These calls should be merged fig = plt.figure(figsize=figsize) plot = fig.add_subplot(111) plot.set_ylabel('Frequency') try: plot.hist(series.dropna().values, facecolor=facecolor, bins=bins) except TypeError: # matplotlib 1.4 can't plot dates so will show empty plot instead pass else: plot = series.plot(kind='hist', figsize=figsize, facecolor=facecolor, bins=bins) # TODO when running on server, send this off to a different thread return plot def histogram(series, **kwargs): """Plot an histogram of the data. Parameters ---------- series: Series The data to plot. 
Returns ------- str The resulting image encoded as a string. """ imgdata = BytesIO() plot = _plot_histogram(series, **kwargs) plot.figure.subplots_adjust(left=0.15, right=0.95, top=0.9, bottom=0.1, wspace=0, hspace=0) plot.figure.savefig(imgdata) imgdata.seek(0) result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue())) # TODO Think about writing this to disk instead of caching them in strings plt.close(plot.figure) return result_string def mini_histogram(series, **kwargs): """Plot a small (mini) histogram of the data. Parameters ---------- series: Series The data to plot. Returns ------- str The resulting image encoded as a string. """ imgdata = BytesIO() #plot = _plot_histogram(series, figsize=(2, 0.75), **kwargs) plot = _plot_histogram(series, figsize=(4, 2), **kwargs) #plot.axes.get_yaxis().set_visible(False) if LooseVersion(matplotlib.__version__) <= '1.5.9': plot.set_axis_bgcolor("w") else: plot.set_facecolor("w") xticks = plot.xaxis.get_major_ticks() #for tick in xticks[1:-1]: # tick.set_visible(False) # tick.label.set_visible(False) for tick in (xticks[0], xticks[-1]): tick.label.set_fontsize(8) every_nth = 2 for n, label in enumerate(plot.xaxis.get_ticklabels()): if n % every_nth == 0: label.set_visible(False) #plot.figure.subplots_adjust(left=0.15, right=0.85, top=1, bottom=0.35, wspace=0, hspace=0) plot.figure.subplots_adjust(left=0.2, right=0.95, top=0.95 , wspace=0, hspace=0) plot.figure.savefig(imgdata) imgdata.seek(0) result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue())) plt.close(plot.figure) return result_string def correlation_matrix(corrdf, title, **kwargs): """Plot image of a matrix correlation. Parameters ---------- corrdf: DataFrame The matrix correlation to plot. title: str The matrix title Returns ------- str, The resulting image encoded as a string. 
""" imgdata = BytesIO() fig_cor, axes_cor = plt.subplots(1, 1) labels = corrdf.columns N = 256 blues = np.ones((N, 4)) blues[:, 0] = np.linspace(1, 66/256, N) blues[:, 1] = np.linspace(1, 136/256, N) blues[:, 2] = np.linspace(1, 181/256, N) reds = np.ones((N, 4)) reds[:, 0] = np.linspace(209/256, 1, N) reds[:, 1] = np.linspace(60/256, 1, N) reds[:, 2] = np.linspace(75/256, 1, N) newcmp = ListedColormap(np.concatenate((reds, blues))) matrix_image = axes_cor.imshow(corrdf, vmin=-1, vmax=1, interpolation="nearest", cmap=newcmp) plt.title(title, size=18) plt.colorbar(matrix_image) axes_cor.set_xticks(np.arange(0, corrdf.shape[0], corrdf.shape[0] * 1.0 / len(labels))) axes_cor.set_yticks(np.arange(0, corrdf.shape[1], corrdf.shape[1] * 1.0 / len(labels))) axes_cor.set_xticklabels(labels, rotation=90) axes_cor.set_yticklabels(labels) matrix_image.figure.savefig(imgdata, bbox_inches='tight') imgdata.seek(0) result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue())) plt.close(matrix_image.figure) return result_string def missing_matrix(df): """Plot a missingno matrix Parameters ---------- df: DataFrame The dataframe. Returns ------- str The resulting image encoded as a string. """ imgdata = BytesIO() plot = msno.matrix(df) plot.figure.savefig(imgdata) imgdata.seek(0) result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue())) plt.close(plot.figure) return result_string def missing_bar(df): """Plot a missingno bar chart Parameters ---------- df: DataFrame The dataframe. Returns ------- str The resulting image encoded as a string. """ imgdata = BytesIO() plot = msno.bar(df) plot.figure.savefig(imgdata) imgdata.seek(0) result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue())) plt.close(plot.figure) return result_string def missing_heat(df): """Plot a missingno heat map Parameters ---------- df: DataFrame The dataframe. Returns ------- str The resulting image encoded as a string. 
""" imgdata = BytesIO() plot = msno.heatmap(df) plot.figure.savefig(imgdata) imgdata.seek(0) result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue())) plt.close(plot.figure) return result_string def missing_dendrogram(df): """Plot a missingno dendrogram Parameters ---------- df: DataFrame The dataframe. Returns ------- str The resulting image encoded as a string. """ imgdata = BytesIO() plot = msno.dendrogram(df) plot.figure.savefig(imgdata) imgdata.seek(0) result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue())) plt.close(plot.figure) return result_string
<filename>pandas-profiling-master/pandas-profiling-master/pandas_profiling/plot.py # -*- coding: utf-8 -*- """Plot distribution of datasets""" import base64 from distutils.version import LooseVersion import pandas_profiling.base as base import matplotlib import numpy as np from matplotlib.colors import ListedColormap import missingno as msno # Fix #68, this call is not needed and brings side effects in some use cases # Backend name specifications are not case-sensitive; e.g., ‘GTKAgg’ and ‘gtkagg’ are equivalent. # See https://matplotlib.org/faq/usage_faq.html#what-is-a-backend BACKEND = matplotlib.get_backend() if matplotlib.get_backend().lower() != BACKEND.lower(): # If backend is not set properly a call to describe will hang matplotlib.use(BACKEND) from matplotlib import pyplot as plt try: from StringIO import BytesIO except ImportError: from io import BytesIO try: from urllib import quote except ImportError: from urllib.parse import quote def _plot_histogram(series, bins=10, figsize=(6, 4), facecolor='#337ab7'): """Plot an histogram from the data and return the AxesSubplot object. Parameters ---------- series : Series The data to plot figsize : tuple The size of the figure (width, height) in inches, default (6,4) facecolor : str The color code. Returns ------- matplotlib.AxesSubplot The plot. """ if base.get_vartype(series) == base.TYPE_DATE: # TODO: These calls should be merged fig = plt.figure(figsize=figsize) plot = fig.add_subplot(111) plot.set_ylabel('Frequency') try: plot.hist(series.dropna().values, facecolor=facecolor, bins=bins) except TypeError: # matplotlib 1.4 can't plot dates so will show empty plot instead pass else: plot = series.plot(kind='hist', figsize=figsize, facecolor=facecolor, bins=bins) # TODO when running on server, send this off to a different thread return plot def histogram(series, **kwargs): """Plot an histogram of the data. Parameters ---------- series: Series The data to plot. 
Returns ------- str The resulting image encoded as a string. """ imgdata = BytesIO() plot = _plot_histogram(series, **kwargs) plot.figure.subplots_adjust(left=0.15, right=0.95, top=0.9, bottom=0.1, wspace=0, hspace=0) plot.figure.savefig(imgdata) imgdata.seek(0) result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue())) # TODO Think about writing this to disk instead of caching them in strings plt.close(plot.figure) return result_string def mini_histogram(series, **kwargs): """Plot a small (mini) histogram of the data. Parameters ---------- series: Series The data to plot. Returns ------- str The resulting image encoded as a string. """ imgdata = BytesIO() #plot = _plot_histogram(series, figsize=(2, 0.75), **kwargs) plot = _plot_histogram(series, figsize=(4, 2), **kwargs) #plot.axes.get_yaxis().set_visible(False) if LooseVersion(matplotlib.__version__) <= '1.5.9': plot.set_axis_bgcolor("w") else: plot.set_facecolor("w") xticks = plot.xaxis.get_major_ticks() #for tick in xticks[1:-1]: # tick.set_visible(False) # tick.label.set_visible(False) for tick in (xticks[0], xticks[-1]): tick.label.set_fontsize(8) every_nth = 2 for n, label in enumerate(plot.xaxis.get_ticklabels()): if n % every_nth == 0: label.set_visible(False) #plot.figure.subplots_adjust(left=0.15, right=0.85, top=1, bottom=0.35, wspace=0, hspace=0) plot.figure.subplots_adjust(left=0.2, right=0.95, top=0.95 , wspace=0, hspace=0) plot.figure.savefig(imgdata) imgdata.seek(0) result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue())) plt.close(plot.figure) return result_string def correlation_matrix(corrdf, title, **kwargs): """Plot image of a matrix correlation. Parameters ---------- corrdf: DataFrame The matrix correlation to plot. title: str The matrix title Returns ------- str, The resulting image encoded as a string. 
""" imgdata = BytesIO() fig_cor, axes_cor = plt.subplots(1, 1) labels = corrdf.columns N = 256 blues = np.ones((N, 4)) blues[:, 0] = np.linspace(1, 66/256, N) blues[:, 1] = np.linspace(1, 136/256, N) blues[:, 2] = np.linspace(1, 181/256, N) reds = np.ones((N, 4)) reds[:, 0] = np.linspace(209/256, 1, N) reds[:, 1] = np.linspace(60/256, 1, N) reds[:, 2] = np.linspace(75/256, 1, N) newcmp = ListedColormap(np.concatenate((reds, blues))) matrix_image = axes_cor.imshow(corrdf, vmin=-1, vmax=1, interpolation="nearest", cmap=newcmp) plt.title(title, size=18) plt.colorbar(matrix_image) axes_cor.set_xticks(np.arange(0, corrdf.shape[0], corrdf.shape[0] * 1.0 / len(labels))) axes_cor.set_yticks(np.arange(0, corrdf.shape[1], corrdf.shape[1] * 1.0 / len(labels))) axes_cor.set_xticklabels(labels, rotation=90) axes_cor.set_yticklabels(labels) matrix_image.figure.savefig(imgdata, bbox_inches='tight') imgdata.seek(0) result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue())) plt.close(matrix_image.figure) return result_string def missing_matrix(df): """Plot a missingno matrix Parameters ---------- df: DataFrame The dataframe. Returns ------- str The resulting image encoded as a string. """ imgdata = BytesIO() plot = msno.matrix(df) plot.figure.savefig(imgdata) imgdata.seek(0) result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue())) plt.close(plot.figure) return result_string def missing_bar(df): """Plot a missingno bar chart Parameters ---------- df: DataFrame The dataframe. Returns ------- str The resulting image encoded as a string. """ imgdata = BytesIO() plot = msno.bar(df) plot.figure.savefig(imgdata) imgdata.seek(0) result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue())) plt.close(plot.figure) return result_string def missing_heat(df): """Plot a missingno heat map Parameters ---------- df: DataFrame The dataframe. Returns ------- str The resulting image encoded as a string. 
""" imgdata = BytesIO() plot = msno.heatmap(df) plot.figure.savefig(imgdata) imgdata.seek(0) result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue())) plt.close(plot.figure) return result_string def missing_dendrogram(df): """Plot a missingno dendrogram Parameters ---------- df: DataFrame The dataframe. Returns ------- str The resulting image encoded as a string. """ imgdata = BytesIO() plot = msno.dendrogram(df) plot.figure.savefig(imgdata) imgdata.seek(0) result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue())) plt.close(plot.figure) return result_string
en
0.656554
# -*- coding: utf-8 -*- Plot distribution of datasets # Fix #68, this call is not needed and brings side effects in some use cases # Backend name specifications are not case-sensitive; e.g., ‘GTKAgg’ and ‘gtkagg’ are equivalent. # See https://matplotlib.org/faq/usage_faq.html#what-is-a-backend # If backend is not set properly a call to describe will hang Plot an histogram from the data and return the AxesSubplot object. Parameters ---------- series : Series The data to plot figsize : tuple The size of the figure (width, height) in inches, default (6,4) facecolor : str The color code. Returns ------- matplotlib.AxesSubplot The plot. # TODO: These calls should be merged # matplotlib 1.4 can't plot dates so will show empty plot instead # TODO when running on server, send this off to a different thread Plot an histogram of the data. Parameters ---------- series: Series The data to plot. Returns ------- str The resulting image encoded as a string. # TODO Think about writing this to disk instead of caching them in strings Plot a small (mini) histogram of the data. Parameters ---------- series: Series The data to plot. Returns ------- str The resulting image encoded as a string. #plot = _plot_histogram(series, figsize=(2, 0.75), **kwargs) #plot.axes.get_yaxis().set_visible(False) #for tick in xticks[1:-1]: # tick.set_visible(False) # tick.label.set_visible(False) #plot.figure.subplots_adjust(left=0.15, right=0.85, top=1, bottom=0.35, wspace=0, hspace=0) Plot image of a matrix correlation. Parameters ---------- corrdf: DataFrame The matrix correlation to plot. title: str The matrix title Returns ------- str, The resulting image encoded as a string. Plot a missingno matrix Parameters ---------- df: DataFrame The dataframe. Returns ------- str The resulting image encoded as a string. Plot a missingno bar chart Parameters ---------- df: DataFrame The dataframe. Returns ------- str The resulting image encoded as a string. 
Plot a missingno heat map Parameters ---------- df: DataFrame The dataframe. Returns ------- str The resulting image encoded as a string. Plot a missingno dendrogram Parameters ---------- df: DataFrame The dataframe. Returns ------- str The resulting image encoded as a string.
2.369086
2
ex024.py
ArthurCorrea/python-exercises
0
6628656
<reponame>ArthurCorrea/python-exercises<filename>ex024.py # Crie um programa que leia o nome de uma cidade e diga se ela começa ou não # com a palavra "Santo" name = str(input('Em qual cidade você mora? ')).strip() print(name[:5].lower() == 'santo')
# Crie um programa que leia o nome de uma cidade e diga se ela começa ou não # com a palavra "Santo" name = str(input('Em qual cidade você mora? ')).strip() print(name[:5].lower() == 'santo')
pt
0.999977
# Crie um programa que leia o nome de uma cidade e diga se ela começa ou não # com a palavra "Santo"
3.900018
4
chapters/ch01/hello-sushil.py
vermanotes/learning-python
0
6628657
#!/usr/bin/python print('Hello, Sushil!')
#!/usr/bin/python print('Hello, Sushil!')
ru
0.258958
#!/usr/bin/python
1.314214
1
Unscramble_Computer_Science_Problems/Task3.py
nalbert9/DataStructures-Algorithms
0
6628658
""" Read file into texts and calls. It's ok if you don't understand how to read files. """ import re import csv with open('texts.csv', 'r') as f: reader = csv.reader(f) texts = list(reader) with open('calls.csv', 'r') as f: reader = csv.reader(f) calls = list(reader) """ TASK 3: (080) is the area code for fixed line telephones in Bangalore. Fixed line numbers include parentheses, so Bangalore numbers have the form (080)xxxxxxx.) Part A: Find all of the area codes and mobile prefixes called by people in Bangalore. - Fixed lines start with an area code enclosed in brackets. The area codes vary in length but always begin with 0. - Mobile numbers have no parentheses, but have a space in the middle of the number to help readability. The prefix of a mobile number is its first four digits, and they always start with 7, 8 or 9. - Telemarketers' numbers have no parentheses or space, but they start with the area code 140. Print the answer as part of a message: "The numbers called by people in Bangalore have codes:" <list of codes> The list of codes should be print out one per line in lexicographic order with no duplicates. Part B: What percentage of calls from fixed lines in Bangalore are made to fixed lines also in Bangalore? In other words, of all the calls made from a number starting with "(080)", what percentage of these calls were made to a number also starting with "(080)"? Print the answer as a part of a message:: "<percentage> percent of calls from fixed lines in Bangalore are calls to other fixed lines in Bangalore." 
The percentage should have 2 decimal digits """ # Part A bangalore_num = [call[0] for call in calls if call[0].startswith("(080)")] receivers_list = [] for num in bangalore_num: for call in calls: if num == call[0]: receivers_list.append(call[1]) area_codes = set() total_bangalore = 0 tolal_fixed_lines = 0 for call in receivers_list: # Fixed line if call[:2] == "(0": area_codes.add(re.search(r'(\(.*?\))', call).group(1)) # Telemarketers if call.startswith =="140": area_codes.add(call[:4]) # Mobile number if (" " in call) and call.startswith(("7", "8", "9")): area_codes.add(call[:4]) print("The numbers called by people in Bangalore have codes: {}".format(sorted(area_codes))) # Part B inter_bangalore = 0 for call in calls: if call[0].startswith("(080)") and call[1].startswith("(080)"): inter_bangalore+=1 total_bangalore = len(bangalore_num) print("{:.2f} percent of calls from fixed lines in Bangalore are calls\ to other fixed lines in Bangalore.".format(inter_bangalore/total_bangalore*100))
""" Read file into texts and calls. It's ok if you don't understand how to read files. """ import re import csv with open('texts.csv', 'r') as f: reader = csv.reader(f) texts = list(reader) with open('calls.csv', 'r') as f: reader = csv.reader(f) calls = list(reader) """ TASK 3: (080) is the area code for fixed line telephones in Bangalore. Fixed line numbers include parentheses, so Bangalore numbers have the form (080)xxxxxxx.) Part A: Find all of the area codes and mobile prefixes called by people in Bangalore. - Fixed lines start with an area code enclosed in brackets. The area codes vary in length but always begin with 0. - Mobile numbers have no parentheses, but have a space in the middle of the number to help readability. The prefix of a mobile number is its first four digits, and they always start with 7, 8 or 9. - Telemarketers' numbers have no parentheses or space, but they start with the area code 140. Print the answer as part of a message: "The numbers called by people in Bangalore have codes:" <list of codes> The list of codes should be print out one per line in lexicographic order with no duplicates. Part B: What percentage of calls from fixed lines in Bangalore are made to fixed lines also in Bangalore? In other words, of all the calls made from a number starting with "(080)", what percentage of these calls were made to a number also starting with "(080)"? Print the answer as a part of a message:: "<percentage> percent of calls from fixed lines in Bangalore are calls to other fixed lines in Bangalore." 
The percentage should have 2 decimal digits """ # Part A bangalore_num = [call[0] for call in calls if call[0].startswith("(080)")] receivers_list = [] for num in bangalore_num: for call in calls: if num == call[0]: receivers_list.append(call[1]) area_codes = set() total_bangalore = 0 tolal_fixed_lines = 0 for call in receivers_list: # Fixed line if call[:2] == "(0": area_codes.add(re.search(r'(\(.*?\))', call).group(1)) # Telemarketers if call.startswith =="140": area_codes.add(call[:4]) # Mobile number if (" " in call) and call.startswith(("7", "8", "9")): area_codes.add(call[:4]) print("The numbers called by people in Bangalore have codes: {}".format(sorted(area_codes))) # Part B inter_bangalore = 0 for call in calls: if call[0].startswith("(080)") and call[1].startswith("(080)"): inter_bangalore+=1 total_bangalore = len(bangalore_num) print("{:.2f} percent of calls from fixed lines in Bangalore are calls\ to other fixed lines in Bangalore.".format(inter_bangalore/total_bangalore*100))
en
0.940843
Read file into texts and calls. It's ok if you don't understand how to read files. TASK 3: (080) is the area code for fixed line telephones in Bangalore. Fixed line numbers include parentheses, so Bangalore numbers have the form (080)xxxxxxx.) Part A: Find all of the area codes and mobile prefixes called by people in Bangalore. - Fixed lines start with an area code enclosed in brackets. The area codes vary in length but always begin with 0. - Mobile numbers have no parentheses, but have a space in the middle of the number to help readability. The prefix of a mobile number is its first four digits, and they always start with 7, 8 or 9. - Telemarketers' numbers have no parentheses or space, but they start with the area code 140. Print the answer as part of a message: "The numbers called by people in Bangalore have codes:" <list of codes> The list of codes should be print out one per line in lexicographic order with no duplicates. Part B: What percentage of calls from fixed lines in Bangalore are made to fixed lines also in Bangalore? In other words, of all the calls made from a number starting with "(080)", what percentage of these calls were made to a number also starting with "(080)"? Print the answer as a part of a message:: "<percentage> percent of calls from fixed lines in Bangalore are calls to other fixed lines in Bangalore." The percentage should have 2 decimal digits # Part A # Fixed line # Telemarketers # Mobile number # Part B
4.188986
4
tests/test_quest.py
juhi-09/qds-sdk-py
0
6628659
<filename>tests/test_quest.py from __future__ import print_function from test_base import QdsCliTestCase from test_base import print_command from qds_sdk.pipelines import PipelinesCode from qds_sdk.connection import Connection import qds from mock import * import sys import os if sys.version_info > (2, 7, 0): import unittest else: import unittest2 as unittest sys.path.append(os.path.join(os.path.dirname(__file__), '../bin')) class TestQuestList(QdsCliTestCase): def test_list_pipeline(self): sys.argv = ['qds.py', 'pipelines', 'list', '--pipeline-status', 'draft'] print_command() Connection._api_call = Mock(return_value={}) params = {'filter': "draft"} qds.main() Connection._api_call.assert_called_with( "GET", "pipelines", params=params) def test_pause_pipeline(self): sys.argv = ['qds.py', 'pipelines', 'pause', '--pipeline-id', '153'] print_command() Connection._api_call = Mock(return_value={}) qds.main() Connection._api_call.assert_called_with( "PUT", "pipelines/153/pause", None) def test_clone_pipeline(self): sys.argv = ['qds.py', 'pipelines', 'clone', '--pipeline-id', '153'] print_command() Connection._api_call = Mock(return_value={}) qds.main() Connection._api_call.assert_called_with( "POST", "pipelines/153/duplicate", None) def test_archive_pipeline(self): sys.argv = ['qds.py', 'pipelines', 'archive', '--pipeline-id', '153'] print_command() Connection._api_call = Mock(return_value={}) qds.main() Connection._api_call.assert_called_with( "PUT", "pipelines/153/archive", None) def test_delete_pipeline(self): sys.argv = ['qds.py', 'pipelines', 'delete', '--pipeline-id', '153'] print_command() Connection._api_call = Mock(return_value={}) qds.main() Connection._api_call.assert_called_with( "PUT", "pipelines/153/delete", None) def test_create_pipeline(self): sys.argv = ['qds.py', 'pipelines', 'create', '--create-type', '3', '--pipeline-name', 'test_pipeline_name', '--cluster-label', 'spark', '-c', 'print("hello")', '--language', 'python', '--user-arguments', 
'users_argument'] print_command() d1 = {"data": {"attributes": {"name": "test_pipeline_name", "status": "DRAFT", "create_type": 3}, "type": "pipelines"}} response = {"relationships": {"nodes": [], "alerts": []}, "included": [], "meta": {"command_details": {"code": "print(\"hello\")", "language": "python"}, "properties": {"checkpoint_location": None, "trigger_interval": None, "command_line_options": """--conf spark.driver.extraLibraryPath=/usr/lib/hadoop2/lib/native\n--conf spark.eventLog.compress=true\n--conf spark.eventLog.enabled=true\n--conf spark.sql.streaming.qubole.enableStreamingEvents=true\n--conf spark.qubole.event.enabled=true""", "cluster_label": "spark", "jar_path": None, "user_arguments": "users_argument", "main_class_name": None, "can_retry": True, "is_monitoring_enabled": True}, "query_hist": None, "cluster_id": None}, "data": {"id": 1, "type": "pipeline", "attributes": {"name": "test_pipeline_name", "description": None, "status": "draft", "created_at": "2020-02-10T14:02:20Z", "updated_at": "2020-02-11T11:05:40Z", "cluster_label": "spark", "owner_name": "eam-airflow", "pipeline_instance_status": "draft", "create_type": 3, "health": "UNKNOWN"}}} PipelinesCode.pipeline_id = '1' PipelinesCode.pipeline_code = """print("helloworld")""" PipelinesCode.pipeline_name = "test_pipeline_name" d2 = {"data": {"attributes": {"cluster_label": "spark", "can_retry": True, "checkpoint_location": None, "trigger_interval": None, "output_mode": None, "command_line_options": """--conf spark.driver.extraLibraryPath=/usr/lib/hadoop2/lib/native\n--conf spark.eventLog.compress=true\n--conf spark.eventLog.enabled=true\n--conf spark.sql.streaming.qubole.enableStreamingEvents=true\n--conf spark.qubole.event.enabled=true"""}, "type": "pipeline/properties"}} d3 = {"data": { "attributes": {"create_type": 3, "user_arguments": "users_argument", "code": """print("hello")""", "language": "python"}}} Connection._api_call = Mock(return_value=response, any_order=False) qds.main() 
Connection._api_call.assert_has_calls( [call("POST", "pipelines?mode=wizard", d1), call("PUT", "pipelines/1/properties", d2), call("PUT", "pipelines/1/save_code", d3)]) if __name__ == '__main__': unittest.main()
<filename>tests/test_quest.py from __future__ import print_function from test_base import QdsCliTestCase from test_base import print_command from qds_sdk.pipelines import PipelinesCode from qds_sdk.connection import Connection import qds from mock import * import sys import os if sys.version_info > (2, 7, 0): import unittest else: import unittest2 as unittest sys.path.append(os.path.join(os.path.dirname(__file__), '../bin')) class TestQuestList(QdsCliTestCase): def test_list_pipeline(self): sys.argv = ['qds.py', 'pipelines', 'list', '--pipeline-status', 'draft'] print_command() Connection._api_call = Mock(return_value={}) params = {'filter': "draft"} qds.main() Connection._api_call.assert_called_with( "GET", "pipelines", params=params) def test_pause_pipeline(self): sys.argv = ['qds.py', 'pipelines', 'pause', '--pipeline-id', '153'] print_command() Connection._api_call = Mock(return_value={}) qds.main() Connection._api_call.assert_called_with( "PUT", "pipelines/153/pause", None) def test_clone_pipeline(self): sys.argv = ['qds.py', 'pipelines', 'clone', '--pipeline-id', '153'] print_command() Connection._api_call = Mock(return_value={}) qds.main() Connection._api_call.assert_called_with( "POST", "pipelines/153/duplicate", None) def test_archive_pipeline(self): sys.argv = ['qds.py', 'pipelines', 'archive', '--pipeline-id', '153'] print_command() Connection._api_call = Mock(return_value={}) qds.main() Connection._api_call.assert_called_with( "PUT", "pipelines/153/archive", None) def test_delete_pipeline(self): sys.argv = ['qds.py', 'pipelines', 'delete', '--pipeline-id', '153'] print_command() Connection._api_call = Mock(return_value={}) qds.main() Connection._api_call.assert_called_with( "PUT", "pipelines/153/delete", None) def test_create_pipeline(self): sys.argv = ['qds.py', 'pipelines', 'create', '--create-type', '3', '--pipeline-name', 'test_pipeline_name', '--cluster-label', 'spark', '-c', 'print("hello")', '--language', 'python', '--user-arguments', 
'users_argument'] print_command() d1 = {"data": {"attributes": {"name": "test_pipeline_name", "status": "DRAFT", "create_type": 3}, "type": "pipelines"}} response = {"relationships": {"nodes": [], "alerts": []}, "included": [], "meta": {"command_details": {"code": "print(\"hello\")", "language": "python"}, "properties": {"checkpoint_location": None, "trigger_interval": None, "command_line_options": """--conf spark.driver.extraLibraryPath=/usr/lib/hadoop2/lib/native\n--conf spark.eventLog.compress=true\n--conf spark.eventLog.enabled=true\n--conf spark.sql.streaming.qubole.enableStreamingEvents=true\n--conf spark.qubole.event.enabled=true""", "cluster_label": "spark", "jar_path": None, "user_arguments": "users_argument", "main_class_name": None, "can_retry": True, "is_monitoring_enabled": True}, "query_hist": None, "cluster_id": None}, "data": {"id": 1, "type": "pipeline", "attributes": {"name": "test_pipeline_name", "description": None, "status": "draft", "created_at": "2020-02-10T14:02:20Z", "updated_at": "2020-02-11T11:05:40Z", "cluster_label": "spark", "owner_name": "eam-airflow", "pipeline_instance_status": "draft", "create_type": 3, "health": "UNKNOWN"}}} PipelinesCode.pipeline_id = '1' PipelinesCode.pipeline_code = """print("helloworld")""" PipelinesCode.pipeline_name = "test_pipeline_name" d2 = {"data": {"attributes": {"cluster_label": "spark", "can_retry": True, "checkpoint_location": None, "trigger_interval": None, "output_mode": None, "command_line_options": """--conf spark.driver.extraLibraryPath=/usr/lib/hadoop2/lib/native\n--conf spark.eventLog.compress=true\n--conf spark.eventLog.enabled=true\n--conf spark.sql.streaming.qubole.enableStreamingEvents=true\n--conf spark.qubole.event.enabled=true"""}, "type": "pipeline/properties"}} d3 = {"data": { "attributes": {"create_type": 3, "user_arguments": "users_argument", "code": """print("hello")""", "language": "python"}}} Connection._api_call = Mock(return_value=response, any_order=False) qds.main() 
Connection._api_call.assert_has_calls( [call("POST", "pipelines?mode=wizard", d1), call("PUT", "pipelines/1/properties", d2), call("PUT", "pipelines/1/save_code", d3)]) if __name__ == '__main__': unittest.main()
en
0.209868
--conf spark.driver.extraLibraryPath=/usr/lib/hadoop2/lib/native\n--conf spark.eventLog.compress=true\n--conf spark.eventLog.enabled=true\n--conf spark.sql.streaming.qubole.enableStreamingEvents=true\n--conf spark.qubole.event.enabled=true print("helloworld") --conf spark.driver.extraLibraryPath=/usr/lib/hadoop2/lib/native\n--conf spark.eventLog.compress=true\n--conf spark.eventLog.enabled=true\n--conf spark.sql.streaming.qubole.enableStreamingEvents=true\n--conf spark.qubole.event.enabled=true print("hello")
2.349272
2
sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2018_04_01/models/_iot_hub_client_enums.py
vincenttran-msft/azure-sdk-for-python
1
6628660
<gh_stars>1-10 # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from enum import Enum from six import with_metaclass from azure.core import CaseInsensitiveEnumMeta class AccessRights(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The permissions assigned to the shared access policy. """ REGISTRY_READ = "RegistryRead" REGISTRY_WRITE = "RegistryWrite" SERVICE_CONNECT = "ServiceConnect" DEVICE_CONNECT = "DeviceConnect" REGISTRY_READ_REGISTRY_WRITE = "RegistryRead, RegistryWrite" REGISTRY_READ_SERVICE_CONNECT = "RegistryRead, ServiceConnect" REGISTRY_READ_DEVICE_CONNECT = "RegistryRead, DeviceConnect" REGISTRY_WRITE_SERVICE_CONNECT = "RegistryWrite, ServiceConnect" REGISTRY_WRITE_DEVICE_CONNECT = "RegistryWrite, DeviceConnect" SERVICE_CONNECT_DEVICE_CONNECT = "ServiceConnect, DeviceConnect" REGISTRY_READ_REGISTRY_WRITE_SERVICE_CONNECT = "RegistryRead, RegistryWrite, ServiceConnect" REGISTRY_READ_REGISTRY_WRITE_DEVICE_CONNECT = "RegistryRead, RegistryWrite, DeviceConnect" REGISTRY_READ_SERVICE_CONNECT_DEVICE_CONNECT = "RegistryRead, ServiceConnect, DeviceConnect" REGISTRY_WRITE_SERVICE_CONNECT_DEVICE_CONNECT = "RegistryWrite, ServiceConnect, DeviceConnect" REGISTRY_READ_REGISTRY_WRITE_SERVICE_CONNECT_DEVICE_CONNECT = "RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect" class Capabilities(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The capabilities and features enabled for the IoT hub. 
""" NONE = "None" DEVICE_MANAGEMENT = "DeviceManagement" class EndpointHealthStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The health status code of the endpoint """ UNKNOWN = "unknown" HEALTHY = "healthy" UNHEALTHY = "unhealthy" DEAD = "dead" class IotHubNameUnavailabilityReason(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The reason for unavailability. """ INVALID = "Invalid" ALREADY_EXISTS = "AlreadyExists" class IotHubScaleType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The type of the scaling enabled. """ AUTOMATIC = "Automatic" MANUAL = "Manual" NONE = "None" class IotHubSku(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The name of the SKU. """ F1 = "F1" S1 = "S1" S2 = "S2" S3 = "S3" B1 = "B1" B2 = "B2" B3 = "B3" class IotHubSkuTier(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The billing tier for the IoT hub. """ FREE = "Free" STANDARD = "Standard" BASIC = "Basic" class IpFilterActionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The desired action for requests captured by this rule. """ ACCEPT = "Accept" REJECT = "Reject" class JobStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The status of the job. """ UNKNOWN = "unknown" ENQUEUED = "enqueued" RUNNING = "running" COMPLETED = "completed" FAILED = "failed" CANCELLED = "cancelled" class JobType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The type of the job. """ UNKNOWN = "unknown" EXPORT = "export" IMPORT_ENUM = "import" BACKUP = "backup" READ_DEVICE_PROPERTIES = "readDeviceProperties" WRITE_DEVICE_PROPERTIES = "writeDeviceProperties" UPDATE_DEVICE_CONFIGURATION = "updateDeviceConfiguration" REBOOT_DEVICE = "rebootDevice" FACTORY_RESET_DEVICE = "factoryResetDevice" FIRMWARE_UPDATE = "firmwareUpdate" class OperationMonitoringLevel(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The operations monitoring level. 
""" NONE = "None" ERROR = "Error" INFORMATION = "Information" ERROR_INFORMATION = "Error, Information" class RouteErrorSeverity(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Severity of the route error """ ERROR = "error" WARNING = "warning" class RoutingSource(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The source that the routing rule is to be applied to, such as DeviceMessages. """ INVALID = "Invalid" DEVICE_MESSAGES = "DeviceMessages" TWIN_CHANGE_EVENTS = "TwinChangeEvents" DEVICE_LIFECYCLE_EVENTS = "DeviceLifecycleEvents" DEVICE_JOB_LIFECYCLE_EVENTS = "DeviceJobLifecycleEvents" class TestResultStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Result of testing route """ UNDEFINED = "undefined" FALSE = "false" TRUE = "true"
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from enum import Enum from six import with_metaclass from azure.core import CaseInsensitiveEnumMeta class AccessRights(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The permissions assigned to the shared access policy. """ REGISTRY_READ = "RegistryRead" REGISTRY_WRITE = "RegistryWrite" SERVICE_CONNECT = "ServiceConnect" DEVICE_CONNECT = "DeviceConnect" REGISTRY_READ_REGISTRY_WRITE = "RegistryRead, RegistryWrite" REGISTRY_READ_SERVICE_CONNECT = "RegistryRead, ServiceConnect" REGISTRY_READ_DEVICE_CONNECT = "RegistryRead, DeviceConnect" REGISTRY_WRITE_SERVICE_CONNECT = "RegistryWrite, ServiceConnect" REGISTRY_WRITE_DEVICE_CONNECT = "RegistryWrite, DeviceConnect" SERVICE_CONNECT_DEVICE_CONNECT = "ServiceConnect, DeviceConnect" REGISTRY_READ_REGISTRY_WRITE_SERVICE_CONNECT = "RegistryRead, RegistryWrite, ServiceConnect" REGISTRY_READ_REGISTRY_WRITE_DEVICE_CONNECT = "RegistryRead, RegistryWrite, DeviceConnect" REGISTRY_READ_SERVICE_CONNECT_DEVICE_CONNECT = "RegistryRead, ServiceConnect, DeviceConnect" REGISTRY_WRITE_SERVICE_CONNECT_DEVICE_CONNECT = "RegistryWrite, ServiceConnect, DeviceConnect" REGISTRY_READ_REGISTRY_WRITE_SERVICE_CONNECT_DEVICE_CONNECT = "RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect" class Capabilities(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The capabilities and features enabled for the IoT hub. 
""" NONE = "None" DEVICE_MANAGEMENT = "DeviceManagement" class EndpointHealthStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The health status code of the endpoint """ UNKNOWN = "unknown" HEALTHY = "healthy" UNHEALTHY = "unhealthy" DEAD = "dead" class IotHubNameUnavailabilityReason(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The reason for unavailability. """ INVALID = "Invalid" ALREADY_EXISTS = "AlreadyExists" class IotHubScaleType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The type of the scaling enabled. """ AUTOMATIC = "Automatic" MANUAL = "Manual" NONE = "None" class IotHubSku(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The name of the SKU. """ F1 = "F1" S1 = "S1" S2 = "S2" S3 = "S3" B1 = "B1" B2 = "B2" B3 = "B3" class IotHubSkuTier(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The billing tier for the IoT hub. """ FREE = "Free" STANDARD = "Standard" BASIC = "Basic" class IpFilterActionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The desired action for requests captured by this rule. """ ACCEPT = "Accept" REJECT = "Reject" class JobStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The status of the job. """ UNKNOWN = "unknown" ENQUEUED = "enqueued" RUNNING = "running" COMPLETED = "completed" FAILED = "failed" CANCELLED = "cancelled" class JobType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The type of the job. """ UNKNOWN = "unknown" EXPORT = "export" IMPORT_ENUM = "import" BACKUP = "backup" READ_DEVICE_PROPERTIES = "readDeviceProperties" WRITE_DEVICE_PROPERTIES = "writeDeviceProperties" UPDATE_DEVICE_CONFIGURATION = "updateDeviceConfiguration" REBOOT_DEVICE = "rebootDevice" FACTORY_RESET_DEVICE = "factoryResetDevice" FIRMWARE_UPDATE = "firmwareUpdate" class OperationMonitoringLevel(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The operations monitoring level. 
""" NONE = "None" ERROR = "Error" INFORMATION = "Information" ERROR_INFORMATION = "Error, Information" class RouteErrorSeverity(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Severity of the route error """ ERROR = "error" WARNING = "warning" class RoutingSource(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The source that the routing rule is to be applied to, such as DeviceMessages. """ INVALID = "Invalid" DEVICE_MESSAGES = "DeviceMessages" TWIN_CHANGE_EVENTS = "TwinChangeEvents" DEVICE_LIFECYCLE_EVENTS = "DeviceLifecycleEvents" DEVICE_JOB_LIFECYCLE_EVENTS = "DeviceJobLifecycleEvents" class TestResultStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Result of testing route """ UNDEFINED = "undefined" FALSE = "false" TRUE = "true"
en
0.796785
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- The permissions assigned to the shared access policy. The capabilities and features enabled for the IoT hub. The health status code of the endpoint The reason for unavailability. The type of the scaling enabled. The name of the SKU. The billing tier for the IoT hub. The desired action for requests captured by this rule. The status of the job. The type of the job. The operations monitoring level. Severity of the route error The source that the routing rule is to be applied to, such as DeviceMessages. Result of testing route
1.947902
2
src/py4hy/courses.py
uh-soco/pyhy
0
6628661
<reponame>uh-soco/pyhy<filename>src/py4hy/courses.py import requests import datetime _API_PATH = 'https://studies.helsinki.fi/api/search' def _collect( query ): page = 0 content = [] while True: query['page'] = page r = requests.get( _API_PATH , params = query ) data = r.json() content += data['hits'] page += 1 if len( content ) >= data['totalHits']: break return content ## a bit weird heuristic def _guess_study_year(): now = datetime.datetime.now() if now.month < 7: return now.year - 1 else: return now.year def _select_language( courses, lang ): _courses = [] for course in courses: for key, value in course.items(): if isinstance( value, dict ) and lang in value: course[ key ] = value[ lang ] _courses.append( course ) return _courses def search( search = '', lang = 'en', academic_year = _guess_study_year() ): courses = _collect( {'searchText': search, 'studyYear' : academic_year, 'lang' : lang } ) return _select_language( courses, lang ) def by_organisation( organisations = [], lang = 'en', academic_year = _guess_study_year() ): ## todo: can requests multiple organisations at the same time, but not easy to plug in with the collect implementation ret = [] for org in organisations: org = str( org ) if not org.startswith( 'hy-org-' ): org = 'hy-org-' + org ret += _collect( {'organisation': org, 'studyYear' : academic_year, 'lang' : lang } ) return ret
import requests import datetime _API_PATH = 'https://studies.helsinki.fi/api/search' def _collect( query ): page = 0 content = [] while True: query['page'] = page r = requests.get( _API_PATH , params = query ) data = r.json() content += data['hits'] page += 1 if len( content ) >= data['totalHits']: break return content ## a bit weird heuristic def _guess_study_year(): now = datetime.datetime.now() if now.month < 7: return now.year - 1 else: return now.year def _select_language( courses, lang ): _courses = [] for course in courses: for key, value in course.items(): if isinstance( value, dict ) and lang in value: course[ key ] = value[ lang ] _courses.append( course ) return _courses def search( search = '', lang = 'en', academic_year = _guess_study_year() ): courses = _collect( {'searchText': search, 'studyYear' : academic_year, 'lang' : lang } ) return _select_language( courses, lang ) def by_organisation( organisations = [], lang = 'en', academic_year = _guess_study_year() ): ## todo: can requests multiple organisations at the same time, but not easy to plug in with the collect implementation ret = [] for org in organisations: org = str( org ) if not org.startswith( 'hy-org-' ): org = 'hy-org-' + org ret += _collect( {'organisation': org, 'studyYear' : academic_year, 'lang' : lang } ) return ret
en
0.805633
## a bit weird heuristic ## todo: can requests multiple organisations at the same time, but not easy to plug in with the collect implementation
3.154258
3
helpdesk/query.py
normoes/django-helpdesk
0
6628662
from django.db.models import Q from django.core.cache import cache from django.urls import reverse from django.utils.translation import ugettext as _ from base64 import b64encode from base64 import b64decode import json from model_utils import Choices from helpdesk.serializers import DatatablesTicketSerializer def query_to_base64(query): """ Converts a query dict object to a base64-encoded bytes object. """ return b64encode(json.dumps(query).encode('UTF-8')).decode("ascii") def query_from_base64(b64data): """ Converts base64-encoded bytes object back to a query dict object. """ query = {'search_string': ''} query.update(json.loads(b64decode(b64data).decode('utf-8'))) if query['search_string'] is None: query['search_string'] = '' return query def query_to_dict(results, descriptions): """ Replacement method for cursor.dictfetchall() as that method no longer exists in psycopg2, and I'm guessing in other backends too. Converts the results of a raw SQL query into a list of dictionaries, suitable for use in templates etc. 
""" output = [] for data in results: row = {} i = 0 for column in descriptions: row[column[0]] = data[i] i += 1 output.append(row) return output def get_search_filter_args(search): if search.startswith('queue:'): return Q(queue__title__icontains=search[len('queue:'):]) if search.startswith('priority:'): return Q(priority__icontains=search[len('priority:'):]) filter = Q() for subsearch in search.split("OR"): subsearch = subsearch.strip() filter = ( filter | Q(id__icontains=subsearch) | Q(title__icontains=subsearch) | Q(description__icontains=subsearch) | Q(priority__icontains=subsearch) | Q(resolution__icontains=subsearch) | Q(submitter_email__icontains=subsearch) | Q(assigned_to__email__icontains=subsearch) | Q(ticketcustomfieldvalue__value__icontains=subsearch) | Q(created__icontains=subsearch) | Q(due_date__icontains=subsearch) ) return filter DATATABLES_ORDER_COLUMN_CHOICES = Choices( ('0', 'id'), ('2', 'priority'), ('3', 'title'), ('4', 'queue'), ('5', 'status'), ('6', 'created'), ('7', 'due_date'), ('8', 'assigned_to') ) def get_query_class(): from django.conf import settings def _get_query_class(): return __Query__ return getattr(settings, 'HELPDESK_QUERY_CLASS', _get_query_class)() class __Query__: def __init__(self, huser, base64query=None, query_params=None): self.huser = huser self.params = query_params if query_params else query_from_base64(base64query) self.base64 = base64query if base64query else query_to_base64(query_params) self.result = None def get_search_filter_args(self): search = self.params.get('search_string', '') return get_search_filter_args(search) def __run__(self, queryset): """ Apply a dict-based set of filters & parameters to a queryset. 
queryset is a Django queryset, eg MyModel.objects.all() or MyModel.objects.filter(user=request.user) params is a dictionary that contains the following: filtering: A dict of Django ORM filters, eg: {'user__id__in': [1, 3, 103], 'title__contains': 'foo'} search_string: A freetext search string sorting: The name of the column to sort by """ for key in self.params.get('filtering', {}).keys(): filter = {key: self.params['filtering'][key]} queryset = queryset.filter(**filter) queryset = queryset.filter(self.get_search_filter_args()) sorting = self.params.get('sorting', None) if sorting: sortreverse = self.params.get('sortreverse', None) if sortreverse: sorting = "-%s" % sorting queryset = queryset.order_by(sorting) return queryset.distinct() # https://stackoverflow.com/questions/30487056/django-queryset-contains-duplicate-entries def get_cache_key(self): return str(self.huser.user.pk) + ":" + self.base64 def refresh_query(self): tickets = self.huser.get_tickets_in_queues().select_related() ticket_qs = self.__run__(tickets) cache.set(self.get_cache_key(), ticket_qs, timeout=3600) return ticket_qs def get(self): # Prefilter the allowed tickets objects = cache.get(self.get_cache_key()) if objects is not None: return objects return self.refresh_query() def get_datatables_context(self, **kwargs): """ This function takes in a list of ticket objects from the views and throws it to the datatables on ticket_list.html. If a search string was entered, this function filters existing dataset on search string and returns a filtered filtered list. The `draw`, `length` etc parameters are for datatables to display meta data on the table contents. The returning queryset is passed to a Serializer called DatatablesTicketSerializer in serializers.py. 
""" objects = self.get() order_by = '-date_created' draw = int(kwargs.get('draw', None)[0]) length = int(kwargs.get('length', None)[0]) start = int(kwargs.get('start', None)[0]) search_value = kwargs.get('search[value]', None)[0] order_column = kwargs.get('order[0][column]', None)[0] order = kwargs.get('order[0][dir]', None)[0] order_column = DATATABLES_ORDER_COLUMN_CHOICES[order_column] # django orm '-' -> desc if order == 'desc': order_column = '-' + order_column queryset = objects.all().order_by(order_by) total = queryset.count() if search_value: queryset = queryset.filter(get_search_filter_args(search_value)) count = queryset.count() queryset = queryset.order_by(order_column)[start:start + length] return { 'data': DatatablesTicketSerializer(queryset, many=True).data, 'recordsFiltered': count, 'recordsTotal': total, 'draw': draw } def get_timeline_context(self): events = [] for ticket in self.get(): for followup in ticket.followup_set.all(): event = { 'start_date': self.mk_timeline_date(followup.date), 'text': { 'headline': ticket.title + ' - ' + followup.title, 'text': (followup.comment if followup.comment else _('No text')) + '<br/> <a href="%s" class="btn" role="button">%s</a>' % (reverse('helpdesk:view', kwargs={'ticket_id': ticket.pk}), _("View ticket")), }, 'group': _('Messages'), } events.append(event) return { 'events': events, } def mk_timeline_date(self, date): return { 'year': date.year, 'month': date.month, 'day': date.day, 'hour': date.hour, 'minute': date.minute, 'second': date.second, 'second': date.second, }
from django.db.models import Q from django.core.cache import cache from django.urls import reverse from django.utils.translation import ugettext as _ from base64 import b64encode from base64 import b64decode import json from model_utils import Choices from helpdesk.serializers import DatatablesTicketSerializer def query_to_base64(query): """ Converts a query dict object to a base64-encoded bytes object. """ return b64encode(json.dumps(query).encode('UTF-8')).decode("ascii") def query_from_base64(b64data): """ Converts base64-encoded bytes object back to a query dict object. """ query = {'search_string': ''} query.update(json.loads(b64decode(b64data).decode('utf-8'))) if query['search_string'] is None: query['search_string'] = '' return query def query_to_dict(results, descriptions): """ Replacement method for cursor.dictfetchall() as that method no longer exists in psycopg2, and I'm guessing in other backends too. Converts the results of a raw SQL query into a list of dictionaries, suitable for use in templates etc. 
""" output = [] for data in results: row = {} i = 0 for column in descriptions: row[column[0]] = data[i] i += 1 output.append(row) return output def get_search_filter_args(search): if search.startswith('queue:'): return Q(queue__title__icontains=search[len('queue:'):]) if search.startswith('priority:'): return Q(priority__icontains=search[len('priority:'):]) filter = Q() for subsearch in search.split("OR"): subsearch = subsearch.strip() filter = ( filter | Q(id__icontains=subsearch) | Q(title__icontains=subsearch) | Q(description__icontains=subsearch) | Q(priority__icontains=subsearch) | Q(resolution__icontains=subsearch) | Q(submitter_email__icontains=subsearch) | Q(assigned_to__email__icontains=subsearch) | Q(ticketcustomfieldvalue__value__icontains=subsearch) | Q(created__icontains=subsearch) | Q(due_date__icontains=subsearch) ) return filter DATATABLES_ORDER_COLUMN_CHOICES = Choices( ('0', 'id'), ('2', 'priority'), ('3', 'title'), ('4', 'queue'), ('5', 'status'), ('6', 'created'), ('7', 'due_date'), ('8', 'assigned_to') ) def get_query_class(): from django.conf import settings def _get_query_class(): return __Query__ return getattr(settings, 'HELPDESK_QUERY_CLASS', _get_query_class)() class __Query__: def __init__(self, huser, base64query=None, query_params=None): self.huser = huser self.params = query_params if query_params else query_from_base64(base64query) self.base64 = base64query if base64query else query_to_base64(query_params) self.result = None def get_search_filter_args(self): search = self.params.get('search_string', '') return get_search_filter_args(search) def __run__(self, queryset): """ Apply a dict-based set of filters & parameters to a queryset. 
queryset is a Django queryset, eg MyModel.objects.all() or MyModel.objects.filter(user=request.user) params is a dictionary that contains the following: filtering: A dict of Django ORM filters, eg: {'user__id__in': [1, 3, 103], 'title__contains': 'foo'} search_string: A freetext search string sorting: The name of the column to sort by """ for key in self.params.get('filtering', {}).keys(): filter = {key: self.params['filtering'][key]} queryset = queryset.filter(**filter) queryset = queryset.filter(self.get_search_filter_args()) sorting = self.params.get('sorting', None) if sorting: sortreverse = self.params.get('sortreverse', None) if sortreverse: sorting = "-%s" % sorting queryset = queryset.order_by(sorting) return queryset.distinct() # https://stackoverflow.com/questions/30487056/django-queryset-contains-duplicate-entries def get_cache_key(self): return str(self.huser.user.pk) + ":" + self.base64 def refresh_query(self): tickets = self.huser.get_tickets_in_queues().select_related() ticket_qs = self.__run__(tickets) cache.set(self.get_cache_key(), ticket_qs, timeout=3600) return ticket_qs def get(self): # Prefilter the allowed tickets objects = cache.get(self.get_cache_key()) if objects is not None: return objects return self.refresh_query() def get_datatables_context(self, **kwargs): """ This function takes in a list of ticket objects from the views and throws it to the datatables on ticket_list.html. If a search string was entered, this function filters existing dataset on search string and returns a filtered filtered list. The `draw`, `length` etc parameters are for datatables to display meta data on the table contents. The returning queryset is passed to a Serializer called DatatablesTicketSerializer in serializers.py. 
""" objects = self.get() order_by = '-date_created' draw = int(kwargs.get('draw', None)[0]) length = int(kwargs.get('length', None)[0]) start = int(kwargs.get('start', None)[0]) search_value = kwargs.get('search[value]', None)[0] order_column = kwargs.get('order[0][column]', None)[0] order = kwargs.get('order[0][dir]', None)[0] order_column = DATATABLES_ORDER_COLUMN_CHOICES[order_column] # django orm '-' -> desc if order == 'desc': order_column = '-' + order_column queryset = objects.all().order_by(order_by) total = queryset.count() if search_value: queryset = queryset.filter(get_search_filter_args(search_value)) count = queryset.count() queryset = queryset.order_by(order_column)[start:start + length] return { 'data': DatatablesTicketSerializer(queryset, many=True).data, 'recordsFiltered': count, 'recordsTotal': total, 'draw': draw } def get_timeline_context(self): events = [] for ticket in self.get(): for followup in ticket.followup_set.all(): event = { 'start_date': self.mk_timeline_date(followup.date), 'text': { 'headline': ticket.title + ' - ' + followup.title, 'text': (followup.comment if followup.comment else _('No text')) + '<br/> <a href="%s" class="btn" role="button">%s</a>' % (reverse('helpdesk:view', kwargs={'ticket_id': ticket.pk}), _("View ticket")), }, 'group': _('Messages'), } events.append(event) return { 'events': events, } def mk_timeline_date(self, date): return { 'year': date.year, 'month': date.month, 'day': date.day, 'hour': date.hour, 'minute': date.minute, 'second': date.second, 'second': date.second, }
en
0.72182
Converts a query dict object to a base64-encoded bytes object. Converts base64-encoded bytes object back to a query dict object. Replacement method for cursor.dictfetchall() as that method no longer exists in psycopg2, and I'm guessing in other backends too. Converts the results of a raw SQL query into a list of dictionaries, suitable for use in templates etc. Apply a dict-based set of filters & parameters to a queryset. queryset is a Django queryset, eg MyModel.objects.all() or MyModel.objects.filter(user=request.user) params is a dictionary that contains the following: filtering: A dict of Django ORM filters, eg: {'user__id__in': [1, 3, 103], 'title__contains': 'foo'} search_string: A freetext search string sorting: The name of the column to sort by # https://stackoverflow.com/questions/30487056/django-queryset-contains-duplicate-entries # Prefilter the allowed tickets This function takes in a list of ticket objects from the views and throws it to the datatables on ticket_list.html. If a search string was entered, this function filters existing dataset on search string and returns a filtered filtered list. The `draw`, `length` etc parameters are for datatables to display meta data on the table contents. The returning queryset is passed to a Serializer called DatatablesTicketSerializer in serializers.py. # django orm '-' -> desc
2.438526
2
setup.py
gfronza/rabbitmq-alert
72
6628663
<reponame>gfronza/rabbitmq-alert #! /usr/bin/python2 from setuptools import setup, find_packages from os import path # remember to push a new tag after changing this! VERSION = "1.9.0" DIST_CONFIG_PATH = "rabbitmqalert/config" DATA_FILES = [ ("/etc/rabbitmq-alert/", [DIST_CONFIG_PATH + "/config.ini.example"]), ("/var/log/rabbitmq-alert/", []) ] def generate_readme(): return open("README.rst").read() def generate_data_files(): if path.isdir("/etc/systemd/system/"): DATA_FILES.append(("/etc/systemd/system/", [DIST_CONFIG_PATH + "/service/rabbitmq-alert.service"])) if path.isdir("/etc/init.d/"): DATA_FILES.append(("/etc/init.d/", [DIST_CONFIG_PATH + "/service/rabbitmq-alert"])) return DATA_FILES setup( name="rabbitmq-alert", version=VERSION, long_description=generate_readme(), packages=find_packages(exclude=["*tests*"]), description="Send notifications when predefined conditions are met", author="<NAME> (gfronza), <NAME> (mylk), velika12, <NAME> (23doors), <NAME> (TeslA1402), <NAME> (anderson-fachini), <NAME> (Venomen)", author_email="<EMAIL>", url="https://github.com/gfronza/rabbitmq-alert", download_url="https://github.com/gfronza/rabbitmq-alert/tarball/" + VERSION, keywords=["rabbitmq", "alert", "monitor"], classifiers=[], entry_points={ "console_scripts": [ "rabbitmq-alert = rabbitmqalert:rabbitmqalert.main" ] }, data_files=generate_data_files() )
#! /usr/bin/python2 from setuptools import setup, find_packages from os import path # remember to push a new tag after changing this! VERSION = "1.9.0" DIST_CONFIG_PATH = "rabbitmqalert/config" DATA_FILES = [ ("/etc/rabbitmq-alert/", [DIST_CONFIG_PATH + "/config.ini.example"]), ("/var/log/rabbitmq-alert/", []) ] def generate_readme(): return open("README.rst").read() def generate_data_files(): if path.isdir("/etc/systemd/system/"): DATA_FILES.append(("/etc/systemd/system/", [DIST_CONFIG_PATH + "/service/rabbitmq-alert.service"])) if path.isdir("/etc/init.d/"): DATA_FILES.append(("/etc/init.d/", [DIST_CONFIG_PATH + "/service/rabbitmq-alert"])) return DATA_FILES setup( name="rabbitmq-alert", version=VERSION, long_description=generate_readme(), packages=find_packages(exclude=["*tests*"]), description="Send notifications when predefined conditions are met", author="<NAME> (gfronza), <NAME> (mylk), velika12, <NAME> (23doors), <NAME> (TeslA1402), <NAME> (anderson-fachini), <NAME> (Venomen)", author_email="<EMAIL>", url="https://github.com/gfronza/rabbitmq-alert", download_url="https://github.com/gfronza/rabbitmq-alert/tarball/" + VERSION, keywords=["rabbitmq", "alert", "monitor"], classifiers=[], entry_points={ "console_scripts": [ "rabbitmq-alert = rabbitmqalert:rabbitmqalert.main" ] }, data_files=generate_data_files() )
en
0.733111
#! /usr/bin/python2 # remember to push a new tag after changing this!
1.507961
2
AtCoder/ABC058/D.py
takaaki82/Java-Lessons
1
6628664
<gh_stars>1-10 n, m = map(int, input().split()) x_list = tuple(map(int, input().split())) y_list = tuple(map(int, input().split())) mod = 10 ** 9 + 7 x_sum = 0 for i in range(1, n + 1): x_sum += ((i - 1) * x_list[i - 1] - (n - i) * x_list[i - 1]) % mod y_sum = 0 for i in range(1, m + 1): y_sum += ((i - 1) * y_list[i - 1] - (m - i) * y_list[i - 1]) % mod print(x_sum * y_sum % mod )
n, m = map(int, input().split()) x_list = tuple(map(int, input().split())) y_list = tuple(map(int, input().split())) mod = 10 ** 9 + 7 x_sum = 0 for i in range(1, n + 1): x_sum += ((i - 1) * x_list[i - 1] - (n - i) * x_list[i - 1]) % mod y_sum = 0 for i in range(1, m + 1): y_sum += ((i - 1) * y_list[i - 1] - (m - i) * y_list[i - 1]) % mod print(x_sum * y_sum % mod )
none
1
2.795689
3
Utils.py
hdo/bCNC
0
6628665
<gh_stars>0 # -*- coding: ascii -*- # $Id$ # # Author: <EMAIL> # Date: 16-Apr-2015 __author__ = "<NAME>" __email__ = "<EMAIL>" import os import glob import traceback from log import say try: from Tkinter import * import tkFont import tkMessageBox import ConfigParser except ImportError: from tkinter import * import tkinter.font as tkFont import tkinter.messagebox as tkMessageBox import configparser as ConfigParser import gettext try: import __builtin__ except: import builtins as __builtin__ #__builtin__.unicode = str # dirty hack for python3 try: import serial except: serial = None __prg__ = "bCNC" prgpath = os.path.abspath(os.path.dirname(sys.argv[0])) iniSystem = os.path.join(prgpath,"%s.ini"%(__prg__)) iniUser = os.path.expanduser("~/.%s" % (__prg__)) hisFile = os.path.expanduser("~/.%s.history" % (__prg__)) # dirty way of substituting the "_" on the builtin namespace #__builtin__.__dict__["_"] = gettext.translation('bCNC', 'locale', fallback=True).ugettext __builtin__._ = gettext.translation('bCNC', os.path.join(prgpath,'locale'), fallback=True).gettext __builtin__.N_ = lambda message: message import Ribbon import tkExtra __www__ = "https://github.com/vlachoudis/bCNC" __contribute__ = \ "@effer <NAME>\n" \ "@carlosgs <NAME>\n" \ "@dguerizec\n" \ "@buschhardt\n" \ "@MARIOBASZ\n" \ "@harvie <NAME>udrunka" __credits__ = \ "@1bigpig\n" \ "@chamnit <NAME>\n" \ "@harvie <NAME>\n" \ "@onekk Carlo\n" \ "@SteveMoto\n" \ "@willadams <NAME>" __translations__ = \ "Dutch - @hypothermic\n" \ "French - @ThierryM\n" \ "German - @feistus, @SteveMoto\n" \ "Italian - @onekk\n" \ "Japanese - @stm32f1\n" \ "Korean - @jjayd\n" \ "Portuguese - @moacirbmn \n" \ "Russian - @minithc\n" \ "Simplified Chinese - @Bluermen\n" \ "Spanish - @carlosgs\n" \ "Traditional Chinese - @Engineer2Designer\n" LANGUAGES = { "" : "<system>", "de" : "Deutsch", "en" : "English", "es" : u"Espa\u00f1ol", "fr" : u"Fran\u00e7ais", "it" : "Italiano", "ja" : "Japanese", "kr" : "Korean", "nl" : "Nederlands", 
"pt_BR" : "Brazilian - Portuguese", "ru" : "Russian", "zh_cn" : "Simplified Chinese", "zh_tw" : "Traditional Chinese", } icons = {} config = ConfigParser.ConfigParser() language = "" _errorReport = True errors = [] _maxRecent = 10 _FONT_SECTION = "Font" GRBL0 = 0 GRBL1 = 1 SMOOTHIE = 10 CONTROLLER = { "Grbl-V0" : GRBL0, "Grbl" : GRBL1, "Smoothie" : SMOOTHIE} #------------------------------------------------------------------------------ def loadIcons(): global icons icons = {} for img in glob.glob("%s%sicons%s*.gif"%(prgpath,os.sep,os.sep)): name,ext = os.path.splitext(os.path.basename(img)) try: icons[name] = PhotoImage(file=img) if getBool("CNC", "doublesizeicon"): icons[name] = icons[name].zoom(2,2) except TclError: pass #Images global images images = {} for img in glob.glob("%s%simages%s*.gif"%(prgpath,os.sep,os.sep)): name,ext = os.path.splitext(os.path.basename(img)) try: images[name] = PhotoImage(file=img) if getBool("CNC", "doublesizeicon"): images[name] = images[name].zoom(2,2) except TclError: pass #------------------------------------------------------------------------------ def delIcons(): global icons if len(icons) > 0: for i in icons.values(): del i icons = {} # needed otherwise it complains on deleting the icons #------------------------------------------------------------------------------ # Load configuration #------------------------------------------------------------------------------ def loadConfiguration(systemOnly=False): global config, _errorReport, language if systemOnly: config.read(iniSystem) else: config.read([iniSystem, iniUser]) _errorReport = getInt("Connection","errorreport",1) language = getStr(__prg__, "language") if language: # replace language __builtin__._ = gettext.translation('bCNC', os.path.join(prgpath,'locale'), fallback=True, languages=[language]).gettext #------------------------------------------------------------------------------ # Save configuration file 
#------------------------------------------------------------------------------ def saveConfiguration(): global config cleanConfiguration() f = open(iniUser,"w") config.write(f) f.close() delIcons() #---------------------------------------------------------------------- # Remove items that are the same as in the default ini #---------------------------------------------------------------------- def cleanConfiguration(): global config newconfig = config # Remember config config = ConfigParser.ConfigParser() loadConfiguration(True) # Compare items for section in config.sections(): for item, value in config.items(section): try: new = newconfig.get(section, item) if value==new: newconfig.remove_option(section, item) except ConfigParser.NoOptionError: pass config = newconfig #------------------------------------------------------------------------------ # add section if it doesn't exist #------------------------------------------------------------------------------ def addSection(section): global config if not config.has_section(section): config.add_section(section) #------------------------------------------------------------------------------ def getStr(section, name, default=""): global config try: return config.get(section, name) except: return default #------------------------------------------------------------------------------ def getUtf(section, name, default=""): global config try: return config.get(section, name).decode("utf8") except: return default #------------------------------------------------------------------------------ def getInt(section, name, default=0): global config try: return int(config.get(section, name)) except: return default #------------------------------------------------------------------------------ def getFloat(section, name, default=0.0): global config try: return float(config.get(section, name)) except: return default #------------------------------------------------------------------------------ def getBool(section, name, 
default=False): global config try: return bool(int(config.get(section, name))) except: return default #------------------------------------------------------------------------------- # Return a font from a string #------------------------------------------------------------------------------- def makeFont(name, value=None): try: font = tkFont.Font(name=name, exists=True) except TclError: font = tkFont.Font(name=name) font.delete_font = False except AttributeError: return None if value is None: return font if isinstance(value, tuple): font.configure(family=value[0]) try: font.configure(size=value[1]) except: pass try: font.configure(weight=value[2]) except: pass try: font.configure(slant=value[3]) except: pass return font #------------------------------------------------------------------------------- # Create a font string #------------------------------------------------------------------------------- def fontString(font): name = str(font[0]) size = str(font[1]) if name.find(' ')>=0: s = '"%s" %s'%(name,size) else: s = '%s %s'%(name,size) try: if font[2] == tkFont.BOLD: s += " bold" except: pass try: if font[3] == tkFont.ITALIC: s += " italic" except: pass return s #------------------------------------------------------------------------------- # Get font from configuration #------------------------------------------------------------------------------- def getFont(name, default=None): try: value = config.get(_FONT_SECTION, name) except: value = None if not value: font = makeFont(name, default) setFont(name, font) return font if isinstance(value, str): value = tuple(value.split(',')) if isinstance(value, tuple): font = makeFont(name, value) if font is not None: return font return value #------------------------------------------------------------------------------- # Set font in configuration #------------------------------------------------------------------------------- def setFont(name, font): if font is None: return if isinstance(font,str): 
config.set(_FONT_SECTION, name, font) elif isinstance(font,tuple): config.set(_FONT_SECTION, name, ",".join(map(str,font))) else: config.set(_FONT_SECTION, name, "%s,%s,%s" % \ (font.cget("family"),font.cget("size"),font.cget("weight"))) #------------------------------------------------------------------------------ def setBool(section, name, value): global config config.set(section, name, str(int(value))) #------------------------------------------------------------------------------ def setStr(section, name, value): global config config.set(section, name, str(value)) #------------------------------------------------------------------------------ def setUtf(section, name, value): global config try: s = str(value.encode("utf8")) except: s = str(value) config.set(section, name, s) setInt = setStr setFloat = setStr #------------------------------------------------------------------------------- def controllerName(idx): for n,i in CONTROLLER.items(): if i==idx: return n return "unknown" #------------------------------------------------------------------------------- # Add Recent #------------------------------------------------------------------------------- def addRecent(filename): try: sfn = str(os.path.abspath(filename)) except UnicodeEncodeError: sfn = filename.encode("utf8") last = _maxRecent-1 for i in range(_maxRecent): rfn = getRecent(i) if rfn is None: last = i-1 break if rfn == sfn: if i==0: return last = i-1 break # Shift everything by one for i in range(last, -1, -1): config.set("File", "recent.%d"%(i+1), getRecent(i)) config.set("File", "recent.0", sfn) #------------------------------------------------------------------------------- def getRecent(recent): try: return config.get("File","recent.%d"%(recent)) except ConfigParser.NoOptionError: return None #------------------------------------------------------------------------------ # Return all comports when serial.tools.list_ports is not available! 
#------------------------------------------------------------------------------ def comports(): locations=[ '/dev/ttyACM', '/dev/ttyUSB', '/dev/ttyS', 'com'] comports = [] for prefix in locations: for i in range(32): device = "%s%d"%(prefix,i) try: os.stat(device) comports.append((device,None,None)) except OSError: pass # Detects windows XP serial ports try: s = serial.Serial(device) s.close() comports.append((device,None,None)) except: pass return comports #=============================================================================== def addException(): global errors #self.widget._report_exception() try: typ, val, tb = sys.exc_info() traceback.print_exception(typ, val, tb) if errors: errors.append("") exception = traceback.format_exception(typ, val, tb) errors.extend(exception) if len(errors) > 100: # If too many errors are found send the error report ReportDialog(self.widget) except: say(str(sys.exc_info())) #=============================================================================== class CallWrapper: """Replaces the Tkinter.CallWrapper with extra functionality""" def __init__(self, func, subst, widget): """Store FUNC, SUBST and WIDGET as members.""" self.func = func self.subst = subst self.widget = widget # ---------------------------------------------------------------------- def __call__(self, *args): """Apply first function SUBST to arguments, than FUNC.""" try: if self.subst: args = self.subst(*args) return self.func(*args) # One possible fix is to make an external file for the wrapper # and import depending the version #except SystemExit, msg: # python2.4 syntax #except SystemExit as msg: # python3 syntax # raise SystemExit(msg) except SystemExit: # both raise SystemExit(sys.exc_info()[1]) except KeyboardInterrupt: pass except: addException() #=============================================================================== # Error message reporting dialog #=============================================================================== class 
ReportDialog(Toplevel): _shown = False # avoid re-entry when multiple errors are displayed def __init__(self, master): if ReportDialog._shown: return ReportDialog._shown = True Toplevel.__init__(self, master) if master is not None: self.transient(master) self.title(_("Error Reporting")) # Label Frame frame = LabelFrame(self, text=_("Report")) frame.pack(side=TOP, expand=YES, fill=BOTH) l = Label(frame, text=_("The following report is about to be send "\ "to the author of %s")%(__name__), justify=LEFT, anchor=W) l.pack(side=TOP) self.text = Text(frame, background="White") self.text.pack(side=LEFT, expand=YES, fill=BOTH) sb = Scrollbar(frame, orient=VERTICAL, command=self.text.yview) sb.pack(side=RIGHT, fill=Y) self.text.config(yscrollcommand=sb.set) # email frame frame = Frame(self) frame.pack(side=TOP, fill=X) l = Label(frame, text=_("Your email")) l.pack(side=LEFT) self.email = Entry(frame, background="White") self.email.pack(side=LEFT, expand=YES, fill=X) # Automatic error reporting self.err = BooleanVar() self.err.set(_errorReport) b = Checkbutton(frame, text=_("Automatic error reporting"), variable=self.err, anchor=E, justify=RIGHT) b.pack(side=RIGHT) # Buttons frame = Frame(self) frame.pack(side=BOTTOM, fill=X) b = Button(frame, text=_("Close"), compound=LEFT, command=self.cancel) b.pack(side=RIGHT) b = Button(frame, text=_("Send report"), compound=LEFT, command=self.send) b.pack(side=RIGHT) from bCNC import __version__, __date__ # Fill report txt = [ "Program : %s"%(__prg__), "Version : %s"%(__version__), "Last Change : %s"%(__date__), "Platform : %s"%(sys.platform), "Python : %s"%(sys.version), "TkVersion : %s"%(TkVersion), "TclVersion : %s"%(TclVersion), "\nTraceback:" ] for e in errors: if e!="" and e[-1] == "\n": txt.append(e[:-1]) else: txt.append(e) self.text.insert('0.0', "\n".join(txt)) # Guess email user = os.getenv("USER") host = os.getenv("HOSTNAME") if user and host: email = "%s@%s"%(user,host) else: email = "" self.email.insert(0,email) 
self.protocol("WM_DELETE_WINDOW", self.close) self.bind('<Escape>', self.close) # Wait action self.wait_visibility() self.grab_set() self.focus_set() self.wait_window() # ---------------------------------------------------------------------- def close(self, event=None): ReportDialog._shown = False self.destroy() # ---------------------------------------------------------------------- def send(self): import httplib, urllib global errors email = self.email.get() desc = self.text.get('1.0', END).strip() # Send information self.config(cursor="watch") self.text.config(cursor="watch") self.update_idletasks() params = urllib.urlencode({"email":email, "desc":desc}) headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"} conn = httplib.HTTPConnection("www.bcnc.org:80") try: conn.request("POST", "/flair/send_email_bcnc.php", params, headers) response = conn.getresponse() except: tkMessageBox.showwarning(_("Error sending report"), _("There was a problem connecting to the web site"), parent=self) else: if response.status == 200: tkMessageBox.showinfo(_("Report successfully send"), _("Report was successfully uploaded to web site"), parent=self) del errors[:] else: tkMessageBox.showwarning(_("Error sending report"), _("There was an error sending the report\nCode=%d %s")%\ (response.status, response.reason), parent=self) conn.close() self.config(cursor="") self.cancel() # ---------------------------------------------------------------------- def cancel(self): global _errorReport, errors _errorReport = self.err.get() config.set("Connection", "errorreport", str(bool(self.err.get()))) del errors[:] self.close() # ---------------------------------------------------------------------- @staticmethod def sendErrorReport(): ReportDialog(None) #=============================================================================== # User Button #=============================================================================== class UserButton(Ribbon.LabelButton): 
TOOLTIP = "User configurable button.\n<RightClick> to configure" def __init__(self, master, cnc, button, *args, **kwargs): if button == 0: Button.__init__(self, master, *args, **kwargs) else: Ribbon.LabelButton.__init__(self, master, *args, **kwargs) self.cnc = cnc self.button = button self.get() #self.bind("<Control-Button-1>", self.edit) self.bind("<Button-3>", self.edit) self.bind("<Control-Button-1>", self.edit) self["command"] = self.execute # ---------------------------------------------------------------------- # get information from configuration # ---------------------------------------------------------------------- def get(self): if self.button == 0: return name = self.name() self["text"] = name #if icon == "": # icon = icons.get("empty","") self["image"] = icons.get(self.icon(),icons["material"]) self["compound"] = LEFT tooltip = self.tooltip() if not tooltip: tooltip = UserButton.TOOLTIP tkExtra.Balloon.set(self, tooltip) # ---------------------------------------------------------------------- def name(self): try: return config.get("Buttons","name.%d"%(self.button)) except: return str(self.button) # ---------------------------------------------------------------------- def icon(self): try: return config.get("Buttons","icon.%d"%(self.button)) except: return None # ---------------------------------------------------------------------- def tooltip(self): try: return config.get("Buttons","tooltip.%d"%(self.button)) except: return "" # ---------------------------------------------------------------------- def command(self): try: return config.get("Buttons","command.%d"%(self.button)) except: return "" # ---------------------------------------------------------------------- # Edit button # ---------------------------------------------------------------------- def edit(self, event=None): UserButtonDialog(self, self) self.get() # ---------------------------------------------------------------------- # Execute command # 
---------------------------------------------------------------------- def execute(self): cmd = self.command() if not cmd: self.edit() return for line in cmd.splitlines(): self.cnc.pendant.put(line) #=============================================================================== # User Configurable Buttons #=============================================================================== class UserButtonDialog(Toplevel): NONE = "<none>" def __init__(self, master, button): Toplevel.__init__(self, master) self.title(_("User configurable button")) self.transient(master) self.button = button # Name row,col = 0,0 Label(self, text=_("Name:")).grid(row=row, column=col, sticky=E) col += 1 self.name = Entry(self, background="White") self.name.grid(row=row, column=col, columnspan=2, sticky=EW) tkExtra.Balloon.set(self.name, _("Name to appear on button")) # Icon row,col = row+1,0 Label(self, text=_("Icon:")).grid(row=row, column=col, sticky=E) col += 1 self.icon = Label(self, relief=RAISED) self.icon.grid(row=row, column=col, sticky=EW) col += 1 self.iconCombo = tkExtra.Combobox(self, True, width=5, command=self.iconChange) lst = list(sorted(icons.keys())) lst.insert(0,UserButtonDialog.NONE) self.iconCombo.fill(lst) self.iconCombo.grid(row=row, column=col, sticky=EW) tkExtra.Balloon.set(self.iconCombo, _("Icon to appear on button")) # Tooltip row,col = row+1,0 Label(self, text=_("Tool Tip:")).grid(row=row, column=col, sticky=E) col += 1 self.tooltip = Entry(self, background="White") self.tooltip.grid(row=row, column=col, columnspan=2, sticky=EW) tkExtra.Balloon.set(self.tooltip, _("Tooltip for button")) # Tooltip row,col = row+1,0 Label(self, text=_("Command:")).grid(row=row, column=col, sticky=N+E) col += 1 self.command = Text(self, background="White", width=40, height=10) self.command.grid(row=row, column=col, columnspan=2, sticky=EW) self.grid_columnconfigure(2,weight=1) self.grid_rowconfigure(row,weight=1) # Actions row += 1 f = Frame(self) f.grid(row=row, column=0, 
columnspan=3, sticky=EW) Button(f, text=_("Cancel"), command=self.cancel).pack(side=RIGHT) Button(f, text=_("Ok"), command=self.ok).pack(side=RIGHT) # Set variables self.name.insert(0,self.button.name()) self.tooltip.insert(0,self.button.tooltip()) icon = self.button.icon() if icon is None: self.iconCombo.set(UserButtonDialog.NONE) else: self.iconCombo.set(icon) self.icon["image"] = icons.get(icon,"") self.command.insert("1.0", self.button.command()) # Wait action self.wait_visibility() self.grab_set() self.focus_set() self.wait_window() # ---------------------------------------------------------------------- def ok(self, event=None): n = self.button.button config.set("Buttons", "name.%d"%(n), self.name.get().strip()) icon = self.iconCombo.get() if icon == UserButtonDialog.NONE: icon = "" config.set("Buttons", "icon.%d"%(n), icon) config.set("Buttons", "tooltip.%d"%(n), self.tooltip.get().strip()) config.set("Buttons", "command.%d"%(n), self.command.get("1.0",END).strip()) self.destroy() # ---------------------------------------------------------------------- def cancel(self): self.destroy() # ---------------------------------------------------------------------- def iconChange(self): self.icon["image"] = icons.get(self.iconCombo.get(),"")
# -*- coding: ascii -*- # $Id$ # # Author: <EMAIL> # Date: 16-Apr-2015 __author__ = "<NAME>" __email__ = "<EMAIL>" import os import glob import traceback from log import say try: from Tkinter import * import tkFont import tkMessageBox import ConfigParser except ImportError: from tkinter import * import tkinter.font as tkFont import tkinter.messagebox as tkMessageBox import configparser as ConfigParser import gettext try: import __builtin__ except: import builtins as __builtin__ #__builtin__.unicode = str # dirty hack for python3 try: import serial except: serial = None __prg__ = "bCNC" prgpath = os.path.abspath(os.path.dirname(sys.argv[0])) iniSystem = os.path.join(prgpath,"%s.ini"%(__prg__)) iniUser = os.path.expanduser("~/.%s" % (__prg__)) hisFile = os.path.expanduser("~/.%s.history" % (__prg__)) # dirty way of substituting the "_" on the builtin namespace #__builtin__.__dict__["_"] = gettext.translation('bCNC', 'locale', fallback=True).ugettext __builtin__._ = gettext.translation('bCNC', os.path.join(prgpath,'locale'), fallback=True).gettext __builtin__.N_ = lambda message: message import Ribbon import tkExtra __www__ = "https://github.com/vlachoudis/bCNC" __contribute__ = \ "@effer <NAME>\n" \ "@carlosgs <NAME>\n" \ "@dguerizec\n" \ "@buschhardt\n" \ "@MARIOBASZ\n" \ "@harvie <NAME>udrunka" __credits__ = \ "@1bigpig\n" \ "@chamnit <NAME>\n" \ "@harvie <NAME>\n" \ "@onekk Carlo\n" \ "@SteveMoto\n" \ "@willadams <NAME>" __translations__ = \ "Dutch - @hypothermic\n" \ "French - @ThierryM\n" \ "German - @feistus, @SteveMoto\n" \ "Italian - @onekk\n" \ "Japanese - @stm32f1\n" \ "Korean - @jjayd\n" \ "Portuguese - @moacirbmn \n" \ "Russian - @minithc\n" \ "Simplified Chinese - @Bluermen\n" \ "Spanish - @carlosgs\n" \ "Traditional Chinese - @Engineer2Designer\n" LANGUAGES = { "" : "<system>", "de" : "Deutsch", "en" : "English", "es" : u"Espa\u00f1ol", "fr" : u"Fran\u00e7ais", "it" : "Italiano", "ja" : "Japanese", "kr" : "Korean", "nl" : "Nederlands", "pt_BR" : 
"Brazilian - Portuguese", "ru" : "Russian", "zh_cn" : "Simplified Chinese", "zh_tw" : "Traditional Chinese", } icons = {} config = ConfigParser.ConfigParser() language = "" _errorReport = True errors = [] _maxRecent = 10 _FONT_SECTION = "Font" GRBL0 = 0 GRBL1 = 1 SMOOTHIE = 10 CONTROLLER = { "Grbl-V0" : GRBL0, "Grbl" : GRBL1, "Smoothie" : SMOOTHIE} #------------------------------------------------------------------------------ def loadIcons(): global icons icons = {} for img in glob.glob("%s%sicons%s*.gif"%(prgpath,os.sep,os.sep)): name,ext = os.path.splitext(os.path.basename(img)) try: icons[name] = PhotoImage(file=img) if getBool("CNC", "doublesizeicon"): icons[name] = icons[name].zoom(2,2) except TclError: pass #Images global images images = {} for img in glob.glob("%s%simages%s*.gif"%(prgpath,os.sep,os.sep)): name,ext = os.path.splitext(os.path.basename(img)) try: images[name] = PhotoImage(file=img) if getBool("CNC", "doublesizeicon"): images[name] = images[name].zoom(2,2) except TclError: pass #------------------------------------------------------------------------------ def delIcons(): global icons if len(icons) > 0: for i in icons.values(): del i icons = {} # needed otherwise it complains on deleting the icons #------------------------------------------------------------------------------ # Load configuration #------------------------------------------------------------------------------ def loadConfiguration(systemOnly=False): global config, _errorReport, language if systemOnly: config.read(iniSystem) else: config.read([iniSystem, iniUser]) _errorReport = getInt("Connection","errorreport",1) language = getStr(__prg__, "language") if language: # replace language __builtin__._ = gettext.translation('bCNC', os.path.join(prgpath,'locale'), fallback=True, languages=[language]).gettext #------------------------------------------------------------------------------ # Save configuration file 
#------------------------------------------------------------------------------ def saveConfiguration(): global config cleanConfiguration() f = open(iniUser,"w") config.write(f) f.close() delIcons() #---------------------------------------------------------------------- # Remove items that are the same as in the default ini #---------------------------------------------------------------------- def cleanConfiguration(): global config newconfig = config # Remember config config = ConfigParser.ConfigParser() loadConfiguration(True) # Compare items for section in config.sections(): for item, value in config.items(section): try: new = newconfig.get(section, item) if value==new: newconfig.remove_option(section, item) except ConfigParser.NoOptionError: pass config = newconfig #------------------------------------------------------------------------------ # add section if it doesn't exist #------------------------------------------------------------------------------ def addSection(section): global config if not config.has_section(section): config.add_section(section) #------------------------------------------------------------------------------ def getStr(section, name, default=""): global config try: return config.get(section, name) except: return default #------------------------------------------------------------------------------ def getUtf(section, name, default=""): global config try: return config.get(section, name).decode("utf8") except: return default #------------------------------------------------------------------------------ def getInt(section, name, default=0): global config try: return int(config.get(section, name)) except: return default #------------------------------------------------------------------------------ def getFloat(section, name, default=0.0): global config try: return float(config.get(section, name)) except: return default #------------------------------------------------------------------------------ def getBool(section, name, 
default=False): global config try: return bool(int(config.get(section, name))) except: return default #------------------------------------------------------------------------------- # Return a font from a string #------------------------------------------------------------------------------- def makeFont(name, value=None): try: font = tkFont.Font(name=name, exists=True) except TclError: font = tkFont.Font(name=name) font.delete_font = False except AttributeError: return None if value is None: return font if isinstance(value, tuple): font.configure(family=value[0]) try: font.configure(size=value[1]) except: pass try: font.configure(weight=value[2]) except: pass try: font.configure(slant=value[3]) except: pass return font #------------------------------------------------------------------------------- # Create a font string #------------------------------------------------------------------------------- def fontString(font): name = str(font[0]) size = str(font[1]) if name.find(' ')>=0: s = '"%s" %s'%(name,size) else: s = '%s %s'%(name,size) try: if font[2] == tkFont.BOLD: s += " bold" except: pass try: if font[3] == tkFont.ITALIC: s += " italic" except: pass return s #------------------------------------------------------------------------------- # Get font from configuration #------------------------------------------------------------------------------- def getFont(name, default=None): try: value = config.get(_FONT_SECTION, name) except: value = None if not value: font = makeFont(name, default) setFont(name, font) return font if isinstance(value, str): value = tuple(value.split(',')) if isinstance(value, tuple): font = makeFont(name, value) if font is not None: return font return value #------------------------------------------------------------------------------- # Set font in configuration #------------------------------------------------------------------------------- def setFont(name, font): if font is None: return if isinstance(font,str): 
config.set(_FONT_SECTION, name, font) elif isinstance(font,tuple): config.set(_FONT_SECTION, name, ",".join(map(str,font))) else: config.set(_FONT_SECTION, name, "%s,%s,%s" % \ (font.cget("family"),font.cget("size"),font.cget("weight"))) #------------------------------------------------------------------------------ def setBool(section, name, value): global config config.set(section, name, str(int(value))) #------------------------------------------------------------------------------ def setStr(section, name, value): global config config.set(section, name, str(value)) #------------------------------------------------------------------------------ def setUtf(section, name, value): global config try: s = str(value.encode("utf8")) except: s = str(value) config.set(section, name, s) setInt = setStr setFloat = setStr #------------------------------------------------------------------------------- def controllerName(idx): for n,i in CONTROLLER.items(): if i==idx: return n return "unknown" #------------------------------------------------------------------------------- # Add Recent #------------------------------------------------------------------------------- def addRecent(filename): try: sfn = str(os.path.abspath(filename)) except UnicodeEncodeError: sfn = filename.encode("utf8") last = _maxRecent-1 for i in range(_maxRecent): rfn = getRecent(i) if rfn is None: last = i-1 break if rfn == sfn: if i==0: return last = i-1 break # Shift everything by one for i in range(last, -1, -1): config.set("File", "recent.%d"%(i+1), getRecent(i)) config.set("File", "recent.0", sfn) #------------------------------------------------------------------------------- def getRecent(recent): try: return config.get("File","recent.%d"%(recent)) except ConfigParser.NoOptionError: return None #------------------------------------------------------------------------------ # Return all comports when serial.tools.list_ports is not available! 
#------------------------------------------------------------------------------ def comports(): locations=[ '/dev/ttyACM', '/dev/ttyUSB', '/dev/ttyS', 'com'] comports = [] for prefix in locations: for i in range(32): device = "%s%d"%(prefix,i) try: os.stat(device) comports.append((device,None,None)) except OSError: pass # Detects windows XP serial ports try: s = serial.Serial(device) s.close() comports.append((device,None,None)) except: pass return comports #=============================================================================== def addException(): global errors #self.widget._report_exception() try: typ, val, tb = sys.exc_info() traceback.print_exception(typ, val, tb) if errors: errors.append("") exception = traceback.format_exception(typ, val, tb) errors.extend(exception) if len(errors) > 100: # If too many errors are found send the error report ReportDialog(self.widget) except: say(str(sys.exc_info())) #=============================================================================== class CallWrapper: """Replaces the Tkinter.CallWrapper with extra functionality""" def __init__(self, func, subst, widget): """Store FUNC, SUBST and WIDGET as members.""" self.func = func self.subst = subst self.widget = widget # ---------------------------------------------------------------------- def __call__(self, *args): """Apply first function SUBST to arguments, than FUNC.""" try: if self.subst: args = self.subst(*args) return self.func(*args) # One possible fix is to make an external file for the wrapper # and import depending the version #except SystemExit, msg: # python2.4 syntax #except SystemExit as msg: # python3 syntax # raise SystemExit(msg) except SystemExit: # both raise SystemExit(sys.exc_info()[1]) except KeyboardInterrupt: pass except: addException() #=============================================================================== # Error message reporting dialog #=============================================================================== class 
ReportDialog(Toplevel): _shown = False # avoid re-entry when multiple errors are displayed def __init__(self, master): if ReportDialog._shown: return ReportDialog._shown = True Toplevel.__init__(self, master) if master is not None: self.transient(master) self.title(_("Error Reporting")) # Label Frame frame = LabelFrame(self, text=_("Report")) frame.pack(side=TOP, expand=YES, fill=BOTH) l = Label(frame, text=_("The following report is about to be send "\ "to the author of %s")%(__name__), justify=LEFT, anchor=W) l.pack(side=TOP) self.text = Text(frame, background="White") self.text.pack(side=LEFT, expand=YES, fill=BOTH) sb = Scrollbar(frame, orient=VERTICAL, command=self.text.yview) sb.pack(side=RIGHT, fill=Y) self.text.config(yscrollcommand=sb.set) # email frame frame = Frame(self) frame.pack(side=TOP, fill=X) l = Label(frame, text=_("Your email")) l.pack(side=LEFT) self.email = Entry(frame, background="White") self.email.pack(side=LEFT, expand=YES, fill=X) # Automatic error reporting self.err = BooleanVar() self.err.set(_errorReport) b = Checkbutton(frame, text=_("Automatic error reporting"), variable=self.err, anchor=E, justify=RIGHT) b.pack(side=RIGHT) # Buttons frame = Frame(self) frame.pack(side=BOTTOM, fill=X) b = Button(frame, text=_("Close"), compound=LEFT, command=self.cancel) b.pack(side=RIGHT) b = Button(frame, text=_("Send report"), compound=LEFT, command=self.send) b.pack(side=RIGHT) from bCNC import __version__, __date__ # Fill report txt = [ "Program : %s"%(__prg__), "Version : %s"%(__version__), "Last Change : %s"%(__date__), "Platform : %s"%(sys.platform), "Python : %s"%(sys.version), "TkVersion : %s"%(TkVersion), "TclVersion : %s"%(TclVersion), "\nTraceback:" ] for e in errors: if e!="" and e[-1] == "\n": txt.append(e[:-1]) else: txt.append(e) self.text.insert('0.0', "\n".join(txt)) # Guess email user = os.getenv("USER") host = os.getenv("HOSTNAME") if user and host: email = "%s@%s"%(user,host) else: email = "" self.email.insert(0,email) 
self.protocol("WM_DELETE_WINDOW", self.close) self.bind('<Escape>', self.close) # Wait action self.wait_visibility() self.grab_set() self.focus_set() self.wait_window() # ---------------------------------------------------------------------- def close(self, event=None): ReportDialog._shown = False self.destroy() # ---------------------------------------------------------------------- def send(self): import httplib, urllib global errors email = self.email.get() desc = self.text.get('1.0', END).strip() # Send information self.config(cursor="watch") self.text.config(cursor="watch") self.update_idletasks() params = urllib.urlencode({"email":email, "desc":desc}) headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"} conn = httplib.HTTPConnection("www.bcnc.org:80") try: conn.request("POST", "/flair/send_email_bcnc.php", params, headers) response = conn.getresponse() except: tkMessageBox.showwarning(_("Error sending report"), _("There was a problem connecting to the web site"), parent=self) else: if response.status == 200: tkMessageBox.showinfo(_("Report successfully send"), _("Report was successfully uploaded to web site"), parent=self) del errors[:] else: tkMessageBox.showwarning(_("Error sending report"), _("There was an error sending the report\nCode=%d %s")%\ (response.status, response.reason), parent=self) conn.close() self.config(cursor="") self.cancel() # ---------------------------------------------------------------------- def cancel(self): global _errorReport, errors _errorReport = self.err.get() config.set("Connection", "errorreport", str(bool(self.err.get()))) del errors[:] self.close() # ---------------------------------------------------------------------- @staticmethod def sendErrorReport(): ReportDialog(None) #=============================================================================== # User Button #=============================================================================== class UserButton(Ribbon.LabelButton): 
TOOLTIP = "User configurable button.\n<RightClick> to configure" def __init__(self, master, cnc, button, *args, **kwargs): if button == 0: Button.__init__(self, master, *args, **kwargs) else: Ribbon.LabelButton.__init__(self, master, *args, **kwargs) self.cnc = cnc self.button = button self.get() #self.bind("<Control-Button-1>", self.edit) self.bind("<Button-3>", self.edit) self.bind("<Control-Button-1>", self.edit) self["command"] = self.execute # ---------------------------------------------------------------------- # get information from configuration # ---------------------------------------------------------------------- def get(self): if self.button == 0: return name = self.name() self["text"] = name #if icon == "": # icon = icons.get("empty","") self["image"] = icons.get(self.icon(),icons["material"]) self["compound"] = LEFT tooltip = self.tooltip() if not tooltip: tooltip = UserButton.TOOLTIP tkExtra.Balloon.set(self, tooltip) # ---------------------------------------------------------------------- def name(self): try: return config.get("Buttons","name.%d"%(self.button)) except: return str(self.button) # ---------------------------------------------------------------------- def icon(self): try: return config.get("Buttons","icon.%d"%(self.button)) except: return None # ---------------------------------------------------------------------- def tooltip(self): try: return config.get("Buttons","tooltip.%d"%(self.button)) except: return "" # ---------------------------------------------------------------------- def command(self): try: return config.get("Buttons","command.%d"%(self.button)) except: return "" # ---------------------------------------------------------------------- # Edit button # ---------------------------------------------------------------------- def edit(self, event=None): UserButtonDialog(self, self) self.get() # ---------------------------------------------------------------------- # Execute command # 
---------------------------------------------------------------------- def execute(self): cmd = self.command() if not cmd: self.edit() return for line in cmd.splitlines(): self.cnc.pendant.put(line) #=============================================================================== # User Configurable Buttons #=============================================================================== class UserButtonDialog(Toplevel): NONE = "<none>" def __init__(self, master, button): Toplevel.__init__(self, master) self.title(_("User configurable button")) self.transient(master) self.button = button # Name row,col = 0,0 Label(self, text=_("Name:")).grid(row=row, column=col, sticky=E) col += 1 self.name = Entry(self, background="White") self.name.grid(row=row, column=col, columnspan=2, sticky=EW) tkExtra.Balloon.set(self.name, _("Name to appear on button")) # Icon row,col = row+1,0 Label(self, text=_("Icon:")).grid(row=row, column=col, sticky=E) col += 1 self.icon = Label(self, relief=RAISED) self.icon.grid(row=row, column=col, sticky=EW) col += 1 self.iconCombo = tkExtra.Combobox(self, True, width=5, command=self.iconChange) lst = list(sorted(icons.keys())) lst.insert(0,UserButtonDialog.NONE) self.iconCombo.fill(lst) self.iconCombo.grid(row=row, column=col, sticky=EW) tkExtra.Balloon.set(self.iconCombo, _("Icon to appear on button")) # Tooltip row,col = row+1,0 Label(self, text=_("Tool Tip:")).grid(row=row, column=col, sticky=E) col += 1 self.tooltip = Entry(self, background="White") self.tooltip.grid(row=row, column=col, columnspan=2, sticky=EW) tkExtra.Balloon.set(self.tooltip, _("Tooltip for button")) # Tooltip row,col = row+1,0 Label(self, text=_("Command:")).grid(row=row, column=col, sticky=N+E) col += 1 self.command = Text(self, background="White", width=40, height=10) self.command.grid(row=row, column=col, columnspan=2, sticky=EW) self.grid_columnconfigure(2,weight=1) self.grid_rowconfigure(row,weight=1) # Actions row += 1 f = Frame(self) f.grid(row=row, column=0, 
columnspan=3, sticky=EW) Button(f, text=_("Cancel"), command=self.cancel).pack(side=RIGHT) Button(f, text=_("Ok"), command=self.ok).pack(side=RIGHT) # Set variables self.name.insert(0,self.button.name()) self.tooltip.insert(0,self.button.tooltip()) icon = self.button.icon() if icon is None: self.iconCombo.set(UserButtonDialog.NONE) else: self.iconCombo.set(icon) self.icon["image"] = icons.get(icon,"") self.command.insert("1.0", self.button.command()) # Wait action self.wait_visibility() self.grab_set() self.focus_set() self.wait_window() # ---------------------------------------------------------------------- def ok(self, event=None): n = self.button.button config.set("Buttons", "name.%d"%(n), self.name.get().strip()) icon = self.iconCombo.get() if icon == UserButtonDialog.NONE: icon = "" config.set("Buttons", "icon.%d"%(n), icon) config.set("Buttons", "tooltip.%d"%(n), self.tooltip.get().strip()) config.set("Buttons", "command.%d"%(n), self.command.get("1.0",END).strip()) self.destroy() # ---------------------------------------------------------------------- def cancel(self): self.destroy() # ---------------------------------------------------------------------- def iconChange(self): self.icon["image"] = icons.get(self.iconCombo.get(),"")
en
0.171503
# -*- coding: ascii -*- # $Id$ # # Author: <EMAIL> # Date: 16-Apr-2015 #__builtin__.unicode = str # dirty hack for python3 # dirty way of substituting the "_" on the builtin namespace #__builtin__.__dict__["_"] = gettext.translation('bCNC', 'locale', fallback=True).ugettext #------------------------------------------------------------------------------ #Images #------------------------------------------------------------------------------ # needed otherwise it complains on deleting the icons #------------------------------------------------------------------------------ # Load configuration #------------------------------------------------------------------------------ # replace language #------------------------------------------------------------------------------ # Save configuration file #------------------------------------------------------------------------------ #---------------------------------------------------------------------- # Remove items that are the same as in the default ini #---------------------------------------------------------------------- # Remember config # Compare items #------------------------------------------------------------------------------ # add section if it doesn't exist #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ #------------------------------------------------------------------------------- # Return a font from a string #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # Create a font 
string #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # Get font from configuration #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # Set font in configuration #------------------------------------------------------------------------------- #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # Add Recent #------------------------------------------------------------------------------- # Shift everything by one #------------------------------------------------------------------------------- #------------------------------------------------------------------------------ # Return all comports when serial.tools.list_ports is not available! #------------------------------------------------------------------------------ # Detects windows XP serial ports #=============================================================================== #self.widget._report_exception() # If too many errors are found send the error report #=============================================================================== Replaces the Tkinter.CallWrapper with extra functionality Store FUNC, SUBST and WIDGET as members. # ---------------------------------------------------------------------- Apply first function SUBST to arguments, than FUNC. 
# One possible fix is to make an external file for the wrapper # and import depending the version #except SystemExit, msg: # python2.4 syntax #except SystemExit as msg: # python3 syntax # raise SystemExit(msg) # both #=============================================================================== # Error message reporting dialog #=============================================================================== # avoid re-entry when multiple errors are displayed # Label Frame # email frame # Automatic error reporting # Buttons # Fill report # Guess email # Wait action # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # Send information # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- #=============================================================================== # User Button #=============================================================================== #self.bind("<Control-Button-1>", self.edit) # ---------------------------------------------------------------------- # get information from configuration # ---------------------------------------------------------------------- #if icon == "": # icon = icons.get("empty","") # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # Edit button # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # Execute command # ---------------------------------------------------------------------- 
#=============================================================================== # User Configurable Buttons #=============================================================================== # Name # Icon # Tooltip # Tooltip # Actions # Set variables # Wait action # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # ----------------------------------------------------------------------
2.089
2
SVM/Imdb/classifier.py
revegon/Sentiment-Analysis-of-Text
0
6628666
from sklearn.metrics import confusion_matrix from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.datasets import load_files from sklearn.naive_bayes import MultinomialNB from sklearn.cross_validation import train_test_split from sklearn import metrics from sklearn.externals import joblib path = r'G:\Academic\ML\Sentiment-Analysis-of-Text\SVM\Imdb' dataset = load_files(path, shuffle= False, decode_error='ignore', random_state=None,load_content=True) trainData,testData,trainTarget,testTarget = train_test_split(dataset.data,dataset.target,train_size = 0.85, test_size=0.15,random_state=45); vectorizer=TfidfVectorizer(use_idf=True,lowercase = True, analyzer='word') trainData=vectorizer.fit_transform(trainData) print(trainData.shape) from sklearn import svm from sklearn.metrics import accuracy_score # target_names = ['accident','art','crime','economics','education','entertainment','environment','international','opinion','politics','science_tech','sports'] model = svm.SVC(kernel='linear', C=1, gamma=1) model.fit(trainData, trainTarget) new_doc_tfidf_matrix = vectorizer.transform(testData) predicted = model.predict(new_doc_tfidf_matrix) print(accuracy_score(testTarget, predicted)) joblib.dump(vectorizer,"vectorizer.pkl") joblib.dump(model,"trainer.pkl") # print(metrics.classification_report(testTarget, predicted,target_names=target_names)) # print(confusion_matrix(predicted, testTarget, target_names))
from sklearn.metrics import confusion_matrix from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.datasets import load_files from sklearn.naive_bayes import MultinomialNB from sklearn.cross_validation import train_test_split from sklearn import metrics from sklearn.externals import joblib path = r'G:\Academic\ML\Sentiment-Analysis-of-Text\SVM\Imdb' dataset = load_files(path, shuffle= False, decode_error='ignore', random_state=None,load_content=True) trainData,testData,trainTarget,testTarget = train_test_split(dataset.data,dataset.target,train_size = 0.85, test_size=0.15,random_state=45); vectorizer=TfidfVectorizer(use_idf=True,lowercase = True, analyzer='word') trainData=vectorizer.fit_transform(trainData) print(trainData.shape) from sklearn import svm from sklearn.metrics import accuracy_score # target_names = ['accident','art','crime','economics','education','entertainment','environment','international','opinion','politics','science_tech','sports'] model = svm.SVC(kernel='linear', C=1, gamma=1) model.fit(trainData, trainTarget) new_doc_tfidf_matrix = vectorizer.transform(testData) predicted = model.predict(new_doc_tfidf_matrix) print(accuracy_score(testTarget, predicted)) joblib.dump(vectorizer,"vectorizer.pkl") joblib.dump(model,"trainer.pkl") # print(metrics.classification_report(testTarget, predicted,target_names=target_names)) # print(confusion_matrix(predicted, testTarget, target_names))
en
0.323098
# target_names = ['accident','art','crime','economics','education','entertainment','environment','international','opinion','politics','science_tech','sports'] # print(metrics.classification_report(testTarget, predicted,target_names=target_names)) # print(confusion_matrix(predicted, testTarget, target_names))
2.826051
3
experiments/e12/qsub.py
smly/Landmark2019-1st-and-3rd-Place-Solution
7
6628667
from logging import getLogger import subprocess import tempfile import os import re import time from easydict import EasyDict as edict logger = getLogger('landmark18') # rt_G.large, rt_C.large ... max=72 hours # rt_G.small, rt_C.small ... max=168 hours # 16分割なら rt_C.large, 8分割なら rt_C.small JOB_TEMPLATE = """ #!/bin/bash #$ -l {instance_type:s}=1 #$ -l h_rt={n_hours:d}:00:00 #$ -j y #$ -cwd source /etc/profile.d/modules.sh module load cuda/9.0/9.0.176.2 module load cudnn/7.0/7.0.5 module load nccl/2.1/2.1.15-1 source ~/anaconda3/bin/activate landmark19 &&\\ PYTHONPATH=/home/aca10305wp/trunk/models_190505/research:lib {cmd_str:s} """ JOB_CPU_TEMPLATE = """ #!/bin/bash #$ -l {instance_type:s}=1 #$ -l h_rt={n_hours:d}:00:00 #$ -j y #$ -cwd source ~/anaconda3/bin/activate landmark19 &&\\ PYTHONPATH=lib {cmd_str:s} """ def monitor_jobs(job_ids): while True: res = check_job_running(job_ids) if not res.is_done and len(res.jobs) == 1: job = res.jobs[0] logger.info( f'>>> Job {job.job_id} ({job.name}) is running....') elif not res.is_done and len(res.jobs) > 1: logger.info( f'>>> {len(res.jobs)} jobs are running....') elif res.is_done or len(res.jobs) == 0: logger.info(f'>>> Job is completed!') break time.sleep(120) def check_job_running(job_ids): proc = subprocess.run([ 'qstat', ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) assert proc.returncode == 0 is_done = True res = [] for job_id in job_ids: m = re.search( r'\s' + f'{job_id}' + r'\s+[0-9\.]+\s+([^\s]+?)\s', proc.stdout.decode('utf8')) if m is not None: is_done = False name = m.group(1) res.append(edict(job_id=job_id, name=name, status='runninng')) else: pass return edict(is_done=is_done, jobs=res) def qsub(cmd_with_args, n_hours=1, instance_type='rt_C.small'): job_temp = JOB_TEMPLATE if instance_type.startswith('rt_C'): job_temp = JOB_CPU_TEMPLATE cmd_str = " ".join(cmd_with_args) job_content = job_temp.strip().format( instance_type=instance_type, n_hours=n_hours, cmd_str=cmd_str) # Generate job file 
logger.info(f'Run qsub: {cmd_str}') tempfilename = None with tempfile.NamedTemporaryFile(mode='w', dir=os.getcwd(), delete=True) as f: f.write(job_content) f.flush() tempfilename = f.name proc = subprocess.run([ 'qsub', '-g', 'gca50080', tempfilename, ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) return proc
from logging import getLogger import subprocess import tempfile import os import re import time from easydict import EasyDict as edict logger = getLogger('landmark18') # rt_G.large, rt_C.large ... max=72 hours # rt_G.small, rt_C.small ... max=168 hours # 16分割なら rt_C.large, 8分割なら rt_C.small JOB_TEMPLATE = """ #!/bin/bash #$ -l {instance_type:s}=1 #$ -l h_rt={n_hours:d}:00:00 #$ -j y #$ -cwd source /etc/profile.d/modules.sh module load cuda/9.0/9.0.176.2 module load cudnn/7.0/7.0.5 module load nccl/2.1/2.1.15-1 source ~/anaconda3/bin/activate landmark19 &&\\ PYTHONPATH=/home/aca10305wp/trunk/models_190505/research:lib {cmd_str:s} """ JOB_CPU_TEMPLATE = """ #!/bin/bash #$ -l {instance_type:s}=1 #$ -l h_rt={n_hours:d}:00:00 #$ -j y #$ -cwd source ~/anaconda3/bin/activate landmark19 &&\\ PYTHONPATH=lib {cmd_str:s} """ def monitor_jobs(job_ids): while True: res = check_job_running(job_ids) if not res.is_done and len(res.jobs) == 1: job = res.jobs[0] logger.info( f'>>> Job {job.job_id} ({job.name}) is running....') elif not res.is_done and len(res.jobs) > 1: logger.info( f'>>> {len(res.jobs)} jobs are running....') elif res.is_done or len(res.jobs) == 0: logger.info(f'>>> Job is completed!') break time.sleep(120) def check_job_running(job_ids): proc = subprocess.run([ 'qstat', ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) assert proc.returncode == 0 is_done = True res = [] for job_id in job_ids: m = re.search( r'\s' + f'{job_id}' + r'\s+[0-9\.]+\s+([^\s]+?)\s', proc.stdout.decode('utf8')) if m is not None: is_done = False name = m.group(1) res.append(edict(job_id=job_id, name=name, status='runninng')) else: pass return edict(is_done=is_done, jobs=res) def qsub(cmd_with_args, n_hours=1, instance_type='rt_C.small'): job_temp = JOB_TEMPLATE if instance_type.startswith('rt_C'): job_temp = JOB_CPU_TEMPLATE cmd_str = " ".join(cmd_with_args) job_content = job_temp.strip().format( instance_type=instance_type, n_hours=n_hours, cmd_str=cmd_str) # Generate job file 
logger.info(f'Run qsub: {cmd_str}') tempfilename = None with tempfile.NamedTemporaryFile(mode='w', dir=os.getcwd(), delete=True) as f: f.write(job_content) f.flush() tempfilename = f.name proc = subprocess.run([ 'qsub', '-g', 'gca50080', tempfilename, ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) return proc
en
0.246247
# rt_G.large, rt_C.large ... max=72 hours # rt_G.small, rt_C.small ... max=168 hours # 16分割なら rt_C.large, 8分割なら rt_C.small #!/bin/bash #$ -l {instance_type:s}=1 #$ -l h_rt={n_hours:d}:00:00 #$ -j y #$ -cwd source /etc/profile.d/modules.sh module load cuda/9.0/9.0.176.2 module load cudnn/7.0/7.0.5 module load nccl/2.1/2.1.15-1 source ~/anaconda3/bin/activate landmark19 &&\\ PYTHONPATH=/home/aca10305wp/trunk/models_190505/research:lib {cmd_str:s} #!/bin/bash #$ -l {instance_type:s}=1 #$ -l h_rt={n_hours:d}:00:00 #$ -j y #$ -cwd source ~/anaconda3/bin/activate landmark19 &&\\ PYTHONPATH=lib {cmd_str:s} # Generate job file
2.240467
2
unit_05/main4.py
janusnic/21v-pyqt
0
6628668
<filename>unit_05/main4.py # -*- coding: utf-8 -*- #! /usr/bin/python import sys import os from PyQt4 import QtGui from PyQt4.QtCore import Qt import design4 class Notepad(QtGui.QMainWindow, design4.Ui_MainWindow): def __init__(self, parent=None): super(Notepad, self).__init__(parent) self.filename = "" self.setupUi(self) self.closeAction.triggered.connect(self.close) self.newAction.triggered.connect(self.newFile) self.saveAction.triggered.connect(self.saveFile) self.openAction.triggered.connect(self.openFile) self.printAction.triggered.connect(self.printHandler) self.previewAction.triggered.connect(self.preview) self.cutAction.triggered.connect(self.textEdit.cut) self.copyAction.triggered.connect(self.textEdit.copy) self.pasteAction.triggered.connect(self.textEdit.paste) self.undoAction.triggered.connect(self.textEdit.undo) self.redoAction.triggered.connect(self.textEdit.redo) self.bulletAction.triggered.connect(self.bulletList) self.numberedAction.triggered.connect(self.numberList) self.fontColor.triggered.connect(self.fontColorChanged) self.backColor.triggered.connect(self.highlight) self.boldAction.triggered.connect(self.bold) self.italicAction.triggered.connect(self.italic) self.underlAction.triggered.connect(self.underline) self.strikeAction.triggered.connect(self.strike) self.superAction.triggered.connect(self.superScript) self.subAction.triggered.connect(self.subScript) def newFile(self): #self.textEdit.clear() spawn = Notepad(self) spawn.show() def saveFile(self): # Only open dialog if there is no filename yet if not self.filename: self.filename = QtGui.QFileDialog.getSaveFileName(self, 'Save File','.') # Append extension if not there yet if you use python3: # if not self.filename.endswith(".wrt"): if not self.filename.endsWith(".wrt"): self.filename += ".wrt" # We just store the contents of the text file along with the # format in html, which Qt does in a very nice way for us with open(self.filename,"wt") as file: file.write(self.textEdit.toHtml()) def 
openFile(self): # Get filename and show only .writer files self.filename = QtGui.QFileDialog.getOpenFileName(self, 'Open File',".",filter= "All (*);;txt(*.wrt *.txt)") if self.filename: with open(self.filename,"rt") as file: self.textEdit.setText(file.read()) def preview(self): # Open preview dialog preview = QtGui.QPrintPreviewDialog() # If a print is requested, open print dialog preview.paintRequested.connect(lambda p: self.textEdit.print_(p)) preview.exec_() def printHandler(self): # Open printing dialog dialog = QtGui.QPrintDialog() if dialog.exec_() == QtGui.QDialog.Accepted: self.textEdit.document().print_(dialog.printer()) def bulletList(self): cursor = self.textEdit.textCursor() # Insert bulleted list cursor.insertList(QtGui.QTextListFormat.ListDisc) def numberList(self): cursor = self.textEdit.textCursor() # Insert list with numbers cursor.insertList(QtGui.QTextListFormat.ListDecimal) def fontColorChanged(self): # Get a color from the text dialog color = QtGui.QColorDialog.getColor() # Set it as the new text color self.textEdit.setTextColor(color) def highlight(self): color = QtGui.QColorDialog.getColor() self.textEdit.setTextBackgroundColor(color) def bold(self): if self.textEdit.fontWeight() == QtGui.QFont.Bold: self.textEdit.setFontWeight(QtGui.QFont.Normal) else: self.textEdit.setFontWeight(QtGui.QFont.Bold) def italic(self): state = self.textEdit.fontItalic() self.textEdit.setFontItalic(not state) def underline(self): state = self.textEdit.fontUnderline() self.textEdit.setFontUnderline(not state) def strike(self): # Grab the text's format fmt = self.textEdit.currentCharFormat() # Set the fontStrikeOut property to its opposite fmt.setFontStrikeOut(not fmt.fontStrikeOut()) # And set the next char format self.textEdit.setCurrentCharFormat(fmt) def superScript(self): # Grab the current format fmt = self.textEdit.currentCharFormat() # And get the vertical alignment property align = fmt.verticalAlignment() # Toggle the state if align == 
QtGui.QTextCharFormat.AlignNormal: fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignSuperScript) else: fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignNormal) # Set the new format self.textEdit.setCurrentCharFormat(fmt) def subScript(self): # Grab the current format fmt = self.textEdit.currentCharFormat() # And get the vertical alignment property align = fmt.verticalAlignment() # Toggle the state if align == QtGui.QTextCharFormat.AlignNormal: fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignSubScript) else: fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignNormal) # Set the new format self.textEdit.setCurrentCharFormat(fmt) def main(): app = QtGui.QApplication(sys.argv) form = Notepad() form.show() app.exec_() if __name__ == '__main__': main()
<filename>unit_05/main4.py # -*- coding: utf-8 -*- #! /usr/bin/python import sys import os from PyQt4 import QtGui from PyQt4.QtCore import Qt import design4 class Notepad(QtGui.QMainWindow, design4.Ui_MainWindow): def __init__(self, parent=None): super(Notepad, self).__init__(parent) self.filename = "" self.setupUi(self) self.closeAction.triggered.connect(self.close) self.newAction.triggered.connect(self.newFile) self.saveAction.triggered.connect(self.saveFile) self.openAction.triggered.connect(self.openFile) self.printAction.triggered.connect(self.printHandler) self.previewAction.triggered.connect(self.preview) self.cutAction.triggered.connect(self.textEdit.cut) self.copyAction.triggered.connect(self.textEdit.copy) self.pasteAction.triggered.connect(self.textEdit.paste) self.undoAction.triggered.connect(self.textEdit.undo) self.redoAction.triggered.connect(self.textEdit.redo) self.bulletAction.triggered.connect(self.bulletList) self.numberedAction.triggered.connect(self.numberList) self.fontColor.triggered.connect(self.fontColorChanged) self.backColor.triggered.connect(self.highlight) self.boldAction.triggered.connect(self.bold) self.italicAction.triggered.connect(self.italic) self.underlAction.triggered.connect(self.underline) self.strikeAction.triggered.connect(self.strike) self.superAction.triggered.connect(self.superScript) self.subAction.triggered.connect(self.subScript) def newFile(self): #self.textEdit.clear() spawn = Notepad(self) spawn.show() def saveFile(self): # Only open dialog if there is no filename yet if not self.filename: self.filename = QtGui.QFileDialog.getSaveFileName(self, 'Save File','.') # Append extension if not there yet if you use python3: # if not self.filename.endswith(".wrt"): if not self.filename.endsWith(".wrt"): self.filename += ".wrt" # We just store the contents of the text file along with the # format in html, which Qt does in a very nice way for us with open(self.filename,"wt") as file: file.write(self.textEdit.toHtml()) def 
openFile(self): # Get filename and show only .writer files self.filename = QtGui.QFileDialog.getOpenFileName(self, 'Open File',".",filter= "All (*);;txt(*.wrt *.txt)") if self.filename: with open(self.filename,"rt") as file: self.textEdit.setText(file.read()) def preview(self): # Open preview dialog preview = QtGui.QPrintPreviewDialog() # If a print is requested, open print dialog preview.paintRequested.connect(lambda p: self.textEdit.print_(p)) preview.exec_() def printHandler(self): # Open printing dialog dialog = QtGui.QPrintDialog() if dialog.exec_() == QtGui.QDialog.Accepted: self.textEdit.document().print_(dialog.printer()) def bulletList(self): cursor = self.textEdit.textCursor() # Insert bulleted list cursor.insertList(QtGui.QTextListFormat.ListDisc) def numberList(self): cursor = self.textEdit.textCursor() # Insert list with numbers cursor.insertList(QtGui.QTextListFormat.ListDecimal) def fontColorChanged(self): # Get a color from the text dialog color = QtGui.QColorDialog.getColor() # Set it as the new text color self.textEdit.setTextColor(color) def highlight(self): color = QtGui.QColorDialog.getColor() self.textEdit.setTextBackgroundColor(color) def bold(self): if self.textEdit.fontWeight() == QtGui.QFont.Bold: self.textEdit.setFontWeight(QtGui.QFont.Normal) else: self.textEdit.setFontWeight(QtGui.QFont.Bold) def italic(self): state = self.textEdit.fontItalic() self.textEdit.setFontItalic(not state) def underline(self): state = self.textEdit.fontUnderline() self.textEdit.setFontUnderline(not state) def strike(self): # Grab the text's format fmt = self.textEdit.currentCharFormat() # Set the fontStrikeOut property to its opposite fmt.setFontStrikeOut(not fmt.fontStrikeOut()) # And set the next char format self.textEdit.setCurrentCharFormat(fmt) def superScript(self): # Grab the current format fmt = self.textEdit.currentCharFormat() # And get the vertical alignment property align = fmt.verticalAlignment() # Toggle the state if align == 
QtGui.QTextCharFormat.AlignNormal: fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignSuperScript) else: fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignNormal) # Set the new format self.textEdit.setCurrentCharFormat(fmt) def subScript(self): # Grab the current format fmt = self.textEdit.currentCharFormat() # And get the vertical alignment property align = fmt.verticalAlignment() # Toggle the state if align == QtGui.QTextCharFormat.AlignNormal: fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignSubScript) else: fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignNormal) # Set the new format self.textEdit.setCurrentCharFormat(fmt) def main(): app = QtGui.QApplication(sys.argv) form = Notepad() form.show() app.exec_() if __name__ == '__main__': main()
en
0.698588
# -*- coding: utf-8 -*- #! /usr/bin/python #self.textEdit.clear() # Only open dialog if there is no filename yet # Append extension if not there yet if you use python3: # if not self.filename.endswith(".wrt"): # We just store the contents of the text file along with the # format in html, which Qt does in a very nice way for us # Get filename and show only .writer files # Open preview dialog # If a print is requested, open print dialog # Open printing dialog # Insert bulleted list # Insert list with numbers # Get a color from the text dialog # Set it as the new text color # Grab the text's format # Set the fontStrikeOut property to its opposite # And set the next char format # Grab the current format # And get the vertical alignment property # Toggle the state # Set the new format # Grab the current format # And get the vertical alignment property # Toggle the state # Set the new format
2.593717
3
release/stubs.min/System/Diagnostics/__init___parts/DebuggerVisualizerAttribute.py
tranconbv/ironpython-stubs
0
6628669
<filename>release/stubs.min/System/Diagnostics/__init___parts/DebuggerVisualizerAttribute.py class DebuggerVisualizerAttribute: """ Specifies that the type has a visualizer. This class cannot be inherited. DebuggerVisualizerAttribute(visualizerTypeName: str) DebuggerVisualizerAttribute(visualizerTypeName: str,visualizerObjectSourceTypeName: str) DebuggerVisualizerAttribute(visualizerTypeName: str,visualizerObjectSource: Type) DebuggerVisualizerAttribute(visualizer: Type) DebuggerVisualizerAttribute(visualizer: Type,visualizerObjectSource: Type) DebuggerVisualizerAttribute(visualizer: Type,visualizerObjectSourceTypeName: str) """ def ZZZ(self): """hardcoded/mock instance of the class""" return DebuggerVisualizerAttribute() instance=ZZZ() """hardcoded/returns an instance of the class""" def __init__(self,*args): """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod def __new__(self,*__args): """ __new__(cls: type,visualizerTypeName: str) __new__(cls: type,visualizerTypeName: str,visualizerObjectSourceTypeName: str) __new__(cls: type,visualizerTypeName: str,visualizerObjectSource: Type) __new__(cls: type,visualizer: Type) __new__(cls: type,visualizer: Type,visualizerObjectSource: Type) __new__(cls: type,visualizer: Type,visualizerObjectSourceTypeName: str) """ pass Description=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the description of the visualizer. Get: Description(self: DebuggerVisualizerAttribute) -> str Set: Description(self: DebuggerVisualizerAttribute)=value """ Target=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the target type when the attribute is applied at the assembly level. 
Get: Target(self: DebuggerVisualizerAttribute) -> Type Set: Target(self: DebuggerVisualizerAttribute)=value """ TargetTypeName=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the fully qualified type name when the attribute is applied at the assembly level. Get: TargetTypeName(self: DebuggerVisualizerAttribute) -> str Set: TargetTypeName(self: DebuggerVisualizerAttribute)=value """ VisualizerObjectSourceTypeName=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets the fully qualified type name of the visualizer object source. Get: VisualizerObjectSourceTypeName(self: DebuggerVisualizerAttribute) -> str """ VisualizerTypeName=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets the fully qualified type name of the visualizer. Get: VisualizerTypeName(self: DebuggerVisualizerAttribute) -> str """
<filename>release/stubs.min/System/Diagnostics/__init___parts/DebuggerVisualizerAttribute.py class DebuggerVisualizerAttribute: """ Specifies that the type has a visualizer. This class cannot be inherited. DebuggerVisualizerAttribute(visualizerTypeName: str) DebuggerVisualizerAttribute(visualizerTypeName: str,visualizerObjectSourceTypeName: str) DebuggerVisualizerAttribute(visualizerTypeName: str,visualizerObjectSource: Type) DebuggerVisualizerAttribute(visualizer: Type) DebuggerVisualizerAttribute(visualizer: Type,visualizerObjectSource: Type) DebuggerVisualizerAttribute(visualizer: Type,visualizerObjectSourceTypeName: str) """ def ZZZ(self): """hardcoded/mock instance of the class""" return DebuggerVisualizerAttribute() instance=ZZZ() """hardcoded/returns an instance of the class""" def __init__(self,*args): """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod def __new__(self,*__args): """ __new__(cls: type,visualizerTypeName: str) __new__(cls: type,visualizerTypeName: str,visualizerObjectSourceTypeName: str) __new__(cls: type,visualizerTypeName: str,visualizerObjectSource: Type) __new__(cls: type,visualizer: Type) __new__(cls: type,visualizer: Type,visualizerObjectSource: Type) __new__(cls: type,visualizer: Type,visualizerObjectSourceTypeName: str) """ pass Description=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the description of the visualizer. Get: Description(self: DebuggerVisualizerAttribute) -> str Set: Description(self: DebuggerVisualizerAttribute)=value """ Target=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the target type when the attribute is applied at the assembly level. 
Get: Target(self: DebuggerVisualizerAttribute) -> Type Set: Target(self: DebuggerVisualizerAttribute)=value """ TargetTypeName=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the fully qualified type name when the attribute is applied at the assembly level. Get: TargetTypeName(self: DebuggerVisualizerAttribute) -> str Set: TargetTypeName(self: DebuggerVisualizerAttribute)=value """ VisualizerObjectSourceTypeName=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets the fully qualified type name of the visualizer object source. Get: VisualizerObjectSourceTypeName(self: DebuggerVisualizerAttribute) -> str """ VisualizerTypeName=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets the fully qualified type name of the visualizer. Get: VisualizerTypeName(self: DebuggerVisualizerAttribute) -> str """
en
0.290464
Specifies that the type has a visualizer. This class cannot be inherited. DebuggerVisualizerAttribute(visualizerTypeName: str) DebuggerVisualizerAttribute(visualizerTypeName: str,visualizerObjectSourceTypeName: str) DebuggerVisualizerAttribute(visualizerTypeName: str,visualizerObjectSource: Type) DebuggerVisualizerAttribute(visualizer: Type) DebuggerVisualizerAttribute(visualizer: Type,visualizerObjectSource: Type) DebuggerVisualizerAttribute(visualizer: Type,visualizerObjectSourceTypeName: str) hardcoded/mock instance of the class hardcoded/returns an instance of the class x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature __new__(cls: type,visualizerTypeName: str) __new__(cls: type,visualizerTypeName: str,visualizerObjectSourceTypeName: str) __new__(cls: type,visualizerTypeName: str,visualizerObjectSource: Type) __new__(cls: type,visualizer: Type) __new__(cls: type,visualizer: Type,visualizerObjectSource: Type) __new__(cls: type,visualizer: Type,visualizerObjectSourceTypeName: str) Gets or sets the description of the visualizer. Get: Description(self: DebuggerVisualizerAttribute) -> str Set: Description(self: DebuggerVisualizerAttribute)=value Gets or sets the target type when the attribute is applied at the assembly level. Get: Target(self: DebuggerVisualizerAttribute) -> Type Set: Target(self: DebuggerVisualizerAttribute)=value Gets or sets the fully qualified type name when the attribute is applied at the assembly level. Get: TargetTypeName(self: DebuggerVisualizerAttribute) -> str Set: TargetTypeName(self: DebuggerVisualizerAttribute)=value Gets the fully qualified type name of the visualizer object source. Get: VisualizerObjectSourceTypeName(self: DebuggerVisualizerAttribute) -> str Gets the fully qualified type name of the visualizer. Get: VisualizerTypeName(self: DebuggerVisualizerAttribute) -> str
1.856482
2
app011.py
ChloeRuan/HelloWorld
0
6628670
# comparison operator temperature = 30 # == if temperature >= 30: print("It's a hot day") else: print("It's not a hot day") # exercise name_character = 3 if name_character < 3: print("name must be at least 3 characters") elif name_character > 50: print("name can be a maximum of 50 characters") else: print("name looks good!") # correction name = "J" print(len(name)) if len(name) < 3: print("name must be at least 3 characters") elif len(name) > 50: print("name can be a maximum of 50 characters") else: print("name looks good!")
# comparison operator temperature = 30 # == if temperature >= 30: print("It's a hot day") else: print("It's not a hot day") # exercise name_character = 3 if name_character < 3: print("name must be at least 3 characters") elif name_character > 50: print("name can be a maximum of 50 characters") else: print("name looks good!") # correction name = "J" print(len(name)) if len(name) < 3: print("name must be at least 3 characters") elif len(name) > 50: print("name can be a maximum of 50 characters") else: print("name looks good!")
en
0.865793
# comparison operator # == # exercise # correction
4.488122
4
core/events/guild.py
Thirio27/Pemgu-Bot
1
6628671
<reponame>Thirio27/Pemgu-Bot<filename>core/events/guild.py import discord from discord.ext import commands class OnGuild(commands.Cog): def __init__(self, bot): self.bot = bot @commands.Cog.listener() async def on_guild_remove(self, guild:discord.Guild): prefix = await self.bot.postgres.fetchval("SELECT prefix FROM prefixes WHERE guild_id=$1", guild.id) if prefix: await self.bot.postgres.execute("DELETE FROM prefixes WHERE guild_id=$1", guild.id) welcome = await self.bot.postgres.fetchval("SELECT msg FROM welcome WHERE guild_id=$1", guild.id) if welcome: await self.bot.postgres.execute("DELETE FROM welcome WHERE guild_id=$1", guild.id) goodbye = await self.bot.postgres.fetchval("SELECT msg FROM goodbye WHERE guild_id=$1", guild.id) if goodbye: await self.bot.postgres.execute("DELETE FROM goodbye WHERE guild_id=$1", guild.id) def setup(bot): bot.add_cog(OnGuild(bot))
import discord from discord.ext import commands class OnGuild(commands.Cog): def __init__(self, bot): self.bot = bot @commands.Cog.listener() async def on_guild_remove(self, guild:discord.Guild): prefix = await self.bot.postgres.fetchval("SELECT prefix FROM prefixes WHERE guild_id=$1", guild.id) if prefix: await self.bot.postgres.execute("DELETE FROM prefixes WHERE guild_id=$1", guild.id) welcome = await self.bot.postgres.fetchval("SELECT msg FROM welcome WHERE guild_id=$1", guild.id) if welcome: await self.bot.postgres.execute("DELETE FROM welcome WHERE guild_id=$1", guild.id) goodbye = await self.bot.postgres.fetchval("SELECT msg FROM goodbye WHERE guild_id=$1", guild.id) if goodbye: await self.bot.postgres.execute("DELETE FROM goodbye WHERE guild_id=$1", guild.id) def setup(bot): bot.add_cog(OnGuild(bot))
none
1
2.641723
3
conan/tools/env/__init__.py
thombet/conan
1
6628672
from conan.tools.env.environment import Environment from conan.tools.env.virtualenv import VirtualEnv
from conan.tools.env.environment import Environment from conan.tools.env.virtualenv import VirtualEnv
none
1
1.164016
1
server/app/automata_learning/black_box/pac_learning/smart_teacher/timeInterval.py
MrEnvision/TA-learning-site
4
6628673
<reponame>MrEnvision/TA-learning-site from enum import IntEnum class Bracket(IntEnum): RO = 1 LC = 2 RC = 3 LO = 4 class BracketNum: def __init__(self, value, bracket): self.value = value self.bracket = bracket def __eq__(self, bn): if self.value == '+' and bn.value == '+': return True elif self.value == '+' and bn.value != '+': return False elif self.value != '+' and bn.value == '+': return False elif float(self.value) == float(bn.value) and self.bracket == bn.bracket: return True else: return False def complement(self): if self.value == '+': return BracketNum('+', Bracket.RO) # ceil if float(self.value) == 0 and self.bracket == Bracket.LC: return BracketNum('0', Bracket.LC) # floor tempValue = self.value tempBracket = None if self.bracket == Bracket.LC: tempBracket = Bracket.RO if self.bracket == Bracket.RC: tempBracket = Bracket.LO if self.bracket == Bracket.LO: tempBracket = Bracket.RC if self.bracket == Bracket.RO: tempBracket = Bracket.LC return BracketNum(tempValue, tempBracket) def __lt__(self, bn): if self.value == '+': return False elif bn.value == '+': return True elif float(self.value) > float(bn.value): return False elif float(self.value) < float(bn.value): return True else: if self.bracket < bn.bracket: return True else: return False def __gt__(self, bn): if self.value == '+': if bn.value == '+': return False else: return True if bn.value == '+': return False if float(self.value) > float(bn.value): return True elif float(self.value) < float(bn.value): return False else: if self.bracket > bn.bracket: return True else: return False def __ge__(self, bn): return not self.__lt__(bn) def __le__(self, bn): return not self.__gt__(bn) def getBN(self): if self.bracket == Bracket.LC: return '[' + self.value if self.bracket == Bracket.LO: return '(' + self.value if self.bracket == Bracket.RC: return self.value + ']' if self.bracket == Bracket.RO: return self.value + ')' class Guard: def __init__(self, guard): self.guard = guard self.__build() def __build(self): 
min_type, max_type = self.guard.split(',') # 处理左边 if min_type[0] == '[': self.closed_min = True min_bn_bracket = Bracket.LC else: self.closed_min = False min_bn_bracket = Bracket.LO self.min_value = min_type[1:].strip() self.min_bn = BracketNum(self.min_value, min_bn_bracket) # 处理右边 if max_type[-1] == ']': self.closed_max = True max_bn_bracket = Bracket.RC else: self.closed_max = False max_bn_bracket = Bracket.RO self.max_value = max_type[:-1].strip() self.max_bn = BracketNum(self.max_value, max_bn_bracket) def __eq__(self, guard): if self.min_bn == guard.min_bn and self.max_bn == guard.max_bn: return True else: return False def get_min(self): return float(self.min_value) def get_closed_min(self): return self.closed_min def get_max(self): if self.max_value == '+': return float("inf") else: return float(self.max_value) def get_closed_max(self): return self.closed_max def __hash__(self): return hash(("CONSTRAINT", self.get_min(), self.closed_min, self.get_max(), self.closed_max)) def is_point(self): if self.min_value == '+' or self.max_value == '+': return False if self.get_min() == self.get_max() and self.closed_min and self.closed_max: return True else: return False def is_subset(self, c2): min_bn1 = self.min_bn max_bn1 = self.max_bn min_bn2 = c2.min_bn max_bn2 = c2.max_bn if min_bn1 >= min_bn2 and max_bn1 <= max_bn2: return True else: return False def is_in_interval(self, num): if num < self.get_min(): return False elif num == self.get_min(): if self.closed_min: return True else: return False elif self.get_min() < num < self.get_max(): return True elif num == self.get_max(): if self.closed_max: return True else: return False else: return False def is_empty(self): if self.max_bn < self.min_bn: return True else: return False def show(self): return self.guard # Merge guards def simple_guards(guards): if len(guards) == 1 or len(guards) == 0: return guards else: sorted_guards = sort_guards(guards) result = [] temp_guard = sorted_guards[0] for i in range(1, 
len(sorted_guards)): first_right = temp_guard.max_bn second_left = sorted_guards[i].min_bn if float(first_right.value) == float(second_left.value): if (first_right.bracket == 1 and second_left.bracket == 2) or (first_right.bracket == 3 and second_left.bracket == 4): left = temp_guard.guard.split(',')[0] right = sorted_guards[i].guard.split(',')[1] guard = Guard(left + ',' + right) temp_guard = guard elif first_right.bracket == 1 and second_left.bracket == 4: result.append(temp_guard) temp_guard = sorted_guards[i] else: result.append(temp_guard) temp_guard = sorted_guards[i] result.append(temp_guard) return result # Sort guards def sort_guards(guards): for i in range(len(guards) - 1): for j in range(len(guards) - i - 1): if guards[j].max_bn > guards[j + 1].max_bn: guards[j], guards[j + 1] = guards[j + 1], guards[j] return guards
from enum import IntEnum class Bracket(IntEnum): RO = 1 LC = 2 RC = 3 LO = 4 class BracketNum: def __init__(self, value, bracket): self.value = value self.bracket = bracket def __eq__(self, bn): if self.value == '+' and bn.value == '+': return True elif self.value == '+' and bn.value != '+': return False elif self.value != '+' and bn.value == '+': return False elif float(self.value) == float(bn.value) and self.bracket == bn.bracket: return True else: return False def complement(self): if self.value == '+': return BracketNum('+', Bracket.RO) # ceil if float(self.value) == 0 and self.bracket == Bracket.LC: return BracketNum('0', Bracket.LC) # floor tempValue = self.value tempBracket = None if self.bracket == Bracket.LC: tempBracket = Bracket.RO if self.bracket == Bracket.RC: tempBracket = Bracket.LO if self.bracket == Bracket.LO: tempBracket = Bracket.RC if self.bracket == Bracket.RO: tempBracket = Bracket.LC return BracketNum(tempValue, tempBracket) def __lt__(self, bn): if self.value == '+': return False elif bn.value == '+': return True elif float(self.value) > float(bn.value): return False elif float(self.value) < float(bn.value): return True else: if self.bracket < bn.bracket: return True else: return False def __gt__(self, bn): if self.value == '+': if bn.value == '+': return False else: return True if bn.value == '+': return False if float(self.value) > float(bn.value): return True elif float(self.value) < float(bn.value): return False else: if self.bracket > bn.bracket: return True else: return False def __ge__(self, bn): return not self.__lt__(bn) def __le__(self, bn): return not self.__gt__(bn) def getBN(self): if self.bracket == Bracket.LC: return '[' + self.value if self.bracket == Bracket.LO: return '(' + self.value if self.bracket == Bracket.RC: return self.value + ']' if self.bracket == Bracket.RO: return self.value + ')' class Guard: def __init__(self, guard): self.guard = guard self.__build() def __build(self): min_type, max_type = 
self.guard.split(',') # 处理左边 if min_type[0] == '[': self.closed_min = True min_bn_bracket = Bracket.LC else: self.closed_min = False min_bn_bracket = Bracket.LO self.min_value = min_type[1:].strip() self.min_bn = BracketNum(self.min_value, min_bn_bracket) # 处理右边 if max_type[-1] == ']': self.closed_max = True max_bn_bracket = Bracket.RC else: self.closed_max = False max_bn_bracket = Bracket.RO self.max_value = max_type[:-1].strip() self.max_bn = BracketNum(self.max_value, max_bn_bracket) def __eq__(self, guard): if self.min_bn == guard.min_bn and self.max_bn == guard.max_bn: return True else: return False def get_min(self): return float(self.min_value) def get_closed_min(self): return self.closed_min def get_max(self): if self.max_value == '+': return float("inf") else: return float(self.max_value) def get_closed_max(self): return self.closed_max def __hash__(self): return hash(("CONSTRAINT", self.get_min(), self.closed_min, self.get_max(), self.closed_max)) def is_point(self): if self.min_value == '+' or self.max_value == '+': return False if self.get_min() == self.get_max() and self.closed_min and self.closed_max: return True else: return False def is_subset(self, c2): min_bn1 = self.min_bn max_bn1 = self.max_bn min_bn2 = c2.min_bn max_bn2 = c2.max_bn if min_bn1 >= min_bn2 and max_bn1 <= max_bn2: return True else: return False def is_in_interval(self, num): if num < self.get_min(): return False elif num == self.get_min(): if self.closed_min: return True else: return False elif self.get_min() < num < self.get_max(): return True elif num == self.get_max(): if self.closed_max: return True else: return False else: return False def is_empty(self): if self.max_bn < self.min_bn: return True else: return False def show(self): return self.guard # Merge guards def simple_guards(guards): if len(guards) == 1 or len(guards) == 0: return guards else: sorted_guards = sort_guards(guards) result = [] temp_guard = sorted_guards[0] for i in range(1, len(sorted_guards)): first_right 
= temp_guard.max_bn second_left = sorted_guards[i].min_bn if float(first_right.value) == float(second_left.value): if (first_right.bracket == 1 and second_left.bracket == 2) or (first_right.bracket == 3 and second_left.bracket == 4): left = temp_guard.guard.split(',')[0] right = sorted_guards[i].guard.split(',')[1] guard = Guard(left + ',' + right) temp_guard = guard elif first_right.bracket == 1 and second_left.bracket == 4: result.append(temp_guard) temp_guard = sorted_guards[i] else: result.append(temp_guard) temp_guard = sorted_guards[i] result.append(temp_guard) return result # Sort guards def sort_guards(guards): for i in range(len(guards) - 1): for j in range(len(guards) - i - 1): if guards[j].max_bn > guards[j + 1].max_bn: guards[j], guards[j + 1] = guards[j + 1], guards[j] return guards
en
0.380238
# ceil # floor # 处理左边 # 处理右边 # Merge guards # Sort guards
3.534348
4
atcoder/abc122/b/solution.py
yhmin84/codeforces
0
6628674
<gh_stars>0 def atcoder(string): acgt = set('ACGT') on_count = False count = 0 max_count = 0 for c in string: if c in acgt: if not on_count: on_count = True count += 1 else: if on_count: on_count = False max_count = max(max_count, count) count = 0 if on_count: max_count = max(max_count, count) return max_count if __name__ == "__main__": string = input() print(atcoder(string))
def atcoder(string): acgt = set('ACGT') on_count = False count = 0 max_count = 0 for c in string: if c in acgt: if not on_count: on_count = True count += 1 else: if on_count: on_count = False max_count = max(max_count, count) count = 0 if on_count: max_count = max(max_count, count) return max_count if __name__ == "__main__": string = input() print(atcoder(string))
none
1
3.399755
3
lte/gateway/c/oai/test/test_e2e_mme_startup.py
saurabhsoni88/magma
2
6628675
<reponame>saurabhsoni88/magma<gh_stars>1-10 #!/usr/bin/env python # # Licensed to the OpenAirInterface (OAI) Software Alliance under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The OpenAirInterface Software Alliance licenses this file to You under # the terms found in the LICENSE file in the root of this source tree. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #------------------------------------------------------------------------------- # For more information about the OpenAirInterface (OAI) Software Alliance: # <EMAIL> # from mme_app_driver import MMEAppDriver def test_non_blocking_mme_init(): """Test that MME startup does not block during init phase. Tests very specifically for s6a init running without blocking MME startup. """ log_conditions = ( # In regex form r'Initializing S6a interface', # S6A init is running r'S6a peer connection attempt \d+ / \d+', # S6A attempting to connect r'MME app initialization complete', # MME proceeded past init steps ) MMEAppDriver().run(log_conditions=log_conditions) def main(): """Main method for testing.""" test_non_blocking_mme_init() if __name__ == '__main__': main()
#!/usr/bin/env python # # Licensed to the OpenAirInterface (OAI) Software Alliance under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The OpenAirInterface Software Alliance licenses this file to You under # the terms found in the LICENSE file in the root of this source tree. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #------------------------------------------------------------------------------- # For more information about the OpenAirInterface (OAI) Software Alliance: # <EMAIL> # from mme_app_driver import MMEAppDriver def test_non_blocking_mme_init(): """Test that MME startup does not block during init phase. Tests very specifically for s6a init running without blocking MME startup. """ log_conditions = ( # In regex form r'Initializing S6a interface', # S6A init is running r'S6a peer connection attempt \d+ / \d+', # S6A attempting to connect r'MME app initialization complete', # MME proceeded past init steps ) MMEAppDriver().run(log_conditions=log_conditions) def main(): """Main method for testing.""" test_non_blocking_mme_init() if __name__ == '__main__': main()
en
0.826431
#!/usr/bin/env python # # Licensed to the OpenAirInterface (OAI) Software Alliance under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The OpenAirInterface Software Alliance licenses this file to You under # the terms found in the LICENSE file in the root of this source tree. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #------------------------------------------------------------------------------- # For more information about the OpenAirInterface (OAI) Software Alliance: # <EMAIL> # Test that MME startup does not block during init phase. Tests very specifically for s6a init running without blocking MME startup. # In regex form # S6A init is running # S6A attempting to connect # MME proceeded past init steps Main method for testing.
1.833788
2
main/views.py
jmhubbard/Good_Life_Meal_Prep_Subscribers_Page
0
6628676
<gh_stars>0 from django.shortcuts import render from django.views.generic.base import TemplateView from django.utils.decorators import method_decorator from .decorators import unauthenticated_user from .forms import CustomAuthenticationForm, ContactForm from django.contrib.auth.views import (LoginView, LogoutView) from django.contrib.messages.views import SuccessMessageMixin from django.views.generic.edit import FormView from django.urls import reverse_lazy from django.contrib.auth.mixins import LoginRequiredMixin from users.models import User class HomePageView(TemplateView): """ The homepage view that just include a signup button and some explanitory text. """ template_name = "main/home.html" @method_decorator(unauthenticated_user) #If user is already authenticated they will be redirected to their subscription page def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) class UserLoginView(LoginView): """ The login view for unauthenticated users. If a user is already authenticated, they will be redirected to the menu page. """ form_class = CustomAuthenticationForm template_name = "registration/login.html" @method_decorator(unauthenticated_user) #If user is already authenticated they will be redirected to their subscription page def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) class UserLogoutView(LogoutView): """ A generic logout view that redirects users to the homepage after logging them out. """ next_page = 'home' class ContactFormView(SuccessMessageMixin, LoginRequiredMixin, FormView): template_name = "main/contact_form.html" form_class = ContactForm success_url = reverse_lazy('contact_form') success_message = "Thank you for contacting us. We value your feedback. Somebody will reply within 24 hours." def form_valid(self, form): current_user = User.objects.get(email=self.request.user) form.send_message(current_user) return super().form_valid(form)
from django.shortcuts import render from django.views.generic.base import TemplateView from django.utils.decorators import method_decorator from .decorators import unauthenticated_user from .forms import CustomAuthenticationForm, ContactForm from django.contrib.auth.views import (LoginView, LogoutView) from django.contrib.messages.views import SuccessMessageMixin from django.views.generic.edit import FormView from django.urls import reverse_lazy from django.contrib.auth.mixins import LoginRequiredMixin from users.models import User class HomePageView(TemplateView): """ The homepage view that just include a signup button and some explanitory text. """ template_name = "main/home.html" @method_decorator(unauthenticated_user) #If user is already authenticated they will be redirected to their subscription page def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) class UserLoginView(LoginView): """ The login view for unauthenticated users. If a user is already authenticated, they will be redirected to the menu page. """ form_class = CustomAuthenticationForm template_name = "registration/login.html" @method_decorator(unauthenticated_user) #If user is already authenticated they will be redirected to their subscription page def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) class UserLogoutView(LogoutView): """ A generic logout view that redirects users to the homepage after logging them out. """ next_page = 'home' class ContactFormView(SuccessMessageMixin, LoginRequiredMixin, FormView): template_name = "main/contact_form.html" form_class = ContactForm success_url = reverse_lazy('contact_form') success_message = "Thank you for contacting us. We value your feedback. Somebody will reply within 24 hours." def form_valid(self, form): current_user = User.objects.get(email=self.request.user) form.send_message(current_user) return super().form_valid(form)
en
0.942437
The homepage view that just include a signup button and some explanitory text. #If user is already authenticated they will be redirected to their subscription page The login view for unauthenticated users. If a user is already authenticated, they will be redirected to the menu page. #If user is already authenticated they will be redirected to their subscription page A generic logout view that redirects users to the homepage after logging them out.
2.310461
2
built-in/TensorFlow/Official/cv/image_classification/AM3_ID1260_for_TensorFlow/datasets/create_dataset_miniImagenet.py
Ascend/modelzoo
12
6628677
<gh_stars>10-100 # # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ # Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# """Creates the mini-ImageNet dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from npu_bridge.npu_init import * import argparse import csv import os import sys import zipfile import numpy as np import scipy.misc import io # Make train, validation and test splits deterministic from one run to another np.random.seed(2017 + 5 + 17) def get_class_label_dict(class_label_addr): lines = [x.strip() for x in open(class_label_addr, 'r').readlines()] cld={} for l in lines: tl=l.split(' ') if tl[0] not in cld.keys(): cld[tl[0]]=tl[2].lower() return cld # def load_embedding_dict(emb_addr): # lines = [x.strip() for x in open(emb_addr, 'r', encoding="utf-8").readlines()] # emb_dict = {} # for l in lines: # w = l.split(' ') # print(l) # if w[0] not in emb_dict.keys(): # tmpv = [float(w[i]) for i in range(1, len(w))] # emb_dict[w[0]] = tmpv # return emb_dict def load_embedding_dict(emb_addr): fin = io.open(emb_addr, 'r', encoding='utf-8', newline='\n', errors='ignore') n, d = map(int, fin.readline().split()) data = {} for line in fin: #print(line) tokens = line.rstrip().split(' ') tmpv = [float(tokens[i]) for i in range(1, len(tokens))] data[tokens[0]] = tmpv return data def get_embeddings_for_labels(all_classes, cld, emb_dict): label_list = [] emb_list = [] no_emb = 0 print(all_classes) print(len(all_classes)) for c in all_classes: label_list.append(cld[c]) print(label_list) print(len(label_list)) for v in label_list: # check the embeddings of labels #print(v) labels = v.split('_') tmpv = np.zeros(300) tmpl = [] c = 0 for l in labels: if l in emb_dict.keys(): tmpv += emb_dict[l] tmpl.append(l) c += 1 if len(labels) != 1: if c != len(labels): print(v, c, tmpl) if c != 0: emb_list.append(tmpv / c) else: emb_list.append(tmpv) no_emb += 1 print("no embedding for " + v) print(no_emb) return emb_list def main(data_dir, output_dir, emb_addr, class_label_addr): print("loading the embedding dictionary....") cld = 
get_class_label_dict(class_label_addr) emb_dict = load_embedding_dict(emb_addr) for split in ('val', 'test', 'train'): # List of selected image files for the current split file_paths = [] with open('{}.csv'.format(split), 'r') as csv_file: # Read the CSV file for that split, and get all classes present in # that split. reader = csv.DictReader(csv_file, delimiter=',') file_paths, labels = zip( *((os.path.join('images', row['filename']), row['label']) for row in reader)) all_labels = sorted(list(set(labels))) print("getting word embeddings....") emb_list = get_embeddings_for_labels(all_labels,cld, emb_dict) print("saving word embeddings...") np.savez( os.path.join(output_dir, 'few-shot-wordemb-{}.npz'.format(split)), features=np.asarray(emb_list)) archive = zipfile.ZipFile(os.path.join(data_dir, 'images.zip'), 'r') # Processing loop over examples features, targets = [], [] for i, (file_path, label) in enumerate(zip(file_paths, labels)): # Write progress to stdout sys.stdout.write( '\r>> Processing {} image {}/{}'.format( split, i + 1, len(file_paths))) sys.stdout.flush() # Load image in RGB mode to ensure image.ndim == 3 file_path = archive.open(file_path) image = scipy.misc.imread(file_path, mode='RGB') # Infer class from filename. label = all_labels.index(label) # Central square crop of size equal to the image's smallest side. height, width, channels = image.shape crop_size = min(height, width) start_height = (height // 2) - (crop_size // 2) start_width = (width // 2) - (crop_size // 2) image = image[ start_height: start_height + crop_size, start_width: start_width + crop_size, :] # Resize image to 84 x 84. 
image = scipy.misc.imresize(image, (84, 84), interp='bilinear') features.append(image) targets.append(label) sys.stdout.write('\n') sys.stdout.flush() # Save dataset to disk features = np.stack(features, axis=0) targets = np.stack(targets, axis=0) permutation = np.random.permutation(len(features)) features = features[permutation] targets = targets[permutation] np.savez( os.path.join(output_dir, 'few-shot-{}.npz'.format(split)), features=features, targets=targets) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--data-dir', type=str, default=os.path.join(os.sep, 'mnt', 'datasets', 'public', 'mini-imagenet', 'raw-data'), help='Path to the raw data') parser.add_argument( '--output-dir', type=str, default=os.path.join(os.sep, 'mnt', 'datasets', 'public', 'mini-imagenet'), help='Output directory') parser.add_argument( '--emb_addr', type=str, default=os.path.join(os.sep, 'mnt', 'datasets', 'public', 'mini-imagenet', 'raw-data'), help='Path to the raw data') parser.add_argument( '--class_label_addr', type=str, default=os.path.join(os.sep, 'mnt', 'datasets', 'public', 'mini-imagenet'), help='Output directory') args = parser.parse_args() main(args.data_dir, args.output_dir, args.emb_addr, args.class_label_addr)
# # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ # Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# """Creates the mini-ImageNet dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from npu_bridge.npu_init import * import argparse import csv import os import sys import zipfile import numpy as np import scipy.misc import io # Make train, validation and test splits deterministic from one run to another np.random.seed(2017 + 5 + 17) def get_class_label_dict(class_label_addr): lines = [x.strip() for x in open(class_label_addr, 'r').readlines()] cld={} for l in lines: tl=l.split(' ') if tl[0] not in cld.keys(): cld[tl[0]]=tl[2].lower() return cld # def load_embedding_dict(emb_addr): # lines = [x.strip() for x in open(emb_addr, 'r', encoding="utf-8").readlines()] # emb_dict = {} # for l in lines: # w = l.split(' ') # print(l) # if w[0] not in emb_dict.keys(): # tmpv = [float(w[i]) for i in range(1, len(w))] # emb_dict[w[0]] = tmpv # return emb_dict def load_embedding_dict(emb_addr): fin = io.open(emb_addr, 'r', encoding='utf-8', newline='\n', errors='ignore') n, d = map(int, fin.readline().split()) data = {} for line in fin: #print(line) tokens = line.rstrip().split(' ') tmpv = [float(tokens[i]) for i in range(1, len(tokens))] data[tokens[0]] = tmpv return data def get_embeddings_for_labels(all_classes, cld, emb_dict): label_list = [] emb_list = [] no_emb = 0 print(all_classes) print(len(all_classes)) for c in all_classes: label_list.append(cld[c]) print(label_list) print(len(label_list)) for v in label_list: # check the embeddings of labels #print(v) labels = v.split('_') tmpv = np.zeros(300) tmpl = [] c = 0 for l in labels: if l in emb_dict.keys(): tmpv += emb_dict[l] tmpl.append(l) c += 1 if len(labels) != 1: if c != len(labels): print(v, c, tmpl) if c != 0: emb_list.append(tmpv / c) else: emb_list.append(tmpv) no_emb += 1 print("no embedding for " + v) print(no_emb) return emb_list def main(data_dir, output_dir, emb_addr, class_label_addr): print("loading the embedding dictionary....") cld = 
get_class_label_dict(class_label_addr) emb_dict = load_embedding_dict(emb_addr) for split in ('val', 'test', 'train'): # List of selected image files for the current split file_paths = [] with open('{}.csv'.format(split), 'r') as csv_file: # Read the CSV file for that split, and get all classes present in # that split. reader = csv.DictReader(csv_file, delimiter=',') file_paths, labels = zip( *((os.path.join('images', row['filename']), row['label']) for row in reader)) all_labels = sorted(list(set(labels))) print("getting word embeddings....") emb_list = get_embeddings_for_labels(all_labels,cld, emb_dict) print("saving word embeddings...") np.savez( os.path.join(output_dir, 'few-shot-wordemb-{}.npz'.format(split)), features=np.asarray(emb_list)) archive = zipfile.ZipFile(os.path.join(data_dir, 'images.zip'), 'r') # Processing loop over examples features, targets = [], [] for i, (file_path, label) in enumerate(zip(file_paths, labels)): # Write progress to stdout sys.stdout.write( '\r>> Processing {} image {}/{}'.format( split, i + 1, len(file_paths))) sys.stdout.flush() # Load image in RGB mode to ensure image.ndim == 3 file_path = archive.open(file_path) image = scipy.misc.imread(file_path, mode='RGB') # Infer class from filename. label = all_labels.index(label) # Central square crop of size equal to the image's smallest side. height, width, channels = image.shape crop_size = min(height, width) start_height = (height // 2) - (crop_size // 2) start_width = (width // 2) - (crop_size // 2) image = image[ start_height: start_height + crop_size, start_width: start_width + crop_size, :] # Resize image to 84 x 84. 
image = scipy.misc.imresize(image, (84, 84), interp='bilinear') features.append(image) targets.append(label) sys.stdout.write('\n') sys.stdout.flush() # Save dataset to disk features = np.stack(features, axis=0) targets = np.stack(targets, axis=0) permutation = np.random.permutation(len(features)) features = features[permutation] targets = targets[permutation] np.savez( os.path.join(output_dir, 'few-shot-{}.npz'.format(split)), features=features, targets=targets) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--data-dir', type=str, default=os.path.join(os.sep, 'mnt', 'datasets', 'public', 'mini-imagenet', 'raw-data'), help='Path to the raw data') parser.add_argument( '--output-dir', type=str, default=os.path.join(os.sep, 'mnt', 'datasets', 'public', 'mini-imagenet'), help='Output directory') parser.add_argument( '--emb_addr', type=str, default=os.path.join(os.sep, 'mnt', 'datasets', 'public', 'mini-imagenet', 'raw-data'), help='Path to the raw data') parser.add_argument( '--class_label_addr', type=str, default=os.path.join(os.sep, 'mnt', 'datasets', 'public', 'mini-imagenet'), help='Output directory') args = parser.parse_args() main(args.data_dir, args.output_dir, args.emb_addr, args.class_label_addr)
en
0.777852
# # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ # Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Creates the mini-ImageNet dataset. # Make train, validation and test splits deterministic from one run to another # def load_embedding_dict(emb_addr): # lines = [x.strip() for x in open(emb_addr, 'r', encoding="utf-8").readlines()] # emb_dict = {} # for l in lines: # w = l.split(' ') # print(l) # if w[0] not in emb_dict.keys(): # tmpv = [float(w[i]) for i in range(1, len(w))] # emb_dict[w[0]] = tmpv # return emb_dict #print(line) # check the embeddings of labels #print(v) # List of selected image files for the current split # Read the CSV file for that split, and get all classes present in # that split. 
# Processing loop over examples # Write progress to stdout # Load image in RGB mode to ensure image.ndim == 3 # Infer class from filename. # Central square crop of size equal to the image's smallest side. # Resize image to 84 x 84. # Save dataset to disk
1.737309
2
moodlefuse/filesystem/path_parser.py
BroganD1993/MoodleFUSE
4
6628678
<reponame>BroganD1993/MoodleFUSE<filename>moodlefuse/filesystem/path_parser.py #!/usr/bin/env python # encoding: utf-8 """Class to parse a filesystem path """ from moodlefuse.core import config class PathParser(object): @staticmethod def is_in_root(location): return len(location) is 0 @staticmethod def is_in_course(location): return len(location) is 1 @staticmethod def is_in_course_categorie(location): return len(location) is 2 @staticmethod def is_file(location): return len(location) is 3 @staticmethod def is_assignment(location): return len(location) is 4 @staticmethod def is_assignment_submission(location): return len(location) is 5 @staticmethod def get_position_in_filesystem_as_array(path): path = path.replace(config['LOCAL_MOODLE_FOLDER'] + '/', '') if len(path) is 0: return [] path_sections = path.split("/") return path_sections
#!/usr/bin/env python # encoding: utf-8 """Class to parse a filesystem path """ from moodlefuse.core import config class PathParser(object): @staticmethod def is_in_root(location): return len(location) is 0 @staticmethod def is_in_course(location): return len(location) is 1 @staticmethod def is_in_course_categorie(location): return len(location) is 2 @staticmethod def is_file(location): return len(location) is 3 @staticmethod def is_assignment(location): return len(location) is 4 @staticmethod def is_assignment_submission(location): return len(location) is 5 @staticmethod def get_position_in_filesystem_as_array(path): path = path.replace(config['LOCAL_MOODLE_FOLDER'] + '/', '') if len(path) is 0: return [] path_sections = path.split("/") return path_sections
en
0.519465
#!/usr/bin/env python # encoding: utf-8 Class to parse a filesystem path
2.63272
3
karp/domain/models/morphological_entry.py
spraakbanken/karp-backend-v6-tmp
1
6628679
from typing import Any, List, Tuple from paradigmextract import morphparser, paradigm as pe_paradigm from karp.domain.models.entry import Entry, EntryOp, EntryStatus from karp.utility import unique_id class MorphologicalEntry(Entry): def __init__( self, *args, form_msds: List[Tuple[str, Any]], var_insts: List[List[Tuple[str, Any]]], pos: str, **kwargs, ): super().__init__(*args, **kwargs) self.paradigm = pe_paradigm.Paradigm( form_msds=form_msds, var_insts=var_insts, p_id=self.entry_id, pos=pos, uuid=self.id, ) self.tags = ("inf aktiv", "inf s-form") if pos in ["vb", "vbm"] else () def get_inflection_table(self, wordform: str) -> List[Tuple[str, str]]: # for now, assume wordform is the baseform variables = morphparser.eval_baseform( self.paradigm, wordform, self.tags, ) print(f"variables = {variables}") if variables is None: print("early exit") return [] res = [] table = self.paradigm(*variables) for form, msd in table: res.append((msd, form)) if not msd: # when can this happend? res.append((None, form)) return res def create_morphological_entry( entry_id: str, *, pos: str, form_msds: List[Tuple[str, Any]], var_insts: List[List[Tuple[str, Any]]], resource_id: str, ) -> MorphologicalEntry: return MorphologicalEntry( entity_id=unique_id.make_unique_id(), entry_id=entry_id, resource_id=resource_id, body={}, message="", op=EntryOp.ADDED, status=EntryStatus.IN_PROGRESS, version=1, form_msds=form_msds, var_insts=var_insts, pos=pos, )
from typing import Any, List, Tuple from paradigmextract import morphparser, paradigm as pe_paradigm from karp.domain.models.entry import Entry, EntryOp, EntryStatus from karp.utility import unique_id class MorphologicalEntry(Entry): def __init__( self, *args, form_msds: List[Tuple[str, Any]], var_insts: List[List[Tuple[str, Any]]], pos: str, **kwargs, ): super().__init__(*args, **kwargs) self.paradigm = pe_paradigm.Paradigm( form_msds=form_msds, var_insts=var_insts, p_id=self.entry_id, pos=pos, uuid=self.id, ) self.tags = ("inf aktiv", "inf s-form") if pos in ["vb", "vbm"] else () def get_inflection_table(self, wordform: str) -> List[Tuple[str, str]]: # for now, assume wordform is the baseform variables = morphparser.eval_baseform( self.paradigm, wordform, self.tags, ) print(f"variables = {variables}") if variables is None: print("early exit") return [] res = [] table = self.paradigm(*variables) for form, msd in table: res.append((msd, form)) if not msd: # when can this happend? res.append((None, form)) return res def create_morphological_entry( entry_id: str, *, pos: str, form_msds: List[Tuple[str, Any]], var_insts: List[List[Tuple[str, Any]]], resource_id: str, ) -> MorphologicalEntry: return MorphologicalEntry( entity_id=unique_id.make_unique_id(), entry_id=entry_id, resource_id=resource_id, body={}, message="", op=EntryOp.ADDED, status=EntryStatus.IN_PROGRESS, version=1, form_msds=form_msds, var_insts=var_insts, pos=pos, )
en
0.944595
# for now, assume wordform is the baseform # when can this happend?
2.181572
2
flight_feed_operations/flight_stream_helper.py
openskies-sh/flight-assembly
0
6628680
from datetime import datetime, timedelta
import os
import json, redis
import logging
import requests
from auth_helper.common import get_walrus_database
from itertools import zip_longest
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
from os import environ as env


def batcher(iterable, n):
    """Iterate *iterable* in batches of size *n* (last batch padded with None)."""
    args = [iter(iterable)] * n
    return zip_longest(*args)


class StreamHelperOps():
    """Builds walrus consumer groups over the observation stream(s)."""

    def __init__(self):
        self.db = get_walrus_database()
        self.stream_keys = ['all_observations']

    def create_push_cg(self):
        self.get_push_cg(create=True)

    def get_push_cg(self, create=False):
        consumer_group = self.db.time_series('cg-push', self.stream_keys)
        if create:
            # Seed each stream so the group can be created, then start at '$'.
            for stream in self.stream_keys:
                self.db.xadd(stream, {'data': ''})
            consumer_group.create()
            consumer_group.set_id('$')
        return consumer_group

    def create_pull_cg(self):
        self.get_pull_cg(create=True)

    def get_pull_cg(self, create=False):
        consumer_group = self.db.time_series('cg-pull', self.stream_keys)
        if create:
            for stream in self.stream_keys:
                self.db.xadd(stream, {'data': ''})
            consumer_group.create()
            consumer_group.set_id('$')
        return consumer_group


class ObservationReadOperations():
    """Drains pending observation messages from a push consumer group."""

    def get_observations(self, push_cg):
        messages = push_cg.read()
        pending_messages = []
        credential_getter = PassportCredentialsGetter()
        credentials = credential_getter.get_cached_credentials()
        if 'error' in credentials:
            logging.error('Error in getting credentials %s' % credentials)
        else:
            for message in messages:
                # NOTE(review): ``message['metadata']`` indexes the message
                # object itself, unlike the other fields read off
                # ``message.data`` — confirm against the walrus message API.
                pending_messages.append({
                    'timestamp': message.timestamp,
                    'seq': message.sequence,
                    'msg_data': message.data,
                    'address': message.data['icao_address'],
                    'metadata': message['metadata'],
                })
        return pending_messages


class PassportCredentialsGetter():
    """Fetches OAuth client-credentials tokens, cached in redis for ~58 min."""

    def __init__(self):
        pass

    def get_cached_credentials(self):
        cache = redis.Redis(host=os.getenv('REDIS_HOST', "redis"),
                            port=os.getenv('REDIS_PORT', 6379))
        now = datetime.now()
        cache_key = 'airtraffic_access_token_details'
        token_details = cache.get(cache_key)
        if token_details:
            token_details = json.loads(token_details)
            created_at = token_details['created_at']
            set_date = datetime.strptime(created_at, "%Y-%m-%dT%H:%M:%S.%f")
            # NOTE(review): this condition looks inverted — it refreshes only
            # when ``now`` precedes ``set_date - 58min``, which a freshly
            # stored timestamp can never satisfy; confirm intended logic.
            if now < (set_date - timedelta(minutes=58)):
                credentials = self.get_write_credentials()
                cache.set(cache_key, json.dumps({'credentials': credentials,
                                                 'created_at': now.isoformat()}))
            else:
                credentials = token_details['credentials']
        else:
            credentials = self.get_write_credentials()
            cache.set(cache_key, json.dumps({'credentials': credentials,
                                             'created_at': now.isoformat()}))
            cache.expire(cache_key, timedelta(minutes=58))
        return credentials

    def get_write_credentials(self):
        payload = {"grant_type": "client_credentials",
                   "client_id": os.getenv('SPOTLIGHT_WRITE_CLIENT_ID'),
                   "client_secret": os.getenv('SPOTLIGHT_WRITE_CLIENT_SECRET'),
                   "audience": os.getenv('SPOTLIGHT_AUDIENCE'),
                   "scope": os.getenv('SPOTLIGHT_AIR_TRAFFIC_SCOPE')}
        url = os.getenv('PASSPORT_URL') + os.getenv('PASSPORT_TOKEN_URL')
        token_response = requests.post(url, data=payload)
        return token_response.json()
from datetime import datetime, timedelta import os import json, redis import logging import requests from auth_helper.common import get_walrus_database from itertools import zip_longest from dotenv import load_dotenv, find_dotenv load_dotenv(find_dotenv()) from os import environ as env # iterate a list in batches of size n def batcher(iterable, n): args = [iter(iterable)] * n return zip_longest(*args) class StreamHelperOps(): def __init__(self): self.db = get_walrus_database() self.stream_keys = ['all_observations'] def create_push_cg(self): self.get_push_cg(create=True) def get_push_cg(self,create=False): cg = self.db.time_series('cg-push', self.stream_keys) if create: for stream in self.stream_keys: self.db.xadd(stream, {'data': ''}) cg.create() cg.set_id('$') return cg def create_pull_cg(self): self.get_pull_cg(create=True) def get_pull_cg(self,create=False): cg = self.db.time_series('cg-pull', self.stream_keys) if create: for stream in self.stream_keys: self.db.xadd(stream, {'data': ''}) cg.create() cg.set_id('$') return cg class ObservationReadOperations(): def get_observations(self, push_cg): messages = push_cg.read() pending_messages = [] my_credentials = PassportCredentialsGetter() credentials = my_credentials.get_cached_credentials() if 'error' in credentials: logging.error('Error in getting credentials %s' % credentials) else: for message in messages: pending_messages.append({'timestamp': message.timestamp,'seq': message.sequence, 'msg_data':message.data, 'address':message.data['icao_address'], 'metadata':message['metadata']}) return pending_messages class PassportCredentialsGetter(): def __init__(self): pass def get_cached_credentials(self): r = redis.Redis(host=os.getenv('REDIS_HOST',"redis"), port =os.getenv('REDIS_PORT',6379)) now = datetime.now() cache_key = 'airtraffic_access_token_details' token_details = r.get(cache_key) if token_details: token_details = json.loads(token_details) created_at = token_details['created_at'] set_date = 
datetime.strptime(created_at,"%Y-%m-%dT%H:%M:%S.%f") if now < (set_date - timedelta(minutes=58)): credentials = self.get_write_credentials() r.set(cache_key, json.dumps({'credentials': credentials, 'created_at':now.isoformat()})) else: credentials = token_details['credentials'] else: credentials = self.get_write_credentials() r.set(cache_key, json.dumps({'credentials': credentials, 'created_at':now.isoformat()})) r.expire(cache_key, timedelta(minutes=58)) return credentials def get_write_credentials(self): payload = {"grant_type":"client_credentials","client_id": os.getenv('SPOTLIGHT_WRITE_CLIENT_ID'),"client_secret": os.getenv('SPOTLIGHT_WRITE_CLIENT_SECRET'),"audience": os.getenv('SPOTLIGHT_AUDIENCE'),"scope": os.getenv('SPOTLIGHT_AIR_TRAFFIC_SCOPE')} url = os.getenv('PASSPORT_URL') + os.getenv('PASSPORT_TOKEN_URL') token_data = requests.post(url, data = payload) t_data = token_data.json() return t_data
en
0.714643
# iterate a list in batches of size n
2.145798
2
src/resource_manager.py
expman/fastapi-test
0
6628681
class ResourceManager:
    """Tiny registry that stores named resources as instance attributes."""

    def __init__(self):
        # No state is needed up front; resources are attached lazily.
        pass

    def install(self, name, data):
        """Register *data* under *name*, overwriting any previous value."""
        setattr(self, name, data)

    def get(self, name):
        """Return the resource registered under *name*.

        Raises AttributeError when nothing was installed under that name.
        """
        return getattr(self, name)
class ResourceManager: def __init__(self): pass def install(self, name, data): setattr(self, name, data) def get(self, name): return getattr(self, name)
none
1
2.245263
2
problems/problem57.py
Julien-Verdun/Project-Euler
0
6628682
# Problem 57 : Square root convergents


def frac(n):
    """Return the depth-``n`` tail of the continued fraction for sqrt(2) - 1.

    sqrt(2) = 1 + 1/(2 + 1/(2 + 1/(2 + ...))); this evaluates the nested
    fraction recursively to depth ``n``.  (The original docstring claimed
    this computes "1/2(n-1)", which was wrong.)
    """
    if n > 1:
        return 1 / (2 + frac(n - 1))
    return 1 / 2


def square_root_approx(n):
    """Approximate sqrt(2) with the depth-``n`` continued-fraction expansion."""
    return 1 + frac(n)


def square_root_convergents(n):
    """Count, among the first ``n`` convergents of sqrt(2), those whose
    numerator has more digits than their denominator.

    Convergents are generated with exact integer arithmetic: starting from
    num/den = 1/2, each step maps (num, den) -> (den, 2*den + num), and the
    convergent examined is (den + num) / den.  The noisy per-iteration
    debug print of the original has been removed.
    """
    count = 0
    num, den = 1, 2
    for _ in range(n):
        if len(str(den + num)) > len(str(den)):
            count += 1
        num, den = den, 2 * den + num
    return count


print(square_root_convergents(1000))  # Result 153
<filename>problems/problem57.py # Problem 57 : Square root convergents def frac(n): """ This function calculates, for a given integer n, the fraction 1/2(n-1) """ if n > 1: return 1/(2+frac(n-1)) else: return 1/2 def square_root_approx(n): """ This function returns, for a given integer n, the result of the operation 1+n**0.5 """ return 1 + frac(n) def square_root_convergents(n): """ This function calculates every number of the sequence defined by frac function, and count the number of results that have a denominator with more digit than the numerator. """ i = 0 nb_frac = 0 num = 1 den = 2 while i < n: if i % 10 == 0: print(i, " : ", den + num, " / ", den) if len(str(den + num)) > len(str(den)): nb_frac += 1 i += 1 num, den = den, 2*den+num return nb_frac print(square_root_convergents(1000)) # Result 153
en
0.737514
# Problem 57 : Square root convergents This function calculates, for a given integer n, the fraction 1/2(n-1) This function returns, for a given integer n, the result of the operation 1+n**0.5 This function calculates every number of the sequence defined by frac function, and count the number of results that have a denominator with more digit than the numerator. # Result 153
4.214741
4
src/gestureDetect.py
CLiu13/CascadeOS
10
6628683
""" Copyright 2017 <NAME> and <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from tools.captureProcessFrame import * from tools.determineDataTrends import * from tools.processWhitePoints import * from tools.recordGesture import * from tools.filterData import * from tools.recordData import * from configparser import ConfigParser from picamera.array import PiRGBArray from picamera import PiCamera import time import cv2 config = ConfigParser() config.read("config.ini") # Image resolution of captured frames IMG_SIZE = int(config.get("image", "imgResolution")) # Size of the surrounding region utilized # when appying a Gaussian blur on frames BLUR_REGION = int(config.get("image", "blurRegion")) # Cutoff for gray intensity value of pixels when thresholding frames PIXEL_INTENSITY_THRESHOLD = int(config.get("image", "intensityThreshold")) # Number of elements to analyze when calculating # trends in x-axis and y-axis movement DATA_WINDOW_SIZE = int(config.get("data", "windowSize")) # Cutoff values for data points when being filtered LOWER_OUTLIER_CUTOFF = int(config.get("data", "lowerCutoff")) UPPER_OUTLIER_CUTOFF = int(config.get("data", "upperCutoff")) # Cutoff values for calculated trends to compare with when detecting gestures X_DATA_THRESHOLD = float(config.get("data", "xThreshold")) Y_DATA_THRESHOLD = int(0.25 * IMG_SIZE) # Zoom scale factor value to pass through the pipe for zoomDisplay.py ZOOM_FACTOR = float(config.get("zoom", "scaleFactor")) # Value at which the gesture detection will # 
terminate and record all data in files FRAME_COUNT_LIMIT = int(config.get("misc", "frameLimit")) # Boolean value to determine whether or not to show debug statements DEBUG = config.getboolean("misc", "debug") # Initialize data lists xData = [] xDataSample = [] xDataFiltered = [] yData = [] yDataSample = [] yDataFiltered = [] # Define camera settings and specify variable to store frame camera = PiCamera() camera.resolution = (IMG_SIZE, IMG_SIZE) rgbFrame = PiRGBArray(camera, size=camera.resolution) time.sleep(0.1) frameCount = 0 frame1 = captureProcessFrame(camera, rgbFrame, BLUR_REGION) while frameCount <= FRAME_COUNT_LIMIT: # Increment the frame count each iteration frameCount += 1 frame2 = captureProcessFrame(camera, rgbFrame, BLUR_REGION) # Create an image based on the differences between # the two frames and then enhance the result diffImg = cv2.absdiff(frame1, frame2) threshImg = cv2.threshold(diffImg, PIXEL_INTENSITY_THRESHOLD, 255, cv2.THRESH_BINARY)[1] # Assign frame 1 to frame 2 for the next iteration of comparison frame1 = frame2 whitePixelsData = processWhitePoints(threshImg) xData.append(whitePixelsData[0]) yData.append(whitePixelsData[1]) # Analyze for trends when a full window of data points has been gathered if len(xData) % DATA_WINDOW_SIZE == 0: filteredDataWindows = filterData(DATA_WINDOW_SIZE, xData, yData, LOWER_OUTLIER_CUTOFF, UPPER_OUTLIER_CUTOFF) # If no data points survived the filtering, # continue to the next iteration if filteredDataWindows is None: continue xWindowFiltered = filteredDataWindows[0] yWindowFiltered = filteredDataWindows[1] # Save all filtered data so they can be logged later xDataFiltered += xWindowFiltered yDataFiltered += yWindowFiltered gestureDetected = determineDataTrends(xWindowFiltered, yWindowFiltered, X_DATA_THRESHOLD, Y_DATA_THRESHOLD) if gestureDetected is not None: recordGesture(gestureDetected, ZOOM_FACTOR) if DEBUG: print("[INFO] Gesture detected: " + gestureDetected) recordData(xData, xDataFiltered, yData, 
yDataFiltered) if DEBUG: print("[INFO] Data recorded!")
""" Copyright 2017 <NAME> and <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from tools.captureProcessFrame import * from tools.determineDataTrends import * from tools.processWhitePoints import * from tools.recordGesture import * from tools.filterData import * from tools.recordData import * from configparser import ConfigParser from picamera.array import PiRGBArray from picamera import PiCamera import time import cv2 config = ConfigParser() config.read("config.ini") # Image resolution of captured frames IMG_SIZE = int(config.get("image", "imgResolution")) # Size of the surrounding region utilized # when appying a Gaussian blur on frames BLUR_REGION = int(config.get("image", "blurRegion")) # Cutoff for gray intensity value of pixels when thresholding frames PIXEL_INTENSITY_THRESHOLD = int(config.get("image", "intensityThreshold")) # Number of elements to analyze when calculating # trends in x-axis and y-axis movement DATA_WINDOW_SIZE = int(config.get("data", "windowSize")) # Cutoff values for data points when being filtered LOWER_OUTLIER_CUTOFF = int(config.get("data", "lowerCutoff")) UPPER_OUTLIER_CUTOFF = int(config.get("data", "upperCutoff")) # Cutoff values for calculated trends to compare with when detecting gestures X_DATA_THRESHOLD = float(config.get("data", "xThreshold")) Y_DATA_THRESHOLD = int(0.25 * IMG_SIZE) # Zoom scale factor value to pass through the pipe for zoomDisplay.py ZOOM_FACTOR = float(config.get("zoom", "scaleFactor")) # Value at which the gesture detection will # 
terminate and record all data in files FRAME_COUNT_LIMIT = int(config.get("misc", "frameLimit")) # Boolean value to determine whether or not to show debug statements DEBUG = config.getboolean("misc", "debug") # Initialize data lists xData = [] xDataSample = [] xDataFiltered = [] yData = [] yDataSample = [] yDataFiltered = [] # Define camera settings and specify variable to store frame camera = PiCamera() camera.resolution = (IMG_SIZE, IMG_SIZE) rgbFrame = PiRGBArray(camera, size=camera.resolution) time.sleep(0.1) frameCount = 0 frame1 = captureProcessFrame(camera, rgbFrame, BLUR_REGION) while frameCount <= FRAME_COUNT_LIMIT: # Increment the frame count each iteration frameCount += 1 frame2 = captureProcessFrame(camera, rgbFrame, BLUR_REGION) # Create an image based on the differences between # the two frames and then enhance the result diffImg = cv2.absdiff(frame1, frame2) threshImg = cv2.threshold(diffImg, PIXEL_INTENSITY_THRESHOLD, 255, cv2.THRESH_BINARY)[1] # Assign frame 1 to frame 2 for the next iteration of comparison frame1 = frame2 whitePixelsData = processWhitePoints(threshImg) xData.append(whitePixelsData[0]) yData.append(whitePixelsData[1]) # Analyze for trends when a full window of data points has been gathered if len(xData) % DATA_WINDOW_SIZE == 0: filteredDataWindows = filterData(DATA_WINDOW_SIZE, xData, yData, LOWER_OUTLIER_CUTOFF, UPPER_OUTLIER_CUTOFF) # If no data points survived the filtering, # continue to the next iteration if filteredDataWindows is None: continue xWindowFiltered = filteredDataWindows[0] yWindowFiltered = filteredDataWindows[1] # Save all filtered data so they can be logged later xDataFiltered += xWindowFiltered yDataFiltered += yWindowFiltered gestureDetected = determineDataTrends(xWindowFiltered, yWindowFiltered, X_DATA_THRESHOLD, Y_DATA_THRESHOLD) if gestureDetected is not None: recordGesture(gestureDetected, ZOOM_FACTOR) if DEBUG: print("[INFO] Gesture detected: " + gestureDetected) recordData(xData, xDataFiltered, yData, 
yDataFiltered) if DEBUG: print("[INFO] Data recorded!")
en
0.816764
Copyright 2017 <NAME> and <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # Image resolution of captured frames # Size of the surrounding region utilized # when appying a Gaussian blur on frames # Cutoff for gray intensity value of pixels when thresholding frames # Number of elements to analyze when calculating # trends in x-axis and y-axis movement # Cutoff values for data points when being filtered # Cutoff values for calculated trends to compare with when detecting gestures # Zoom scale factor value to pass through the pipe for zoomDisplay.py # Value at which the gesture detection will # terminate and record all data in files # Boolean value to determine whether or not to show debug statements # Initialize data lists # Define camera settings and specify variable to store frame # Increment the frame count each iteration # Create an image based on the differences between # the two frames and then enhance the result # Assign frame 1 to frame 2 for the next iteration of comparison # Analyze for trends when a full window of data points has been gathered # If no data points survived the filtering, # continue to the next iteration # Save all filtered data so they can be logged later
1.910763
2
expressivar/dec.py
NCBI-Hackathons/ExpressiVar
4
6628684
from io import SEEK_SET, SEEK_CUR
import builtins
import contextlib
import inspect
import os

from expressivar.exceptions import UnmodifiableAttributeError
from expressivar.exceptions import UnmodifiableModeError
import wrapt


def is_file_like(f):
    # "File-like" here means only: has a callable .read attribute.
    return callable(getattr(f, 'read', None))


def file_or_path(strictmodes=False, strictparams=False, **argmap):
    """Checks whether named arguments to decorated functions are file-likeish

    File-likeish means either a string-like object representing a path to a
    file or a `file-like` object.  If it is a file-like object, then pass it
    unmodified.  Otherwise, the path will attempted to be opened and the
    resulting file-like object passed in its place.

    argmap is a mapping of function parameter names for the decorated
    function to the desired arguments to be passed to the opener for the
    file-likeish object.

    strictparams and strictmodes indicate open file attributes and how
    strictly they should be enforced.  They cannot both be True.

    strictmodes will ensure the open file has the specified mode; if not,
    the decorated function will receive an open file object pointing to same
    underlying data, but with the specified mode.

    strictparams will present the decorated function with an open file
    object pointing to same underlying data, opened with the specified
    parameters.
    """
    if strictmodes and strictparams:
        raise ValueError(
            'Only one of strictmodes or strictparams can be specified.'
        )
    # Valid keyword names for builtins.open, used to validate strictparams.
    OPEN_KWDS = inspect.getfullargspec(builtins.open).args

    @wrapt.decorator
    def inner(wrapped, instance, args, kw):
        # Resolve the call into a name -> value mapping so arguments can be
        # inspected and replaced by parameter name.
        w_args = inspect.getcallargs(wrapped, *args, **kw)
        managed = []    # (name, path) pairs to be opened by us
        to_reopen = []  # (name, file, open-kwargs) to be reopened with new params
        for _name in argmap:
            _val = w_args.get(_name, None)
            if _val is None:
                # Parameter absent or explicitly None: leave it untouched.
                continue
            if not is_file_like(_val):
                # throw here??
                managed.append((_name, _val))
            else:
                # This is file-like. Test modes if strictness specified
                if strictmodes:
                    try:
                        desired_mode = argmap[_name]['mode']
                    except KeyError:
                        raise ValueError('strictmodes requires a target mode.')
                    try:
                        actual_mode = _val.mode
                        if desired_mode != actual_mode:
                            # Same underlying data, reopened with the mode.
                            to_reopen.append(
                                (_name, _val, {'mode': desired_mode})
                            )
                    except AttributeError as e:
                        # File object exposes no .mode: cannot honor strictmodes.
                        raise UnmodifiableModeError(_val) from e
                elif strictparams:
                    desired_params = argmap[_name].copy()
                    try:
                        for key in desired_params:
                            if key not in OPEN_KWDS:
                                raise TypeError(
                                    "'{}' is not a valid keyword argument"
                                    "".format(key)
                                )
                    except (TypeError, AttributeError) as e:
                        raise UnmodifiableAttributeError((_val, key)) from e
                    # Always attempt to preserve mode
                    if 'mode' not in desired_params:
                        try:
                            mode = _val.mode
                            desired_params['mode'] = mode
                        except AttributeError as e:
                            pass
                    to_reopen.append((_name, _val, desired_params))
        # Opened/reopened files live for the duration of the wrapped call and
        # are closed when the ExitStack unwinds.
        with contextlib.ExitStack() as stack:
            for _key, _path in managed:
                _kwargs = argmap[_key]
                try:
                    w_args[_key] = stack.enter_context(open(_path, **_kwargs))
                except TypeError as e:
                    raise AttributeError(*e.args) from e
            for _key, _file, _kwargs in to_reopen:
                # TODO(zeroslack): handle possible OSError due to seek, tell...
                try:
                    w_args[_key] = stack.enter_context(
                        reopen(_file, **_kwargs)
                    )
                except TypeError as e:
                    # NOTE(review): ``_val`` here is whatever the earlier loop
                    # left behind, not this iteration's ``_file`` — looks like
                    # a latent bug; confirm the intended value.
                    raise UnmodifiableAttributeError((_val, *e.args)) from e
            return wrapped.__call__(**w_args)
    return inner


@contextlib.contextmanager
def reopen(fh, **kwargs):
    """Reopen an open file with new parameters via a dup'd descriptor.

    The position of *fh* is mirrored onto the new file object.
    """
    try:
        pos = fh.tell()
        fd = os.dup(fh.fileno())
        with os.fdopen(fd, **kwargs) as file_:
            file_.seek(pos)
            yield file_
    finally:
        pass


@contextlib.contextmanager
def rewind(fh):
    """Temporarily rewind an open file, restoring its position afterwards."""
    pos, direction = 0, SEEK_CUR
    try:
        pos, direction = fh.tell(), SEEK_SET
        fh.flush()
        fh.seek(0)
        yield fh
    finally:
        # Best effort: unseekable streams raise OSError, which is suppressed.
        with contextlib.suppress(OSError):
            fh.seek(pos, direction)
<filename>expressivar/dec.py from io import SEEK_SET, SEEK_CUR import builtins import contextlib import inspect import os from expressivar.exceptions import UnmodifiableAttributeError from expressivar.exceptions import UnmodifiableModeError import wrapt def is_file_like(f): return callable(getattr(f, 'read', None)) def file_or_path(strictmodes=False, strictparams=False, **argmap): """Checks whether named arguments to decorated functions are file-likeish File-likeish means either a string-like object representing a path to a file or a `file-like` object. If it is a file-like object, then pass it unmodified. Otherwise, the path will attempted to be opened and the resulting file-like object passed in its place. argmap is a mapping of function parameter names for the decorated fuction to the desired arguments to be passed to the opener for the file-likeish object. strictparams and strictmodes indicate open file attributes and how strictly they should be enforced. They cannot both be True. strictmodes will ensure the open file has the specified mode; if not, the decorated function will receive an open file object pointing to same underlying data, but with the specified mode. strictparams will present the decorated function with an open file object pointing to same underlying data, opened with the specified parameters. """ if strictmodes and strictparams: raise ValueError( 'Only one of strictmodes or strictparams can be specified.' ) OPEN_KWDS = inspect.getfullargspec(builtins.open).args @wrapt.decorator def inner(wrapped, instance, args, kw): w_args = inspect.getcallargs(wrapped, *args, **kw) managed = [] to_reopen = [] for _name in argmap: _val = w_args.get(_name, None) if _val is None: continue if not is_file_like(_val): # throw here?? managed.append((_name, _val)) else: # This is file-like. 
Test modes if strictness specified if strictmodes: try: desired_mode = argmap[_name]['mode'] except KeyError: raise ValueError('strictmodes requires a target mode.') try: actual_mode = _val.mode if desired_mode != actual_mode: to_reopen.append( (_name, _val, {'mode': desired_mode}) ) except AttributeError as e: raise UnmodifiableModeError(_val) from e elif strictparams: desired_params = argmap[_name].copy() try: for key in desired_params: if key not in OPEN_KWDS: raise TypeError( "'{}' is not a valid keyword argument" "".format(key) ) except (TypeError, AttributeError) as e: raise UnmodifiableAttributeError((_val, key)) from e # Always attempt to preserve mode if 'mode' not in desired_params: try: mode = _val.mode desired_params['mode'] = mode except AttributeError as e: pass to_reopen.append((_name, _val, desired_params)) with contextlib.ExitStack() as stack: for _key, _path in managed: _kwargs = argmap[_key] try: w_args[_key] = stack.enter_context(open(_path, **_kwargs)) except TypeError as e: raise AttributeError(*e.args) from e for _key, _file, _kwargs in to_reopen: # TODO(zeroslack): handle possible OSError due to seek, tell... try: w_args[_key] = stack.enter_context( reopen(_file, **_kwargs) ) except TypeError as e: raise UnmodifiableAttributeError((_val, *e.args)) from e return wrapped.__call__(**w_args) return inner @contextlib.contextmanager def reopen(fh, **kwargs): """Simply reopens a open file with a new paramaters.""" try: pos = fh.tell() fd = os.dup(fh.fileno()) with os.fdopen(fd, **kwargs) as file_: file_.seek(pos) yield file_ finally: pass @contextlib.contextmanager def rewind(fh): """Simply rewinds an open file.""" pos, direction = 0, SEEK_CUR try: pos, direction = fh.tell(), SEEK_SET fh.flush() fh.seek(0) yield fh finally: with contextlib.suppress(OSError): fh.seek(pos, direction)
en
0.827047
Checks whether named arguments to decorated functions are file-likeish File-likeish means either a string-like object representing a path to a file or a `file-like` object. If it is a file-like object, then pass it unmodified. Otherwise, the path will attempted to be opened and the resulting file-like object passed in its place. argmap is a mapping of function parameter names for the decorated fuction to the desired arguments to be passed to the opener for the file-likeish object. strictparams and strictmodes indicate open file attributes and how strictly they should be enforced. They cannot both be True. strictmodes will ensure the open file has the specified mode; if not, the decorated function will receive an open file object pointing to same underlying data, but with the specified mode. strictparams will present the decorated function with an open file object pointing to same underlying data, opened with the specified parameters. # throw here?? # This is file-like. Test modes if strictness specified # Always attempt to preserve mode # TODO(zeroslack): handle possible OSError due to seek, tell... Simply reopens a open file with a new paramaters. Simply rewinds an open file.
2.637483
3
mgsradio/base.py
scivision/mgs-utils
0
6628685
from pathlib import Path

from .read import read_mgs_occultation


def loop_mgs(P: Path) -> tuple:
    """Read one ``.sri`` occultation file, or every one in a directory.

    Returns a tuple ``(data, flist)`` where ``data[i]`` is the parsed
    content of ``flist[i]``.  Raises FileNotFoundError when *P* is
    neither a file nor a directory.
    """
    P = Path(P).expanduser()
    if P.is_dir():
        flist = sorted(P.glob("*.sri"))
    elif P.is_file():
        flist = [P]
    else:
        raise FileNotFoundError(f"{P} not found")
    data = [read_mgs_occultation(f) for f in flist]
    return data, flist
<filename>mgsradio/base.py from pathlib import Path from .read import read_mgs_occultation def loop_mgs(P: Path) -> tuple: P = Path(P).expanduser() if P.is_dir(): flist = sorted(P.glob("*.sri")) elif P.is_file(): flist = [P] else: raise FileNotFoundError(f"{P} not found") data = [] for f in flist: data.append(read_mgs_occultation(f)) return data, flist
none
1
2.52742
3
magenta/music/melodies_lib.py
sleep-yearning/magenta
1
6628686
# Copyright 2020 The Magenta Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions for working with melodies. Use extract_melodies to extract monophonic melodies from a quantized NoteSequence proto. Use Melody.to_sequence to write a melody to a NoteSequence proto. Then use midi_io.sequence_proto_to_midi_file to write that NoteSequence to a midi file. """ from magenta.music import constants from magenta.music import events_lib from magenta.music import midi_io from magenta.music import sequences_lib from magenta.music.protobuf import music_pb2 import numpy as np from six.moves import range # pylint: disable=redefined-builtin MELODY_NOTE_OFF = constants.MELODY_NOTE_OFF MELODY_NO_EVENT = constants.MELODY_NO_EVENT MIN_MELODY_EVENT = constants.MIN_MELODY_EVENT MAX_MELODY_EVENT = constants.MAX_MELODY_EVENT MIN_MIDI_PITCH = constants.MIN_MIDI_PITCH MAX_MIDI_PITCH = constants.MAX_MIDI_PITCH NOTES_PER_OCTAVE = constants.NOTES_PER_OCTAVE DEFAULT_STEPS_PER_BAR = constants.DEFAULT_STEPS_PER_BAR DEFAULT_STEPS_PER_QUARTER = constants.DEFAULT_STEPS_PER_QUARTER STANDARD_PPQ = constants.STANDARD_PPQ NOTE_KEYS = constants.NOTE_KEYS class PolyphonicMelodyError(Exception): pass class BadNoteError(Exception): pass class Melody(events_lib.SimpleEventSequence): """Stores a quantized stream of monophonic melody events. Melody is an intermediate representation that all melody models can use. 
Quantized sequence to Melody code will do work to align notes and extract extract monophonic melodies. Model-specific code then needs to convert Melody to SequenceExample protos for TensorFlow. Melody implements an iterable object. Simply iterate to retrieve the melody events. Melody events are integers in range [-2, 127] (inclusive), where negative values are the special event events: MELODY_NOTE_OFF, and MELODY_NO_EVENT. Non-negative values [0, 127] are note-on events for that midi pitch. A note starts at a non-negative value (that is the pitch), and is held through subsequent MELODY_NO_EVENT events until either another non-negative value is reached (even if the pitch is the same as the previous note), or a MELODY_NOTE_OFF event is reached. A MELODY_NOTE_OFF starts at least one step of silence, which continues through MELODY_NO_EVENT events until the next non-negative value. MELODY_NO_EVENT values are treated as default filler. Notes must be inserted in ascending order by start time. Note end times will be truncated if the next note overlaps. Any sustained notes are implicitly turned off at the end of a melody. Melodies can start at any non-negative time, and are shifted left so that the bar containing the first note-on event is the first bar. Attributes: start_step: The offset of the first step of the melody relative to the beginning of the source sequence. Will always be the first step of a bar. end_step: The offset to the beginning of the bar following the last step of the melody relative the beginning of the source sequence. Will always be the first step of a bar. steps_per_quarter: Number of steps in in a quarter note. steps_per_bar: Number of steps in a bar (measure) of music. 
""" def __init__(self, events=None, **kwargs): """Construct a Melody.""" if 'pad_event' in kwargs: del kwargs['pad_event'] super(Melody, self).__init__(pad_event=MELODY_NO_EVENT, events=events, **kwargs) def _from_event_list(self, events, start_step=0, steps_per_bar=DEFAULT_STEPS_PER_BAR, steps_per_quarter=DEFAULT_STEPS_PER_QUARTER): """Initializes with a list of event values and sets attributes. Args: events: List of Melody events to set melody to. start_step: The integer starting step offset. steps_per_bar: The number of steps in a bar. steps_per_quarter: The number of steps in a quarter note. Raises: ValueError: If `events` contains an event that is not in the proper range. """ for event in events: if not MIN_MELODY_EVENT <= event <= MAX_MELODY_EVENT: raise ValueError('Melody event out of range: %d' % event) # Replace MELODY_NOTE_OFF events with MELODY_NO_EVENT before first note. cleaned_events = list(events) for i, e in enumerate(events): if e not in (MELODY_NO_EVENT, MELODY_NOTE_OFF): break cleaned_events[i] = MELODY_NO_EVENT super(Melody, self)._from_event_list( cleaned_events, start_step=start_step, steps_per_bar=steps_per_bar, steps_per_quarter=steps_per_quarter) def _add_note(self, pitch, start_step, end_step): """Adds the given note to the `events` list. `start_step` is set to the given pitch. `end_step` is set to NOTE_OFF. Everything after `start_step` in `events` is deleted before the note is added. `events`'s length will be changed so that the last event has index `end_step`. Args: pitch: Midi pitch. An integer between 0 and 127 inclusive. start_step: A non-negative integer step that the note begins on. end_step: An integer step that the note ends on. The note is considered to end at the onset of the end step. `end_step` must be greater than `start_step`. Raises: BadNoteError: If `start_step` does not precede `end_step`. 
""" if start_step >= end_step: raise BadNoteError( 'Start step does not precede end step: start=%d, end=%d' % (start_step, end_step)) self.set_length(end_step + 1) self._events[start_step] = pitch self._events[end_step] = MELODY_NOTE_OFF for i in range(start_step + 1, end_step): self._events[i] = MELODY_NO_EVENT def _get_last_on_off_events(self): """Returns indexes of the most recent pitch and NOTE_OFF events. Returns: A tuple (start_step, end_step) of the last note's on and off event indices. Raises: ValueError: If `events` contains no NOTE_OFF or pitch events. """ last_off = len(self) for i in range(len(self) - 1, -1, -1): if self._events[i] == MELODY_NOTE_OFF: last_off = i if self._events[i] >= MIN_MIDI_PITCH: return (i, last_off) raise ValueError('No events in the stream') def get_note_histogram(self): """Gets a histogram of the note occurrences in a melody. Returns: A list of 12 ints, one for each note value (C at index 0 through B at index 11). Each int is the total number of times that note occurred in the melody. """ np_melody = np.array(self._events, dtype=int) return np.bincount(np_melody[np_melody >= MIN_MIDI_PITCH] % NOTES_PER_OCTAVE, minlength=NOTES_PER_OCTAVE) def get_major_key_histogram(self): """Gets a histogram of the how many notes fit into each key. Returns: A list of 12 ints, one for each Major key (C Major at index 0 through B Major at index 11). Each int is the total number of notes that could fit into that key. """ note_histogram = self.get_note_histogram() key_histogram = np.zeros(NOTES_PER_OCTAVE) for note, count in enumerate(note_histogram): key_histogram[NOTE_KEYS[note]] += count return key_histogram def get_major_key(self): """Finds the major key that this melody most likely belongs to. If multiple keys match equally, the key with the lowest index is returned, where the indexes of the keys are C Major = 0 through B Major = 11. 
Returns: An int for the most likely key (C Major = 0 through B Major = 11) """ key_histogram = self.get_major_key_histogram() return key_histogram.argmax() def append(self, event): """Appends the event to the end of the melody and increments the end step. An implicit NOTE_OFF at the end of the melody will not be respected by this modification. Args: event: The integer Melody event to append to the end. Raises: ValueError: If `event` is not in the proper range. """ if not MIN_MELODY_EVENT <= event <= MAX_MELODY_EVENT: raise ValueError('Event out of range: %d' % event) super(Melody, self).append(event) def from_quantized_sequence(self, quantized_sequence, search_start_step=0, instrument=0, gap_bars=1, ignore_polyphonic_notes=False, pad_end=False, filter_drums=True): """Populate self with a melody from the given quantized NoteSequence. A monophonic melody is extracted from the given `instrument` starting at `search_start_step`. `instrument` and `search_start_step` can be used to drive extraction of multiple melodies from the same quantized sequence. The end step of the extracted melody will be stored in `self._end_step`. 0 velocity notes are ignored. The melody extraction is ended when there are no held notes for a time stretch of `gap_bars` in bars (measures) of music. The number of time steps per bar is computed from the time signature in `quantized_sequence`. `ignore_polyphonic_notes` determines what happens when polyphonic (multiple notes start at the same time) data is encountered. If `ignore_polyphonic_notes` is true, the highest pitch is used in the melody when multiple notes start at the same time. If false, an exception is raised. Args: quantized_sequence: A NoteSequence quantized with sequences_lib.quantize_note_sequence. search_start_step: Start searching for a melody at this time step. Assumed to be the first step of a bar. instrument: Search for a melody in this instrument number. 
gap_bars: If this many bars or more follow a NOTE_OFF event, the melody is ended. ignore_polyphonic_notes: If True, the highest pitch is used in the melody when multiple notes start at the same time. If False, PolyphonicMelodyError will be raised if multiple notes start at the same time. pad_end: If True, the end of the melody will be padded with NO_EVENTs so that it will end at a bar boundary. filter_drums: If True, notes for which `is_drum` is True will be ignored. Raises: NonIntegerStepsPerBarError: If `quantized_sequence`'s bar length (derived from its time signature) is not an integer number of time steps. PolyphonicMelodyError: If any of the notes start on the same step and `ignore_polyphonic_notes` is False. """ sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence) self._reset() steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence( quantized_sequence) if steps_per_bar_float % 1 != 0: raise events_lib.NonIntegerStepsPerBarError( 'There are %f timesteps per bar. Time signature: %d/%d' % (steps_per_bar_float, quantized_sequence.time_signatures[0].numerator, quantized_sequence.time_signatures[0].denominator)) self._steps_per_bar = steps_per_bar = int(steps_per_bar_float) self._steps_per_quarter = ( quantized_sequence.quantization_info.steps_per_quarter) # Sort track by note start times, and secondarily by pitch descending. notes = sorted([n for n in quantized_sequence.notes if n.instrument == instrument and n.quantized_start_step >= search_start_step], key=lambda note: (note.quantized_start_step, -note.pitch)) if not notes: return # The first step in the melody, beginning at the first step of a bar. melody_start_step = ( notes[0].quantized_start_step - (notes[0].quantized_start_step - search_start_step) % steps_per_bar) for note in notes: if filter_drums and note.is_drum: continue # Ignore 0 velocity notes. 
if not note.velocity: continue start_index = note.quantized_start_step - melody_start_step end_index = note.quantized_end_step - melody_start_step if not self._events: # If there are no events, we don't need to check for polyphony. self._add_note(note.pitch, start_index, end_index) continue # If `start_index` comes before or lands on an already added note's start # step, we cannot add it. In that case either discard the melody or keep # the highest pitch. last_on, last_off = self._get_last_on_off_events() on_distance = start_index - last_on off_distance = start_index - last_off if on_distance == 0: if ignore_polyphonic_notes: # Keep highest note. # Notes are sorted by pitch descending, so if a note is already at # this position its the highest pitch. continue else: self._reset() raise PolyphonicMelodyError() elif on_distance < 0: raise PolyphonicMelodyError( 'Unexpected note. Not in ascending order.') # If a gap of `gap` or more steps is found, end the melody. if len(self) and off_distance >= gap_bars * steps_per_bar: # pylint:disable=len-as-condition break # Add the note-on and off events to the melody. self._add_note(note.pitch, start_index, end_index) if not self._events: # If no notes were added, don't set `_start_step` and `_end_step`. return self._start_step = melody_start_step # Strip final MELODY_NOTE_OFF event. if self._events[-1] == MELODY_NOTE_OFF: del self._events[-1] length = len(self) # Optionally round up `_end_step` to a multiple of `steps_per_bar`. if pad_end: length += -len(self) % steps_per_bar self.set_length(length) def to_sequence(self, velocity=100, instrument=0, program=0, sequence_start_time=0.0, qpm=120.0): """Converts the Melody to NoteSequence proto. The end of the melody is treated as a NOTE_OFF event for any sustained notes. Args: velocity: Midi velocity to give each note. Between 1 and 127 (inclusive). instrument: Midi instrument to give each note. program: Midi program to give each note. 
sequence_start_time: A time in seconds (float) that the first note in the sequence will land on. qpm: Quarter notes per minute (float). Returns: A NoteSequence proto encoding the given melody. """ seconds_per_step = 60.0 / qpm / self.steps_per_quarter sequence = music_pb2.NoteSequence() sequence.tempos.add().qpm = qpm sequence.ticks_per_quarter = STANDARD_PPQ sequence_start_time += self.start_step * seconds_per_step current_sequence_note = None for step, note in enumerate(self): if MIN_MIDI_PITCH <= note <= MAX_MIDI_PITCH: # End any sustained notes. if current_sequence_note is not None: current_sequence_note.end_time = ( step * seconds_per_step + sequence_start_time) # Add a note. current_sequence_note = sequence.notes.add() current_sequence_note.start_time = ( step * seconds_per_step + sequence_start_time) current_sequence_note.pitch = note current_sequence_note.velocity = velocity current_sequence_note.instrument = instrument current_sequence_note.program = program elif note == MELODY_NOTE_OFF: # End any sustained notes. if current_sequence_note is not None: current_sequence_note.end_time = ( step * seconds_per_step + sequence_start_time) current_sequence_note = None # End any sustained notes. if current_sequence_note is not None: current_sequence_note.end_time = ( len(self) * seconds_per_step + sequence_start_time) if sequence.notes: sequence.total_time = sequence.notes[-1].end_time return sequence def transpose(self, transpose_amount, min_note=0, max_note=128): """Transpose notes in this Melody. All notes are transposed the specified amount. Additionally, all notes are octave shifted to lie within the [min_note, max_note) range. Args: transpose_amount: The number of half steps to transpose this Melody. Positive values transpose up. Negative values transpose down. min_note: Minimum pitch (inclusive) that the resulting notes will take on. max_note: Maximum pitch (exclusive) that the resulting notes will take on. 
""" for i in range(len(self)): # Transpose MIDI pitches. Special events below MIN_MIDI_PITCH are not # changed. if self._events[i] >= MIN_MIDI_PITCH: self._events[i] += transpose_amount if self._events[i] < min_note: self._events[i] = ( min_note + (self._events[i] - min_note) % NOTES_PER_OCTAVE) elif self._events[i] >= max_note: self._events[i] = (max_note - NOTES_PER_OCTAVE + (self._events[i] - max_note) % NOTES_PER_OCTAVE) def squash(self, min_note, max_note, transpose_to_key=None): """Transpose and octave shift the notes in this Melody. The key center of this melody is computed with a heuristic, and the notes are transposed to be in the given key. The melody is also octave shifted to be centered in the given range. Additionally, all notes are octave shifted to lie within a given range. Args: min_note: Minimum pitch (inclusive) that the resulting notes will take on. max_note: Maximum pitch (exclusive) that the resulting notes will take on. transpose_to_key: The melody is transposed to be in this key or None if should not be transposed. 0 = C Major. Returns: How much notes are transposed by. """ if transpose_to_key is None: transpose_amount = 0 else: melody_key = self.get_major_key() key_diff = transpose_to_key - melody_key midi_notes = [note for note in self._events if MIN_MIDI_PITCH <= note <= MAX_MIDI_PITCH] if not midi_notes: return 0 melody_min_note = min(midi_notes) melody_max_note = max(midi_notes) melody_center = (melody_min_note + melody_max_note) / 2 target_center = (min_note + max_note - 1) / 2 center_diff = target_center - (melody_center + key_diff) transpose_amount = ( key_diff + NOTES_PER_OCTAVE * int(round(center_diff / float(NOTES_PER_OCTAVE)))) self.transpose(transpose_amount, min_note, max_note) return transpose_amount def set_length(self, steps, from_left=False): """Sets the length of the melody to the specified number of steps. If the melody is not long enough, ends any sustained notes and adds NO_EVENT steps for padding. 
If it is too long, it will be truncated to the requested length. Args: steps: How many steps long the melody should be. from_left: Whether to add/remove from the left instead of right. """ old_len = len(self) super(Melody, self).set_length(steps, from_left=from_left) if steps > old_len and not from_left: # When extending the melody on the right, we end any sustained notes. for i in reversed(range(old_len)): if self._events[i] == MELODY_NOTE_OFF: break elif self._events[i] != MELODY_NO_EVENT: self._events[old_len] = MELODY_NOTE_OFF break def increase_resolution(self, k): """Increase the resolution of a Melody. Increases the resolution of a Melody object by a factor of `k`. This uses MELODY_NO_EVENT to extend each event in the melody to be `k` steps long. Args: k: An integer, the factor by which to increase the resolution of the melody. """ super(Melody, self).increase_resolution( k, fill_event=MELODY_NO_EVENT) def midi_file_to_melody(midi_file, steps_per_quarter=4, qpm=None, ignore_polyphonic_notes=True): """Loads a melody from a MIDI file. Args: midi_file: Absolute path to MIDI file. steps_per_quarter: Quantization of Melody. For example, 4 = 16th notes. qpm: Tempo in quarters per a minute. If not set, tries to use the first tempo of the midi track and defaults to magenta.music.DEFAULT_QUARTERS_PER_MINUTE if fails. ignore_polyphonic_notes: Only use the highest simultaneous note if True. Returns: A Melody object extracted from the MIDI file. """ sequence = midi_io.midi_file_to_sequence_proto(midi_file) if qpm is None: if sequence.tempos: qpm = sequence.tempos[0].qpm else: qpm = constants.DEFAULT_QUARTERS_PER_MINUTE quantized_sequence = sequences_lib.quantize_note_sequence( sequence, steps_per_quarter=steps_per_quarter) melody = Melody() melody.from_quantized_sequence( quantized_sequence, ignore_polyphonic_notes=ignore_polyphonic_notes) return melody
# Copyright 2020 The Magenta Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions for working with melodies. Use extract_melodies to extract monophonic melodies from a quantized NoteSequence proto. Use Melody.to_sequence to write a melody to a NoteSequence proto. Then use midi_io.sequence_proto_to_midi_file to write that NoteSequence to a midi file. """ from magenta.music import constants from magenta.music import events_lib from magenta.music import midi_io from magenta.music import sequences_lib from magenta.music.protobuf import music_pb2 import numpy as np from six.moves import range # pylint: disable=redefined-builtin MELODY_NOTE_OFF = constants.MELODY_NOTE_OFF MELODY_NO_EVENT = constants.MELODY_NO_EVENT MIN_MELODY_EVENT = constants.MIN_MELODY_EVENT MAX_MELODY_EVENT = constants.MAX_MELODY_EVENT MIN_MIDI_PITCH = constants.MIN_MIDI_PITCH MAX_MIDI_PITCH = constants.MAX_MIDI_PITCH NOTES_PER_OCTAVE = constants.NOTES_PER_OCTAVE DEFAULT_STEPS_PER_BAR = constants.DEFAULT_STEPS_PER_BAR DEFAULT_STEPS_PER_QUARTER = constants.DEFAULT_STEPS_PER_QUARTER STANDARD_PPQ = constants.STANDARD_PPQ NOTE_KEYS = constants.NOTE_KEYS class PolyphonicMelodyError(Exception): pass class BadNoteError(Exception): pass class Melody(events_lib.SimpleEventSequence): """Stores a quantized stream of monophonic melody events. Melody is an intermediate representation that all melody models can use. 
Quantized sequence to Melody code will do work to align notes and extract extract monophonic melodies. Model-specific code then needs to convert Melody to SequenceExample protos for TensorFlow. Melody implements an iterable object. Simply iterate to retrieve the melody events. Melody events are integers in range [-2, 127] (inclusive), where negative values are the special event events: MELODY_NOTE_OFF, and MELODY_NO_EVENT. Non-negative values [0, 127] are note-on events for that midi pitch. A note starts at a non-negative value (that is the pitch), and is held through subsequent MELODY_NO_EVENT events until either another non-negative value is reached (even if the pitch is the same as the previous note), or a MELODY_NOTE_OFF event is reached. A MELODY_NOTE_OFF starts at least one step of silence, which continues through MELODY_NO_EVENT events until the next non-negative value. MELODY_NO_EVENT values are treated as default filler. Notes must be inserted in ascending order by start time. Note end times will be truncated if the next note overlaps. Any sustained notes are implicitly turned off at the end of a melody. Melodies can start at any non-negative time, and are shifted left so that the bar containing the first note-on event is the first bar. Attributes: start_step: The offset of the first step of the melody relative to the beginning of the source sequence. Will always be the first step of a bar. end_step: The offset to the beginning of the bar following the last step of the melody relative the beginning of the source sequence. Will always be the first step of a bar. steps_per_quarter: Number of steps in in a quarter note. steps_per_bar: Number of steps in a bar (measure) of music. 
""" def __init__(self, events=None, **kwargs): """Construct a Melody.""" if 'pad_event' in kwargs: del kwargs['pad_event'] super(Melody, self).__init__(pad_event=MELODY_NO_EVENT, events=events, **kwargs) def _from_event_list(self, events, start_step=0, steps_per_bar=DEFAULT_STEPS_PER_BAR, steps_per_quarter=DEFAULT_STEPS_PER_QUARTER): """Initializes with a list of event values and sets attributes. Args: events: List of Melody events to set melody to. start_step: The integer starting step offset. steps_per_bar: The number of steps in a bar. steps_per_quarter: The number of steps in a quarter note. Raises: ValueError: If `events` contains an event that is not in the proper range. """ for event in events: if not MIN_MELODY_EVENT <= event <= MAX_MELODY_EVENT: raise ValueError('Melody event out of range: %d' % event) # Replace MELODY_NOTE_OFF events with MELODY_NO_EVENT before first note. cleaned_events = list(events) for i, e in enumerate(events): if e not in (MELODY_NO_EVENT, MELODY_NOTE_OFF): break cleaned_events[i] = MELODY_NO_EVENT super(Melody, self)._from_event_list( cleaned_events, start_step=start_step, steps_per_bar=steps_per_bar, steps_per_quarter=steps_per_quarter) def _add_note(self, pitch, start_step, end_step): """Adds the given note to the `events` list. `start_step` is set to the given pitch. `end_step` is set to NOTE_OFF. Everything after `start_step` in `events` is deleted before the note is added. `events`'s length will be changed so that the last event has index `end_step`. Args: pitch: Midi pitch. An integer between 0 and 127 inclusive. start_step: A non-negative integer step that the note begins on. end_step: An integer step that the note ends on. The note is considered to end at the onset of the end step. `end_step` must be greater than `start_step`. Raises: BadNoteError: If `start_step` does not precede `end_step`. 
""" if start_step >= end_step: raise BadNoteError( 'Start step does not precede end step: start=%d, end=%d' % (start_step, end_step)) self.set_length(end_step + 1) self._events[start_step] = pitch self._events[end_step] = MELODY_NOTE_OFF for i in range(start_step + 1, end_step): self._events[i] = MELODY_NO_EVENT def _get_last_on_off_events(self): """Returns indexes of the most recent pitch and NOTE_OFF events. Returns: A tuple (start_step, end_step) of the last note's on and off event indices. Raises: ValueError: If `events` contains no NOTE_OFF or pitch events. """ last_off = len(self) for i in range(len(self) - 1, -1, -1): if self._events[i] == MELODY_NOTE_OFF: last_off = i if self._events[i] >= MIN_MIDI_PITCH: return (i, last_off) raise ValueError('No events in the stream') def get_note_histogram(self): """Gets a histogram of the note occurrences in a melody. Returns: A list of 12 ints, one for each note value (C at index 0 through B at index 11). Each int is the total number of times that note occurred in the melody. """ np_melody = np.array(self._events, dtype=int) return np.bincount(np_melody[np_melody >= MIN_MIDI_PITCH] % NOTES_PER_OCTAVE, minlength=NOTES_PER_OCTAVE) def get_major_key_histogram(self): """Gets a histogram of the how many notes fit into each key. Returns: A list of 12 ints, one for each Major key (C Major at index 0 through B Major at index 11). Each int is the total number of notes that could fit into that key. """ note_histogram = self.get_note_histogram() key_histogram = np.zeros(NOTES_PER_OCTAVE) for note, count in enumerate(note_histogram): key_histogram[NOTE_KEYS[note]] += count return key_histogram def get_major_key(self): """Finds the major key that this melody most likely belongs to. If multiple keys match equally, the key with the lowest index is returned, where the indexes of the keys are C Major = 0 through B Major = 11. 
Returns: An int for the most likely key (C Major = 0 through B Major = 11) """ key_histogram = self.get_major_key_histogram() return key_histogram.argmax() def append(self, event): """Appends the event to the end of the melody and increments the end step. An implicit NOTE_OFF at the end of the melody will not be respected by this modification. Args: event: The integer Melody event to append to the end. Raises: ValueError: If `event` is not in the proper range. """ if not MIN_MELODY_EVENT <= event <= MAX_MELODY_EVENT: raise ValueError('Event out of range: %d' % event) super(Melody, self).append(event) def from_quantized_sequence(self, quantized_sequence, search_start_step=0, instrument=0, gap_bars=1, ignore_polyphonic_notes=False, pad_end=False, filter_drums=True): """Populate self with a melody from the given quantized NoteSequence. A monophonic melody is extracted from the given `instrument` starting at `search_start_step`. `instrument` and `search_start_step` can be used to drive extraction of multiple melodies from the same quantized sequence. The end step of the extracted melody will be stored in `self._end_step`. 0 velocity notes are ignored. The melody extraction is ended when there are no held notes for a time stretch of `gap_bars` in bars (measures) of music. The number of time steps per bar is computed from the time signature in `quantized_sequence`. `ignore_polyphonic_notes` determines what happens when polyphonic (multiple notes start at the same time) data is encountered. If `ignore_polyphonic_notes` is true, the highest pitch is used in the melody when multiple notes start at the same time. If false, an exception is raised. Args: quantized_sequence: A NoteSequence quantized with sequences_lib.quantize_note_sequence. search_start_step: Start searching for a melody at this time step. Assumed to be the first step of a bar. instrument: Search for a melody in this instrument number. 
gap_bars: If this many bars or more follow a NOTE_OFF event, the melody is ended. ignore_polyphonic_notes: If True, the highest pitch is used in the melody when multiple notes start at the same time. If False, PolyphonicMelodyError will be raised if multiple notes start at the same time. pad_end: If True, the end of the melody will be padded with NO_EVENTs so that it will end at a bar boundary. filter_drums: If True, notes for which `is_drum` is True will be ignored. Raises: NonIntegerStepsPerBarError: If `quantized_sequence`'s bar length (derived from its time signature) is not an integer number of time steps. PolyphonicMelodyError: If any of the notes start on the same step and `ignore_polyphonic_notes` is False. """ sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence) self._reset() steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence( quantized_sequence) if steps_per_bar_float % 1 != 0: raise events_lib.NonIntegerStepsPerBarError( 'There are %f timesteps per bar. Time signature: %d/%d' % (steps_per_bar_float, quantized_sequence.time_signatures[0].numerator, quantized_sequence.time_signatures[0].denominator)) self._steps_per_bar = steps_per_bar = int(steps_per_bar_float) self._steps_per_quarter = ( quantized_sequence.quantization_info.steps_per_quarter) # Sort track by note start times, and secondarily by pitch descending. notes = sorted([n for n in quantized_sequence.notes if n.instrument == instrument and n.quantized_start_step >= search_start_step], key=lambda note: (note.quantized_start_step, -note.pitch)) if not notes: return # The first step in the melody, beginning at the first step of a bar. melody_start_step = ( notes[0].quantized_start_step - (notes[0].quantized_start_step - search_start_step) % steps_per_bar) for note in notes: if filter_drums and note.is_drum: continue # Ignore 0 velocity notes. 
if not note.velocity: continue start_index = note.quantized_start_step - melody_start_step end_index = note.quantized_end_step - melody_start_step if not self._events: # If there are no events, we don't need to check for polyphony. self._add_note(note.pitch, start_index, end_index) continue # If `start_index` comes before or lands on an already added note's start # step, we cannot add it. In that case either discard the melody or keep # the highest pitch. last_on, last_off = self._get_last_on_off_events() on_distance = start_index - last_on off_distance = start_index - last_off if on_distance == 0: if ignore_polyphonic_notes: # Keep highest note. # Notes are sorted by pitch descending, so if a note is already at # this position its the highest pitch. continue else: self._reset() raise PolyphonicMelodyError() elif on_distance < 0: raise PolyphonicMelodyError( 'Unexpected note. Not in ascending order.') # If a gap of `gap` or more steps is found, end the melody. if len(self) and off_distance >= gap_bars * steps_per_bar: # pylint:disable=len-as-condition break # Add the note-on and off events to the melody. self._add_note(note.pitch, start_index, end_index) if not self._events: # If no notes were added, don't set `_start_step` and `_end_step`. return self._start_step = melody_start_step # Strip final MELODY_NOTE_OFF event. if self._events[-1] == MELODY_NOTE_OFF: del self._events[-1] length = len(self) # Optionally round up `_end_step` to a multiple of `steps_per_bar`. if pad_end: length += -len(self) % steps_per_bar self.set_length(length) def to_sequence(self, velocity=100, instrument=0, program=0, sequence_start_time=0.0, qpm=120.0): """Converts the Melody to NoteSequence proto. The end of the melody is treated as a NOTE_OFF event for any sustained notes. Args: velocity: Midi velocity to give each note. Between 1 and 127 (inclusive). instrument: Midi instrument to give each note. program: Midi program to give each note. 
sequence_start_time: A time in seconds (float) that the first note in the sequence will land on. qpm: Quarter notes per minute (float). Returns: A NoteSequence proto encoding the given melody. """ seconds_per_step = 60.0 / qpm / self.steps_per_quarter sequence = music_pb2.NoteSequence() sequence.tempos.add().qpm = qpm sequence.ticks_per_quarter = STANDARD_PPQ sequence_start_time += self.start_step * seconds_per_step current_sequence_note = None for step, note in enumerate(self): if MIN_MIDI_PITCH <= note <= MAX_MIDI_PITCH: # End any sustained notes. if current_sequence_note is not None: current_sequence_note.end_time = ( step * seconds_per_step + sequence_start_time) # Add a note. current_sequence_note = sequence.notes.add() current_sequence_note.start_time = ( step * seconds_per_step + sequence_start_time) current_sequence_note.pitch = note current_sequence_note.velocity = velocity current_sequence_note.instrument = instrument current_sequence_note.program = program elif note == MELODY_NOTE_OFF: # End any sustained notes. if current_sequence_note is not None: current_sequence_note.end_time = ( step * seconds_per_step + sequence_start_time) current_sequence_note = None # End any sustained notes. if current_sequence_note is not None: current_sequence_note.end_time = ( len(self) * seconds_per_step + sequence_start_time) if sequence.notes: sequence.total_time = sequence.notes[-1].end_time return sequence def transpose(self, transpose_amount, min_note=0, max_note=128): """Transpose notes in this Melody. All notes are transposed the specified amount. Additionally, all notes are octave shifted to lie within the [min_note, max_note) range. Args: transpose_amount: The number of half steps to transpose this Melody. Positive values transpose up. Negative values transpose down. min_note: Minimum pitch (inclusive) that the resulting notes will take on. max_note: Maximum pitch (exclusive) that the resulting notes will take on. 
""" for i in range(len(self)): # Transpose MIDI pitches. Special events below MIN_MIDI_PITCH are not # changed. if self._events[i] >= MIN_MIDI_PITCH: self._events[i] += transpose_amount if self._events[i] < min_note: self._events[i] = ( min_note + (self._events[i] - min_note) % NOTES_PER_OCTAVE) elif self._events[i] >= max_note: self._events[i] = (max_note - NOTES_PER_OCTAVE + (self._events[i] - max_note) % NOTES_PER_OCTAVE) def squash(self, min_note, max_note, transpose_to_key=None): """Transpose and octave shift the notes in this Melody. The key center of this melody is computed with a heuristic, and the notes are transposed to be in the given key. The melody is also octave shifted to be centered in the given range. Additionally, all notes are octave shifted to lie within a given range. Args: min_note: Minimum pitch (inclusive) that the resulting notes will take on. max_note: Maximum pitch (exclusive) that the resulting notes will take on. transpose_to_key: The melody is transposed to be in this key or None if should not be transposed. 0 = C Major. Returns: How much notes are transposed by. """ if transpose_to_key is None: transpose_amount = 0 else: melody_key = self.get_major_key() key_diff = transpose_to_key - melody_key midi_notes = [note for note in self._events if MIN_MIDI_PITCH <= note <= MAX_MIDI_PITCH] if not midi_notes: return 0 melody_min_note = min(midi_notes) melody_max_note = max(midi_notes) melody_center = (melody_min_note + melody_max_note) / 2 target_center = (min_note + max_note - 1) / 2 center_diff = target_center - (melody_center + key_diff) transpose_amount = ( key_diff + NOTES_PER_OCTAVE * int(round(center_diff / float(NOTES_PER_OCTAVE)))) self.transpose(transpose_amount, min_note, max_note) return transpose_amount def set_length(self, steps, from_left=False): """Sets the length of the melody to the specified number of steps. If the melody is not long enough, ends any sustained notes and adds NO_EVENT steps for padding. 
If it is too long, it will be truncated to the requested length. Args: steps: How many steps long the melody should be. from_left: Whether to add/remove from the left instead of right. """ old_len = len(self) super(Melody, self).set_length(steps, from_left=from_left) if steps > old_len and not from_left: # When extending the melody on the right, we end any sustained notes. for i in reversed(range(old_len)): if self._events[i] == MELODY_NOTE_OFF: break elif self._events[i] != MELODY_NO_EVENT: self._events[old_len] = MELODY_NOTE_OFF break def increase_resolution(self, k): """Increase the resolution of a Melody. Increases the resolution of a Melody object by a factor of `k`. This uses MELODY_NO_EVENT to extend each event in the melody to be `k` steps long. Args: k: An integer, the factor by which to increase the resolution of the melody. """ super(Melody, self).increase_resolution( k, fill_event=MELODY_NO_EVENT) def midi_file_to_melody(midi_file, steps_per_quarter=4, qpm=None, ignore_polyphonic_notes=True): """Loads a melody from a MIDI file. Args: midi_file: Absolute path to MIDI file. steps_per_quarter: Quantization of Melody. For example, 4 = 16th notes. qpm: Tempo in quarters per a minute. If not set, tries to use the first tempo of the midi track and defaults to magenta.music.DEFAULT_QUARTERS_PER_MINUTE if fails. ignore_polyphonic_notes: Only use the highest simultaneous note if True. Returns: A Melody object extracted from the MIDI file. """ sequence = midi_io.midi_file_to_sequence_proto(midi_file) if qpm is None: if sequence.tempos: qpm = sequence.tempos[0].qpm else: qpm = constants.DEFAULT_QUARTERS_PER_MINUTE quantized_sequence = sequences_lib.quantize_note_sequence( sequence, steps_per_quarter=steps_per_quarter) melody = Melody() melody.from_quantized_sequence( quantized_sequence, ignore_polyphonic_notes=ignore_polyphonic_notes) return melody
en
0.862179
# Copyright 2020 The Magenta Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Utility functions for working with melodies. Use extract_melodies to extract monophonic melodies from a quantized NoteSequence proto. Use Melody.to_sequence to write a melody to a NoteSequence proto. Then use midi_io.sequence_proto_to_midi_file to write that NoteSequence to a midi file. # pylint: disable=redefined-builtin Stores a quantized stream of monophonic melody events. Melody is an intermediate representation that all melody models can use. Quantized sequence to Melody code will do work to align notes and extract extract monophonic melodies. Model-specific code then needs to convert Melody to SequenceExample protos for TensorFlow. Melody implements an iterable object. Simply iterate to retrieve the melody events. Melody events are integers in range [-2, 127] (inclusive), where negative values are the special event events: MELODY_NOTE_OFF, and MELODY_NO_EVENT. Non-negative values [0, 127] are note-on events for that midi pitch. A note starts at a non-negative value (that is the pitch), and is held through subsequent MELODY_NO_EVENT events until either another non-negative value is reached (even if the pitch is the same as the previous note), or a MELODY_NOTE_OFF event is reached. A MELODY_NOTE_OFF starts at least one step of silence, which continues through MELODY_NO_EVENT events until the next non-negative value. MELODY_NO_EVENT values are treated as default filler. 
Notes must be inserted in ascending order by start time. Note end times will be truncated if the next note overlaps. Any sustained notes are implicitly turned off at the end of a melody. Melodies can start at any non-negative time, and are shifted left so that the bar containing the first note-on event is the first bar. Attributes: start_step: The offset of the first step of the melody relative to the beginning of the source sequence. Will always be the first step of a bar. end_step: The offset to the beginning of the bar following the last step of the melody relative the beginning of the source sequence. Will always be the first step of a bar. steps_per_quarter: Number of steps in in a quarter note. steps_per_bar: Number of steps in a bar (measure) of music. Construct a Melody. Initializes with a list of event values and sets attributes. Args: events: List of Melody events to set melody to. start_step: The integer starting step offset. steps_per_bar: The number of steps in a bar. steps_per_quarter: The number of steps in a quarter note. Raises: ValueError: If `events` contains an event that is not in the proper range. # Replace MELODY_NOTE_OFF events with MELODY_NO_EVENT before first note. Adds the given note to the `events` list. `start_step` is set to the given pitch. `end_step` is set to NOTE_OFF. Everything after `start_step` in `events` is deleted before the note is added. `events`'s length will be changed so that the last event has index `end_step`. Args: pitch: Midi pitch. An integer between 0 and 127 inclusive. start_step: A non-negative integer step that the note begins on. end_step: An integer step that the note ends on. The note is considered to end at the onset of the end step. `end_step` must be greater than `start_step`. Raises: BadNoteError: If `start_step` does not precede `end_step`. Returns indexes of the most recent pitch and NOTE_OFF events. Returns: A tuple (start_step, end_step) of the last note's on and off event indices. 
Raises: ValueError: If `events` contains no NOTE_OFF or pitch events. Gets a histogram of the note occurrences in a melody. Returns: A list of 12 ints, one for each note value (C at index 0 through B at index 11). Each int is the total number of times that note occurred in the melody. Gets a histogram of the how many notes fit into each key. Returns: A list of 12 ints, one for each Major key (C Major at index 0 through B Major at index 11). Each int is the total number of notes that could fit into that key. Finds the major key that this melody most likely belongs to. If multiple keys match equally, the key with the lowest index is returned, where the indexes of the keys are C Major = 0 through B Major = 11. Returns: An int for the most likely key (C Major = 0 through B Major = 11) Appends the event to the end of the melody and increments the end step. An implicit NOTE_OFF at the end of the melody will not be respected by this modification. Args: event: The integer Melody event to append to the end. Raises: ValueError: If `event` is not in the proper range. Populate self with a melody from the given quantized NoteSequence. A monophonic melody is extracted from the given `instrument` starting at `search_start_step`. `instrument` and `search_start_step` can be used to drive extraction of multiple melodies from the same quantized sequence. The end step of the extracted melody will be stored in `self._end_step`. 0 velocity notes are ignored. The melody extraction is ended when there are no held notes for a time stretch of `gap_bars` in bars (measures) of music. The number of time steps per bar is computed from the time signature in `quantized_sequence`. `ignore_polyphonic_notes` determines what happens when polyphonic (multiple notes start at the same time) data is encountered. If `ignore_polyphonic_notes` is true, the highest pitch is used in the melody when multiple notes start at the same time. If false, an exception is raised. 
Args: quantized_sequence: A NoteSequence quantized with sequences_lib.quantize_note_sequence. search_start_step: Start searching for a melody at this time step. Assumed to be the first step of a bar. instrument: Search for a melody in this instrument number. gap_bars: If this many bars or more follow a NOTE_OFF event, the melody is ended. ignore_polyphonic_notes: If True, the highest pitch is used in the melody when multiple notes start at the same time. If False, PolyphonicMelodyError will be raised if multiple notes start at the same time. pad_end: If True, the end of the melody will be padded with NO_EVENTs so that it will end at a bar boundary. filter_drums: If True, notes for which `is_drum` is True will be ignored. Raises: NonIntegerStepsPerBarError: If `quantized_sequence`'s bar length (derived from its time signature) is not an integer number of time steps. PolyphonicMelodyError: If any of the notes start on the same step and `ignore_polyphonic_notes` is False. # Sort track by note start times, and secondarily by pitch descending. # The first step in the melody, beginning at the first step of a bar. # Ignore 0 velocity notes. # If there are no events, we don't need to check for polyphony. # If `start_index` comes before or lands on an already added note's start # step, we cannot add it. In that case either discard the melody or keep # the highest pitch. # Keep highest note. # Notes are sorted by pitch descending, so if a note is already at # this position its the highest pitch. # If a gap of `gap` or more steps is found, end the melody. # pylint:disable=len-as-condition # Add the note-on and off events to the melody. # If no notes were added, don't set `_start_step` and `_end_step`. # Strip final MELODY_NOTE_OFF event. # Optionally round up `_end_step` to a multiple of `steps_per_bar`. Converts the Melody to NoteSequence proto. The end of the melody is treated as a NOTE_OFF event for any sustained notes. Args: velocity: Midi velocity to give each note. 
Between 1 and 127 (inclusive). instrument: Midi instrument to give each note. program: Midi program to give each note. sequence_start_time: A time in seconds (float) that the first note in the sequence will land on. qpm: Quarter notes per minute (float). Returns: A NoteSequence proto encoding the given melody. # End any sustained notes. # Add a note. # End any sustained notes. # End any sustained notes. Transpose notes in this Melody. All notes are transposed the specified amount. Additionally, all notes are octave shifted to lie within the [min_note, max_note) range. Args: transpose_amount: The number of half steps to transpose this Melody. Positive values transpose up. Negative values transpose down. min_note: Minimum pitch (inclusive) that the resulting notes will take on. max_note: Maximum pitch (exclusive) that the resulting notes will take on. # Transpose MIDI pitches. Special events below MIN_MIDI_PITCH are not # changed. Transpose and octave shift the notes in this Melody. The key center of this melody is computed with a heuristic, and the notes are transposed to be in the given key. The melody is also octave shifted to be centered in the given range. Additionally, all notes are octave shifted to lie within a given range. Args: min_note: Minimum pitch (inclusive) that the resulting notes will take on. max_note: Maximum pitch (exclusive) that the resulting notes will take on. transpose_to_key: The melody is transposed to be in this key or None if should not be transposed. 0 = C Major. Returns: How much notes are transposed by. Sets the length of the melody to the specified number of steps. If the melody is not long enough, ends any sustained notes and adds NO_EVENT steps for padding. If it is too long, it will be truncated to the requested length. Args: steps: How many steps long the melody should be. from_left: Whether to add/remove from the left instead of right. # When extending the melody on the right, we end any sustained notes. 
Increase the resolution of a Melody. Increases the resolution of a Melody object by a factor of `k`. This uses MELODY_NO_EVENT to extend each event in the melody to be `k` steps long. Args: k: An integer, the factor by which to increase the resolution of the melody. Loads a melody from a MIDI file. Args: midi_file: Absolute path to MIDI file. steps_per_quarter: Quantization of Melody. For example, 4 = 16th notes. qpm: Tempo in quarters per a minute. If not set, tries to use the first tempo of the midi track and defaults to magenta.music.DEFAULT_QUARTERS_PER_MINUTE if fails. ignore_polyphonic_notes: Only use the highest simultaneous note if True. Returns: A Melody object extracted from the MIDI file.
2.13308
2
rssnotifier/typings.py
PredaaA/JackCogs
0
6628687
<reponame>PredaaA/JackCogs # Copyright 2018-2020 <NAME> (https://github.com/jack1142) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ These type hints are terrible, but that's because Red's type hints are terrible and these have to be compatible... Yes, I know how that sounds. """ from typing import ( Any, Awaitable, Callable, Coroutine, Generator, Protocol, TypeVar, Union, overload, ) from redbot.core.commands import Context _CT = TypeVar("_CT", bound=Context) _T = TypeVar("_T") CoroLike = Callable[..., Union[Awaitable[_T], Generator[Any, None, _T]]] class CheckDecorator(Protocol): predicate: Coroutine[Any, Any, bool] @overload def __call__(self, func: _CT) -> _CT: ... @overload def __call__(self, func: CoroLike[Any]) -> CoroLike[Any]: ...
# Copyright 2018-2020 <NAME> (https://github.com/jack1142) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ These type hints are terrible, but that's because Red's type hints are terrible and these have to be compatible... Yes, I know how that sounds. """ from typing import ( Any, Awaitable, Callable, Coroutine, Generator, Protocol, TypeVar, Union, overload, ) from redbot.core.commands import Context _CT = TypeVar("_CT", bound=Context) _T = TypeVar("_T") CoroLike = Callable[..., Union[Awaitable[_T], Generator[Any, None, _T]]] class CheckDecorator(Protocol): predicate: Coroutine[Any, Any, bool] @overload def __call__(self, func: _CT) -> _CT: ... @overload def __call__(self, func: CoroLike[Any]) -> CoroLike[Any]: ...
en
0.892299
# Copyright 2018-2020 <NAME> (https://github.com/jack1142) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. These type hints are terrible, but that's because Red's type hints are terrible and these have to be compatible... Yes, I know how that sounds.
2.000327
2
main.py
adambaumeister/panhit
0
6628688
<gh_stars>0 from phlibs.host import HostList from phlibs.config import ConfigFile from phlibs.messages import * from flask import Flask, escape, request, render_template from phlibs.jqueue import Job, JobQueue from phlibs.outputs import JsonOutput # Default path to the configuration file for PANHIT DEFAULT_CONFIG_FILE="server.yaml" application = Flask(__name__) def configure(j): """ Configures PANHIT. :param j: (dict) Dictionary as received from JSON :return: ConfigFile object, HostList object """ mod_opts = {} c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) # Add any configuration that the client has sent in the request if 'config' in j: c.unpickle(j['config']) if 'mod_opts' in j['config']: mod_opts = j['config']['mod_opts'] mods = c.init_modules(mod_opts) db = c.get_db() input = c.get_input(mod_opts) hl = HostList(input, mods_enabled=mods, db=db) return c, hl # If a specfile is passed, load from stored configuration instead of parsing as if it were a config file elif 'spec' in j: (inputs, mods, tag_policies, outputs) = c.load_from_spec(j['spec']) db = c.get_db() if 'name' in j: c.name = j['name'] # Hack - currently HostList only supports one input hl = HostList(inputs[0], mods_enabled=mods, db=db, tags_policy=tag_policies, output=outputs) return c, hl ################ # VIEW METHODS # ################ @application.route('/', methods=['GET']) def index(): """ Basic index and welcome page. 
:return: index.html """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() summary = db.summary() return render_template('index.html', summary=summary) @application.route('/config/input', methods=['GET']) def config_input_page(): """ Configuration landing page :return: config.html """ config_type = "input" c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) cdb = c.get_cdb() cdb.update_path(config_type) docs = cdb.get_all() inputs = [] for doc in docs: i = c.get_input_from_data(doc) inputs.append(i) input_types = c.get_inputs_available() config_descr = """ An Input is a list of host IP addresses, either statically defined or dynamically retrieved. """ return render_template('config.html', items=inputs, config_type=config_type, config_descr=config_descr, item_types=input_types) @application.route('/config/outputs', methods=['GET']) def config_output_page(): """ Configuration landing page :return: config.html """ config_type = "output" c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) cdb = c.get_cdb() cdb.update_path(config_type) docs = cdb.get_all() outputs = [] for doc in docs: i = c.get_output_from_data(doc) outputs.append(i) output_types = c.get_outputs_available() config_descr = """ Outputs act as stores - seperate from the local database - for host information """ return render_template('config.html', items=outputs, config_type=config_type, config_descr=config_descr, item_types=output_types) @application.route('/config/tags', methods=['GET']) def config_tags(): c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() j = db.get_all_in_subdir_sorted('jqstatus', limit=5) 
cdb = c.get_cdb() cdb.update_path("tags") docs = cdb.get_all() return render_template('tag_config.html', jobs=j, items=docs) @application.route('/config/taglist', methods=['GET']) def config_taglist(): c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) cdb = c.get_cdb() cdb.update_path("tags") tags = cdb.get_all() cdb = c.get_cdb() cdb.update_path("taglist") docs = cdb.get_all() return render_template('taglist.html', tags=tags, items=docs) @application.route('/config/modules', methods=['GET']) def config_modules_page(): """ Configuration landing page :return: config.html """ config_type = "modules" c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) cdb = c.get_cdb() cdb.update_path(config_type) docs = cdb.get_all() mods = [] for doc in docs: i = c.get_module_from_data(doc) mods.append(i) mod_types = c.get_mods_available() config_descr = """ A module retrieves additional information about a host, to be later displayed or used as categorization. 
""" return render_template('config.html', items=mods, config_type=config_type, config_descr=config_descr, item_types=mod_types) @application.route('/run', methods=['GET']) def spec_page(): """ Render the job runner page :return: spec.html """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) # Grab all the inputs cdb = c.get_cdb() cdb.update_path("input") docs = cdb.get_all() inputs = [] for doc in docs: i = c.get_input_from_data(doc) inputs.append(i) # Grab all the modules cdb = c.get_cdb() cdb.update_path("modules") docs = cdb.get_all() modules = [] for doc in docs: i = c.get_module_from_data(doc) modules.append(i) cdb = c.get_cdb() cdb.update_path("taglist") docs = cdb.get_all() tag_policies = docs cdb = c.get_cdb() cdb.update_path("output") docs = cdb.get_all() outputs = docs cdb = c.get_cdb() cdb.update_path("specs") docs = cdb.get_all() specs = docs return render_template('spec.html', inputs=inputs, modules=modules, tag_policies=tag_policies, outputs=outputs, specs=specs) @application.route('/jobs', methods=['GET']) def jobs_page(): """ Display running, scheduled, and completed jobs. :return: Jobs page """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() j = db.get_all_in_subdir_sorted('jqstatus') return render_template('jobs.html', jobs=j) @application.route('/jobs/<job_id>', methods=['GET']) def job_page(job_id): """ Return the result of a specific job. 
:return: Jobs page """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() db.update_path_nocreate(job_id) hl = HostList(db=db) hosts = hl.get_all_hosts() job_status = db.get('jqstatus') return render_template('job.html', hosts=hosts, job=job_status) ############### # API METHODS # ############### @application.route('/api/run', methods=['POST']) def run(): """ Primary job schedular This route takes a complete job spec from the API and outputs the scheduled job ID as a valid JobQueue ID, which it then runs. :return: JobStarted JSON message type """ try: c, hl = configure(request.get_json()) except ValueError as e: return { "Error": str(e) } j = request.get_json() if j["save"]: # Save the job as a spec cdb = c.get_cdb() cdb.update_path("specs") cdb.write(request.get_json()) # Run the actual job in the background and return immediately jq = JobQueue() # Set the job quueue name to the configuration spec name db = c.get_db() db.update_path(jq.id) jq.set_db(db) jq.set_name(c.name) j = Job(hl.run_all_hosts, args=(jq,)) m = JobStarted() m.status = "started" m.jid = jq.id j.Run() return m.GetMsg() @application.route('/api/jobs/<job_id>', methods=['GET']) def get_job(job_id): """ Individual job retrieval This route retreives a job, either current or historical, from the configured database type. :param job_id: ID of job, either running, scheduled, or existing. :return: JobStatus JSON message type. 
""" c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() db.update_path(job_id) jqstatus = db.get('jqstatus') m = JobStatus() m.set_from_json(jqstatus) return m.GetMsg() @application.route('/api/specs/<spec_id>', methods=['GET']) def get_job_spec(spec_id): """ Get a saved job spec :param spec_id: ID of spec :return: JobSpec type """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) cdb = c.get_cdb() cdb.update_path("specs") json = cdb.get(spec_id) return json @application.route('/api/jobs/<job_id>/result', methods=['GET']) def get_job_result(job_id): """ Job result retrieval This route retreives a job, either current or historical, from the configured database type. :param job_id: ID of completed job. :return: JobResult message type """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() db.update_path(job_id) data = {} jqstatus = db.get('jqstatus') o = JsonOutput() if jqstatus['completed'] == jqstatus['queued']: hl = HostList(db=db) data = o.Output(hl) m = JobResult() t = request.args.get('table') if t: page = request.args.get('page') html = request.args.get('as_html') m.set_table_from_json(data) if page: m.page(page, 5) if html: return m.as_html() else: m.set_from_json(data) return m.GetMsg() @application.route('/api/jobs', methods=['GET']) def list_jobs(): """ List all current and past jobs. 
This call can be paginated but is not by default :return: JobList JSON message type """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() j = db.get_all_in_subdir_sorted('jqstatus') j.reverse() t = request.args.get('table') m = JobList() if t: page = request.args.get('page') html = request.args.get('as_html') m.set_table_from_json(j) if page: m.page(page, 10) if html: return m.as_html() else: m.set_from_json(j) return m.GetMsg() @application.route('/api/jobs/<job_id>/graph', methods=['GET']) def graph_job(job_id): """ Return a graph of host based data for a given job :return: JobGraph message type """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() db.update_path_nocreate(job_id) hl = HostList(db=db) labels,data,bg_colors = hl.stats_by_tag() m = JobGraph() m.set_graph(labels,data, bg_colors) return m.GetMsg() @application.route('/api/jobs/<job_id>/tag_spec', methods=['GET']) def get_job_tag_spec(job_id): """ Job tag spec Returns all the fields that are taggable from the result of a job :param job_id: ID of job, either running, scheduled, or existing. :return: JobStatus JSON message type. """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() db.update_path(job_id) index = db.get("index") first_id = index[0] j = db.get(first_id) m = TagSpec() m.set_spec(j) if request.args.get('as_html'): return m.as_html() return m.GetMsg() @application.route('/api/config/<config_type>', methods=['POST']) def add_config(config_type): """ Add a configuration object of the given type with the given object value Object is expected as JSON. 
:param config_type: Type of configuration object to add :return: ConfigStatus """ c = ConfigFile() c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_cdb() db.update_path(config_type) input_json = request.get_json() name = input_json['name'] db.write_id(name, input_json) m = ConfigStatus() m.set_name(name) m.set_status(0) m.long_status = "Sucessfully added {} object.".format(config_type) return m.GetMsg() @application.route('/api/config/<config_type>', methods=['GET']) def get_config(config_type): """ Retrieve all the configuration objects matching the provided type :param config_type: Type of configuration object to retrieve :return: ConfigGet """ c = ConfigFile() c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_cdb() db.update_path(config_type) names = [] for item in db.get_all(): names.append(item['name']) m = ConfigGet() m.set_items(names) return m.GetMsg() @application.route('/api/config/<config_type>/<config_name>', methods=['GET']) def get_config_item(config_type, config_name): """ Retrieve all the configuration objects matching the provided type :param config_type: Type of configuration object to retrieve :return: ConfigGet """ c = ConfigFile() c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_cdb() db.update_path(config_type) item = db.get(config_name) m = ConfigGet() m.set_items([item]) return m.GetMsg() @application.route('/api/config/<config_type>/<config_name>', methods=['DELETE']) def delete_config_item(config_type, config_name): """ Retrieve all the configuration objects matching the provided type :param config_type: Type of configuration object to retrieve :return: ConfigGet """ c = ConfigFile() c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_cdb() db.update_path(config_type) db.delete_id(config_name) m = ConfigStatus() m.set_name(config_name) m.set_status(0) m.set_long_status("Deleted item.") return m.GetMsg() @application.route('/api/config/<config_type>/<module_name>/spec', methods=['GET']) def get_config_spec(config_type, module_name): """ Get the 
spec of the given configuration type (i.e options etc) :param config_type: Type of configuration object to retrieve :return: ConfigGet """ c = ConfigFile() c.load_from_file(DEFAULT_CONFIG_FILE) if config_type == "input": inputs = c.get_inputs_available() if module_name in inputs: mod = inputs[module_name] elif config_type == "output": outputs = c.get_outputs_available() if module_name in outputs: mod = outputs[module_name] else: mods = c.get_mods_available() if module_name in mods: mod = mods[module_name] spec = mod.module_options.get_spec() m = ModuleSpec() m.set_specs(spec) m.set_type(config_type) m.set_name(module_name) if request.args.get("from"): f = request.args.get("from") db = c.get_cdb() db.update_path(config_type) values = db.get(f) m.add_values(values) if request.args.get('as_html'): return m.as_html() return m.GetMsg()
from phlibs.host import HostList from phlibs.config import ConfigFile from phlibs.messages import * from flask import Flask, escape, request, render_template from phlibs.jqueue import Job, JobQueue from phlibs.outputs import JsonOutput # Default path to the configuration file for PANHIT DEFAULT_CONFIG_FILE="server.yaml" application = Flask(__name__) def configure(j): """ Configures PANHIT. :param j: (dict) Dictionary as received from JSON :return: ConfigFile object, HostList object """ mod_opts = {} c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) # Add any configuration that the client has sent in the request if 'config' in j: c.unpickle(j['config']) if 'mod_opts' in j['config']: mod_opts = j['config']['mod_opts'] mods = c.init_modules(mod_opts) db = c.get_db() input = c.get_input(mod_opts) hl = HostList(input, mods_enabled=mods, db=db) return c, hl # If a specfile is passed, load from stored configuration instead of parsing as if it were a config file elif 'spec' in j: (inputs, mods, tag_policies, outputs) = c.load_from_spec(j['spec']) db = c.get_db() if 'name' in j: c.name = j['name'] # Hack - currently HostList only supports one input hl = HostList(inputs[0], mods_enabled=mods, db=db, tags_policy=tag_policies, output=outputs) return c, hl ################ # VIEW METHODS # ################ @application.route('/', methods=['GET']) def index(): """ Basic index and welcome page. 
:return: index.html """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() summary = db.summary() return render_template('index.html', summary=summary) @application.route('/config/input', methods=['GET']) def config_input_page(): """ Configuration landing page :return: config.html """ config_type = "input" c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) cdb = c.get_cdb() cdb.update_path(config_type) docs = cdb.get_all() inputs = [] for doc in docs: i = c.get_input_from_data(doc) inputs.append(i) input_types = c.get_inputs_available() config_descr = """ An Input is a list of host IP addresses, either statically defined or dynamically retrieved. """ return render_template('config.html', items=inputs, config_type=config_type, config_descr=config_descr, item_types=input_types) @application.route('/config/outputs', methods=['GET']) def config_output_page(): """ Configuration landing page :return: config.html """ config_type = "output" c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) cdb = c.get_cdb() cdb.update_path(config_type) docs = cdb.get_all() outputs = [] for doc in docs: i = c.get_output_from_data(doc) outputs.append(i) output_types = c.get_outputs_available() config_descr = """ Outputs act as stores - seperate from the local database - for host information """ return render_template('config.html', items=outputs, config_type=config_type, config_descr=config_descr, item_types=output_types) @application.route('/config/tags', methods=['GET']) def config_tags(): c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() j = db.get_all_in_subdir_sorted('jqstatus', limit=5) 
cdb = c.get_cdb() cdb.update_path("tags") docs = cdb.get_all() return render_template('tag_config.html', jobs=j, items=docs) @application.route('/config/taglist', methods=['GET']) def config_taglist(): c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) cdb = c.get_cdb() cdb.update_path("tags") tags = cdb.get_all() cdb = c.get_cdb() cdb.update_path("taglist") docs = cdb.get_all() return render_template('taglist.html', tags=tags, items=docs) @application.route('/config/modules', methods=['GET']) def config_modules_page(): """ Configuration landing page :return: config.html """ config_type = "modules" c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) cdb = c.get_cdb() cdb.update_path(config_type) docs = cdb.get_all() mods = [] for doc in docs: i = c.get_module_from_data(doc) mods.append(i) mod_types = c.get_mods_available() config_descr = """ A module retrieves additional information about a host, to be later displayed or used as categorization. 
""" return render_template('config.html', items=mods, config_type=config_type, config_descr=config_descr, item_types=mod_types) @application.route('/run', methods=['GET']) def spec_page(): """ Render the job runner page :return: spec.html """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) # Grab all the inputs cdb = c.get_cdb() cdb.update_path("input") docs = cdb.get_all() inputs = [] for doc in docs: i = c.get_input_from_data(doc) inputs.append(i) # Grab all the modules cdb = c.get_cdb() cdb.update_path("modules") docs = cdb.get_all() modules = [] for doc in docs: i = c.get_module_from_data(doc) modules.append(i) cdb = c.get_cdb() cdb.update_path("taglist") docs = cdb.get_all() tag_policies = docs cdb = c.get_cdb() cdb.update_path("output") docs = cdb.get_all() outputs = docs cdb = c.get_cdb() cdb.update_path("specs") docs = cdb.get_all() specs = docs return render_template('spec.html', inputs=inputs, modules=modules, tag_policies=tag_policies, outputs=outputs, specs=specs) @application.route('/jobs', methods=['GET']) def jobs_page(): """ Display running, scheduled, and completed jobs. :return: Jobs page """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() j = db.get_all_in_subdir_sorted('jqstatus') return render_template('jobs.html', jobs=j) @application.route('/jobs/<job_id>', methods=['GET']) def job_page(job_id): """ Return the result of a specific job. 
:return: Jobs page """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() db.update_path_nocreate(job_id) hl = HostList(db=db) hosts = hl.get_all_hosts() job_status = db.get('jqstatus') return render_template('job.html', hosts=hosts, job=job_status) ############### # API METHODS # ############### @application.route('/api/run', methods=['POST']) def run(): """ Primary job schedular This route takes a complete job spec from the API and outputs the scheduled job ID as a valid JobQueue ID, which it then runs. :return: JobStarted JSON message type """ try: c, hl = configure(request.get_json()) except ValueError as e: return { "Error": str(e) } j = request.get_json() if j["save"]: # Save the job as a spec cdb = c.get_cdb() cdb.update_path("specs") cdb.write(request.get_json()) # Run the actual job in the background and return immediately jq = JobQueue() # Set the job quueue name to the configuration spec name db = c.get_db() db.update_path(jq.id) jq.set_db(db) jq.set_name(c.name) j = Job(hl.run_all_hosts, args=(jq,)) m = JobStarted() m.status = "started" m.jid = jq.id j.Run() return m.GetMsg() @application.route('/api/jobs/<job_id>', methods=['GET']) def get_job(job_id): """ Individual job retrieval This route retreives a job, either current or historical, from the configured database type. :param job_id: ID of job, either running, scheduled, or existing. :return: JobStatus JSON message type. 
""" c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() db.update_path(job_id) jqstatus = db.get('jqstatus') m = JobStatus() m.set_from_json(jqstatus) return m.GetMsg() @application.route('/api/specs/<spec_id>', methods=['GET']) def get_job_spec(spec_id): """ Get a saved job spec :param spec_id: ID of spec :return: JobSpec type """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) cdb = c.get_cdb() cdb.update_path("specs") json = cdb.get(spec_id) return json @application.route('/api/jobs/<job_id>/result', methods=['GET']) def get_job_result(job_id): """ Job result retrieval This route retreives a job, either current or historical, from the configured database type. :param job_id: ID of completed job. :return: JobResult message type """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() db.update_path(job_id) data = {} jqstatus = db.get('jqstatus') o = JsonOutput() if jqstatus['completed'] == jqstatus['queued']: hl = HostList(db=db) data = o.Output(hl) m = JobResult() t = request.args.get('table') if t: page = request.args.get('page') html = request.args.get('as_html') m.set_table_from_json(data) if page: m.page(page, 5) if html: return m.as_html() else: m.set_from_json(data) return m.GetMsg() @application.route('/api/jobs', methods=['GET']) def list_jobs(): """ List all current and past jobs. 
This call can be paginated but is not by default :return: JobList JSON message type """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() j = db.get_all_in_subdir_sorted('jqstatus') j.reverse() t = request.args.get('table') m = JobList() if t: page = request.args.get('page') html = request.args.get('as_html') m.set_table_from_json(j) if page: m.page(page, 10) if html: return m.as_html() else: m.set_from_json(j) return m.GetMsg() @application.route('/api/jobs/<job_id>/graph', methods=['GET']) def graph_job(job_id): """ Return a graph of host based data for a given job :return: JobGraph message type """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() db.update_path_nocreate(job_id) hl = HostList(db=db) labels,data,bg_colors = hl.stats_by_tag() m = JobGraph() m.set_graph(labels,data, bg_colors) return m.GetMsg() @application.route('/api/jobs/<job_id>/tag_spec', methods=['GET']) def get_job_tag_spec(job_id): """ Job tag spec Returns all the fields that are taggable from the result of a job :param job_id: ID of job, either running, scheduled, or existing. :return: JobStatus JSON message type. """ c = ConfigFile() # First load in all the configuration from the provided configuration file, if it exists c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_db() db.update_path(job_id) index = db.get("index") first_id = index[0] j = db.get(first_id) m = TagSpec() m.set_spec(j) if request.args.get('as_html'): return m.as_html() return m.GetMsg() @application.route('/api/config/<config_type>', methods=['POST']) def add_config(config_type): """ Add a configuration object of the given type with the given object value Object is expected as JSON. 
:param config_type: Type of configuration object to add :return: ConfigStatus """ c = ConfigFile() c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_cdb() db.update_path(config_type) input_json = request.get_json() name = input_json['name'] db.write_id(name, input_json) m = ConfigStatus() m.set_name(name) m.set_status(0) m.long_status = "Sucessfully added {} object.".format(config_type) return m.GetMsg() @application.route('/api/config/<config_type>', methods=['GET']) def get_config(config_type): """ Retrieve all the configuration objects matching the provided type :param config_type: Type of configuration object to retrieve :return: ConfigGet """ c = ConfigFile() c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_cdb() db.update_path(config_type) names = [] for item in db.get_all(): names.append(item['name']) m = ConfigGet() m.set_items(names) return m.GetMsg() @application.route('/api/config/<config_type>/<config_name>', methods=['GET']) def get_config_item(config_type, config_name): """ Retrieve all the configuration objects matching the provided type :param config_type: Type of configuration object to retrieve :return: ConfigGet """ c = ConfigFile() c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_cdb() db.update_path(config_type) item = db.get(config_name) m = ConfigGet() m.set_items([item]) return m.GetMsg() @application.route('/api/config/<config_type>/<config_name>', methods=['DELETE']) def delete_config_item(config_type, config_name): """ Retrieve all the configuration objects matching the provided type :param config_type: Type of configuration object to retrieve :return: ConfigGet """ c = ConfigFile() c.load_from_file(DEFAULT_CONFIG_FILE) db = c.get_cdb() db.update_path(config_type) db.delete_id(config_name) m = ConfigStatus() m.set_name(config_name) m.set_status(0) m.set_long_status("Deleted item.") return m.GetMsg() @application.route('/api/config/<config_type>/<module_name>/spec', methods=['GET']) def get_config_spec(config_type, module_name): """ Get the 
spec of the given configuration type (i.e options etc) :param config_type: Type of configuration object to retrieve :return: ConfigGet """ c = ConfigFile() c.load_from_file(DEFAULT_CONFIG_FILE) if config_type == "input": inputs = c.get_inputs_available() if module_name in inputs: mod = inputs[module_name] elif config_type == "output": outputs = c.get_outputs_available() if module_name in outputs: mod = outputs[module_name] else: mods = c.get_mods_available() if module_name in mods: mod = mods[module_name] spec = mod.module_options.get_spec() m = ModuleSpec() m.set_specs(spec) m.set_type(config_type) m.set_name(module_name) if request.args.get("from"): f = request.args.get("from") db = c.get_cdb() db.update_path(config_type) values = db.get(f) m.add_values(values) if request.args.get('as_html'): return m.as_html() return m.GetMsg()
en
0.682559
# Default path to the configuration file for PANHIT Configures PANHIT. :param j: (dict) Dictionary as received from JSON :return: ConfigFile object, HostList object # First load in all the configuration from the provided configuration file, if it exists # Add any configuration that the client has sent in the request # If a specfile is passed, load from stored configuration instead of parsing as if it were a config file # Hack - currently HostList only supports one input ################ # VIEW METHODS # ################ Basic index and welcome page. :return: index.html # First load in all the configuration from the provided configuration file, if it exists Configuration landing page :return: config.html # First load in all the configuration from the provided configuration file, if it exists An Input is a list of host IP addresses, either statically defined or dynamically retrieved. Configuration landing page :return: config.html # First load in all the configuration from the provided configuration file, if it exists Outputs act as stores - seperate from the local database - for host information # First load in all the configuration from the provided configuration file, if it exists # First load in all the configuration from the provided configuration file, if it exists Configuration landing page :return: config.html # First load in all the configuration from the provided configuration file, if it exists A module retrieves additional information about a host, to be later displayed or used as categorization. Render the job runner page :return: spec.html # First load in all the configuration from the provided configuration file, if it exists # Grab all the inputs # Grab all the modules Display running, scheduled, and completed jobs. :return: Jobs page # First load in all the configuration from the provided configuration file, if it exists Return the result of a specific job. 
:return: Jobs page # First load in all the configuration from the provided configuration file, if it exists ############### # API METHODS # ############### Primary job schedular This route takes a complete job spec from the API and outputs the scheduled job ID as a valid JobQueue ID, which it then runs. :return: JobStarted JSON message type # Save the job as a spec # Run the actual job in the background and return immediately # Set the job quueue name to the configuration spec name Individual job retrieval This route retreives a job, either current or historical, from the configured database type. :param job_id: ID of job, either running, scheduled, or existing. :return: JobStatus JSON message type. # First load in all the configuration from the provided configuration file, if it exists Get a saved job spec :param spec_id: ID of spec :return: JobSpec type # First load in all the configuration from the provided configuration file, if it exists Job result retrieval This route retreives a job, either current or historical, from the configured database type. :param job_id: ID of completed job. :return: JobResult message type # First load in all the configuration from the provided configuration file, if it exists List all current and past jobs. This call can be paginated but is not by default :return: JobList JSON message type # First load in all the configuration from the provided configuration file, if it exists Return a graph of host based data for a given job :return: JobGraph message type # First load in all the configuration from the provided configuration file, if it exists Job tag spec Returns all the fields that are taggable from the result of a job :param job_id: ID of job, either running, scheduled, or existing. :return: JobStatus JSON message type. # First load in all the configuration from the provided configuration file, if it exists Add a configuration object of the given type with the given object value Object is expected as JSON. 
:param config_type: Type of configuration object to add :return: ConfigStatus Retrieve all the configuration objects matching the provided type :param config_type: Type of configuration object to retrieve :return: ConfigGet Retrieve all the configuration objects matching the provided type :param config_type: Type of configuration object to retrieve :return: ConfigGet Retrieve all the configuration objects matching the provided type :param config_type: Type of configuration object to retrieve :return: ConfigGet Get the spec of the given configuration type (i.e options etc) :param config_type: Type of configuration object to retrieve :return: ConfigGet
2.205742
2
tests/test_depth_limitation.py
DreamEmulator/OpenCLSim
0
6628689
#!/usr/bin/env python # -*- coding: utf-8 -*- """Tests for `openclsim` package.""" import pytest import simpy import shapely.geometry import logging import datetime import time import numpy as np import pandas as pd from click.testing import CliRunner from openclsim import core from openclsim import model from openclsim import cli logger = logging.getLogger(__name__) @pytest.fixture def env(): simulation_start = datetime.datetime(2019, 1, 1) my_env = simpy.Environment(initial_time=time.mktime(simulation_start.timetuple())) my_env.epoch = time.mktime(simulation_start.timetuple()) return my_env @pytest.fixture def geometry_a(): return shapely.geometry.Point(0, 0) @pytest.fixture def geometry_b(): return shapely.geometry.Point(1, 1) @pytest.fixture def locatable_a(geometry_a): return core.Locatable(geometry_a) @pytest.fixture def locatable_b(geometry_b): return core.Locatable(geometry_b) @pytest.fixture def weather_data(): df = pd.read_csv("tests/test_weather.csv") df.index = df[["Year", "Month", "Day", "Hour"]].apply( lambda s: datetime.datetime(*s), axis=1 ) df = df.drop(["Year", "Month", "Day", "Hour"], axis=1) return df # make a location with metocean data @pytest.fixture def LocationWeather(): return type( "Location with Metocean", ( core.Identifiable, # Give it a name core.Log, # Allow logging of all discrete events core.Locatable, # Add coordinates to extract distance information and visualize core.HasContainer, # Add information on the material available at the site core.HasResource, # Add information on serving equipment core.HasWeather, ), # Add information on metocean data {}, ) # make a location without metocean data @pytest.fixture def Location(): return type( "Location without Metocean", ( core.Identifiable, # Give it a name core.Log, # Allow logging of all discrete events core.Locatable, # Add coordinates to extract distance information and visualize core.HasContainer, # Add information on the material available at the site core.HasResource, ), # Add 
information on serving equipment {}, ) # make the processors @pytest.fixture def Processor(): return type( "Processor", ( core.Identifiable, core.Processor, core.LoadingFunction, core.UnloadingFunction, core.Log, core.Locatable, ), {}, ) # make the movers @pytest.fixture def Mover(): return type( "Mover", ( core.Identifiable, core.Movable, core.Log, core.HasResource, core.HasContainer, core.HasDepthRestriction, ), {}, ) # Test calculating restrictions def test_calc_restrictions( env, geometry_a, Mover, Processor, LocationWeather, weather_data ): # Initialize the Mover def compute_draught(draught_empty, draught_full): return lambda x: x * (draught_full - draught_empty) + draught_empty data = { "env": env, # The simpy environment "name": "Vessel", # Name "geometry": geometry_a, # Location "capacity": 7_200, # Capacity of the hopper - "Beunvolume" "v": 1, # Speed always 1 m/s "compute_draught": compute_draught(4.0, 7.0), # Variable draught "waves": [0.5, 1], # Waves with specific ukc "ukc": [0.75, 1], # UKC corresponding to the waves "filling": None, } # The filling degree mover = Mover(**data) mover.ActivityID = "Test activity" data = { "env": env, # The simpy environment "name": "<NAME>", # Name "geometry": geometry_a, # It starts at the "from site" "loading_rate": 1, # Loading rate "unloading_rate": 1, } # Unloading rate crane = Processor(**data) crane.rate = crane.loading crane.ActivityID = "Test activity" # Initialize the LocationWeather data = { "env": env, # The simpy environment defined in the first cel "name": "Limited Location", # The name of the site "geometry": geometry_a, # Location "capacity": 500_000, # The capacity of the site "level": 500_000, # The actual volume of the site "dataframe": weather_data, # The dataframe containing the weather data "bed": -7, } # The level of the seabed with respect to CD location = LocationWeather(**data) # Test weather data at site # The bed level is at CD -7, the tide is at CD. 
thus the water depth is 7 meters assert location.metocean_data["Water depth"][0] == 7 # The timeseries start is equal to the simulation start assert location.metocean_data.index[0] == datetime.datetime.fromtimestamp(env.now) # Test calculated restrictions mover.calc_depth_restrictions(location, crane) assert mover.depth_data[location.name][0.5]["Volume"] == 3_600 assert mover.depth_data[location.name][0.5]["Draught"] == 5.5 # Test current draught of the mover (empty) assert mover.current_draught == 4.0 # Process an amount of 3_600 from the location into the mover # This takes 3_600 seconds and should be able to start right away start = env.now env.process(crane.process(site=location, ship=mover, desired_level=3_600)) env.run() np.testing.assert_almost_equal(env.now, start + 3_600) # Step forward to 18:00 def step_forward(env): yield env.timeout(17 * 3600) env.process(step_forward(env)) env.run() # Process an amount of 3_600 from the location into the mover # This takes 3_600 seconds and cannot start right away due to tide restrictions start = env.now assert datetime.datetime.fromtimestamp(env.now) == datetime.datetime(2019, 1, 1, 18) assert ( location.metocean_data["Water depth"][datetime.datetime(2019, 1, 1, 21)] == 6.5 ) assert mover.container.level / mover.container.capacity in list( mover.depth_data[location.name].keys() ) env.process(crane.process(ship=mover, site=location, desired_level=0)) env.run() # There should be 3 hours of waiting, 1 hour of processing, so time should be start + 4 hours np.testing.assert_almost_equal(env.now, start + 3_600 + 3 * 3_600) # Test optimal filling # Every 4th hour dredging not possible # sailing 2x 1 hour, dredging + dumping 1 hour, to get cycle with continous "optimal degree at 50%"
#!/usr/bin/env python # -*- coding: utf-8 -*- """Tests for `openclsim` package.""" import pytest import simpy import shapely.geometry import logging import datetime import time import numpy as np import pandas as pd from click.testing import CliRunner from openclsim import core from openclsim import model from openclsim import cli logger = logging.getLogger(__name__) @pytest.fixture def env(): simulation_start = datetime.datetime(2019, 1, 1) my_env = simpy.Environment(initial_time=time.mktime(simulation_start.timetuple())) my_env.epoch = time.mktime(simulation_start.timetuple()) return my_env @pytest.fixture def geometry_a(): return shapely.geometry.Point(0, 0) @pytest.fixture def geometry_b(): return shapely.geometry.Point(1, 1) @pytest.fixture def locatable_a(geometry_a): return core.Locatable(geometry_a) @pytest.fixture def locatable_b(geometry_b): return core.Locatable(geometry_b) @pytest.fixture def weather_data(): df = pd.read_csv("tests/test_weather.csv") df.index = df[["Year", "Month", "Day", "Hour"]].apply( lambda s: datetime.datetime(*s), axis=1 ) df = df.drop(["Year", "Month", "Day", "Hour"], axis=1) return df # make a location with metocean data @pytest.fixture def LocationWeather(): return type( "Location with Metocean", ( core.Identifiable, # Give it a name core.Log, # Allow logging of all discrete events core.Locatable, # Add coordinates to extract distance information and visualize core.HasContainer, # Add information on the material available at the site core.HasResource, # Add information on serving equipment core.HasWeather, ), # Add information on metocean data {}, ) # make a location without metocean data @pytest.fixture def Location(): return type( "Location without Metocean", ( core.Identifiable, # Give it a name core.Log, # Allow logging of all discrete events core.Locatable, # Add coordinates to extract distance information and visualize core.HasContainer, # Add information on the material available at the site core.HasResource, ), # Add 
information on serving equipment {}, ) # make the processors @pytest.fixture def Processor(): return type( "Processor", ( core.Identifiable, core.Processor, core.LoadingFunction, core.UnloadingFunction, core.Log, core.Locatable, ), {}, ) # make the movers @pytest.fixture def Mover(): return type( "Mover", ( core.Identifiable, core.Movable, core.Log, core.HasResource, core.HasContainer, core.HasDepthRestriction, ), {}, ) # Test calculating restrictions def test_calc_restrictions( env, geometry_a, Mover, Processor, LocationWeather, weather_data ): # Initialize the Mover def compute_draught(draught_empty, draught_full): return lambda x: x * (draught_full - draught_empty) + draught_empty data = { "env": env, # The simpy environment "name": "Vessel", # Name "geometry": geometry_a, # Location "capacity": 7_200, # Capacity of the hopper - "Beunvolume" "v": 1, # Speed always 1 m/s "compute_draught": compute_draught(4.0, 7.0), # Variable draught "waves": [0.5, 1], # Waves with specific ukc "ukc": [0.75, 1], # UKC corresponding to the waves "filling": None, } # The filling degree mover = Mover(**data) mover.ActivityID = "Test activity" data = { "env": env, # The simpy environment "name": "<NAME>", # Name "geometry": geometry_a, # It starts at the "from site" "loading_rate": 1, # Loading rate "unloading_rate": 1, } # Unloading rate crane = Processor(**data) crane.rate = crane.loading crane.ActivityID = "Test activity" # Initialize the LocationWeather data = { "env": env, # The simpy environment defined in the first cel "name": "Limited Location", # The name of the site "geometry": geometry_a, # Location "capacity": 500_000, # The capacity of the site "level": 500_000, # The actual volume of the site "dataframe": weather_data, # The dataframe containing the weather data "bed": -7, } # The level of the seabed with respect to CD location = LocationWeather(**data) # Test weather data at site # The bed level is at CD -7, the tide is at CD. 
thus the water depth is 7 meters assert location.metocean_data["Water depth"][0] == 7 # The timeseries start is equal to the simulation start assert location.metocean_data.index[0] == datetime.datetime.fromtimestamp(env.now) # Test calculated restrictions mover.calc_depth_restrictions(location, crane) assert mover.depth_data[location.name][0.5]["Volume"] == 3_600 assert mover.depth_data[location.name][0.5]["Draught"] == 5.5 # Test current draught of the mover (empty) assert mover.current_draught == 4.0 # Process an amount of 3_600 from the location into the mover # This takes 3_600 seconds and should be able to start right away start = env.now env.process(crane.process(site=location, ship=mover, desired_level=3_600)) env.run() np.testing.assert_almost_equal(env.now, start + 3_600) # Step forward to 18:00 def step_forward(env): yield env.timeout(17 * 3600) env.process(step_forward(env)) env.run() # Process an amount of 3_600 from the location into the mover # This takes 3_600 seconds and cannot start right away due to tide restrictions start = env.now assert datetime.datetime.fromtimestamp(env.now) == datetime.datetime(2019, 1, 1, 18) assert ( location.metocean_data["Water depth"][datetime.datetime(2019, 1, 1, 21)] == 6.5 ) assert mover.container.level / mover.container.capacity in list( mover.depth_data[location.name].keys() ) env.process(crane.process(ship=mover, site=location, desired_level=0)) env.run() # There should be 3 hours of waiting, 1 hour of processing, so time should be start + 4 hours np.testing.assert_almost_equal(env.now, start + 3_600 + 3 * 3_600) # Test optimal filling # Every 4th hour dredging not possible # sailing 2x 1 hour, dredging + dumping 1 hour, to get cycle with continous "optimal degree at 50%"
en
0.805186
#!/usr/bin/env python # -*- coding: utf-8 -*- Tests for `openclsim` package. # make a location with metocean data # Give it a name # Allow logging of all discrete events # Add coordinates to extract distance information and visualize # Add information on the material available at the site # Add information on serving equipment # Add information on metocean data # make a location without metocean data # Give it a name # Allow logging of all discrete events # Add coordinates to extract distance information and visualize # Add information on the material available at the site # Add information on serving equipment # make the processors # make the movers # Test calculating restrictions # Initialize the Mover # The simpy environment # Name # Location # Capacity of the hopper - "Beunvolume" # Speed always 1 m/s # Variable draught # Waves with specific ukc # UKC corresponding to the waves # The filling degree # The simpy environment # Name # It starts at the "from site" # Loading rate # Unloading rate # Initialize the LocationWeather # The simpy environment defined in the first cel # The name of the site # Location # The capacity of the site # The actual volume of the site # The dataframe containing the weather data # The level of the seabed with respect to CD # Test weather data at site # The bed level is at CD -7, the tide is at CD. 
thus the water depth is 7 meters # The timeseries start is equal to the simulation start # Test calculated restrictions # Test current draught of the mover (empty) # Process an amount of 3_600 from the location into the mover # This takes 3_600 seconds and should be able to start right away # Step forward to 18:00 # Process an amount of 3_600 from the location into the mover # This takes 3_600 seconds and cannot start right away due to tide restrictions # There should be 3 hours of waiting, 1 hour of processing, so time should be start + 4 hours # Test optimal filling # Every 4th hour dredging not possible # sailing 2x 1 hour, dredging + dumping 1 hour, to get cycle with continous "optimal degree at 50%"
2.434561
2
crawl_good_softwares/crawl_good_softwares/spiders/phantom_software_spider.py
skihyy/GT-Spring-2017-CS-6262
2
6628690
<reponame>skihyy/GT-Spring-2017-CS-6262<gh_stars>1-10 # -*- coding: utf-8 -*- import scrapy import urllib import os from selenium import webdriver from scrapy.http import Request class TestSpiderSpider(scrapy.Spider): name = "phantom_software_spider" start_urls = ['http://download.cnet.com/s/software/windows-free/?sort=most-popular&page='] current_page = 1 max_page = 1 headers = { 'Connection': 'keep - alive', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) ' 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36' } # using selenium + PhantomJS to simulate Chrome since directly use Chrome is tooooo slow # also, using Chrome need Chrome driver # using firefox need firefox driver = webdriver.PhantomJS() def start_requests(self): yield Request(self.start_urls[0], callback=self.parse, headers=self.headers) ''' for i in range(1, 2): url = self.start_urls[0] if 1 != i: url = url + str(i) ''' def parse(self, response): # using PhantomJS to reloaded the web page # get JS rendered page self.driver.get(response.url) software_links = self.driver.find_elements_by_xpath('//a[@data-position]') if software_links: # it is a download list page for link in software_links: href = link.get_attribute('href') yield Request(href, callback=self.parse, headers=self.headers) else: # it is a download page tmp_name = self.driver.title.split(' ') file_name = '' for name in tmp_name: if '-' != name: file_name = file_name + name file_name = file_name + 'a.exe' file_path = os.path.join('./app_download/', file_name) download_url = self.driver.find_element_by_xpath('//a[@class="dln-a"]') download_href = download_url.get_attribute('data-href') print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>') print(download_href) print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>') urllib.urlretrieve(download_href, file_path) def __del__(self): self.driver.quit()
# -*- coding: utf-8 -*- import scrapy import urllib import os from selenium import webdriver from scrapy.http import Request class TestSpiderSpider(scrapy.Spider): name = "phantom_software_spider" start_urls = ['http://download.cnet.com/s/software/windows-free/?sort=most-popular&page='] current_page = 1 max_page = 1 headers = { 'Connection': 'keep - alive', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) ' 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36' } # using selenium + PhantomJS to simulate Chrome since directly use Chrome is tooooo slow # also, using Chrome need Chrome driver # using firefox need firefox driver = webdriver.PhantomJS() def start_requests(self): yield Request(self.start_urls[0], callback=self.parse, headers=self.headers) ''' for i in range(1, 2): url = self.start_urls[0] if 1 != i: url = url + str(i) ''' def parse(self, response): # using PhantomJS to reloaded the web page # get JS rendered page self.driver.get(response.url) software_links = self.driver.find_elements_by_xpath('//a[@data-position]') if software_links: # it is a download list page for link in software_links: href = link.get_attribute('href') yield Request(href, callback=self.parse, headers=self.headers) else: # it is a download page tmp_name = self.driver.title.split(' ') file_name = '' for name in tmp_name: if '-' != name: file_name = file_name + name file_name = file_name + 'a.exe' file_path = os.path.join('./app_download/', file_name) download_url = self.driver.find_element_by_xpath('//a[@class="dln-a"]') download_href = download_url.get_attribute('data-href') print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>') print(download_href) print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>') urllib.urlretrieve(download_href, file_path) def __del__(self): self.driver.quit()
en
0.704587
# -*- coding: utf-8 -*- # using selenium + PhantomJS to simulate Chrome since directly use Chrome is tooooo slow # also, using Chrome need Chrome driver # using firefox need firefox for i in range(1, 2): url = self.start_urls[0] if 1 != i: url = url + str(i) # using PhantomJS to reloaded the web page # get JS rendered page # it is a download list page # it is a download page
3.072253
3
python--exercicios/ex078.py
Eliezer2000/python
0
6628691
<filename>python--exercicios/ex078.py '''valores = list() for cont in range(0, 5): valores.append(int(input(f'Digite um valor na posição {cont} : '))) print(f'Você digitou os valores {valores} ') print(f'O maior valor digitado foi {max(valores)} nas posições', end=' ') for i, v in enumerate(valores): if v == max(valores): print(f'{i}..', end=' ') print(f'\nO menor valor digitado foi {min(valores)} nas posições ', end=' ') for i, v in enumerate(valores): if v == min(valores): print(f'{i}..', end=' ')''' valores = list() for cont in range(0, 5): valores.append(int(input(f'Digite um valor na posição {cont} : '))) print(f'Você digitou os valores {valores}') print(f'O maior valor digitado foi {max(valores)} na posição', end=' ') for i, v in enumerate(valores): if v == max(valores): print(f'{i}..', end=' ') print(f'\nO menor valor digitado foi {min(valores)} na posição', end=' ') for i, v in enumerate(valores): if v == min(valores): print(f'{i}..', end=' ')
<filename>python--exercicios/ex078.py '''valores = list() for cont in range(0, 5): valores.append(int(input(f'Digite um valor na posição {cont} : '))) print(f'Você digitou os valores {valores} ') print(f'O maior valor digitado foi {max(valores)} nas posições', end=' ') for i, v in enumerate(valores): if v == max(valores): print(f'{i}..', end=' ') print(f'\nO menor valor digitado foi {min(valores)} nas posições ', end=' ') for i, v in enumerate(valores): if v == min(valores): print(f'{i}..', end=' ')''' valores = list() for cont in range(0, 5): valores.append(int(input(f'Digite um valor na posição {cont} : '))) print(f'Você digitou os valores {valores}') print(f'O maior valor digitado foi {max(valores)} na posição', end=' ') for i, v in enumerate(valores): if v == max(valores): print(f'{i}..', end=' ') print(f'\nO menor valor digitado foi {min(valores)} na posição', end=' ') for i, v in enumerate(valores): if v == min(valores): print(f'{i}..', end=' ')
pt
0.8278
valores = list() for cont in range(0, 5): valores.append(int(input(f'Digite um valor na posição {cont} : '))) print(f'Você digitou os valores {valores} ') print(f'O maior valor digitado foi {max(valores)} nas posições', end=' ') for i, v in enumerate(valores): if v == max(valores): print(f'{i}..', end=' ') print(f'\nO menor valor digitado foi {min(valores)} nas posições ', end=' ') for i, v in enumerate(valores): if v == min(valores): print(f'{i}..', end=' ')
4.125433
4
user/views.py
Cjwpython/dgk_api
0
6628692
from django.shortcuts import render # Create your views here. from rest_framework import generics from rest_framework.decorators import permission_classes from rest_framework.permissions import AllowAny from user.models import User from user.serializers import UserSerializer @permission_classes((AllowAny,)) class UserCreateAPIView(generics.CreateAPIView): '''注册用户''' queryset = User.objects.all() serializer_class = UserSerializer
from django.shortcuts import render # Create your views here. from rest_framework import generics from rest_framework.decorators import permission_classes from rest_framework.permissions import AllowAny from user.models import User from user.serializers import UserSerializer @permission_classes((AllowAny,)) class UserCreateAPIView(generics.CreateAPIView): '''注册用户''' queryset = User.objects.all() serializer_class = UserSerializer
en
0.907883
# Create your views here. 注册用户
1.895041
2
tests/conftest.py
ixje/app-neo3
0
6628693
<gh_stars>0 from pathlib import Path import pytest from ledgercomm import Transport from boilerplate_client.boilerplate_cmd import BoilerplateCommand from boilerplate_client.button import ButtonTCP, ButtonFake def pytest_addoption(parser): parser.addoption("--hid", action="store_true") parser.addoption("--headless", action="store_true") @pytest.fixture(scope="module") def sw_h_path(): # path with tests conftest_folder_path: Path = Path(__file__).parent # sw.h should be in src/sw.h sw_h_path = conftest_folder_path.parent / "src" / "sw.h" if not sw_h_path.is_file(): raise FileNotFoundError(f"Can't find sw.h: '{sw_h_path}'") return sw_h_path @pytest.fixture(scope="module") def types_h_path(): # path with tests conftest_folder_path: Path = Path(__file__).parent # types.h should be in src/types.h types_h_path = conftest_folder_path.parent / "src" / "transaction" / "types.h" if not types_h_path.is_file(): raise FileNotFoundError(f"Can't find types.h: '{types_h_path}'") return types_h_path @pytest.fixture(scope="session") def hid(pytestconfig): return pytestconfig.getoption("hid") @pytest.fixture(scope="session") def headless(pytestconfig): return pytestconfig.getoption("headless") @pytest.fixture(scope="module") def button(headless): if headless: button_client = ButtonTCP(server="127.0.0.1", port=42000) else: button_client = ButtonFake() yield button_client button_client.close() @pytest.fixture(scope="session") def cmd(hid): transport = (Transport(interface="hid", debug=True) if hid else Transport(interface="tcp", server="127.0.0.1", port=9999, debug=True)) command = BoilerplateCommand( transport=transport, debug=True ) yield command command.transport.close()
from pathlib import Path import pytest from ledgercomm import Transport from boilerplate_client.boilerplate_cmd import BoilerplateCommand from boilerplate_client.button import ButtonTCP, ButtonFake def pytest_addoption(parser): parser.addoption("--hid", action="store_true") parser.addoption("--headless", action="store_true") @pytest.fixture(scope="module") def sw_h_path(): # path with tests conftest_folder_path: Path = Path(__file__).parent # sw.h should be in src/sw.h sw_h_path = conftest_folder_path.parent / "src" / "sw.h" if not sw_h_path.is_file(): raise FileNotFoundError(f"Can't find sw.h: '{sw_h_path}'") return sw_h_path @pytest.fixture(scope="module") def types_h_path(): # path with tests conftest_folder_path: Path = Path(__file__).parent # types.h should be in src/types.h types_h_path = conftest_folder_path.parent / "src" / "transaction" / "types.h" if not types_h_path.is_file(): raise FileNotFoundError(f"Can't find types.h: '{types_h_path}'") return types_h_path @pytest.fixture(scope="session") def hid(pytestconfig): return pytestconfig.getoption("hid") @pytest.fixture(scope="session") def headless(pytestconfig): return pytestconfig.getoption("headless") @pytest.fixture(scope="module") def button(headless): if headless: button_client = ButtonTCP(server="127.0.0.1", port=42000) else: button_client = ButtonFake() yield button_client button_client.close() @pytest.fixture(scope="session") def cmd(hid): transport = (Transport(interface="hid", debug=True) if hid else Transport(interface="tcp", server="127.0.0.1", port=9999, debug=True)) command = BoilerplateCommand( transport=transport, debug=True ) yield command command.transport.close()
en
0.83732
# path with tests # sw.h should be in src/sw.h # path with tests # types.h should be in src/types.h
2.096353
2
netbox/circuits/choices.py
TheFlyingCorpse/netbox
4,994
6628694
<filename>netbox/circuits/choices.py<gh_stars>1000+ from utilities.choices import ChoiceSet # # Circuits # class CircuitStatusChoices(ChoiceSet): STATUS_DEPROVISIONING = 'deprovisioning' STATUS_ACTIVE = 'active' STATUS_PLANNED = 'planned' STATUS_PROVISIONING = 'provisioning' STATUS_OFFLINE = 'offline' STATUS_DECOMMISSIONED = 'decommissioned' CHOICES = ( (STATUS_PLANNED, 'Planned'), (STATUS_PROVISIONING, 'Provisioning'), (STATUS_ACTIVE, 'Active'), (STATUS_OFFLINE, 'Offline'), (STATUS_DEPROVISIONING, 'Deprovisioning'), (STATUS_DECOMMISSIONED, 'Decommissioned'), ) CSS_CLASSES = { STATUS_DEPROVISIONING: 'warning', STATUS_ACTIVE: 'success', STATUS_PLANNED: 'info', STATUS_PROVISIONING: 'primary', STATUS_OFFLINE: 'danger', STATUS_DECOMMISSIONED: 'secondary', } # # CircuitTerminations # class CircuitTerminationSideChoices(ChoiceSet): SIDE_A = 'A' SIDE_Z = 'Z' CHOICES = ( (SIDE_A, 'A'), (SIDE_Z, 'Z') )
<filename>netbox/circuits/choices.py<gh_stars>1000+ from utilities.choices import ChoiceSet # # Circuits # class CircuitStatusChoices(ChoiceSet): STATUS_DEPROVISIONING = 'deprovisioning' STATUS_ACTIVE = 'active' STATUS_PLANNED = 'planned' STATUS_PROVISIONING = 'provisioning' STATUS_OFFLINE = 'offline' STATUS_DECOMMISSIONED = 'decommissioned' CHOICES = ( (STATUS_PLANNED, 'Planned'), (STATUS_PROVISIONING, 'Provisioning'), (STATUS_ACTIVE, 'Active'), (STATUS_OFFLINE, 'Offline'), (STATUS_DEPROVISIONING, 'Deprovisioning'), (STATUS_DECOMMISSIONED, 'Decommissioned'), ) CSS_CLASSES = { STATUS_DEPROVISIONING: 'warning', STATUS_ACTIVE: 'success', STATUS_PLANNED: 'info', STATUS_PROVISIONING: 'primary', STATUS_OFFLINE: 'danger', STATUS_DECOMMISSIONED: 'secondary', } # # CircuitTerminations # class CircuitTerminationSideChoices(ChoiceSet): SIDE_A = 'A' SIDE_Z = 'Z' CHOICES = ( (SIDE_A, 'A'), (SIDE_Z, 'Z') )
en
0.487375
# # Circuits # # # CircuitTerminations #
2.33049
2
tests/hikari/internal/test_ed25519.py
sabidib/hikari
520
6628695
# -*- coding: utf-8 -*- # Copyright (c) 2020 Nekokatt # Copyright (c) 2021 davfsa # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import pytest from hikari.internal import ed25519 @pytest.fixture() def valid_edd25519(): body = ( b'{"application_id":"658822586720976907","id":"838085779104202753","token":"<KEY>' b"<KEY>" b'<KEY>","type":1,"user":{"avatar":"b333580bd947' b'4630226ff7b0a9696231","discriminator":"6127","id":"115590097100865541","public_flags":131072,"username":"Fast' b'er Speeding"},"version":1}' ) signature = ( b"\xb4*\x91w\xf8\xfa{\x8f\xdf\xc3%\xaa\x81nl\xdej\x9aS\xdeq\xe5\x97\xb8$\x8f\xc6\xd4?Y\x1c\x85+\xcf\x93\xc1\xd5" b"\xea-\xfe-\x97s\xe04\xb6a:k\xbb\x12\xa4\xa0\x19\xb1P\xf6s\x8e\r'\xab\xbe\x07" ) timestamp = b"1619885621" return (body, signature, timestamp) @pytest.fixture() def invalid_ed25519(): body = ( b'{"application_id":"658822586720976907","id":"838085779104202754","token":"<KEY>' b"<KEY>" b'RWRBY0ltZTRTS0NneFFSYW1BbDZxSkpnMkEwejlkTldXZUh2OGwzbnBrMzhscURIMXUz","type":1,"user":{"avatar":"b333580bd947' b'4630226ff7b0a9696231","discriminator":"6127","id":"115590097100865541","public_flags":13' b'1072,"username":"Faster Speeding"},"version":1}' ) signature = ( b"\x0c4\xda!\xd9\xd5\x08<{a\x0c\xfa\xe6\xd2\x9e\xb3\xe0\x17r\x83\xa8\xb5\xda\xaa\x97\n\xb5\xe1\x92A|\x94\xbb" b"\x8aGu\xdb\xd6\x19\xd5\x94\x98\x08\xb4\x1a\xfaF@\xbbx\xc9\xa3\x8f\x1f\x13\t\xd81\xa3:\xa9%p\x0c" ) timestamp = b"1619885620" return (body, signature, timestamp) @pytest.fixture() def public_key(): return b"\<KEY>" class TestSlowED25519Verifier: @pytest.mark.parametrize( ("key", "message"), [ ("okokokokokokokokokokokokokokokok", "Invalid type passed for public key"), (b"NO", "Invalid public key passed"), ], ) def test_handles_invalid_public_key(self, key, message): with pytest.raises(ValueError, match=message): ed25519.build_slow_ed25519_verifier(key) def test_verify_matches(self, valid_edd25519, public_key): verifier = ed25519.build_slow_ed25519_verifier(public_key) assert verifier(*valid_edd25519) is True def test_verify_rejects(self, invalid_ed25519, public_key): verifier = 
ed25519.build_slow_ed25519_verifier(public_key) assert verifier(*invalid_ed25519) is False @pytest.mark.skipif(ed25519.build_fast_ed25519_verifier is None, reason="Fast ed25519 verifier impl not present") class TestFastED25519Verifier: @pytest.mark.parametrize( ("key", "message"), [ ("okokokokokokokokokokokokokokokok", "Invalid type passed for public key"), (b"NO", "Invalid public key passed"), ], ) def test_handles_invalid_public_key(self, key, message): with pytest.raises(ValueError, match=message): ed25519.build_fast_ed25519_verifier(key) def test_verify_matches(self, valid_edd25519, public_key): verifier = ed25519.build_fast_ed25519_verifier(public_key) assert verifier(*valid_edd25519) is True def test_verify_rejects(self, invalid_ed25519, public_key): verifier = ed25519.build_fast_ed25519_verifier(public_key) assert verifier(*invalid_ed25519) is False @pytest.mark.skipif(ed25519.build_fast_ed25519_verifier is not None, reason="Fast ed25519 verifier impl not present") def test_build_ed25519_verifier_set_as_fast_impl(): assert ed25519.build_ed25519_verifier is ed25519.build_slow_ed25519_verifier @pytest.mark.skipif(ed25519.build_fast_ed25519_verifier is None, reason="Fast ed25519 verifier impl present") def test_build_ed25519_verifier_set_as_slow_impl(): assert ed25519.build_ed25519_verifier is ed25519.build_fast_ed25519_verifier
# -*- coding: utf-8 -*- # Copyright (c) 2020 Nekokatt # Copyright (c) 2021 davfsa # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import pytest from hikari.internal import ed25519 @pytest.fixture() def valid_edd25519(): body = ( b'{"application_id":"658822586720976907","id":"838085779104202753","token":"<KEY>' b"<KEY>" b'<KEY>","type":1,"user":{"avatar":"b333580bd947' b'4630226ff7b0a9696231","discriminator":"6127","id":"115590097100865541","public_flags":131072,"username":"Fast' b'er Speeding"},"version":1}' ) signature = ( b"\xb4*\x91w\xf8\xfa{\x8f\xdf\xc3%\xaa\x81nl\xdej\x9aS\xdeq\xe5\x97\xb8$\x8f\xc6\xd4?Y\x1c\x85+\xcf\x93\xc1\xd5" b"\xea-\xfe-\x97s\xe04\xb6a:k\xbb\x12\xa4\xa0\x19\xb1P\xf6s\x8e\r'\xab\xbe\x07" ) timestamp = b"1619885621" return (body, signature, timestamp) @pytest.fixture() def invalid_ed25519(): body = ( b'{"application_id":"658822586720976907","id":"838085779104202754","token":"<KEY>' b"<KEY>" b'RWRBY0ltZTRTS0NneFFSYW1BbDZxSkpnMkEwejlkTldXZUh2OGwzbnBrMzhscURIMXUz","type":1,"user":{"avatar":"b333580bd947' b'4630226ff7b0a9696231","discriminator":"6127","id":"115590097100865541","public_flags":13' b'1072,"username":"Faster Speeding"},"version":1}' ) signature = ( b"\x0c4\xda!\xd9\xd5\x08<{a\x0c\xfa\xe6\xd2\x9e\xb3\xe0\x17r\x83\xa8\xb5\xda\xaa\x97\n\xb5\xe1\x92A|\x94\xbb" b"\x8aGu\xdb\xd6\x19\xd5\x94\x98\x08\xb4\x1a\xfaF@\xbbx\xc9\xa3\x8f\x1f\x13\t\xd81\xa3:\xa9%p\x0c" ) timestamp = b"1619885620" return (body, signature, timestamp) @pytest.fixture() def public_key(): return b"\<KEY>" class TestSlowED25519Verifier: @pytest.mark.parametrize( ("key", "message"), [ ("okokokokokokokokokokokokokokokok", "Invalid type passed for public key"), (b"NO", "Invalid public key passed"), ], ) def test_handles_invalid_public_key(self, key, message): with pytest.raises(ValueError, match=message): ed25519.build_slow_ed25519_verifier(key) def test_verify_matches(self, valid_edd25519, public_key): verifier = ed25519.build_slow_ed25519_verifier(public_key) assert verifier(*valid_edd25519) is True def test_verify_rejects(self, invalid_ed25519, public_key): verifier = 
ed25519.build_slow_ed25519_verifier(public_key) assert verifier(*invalid_ed25519) is False @pytest.mark.skipif(ed25519.build_fast_ed25519_verifier is None, reason="Fast ed25519 verifier impl not present") class TestFastED25519Verifier: @pytest.mark.parametrize( ("key", "message"), [ ("okokokokokokokokokokokokokokokok", "Invalid type passed for public key"), (b"NO", "Invalid public key passed"), ], ) def test_handles_invalid_public_key(self, key, message): with pytest.raises(ValueError, match=message): ed25519.build_fast_ed25519_verifier(key) def test_verify_matches(self, valid_edd25519, public_key): verifier = ed25519.build_fast_ed25519_verifier(public_key) assert verifier(*valid_edd25519) is True def test_verify_rejects(self, invalid_ed25519, public_key): verifier = ed25519.build_fast_ed25519_verifier(public_key) assert verifier(*invalid_ed25519) is False @pytest.mark.skipif(ed25519.build_fast_ed25519_verifier is not None, reason="Fast ed25519 verifier impl not present") def test_build_ed25519_verifier_set_as_fast_impl(): assert ed25519.build_ed25519_verifier is ed25519.build_slow_ed25519_verifier @pytest.mark.skipif(ed25519.build_fast_ed25519_verifier is None, reason="Fast ed25519 verifier impl present") def test_build_ed25519_verifier_set_as_slow_impl(): assert ed25519.build_ed25519_verifier is ed25519.build_fast_ed25519_verifier
en
0.77213
# -*- coding: utf-8 -*- # Copyright (c) 2020 Nekokatt # Copyright (c) 2021 davfsa # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.
1.489065
1
installer/win/PyQt-Py2.6-x86-gpl-4.8.3-1/Lib/site-packages/PyQt4/uic/Loader/loader.py
dongniu/cadnano2
17
6628696
############################################################################# ## ## Copyright (C) 2011 Riverbank Computing Limited. ## Copyright (C) 2006 <NAME>. ## All right reserved. ## ## This file is part of PyQt. ## ## You may use this file under the terms of the GPL v2 or the revised BSD ## license as follows: ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are ## met: ## * Redistributions of source code must retain the above copyright ## notice, this list of conditions and the following disclaimer. ## * Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in ## the documentation and/or other materials provided with the ## distribution. ## * Neither the name of the Riverbank Computing Limited nor the names ## of its contributors may be used to endorse or promote products ## derived from this software without specific prior written ## permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." 
## ############################################################################# import os.path from PyQt4 import QtGui, QtCore from PyQt4.uic.uiparser import UIParser from PyQt4.uic.Loader.qobjectcreator import LoaderCreatorPolicy class DynamicUILoader(UIParser): def __init__(self): UIParser.__init__(self, QtCore, QtGui, LoaderCreatorPolicy()) def createToplevelWidget(self, classname, widgetname): if self.toplevelInst is not None: if not isinstance(self.toplevelInst, self.factory.findQObjectType(classname)): raise TypeError(("Wrong base class of toplevel widget", (type(self.toplevelInst), classname))) return self.toplevelInst else: return self.factory.createQObject(classname, widgetname, ()) def loadUi(self, filename, toplevelInst=None): self.toplevelInst = toplevelInst if hasattr(filename, 'read'): basedir = '' else: # Allow the filename to be a QString. filename = str(filename) basedir = os.path.dirname(filename) return self.parse(filename, basedir)
############################################################################# ## ## Copyright (C) 2011 Riverbank Computing Limited. ## Copyright (C) 2006 <NAME>. ## All right reserved. ## ## This file is part of PyQt. ## ## You may use this file under the terms of the GPL v2 or the revised BSD ## license as follows: ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are ## met: ## * Redistributions of source code must retain the above copyright ## notice, this list of conditions and the following disclaimer. ## * Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in ## the documentation and/or other materials provided with the ## distribution. ## * Neither the name of the Riverbank Computing Limited nor the names ## of its contributors may be used to endorse or promote products ## derived from this software without specific prior written ## permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." 
## ############################################################################# import os.path from PyQt4 import QtGui, QtCore from PyQt4.uic.uiparser import UIParser from PyQt4.uic.Loader.qobjectcreator import LoaderCreatorPolicy class DynamicUILoader(UIParser): def __init__(self): UIParser.__init__(self, QtCore, QtGui, LoaderCreatorPolicy()) def createToplevelWidget(self, classname, widgetname): if self.toplevelInst is not None: if not isinstance(self.toplevelInst, self.factory.findQObjectType(classname)): raise TypeError(("Wrong base class of toplevel widget", (type(self.toplevelInst), classname))) return self.toplevelInst else: return self.factory.createQObject(classname, widgetname, ()) def loadUi(self, filename, toplevelInst=None): self.toplevelInst = toplevelInst if hasattr(filename, 'read'): basedir = '' else: # Allow the filename to be a QString. filename = str(filename) basedir = os.path.dirname(filename) return self.parse(filename, basedir)
en
0.578051
############################################################################# ## ## Copyright (C) 2011 Riverbank Computing Limited. ## Copyright (C) 2006 <NAME>. ## All right reserved. ## ## This file is part of PyQt. ## ## You may use this file under the terms of the GPL v2 or the revised BSD ## license as follows: ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are ## met: ## * Redistributions of source code must retain the above copyright ## notice, this list of conditions and the following disclaimer. ## * Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in ## the documentation and/or other materials provided with the ## distribution. ## * Neither the name of the Riverbank Computing Limited nor the names ## of its contributors may be used to endorse or promote products ## derived from this software without specific prior written ## permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." ## ############################################################################# # Allow the filename to be a QString.
1.099597
1
src/transfer/utils.py
jordiae/DeepLearning-MAI
1
6628697
<gh_stars>1-10 import os from PIL import Image import torch import torch.nn.functional as F from torch import nn from typing import List import torchvision def dir_path(s: str): if os.path.isdir(s): return s else: raise NotADirectoryError(s) def get_num_pixels(img_path: str): width, height = Image.open(img_path).size return width, height def load_model(pretrained_model: str, pre_conv: bool, mode: str, transfer_strategy: str): from transfer.models import build_pretrained model, transform_in = build_pretrained(pretrained_model, pretrained=mode == 'train', n_classes=67, input_size=(256, 256), transfer_strategy=transfer_strategy, preconv=pre_conv) return model, transform_in class ComposedOptimizer: def __init__(self, optimizers: List[torch.optim.Optimizer]): self.optimizers = optimizers def zero_grad(self): for opt in self.optimizers: opt.zero_grad() def step(self): for opt in self.optimizers: opt.step() class LabelSmoothingLoss(nn.Module): def __init__(self, smoothing=0.0): super(LabelSmoothingLoss, self).__init__() self.smoothing = smoothing def smooth_one_hot(self, target: torch.Tensor, classes: int, smoothing: float = 0.0): assert 0 <= smoothing < 1 shape = (target.size(0), classes) with torch.no_grad(): target = torch.empty(size=shape, device=target.device) \ .fill_(smoothing / (classes - 1)) \ .scatter_(1, target.data.unsqueeze(1), 1. - smoothing) return target def forward(self, input: torch.Tensor, target: torch.Tensor): target = LabelSmoothingLoss.smooth_one_hot(self, target, input.size(-1), self.smoothing) lsm = F.log_softmax(input, -1) loss = -(target * lsm).sum(-1) loss = loss.mean() return loss
import os from PIL import Image import torch import torch.nn.functional as F from torch import nn from typing import List import torchvision def dir_path(s: str): if os.path.isdir(s): return s else: raise NotADirectoryError(s) def get_num_pixels(img_path: str): width, height = Image.open(img_path).size return width, height def load_model(pretrained_model: str, pre_conv: bool, mode: str, transfer_strategy: str): from transfer.models import build_pretrained model, transform_in = build_pretrained(pretrained_model, pretrained=mode == 'train', n_classes=67, input_size=(256, 256), transfer_strategy=transfer_strategy, preconv=pre_conv) return model, transform_in class ComposedOptimizer: def __init__(self, optimizers: List[torch.optim.Optimizer]): self.optimizers = optimizers def zero_grad(self): for opt in self.optimizers: opt.zero_grad() def step(self): for opt in self.optimizers: opt.step() class LabelSmoothingLoss(nn.Module): def __init__(self, smoothing=0.0): super(LabelSmoothingLoss, self).__init__() self.smoothing = smoothing def smooth_one_hot(self, target: torch.Tensor, classes: int, smoothing: float = 0.0): assert 0 <= smoothing < 1 shape = (target.size(0), classes) with torch.no_grad(): target = torch.empty(size=shape, device=target.device) \ .fill_(smoothing / (classes - 1)) \ .scatter_(1, target.data.unsqueeze(1), 1. - smoothing) return target def forward(self, input: torch.Tensor, target: torch.Tensor): target = LabelSmoothingLoss.smooth_one_hot(self, target, input.size(-1), self.smoothing) lsm = F.log_softmax(input, -1) loss = -(target * lsm).sum(-1) loss = loss.mean() return loss
none
1
2.469802
2
python_file_shuffle/__init__.py
yjg30737/python-file-shuffle
0
6628698
<gh_stars>0 from .python_file_shuffle import *
from .python_file_shuffle import *
none
1
1.017159
1
homework3 [MAIN PROJECT]/project/helpers/models.py
Gaon-Choi/ITE2038_
0
6628699
from dataclasses import dataclass from typing import Any, List @dataclass class Bank: bid: int code: int name: str
from dataclasses import dataclass from typing import Any, List @dataclass class Bank: bid: int code: int name: str
none
1
2.246039
2
test/sim.unittest.py
umd-lhcb/pyUTM
1
6628700
<reponame>umd-lhcb/pyUTM<filename>test/sim.unittest.py<gh_stars>1-10 #!/usr/bin/env python # # License: BSD 2-clause # Last Change: Tue Feb 26, 2019 at 02:38 PM -0500 import unittest # from math import factorial import sys sys.path.insert(0, '..') from pyUTM.sim import CurrentFlow class CurrentFlowTester(unittest.TestCase): def test_strip(self): flow = CurrentFlow() net_dict = { 'Net1': [('R1', '1'), ('C2', '1'), ('M1', '1')], 'Net2': [('R1', '2'), ('R2', '2'), ('NT3', '1'), ('M2', '2')], } self.assertEqual( flow.strip(net_dict), { 'Net1': ['R1', 'C2'], 'Net2': ['R1', 'R2', 'NT3'], } ) def test_diff_case1(self): self.assertEqual( CurrentFlow.diff(['R1', 'C2'], ['R2', 'C1']), ['R1', 'C2'] ) def test_diff_case2(self): self.assertEqual( CurrentFlow.diff(['R1', 'C2'], ['C1', 'C2']), ['R1'] ) def test_swap_key_to_value(self): net_to_comp = { 'Net1': ['R1', 'R2'], 'Net2': ['R1'], 'Net3': ['R2'] } self.assertEqual( dict(CurrentFlow.swap_key_to_value(net_to_comp)), { 'R1': ['Net1', 'Net2'], 'R2': ['Net1', 'Net3'] } ) def test_find_all_flows_case1(self): net_to_comp = { 'Net1': ['R1', 'R2'], 'Net2': ['R1'], 'Net3': ['R2'] } comp_to_net = CurrentFlow.swap_key_to_value(net_to_comp) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net1', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3'] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net2', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3'] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net3', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3'] ) def test_find_all_flows_case2(self): net_to_comp = { 'Net1': ['R1', 'R2'], 'Net2': ['R1'], 'Net3': ['R3'], 'Net4': ['R2', 'R3', 'R4'], 'Net5': ['R4', 'R5', 'R6', 'R7', 'R8'], 'Net6': ['R5'], 'Net7': ['R6'] } comp_to_net = CurrentFlow.swap_key_to_value(net_to_comp) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net1', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3', 'Net4', 'Net5', 'Net6', 'Net7'] ) self.assertEqual( sorted( 
CurrentFlow.find_all_flows('Net2', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3', 'Net4', 'Net5', 'Net6', 'Net7'] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net3', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3', 'Net4', 'Net5', 'Net6', 'Net7'] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net4', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3', 'Net4', 'Net5', 'Net6', 'Net7'] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net5', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3', 'Net4', 'Net5', 'Net6', 'Net7'] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net6', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3', 'Net4', 'Net5', 'Net6', 'Net7'] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net7', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3', 'Net4', 'Net5', 'Net6', 'Net7'] ) def test_realistic(self): real_nets = { 'Net1': [('R1', 1), ('R2', 1)], 'Net2': [('R1', 2)], 'Net3': [('R3', 1)], 'Net4': [('R2', 2), ('R3', 2)] } worker = CurrentFlow() self.assertEqual( list(map(sorted, worker.do(real_nets))), [['Net1', 'Net2', 'Net3', 'Net4']] ) def test_compnents_appear_more_than_twice(self): net_to_comp = { 'Net1': ['CxRB_320', 'CxRB_320', 'CxRB_320', 'R2'], 'Net2': ['CxRB_320', 'CxRB_320', 'CxRB_320'], } comp_to_net = CurrentFlow.swap_key_to_value(net_to_comp) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net1', net_to_comp, comp_to_net)), ['Net1', 'Net2', ] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net2', net_to_comp, comp_to_net)), ['Net1', 'Net2', ] ) if __name__ == '__main__': unittest.main()
#!/usr/bin/env python # # License: BSD 2-clause # Last Change: Tue Feb 26, 2019 at 02:38 PM -0500 import unittest # from math import factorial import sys sys.path.insert(0, '..') from pyUTM.sim import CurrentFlow class CurrentFlowTester(unittest.TestCase): def test_strip(self): flow = CurrentFlow() net_dict = { 'Net1': [('R1', '1'), ('C2', '1'), ('M1', '1')], 'Net2': [('R1', '2'), ('R2', '2'), ('NT3', '1'), ('M2', '2')], } self.assertEqual( flow.strip(net_dict), { 'Net1': ['R1', 'C2'], 'Net2': ['R1', 'R2', 'NT3'], } ) def test_diff_case1(self): self.assertEqual( CurrentFlow.diff(['R1', 'C2'], ['R2', 'C1']), ['R1', 'C2'] ) def test_diff_case2(self): self.assertEqual( CurrentFlow.diff(['R1', 'C2'], ['C1', 'C2']), ['R1'] ) def test_swap_key_to_value(self): net_to_comp = { 'Net1': ['R1', 'R2'], 'Net2': ['R1'], 'Net3': ['R2'] } self.assertEqual( dict(CurrentFlow.swap_key_to_value(net_to_comp)), { 'R1': ['Net1', 'Net2'], 'R2': ['Net1', 'Net3'] } ) def test_find_all_flows_case1(self): net_to_comp = { 'Net1': ['R1', 'R2'], 'Net2': ['R1'], 'Net3': ['R2'] } comp_to_net = CurrentFlow.swap_key_to_value(net_to_comp) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net1', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3'] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net2', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3'] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net3', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3'] ) def test_find_all_flows_case2(self): net_to_comp = { 'Net1': ['R1', 'R2'], 'Net2': ['R1'], 'Net3': ['R3'], 'Net4': ['R2', 'R3', 'R4'], 'Net5': ['R4', 'R5', 'R6', 'R7', 'R8'], 'Net6': ['R5'], 'Net7': ['R6'] } comp_to_net = CurrentFlow.swap_key_to_value(net_to_comp) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net1', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3', 'Net4', 'Net5', 'Net6', 'Net7'] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net2', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3', 
'Net4', 'Net5', 'Net6', 'Net7'] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net3', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3', 'Net4', 'Net5', 'Net6', 'Net7'] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net4', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3', 'Net4', 'Net5', 'Net6', 'Net7'] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net5', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3', 'Net4', 'Net5', 'Net6', 'Net7'] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net6', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3', 'Net4', 'Net5', 'Net6', 'Net7'] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net7', net_to_comp, comp_to_net)), ['Net1', 'Net2', 'Net3', 'Net4', 'Net5', 'Net6', 'Net7'] ) def test_realistic(self): real_nets = { 'Net1': [('R1', 1), ('R2', 1)], 'Net2': [('R1', 2)], 'Net3': [('R3', 1)], 'Net4': [('R2', 2), ('R3', 2)] } worker = CurrentFlow() self.assertEqual( list(map(sorted, worker.do(real_nets))), [['Net1', 'Net2', 'Net3', 'Net4']] ) def test_compnents_appear_more_than_twice(self): net_to_comp = { 'Net1': ['CxRB_320', 'CxRB_320', 'CxRB_320', 'R2'], 'Net2': ['CxRB_320', 'CxRB_320', 'CxRB_320'], } comp_to_net = CurrentFlow.swap_key_to_value(net_to_comp) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net1', net_to_comp, comp_to_net)), ['Net1', 'Net2', ] ) self.assertEqual( sorted( CurrentFlow.find_all_flows('Net2', net_to_comp, comp_to_net)), ['Net1', 'Net2', ] ) if __name__ == '__main__': unittest.main()
en
0.669439
#!/usr/bin/env python # # License: BSD 2-clause # Last Change: Tue Feb 26, 2019 at 02:38 PM -0500 # from math import factorial
2.333716
2
src/fine_tune.py
yeguixin/captcha_solver
156
6628701
<gh_stars>100-1000 # Copyright 2018 Northwest University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from options.tune_options import TuneOptions from generator.generator import Generator from models.models import creat_model from keras.optimizers import adam # NB_RETRAIN_LAYERS = 20 # epochs = 200 opt = TuneOptions().parse() # opt.isTune = True # opt.batchSize = 32 generator = Generator(opt) train_generator = generator.real_generator('train') val_generator = generator.real_generator('val') num_train_samples = generator.num_train_samples num_test_sample = generator.num_test_sample model = creat_model(opt) model.load_weight() # setup_to_finetune(model) # model.setup_to_finetune() # history_ft = model.fit_generator( # train_generator, # steps_per_epoch=num_train_samples // opt.batchSize, # epochs=opt.epoch, # validation_data=val_generator, # validation_steps=num_test_sample // opt.batchSize # ) # # model.save(history_ft) test_data = [generator.x_test, generator.y_test] # test_data = [generator.x_train, generator.y_train] model.predict(test_data, batch_size=opt.batchSize) # if opt.plot: # model.plot_training(history_ft)
# Copyright 2018 Northwest University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from options.tune_options import TuneOptions from generator.generator import Generator from models.models import creat_model from keras.optimizers import adam # NB_RETRAIN_LAYERS = 20 # epochs = 200 opt = TuneOptions().parse() # opt.isTune = True # opt.batchSize = 32 generator = Generator(opt) train_generator = generator.real_generator('train') val_generator = generator.real_generator('val') num_train_samples = generator.num_train_samples num_test_sample = generator.num_test_sample model = creat_model(opt) model.load_weight() # setup_to_finetune(model) # model.setup_to_finetune() # history_ft = model.fit_generator( # train_generator, # steps_per_epoch=num_train_samples // opt.batchSize, # epochs=opt.epoch, # validation_data=val_generator, # validation_steps=num_test_sample // opt.batchSize # ) # # model.save(history_ft) test_data = [generator.x_test, generator.y_test] # test_data = [generator.x_train, generator.y_train] model.predict(test_data, batch_size=opt.batchSize) # if opt.plot: # model.plot_training(history_ft)
en
0.672529
# Copyright 2018 Northwest University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # NB_RETRAIN_LAYERS = 20 # epochs = 200 # opt.isTune = True # opt.batchSize = 32 # setup_to_finetune(model) # model.setup_to_finetune() # history_ft = model.fit_generator( # train_generator, # steps_per_epoch=num_train_samples // opt.batchSize, # epochs=opt.epoch, # validation_data=val_generator, # validation_steps=num_test_sample // opt.batchSize # ) # # model.save(history_ft) # test_data = [generator.x_train, generator.y_train] # if opt.plot: # model.plot_training(history_ft)
2.064619
2
minihack/envs/minigrid.py
samvelyan/minihack-1
0
6628702
<filename>minihack/envs/minigrid.py # Copyright (c) Facebook, Inc. and its affiliates. from minihack import MiniHackNavigation, LevelGenerator from nle.nethack import Command, CompassDirection from minihack.envs import register import gym MOVE_AND_KICK_ACTIONS = tuple( list(CompassDirection) + [Command.OPEN, Command.KICK] ) class MiniGridHack(MiniHackNavigation): def __init__(self, *args, **kwargs): # Only ask users to install gym-minigrid if they actually need it try: import gym_minigrid # noqa: F401 except ModuleNotFoundError: raise ModuleNotFoundError( "To use MiniGrid-based environments, please install" " gym-minigrid: pip3 install gym-minigrid" ) self.minigrid_env = gym.make(kwargs.pop("env_name")) self.num_mon = kwargs.pop("num_mon", 0) self.num_trap = kwargs.pop("num_trap", 0) self.door_state = kwargs.pop("door_state", "closed") if self.door_state == "locked": kwargs["actions"] = MOVE_AND_KICK_ACTIONS lava_walls = kwargs.pop("lava_walls", False) if lava_walls: self.wall = "L" else: self.wall = "|" des_file = self.get_env_desc() super().__init__(*args, des_file=des_file, **kwargs) def get_env_map(self, env): door_pos = [] goal_pos = None empty_strs = 0 empty_str = True env_map = [] for j in range(env.grid.height): str = "" for i in range(env.width): c = env.grid.get(i, j) if c is None: str += "." continue empty_str = False if c.type == "wall": str += self.wall elif c.type == "door": str += "+" door_pos.append((i, j - empty_strs)) elif c.type == "floor": str += "." elif c.type == "lava": str += "L" elif c.type == "goal": goal_pos = (i, j - empty_strs) str += "." elif c.type == "player": str += "." 
if not empty_str and j < env.grid.height - 1: if set(str) != {"."}: str = str.replace(".", " ", str.index(self.wall)) inv = str[::-1] str = inv.replace(".", " ", inv.index(self.wall))[::-1] env_map.append(str) elif empty_str: empty_strs += 1 start_pos = (int(env.agent_pos[0]), int(env.agent_pos[1]) - empty_strs) env_map = "\n".join(env_map) return env_map, start_pos, goal_pos, door_pos def get_env_desc(self): self.minigrid_env.reset() env = self.minigrid_env map, start_pos, goal_pos, door_pos = self.get_env_map(env) lev_gen = LevelGenerator(map=map) lev_gen.add_goal_pos(goal_pos) lev_gen.set_start_pos(start_pos) for d in door_pos: lev_gen.add_door(self.door_state, d) lev_gen.wallify() for _ in range(self.num_mon): lev_gen.add_monster() for _ in range(self.num_trap): lev_gen.add_trap() return lev_gen.get_des() def reset(self, wizkit_items=None): des_file = self.get_env_desc() self.update(des_file) return super().reset(wizkit_items=wizkit_items) class MiniHackMultiRoomN2(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 40) super().__init__( *args, env_name="MiniGrid-MultiRoom-N2-S4-v0", **kwargs ) class MiniHackMultiRoomN4(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 120) super().__init__( *args, env_name="MiniGrid-MultiRoom-N4-S5-v0", **kwargs ) class MiniHackMultiRoomN6(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 240) super().__init__(*args, env_name="MiniGrid-MultiRoom-N6-v0", **kwargs) register( id="MiniHack-MultiRoom-N2-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN2", ) register( id="MiniHack-MultiRoom-N4-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN4", ) register( id="MiniHack-MultiRoom-N6-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN6", ) # MiniGrid: LockedMultiRoom class MiniHackMultiRoomN2Locked(MiniGridHack): def 
__init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 40) kwargs["door_state"] = "locked" super().__init__( *args, env_name="MiniGrid-MultiRoom-N2-S4-v0", **kwargs ) class MiniHackMultiRoomN4Locked(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 120) kwargs["door_state"] = "locked" super().__init__( *args, env_name="MiniGrid-MultiRoom-N4-S5-v0", **kwargs ) class MiniHackMultiRoomN6Locked(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 240) kwargs["door_state"] = "locked" super().__init__(*args, env_name="MiniGrid-MultiRoom-N6-v0", **kwargs) register( id="MiniHack-MultiRoom-N2-Locked-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN2Locked", ) register( id="MiniHack-MultiRoom-N4-Locked-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN4Locked", ) register( id="MiniHack-MultiRoom-N6-Locked-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN6Locked", ) # MiniGrid: LavaMultiRoom class MiniHackMultiRoomN2Lava(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 40) kwargs["lava_walls"] = True super().__init__( *args, env_name="MiniGrid-MultiRoom-N2-S4-v0", **kwargs ) class MiniHackMultiRoomN4Lava(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 120) kwargs["lava_walls"] = True super().__init__( *args, env_name="MiniGrid-MultiRoom-N4-S5-v0", **kwargs ) class MiniHackMultiRoomN6Lava(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 240) kwargs["lava_walls"] = True super().__init__(*args, env_name="MiniGrid-MultiRoom-N6-v0", **kwargs) register( id="MiniHack-MultiRoom-N2-Lava-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN2Lava", ) register( id="MiniHack-MultiRoom-N4-Lava-v0", 
entry_point="minihack.envs.minigrid:MiniHackMultiRoomN4Lava", ) register( id="MiniHack-MultiRoom-N6-Lava-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN6Lava", ) # MiniGrid: MonsterpedMultiRoom class MiniHackMultiRoomN2Monster(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 40) kwargs["num_mon"] = 3 super().__init__( *args, env_name="MiniGrid-MultiRoom-N2-S4-v0", **kwargs ) class MiniHackMultiRoomN4Monster(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 120) kwargs["num_mon"] = 6 super().__init__( *args, env_name="MiniGrid-MultiRoom-N4-S5-v0", **kwargs ) class MiniHackMultiRoomN6Monster(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 240) kwargs["num_mon"] = 9 super().__init__(*args, env_name="MiniGrid-MultiRoom-N6-v0", **kwargs) # MiniGrid: MonsterMultiRoom register( id="MiniHack-MultiRoom-N2-Monster-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN2Monster", ) register( id="MiniHack-MultiRoom-N4-Monster-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN4Monster", ) register( id="MiniHack-MultiRoom-N6-Monster-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN6Monster", ) # MiniGrid: ExtremeMultiRoom class MiniHackMultiRoomN2Extreme(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 40) kwargs["num_mon"] = 3 kwargs["lava_walls"] = True kwargs["door_state"] = "locked" super().__init__( *args, env_name="MiniGrid-MultiRoom-N2-S4-v0", **kwargs ) class MiniHackMultiRoomN4Extreme(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 120) kwargs["num_mon"] = 6 kwargs["lava_walls"] = True kwargs["door_state"] = "locked" super().__init__( *args, env_name="MiniGrid-MultiRoom-N4-S5-v0", **kwargs ) class 
MiniHackMultiRoomN6Extreme(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 240) kwargs["num_mon"] = 9 kwargs["lava_walls"] = True kwargs["door_state"] = "locked" super().__init__(*args, env_name="MiniGrid-MultiRoom-N6-v0", **kwargs) register( id="MiniHack-MultiRoom-N2-Extreme-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN2Extreme", ) register( id="MiniHack-MultiRoom-N4-Extreme-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN4Extreme", ) register( id="MiniHack-MultiRoom-N6-Extreme-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN6Extreme", ) # MiniGrid: LavaCrossing register( id="MiniHack-LavaCrossingS9N1-v0", entry_point="minihack.envs.minigrid:MiniGridHack", kwargs={"env_name": "MiniGrid-LavaCrossingS9N1-v0"}, ) register( id="MiniHack-LavaCrossingS9N2-v0", entry_point="minihack.envs.minigrid:MiniGridHack", kwargs={"env_name": "MiniGrid-LavaCrossingS9N2-v0"}, ) register( id="MiniHack-LavaCrossingS9N3-v0", entry_point="minihack.envs.minigrid:MiniGridHack", kwargs={"env_name": "MiniGrid-LavaCrossingS9N3-v0"}, ) register( id="MiniHack-LavaCrossingS11N5-v0", entry_point="minihack.envs.minigrid:MiniGridHack", kwargs={"env_name": "MiniGrid-LavaCrossingS11N5-v0"}, ) # MiniGrid: Simple Crossing register( id="MiniHack-SimpleCrossingS9N1-v0", entry_point="minihack.envs.minigrid:MiniGridHack", kwargs={"env_name": "MiniGrid-SimpleCrossingS9N1-v0"}, ) register( id="MiniHack-SimpleCrossingS9N2-v0", entry_point="minihack.envs.minigrid:MiniGridHack", kwargs={"env_name": "MiniGrid-SimpleCrossingS9N2-v0"}, ) register( id="MiniHack-SimpleCrossingS9N3-v0", entry_point="minihack.envs.minigrid:MiniGridHack", kwargs={"env_name": "MiniGrid-SimpleCrossingS9N3-v0"}, ) register( id="MiniHack-SimpleCrossingS11N5-v0", entry_point="minihack.envs.minigrid:MiniGridHack", kwargs={"env_name": "MiniGrid-SimpleCrossingS11N5-v0"}, )
<filename>minihack/envs/minigrid.py # Copyright (c) Facebook, Inc. and its affiliates. from minihack import MiniHackNavigation, LevelGenerator from nle.nethack import Command, CompassDirection from minihack.envs import register import gym MOVE_AND_KICK_ACTIONS = tuple( list(CompassDirection) + [Command.OPEN, Command.KICK] ) class MiniGridHack(MiniHackNavigation): def __init__(self, *args, **kwargs): # Only ask users to install gym-minigrid if they actually need it try: import gym_minigrid # noqa: F401 except ModuleNotFoundError: raise ModuleNotFoundError( "To use MiniGrid-based environments, please install" " gym-minigrid: pip3 install gym-minigrid" ) self.minigrid_env = gym.make(kwargs.pop("env_name")) self.num_mon = kwargs.pop("num_mon", 0) self.num_trap = kwargs.pop("num_trap", 0) self.door_state = kwargs.pop("door_state", "closed") if self.door_state == "locked": kwargs["actions"] = MOVE_AND_KICK_ACTIONS lava_walls = kwargs.pop("lava_walls", False) if lava_walls: self.wall = "L" else: self.wall = "|" des_file = self.get_env_desc() super().__init__(*args, des_file=des_file, **kwargs) def get_env_map(self, env): door_pos = [] goal_pos = None empty_strs = 0 empty_str = True env_map = [] for j in range(env.grid.height): str = "" for i in range(env.width): c = env.grid.get(i, j) if c is None: str += "." continue empty_str = False if c.type == "wall": str += self.wall elif c.type == "door": str += "+" door_pos.append((i, j - empty_strs)) elif c.type == "floor": str += "." elif c.type == "lava": str += "L" elif c.type == "goal": goal_pos = (i, j - empty_strs) str += "." elif c.type == "player": str += "." 
if not empty_str and j < env.grid.height - 1: if set(str) != {"."}: str = str.replace(".", " ", str.index(self.wall)) inv = str[::-1] str = inv.replace(".", " ", inv.index(self.wall))[::-1] env_map.append(str) elif empty_str: empty_strs += 1 start_pos = (int(env.agent_pos[0]), int(env.agent_pos[1]) - empty_strs) env_map = "\n".join(env_map) return env_map, start_pos, goal_pos, door_pos def get_env_desc(self): self.minigrid_env.reset() env = self.minigrid_env map, start_pos, goal_pos, door_pos = self.get_env_map(env) lev_gen = LevelGenerator(map=map) lev_gen.add_goal_pos(goal_pos) lev_gen.set_start_pos(start_pos) for d in door_pos: lev_gen.add_door(self.door_state, d) lev_gen.wallify() for _ in range(self.num_mon): lev_gen.add_monster() for _ in range(self.num_trap): lev_gen.add_trap() return lev_gen.get_des() def reset(self, wizkit_items=None): des_file = self.get_env_desc() self.update(des_file) return super().reset(wizkit_items=wizkit_items) class MiniHackMultiRoomN2(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 40) super().__init__( *args, env_name="MiniGrid-MultiRoom-N2-S4-v0", **kwargs ) class MiniHackMultiRoomN4(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 120) super().__init__( *args, env_name="MiniGrid-MultiRoom-N4-S5-v0", **kwargs ) class MiniHackMultiRoomN6(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 240) super().__init__(*args, env_name="MiniGrid-MultiRoom-N6-v0", **kwargs) register( id="MiniHack-MultiRoom-N2-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN2", ) register( id="MiniHack-MultiRoom-N4-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN4", ) register( id="MiniHack-MultiRoom-N6-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN6", ) # MiniGrid: LockedMultiRoom class MiniHackMultiRoomN2Locked(MiniGridHack): def 
__init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 40) kwargs["door_state"] = "locked" super().__init__( *args, env_name="MiniGrid-MultiRoom-N2-S4-v0", **kwargs ) class MiniHackMultiRoomN4Locked(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 120) kwargs["door_state"] = "locked" super().__init__( *args, env_name="MiniGrid-MultiRoom-N4-S5-v0", **kwargs ) class MiniHackMultiRoomN6Locked(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 240) kwargs["door_state"] = "locked" super().__init__(*args, env_name="MiniGrid-MultiRoom-N6-v0", **kwargs) register( id="MiniHack-MultiRoom-N2-Locked-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN2Locked", ) register( id="MiniHack-MultiRoom-N4-Locked-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN4Locked", ) register( id="MiniHack-MultiRoom-N6-Locked-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN6Locked", ) # MiniGrid: LavaMultiRoom class MiniHackMultiRoomN2Lava(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 40) kwargs["lava_walls"] = True super().__init__( *args, env_name="MiniGrid-MultiRoom-N2-S4-v0", **kwargs ) class MiniHackMultiRoomN4Lava(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 120) kwargs["lava_walls"] = True super().__init__( *args, env_name="MiniGrid-MultiRoom-N4-S5-v0", **kwargs ) class MiniHackMultiRoomN6Lava(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 240) kwargs["lava_walls"] = True super().__init__(*args, env_name="MiniGrid-MultiRoom-N6-v0", **kwargs) register( id="MiniHack-MultiRoom-N2-Lava-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN2Lava", ) register( id="MiniHack-MultiRoom-N4-Lava-v0", 
entry_point="minihack.envs.minigrid:MiniHackMultiRoomN4Lava", ) register( id="MiniHack-MultiRoom-N6-Lava-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN6Lava", ) # MiniGrid: MonsterpedMultiRoom class MiniHackMultiRoomN2Monster(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 40) kwargs["num_mon"] = 3 super().__init__( *args, env_name="MiniGrid-MultiRoom-N2-S4-v0", **kwargs ) class MiniHackMultiRoomN4Monster(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 120) kwargs["num_mon"] = 6 super().__init__( *args, env_name="MiniGrid-MultiRoom-N4-S5-v0", **kwargs ) class MiniHackMultiRoomN6Monster(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 240) kwargs["num_mon"] = 9 super().__init__(*args, env_name="MiniGrid-MultiRoom-N6-v0", **kwargs) # MiniGrid: MonsterMultiRoom register( id="MiniHack-MultiRoom-N2-Monster-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN2Monster", ) register( id="MiniHack-MultiRoom-N4-Monster-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN4Monster", ) register( id="MiniHack-MultiRoom-N6-Monster-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN6Monster", ) # MiniGrid: ExtremeMultiRoom class MiniHackMultiRoomN2Extreme(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 40) kwargs["num_mon"] = 3 kwargs["lava_walls"] = True kwargs["door_state"] = "locked" super().__init__( *args, env_name="MiniGrid-MultiRoom-N2-S4-v0", **kwargs ) class MiniHackMultiRoomN4Extreme(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 120) kwargs["num_mon"] = 6 kwargs["lava_walls"] = True kwargs["door_state"] = "locked" super().__init__( *args, env_name="MiniGrid-MultiRoom-N4-S5-v0", **kwargs ) class 
MiniHackMultiRoomN6Extreme(MiniGridHack): def __init__(self, *args, **kwargs): kwargs["max_episode_steps"] = kwargs.pop("max_episode_steps", 240) kwargs["num_mon"] = 9 kwargs["lava_walls"] = True kwargs["door_state"] = "locked" super().__init__(*args, env_name="MiniGrid-MultiRoom-N6-v0", **kwargs) register( id="MiniHack-MultiRoom-N2-Extreme-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN2Extreme", ) register( id="MiniHack-MultiRoom-N4-Extreme-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN4Extreme", ) register( id="MiniHack-MultiRoom-N6-Extreme-v0", entry_point="minihack.envs.minigrid:MiniHackMultiRoomN6Extreme", ) # MiniGrid: LavaCrossing register( id="MiniHack-LavaCrossingS9N1-v0", entry_point="minihack.envs.minigrid:MiniGridHack", kwargs={"env_name": "MiniGrid-LavaCrossingS9N1-v0"}, ) register( id="MiniHack-LavaCrossingS9N2-v0", entry_point="minihack.envs.minigrid:MiniGridHack", kwargs={"env_name": "MiniGrid-LavaCrossingS9N2-v0"}, ) register( id="MiniHack-LavaCrossingS9N3-v0", entry_point="minihack.envs.minigrid:MiniGridHack", kwargs={"env_name": "MiniGrid-LavaCrossingS9N3-v0"}, ) register( id="MiniHack-LavaCrossingS11N5-v0", entry_point="minihack.envs.minigrid:MiniGridHack", kwargs={"env_name": "MiniGrid-LavaCrossingS11N5-v0"}, ) # MiniGrid: Simple Crossing register( id="MiniHack-SimpleCrossingS9N1-v0", entry_point="minihack.envs.minigrid:MiniGridHack", kwargs={"env_name": "MiniGrid-SimpleCrossingS9N1-v0"}, ) register( id="MiniHack-SimpleCrossingS9N2-v0", entry_point="minihack.envs.minigrid:MiniGridHack", kwargs={"env_name": "MiniGrid-SimpleCrossingS9N2-v0"}, ) register( id="MiniHack-SimpleCrossingS9N3-v0", entry_point="minihack.envs.minigrid:MiniGridHack", kwargs={"env_name": "MiniGrid-SimpleCrossingS9N3-v0"}, ) register( id="MiniHack-SimpleCrossingS11N5-v0", entry_point="minihack.envs.minigrid:MiniGridHack", kwargs={"env_name": "MiniGrid-SimpleCrossingS11N5-v0"}, )
en
0.557061
# Copyright (c) Facebook, Inc. and its affiliates. # Only ask users to install gym-minigrid if they actually need it # noqa: F401 # MiniGrid: LockedMultiRoom # MiniGrid: LavaMultiRoom # MiniGrid: MonsterpedMultiRoom # MiniGrid: MonsterMultiRoom # MiniGrid: ExtremeMultiRoom # MiniGrid: LavaCrossing # MiniGrid: Simple Crossing
2.484225
2
jinahub/encoders/text/TransformerTorchEncoder/transform_encoder.py
sauravgarg540/executors
0
6628703
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.' __license__ = 'Apache-2.0' import os from typing import Dict, List, Optional, Tuple import numpy as np import torch from jina import DocumentArray, Executor, requests from jina.logging.logger import JinaLogger from jina_commons.batching import get_docs_batch_generator from transformers import AutoModel, AutoTokenizer _DEFAULT_MODEL = 'sentence-transformers/distilbert-base-nli-stsb-mean-tokens' class TransformerTorchEncoder(Executor): """ The transformer torch encoder encodes sentences into embeddings. :param pretrained_model_name_or_path: Name of the pretrained model or path to the model :param base_tokenizer_model: Base tokenizer model :param pooling_strategy: The pooling strategy to be used :param layer_index: Index of the layer which contains the embeddings :param max_length: Max length argument for the tokenizer :param embedding_fn_name: Function to call on the model in order to get output :param device: Device to be used. Use 'cuda' for GPU :param num_threads: The number of threads used for intraop parallelism on CPU :param default_traversal_paths: Used in the encode method an define traversal on the received `DocumentArray` :param default_batch_size: Defines the batch size for inference on the loaded PyTorch model. 
:param args: Arguments :param kwargs: Keyword Arguments """ def __init__( self, pretrained_model_name_or_path: str = _DEFAULT_MODEL, base_tokenizer_model: Optional[str] = None, pooling_strategy: str = 'mean', layer_index: int = -1, max_length: Optional[int] = None, embedding_fn_name: str = '__call__', device: str = 'cpu', num_threads: Optional[int] = None, default_traversal_paths: Optional[List[str]] = None, default_batch_size: int = 32, *args, **kwargs, ): super().__init__(*args, **kwargs) if default_traversal_paths is not None: self.default_traversal_paths = default_traversal_paths else: self.default_traversal_paths = ['r'] self.default_batch_size = default_batch_size self.pretrained_model_name_or_path = pretrained_model_name_or_path self.base_tokenizer_model = ( base_tokenizer_model or pretrained_model_name_or_path ) self.pooling_strategy = pooling_strategy self.layer_index = layer_index self.max_length = max_length self.logger = JinaLogger(self.__class__.__name__) if (device not in ['cpu', 'cuda']) and (not device.startswith('cuda:')): self.logger.error( f'Torch device {device} not supported. Must be cpu or cuda!' ) raise RuntimeError( f'Torch device {device} not supported. Must be cpu or cuda!' 
) if device == 'cpu' and num_threads: cpu_num = os.cpu_count() if num_threads > cpu_num: self.logger.warning( f'You tried to use {num_threads} threads > {cpu_num} CPUs' ) else: torch.set_num_threads(num_threads) self.device = device self.embedding_fn_name = embedding_fn_name self.tokenizer = AutoTokenizer.from_pretrained(self.base_tokenizer_model) self.model = AutoModel.from_pretrained( self.pretrained_model_name_or_path, output_hidden_states=True ) self.model.to(torch.device(device)) def _compute_embedding( self, hidden_states: Tuple['torch.Tensor'], input_tokens: Dict ): fill_vals = {'cls': 0.0, 'mean': 0.0, 'max': -np.inf, 'min': np.inf} fill_val = torch.tensor( fill_vals[self.pooling_strategy], device=torch.device(self.device) ) layer = hidden_states[self.layer_index] attn_mask = input_tokens['attention_mask'] # Fix LongFormerModel like model which has mismatch seq_len between # attention_mask and hidden_states padding_len = layer.size(1) - attn_mask.size(1) if padding_len > 0: attn_mask = torch.nn.functional.pad(attn_mask, (0, padding_len), value=0) expand_attn_mask = attn_mask.unsqueeze(-1).expand_as(layer) layer = torch.where(expand_attn_mask.bool(), layer, fill_val) embeddings = layer.sum(dim=1) / expand_attn_mask.sum(dim=1) return embeddings.cpu().numpy() @requests def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs): """ Encode text data into a ndarray of `D` as dimension, and fill the embedding of each Document. :param docs: DocumentArray containing text :param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example, `parameters={'traversal_paths': ['r'], 'batch_size': 10}`. :param kwargs: Additional key value arguments. 
""" for batch in get_docs_batch_generator( docs, traversal_path=parameters.get( 'traversal_paths', self.default_traversal_paths ), batch_size=parameters.get('batch_size', self.default_batch_size), needs_attr='text', ): texts = batch.get_attributes('text') with torch.no_grad(): input_tokens = self._generate_input_tokens(texts) outputs = getattr(self.model, self.embedding_fn_name)(**input_tokens) if isinstance(outputs, torch.Tensor): outputs = outputs.cpu().numpy() hidden_states = outputs.hidden_states embeds = self._compute_embedding(hidden_states, input_tokens) for doc, embed in zip(batch, embeds): doc.embedding = embed def _generate_input_tokens(self, texts): if not self.tokenizer.pad_token: self.tokenizer.add_special_tokens({'pad_token': '[PAD]'}) self.model.resize_token_embeddings(len(self.tokenizer.vocab)) input_tokens = self.tokenizer( texts, max_length=self.max_length, padding='longest', truncation=True, return_tensors='pt', ) input_tokens = { k: v.to(torch.device(self.device)) for k, v in input_tokens.items() } return input_tokens
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.' __license__ = 'Apache-2.0' import os from typing import Dict, List, Optional, Tuple import numpy as np import torch from jina import DocumentArray, Executor, requests from jina.logging.logger import JinaLogger from jina_commons.batching import get_docs_batch_generator from transformers import AutoModel, AutoTokenizer _DEFAULT_MODEL = 'sentence-transformers/distilbert-base-nli-stsb-mean-tokens' class TransformerTorchEncoder(Executor): """ The transformer torch encoder encodes sentences into embeddings. :param pretrained_model_name_or_path: Name of the pretrained model or path to the model :param base_tokenizer_model: Base tokenizer model :param pooling_strategy: The pooling strategy to be used :param layer_index: Index of the layer which contains the embeddings :param max_length: Max length argument for the tokenizer :param embedding_fn_name: Function to call on the model in order to get output :param device: Device to be used. Use 'cuda' for GPU :param num_threads: The number of threads used for intraop parallelism on CPU :param default_traversal_paths: Used in the encode method an define traversal on the received `DocumentArray` :param default_batch_size: Defines the batch size for inference on the loaded PyTorch model. 
:param args: Arguments :param kwargs: Keyword Arguments """ def __init__( self, pretrained_model_name_or_path: str = _DEFAULT_MODEL, base_tokenizer_model: Optional[str] = None, pooling_strategy: str = 'mean', layer_index: int = -1, max_length: Optional[int] = None, embedding_fn_name: str = '__call__', device: str = 'cpu', num_threads: Optional[int] = None, default_traversal_paths: Optional[List[str]] = None, default_batch_size: int = 32, *args, **kwargs, ): super().__init__(*args, **kwargs) if default_traversal_paths is not None: self.default_traversal_paths = default_traversal_paths else: self.default_traversal_paths = ['r'] self.default_batch_size = default_batch_size self.pretrained_model_name_or_path = pretrained_model_name_or_path self.base_tokenizer_model = ( base_tokenizer_model or pretrained_model_name_or_path ) self.pooling_strategy = pooling_strategy self.layer_index = layer_index self.max_length = max_length self.logger = JinaLogger(self.__class__.__name__) if (device not in ['cpu', 'cuda']) and (not device.startswith('cuda:')): self.logger.error( f'Torch device {device} not supported. Must be cpu or cuda!' ) raise RuntimeError( f'Torch device {device} not supported. Must be cpu or cuda!' 
) if device == 'cpu' and num_threads: cpu_num = os.cpu_count() if num_threads > cpu_num: self.logger.warning( f'You tried to use {num_threads} threads > {cpu_num} CPUs' ) else: torch.set_num_threads(num_threads) self.device = device self.embedding_fn_name = embedding_fn_name self.tokenizer = AutoTokenizer.from_pretrained(self.base_tokenizer_model) self.model = AutoModel.from_pretrained( self.pretrained_model_name_or_path, output_hidden_states=True ) self.model.to(torch.device(device)) def _compute_embedding( self, hidden_states: Tuple['torch.Tensor'], input_tokens: Dict ): fill_vals = {'cls': 0.0, 'mean': 0.0, 'max': -np.inf, 'min': np.inf} fill_val = torch.tensor( fill_vals[self.pooling_strategy], device=torch.device(self.device) ) layer = hidden_states[self.layer_index] attn_mask = input_tokens['attention_mask'] # Fix LongFormerModel like model which has mismatch seq_len between # attention_mask and hidden_states padding_len = layer.size(1) - attn_mask.size(1) if padding_len > 0: attn_mask = torch.nn.functional.pad(attn_mask, (0, padding_len), value=0) expand_attn_mask = attn_mask.unsqueeze(-1).expand_as(layer) layer = torch.where(expand_attn_mask.bool(), layer, fill_val) embeddings = layer.sum(dim=1) / expand_attn_mask.sum(dim=1) return embeddings.cpu().numpy() @requests def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs): """ Encode text data into a ndarray of `D` as dimension, and fill the embedding of each Document. :param docs: DocumentArray containing text :param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example, `parameters={'traversal_paths': ['r'], 'batch_size': 10}`. :param kwargs: Additional key value arguments. 
""" for batch in get_docs_batch_generator( docs, traversal_path=parameters.get( 'traversal_paths', self.default_traversal_paths ), batch_size=parameters.get('batch_size', self.default_batch_size), needs_attr='text', ): texts = batch.get_attributes('text') with torch.no_grad(): input_tokens = self._generate_input_tokens(texts) outputs = getattr(self.model, self.embedding_fn_name)(**input_tokens) if isinstance(outputs, torch.Tensor): outputs = outputs.cpu().numpy() hidden_states = outputs.hidden_states embeds = self._compute_embedding(hidden_states, input_tokens) for doc, embed in zip(batch, embeds): doc.embedding = embed def _generate_input_tokens(self, texts): if not self.tokenizer.pad_token: self.tokenizer.add_special_tokens({'pad_token': '[PAD]'}) self.model.resize_token_embeddings(len(self.tokenizer.vocab)) input_tokens = self.tokenizer( texts, max_length=self.max_length, padding='longest', truncation=True, return_tensors='pt', ) input_tokens = { k: v.to(torch.device(self.device)) for k, v in input_tokens.items() } return input_tokens
en
0.688618
The transformer torch encoder encodes sentences into embeddings. :param pretrained_model_name_or_path: Name of the pretrained model or path to the model :param base_tokenizer_model: Base tokenizer model :param pooling_strategy: The pooling strategy to be used :param layer_index: Index of the layer which contains the embeddings :param max_length: Max length argument for the tokenizer :param embedding_fn_name: Function to call on the model in order to get output :param device: Device to be used. Use 'cuda' for GPU :param num_threads: The number of threads used for intraop parallelism on CPU :param default_traversal_paths: Used in the encode method an define traversal on the received `DocumentArray` :param default_batch_size: Defines the batch size for inference on the loaded PyTorch model. :param args: Arguments :param kwargs: Keyword Arguments # Fix LongFormerModel like model which has mismatch seq_len between # attention_mask and hidden_states Encode text data into a ndarray of `D` as dimension, and fill the embedding of each Document. :param docs: DocumentArray containing text :param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example, `parameters={'traversal_paths': ['r'], 'batch_size': 10}`. :param kwargs: Additional key value arguments.
2.17351
2
glue/core/coordinates.py
sergiopasra/glue
0
6628704
from __future__ import absolute_import, division, print_function import logging import numpy as np from glue.utils import unbroadcast, broadcast_to, axis_correlation_matrix __all__ = ['Coordinates', 'WCSCoordinates', 'coordinates_from_header', 'coordinates_from_wcs'] class Coordinates(object): ''' Base class for coordinate transformation ''' def __init__(self): pass def pixel2world(self, *args): """ Convert pixel to world coordinates, preserving input type/shape. Parameters ---------- *pixel : scalars lists, or Numpy arrays The pixel coordinates (0-based) to convert Returns ------- *world : Numpy arrays The corresponding world coordinates """ return args def world2pixel(self, *args): """ Convert world to pixel coordinates, preserving input type/shape. Parameters ---------- *world : scalars lists, or Numpy arrays The world coordinates to convert Returns ------- *pixel : Numpy arrays The corresponding pixel coordinates """ return args def default_world_coords(self, ndim): return np.zeros(ndim, dtype=float) # PY3: pixel2world_single_axis(self, *pixel, axis=None) def pixel2world_single_axis(self, *pixel, **kwargs): """ Convert pixel to world coordinates, preserving input type/shape. This is a wrapper around pixel2world which returns the result for just one axis, and also determines whether the calculation can be sped up if broadcasting is present in the input arrays. Parameters ---------- *pixel : scalars lists, or Numpy arrays The pixel coordinates (0-based) to convert axis : int, optional If only one axis is needed, it should be specified since the calculation will be much more efficient. 
Returns ------- world : `numpy.ndarray` The world coordinates for the requested axis """ # PY3: the following is needed for Python 2 axis = kwargs.get('axis', None) if axis is None: raise ValueError("axis needs to be set") if np.size(pixel[0]) == 0: return np.array([], dtype=float) original_shape = pixel[0].shape pixel_new = [] # NOTE: the axis passed to this function is the WCS axis not the Numpy # axis, so we need to convert it as needed. dep_axes = self.dependent_axes(len(pixel) - 1 - axis) for ip, p in enumerate(pixel): if (len(pixel) - 1 - ip) in dep_axes: pixel_new.append(unbroadcast(p)) else: pixel_new.append(p.flat[0]) pixel = np.broadcast_arrays(*pixel_new) result = self.pixel2world(*pixel) return broadcast_to(result[axis], original_shape) def world2pixel_single_axis(self, *world, **kwargs): """ Convert world to pixel coordinates, preserving input type/shape. This is a wrapper around world2pixel which returns the result for just one axis, and also determines whether the calculation can be sped up if broadcasting is present in the input arrays. Parameters ---------- *world : scalars lists, or Numpy arrays The world coordinates to convert axis : int, optional If only one axis is needed, it should be specified since the calculation will be much more efficient. Returns ------- pixel : `numpy.ndarray` The pixel coordinates for the requested axis """ # PY3: the following is needed for Python 2 axis = kwargs.get('axis', None) if axis is None: raise ValueError("axis needs to be set") if np.size(world[0]) == 0: return np.array([], dtype=float) original_shape = world[0].shape world_new = [] # NOTE: the axis passed to this function is the WCS axis not the Numpy # axis, so we need to convert it as needed. 
dep_axes = self.dependent_axes(len(world) - 1 - axis) for iw, w in enumerate(world): if (len(world) - 1 - iw) in dep_axes: world_new.append(unbroadcast(w)) else: world_new.append(w.flat[0]) world = np.broadcast_arrays(*world_new) result = self.world2pixel(*world) return broadcast_to(result[axis], original_shape) def world_axis(self, data, axis): """ Find the world coordinates along a given dimension, and which for now we center on the pixel origin. Parameters ---------- data : `~glue.core.data.Data` The data to compute the coordinate axis for (this is used to determine the size of the axis) axis : int The axis to compute, in Numpy axis order Notes ----- This method computes the axis values using pixel positions at the center of the data along all other axes. This will therefore only give the correct result for non-dependent axes (which can be checked using the ``dependent_axes`` method). """ pixel = [] for i, s in enumerate(data.shape): if i == axis: pixel.append(np.arange(data.shape[axis])) else: pixel.append(np.repeat((s - 1) / 2, data.shape[axis])) return self.pixel2world_single_axis(*pixel[::-1], axis=data.ndim - 1 - axis) def world_axis_unit(self, axis): """ Return the unit of the world coordinate given by ``axis`` (assuming the Numpy axis order) """ return '' def axis_label(self, axis): return "World {}".format(axis) def dependent_axes(self, axis): """Return a tuple of which world-axes are non-independent from a given pixel axis The axis index is given in numpy ordering convention (note that opposite the fits convention) """ return (axis,) def __gluestate__(self, context): return {} # no state @classmethod def __setgluestate__(cls, rec, context): return cls() class WCSCoordinates(Coordinates): ''' Class for coordinate transformation based on the WCS FITS standard. This class does not take into account distortions. 
Parameters ---------- header : :class:`astropy.io.fits.Header` FITS header (derived from WCS if not given) wcs : :class:`astropy.wcs.WCS` WCS object to use, if different from header References ---------- * Greisen & Calabretta (2002), Astronomy and Astrophysics, 395, 1061 * Calabretta & Greisen (2002), Astronomy and Astrophysics, 395, 1077 * Greisen, Calabretta, Valdes & Allen (2006), Astronomy and Astrophysics, 446, 747 ''' def __init__(self, header=None, wcs=None): super(WCSCoordinates, self).__init__() from astropy.wcs import WCS if header is None and wcs is None: raise ValueError('Must provide either FITS header or WCS or both') if header is None: header = wcs.to_header() self._header = header try: naxis = header['NAXIS'] except (KeyError, TypeError): naxis = None wcs = wcs or WCS(header, naxis=naxis) # update WCS interface if using old API mapping = {'wcs_pix2world': 'wcs_pix2sky', 'wcs_world2pix': 'wcs_sky2pix', 'all_pix2world': 'all_pix2sky'} for k, v in mapping.items(): if not hasattr(wcs, k): setattr(wcs, k, getattr(wcs, v)) self._wcs = wcs # Pre-compute dependent axes. The matrix returned by # axis_correlation_matrix is (n_world, n_pixel) but we want to know # which pixel coordinates are linked to which other pixel coordinates. # So to do this we take a column from the matrix and find if there are # any entries in common with all other columns in the matrix. matrix = axis_correlation_matrix(wcs)[::-1, ::-1] self._dependent_axes = [] for axis in range(wcs.naxis): world_dep = matrix[:, axis:axis + 1] dependent = tuple(np.nonzero((world_dep & matrix).any(axis=0))[0]) self._dependent_axes.append(dependent) def world_axis_unit(self, axis): return str(self._wcs.wcs.cunit[self._wcs.naxis - 1 - axis]) @property def wcs(self): return self._wcs @property def header(self): return self._header def dependent_axes(self, axis): return self._dependent_axes[axis] def __setstate__(self, state): self.__dict__ = state # wcs object doesn't seem to unpickle properly. 
reconstruct it from astropy.wcs import WCS try: naxis = self._header['NAXIS'] except (KeyError, TypeError): naxis = None self._wcs = WCS(self._header, naxis=naxis) def pixel2world(self, *pixel): # PY3: can just do pix2world(*pixel, 0) if np.size(pixel[0]) == 0: return tuple(np.array([], dtype=float) for p in pixel) else: return self._wcs.wcs_pix2world(*(tuple(pixel) + (0,))) def world2pixel(self, *world): # PY3: can just do world2pix(*world, 0) if np.size(world[0]) == 0: return tuple(np.array([], dtype=float) for w in world) else: return self._wcs.wcs_world2pix(*(tuple(world) + (0,))) def default_world_coords(self, ndim): if ndim != self._wcs.naxis: raise ValueError("Requested default world coordinates for {0} " "dimensions, WCS has {1}".format(ndim, self._wcs.naxis)) return self._wcs.wcs.crval def axis_label(self, axis): header = self._header num = _get_ndim(header) - axis # number orientation reversed ax = self._header.get('CTYPE%i' % num) if ax is not None: if len(ax) == 8 or '-' in ax: # assume standard format ax = ax[:5].split('-')[0].title() else: ax = ax.title() translate = dict( Glon='Galactic Longitude', Glat='Galactic Latitude', Ra='Right Ascension', Dec='Declination', Velo='Velocity', Freq='Frequency' ) return translate.get(ax, ax) return super(WCSCoordinates, self).axis_label(axis) def __gluestate__(self, context): return dict(header=self._wcs.to_header_string()) @classmethod def __setgluestate__(cls, rec, context): from astropy.io import fits return cls(fits.Header.fromstring(rec['header'])) def coordinates_from_header(header): """ Convert a FITS header into a glue Coordinates object. Parameters ---------- header : :class:`astropy.io.fits.Header` Header to convert Returns ------- coordinates : :class:`~glue.core.coordinates.Coordinates` """ # We check whether the header contains at least CRVAL1 - if not, we would # end up with a default WCS that isn't quite 1 to 1 (because of a 1-pixel # offset) so better use Coordinates in that case. 
from astropy.io.fits import Header if isinstance(header, Header) and 'CRVAL1' in header: try: return WCSCoordinates(header) except Exception as e: logging.getLogger(__name__).warn( "\n\n*******************************\n" "Encounted an error during WCS parsing. " "Discarding world coordinates! " "\n{}\n" "*******************************\n\n".format(str(e))) return Coordinates() def _get_ndim(header): if 'NAXIS' in header: return header['NAXIS'] if 'WCSAXES' in header: return header['WCSAXES'] return None def coordinates_from_wcs(wcs): """ Convert an Astropy WCS object into a glue Coordinates object. Parameters ---------- wcs : :class:`astropy.wcs.WCS` The WCS object to use Returns ------- coordinates : :class:`~glue.core.coordinates.Coordinates` """ from astropy.io import fits hdr_str = wcs.wcs.to_header() hdr = fits.Header.fromstring(hdr_str) try: return WCSCoordinates(hdr, wcs) except (AttributeError, TypeError) as e: print(e) return Coordinates() def header_from_string(string): """ Convert a string to a FITS header. """ from astropy.io import fits return fits.Header.fromstring(string, sep='\n')
from __future__ import absolute_import, division, print_function import logging import numpy as np from glue.utils import unbroadcast, broadcast_to, axis_correlation_matrix __all__ = ['Coordinates', 'WCSCoordinates', 'coordinates_from_header', 'coordinates_from_wcs'] class Coordinates(object): ''' Base class for coordinate transformation ''' def __init__(self): pass def pixel2world(self, *args): """ Convert pixel to world coordinates, preserving input type/shape. Parameters ---------- *pixel : scalars lists, or Numpy arrays The pixel coordinates (0-based) to convert Returns ------- *world : Numpy arrays The corresponding world coordinates """ return args def world2pixel(self, *args): """ Convert world to pixel coordinates, preserving input type/shape. Parameters ---------- *world : scalars lists, or Numpy arrays The world coordinates to convert Returns ------- *pixel : Numpy arrays The corresponding pixel coordinates """ return args def default_world_coords(self, ndim): return np.zeros(ndim, dtype=float) # PY3: pixel2world_single_axis(self, *pixel, axis=None) def pixel2world_single_axis(self, *pixel, **kwargs): """ Convert pixel to world coordinates, preserving input type/shape. This is a wrapper around pixel2world which returns the result for just one axis, and also determines whether the calculation can be sped up if broadcasting is present in the input arrays. Parameters ---------- *pixel : scalars lists, or Numpy arrays The pixel coordinates (0-based) to convert axis : int, optional If only one axis is needed, it should be specified since the calculation will be much more efficient. 
Returns ------- world : `numpy.ndarray` The world coordinates for the requested axis """ # PY3: the following is needed for Python 2 axis = kwargs.get('axis', None) if axis is None: raise ValueError("axis needs to be set") if np.size(pixel[0]) == 0: return np.array([], dtype=float) original_shape = pixel[0].shape pixel_new = [] # NOTE: the axis passed to this function is the WCS axis not the Numpy # axis, so we need to convert it as needed. dep_axes = self.dependent_axes(len(pixel) - 1 - axis) for ip, p in enumerate(pixel): if (len(pixel) - 1 - ip) in dep_axes: pixel_new.append(unbroadcast(p)) else: pixel_new.append(p.flat[0]) pixel = np.broadcast_arrays(*pixel_new) result = self.pixel2world(*pixel) return broadcast_to(result[axis], original_shape) def world2pixel_single_axis(self, *world, **kwargs): """ Convert world to pixel coordinates, preserving input type/shape. This is a wrapper around world2pixel which returns the result for just one axis, and also determines whether the calculation can be sped up if broadcasting is present in the input arrays. Parameters ---------- *world : scalars lists, or Numpy arrays The world coordinates to convert axis : int, optional If only one axis is needed, it should be specified since the calculation will be much more efficient. Returns ------- pixel : `numpy.ndarray` The pixel coordinates for the requested axis """ # PY3: the following is needed for Python 2 axis = kwargs.get('axis', None) if axis is None: raise ValueError("axis needs to be set") if np.size(world[0]) == 0: return np.array([], dtype=float) original_shape = world[0].shape world_new = [] # NOTE: the axis passed to this function is the WCS axis not the Numpy # axis, so we need to convert it as needed. 
dep_axes = self.dependent_axes(len(world) - 1 - axis) for iw, w in enumerate(world): if (len(world) - 1 - iw) in dep_axes: world_new.append(unbroadcast(w)) else: world_new.append(w.flat[0]) world = np.broadcast_arrays(*world_new) result = self.world2pixel(*world) return broadcast_to(result[axis], original_shape) def world_axis(self, data, axis): """ Find the world coordinates along a given dimension, and which for now we center on the pixel origin. Parameters ---------- data : `~glue.core.data.Data` The data to compute the coordinate axis for (this is used to determine the size of the axis) axis : int The axis to compute, in Numpy axis order Notes ----- This method computes the axis values using pixel positions at the center of the data along all other axes. This will therefore only give the correct result for non-dependent axes (which can be checked using the ``dependent_axes`` method). """ pixel = [] for i, s in enumerate(data.shape): if i == axis: pixel.append(np.arange(data.shape[axis])) else: pixel.append(np.repeat((s - 1) / 2, data.shape[axis])) return self.pixel2world_single_axis(*pixel[::-1], axis=data.ndim - 1 - axis) def world_axis_unit(self, axis): """ Return the unit of the world coordinate given by ``axis`` (assuming the Numpy axis order) """ return '' def axis_label(self, axis): return "World {}".format(axis) def dependent_axes(self, axis): """Return a tuple of which world-axes are non-independent from a given pixel axis The axis index is given in numpy ordering convention (note that opposite the fits convention) """ return (axis,) def __gluestate__(self, context): return {} # no state @classmethod def __setgluestate__(cls, rec, context): return cls() class WCSCoordinates(Coordinates): ''' Class for coordinate transformation based on the WCS FITS standard. This class does not take into account distortions. 
Parameters ---------- header : :class:`astropy.io.fits.Header` FITS header (derived from WCS if not given) wcs : :class:`astropy.wcs.WCS` WCS object to use, if different from header References ---------- * Greisen & Calabretta (2002), Astronomy and Astrophysics, 395, 1061 * Calabretta & Greisen (2002), Astronomy and Astrophysics, 395, 1077 * Greisen, Calabretta, Valdes & Allen (2006), Astronomy and Astrophysics, 446, 747 ''' def __init__(self, header=None, wcs=None): super(WCSCoordinates, self).__init__() from astropy.wcs import WCS if header is None and wcs is None: raise ValueError('Must provide either FITS header or WCS or both') if header is None: header = wcs.to_header() self._header = header try: naxis = header['NAXIS'] except (KeyError, TypeError): naxis = None wcs = wcs or WCS(header, naxis=naxis) # update WCS interface if using old API mapping = {'wcs_pix2world': 'wcs_pix2sky', 'wcs_world2pix': 'wcs_sky2pix', 'all_pix2world': 'all_pix2sky'} for k, v in mapping.items(): if not hasattr(wcs, k): setattr(wcs, k, getattr(wcs, v)) self._wcs = wcs # Pre-compute dependent axes. The matrix returned by # axis_correlation_matrix is (n_world, n_pixel) but we want to know # which pixel coordinates are linked to which other pixel coordinates. # So to do this we take a column from the matrix and find if there are # any entries in common with all other columns in the matrix. matrix = axis_correlation_matrix(wcs)[::-1, ::-1] self._dependent_axes = [] for axis in range(wcs.naxis): world_dep = matrix[:, axis:axis + 1] dependent = tuple(np.nonzero((world_dep & matrix).any(axis=0))[0]) self._dependent_axes.append(dependent) def world_axis_unit(self, axis): return str(self._wcs.wcs.cunit[self._wcs.naxis - 1 - axis]) @property def wcs(self): return self._wcs @property def header(self): return self._header def dependent_axes(self, axis): return self._dependent_axes[axis] def __setstate__(self, state): self.__dict__ = state # wcs object doesn't seem to unpickle properly. 
reconstruct it from astropy.wcs import WCS try: naxis = self._header['NAXIS'] except (KeyError, TypeError): naxis = None self._wcs = WCS(self._header, naxis=naxis) def pixel2world(self, *pixel): # PY3: can just do pix2world(*pixel, 0) if np.size(pixel[0]) == 0: return tuple(np.array([], dtype=float) for p in pixel) else: return self._wcs.wcs_pix2world(*(tuple(pixel) + (0,))) def world2pixel(self, *world): # PY3: can just do world2pix(*world, 0) if np.size(world[0]) == 0: return tuple(np.array([], dtype=float) for w in world) else: return self._wcs.wcs_world2pix(*(tuple(world) + (0,))) def default_world_coords(self, ndim): if ndim != self._wcs.naxis: raise ValueError("Requested default world coordinates for {0} " "dimensions, WCS has {1}".format(ndim, self._wcs.naxis)) return self._wcs.wcs.crval def axis_label(self, axis): header = self._header num = _get_ndim(header) - axis # number orientation reversed ax = self._header.get('CTYPE%i' % num) if ax is not None: if len(ax) == 8 or '-' in ax: # assume standard format ax = ax[:5].split('-')[0].title() else: ax = ax.title() translate = dict( Glon='Galactic Longitude', Glat='Galactic Latitude', Ra='Right Ascension', Dec='Declination', Velo='Velocity', Freq='Frequency' ) return translate.get(ax, ax) return super(WCSCoordinates, self).axis_label(axis) def __gluestate__(self, context): return dict(header=self._wcs.to_header_string()) @classmethod def __setgluestate__(cls, rec, context): from astropy.io import fits return cls(fits.Header.fromstring(rec['header'])) def coordinates_from_header(header): """ Convert a FITS header into a glue Coordinates object. Parameters ---------- header : :class:`astropy.io.fits.Header` Header to convert Returns ------- coordinates : :class:`~glue.core.coordinates.Coordinates` """ # We check whether the header contains at least CRVAL1 - if not, we would # end up with a default WCS that isn't quite 1 to 1 (because of a 1-pixel # offset) so better use Coordinates in that case. 
from astropy.io.fits import Header if isinstance(header, Header) and 'CRVAL1' in header: try: return WCSCoordinates(header) except Exception as e: logging.getLogger(__name__).warn( "\n\n*******************************\n" "Encounted an error during WCS parsing. " "Discarding world coordinates! " "\n{}\n" "*******************************\n\n".format(str(e))) return Coordinates() def _get_ndim(header): if 'NAXIS' in header: return header['NAXIS'] if 'WCSAXES' in header: return header['WCSAXES'] return None def coordinates_from_wcs(wcs): """ Convert an Astropy WCS object into a glue Coordinates object. Parameters ---------- wcs : :class:`astropy.wcs.WCS` The WCS object to use Returns ------- coordinates : :class:`~glue.core.coordinates.Coordinates` """ from astropy.io import fits hdr_str = wcs.wcs.to_header() hdr = fits.Header.fromstring(hdr_str) try: return WCSCoordinates(hdr, wcs) except (AttributeError, TypeError) as e: print(e) return Coordinates() def header_from_string(string): """ Convert a string to a FITS header. """ from astropy.io import fits return fits.Header.fromstring(string, sep='\n')
en
0.697059
Base class for coordinate transformation Convert pixel to world coordinates, preserving input type/shape. Parameters ---------- *pixel : scalars lists, or Numpy arrays The pixel coordinates (0-based) to convert Returns ------- *world : Numpy arrays The corresponding world coordinates Convert world to pixel coordinates, preserving input type/shape. Parameters ---------- *world : scalars lists, or Numpy arrays The world coordinates to convert Returns ------- *pixel : Numpy arrays The corresponding pixel coordinates # PY3: pixel2world_single_axis(self, *pixel, axis=None) Convert pixel to world coordinates, preserving input type/shape. This is a wrapper around pixel2world which returns the result for just one axis, and also determines whether the calculation can be sped up if broadcasting is present in the input arrays. Parameters ---------- *pixel : scalars lists, or Numpy arrays The pixel coordinates (0-based) to convert axis : int, optional If only one axis is needed, it should be specified since the calculation will be much more efficient. Returns ------- world : `numpy.ndarray` The world coordinates for the requested axis # PY3: the following is needed for Python 2 # NOTE: the axis passed to this function is the WCS axis not the Numpy # axis, so we need to convert it as needed. Convert world to pixel coordinates, preserving input type/shape. This is a wrapper around world2pixel which returns the result for just one axis, and also determines whether the calculation can be sped up if broadcasting is present in the input arrays. Parameters ---------- *world : scalars lists, or Numpy arrays The world coordinates to convert axis : int, optional If only one axis is needed, it should be specified since the calculation will be much more efficient. 
Returns ------- pixel : `numpy.ndarray` The pixel coordinates for the requested axis # PY3: the following is needed for Python 2 # NOTE: the axis passed to this function is the WCS axis not the Numpy # axis, so we need to convert it as needed. Find the world coordinates along a given dimension, and which for now we center on the pixel origin. Parameters ---------- data : `~glue.core.data.Data` The data to compute the coordinate axis for (this is used to determine the size of the axis) axis : int The axis to compute, in Numpy axis order Notes ----- This method computes the axis values using pixel positions at the center of the data along all other axes. This will therefore only give the correct result for non-dependent axes (which can be checked using the ``dependent_axes`` method). Return the unit of the world coordinate given by ``axis`` (assuming the Numpy axis order) Return a tuple of which world-axes are non-independent from a given pixel axis The axis index is given in numpy ordering convention (note that opposite the fits convention) # no state Class for coordinate transformation based on the WCS FITS standard. This class does not take into account distortions. Parameters ---------- header : :class:`astropy.io.fits.Header` FITS header (derived from WCS if not given) wcs : :class:`astropy.wcs.WCS` WCS object to use, if different from header References ---------- * Greisen & Calabretta (2002), Astronomy and Astrophysics, 395, 1061 * Calabretta & Greisen (2002), Astronomy and Astrophysics, 395, 1077 * Greisen, Calabretta, Valdes & Allen (2006), Astronomy and Astrophysics, 446, 747 # update WCS interface if using old API # Pre-compute dependent axes. The matrix returned by # axis_correlation_matrix is (n_world, n_pixel) but we want to know # which pixel coordinates are linked to which other pixel coordinates. # So to do this we take a column from the matrix and find if there are # any entries in common with all other columns in the matrix. 
# wcs object doesn't seem to unpickle properly. reconstruct it # PY3: can just do pix2world(*pixel, 0) # PY3: can just do world2pix(*world, 0) # number orientation reversed # assume standard format Convert a FITS header into a glue Coordinates object. Parameters ---------- header : :class:`astropy.io.fits.Header` Header to convert Returns ------- coordinates : :class:`~glue.core.coordinates.Coordinates` # We check whether the header contains at least CRVAL1 - if not, we would # end up with a default WCS that isn't quite 1 to 1 (because of a 1-pixel # offset) so better use Coordinates in that case. Convert an Astropy WCS object into a glue Coordinates object. Parameters ---------- wcs : :class:`astropy.wcs.WCS` The WCS object to use Returns ------- coordinates : :class:`~glue.core.coordinates.Coordinates` Convert a string to a FITS header.
2.921289
3
src/SimpleSchemaGenerator/Schema/Parse.py
davidbrownell/Common_SimpleSchemaGenerator
0
6628705
<reponame>davidbrownell/Common_SimpleSchemaGenerator # ---------------------------------------------------------------------- # | # | Parse.py # | # | <NAME> <<EMAIL>> # | 2018-07-09 13:16:09 # | # ---------------------------------------------------------------------- # | # | Copyright <NAME> 2018-21. # | Distributed under the Boost Software License, Version 1.0. # | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) # | # ---------------------------------------------------------------------- """Functionality used when parsing SimpleSchema files""" import os from collections import OrderedDict import six import CommonEnvironment from .Impl.Populate import Populate from .Impl.Resolve import Resolve from .Impl.Validate import Validate from .Impl.Transform import Transform # ---------------------------------------------------------------------- _script_fullpath = CommonEnvironment.ThisFullpath() _script_dir, _script_name = os.path.split(_script_fullpath) # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- def ParseFiles(filenames, plugin, filter_unsupported_extensions, filter_unsupported_attributes): file_map = OrderedDict() for filename in filenames: file_map[filename] = lambda filename=filename: open(filename).read() return ParseEx(file_map, plugin, filter_unsupported_extensions, filter_unsupported_attributes) # ---------------------------------------------------------------------- def ParseStrings(named_strings, plugin, filter_unsupported_extensions, filter_unsupported_attributes): # { "<name>" : "<content>", ... 
} string_map = OrderedDict() for k, v in six.iteritems(named_strings): string_map[k] = lambda v=v: v return ParseEx(string_map, plugin, filter_unsupported_extensions, filter_unsupported_attributes) # ---------------------------------------------------------------------- def ParseEx(source_name_content_generators, plugin, filter_unsupported_extensions, filter_unsupported_attributes): # { "<name>" : def Func() -> content } plugin.VerifyFlags() root = Populate(source_name_content_generators, plugin.Flags) root = Resolve(root, plugin) root = Validate(root, plugin, filter_unsupported_extensions, filter_unsupported_attributes) root = Transform(root, plugin) # Eliminate the root element as the parent for top-level elements for child in root.Children: child.Parent = None return root.Children
# ---------------------------------------------------------------------- # | # | Parse.py # | # | <NAME> <<EMAIL>> # | 2018-07-09 13:16:09 # | # ---------------------------------------------------------------------- # | # | Copyright <NAME> 2018-21. # | Distributed under the Boost Software License, Version 1.0. # | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) # | # ---------------------------------------------------------------------- """Functionality used when parsing SimpleSchema files""" import os from collections import OrderedDict import six import CommonEnvironment from .Impl.Populate import Populate from .Impl.Resolve import Resolve from .Impl.Validate import Validate from .Impl.Transform import Transform # ---------------------------------------------------------------------- _script_fullpath = CommonEnvironment.ThisFullpath() _script_dir, _script_name = os.path.split(_script_fullpath) # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- def ParseFiles(filenames, plugin, filter_unsupported_extensions, filter_unsupported_attributes): file_map = OrderedDict() for filename in filenames: file_map[filename] = lambda filename=filename: open(filename).read() return ParseEx(file_map, plugin, filter_unsupported_extensions, filter_unsupported_attributes) # ---------------------------------------------------------------------- def ParseStrings(named_strings, plugin, filter_unsupported_extensions, filter_unsupported_attributes): # { "<name>" : "<content>", ... 
} string_map = OrderedDict() for k, v in six.iteritems(named_strings): string_map[k] = lambda v=v: v return ParseEx(string_map, plugin, filter_unsupported_extensions, filter_unsupported_attributes) # ---------------------------------------------------------------------- def ParseEx(source_name_content_generators, plugin, filter_unsupported_extensions, filter_unsupported_attributes): # { "<name>" : def Func() -> content } plugin.VerifyFlags() root = Populate(source_name_content_generators, plugin.Flags) root = Resolve(root, plugin) root = Validate(root, plugin, filter_unsupported_extensions, filter_unsupported_attributes) root = Transform(root, plugin) # Eliminate the root element as the parent for top-level elements for child in root.Children: child.Parent = None return root.Children
en
0.294715
# ---------------------------------------------------------------------- # | # | Parse.py # | # | <NAME> <<EMAIL>> # | 2018-07-09 13:16:09 # | # ---------------------------------------------------------------------- # | # | Copyright <NAME> 2018-21. # | Distributed under the Boost Software License, Version 1.0. # | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) # | # ---------------------------------------------------------------------- Functionality used when parsing SimpleSchema files # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # { "<name>" : "<content>", ... } # ---------------------------------------------------------------------- # { "<name>" : def Func() -> content } # Eliminate the root element as the parent for top-level elements
1.92043
2
setup.py
andrewhalle/sudoku_solver
0
6628706
from setuptools import setup, find_packages setup( name="sudoku_solver", version="0.1", description="A sudoku solver using CSP techniques", url="https://github.com/andrewhalle/sudoku_solver", author="<NAME>", author_email="<EMAIL>", license="MIT", packages=find_packages(), entry_points={ "console_scripts": [ "sudoku_solver = sudoku_solver.gui:main" ] } )
from setuptools import setup, find_packages setup( name="sudoku_solver", version="0.1", description="A sudoku solver using CSP techniques", url="https://github.com/andrewhalle/sudoku_solver", author="<NAME>", author_email="<EMAIL>", license="MIT", packages=find_packages(), entry_points={ "console_scripts": [ "sudoku_solver = sudoku_solver.gui:main" ] } )
none
1
1.228411
1
test/test_basic/test_utils.py
jkrueger1/nicos
0
6628707
<filename>test/test_basic/test_utils.py # -*- coding: utf-8 -*- # ***************************************************************************** # NICOS, the Networked Instrument Control System of the MLZ # Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS) # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Module authors: # <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # # ***************************************************************************** """NICOS tests for some utility modules.""" import os import pickle import socket import sys from datetime import timedelta from time import mktime, monotonic, sleep import pytest from nicos.core.errors import NicosError from nicos.core.sessions.utils import SimClock from nicos.utils import KEYEXPR_NS, TB_CAUSE_MSG, Repeater, allDays, \ bitDescription, checkSetupSpec, chunks, closeSocket, comparestrings, \ formatDuration, formatExtendedFrame, formatExtendedStack, \ formatExtendedTraceback, lazy_property, moveOutOfWay, num_sort, \ parseConnectionString, parseDuration, parseKeyExpression, \ readFileCounter, readonlydict, readonlylist, safeName, safeWriteFile, \ squeeze, tcpSocket, timedRetryOnExcept, tupelize, updateFileCounter from nicos.utils.timer import Timer from test.utils import raises def test_lazy_property(): asked = [] class P: @lazy_property def prop(self): 
asked.append('x') return 'ok' p = P() assert p.prop == 'ok' assert p.prop == 'ok' # ask twice! assert len(asked) == 1 # but getter only called once def test_readonly_objects(): d = readonlydict({'a': 1, 'b': 2}) assert raises(TypeError, d.update, {}) # pickle Protocoll 0 unpickled = pickle.loads(pickle.dumps(d)) assert isinstance(unpickled, readonlydict) assert len(unpickled) == 2 # pickle Protocoll 2 unpickled = pickle.loads(pickle.dumps(d, 2)) assert isinstance(unpickled, readonlydict) assert len(unpickled) == 2 lst = readonlylist([1, 2, 3]) assert raises(TypeError, lst.append, 4) # pickle Protocoll 0 unpickled = pickle.loads(pickle.dumps(lst)) assert isinstance(unpickled, readonlylist) assert len(unpickled) == 3 # pickle Protocoll 2 unpickled = pickle.loads(pickle.dumps(lst, 2)) assert isinstance(unpickled, readonlylist) assert len(unpickled) == 3 def test_readonlylist_hashable(): lst = readonlylist([1, 2, 3]) assert lst == [1, 2, 3] dt = {lst: 'testval'} assert dt[readonlylist([1, 2, 3])] == 'testval' def test_repeater(): r = Repeater(1) it = iter(r) assert next(it) == 1 assert next(it) == 1 assert r[23] == 1 def test_functions(): assert formatDuration(1) == '1 second' assert formatDuration(4) == '4 seconds' assert formatDuration(154, precise=False) == '3 min' assert formatDuration(154, precise=True) == '2 min, 34 sec' assert formatDuration(7199) == '2 h, 0 min' assert formatDuration(3700) == '1 h, 2 min' assert formatDuration(24 * 3600 + 7240, precise=False) == '1 day, 2 h' assert formatDuration(48 * 3600 - 1) == '2 days, 0 h' assert bitDescription(0x5, (0, 'a'), (1, 'b', 'c'), (2, 'd', 'e')) == 'a, c, d' assert parseConnectionString('<EMAIL>:pass@host:1301', 1302) == \ {'user': '<EMAIL>', 'password': '<PASSWORD>', 'host': 'host', 'port': 1301} assert parseConnectionString('user:@host', 1302) == \ {'user': 'user', 'password': '', 'host': 'host', 'port': 1302} assert parseConnectionString('user@host:1301', 1302) == \ {'user': 'user', 'password': None, 'host': 
'host', 'port': 1301} assert parseConnectionString('user@ho-st:1301', 1302) == \ {'user': 'user', 'password': None, 'host': 'ho-st', 'port': 1301} assert parseConnectionString('', 1302) is None assert parseConnectionString('host?', 1302) is None assert [tuple(x) for x in chunks(range(10), 3)] == \ [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)] def test_traceback(): a = 1 # pylint: disable=unused-variable f = sys._getframe() fmt = formatExtendedFrame(f) assert any('a = 1' in line for line in fmt) try: try: 1 / 0 except ZeroDivisionError as err: raise RuntimeError from err except Exception: ei = sys.exc_info() tb = formatExtendedTraceback(ei[1]) assert 'ZeroDivisionError' in tb assert 'RuntimeError' in tb assert TB_CAUSE_MSG in tb assert ', in test_traceback' in tb st = formatExtendedStack() assert ', in test_traceback' in st def test_comparestrings(): comparestrings.test() def test_retryOnExcept(): def raising_func(x): x += 1 if x < 2: raise NicosError return x @timedRetryOnExcept(timeout=0.2) def wr(x): x = raising_func(x) return x @timedRetryOnExcept(max_retries=3, timeout=0.2) def wr2(x): x = raising_func(x) return x def raising_func2(x): if x < 2: raise Exception('test exception') return x @timedRetryOnExcept(ex=NicosError, timeout=0.2) def wr3(x): x = raising_func2(x) return x # Make sure we get the inner error in case of too many retries x = 0 assert raises(NicosError, wr, x) # Make sure we get success if inner succeeds x = 2 ret = wr2(x) assert ret == 3 # assert we get x = 0 assert raises(Exception, wr3, x) assert x == 0 @pytest.fixture() def serversocket(): """create a server socket""" serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: serv.bind(('localhost', 65432)) serv.listen(10) except Exception: pytest.skip('could not bind') yield serv closeSocket(serv) def test_tcpsocket(serversocket): sock = None sockargs = [ ('localhost:65432', 1), ('localhost', 65432), (('localhost', 65432), 1, dict(timeout=5)) ] for args in sockargs: try: if len(args) == 3: 
kwds = args[2] args = args[:2] sock = tcpSocket(*args, **kwds) else: sock = tcpSocket(*args) finally: if sock: closeSocket(sock) def test_timer(): t = monotonic() def cb(tmr, x, y=None): if x == 3 and y == 'ykwd': tmr.cb_called = True def nf(tmr, info): if info == 'notify': tmr.notify_called = True def nf1(tmr): tmr.notify_called = 'yes' # a) test a (short) timed timer tmr = Timer(0.1, cb, cb_args=(3,), cb_kwds={'y': 'ykwd'}) assert tmr.is_running() while (monotonic() - t < 1.5) and tmr.is_running(): sleep(0.05) assert not tmr.is_running() assert tmr.cb_called assert tmr.elapsed_time() == 0.1 assert tmr.remaining_time() == 0 tmr.restart() # timer timed out, can not restart assert not tmr.is_running() # b) test an unlimited timer (for a short while) tmr.start() sleep(0.02) # due to windows time() resolution assert tmr.is_running() assert tmr.elapsed_time() > 0 assert tmr.remaining_time() is None sleep(0.1) assert 0.1 < tmr.elapsed_time() < 0.2 tmr.stop() # check elapsed time for stopped timer assert 0.1 < tmr.elapsed_time() < 0.2 assert not(tmr.is_running()) assert not(tmr.wait()) # c) stopping before timeout and then restart tmr.restart() tmr.restart() tmr.start(run_for=0.5) sleep(0.1) tmr.stop() tmr.restart() tmr.wait(interval=0.1, notify_func=nf, notify_args=('notify',)) assert tmr.notify_called tmr.start(run_for=0.5) tmr.wait(0.1, nf1) assert tmr.notify_called == 'yes' def test_num_sort(): # purely alpha keys assert sorted(['a', 'c', 'b'], key=num_sort) == ['a', 'b', 'c'] # mixed with floats assert sorted(['X', '12A', '2.4B'], key=num_sort) == ['2.4B', '12A', 'X'] # also negative ones assert sorted(['X', '-1', '2'], key=num_sort) == ['-1', '2', 'X'] # handle invalid floats assert sorted(['X', '1A', '2.4.5A'], key=num_sort) == ['1A', '2.4.5A', 'X'] # handle non-strings too assert sorted([0.4, '1A'], key=num_sort) == [0.4, '1A'] # setupspec : loaded_setups : result CASES = [ (None, None, True), (None, ['a', 'b', 'c'], True), ('a', ['a', 'b', 'c'], True), ('a and 
d', ['a', 'b', 'c'], False), ('a and b', ['a', 'b', 'c'], True), ('a or d', ['a', 'b', 'c'], True), ('a or b', ['a'], True), ('a or b', ['b'], True), ('a or b', [], False), ('a or b', ['c'], False), ('a*', ['alpha', 'b'], True), ('c*', ['alpha', 'b'], False), ('c-d*', ['c-de'], True), ('(b and not (c or h)', ['b'], True), ('(b and not (c or h))', ['b', 'c'], False), ('(b and not (c or h))', ['b', 'h'], False), ('(b and not (c or h))', ['b', 'c', 'h'], False), ('(b and not (c or h))', [], False), ('(b and not (c or h))', ['h'], False), ('(b and not (c or h))', ['h', 'c'], False), ('a and', ['b'], True), # warns ('a?', ['a1'], True), ('a? or c', ['c'], True), ('a?', ['a12', 'a2'], True), ('a?', ['a12', 'a34'], False), ] OLDSTYLE_CASES = [ # old style cases (['a'], ['a', 'b', 'c'], True), ('!a', ['a', 'b', 'c'], True), (['!a'], ['a', 'b', 'c'], True), (['a', 'd'], ['a', 'b', 'c'], True), (['d'], ['a', 'b', 'c'], True), (['!d'], ['a', 'b', 'c'], True), (['a', '!d'], ['a', 'b', 'c'], True), (['!a', 'd'], ['a', 'b', 'c'], True), (['!a', '!d'], ['a', 'b', 'c'], True), ] def test_check_setup_spec(): for spec, setups, result in CASES + OLDSTYLE_CASES: # print is here to aid in finding the offending input parameters # as the stacktrace doesn't output locals res = checkSetupSpec(spec, setups) print('testing checkSetupSpec(%r, %r) == %r: %r' % (spec, setups, result, res)) assert res == result def test_parse_key_expression(): assert parseKeyExpression('dev.key')[0] == 'dev/key' assert parseKeyExpression('dev.key', normalize=lambda s: s)[0] == \ 'dev.key/value' assert parseKeyExpression('dev.key', False, normalize=lambda s: s)[0] == \ 'dev.key' key, expr, _ = parseKeyExpression('dev + 1') assert key == 'dev/value' assert eval(expr, {}, {'x': 42}) == 43 _, expr, _ = parseKeyExpression('100/(dev/key)/10') assert eval(expr, {}, {'x': 2}) == 5 _, expr, _ = parseKeyExpression('sqrt(key)') assert eval(expr, KEYEXPR_NS, {'x': 25}) == 5 _, _, descs = parseKeyExpression('a/b, c.d*2, 
sqrt(e)', multiple=True) assert descs == [ 'a/b', 'c.d*2', 'sqrt(e)' ] def test_squeeze(): assert isinstance(squeeze([]), list) assert isinstance(squeeze(tuple()), tuple) assert squeeze((256, 256, 1)) == (256, 256) assert squeeze((1, 10, 1)) == (10, ) assert squeeze((1, 1, 1), 2) == (1, 1) assert squeeze((1, 10, 1), -1) == (1, 10) assert squeeze((1, 10, 1), 1) == (1, 10) assert squeeze((1, ), 2) == (1, ) # n > len(shape) @pytest.mark.parametrize('maxbackup', [2, None, 0]) def test_moveOutOfWay(tmpdir, maxbackup): i = 0 fn1 = str(tmpdir.join('test1')) while i < 3: with open(fn1, 'w', encoding='utf-8') as fp: fp.write('Test %r %i' % (maxbackup, i)) moveOutOfWay(fn1, maxbackup) i += 1 files = [f for f in os.listdir(str(tmpdir)) if f.startswith('test1')] assert 'test1' not in files assert len(files) == maxbackup if maxbackup is not None else 3 @pytest.mark.parametrize('inp,expected', [ ['1d:2h:3m:14s', 93794], ['1d2h 3m 14s', 93794], ['1d :2h: 3m : 14s ', 93794], ['1day 2hr 2min 74sec', 93794], ['5days', 5*86400], [93794, 93794], ['0.5h', 1800], [1.0, 1.0], ['2.0', 2.0], ['1h:0.005s', 3600.005], [timedelta(hours=2), 7200], ['-20s', -20], ['+30m', 1800], ['-2500', -2500], [-50, -50], [-2.71828, -2.71828], ]) def test_parse_duration(inp, expected): assert parseDuration(inp, allownegative=True) == expected @pytest.mark.parametrize('inp', [ '1m3d', '1d::3m', '1d:3m jad', '42secop', '-2m', -50, '-3.1415', '+-5d', ]) def test_parse_duration_parse_errors(inp): assert raises(ValueError, parseDuration, inp) @pytest.mark.parametrize('inp', [ [1, 2, 3], {'days': 5}, ]) def test_parse_duration_type_errors(inp): assert raises(TypeError, parseDuration, inp) def test_all_days(): # 25. Oct 2020 switch CEST -> CET exp_list = [('2020', '10-24'), ('2020', '10-25'), ('2020', '10-26'), ('2020', '10-27'), ('2020', '10-28')] # 28. 
Oct 2020, 08:00:00 tmto = mktime((2020, 10, 28, 8, 0, 0, 0, 0, -1)) assert list(allDays(tmto - 86400, tmto)) == exp_list[3:] assert list(allDays(tmto - 2 * 86400, tmto)) == exp_list[2:] assert list(allDays(tmto - 3 * 86400, tmto)) == exp_list[1:] assert list(allDays(tmto - 4 * 86400, tmto)) == exp_list # 28. Mar 2020 switch CET -> CEST exp_list = [('2020', '03-27'), ('2020', '03-28'), ('2020', '03-29'), ('2020', '03-30'), ('2020', '03-31')] # 31. Mar 2020, 08:00:00 tmto = mktime((2020, 3, 31, 8, 0, 0, 0, 0, 1)) assert list(allDays(tmto - 86400, tmto)) == exp_list[3:] assert list(allDays(tmto - 2 * 86400, tmto)) == exp_list[2:] assert list(allDays(tmto - 3 * 86400, tmto)) == exp_list[1:] assert list(allDays(tmto - 4 * 86400, tmto)) == exp_list @pytest.mark.parametrize('name', [('COM1', '_COM1_'), ('xyz.txt', 'xyz.txt')]) def test_safeName(name): assert safeName(name[0]) == name[1] @pytest.mark.parametrize('maxbackup', [2, None, 0]) @pytest.mark.parametrize('content', ['XXXXX', ['XXXX\n', 'YYYYY\n']]) def test_safeWriteFile(tmpdir, maxbackup, content): i = 0 fn1 = str(tmpdir.join('test1')) while i < 3: safeWriteFile(fn1, content, maxbackups=maxbackup) i += 1 files = [f for f in os.listdir(str(tmpdir)) if f.startswith('test1')] assert 'test1' in files assert len(files) == maxbackup + 1 if maxbackup is not None else 4 with open(fn1, encoding='utf-8') as fp: if isinstance(content, list): assert len(fp.readlines()) == len(content) else: assert len(fp.read()) == len(content) def test_tupelize(): ilist = ['a', 1, 'b', 2, 'c', 3] assert list(tupelize(ilist)) == [('a', 1), ('b', 2), ('c', 3)] assert list(tupelize(ilist[:3])) == [('a', 1)] assert list(tupelize(ilist, 3)) == [('a', 1, 'b'), (2, 'c', 3)] assert list(tupelize(ilist[:4], 3)) == [('a', 1, 'b')] @pytest.fixture(scope='function') def nonexistantfile(tmpdir): fc1 = str(tmpdir.join('testcounter1')) try: os.unlink(fc1) except FileNotFoundError: pass yield fc1 try: os.unlink(fc1) except FileNotFoundError: pass 
@pytest.fixture(scope='function') def filecounterfile(tmpdir): fc = str(tmpdir.join('testcounter2')) with open(fc, 'w', encoding='utf-8') as f: f.write('key1 1234\n') f.write('key2 5678\n') yield fc os.unlink(fc) def test_readfilecounter_exist(filecounterfile): assert readFileCounter(filecounterfile, 'key1') == 1234 assert readFileCounter(filecounterfile, 'key3') == 0 def test_readfilecounter_nofile(nonexistantfile): assert readFileCounter(nonexistantfile, 'key') == 0 assert os.path.exists(nonexistantfile) def test_updatefilecounter_exist(filecounterfile): updateFileCounter(filecounterfile, 'key1', 222) assert readFileCounter(filecounterfile, 'key1') == 222 assert readFileCounter(filecounterfile, 'key2') == 5678 def test_updatefilecounter_nofile(nonexistantfile): updateFileCounter(nonexistantfile, 'key', 9876) assert os.path.exists(nonexistantfile) assert readFileCounter(nonexistantfile, 'key') == 9876 def test_simclock(): clock = SimClock() # unconditional tick clock.tick(2) assert clock.time == 2 # conditional waits, the maximum wins clock.wait(5) clock.wait(7) assert clock.time == 7 clock.reset() assert clock.time == 0
<filename>test/test_basic/test_utils.py # -*- coding: utf-8 -*- # ***************************************************************************** # NICOS, the Networked Instrument Control System of the MLZ # Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS) # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Module authors: # <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # # ***************************************************************************** """NICOS tests for some utility modules.""" import os import pickle import socket import sys from datetime import timedelta from time import mktime, monotonic, sleep import pytest from nicos.core.errors import NicosError from nicos.core.sessions.utils import SimClock from nicos.utils import KEYEXPR_NS, TB_CAUSE_MSG, Repeater, allDays, \ bitDescription, checkSetupSpec, chunks, closeSocket, comparestrings, \ formatDuration, formatExtendedFrame, formatExtendedStack, \ formatExtendedTraceback, lazy_property, moveOutOfWay, num_sort, \ parseConnectionString, parseDuration, parseKeyExpression, \ readFileCounter, readonlydict, readonlylist, safeName, safeWriteFile, \ squeeze, tcpSocket, timedRetryOnExcept, tupelize, updateFileCounter from nicos.utils.timer import Timer from test.utils import raises def test_lazy_property(): asked = [] class P: @lazy_property def prop(self): 
asked.append('x') return 'ok' p = P() assert p.prop == 'ok' assert p.prop == 'ok' # ask twice! assert len(asked) == 1 # but getter only called once def test_readonly_objects(): d = readonlydict({'a': 1, 'b': 2}) assert raises(TypeError, d.update, {}) # pickle Protocoll 0 unpickled = pickle.loads(pickle.dumps(d)) assert isinstance(unpickled, readonlydict) assert len(unpickled) == 2 # pickle Protocoll 2 unpickled = pickle.loads(pickle.dumps(d, 2)) assert isinstance(unpickled, readonlydict) assert len(unpickled) == 2 lst = readonlylist([1, 2, 3]) assert raises(TypeError, lst.append, 4) # pickle Protocoll 0 unpickled = pickle.loads(pickle.dumps(lst)) assert isinstance(unpickled, readonlylist) assert len(unpickled) == 3 # pickle Protocoll 2 unpickled = pickle.loads(pickle.dumps(lst, 2)) assert isinstance(unpickled, readonlylist) assert len(unpickled) == 3 def test_readonlylist_hashable(): lst = readonlylist([1, 2, 3]) assert lst == [1, 2, 3] dt = {lst: 'testval'} assert dt[readonlylist([1, 2, 3])] == 'testval' def test_repeater(): r = Repeater(1) it = iter(r) assert next(it) == 1 assert next(it) == 1 assert r[23] == 1 def test_functions(): assert formatDuration(1) == '1 second' assert formatDuration(4) == '4 seconds' assert formatDuration(154, precise=False) == '3 min' assert formatDuration(154, precise=True) == '2 min, 34 sec' assert formatDuration(7199) == '2 h, 0 min' assert formatDuration(3700) == '1 h, 2 min' assert formatDuration(24 * 3600 + 7240, precise=False) == '1 day, 2 h' assert formatDuration(48 * 3600 - 1) == '2 days, 0 h' assert bitDescription(0x5, (0, 'a'), (1, 'b', 'c'), (2, 'd', 'e')) == 'a, c, d' assert parseConnectionString('<EMAIL>:pass@host:1301', 1302) == \ {'user': '<EMAIL>', 'password': '<PASSWORD>', 'host': 'host', 'port': 1301} assert parseConnectionString('user:@host', 1302) == \ {'user': 'user', 'password': '', 'host': 'host', 'port': 1302} assert parseConnectionString('user@host:1301', 1302) == \ {'user': 'user', 'password': None, 'host': 
'host', 'port': 1301} assert parseConnectionString('user@ho-st:1301', 1302) == \ {'user': 'user', 'password': None, 'host': 'ho-st', 'port': 1301} assert parseConnectionString('', 1302) is None assert parseConnectionString('host?', 1302) is None assert [tuple(x) for x in chunks(range(10), 3)] == \ [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9,)] def test_traceback(): a = 1 # pylint: disable=unused-variable f = sys._getframe() fmt = formatExtendedFrame(f) assert any('a = 1' in line for line in fmt) try: try: 1 / 0 except ZeroDivisionError as err: raise RuntimeError from err except Exception: ei = sys.exc_info() tb = formatExtendedTraceback(ei[1]) assert 'ZeroDivisionError' in tb assert 'RuntimeError' in tb assert TB_CAUSE_MSG in tb assert ', in test_traceback' in tb st = formatExtendedStack() assert ', in test_traceback' in st def test_comparestrings(): comparestrings.test() def test_retryOnExcept(): def raising_func(x): x += 1 if x < 2: raise NicosError return x @timedRetryOnExcept(timeout=0.2) def wr(x): x = raising_func(x) return x @timedRetryOnExcept(max_retries=3, timeout=0.2) def wr2(x): x = raising_func(x) return x def raising_func2(x): if x < 2: raise Exception('test exception') return x @timedRetryOnExcept(ex=NicosError, timeout=0.2) def wr3(x): x = raising_func2(x) return x # Make sure we get the inner error in case of too many retries x = 0 assert raises(NicosError, wr, x) # Make sure we get success if inner succeeds x = 2 ret = wr2(x) assert ret == 3 # assert we get x = 0 assert raises(Exception, wr3, x) assert x == 0 @pytest.fixture() def serversocket(): """create a server socket""" serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: serv.bind(('localhost', 65432)) serv.listen(10) except Exception: pytest.skip('could not bind') yield serv closeSocket(serv) def test_tcpsocket(serversocket): sock = None sockargs = [ ('localhost:65432', 1), ('localhost', 65432), (('localhost', 65432), 1, dict(timeout=5)) ] for args in sockargs: try: if len(args) == 3: 
kwds = args[2] args = args[:2] sock = tcpSocket(*args, **kwds) else: sock = tcpSocket(*args) finally: if sock: closeSocket(sock) def test_timer(): t = monotonic() def cb(tmr, x, y=None): if x == 3 and y == 'ykwd': tmr.cb_called = True def nf(tmr, info): if info == 'notify': tmr.notify_called = True def nf1(tmr): tmr.notify_called = 'yes' # a) test a (short) timed timer tmr = Timer(0.1, cb, cb_args=(3,), cb_kwds={'y': 'ykwd'}) assert tmr.is_running() while (monotonic() - t < 1.5) and tmr.is_running(): sleep(0.05) assert not tmr.is_running() assert tmr.cb_called assert tmr.elapsed_time() == 0.1 assert tmr.remaining_time() == 0 tmr.restart() # timer timed out, can not restart assert not tmr.is_running() # b) test an unlimited timer (for a short while) tmr.start() sleep(0.02) # due to windows time() resolution assert tmr.is_running() assert tmr.elapsed_time() > 0 assert tmr.remaining_time() is None sleep(0.1) assert 0.1 < tmr.elapsed_time() < 0.2 tmr.stop() # check elapsed time for stopped timer assert 0.1 < tmr.elapsed_time() < 0.2 assert not(tmr.is_running()) assert not(tmr.wait()) # c) stopping before timeout and then restart tmr.restart() tmr.restart() tmr.start(run_for=0.5) sleep(0.1) tmr.stop() tmr.restart() tmr.wait(interval=0.1, notify_func=nf, notify_args=('notify',)) assert tmr.notify_called tmr.start(run_for=0.5) tmr.wait(0.1, nf1) assert tmr.notify_called == 'yes' def test_num_sort(): # purely alpha keys assert sorted(['a', 'c', 'b'], key=num_sort) == ['a', 'b', 'c'] # mixed with floats assert sorted(['X', '12A', '2.4B'], key=num_sort) == ['2.4B', '12A', 'X'] # also negative ones assert sorted(['X', '-1', '2'], key=num_sort) == ['-1', '2', 'X'] # handle invalid floats assert sorted(['X', '1A', '2.4.5A'], key=num_sort) == ['1A', '2.4.5A', 'X'] # handle non-strings too assert sorted([0.4, '1A'], key=num_sort) == [0.4, '1A'] # setupspec : loaded_setups : result CASES = [ (None, None, True), (None, ['a', 'b', 'c'], True), ('a', ['a', 'b', 'c'], True), ('a and 
d', ['a', 'b', 'c'], False), ('a and b', ['a', 'b', 'c'], True), ('a or d', ['a', 'b', 'c'], True), ('a or b', ['a'], True), ('a or b', ['b'], True), ('a or b', [], False), ('a or b', ['c'], False), ('a*', ['alpha', 'b'], True), ('c*', ['alpha', 'b'], False), ('c-d*', ['c-de'], True), ('(b and not (c or h)', ['b'], True), ('(b and not (c or h))', ['b', 'c'], False), ('(b and not (c or h))', ['b', 'h'], False), ('(b and not (c or h))', ['b', 'c', 'h'], False), ('(b and not (c or h))', [], False), ('(b and not (c or h))', ['h'], False), ('(b and not (c or h))', ['h', 'c'], False), ('a and', ['b'], True), # warns ('a?', ['a1'], True), ('a? or c', ['c'], True), ('a?', ['a12', 'a2'], True), ('a?', ['a12', 'a34'], False), ] OLDSTYLE_CASES = [ # old style cases (['a'], ['a', 'b', 'c'], True), ('!a', ['a', 'b', 'c'], True), (['!a'], ['a', 'b', 'c'], True), (['a', 'd'], ['a', 'b', 'c'], True), (['d'], ['a', 'b', 'c'], True), (['!d'], ['a', 'b', 'c'], True), (['a', '!d'], ['a', 'b', 'c'], True), (['!a', 'd'], ['a', 'b', 'c'], True), (['!a', '!d'], ['a', 'b', 'c'], True), ] def test_check_setup_spec(): for spec, setups, result in CASES + OLDSTYLE_CASES: # print is here to aid in finding the offending input parameters # as the stacktrace doesn't output locals res = checkSetupSpec(spec, setups) print('testing checkSetupSpec(%r, %r) == %r: %r' % (spec, setups, result, res)) assert res == result def test_parse_key_expression(): assert parseKeyExpression('dev.key')[0] == 'dev/key' assert parseKeyExpression('dev.key', normalize=lambda s: s)[0] == \ 'dev.key/value' assert parseKeyExpression('dev.key', False, normalize=lambda s: s)[0] == \ 'dev.key' key, expr, _ = parseKeyExpression('dev + 1') assert key == 'dev/value' assert eval(expr, {}, {'x': 42}) == 43 _, expr, _ = parseKeyExpression('100/(dev/key)/10') assert eval(expr, {}, {'x': 2}) == 5 _, expr, _ = parseKeyExpression('sqrt(key)') assert eval(expr, KEYEXPR_NS, {'x': 25}) == 5 _, _, descs = parseKeyExpression('a/b, c.d*2, 
sqrt(e)', multiple=True) assert descs == [ 'a/b', 'c.d*2', 'sqrt(e)' ] def test_squeeze(): assert isinstance(squeeze([]), list) assert isinstance(squeeze(tuple()), tuple) assert squeeze((256, 256, 1)) == (256, 256) assert squeeze((1, 10, 1)) == (10, ) assert squeeze((1, 1, 1), 2) == (1, 1) assert squeeze((1, 10, 1), -1) == (1, 10) assert squeeze((1, 10, 1), 1) == (1, 10) assert squeeze((1, ), 2) == (1, ) # n > len(shape) @pytest.mark.parametrize('maxbackup', [2, None, 0]) def test_moveOutOfWay(tmpdir, maxbackup): i = 0 fn1 = str(tmpdir.join('test1')) while i < 3: with open(fn1, 'w', encoding='utf-8') as fp: fp.write('Test %r %i' % (maxbackup, i)) moveOutOfWay(fn1, maxbackup) i += 1 files = [f for f in os.listdir(str(tmpdir)) if f.startswith('test1')] assert 'test1' not in files assert len(files) == maxbackup if maxbackup is not None else 3 @pytest.mark.parametrize('inp,expected', [ ['1d:2h:3m:14s', 93794], ['1d2h 3m 14s', 93794], ['1d :2h: 3m : 14s ', 93794], ['1day 2hr 2min 74sec', 93794], ['5days', 5*86400], [93794, 93794], ['0.5h', 1800], [1.0, 1.0], ['2.0', 2.0], ['1h:0.005s', 3600.005], [timedelta(hours=2), 7200], ['-20s', -20], ['+30m', 1800], ['-2500', -2500], [-50, -50], [-2.71828, -2.71828], ]) def test_parse_duration(inp, expected): assert parseDuration(inp, allownegative=True) == expected @pytest.mark.parametrize('inp', [ '1m3d', '1d::3m', '1d:3m jad', '42secop', '-2m', -50, '-3.1415', '+-5d', ]) def test_parse_duration_parse_errors(inp): assert raises(ValueError, parseDuration, inp) @pytest.mark.parametrize('inp', [ [1, 2, 3], {'days': 5}, ]) def test_parse_duration_type_errors(inp): assert raises(TypeError, parseDuration, inp) def test_all_days(): # 25. Oct 2020 switch CEST -> CET exp_list = [('2020', '10-24'), ('2020', '10-25'), ('2020', '10-26'), ('2020', '10-27'), ('2020', '10-28')] # 28. 
Oct 2020, 08:00:00 tmto = mktime((2020, 10, 28, 8, 0, 0, 0, 0, -1)) assert list(allDays(tmto - 86400, tmto)) == exp_list[3:] assert list(allDays(tmto - 2 * 86400, tmto)) == exp_list[2:] assert list(allDays(tmto - 3 * 86400, tmto)) == exp_list[1:] assert list(allDays(tmto - 4 * 86400, tmto)) == exp_list # 28. Mar 2020 switch CET -> CEST exp_list = [('2020', '03-27'), ('2020', '03-28'), ('2020', '03-29'), ('2020', '03-30'), ('2020', '03-31')] # 31. Mar 2020, 08:00:00 tmto = mktime((2020, 3, 31, 8, 0, 0, 0, 0, 1)) assert list(allDays(tmto - 86400, tmto)) == exp_list[3:] assert list(allDays(tmto - 2 * 86400, tmto)) == exp_list[2:] assert list(allDays(tmto - 3 * 86400, tmto)) == exp_list[1:] assert list(allDays(tmto - 4 * 86400, tmto)) == exp_list @pytest.mark.parametrize('name', [('COM1', '_COM1_'), ('xyz.txt', 'xyz.txt')]) def test_safeName(name): assert safeName(name[0]) == name[1] @pytest.mark.parametrize('maxbackup', [2, None, 0]) @pytest.mark.parametrize('content', ['XXXXX', ['XXXX\n', 'YYYYY\n']]) def test_safeWriteFile(tmpdir, maxbackup, content): i = 0 fn1 = str(tmpdir.join('test1')) while i < 3: safeWriteFile(fn1, content, maxbackups=maxbackup) i += 1 files = [f for f in os.listdir(str(tmpdir)) if f.startswith('test1')] assert 'test1' in files assert len(files) == maxbackup + 1 if maxbackup is not None else 4 with open(fn1, encoding='utf-8') as fp: if isinstance(content, list): assert len(fp.readlines()) == len(content) else: assert len(fp.read()) == len(content) def test_tupelize(): ilist = ['a', 1, 'b', 2, 'c', 3] assert list(tupelize(ilist)) == [('a', 1), ('b', 2), ('c', 3)] assert list(tupelize(ilist[:3])) == [('a', 1)] assert list(tupelize(ilist, 3)) == [('a', 1, 'b'), (2, 'c', 3)] assert list(tupelize(ilist[:4], 3)) == [('a', 1, 'b')] @pytest.fixture(scope='function') def nonexistantfile(tmpdir): fc1 = str(tmpdir.join('testcounter1')) try: os.unlink(fc1) except FileNotFoundError: pass yield fc1 try: os.unlink(fc1) except FileNotFoundError: pass 
@pytest.fixture(scope='function') def filecounterfile(tmpdir): fc = str(tmpdir.join('testcounter2')) with open(fc, 'w', encoding='utf-8') as f: f.write('key1 1234\n') f.write('key2 5678\n') yield fc os.unlink(fc) def test_readfilecounter_exist(filecounterfile): assert readFileCounter(filecounterfile, 'key1') == 1234 assert readFileCounter(filecounterfile, 'key3') == 0 def test_readfilecounter_nofile(nonexistantfile): assert readFileCounter(nonexistantfile, 'key') == 0 assert os.path.exists(nonexistantfile) def test_updatefilecounter_exist(filecounterfile): updateFileCounter(filecounterfile, 'key1', 222) assert readFileCounter(filecounterfile, 'key1') == 222 assert readFileCounter(filecounterfile, 'key2') == 5678 def test_updatefilecounter_nofile(nonexistantfile): updateFileCounter(nonexistantfile, 'key', 9876) assert os.path.exists(nonexistantfile) assert readFileCounter(nonexistantfile, 'key') == 9876 def test_simclock(): clock = SimClock() # unconditional tick clock.tick(2) assert clock.time == 2 # conditional waits, the maximum wins clock.wait(5) clock.wait(7) assert clock.time == 7 clock.reset() assert clock.time == 0
en
0.766134
# -*- coding: utf-8 -*- # ***************************************************************************** # NICOS, the Networked Instrument Control System of the MLZ # Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS) # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Module authors: # <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # # ***************************************************************************** NICOS tests for some utility modules. # ask twice! # but getter only called once # pickle Protocoll 0 # pickle Protocoll 2 # pickle Protocoll 0 # pickle Protocoll 2 # pylint: disable=unused-variable # Make sure we get the inner error in case of too many retries # Make sure we get success if inner succeeds # assert we get create a server socket # a) test a (short) timed timer # timer timed out, can not restart # b) test an unlimited timer (for a short while) # due to windows time() resolution # check elapsed time for stopped timer # c) stopping before timeout and then restart # purely alpha keys # mixed with floats # also negative ones # handle invalid floats # handle non-strings too # setupspec : loaded_setups : result # warns # old style cases # print is here to aid in finding the offending input parameters # as the stacktrace doesn't output locals # n > len(shape) # 25. Oct 2020 switch CEST -> CET # 28. 
Oct 2020, 08:00:00 # 28. Mar 2020 switch CET -> CEST # 31. Mar 2020, 08:00:00 # unconditional tick # conditional waits, the maximum wins
1.795139
2
tests/test_invoice.py
jordi2326/RaccBoot
0
6628708
<filename>tests/test_invoice.py #!/usr/bin/env python # # A library that provides a Python interface to the Telegram Bot API # Copyright (C) 2015-2020 # <NAME> <<EMAIL>> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser Public License for more details. # # You should have received a copy of the GNU Lesser Public License # along with this program. If not, see [http://www.gnu.org/licenses/]. import pytest from flaky import flaky from telegram import LabeledPrice, Invoice @pytest.fixture(scope='class') def invoice(): return Invoice(TestInvoice.title, TestInvoice.description, TestInvoice.start_parameter, TestInvoice.currency, TestInvoice.total_amount) class TestInvoice(object): payload = 'payload' prices = [LabeledPrice('Fish', 100), LabeledPrice('Fish Tax', 1000)] provider_data = """{"test":"test"}""" title = 'title' description = 'description' start_parameter = 'start_parameter' currency = 'EUR' total_amount = sum([p.amount for p in prices]) def test_de_json(self, bot): invoice_json = Invoice.de_json({ 'title': TestInvoice.title, 'description': TestInvoice.description, 'start_parameter': TestInvoice.start_parameter, 'currency': TestInvoice.currency, 'total_amount': TestInvoice.total_amount }, bot) assert invoice_json.title == self.title assert invoice_json.description == self.description assert invoice_json.start_parameter == self.start_parameter assert invoice_json.currency == self.currency assert invoice_json.total_amount == self.total_amount def test_to_dict(self, invoice): invoice_dict = invoice.to_dict() assert isinstance(invoice_dict, dict) assert 
invoice_dict['title'] == invoice.title assert invoice_dict['description'] == invoice.description assert invoice_dict['start_parameter'] == invoice.start_parameter assert invoice_dict['currency'] == invoice.currency assert invoice_dict['total_amount'] == invoice.total_amount @flaky(3, 1) @pytest.mark.timeout(10) def test_send_required_args_only(self, bot, chat_id, provider_token): message = bot.send_invoice(chat_id, self.title, self.description, self.payload, provider_token, self.start_parameter, self.currency, self.prices) assert message.invoice.currency == self.currency assert message.invoice.start_parameter == self.start_parameter assert message.invoice.description == self.description assert message.invoice.title == self.title assert message.invoice.total_amount == self.total_amount @flaky(3, 1) @pytest.mark.timeout(10) def test_send_all_args(self, bot, chat_id, provider_token): message = bot.send_invoice( chat_id, self.title, self.description, self.payload, provider_token, self.start_parameter, self.currency, self.prices, provider_data=self.provider_data, photo_url='https://raw.githubusercontent.com/' 'python-telegram-bot/logos/master/' 'logo/png/ptb-logo_240.png', photo_size=240, photo_width=240, photo_height=240, need_name=True, need_phone_number=True, need_email=True, need_shipping_address=True, send_phone_number_to_provider=True, send_email_to_provider=True, is_flexible=True) assert message.invoice.currency == self.currency assert message.invoice.start_parameter == self.start_parameter assert message.invoice.description == self.description assert message.invoice.title == self.title assert message.invoice.total_amount == self.total_amount def test_send_object_as_provider_data(self, monkeypatch, bot, chat_id, provider_token): def test(_, url, data, **kwargs): return (data['provider_data'] == '{"test_data": 123456789}' # Depends if using or data['provider_data'] == '{"test_data":123456789}') # ujson or not 
monkeypatch.setattr('telegram.utils.request.Request.post', test) assert bot.send_invoice(chat_id, self.title, self.description, self.payload, provider_token, self.start_parameter, self.currency, self.prices, provider_data={'test_data': 123456789})
<filename>tests/test_invoice.py #!/usr/bin/env python # # A library that provides a Python interface to the Telegram Bot API # Copyright (C) 2015-2020 # <NAME> <<EMAIL>> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser Public License for more details. # # You should have received a copy of the GNU Lesser Public License # along with this program. If not, see [http://www.gnu.org/licenses/]. import pytest from flaky import flaky from telegram import LabeledPrice, Invoice @pytest.fixture(scope='class') def invoice(): return Invoice(TestInvoice.title, TestInvoice.description, TestInvoice.start_parameter, TestInvoice.currency, TestInvoice.total_amount) class TestInvoice(object): payload = 'payload' prices = [LabeledPrice('Fish', 100), LabeledPrice('Fish Tax', 1000)] provider_data = """{"test":"test"}""" title = 'title' description = 'description' start_parameter = 'start_parameter' currency = 'EUR' total_amount = sum([p.amount for p in prices]) def test_de_json(self, bot): invoice_json = Invoice.de_json({ 'title': TestInvoice.title, 'description': TestInvoice.description, 'start_parameter': TestInvoice.start_parameter, 'currency': TestInvoice.currency, 'total_amount': TestInvoice.total_amount }, bot) assert invoice_json.title == self.title assert invoice_json.description == self.description assert invoice_json.start_parameter == self.start_parameter assert invoice_json.currency == self.currency assert invoice_json.total_amount == self.total_amount def test_to_dict(self, invoice): invoice_dict = invoice.to_dict() assert isinstance(invoice_dict, dict) assert 
invoice_dict['title'] == invoice.title assert invoice_dict['description'] == invoice.description assert invoice_dict['start_parameter'] == invoice.start_parameter assert invoice_dict['currency'] == invoice.currency assert invoice_dict['total_amount'] == invoice.total_amount @flaky(3, 1) @pytest.mark.timeout(10) def test_send_required_args_only(self, bot, chat_id, provider_token): message = bot.send_invoice(chat_id, self.title, self.description, self.payload, provider_token, self.start_parameter, self.currency, self.prices) assert message.invoice.currency == self.currency assert message.invoice.start_parameter == self.start_parameter assert message.invoice.description == self.description assert message.invoice.title == self.title assert message.invoice.total_amount == self.total_amount @flaky(3, 1) @pytest.mark.timeout(10) def test_send_all_args(self, bot, chat_id, provider_token): message = bot.send_invoice( chat_id, self.title, self.description, self.payload, provider_token, self.start_parameter, self.currency, self.prices, provider_data=self.provider_data, photo_url='https://raw.githubusercontent.com/' 'python-telegram-bot/logos/master/' 'logo/png/ptb-logo_240.png', photo_size=240, photo_width=240, photo_height=240, need_name=True, need_phone_number=True, need_email=True, need_shipping_address=True, send_phone_number_to_provider=True, send_email_to_provider=True, is_flexible=True) assert message.invoice.currency == self.currency assert message.invoice.start_parameter == self.start_parameter assert message.invoice.description == self.description assert message.invoice.title == self.title assert message.invoice.total_amount == self.total_amount def test_send_object_as_provider_data(self, monkeypatch, bot, chat_id, provider_token): def test(_, url, data, **kwargs): return (data['provider_data'] == '{"test_data": 123456789}' # Depends if using or data['provider_data'] == '{"test_data":123456789}') # ujson or not 
monkeypatch.setattr('telegram.utils.request.Request.post', test) assert bot.send_invoice(chat_id, self.title, self.description, self.payload, provider_token, self.start_parameter, self.currency, self.prices, provider_data={'test_data': 123456789})
en
0.814639
#!/usr/bin/env python # # A library that provides a Python interface to the Telegram Bot API # Copyright (C) 2015-2020 # <NAME> <<EMAIL>> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser Public License for more details. # # You should have received a copy of the GNU Lesser Public License # along with this program. If not, see [http://www.gnu.org/licenses/]. {"test":"test"} # Depends if using # ujson or not
2.493558
2
libs/utils/analysis_module.py
ionela-voinescu/lisa
0
6628709
<filename>libs/utils/analysis_module.py # SPDX-License-Identifier: Apache-2.0 # # Copyright (C) 2015, ARM Limited and contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Helper module for Analysis classes """ class AnalysisModule(object): """ Base class for Analysis modules. :param trace: input Trace object :type trace: :mod:`libs.utils.Trace` """ def __init__(self, trace): self._trace = trace self._platform = trace.platform self._tasks = trace.tasks self._data_dir = trace.data_dir self._dfg_trace_event = trace._dfg_trace_event self._big_cap = self._platform['nrg_model']['big']['cpu']['cap_max'] self._little_cap = self._platform['nrg_model']['little']['cpu']['cap_max'] self._big_cpus = self._platform['clusters']['big'] self._little_cpus = self._platform['clusters']['little'] trace._registerDataFrameGetters(self) # vim :set tabstop=4 shiftwidth=4 expandtab
<filename>libs/utils/analysis_module.py # SPDX-License-Identifier: Apache-2.0 # # Copyright (C) 2015, ARM Limited and contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Helper module for Analysis classes """ class AnalysisModule(object): """ Base class for Analysis modules. :param trace: input Trace object :type trace: :mod:`libs.utils.Trace` """ def __init__(self, trace): self._trace = trace self._platform = trace.platform self._tasks = trace.tasks self._data_dir = trace.data_dir self._dfg_trace_event = trace._dfg_trace_event self._big_cap = self._platform['nrg_model']['big']['cpu']['cap_max'] self._little_cap = self._platform['nrg_model']['little']['cpu']['cap_max'] self._big_cpus = self._platform['clusters']['big'] self._little_cpus = self._platform['clusters']['little'] trace._registerDataFrameGetters(self) # vim :set tabstop=4 shiftwidth=4 expandtab
en
0.719889
# SPDX-License-Identifier: Apache-2.0 # # Copyright (C) 2015, ARM Limited and contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Helper module for Analysis classes Base class for Analysis modules. :param trace: input Trace object :type trace: :mod:`libs.utils.Trace` # vim :set tabstop=4 shiftwidth=4 expandtab
1.928074
2
examples/python/Advanced/remove_geometry.py
SBCV/Open3D
1
6628710
# Open3D: www.open3d.org # The MIT License (MIT) # See license file or visit www.open3d.org for details # examples/python/Advanced/remove_geometry.py import open3d as o3d import numpy as np import time import copy def visualize_non_blocking(vis, pcds): for pcd in pcds: vis.update_geometry(pcd) vis.poll_events() vis.update_renderer() pcd_orig = o3d.io.read_point_cloud("../../test_data/fragment.pcd") flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]] pcd_orig.transform(flip_transform) n_pcd = 5 pcds = [] for i in range(n_pcd): pcds.append(copy.deepcopy(pcd_orig)) trans = np.identity(4) trans[:3, 3] = [3 * i, 0, 0] pcds[i].transform(trans) vis = o3d.visualization.Visualizer() vis.create_window() start_time = time.time() added = [False] * n_pcd curr_sec = int(time.time() - start_time) prev_sec = curr_sec - 1 while True: curr_sec = int(time.time() - start_time) if curr_sec - prev_sec == 1: prev_sec = curr_sec for i in range(n_pcd): if curr_sec % (n_pcd * 2) == i and not added[i]: vis.add_geometry(pcds[i]) added[i] = True print("Adding %d" % i) if curr_sec % (n_pcd * 2) == (i + n_pcd) and added[i]: vis.remove_geometry(pcds[i]) added[i] = False print("Removing %d" % i) visualize_non_blocking(vis, pcds)
# Open3D: www.open3d.org # The MIT License (MIT) # See license file or visit www.open3d.org for details # examples/python/Advanced/remove_geometry.py import open3d as o3d import numpy as np import time import copy def visualize_non_blocking(vis, pcds): for pcd in pcds: vis.update_geometry(pcd) vis.poll_events() vis.update_renderer() pcd_orig = o3d.io.read_point_cloud("../../test_data/fragment.pcd") flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]] pcd_orig.transform(flip_transform) n_pcd = 5 pcds = [] for i in range(n_pcd): pcds.append(copy.deepcopy(pcd_orig)) trans = np.identity(4) trans[:3, 3] = [3 * i, 0, 0] pcds[i].transform(trans) vis = o3d.visualization.Visualizer() vis.create_window() start_time = time.time() added = [False] * n_pcd curr_sec = int(time.time() - start_time) prev_sec = curr_sec - 1 while True: curr_sec = int(time.time() - start_time) if curr_sec - prev_sec == 1: prev_sec = curr_sec for i in range(n_pcd): if curr_sec % (n_pcd * 2) == i and not added[i]: vis.add_geometry(pcds[i]) added[i] = True print("Adding %d" % i) if curr_sec % (n_pcd * 2) == (i + n_pcd) and added[i]: vis.remove_geometry(pcds[i]) added[i] = False print("Removing %d" % i) visualize_non_blocking(vis, pcds)
en
0.571822
# Open3D: www.open3d.org # The MIT License (MIT) # See license file or visit www.open3d.org for details # examples/python/Advanced/remove_geometry.py
2.359031
2
predict_utils.py
charlotteesavage/Image-Classifier-Project
0
6628711
<reponame>charlotteesavage/Image-Classifier-Project import time import PIL from PIL import Image import glob, os import sys import argparse import torch from torch import nn from torch import optim from torchvision import datasets, transforms, models from torch import tensor import torch.nn.functional as F import numpy as np #def checkpoint_load(filepath, architecture): def checkpoint_load(filepath): checkpoint = torch.load(filepath) # model_call = getattr(models, architecture) model = getattr(models, checkpoint['model']) classifier = nn.Sequential(nn.Linear(25088, checkpoint['hidden_units'][0]), nn.ReLU(), nn.Dropout(0.2), nn.Linear(checkpoint['hidden_units'][0], checkpoint['hidden_units'][1]), nn.ReLU(), nn.Dropout(0.2), nn.Linear(checkpoint['hidden_units'][1],checkpoint['hidden_units'][2]), nn.LogSoftmax(dim=1)) model.classifier = classifier for param in model.parameters(): param.requires_grad = False #load from checkpoint model.class_to_idx = checkpoint['class_to_idx'] model.load_state_dict(checkpoint['state_dict']) return model def process_image(image): image = Image.open(image) transformed_image = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) processed_image = transformed_image(image) return processed_image def predict(image_path, model, topk, device): ''' Predict the class (or classes) of an image using a trained deep learning model. 
''' if device == "cuda" and torch.cuda.is_available(): model.to("cuda") else: model.to("cpu") model.eval() img = process_image(image_path) img = img.unsqueeze_(0) img = img.float() with torch.no_grad(): if device == "cuda" and torch.cuda.is_available(): img.to("cuda") else: img.to("cpu") output = model.forward(img) prob = torch.exp(output) top_probs, top_indices = prob.topk(int(topk), dim=1) idx_to_class = {val: key for key, value in model.class_to_idx.items()} top_classes = [idx_to_class[idx] for idx in top_indices[0].tolist()] if device == 'cuda': top_prob, top_class = top_probs.cpu().numpy()[0], top_classes.cpu().numpy()[0] else: top_prob, top_class = top_probs.numpy()[0], top_classes.numpy()[0] return top_prob, top_class
import time import PIL from PIL import Image import glob, os import sys import argparse import torch from torch import nn from torch import optim from torchvision import datasets, transforms, models from torch import tensor import torch.nn.functional as F import numpy as np #def checkpoint_load(filepath, architecture): def checkpoint_load(filepath): checkpoint = torch.load(filepath) # model_call = getattr(models, architecture) model = getattr(models, checkpoint['model']) classifier = nn.Sequential(nn.Linear(25088, checkpoint['hidden_units'][0]), nn.ReLU(), nn.Dropout(0.2), nn.Linear(checkpoint['hidden_units'][0], checkpoint['hidden_units'][1]), nn.ReLU(), nn.Dropout(0.2), nn.Linear(checkpoint['hidden_units'][1],checkpoint['hidden_units'][2]), nn.LogSoftmax(dim=1)) model.classifier = classifier for param in model.parameters(): param.requires_grad = False #load from checkpoint model.class_to_idx = checkpoint['class_to_idx'] model.load_state_dict(checkpoint['state_dict']) return model def process_image(image): image = Image.open(image) transformed_image = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) processed_image = transformed_image(image) return processed_image def predict(image_path, model, topk, device): ''' Predict the class (or classes) of an image using a trained deep learning model. 
''' if device == "cuda" and torch.cuda.is_available(): model.to("cuda") else: model.to("cpu") model.eval() img = process_image(image_path) img = img.unsqueeze_(0) img = img.float() with torch.no_grad(): if device == "cuda" and torch.cuda.is_available(): img.to("cuda") else: img.to("cpu") output = model.forward(img) prob = torch.exp(output) top_probs, top_indices = prob.topk(int(topk), dim=1) idx_to_class = {val: key for key, value in model.class_to_idx.items()} top_classes = [idx_to_class[idx] for idx in top_indices[0].tolist()] if device == 'cuda': top_prob, top_class = top_probs.cpu().numpy()[0], top_classes.cpu().numpy()[0] else: top_prob, top_class = top_probs.numpy()[0], top_classes.numpy()[0] return top_prob, top_class
en
0.814718
#def checkpoint_load(filepath, architecture): # model_call = getattr(models, architecture) #load from checkpoint Predict the class (or classes) of an image using a trained deep learning model.
2.580551
3
Semenenya_Vladislav_dz_3/task_3_5.py
neesaj/1824_GB_Python_1
0
6628712
<reponame>neesaj/1824_GB_Python_1<filename>Semenenya_Vladislav_dz_3/task_3_5.py """ Реализовать функцию get_jokes(), возвращающую n шуток, сформированных из трех случайных слов, взятых из трёх списков (по одному из каждого): nouns = ["автомобиль", "лес", "огонь", "город", "дом"] adverbs = ["сегодня", "вчера", "завтра", "позавчера", "ночью"] adjectives = ["веселый", "яркий", "зеленый", "утопичный", "мягкий"] Например: # >>> get_jokes(2) ["лес завтра зеленый", "город вчера веселый"] Документировать код функции. Сможете ли вы добавить еще один аргумент — флаг, разрешающий или запрещающий повторы слов в шутках (когда каждое слово можно использовать только в одной шутке)? Сможете ли вы сделать аргументы именованными? """ from random import choice nouns = ["автомобиль", "лес", "огонь", "город", "дом"] adverbs = ["сегодня", "вчера", "завтра", "позавчера", "ночью"] adjectives = ["веселый", "яркий", "зеленый", "утопичный", "мягкий"] list_1 = [] def get_jokes(n, flag=False): for i in range(n): random_nouns = choice(nouns) random_adverbs = choice(adverbs) random_adjectives = choice(adjectives) joke = f'{random_nouns} {random_adverbs} {random_adjectives}' list_2 = [] if flag: list_2 = joke.split() for noun in nouns: for fun in list_2: if noun == fun: nouns.remove(noun) for adverb in adverbs: for fun in list_2: if adverb == fun: adverbs.remove(adverb) for adjective in adjectives: for fun in list_2: if adjective == fun: adjectives.remove(adjective) print(joke) count = int(input('Введите количество шуток: ')) get_jokes(count, True)
""" Реализовать функцию get_jokes(), возвращающую n шуток, сформированных из трех случайных слов, взятых из трёх списков (по одному из каждого): nouns = ["автомобиль", "лес", "огонь", "город", "дом"] adverbs = ["сегодня", "вчера", "завтра", "позавчера", "ночью"] adjectives = ["веселый", "яркий", "зеленый", "утопичный", "мягкий"] Например: # >>> get_jokes(2) ["лес завтра зеленый", "город вчера веселый"] Документировать код функции. Сможете ли вы добавить еще один аргумент — флаг, разрешающий или запрещающий повторы слов в шутках (когда каждое слово можно использовать только в одной шутке)? Сможете ли вы сделать аргументы именованными? """ from random import choice nouns = ["автомобиль", "лес", "огонь", "город", "дом"] adverbs = ["сегодня", "вчера", "завтра", "позавчера", "ночью"] adjectives = ["веселый", "яркий", "зеленый", "утопичный", "мягкий"] list_1 = [] def get_jokes(n, flag=False): for i in range(n): random_nouns = choice(nouns) random_adverbs = choice(adverbs) random_adjectives = choice(adjectives) joke = f'{random_nouns} {random_adverbs} {random_adjectives}' list_2 = [] if flag: list_2 = joke.split() for noun in nouns: for fun in list_2: if noun == fun: nouns.remove(noun) for adverb in adverbs: for fun in list_2: if adverb == fun: adverbs.remove(adverb) for adjective in adjectives: for fun in list_2: if adjective == fun: adjectives.remove(adjective) print(joke) count = int(input('Введите количество шуток: ')) get_jokes(count, True)
ru
0.976691
Реализовать функцию get_jokes(), возвращающую n шуток, сформированных из трех случайных слов, взятых из трёх списков (по одному из каждого): nouns = ["автомобиль", "лес", "огонь", "город", "дом"] adverbs = ["сегодня", "вчера", "завтра", "позавчера", "ночью"] adjectives = ["веселый", "яркий", "зеленый", "утопичный", "мягкий"] Например: # >>> get_jokes(2) ["лес завтра зеленый", "город вчера веселый"] Документировать код функции. Сможете ли вы добавить еще один аргумент — флаг, разрешающий или запрещающий повторы слов в шутках (когда каждое слово можно использовать только в одной шутке)? Сможете ли вы сделать аргументы именованными?
2.850405
3
lib/base.py
joanfont/laas
1
6628713
<gh_stars>1-10 import requests from bs4 import BeautifulSoup class ParserMixin: BASE_URL = None @classmethod def get_raw_content(cls, url): response = requests.get(url, headers={ 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:44.0) Gecko/20100101 Firefox/44.0', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', }) return response.content @classmethod def get_soup(cls, raw_content): return BeautifulSoup(raw_content, 'lxml')
import requests from bs4 import BeautifulSoup class ParserMixin: BASE_URL = None @classmethod def get_raw_content(cls, url): response = requests.get(url, headers={ 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:44.0) Gecko/20100101 Firefox/44.0', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', }) return response.content @classmethod def get_soup(cls, raw_content): return BeautifulSoup(raw_content, 'lxml')
none
1
2.971157
3
src/toctou/service.py
Xzonn/geekgame-0th
30
6628714
<reponame>Xzonn/geekgame-0th #!/usr/bin/env python3 import os import signal from urllib.parse import quote import verify import comm import functools print = functools.partial(print, flush=True) TIMEOUT = 60 PLAYER_INIT_MONEY = 500 SALER_INIT_MONEY = 10000 DISCOUNT = 0.9 MAX_LINES = 100 class Commodity: def __init__(self, name, desc, price, num): self.name = name self.desc = desc self.price = price self.num = num class Merchant: def __init__(self, money): self.money = money self.possession = dict() def gain_commodity(self, commodity, num): self.possession[commodity] = self.possession.get(commodity, 0) + num def gain_money(self, money): self.money += money def take_commodity(self, commodity, num): new_num = self.possession.get(commodity, 0) - num if new_num < 0: raise ValueError("no enough commodity") self.possession[commodity] = new_num if new_num == 0: del self.possession[commodity] def take_money(self, money): if self.money < money: raise ValueError("no enough money") self.money -= money def load_commodities(): global commodities commodities = [] for name, data in comm.comms().items(): commodities.append(Commodity(name, data["desc"], data["price"], 1)) commodities.sort(key=lambda c: c.price) def find_commodity(name): global commodities for c in commodities: if c.name == name: return c return None def build_saler(): global commodities saler = Merchant(SALER_INIT_MONEY) for c in commodities: num = 1 if c.name == "flag" else 10 saler.gain_commodity(c.name, num) return saler def build_player(): global commodities player = Merchant(PLAYER_INIT_MONEY) player.gain_commodity(commodities[0].name, 1) return player def _check_transaction(filename): global saler, player transaction = [] with open(filename, "r") as f: for line in f.readlines(): if line.startswith("#"): continue line = line.split() name, num = line[0], int(line[1]) c = find_commodity(name) if c is None: raise ValueError("%s: invalid name" % name) money = c.price * num if num > 0: # buy if name not in 
saler.possession: raise ValueError("%s: not available" % name) if name == "flag": raise ValueError("%s: not for sale" % name) if saler.possession[name] < num: raise ValueError("%s: too much to buy" % name) if player.money < money: raise ValueError("%s: too expensive" % name) transaction.append("buy %d %s ($%d)" % (num, name, money)) else: # sale money = int(-money * DISCOUNT) if name not in player.possession: raise ValueError("%s: not available" % name) if player.possession[name] < -num: raise ValueError("%s: too much to sale" % name) if saler.money < money: raise ValueError("%s: too expensive" % name) transaction.append("sale %d %s ($%d)" % (-num, name, money)) return transaction def _perform_transaction(filename): global saler, player with open(filename, "r") as f: for line in f.readlines(): if line.startswith("#"): continue line = line.split() name, num = line[0], int(line[1]) c = find_commodity(name) money = c.price * num if num > 0: # buy player.take_money(money) player.gain_commodity(name, num) saler.gain_money(money) saler.take_commodity(name, num) else: # sale money = int(-money * DISCOUNT) player.gain_money(money) # 先拿到钱 player.take_commodity(name, -num) # 再出货(抛异常) saler.take_money(money) saler.gain_commodity(name, -num) def check_transaction(filename): try: transaction = _check_transaction(filename) print("You are going to:") print('\n'.join(transaction)) print("Type 'y' to confirm: ", end='') if input() == 'y': print("confirmed") return True else: print("cancelled") return False except Exception as e: print(e) return False def perform_transaction(filename): try: _perform_transaction(filename) except Exception as e: print(e) return False return True def banner(): print("Welcome to the store.") print("What do you want to do?") print("Type 'help' for help.") if __name__ == "__main__": global saler, player signal.alarm(TIMEOUT) os.chdir(os.path.dirname(os.path.abspath(__file__))) token = input("token: ") if verify.validate(token) is None: print("wrong 
token") exit() comm.set_token(token) load_commodities() saler = build_saler() player = build_player() banner() while True: try: cmd = input("\n> ") except EOFError: print("bye") break if cmd == "help": print("help: show help message") print("inspect: show your possessions") print("list: show commodities in the store") print("trade: start a transaction") print() print("an example for trade:") print("jade 1 (buy 1 jade)") print("citrine -1 (sale 1 citrine)") print("END (trade ends)") continue if cmd == "inspect": print("You have $%d, and" % player.money) if len(player.possession) == 0: print("nothing") for name, num in player.possession.items(): c = find_commodity(name) print("%s ($%d * %d): %s" % (name, c.price, num, c.desc)) continue if cmd == "list": print("Saler have $%d, and" % saler.money) for name, num in saler.possession.items(): c = find_commodity(name) print("%s ($%d * %d)" % (name, c.price, num)) continue if cmd == "trade": filename = os.path.join("/tmp", quote(token[:5] + token[-5:], safe='') + ".txt") f = open(filename, "w") for _ in range(MAX_LINES): line = input() if line == "END": break f.write(line + '\n') f.close() if not check_transaction(filename): continue # 在这里发起另一个连接,篡改文件 if perform_transaction(filename): print("transaction completed") else: print("transaction failed") continue print("command error") exit()
#!/usr/bin/env python3 import os import signal from urllib.parse import quote import verify import comm import functools print = functools.partial(print, flush=True) TIMEOUT = 60 PLAYER_INIT_MONEY = 500 SALER_INIT_MONEY = 10000 DISCOUNT = 0.9 MAX_LINES = 100 class Commodity: def __init__(self, name, desc, price, num): self.name = name self.desc = desc self.price = price self.num = num class Merchant: def __init__(self, money): self.money = money self.possession = dict() def gain_commodity(self, commodity, num): self.possession[commodity] = self.possession.get(commodity, 0) + num def gain_money(self, money): self.money += money def take_commodity(self, commodity, num): new_num = self.possession.get(commodity, 0) - num if new_num < 0: raise ValueError("no enough commodity") self.possession[commodity] = new_num if new_num == 0: del self.possession[commodity] def take_money(self, money): if self.money < money: raise ValueError("no enough money") self.money -= money def load_commodities(): global commodities commodities = [] for name, data in comm.comms().items(): commodities.append(Commodity(name, data["desc"], data["price"], 1)) commodities.sort(key=lambda c: c.price) def find_commodity(name): global commodities for c in commodities: if c.name == name: return c return None def build_saler(): global commodities saler = Merchant(SALER_INIT_MONEY) for c in commodities: num = 1 if c.name == "flag" else 10 saler.gain_commodity(c.name, num) return saler def build_player(): global commodities player = Merchant(PLAYER_INIT_MONEY) player.gain_commodity(commodities[0].name, 1) return player def _check_transaction(filename): global saler, player transaction = [] with open(filename, "r") as f: for line in f.readlines(): if line.startswith("#"): continue line = line.split() name, num = line[0], int(line[1]) c = find_commodity(name) if c is None: raise ValueError("%s: invalid name" % name) money = c.price * num if num > 0: # buy if name not in saler.possession: raise 
ValueError("%s: not available" % name) if name == "flag": raise ValueError("%s: not for sale" % name) if saler.possession[name] < num: raise ValueError("%s: too much to buy" % name) if player.money < money: raise ValueError("%s: too expensive" % name) transaction.append("buy %d %s ($%d)" % (num, name, money)) else: # sale money = int(-money * DISCOUNT) if name not in player.possession: raise ValueError("%s: not available" % name) if player.possession[name] < -num: raise ValueError("%s: too much to sale" % name) if saler.money < money: raise ValueError("%s: too expensive" % name) transaction.append("sale %d %s ($%d)" % (-num, name, money)) return transaction def _perform_transaction(filename): global saler, player with open(filename, "r") as f: for line in f.readlines(): if line.startswith("#"): continue line = line.split() name, num = line[0], int(line[1]) c = find_commodity(name) money = c.price * num if num > 0: # buy player.take_money(money) player.gain_commodity(name, num) saler.gain_money(money) saler.take_commodity(name, num) else: # sale money = int(-money * DISCOUNT) player.gain_money(money) # 先拿到钱 player.take_commodity(name, -num) # 再出货(抛异常) saler.take_money(money) saler.gain_commodity(name, -num) def check_transaction(filename): try: transaction = _check_transaction(filename) print("You are going to:") print('\n'.join(transaction)) print("Type 'y' to confirm: ", end='') if input() == 'y': print("confirmed") return True else: print("cancelled") return False except Exception as e: print(e) return False def perform_transaction(filename): try: _perform_transaction(filename) except Exception as e: print(e) return False return True def banner(): print("Welcome to the store.") print("What do you want to do?") print("Type 'help' for help.") if __name__ == "__main__": global saler, player signal.alarm(TIMEOUT) os.chdir(os.path.dirname(os.path.abspath(__file__))) token = input("token: ") if verify.validate(token) is None: print("wrong token") exit() 
comm.set_token(token) load_commodities() saler = build_saler() player = build_player() banner() while True: try: cmd = input("\n> ") except EOFError: print("bye") break if cmd == "help": print("help: show help message") print("inspect: show your possessions") print("list: show commodities in the store") print("trade: start a transaction") print() print("an example for trade:") print("jade 1 (buy 1 jade)") print("citrine -1 (sale 1 citrine)") print("END (trade ends)") continue if cmd == "inspect": print("You have $%d, and" % player.money) if len(player.possession) == 0: print("nothing") for name, num in player.possession.items(): c = find_commodity(name) print("%s ($%d * %d): %s" % (name, c.price, num, c.desc)) continue if cmd == "list": print("Saler have $%d, and" % saler.money) for name, num in saler.possession.items(): c = find_commodity(name) print("%s ($%d * %d)" % (name, c.price, num)) continue if cmd == "trade": filename = os.path.join("/tmp", quote(token[:5] + token[-5:], safe='') + ".txt") f = open(filename, "w") for _ in range(MAX_LINES): line = input() if line == "END": break f.write(line + '\n') f.close() if not check_transaction(filename): continue # 在这里发起另一个连接,篡改文件 if perform_transaction(filename): print("transaction completed") else: print("transaction failed") continue print("command error") exit()
zh
0.881886
#!/usr/bin/env python3 # buy # sale # buy # sale # 先拿到钱 # 再出货(抛异常) # 在这里发起另一个连接,篡改文件
3.105315
3
pdfmerge/migrations/0013_auto_20190623_1536.py
rupin/pdfmerger
0
6628715
<filename>pdfmerge/migrations/0013_auto_20190623_1536.py # Generated by Django 2.1.3 on 2019-06-23 10:06 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('pdfmerge', '0012_auto_20190623_0948'), ] operations = [ migrations.AddField( model_name='pdfform', name='cellSize_X', field=models.DecimalField(decimal_places=2, default=0, max_digits=6), ), migrations.AddField( model_name='pdfform', name='cellSize_Y', field=models.DecimalField(decimal_places=2, default=0, max_digits=6), ), migrations.AddField( model_name='pdfformfield', name='font_size', field=models.IntegerField(default=0), ), ]
<filename>pdfmerge/migrations/0013_auto_20190623_1536.py # Generated by Django 2.1.3 on 2019-06-23 10:06 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('pdfmerge', '0012_auto_20190623_0948'), ] operations = [ migrations.AddField( model_name='pdfform', name='cellSize_X', field=models.DecimalField(decimal_places=2, default=0, max_digits=6), ), migrations.AddField( model_name='pdfform', name='cellSize_Y', field=models.DecimalField(decimal_places=2, default=0, max_digits=6), ), migrations.AddField( model_name='pdfformfield', name='font_size', field=models.IntegerField(default=0), ), ]
en
0.787787
# Generated by Django 2.1.3 on 2019-06-23 10:06
1.395559
1
discordmenu/embed/emoji.py
RheingoldRiver/discord-menu
0
6628716
class EmbedMenuEmojiConfig: def __init__(self, delete_message="❌", unsupported_transition="🚫"): self.delete_message = delete_message self.unsupported_transition = unsupported_transition def to_list(self): return [self.delete_message, self.unsupported_transition] DEFAULT_EMBED_MENU_EMOJI_CONFIG = EmbedMenuEmojiConfig()
class EmbedMenuEmojiConfig: def __init__(self, delete_message="❌", unsupported_transition="🚫"): self.delete_message = delete_message self.unsupported_transition = unsupported_transition def to_list(self): return [self.delete_message, self.unsupported_transition] DEFAULT_EMBED_MENU_EMOJI_CONFIG = EmbedMenuEmojiConfig()
none
1
2.401446
2
pylarklispy/interop_utils.py
decorator-factory/py-lark-lispy
1
6628717
<filename>pylarklispy/interop_utils.py<gh_stars>1-10 from .entities import Function class Index(dict): def add_function(self, name, rewrite: bool = False): def decorator(fn): entity = Function.make(name)(fn) self.add_value(name, entity, rewrite=rewrite) return entity return decorator def add_value(self, name, value, rewrite: bool = False): if name in self and not rewrite: raise LookupError(f"{name} is already present") self[name] = value
<filename>pylarklispy/interop_utils.py<gh_stars>1-10 from .entities import Function class Index(dict): def add_function(self, name, rewrite: bool = False): def decorator(fn): entity = Function.make(name)(fn) self.add_value(name, entity, rewrite=rewrite) return entity return decorator def add_value(self, name, value, rewrite: bool = False): if name in self and not rewrite: raise LookupError(f"{name} is already present") self[name] = value
none
1
2.65402
3
src/VulnerableScanner/MysqlPasswordScanner.py
b0bac/B0b-cExploit
10
6628718
from CoreUtils.WeakPasswordScannerEngine import WeakPasswordScanningEngine, ScannerRegister class MysqlPasswordScanner(WeakPasswordScanningEngine): def __init__(self, target): super().__init__(target) self.username_list = [username for username in open(self.username_file, 'r').readlines()] self.password_list = [password for password in open(self.password_file, 'r').readlines()] def mysql_login(self, ipaddress, port, username, password): import pymysql try: connect = pymysql.Connect(host=ipaddress, port=int(port), user=username, passwd=password, charset='utf8') connect.close() self.percent.write("\033[1;31m[+] 主机【%s】%s端口 存在Mysql弱口令! 【%s, %s】" % (ipaddress, str(port), username, password)) except Exception as exception: return def scan(self): argments = [] for target in self.target: for username in self.username_list: for password in self.password_list: argments.append([target[0], int(target[1]), username.split("\n")[0], password.split("\n")[0]]) self.percent = self.percent(argments) for item in self.percent: self.thread_pool.start_new_thread(self.mysql_login, item) self.percent.set_description("Scanning") ScannerRegister("MysqlBrute", MysqlPasswordScanner)
from CoreUtils.WeakPasswordScannerEngine import WeakPasswordScanningEngine, ScannerRegister class MysqlPasswordScanner(WeakPasswordScanningEngine): def __init__(self, target): super().__init__(target) self.username_list = [username for username in open(self.username_file, 'r').readlines()] self.password_list = [password for password in open(self.password_file, 'r').readlines()] def mysql_login(self, ipaddress, port, username, password): import pymysql try: connect = pymysql.Connect(host=ipaddress, port=int(port), user=username, passwd=password, charset='utf8') connect.close() self.percent.write("\033[1;31m[+] 主机【%s】%s端口 存在Mysql弱口令! 【%s, %s】" % (ipaddress, str(port), username, password)) except Exception as exception: return def scan(self): argments = [] for target in self.target: for username in self.username_list: for password in self.password_list: argments.append([target[0], int(target[1]), username.split("\n")[0], password.split("\n")[0]]) self.percent = self.percent(argments) for item in self.percent: self.thread_pool.start_new_thread(self.mysql_login, item) self.percent.set_description("Scanning") ScannerRegister("MysqlBrute", MysqlPasswordScanner)
none
1
2.808382
3
migrations/versions/44552e9fdead_.py
vab9/is-projekt-2016
0
6628719
<filename>migrations/versions/44552e9fdead_.py """empty message Revision ID: 4<PASSWORD> Revises: <PASSWORD> Create Date: 2016-10-05 02:02:17.183163 """ # revision identifiers, used by Alembic. revision = '<PASSWORD>' down_revision = '<PASSWORD>' from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('user', sa.Column('username', sa.String(), nullable=False)) op.alter_column('user', 'geb', existing_type=sa.DATE(), nullable=False) op.alter_column('user', 'nachname', existing_type=sa.VARCHAR(length=32), nullable=False) op.alter_column('user', 'vorname', existing_type=sa.VARCHAR(length=32), nullable=False) op.create_unique_constraint(None, 'user', ['username']) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_constraint(None, 'user', type_='unique') op.alter_column('user', 'vorname', existing_type=sa.VARCHAR(length=32), nullable=True) op.alter_column('user', 'nachname', existing_type=sa.VARCHAR(length=32), nullable=True) op.alter_column('user', 'geb', existing_type=sa.DATE(), nullable=True) op.drop_column('user', 'username') ### end Alembic commands ###
<filename>migrations/versions/44552e9fdead_.py """empty message Revision ID: 4<PASSWORD> Revises: <PASSWORD> Create Date: 2016-10-05 02:02:17.183163 """ # revision identifiers, used by Alembic. revision = '<PASSWORD>' down_revision = '<PASSWORD>' from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('user', sa.Column('username', sa.String(), nullable=False)) op.alter_column('user', 'geb', existing_type=sa.DATE(), nullable=False) op.alter_column('user', 'nachname', existing_type=sa.VARCHAR(length=32), nullable=False) op.alter_column('user', 'vorname', existing_type=sa.VARCHAR(length=32), nullable=False) op.create_unique_constraint(None, 'user', ['username']) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_constraint(None, 'user', type_='unique') op.alter_column('user', 'vorname', existing_type=sa.VARCHAR(length=32), nullable=True) op.alter_column('user', 'nachname', existing_type=sa.VARCHAR(length=32), nullable=True) op.alter_column('user', 'geb', existing_type=sa.DATE(), nullable=True) op.drop_column('user', 'username') ### end Alembic commands ###
en
0.479507
empty message Revision ID: 4<PASSWORD> Revises: <PASSWORD> Create Date: 2016-10-05 02:02:17.183163 # revision identifiers, used by Alembic. ### commands auto generated by Alembic - please adjust! ### ### end Alembic commands ### ### commands auto generated by Alembic - please adjust! ### ### end Alembic commands ###
1.698715
2
EISeg/eiseg/util/manager.py
Amanda-Barbara/PaddleSeg
2
6628720
import inspect from collections.abc import Sequence class ComponentManager: def __init__(self, name=None): self._components_dict = dict() self._name = name def __len__(self): return len(self._components_dict) def __repr__(self): name_str = self._name if self._name else self.__class__.__name__ return "{}:{}".format(name_str, list(self._components_dict.keys())) def __getitem__(self, item): if isinstance(item, int): if item >= len(self): raise KeyError(f"指定的下标 {item} 在长度为 {len(self)} 的 {self} 中越界") return list(self._components_dict.values())[item] if item not in self._components_dict.keys(): raise KeyError(f"{self} 中不存在 {item}") return self._components_dict[item] def __iter__(self): for val in self._components_dict.values(): yield val def keys(self): return list(self._components_dict.keys()) def idx(self, item): for idx, val in enumerate(self.keys()): if val == item: return idx raise KeyError(f"{item} is not in {self}") @property def components_dict(self): return self._components_dict @property def name(self): return self._name def _add_single_component(self, component): # Currently only support class or function type if not (inspect.isclass(component) or inspect.isfunction(component)): raise TypeError("Expect class/function type, but received {}". format(type(component))) # Obtain the internal name of the component component_name = component.__name__ # Check whether the component was added already if component_name in self._components_dict.keys(): raise KeyError("{} exists already!".format(component_name)) else: # Take the internal name of the component as its key self._components_dict[component_name] = component def add_component(self, components): # Check whether the type is a sequence if isinstance(components, Sequence): for component in components: self._add_single_component(component) else: component = components self._add_single_component(component) return components MODELS = ComponentManager("models") ACTIONS = ComponentManager("actions")
import inspect from collections.abc import Sequence class ComponentManager: def __init__(self, name=None): self._components_dict = dict() self._name = name def __len__(self): return len(self._components_dict) def __repr__(self): name_str = self._name if self._name else self.__class__.__name__ return "{}:{}".format(name_str, list(self._components_dict.keys())) def __getitem__(self, item): if isinstance(item, int): if item >= len(self): raise KeyError(f"指定的下标 {item} 在长度为 {len(self)} 的 {self} 中越界") return list(self._components_dict.values())[item] if item not in self._components_dict.keys(): raise KeyError(f"{self} 中不存在 {item}") return self._components_dict[item] def __iter__(self): for val in self._components_dict.values(): yield val def keys(self): return list(self._components_dict.keys()) def idx(self, item): for idx, val in enumerate(self.keys()): if val == item: return idx raise KeyError(f"{item} is not in {self}") @property def components_dict(self): return self._components_dict @property def name(self): return self._name def _add_single_component(self, component): # Currently only support class or function type if not (inspect.isclass(component) or inspect.isfunction(component)): raise TypeError("Expect class/function type, but received {}". format(type(component))) # Obtain the internal name of the component component_name = component.__name__ # Check whether the component was added already if component_name in self._components_dict.keys(): raise KeyError("{} exists already!".format(component_name)) else: # Take the internal name of the component as its key self._components_dict[component_name] = component def add_component(self, components): # Check whether the type is a sequence if isinstance(components, Sequence): for component in components: self._add_single_component(component) else: component = components self._add_single_component(component) return components MODELS = ComponentManager("models") ACTIONS = ComponentManager("actions")
en
0.924842
# Currently only support class or function type # Obtain the internal name of the component # Check whether the component was added already # Take the internal name of the component as its key # Check whether the type is a sequence
2.850936
3
marco/portal/base/templatetags/feedback.py
Ecotrust/marco-portal2
4
6628721
<reponame>Ecotrust/marco-portal2 from django import template from django.conf import settings register = template.Library() @register.inclusion_tag('portal/tags/feedback.html') def feedback(): return { 'iframe_url': settings.FEEDBACK_IFRAME_URL, }
from django import template from django.conf import settings register = template.Library() @register.inclusion_tag('portal/tags/feedback.html') def feedback(): return { 'iframe_url': settings.FEEDBACK_IFRAME_URL, }
none
1
1.492945
1
ExCon/data_aug.py
DarrenZhang01/ExCon
17
6628722
""" References: 1. https://github.com/HobbitLong/SupContrast/blob/master/util.py """ from __future__ import print_function import math import numpy as np import torch import torch.optim as optim def aug_no_bbox_mc(inputs, input_batch2, targets, explainer, model, num_classes, p, tau, backup, opt): """ inputs: shape (batch size, channels, side length, side length) """ #tau = 0.5 #p = 0. explanation = explainer.attribute(inputs, targets) # print("explanation shape: {} input shape: {}".format(explanation.shape, inputs.shape)) # if explainer.method == "GradCAM": explanation = explanation.transpose(1, 2, 0) explanation = explanation - np.min(explanation, axis=(0, 1)) explanation = explanation / (np.max(explanation, axis=(0, 1)) + 1e-8) explanation = (explanation <= tau).transpose(2, 0, 1) explanation = explanation[:, np.newaxis] explanation = np.repeat(explanation, 3, axis=1) # print("the explainer method is: {}".format(explainer.method)) augmented = torch.from_numpy(np.where(explanation, np.zeros_like(inputs.cpu().numpy()), inputs.cpu().numpy())) if torch.cuda.is_available(): augmented = augmented.cuda() _, preds_aug = torch.max(model(augmented).data, 1) _, preds_inputs = torch.max(model(inputs).data, 1) new_inputs = torch.zeros_like(inputs) new_targets = targets.clone() flag = np.zeros(len(targets)) for i in range(len(targets)): # correct prediction on original and augmented image if preds_inputs[i].item() == targets[i].item() and preds_aug[i].item() == targets[i].item(): new_inputs[i] = augmented[i] # incorrect prediction on original and correct prediction on augmented image elif preds_inputs[i].item() != targets[i].item() and preds_aug[i].item() == targets[i].item(): new_inputs[i] = augmented[i] # incorrect prediction on original and augmented image else: new_inputs[i] = backup[i] flag[i] = 1 # If we want to include the negative pairs in training ExCon, # we need to include the masked image with the wrong prediction as # a different training data in both batch 1 and 
batch 2 with a # background label. if opt.negative_pair == 1: new_label = torch.Tensor([opt.n_cls]) if torch.cuda.is_available(): new_label = new_label.cuda() new_inputs = torch.cat((new_inputs, augmented[i:i+1]), dim=0) input_batch2 = torch.cat((input_batch2, augmented[i:i+1]), dim=0) new_targets = torch.cat((new_targets, new_label), dim=0) return new_inputs, input_batch2, new_targets, flag class TwoCropTransform: """Create two crops of the same image""" def __init__(self, random_transform, standard_transform, opt): self.random_transform = random_transform self.standard_transform = standard_transform self.opt = opt def __call__(self, x): if 'Ex' in self.opt.method: # # Add one more random cropping besides the one standard transformation and the one random cropping. # # Since if the masked image later on does not give a correct prediction, then we want to use # # the randomly cropped version of the image rather than the image with only standard transformation # return [self.standard_transform(x), self.standard_transform(x), self.random_transform(x), self.random_transform(x)] return [self.standard_transform(x), self.random_transform(x), self.random_transform(x)] else: return [self.random_transform(x), self.random_transform(x)]
""" References: 1. https://github.com/HobbitLong/SupContrast/blob/master/util.py """ from __future__ import print_function import math import numpy as np import torch import torch.optim as optim def aug_no_bbox_mc(inputs, input_batch2, targets, explainer, model, num_classes, p, tau, backup, opt): """ inputs: shape (batch size, channels, side length, side length) """ #tau = 0.5 #p = 0. explanation = explainer.attribute(inputs, targets) # print("explanation shape: {} input shape: {}".format(explanation.shape, inputs.shape)) # if explainer.method == "GradCAM": explanation = explanation.transpose(1, 2, 0) explanation = explanation - np.min(explanation, axis=(0, 1)) explanation = explanation / (np.max(explanation, axis=(0, 1)) + 1e-8) explanation = (explanation <= tau).transpose(2, 0, 1) explanation = explanation[:, np.newaxis] explanation = np.repeat(explanation, 3, axis=1) # print("the explainer method is: {}".format(explainer.method)) augmented = torch.from_numpy(np.where(explanation, np.zeros_like(inputs.cpu().numpy()), inputs.cpu().numpy())) if torch.cuda.is_available(): augmented = augmented.cuda() _, preds_aug = torch.max(model(augmented).data, 1) _, preds_inputs = torch.max(model(inputs).data, 1) new_inputs = torch.zeros_like(inputs) new_targets = targets.clone() flag = np.zeros(len(targets)) for i in range(len(targets)): # correct prediction on original and augmented image if preds_inputs[i].item() == targets[i].item() and preds_aug[i].item() == targets[i].item(): new_inputs[i] = augmented[i] # incorrect prediction on original and correct prediction on augmented image elif preds_inputs[i].item() != targets[i].item() and preds_aug[i].item() == targets[i].item(): new_inputs[i] = augmented[i] # incorrect prediction on original and augmented image else: new_inputs[i] = backup[i] flag[i] = 1 # If we want to include the negative pairs in training ExCon, # we need to include the masked image with the wrong prediction as # a different training data in both batch 1 and 
batch 2 with a # background label. if opt.negative_pair == 1: new_label = torch.Tensor([opt.n_cls]) if torch.cuda.is_available(): new_label = new_label.cuda() new_inputs = torch.cat((new_inputs, augmented[i:i+1]), dim=0) input_batch2 = torch.cat((input_batch2, augmented[i:i+1]), dim=0) new_targets = torch.cat((new_targets, new_label), dim=0) return new_inputs, input_batch2, new_targets, flag class TwoCropTransform: """Create two crops of the same image""" def __init__(self, random_transform, standard_transform, opt): self.random_transform = random_transform self.standard_transform = standard_transform self.opt = opt def __call__(self, x): if 'Ex' in self.opt.method: # # Add one more random cropping besides the one standard transformation and the one random cropping. # # Since if the masked image later on does not give a correct prediction, then we want to use # # the randomly cropped version of the image rather than the image with only standard transformation # return [self.standard_transform(x), self.standard_transform(x), self.random_transform(x), self.random_transform(x)] return [self.standard_transform(x), self.random_transform(x), self.random_transform(x)] else: return [self.random_transform(x), self.random_transform(x)]
en
0.776141
References: 1. https://github.com/HobbitLong/SupContrast/blob/master/util.py inputs: shape (batch size, channels, side length, side length) #tau = 0.5 #p = 0. # print("explanation shape: {} input shape: {}".format(explanation.shape, inputs.shape)) # if explainer.method == "GradCAM": # print("the explainer method is: {}".format(explainer.method)) # correct prediction on original and augmented image # incorrect prediction on original and correct prediction on augmented image # incorrect prediction on original and augmented image # If we want to include the negative pairs in training ExCon, # we need to include the masked image with the wrong prediction as # a different training data in both batch 1 and batch 2 with a # background label. Create two crops of the same image # # Add one more random cropping besides the one standard transformation and the one random cropping. # # Since if the masked image later on does not give a correct prediction, then we want to use # # the randomly cropped version of the image rather than the image with only standard transformation # return [self.standard_transform(x), self.standard_transform(x), self.random_transform(x), self.random_transform(x)]
2.335672
2
shop/models.py
threecoolcat/ThreeCoolCat
6
6628723
<reponame>threecoolcat/ThreeCoolCat from django.db import models # 使用tinymce编辑富文本 from tinymce.models import HTMLField """商店的内容定义,采用 abstract 方式""" # Create your models here. class Item(models.Model): """物品基础类""" id = models.AutoField(primary_key=True) name = models.CharField('名称', db_column='name', null=False, blank=False, default='', max_length=255) cover = models.ImageField('封面', db_column='cover', upload_to='items', null=True, blank=True) unit_price = models.DecimalField('单价', db_column='unit_price', null=True, blank=False, decimal_places=2, max_digits=8) discount_price = models.DecimalField('折扣价', db_column='discount_price', null=True, blank=False, decimal_places=2, max_digits=8) enabled = models.BooleanField('启用', db_column='enabled', null=False, blank=False, default=True) order_by = models.IntegerField('排序', db_column='order_by', null=True, blank=True, default=0) class Meta: # 抽象类, 不生成实体表 abstract = True class Book(Item): author = models.CharField('作者', db_column='author', max_length=100, null=True, blank=True) sale_url = models.CharField('购买链接', db_column='sale_url', max_length=100, null=True, blank=True) sub_title = models.CharField('副标题', db_column='sub_title', max_length=100, null=True, blank=True) menus_text = HTMLField('图书目录', db_column='menus_text', null=True, blank=True) description = HTMLField('图书详情', db_column='description', null=True, blank=True) class Meta: managed = True abstract = False db_table = 'book' verbose_name = '图书' verbose_name_plural = '图书管理' def __str__(self): return self.name class Video(Item): """教学视频""" author = models.CharField('作者', db_column='author', max_length=100, null=True, blank=True) video_url = models.CharField('视频地址', db_column='video_url', max_length=100, null=True, blank=True) class Meta: managed = True db_table = 'video' verbose_name = '视频' verbose_name_plural = '视频管理' def __str__(self): return self.name
from django.db import models # 使用tinymce编辑富文本 from tinymce.models import HTMLField """商店的内容定义,采用 abstract 方式""" # Create your models here. class Item(models.Model): """物品基础类""" id = models.AutoField(primary_key=True) name = models.CharField('名称', db_column='name', null=False, blank=False, default='', max_length=255) cover = models.ImageField('封面', db_column='cover', upload_to='items', null=True, blank=True) unit_price = models.DecimalField('单价', db_column='unit_price', null=True, blank=False, decimal_places=2, max_digits=8) discount_price = models.DecimalField('折扣价', db_column='discount_price', null=True, blank=False, decimal_places=2, max_digits=8) enabled = models.BooleanField('启用', db_column='enabled', null=False, blank=False, default=True) order_by = models.IntegerField('排序', db_column='order_by', null=True, blank=True, default=0) class Meta: # 抽象类, 不生成实体表 abstract = True class Book(Item): author = models.CharField('作者', db_column='author', max_length=100, null=True, blank=True) sale_url = models.CharField('购买链接', db_column='sale_url', max_length=100, null=True, blank=True) sub_title = models.CharField('副标题', db_column='sub_title', max_length=100, null=True, blank=True) menus_text = HTMLField('图书目录', db_column='menus_text', null=True, blank=True) description = HTMLField('图书详情', db_column='description', null=True, blank=True) class Meta: managed = True abstract = False db_table = 'book' verbose_name = '图书' verbose_name_plural = '图书管理' def __str__(self): return self.name class Video(Item): """教学视频""" author = models.CharField('作者', db_column='author', max_length=100, null=True, blank=True) video_url = models.CharField('视频地址', db_column='video_url', max_length=100, null=True, blank=True) class Meta: managed = True db_table = 'video' verbose_name = '视频' verbose_name_plural = '视频管理' def __str__(self): return self.name
zh
0.908725
# 使用tinymce编辑富文本 商店的内容定义,采用 abstract 方式 # Create your models here. 物品基础类 # 抽象类, 不生成实体表 教学视频
2.42509
2
backend/backendAPI.py
CSIRT-MU/fimetis
6
6628724
<reponame>CSIRT-MU/fimetis import datetime import os from flask import Flask, jsonify, request from flask_cors import CORS from elasticsearch import Elasticsearch from werkzeug.security import check_password_hash, generate_password_hash from werkzeug.utils import secure_filename from functools import wraps import jwt import json import subprocess import fsa_lib as fsa import import_metadata import type_recognizer import postgres_lib as pg import tempfile import logging from base64 import b64encode import requests logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO) app = Flask(__name__) CORS(app) app.config['UPLOAD_FOLDER'] = '/tmp' app.config['SECRET_KEY'] = os.urandom(128) app.config['elastic_metadata_index'] = 'metadata' app.config['elastic_metadata_type'] = 'mactimes' app.config['elastic_filter_index'] = 'filter' app.config['elastic_filter_type'] = None app.config['elastic_user_index'] = 'user' app.config['elastic_user_type'] = None app.config['TOKEN_EXPIRATION'] = datetime.timedelta(days=1) app.config['elastic_host'] = 'localhost' app.config['elastic_port'] = 9200 app.config['pg_user'] = 'fimetis' app.config['pg_db'] = 'fimetis' es = Elasticsearch([{'host': app.config['elastic_host'], 'port': app.config['elastic_port'], 'timeout': 3600}]) app.config['oidc_introspect_url'] = 'https://oidc.muni.cz/oidc/introspect' app.config['oidc_client_id'] = 'client-id' app.config['oidc_client_secret'] = 'XXXXXXX' def token_required(f): @wraps(f) def decorated(*args, **kwargs): token = None if 'x-access-token' in request.headers: token = request.headers['x-access-token'] if not token: return jsonify({'message': 'Token is missing!'}), 401 try: data = jwt.decode(token, app.config['SECRET_KEY'], options={"verify_signature": False}) current_user = {'username': data['username'], 'is_super_admin': data['is_super_admin']} except jwt.exceptions.PyJWTError as e: logging.error("Failed to decode token: %s" % (e)) return jsonify({'message': 'Token is 
invalid!'}), 401 return f(current_user, *args, **kwargs) return decorated def admin_required(f): @wraps(f) def decorated(*args, **kwargs): try: current_user = args[0] except: return jsonify({'message': 'This user is not authorized'}), 403 authorized = False if current_user['is_super_admin']: authorized = True # for group in current_user['groups']: # if group == 'admin': # authorized = True # break if not authorized: return jsonify({'message': 'This user is not authorized'}), 403 return f(*args, **kwargs) return decorated # def authorization_required(f): # @wraps(f) # def decorated(*args, **kwargs): # roles = ['admin'] # for arg in args: # print(arg) # for kwarg in kwargs: # print(kwarg) # current_user = {'groups': ['admin']} # if roles is not None: # authorized = False # for group in current_user['groups']: # if group in roles: # authorized = True # break # if not authorized: # return jsonify({'message': 'This user is not authorized'}), 401 # return f(*args, *kwargs) # return decorated @app.route('/login', methods=['POST']) def login(): if not request.get_json() or not request.get_json()['username'] or not request.get_json()['password']: logging.warning('LOGIN - Wrong username or password') return jsonify({'message': 'Wrong username or password'}), 400 username = request.get_json()['username'] user = pg.get_user_by_login(username) group_names = pg.get_user_groups_names_by_login(username) password_hash = user[0] is_super_admin = user[1] email = user[2] name = user[3] if check_password_hash(password_hash, request.get_json()['password']): token = jwt.encode( { 'username': username, 'is_super_admin': is_super_admin, 'exp': datetime.datetime.utcnow() + app.config['TOKEN_EXPIRATION'] }, app.config['SECRET_KEY'] ) logging.warning('LOGIN - successful for user: ' + str(username) + ' from ' + str(request.remote_addr)) return jsonify({ 'username': username, 'is_super_admin': is_super_admin, 'email': email, 'name': name, 'groups': group_names, 'token': token} ) 
logging.warning('LOGIN - Wrong username or password') return jsonify({'message': 'Wrong username or password'}), 400 @app.route('/oidc-login', methods=['POST']) def oidc_login(): access_token = request.get_json()['access_token'] key_and_secret_encoded = b64encode( (app.config['oidc_client_id'] + ':' + app.config['oidc_client_secret']).encode() ) payload = 'token=' + access_token + '&token_type_hint=access_token' headers = { 'Authorization': 'Basic ' + key_and_secret_encoded.decode(), 'Content-Type': 'application/x-www-form-urlencoded' } r = requests.post(app.config['oidc_introspect_url'], headers=headers, data=payload) if r.status_code != 200: return jsonify({'message': 'Token introspection not successful'}), 400 response_json = json.loads(r.text) if response_json['active']: pg.process_oidc_user_login( response_json['sub'], response_json['name'], response_json['preferred_username'], response_json['email'], response_json['eduperson_entitlement'] ) group_names = pg.get_user_groups_names_by_login(response_json['sub']) token = jwt.encode( { 'username': response_json['sub'], 'is_super_admin': False, 'exp': datetime.datetime.utcnow() + app.config['TOKEN_EXPIRATION'] }, app.config['SECRET_KEY'] ) return jsonify({ 'username': response_json['sub'], 'name': response_json['name'], 'email': response_json['email'], 'preferred_username': response_json['preferred_username'], 'groups': response_json['eduperson_entitlement'] + group_names, 'is_external': True, 'token': token.decode('UTF-8')}), 200 else: return jsonify({'message': 'Token is not valid'}), 400 @app.route('/authenticated', methods=['GET', 'POST']) @token_required def authenticated(): return jsonify({'authenticated': 'OK'}), 200 @app.route('/') @token_required def es_info(): return jsonify(es.info()) @app.route('/upload', methods=['POST']) @token_required def upload(current_user): if 'case' not in request.form: return jsonify({'status': 'failed', 'message': 'Case name is missing in form'}) else: case_name = 
request.form['case'] if 'description' not in request.form: description = '' else: description = request.form['description'] if 'removeDeleted' not in request.form: remove_deleted = True else: remove_deleted = request.form['removeDeleted'] in ('true', '1') if 'removeDeletedRealloc' not in request.form: remove_deleted_realloc = True else: remove_deleted_realloc = request.form['removeDeletedRealloc'] in ('true', '1') if 'file' not in request.files: return jsonify({'status': 'failed', 'message': 'No file to upload'}) for file in request.files.getlist('file'): logging.info('upload file: ' + str(file) + ' to case: ' + str(case_name)) if file.filename == '': return jsonify({'status': 'failed', 'message': 'Invalid file name'}) if file: tf = tempfile.NamedTemporaryFile(suffix='-' + secure_filename(file.filename), dir=app.config['UPLOAD_FOLDER'], delete=False) file.save(tf.name) file_type = type_recognizer.recognize_type(tf.name) if file_type == 'fls': normalized_file = tf.name + '.norm' os.system('mactime -b %s -d > %s' % (tf.name, normalized_file)) os.system('rm %s' % (tf.name)) elif file_type == 'find': normalized_file = tf.name + '.norm' fls_file = tf.name + '.fls' os.system('python3 find2fls.py --input_file %s --output_file %s' % (tf.name, fls_file)) os.system('rm %s' % (tf.name)) os.system('mactime -b %s -d > %s' % (fls_file, normalized_file)) os.system('rm %s' % fls_file) elif file_type == 'mactime_noheader': normalized_file = tf.name with open(tf.name, "r+") as f: content = f.read() f.seek(0, 0) f.write('Date,Size,Type,Mode,UID,GID,Meta,File Name\n') f.write(content) elif file_type == 'l2tcsv': normalized_file = tf.name else: normalized_file = tf.name import_metadata.import_csv(normalized_file, file_type, es, app.config['elastic_metadata_index'], app.config['elastic_metadata_type'], case_name, remove_deleted=remove_deleted, remove_deleted_realloc=remove_deleted_realloc) if request.form['datasetExtend'] == 'false': pg.insert_case(case_name, description) 
pg.insert_user_case_role(current_user['username'], case_name, 'admin') pg.insert_init_note_for_case(case_name, current_user['username']) cluster_ids = json.loads(request.form['cluster_ids']) pg.add_user_clusters_for_case(current_user['username'], case_name, cluster_ids) full_access_ids = json.loads(request.form['full_access_ids']) read_access_ids = json.loads(request.form['read_access_ids']) pg.add_access_for_many_users_to_case(case_name, full_access_ids, read_access_ids, cluster_ids) pg.update_note_and_clusters_for_case_for_external_users(case_name) return jsonify({'status': 'OK', 'message': 'uploading files'}) @app.route('/case/delete/<string:case>', methods=['DELETE']) @token_required @admin_required def delete_case(current_user, case): if not pg.has_user_admin_access(current_user['username'], case): return jsonify({'status': 'failed', 'message': 'User has not admin access for this case'}) pg.delete_case(case) query = { 'query': { 'match_phrase': { 'case': case } } } logging.info('QUERY delete case: ' + '\n' + json.dumps(query)) res = es.delete_by_query(index=app.config['elastic_metadata_index'], doc_type=app.config['elastic_metadata_type'], body=query) return jsonify(res) @app.route('/case/accessible', methods=['GET']) @token_required def accessible_cases(current_user): logging.info('Listed accessible cases for user %s', (current_user['username'])) return jsonify(cases=pg.get_accessible_cases(current_user['username'])) @app.route('/user/all', methods=['GET']) @token_required def get_all_users(current_user): return jsonify(users=pg.get_all_users()) @app.route('/group/all', methods=['GET']) @token_required def get_all_groups(current_user): return jsonify(groups=pg.get_all_groups()) @app.route('/group/internal/all', methods=['GET']) @token_required def get_all_internal_groups(current_user): return jsonify(groups=pg.get_all_internal_groups()) @app.route('/case/update-description', methods=['POST']) @token_required def update_case_description(current_user): case_id 
= request.json.get('case_id') description = request.json.get('description') pg.update_case_description(case_id, description) logging.info('Case %s description updated', (case_id)) return jsonify({'case description updated': 'OK'}), 200 @app.route('/case/note', methods=['POST']) @token_required def get_note_for_case(current_user): case_name = request.json.get('case_name') note = pg.get_note_for_case(case_name, current_user['username']) #logging.info('Getting note for user %s and case %s', (current_user['username'], case_name,)) return jsonify(note=note) @app.route('/case/note/update', methods=['POST']) @token_required def update_note_for_case(current_user): case_name = request.json.get('case_name') updated_note = request.json.get('updated_note') pg.update_note_for_case(updated_note, case_name, current_user['username']) #logging.info('Updating note for user %s and case %s', (current_user['username'], case_name,)) return jsonify({'note for case was updated': 'OK'}), 200 @app.route('/filter/all', methods=['GET']) @token_required def filters(current_user): query = { 'aggs': { 'filters': { 'terms': { 'field': 'name.keyword', 'size': 2147483647 } } } } logging.info('QUERY get all filters: ' + '\n' + json.dumps(query)) res = es.search(index=app.config['elastic_filter_index'], doc_type=app.config['elastic_filter_type'], body=query) return jsonify(filters=res['aggregations']['filters']['buckets']) @app.route('/filter/name', methods=['POST']) @token_required def filter_by_name(current_user): if 'name' not in request.json: return jsonify({'message': 'Bad request'}), 400 else: filter_name = request.json.get('name') query = { 'query': { 'term': { 'name.keyword': { 'value': filter_name } } } } logging.info('QUERY filter by name: ' + '\n' + json.dumps(query)) res = es.search(index=app.config['elastic_filter_index'], doc_type=app.config['elastic_filter_type'], body=query) return jsonify(res['hits']['hits'][0]['_source']) @app.route('/clusters/data/<string:case>', methods=['POST']) 
@token_required def clusters_get_data(current_user, case): clusters = request.json.get('clusters') marks_ids = request.json.get('marks_ids') additional_filters = request.json.get('additional_filters') begin = request.json.get('begin') page_size = request.json.get('page_size') sort = request.json.get('sort') sort_order = request.json.get('sort_order') mark_query = {} mark_query['ids'] = { 'values' : marks_ids} query = fsa.build_data_query(case, clusters, additional_filters, begin, page_size, sort, sort_order) query['query']['bool']['must'][1]['bool']['should'].append(mark_query) logging.info('QUERY cluster get data: ' + '\n' + json.dumps(query)) res = es.search(index=app.config['elastic_metadata_index'], doc_type=app.config['elastic_metadata_type'], body=json.dumps(query)) #print(res) return jsonify(res) @app.route('/clusters/get_rank_of_marked_mactime_by_id/<string:case>', methods=['POST']) @token_required def get_rank_of_marked_mactime_by_id(current_user, case): clusters = request.json.get('clusters') marks_ids = request.json.get('marks_ids') additional_filters = request.json.get('additional_filters') begin = 0 size = request.json.get('size') sort = request.json.get('sort') sort_order = request.json.get('sort_order') mark_id = request.json.get('mark_id') mark_query = {} mark_query['ids'] = {'values': marks_ids} query = fsa.build_data_query(case, clusters, additional_filters, begin, size, sort, sort_order) query['query']['bool']['must'][1]['bool']['should'].append(mark_query) query['_source'] = False logging.info('QUERY cluster get data: ' + '\n' + json.dumps(query)) res = es.search(index=app.config['elastic_metadata_index'], doc_type=app.config['elastic_metadata_type'], body=json.dumps(query)) rank = 0 for entry in res['hits']['hits']: if mark_id == entry['_id']: break rank += 1 return {'rank': rank} @app.route('/clusters/get_rank_of_mactime_by_timestamp/<string:case>', methods=['POST']) @token_required def get_rank_of_mactime_by_timestamp(current_user, case): 
clusters = request.json.get('clusters') marks_ids = request.json.get('marks_ids') additional_filters = request.json.get('additional_filters') begin = 0 size = request.json.get('size') sort = request.json.get('sort') sort_order = request.json.get('sort_order') timestamp = request.json.get('timestamp') mark_query = {} mark_query['ids'] = {'values': marks_ids} query = fsa.build_data_query(case, clusters, additional_filters, begin, size, sort, sort_order) query['query']['bool']['must'][1]['bool']['should'].append(mark_query) query['_source'] = ['@timestamp'] logging.info('QUERY cluster get data: ' + '\n' + json.dumps(query)) res = es.search(index=app.config['elastic_metadata_index'], doc_type=app.config['elastic_metadata_type'], body=json.dumps(query)) rank = 0 for entry in res['hits']['hits']: if entry['_source']['@timestamp'] < timestamp: rank += 1 else: break return {'rank': rank} @app.route('/clusters/entries_border/<string:case>', methods=['POST']) @token_required def clusters_entries_border(current_user, case): clusters = request.json.get('clusters') additional_filters = request.json.get('additional_filters') query = fsa.build_data_query(case, clusters, additional_filters, 0, 1, 'timestamp', 'asc') logging.info('QUERY cluster entries border: ' + '\n' + json.dumps(query)) res = es.search(index=app.config['elastic_metadata_index'], doc_type=app.config['elastic_metadata_type'], body=json.dumps(query)) return jsonify(res) @app.route('/clusters/data_counts/<string:case>', methods=['POST']) @token_required def clusters_data_counts(current_user, case): clusters = request.json.get('clusters') additional_filters = request.json.get('additional_filters') query_filters = fsa.build_data_query(case, clusters, additional_filters) query_all = fsa.build_data_query(case, clusters, None) logging.info('QUERY clusters counts: ' + '\n' + json.dumps(query_filters) + '\n' + json.dumps(query_all)) res_filters = es.search(index=app.config['elastic_metadata_index'], 
doc_type=app.config['elastic_metadata_type'], body=json.dumps(query_filters)) res_all = es.search(index=app.config['elastic_metadata_index'], doc_type=app.config['elastic_metadata_type'], body=json.dumps(query_all)) return jsonify({'total': res_filters['hits']['total'], 'total_all': res_all['hits']['total']}) @app.route('/cluster/count/<string:case>', methods=['POST']) @token_required def cluster_get_count(current_user, case): cluster = request.json.get('cluster') additional_filters = request.json.get('additional_filters') query_filters = fsa.build_count_query(case, cluster, additional_filters) query_all = fsa.build_count_query(case, cluster, None) logging.info('QUERY cluster get count: ' + '\n' + json.dumps(query_filters) + '\n' + json.dumps(query_all)) res_filters = es.search(index=app.config['elastic_metadata_index'], doc_type=app.config['elastic_metadata_type'], body=json.dumps(query_filters)) res_all = es.search(index=app.config['elastic_metadata_index'], doc_type=app.config['elastic_metadata_type'], body=json.dumps(query_all)) return jsonify({'total': res_filters['hits']['total'], 'total_all': res_all['hits']['total']}) @app.route('/cluster/first_and_last/<string:case>', methods=['POST']) @token_required def cluster_get_first_and_last_entry(current_user, case): clusters = request.json.get('clusters') additional_filters = request.json.get('additional_filters') mac_type = request.json.get('mac_type') first_query = fsa.build_first_entry_query(case, clusters, additional_filters, mac_type, 'asc') last_query = fsa.build_first_entry_query(case, clusters, additional_filters, mac_type, 'desc') logging.info('QUERY cluster get first and last entry: ' + '\n' + json.dumps(first_query) + '\n' + json.dumps(last_query)) first = es.search(index=app.config['elastic_metadata_index'], doc_type=app.config['elastic_metadata_type'], body=json.dumps(first_query)) last = es.search(index=app.config['elastic_metadata_index'], doc_type=app.config['elastic_metadata_type'], 
body=json.dumps(last_query)) res = [] if first is not None: if len(first['hits']['hits']) > 0: res.append(first['hits']['hits'][0]) if last is not None: if len(last['hits']['hits']) > 0: res.append(last['hits']['hits'][0]) return jsonify(res) @app.route('/graph/first_and_last/<string:case>', methods=['POST']) @token_required def graph_get_first_and_last_entry(current_user, case): first_query = fsa.build_whole_case_first_entry_query(case, 'asc') last_query = fsa.build_whole_case_first_entry_query(case, 'desc') logging.info('QUERY cluster get first and last entry: ' + '\n' + json.dumps(first_query) + '\n' + json.dumps(last_query)) first = es.search(index=app.config['elastic_metadata_index'], doc_type=app.config['elastic_metadata_type'], body=json.dumps(first_query)) last = es.search(index=app.config['elastic_metadata_index'], doc_type=app.config['elastic_metadata_type'], body=json.dumps(last_query)) res = [] if first is not None: if len(first['hits']['hits']) > 0: res.append(first['hits']['hits'][0]) if last is not None: if len(last['hits']['hits']) > 0: res.append(last['hits']['hits'][0]) return jsonify(res) @app.route('/graph/data/<string:case>', methods=['POST']) @token_required def graph_get_data(current_user, case): clusters = request.json.get('clusters') additional_filters = request.json.get('additional_filters') mac_type = request.json.get('mac_type') frequency = request.json.get('frequency') if frequency is None: frequency = 'day' query = fsa.build_graph_data_query(case, clusters, additional_filters, mac_type, frequency) logging.info('QUERY graph get data: ' + '\n' + json.dumps(query)) res = es.search(index=app.config['elastic_metadata_index'], doc_type=app.config['elastic_metadata_type'], body=json.dumps(query)) return jsonify(res) @app.route('/graph/is_mark_in_cluster/<string:case>', methods=['POST']) @token_required def is_mark_in_cluster(current_user, case): clusters = request.json.get('clusters') timestamp_id = request.json.get('id') query = 
fsa.build_id_presence_query(case, clusters, timestamp_id) logging.info('QUERY is mark in cluster: ' + '\n' + json.dumps(query)) res = es.search(index=app.config['elastic_metadata_index'], doc_type=app.config['elastic_metadata_type'], body=json.dumps(query)) return jsonify(res) @app.route('/mark/all/<string:case>', methods=['GET']) @token_required def get_all_marks_for_case_and_user(current_user, case): return jsonify(marks=pg.get_all_marks_for_case_and_user(case, current_user['username'])) @app.route('/mark/get/<string:id>', methods=['GET']) @token_required def get_mark_info_by_id(current_user, id): id_query = {'_id': [id]} mark_query = {'query': {'terms': id_query}} res = es.search(index=app.config['elastic_metadata_index'], doc_type=app.config['elastic_metadata_type'], body=json.dumps(mark_query)) return jsonify(res) @app.route('/mark/insert/', methods=['POST']) @token_required def insert_mark(current_user): timestamp_id = request.json.get('id') case = request.json.get('case') pg.insert_mark(case, current_user['username'], timestamp_id) return jsonify({'mark inserted': 'OK'}), 200 @app.route('/mark/delete/', methods=['POST']) @token_required def delete_mark(current_user): id = request.json.get('id') case = request.json.get('case') pg.delete_mark(case, current_user['username'], id) return jsonify({'mark inserted': 'OK'}), 200 @app.route('/cluster-definition/all', methods=['GET']) @token_required def get_all_cluster_definitions(current_user): return jsonify(cluster_definitions=pg.get_all_cluster_definitons()) @app.route('/cluster-definition/add', methods=['POST']) @token_required def insert_cluster_definition(current_user): name = request.json.get('name') description = request.json.get('description') definition = request.json.get('definition') filter_name = request.json.get('filter_name') pg.insert_cluster_definition(name, definition, description, filter_name) return jsonify({'cluster definition inserted': 'OK'}), 200 
@app.route('/cluster-definition/delete/<string:id>', methods=['GET']) @token_required def delete_cluster_definition(current_user, id): pg.delete_cluster_definition(id) return jsonify({'cluster definition deleted': 'OK'}), 200 @app.route('/filter-db/all', methods=['GET']) @token_required def get_filters(current_user): return jsonify(filters=pg.get_filters()) @app.route('/cluster-definition/case/<string:case>', methods=['GET']) @token_required def get_clusters_for_user_and_case(current_user, case): return jsonify(cluster_definitions=pg.get_clusters_for_user_and_case(current_user['username'], case)) @app.route('/cluster-definition/case/<string:case>/add-user-clusters', methods=['POST']) @token_required def add_user_clusters_for_case(current_user, case): cluster_ids = request.json.get('cluster_ids') pg.add_user_clusters_for_case(current_user['username'], case, cluster_ids) return jsonify({'user clusters added to case': 'OK'}), 200 @app.route('/cluster-definition/case/<string:case>/delete-user-clusters', methods=['POST']) @token_required def delete_user_clusters_for_case(current_user, case): cluster_ids = request.json.get('cluster_ids') pg.delete_user_clusters_from_case(current_user['username'], case, cluster_ids) return jsonify({'user clusters deleted from case': 'OK'}), 200 @app.route('/case/<string:case>/access/<string:role>', methods=['GET']) @token_required def get_user_ids_with_access_to_case(current_user, case, role): return jsonify(user_ids=pg.get_user_ids_with_access_to_case(case, role)) @app.route('/group/<string:group_id>/users', methods=['GET']) @token_required def get_user_ids_in_group(current_user, group_id): return jsonify(user_ids=pg.get_user_ids_in_group(group_id)) @app.route('/case/<string:case>/access/<string:role>/manage', methods=['POST']) @token_required def manage_access_for_many_users_to_case(current_user, case, role): user_ids_to_add = request.json.get('user_ids_to_add') user_ids_to_del = request.json.get('user_ids_to_del') 
pg.manage_access_for_many_users_to_case(case, role, user_ids_to_add, user_ids_to_del) return jsonify({'user access managed': 'OK'}), 200 @app.route('/case/<string:case>/access/group/manage', methods=['POST']) @token_required def manage_access_for_many_groups_to_case(current_user, case): group_ids_to_add = request.json.get('group_ids_to_add') group_ids_to_del = request.json.get('group_ids_to_del') pg.manage_access_for_many_groups_to_case(case, group_ids_to_add, group_ids_to_del) return jsonify({'group access managed': 'OK'}), 200 @app.route('/group/users/manage', methods=['POST']) @token_required def manage_users_in_group(current_user): user_ids_to_add = request.json.get('user_ids_to_add') user_ids_to_del = request.json.get('user_ids_to_del') group_id = request.json.get('group_id') pg.manage_users_in_group(group_id, user_ids_to_add, user_ids_to_del) return jsonify({'user access managed': 'OK'}), 200 @app.route('/user/add', methods=['POST']) @token_required def add_user(current_user): login = request.json.get('login') password = generate_password_hash(request.json.get('password')) name = request.json.get('name') email = request.json.get('email') pg.add_user(login, password, name, email) return jsonify({'user added': 'OK'}), 200 @app.route('/group/add', methods=['POST']) @token_required def add_group(current_user): name = request.json.get('name') role = request.json.get('role') pg.add_group(name, role) return jsonify({'grouped added': 'OK'}), 200 @app.route('/case/<string:case_id>/access/groups', methods=['GET']) @token_required def get_group_ids_with_access_to_case(current_user, case_id): return jsonify(group_ids=pg.get_group_ids_with_access_to_case(case_id)) if __name__ == '__main__': app.run(debug=True, host='127.0.0.1', port=5000, threaded=True)
"""FIMETIS backend API: Flask routes over Elasticsearch metadata and a
PostgreSQL helper library, with JWT-based authentication."""
import datetime
import os
from flask import Flask, jsonify, request
from flask_cors import CORS
from elasticsearch import Elasticsearch
from werkzeug.security import check_password_hash, generate_password_hash
from werkzeug.utils import secure_filename
from functools import wraps
import jwt
import json
import subprocess
import fsa_lib as fsa
import import_metadata
import type_recognizer
import postgres_lib as pg
import tempfile
import logging
from base64 import b64encode
import requests

logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO)

app = Flask(__name__)
CORS(app)
app.config['UPLOAD_FOLDER'] = '/tmp'
# NOTE(review): the secret key is regenerated on every restart, which
# invalidates all previously issued tokens -- confirm this is intended.
app.config['SECRET_KEY'] = os.urandom(128)
app.config['elastic_metadata_index'] = 'metadata'
app.config['elastic_metadata_type'] = 'mactimes'
app.config['elastic_filter_index'] = 'filter'
app.config['elastic_filter_type'] = None
app.config['elastic_user_index'] = 'user'
app.config['elastic_user_type'] = None
app.config['TOKEN_EXPIRATION'] = datetime.timedelta(days=1)
app.config['elastic_host'] = 'localhost'
app.config['elastic_port'] = 9200
app.config['pg_user'] = 'fimetis'
app.config['pg_db'] = 'fimetis'

es = Elasticsearch([{'host': app.config['elastic_host'], 'port': app.config['elastic_port'], 'timeout': 3600}])

app.config['oidc_introspect_url'] = 'https://oidc.muni.cz/oidc/introspect'
app.config['oidc_client_id'] = 'client-id'
app.config['oidc_client_secret'] = 'XXXXXXX'  # placeholder -- inject the real secret at deployment


def token_required(f):
    """Decorator: require a valid JWT in the 'x-access-token' header and pass
    the decoded user as the first positional argument of the view."""
    @wraps(f)
    def decorated(*args, **kwargs):
        token = None
        if 'x-access-token' in request.headers:
            token = request.headers['x-access-token']
        if not token:
            return jsonify({'message': 'Token is missing!'}), 401
        try:
            # SECURITY BUGFIX: the original decoded with
            # options={"verify_signature": False}, which accepts forged
            # tokens. Tokens are issued below with jwt.encode(...,
            # SECRET_KEY) (default HS256), so verify them the same way;
            # this also enforces the 'exp' claim.
            data = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])
            current_user = {'username': data['username'], 'is_super_admin': data['is_super_admin']}
        except jwt.exceptions.PyJWTError as e:
            logging.error("Failed to decode token: %s" % (e))
            return jsonify({'message': 'Token is invalid!'}), 401
        return f(current_user, *args, **kwargs)
    return decorated


def admin_required(f):
    """Decorator: require that the (already authenticated) user is a super
    admin. Must be applied below token_required so args[0] is current_user."""
    @wraps(f)
    def decorated(*args, **kwargs):
        try:
            current_user = args[0]
        except IndexError:  # was a bare except: -- narrowed to the real failure mode
            return jsonify({'message': 'This user is not authorized'}), 403
        authorized = False
        if current_user['is_super_admin']:
            authorized = True
        if not authorized:
            return jsonify({'message': 'This user is not authorized'}), 403
        return f(*args, **kwargs)
    return decorated


@app.route('/login', methods=['POST'])
def login():
    """Authenticate a local user against the stored password hash and issue
    a signed JWT valid for TOKEN_EXPIRATION."""
    if not request.get_json() or not request.get_json()['username'] or not request.get_json()['password']:
        logging.warning('LOGIN - Wrong username or password')
        return jsonify({'message': 'Wrong username or password'}), 400
    username = request.get_json()['username']
    # NOTE(review): assumes pg.get_user_by_login always returns a row for a
    # known user; an unknown login would raise here -- confirm against pg lib.
    user = pg.get_user_by_login(username)
    group_names = pg.get_user_groups_names_by_login(username)
    password_hash = user[0]
    is_super_admin = user[1]
    email = user[2]
    name = user[3]
    if check_password_hash(password_hash, request.get_json()['password']):
        token = jwt.encode(
            {
                'username': username,
                'is_super_admin': is_super_admin,
                'exp': datetime.datetime.utcnow() + app.config['TOKEN_EXPIRATION']
            },
            app.config['SECRET_KEY']
        )
        logging.warning('LOGIN - successful for user: ' + str(username) + ' from ' + str(request.remote_addr))
        return jsonify({
            'username': username,
            'is_super_admin': is_super_admin,
            'email': email,
            'name': name,
            'groups': group_names,
            'token': token}
        )
    logging.warning('LOGIN - Wrong username or password')
    return jsonify({'message': 'Wrong username or password'}), 400


@app.route('/oidc-login', methods=['POST'])
def oidc_login():
    """Validate an OIDC access token via introspection and issue a local JWT."""
    access_token = request.get_json()['access_token']
    key_and_secret_encoded = b64encode(
        (app.config['oidc_client_id'] + ':' + app.config['oidc_client_secret']).encode()
    )
    payload = 'token=' + access_token + '&token_type_hint=access_token'
    headers = {
        'Authorization': 'Basic ' + key_and_secret_encoded.decode(),
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    r = requests.post(app.config['oidc_introspect_url'], headers=headers, data=payload)
    if r.status_code != 200:
        return jsonify({'message': 'Token introspection not successful'}), 400
    response_json = json.loads(r.text)
    if response_json['active']:
        pg.process_oidc_user_login(
            response_json['sub'],
            response_json['name'],
            response_json['preferred_username'],
            response_json['email'],
            response_json['eduperson_entitlement']
        )
        group_names = pg.get_user_groups_names_by_login(response_json['sub'])
        token = jwt.encode(
            {
                'username': response_json['sub'],
                'is_super_admin': False,
                'exp': datetime.datetime.utcnow() + app.config['TOKEN_EXPIRATION']
            },
            app.config['SECRET_KEY']
        )
        # NOTE(review): .decode() assumes PyJWT 1.x (bytes token); under
        # PyJWT >= 2 jwt.encode returns str and this raises -- verify the
        # pinned PyJWT version (the /login route above does NOT decode).
        return jsonify({
            'username': response_json['sub'],
            'name': response_json['name'],
            'email': response_json['email'],
            'preferred_username': response_json['preferred_username'],
            'groups': response_json['eduperson_entitlement'] + group_names,
            'is_external': True,
            'token': token.decode('UTF-8')}), 200
    else:
        return jsonify({'message': 'Token is not valid'}), 400


@app.route('/authenticated', methods=['GET', 'POST'])
@token_required
def authenticated(current_user):
    """Probe endpoint: succeeds iff the supplied token is valid.

    BUGFIX: token_required invokes f(current_user, ...), but the original
    signature took no arguments, so every request raised TypeError (500).
    """
    return jsonify({'authenticated': 'OK'}), 200


@app.route('/')
@token_required
def es_info(current_user):
    """Return Elasticsearch cluster info.

    BUGFIX: same missing current_user parameter as /authenticated.
    """
    return jsonify(es.info())


@app.route('/upload', methods=['POST'])
@token_required
def upload(current_user):
    """Upload one or more timeline files, normalise them to mactime CSV and
    import them into Elasticsearch; optionally create the case in PostgreSQL."""
    if 'case' not in request.form:
        return jsonify({'status': 'failed', 'message': 'Case name is missing in form'})
    else:
        case_name = request.form['case']
    if 'description' not in request.form:
        description = ''
    else:
        description = request.form['description']
    if 'removeDeleted' not in request.form:
        remove_deleted = True
    else:
        remove_deleted = request.form['removeDeleted'] in ('true', '1')
    if 'removeDeletedRealloc' not in request.form:
        remove_deleted_realloc = True
    else:
        remove_deleted_realloc = request.form['removeDeletedRealloc'] in ('true', '1')
    if 'file' not in request.files:
        return jsonify({'status': 'failed', 'message': 'No file to upload'})
    for file in request.files.getlist('file'):
        logging.info('upload file: ' + str(file) + ' to case: ' + str(case_name))
        if file.filename == '':
            return jsonify({'status': 'failed', 'message': 'Invalid file name'})
        if file:
            tf = tempfile.NamedTemporaryFile(suffix='-' + secure_filename(file.filename),
                                             dir=app.config['UPLOAD_FOLDER'], delete=False)
            file.save(tf.name)
            file_type = type_recognizer.recognize_type(tf.name)
            # Normalise the upload to mactime CSV depending on detected type.
            # NOTE(review): os.system with interpolated paths runs a shell;
            # tf.name comes from tempfile + secure_filename so it is
            # constrained, but subprocess.run([...], shell=False) would be
            # the safer idiom.
            if file_type == 'fls':
                normalized_file = tf.name + '.norm'
                os.system('mactime -b %s -d > %s' % (tf.name, normalized_file))
                os.system('rm %s' % (tf.name))
            elif file_type == 'find':
                normalized_file = tf.name + '.norm'
                fls_file = tf.name + '.fls'
                os.system('python3 find2fls.py --input_file %s --output_file %s' % (tf.name, fls_file))
                os.system('rm %s' % (tf.name))
                os.system('mactime -b %s -d > %s' % (fls_file, normalized_file))
                os.system('rm %s' % fls_file)
            elif file_type == 'mactime_noheader':
                normalized_file = tf.name
                # Prepend the mactime CSV header in place.
                with open(tf.name, "r+") as f:
                    content = f.read()
                    f.seek(0, 0)
                    f.write('Date,Size,Type,Mode,UID,GID,Meta,File Name\n')
                    f.write(content)
            elif file_type == 'l2tcsv':
                normalized_file = tf.name
            else:
                normalized_file = tf.name
            import_metadata.import_csv(normalized_file, file_type, es,
                                       app.config['elastic_metadata_index'],
                                       app.config['elastic_metadata_type'],
                                       case_name,
                                       remove_deleted=remove_deleted,
                                       remove_deleted_realloc=remove_deleted_realloc)
    # A brand-new case (not a dataset extension) gets its DB records and ACLs.
    if request.form['datasetExtend'] == 'false':
        pg.insert_case(case_name, description)
        pg.insert_user_case_role(current_user['username'], case_name, 'admin')
        pg.insert_init_note_for_case(case_name, current_user['username'])
        cluster_ids = json.loads(request.form['cluster_ids'])
        pg.add_user_clusters_for_case(current_user['username'], case_name, cluster_ids)
        full_access_ids = json.loads(request.form['full_access_ids'])
        read_access_ids = json.loads(request.form['read_access_ids'])
        pg.add_access_for_many_users_to_case(case_name, full_access_ids, read_access_ids, cluster_ids)
        pg.update_note_and_clusters_for_case_for_external_users(case_name)
    return jsonify({'status': 'OK', 'message': 'uploading files'})


@app.route('/case/delete/<string:case>', methods=['DELETE'])
@token_required
@admin_required
def delete_case(current_user, case):
    """Delete a case from PostgreSQL and all its entries from Elasticsearch."""
    if not pg.has_user_admin_access(current_user['username'], case):
        return jsonify({'status': 'failed', 'message': 'User has not admin access for this case'})
    pg.delete_case(case)
    query = {
        'query': {
            'match_phrase': {
                'case': case
            }
        }
    }
    logging.info('QUERY delete case: ' + '\n' + json.dumps(query))
    res = es.delete_by_query(index=app.config['elastic_metadata_index'],
                             doc_type=app.config['elastic_metadata_type'],
                             body=query)
    return jsonify(res)


@app.route('/case/accessible', methods=['GET'])
@token_required
def accessible_cases(current_user):
    """Return the cases the current user may access."""
    logging.info('Listed accessible cases for user %s', (current_user['username']))
    return jsonify(cases=pg.get_accessible_cases(current_user['username']))


@app.route('/user/all', methods=['GET'])
@token_required
def get_all_users(current_user):
    """Return all users."""
    return jsonify(users=pg.get_all_users())


@app.route('/group/all', methods=['GET'])
@token_required
def get_all_groups(current_user):
    """Return all groups."""
    return jsonify(groups=pg.get_all_groups())


@app.route('/group/internal/all', methods=['GET'])
@token_required
def get_all_internal_groups(current_user):
    """Return all internal (non-OIDC) groups."""
    return jsonify(groups=pg.get_all_internal_groups())


@app.route('/case/update-description', methods=['POST'])
@token_required
def update_case_description(current_user):
    """Update a case's description."""
    case_id = request.json.get('case_id')
    description = request.json.get('description')
    pg.update_case_description(case_id, description)
    logging.info('Case %s description updated', (case_id))
    return jsonify({'case description updated': 'OK'}), 200


@app.route('/case/note', methods=['POST'])
@token_required
def get_note_for_case(current_user):
    """Return the current user's note for the given case."""
    case_name = request.json.get('case_name')
    note = pg.get_note_for_case(case_name, current_user['username'])
    return jsonify(note=note)


@app.route('/case/note/update', methods=['POST'])
@token_required
def update_note_for_case(current_user):
    """Replace the current user's note for the given case."""
    case_name = request.json.get('case_name')
    updated_note = request.json.get('updated_note')
    pg.update_note_for_case(updated_note, case_name, current_user['username'])
    return jsonify({'note for case was updated': 'OK'}), 200


@app.route('/filter/all', methods=['GET'])
@token_required
def filters(current_user):
    """Return the distinct filter names stored in the filter index."""
    query = {
        'aggs': {
            'filters': {
                'terms': {
                    'field': 'name.keyword',
                    'size': 2147483647
                }
            }
        }
    }
    logging.info('QUERY get all filters: ' + '\n' + json.dumps(query))
    res = es.search(index=app.config['elastic_filter_index'],
                    doc_type=app.config['elastic_filter_type'],
                    body=query)
    return jsonify(filters=res['aggregations']['filters']['buckets'])


@app.route('/filter/name', methods=['POST'])
@token_required
def filter_by_name(current_user):
    """Return the filter document with the given name."""
    if 'name' not in request.json:
        return jsonify({'message': 'Bad request'}), 400
    else:
        filter_name = request.json.get('name')
    query = {
        'query': {
            'term': {
                'name.keyword': {
                    'value': filter_name
                }
            }
        }
    }
    logging.info('QUERY filter by name: ' + '\n' + json.dumps(query))
    res = es.search(index=app.config['elastic_filter_index'],
                    doc_type=app.config['elastic_filter_type'],
                    body=query)
    return jsonify(res['hits']['hits'][0]['_source'])


@app.route('/clusters/data/<string:case>', methods=['POST'])
@token_required
def clusters_get_data(current_user, case):
    """Return a page of timeline entries for the selected clusters, always
    including the user's marked entries."""
    clusters = request.json.get('clusters')
    marks_ids = request.json.get('marks_ids')
    additional_filters = request.json.get('additional_filters')
    begin = request.json.get('begin')
    page_size = request.json.get('page_size')
    sort = request.json.get('sort')
    sort_order = request.json.get('sort_order')
    mark_query = {'ids': {'values': marks_ids}}
    query = fsa.build_data_query(case, clusters, additional_filters, begin, page_size, sort, sort_order)
    query['query']['bool']['must'][1]['bool']['should'].append(mark_query)
    logging.info('QUERY cluster get data: ' + '\n' + json.dumps(query))
    res = es.search(index=app.config['elastic_metadata_index'],
                    doc_type=app.config['elastic_metadata_type'],
                    body=json.dumps(query))
    return jsonify(res)


@app.route('/clusters/get_rank_of_marked_mactime_by_id/<string:case>', methods=['POST'])
@token_required
def get_rank_of_marked_mactime_by_id(current_user, case):
    """Return the 0-based position of the entry with mark_id in the sorted
    result list."""
    clusters = request.json.get('clusters')
    marks_ids = request.json.get('marks_ids')
    additional_filters = request.json.get('additional_filters')
    begin = 0
    size = request.json.get('size')
    sort = request.json.get('sort')
    sort_order = request.json.get('sort_order')
    mark_id = request.json.get('mark_id')
    mark_query = {'ids': {'values': marks_ids}}
    query = fsa.build_data_query(case, clusters, additional_filters, begin, size, sort, sort_order)
    query['query']['bool']['must'][1]['bool']['should'].append(mark_query)
    # Only document ids are needed here.
    query['_source'] = False
    logging.info('QUERY cluster get data: ' + '\n' + json.dumps(query))
    res = es.search(index=app.config['elastic_metadata_index'],
                    doc_type=app.config['elastic_metadata_type'],
                    body=json.dumps(query))
    rank = 0
    for entry in res['hits']['hits']:
        if mark_id == entry['_id']:
            break
        rank += 1
    return {'rank': rank}


@app.route('/clusters/get_rank_of_mactime_by_timestamp/<string:case>', methods=['POST'])
@token_required
def get_rank_of_mactime_by_timestamp(current_user, case):
    """Return the number of entries strictly older than the given timestamp."""
    clusters = request.json.get('clusters')
    marks_ids = request.json.get('marks_ids')
    additional_filters = request.json.get('additional_filters')
    begin = 0
    size = request.json.get('size')
    sort = request.json.get('sort')
    sort_order = request.json.get('sort_order')
    timestamp = request.json.get('timestamp')
    mark_query = {'ids': {'values': marks_ids}}
    query = fsa.build_data_query(case, clusters, additional_filters, begin, size, sort, sort_order)
    query['query']['bool']['must'][1]['bool']['should'].append(mark_query)
    query['_source'] = ['@timestamp']
    logging.info('QUERY cluster get data: ' + '\n' + json.dumps(query))
    res = es.search(index=app.config['elastic_metadata_index'],
                    doc_type=app.config['elastic_metadata_type'],
                    body=json.dumps(query))
    rank = 0
    for entry in res['hits']['hits']:
        if entry['_source']['@timestamp'] < timestamp:
            rank += 1
        else:
            break
    return {'rank': rank}


@app.route('/clusters/entries_border/<string:case>', methods=['POST'])
@token_required
def clusters_entries_border(current_user, case):
    """Return the oldest entry matching the given clusters and filters."""
    clusters = request.json.get('clusters')
    additional_filters = request.json.get('additional_filters')
    query = fsa.build_data_query(case, clusters, additional_filters, 0, 1, 'timestamp', 'asc')
    logging.info('QUERY cluster entries border: ' + '\n' + json.dumps(query))
    res = es.search(index=app.config['elastic_metadata_index'],
                    doc_type=app.config['elastic_metadata_type'],
                    body=json.dumps(query))
    return jsonify(res)


@app.route('/clusters/data_counts/<string:case>', methods=['POST'])
@token_required
def clusters_data_counts(current_user, case):
    """Return entry counts for the clusters with and without extra filters."""
    clusters = request.json.get('clusters')
    additional_filters = request.json.get('additional_filters')
    query_filters = fsa.build_data_query(case, clusters, additional_filters)
    query_all = fsa.build_data_query(case, clusters, None)
    logging.info('QUERY clusters counts: ' + '\n' + json.dumps(query_filters) + '\n' + json.dumps(query_all))
    res_filters = es.search(index=app.config['elastic_metadata_index'],
                            doc_type=app.config['elastic_metadata_type'],
                            body=json.dumps(query_filters))
    res_all = es.search(index=app.config['elastic_metadata_index'],
                        doc_type=app.config['elastic_metadata_type'],
                        body=json.dumps(query_all))
    return jsonify({'total': res_filters['hits']['total'], 'total_all': res_all['hits']['total']})


@app.route('/cluster/count/<string:case>', methods=['POST'])
@token_required
def cluster_get_count(current_user, case):
    """Return entry counts for a single cluster with and without extra filters."""
    cluster = request.json.get('cluster')
    additional_filters = request.json.get('additional_filters')
    query_filters = fsa.build_count_query(case, cluster, additional_filters)
    query_all = fsa.build_count_query(case, cluster, None)
    logging.info('QUERY cluster get count: ' + '\n' + json.dumps(query_filters) + '\n' + json.dumps(query_all))
    res_filters = es.search(index=app.config['elastic_metadata_index'],
                            doc_type=app.config['elastic_metadata_type'],
                            body=json.dumps(query_filters))
    res_all = es.search(index=app.config['elastic_metadata_index'],
                        doc_type=app.config['elastic_metadata_type'],
                        body=json.dumps(query_all))
    return jsonify({'total': res_filters['hits']['total'], 'total_all': res_all['hits']['total']})


@app.route('/cluster/first_and_last/<string:case>', methods=['POST'])
@token_required
def cluster_get_first_and_last_entry(current_user, case):
    """Return the first and last entry of the selected clusters (0-2 items)."""
    clusters = request.json.get('clusters')
    additional_filters = request.json.get('additional_filters')
    mac_type = request.json.get('mac_type')
    first_query = fsa.build_first_entry_query(case, clusters, additional_filters, mac_type, 'asc')
    last_query = fsa.build_first_entry_query(case, clusters, additional_filters, mac_type, 'desc')
    logging.info('QUERY cluster get first and last entry: ' + '\n' + json.dumps(first_query) + '\n' + json.dumps(last_query))
    first = es.search(index=app.config['elastic_metadata_index'],
                      doc_type=app.config['elastic_metadata_type'],
                      body=json.dumps(first_query))
    last = es.search(index=app.config['elastic_metadata_index'],
                     doc_type=app.config['elastic_metadata_type'],
                     body=json.dumps(last_query))
    res = []
    if first is not None and len(first['hits']['hits']) > 0:
        res.append(first['hits']['hits'][0])
    if last is not None and len(last['hits']['hits']) > 0:
        res.append(last['hits']['hits'][0])
    return jsonify(res)


@app.route('/graph/first_and_last/<string:case>', methods=['POST'])
@token_required
def graph_get_first_and_last_entry(current_user, case):
    """Return the first and last entry of the whole case (0-2 items)."""
    first_query = fsa.build_whole_case_first_entry_query(case, 'asc')
    last_query = fsa.build_whole_case_first_entry_query(case, 'desc')
    logging.info('QUERY cluster get first and last entry: ' + '\n' + json.dumps(first_query) + '\n' + json.dumps(last_query))
    first = es.search(index=app.config['elastic_metadata_index'],
                      doc_type=app.config['elastic_metadata_type'],
                      body=json.dumps(first_query))
    last = es.search(index=app.config['elastic_metadata_index'],
                     doc_type=app.config['elastic_metadata_type'],
                     body=json.dumps(last_query))
    res = []
    if first is not None and len(first['hits']['hits']) > 0:
        res.append(first['hits']['hits'][0])
    if last is not None and len(last['hits']['hits']) > 0:
        res.append(last['hits']['hits'][0])
    return jsonify(res)


@app.route('/graph/data/<string:case>', methods=['POST'])
@token_required
def graph_get_data(current_user, case):
    """Return aggregated timeline data for the graph view."""
    clusters = request.json.get('clusters')
    additional_filters = request.json.get('additional_filters')
    mac_type = request.json.get('mac_type')
    frequency = request.json.get('frequency')
    if frequency is None:
        frequency = 'day'
    query = fsa.build_graph_data_query(case, clusters, additional_filters, mac_type, frequency)
    logging.info('QUERY graph get data: ' + '\n' + json.dumps(query))
    res = es.search(index=app.config['elastic_metadata_index'],
                    doc_type=app.config['elastic_metadata_type'],
                    body=json.dumps(query))
    return jsonify(res)


@app.route('/graph/is_mark_in_cluster/<string:case>', methods=['POST'])
@token_required
def is_mark_in_cluster(current_user, case):
    """Check whether the entry with the given id is present in the clusters."""
    clusters = request.json.get('clusters')
    timestamp_id = request.json.get('id')
    query = fsa.build_id_presence_query(case, clusters, timestamp_id)
    logging.info('QUERY is mark in cluster: ' + '\n' + json.dumps(query))
    res = es.search(index=app.config['elastic_metadata_index'],
                    doc_type=app.config['elastic_metadata_type'],
                    body=json.dumps(query))
    return jsonify(res)


@app.route('/mark/all/<string:case>', methods=['GET'])
@token_required
def get_all_marks_for_case_and_user(current_user, case):
    """Return all marks of the current user for the given case."""
    return jsonify(marks=pg.get_all_marks_for_case_and_user(case, current_user['username']))


@app.route('/mark/get/<string:id>', methods=['GET'])
@token_required
def get_mark_info_by_id(current_user, id):
    """Return the metadata entry backing the mark with the given ES id."""
    id_query = {'_id': [id]}
    mark_query = {'query': {'terms': id_query}}
    res = es.search(index=app.config['elastic_metadata_index'],
                    doc_type=app.config['elastic_metadata_type'],
                    body=json.dumps(mark_query))
    return jsonify(res)


@app.route('/mark/insert/', methods=['POST'])
@token_required
def insert_mark(current_user):
    """Store a mark on the given entry for the current user."""
    timestamp_id = request.json.get('id')
    case = request.json.get('case')
    pg.insert_mark(case, current_user['username'], timestamp_id)
    return jsonify({'mark inserted': 'OK'}), 200


@app.route('/mark/delete/', methods=['POST'])
@token_required
def delete_mark(current_user):
    """Remove a mark of the current user."""
    id = request.json.get('id')
    case = request.json.get('case')
    pg.delete_mark(case, current_user['username'], id)
    # BUGFIX: previously returned {'mark inserted': 'OK'} (copy-paste error).
    return jsonify({'mark deleted': 'OK'}), 200


@app.route('/cluster-definition/all', methods=['GET'])
@token_required
def get_all_cluster_definitions(current_user):
    """Return every stored cluster definition."""
    return jsonify(cluster_definitions=pg.get_all_cluster_definitons())


@app.route('/cluster-definition/add', methods=['POST'])
@token_required
def insert_cluster_definition(current_user):
    """Create a new cluster definition."""
    name = request.json.get('name')
    description = request.json.get('description')
    definition = request.json.get('definition')
    filter_name = request.json.get('filter_name')
    pg.insert_cluster_definition(name, definition, description, filter_name)
    return jsonify({'cluster definition inserted': 'OK'}), 200


# NOTE(review): deletion via GET is unconventional and unsafe against
# prefetching/crawlers -- consider switching this route to DELETE.
@app.route('/cluster-definition/delete/<string:id>', methods=['GET'])
@token_required
def delete_cluster_definition(current_user, id):
    """Delete the cluster definition with the given id."""
    pg.delete_cluster_definition(id)
    return jsonify({'cluster definition deleted': 'OK'}), 200


@app.route('/filter-db/all', methods=['GET'])
@token_required
def get_filters(current_user):
    """Return all filters stored in the database."""
    return jsonify(filters=pg.get_filters())


@app.route('/cluster-definition/case/<string:case>', methods=['GET'])
@token_required
def get_clusters_for_user_and_case(current_user, case):
    """Return the cluster definitions attached to this user and case."""
    return jsonify(cluster_definitions=pg.get_clusters_for_user_and_case(current_user['username'], case))


@app.route('/cluster-definition/case/<string:case>/add-user-clusters', methods=['POST'])
@token_required
def add_user_clusters_for_case(current_user, case):
    """Attach the given cluster definitions to this user's case."""
    cluster_ids = request.json.get('cluster_ids')
    pg.add_user_clusters_for_case(current_user['username'], case, cluster_ids)
    return jsonify({'user clusters added to case': 'OK'}), 200


@app.route('/cluster-definition/case/<string:case>/delete-user-clusters', methods=['POST'])
@token_required
def delete_user_clusters_for_case(current_user, case):
    """Detach the given cluster definitions from this user's case."""
    cluster_ids = request.json.get('cluster_ids')
    pg.delete_user_clusters_from_case(current_user['username'], case, cluster_ids)
    return jsonify({'user clusters deleted from case': 'OK'}), 200


@app.route('/case/<string:case>/access/<string:role>', methods=['GET'])
@token_required
def get_user_ids_with_access_to_case(current_user, case, role):
    """Return ids of users having the given role on the case."""
    return jsonify(user_ids=pg.get_user_ids_with_access_to_case(case, role))


@app.route('/group/<string:group_id>/users', methods=['GET'])
@token_required
def get_user_ids_in_group(current_user, group_id):
    """Return ids of users belonging to the group."""
    return jsonify(user_ids=pg.get_user_ids_in_group(group_id))


@app.route('/case/<string:case>/access/<string:role>/manage', methods=['POST'])
@token_required
def manage_access_for_many_users_to_case(current_user, case, role):
    """Grant/revoke the given role on the case for batches of users."""
    user_ids_to_add = request.json.get('user_ids_to_add')
    user_ids_to_del = request.json.get('user_ids_to_del')
    pg.manage_access_for_many_users_to_case(case, role, user_ids_to_add, user_ids_to_del)
    return jsonify({'user access managed': 'OK'}), 200


@app.route('/case/<string:case>/access/group/manage', methods=['POST'])
@token_required
def manage_access_for_many_groups_to_case(current_user, case):
    """Grant/revoke case access for batches of groups."""
    group_ids_to_add = request.json.get('group_ids_to_add')
    group_ids_to_del = request.json.get('group_ids_to_del')
    pg.manage_access_for_many_groups_to_case(case, group_ids_to_add, group_ids_to_del)
    return jsonify({'group access managed': 'OK'}), 200


@app.route('/group/users/manage', methods=['POST'])
@token_required
def manage_users_in_group(current_user):
    """Add/remove batches of users to/from a group."""
    user_ids_to_add = request.json.get('user_ids_to_add')
    user_ids_to_del = request.json.get('user_ids_to_del')
    group_id = request.json.get('group_id')
    pg.manage_users_in_group(group_id, user_ids_to_add, user_ids_to_del)
    return jsonify({'user access managed': 'OK'}), 200


@app.route('/user/add', methods=['POST'])
@token_required
def add_user(current_user):
    """Create a new local user with a hashed password."""
    login = request.json.get('login')
    password = generate_password_hash(request.json.get('password'))
    name = request.json.get('name')
    email = request.json.get('email')
    pg.add_user(login, password, name, email)
    return jsonify({'user added': 'OK'}), 200


@app.route('/group/add', methods=['POST'])
@token_required
def add_group(current_user):
    """Create a new group with the given role."""
    name = request.json.get('name')
    role = request.json.get('role')
    pg.add_group(name, role)
    # BUGFIX: previously returned {'grouped added': 'OK'} (typo).
    return jsonify({'group added': 'OK'}), 200


@app.route('/case/<string:case_id>/access/groups', methods=['GET'])
@token_required
def get_group_ids_with_access_to_case(current_user, case_id):
    """Return ids of groups having access to the case."""
    return jsonify(group_ids=pg.get_group_ids_with_access_to_case(case_id))


if __name__ == '__main__':
    # NOTE(review): debug=True must not be used in production (enables the
    # Werkzeug debugger with arbitrary code execution).
    app.run(debug=True, host='127.0.0.1', port=5000, threaded=True)
en
0.539834
# for group in current_user['groups']: # if group == 'admin': # authorized = True # break # def authorization_required(f): # @wraps(f) # def decorated(*args, **kwargs): # roles = ['admin'] # for arg in args: # print(arg) # for kwarg in kwargs: # print(kwarg) # current_user = {'groups': ['admin']} # if roles is not None: # authorized = False # for group in current_user['groups']: # if group in roles: # authorized = True # break # if not authorized: # return jsonify({'message': 'This user is not authorized'}), 401 # return f(*args, *kwargs) # return decorated #logging.info('Getting note for user %s and case %s', (current_user['username'], case_name,)) #logging.info('Updating note for user %s and case %s', (current_user['username'], case_name,)) #print(res)
1.803532
2
setup.py
wimglenn/raven-python
0
6628725
#!/usr/bin/env python """ Raven ===== Raven is a Python client for `Sentry <http://getsentry.com/>`_. It provides full out-of-the-box support for many of the popular frameworks, including `Django <djangoproject.com>`_, `Flask <http://flask.pocoo.org/>`_, and `Pylons <http://www.pylonsproject.org/>`_. Raven also includes drop-in support for any `WSGI <https://wsgi.readthedocs.io/>`_-compatible web application. """ # Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error # in multiprocessing/util.py _exit_function when running `python # setup.py test` (see # http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html) for m in ('multiprocessing', 'billiard'): try: __import__(m) except ImportError: pass from setuptools import setup, find_packages from setuptools.command.test import test as TestCommand import re import sys import ast _version_re = re.compile(r'VERSION\s+=\s+(.*)') with open('raven/__init__.py', 'rb') as f: version = str(ast.literal_eval(_version_re.search( f.read().decode('utf-8')).group(1))) install_requires = [] unittest2_requires = ['unittest2'] flask_requires = [ 'Flask>=0.8', 'blinker>=1.1', ] flask_tests_requires = [ 'Flask-Login>=0.2.0', ] sanic_requires = [] sanic_tests_requires = [] webpy_tests_requires = [ 'paste', 'web.py', ] # If it's python3, remove unittest2 & web.py. if sys.version_info[0] == 3: unittest2_requires = [] webpy_tests_requires = [] # If it's Python 3.5+, add Sanic packages. 
if sys.version_info >= (3, 5): sanic_requires = ['sanic>=0.7.0', ] sanic_tests_requires = ['aiohttp', ] tests_require = [ 'bottle', 'celery>=2.5', 'coverage<4', 'exam>=0.5.2', 'flake8==3.5.0', 'logbook', 'mock', 'nose', 'pycodestyle', 'pytz', 'pytest>=3.2.0,<3.3.0', 'pytest-timeout==1.2.0', 'pytest-xdist==1.18.2', 'pytest-pythonpath==0.7.1', 'pytest-sugar==0.9.0', 'pytest-cov', 'pytest-flake8==1.0.0', 'requests', 'tornado>=4.1,<5.0', 'tox', 'webob', 'webtest', 'wheel', 'anyjson', 'ZConfig', ] + ( flask_requires + flask_tests_requires + sanic_requires + sanic_tests_requires + unittest2_requires + webpy_tests_requires ) class PyTest(TestCommand): def initialize_options(self): TestCommand.initialize_options(self) self.pytest_args = [] def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): # import here, cause outside the eggs aren't loaded import pytest errno = pytest.main(self.pytest_args) sys.exit(errno) setup( name='raven', version=version, author='Sentry', author_email='<EMAIL>', url='https://github.com/getsentry/raven-python', description='Raven is a client for Sentry (https://getsentry.com)', long_description=__doc__, packages=find_packages(exclude=("tests", "tests.*",)), zip_safe=False, extras_require={ 'flask': flask_requires, 'tests': tests_require, ':python_version<"3.2"': ['contextlib2'], }, license='BSD', tests_require=tests_require, install_requires=install_requires, cmdclass={'test': PyTest}, include_package_data=True, entry_points={ 'console_scripts': [ 'raven = raven.scripts.runner:main', ], 'paste.filter_app_factory': [ 'raven = raven.contrib.paste:sentry_filter_factory', ], }, classifiers=[ 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming 
Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python', 'Topic :: Software Development', ], )
#!/usr/bin/env python """ Raven ===== Raven is a Python client for `Sentry <http://getsentry.com/>`_. It provides full out-of-the-box support for many of the popular frameworks, including `Django <djangoproject.com>`_, `Flask <http://flask.pocoo.org/>`_, and `Pylons <http://www.pylonsproject.org/>`_. Raven also includes drop-in support for any `WSGI <https://wsgi.readthedocs.io/>`_-compatible web application. """ # Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error # in multiprocessing/util.py _exit_function when running `python # setup.py test` (see # http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html) for m in ('multiprocessing', 'billiard'): try: __import__(m) except ImportError: pass from setuptools import setup, find_packages from setuptools.command.test import test as TestCommand import re import sys import ast _version_re = re.compile(r'VERSION\s+=\s+(.*)') with open('raven/__init__.py', 'rb') as f: version = str(ast.literal_eval(_version_re.search( f.read().decode('utf-8')).group(1))) install_requires = [] unittest2_requires = ['unittest2'] flask_requires = [ 'Flask>=0.8', 'blinker>=1.1', ] flask_tests_requires = [ 'Flask-Login>=0.2.0', ] sanic_requires = [] sanic_tests_requires = [] webpy_tests_requires = [ 'paste', 'web.py', ] # If it's python3, remove unittest2 & web.py. if sys.version_info[0] == 3: unittest2_requires = [] webpy_tests_requires = [] # If it's Python 3.5+, add Sanic packages. 
if sys.version_info >= (3, 5): sanic_requires = ['sanic>=0.7.0', ] sanic_tests_requires = ['aiohttp', ] tests_require = [ 'bottle', 'celery>=2.5', 'coverage<4', 'exam>=0.5.2', 'flake8==3.5.0', 'logbook', 'mock', 'nose', 'pycodestyle', 'pytz', 'pytest>=3.2.0,<3.3.0', 'pytest-timeout==1.2.0', 'pytest-xdist==1.18.2', 'pytest-pythonpath==0.7.1', 'pytest-sugar==0.9.0', 'pytest-cov', 'pytest-flake8==1.0.0', 'requests', 'tornado>=4.1,<5.0', 'tox', 'webob', 'webtest', 'wheel', 'anyjson', 'ZConfig', ] + ( flask_requires + flask_tests_requires + sanic_requires + sanic_tests_requires + unittest2_requires + webpy_tests_requires ) class PyTest(TestCommand): def initialize_options(self): TestCommand.initialize_options(self) self.pytest_args = [] def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): # import here, cause outside the eggs aren't loaded import pytest errno = pytest.main(self.pytest_args) sys.exit(errno) setup( name='raven', version=version, author='Sentry', author_email='<EMAIL>', url='https://github.com/getsentry/raven-python', description='Raven is a client for Sentry (https://getsentry.com)', long_description=__doc__, packages=find_packages(exclude=("tests", "tests.*",)), zip_safe=False, extras_require={ 'flask': flask_requires, 'tests': tests_require, ':python_version<"3.2"': ['contextlib2'], }, license='BSD', tests_require=tests_require, install_requires=install_requires, cmdclass={'test': PyTest}, include_package_data=True, entry_points={ 'console_scripts': [ 'raven = raven.scripts.runner:main', ], 'paste.filter_app_factory': [ 'raven = raven.contrib.paste:sentry_filter_factory', ], }, classifiers=[ 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming 
Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python', 'Topic :: Software Development', ], )
en
0.696176
#!/usr/bin/env python Raven ===== Raven is a Python client for `Sentry <http://getsentry.com/>`_. It provides full out-of-the-box support for many of the popular frameworks, including `Django <djangoproject.com>`_, `Flask <http://flask.pocoo.org/>`_, and `Pylons <http://www.pylonsproject.org/>`_. Raven also includes drop-in support for any `WSGI <https://wsgi.readthedocs.io/>`_-compatible web application. # Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error # in multiprocessing/util.py _exit_function when running `python # setup.py test` (see # http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html) # If it's python3, remove unittest2 & web.py. # If it's Python 3.5+, add Sanic packages. # import here, cause outside the eggs aren't loaded
2.163661
2
challenge/agoda_cancellation_prediction.py
giladh7/IML.HUJI
0
6628726
<filename>challenge/agoda_cancellation_prediction.py from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator from sklearn.model_selection import train_test_split # from IMLearn.utils import split_train_test import numpy as np import pandas as pd def load_data(filename: str): """ Load Agoda booking cancellation dataset Parameters ---------- filename: str Path to house prices dataset Returns ------- Design matrix and response vector in either of the following formats: 1) Single dataframe with last column representing the response 2) Tuple of pandas.DataFrame and Series 3) Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,) """ full_data = pd.read_csv(filename).drop_duplicates() # drop samples with booking date after checking date booking_dates = pd.to_datetime(full_data["booking_datetime"]).dt.date checkin_dates = pd.to_datetime(full_data["checkin_date"]).dt.date full_data = full_data[booking_dates <= checkin_dates] # add column for days of stay checkout_dates = pd.to_datetime(full_data["checkout_date"]).dt.date full_data["days_of_stay"] = (checkout_dates - checkin_dates).dt.days # drop samples with checkout not-after checkin (one day) full_data = full_data[full_data["days_of_stay"] >= 1] # transform Nan in requests to 0 special_requests = ["request_nonesmoke", "request_latecheckin", "request_highfloor", "request_largebed", "request_twinbeds", "request_airport", "request_earlycheckin"] for request in special_requests: full_data[request] = full_data[request].fillna(0) # transform charge option to int pay_int_replace = {"Pay Now": 1, "Pay Later": 3, "Pay at Check-in": 2} full_data = full_data.replace({"charge_option": pay_int_replace}) # STANDARTIZE original selling amount mean_selling_amount = full_data["original_selling_amount"].mean() full_data["original_selling_amount"] /= mean_selling_amount # create labels - 1 for cancellation, 0 otherwise labels = full_data["cancellation_datetime"] labels = labels.fillna(0) 
labels[labels != 0] = 1 numbers = ["no_of_room", "no_of_extra_bed", "no_of_children", "no_of_adults"] features = ["days_of_stay", "hotel_star_rating", "charge_option", "guest_is_not_the_customer"] + special_requests + numbers return full_data[features], labels # def evaluate_and_export(estimator: BaseEstimator, X: np.ndarray, filename: str): # """ # Export to specified file the prediction results of given estimator on given testset. # # File saved is in csv format with a single column named 'predicted_values' and n_samples rows containing # predicted values. # # Parameters # ---------- # estimator: BaseEstimator or any object implementing predict() method as in BaseEstimator (for example sklearn) # Fitted estimator to use for prediction # # X: ndarray of shape (n_samples, n_features) # Test design matrix to predict its responses # # filename: # path to store file at # # """ # pd.DataFrame(estimator.predict(X), columns=["predicted_values"]).to_csv(filename, index=False) if __name__ == '__main__': np.random.seed(0) # Load data df, cancellation_labels = load_data("../datasets/agoda_cancellation_train.csv") cancellation_labels = cancellation_labels.astype('int') X_train, X_test, y_train, y_test = train_test_split(df, cancellation_labels, test_size=0.2) # Fit model over data estimator = AgodaCancellationEstimator().fit(X_train, y_train) print(estimator.loss(X_test, y_test)) # Store model predictions over test set # evaluate_and_export(estimator, test_X, "id1_id2_id3.csv")
<filename>challenge/agoda_cancellation_prediction.py from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator from sklearn.model_selection import train_test_split # from IMLearn.utils import split_train_test import numpy as np import pandas as pd def load_data(filename: str): """ Load Agoda booking cancellation dataset Parameters ---------- filename: str Path to house prices dataset Returns ------- Design matrix and response vector in either of the following formats: 1) Single dataframe with last column representing the response 2) Tuple of pandas.DataFrame and Series 3) Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,) """ full_data = pd.read_csv(filename).drop_duplicates() # drop samples with booking date after checking date booking_dates = pd.to_datetime(full_data["booking_datetime"]).dt.date checkin_dates = pd.to_datetime(full_data["checkin_date"]).dt.date full_data = full_data[booking_dates <= checkin_dates] # add column for days of stay checkout_dates = pd.to_datetime(full_data["checkout_date"]).dt.date full_data["days_of_stay"] = (checkout_dates - checkin_dates).dt.days # drop samples with checkout not-after checkin (one day) full_data = full_data[full_data["days_of_stay"] >= 1] # transform Nan in requests to 0 special_requests = ["request_nonesmoke", "request_latecheckin", "request_highfloor", "request_largebed", "request_twinbeds", "request_airport", "request_earlycheckin"] for request in special_requests: full_data[request] = full_data[request].fillna(0) # transform charge option to int pay_int_replace = {"Pay Now": 1, "Pay Later": 3, "Pay at Check-in": 2} full_data = full_data.replace({"charge_option": pay_int_replace}) # STANDARTIZE original selling amount mean_selling_amount = full_data["original_selling_amount"].mean() full_data["original_selling_amount"] /= mean_selling_amount # create labels - 1 for cancellation, 0 otherwise labels = full_data["cancellation_datetime"] labels = labels.fillna(0) 
labels[labels != 0] = 1 numbers = ["no_of_room", "no_of_extra_bed", "no_of_children", "no_of_adults"] features = ["days_of_stay", "hotel_star_rating", "charge_option", "guest_is_not_the_customer"] + special_requests + numbers return full_data[features], labels # def evaluate_and_export(estimator: BaseEstimator, X: np.ndarray, filename: str): # """ # Export to specified file the prediction results of given estimator on given testset. # # File saved is in csv format with a single column named 'predicted_values' and n_samples rows containing # predicted values. # # Parameters # ---------- # estimator: BaseEstimator or any object implementing predict() method as in BaseEstimator (for example sklearn) # Fitted estimator to use for prediction # # X: ndarray of shape (n_samples, n_features) # Test design matrix to predict its responses # # filename: # path to store file at # # """ # pd.DataFrame(estimator.predict(X), columns=["predicted_values"]).to_csv(filename, index=False) if __name__ == '__main__': np.random.seed(0) # Load data df, cancellation_labels = load_data("../datasets/agoda_cancellation_train.csv") cancellation_labels = cancellation_labels.astype('int') X_train, X_test, y_train, y_test = train_test_split(df, cancellation_labels, test_size=0.2) # Fit model over data estimator = AgodaCancellationEstimator().fit(X_train, y_train) print(estimator.loss(X_test, y_test)) # Store model predictions over test set # evaluate_and_export(estimator, test_X, "id1_id2_id3.csv")
en
0.660506
# from IMLearn.utils import split_train_test Load Agoda booking cancellation dataset Parameters ---------- filename: str Path to house prices dataset Returns ------- Design matrix and response vector in either of the following formats: 1) Single dataframe with last column representing the response 2) Tuple of pandas.DataFrame and Series 3) Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,) # drop samples with booking date after checking date # add column for days of stay # drop samples with checkout not-after checkin (one day) # transform Nan in requests to 0 # transform charge option to int # STANDARTIZE original selling amount # create labels - 1 for cancellation, 0 otherwise # def evaluate_and_export(estimator: BaseEstimator, X: np.ndarray, filename: str): # """ # Export to specified file the prediction results of given estimator on given testset. # # File saved is in csv format with a single column named 'predicted_values' and n_samples rows containing # predicted values. # # Parameters # ---------- # estimator: BaseEstimator or any object implementing predict() method as in BaseEstimator (for example sklearn) # Fitted estimator to use for prediction # # X: ndarray of shape (n_samples, n_features) # Test design matrix to predict its responses # # filename: # path to store file at # # """ # pd.DataFrame(estimator.predict(X), columns=["predicted_values"]).to_csv(filename, index=False) # Load data # Fit model over data # Store model predictions over test set # evaluate_and_export(estimator, test_X, "id1_id2_id3.csv")
3.072082
3
tests/test_process_args.py
man-of-eel/dpgv4
3
6628727
# pylint: disable=missing-docstring from dpgv4 import process_args_str def test_process_args_str() -> None: assert process_args_str("ffmpeg arg1 arg2") == "ffmpeg arg1 arg2" def test_process_args_bytes() -> None: assert "ffmpeg arg1 arg2" in process_args_str(b"ffmpeg arg1 arg2") def test_process_args_str_sequence() -> None: assert process_args_str(["ffmpeg", "arg1", "arg2"]) == "ffmpeg arg1 arg2" def test_process_args_bytes_sequence() -> None: assert "ffmpeg arg1 arg2" in process_args_str([b"ffmpeg", b"arg1", b"arg2"])
# pylint: disable=missing-docstring from dpgv4 import process_args_str def test_process_args_str() -> None: assert process_args_str("ffmpeg arg1 arg2") == "ffmpeg arg1 arg2" def test_process_args_bytes() -> None: assert "ffmpeg arg1 arg2" in process_args_str(b"ffmpeg arg1 arg2") def test_process_args_str_sequence() -> None: assert process_args_str(["ffmpeg", "arg1", "arg2"]) == "ffmpeg arg1 arg2" def test_process_args_bytes_sequence() -> None: assert "ffmpeg arg1 arg2" in process_args_str([b"ffmpeg", b"arg1", b"arg2"])
en
0.568955
# pylint: disable=missing-docstring
2.478406
2
catatom2osm/geo/layer/cons.py
Crashillo/CatAtom2Osm
0
6628728
import logging from collections import defaultdict from qgis.core import QgsFeatureRequest, QgsField, QgsGeometry from qgis.PyQt.QtCore import QVariant from catatom2osm import config, translate from catatom2osm.geo import BUFFER_SIZE, SIMPLIFY_BUILDING_PARTS from catatom2osm.geo.geometry import Geometry from catatom2osm.geo.layer.polygon import PolygonLayer from catatom2osm.geo.point import Point from catatom2osm.geo.tools import get_attributes, is_inside from catatom2osm.report import instance as report log = logging.getLogger(config.app_name) class ConsLayer(PolygonLayer): """Class for constructions.""" def __init__( self, path="MultiPolygon", baseName="building", providerLib="memory", source_date=None, ): super(ConsLayer, self).__init__(path, baseName, providerLib) if self.fields().isEmpty(): self.writer.addAttributes( [ QgsField("localId", QVariant.String, len=254), QgsField("condition", QVariant.String, len=254), QgsField("image", QVariant.String, len=254), QgsField("currentUse", QVariant.String, len=254), QgsField("bu_units", QVariant.Int), QgsField("dwellings", QVariant.Int), QgsField("lev_above", QVariant.Int), QgsField("lev_below", QVariant.Int), QgsField("nature", QVariant.String, len=254), QgsField("task", QVariant.String, len=254), QgsField("fixme", QVariant.String, len=254), QgsField("layer", QVariant.Int), ] ) self.updateFields() self.rename = { "condition": "conditionOfConstruction", "image": "documentLink", "bu_units": "numberOfBuildingUnits", "dwellings": "numberOfDwellings", "lev_above": "numberOfFloorsAboveGround", "lev_below": "numberOfFloorsBelowGround", "nature": "constructionNature", } self.source_date = source_date @staticmethod def is_building(feature): """Return True for building features.""" return "_" not in feature["localId"] @staticmethod def is_part(feature): """Return True for Part features.""" return "_part" in feature["localId"] @staticmethod def is_pool(feature): """Return True for Pool features.""" return "_PI." 
in feature["localId"] @staticmethod def get_id(feat): """Trim to parcel id.""" return feat["localId"].split("_")[0].split(".")[-1] def explode_multi_parts(self, address=False): request = QgsFeatureRequest() if address: refs = {self.get_id(ad) for ad in address.getFeatures()} fids = [f.id() for f in self.getFeatures() if f["localId"] not in refs] request.setFilterFids(fids) super(ConsLayer, self).explode_multi_parts(request) def to_osm(self, data=None, tags={}, upload="never"): """Export to OSM.""" return super(ConsLayer, self).to_osm( translate.building_tags, data, tags=tags, upload=upload ) def index_of_parts(self): """Index parts of building by building localid.""" parts = defaultdict(list) for part in self.search("regexp_match(localId, '_part')"): localId = self.get_id(part) parts[localId].append(part) return parts def index_of_pools(self): """Index pools in building parcel by building localid.""" pools = defaultdict(list) for pool in self.search("regexp_match(localId, '_PI')"): localId = self.get_id(pool) pools[localId].append(pool) return pools def index_of_building_and_parts(self): """ Construct some utility dicts. buildings index building by localid (call before explode_multi_parts). parts index parts of building by building localid. 
""" buildings = defaultdict(list) parts = defaultdict(list) for feature in self.getFeatures(): if self.is_building(feature): buildings[feature["localId"]].append(feature) elif self.is_part(feature): localId = self.get_id(feature) parts[localId].append(feature) return (buildings, parts) def remove_parts_wo_building(self): """Remove building parts without building.""" bu_refs = [f["localId"] for f in self.getFeatures() if self.is_building(f)] to_clean = [ f.id() for f in self.getFeatures() if self.is_part(f) and self.get_id(f) not in bu_refs ] if to_clean: self.writer.deleteFeatures(to_clean) log.debug(_("Removed %d parts without building"), len(to_clean)) report.parts_wo_building = len(to_clean) def remove_outside_parts(self): """ Remove parts outside the outline of it building. Remove parts without levels above ground. Precondition: Called before merge_greatest_part. """ to_clean_o = [] to_clean_b = [] buildings = {f["localId"]: f for f in self.getFeatures() if self.is_building(f)} pbar = self.get_progressbar(_("Remove outside parts"), self.featureCount()) for feat in self.getFeatures(): if self.is_part(feat): ref = self.get_id(feat) if feat["lev_above"] == 0 and feat["lev_below"] != 0: to_clean_b.append(feat.id()) elif ref in buildings: bu = buildings[ref] if not is_inside(feat, bu): to_clean_o.append(feat.id()) pbar.update() pbar.close() if len(to_clean_o) + len(to_clean_b) > 0: self.writer.deleteFeatures(to_clean_o + to_clean_b) if len(to_clean_o) > 0: log.debug( _("Removed %d building parts outside the outline"), len(to_clean_o) ) report.outside_parts = len(to_clean_o) if len(to_clean_b) > 0: log.debug( _("Deleted %d building parts with no floors above ground"), len(to_clean_b), ) report.underground_parts = len(to_clean_b) def get_parts(self, outline, parts): """ Return a dictionary of parts for levels, the maximum and minimum levels. Given the building outline and its parts, for the parts inside the outline. 
""" max_level = 0 min_level = 0 parts_for_level = defaultdict(list) for part in parts: if is_inside(part, outline): level = (part["lev_above"] or 0, part["lev_below"] or 0) if level[0] > max_level: max_level = level[0] if level[1] > min_level: min_level = level[1] parts_for_level[level].append(part) return parts_for_level, max_level, min_level def merge_adjacent_parts(self, outline, parts): """ Merge the adjacent parts in each level given a building outline and its parts. Translates the maximum values of number of levels above and below ground to the outline and optionally deletes all the parts in that level. """ to_clean = [] to_clean_g = [] to_change = {} to_change_g = {} parts_for_level, max_level, min_level = self.get_parts(outline, parts) parts_area = 0 outline["lev_above"] = max_level outline["lev_below"] = min_level building_area = round(outline.geometry().area(), 0) for (level, parts) in parts_for_level.items(): check_area = False for part in parts: part_area = part.geometry().area() parts_area += part_area if round(part_area, 0) > building_area: part["fixme"] = _("This part is bigger than its building") to_change[part.id()] = get_attributes(part) check_area = True if check_area: continue if len(parts_for_level) == 1 or ( level == (max_level, min_level) and SIMPLIFY_BUILDING_PARTS ): to_clean = [p.id() for p in parts_for_level[max_level, min_level]] else: geom = Geometry.merge_adjacent_features(parts) poly = Geometry.get_multipolygon(geom) if len(poly) < len(parts): for (i, part) in enumerate(parts): if i < len(poly): g = Geometry.fromPolygonXY(poly[i]) to_change_g[part.id()] = g else: to_clean_g.append(part.id()) if len(parts_for_level) > 1 and round(parts_area, 0) != building_area: outline["fixme"] = _("Building parts don't fill the building outline") to_change[outline.id()] = get_attributes(outline) return to_clean, to_clean_g, to_change, to_change_g def remove_inner_rings(self, feat1, feat2): """ Auxiliary method to remove feat1 of its inner rings if 
equals to feat2. Returns True if feat1 must be deleted and new geometry if any ring is removed. """ poly = Geometry.get_multipolygon(feat1)[0] geom2 = Geometry.fromPolygonXY(Geometry.get_multipolygon(feat2)[0]) delete = False new_geom = None delete_rings = [] for i, ring in enumerate(poly): if Geometry.fromPolygonXY([ring]).equals(geom2): if i == 0: delete = True break else: delete_rings.append(i) if delete_rings: new_poly = [ring for i, ring in enumerate(poly) if i not in delete_rings] new_geom = Geometry().fromPolygonXY(new_poly) return delete, new_geom def merge_building_parts(self): """ Apply merge_adjacent_parts to each set of building and its parts. Detect pools contained in a building and assign layer=1. Detect buildings/parts with geometry equals to a pool geometry and delete them. Detect inner rings of buildings/parts with geometry equals to a pool geometry and remove them. """ parts = self.index_of_parts() pools = self.index_of_pools() to_clean = [] to_change = {} to_change_g = {} buildings_in_pools = 0 levels_to_outline = 0 parts_merged_to_building = 0 adjacent_parts_deleted = 0 pools_on_roofs = 0 visited_parcels = set() t_buildings = self.count("not regexp_match(localId, '_')") pbar = self.get_progressbar(_("Merge building parts"), t_buildings) for building in self.search("not regexp_match(localId, '_')"): ref = building["localId"] it_pools = pools[ref] it_parts = parts[ref] for pool in it_pools: if pool["layer"] != 1 and is_inside(pool, building): pool["layer"] = 1 to_change[pool.id()] = get_attributes(pool) pools_on_roofs += 1 del_building, new_geom = self.remove_inner_rings(building, pool) if del_building: to_clean.append(building.id()) buildings_in_pools += 1 break if new_geom: to_change_g[building.id()] = QgsGeometry(new_geom) if ref not in visited_parcels: for part in frozenset(it_parts): del_part, new_geom = self.remove_inner_rings(part, pool) if del_part: to_clean.append(part.id()) it_parts.remove(part) if part in parts[ref]: 
parts[ref].remove(part) adjacent_parts_deleted += 1 elif new_geom: to_change_g[part.id()] = QgsGeometry(new_geom) visited_parcels.add(ref) cn, cng, ch, chg = self.merge_adjacent_parts(building, it_parts) to_clean += cn + cng to_change.update(ch) to_change_g.update(chg) levels_to_outline += len(ch) parts_merged_to_building += len(cn) adjacent_parts_deleted += len(cng) pbar.update() pbar.close() if to_change: self.writer.changeAttributeValues(to_change) if to_change_g: self.writer.changeGeometryValues(to_change_g) if to_clean: self.writer.deleteFeatures(to_clean) if pools_on_roofs: log.debug(_("Located %d swimming pools over a building"), pools_on_roofs) report.pools_on_roofs = pools_on_roofs if buildings_in_pools: log.debug( _("Deleted %d buildings coincidents with a swimming pool"), buildings_in_pools, ) report.buildings_in_pools = buildings_in_pools if levels_to_outline: log.debug(_("Translated %d level values to the outline"), levels_to_outline) if parts_merged_to_building: log.debug( _("Merged %d building parts to the outline"), parts_merged_to_building ) report.parts_to_outline = parts_merged_to_building if adjacent_parts_deleted: log.debug(_("Merged %d adjacent parts"), adjacent_parts_deleted) report.adjacent_parts = adjacent_parts_deleted def clean(self): """ Clean geometries. Delete invalid geometries and close vertices, add topological points, merge building parts and simplify vertices. """ self.delete_invalid_geometries( query_small_area=lambda feat: "_part" not in feat["localId"] ) self.topology() self.merge_building_parts() self.simplify() self.delete_small_geometries() def move_entrance( self, ad, ad_buildings, ad_parts, to_move, to_insert, parents_per_vx, ): """ Auxiliary method to move entrance to the nearest building and part. Don't move and the entrance specification is changed if the new position is not enough close ('remote'), is a corner ('corner'), is in an inner ring ('inner') or is in a wall shared with another building ('shared'). 
""" point = ad.geometry().asPoint() distance = 9e9 for bu in ad_buildings: bg = bu.geometry() d, c, v = bg.closestSegmentWithContext(point)[:3] if d < distance: (building, distance, closest, vertex) = (bu, d, c, v) bg = building.geometry() bid = building.id() va = Point(bg.vertexAt(vertex - 1)) vb = Point(bg.vertexAt(vertex)) if distance > config.addr_thr**2: ad["spec"] = "remote" elif vertex > len(Geometry.get_multipolygon(bg)[0][0]): ad["spec"] = "inner" elif ( closest.sqrDist(va) < config.entrance_thr**2 or closest.sqrDist(vb) < config.entrance_thr**2 ): ad["spec"] = "corner" elif PolygonLayer.is_shared_segment(parents_per_vx, va, vb, bid): ad["spec"] = "shared" else: dg = Geometry.fromPointXY(closest) to_move[ad.id()] = dg bg.insertVertex(closest.x(), closest.y(), vertex) to_insert[bid] = QgsGeometry(bg) building.setGeometry(bg) for part in ad_parts: pg = part.geometry() r = Geometry.get_multipolygon(pg)[0][0] for i in range(len(r) - 1): vpa = Point(pg.vertexAt(i)) vpb = Point(pg.vertexAt(i + 1)) if va in (vpa, vpb) and vb in (vpa, vpb): pg.insertVertex(closest.x(), closest.y(), i + 1) to_insert[part.id()] = QgsGeometry(pg) part.setGeometry(pg) break def move_address(self, address): """ Try to move each entrance address to the nearest point in the building outline. Building and addresses are associated using the cadastral reference. Non entrance addresses ends in the building outline when CatAtom2Osm.merge_address is called. Delete the address if the number of associated buildings is 0 or greater than 1 for non entrance addresses. 
""" to_change = {} to_move = {} to_insert = {} to_clean = [] mp = 0 oa = 0 (buildings, parts) = self.index_of_building_and_parts() exp = "NOT(localId ~ '_')" ppv, geometries = self.get_parents_per_vertex_and_geometries(exp) pbar = self.get_progressbar(_("Move addresses"), address.featureCount()) for ad in address.getFeatures(): refcat = self.get_id(ad) building_count = len(buildings.get(refcat, [])) ad_buildings = buildings[refcat] ad_parts = parts[refcat] if building_count == 0: to_clean.append(ad.id()) oa += 1 else: if ad["spec"] == "Entrance": self.move_entrance( ad, ad_buildings, ad_parts, to_move, to_insert, ppv, ) if ad["spec"] != "Entrance" and building_count > 1: to_clean.append(ad.id()) mp += 1 if ad["spec"] != "Parcel" and building_count == 1: to_change[ad.id()] = get_attributes(ad) if len(to_insert) > BUFFER_SIZE: self.writer.changeGeometryValues(to_insert) to_insert = {} pbar.update() pbar.close() address.writer.changeAttributeValues(to_change) address.writer.changeGeometryValues(to_move) if len(to_insert) > 0: self.writer.changeGeometryValues(to_insert) msg = _("Moved %d addresses to entrance, %d specification changed") log.debug(msg, len(to_move), len(to_change)) if len(to_clean) > 0: address.writer.deleteFeatures(to_clean) if oa > 0: msg = _("Deleted %d addresses without associated building") log.debug(msg, oa) report.pool_addresses = oa if mp > 0: msg = _("Refused %d addresses belonging to multiple buildings") log.debug(msg, mp) report.multiple_addresses = mp def validate(self, max_level, min_level): """ Put fixmes to buildings with not valid geometry, too small or big. Returns distribution of floors. 
""" to_change = {} for feat in self.getFeatures(): geom = feat.geometry() errors = geom.validateGeometry() if errors: feat["fixme"] = ( _("GEOS validation") + ": " + "; ".join([e.what() for e in errors]) ) to_change[feat.id()] = get_attributes(feat) if ConsLayer.is_building(feat): localid = feat["localId"] if isinstance(feat["lev_above"], int) and feat["lev_above"] > 0: max_level[localid] = feat["lev_above"] if isinstance(feat["lev_below"], int) and feat["lev_below"] > 0: min_level[localid] = feat["lev_below"] if feat.id() not in to_change: area = geom.area() if area < config.warning_min_area: feat["fixme"] = _("Check, area too small") to_change[feat.id()] = get_attributes(feat) if area > config.warning_max_area: feat["fixme"] = _("Check, area too big") to_change[feat.id()] = get_attributes(feat) if to_change: self.writer.changeAttributeValues(to_change) def conflate(self, current_bu_osm, delete=True): """ Remove from current_bu_osm the buildings that don't have conflicts. If delete=False, only mark buildings with conflicts. 
""" if len(current_bu_osm.elements) == 0: return index = self.get_index() geometries = {f.id(): QgsGeometry(f.geometry()) for f in self.getFeatures()} num_buildings = 0 conflicts = 0 to_clean = set() pbar = self.get_progressbar(_("Conflate"), len(current_bu_osm.elements)) for el in current_bu_osm.elements: poly = None is_pool = "leisure" in el.tags and el.tags["leisure"] == "swimming_pool" is_building = "building" in el.tags if el.type == "way" and el.is_closed() and (is_building or is_pool): poly = [[map(Point, el.geometry())]] elif el.type == "relation" and (is_building or is_pool): poly = [[map(Point, w)] for w in el.outer_geometry()] if poly: num_buildings += 1 geom = Geometry().fromMultiPolygonXY(poly) if geom is None or not geom.isGeosValid(): msg = _("OSM building with id %s is not valid") % el.fid pbar.clear() log.warning(msg) report.warnings.append(msg) else: fids = index.intersects(geom.boundingBox()) conflict = False for fid in fids: fg = geometries[fid] if geom.contains(fg) or fg.contains(geom) or geom.overlaps(fg): conflict = True conflicts += 1 break if delete and not conflict: to_clean.add(el) if not delete and conflict: el.tags["conflict"] = "yes" pbar.update() pbar.close() for el in to_clean: current_bu_osm.remove(el) log.debug( _("Detected %d conflicts in %d buildings/pools from OSM"), conflicts, num_buildings, ) report.osm_buildings = num_buildings report.osm_building_conflicts = conflicts return len(to_clean) > 0
import logging from collections import defaultdict from qgis.core import QgsFeatureRequest, QgsField, QgsGeometry from qgis.PyQt.QtCore import QVariant from catatom2osm import config, translate from catatom2osm.geo import BUFFER_SIZE, SIMPLIFY_BUILDING_PARTS from catatom2osm.geo.geometry import Geometry from catatom2osm.geo.layer.polygon import PolygonLayer from catatom2osm.geo.point import Point from catatom2osm.geo.tools import get_attributes, is_inside from catatom2osm.report import instance as report log = logging.getLogger(config.app_name) class ConsLayer(PolygonLayer): """Class for constructions.""" def __init__( self, path="MultiPolygon", baseName="building", providerLib="memory", source_date=None, ): super(ConsLayer, self).__init__(path, baseName, providerLib) if self.fields().isEmpty(): self.writer.addAttributes( [ QgsField("localId", QVariant.String, len=254), QgsField("condition", QVariant.String, len=254), QgsField("image", QVariant.String, len=254), QgsField("currentUse", QVariant.String, len=254), QgsField("bu_units", QVariant.Int), QgsField("dwellings", QVariant.Int), QgsField("lev_above", QVariant.Int), QgsField("lev_below", QVariant.Int), QgsField("nature", QVariant.String, len=254), QgsField("task", QVariant.String, len=254), QgsField("fixme", QVariant.String, len=254), QgsField("layer", QVariant.Int), ] ) self.updateFields() self.rename = { "condition": "conditionOfConstruction", "image": "documentLink", "bu_units": "numberOfBuildingUnits", "dwellings": "numberOfDwellings", "lev_above": "numberOfFloorsAboveGround", "lev_below": "numberOfFloorsBelowGround", "nature": "constructionNature", } self.source_date = source_date @staticmethod def is_building(feature): """Return True for building features.""" return "_" not in feature["localId"] @staticmethod def is_part(feature): """Return True for Part features.""" return "_part" in feature["localId"] @staticmethod def is_pool(feature): """Return True for Pool features.""" return "_PI." 
in feature["localId"] @staticmethod def get_id(feat): """Trim to parcel id.""" return feat["localId"].split("_")[0].split(".")[-1] def explode_multi_parts(self, address=False): request = QgsFeatureRequest() if address: refs = {self.get_id(ad) for ad in address.getFeatures()} fids = [f.id() for f in self.getFeatures() if f["localId"] not in refs] request.setFilterFids(fids) super(ConsLayer, self).explode_multi_parts(request) def to_osm(self, data=None, tags={}, upload="never"): """Export to OSM.""" return super(ConsLayer, self).to_osm( translate.building_tags, data, tags=tags, upload=upload ) def index_of_parts(self): """Index parts of building by building localid.""" parts = defaultdict(list) for part in self.search("regexp_match(localId, '_part')"): localId = self.get_id(part) parts[localId].append(part) return parts def index_of_pools(self): """Index pools in building parcel by building localid.""" pools = defaultdict(list) for pool in self.search("regexp_match(localId, '_PI')"): localId = self.get_id(pool) pools[localId].append(pool) return pools def index_of_building_and_parts(self): """ Construct some utility dicts. buildings index building by localid (call before explode_multi_parts). parts index parts of building by building localid. 
""" buildings = defaultdict(list) parts = defaultdict(list) for feature in self.getFeatures(): if self.is_building(feature): buildings[feature["localId"]].append(feature) elif self.is_part(feature): localId = self.get_id(feature) parts[localId].append(feature) return (buildings, parts) def remove_parts_wo_building(self): """Remove building parts without building.""" bu_refs = [f["localId"] for f in self.getFeatures() if self.is_building(f)] to_clean = [ f.id() for f in self.getFeatures() if self.is_part(f) and self.get_id(f) not in bu_refs ] if to_clean: self.writer.deleteFeatures(to_clean) log.debug(_("Removed %d parts without building"), len(to_clean)) report.parts_wo_building = len(to_clean) def remove_outside_parts(self): """ Remove parts outside the outline of it building. Remove parts without levels above ground. Precondition: Called before merge_greatest_part. """ to_clean_o = [] to_clean_b = [] buildings = {f["localId"]: f for f in self.getFeatures() if self.is_building(f)} pbar = self.get_progressbar(_("Remove outside parts"), self.featureCount()) for feat in self.getFeatures(): if self.is_part(feat): ref = self.get_id(feat) if feat["lev_above"] == 0 and feat["lev_below"] != 0: to_clean_b.append(feat.id()) elif ref in buildings: bu = buildings[ref] if not is_inside(feat, bu): to_clean_o.append(feat.id()) pbar.update() pbar.close() if len(to_clean_o) + len(to_clean_b) > 0: self.writer.deleteFeatures(to_clean_o + to_clean_b) if len(to_clean_o) > 0: log.debug( _("Removed %d building parts outside the outline"), len(to_clean_o) ) report.outside_parts = len(to_clean_o) if len(to_clean_b) > 0: log.debug( _("Deleted %d building parts with no floors above ground"), len(to_clean_b), ) report.underground_parts = len(to_clean_b) def get_parts(self, outline, parts): """ Return a dictionary of parts for levels, the maximum and minimum levels. Given the building outline and its parts, for the parts inside the outline. 
""" max_level = 0 min_level = 0 parts_for_level = defaultdict(list) for part in parts: if is_inside(part, outline): level = (part["lev_above"] or 0, part["lev_below"] or 0) if level[0] > max_level: max_level = level[0] if level[1] > min_level: min_level = level[1] parts_for_level[level].append(part) return parts_for_level, max_level, min_level def merge_adjacent_parts(self, outline, parts): """ Merge the adjacent parts in each level given a building outline and its parts. Translates the maximum values of number of levels above and below ground to the outline and optionally deletes all the parts in that level. """ to_clean = [] to_clean_g = [] to_change = {} to_change_g = {} parts_for_level, max_level, min_level = self.get_parts(outline, parts) parts_area = 0 outline["lev_above"] = max_level outline["lev_below"] = min_level building_area = round(outline.geometry().area(), 0) for (level, parts) in parts_for_level.items(): check_area = False for part in parts: part_area = part.geometry().area() parts_area += part_area if round(part_area, 0) > building_area: part["fixme"] = _("This part is bigger than its building") to_change[part.id()] = get_attributes(part) check_area = True if check_area: continue if len(parts_for_level) == 1 or ( level == (max_level, min_level) and SIMPLIFY_BUILDING_PARTS ): to_clean = [p.id() for p in parts_for_level[max_level, min_level]] else: geom = Geometry.merge_adjacent_features(parts) poly = Geometry.get_multipolygon(geom) if len(poly) < len(parts): for (i, part) in enumerate(parts): if i < len(poly): g = Geometry.fromPolygonXY(poly[i]) to_change_g[part.id()] = g else: to_clean_g.append(part.id()) if len(parts_for_level) > 1 and round(parts_area, 0) != building_area: outline["fixme"] = _("Building parts don't fill the building outline") to_change[outline.id()] = get_attributes(outline) return to_clean, to_clean_g, to_change, to_change_g def remove_inner_rings(self, feat1, feat2): """ Auxiliary method to remove feat1 of its inner rings if 
equals to feat2. Returns True if feat1 must be deleted and new geometry if any ring is removed. """ poly = Geometry.get_multipolygon(feat1)[0] geom2 = Geometry.fromPolygonXY(Geometry.get_multipolygon(feat2)[0]) delete = False new_geom = None delete_rings = [] for i, ring in enumerate(poly): if Geometry.fromPolygonXY([ring]).equals(geom2): if i == 0: delete = True break else: delete_rings.append(i) if delete_rings: new_poly = [ring for i, ring in enumerate(poly) if i not in delete_rings] new_geom = Geometry().fromPolygonXY(new_poly) return delete, new_geom def merge_building_parts(self): """ Apply merge_adjacent_parts to each set of building and its parts. Detect pools contained in a building and assign layer=1. Detect buildings/parts with geometry equals to a pool geometry and delete them. Detect inner rings of buildings/parts with geometry equals to a pool geometry and remove them. """ parts = self.index_of_parts() pools = self.index_of_pools() to_clean = [] to_change = {} to_change_g = {} buildings_in_pools = 0 levels_to_outline = 0 parts_merged_to_building = 0 adjacent_parts_deleted = 0 pools_on_roofs = 0 visited_parcels = set() t_buildings = self.count("not regexp_match(localId, '_')") pbar = self.get_progressbar(_("Merge building parts"), t_buildings) for building in self.search("not regexp_match(localId, '_')"): ref = building["localId"] it_pools = pools[ref] it_parts = parts[ref] for pool in it_pools: if pool["layer"] != 1 and is_inside(pool, building): pool["layer"] = 1 to_change[pool.id()] = get_attributes(pool) pools_on_roofs += 1 del_building, new_geom = self.remove_inner_rings(building, pool) if del_building: to_clean.append(building.id()) buildings_in_pools += 1 break if new_geom: to_change_g[building.id()] = QgsGeometry(new_geom) if ref not in visited_parcels: for part in frozenset(it_parts): del_part, new_geom = self.remove_inner_rings(part, pool) if del_part: to_clean.append(part.id()) it_parts.remove(part) if part in parts[ref]: 
parts[ref].remove(part) adjacent_parts_deleted += 1 elif new_geom: to_change_g[part.id()] = QgsGeometry(new_geom) visited_parcels.add(ref) cn, cng, ch, chg = self.merge_adjacent_parts(building, it_parts) to_clean += cn + cng to_change.update(ch) to_change_g.update(chg) levels_to_outline += len(ch) parts_merged_to_building += len(cn) adjacent_parts_deleted += len(cng) pbar.update() pbar.close() if to_change: self.writer.changeAttributeValues(to_change) if to_change_g: self.writer.changeGeometryValues(to_change_g) if to_clean: self.writer.deleteFeatures(to_clean) if pools_on_roofs: log.debug(_("Located %d swimming pools over a building"), pools_on_roofs) report.pools_on_roofs = pools_on_roofs if buildings_in_pools: log.debug( _("Deleted %d buildings coincidents with a swimming pool"), buildings_in_pools, ) report.buildings_in_pools = buildings_in_pools if levels_to_outline: log.debug(_("Translated %d level values to the outline"), levels_to_outline) if parts_merged_to_building: log.debug( _("Merged %d building parts to the outline"), parts_merged_to_building ) report.parts_to_outline = parts_merged_to_building if adjacent_parts_deleted: log.debug(_("Merged %d adjacent parts"), adjacent_parts_deleted) report.adjacent_parts = adjacent_parts_deleted def clean(self): """ Clean geometries. Delete invalid geometries and close vertices, add topological points, merge building parts and simplify vertices. """ self.delete_invalid_geometries( query_small_area=lambda feat: "_part" not in feat["localId"] ) self.topology() self.merge_building_parts() self.simplify() self.delete_small_geometries() def move_entrance( self, ad, ad_buildings, ad_parts, to_move, to_insert, parents_per_vx, ): """ Auxiliary method to move entrance to the nearest building and part. Don't move and the entrance specification is changed if the new position is not enough close ('remote'), is a corner ('corner'), is in an inner ring ('inner') or is in a wall shared with another building ('shared'). 
""" point = ad.geometry().asPoint() distance = 9e9 for bu in ad_buildings: bg = bu.geometry() d, c, v = bg.closestSegmentWithContext(point)[:3] if d < distance: (building, distance, closest, vertex) = (bu, d, c, v) bg = building.geometry() bid = building.id() va = Point(bg.vertexAt(vertex - 1)) vb = Point(bg.vertexAt(vertex)) if distance > config.addr_thr**2: ad["spec"] = "remote" elif vertex > len(Geometry.get_multipolygon(bg)[0][0]): ad["spec"] = "inner" elif ( closest.sqrDist(va) < config.entrance_thr**2 or closest.sqrDist(vb) < config.entrance_thr**2 ): ad["spec"] = "corner" elif PolygonLayer.is_shared_segment(parents_per_vx, va, vb, bid): ad["spec"] = "shared" else: dg = Geometry.fromPointXY(closest) to_move[ad.id()] = dg bg.insertVertex(closest.x(), closest.y(), vertex) to_insert[bid] = QgsGeometry(bg) building.setGeometry(bg) for part in ad_parts: pg = part.geometry() r = Geometry.get_multipolygon(pg)[0][0] for i in range(len(r) - 1): vpa = Point(pg.vertexAt(i)) vpb = Point(pg.vertexAt(i + 1)) if va in (vpa, vpb) and vb in (vpa, vpb): pg.insertVertex(closest.x(), closest.y(), i + 1) to_insert[part.id()] = QgsGeometry(pg) part.setGeometry(pg) break def move_address(self, address): """ Try to move each entrance address to the nearest point in the building outline. Building and addresses are associated using the cadastral reference. Non entrance addresses ends in the building outline when CatAtom2Osm.merge_address is called. Delete the address if the number of associated buildings is 0 or greater than 1 for non entrance addresses. 
""" to_change = {} to_move = {} to_insert = {} to_clean = [] mp = 0 oa = 0 (buildings, parts) = self.index_of_building_and_parts() exp = "NOT(localId ~ '_')" ppv, geometries = self.get_parents_per_vertex_and_geometries(exp) pbar = self.get_progressbar(_("Move addresses"), address.featureCount()) for ad in address.getFeatures(): refcat = self.get_id(ad) building_count = len(buildings.get(refcat, [])) ad_buildings = buildings[refcat] ad_parts = parts[refcat] if building_count == 0: to_clean.append(ad.id()) oa += 1 else: if ad["spec"] == "Entrance": self.move_entrance( ad, ad_buildings, ad_parts, to_move, to_insert, ppv, ) if ad["spec"] != "Entrance" and building_count > 1: to_clean.append(ad.id()) mp += 1 if ad["spec"] != "Parcel" and building_count == 1: to_change[ad.id()] = get_attributes(ad) if len(to_insert) > BUFFER_SIZE: self.writer.changeGeometryValues(to_insert) to_insert = {} pbar.update() pbar.close() address.writer.changeAttributeValues(to_change) address.writer.changeGeometryValues(to_move) if len(to_insert) > 0: self.writer.changeGeometryValues(to_insert) msg = _("Moved %d addresses to entrance, %d specification changed") log.debug(msg, len(to_move), len(to_change)) if len(to_clean) > 0: address.writer.deleteFeatures(to_clean) if oa > 0: msg = _("Deleted %d addresses without associated building") log.debug(msg, oa) report.pool_addresses = oa if mp > 0: msg = _("Refused %d addresses belonging to multiple buildings") log.debug(msg, mp) report.multiple_addresses = mp def validate(self, max_level, min_level): """ Put fixmes to buildings with not valid geometry, too small or big. Returns distribution of floors. 
""" to_change = {} for feat in self.getFeatures(): geom = feat.geometry() errors = geom.validateGeometry() if errors: feat["fixme"] = ( _("GEOS validation") + ": " + "; ".join([e.what() for e in errors]) ) to_change[feat.id()] = get_attributes(feat) if ConsLayer.is_building(feat): localid = feat["localId"] if isinstance(feat["lev_above"], int) and feat["lev_above"] > 0: max_level[localid] = feat["lev_above"] if isinstance(feat["lev_below"], int) and feat["lev_below"] > 0: min_level[localid] = feat["lev_below"] if feat.id() not in to_change: area = geom.area() if area < config.warning_min_area: feat["fixme"] = _("Check, area too small") to_change[feat.id()] = get_attributes(feat) if area > config.warning_max_area: feat["fixme"] = _("Check, area too big") to_change[feat.id()] = get_attributes(feat) if to_change: self.writer.changeAttributeValues(to_change) def conflate(self, current_bu_osm, delete=True): """ Remove from current_bu_osm the buildings that don't have conflicts. If delete=False, only mark buildings with conflicts. 
""" if len(current_bu_osm.elements) == 0: return index = self.get_index() geometries = {f.id(): QgsGeometry(f.geometry()) for f in self.getFeatures()} num_buildings = 0 conflicts = 0 to_clean = set() pbar = self.get_progressbar(_("Conflate"), len(current_bu_osm.elements)) for el in current_bu_osm.elements: poly = None is_pool = "leisure" in el.tags and el.tags["leisure"] == "swimming_pool" is_building = "building" in el.tags if el.type == "way" and el.is_closed() and (is_building or is_pool): poly = [[map(Point, el.geometry())]] elif el.type == "relation" and (is_building or is_pool): poly = [[map(Point, w)] for w in el.outer_geometry()] if poly: num_buildings += 1 geom = Geometry().fromMultiPolygonXY(poly) if geom is None or not geom.isGeosValid(): msg = _("OSM building with id %s is not valid") % el.fid pbar.clear() log.warning(msg) report.warnings.append(msg) else: fids = index.intersects(geom.boundingBox()) conflict = False for fid in fids: fg = geometries[fid] if geom.contains(fg) or fg.contains(geom) or geom.overlaps(fg): conflict = True conflicts += 1 break if delete and not conflict: to_clean.add(el) if not delete and conflict: el.tags["conflict"] = "yes" pbar.update() pbar.close() for el in to_clean: current_bu_osm.remove(el) log.debug( _("Detected %d conflicts in %d buildings/pools from OSM"), conflicts, num_buildings, ) report.osm_buildings = num_buildings report.osm_building_conflicts = conflicts return len(to_clean) > 0
en
0.925854
Class for constructions. Return True for building features. Return True for Part features. Return True for Pool features. Trim to parcel id. Export to OSM. Index parts of building by building localid. Index pools in building parcel by building localid. Construct some utility dicts. buildings index building by localid (call before explode_multi_parts). parts index parts of building by building localid. Remove building parts without building. Remove parts outside the outline of it building. Remove parts without levels above ground. Precondition: Called before merge_greatest_part. Return a dictionary of parts for levels, the maximum and minimum levels. Given the building outline and its parts, for the parts inside the outline. Merge the adjacent parts in each level given a building outline and its parts. Translates the maximum values of number of levels above and below ground to the outline and optionally deletes all the parts in that level. Auxiliary method to remove feat1 of its inner rings if equals to feat2. Returns True if feat1 must be deleted and new geometry if any ring is removed. Apply merge_adjacent_parts to each set of building and its parts. Detect pools contained in a building and assign layer=1. Detect buildings/parts with geometry equals to a pool geometry and delete them. Detect inner rings of buildings/parts with geometry equals to a pool geometry and remove them. Clean geometries. Delete invalid geometries and close vertices, add topological points, merge building parts and simplify vertices. Auxiliary method to move entrance to the nearest building and part. Don't move and the entrance specification is changed if the new position is not enough close ('remote'), is a corner ('corner'), is in an inner ring ('inner') or is in a wall shared with another building ('shared'). Try to move each entrance address to the nearest point in the building outline. Building and addresses are associated using the cadastral reference. 
Non entrance addresses ends in the building outline when CatAtom2Osm.merge_address is called. Delete the address if the number of associated buildings is 0 or greater than 1 for non entrance addresses. Put fixmes to buildings with not valid geometry, too small or big. Returns distribution of floors. Remove from current_bu_osm the buildings that don't have conflicts. If delete=False, only mark buildings with conflicts.
2.023499
2
utils/util.py
seyidliadem/Music
0
6628729
<gh_stars>0 def humanbytes(num, suffix='B'): if num is None: num = 0 else: num = int(num) for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: if abs(num) < 1024.0: return "%3.1f%s%s" % (num, unit, suffix) num /= 1024.0 return "%.1f%s%s" % (num, 'Yi', suffix)
def humanbytes(num, suffix='B'): if num is None: num = 0 else: num = int(num) for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: if abs(num) < 1024.0: return "%3.1f%s%s" % (num, unit, suffix) num /= 1024.0 return "%.1f%s%s" % (num, 'Yi', suffix)
none
1
3.184214
3
tests/unit/test_condition.py
etta-trust/PolicyGlass
49
6628730
<reponame>etta-trust/PolicyGlass<filename>tests/unit/test_condition.py import pytest from policyglass import Condition, ConditionOperator CONDITION_REVERSIBLE_SCENARIOS = { "StringEquals": { "input": Condition("TestKey", "StringEquals", ["TestValue"]), "output": Condition("TestKey", "StringNotEquals", ["TestValue"]), }, "StringNotEquals": { "input": Condition("TestKey", "StringNotEquals", ["TestValue"]), "output": Condition("TestKey", "StringEquals", ["TestValue"]), }, "StringEqualsIgnoreCase": { "input": Condition("TestKey", "StringEqualsIgnoreCase", ["TestValue"]), "output": Condition("TestKey", "StringNotEqualsIgnoreCase", ["TestValue"]), }, "StringNotEqualsIgnoreCase": { "input": Condition("TestKey", "StringNotEqualsIgnoreCase", ["TestValue"]), "output": Condition("TestKey", "StringEqualsIgnoreCase", ["TestValue"]), }, "StringLike": { "input": Condition("TestKey", "StringLike", ["TestValue"]), "output": Condition("TestKey", "StringNotLike", ["TestValue"]), }, "StringNotLike": { "input": Condition("TestKey", "StringNotLike", ["TestValue"]), "output": Condition("TestKey", "StringLike", ["TestValue"]), }, "NumericEquals": { "input": Condition("TestKey", "NumericEquals", ["1"]), "output": Condition("TestKey", "NumericNotEquals", ["1"]), }, "NumericNotEquals": { "input": Condition("TestKey", "NumericNotEquals", ["1"]), "output": Condition("TestKey", "NumericEquals", ["1"]), }, "NumericLessThan": { "input": Condition("TestKey", "NumericLessThan", ["1"]), "output": Condition("TestKey", "NumericGreaterThanEquals", ["1"]), }, "NumericGreaterThan": { "input": Condition("TestKey", "NumericGreaterThan", ["1"]), "output": Condition("TestKey", "NumericLessThanEquals", ["1"]), }, "NumericLessThanEquals": { "input": Condition("TestKey", "NumericLessThanEquals", ["1"]), "output": Condition("TestKey", "NumericGreaterThan", ["1"]), }, "NumericGreaterThanEquals": { "input": Condition("TestKey", "NumericGreaterThanEquals", ["1"]), "output": Condition("TestKey", "NumericLessThan", 
["1"]), }, "DateEquals": { "input": Condition("TestKey", "DateEquals", ["2020-01-01T00:00:01Z"]), "output": Condition("TestKey", "DateNotEquals", ["2020-01-01T00:00:01Z"]), }, "DateNotEquals": { "input": Condition("TestKey", "DateNotEquals", ["2020-01-01T00:00:01Z"]), "output": Condition("TestKey", "DateEquals", ["2020-01-01T00:00:01Z"]), }, "DateLessThan": { "input": Condition("TestKey", "DateLessThan", ["2020-01-01T00:00:01Z"]), "output": Condition("TestKey", "DateGreaterThanEquals", ["2020-01-01T00:00:01Z"]), }, "DateGreaterThan": { "input": Condition("TestKey", "DateGreaterThan", ["2020-01-01T00:00:01Z"]), "output": Condition("TestKey", "DateLessThanEquals", ["2020-01-01T00:00:01Z"]), }, "DateLessThanEquals": { "input": Condition("TestKey", "DateLessThanEquals", ["2020-01-01T00:00:01Z"]), "output": Condition("TestKey", "DateGreaterThan", ["2020-01-01T00:00:01Z"]), }, "DateGreaterThanEquals": { "input": Condition("TestKey", "DateGreaterThanEquals", ["2020-01-01T00:00:01Z"]), "output": Condition("TestKey", "DateLessThan", ["2020-01-01T00:00:01Z"]), }, "IpAddress": { "input": Condition("TestKey", "IpAddress", ["203.0.113.0/24"]), "output": Condition("TestKey", "NotIpAddress", ["203.0.113.0/24"]), }, "NotIpAddress": { "input": Condition("TestKey", "NotIpAddress", ["203.0.113.0/24"]), "output": Condition("TestKey", "IpAddress", ["203.0.113.0/24"]), }, "ArnEquals": { "input": Condition("TestKey", "ArnEquals", ["203.0.113.0/24"]), "output": Condition("TestKey", "ArnNotEquals", ["203.0.113.0/24"]), }, "ArnNotEquals": { "input": Condition("TestKey", "ArnNotEquals", ["203.0.113.0/24"]), "output": Condition("TestKey", "ArnEquals", ["203.0.113.0/24"]), }, } @pytest.mark.parametrize("_, scenario", CONDITION_REVERSIBLE_SCENARIOS.items()) def test_condition_reversible(_, scenario): input = scenario["input"] output = scenario["output"] assert input.reverse == output @pytest.mark.parametrize("_, scenario", CONDITION_REVERSIBLE_SCENARIOS.items()) def 
test_condition_reversible_if_exists(_, scenario): input = scenario["input"] input.operator = ConditionOperator(input.operator + "IfExists") output = scenario["output"] output.operator = ConditionOperator(output.operator + "IfExists") assert input.reverse == output CONDITION_NON_REVERSIBLE_SCENARIOS = { "BinaryEquals": { "input": Condition("TestKey", "BinaryEquals", ["TestValue"]), } } @pytest.mark.parametrize("_, scenario", CONDITION_NON_REVERSIBLE_SCENARIOS.items()) def test_condition_not_reversible(_, scenario): input = scenario["input"] with pytest.raises(ValueError) as ex: input.reverse assert f"Cannot reverse conditions with operator {input.operator}" in str(ex.value) def test_condition_reversal_not_case_sensitive(): assert Condition("TestKey", "STRINGEQUALS", ["TestValue"]).reverse == Condition( "TestKey", "stringnotequals", ["TestValue"] )
import pytest from policyglass import Condition, ConditionOperator CONDITION_REVERSIBLE_SCENARIOS = { "StringEquals": { "input": Condition("TestKey", "StringEquals", ["TestValue"]), "output": Condition("TestKey", "StringNotEquals", ["TestValue"]), }, "StringNotEquals": { "input": Condition("TestKey", "StringNotEquals", ["TestValue"]), "output": Condition("TestKey", "StringEquals", ["TestValue"]), }, "StringEqualsIgnoreCase": { "input": Condition("TestKey", "StringEqualsIgnoreCase", ["TestValue"]), "output": Condition("TestKey", "StringNotEqualsIgnoreCase", ["TestValue"]), }, "StringNotEqualsIgnoreCase": { "input": Condition("TestKey", "StringNotEqualsIgnoreCase", ["TestValue"]), "output": Condition("TestKey", "StringEqualsIgnoreCase", ["TestValue"]), }, "StringLike": { "input": Condition("TestKey", "StringLike", ["TestValue"]), "output": Condition("TestKey", "StringNotLike", ["TestValue"]), }, "StringNotLike": { "input": Condition("TestKey", "StringNotLike", ["TestValue"]), "output": Condition("TestKey", "StringLike", ["TestValue"]), }, "NumericEquals": { "input": Condition("TestKey", "NumericEquals", ["1"]), "output": Condition("TestKey", "NumericNotEquals", ["1"]), }, "NumericNotEquals": { "input": Condition("TestKey", "NumericNotEquals", ["1"]), "output": Condition("TestKey", "NumericEquals", ["1"]), }, "NumericLessThan": { "input": Condition("TestKey", "NumericLessThan", ["1"]), "output": Condition("TestKey", "NumericGreaterThanEquals", ["1"]), }, "NumericGreaterThan": { "input": Condition("TestKey", "NumericGreaterThan", ["1"]), "output": Condition("TestKey", "NumericLessThanEquals", ["1"]), }, "NumericLessThanEquals": { "input": Condition("TestKey", "NumericLessThanEquals", ["1"]), "output": Condition("TestKey", "NumericGreaterThan", ["1"]), }, "NumericGreaterThanEquals": { "input": Condition("TestKey", "NumericGreaterThanEquals", ["1"]), "output": Condition("TestKey", "NumericLessThan", ["1"]), }, "DateEquals": { "input": Condition("TestKey", "DateEquals", 
["2020-01-01T00:00:01Z"]), "output": Condition("TestKey", "DateNotEquals", ["2020-01-01T00:00:01Z"]), }, "DateNotEquals": { "input": Condition("TestKey", "DateNotEquals", ["2020-01-01T00:00:01Z"]), "output": Condition("TestKey", "DateEquals", ["2020-01-01T00:00:01Z"]), }, "DateLessThan": { "input": Condition("TestKey", "DateLessThan", ["2020-01-01T00:00:01Z"]), "output": Condition("TestKey", "DateGreaterThanEquals", ["2020-01-01T00:00:01Z"]), }, "DateGreaterThan": { "input": Condition("TestKey", "DateGreaterThan", ["2020-01-01T00:00:01Z"]), "output": Condition("TestKey", "DateLessThanEquals", ["2020-01-01T00:00:01Z"]), }, "DateLessThanEquals": { "input": Condition("TestKey", "DateLessThanEquals", ["2020-01-01T00:00:01Z"]), "output": Condition("TestKey", "DateGreaterThan", ["2020-01-01T00:00:01Z"]), }, "DateGreaterThanEquals": { "input": Condition("TestKey", "DateGreaterThanEquals", ["2020-01-01T00:00:01Z"]), "output": Condition("TestKey", "DateLessThan", ["2020-01-01T00:00:01Z"]), }, "IpAddress": { "input": Condition("TestKey", "IpAddress", ["203.0.113.0/24"]), "output": Condition("TestKey", "NotIpAddress", ["203.0.113.0/24"]), }, "NotIpAddress": { "input": Condition("TestKey", "NotIpAddress", ["203.0.113.0/24"]), "output": Condition("TestKey", "IpAddress", ["203.0.113.0/24"]), }, "ArnEquals": { "input": Condition("TestKey", "ArnEquals", ["203.0.113.0/24"]), "output": Condition("TestKey", "ArnNotEquals", ["203.0.113.0/24"]), }, "ArnNotEquals": { "input": Condition("TestKey", "ArnNotEquals", ["203.0.113.0/24"]), "output": Condition("TestKey", "ArnEquals", ["203.0.113.0/24"]), }, } @pytest.mark.parametrize("_, scenario", CONDITION_REVERSIBLE_SCENARIOS.items()) def test_condition_reversible(_, scenario): input = scenario["input"] output = scenario["output"] assert input.reverse == output @pytest.mark.parametrize("_, scenario", CONDITION_REVERSIBLE_SCENARIOS.items()) def test_condition_reversible_if_exists(_, scenario): input = scenario["input"] input.operator = 
ConditionOperator(input.operator + "IfExists") output = scenario["output"] output.operator = ConditionOperator(output.operator + "IfExists") assert input.reverse == output CONDITION_NON_REVERSIBLE_SCENARIOS = { "BinaryEquals": { "input": Condition("TestKey", "BinaryEquals", ["TestValue"]), } } @pytest.mark.parametrize("_, scenario", CONDITION_NON_REVERSIBLE_SCENARIOS.items()) def test_condition_not_reversible(_, scenario): input = scenario["input"] with pytest.raises(ValueError) as ex: input.reverse assert f"Cannot reverse conditions with operator {input.operator}" in str(ex.value) def test_condition_reversal_not_case_sensitive(): assert Condition("TestKey", "STRINGEQUALS", ["TestValue"]).reverse == Condition( "TestKey", "stringnotequals", ["TestValue"] )
none
1
2.309241
2
windows_packages_gpu/torch/testing/_internal/test_module/no_future_div.py
codeproject/DeepStack
353
6628731
<gh_stars>100-1000 import torch # noqa: F401 def div_int_nofuture(): return 1 / 2 def div_float_nofuture(): return 3.14 / 0.125
import torch # noqa: F401 def div_int_nofuture(): return 1 / 2 def div_float_nofuture(): return 3.14 / 0.125
uz
0.465103
# noqa: F401
1.937801
2
smart_selects/urls.py
flibbertigibbet/django-smart-selects
0
6628732
try: from django.conf.urls.defaults import patterns, url except ImportError: from django.conf.urls import patterns, url urlpatterns = patterns( 'smart_selects.views', url(r'^all/(?P<app>[\w\-]+)/(?P<model>[\w\-]+)/(?P<field>[\w\-]+)/(?P<foreign_key_app_name>[\w\-]+)/(?P<foreign_key_model_name>[\w\-]+)/(?P<foreign_key_field_name>[\w\-]+)/(?P<value>[\w\-]+)/$', 'filterchain_all', name='chained_filter_all'), url(r'^filter/(?P<app>[\w\-]+)/(?P<model>[\w\-]+)/(?P<field>[\w\-]+)/(?P<foreign_key_app_name>[\w\-]+)/(?P<foreign_key_model_name>[\w\-]+)/(?P<foreign_key_field_name>[\w\-]+)/(?P<value>[\w\-]+)/$', 'filterchain', name='chained_filter'), url(r'^filter/(?P<app>[\w\-]+)/(?P<model>[\w\-]+)/(?P<manager>[\w\-]+)/(?P<field>[\w\-]+)/(?P<foreign_key_app_name>[\w\-]+)/(?P<foreign_key_model_name>[\w\-]+)/(?P<foreign_key_field_name>[\w\-]+)/(?P<value>[\w\-]+)/$', 'filterchain', name='chained_filter'), )
try: from django.conf.urls.defaults import patterns, url except ImportError: from django.conf.urls import patterns, url urlpatterns = patterns( 'smart_selects.views', url(r'^all/(?P<app>[\w\-]+)/(?P<model>[\w\-]+)/(?P<field>[\w\-]+)/(?P<foreign_key_app_name>[\w\-]+)/(?P<foreign_key_model_name>[\w\-]+)/(?P<foreign_key_field_name>[\w\-]+)/(?P<value>[\w\-]+)/$', 'filterchain_all', name='chained_filter_all'), url(r'^filter/(?P<app>[\w\-]+)/(?P<model>[\w\-]+)/(?P<field>[\w\-]+)/(?P<foreign_key_app_name>[\w\-]+)/(?P<foreign_key_model_name>[\w\-]+)/(?P<foreign_key_field_name>[\w\-]+)/(?P<value>[\w\-]+)/$', 'filterchain', name='chained_filter'), url(r'^filter/(?P<app>[\w\-]+)/(?P<model>[\w\-]+)/(?P<manager>[\w\-]+)/(?P<field>[\w\-]+)/(?P<foreign_key_app_name>[\w\-]+)/(?P<foreign_key_model_name>[\w\-]+)/(?P<foreign_key_field_name>[\w\-]+)/(?P<value>[\w\-]+)/$', 'filterchain', name='chained_filter'), )
none
1
1.954825
2
cleverhans/tf2/attacks/momentum_iterative_method.py
xu-weizhen/cleverhans
4,333
6628733
"""The MomentumIterativeMethod attack.""" import numpy as np import tensorflow as tf from cleverhans.tf2.utils import optimize_linear, compute_gradient from cleverhans.tf2.utils import clip_eta def momentum_iterative_method( model_fn, x, eps=0.3, eps_iter=0.06, nb_iter=10, norm=np.inf, clip_min=None, clip_max=None, y=None, targeted=False, decay_factor=1.0, sanity_checks=True, ): """ Tensorflow 2.0 implementation of Momentum Iterative Method (Dong et al. 2017). This method won the first places in NIPS 2017 Non-targeted Adversarial Attacks and Targeted Adversarial Attacks. The original paper used hard labels for this attack; no label smoothing. Paper link: https://arxiv.org/pdf/1710.06081.pdf :param model_fn: a callable that takes an input tensor and returns the model logits. :param x: input tensor. :param eps: (optional float) maximum distortion of adversarial example compared to original input :param eps_iter: (optional float) step size for each attack iteration :param nb_iter: (optional int) Number of attack iterations. :param norm: (optional) Order of the norm (mimics Numpy). Possible values: np.inf, 1 or 2. :param clip_min: (optional float) Minimum input component value :param clip_max: (optional float) Maximum input component value :param y: (optional) Tensor with true labels. If targeted is true, then provide the target label. Otherwise, only provide this parameter if you'd like to use true labels when crafting adversarial samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect (explained in this paper: https://arxiv.org/abs/1611.01236). Default is None. :param targeted: (optional) bool. Is the attack targeted or untargeted? Untargeted, the default, will try to make the label incorrect. Targeted will instead try to move in the direction of being more like y. :param decay_factor: (optional) Decay factor for the momentum term. 
:param sanity_checks: bool, if True, include asserts (Turn them off to use less runtime / memory or for unit tests that intentionally pass strange input) :return: a tensor for the adversarial example """ if norm == 1: raise NotImplementedError( "This attack hasn't been tested for norm=1." "It's not clear that FGM makes a good inner " "loop step for iterative optimization since " "it updates just one coordinate at a time." ) # Check if order of the norm is acceptable given current implementation if norm not in [np.inf, 1, 2]: raise ValueError("Norm order must be either np.inf, 1, or 2.") asserts = [] # If a data range was specified, check that the input was in that range if clip_min is not None: asserts.append(tf.math.greater_equal(x, clip_min)) if clip_max is not None: asserts.append(tf.math.less_equal(x, clip_max)) if y is None: # Using model predictions as ground truth to avoid label leaking y = tf.argmax(model_fn(x), 1) # Initialize loop variables momentum = tf.zeros_like(x) adv_x = x i = 0 while i < nb_iter: # Define gradient of loss wrt input grad = compute_gradient(model_fn, loss_fn, adv_x, y, targeted) # Normalize current gradient and add it to the accumulated gradient red_ind = list(range(1, len(grad.shape))) avoid_zero_div = tf.cast(1e-12, grad.dtype) grad = grad / tf.math.maximum( avoid_zero_div, tf.math.reduce_mean(tf.math.abs(grad), red_ind, keepdims=True), ) momentum = decay_factor * momentum + grad optimal_perturbation = optimize_linear(momentum, eps_iter, norm) # Update and clip adversarial example in current iteration adv_x = adv_x + optimal_perturbation adv_x = x + clip_eta(adv_x - x, norm, eps) if clip_min is not None and clip_max is not None: adv_x = tf.clip_by_value(adv_x, clip_min, clip_max) i += 1 if sanity_checks: assert np.all(asserts) return adv_x def loss_fn(labels, logits): """ Added softmax cross entropy loss for MIM as in the original MI-FGSM paper. """ return tf.nn.sparse_softmax_cross_entropy_with_logits(labels, logits, name=None)
"""The MomentumIterativeMethod attack.""" import numpy as np import tensorflow as tf from cleverhans.tf2.utils import optimize_linear, compute_gradient from cleverhans.tf2.utils import clip_eta def momentum_iterative_method( model_fn, x, eps=0.3, eps_iter=0.06, nb_iter=10, norm=np.inf, clip_min=None, clip_max=None, y=None, targeted=False, decay_factor=1.0, sanity_checks=True, ): """ Tensorflow 2.0 implementation of Momentum Iterative Method (Dong et al. 2017). This method won the first places in NIPS 2017 Non-targeted Adversarial Attacks and Targeted Adversarial Attacks. The original paper used hard labels for this attack; no label smoothing. Paper link: https://arxiv.org/pdf/1710.06081.pdf :param model_fn: a callable that takes an input tensor and returns the model logits. :param x: input tensor. :param eps: (optional float) maximum distortion of adversarial example compared to original input :param eps_iter: (optional float) step size for each attack iteration :param nb_iter: (optional int) Number of attack iterations. :param norm: (optional) Order of the norm (mimics Numpy). Possible values: np.inf, 1 or 2. :param clip_min: (optional float) Minimum input component value :param clip_max: (optional float) Maximum input component value :param y: (optional) Tensor with true labels. If targeted is true, then provide the target label. Otherwise, only provide this parameter if you'd like to use true labels when crafting adversarial samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect (explained in this paper: https://arxiv.org/abs/1611.01236). Default is None. :param targeted: (optional) bool. Is the attack targeted or untargeted? Untargeted, the default, will try to make the label incorrect. Targeted will instead try to move in the direction of being more like y. :param decay_factor: (optional) Decay factor for the momentum term. 
:param sanity_checks: bool, if True, include asserts (Turn them off to use less runtime / memory or for unit tests that intentionally pass strange input) :return: a tensor for the adversarial example """ if norm == 1: raise NotImplementedError( "This attack hasn't been tested for norm=1." "It's not clear that FGM makes a good inner " "loop step for iterative optimization since " "it updates just one coordinate at a time." ) # Check if order of the norm is acceptable given current implementation if norm not in [np.inf, 1, 2]: raise ValueError("Norm order must be either np.inf, 1, or 2.") asserts = [] # If a data range was specified, check that the input was in that range if clip_min is not None: asserts.append(tf.math.greater_equal(x, clip_min)) if clip_max is not None: asserts.append(tf.math.less_equal(x, clip_max)) if y is None: # Using model predictions as ground truth to avoid label leaking y = tf.argmax(model_fn(x), 1) # Initialize loop variables momentum = tf.zeros_like(x) adv_x = x i = 0 while i < nb_iter: # Define gradient of loss wrt input grad = compute_gradient(model_fn, loss_fn, adv_x, y, targeted) # Normalize current gradient and add it to the accumulated gradient red_ind = list(range(1, len(grad.shape))) avoid_zero_div = tf.cast(1e-12, grad.dtype) grad = grad / tf.math.maximum( avoid_zero_div, tf.math.reduce_mean(tf.math.abs(grad), red_ind, keepdims=True), ) momentum = decay_factor * momentum + grad optimal_perturbation = optimize_linear(momentum, eps_iter, norm) # Update and clip adversarial example in current iteration adv_x = adv_x + optimal_perturbation adv_x = x + clip_eta(adv_x - x, norm, eps) if clip_min is not None and clip_max is not None: adv_x = tf.clip_by_value(adv_x, clip_min, clip_max) i += 1 if sanity_checks: assert np.all(asserts) return adv_x def loss_fn(labels, logits): """ Added softmax cross entropy loss for MIM as in the original MI-FGSM paper. """ return tf.nn.sparse_softmax_cross_entropy_with_logits(labels, logits, name=None)
en
0.778228
The MomentumIterativeMethod attack. Tensorflow 2.0 implementation of Momentum Iterative Method (Dong et al. 2017). This method won the first places in NIPS 2017 Non-targeted Adversarial Attacks and Targeted Adversarial Attacks. The original paper used hard labels for this attack; no label smoothing. Paper link: https://arxiv.org/pdf/1710.06081.pdf :param model_fn: a callable that takes an input tensor and returns the model logits. :param x: input tensor. :param eps: (optional float) maximum distortion of adversarial example compared to original input :param eps_iter: (optional float) step size for each attack iteration :param nb_iter: (optional int) Number of attack iterations. :param norm: (optional) Order of the norm (mimics Numpy). Possible values: np.inf, 1 or 2. :param clip_min: (optional float) Minimum input component value :param clip_max: (optional float) Maximum input component value :param y: (optional) Tensor with true labels. If targeted is true, then provide the target label. Otherwise, only provide this parameter if you'd like to use true labels when crafting adversarial samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect (explained in this paper: https://arxiv.org/abs/1611.01236). Default is None. :param targeted: (optional) bool. Is the attack targeted or untargeted? Untargeted, the default, will try to make the label incorrect. Targeted will instead try to move in the direction of being more like y. :param decay_factor: (optional) Decay factor for the momentum term. 
:param sanity_checks: bool, if True, include asserts (Turn them off to use less runtime / memory or for unit tests that intentionally pass strange input) :return: a tensor for the adversarial example # Check if order of the norm is acceptable given current implementation # If a data range was specified, check that the input was in that range # Using model predictions as ground truth to avoid label leaking # Initialize loop variables # Define gradient of loss wrt input # Normalize current gradient and add it to the accumulated gradient # Update and clip adversarial example in current iteration Added softmax cross entropy loss for MIM as in the original MI-FGSM paper.
2.846496
3
photutils/morphology.py
fred3m/photutils
0
6628734
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Functions for centroiding sources and measuring their morphological properties. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import collections import numpy as np from astropy.modeling.models import Gaussian1D, Gaussian2D, Const1D, Const2D from astropy.modeling.fitting import LevMarLSQFitter from astropy.nddata.utils import overlap_slices from .segmentation import SegmentProperties import warnings from astropy.utils.exceptions import AstropyUserWarning __all__ = ['GaussianConst2D', 'centroid_com', 'gaussian1d_moments', 'marginalize_data2d', 'centroid_1dg', 'centroid_2dg', 'fit_2dgaussian', 'data_properties', 'cutout_footprint'] class _GaussianConst1D(Const1D + Gaussian1D): """A 1D Gaussian plus a constant model.""" class GaussianConst2D(Const2D + Gaussian2D): """ A 2D Gaussian plus a constant model. Parameters ---------- amplitude_0 : float Value of the constant. amplitude_1 : float Amplitude of the Gaussian. x_mean_1 : float Mean of the Gaussian in x. y_mean_1 : float Mean of the Gaussian in y. x_stddev_1 : float Standard deviation of the Gaussian in x. ``x_stddev`` and ``y_stddev`` must be specified unless a covariance matrix (``cov_matrix``) is input. y_stddev_1 : float Standard deviation of the Gaussian in y. ``x_stddev`` and ``y_stddev`` must be specified unless a covariance matrix (``cov_matrix``) is input. theta_1 : float, optional Rotation angle in radians. The rotation angle increases counterclockwise. cov_matrix_1 : ndarray, optional A 2x2 covariance matrix. If specified, overrides the ``x_stddev``, ``y_stddev``, and ``theta`` specification. """ def _convert_image(data, mask=None): """ Convert the input data to a float64 (double) `numpy.ndarray`, required for input to `skimage.measure.moments` and `skimage.measure.moments_central`. The input ``data`` is copied unless it already has that `numpy.dtype`. 
If ``mask`` is input, then masked pixels are set to zero in the output ``data``. Parameters ---------- data : array_like The 2D array of the image. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are set to zero in the output ``data``. Returns ------- image : `numpy.ndarray`, float64 The converted 2D array of the image, where masked pixels have been set to zero. """ try: if mask is None: copy = False else: copy = True image = np.asarray(data).astype(np.float, copy=copy) except TypeError: # pragma: no cover image = np.asarray(data).astype(np.float) # for numpy <= 1.6 if mask is not None: mask = np.asanyarray(mask) if data.shape != mask.shape: raise ValueError('data and mask must have the same shape') image[mask] = 0.0 return image def centroid_com(data, mask=None): """ Calculate the centroid of a 2D array as its center of mass determined from image moments. Parameters ---------- data : array_like The 2D array of the image. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- xcen, ycen : float (x, y) coordinates of the centroid. """ from skimage.measure import moments data = _convert_image(data, mask=mask) m = moments(data, 1) xcen = m[1, 0] / m[0, 0] ycen = m[0, 1] / m[0, 0] return xcen, ycen def gaussian1d_moments(data, mask=None): """ Estimate 1D Gaussian parameters from the moments of 1D data. This function can be useful for providing initial parameter values when fitting a 1D Gaussian to the ``data``. Parameters ---------- data : array_like (1D) The 1D array. mask : array_like (1D bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. 
Returns ------- amplitude, mean, stddev : float The estimated parameters of a 1D Gaussian. """ if mask is not None: mask = np.asanyarray(mask) data = data.copy() data[mask] = 0. x = np.arange(data.size) x_mean = np.sum(x * data) / np.sum(data) x_stddev = np.sqrt(abs(np.sum(data * (x - x_mean)**2) / np.sum(data))) amplitude = np.nanmax(data) - np.nanmin(data) return amplitude, x_mean, x_stddev def marginalize_data2d(data, error=None, mask=None): """ Generate the marginal x and y distributions from a 2D data array. Parameters ---------- data : array_like The 2D data array. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- marginal_data : list of `~numpy.ndarray` The marginal x and y distributions of the input ``data``. marginal_error : list of `~numpy.ndarray` The marginal x and y distributions of the input ``error``. marginal_mask : list of `~numpy.ndarray` (bool) The marginal x and y distributions of the input ``mask``. """ if error is not None: marginal_error = np.array( [np.sqrt(np.sum(error**2, axis=i)) for i in [0, 1]]) else: marginal_error = [None, None] if mask is not None: mask = np.asanyarray(mask) marginal_mask = [np.sum(mask, axis=i).astype(np.bool) for i in [0, 1]] else: marginal_mask = [None, None] marginal_data = [np.sum(data, axis=i) for i in [0, 1]] return marginal_data, marginal_error, marginal_mask def centroid_1dg(data, error=None, mask=None): """ Calculate the centroid of a 2D array by fitting 1D Gaussians to the marginal x and y distributions of the array. Parameters ---------- data : array_like The 2D data array. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. 
mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- xcen, ycen : float (x, y) coordinates of the centroid. """ mdata, merror, mmask = marginalize_data2d(data, error=error, mask=mask) if merror[0] is None and mmask[0] is None: mweights = [None, None] else: if merror[0] is not None: mweights = [(1.0 / merror[i].clip(min=1.e-30)) for i in [0, 1]] else: mweights = np.array([np.ones(data.shape[1]), np.ones(data.shape[0])]) # down-weight masked pixels for i in [0, 1]: mweights[i][mmask[i]] = 1.e-20 const_init = np.min(data) centroid = [] for (mdata_i, mweights_i, mmask_i) in zip(mdata, mweights, mmask): params_init = gaussian1d_moments(mdata_i, mask=mmask_i) g_init = _GaussianConst1D(const_init, *params_init) fitter = LevMarLSQFitter() x = np.arange(mdata_i.size) g_fit = fitter(g_init, x, mdata_i, weights=mweights_i) centroid.append(g_fit.mean_1.value) return tuple(centroid) def centroid_2dg(data, error=None, mask=None): """ Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus a constant) to the array. Parameters ---------- data : array_like The 2D data array. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- xcen, ycen : float (x, y) coordinates of the centroid. """ gfit = fit_2dgaussian(data, error=error, mask=mask) return gfit.x_mean_1.value, gfit.y_mean_1.value def fit_2dgaussian(data, error=None, mask=None): """ Fit a 2D Gaussian plus a constant to a 2D image. Parameters ---------- data : array_like The 2D array of the image. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. 
mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- result : A `GaussianConst2D` model instance. The best-fitting Gaussian 2D model. """ if data.size < 7: warnings.warn('data array must have a least 7 values to fit a 2D ' 'Gaussian plus a constant', AstropyUserWarning) return None if error is not None: weights = 1.0 / error else: weights = None if mask is not None: mask = np.asanyarray(mask) if weights is None: weights = np.ones_like(data) # down-weight masked pixels weights[mask] = 1.e-30 # Subtract the minimum of the data as a crude background estimate. # This will also make the data values positive, preventing issues with # the moment estimation in data_properties (moments from negative data # values can yield undefined Gaussian parameters, e.g. x/y_stddev). shift = np.min(data) data = np.copy(data) - shift props = data_properties(data, mask=mask) init_values = np.array([props.xcentroid.value, props.ycentroid.value, props.semimajor_axis_sigma.value, props.semiminor_axis_sigma.value, props.orientation.value]) init_const = 0. # subtracted data minimum above init_amplitude = np.nanmax(data) - np.nanmin(data) g_init = GaussianConst2D(init_const, init_amplitude, *init_values) fitter = LevMarLSQFitter() y, x = np.indices(data.shape) gfit = fitter(g_init, x, y, data, weights=weights) gfit.amplitude_0 = gfit.amplitude_0 + shift return gfit def data_properties(data, mask=None, background=None): """ Calculate the centroid and morphological properties of a 2D array, e.g., an image cutout of an object. Parameters ---------- data : array_like or `~astropy.units.Quantity` The 2D array of the image. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked data are excluded from all calculations. 
background : float, array_like, or `~astropy.units.Quantity`, optional The background level that was previously present in the input ``data``. ``background`` may either be a scalar value or a 2D image with the same shape as the input ``data``. Inputting the ``background`` merely allows for its properties to be measured within each source segment. The input ``background`` does *not* get subtracted from the input ``data``, which should already be background-subtracted. Returns ------- result : `~photutils.segmentation.SegmentProperties` instance A `~photutils.segmentation.SegmentProperties` object. """ segment_image = np.ones(data.shape, dtype=np.int) return SegmentProperties(data, segment_image, label=1, mask=mask, background=background) def cutout_footprint(data, position, box_size=3, footprint=None, mask=None, error=None): """ Cut out a region from data (and optional mask and error) centered at specified (x, y) position. The size of the region is specified via the ``box_size`` or ``footprint`` keywords. The output mask for the cutout region represents the combination of the input mask and footprint mask. Parameters ---------- data : array_like The 2D array of the image. position : 2 tuple The ``(x, y)`` pixel coordinate of the center of the region. box_size : scalar or tuple, optional The size of the region to cutout from ``data``. If ``box_size`` is a scalar, then the region shape will be ``(box_size, box_size)``. Either ``box_size`` or ``footprint`` must be defined. If they are both defined, then ``footprint`` overrides ``box_size``. footprint : `~numpy.ndarray` of bools, optional A boolean array where `True` values describe the local footprint region. ``box_size=(n, m)`` is equivalent to ``footprint=np.ones((n, m))``. Either ``box_size`` or ``footprint`` must be defined. If they are both defined, then ``footprint`` overrides ``box_size``. 
mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. Returns ------- region_data : `~numpy.ndarray` The ``data`` cutout. region_mask : `~numpy.ndarray` The ``mask`` cutout. region_error : `~numpy.ndarray` The ``error`` cutout. slices : tuple of slices Slices in each dimension of the ``data`` array used to define the cutout region. """ if len(position) != 2: raise ValueError('position must have a length of 2') if footprint is None: if box_size is None: raise ValueError('box_size or footprint must be defined.') if not isinstance(box_size, collections.Iterable): shape = (box_size, box_size) else: if len(box_size) != 2: raise ValueError('box_size must have a length of 2') shape = box_size footprint = np.ones(shape, dtype=bool) else: footprint = np.asanyarray(footprint, dtype=bool) slices_large, slices_small = overlap_slices(data.shape, footprint.shape, position[::-1]) region_data = data[slices_large] if error is not None: region_error = error[slices_large] else: region_error = None if mask is not None: region_mask = mask[slices_large] else: region_mask = np.zeros_like(region_data, dtype=bool) footprint_mask = ~footprint footprint_mask = footprint_mask[slices_small] # trim if necessary region_mask = np.logical_or(region_mask, footprint_mask) return region_data, region_mask, region_error, slices_large
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Functions for centroiding sources and measuring their morphological properties. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import collections import numpy as np from astropy.modeling.models import Gaussian1D, Gaussian2D, Const1D, Const2D from astropy.modeling.fitting import LevMarLSQFitter from astropy.nddata.utils import overlap_slices from .segmentation import SegmentProperties import warnings from astropy.utils.exceptions import AstropyUserWarning __all__ = ['GaussianConst2D', 'centroid_com', 'gaussian1d_moments', 'marginalize_data2d', 'centroid_1dg', 'centroid_2dg', 'fit_2dgaussian', 'data_properties', 'cutout_footprint'] class _GaussianConst1D(Const1D + Gaussian1D): """A 1D Gaussian plus a constant model.""" class GaussianConst2D(Const2D + Gaussian2D): """ A 2D Gaussian plus a constant model. Parameters ---------- amplitude_0 : float Value of the constant. amplitude_1 : float Amplitude of the Gaussian. x_mean_1 : float Mean of the Gaussian in x. y_mean_1 : float Mean of the Gaussian in y. x_stddev_1 : float Standard deviation of the Gaussian in x. ``x_stddev`` and ``y_stddev`` must be specified unless a covariance matrix (``cov_matrix``) is input. y_stddev_1 : float Standard deviation of the Gaussian in y. ``x_stddev`` and ``y_stddev`` must be specified unless a covariance matrix (``cov_matrix``) is input. theta_1 : float, optional Rotation angle in radians. The rotation angle increases counterclockwise. cov_matrix_1 : ndarray, optional A 2x2 covariance matrix. If specified, overrides the ``x_stddev``, ``y_stddev``, and ``theta`` specification. """ def _convert_image(data, mask=None): """ Convert the input data to a float64 (double) `numpy.ndarray`, required for input to `skimage.measure.moments` and `skimage.measure.moments_central`. The input ``data`` is copied unless it already has that `numpy.dtype`. 
If ``mask`` is input, then masked pixels are set to zero in the output ``data``. Parameters ---------- data : array_like The 2D array of the image. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are set to zero in the output ``data``. Returns ------- image : `numpy.ndarray`, float64 The converted 2D array of the image, where masked pixels have been set to zero. """ try: if mask is None: copy = False else: copy = True image = np.asarray(data).astype(np.float, copy=copy) except TypeError: # pragma: no cover image = np.asarray(data).astype(np.float) # for numpy <= 1.6 if mask is not None: mask = np.asanyarray(mask) if data.shape != mask.shape: raise ValueError('data and mask must have the same shape') image[mask] = 0.0 return image def centroid_com(data, mask=None): """ Calculate the centroid of a 2D array as its center of mass determined from image moments. Parameters ---------- data : array_like The 2D array of the image. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- xcen, ycen : float (x, y) coordinates of the centroid. """ from skimage.measure import moments data = _convert_image(data, mask=mask) m = moments(data, 1) xcen = m[1, 0] / m[0, 0] ycen = m[0, 1] / m[0, 0] return xcen, ycen def gaussian1d_moments(data, mask=None): """ Estimate 1D Gaussian parameters from the moments of 1D data. This function can be useful for providing initial parameter values when fitting a 1D Gaussian to the ``data``. Parameters ---------- data : array_like (1D) The 1D array. mask : array_like (1D bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. 
Returns ------- amplitude, mean, stddev : float The estimated parameters of a 1D Gaussian. """ if mask is not None: mask = np.asanyarray(mask) data = data.copy() data[mask] = 0. x = np.arange(data.size) x_mean = np.sum(x * data) / np.sum(data) x_stddev = np.sqrt(abs(np.sum(data * (x - x_mean)**2) / np.sum(data))) amplitude = np.nanmax(data) - np.nanmin(data) return amplitude, x_mean, x_stddev def marginalize_data2d(data, error=None, mask=None): """ Generate the marginal x and y distributions from a 2D data array. Parameters ---------- data : array_like The 2D data array. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- marginal_data : list of `~numpy.ndarray` The marginal x and y distributions of the input ``data``. marginal_error : list of `~numpy.ndarray` The marginal x and y distributions of the input ``error``. marginal_mask : list of `~numpy.ndarray` (bool) The marginal x and y distributions of the input ``mask``. """ if error is not None: marginal_error = np.array( [np.sqrt(np.sum(error**2, axis=i)) for i in [0, 1]]) else: marginal_error = [None, None] if mask is not None: mask = np.asanyarray(mask) marginal_mask = [np.sum(mask, axis=i).astype(np.bool) for i in [0, 1]] else: marginal_mask = [None, None] marginal_data = [np.sum(data, axis=i) for i in [0, 1]] return marginal_data, marginal_error, marginal_mask def centroid_1dg(data, error=None, mask=None): """ Calculate the centroid of a 2D array by fitting 1D Gaussians to the marginal x and y distributions of the array. Parameters ---------- data : array_like The 2D data array. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. 
mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- xcen, ycen : float (x, y) coordinates of the centroid. """ mdata, merror, mmask = marginalize_data2d(data, error=error, mask=mask) if merror[0] is None and mmask[0] is None: mweights = [None, None] else: if merror[0] is not None: mweights = [(1.0 / merror[i].clip(min=1.e-30)) for i in [0, 1]] else: mweights = np.array([np.ones(data.shape[1]), np.ones(data.shape[0])]) # down-weight masked pixels for i in [0, 1]: mweights[i][mmask[i]] = 1.e-20 const_init = np.min(data) centroid = [] for (mdata_i, mweights_i, mmask_i) in zip(mdata, mweights, mmask): params_init = gaussian1d_moments(mdata_i, mask=mmask_i) g_init = _GaussianConst1D(const_init, *params_init) fitter = LevMarLSQFitter() x = np.arange(mdata_i.size) g_fit = fitter(g_init, x, mdata_i, weights=mweights_i) centroid.append(g_fit.mean_1.value) return tuple(centroid) def centroid_2dg(data, error=None, mask=None): """ Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus a constant) to the array. Parameters ---------- data : array_like The 2D data array. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- xcen, ycen : float (x, y) coordinates of the centroid. """ gfit = fit_2dgaussian(data, error=error, mask=mask) return gfit.x_mean_1.value, gfit.y_mean_1.value def fit_2dgaussian(data, error=None, mask=None): """ Fit a 2D Gaussian plus a constant to a 2D image. Parameters ---------- data : array_like The 2D array of the image. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. 
mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- result : A `GaussianConst2D` model instance. The best-fitting Gaussian 2D model. """ if data.size < 7: warnings.warn('data array must have a least 7 values to fit a 2D ' 'Gaussian plus a constant', AstropyUserWarning) return None if error is not None: weights = 1.0 / error else: weights = None if mask is not None: mask = np.asanyarray(mask) if weights is None: weights = np.ones_like(data) # down-weight masked pixels weights[mask] = 1.e-30 # Subtract the minimum of the data as a crude background estimate. # This will also make the data values positive, preventing issues with # the moment estimation in data_properties (moments from negative data # values can yield undefined Gaussian parameters, e.g. x/y_stddev). shift = np.min(data) data = np.copy(data) - shift props = data_properties(data, mask=mask) init_values = np.array([props.xcentroid.value, props.ycentroid.value, props.semimajor_axis_sigma.value, props.semiminor_axis_sigma.value, props.orientation.value]) init_const = 0. # subtracted data minimum above init_amplitude = np.nanmax(data) - np.nanmin(data) g_init = GaussianConst2D(init_const, init_amplitude, *init_values) fitter = LevMarLSQFitter() y, x = np.indices(data.shape) gfit = fitter(g_init, x, y, data, weights=weights) gfit.amplitude_0 = gfit.amplitude_0 + shift return gfit def data_properties(data, mask=None, background=None): """ Calculate the centroid and morphological properties of a 2D array, e.g., an image cutout of an object. Parameters ---------- data : array_like or `~astropy.units.Quantity` The 2D array of the image. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked data are excluded from all calculations. 
background : float, array_like, or `~astropy.units.Quantity`, optional The background level that was previously present in the input ``data``. ``background`` may either be a scalar value or a 2D image with the same shape as the input ``data``. Inputting the ``background`` merely allows for its properties to be measured within each source segment. The input ``background`` does *not* get subtracted from the input ``data``, which should already be background-subtracted. Returns ------- result : `~photutils.segmentation.SegmentProperties` instance A `~photutils.segmentation.SegmentProperties` object. """ segment_image = np.ones(data.shape, dtype=np.int) return SegmentProperties(data, segment_image, label=1, mask=mask, background=background) def cutout_footprint(data, position, box_size=3, footprint=None, mask=None, error=None): """ Cut out a region from data (and optional mask and error) centered at specified (x, y) position. The size of the region is specified via the ``box_size`` or ``footprint`` keywords. The output mask for the cutout region represents the combination of the input mask and footprint mask. Parameters ---------- data : array_like The 2D array of the image. position : 2 tuple The ``(x, y)`` pixel coordinate of the center of the region. box_size : scalar or tuple, optional The size of the region to cutout from ``data``. If ``box_size`` is a scalar, then the region shape will be ``(box_size, box_size)``. Either ``box_size`` or ``footprint`` must be defined. If they are both defined, then ``footprint`` overrides ``box_size``. footprint : `~numpy.ndarray` of bools, optional A boolean array where `True` values describe the local footprint region. ``box_size=(n, m)`` is equivalent to ``footprint=np.ones((n, m))``. Either ``box_size`` or ``footprint`` must be defined. If they are both defined, then ``footprint`` overrides ``box_size``. 
mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. Returns ------- region_data : `~numpy.ndarray` The ``data`` cutout. region_mask : `~numpy.ndarray` The ``mask`` cutout. region_error : `~numpy.ndarray` The ``error`` cutout. slices : tuple of slices Slices in each dimension of the ``data`` array used to define the cutout region. """ if len(position) != 2: raise ValueError('position must have a length of 2') if footprint is None: if box_size is None: raise ValueError('box_size or footprint must be defined.') if not isinstance(box_size, collections.Iterable): shape = (box_size, box_size) else: if len(box_size) != 2: raise ValueError('box_size must have a length of 2') shape = box_size footprint = np.ones(shape, dtype=bool) else: footprint = np.asanyarray(footprint, dtype=bool) slices_large, slices_small = overlap_slices(data.shape, footprint.shape, position[::-1]) region_data = data[slices_large] if error is not None: region_error = error[slices_large] else: region_error = None if mask is not None: region_mask = mask[slices_large] else: region_mask = np.zeros_like(region_data, dtype=bool) footprint_mask = ~footprint footprint_mask = footprint_mask[slices_small] # trim if necessary region_mask = np.logical_or(region_mask, footprint_mask) return region_data, region_mask, region_error, slices_large
en
0.567264
# Licensed under a 3-clause BSD style license - see LICENSE.rst Functions for centroiding sources and measuring their morphological properties. A 1D Gaussian plus a constant model. A 2D Gaussian plus a constant model. Parameters ---------- amplitude_0 : float Value of the constant. amplitude_1 : float Amplitude of the Gaussian. x_mean_1 : float Mean of the Gaussian in x. y_mean_1 : float Mean of the Gaussian in y. x_stddev_1 : float Standard deviation of the Gaussian in x. ``x_stddev`` and ``y_stddev`` must be specified unless a covariance matrix (``cov_matrix``) is input. y_stddev_1 : float Standard deviation of the Gaussian in y. ``x_stddev`` and ``y_stddev`` must be specified unless a covariance matrix (``cov_matrix``) is input. theta_1 : float, optional Rotation angle in radians. The rotation angle increases counterclockwise. cov_matrix_1 : ndarray, optional A 2x2 covariance matrix. If specified, overrides the ``x_stddev``, ``y_stddev``, and ``theta`` specification. Convert the input data to a float64 (double) `numpy.ndarray`, required for input to `skimage.measure.moments` and `skimage.measure.moments_central`. The input ``data`` is copied unless it already has that `numpy.dtype`. If ``mask`` is input, then masked pixels are set to zero in the output ``data``. Parameters ---------- data : array_like The 2D array of the image. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked pixels are set to zero in the output ``data``. Returns ------- image : `numpy.ndarray`, float64 The converted 2D array of the image, where masked pixels have been set to zero. # pragma: no cover # for numpy <= 1.6 Calculate the centroid of a 2D array as its center of mass determined from image moments. Parameters ---------- data : array_like The 2D array of the image. 
mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- xcen, ycen : float (x, y) coordinates of the centroid. Estimate 1D Gaussian parameters from the moments of 1D data. This function can be useful for providing initial parameter values when fitting a 1D Gaussian to the ``data``. Parameters ---------- data : array_like (1D) The 1D array. mask : array_like (1D bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- amplitude, mean, stddev : float The estimated parameters of a 1D Gaussian. Generate the marginal x and y distributions from a 2D data array. Parameters ---------- data : array_like The 2D data array. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- marginal_data : list of `~numpy.ndarray` The marginal x and y distributions of the input ``data``. marginal_error : list of `~numpy.ndarray` The marginal x and y distributions of the input ``error``. marginal_mask : list of `~numpy.ndarray` (bool) The marginal x and y distributions of the input ``mask``. Calculate the centroid of a 2D array by fitting 1D Gaussians to the marginal x and y distributions of the array. Parameters ---------- data : array_like The 2D data array. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- xcen, ycen : float (x, y) coordinates of the centroid. 
# down-weight masked pixels Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus a constant) to the array. Parameters ---------- data : array_like The 2D data array. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- xcen, ycen : float (x, y) coordinates of the centroid. Fit a 2D Gaussian plus a constant to a 2D image. Parameters ---------- data : array_like The 2D array of the image. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Returns ------- result : A `GaussianConst2D` model instance. The best-fitting Gaussian 2D model. # down-weight masked pixels # Subtract the minimum of the data as a crude background estimate. # This will also make the data values positive, preventing issues with # the moment estimation in data_properties (moments from negative data # values can yield undefined Gaussian parameters, e.g. x/y_stddev). # subtracted data minimum above Calculate the centroid and morphological properties of a 2D array, e.g., an image cutout of an object. Parameters ---------- data : array_like or `~astropy.units.Quantity` The 2D array of the image. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked data are excluded from all calculations. background : float, array_like, or `~astropy.units.Quantity`, optional The background level that was previously present in the input ``data``. ``background`` may either be a scalar value or a 2D image with the same shape as the input ``data``. 
Inputting the ``background`` merely allows for its properties to be measured within each source segment. The input ``background`` does *not* get subtracted from the input ``data``, which should already be background-subtracted. Returns ------- result : `~photutils.segmentation.SegmentProperties` instance A `~photutils.segmentation.SegmentProperties` object. Cut out a region from data (and optional mask and error) centered at specified (x, y) position. The size of the region is specified via the ``box_size`` or ``footprint`` keywords. The output mask for the cutout region represents the combination of the input mask and footprint mask. Parameters ---------- data : array_like The 2D array of the image. position : 2 tuple The ``(x, y)`` pixel coordinate of the center of the region. box_size : scalar or tuple, optional The size of the region to cutout from ``data``. If ``box_size`` is a scalar, then the region shape will be ``(box_size, box_size)``. Either ``box_size`` or ``footprint`` must be defined. If they are both defined, then ``footprint`` overrides ``box_size``. footprint : `~numpy.ndarray` of bools, optional A boolean array where `True` values describe the local footprint region. ``box_size=(n, m)`` is equivalent to ``footprint=np.ones((n, m))``. Either ``box_size`` or ``footprint`` must be defined. If they are both defined, then ``footprint`` overrides ``box_size``. mask : array_like, bool, optional A boolean mask with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. error : array_like, optional The 2D array of the 1-sigma errors of the input ``data``. Returns ------- region_data : `~numpy.ndarray` The ``data`` cutout. region_mask : `~numpy.ndarray` The ``mask`` cutout. region_error : `~numpy.ndarray` The ``error`` cutout. slices : tuple of slices Slices in each dimension of the ``data`` array used to define the cutout region. # trim if necessary
2.364497
2
shop/test_urls.py
okcashpro/okshop
3
6628735
<filename>shop/test_urls.py<gh_stars>1-10 from django.conf.urls import url, include from django.contrib import admin from django.contrib.auth.decorators import login_required from django.conf import settings from django.conf.urls.static import static admin.autodiscover() admin.site.login = login_required(admin.site.login) urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^', include('shop.urls')), ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
<filename>shop/test_urls.py<gh_stars>1-10 from django.conf.urls import url, include from django.contrib import admin from django.contrib.auth.decorators import login_required from django.conf import settings from django.conf.urls.static import static admin.autodiscover() admin.site.login = login_required(admin.site.login) urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^', include('shop.urls')), ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
none
1
1.715559
2
train_gnn_scannet.py
alexeybokhovkin/part-based-scan-understanding
19
6628736
import os, sys from pytorch_lightning import Trainer from pytorch_lightning.logging import TensorBoardLogger from pytorch_lightning.callbacks import ModelCheckpoint from utils.config import load_config from unet3d.lightning_model_scannet import Unet3DGNNPartnetLightning def main(args): config = load_config(args) tb_logger = TensorBoardLogger(config.checkpoint_dir, name=config.model, version=config.version) CHECKPOINTS = os.path.join(config.checkpoint_dir, config.model, config.version, 'checkpoints') checkpoint_callback = ModelCheckpoint( filepath=CHECKPOINTS, save_top_k=100 ) os.makedirs(CHECKPOINTS, exist_ok=True) if config.model == 'Unet3DGNNPartnet': model = Unet3DGNNPartnetLightning(config) trainer = Trainer( checkpoint_callback=checkpoint_callback, logger=tb_logger, early_stop_callback=False, gpus=config.gpus, distributed_backend=config.distributed_backend, num_nodes=1, max_epochs=config.max_epochs, val_check_interval=config.val_check_interval, amp_level=config.amp_level, log_save_interval=10, fast_dev_run=False, # resume_from_checkpoint=config.resume_from_checkpoint, accumulate_grad_batches=4 ) trainer.fit(model) if __name__ == '__main__': main(sys.argv[1:])
import os, sys from pytorch_lightning import Trainer from pytorch_lightning.logging import TensorBoardLogger from pytorch_lightning.callbacks import ModelCheckpoint from utils.config import load_config from unet3d.lightning_model_scannet import Unet3DGNNPartnetLightning def main(args): config = load_config(args) tb_logger = TensorBoardLogger(config.checkpoint_dir, name=config.model, version=config.version) CHECKPOINTS = os.path.join(config.checkpoint_dir, config.model, config.version, 'checkpoints') checkpoint_callback = ModelCheckpoint( filepath=CHECKPOINTS, save_top_k=100 ) os.makedirs(CHECKPOINTS, exist_ok=True) if config.model == 'Unet3DGNNPartnet': model = Unet3DGNNPartnetLightning(config) trainer = Trainer( checkpoint_callback=checkpoint_callback, logger=tb_logger, early_stop_callback=False, gpus=config.gpus, distributed_backend=config.distributed_backend, num_nodes=1, max_epochs=config.max_epochs, val_check_interval=config.val_check_interval, amp_level=config.amp_level, log_save_interval=10, fast_dev_run=False, # resume_from_checkpoint=config.resume_from_checkpoint, accumulate_grad_batches=4 ) trainer.fit(model) if __name__ == '__main__': main(sys.argv[1:])
en
0.264082
# resume_from_checkpoint=config.resume_from_checkpoint,
1.981349
2
main.py
ikurilov/wordTranslateCardsBot
0
6628737
import telebot import postgresql import config from states import get_user_state, set_user_state db = postgresql.open(config.dbconnect) bot = telebot.TeleBot(config.token) # текущие тренировки пользователей trainings = {} @bot.message_handler(func=lambda message: not get_user_state(message.from_user.id, db)) def start_messaging(message): user_id = message.from_user.id add_user(user_id) set_user_state(user_id, 'start', db) bot.send_message(message.chat.id, "Здравствуйте! Наш бот может работать для вас в качестве вашего личного словаря, а также поможет запомнить трудно дающиеся вам слова.") end_any_operation(message) @bot.callback_query_handler(func=lambda call: True) def callback_inline(call): print(call) # Если сообщение из чата с ботом if call.message: if call.data == "test": bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="Пыщь") # ловим конец операций @bot.message_handler(commands=["end"]) def end_any_operation(message): set_user_state(message.from_user.id, 'start', db) help_show_main_message(message) # перевод пользователя в состояние добавления карточки @bot.message_handler(commands=["add"]) def add_word_handler(message): user_id = message.from_user.id set_user_state(user_id, "adding_word", db) bot.send_message(message.chat.id, 'Вводите карточки в формате слово - перевод. 
/end - закончить: ') # все слова, для тестов @bot.message_handler(commands=["show_all_cards"]) def get_all_words(message): user_id = message.from_user.id words = db.prepare('select word_en, word_ru from word_translations where user_id = $1') reply = '' for i in words(user_id): reply += i[0] + ' - ' + i[1] + '\n' if reply == '': bot.send_message(message.chat.id, 'Список ваших карточек пуст.') else: bot.send_message(message.chat.id, reply) end_any_operation(message) # ловим пользователя находящегося в состоянии удаления своей карточки @bot.message_handler(func=lambda message: get_user_state(message.from_user.id, db) == 'card_deleting') def delete_card(message): user_id = message.from_user.id card_deleting = db.prepare('delete from word_translations where user_id = $1 and (word_en = $2 or word_ru = $2)') result_deleting = card_deleting(message.from_user.id, message.text) if result_deleting[1] == 0: bot.send_message(message.chat.id, 'Карточка не найдена! Повторите ввод карточки или напишите /end для выхода.') else: bot.send_message(message.chat.id, 'Карточка успешно удалена.') # помещаем пользователя в состояние удаления своей карточки @bot.message_handler(commands=["delete_card"]) def delete_word_state(message): set_user_state(message.from_user.id, 'card_deleting', db) bot.send_message(message.chat.id, 'Введите либо слово либо перевод, которые хотите удалить: ') # ловим пользователя находящегося в состоянии удаления всех своих карточек @bot.message_handler(func=lambda message: get_user_state(message.from_user.id, db) == 'deleting_all_cards') def delete_all_cards(message): if message.text == 'yes': all_cards_deleting = db.prepare('delete from word_translations where user_id = $1') all_cards_deleting(message.from_user.id) bot.send_message(message.chat.id, 'Все ваши карточки успешно удалены!') else: bot.send_message(message.chat.id, 'Вы отменили удаление.') end_any_operation(message) # помещаем пользователя в состояние удаления всех своих карточек 
@bot.message_handler(commands=["delete_all_cards"]) def delete_all_cards_state(message): set_user_state(message.from_user.id, 'deleting_all_cards', db) bot.send_message(message.chat.id, 'Вы действительно хотите удалить все ваши карточки? (yes).') # перевод пользователя в состояние добавления карточки @bot.message_handler(commands=["training"]) def training_cmd_handler(message): user_id = message.from_user.id words = get_words_for_training(user_id) if len(words) > 0: set_user_state(user_id, "training", db) trainings[user_id] = words bot.send_message(message.chat.id, 'Введите перевод слова: ' + trainings[user_id][0][0]) else: bot.send_message(message.chat.id, 'Список слов пуст. Сначала добавьте слова') # обработка сообщений от пользователей, находящихся в состоянии добавления англ. слова @bot.message_handler(func=lambda message: get_user_state(message.from_user.id, db) == "adding_word") def adding_word(message): user_id = message.from_user.id try: word, translation = message.text.lower().split(" - ") except ValueError: bot.send_message(message.chat.id, "Введите карточку в правильном формате!") else: add_word(user_id, word, translation) bot.send_message(message.chat.id, 'Карточка успешно добавлена!') # обработка сообщений от пользователей, находящихся в состоянии тренировки @bot.message_handler(func=lambda message: get_user_state(message.from_user.id, db) == "training") def user_training_handler(message): user_id = message.from_user.id card = trainings[user_id].pop(0) word, translation = card[0], card[1] user_answer = message.text.lower().strip() success = user_answer == translation change_score(user_id, word, success) reply = 'Верно!\n' if success else 'Неверно!\nПравильный ответ: ' + translation + '.\n' if len(trainings[user_id]) == 0: set_user_state(user_id, "start", db) reply += 'Тренировка окончена' else: reply += 'Введите перевод слова: ' + trainings[user_id][0][0] bot.send_message(message.chat.id, reply) # добавление пользователья в бд def add_user(user_id): 
db_state = db.prepare("INSERT INTO user_states(user_id, state) VALUES ($1, $2)") db_state(user_id, "start") # добавление карточки в бд def add_word(user_id, word, translation): prep_statement = db.prepare("INSERT INTO word_translations(user_id, word_en, word_ru) VALUES ($1, $2, $3)") prep_statement(user_id, word, translation) # функция выбора слов для тренировки def get_words_for_training(user_id): db_words = db.prepare('\ SELECT word_en "word", word_ru "translation", \ CASE WHEN sum_en = 0 THEN 0 \ ELSE score_en::float/sum_en::float \ END "koef"\ FROM word_translations\ WHERE user_id = $1\ ORDER BY "koef"\ LIMIT 5') return db_words(user_id) def change_score(user_id, word, success=True): query_text = 'UPDATE word_translations SET score_en = score_en ' + ( '+ 1' if success else '') + ', sum_en = sum_en + 1 ' \ + 'WHERE user_id = $1 AND word_en = $2' prepared_query = db.prepare(query_text) prepared_query(user_id, word) @bot.message_handler(commands=["help"]) def show_commands(message): reply = '' reply += '/add - Добавить карточку слово - перевод\n' reply += '/training - Начать тренировку\n' reply += '/show_all_cards - Список всех ваших карточек\n' reply += '/delete_card - Удалить карточку\n' reply += '/delete_all_cards - Удалить все карточки\n' reply += '/help - Cписок доступных команд\n' bot.send_message(message.chat.id, reply) set_user_state(message.from_user.id, 'start', db) @bot.message_handler(func=lambda message: get_user_state(message.from_user.id, db) == "start") def help_show_main_message(message): bot.send_message(message.chat.id, 'Для списка доступных команд отправьте "/help" или просто введите "/".') if __name__ == '__main__': bot.polling(none_stop=True)
import telebot import postgresql import config from states import get_user_state, set_user_state db = postgresql.open(config.dbconnect) bot = telebot.TeleBot(config.token) # текущие тренировки пользователей trainings = {} @bot.message_handler(func=lambda message: not get_user_state(message.from_user.id, db)) def start_messaging(message): user_id = message.from_user.id add_user(user_id) set_user_state(user_id, 'start', db) bot.send_message(message.chat.id, "Здравствуйте! Наш бот может работать для вас в качестве вашего личного словаря, а также поможет запомнить трудно дающиеся вам слова.") end_any_operation(message) @bot.callback_query_handler(func=lambda call: True) def callback_inline(call): print(call) # Если сообщение из чата с ботом if call.message: if call.data == "test": bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text="Пыщь") # ловим конец операций @bot.message_handler(commands=["end"]) def end_any_operation(message): set_user_state(message.from_user.id, 'start', db) help_show_main_message(message) # перевод пользователя в состояние добавления карточки @bot.message_handler(commands=["add"]) def add_word_handler(message): user_id = message.from_user.id set_user_state(user_id, "adding_word", db) bot.send_message(message.chat.id, 'Вводите карточки в формате слово - перевод. 
/end - закончить: ') # все слова, для тестов @bot.message_handler(commands=["show_all_cards"]) def get_all_words(message): user_id = message.from_user.id words = db.prepare('select word_en, word_ru from word_translations where user_id = $1') reply = '' for i in words(user_id): reply += i[0] + ' - ' + i[1] + '\n' if reply == '': bot.send_message(message.chat.id, 'Список ваших карточек пуст.') else: bot.send_message(message.chat.id, reply) end_any_operation(message) # ловим пользователя находящегося в состоянии удаления своей карточки @bot.message_handler(func=lambda message: get_user_state(message.from_user.id, db) == 'card_deleting') def delete_card(message): user_id = message.from_user.id card_deleting = db.prepare('delete from word_translations where user_id = $1 and (word_en = $2 or word_ru = $2)') result_deleting = card_deleting(message.from_user.id, message.text) if result_deleting[1] == 0: bot.send_message(message.chat.id, 'Карточка не найдена! Повторите ввод карточки или напишите /end для выхода.') else: bot.send_message(message.chat.id, 'Карточка успешно удалена.') # помещаем пользователя в состояние удаления своей карточки @bot.message_handler(commands=["delete_card"]) def delete_word_state(message): set_user_state(message.from_user.id, 'card_deleting', db) bot.send_message(message.chat.id, 'Введите либо слово либо перевод, которые хотите удалить: ') # ловим пользователя находящегося в состоянии удаления всех своих карточек @bot.message_handler(func=lambda message: get_user_state(message.from_user.id, db) == 'deleting_all_cards') def delete_all_cards(message): if message.text == 'yes': all_cards_deleting = db.prepare('delete from word_translations where user_id = $1') all_cards_deleting(message.from_user.id) bot.send_message(message.chat.id, 'Все ваши карточки успешно удалены!') else: bot.send_message(message.chat.id, 'Вы отменили удаление.') end_any_operation(message) # помещаем пользователя в состояние удаления всех своих карточек 
@bot.message_handler(commands=["delete_all_cards"]) def delete_all_cards_state(message): set_user_state(message.from_user.id, 'deleting_all_cards', db) bot.send_message(message.chat.id, 'Вы действительно хотите удалить все ваши карточки? (yes).') # перевод пользователя в состояние добавления карточки @bot.message_handler(commands=["training"]) def training_cmd_handler(message): user_id = message.from_user.id words = get_words_for_training(user_id) if len(words) > 0: set_user_state(user_id, "training", db) trainings[user_id] = words bot.send_message(message.chat.id, 'Введите перевод слова: ' + trainings[user_id][0][0]) else: bot.send_message(message.chat.id, 'Список слов пуст. Сначала добавьте слова') # обработка сообщений от пользователей, находящихся в состоянии добавления англ. слова @bot.message_handler(func=lambda message: get_user_state(message.from_user.id, db) == "adding_word") def adding_word(message): user_id = message.from_user.id try: word, translation = message.text.lower().split(" - ") except ValueError: bot.send_message(message.chat.id, "Введите карточку в правильном формате!") else: add_word(user_id, word, translation) bot.send_message(message.chat.id, 'Карточка успешно добавлена!') # обработка сообщений от пользователей, находящихся в состоянии тренировки @bot.message_handler(func=lambda message: get_user_state(message.from_user.id, db) == "training") def user_training_handler(message): user_id = message.from_user.id card = trainings[user_id].pop(0) word, translation = card[0], card[1] user_answer = message.text.lower().strip() success = user_answer == translation change_score(user_id, word, success) reply = 'Верно!\n' if success else 'Неверно!\nПравильный ответ: ' + translation + '.\n' if len(trainings[user_id]) == 0: set_user_state(user_id, "start", db) reply += 'Тренировка окончена' else: reply += 'Введите перевод слова: ' + trainings[user_id][0][0] bot.send_message(message.chat.id, reply) # добавление пользователья в бд def add_user(user_id): 
db_state = db.prepare("INSERT INTO user_states(user_id, state) VALUES ($1, $2)") db_state(user_id, "start") # добавление карточки в бд def add_word(user_id, word, translation): prep_statement = db.prepare("INSERT INTO word_translations(user_id, word_en, word_ru) VALUES ($1, $2, $3)") prep_statement(user_id, word, translation) # функция выбора слов для тренировки def get_words_for_training(user_id): db_words = db.prepare('\ SELECT word_en "word", word_ru "translation", \ CASE WHEN sum_en = 0 THEN 0 \ ELSE score_en::float/sum_en::float \ END "koef"\ FROM word_translations\ WHERE user_id = $1\ ORDER BY "koef"\ LIMIT 5') return db_words(user_id) def change_score(user_id, word, success=True): query_text = 'UPDATE word_translations SET score_en = score_en ' + ( '+ 1' if success else '') + ', sum_en = sum_en + 1 ' \ + 'WHERE user_id = $1 AND word_en = $2' prepared_query = db.prepare(query_text) prepared_query(user_id, word) @bot.message_handler(commands=["help"]) def show_commands(message): reply = '' reply += '/add - Добавить карточку слово - перевод\n' reply += '/training - Начать тренировку\n' reply += '/show_all_cards - Список всех ваших карточек\n' reply += '/delete_card - Удалить карточку\n' reply += '/delete_all_cards - Удалить все карточки\n' reply += '/help - Cписок доступных команд\n' bot.send_message(message.chat.id, reply) set_user_state(message.from_user.id, 'start', db) @bot.message_handler(func=lambda message: get_user_state(message.from_user.id, db) == "start") def help_show_main_message(message): bot.send_message(message.chat.id, 'Для списка доступных команд отправьте "/help" или просто введите "/".') if __name__ == '__main__': bot.polling(none_stop=True)
ru
0.99643
# текущие тренировки пользователей # Если сообщение из чата с ботом # ловим конец операций # перевод пользователя в состояние добавления карточки # все слова, для тестов # ловим пользователя находящегося в состоянии удаления своей карточки # помещаем пользователя в состояние удаления своей карточки # ловим пользователя находящегося в состоянии удаления всех своих карточек # помещаем пользователя в состояние удаления всех своих карточек # перевод пользователя в состояние добавления карточки # обработка сообщений от пользователей, находящихся в состоянии добавления англ. слова # обработка сообщений от пользователей, находящихся в состоянии тренировки # добавление пользователья в бд # добавление карточки в бд # функция выбора слов для тренировки
2.163125
2
mmdet/apis/__init__.py
ruyueshuo/MaskTrackRCNN
1
6628738
from .env import init_dist, get_root_logger, set_random_seed from .train import train_detector, train_flownet from .inference import inference_detector, show_result __all__ = [ 'init_dist', 'get_root_logger', 'set_random_seed', 'train_detector', 'train_flownet', 'inference_detector', 'show_result' ]
from .env import init_dist, get_root_logger, set_random_seed from .train import train_detector, train_flownet from .inference import inference_detector, show_result __all__ = [ 'init_dist', 'get_root_logger', 'set_random_seed', 'train_detector', 'train_flownet', 'inference_detector', 'show_result' ]
none
1
1.457703
1
Live/Basic_Passthrough_Live_Event/basic_passthrough_live_event.py
IngridAtMicrosoft/media-services-v3-python
0
6628739
# Azure Media Services Live Streaming Sample for Python # This sample demonstrates how to enable Low Latency HLS (LL-HLS) streaming with encoding # This sample assumes that you will use OBS Studio to broadcast RTMP # to the ingest endpoint. Please install OBS Studio first. # Use the following settings in OBS: # Encoder: NVIDIA NVENC (if avail) or x264 # Rate control : CDR # Bitrate: 2500 kbps (or something reasonable for your laptop) # Keyframe Interval : 2s, or 1s for low latency # Preset: Low-latency Quality or Performance (NVENC) or "veryfast" using x264 # Profile: high # GPU: 0 (Auto) # Max B-frames: 2 # The workflow for the sample and for the recommended use of the Live API: # 1) Create the client for AMS using AAD service principal or managed ID # 2) Set up your IP restriction allow objects for ingest and preview # 3) Configure the Live Event object with your settings. Choose pass-through # or encoding channel type and size (720p or 1080p) # 4) Create the Live Event without starting it # 5) Create an Asset to be used for recording the live stream into # 6) Create a Live Output, which acts as the "recorder" to record into the # Asset (which is like the tape in the recorder). # 7) Start the Live Event - this can take a little bit. # 8) Get the preview endpoint to monitor in a player for DASH or HLS. # 9) Get the ingest RTMP endpoint URL for use in OBS Studio. # Set up OBS studio and start the broadcast. Monitor the stream in # your DASH or HLS player of choice. # 10) Create a new Streaming Locator on the recording Asset object from step 5. # 11) Get the URLs for the HLS and DASH manifest to share with your audience # or CMS system. This can also be created earlier after step 5 if desired. 
import asyncio from datetime import timedelta import time from dotenv import load_dotenv from azure.identity.aio import DefaultAzureCredential from azure.mgmt.media.aio import AzureMediaServices from azure.mgmt.media.models import ( Asset, IPRange, IPAccessControl, LiveEvent, LiveEventInputAccessControl, LiveEventPreviewAccessControl, LiveEventPreview, LiveEventInput, LiveOutput, LiveEventEncoding, LiveEventEncodingType, LiveEventInputProtocol, StreamOptionsFlag, LiveEventTranscription, LiveEventOutputTranscriptionTrack, Hls, StreamingLocator ) import os import random # Get the environment variables load_dotenv() # This sample uses the default Azure Credential object, which relies on the environment variable settings. # Get the default Azure credential from the environment variables AZURE_CLIENT_ID and AZURE_CLIENT_SECRET and AZURE_TENTANT_ID default_credential = DefaultAzureCredential() # Get the environment variables SUBSCRIPTIONID, RESOURCEGROUP and ACCOUNTNAME subscription_id = os.getenv('SUBSCRIPTIONID') resource_group = os.getenv('RESOURCEGROUP') account_name = os.getenv('ACCOUNTNAME') # This is a random string that will be added to the naming of things so that you don't have to keep doing this during testing uniqueness = random.randint(0,9999) live_event_name = f'liveEvent-{uniqueness}' # WARNING: Be careful not to leak live events using this sample! asset_name = f'archiveAsset-{uniqueness}' live_output_name = f'liveOutput-{uniqueness}' streaming_locator_name = f'liveStreamLocator-{uniqueness}' streaming_endpoint_name = 'default' # Change this to your specific streaming endpoint name if not using "default" manifest_name = "output" print("Starting the Live Streaming sample for Azure Media Services") # The AMS Client print("Creating AMS Client") client = AzureMediaServices(default_credential, subscription_id) # Creating the LiveEvent - the primary object for live streaming in AMS. 
# See the overview - https://docs.microsoft.com/azure/media-services/latest/live-streaming-overview # Create the LiveEvent # Understand the concepts of what a live event and a live output is in AMS first! # Read the following - https://docs.microsoft.com/azure/media-services/latest/live-events-outputs-concept # 1) Understand the billing implications for the various states # 2) Understand the different live event types, pass-through and encoding # 3) Understand how to use long-running async operations # 4) Understand the available Standby mode and how it differs from the Running Mode. # 5) Understand the differences between a LiveOutput and the Asset that it records to. They are two different concepts. # A live output can be considered as the "tape recorder" and the Asset is the tape that is inserted into it for recording. # 6) Understand the advanced options such as low latency, and live transcription/captioning support. # Live Transcription - https://docs.microsoft.com/en-us/azure/media-services/latest/live-transcription # Low Latency - https://docs.microsoft.com/en-us/azure/media-services/latest/live-event-latency # When broadcasting to a live event, please use one of the verified on-premises live streaminf encoders. # While operating this tutorial, it is recommended to start out using OBS Studio before moving to another encoder. # Note: When creating a LiveEvent, you can specify allowed IP addresses in one of the following formats: # IPV4 address with 4 numbers # CIDR address range allow_all_input_range=IPRange(name="AllowAll", address="0.0.0.0", subnet_prefix_length=0) # Create the LiveEvent input IP access control object # This will control the IP that the encoder is running on and restrict access to only that encoder IP range. 
# re-use the same range here for the sample, but in production, you can lock this down to the IP range for your on-premises # live encoder, laptop, or device that is sending the live stream live_event_input_access=LiveEventInputAccessControl(ip=IPAccessControl(allow=[allow_all_input_range])) # Create the LiveEvent Preview IP access control object. # This will restrict which clients can view the preview endpoint # re-se the same range here for the sample, but in production, you can lock this to the IPs of your # devices that would be monitoring the live preview. live_event_preview=LiveEventPreview(access_control=LiveEventPreviewAccessControl(ip=IPAccessControl(allow=[allow_all_input_range]))) # To get the same ingest URL for the same LiveEvent name every single time... # 1. Set useStaticHostname to true so you have inget like: # rtmps://liveevent-hevc12-eventgridmediaservice-usw22.channel.media.azure.net:2935/live/522f9b27dd2d4b26aeb9ef8ab96c5c77 # 2. Set accessToken to a desired GUID string (with or without hyphen) # See REST API documentation for the details on each setting value # https://docs.microsoft.com/rest/api/media/liveevents/create live_event_create=LiveEvent( location="West US 2", # For the sample, we are using location: West US 2 description="Sample 720P Encoding Live Event from Python SDK sample", # Set useStaticHostname to true to make the ingest and preview URL host name the same. # This can slow things down a bit. use_static_hostname=True, # hostname_prefix= "somethingstatic", # When using Static host name true, you can control the host prefix name here if desired # 1) Set up the input settings for the Live event... input=LiveEventInput( streaming_protocol=LiveEventInputProtocol.RTMP, # Options are RTMP or Smooth Streaming ingest format. access_control=live_event_input_access, # controls the IP restriction for the source header # key_frame_interval_duration = timedelta(seconds = 2), # Set this to match the ingest encoder's settings. 
This should not be used for encoding channels access_token='9eb1f703b149417c8448771867f48501' # Use this value when you want to make sure the ingest URL is static and always the same. If omited, the service will generate a random GUID values. ), # 2) Set the live event to use pass-through or cloud encoding modes... encoding=LiveEventEncoding( # Set this to Basic pass-through, Standard pass-through, Standard or Premium1080P to use the cloud live encoder. # See https://go.microsoft.com/fwlink/?linkid=2095101 for more information # Otherwise, leave as "None" to use pass-through mode encoding_type=LiveEventEncodingType.PASSTHROUGH_BASIC, # OPTIONS for encoding type you can use: # encoding_type=LiveEventEncodingType.PassthroughBasic, # Basic pass-through mode - the cheapest option! # encoding_type=LiveEventEncodingType.PassthroughStandard, # also known as standard pass-through mode (formerly "none") # encoding_type=LiveEventEncodingType.Premium1080p, # live transcoding up to 1080P 30fps with adaptive bitrate set # encoding_type=LiveEventEncodingType.Standard, # use live transcoding in the cloud for 720P 30fps with adaptive bitrate set # OPTIONS using live cloud encoding type: # key_frame_interval=timedelta(seconds = 2), # If this value is not set for an encoding live event, the fragment duration defaults to 2 seconds. The value cannot be set for pass-through live events. 
# For Low Latency HLS Live streaming, there are two new custom presets available: # "720p-3-Layer": For use with a Standard 720P encoding_type live event # {"ElementaryStreams":[{"Type":"Video","BitRate":2500000,"Width":1280,"Height":720},{"Type":"Video","BitRate":1000000,"Width":960,"Height":540},{"Type":"Video","BitRate":400000,"Width":640,"Height":360}]}" # "1080p-4-Layer": For use with a Premium1080p encoding_type live event # {"ElementaryStreams":[{"Type":"Video","BitRate":4500000,"Width":1920,"Height":1080},{"Type":"Video","BitRate":2200000,"Width":1280,"Height":720},{"Type":"Video","BitRate":1000000,"Width":960,"Height":540},{"Type":"Video","BitRate":400000,"Width":640,"Height":360}]} # preset_name=None, # only used for custom defined presets. # stretch_mode= None # can be used to determine stretch on encoder mode ), # 3) Set up the Preview endpoint for monitoring based on the settings above we already set. preview=live_event_preview, # 4) Set up more advanced options on the live event. Low Latency is the most common one. # To enable Apple's Low Latency HLS (LL-HLS) streaming, you must use "LOW_LATENCY_V2" stream option stream_options=[StreamOptionsFlag.LOW_LATENCY] #5) Optionally, enable live transcriptions if desired. # WARNING : This is extra cost ($$$), so please check pricing before enabling. Transcriptions are not supported on PassthroughBasic. # switch this sample to use encodingType: "PassthroughStandard" first before un-commenting the transcriptions object below. # transcriptions = LiveEventTranscription( # input_track_selection = [], # Choose which track to transcribe on the source input. # # The value should be in BCP-47 format (e.g: 'en-US'). 
See https://go.microsoft.com/fwlink/?linkid=2133742 # language = 'en-US', # output_transcription_track = LiveEventOutputTranscriptionTrack( # track_name = 'English' # Set the name you want to appear in the output manifest # ) # ) ) print("Creating the LiveEvent, please be patient as this can take time to complete async.") print("Live Event creation is an async operation in Azure and timing can depend on resources available.") print() # When autostart is set to true, the Live Event will be started after creation. # That means, the billing starts as soon as the Live Event starts running. # You must explicitly call Stop on the Live Event resource to halt further billing. # The following operation can sometimes take awhile. Be patient. # On optional workflow is to first call allocate() instead of create. # https://docs.microsoft.com/en-us/rest/api/media/liveevents/allocate # This allows you to allocate the resources and place the live event into a "Standby" mode until # you are ready to transition to "Running". This is useful when you want to pool resources in a warm "Standby" state at a reduced cost. # The transition from Standby to "Running" is much faster than cold creation to "Running" using the autostart property. # Returns a long running operation polling object that can be used to poll until completion. async def main(): async with client: time_start=time.perf_counter() client_live = await client.live_events.begin_create(resource_group_name=resource_group, account_name=account_name, live_event_name=live_event_name, parameters=live_event_create, auto_start=False) time_end = time.perf_counter() execution_time = (time_end - time_start) if client_live: print(f"Live Event Created - long running operation complete! Name: {live_event_name}") print(f"Execution time to create LiveEvent: {execution_time:.2f}seconds") print() poller = client_live print(await poller.result()) else: raise ValueError('Live Event creation failed!') # Create an Asset for the LiveOutput to use. 
Think of this as the "tape" that will be recorded to. # The asset entity points to a folder/container in your Azure Storage account. print(f"Creating an asset named: {asset_name}") print() out_alternate_id = f'outputALTid-{uniqueness}' out_description = f'outputdescription-{uniqueness}' # Create an output asset object out_asset = Asset(alternate_id=out_alternate_id, description=out_description) # Create an output asset output_asset = await client.assets.create_or_update(resource_group, account_name, asset_name, out_asset) if output_asset: # print output asset name print(f"The output asset name is: {output_asset.name}") print() else: raise ValueError('Output Asset creation failed!') # Create the Live Output - think of this as the "tape recorder for the live event". # Live outputs are optional, but are required if you want to archive the event to storage, # use the asset for on-demand playback later, or if you want to enable cloud DVR time-shifting. # We will use the asset created above for the "tape" to record to. manifest_name = "output" # See the REST API for details on each of the settings on Live Output # https://docs.microsoft.com/rest/api/media/liveoutputs/create print(f"Creating a live output named: {live_output_name}") print() if output_asset: time_start = time.perf_counter() live_output_create = LiveOutput( description="Optional description when using more than one live output", asset_name=output_asset.name, manifest_name=manifest_name, # The HLS and DASH manifest file name. This is recommended to set if you want a deterministic manifest path up front. archive_window_length=timedelta(hours=1), # Sets an one hour time-shift DVR window. Uses ISO 8601 format string. hls=Hls( fragments_per_ts_segment=1 # Advanced setting when using HLS TS output only. 
) ) print(f"live_output_create object is {live_output_create}") print() # Create and await the live output live_output_await = await client.live_outputs.begin_create(resource_group_name=resource_group, account_name=account_name, live_event_name=live_event_name, live_output_name=live_output_name, parameters=live_output_create) if live_output_await: print(f"Live Output created: {live_output_name}") poller = live_output_await print(await poller.result()) time_end = time.perf_counter() execution_time = time_end - time_start print(f"Execution time to create LiveEvent: {execution_time:.2f}seconds") print() else: raise Exception("Live Output creation failed!") # Refresh the LiveEvent object's settings after starting it... live_event = await client.live_events.get(resource_group, account_name, live_event_name) # Get the RTMP ingest URL to configure in OBS Studio # The endpoints is a collection of RTMP primary and secondary, and RTMPS primary and secondary URLs. # to get the primary secure RTMPS, it is usually going to be index 3, but you could add a loop here to confirm... if live_event.input.endpoints: ingest_url = live_event.input.endpoints[0].url print("The RTMP ingest URL to enter into OBS Studio is:") print(f"RTMP ingest: {ingest_url}") print("Make sure to enter a Stream Key into the OBS studio settings. It can be any value or you can repeat the accessToken used in the ingest URL path.") print() if live_event.preview.endpoints: # Use the preview_endpoint to preview and verify # that the input from the encoder is actually being received # The preview endpoint URL also support the addition of various format strings for HLS (format=m3u8-cmaf) and DASH (format=mpd-time-cmaf) for example. # The default manifest is Smooth. 
preview_endpoint = live_event.preview.endpoints[0].url print(f"The preview url is: {preview_endpoint}") print() print("Open the live preview in your browser and use any DASH and HLS player to monitor the preview playback.") print(f"https://ampdemo.azureedge.net/?url={preview_endpoint}(format=mpd-time-cmaf)&heuristicprofile=lowlatency") print("You will need to refresh the player page SEVERAL times until enough data has arrived to allow for manifest creation.") print("In a production player, the player can inspect the manifest to see if it contains enough content for the player to load and auto reload.") print() print("Start the live stream now, sending the input to the ingest url and verify that it is arriving with the preview url.") print("IMPORTANT TIP!: Make CERTAIN that the video is flowing to the Preview URL before continuing!") # Create the Streaming Locator URL for playback of the contents in the Live Output recoding print(f"Creating a streaming locator named: {streaming_locator_name}") print() streaming_locator = StreamingLocator(asset_name=asset_name, streaming_policy_name="Predefined_ClearStreamingOnly") locator = await client.streaming_locators.create( resource_group_name=resource_group, account_name=account_name, streaming_locator_name=streaming_locator_name, parameters=streaming_locator ) # Get the default streaming endpoint on the account streaming_endpoint = await client.streaming_endpoints.get( resource_group_name=resource_group, account_name=account_name, streaming_endpoint_name=streaming_endpoint_name ) if streaming_endpoint.resource_state != "Running": print(f"Streaming endpoint is stopped. 
Starting the endpoint named {streaming_endpoint_name}...") poller = await client.streaming_endpoints.begin_start(resource_group, account_name, streaming_endpoint_name) client_streaming_begin = await poller.result() print("Streaming Endpoint started.") if not client_streaming_begin: print("Streaming Endpoint was already started.") # Get the URL to stream the Output print("The streaming URLs to stream the live output from a client player") print() host_name = streaming_endpoint.host_name scheme = 'https' # If you wish to get the streaming manifest ahead of time, make sure to set the manifest name in the LiveOutput as done above. # This allows you to have a deterministic manifest path. <streaming endpoint hostname>/<streaming locator ID>/manifestName.ism/manifest(<format string>) # Building the paths statically. Which is highly recommended when you want to share the stream manifests # to a player application or CMS system ahead of the live event. hls_format = "format=m3u8-cmaf" dash_format = "format=mpd-time-cmaf" manifest_base = f"{scheme}://{host_name}/{locator.streaming_locator_id}/{manifest_name}.ism/manifest" hls_manifest = f'{manifest_base}({hls_format})' print(f"The HLS (MP4) manifest URL is: {hls_manifest}") print("Open the following URL to playback the live stream in an HLS compliant player (HLS.js, Shaka, ExoPlayer) or directly in an iOS device") print({hls_manifest}) print() dash_manifest = f'{manifest_base}({dash_format})' print(f"The DASH manifest URL is: {dash_manifest}") print("Open the following URL to playback the live stream from the LiveOutput in the Azure Media Player") print(f"https://ampdemo.azureedge.net/?url={dash_manifest}&heuristicprofile=lowlatency") print() # closing media client print('Closing media client') await client.close() # closing credential client print('Closing credential client') await default_credential.close() if __name__ == "__main__": asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) asyncio.run(main())
# Azure Media Services Live Streaming Sample for Python # This sample demonstrates how to enable Low Latency HLS (LL-HLS) streaming with encoding # This sample assumes that you will use OBS Studio to broadcast RTMP # to the ingest endpoint. Please install OBS Studio first. # Use the following settings in OBS: # Encoder: NVIDIA NVENC (if avail) or x264 # Rate control : CDR # Bitrate: 2500 kbps (or something reasonable for your laptop) # Keyframe Interval : 2s, or 1s for low latency # Preset: Low-latency Quality or Performance (NVENC) or "veryfast" using x264 # Profile: high # GPU: 0 (Auto) # Max B-frames: 2 # The workflow for the sample and for the recommended use of the Live API: # 1) Create the client for AMS using AAD service principal or managed ID # 2) Set up your IP restriction allow objects for ingest and preview # 3) Configure the Live Event object with your settings. Choose pass-through # or encoding channel type and size (720p or 1080p) # 4) Create the Live Event without starting it # 5) Create an Asset to be used for recording the live stream into # 6) Create a Live Output, which acts as the "recorder" to record into the # Asset (which is like the tape in the recorder). # 7) Start the Live Event - this can take a little bit. # 8) Get the preview endpoint to monitor in a player for DASH or HLS. # 9) Get the ingest RTMP endpoint URL for use in OBS Studio. # Set up OBS studio and start the broadcast. Monitor the stream in # your DASH or HLS player of choice. # 10) Create a new Streaming Locator on the recording Asset object from step 5. # 11) Get the URLs for the HLS and DASH manifest to share with your audience # or CMS system. This can also be created earlier after step 5 if desired. 
import asyncio from datetime import timedelta import time from dotenv import load_dotenv from azure.identity.aio import DefaultAzureCredential from azure.mgmt.media.aio import AzureMediaServices from azure.mgmt.media.models import ( Asset, IPRange, IPAccessControl, LiveEvent, LiveEventInputAccessControl, LiveEventPreviewAccessControl, LiveEventPreview, LiveEventInput, LiveOutput, LiveEventEncoding, LiveEventEncodingType, LiveEventInputProtocol, StreamOptionsFlag, LiveEventTranscription, LiveEventOutputTranscriptionTrack, Hls, StreamingLocator ) import os import random # Get the environment variables load_dotenv() # This sample uses the default Azure Credential object, which relies on the environment variable settings. # Get the default Azure credential from the environment variables AZURE_CLIENT_ID and AZURE_CLIENT_SECRET and AZURE_TENTANT_ID default_credential = DefaultAzureCredential() # Get the environment variables SUBSCRIPTIONID, RESOURCEGROUP and ACCOUNTNAME subscription_id = os.getenv('SUBSCRIPTIONID') resource_group = os.getenv('RESOURCEGROUP') account_name = os.getenv('ACCOUNTNAME') # This is a random string that will be added to the naming of things so that you don't have to keep doing this during testing uniqueness = random.randint(0,9999) live_event_name = f'liveEvent-{uniqueness}' # WARNING: Be careful not to leak live events using this sample! asset_name = f'archiveAsset-{uniqueness}' live_output_name = f'liveOutput-{uniqueness}' streaming_locator_name = f'liveStreamLocator-{uniqueness}' streaming_endpoint_name = 'default' # Change this to your specific streaming endpoint name if not using "default" manifest_name = "output" print("Starting the Live Streaming sample for Azure Media Services") # The AMS Client print("Creating AMS Client") client = AzureMediaServices(default_credential, subscription_id) # Creating the LiveEvent - the primary object for live streaming in AMS. 
# See the overview - https://docs.microsoft.com/azure/media-services/latest/live-streaming-overview # Create the LiveEvent # Understand the concepts of what a live event and a live output is in AMS first! # Read the following - https://docs.microsoft.com/azure/media-services/latest/live-events-outputs-concept # 1) Understand the billing implications for the various states # 2) Understand the different live event types, pass-through and encoding # 3) Understand how to use long-running async operations # 4) Understand the available Standby mode and how it differs from the Running Mode. # 5) Understand the differences between a LiveOutput and the Asset that it records to. They are two different concepts. # A live output can be considered as the "tape recorder" and the Asset is the tape that is inserted into it for recording. # 6) Understand the advanced options such as low latency, and live transcription/captioning support. # Live Transcription - https://docs.microsoft.com/en-us/azure/media-services/latest/live-transcription # Low Latency - https://docs.microsoft.com/en-us/azure/media-services/latest/live-event-latency # When broadcasting to a live event, please use one of the verified on-premises live streaminf encoders. # While operating this tutorial, it is recommended to start out using OBS Studio before moving to another encoder. # Note: When creating a LiveEvent, you can specify allowed IP addresses in one of the following formats: # IPV4 address with 4 numbers # CIDR address range allow_all_input_range=IPRange(name="AllowAll", address="0.0.0.0", subnet_prefix_length=0) # Create the LiveEvent input IP access control object # This will control the IP that the encoder is running on and restrict access to only that encoder IP range. 
# re-use the same range here for the sample, but in production, you can lock this down to the IP range for your on-premises # live encoder, laptop, or device that is sending the live stream live_event_input_access=LiveEventInputAccessControl(ip=IPAccessControl(allow=[allow_all_input_range])) # Create the LiveEvent Preview IP access control object. # This will restrict which clients can view the preview endpoint # re-se the same range here for the sample, but in production, you can lock this to the IPs of your # devices that would be monitoring the live preview. live_event_preview=LiveEventPreview(access_control=LiveEventPreviewAccessControl(ip=IPAccessControl(allow=[allow_all_input_range]))) # To get the same ingest URL for the same LiveEvent name every single time... # 1. Set useStaticHostname to true so you have inget like: # rtmps://liveevent-hevc12-eventgridmediaservice-usw22.channel.media.azure.net:2935/live/522f9b27dd2d4b26aeb9ef8ab96c5c77 # 2. Set accessToken to a desired GUID string (with or without hyphen) # See REST API documentation for the details on each setting value # https://docs.microsoft.com/rest/api/media/liveevents/create live_event_create=LiveEvent( location="West US 2", # For the sample, we are using location: West US 2 description="Sample 720P Encoding Live Event from Python SDK sample", # Set useStaticHostname to true to make the ingest and preview URL host name the same. # This can slow things down a bit. use_static_hostname=True, # hostname_prefix= "somethingstatic", # When using Static host name true, you can control the host prefix name here if desired # 1) Set up the input settings for the Live event... input=LiveEventInput( streaming_protocol=LiveEventInputProtocol.RTMP, # Options are RTMP or Smooth Streaming ingest format. access_control=live_event_input_access, # controls the IP restriction for the source header # key_frame_interval_duration = timedelta(seconds = 2), # Set this to match the ingest encoder's settings. 
This should not be used for encoding channels access_token='9eb1f703b149417c8448771867f48501' # Use this value when you want to make sure the ingest URL is static and always the same. If omited, the service will generate a random GUID values. ), # 2) Set the live event to use pass-through or cloud encoding modes... encoding=LiveEventEncoding( # Set this to Basic pass-through, Standard pass-through, Standard or Premium1080P to use the cloud live encoder. # See https://go.microsoft.com/fwlink/?linkid=2095101 for more information # Otherwise, leave as "None" to use pass-through mode encoding_type=LiveEventEncodingType.PASSTHROUGH_BASIC, # OPTIONS for encoding type you can use: # encoding_type=LiveEventEncodingType.PassthroughBasic, # Basic pass-through mode - the cheapest option! # encoding_type=LiveEventEncodingType.PassthroughStandard, # also known as standard pass-through mode (formerly "none") # encoding_type=LiveEventEncodingType.Premium1080p, # live transcoding up to 1080P 30fps with adaptive bitrate set # encoding_type=LiveEventEncodingType.Standard, # use live transcoding in the cloud for 720P 30fps with adaptive bitrate set # OPTIONS using live cloud encoding type: # key_frame_interval=timedelta(seconds = 2), # If this value is not set for an encoding live event, the fragment duration defaults to 2 seconds. The value cannot be set for pass-through live events. 
# For Low Latency HLS Live streaming, there are two new custom presets available: # "720p-3-Layer": For use with a Standard 720P encoding_type live event # {"ElementaryStreams":[{"Type":"Video","BitRate":2500000,"Width":1280,"Height":720},{"Type":"Video","BitRate":1000000,"Width":960,"Height":540},{"Type":"Video","BitRate":400000,"Width":640,"Height":360}]}" # "1080p-4-Layer": For use with a Premium1080p encoding_type live event # {"ElementaryStreams":[{"Type":"Video","BitRate":4500000,"Width":1920,"Height":1080},{"Type":"Video","BitRate":2200000,"Width":1280,"Height":720},{"Type":"Video","BitRate":1000000,"Width":960,"Height":540},{"Type":"Video","BitRate":400000,"Width":640,"Height":360}]} # preset_name=None, # only used for custom defined presets. # stretch_mode= None # can be used to determine stretch on encoder mode ), # 3) Set up the Preview endpoint for monitoring based on the settings above we already set. preview=live_event_preview, # 4) Set up more advanced options on the live event. Low Latency is the most common one. # To enable Apple's Low Latency HLS (LL-HLS) streaming, you must use "LOW_LATENCY_V2" stream option stream_options=[StreamOptionsFlag.LOW_LATENCY] #5) Optionally, enable live transcriptions if desired. # WARNING : This is extra cost ($$$), so please check pricing before enabling. Transcriptions are not supported on PassthroughBasic. # switch this sample to use encodingType: "PassthroughStandard" first before un-commenting the transcriptions object below. # transcriptions = LiveEventTranscription( # input_track_selection = [], # Choose which track to transcribe on the source input. # # The value should be in BCP-47 format (e.g: 'en-US'). 
See https://go.microsoft.com/fwlink/?linkid=2133742 # language = 'en-US', # output_transcription_track = LiveEventOutputTranscriptionTrack( # track_name = 'English' # Set the name you want to appear in the output manifest # ) # ) ) print("Creating the LiveEvent, please be patient as this can take time to complete async.") print("Live Event creation is an async operation in Azure and timing can depend on resources available.") print() # When autostart is set to true, the Live Event will be started after creation. # That means, the billing starts as soon as the Live Event starts running. # You must explicitly call Stop on the Live Event resource to halt further billing. # The following operation can sometimes take awhile. Be patient. # On optional workflow is to first call allocate() instead of create. # https://docs.microsoft.com/en-us/rest/api/media/liveevents/allocate # This allows you to allocate the resources and place the live event into a "Standby" mode until # you are ready to transition to "Running". This is useful when you want to pool resources in a warm "Standby" state at a reduced cost. # The transition from Standby to "Running" is much faster than cold creation to "Running" using the autostart property. # Returns a long running operation polling object that can be used to poll until completion. async def main(): async with client: time_start=time.perf_counter() client_live = await client.live_events.begin_create(resource_group_name=resource_group, account_name=account_name, live_event_name=live_event_name, parameters=live_event_create, auto_start=False) time_end = time.perf_counter() execution_time = (time_end - time_start) if client_live: print(f"Live Event Created - long running operation complete! Name: {live_event_name}") print(f"Execution time to create LiveEvent: {execution_time:.2f}seconds") print() poller = client_live print(await poller.result()) else: raise ValueError('Live Event creation failed!') # Create an Asset for the LiveOutput to use. 
Think of this as the "tape" that will be recorded to. # The asset entity points to a folder/container in your Azure Storage account. print(f"Creating an asset named: {asset_name}") print() out_alternate_id = f'outputALTid-{uniqueness}' out_description = f'outputdescription-{uniqueness}' # Create an output asset object out_asset = Asset(alternate_id=out_alternate_id, description=out_description) # Create an output asset output_asset = await client.assets.create_or_update(resource_group, account_name, asset_name, out_asset) if output_asset: # print output asset name print(f"The output asset name is: {output_asset.name}") print() else: raise ValueError('Output Asset creation failed!') # Create the Live Output - think of this as the "tape recorder for the live event". # Live outputs are optional, but are required if you want to archive the event to storage, # use the asset for on-demand playback later, or if you want to enable cloud DVR time-shifting. # We will use the asset created above for the "tape" to record to. manifest_name = "output" # See the REST API for details on each of the settings on Live Output # https://docs.microsoft.com/rest/api/media/liveoutputs/create print(f"Creating a live output named: {live_output_name}") print() if output_asset: time_start = time.perf_counter() live_output_create = LiveOutput( description="Optional description when using more than one live output", asset_name=output_asset.name, manifest_name=manifest_name, # The HLS and DASH manifest file name. This is recommended to set if you want a deterministic manifest path up front. archive_window_length=timedelta(hours=1), # Sets an one hour time-shift DVR window. Uses ISO 8601 format string. hls=Hls( fragments_per_ts_segment=1 # Advanced setting when using HLS TS output only. 
) ) print(f"live_output_create object is {live_output_create}") print() # Create and await the live output live_output_await = await client.live_outputs.begin_create(resource_group_name=resource_group, account_name=account_name, live_event_name=live_event_name, live_output_name=live_output_name, parameters=live_output_create) if live_output_await: print(f"Live Output created: {live_output_name}") poller = live_output_await print(await poller.result()) time_end = time.perf_counter() execution_time = time_end - time_start print(f"Execution time to create LiveEvent: {execution_time:.2f}seconds") print() else: raise Exception("Live Output creation failed!") # Refresh the LiveEvent object's settings after starting it... live_event = await client.live_events.get(resource_group, account_name, live_event_name) # Get the RTMP ingest URL to configure in OBS Studio # The endpoints is a collection of RTMP primary and secondary, and RTMPS primary and secondary URLs. # to get the primary secure RTMPS, it is usually going to be index 3, but you could add a loop here to confirm... if live_event.input.endpoints: ingest_url = live_event.input.endpoints[0].url print("The RTMP ingest URL to enter into OBS Studio is:") print(f"RTMP ingest: {ingest_url}") print("Make sure to enter a Stream Key into the OBS studio settings. It can be any value or you can repeat the accessToken used in the ingest URL path.") print() if live_event.preview.endpoints: # Use the preview_endpoint to preview and verify # that the input from the encoder is actually being received # The preview endpoint URL also support the addition of various format strings for HLS (format=m3u8-cmaf) and DASH (format=mpd-time-cmaf) for example. # The default manifest is Smooth. 
preview_endpoint = live_event.preview.endpoints[0].url print(f"The preview url is: {preview_endpoint}") print() print("Open the live preview in your browser and use any DASH and HLS player to monitor the preview playback.") print(f"https://ampdemo.azureedge.net/?url={preview_endpoint}(format=mpd-time-cmaf)&heuristicprofile=lowlatency") print("You will need to refresh the player page SEVERAL times until enough data has arrived to allow for manifest creation.") print("In a production player, the player can inspect the manifest to see if it contains enough content for the player to load and auto reload.") print() print("Start the live stream now, sending the input to the ingest url and verify that it is arriving with the preview url.") print("IMPORTANT TIP!: Make CERTAIN that the video is flowing to the Preview URL before continuing!") # Create the Streaming Locator URL for playback of the contents in the Live Output recoding print(f"Creating a streaming locator named: {streaming_locator_name}") print() streaming_locator = StreamingLocator(asset_name=asset_name, streaming_policy_name="Predefined_ClearStreamingOnly") locator = await client.streaming_locators.create( resource_group_name=resource_group, account_name=account_name, streaming_locator_name=streaming_locator_name, parameters=streaming_locator ) # Get the default streaming endpoint on the account streaming_endpoint = await client.streaming_endpoints.get( resource_group_name=resource_group, account_name=account_name, streaming_endpoint_name=streaming_endpoint_name ) if streaming_endpoint.resource_state != "Running": print(f"Streaming endpoint is stopped. 
Starting the endpoint named {streaming_endpoint_name}...") poller = await client.streaming_endpoints.begin_start(resource_group, account_name, streaming_endpoint_name) client_streaming_begin = await poller.result() print("Streaming Endpoint started.") if not client_streaming_begin: print("Streaming Endpoint was already started.") # Get the URL to stream the Output print("The streaming URLs to stream the live output from a client player") print() host_name = streaming_endpoint.host_name scheme = 'https' # If you wish to get the streaming manifest ahead of time, make sure to set the manifest name in the LiveOutput as done above. # This allows you to have a deterministic manifest path. <streaming endpoint hostname>/<streaming locator ID>/manifestName.ism/manifest(<format string>) # Building the paths statically. Which is highly recommended when you want to share the stream manifests # to a player application or CMS system ahead of the live event. hls_format = "format=m3u8-cmaf" dash_format = "format=mpd-time-cmaf" manifest_base = f"{scheme}://{host_name}/{locator.streaming_locator_id}/{manifest_name}.ism/manifest" hls_manifest = f'{manifest_base}({hls_format})' print(f"The HLS (MP4) manifest URL is: {hls_manifest}") print("Open the following URL to playback the live stream in an HLS compliant player (HLS.js, Shaka, ExoPlayer) or directly in an iOS device") print({hls_manifest}) print() dash_manifest = f'{manifest_base}({dash_format})' print(f"The DASH manifest URL is: {dash_manifest}") print("Open the following URL to playback the live stream from the LiveOutput in the Azure Media Player") print(f"https://ampdemo.azureedge.net/?url={dash_manifest}&heuristicprofile=lowlatency") print() # closing media client print('Closing media client') await client.close() # closing credential client print('Closing credential client') await default_credential.close() if __name__ == "__main__": asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) asyncio.run(main())
en
0.791859
# Azure Media Services Live Streaming Sample for Python # This sample demonstrates how to enable Low Latency HLS (LL-HLS) streaming with encoding # This sample assumes that you will use OBS Studio to broadcast RTMP # to the ingest endpoint. Please install OBS Studio first. # Use the following settings in OBS: # Encoder: NVIDIA NVENC (if avail) or x264 # Rate control : CDR # Bitrate: 2500 kbps (or something reasonable for your laptop) # Keyframe Interval : 2s, or 1s for low latency # Preset: Low-latency Quality or Performance (NVENC) or "veryfast" using x264 # Profile: high # GPU: 0 (Auto) # Max B-frames: 2 # The workflow for the sample and for the recommended use of the Live API: # 1) Create the client for AMS using AAD service principal or managed ID # 2) Set up your IP restriction allow objects for ingest and preview # 3) Configure the Live Event object with your settings. Choose pass-through # or encoding channel type and size (720p or 1080p) # 4) Create the Live Event without starting it # 5) Create an Asset to be used for recording the live stream into # 6) Create a Live Output, which acts as the "recorder" to record into the # Asset (which is like the tape in the recorder). # 7) Start the Live Event - this can take a little bit. # 8) Get the preview endpoint to monitor in a player for DASH or HLS. # 9) Get the ingest RTMP endpoint URL for use in OBS Studio. # Set up OBS studio and start the broadcast. Monitor the stream in # your DASH or HLS player of choice. # 10) Create a new Streaming Locator on the recording Asset object from step 5. # 11) Get the URLs for the HLS and DASH manifest to share with your audience # or CMS system. This can also be created earlier after step 5 if desired. # Get the environment variables # This sample uses the default Azure Credential object, which relies on the environment variable settings. 
# Get the default Azure credential from the environment variables AZURE_CLIENT_ID and AZURE_CLIENT_SECRET and AZURE_TENTANT_ID # Get the environment variables SUBSCRIPTIONID, RESOURCEGROUP and ACCOUNTNAME # This is a random string that will be added to the naming of things so that you don't have to keep doing this during testing # WARNING: Be careful not to leak live events using this sample! # Change this to your specific streaming endpoint name if not using "default" # The AMS Client # Creating the LiveEvent - the primary object for live streaming in AMS. # See the overview - https://docs.microsoft.com/azure/media-services/latest/live-streaming-overview # Create the LiveEvent # Understand the concepts of what a live event and a live output is in AMS first! # Read the following - https://docs.microsoft.com/azure/media-services/latest/live-events-outputs-concept # 1) Understand the billing implications for the various states # 2) Understand the different live event types, pass-through and encoding # 3) Understand how to use long-running async operations # 4) Understand the available Standby mode and how it differs from the Running Mode. # 5) Understand the differences between a LiveOutput and the Asset that it records to. They are two different concepts. # A live output can be considered as the "tape recorder" and the Asset is the tape that is inserted into it for recording. # 6) Understand the advanced options such as low latency, and live transcription/captioning support. # Live Transcription - https://docs.microsoft.com/en-us/azure/media-services/latest/live-transcription # Low Latency - https://docs.microsoft.com/en-us/azure/media-services/latest/live-event-latency # When broadcasting to a live event, please use one of the verified on-premises live streaminf encoders. # While operating this tutorial, it is recommended to start out using OBS Studio before moving to another encoder. 
# Note: When creating a LiveEvent, you can specify allowed IP addresses in one of the following formats: # IPV4 address with 4 numbers # CIDR address range # Create the LiveEvent input IP access control object # This will control the IP that the encoder is running on and restrict access to only that encoder IP range. # re-use the same range here for the sample, but in production, you can lock this down to the IP range for your on-premises # live encoder, laptop, or device that is sending the live stream # Create the LiveEvent Preview IP access control object. # This will restrict which clients can view the preview endpoint # re-se the same range here for the sample, but in production, you can lock this to the IPs of your # devices that would be monitoring the live preview. # To get the same ingest URL for the same LiveEvent name every single time... # 1. Set useStaticHostname to true so you have inget like: # rtmps://liveevent-hevc12-eventgridmediaservice-usw22.channel.media.azure.net:2935/live/522f9b27dd2d4b26aeb9ef8ab96c5c77 # 2. Set accessToken to a desired GUID string (with or without hyphen) # See REST API documentation for the details on each setting value # https://docs.microsoft.com/rest/api/media/liveevents/create # For the sample, we are using location: West US 2 # Set useStaticHostname to true to make the ingest and preview URL host name the same. # This can slow things down a bit. # hostname_prefix= "somethingstatic", # When using Static host name true, you can control the host prefix name here if desired # 1) Set up the input settings for the Live event... # Options are RTMP or Smooth Streaming ingest format. # controls the IP restriction for the source header # key_frame_interval_duration = timedelta(seconds = 2), # Set this to match the ingest encoder's settings. This should not be used for encoding channels # Use this value when you want to make sure the ingest URL is static and always the same. 
If omited, the service will generate a random GUID values. # 2) Set the live event to use pass-through or cloud encoding modes... # Set this to Basic pass-through, Standard pass-through, Standard or Premium1080P to use the cloud live encoder. # See https://go.microsoft.com/fwlink/?linkid=2095101 for more information # Otherwise, leave as "None" to use pass-through mode # OPTIONS for encoding type you can use: # encoding_type=LiveEventEncodingType.PassthroughBasic, # Basic pass-through mode - the cheapest option! # encoding_type=LiveEventEncodingType.PassthroughStandard, # also known as standard pass-through mode (formerly "none") # encoding_type=LiveEventEncodingType.Premium1080p, # live transcoding up to 1080P 30fps with adaptive bitrate set # encoding_type=LiveEventEncodingType.Standard, # use live transcoding in the cloud for 720P 30fps with adaptive bitrate set # OPTIONS using live cloud encoding type: # key_frame_interval=timedelta(seconds = 2), # If this value is not set for an encoding live event, the fragment duration defaults to 2 seconds. The value cannot be set for pass-through live events. # For Low Latency HLS Live streaming, there are two new custom presets available: # "720p-3-Layer": For use with a Standard 720P encoding_type live event # {"ElementaryStreams":[{"Type":"Video","BitRate":2500000,"Width":1280,"Height":720},{"Type":"Video","BitRate":1000000,"Width":960,"Height":540},{"Type":"Video","BitRate":400000,"Width":640,"Height":360}]}" # "1080p-4-Layer": For use with a Premium1080p encoding_type live event # {"ElementaryStreams":[{"Type":"Video","BitRate":4500000,"Width":1920,"Height":1080},{"Type":"Video","BitRate":2200000,"Width":1280,"Height":720},{"Type":"Video","BitRate":1000000,"Width":960,"Height":540},{"Type":"Video","BitRate":400000,"Width":640,"Height":360}]} # preset_name=None, # only used for custom defined presets. 
# stretch_mode= None # can be used to determine stretch on encoder mode # 3) Set up the Preview endpoint for monitoring based on the settings above we already set. # 4) Set up more advanced options on the live event. Low Latency is the most common one. # To enable Apple's Low Latency HLS (LL-HLS) streaming, you must use "LOW_LATENCY_V2" stream option #5) Optionally, enable live transcriptions if desired. # WARNING : This is extra cost ($$$), so please check pricing before enabling. Transcriptions are not supported on PassthroughBasic. # switch this sample to use encodingType: "PassthroughStandard" first before un-commenting the transcriptions object below. # transcriptions = LiveEventTranscription( # input_track_selection = [], # Choose which track to transcribe on the source input. # # The value should be in BCP-47 format (e.g: 'en-US'). See https://go.microsoft.com/fwlink/?linkid=2133742 # language = 'en-US', # output_transcription_track = LiveEventOutputTranscriptionTrack( # track_name = 'English' # Set the name you want to appear in the output manifest # ) # ) # When autostart is set to true, the Live Event will be started after creation. # That means, the billing starts as soon as the Live Event starts running. # You must explicitly call Stop on the Live Event resource to halt further billing. # The following operation can sometimes take awhile. Be patient. # On optional workflow is to first call allocate() instead of create. # https://docs.microsoft.com/en-us/rest/api/media/liveevents/allocate # This allows you to allocate the resources and place the live event into a "Standby" mode until # you are ready to transition to "Running". This is useful when you want to pool resources in a warm "Standby" state at a reduced cost. # The transition from Standby to "Running" is much faster than cold creation to "Running" using the autostart property. # Returns a long running operation polling object that can be used to poll until completion. 
# Create an Asset for the LiveOutput to use. Think of this as the "tape" that will be recorded to. # The asset entity points to a folder/container in your Azure Storage account. # Create an output asset object # Create an output asset # print output asset name # Create the Live Output - think of this as the "tape recorder for the live event". # Live outputs are optional, but are required if you want to archive the event to storage, # use the asset for on-demand playback later, or if you want to enable cloud DVR time-shifting. # We will use the asset created above for the "tape" to record to. # See the REST API for details on each of the settings on Live Output # https://docs.microsoft.com/rest/api/media/liveoutputs/create # The HLS and DASH manifest file name. This is recommended to set if you want a deterministic manifest path up front. # Sets an one hour time-shift DVR window. Uses ISO 8601 format string. # Advanced setting when using HLS TS output only. # Create and await the live output # Refresh the LiveEvent object's settings after starting it... # Get the RTMP ingest URL to configure in OBS Studio # The endpoints is a collection of RTMP primary and secondary, and RTMPS primary and secondary URLs. # to get the primary secure RTMPS, it is usually going to be index 3, but you could add a loop here to confirm... # Use the preview_endpoint to preview and verify # that the input from the encoder is actually being received # The preview endpoint URL also support the addition of various format strings for HLS (format=m3u8-cmaf) and DASH (format=mpd-time-cmaf) for example. # The default manifest is Smooth. # Create the Streaming Locator URL for playback of the contents in the Live Output recoding # Get the default streaming endpoint on the account # Get the URL to stream the Output # If you wish to get the streaming manifest ahead of time, make sure to set the manifest name in the LiveOutput as done above. # This allows you to have a deterministic manifest path. 
<streaming endpoint hostname>/<streaming locator ID>/manifestName.ism/manifest(<format string>) # Building the paths statically. Which is highly recommended when you want to share the stream manifests # to a player application or CMS system ahead of the live event. # closing media client # closing credential client
2.312644
2
scd/settings.py
timlindeberg/ShellConfigDeployment
0
6628740
<reponame>timlindeberg/ShellConfigDeployment<gh_stars>0 import json import sys import textwrap from getpass import getpass from typing import List, Set, Dict, Optional from pygments import highlight, lexers, formatters from scd import colors from scd.argparser import parser from scd.constants import * from scd.data_structs import FileData from scd.host_status import HostStatus from scd.printer import Printer class Settings: DEFAULT_PORT = 22 DEFAULT_TIMEOUT = 5 DEFAULT_CONFIG = textwrap.dedent(""" { "user": "", "private_key": "", "ignored_files": [ "*/.git/*", "*/.gitignore", "*/.DS_Store" ], "files": [ "~/.oh-my-zsh", "~/.zshrc" ], "programs": [ "tree" "zsh" ], "scripts": [ { "file": '', "as_sudo": false } ] } """).strip() def __init__(self): args = parser.parse_args() colors.no_color = args.no_color self.printer = Printer(False) self._check_config_file() config = self._parse_config_file() colors.no_color = args.no_color or config.get("use_color") is False if args.clear_status: self._clear_host_status(args.clear_status) if args.print_host_status: self._print_host_status(args.print_host_status) if args.print_config: self._print_config(config) self._parse_settings(args, config) def _check_config_file(self) -> None: if os.path.isfile(SCD_CONFIG): return self.printer.error("Missing configuration file %s.", SCD_CONFIG) self.printer.error("Creating default configuration. 
Please edit %s with your settings.", SCD_CONFIG) if not os.path.exists(SCD_FOLDER): os.makedirs(SCD_FOLDER) with open(SCD_CONFIG, "w") as f: f.write(self.DEFAULT_CONFIG) sys.exit(1) def _parse_config_file(self) -> Dict[str, any]: with open(SCD_CONFIG) as f: try: return json.load(f) except json.decoder.JSONDecodeError as e: self.printer.error("Failed to parse configuration file %s:", SCD_CONFIG) self.printer.error(f" {e}") sys.exit(1) def _clear_host_status(self, host: str) -> None: host_status = HostStatus() if host_status.clear(host): host_status.save() self.printer.info("Cleared status of host %s.", host) sys.exit(0) else: self.printer.error("Host status file does not contain host %s.", host) sys.exit(1) def _print_host_status(self, host_to_print: str) -> None: host_status = HostStatus() status = host_status.status if host_to_print == "all": self._print_colored_json(host_status.as_dict()) elif host_to_print in status: self._print_colored_json(status[host_to_print]) else: host_name = host_status.get_host_name(host_to_print) if host_name not in host_status.status: self.printer.error("No status saved for host %s.", host_to_print) sys.exit(1) self._print_colored_json(host_status.status[host_name]) sys.exit(0) def _print_config(self, config: Dict[str, any]): self._print_colored_json(config) sys.exit(0) def _parse_settings(self, args: any, config: Dict[str, any]) -> None: self.hosts: List[str] = args.hosts or config.get("hosts") or self._error( "No host specified. Specify hosts either in %s under the attribute %s or as a command line argument.", SCD_CONFIG, '"hosts"' ) self.user: str = args.user or config.get("user") or self._error( "No user specified. 
Specify user either in %s under the attribute %s or using the %s (%s) flag.", SCD_CONFIG, '"user"', "--user", "-u" ) self.files = self._parse_files(config) self.scripts: List[str] = config.get("scripts") or [] self.programs: Set[str] = set(config.get("programs") or []) self.shell: Optional[str] = config.get("shell") self.ignored_files: List[str] = config.get("ignored_files") or [] self.timeout = float(config.get("timeout") or self.DEFAULT_TIMEOUT) self.port = int(args.port or config.get("port") or self.DEFAULT_PORT) self.verbose: bool = args.verbose self.force: bool = args.force self.private_key: str = args.private_key or config.get("private_key") or None self.password = self._get_password(config, args) def _parse_files(self, config: Dict[str, any]) -> List[FileData]: files = config.get("files") or [] def _parse_file(file: any) -> FileData: if type(file) is dict: if not (len(file) == 2 and "source_path" in file and "host_path" in file): self.printer.error("Invalid file: %s. Dict items in file should contain two elements, source_path and the host_path.", file) sys.exit(1) return FileData(file["source_path"], file["host_path"]) elif type(file) is list: if len(file) != 2: self.printer.error("Invalid file: %s. List items in file should contain two elements, the source path and the host path.", file) sys.exit(1) return FileData(file[0], file[1]) elif type(file) is str: return FileData(file, file) else: self.printer.error("Invalid file: %s. 
Expected a string, dict or a list.", file) sys.exit(1) return [_parse_file(file) for file in files] def _get_password(self, config: Dict[str, any], args) -> str: password_file = args.password_file if password_file: if not os.path.isfile(password_file): self._error("The given password file %s does not exist.", password_file) return open(password_file).read().strip() if args.read_password: self.printer.info("Enter password: ", end="") return getpass(prompt="") return args.password or config.get("password") def _error(self, msg: str, *items) -> None: self.printer.error(msg, *items) sys.exit(1) def _print_colored_json(self, obj) -> None: formatted_json = json.dumps(obj, default=lambda o: o.__dict__, sort_keys=True, indent=4) if not colors.no_color: formatted_json = highlight(formatted_json, lexers.JsonLexer(), formatters.TerminalFormatter()).strip() for line in formatted_json.split("\n"): self.printer.info(line)
import json import sys import textwrap from getpass import getpass from typing import List, Set, Dict, Optional from pygments import highlight, lexers, formatters from scd import colors from scd.argparser import parser from scd.constants import * from scd.data_structs import FileData from scd.host_status import HostStatus from scd.printer import Printer class Settings: DEFAULT_PORT = 22 DEFAULT_TIMEOUT = 5 DEFAULT_CONFIG = textwrap.dedent(""" { "user": "", "private_key": "", "ignored_files": [ "*/.git/*", "*/.gitignore", "*/.DS_Store" ], "files": [ "~/.oh-my-zsh", "~/.zshrc" ], "programs": [ "tree" "zsh" ], "scripts": [ { "file": '', "as_sudo": false } ] } """).strip() def __init__(self): args = parser.parse_args() colors.no_color = args.no_color self.printer = Printer(False) self._check_config_file() config = self._parse_config_file() colors.no_color = args.no_color or config.get("use_color") is False if args.clear_status: self._clear_host_status(args.clear_status) if args.print_host_status: self._print_host_status(args.print_host_status) if args.print_config: self._print_config(config) self._parse_settings(args, config) def _check_config_file(self) -> None: if os.path.isfile(SCD_CONFIG): return self.printer.error("Missing configuration file %s.", SCD_CONFIG) self.printer.error("Creating default configuration. 
Please edit %s with your settings.", SCD_CONFIG) if not os.path.exists(SCD_FOLDER): os.makedirs(SCD_FOLDER) with open(SCD_CONFIG, "w") as f: f.write(self.DEFAULT_CONFIG) sys.exit(1) def _parse_config_file(self) -> Dict[str, any]: with open(SCD_CONFIG) as f: try: return json.load(f) except json.decoder.JSONDecodeError as e: self.printer.error("Failed to parse configuration file %s:", SCD_CONFIG) self.printer.error(f" {e}") sys.exit(1) def _clear_host_status(self, host: str) -> None: host_status = HostStatus() if host_status.clear(host): host_status.save() self.printer.info("Cleared status of host %s.", host) sys.exit(0) else: self.printer.error("Host status file does not contain host %s.", host) sys.exit(1) def _print_host_status(self, host_to_print: str) -> None: host_status = HostStatus() status = host_status.status if host_to_print == "all": self._print_colored_json(host_status.as_dict()) elif host_to_print in status: self._print_colored_json(status[host_to_print]) else: host_name = host_status.get_host_name(host_to_print) if host_name not in host_status.status: self.printer.error("No status saved for host %s.", host_to_print) sys.exit(1) self._print_colored_json(host_status.status[host_name]) sys.exit(0) def _print_config(self, config: Dict[str, any]): self._print_colored_json(config) sys.exit(0) def _parse_settings(self, args: any, config: Dict[str, any]) -> None: self.hosts: List[str] = args.hosts or config.get("hosts") or self._error( "No host specified. Specify hosts either in %s under the attribute %s or as a command line argument.", SCD_CONFIG, '"hosts"' ) self.user: str = args.user or config.get("user") or self._error( "No user specified. 
Specify user either in %s under the attribute %s or using the %s (%s) flag.", SCD_CONFIG, '"user"', "--user", "-u" ) self.files = self._parse_files(config) self.scripts: List[str] = config.get("scripts") or [] self.programs: Set[str] = set(config.get("programs") or []) self.shell: Optional[str] = config.get("shell") self.ignored_files: List[str] = config.get("ignored_files") or [] self.timeout = float(config.get("timeout") or self.DEFAULT_TIMEOUT) self.port = int(args.port or config.get("port") or self.DEFAULT_PORT) self.verbose: bool = args.verbose self.force: bool = args.force self.private_key: str = args.private_key or config.get("private_key") or None self.password = self._get_password(config, args) def _parse_files(self, config: Dict[str, any]) -> List[FileData]: files = config.get("files") or [] def _parse_file(file: any) -> FileData: if type(file) is dict: if not (len(file) == 2 and "source_path" in file and "host_path" in file): self.printer.error("Invalid file: %s. Dict items in file should contain two elements, source_path and the host_path.", file) sys.exit(1) return FileData(file["source_path"], file["host_path"]) elif type(file) is list: if len(file) != 2: self.printer.error("Invalid file: %s. List items in file should contain two elements, the source path and the host path.", file) sys.exit(1) return FileData(file[0], file[1]) elif type(file) is str: return FileData(file, file) else: self.printer.error("Invalid file: %s. 
Expected a string, dict or a list.", file) sys.exit(1) return [_parse_file(file) for file in files] def _get_password(self, config: Dict[str, any], args) -> str: password_file = args.password_file if password_file: if not os.path.isfile(password_file): self._error("The given password file %s does not exist.", password_file) return open(password_file).read().strip() if args.read_password: self.printer.info("Enter password: ", end="") return getpass(prompt="") return args.password or config.get("password") def _error(self, msg: str, *items) -> None: self.printer.error(msg, *items) sys.exit(1) def _print_colored_json(self, obj) -> None: formatted_json = json.dumps(obj, default=lambda o: o.__dict__, sort_keys=True, indent=4) if not colors.no_color: formatted_json = highlight(formatted_json, lexers.JsonLexer(), formatters.TerminalFormatter()).strip() for line in formatted_json.split("\n"): self.printer.info(line)
en
0.190404
{ "user": "", "private_key": "", "ignored_files": [ "*/.git/*", "*/.gitignore", "*/.DS_Store" ], "files": [ "~/.oh-my-zsh", "~/.zshrc" ], "programs": [ "tree" "zsh" ], "scripts": [ { "file": '', "as_sudo": false } ] }
2.178038
2
firetail/lib/esi.py
RaptorEye/FUN-i
0
6628741
import json import aiohttp ESI_URL = "https://esi.tech.ccp.is/latest" class ESI: """Data manager for requesting and returning ESI data.""" def __init__(self, session): self.session = session async def server_info(self): async with aiohttp.ClientSession() as session: url = '{}/status/'.format(ESI_URL) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data async def esi_search(self, item, category): async with aiohttp.ClientSession() as session: url = ('{}/search/?categories={}&datasource=tranquility' '&language=en-us&search={}&strict=false' '').format(ESI_URL, category, item) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) if category in data: if len(data[category]) > 1: url = ('{}/search/?categories={}&datasource=tranquility' '&language=en-us&search={}&strict=true' '').format(ESI_URL, category, item) async with session.get(url) as strict: data = await strict.text() data = json.loads(data) if category not in data: return False return data else: return data return None async def type_info_search(self, type_id): async with aiohttp.ClientSession() as session: url = '{}/universe/types/{}/'.format(ESI_URL, type_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data # Location Stuff async def system_info(self, system_id): async with aiohttp.ClientSession() as session: url = '{}/universe/systems/{}/'.format(ESI_URL, system_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data async def system_name(self, system_id): async with aiohttp.ClientSession() as session: url = '{}/universe/systems/{}/'.format(ESI_URL, system_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data['name'] async def constellation_info(self, constellation_id): async with aiohttp.ClientSession() as session: url = '{}/universe/constellations/{}/'.format(ESI_URL, constellation_id) async 
with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data async def region_info(self, region_id): async with aiohttp.ClientSession() as session: url = '{}/universe/regions/{}/'.format(ESI_URL, region_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data async def planet_info(self, planet_id): async with aiohttp.ClientSession() as session: url = '{}/universe/planets/{}/'.format(ESI_URL, planet_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data async def get_jump_info(self, system_id): async with aiohttp.ClientSession() as session: async with session.get( '{}/universe/system_jumps/'.format(ESI_URL)) as resp: data = await resp.text() data = json.loads(data) ship_jumps = 0 for system in data: if system['system_id'] == system_id: ship_jumps = system['ship_jumps'] return ship_jumps async def get_incursion_info(self): async with aiohttp.ClientSession() as session: async with session.get('{}/incursions/'.format(ESI_URL)) as resp: data = await resp.text() data = json.loads(data) return data async def get_active_sov_battles(self): async with aiohttp.ClientSession() as session: async with session.get( '{}/sovereignty/campaigns/?datasource=tranquility'.format(ESI_URL)) as resp: data = await resp.text() data = json.loads(data) return data # Character Stuff async def character_info(self, character_id): async with aiohttp.ClientSession() as session: url = '{}/characters/{}/'.format(ESI_URL, character_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data async def character_corp_id(self, character_id): data = await self.character_info(character_id) return data['corporation_id'] async def corporation_info(self, corporation_id): async with aiohttp.ClientSession() as session: url = '{}/corporations/{}/'.format(ESI_URL, corporation_id) async with session.get(url) as resp: data = await resp.text() data = 
json.loads(data) return data async def character_alliance_id(self, character_id): data = await self.character_info(character_id) return data['alliance_id'] async def alliance_info(self, alliance_id): async with aiohttp.ClientSession() as session: url = '{}/alliances/{}/'.format(ESI_URL, alliance_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data async def character_name(self, character_id): data = await self.character_info(character_id) return data['name'] # Item Stuff async def item_id(self, item_name): async with aiohttp.ClientSession() as session: baseurl = 'https://www.fuzzwork.co.uk/api' url = '{}/typeid.php?typename={}'.format(baseurl, item_name) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data['typeID'] async def item_info(self, item_id): async with aiohttp.ClientSession() as session: url = '{}/universe/types/{}/'.format(ESI_URL, item_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data async def market_data(self, item_name, station): itemid = await self.item_id(item_name) if itemid == 0: return itemid else: async with aiohttp.ClientSession() as session: baseurl = 'https://market.fuzzwork.co.uk/aggregates' url = '{}/?station={}&types={}'.format(baseurl, station, itemid) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data[str(itemid)] # Token Handling async def refresh_access_token(self, refresh_token, auth): async with aiohttp.ClientSession() as session: header = {'Authorization': 'Basic {}'.format(auth)} params = {'grant_type': 'refresh_token', 'refresh_token': refresh_token} url = 'https://login.eveonline.com/oauth/token' async with session.get(url, params=params, headers=header) as resp: data = await resp.text() data = json.loads(data) return data async def verify_token(self, access_token): async with aiohttp.ClientSession() as session: header = {'Authorization': 
'Bearer {}'.format(access_token)} url = 'https://login.eveonline.com/oauth/verify' async with session.get(url, headers=header) as resp: data = await resp.text() data = json.loads(data) return data # Token Restricted async def notifications(self, alliance_id): async with aiohttp.ClientSession() as session: url = '{}/alliances/{}/'.format(ESI_URL, alliance_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data
import json import aiohttp ESI_URL = "https://esi.tech.ccp.is/latest" class ESI: """Data manager for requesting and returning ESI data.""" def __init__(self, session): self.session = session async def server_info(self): async with aiohttp.ClientSession() as session: url = '{}/status/'.format(ESI_URL) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data async def esi_search(self, item, category): async with aiohttp.ClientSession() as session: url = ('{}/search/?categories={}&datasource=tranquility' '&language=en-us&search={}&strict=false' '').format(ESI_URL, category, item) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) if category in data: if len(data[category]) > 1: url = ('{}/search/?categories={}&datasource=tranquility' '&language=en-us&search={}&strict=true' '').format(ESI_URL, category, item) async with session.get(url) as strict: data = await strict.text() data = json.loads(data) if category not in data: return False return data else: return data return None async def type_info_search(self, type_id): async with aiohttp.ClientSession() as session: url = '{}/universe/types/{}/'.format(ESI_URL, type_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data # Location Stuff async def system_info(self, system_id): async with aiohttp.ClientSession() as session: url = '{}/universe/systems/{}/'.format(ESI_URL, system_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data async def system_name(self, system_id): async with aiohttp.ClientSession() as session: url = '{}/universe/systems/{}/'.format(ESI_URL, system_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data['name'] async def constellation_info(self, constellation_id): async with aiohttp.ClientSession() as session: url = '{}/universe/constellations/{}/'.format(ESI_URL, constellation_id) async 
with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data async def region_info(self, region_id): async with aiohttp.ClientSession() as session: url = '{}/universe/regions/{}/'.format(ESI_URL, region_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data async def planet_info(self, planet_id): async with aiohttp.ClientSession() as session: url = '{}/universe/planets/{}/'.format(ESI_URL, planet_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data async def get_jump_info(self, system_id): async with aiohttp.ClientSession() as session: async with session.get( '{}/universe/system_jumps/'.format(ESI_URL)) as resp: data = await resp.text() data = json.loads(data) ship_jumps = 0 for system in data: if system['system_id'] == system_id: ship_jumps = system['ship_jumps'] return ship_jumps async def get_incursion_info(self): async with aiohttp.ClientSession() as session: async with session.get('{}/incursions/'.format(ESI_URL)) as resp: data = await resp.text() data = json.loads(data) return data async def get_active_sov_battles(self): async with aiohttp.ClientSession() as session: async with session.get( '{}/sovereignty/campaigns/?datasource=tranquility'.format(ESI_URL)) as resp: data = await resp.text() data = json.loads(data) return data # Character Stuff async def character_info(self, character_id): async with aiohttp.ClientSession() as session: url = '{}/characters/{}/'.format(ESI_URL, character_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data async def character_corp_id(self, character_id): data = await self.character_info(character_id) return data['corporation_id'] async def corporation_info(self, corporation_id): async with aiohttp.ClientSession() as session: url = '{}/corporations/{}/'.format(ESI_URL, corporation_id) async with session.get(url) as resp: data = await resp.text() data = 
json.loads(data) return data async def character_alliance_id(self, character_id): data = await self.character_info(character_id) return data['alliance_id'] async def alliance_info(self, alliance_id): async with aiohttp.ClientSession() as session: url = '{}/alliances/{}/'.format(ESI_URL, alliance_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data async def character_name(self, character_id): data = await self.character_info(character_id) return data['name'] # Item Stuff async def item_id(self, item_name): async with aiohttp.ClientSession() as session: baseurl = 'https://www.fuzzwork.co.uk/api' url = '{}/typeid.php?typename={}'.format(baseurl, item_name) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data['typeID'] async def item_info(self, item_id): async with aiohttp.ClientSession() as session: url = '{}/universe/types/{}/'.format(ESI_URL, item_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data async def market_data(self, item_name, station): itemid = await self.item_id(item_name) if itemid == 0: return itemid else: async with aiohttp.ClientSession() as session: baseurl = 'https://market.fuzzwork.co.uk/aggregates' url = '{}/?station={}&types={}'.format(baseurl, station, itemid) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data[str(itemid)] # Token Handling async def refresh_access_token(self, refresh_token, auth): async with aiohttp.ClientSession() as session: header = {'Authorization': 'Basic {}'.format(auth)} params = {'grant_type': 'refresh_token', 'refresh_token': refresh_token} url = 'https://login.eveonline.com/oauth/token' async with session.get(url, params=params, headers=header) as resp: data = await resp.text() data = json.loads(data) return data async def verify_token(self, access_token): async with aiohttp.ClientSession() as session: header = {'Authorization': 
'Bearer {}'.format(access_token)} url = 'https://login.eveonline.com/oauth/verify' async with session.get(url, headers=header) as resp: data = await resp.text() data = json.loads(data) return data # Token Restricted async def notifications(self, alliance_id): async with aiohttp.ClientSession() as session: url = '{}/alliances/{}/'.format(ESI_URL, alliance_id) async with session.get(url) as resp: data = await resp.text() data = json.loads(data) return data
en
0.626941
Data manager for requesting and returning ESI data. # Location Stuff # Character Stuff # Item Stuff # Token Handling # Token Restricted
2.724128
3
submissions/abc128/c.py
m-star18/atcoder
1
6628742
<reponame>m-star18/atcoder import sys read = sys.stdin.buffer.read readline = sys.stdin.buffer.readline readlines = sys.stdin.buffer.readlines sys.setrecursionlimit(10 ** 7) from itertools import product n, m = map(int, readline().split()) ks = [list(map(int, readline().split()))[1:] for _ in range(m)] p = list(map(int, readline().split())) ans = 0 for bit in product([0, 1], repeat=n): for pp, ksi in zip(p, ks): cnt = 0 for j in range(n): if bit[j] == 1 and (j + 1) in ksi: cnt += 1 if cnt % 2 != pp: break else: ans += 1 print(ans)
import sys read = sys.stdin.buffer.read readline = sys.stdin.buffer.readline readlines = sys.stdin.buffer.readlines sys.setrecursionlimit(10 ** 7) from itertools import product n, m = map(int, readline().split()) ks = [list(map(int, readline().split()))[1:] for _ in range(m)] p = list(map(int, readline().split())) ans = 0 for bit in product([0, 1], repeat=n): for pp, ksi in zip(p, ks): cnt = 0 for j in range(n): if bit[j] == 1 and (j + 1) in ksi: cnt += 1 if cnt % 2 != pp: break else: ans += 1 print(ans)
none
1
2.343214
2
Lib/site-packages/tensorflow_probability/python/experimental/inference_gym/targets/neals_funnel.py
caiyongji/tf2.3.1-py3.7.9-full-built
0
6628743
# Lint as: python2, python3 # Copyright 2020 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Neal's funnel model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.experimental.inference_gym.targets import model __all__ = [ 'NealsFunnel', ] class NealsFunnel(model.Model): """Creates a funnel-shaped distribution. This distribution was first described in [1]. The distribution is constructed by transforming a N-D gaussian with scale [3, 1, ...] by scaling all but the first dimensions by `exp(x0 / 2)` where `x0` is the value of the first dimension. This distribution is notable for having a relatively very narrow "neck" region which is challenging for HMC to explore. This distribution resembles the posteriors of centrally parameterized hierarchical models. #### References 1. <NAME>. (2003). Slice sampling. Annals of Statistics, 31(3), 705-767. """ def __init__( self, ndims=10, name='neals_funnel', pretty_name='Neal\'s Funnel', ): """Construct the Neal's funnel model. Args: ndims: Python integer. Dimensionality of the distribution. Must be at least 2. name: Python `str` name prefixed to Ops created by this class. pretty_name: A Python `str`. The pretty name of this model. 
Raises: ValueError: If ndims < 2. """ if ndims < 2: raise ValueError('ndims must be at least 2, saw: {}'.format(ndims)) with tf.name_scope(name): def bijector_fn(x): """Funnel transform.""" batch_shape = tf.shape(x)[:-1] scale = tf.concat( [ tf.ones(tf.concat([batch_shape, [1]], axis=0)), tf.exp(x[..., :1] / 2) * tf.ones(tf.concat([batch_shape, [ndims - 1]], axis=0)), ], axis=-1, ) return tfb.Scale(scale) mg = tfd.MultivariateNormalDiag( loc=tf.zeros(ndims), scale_diag=[3.] + [1.] * (ndims - 1)) funnel = tfd.TransformedDistribution( mg, bijector=tfb.MaskedAutoregressiveFlow(bijector_fn=bijector_fn)) sample_transformations = { 'identity': model.Model.SampleTransformation( fn=lambda params: params, pretty_name='Identity', # The trailing dimensions come from a product distribution of # independent standard normal and a log-normal with a scale of # 3 / 2. See # https://en.wikipedia.org/wiki/Product_distribution for the # formulas. For the mean, the formulas yield zero. ground_truth_mean=np.zeros(ndims), # For the standard deviation, all means are zero and standard # deivations of the normals are 1, so the formula reduces to # `sqrt((sigma_log_normal + mean_log_normal**2))` which # reduces to `exp((sigma_log_normal)**2)`. ground_truth_standard_deviation=np.array( [3.] + [np.exp((3. / 2)**2)] * (ndims - 1)), ) } self._funnel = funnel super(NealsFunnel, self).__init__( default_event_space_bijector=tfb.Identity(), event_shape=funnel.event_shape, dtype=funnel.dtype, name=name, pretty_name=pretty_name, sample_transformations=sample_transformations, ) def _unnormalized_log_prob(self, value): return self._funnel.log_prob(value) def sample(self, sample_shape=(), seed=None, name='sample'): """Generate samples of the specified shape from the target distribution. The returned samples are exact (and independent) samples from the target distribution of this model. Args: sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples. 
seed: Python integer or `tfp.util.SeedStream` instance, for seeding PRNG. name: Name to give to the prefix the generated ops. Returns: samples: a `Tensor` with prepended dimensions `sample_shape`. """ return self._funnel.sample(sample_shape, seed=seed, name=name)
# Lint as: python2, python3 # Copyright 2020 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Neal's funnel model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.experimental.inference_gym.targets import model __all__ = [ 'NealsFunnel', ] class NealsFunnel(model.Model): """Creates a funnel-shaped distribution. This distribution was first described in [1]. The distribution is constructed by transforming a N-D gaussian with scale [3, 1, ...] by scaling all but the first dimensions by `exp(x0 / 2)` where `x0` is the value of the first dimension. This distribution is notable for having a relatively very narrow "neck" region which is challenging for HMC to explore. This distribution resembles the posteriors of centrally parameterized hierarchical models. #### References 1. <NAME>. (2003). Slice sampling. Annals of Statistics, 31(3), 705-767. """ def __init__( self, ndims=10, name='neals_funnel', pretty_name='Neal\'s Funnel', ): """Construct the Neal's funnel model. Args: ndims: Python integer. Dimensionality of the distribution. Must be at least 2. name: Python `str` name prefixed to Ops created by this class. pretty_name: A Python `str`. The pretty name of this model. 
Raises: ValueError: If ndims < 2. """ if ndims < 2: raise ValueError('ndims must be at least 2, saw: {}'.format(ndims)) with tf.name_scope(name): def bijector_fn(x): """Funnel transform.""" batch_shape = tf.shape(x)[:-1] scale = tf.concat( [ tf.ones(tf.concat([batch_shape, [1]], axis=0)), tf.exp(x[..., :1] / 2) * tf.ones(tf.concat([batch_shape, [ndims - 1]], axis=0)), ], axis=-1, ) return tfb.Scale(scale) mg = tfd.MultivariateNormalDiag( loc=tf.zeros(ndims), scale_diag=[3.] + [1.] * (ndims - 1)) funnel = tfd.TransformedDistribution( mg, bijector=tfb.MaskedAutoregressiveFlow(bijector_fn=bijector_fn)) sample_transformations = { 'identity': model.Model.SampleTransformation( fn=lambda params: params, pretty_name='Identity', # The trailing dimensions come from a product distribution of # independent standard normal and a log-normal with a scale of # 3 / 2. See # https://en.wikipedia.org/wiki/Product_distribution for the # formulas. For the mean, the formulas yield zero. ground_truth_mean=np.zeros(ndims), # For the standard deviation, all means are zero and standard # deivations of the normals are 1, so the formula reduces to # `sqrt((sigma_log_normal + mean_log_normal**2))` which # reduces to `exp((sigma_log_normal)**2)`. ground_truth_standard_deviation=np.array( [3.] + [np.exp((3. / 2)**2)] * (ndims - 1)), ) } self._funnel = funnel super(NealsFunnel, self).__init__( default_event_space_bijector=tfb.Identity(), event_shape=funnel.event_shape, dtype=funnel.dtype, name=name, pretty_name=pretty_name, sample_transformations=sample_transformations, ) def _unnormalized_log_prob(self, value): return self._funnel.log_prob(value) def sample(self, sample_shape=(), seed=None, name='sample'): """Generate samples of the specified shape from the target distribution. The returned samples are exact (and independent) samples from the target distribution of this model. Args: sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples. 
seed: Python integer or `tfp.util.SeedStream` instance, for seeding PRNG. name: Name to give to the prefix the generated ops. Returns: samples: a `Tensor` with prepended dimensions `sample_shape`. """ return self._funnel.sample(sample_shape, seed=seed, name=name)
en
0.825689
# Lint as: python2, python3 # Copyright 2020 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Neal's funnel model. Creates a funnel-shaped distribution. This distribution was first described in [1]. The distribution is constructed by transforming a N-D gaussian with scale [3, 1, ...] by scaling all but the first dimensions by `exp(x0 / 2)` where `x0` is the value of the first dimension. This distribution is notable for having a relatively very narrow "neck" region which is challenging for HMC to explore. This distribution resembles the posteriors of centrally parameterized hierarchical models. #### References 1. <NAME>. (2003). Slice sampling. Annals of Statistics, 31(3), 705-767. Construct the Neal's funnel model. Args: ndims: Python integer. Dimensionality of the distribution. Must be at least 2. name: Python `str` name prefixed to Ops created by this class. pretty_name: A Python `str`. The pretty name of this model. Raises: ValueError: If ndims < 2. Funnel transform. # The trailing dimensions come from a product distribution of # independent standard normal and a log-normal with a scale of # 3 / 2. See # https://en.wikipedia.org/wiki/Product_distribution for the # formulas. For the mean, the formulas yield zero. # For the standard deviation, all means are zero and standard # deivations of the normals are 1, so the formula reduces to # `sqrt((sigma_log_normal + mean_log_normal**2))` which # reduces to `exp((sigma_log_normal)**2)`. 
Generate samples of the specified shape from the target distribution. The returned samples are exact (and independent) samples from the target distribution of this model. Args: sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples. seed: Python integer or `tfp.util.SeedStream` instance, for seeding PRNG. name: Name to give to the prefix the generated ops. Returns: samples: a `Tensor` with prepended dimensions `sample_shape`.
2.048183
2
cmsplugin_contact_form_3.0.7/cms_plugins.py
xaoo/djangocms_contact_form
0
6628744
from cms.plugin_base import CMSPluginBase from cms.plugin_pool import plugin_pool from django.core.mail import send_mail, get_connection from . import models class ContactFormCMSPlugin(CMSPluginBase): model = models.ContactFormCMS name = 'Contact Form' render_template = 'cmsplugin_contact_form_3.0.7_3.0.7/form.html' allow_children = True child_classes = [ 'ContactFormTextFieldCMSPlugin', 'ContactFormEmailFieldCMSPlugin', 'ContactFormPhoneFieldCMSPlugin', 'ContactFormTextAreaFieldCMSPlugin', 'ContactFormCheckboxFieldCMSPlugin', 'ContactFormRadioFieldCMSPlugin', 'ContactFormDateFieldCMSPlugin', 'ContactFormTimeFieldCMSPlugin', 'ContactFormDateTimeFieldCMSPlugin', 'ContactFormSubmitFieldCMSPlugin' ] def render(self, context, instance, placeholder): request = context['request'] if request.method == 'POST': message = "New message from your website:\n\n" email_field = [ inst for inst in instance.child_plugin_instances if isinstance(inst, models.ContactFormTextFieldCMS) ][0] email_key = email_field.html_name submit_field = [ inst for inst in instance.child_plugin_instances if isinstance(inst, models.ContactFormSubmitFieldCMS) ][0] submit_key = submit_field.html_name for key in request.POST.keys(): if key in ['csrfmiddlewaretoken', submit_key]: continue message += '<b>{k}:</b> {v}\n'.format( k=key.capitalize(), v=request.POST[key] ) send_mail( subject="New message from your website", message=message, html_message=message, from_email=request.POST[email_key], recipient_list=[instance.email], fail_silently=False, connection=get_connection( backend='django.core.mail.backends.smtp.EmailBackend', host=instance.smtp_server, port=instance.smtp_port, username=instance.email, password=instance.password, use_tls=instance.use_tls ) ) context.update({ 'placeholder': placeholder, 'instance': instance }) return context class ContactFormTextFieldCMSPlugin(CMSPluginBase): model = models.ContactFormTextFieldCMS name = 'Text Field' render_template = 
'cmsplugin_contact_form_3.0.7/text_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormEmailFieldCMSPlugin(CMSPluginBase): model = models.ContactFormEmailFieldCMS name = 'Email Field' render_template = 'cmsplugin_contact_form_3.0.7/email_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormPhoneFieldCMSPlugin(CMSPluginBase): model = models.ContactFormPhoneFieldCMS name = 'Phone Field' render_template = 'cmsplugin_contact_form_3.0.7/phone_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormTextAreaFieldCMSPlugin(CMSPluginBase): model = models.ContactFormTextAreaFieldCMS name = 'Text Area Field' render_template = 'cmsplugin_contact_form_3.0.7/textarea_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormCheckboxFieldCMSPlugin(CMSPluginBase): model = models.ContactFormCheckboxFieldCMS name = 'Checkbox Field' render_template = 'cmsplugin_contact_form_3.0.7/checkbox_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormRadioFieldCMSPlugin(CMSPluginBase): model = models.ContactFormCheckboxFieldCMS name = 'Radio Field' render_template = 'cmsplugin_contact_form_3.0.7/radio_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormDateFieldCMSPlugin(CMSPluginBase): model = models.ContactFormTextFieldCMS name = 'Date Field' render_template = 'cmsplugin_contact_form_3.0.7/date_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormTimeFieldCMSPlugin(CMSPluginBase): model = models.ContactFormTextFieldCMS name = 'Time Field' render_template = 'cmsplugin_contact_form_3.0.7/time_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormDateTimeFieldCMSPlugin(CMSPluginBase): model = models.ContactFormTextFieldCMS name = 'Date & Time Field' render_template = 
'cmsplugin_contact_form_3.0.7/datetime_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormSubmitFieldCMSPlugin(CMSPluginBase): model = models.ContactFormSubmitFieldCMS name = 'Submit Field' render_template = 'cmsplugin_contact_form_3.0.7/submit_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] plugin_pool.register_plugin(ContactFormCMSPlugin) plugin_pool.register_plugin(ContactFormTextFieldCMSPlugin) plugin_pool.register_plugin(ContactFormEmailFieldCMSPlugin) plugin_pool.register_plugin(ContactFormPhoneFieldCMSPlugin) plugin_pool.register_plugin(ContactFormTextAreaFieldCMSPlugin) plugin_pool.register_plugin(ContactFormCheckboxFieldCMSPlugin) plugin_pool.register_plugin(ContactFormRadioFieldCMSPlugin) plugin_pool.register_plugin(ContactFormDateFieldCMSPlugin) plugin_pool.register_plugin(ContactFormTimeFieldCMSPlugin) plugin_pool.register_plugin(ContactFormDateTimeFieldCMSPlugin) plugin_pool.register_plugin(ContactFormSubmitFieldCMSPlugin)
from cms.plugin_base import CMSPluginBase from cms.plugin_pool import plugin_pool from django.core.mail import send_mail, get_connection from . import models class ContactFormCMSPlugin(CMSPluginBase): model = models.ContactFormCMS name = 'Contact Form' render_template = 'cmsplugin_contact_form_3.0.7_3.0.7/form.html' allow_children = True child_classes = [ 'ContactFormTextFieldCMSPlugin', 'ContactFormEmailFieldCMSPlugin', 'ContactFormPhoneFieldCMSPlugin', 'ContactFormTextAreaFieldCMSPlugin', 'ContactFormCheckboxFieldCMSPlugin', 'ContactFormRadioFieldCMSPlugin', 'ContactFormDateFieldCMSPlugin', 'ContactFormTimeFieldCMSPlugin', 'ContactFormDateTimeFieldCMSPlugin', 'ContactFormSubmitFieldCMSPlugin' ] def render(self, context, instance, placeholder): request = context['request'] if request.method == 'POST': message = "New message from your website:\n\n" email_field = [ inst for inst in instance.child_plugin_instances if isinstance(inst, models.ContactFormTextFieldCMS) ][0] email_key = email_field.html_name submit_field = [ inst for inst in instance.child_plugin_instances if isinstance(inst, models.ContactFormSubmitFieldCMS) ][0] submit_key = submit_field.html_name for key in request.POST.keys(): if key in ['csrfmiddlewaretoken', submit_key]: continue message += '<b>{k}:</b> {v}\n'.format( k=key.capitalize(), v=request.POST[key] ) send_mail( subject="New message from your website", message=message, html_message=message, from_email=request.POST[email_key], recipient_list=[instance.email], fail_silently=False, connection=get_connection( backend='django.core.mail.backends.smtp.EmailBackend', host=instance.smtp_server, port=instance.smtp_port, username=instance.email, password=instance.password, use_tls=instance.use_tls ) ) context.update({ 'placeholder': placeholder, 'instance': instance }) return context class ContactFormTextFieldCMSPlugin(CMSPluginBase): model = models.ContactFormTextFieldCMS name = 'Text Field' render_template = 
'cmsplugin_contact_form_3.0.7/text_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormEmailFieldCMSPlugin(CMSPluginBase): model = models.ContactFormEmailFieldCMS name = 'Email Field' render_template = 'cmsplugin_contact_form_3.0.7/email_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormPhoneFieldCMSPlugin(CMSPluginBase): model = models.ContactFormPhoneFieldCMS name = 'Phone Field' render_template = 'cmsplugin_contact_form_3.0.7/phone_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormTextAreaFieldCMSPlugin(CMSPluginBase): model = models.ContactFormTextAreaFieldCMS name = 'Text Area Field' render_template = 'cmsplugin_contact_form_3.0.7/textarea_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormCheckboxFieldCMSPlugin(CMSPluginBase): model = models.ContactFormCheckboxFieldCMS name = 'Checkbox Field' render_template = 'cmsplugin_contact_form_3.0.7/checkbox_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormRadioFieldCMSPlugin(CMSPluginBase): model = models.ContactFormCheckboxFieldCMS name = 'Radio Field' render_template = 'cmsplugin_contact_form_3.0.7/radio_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormDateFieldCMSPlugin(CMSPluginBase): model = models.ContactFormTextFieldCMS name = 'Date Field' render_template = 'cmsplugin_contact_form_3.0.7/date_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormTimeFieldCMSPlugin(CMSPluginBase): model = models.ContactFormTextFieldCMS name = 'Time Field' render_template = 'cmsplugin_contact_form_3.0.7/time_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormDateTimeFieldCMSPlugin(CMSPluginBase): model = models.ContactFormTextFieldCMS name = 'Date & Time Field' render_template = 
'cmsplugin_contact_form_3.0.7/datetime_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] class ContactFormSubmitFieldCMSPlugin(CMSPluginBase): model = models.ContactFormSubmitFieldCMS name = 'Submit Field' render_template = 'cmsplugin_contact_form_3.0.7/submit_field.html' require_parent = True parent_classes = ['ContactFormCMSPlugin'] plugin_pool.register_plugin(ContactFormCMSPlugin) plugin_pool.register_plugin(ContactFormTextFieldCMSPlugin) plugin_pool.register_plugin(ContactFormEmailFieldCMSPlugin) plugin_pool.register_plugin(ContactFormPhoneFieldCMSPlugin) plugin_pool.register_plugin(ContactFormTextAreaFieldCMSPlugin) plugin_pool.register_plugin(ContactFormCheckboxFieldCMSPlugin) plugin_pool.register_plugin(ContactFormRadioFieldCMSPlugin) plugin_pool.register_plugin(ContactFormDateFieldCMSPlugin) plugin_pool.register_plugin(ContactFormTimeFieldCMSPlugin) plugin_pool.register_plugin(ContactFormDateTimeFieldCMSPlugin) plugin_pool.register_plugin(ContactFormSubmitFieldCMSPlugin)
none
1
1.960402
2
backend/fms_core/serializers.py
c3g/freezeman
2
6628745
from django.contrib.auth.models import User, Group from django.contrib.contenttypes.models import ContentType from rest_framework import serializers from reversion.models import Version, Revision from .models import ( Container, ExperimentRun, RunType, Individual, Instrument, PropertyValue, Protocol, Process, ProcessMeasurement, Sample, SampleKind, FullSample, Project, ) __all__ = [ "ContainerSerializer", "ContainerExportSerializer", "ExperimentRunSerializer", "ExperimentRunExportSerializer", "RunTypeSerializer", "SimpleContainerSerializer", "IndividualSerializer", "InstrumentSerializer", "SampleKindSerializer", "PropertyValueSerializer", "ProcessSerializer", "ProcessMeasurementSerializer", "ProcessMeasurementExportSerializer", "ProtocolSerializer", "SampleSerializer", "SampleExportSerializer", "FullSampleSerializer", "FullSampleExportSerializer", "FullNestedSampleSerializer", "VersionSerializer", "RevisionSerializer", "UserSerializer", "GroupSerializer", "ProjectSerializer", "ProjectExportSerializer", ] class ContainerSerializer(serializers.ModelSerializer): children = serializers.PrimaryKeyRelatedField(many=True, read_only=True) samples = serializers.PrimaryKeyRelatedField(many=True, read_only=True) experiment_run = serializers.PrimaryKeyRelatedField(many=False, read_only=True) class Meta: model = Container fields = "__all__" extra_fields = ('experiment_run') class SimpleContainerSerializer(serializers.ModelSerializer): class Meta: model = Container fields = "__all__" class ContainerExportSerializer(serializers.ModelSerializer): location = serializers.SlugRelatedField(slug_field='barcode', read_only=True) container_kind = serializers.CharField(source='kind') class Meta: model = Container fields = ('name', 'container_kind', 'barcode', 'location', 'coordinates', 'comment') class ExperimentRunSerializer(serializers.ModelSerializer): children_processes = serializers.SerializerMethodField() instrument_type = serializers.SerializerMethodField() platform = 
serializers.SerializerMethodField() class Meta: model = ExperimentRun fields = "__all__" extra_fields = ('children_processes', 'instrument_type', 'platform') def get_children_processes(self, obj): return Process.objects.filter(parent_process=obj.process).values_list('id', flat=True) def get_instrument_type(self, obj): return obj.instrument.type.type def get_platform(self, obj): return obj.instrument.type.platform.name class ExperimentRunExportSerializer(serializers.ModelSerializer): experiment_run_id = serializers.IntegerField(read_only=True, source="id") experiment_run_name = serializers.CharField(read_only=True, source="name") run_type = serializers.CharField(read_only=True, source="run_type.name") instrument = serializers.CharField(read_only=True, source="instrument.name") container_kind = serializers.CharField(read_only=True, source="container.kind") container_name = serializers.CharField(read_only=True, source="container.name") container_barcode = serializers.CharField(read_only=True, source="container.barcode") class Meta: model = ExperimentRun fields = ('experiment_run_id', 'experiment_run_name', 'run_type', 'instrument', 'container_kind', 'container_name', 'container_barcode', 'start_date') class RunTypeSerializer(serializers.ModelSerializer): class Meta: model = RunType fields = "__all__" class IndividualSerializer(serializers.ModelSerializer): class Meta: model = Individual fields = "__all__" class InstrumentSerializer(serializers.ModelSerializer): class Meta: model = Instrument fields = "__all__" class SampleKindSerializer(serializers.ModelSerializer): class Meta: model = SampleKind fields = "__all__" class ProtocolSerializer(serializers.ModelSerializer): class Meta: model = Protocol fields = "__all__" class ProcessSerializer(serializers.ModelSerializer): children_properties = serializers.SerializerMethodField() class Meta: model = Process fields = "__all__" extra_fields = ('children_processes') def get_children_properties(self, obj): 
process_content_type = ContentType.objects.get_for_model(Process) return PropertyValue.objects.filter(object_id=obj.id, content_type=process_content_type).values_list('id', flat=True) class ProcessMeasurementSerializer(serializers.ModelSerializer): protocol = serializers.IntegerField(read_only=True, source="process.protocol.id") child_sample = serializers.IntegerField(read_only=True) properties = serializers.SerializerMethodField() class Meta: model = ProcessMeasurement fields = "__all__" extra_fields = ('protocol', 'child_sample') def get_properties(self, obj): pm_content_type = ContentType.objects.get_for_model(ProcessMeasurement) return PropertyValue.objects.filter(object_id=obj.id, content_type=pm_content_type).values_list('id', flat=True) class ProcessMeasurementExportSerializer(serializers.ModelSerializer): process_measurement_id = serializers.IntegerField(read_only=True, source="id") protocol_name = serializers.CharField(read_only=True, source="process.protocol.name") child_sample_name = serializers.CharField(read_only=True) source_sample_name = serializers.CharField(read_only=True) class Meta: model = ProcessMeasurement fields = ('process_measurement_id', 'process_id', 'protocol_name', 'source_sample_name', 'child_sample_name', 'volume_used', 'execution_date', 'comment') class PropertyValueSerializer(serializers.ModelSerializer): property_name = serializers.CharField(read_only=True, source="property_type.name") class Meta: model = PropertyValue fields = "__all__" extra_fields = ('property_name') class FullSampleSerializer(serializers.ModelSerializer): extracted_from = serializers.SerializerMethodField() class Meta: model = FullSample fields = "__all__" extra_fields = ('extracted_from') def get_extracted_from(self, obj): return obj.extracted_from and obj.extracted_from.id class FullSampleExportSerializer(serializers.ModelSerializer): individual_name = serializers.CharField(read_only=True, source="individual.name") taxon = 
serializers.CharField(read_only=True, source="individual.taxon") sex = serializers.CharField(read_only=True, source="individual.sex") pedigree = serializers.CharField(read_only=True, source="individual.pedigree") cohort = serializers.CharField(read_only=True, source="individual.cohort") mother_name = serializers.SerializerMethodField() father_name = serializers.SerializerMethodField() sample_kind = serializers.CharField(read_only=True, source="sample_kind.name") container_kind = serializers.CharField(read_only=True, source="container.kind") container_name = serializers.CharField(read_only=True, source="container.name") container_barcode = serializers.CharField(read_only=True, source="container.barcode") location_coord = serializers.CharField(read_only=True, source="container.coordinates") location_barcode = serializers.SerializerMethodField() current_volume = serializers.SerializerMethodField() projects_names = serializers.SerializerMethodField() quantity_flag = serializers.SerializerMethodField() quality_flag = serializers.SerializerMethodField() depleted = serializers.SerializerMethodField() class Meta: model = FullSample fields = ('id', 'biosample', 'name', 'alias', 'sample_kind', 'tissue_source', 'container', 'container_kind', 'container_name', 'container_barcode', 'coordinates', 'location_barcode', 'location_coord', 'current_volume', 'concentration', 'creation_date', 'collection_site', 'individual_name', 'sex', 'taxon', 'cohort', 'pedigree', 'father_name', 'mother_name', 'quality_flag', 'quantity_flag', 'projects_names', 'depleted', 'comment') def get_location_barcode(self, obj): if obj.container and obj.container.location is None: return '' else: return obj.container.location.barcode def get_current_volume(self, obj): return obj.volume if obj.volume else None def get_father_name(self, obj): father = '' if not obj.individual or obj.individual.father is None else obj.individual.father.name return father def get_mother_name(self, obj): mother = '' if not 
obj.individual or obj.individual.mother is None else obj.individual.mother.name return mother def get_projects_names(self, obj): return (', '.join(obj.projects_names)) def get_quality_flag(self, obj): if obj.quality_flag is None: return None else: return "Passed" if obj.quality_flag else "Failed" def get_quantity_flag(self, obj): if obj.quantity_flag is None: return None else: return "Passed" if obj.quantity_flag else "Failed" def get_depleted(self, obj): return "Yes" if obj.depleted else "No" class SampleSerializer(serializers.ModelSerializer): extracted_from = serializers.SerializerMethodField() process_measurements = serializers.PrimaryKeyRelatedField(source='process_measurement', many=True, read_only=True) projects = serializers.PrimaryKeyRelatedField(read_only=True, many=True) class Meta: model = Sample fields = "__all__" extra_fields = ('extracted_from', 'projects') def get_extracted_from(self, obj): return obj.extracted_from and obj.extracted_from.id class SampleExportSerializer(serializers.ModelSerializer): sample_id = serializers.IntegerField(read_only=True, source="id") sample_name = serializers.CharField(source="name") container_kind = serializers.CharField(read_only=True, source="container.kind") container_name = serializers.CharField(read_only=True, source="container.name") container_barcode = serializers.CharField(read_only=True, source="container.barcode") location_coord = serializers.CharField(read_only=True, source="container.coordinates") location_barcode = serializers.SerializerMethodField() current_volume = serializers.SerializerMethodField() projects = serializers.PrimaryKeyRelatedField(read_only=True, many=True) class Meta: model = Sample fields = ('sample_id', 'sample_name', 'container_kind', 'container_name', 'container_barcode', 'location_barcode', 'location_coord', 'current_volume', 'concentration', 'creation_date', 'depleted', 'coordinates', 'projects', 'comment' ) def get_location_barcode(self, obj): if obj.container.location is None: 
return '' else: return obj.container.location.barcode def get_current_volume(self, obj): return obj.volume class FullNestedSampleSerializer(serializers.ModelSerializer): # Serialize foreign keys' objects; don't allow posting new objects (rather accept foreign keys) individual = IndividualSerializer(read_only=True) container = SimpleContainerSerializer(read_only=True) class Meta: model = FullSample fields = "__all__" class VersionSerializer(serializers.ModelSerializer): class Meta: model = Version fields = "__all__" depth = 1 class RevisionSerializer(serializers.ModelSerializer): class Meta: model = Revision fields = "__all__" class UserSerializer(serializers.ModelSerializer): class Meta: model = User fields = ("id", "username", "password", "first_name", "last_name", "email", "groups", "is_staff", "is_superuser", "date_joined") extra_kwargs = { "password": {"<PASSWORD>": True} } def create(self, validated_data): user = super(UserSerializer, self).create(validated_data) user.set_password(validated_data['password']) user.save() return user class GroupSerializer(serializers.ModelSerializer): class Meta: model = Group fields = ("id", "name", "permissions") depth = 1 class ProjectSerializer(serializers.ModelSerializer): class Meta: model = Project exclude = ("samples",) class ProjectExportSerializer(serializers.ModelSerializer): class Meta: model = Project fields = ("id", "name", "principal_investigator", "requestor_name", "requestor_email", "status", "targeted_end_date", "comment")
from django.contrib.auth.models import User, Group from django.contrib.contenttypes.models import ContentType from rest_framework import serializers from reversion.models import Version, Revision from .models import ( Container, ExperimentRun, RunType, Individual, Instrument, PropertyValue, Protocol, Process, ProcessMeasurement, Sample, SampleKind, FullSample, Project, ) __all__ = [ "ContainerSerializer", "ContainerExportSerializer", "ExperimentRunSerializer", "ExperimentRunExportSerializer", "RunTypeSerializer", "SimpleContainerSerializer", "IndividualSerializer", "InstrumentSerializer", "SampleKindSerializer", "PropertyValueSerializer", "ProcessSerializer", "ProcessMeasurementSerializer", "ProcessMeasurementExportSerializer", "ProtocolSerializer", "SampleSerializer", "SampleExportSerializer", "FullSampleSerializer", "FullSampleExportSerializer", "FullNestedSampleSerializer", "VersionSerializer", "RevisionSerializer", "UserSerializer", "GroupSerializer", "ProjectSerializer", "ProjectExportSerializer", ] class ContainerSerializer(serializers.ModelSerializer): children = serializers.PrimaryKeyRelatedField(many=True, read_only=True) samples = serializers.PrimaryKeyRelatedField(many=True, read_only=True) experiment_run = serializers.PrimaryKeyRelatedField(many=False, read_only=True) class Meta: model = Container fields = "__all__" extra_fields = ('experiment_run') class SimpleContainerSerializer(serializers.ModelSerializer): class Meta: model = Container fields = "__all__" class ContainerExportSerializer(serializers.ModelSerializer): location = serializers.SlugRelatedField(slug_field='barcode', read_only=True) container_kind = serializers.CharField(source='kind') class Meta: model = Container fields = ('name', 'container_kind', 'barcode', 'location', 'coordinates', 'comment') class ExperimentRunSerializer(serializers.ModelSerializer): children_processes = serializers.SerializerMethodField() instrument_type = serializers.SerializerMethodField() platform = 
serializers.SerializerMethodField() class Meta: model = ExperimentRun fields = "__all__" extra_fields = ('children_processes', 'instrument_type', 'platform') def get_children_processes(self, obj): return Process.objects.filter(parent_process=obj.process).values_list('id', flat=True) def get_instrument_type(self, obj): return obj.instrument.type.type def get_platform(self, obj): return obj.instrument.type.platform.name class ExperimentRunExportSerializer(serializers.ModelSerializer): experiment_run_id = serializers.IntegerField(read_only=True, source="id") experiment_run_name = serializers.CharField(read_only=True, source="name") run_type = serializers.CharField(read_only=True, source="run_type.name") instrument = serializers.CharField(read_only=True, source="instrument.name") container_kind = serializers.CharField(read_only=True, source="container.kind") container_name = serializers.CharField(read_only=True, source="container.name") container_barcode = serializers.CharField(read_only=True, source="container.barcode") class Meta: model = ExperimentRun fields = ('experiment_run_id', 'experiment_run_name', 'run_type', 'instrument', 'container_kind', 'container_name', 'container_barcode', 'start_date') class RunTypeSerializer(serializers.ModelSerializer): class Meta: model = RunType fields = "__all__" class IndividualSerializer(serializers.ModelSerializer): class Meta: model = Individual fields = "__all__" class InstrumentSerializer(serializers.ModelSerializer): class Meta: model = Instrument fields = "__all__" class SampleKindSerializer(serializers.ModelSerializer): class Meta: model = SampleKind fields = "__all__" class ProtocolSerializer(serializers.ModelSerializer): class Meta: model = Protocol fields = "__all__" class ProcessSerializer(serializers.ModelSerializer): children_properties = serializers.SerializerMethodField() class Meta: model = Process fields = "__all__" extra_fields = ('children_processes') def get_children_properties(self, obj): 
process_content_type = ContentType.objects.get_for_model(Process) return PropertyValue.objects.filter(object_id=obj.id, content_type=process_content_type).values_list('id', flat=True) class ProcessMeasurementSerializer(serializers.ModelSerializer): protocol = serializers.IntegerField(read_only=True, source="process.protocol.id") child_sample = serializers.IntegerField(read_only=True) properties = serializers.SerializerMethodField() class Meta: model = ProcessMeasurement fields = "__all__" extra_fields = ('protocol', 'child_sample') def get_properties(self, obj): pm_content_type = ContentType.objects.get_for_model(ProcessMeasurement) return PropertyValue.objects.filter(object_id=obj.id, content_type=pm_content_type).values_list('id', flat=True) class ProcessMeasurementExportSerializer(serializers.ModelSerializer): process_measurement_id = serializers.IntegerField(read_only=True, source="id") protocol_name = serializers.CharField(read_only=True, source="process.protocol.name") child_sample_name = serializers.CharField(read_only=True) source_sample_name = serializers.CharField(read_only=True) class Meta: model = ProcessMeasurement fields = ('process_measurement_id', 'process_id', 'protocol_name', 'source_sample_name', 'child_sample_name', 'volume_used', 'execution_date', 'comment') class PropertyValueSerializer(serializers.ModelSerializer): property_name = serializers.CharField(read_only=True, source="property_type.name") class Meta: model = PropertyValue fields = "__all__" extra_fields = ('property_name') class FullSampleSerializer(serializers.ModelSerializer): extracted_from = serializers.SerializerMethodField() class Meta: model = FullSample fields = "__all__" extra_fields = ('extracted_from') def get_extracted_from(self, obj): return obj.extracted_from and obj.extracted_from.id class FullSampleExportSerializer(serializers.ModelSerializer): individual_name = serializers.CharField(read_only=True, source="individual.name") taxon = 
serializers.CharField(read_only=True, source="individual.taxon") sex = serializers.CharField(read_only=True, source="individual.sex") pedigree = serializers.CharField(read_only=True, source="individual.pedigree") cohort = serializers.CharField(read_only=True, source="individual.cohort") mother_name = serializers.SerializerMethodField() father_name = serializers.SerializerMethodField() sample_kind = serializers.CharField(read_only=True, source="sample_kind.name") container_kind = serializers.CharField(read_only=True, source="container.kind") container_name = serializers.CharField(read_only=True, source="container.name") container_barcode = serializers.CharField(read_only=True, source="container.barcode") location_coord = serializers.CharField(read_only=True, source="container.coordinates") location_barcode = serializers.SerializerMethodField() current_volume = serializers.SerializerMethodField() projects_names = serializers.SerializerMethodField() quantity_flag = serializers.SerializerMethodField() quality_flag = serializers.SerializerMethodField() depleted = serializers.SerializerMethodField() class Meta: model = FullSample fields = ('id', 'biosample', 'name', 'alias', 'sample_kind', 'tissue_source', 'container', 'container_kind', 'container_name', 'container_barcode', 'coordinates', 'location_barcode', 'location_coord', 'current_volume', 'concentration', 'creation_date', 'collection_site', 'individual_name', 'sex', 'taxon', 'cohort', 'pedigree', 'father_name', 'mother_name', 'quality_flag', 'quantity_flag', 'projects_names', 'depleted', 'comment') def get_location_barcode(self, obj): if obj.container and obj.container.location is None: return '' else: return obj.container.location.barcode def get_current_volume(self, obj): return obj.volume if obj.volume else None def get_father_name(self, obj): father = '' if not obj.individual or obj.individual.father is None else obj.individual.father.name return father def get_mother_name(self, obj): mother = '' if not 
obj.individual or obj.individual.mother is None else obj.individual.mother.name return mother def get_projects_names(self, obj): return (', '.join(obj.projects_names)) def get_quality_flag(self, obj): if obj.quality_flag is None: return None else: return "Passed" if obj.quality_flag else "Failed" def get_quantity_flag(self, obj): if obj.quantity_flag is None: return None else: return "Passed" if obj.quantity_flag else "Failed" def get_depleted(self, obj): return "Yes" if obj.depleted else "No" class SampleSerializer(serializers.ModelSerializer): extracted_from = serializers.SerializerMethodField() process_measurements = serializers.PrimaryKeyRelatedField(source='process_measurement', many=True, read_only=True) projects = serializers.PrimaryKeyRelatedField(read_only=True, many=True) class Meta: model = Sample fields = "__all__" extra_fields = ('extracted_from', 'projects') def get_extracted_from(self, obj): return obj.extracted_from and obj.extracted_from.id class SampleExportSerializer(serializers.ModelSerializer): sample_id = serializers.IntegerField(read_only=True, source="id") sample_name = serializers.CharField(source="name") container_kind = serializers.CharField(read_only=True, source="container.kind") container_name = serializers.CharField(read_only=True, source="container.name") container_barcode = serializers.CharField(read_only=True, source="container.barcode") location_coord = serializers.CharField(read_only=True, source="container.coordinates") location_barcode = serializers.SerializerMethodField() current_volume = serializers.SerializerMethodField() projects = serializers.PrimaryKeyRelatedField(read_only=True, many=True) class Meta: model = Sample fields = ('sample_id', 'sample_name', 'container_kind', 'container_name', 'container_barcode', 'location_barcode', 'location_coord', 'current_volume', 'concentration', 'creation_date', 'depleted', 'coordinates', 'projects', 'comment' ) def get_location_barcode(self, obj): if obj.container.location is None: 
return '' else: return obj.container.location.barcode def get_current_volume(self, obj): return obj.volume class FullNestedSampleSerializer(serializers.ModelSerializer): # Serialize foreign keys' objects; don't allow posting new objects (rather accept foreign keys) individual = IndividualSerializer(read_only=True) container = SimpleContainerSerializer(read_only=True) class Meta: model = FullSample fields = "__all__" class VersionSerializer(serializers.ModelSerializer): class Meta: model = Version fields = "__all__" depth = 1 class RevisionSerializer(serializers.ModelSerializer): class Meta: model = Revision fields = "__all__" class UserSerializer(serializers.ModelSerializer): class Meta: model = User fields = ("id", "username", "password", "first_name", "last_name", "email", "groups", "is_staff", "is_superuser", "date_joined") extra_kwargs = { "password": {"<PASSWORD>": True} } def create(self, validated_data): user = super(UserSerializer, self).create(validated_data) user.set_password(validated_data['password']) user.save() return user class GroupSerializer(serializers.ModelSerializer): class Meta: model = Group fields = ("id", "name", "permissions") depth = 1 class ProjectSerializer(serializers.ModelSerializer): class Meta: model = Project exclude = ("samples",) class ProjectExportSerializer(serializers.ModelSerializer): class Meta: model = Project fields = ("id", "name", "principal_investigator", "requestor_name", "requestor_email", "status", "targeted_end_date", "comment")
en
0.911848
# Serialize foreign keys' objects; don't allow posting new objects (rather accept foreign keys)
1.996564
2
gym_solo/core/obs.py
WPI-MMR/solo-gym
9
6628746
from abc import ABC, abstractmethod from pybullet_utils import bullet_client from typing import List, Tuple import pybullet as p import numpy as np import math import gym from gym import spaces from gym_solo import solo_types class Observation(ABC): """An observation for a body in the pybullet simulation. Attributes: _client: The PyBullet client for the instance. Will be set via a property setter. """ _client: bullet_client.BulletClient = None @abstractmethod def __init__(self, body_id: int): """Create a new Observation. Note that for every child of this class, each one *needs* to specify which pybullet body id they would like to track the observation for. Args: body_id (int): PyBullet body id to get the observation for. """ pass @property @abstractmethod def observation_space(self) -> spaces.Space: """Get the observation space of the Observation. Returns: spaces.Space: The observation space. """ pass @property @abstractmethod def labels(self) -> List[str]: """A list of labels corresponding to the observation. i.e. if the observation was [1, 2, 3], and the labels were ['a', 'b', 'c'], then a = 1, b = 2, c = 3. Returns: List[str]: Labels, where the index is the same as its respective observation. """ pass @abstractmethod def compute(self) -> solo_types.obs: """Compute the observation for the current state. Returns: solo_types.obs: Specified observation for the current state. """ pass @property def client(self) -> bullet_client.BulletClient: """Get the Observation's physics client. Raises: ValueError: If the PyBullet client hasn't been set yet. Returns: bullet_client.BulletClient: The active client for the observation. """ if not self._client: raise ValueError('PyBullet client needs to be set') return self._client @client.setter def client(self, client: bullet_client.BulletClient): """Set the Observation's physics client. Args: client (bullet_client.BulletClient): The client to use for the observation. 
""" self._client = client class ObservationFactory: def __init__(self, client: bullet_client.BulletClient, normalize: bool = False): """Create a new Observation Factory. Args: client (bullet_client.BulletClient): Pybullet client to perform calculations. """ self._client = client self._observations = [] self._obs_space = None self._normalize = normalize def register_observation(self, obs: Observation): """Add an observation to be computed. Args: obs (Observation): Observation to be tracked. """ obs.client = self._client lbl_len = len(obs.labels) obs_space_len = len(obs.observation_space.low) obs_len = obs.compute().size if lbl_len != obs_space_len: raise ValueError('Labels have length {} != obs space len {}'.format( lbl_len, obs_space_len)) if lbl_len != obs_len: raise ValueError('Labels have length {} != obs len {}'.format( lbl_len, obs_len)) self._observations.append(obs) def get_obs(self) -> Tuple[solo_types.obs, List[str]]: """Get all of the observations for the current state. Returns: Tuple[solo_types.obs, List[str]]: The observations and associated labels. len(observations) == len(labels) and labels[i] corresponds to the i-th observation. """ if not self._observations: raise ValueError('Need to register at least one observation instance') all_obs = [] all_labels = [] for obs in self._observations: all_labels.append(obs.labels) values = obs.compute() if self._normalize: a = np.array(values) low = obs.observation_space.low hi = obs.observation_space.high values = ((2 * (a - low)) / (hi - low)) - 1 all_obs.append(values) observations = np.concatenate(all_obs) labels = [l for lbl in all_labels for l in lbl] return observations, labels def get_observation_space(self, generate=False) -> spaces.Box: """Get the combined observation space of all of the registered observations. Args: generate (bool, optional): Whether or not to regenerate the observation space or just used the cached ersion. 
Note that some Observations might dynamically generate their observation space, so this could be a potentially expensive operation. Defaults to False. Raises: ValueError: If no observations are registered. Returns: spaces.Box: The observation space of the registered Observations. """ if not self._observations: raise ValueError('Can\'t generate an empty observation space') if self._obs_space and not generate: return self._obs_space lower, upper = [], [] for obs in self._observations: lower.extend(obs.observation_space.low) upper.extend(obs.observation_space.high) if self._normalize: self._obs_space = spaces.Box(low=-1, high=1, shape=(len(lower),)) else: self._obs_space = spaces.Box(low=np.array(lower), high=np.array(upper)) return self._obs_space class TorsoIMU(Observation): """Get the orientation and velocities of the Solo 8 torso. Attributes: labels (List[str]): The labels associated with the outputted observation robot (int): PyBullet BodyId for the robot. """ # TODO: Add angular acceleration to support production IMUs labels: List[str] = ['θx', 'θy', 'θz', 'vx', 'vy', 'vz', 'wx', 'wy', 'wz'] def __init__(self, body_id: int, degrees: bool = False, max_lin_velocity: float = 15, max_angular_velocity: float = 10.): """Create a new TorsoIMU observation Args: body_id (int): The PyBullet body id for the robot. degrees (bool, optional): Whether or not to return angles in degrees. Defaults to False. max_lin_velocity (float, optional): The maximum linear velocity read by TorsoIMU. Any torso reading made past this will just get clamped. The unit is arbritary, but it is recommended that this is experimentally found. Defaults to 10. max_ang_velocity (float, optional): The maximum angular velocity read by TorsoIMU. Any torso reading made past this will just get clamped. The unit is arbritary, but it is recommended that this is experimentally found. Units are in rad/s unless degrees == True. Deaults to 10. 
""" self.robot = body_id self._degrees = degrees self._max_lin = max_lin_velocity self._max_ang = max_angular_velocity self._low = None self._high = None self.observation_space # Populate the bounds incase it dosn't get called @property def observation_space(self) -> spaces.Box: """Get the observation space for the IMU mounted on the Torso. The IMU in this case return the orientation of the torso (x, y, z angles), the linear velocity (vx, vy, vz), and the angular velocity (wx, wy, wz). Note that the range for the angle measurements is [-180, 180] degrees. This value can be toggled between degrees and radians at instantiation. Returns: spaces.Box: The observation space corresponding to (θx, θy, θz, vx, vy, vz, wx, wy, wz) """ angle_min = -180. if self._degrees else -np.pi angle_max = 180. if self._degrees else np.pi lower = [angle_min, angle_min, angle_min, # Orientation -self._max_lin, -self._max_lin, -self._max_lin, # Linear Velocity -self._max_ang, -self._max_ang, -self._max_ang] # Angular Velocity upper = [angle_max, angle_max, angle_max, # Same as above self._max_lin, self._max_lin, self._max_lin, self._max_ang, self._max_ang, self._max_ang] if not (self._low and self._high): self._low = lower self._high = upper return spaces.Box(low=np.array(lower), high=np.array(upper)) def compute(self) -> solo_types.obs: """Compute the torso IMU values for a state. Returns: solo_types.obs: The observation for the current state (accessed via pybullet). Note the values are bounded by self.observation_space even if the true value is greater than that (i.e. 
the observation is clipped) """ _, orien_quat = self.client.getBasePositionAndOrientation(self.robot) # Orien is in (x, y, z) orien = np.array(self.client.getEulerFromQuaternion(orien_quat)) v_lin, v_ang = self.client.getBaseVelocity(self.robot) v_lin = np.array(v_lin) v_ang = np.array(v_ang) if self._degrees: orien = np.degrees(orien) v_ang = np.degrees(v_ang) raw_values = np.concatenate([orien, v_lin, v_ang]) return np.clip(raw_values, self._low, self._high) class MotorEncoder(Observation): """Get the position of the all the joints """ def __init__(self, body_id: int, degrees: bool = False, max_rotation: float = None): """Create a new MotorEncoder observation Args: body_id (int): The PyBullet body id for the robot. degrees (bool, optional): Whether or not to return angles in degrees. Defaults to False. max_rotation (float, optional): Artificially limit the range of the motor encoders. Note then that the motor encoder observation space then becomes (low=[-max_rotation] * joints, high=[max_rotation] * joints). Defaults to the max values as per the URDF. """ self.robot = body_id self._degrees = degrees self._max_rot = max_rotation @property def _num_joints(self): return self.client.getNumJoints(self.robot) @property def observation_space(self) -> spaces.Space: """Gets the observation space for the joints Returns:: spaces.Space: The observation space corresponding to the labels """ if self._max_rot: return spaces.Box(low=-self._max_rot, high=self._max_rot, shape=(self._num_joints, )) lower, upper = [], [] for joint in range(self._num_joints): joint_info = self.client.getJointInfo(self.robot, joint) lower.append(joint_info[8]) upper.append(joint_info[9]) lower = np.array(lower) upper = np.array(upper) if self._degrees: lower = np.degrees(lower) upper = np.degrees(upper) return spaces.Box(low=lower, high=upper) @property def labels(self) -> List[str]: """A list of labels corresponding to the observation. 
Returns: List[str]: Labels, where the index is the same as its respective observation. """ return [self.client.getJointInfo(self.robot, joint)[1].decode('UTF-8') for joint in range(self._num_joints)] def compute(self) -> solo_types.obs: """Computes the motor position values all the joints of the robot for the current state. Clipped to the max allowed motor rotations, if set. Returns: solo_types.obs: The observation extracted from pybullet """ joint_values = np.array([self.client.getJointState(self.robot, i)[0] for i in range(self._num_joints)]) if self._degrees: joint_values = np.degrees(joint_values) if self._max_rot: joint_values = np.clip(joint_values, -self._max_rot, self._max_rot) return joint_values
from abc import ABC, abstractmethod from pybullet_utils import bullet_client from typing import List, Tuple import pybullet as p import numpy as np import math import gym from gym import spaces from gym_solo import solo_types class Observation(ABC): """An observation for a body in the pybullet simulation. Attributes: _client: The PyBullet client for the instance. Will be set via a property setter. """ _client: bullet_client.BulletClient = None @abstractmethod def __init__(self, body_id: int): """Create a new Observation. Note that for every child of this class, each one *needs* to specify which pybullet body id they would like to track the observation for. Args: body_id (int): PyBullet body id to get the observation for. """ pass @property @abstractmethod def observation_space(self) -> spaces.Space: """Get the observation space of the Observation. Returns: spaces.Space: The observation space. """ pass @property @abstractmethod def labels(self) -> List[str]: """A list of labels corresponding to the observation. i.e. if the observation was [1, 2, 3], and the labels were ['a', 'b', 'c'], then a = 1, b = 2, c = 3. Returns: List[str]: Labels, where the index is the same as its respective observation. """ pass @abstractmethod def compute(self) -> solo_types.obs: """Compute the observation for the current state. Returns: solo_types.obs: Specified observation for the current state. """ pass @property def client(self) -> bullet_client.BulletClient: """Get the Observation's physics client. Raises: ValueError: If the PyBullet client hasn't been set yet. Returns: bullet_client.BulletClient: The active client for the observation. """ if not self._client: raise ValueError('PyBullet client needs to be set') return self._client @client.setter def client(self, client: bullet_client.BulletClient): """Set the Observation's physics client. Args: client (bullet_client.BulletClient): The client to use for the observation. 
""" self._client = client class ObservationFactory: def __init__(self, client: bullet_client.BulletClient, normalize: bool = False): """Create a new Observation Factory. Args: client (bullet_client.BulletClient): Pybullet client to perform calculations. """ self._client = client self._observations = [] self._obs_space = None self._normalize = normalize def register_observation(self, obs: Observation): """Add an observation to be computed. Args: obs (Observation): Observation to be tracked. """ obs.client = self._client lbl_len = len(obs.labels) obs_space_len = len(obs.observation_space.low) obs_len = obs.compute().size if lbl_len != obs_space_len: raise ValueError('Labels have length {} != obs space len {}'.format( lbl_len, obs_space_len)) if lbl_len != obs_len: raise ValueError('Labels have length {} != obs len {}'.format( lbl_len, obs_len)) self._observations.append(obs) def get_obs(self) -> Tuple[solo_types.obs, List[str]]: """Get all of the observations for the current state. Returns: Tuple[solo_types.obs, List[str]]: The observations and associated labels. len(observations) == len(labels) and labels[i] corresponds to the i-th observation. """ if not self._observations: raise ValueError('Need to register at least one observation instance') all_obs = [] all_labels = [] for obs in self._observations: all_labels.append(obs.labels) values = obs.compute() if self._normalize: a = np.array(values) low = obs.observation_space.low hi = obs.observation_space.high values = ((2 * (a - low)) / (hi - low)) - 1 all_obs.append(values) observations = np.concatenate(all_obs) labels = [l for lbl in all_labels for l in lbl] return observations, labels def get_observation_space(self, generate=False) -> spaces.Box: """Get the combined observation space of all of the registered observations. Args: generate (bool, optional): Whether or not to regenerate the observation space or just used the cached ersion. 
Note that some Observations might dynamically generate their observation space, so this could be a potentially expensive operation. Defaults to False. Raises: ValueError: If no observations are registered. Returns: spaces.Box: The observation space of the registered Observations. """ if not self._observations: raise ValueError('Can\'t generate an empty observation space') if self._obs_space and not generate: return self._obs_space lower, upper = [], [] for obs in self._observations: lower.extend(obs.observation_space.low) upper.extend(obs.observation_space.high) if self._normalize: self._obs_space = spaces.Box(low=-1, high=1, shape=(len(lower),)) else: self._obs_space = spaces.Box(low=np.array(lower), high=np.array(upper)) return self._obs_space class TorsoIMU(Observation): """Get the orientation and velocities of the Solo 8 torso. Attributes: labels (List[str]): The labels associated with the outputted observation robot (int): PyBullet BodyId for the robot. """ # TODO: Add angular acceleration to support production IMUs labels: List[str] = ['θx', 'θy', 'θz', 'vx', 'vy', 'vz', 'wx', 'wy', 'wz'] def __init__(self, body_id: int, degrees: bool = False, max_lin_velocity: float = 15, max_angular_velocity: float = 10.): """Create a new TorsoIMU observation Args: body_id (int): The PyBullet body id for the robot. degrees (bool, optional): Whether or not to return angles in degrees. Defaults to False. max_lin_velocity (float, optional): The maximum linear velocity read by TorsoIMU. Any torso reading made past this will just get clamped. The unit is arbritary, but it is recommended that this is experimentally found. Defaults to 10. max_ang_velocity (float, optional): The maximum angular velocity read by TorsoIMU. Any torso reading made past this will just get clamped. The unit is arbritary, but it is recommended that this is experimentally found. Units are in rad/s unless degrees == True. Deaults to 10. 
""" self.robot = body_id self._degrees = degrees self._max_lin = max_lin_velocity self._max_ang = max_angular_velocity self._low = None self._high = None self.observation_space # Populate the bounds incase it dosn't get called @property def observation_space(self) -> spaces.Box: """Get the observation space for the IMU mounted on the Torso. The IMU in this case return the orientation of the torso (x, y, z angles), the linear velocity (vx, vy, vz), and the angular velocity (wx, wy, wz). Note that the range for the angle measurements is [-180, 180] degrees. This value can be toggled between degrees and radians at instantiation. Returns: spaces.Box: The observation space corresponding to (θx, θy, θz, vx, vy, vz, wx, wy, wz) """ angle_min = -180. if self._degrees else -np.pi angle_max = 180. if self._degrees else np.pi lower = [angle_min, angle_min, angle_min, # Orientation -self._max_lin, -self._max_lin, -self._max_lin, # Linear Velocity -self._max_ang, -self._max_ang, -self._max_ang] # Angular Velocity upper = [angle_max, angle_max, angle_max, # Same as above self._max_lin, self._max_lin, self._max_lin, self._max_ang, self._max_ang, self._max_ang] if not (self._low and self._high): self._low = lower self._high = upper return spaces.Box(low=np.array(lower), high=np.array(upper)) def compute(self) -> solo_types.obs: """Compute the torso IMU values for a state. Returns: solo_types.obs: The observation for the current state (accessed via pybullet). Note the values are bounded by self.observation_space even if the true value is greater than that (i.e. 
the observation is clipped) """ _, orien_quat = self.client.getBasePositionAndOrientation(self.robot) # Orien is in (x, y, z) orien = np.array(self.client.getEulerFromQuaternion(orien_quat)) v_lin, v_ang = self.client.getBaseVelocity(self.robot) v_lin = np.array(v_lin) v_ang = np.array(v_ang) if self._degrees: orien = np.degrees(orien) v_ang = np.degrees(v_ang) raw_values = np.concatenate([orien, v_lin, v_ang]) return np.clip(raw_values, self._low, self._high) class MotorEncoder(Observation): """Get the position of the all the joints """ def __init__(self, body_id: int, degrees: bool = False, max_rotation: float = None): """Create a new MotorEncoder observation Args: body_id (int): The PyBullet body id for the robot. degrees (bool, optional): Whether or not to return angles in degrees. Defaults to False. max_rotation (float, optional): Artificially limit the range of the motor encoders. Note then that the motor encoder observation space then becomes (low=[-max_rotation] * joints, high=[max_rotation] * joints). Defaults to the max values as per the URDF. """ self.robot = body_id self._degrees = degrees self._max_rot = max_rotation @property def _num_joints(self): return self.client.getNumJoints(self.robot) @property def observation_space(self) -> spaces.Space: """Gets the observation space for the joints Returns:: spaces.Space: The observation space corresponding to the labels """ if self._max_rot: return spaces.Box(low=-self._max_rot, high=self._max_rot, shape=(self._num_joints, )) lower, upper = [], [] for joint in range(self._num_joints): joint_info = self.client.getJointInfo(self.robot, joint) lower.append(joint_info[8]) upper.append(joint_info[9]) lower = np.array(lower) upper = np.array(upper) if self._degrees: lower = np.degrees(lower) upper = np.degrees(upper) return spaces.Box(low=lower, high=upper) @property def labels(self) -> List[str]: """A list of labels corresponding to the observation. 
Returns: List[str]: Labels, where the index is the same as its respective observation. """ return [self.client.getJointInfo(self.robot, joint)[1].decode('UTF-8') for joint in range(self._num_joints)] def compute(self) -> solo_types.obs: """Computes the motor position values all the joints of the robot for the current state. Clipped to the max allowed motor rotations, if set. Returns: solo_types.obs: The observation extracted from pybullet """ joint_values = np.array([self.client.getJointState(self.robot, i)[0] for i in range(self._num_joints)]) if self._degrees: joint_values = np.degrees(joint_values) if self._max_rot: joint_values = np.clip(joint_values, -self._max_rot, self._max_rot) return joint_values
en
0.779138
An observation for a body in the pybullet simulation. Attributes: _client: The PyBullet client for the instance. Will be set via a property setter. Create a new Observation. Note that for every child of this class, each one *needs* to specify which pybullet body id they would like to track the observation for. Args: body_id (int): PyBullet body id to get the observation for. Get the observation space of the Observation. Returns: spaces.Space: The observation space. A list of labels corresponding to the observation. i.e. if the observation was [1, 2, 3], and the labels were ['a', 'b', 'c'], then a = 1, b = 2, c = 3. Returns: List[str]: Labels, where the index is the same as its respective observation. Compute the observation for the current state. Returns: solo_types.obs: Specified observation for the current state. Get the Observation's physics client. Raises: ValueError: If the PyBullet client hasn't been set yet. Returns: bullet_client.BulletClient: The active client for the observation. Set the Observation's physics client. Args: client (bullet_client.BulletClient): The client to use for the observation. Create a new Observation Factory. Args: client (bullet_client.BulletClient): Pybullet client to perform calculations. Add an observation to be computed. Args: obs (Observation): Observation to be tracked. Get all of the observations for the current state. Returns: Tuple[solo_types.obs, List[str]]: The observations and associated labels. len(observations) == len(labels) and labels[i] corresponds to the i-th observation. Get the combined observation space of all of the registered observations. Args: generate (bool, optional): Whether or not to regenerate the observation space or just used the cached ersion. Note that some Observations might dynamically generate their observation space, so this could be a potentially expensive operation. Defaults to False. Raises: ValueError: If no observations are registered. 
Returns: spaces.Box: The observation space of the registered Observations. Get the orientation and velocities of the Solo 8 torso. Attributes: labels (List[str]): The labels associated with the outputted observation robot (int): PyBullet BodyId for the robot. # TODO: Add angular acceleration to support production IMUs Create a new TorsoIMU observation Args: body_id (int): The PyBullet body id for the robot. degrees (bool, optional): Whether or not to return angles in degrees. Defaults to False. max_lin_velocity (float, optional): The maximum linear velocity read by TorsoIMU. Any torso reading made past this will just get clamped. The unit is arbritary, but it is recommended that this is experimentally found. Defaults to 10. max_ang_velocity (float, optional): The maximum angular velocity read by TorsoIMU. Any torso reading made past this will just get clamped. The unit is arbritary, but it is recommended that this is experimentally found. Units are in rad/s unless degrees == True. Deaults to 10. # Populate the bounds incase it dosn't get called Get the observation space for the IMU mounted on the Torso. The IMU in this case return the orientation of the torso (x, y, z angles), the linear velocity (vx, vy, vz), and the angular velocity (wx, wy, wz). Note that the range for the angle measurements is [-180, 180] degrees. This value can be toggled between degrees and radians at instantiation. Returns: spaces.Box: The observation space corresponding to (θx, θy, θz, vx, vy, vz, wx, wy, wz) # Orientation # Linear Velocity # Angular Velocity # Same as above Compute the torso IMU values for a state. Returns: solo_types.obs: The observation for the current state (accessed via pybullet). Note the values are bounded by self.observation_space even if the true value is greater than that (i.e. the observation is clipped) # Orien is in (x, y, z) Get the position of the all the joints Create a new MotorEncoder observation Args: body_id (int): The PyBullet body id for the robot. 
degrees (bool, optional): Whether or not to return angles in degrees. Defaults to False. max_rotation (float, optional): Artificially limit the range of the motor encoders. Note then that the motor encoder observation space then becomes (low=[-max_rotation] * joints, high=[max_rotation] * joints). Defaults to the max values as per the URDF. Gets the observation space for the joints Returns:: spaces.Space: The observation space corresponding to the labels A list of labels corresponding to the observation. Returns: List[str]: Labels, where the index is the same as its respective observation. Computes the motor position values all the joints of the robot for the current state. Clipped to the max allowed motor rotations, if set. Returns: solo_types.obs: The observation extracted from pybullet
3.079138
3
scripts/perturbate_dataset.py
airbert-vln/bnb-dataset
7
6628747
<filename>scripts/perturbate_dataset.py<gh_stars>1-10 from typing import List, Dict, Callable, Optional, Tuple, Iterator, Any, Set from typing_extensions import Literal from itertools import combinations, product import os import re import copy from collections import OrderedDict, defaultdict import string import functools import random from dataclasses import dataclass, field from pathlib import Path import json import argtyped from tqdm.auto import tqdm import networkx as nx from transformers import PreTrainedTokenizer, AutoTokenizer import numpy as np import torch from torch import nn import allennlp from allennlp.predictors.predictor import Predictor from allennlp.data.tokenizers.spacy_tokenizer import SpacyTokenizer as AllenTokenizer import allennlp_models.structured_prediction from allennlp_models.pretrained import load_predictor import nltk from nltk.corpus import stopwords from nltk.corpus import wordnet as wn from nltk.tokenize.treebank import TreebankWordDetokenizer, TreebankWordTokenizer import spacy from spacy.tokenizer import Tokenizer as SpacyTokenizer from spacy.tokens import Token from utils.dataset.pano_features_reader import PanoFeaturesReader from scripts.highlight_sentence import POSHighlighter from vln_bert import VLNBert from vilbert.vilbert import BertConfig from utils.dataset.common import get_headings, load_nav_graphs random.seed(0) nlp = spacy.load("en") # nltk.download('stopwords') def read_dataset(infile: Path): with open(infile) as fid: return json.load(fid) def save_dataset(dataset: Any, outfile: Path): with open(outfile, "w") as fid: return json.dump(dataset, fid, indent=2) @dataclass class Sample: instr: str path: List[str] scan: str heading: float @dataclass class TokenPerturbation: text: str span: Tuple[int, int] mode: str = "NONE" cand: List[str] = field(default_factory=list) def fill_with_none( sentence: str, tokens: List[TokenPerturbation] ) -> List[TokenPerturbation]: filled: List[TokenPerturbation] = [] cursor = 0 for token in 
tokens: start, end = token.span if start > cursor: filled.append( TokenPerturbation(sentence[cursor:start], (cursor, start - 1), "NONE") ) cursor = end + 1 filled.append(token) if cursor < len(sentence): filled.append( TokenPerturbation(sentence[cursor:], (cursor, len(sentence) - 1), "NONE") ) return filled def random_order_cartesian_product(*factors): """ https://stackoverflow.com/a/53895551/4986615 """ amount = functools.reduce(lambda prod, factor: prod * len(list(factor)), factors, 1) index_linked_list = [None, None] for max_index in reversed(range(amount)): index = random.randint(0, max_index) index_link = index_linked_list while index_link[1] is not None and index_link[1][0] <= index: index += 1 index_link = index_link[1] index_link[1] = [index, index_link[1]] items = [] for factor in factors: items.append(factor[index % len(factor)]) index //= len(factor) yield items @dataclass class Perturbation: num_perturbations: int = 1 tokenizer: AllenTokenizer = AllenTokenizer() def segment(self, sentence: str) -> List[TokenPerturbation]: raise NotImplementedError() def __call__(self, sample: Sample) -> Iterator[str]: sentence = sample.instr # We need to add space between each word to avoid a mismatch tokens = self.tokenizer.tokenize(sentence.lower().rstrip()) fake_sentence = " ".join([str(token) for token in tokens]) segments = self.segment(fake_sentence) corruptable = [i for i, tok in enumerate(segments) if tok.mode != "NONE"] random.shuffle(corruptable) candidates = combinations(corruptable, self.num_perturbations) cand_tokens = [list(range(len(segment.cand))) for segment in segments] iterators = {} for candidate in candidates: tokens = [cand_tokens[i] for i in candidate] iterators[candidate] = random_order_cartesian_product(*tokens) while True: if not iterators: return candidate, it = random.choice(list(iterators.items())) try: indexes = next(it) except StopIteration: del iterators[candidate] continue words = [] j = 0 for i, segment in enumerate(segments): if i in 
candidate: words.append(segment.cand[indexes[j]]) j += 1 else: words.append(segment.text) yield "".join(words) @dataclass class MaskPerturbation(Perturbation): """ Candidates are replaced by [MASK] for being replaced later by BERT """ perturbator: Perturbation = field(default_factory=lambda: Perturbation()) def segment(self, sentence: str) -> List[TokenPerturbation]: segments = self.perturbator.segment(sentence) for segment in segments: if len(segment.cand) == 0: continue mask = re.sub(r"^(\W)*.*\w+(\W)*$", r"\1[MASK]\2", segment.cand[0]) segment.cand = [mask] return segments def lm_replacements(masked_sentence: str, predictor: Predictor) -> Iterator[str]: assert "[MASK]" in masked_sentence if not isinstance( predictor, allennlp_models.lm.predictors.masked_language_model.MaskedLanguageModelPredictor, ): raise NotImplementedError() convert_tokens_to_string = ( predictor._dataset_reader._tokenizer.tokenizer.convert_tokens_to_string ) predictions = predictor.predict(masked_sentence) tokens = predictions["tokens"] # import ipdb # ipdb.set_trace() for words in product(*predictions["words"]): cand_tokens = copy.deepcopy(tokens) for i, word in enumerate(words): cand_tokens[cand_tokens.index("[MASK]", i)] = word yield convert_tokens_to_string(cand_tokens[1:-1]) @dataclass class BertPerturbation(Perturbation): """ Candidates are replaced by the best BERT prediction for being replaced later by BERT """ perturbator: Perturbation = field(default_factory=lambda: Perturbation()) predictor: Predictor = field( default_factory=lambda: load_predictor("lm-masked-language-model") ) def __call__(self, sample: Sample) -> Iterator[str]: for masked_sentence in self.perturbator(sample): for fixed_sentence in lm_replacements(masked_sentence, self.predictor): yield fixed_sentence class Graphs: def __init__(self): self._graphs: Dict[str, nx.Graph] = {} def __getitem__(self, scan: str): if scan not in self._graphs: self._graphs[scan] = load_nav_graphs([scan])[scan] return self._graphs[scan] 
def is_punctuation(s: str): return s != "" and s.translate(str.maketrans("", "", string.punctuation + string.whitespace)) == "" def load_vilbert() -> nn.Module: config = BertConfig.from_json_file( "data/config/bert_base_6_layer_6_connect.json" ) model = VLNBert.from_pretrained("data/model_zoos/vlnbert.123mine.bin", config) model = model.cuda() model.eval() return model def synonyms(name: str): return set([l.name() for s in wn.synsets(name.strip()) for l in s.lemmas()]) def hypernyms(name: str): return set([l.name() for s in wn.synsets(name.strip()) for k in s.hypernyms() for l in k.lemmas()]) def same_meaning(a: str, b: str): return b in synonyms(a) or a in synonyms(b) or b in hypernyms(a) or a in hypernyms(b) @dataclass class VilbertPerturbation(Perturbation): """ Candidates are replaced by the best VilBERT prediction for being replaced later by BERT FIXME this perturbator should not be combined directly with any other perturbator """ perturbator: Perturbation = field(default_factory=lambda: Perturbation()) model: nn.Module = field(default_factory=load_vilbert) tokenizer: AutoTokenizer = field( default_factory=lambda: AutoTokenizer.from_pretrained("bert-base-uncased") ) features: PanoFeaturesReader = field( default_factory=lambda: PanoFeaturesReader( "data/matterport-ResNet-101-faster-rcnn-genome.lmdb" ) ) highlighter: POSHighlighter = field(default_factory=POSHighlighter) graphs: Graphs = field(default_factory=Graphs) def get_model_input(self, sample: Sample) -> Tuple[Optional[torch.Tensor], ...]: tokens = self.tokenizer.tokenize(sample.instr) instr_tokens = torch.tensor([self.tokenizer.convert_tokens_to_ids(tokens)]).cuda() segment_ids = torch.zeros_like(instr_tokens) instr_masks = instr_tokens > 0 # get path features features, boxes, probs, masks = self.get_path_features( sample.scan, sample.path, sample.heading ) # convert data into tensors image_features = torch.tensor([features]).float().cuda() image_locations = torch.tensor([boxes]).float().cuda() 
image_masks = torch.tensor([masks]).long().cuda() co_attention_mask = torch.zeros(2, 8 * 101, 60).long() return ( instr_tokens, image_features, image_locations, segment_ids, instr_masks, image_masks, co_attention_mask, None, ) def get_path_features(self, scan_id: str, path: List[str], first_heading: float): """ Get features for a given path. """ headings = get_headings(self.graphs[scan_id], path, first_heading) # for next headings duplicate the last next_headings = headings[1:] + [headings[-1]] path_length = min(len(path), 8) path_features, path_boxes, path_probs, path_masks = [], [], [], [] for path_idx, path_id in enumerate(path[:path_length]): key = scan_id + "-" + path_id # get image features features, boxes, probs = self.features[ key.encode(), headings[path_idx], next_headings[path_idx], ] num_boxes = min(len(boxes), 101) # pad features and boxes (if needed) pad_features = np.zeros((101, 2048)) pad_features[:num_boxes] = features[:num_boxes] pad_boxes = np.zeros((101, 12)) pad_boxes[:num_boxes, :11] = boxes[:num_boxes, :11] pad_boxes[:, 11] = np.ones(101) * path_idx pad_probs = np.zeros((101, 1601)) pad_probs[:num_boxes] = probs[:num_boxes] box_pad_length = 101 - num_boxes pad_masks = [1] * num_boxes + [0] * box_pad_length path_features.append(pad_features) path_boxes.append(pad_boxes) path_probs.append(pad_probs) path_masks.append(pad_masks) # pad path lists (if needed) for path_idx in range(path_length, 8): pad_features = np.zeros((101, 2048)) pad_boxes = np.zeros((101, 12)) pad_boxes[:, 11] = np.ones(101) * path_idx pad_probs = np.zeros((101, 1601)) pad_masks = [0] * 101 path_features.append(pad_features) path_boxes.append(pad_boxes) path_probs.append(pad_probs) path_masks.append(pad_masks) return ( np.vstack(path_features), np.vstack(path_boxes), np.vstack(path_probs), np.hstack(path_masks), ) @torch.no_grad() def vlm_replacement(self, orig_word: str, sample: Sample) -> str: mask_token = self.tokenizer.convert_tokens_to_ids("[MASK]") inputs = 
self.get_model_input(sample) output = self.model(*inputs) instr_token = inputs[0] if instr_token is None: raise RuntimeError() instr_token = instr_token[0] word_idx = (instr_token.cpu() == mask_token).int().argmax() linguistic_predictions = output[2].view(-1, output[2].shape[-1]) values, indices = torch.sort(linguistic_predictions[word_idx], descending=True) for index in indices.tolist(): token = self.tokenizer.convert_ids_to_tokens(index) if ( not token in stopwords.words() and not is_punctuation(token) and same_meaning(token, orig_word.strip()) ): instr_token[word_idx] = index break tokens = self.tokenizer.convert_ids_to_tokens(instr_token) return self.tokenizer.convert_tokens_to_string(tokens) def mask(self, sentence: str) -> Iterator[Tuple[str, str]]: words_of_interest = self.highlighter(sentence) cursor = 0 for word in words_of_interest: position = sentence[cursor:].find(word) if position == -1: continue masked_sentence = sentence[:cursor] + sentence[cursor:].replace( word, "[MASK]", 1 ) cursor += position yield (word, masked_sentence) def __call__(self, sample: Sample) -> Iterator[str]: for orig_word, masked_sentence in self.mask(sample.instr): masked_sample = copy.deepcopy(sample) masked_sample.instr = masked_sentence fixed_sentence = self.vlm_replacement(orig_word, masked_sample) yield fixed_sentence @dataclass class RemainPerturbation(Perturbation): perturbator: Perturbation = field(default_factory=lambda: Perturbation()) num_clues: int = 1 def __call__(self, sample: Sample) -> Iterator[str]: sentence = sample.instr # We need to add space between each word to avoid a mismatch tokens = self.tokenizer.tokenize(sentence.lower().rstrip()) fake_sentence = " ".join([str(token) for token in tokens]) segments = self.perturbator.segment(fake_sentence) corruptable = [i for i, tok in enumerate(segments) if tok.mode != "NONE"] random.shuffle(corruptable) cand_size = len(corruptable) - self.num_clues if cand_size <= 0: return candidates = combinations(corruptable, 
cand_size) for candidate in candidates: yield "".join( [ random.choice(s.cand) if i in candidate else s.text for i, s in enumerate(segments) ] ) @dataclass class CombinePerturbation(Perturbation): perturbators: List[Perturbation] = field(default_factory=list) @staticmethod def add_segments( proposals: List[TokenPerturbation], segments: List[TokenPerturbation] ) -> List[TokenPerturbation]: # Assuming that the segments are sorted merged: List[TokenPerturbation] = [] for segment in segments: if segment.mode != "NONE": merged.append(segment) continue start, end = segment.span subsegment = False for proposal in proposals: if proposal.mode == "NONE": continue if proposal.span[0] < start or proposal.span[1] > end: continue # a proposal is inside a NONE segment --> we divide the NONE segment # keep the part of the segment before if proposal.span[0] > start: length = proposal.span[0] - start merged.append( TokenPerturbation( segment.text[start : start + length], (start, proposal.span[0] - 1), "NONE", ) ) start = proposal.span[1] + 1 subsegment = True # add the proposal merged.append(proposal) # add the end of the current segment if subsegment and merged[-1].span[1] < end: merged.append( TokenPerturbation( segment.text[merged[-1].span[1] + 1 :], (merged[-1].span[1] + 1, end), "NONE", ) ) if not subsegment: merged.append(segment) return merged def segment(self, sentence: str) -> List[TokenPerturbation]: segments: List[TokenPerturbation] = [] for perturbator in self.perturbators: proposals = perturbator.segment(sentence) if segments == []: segments = proposals else: segments = CombinePerturbation.add_segments(proposals, segments) return segments @dataclass class DeafPerturbation(Perturbation): def __call__(self, sample: Sample) -> Iterator[str]: yield "" @dataclass class StopWordPerturbation(Perturbation): tokenizer: SpacyTokenizer = SpacyTokenizer(nlp.vocab) @staticmethod def extended_is_stop(token: Token) -> bool: stop_words = nlp.Defaults.stop_words return token.is_stop or 
token.lower_ in stop_words or token.lemma_ in stop_words def __call__(self, sample: Sample) -> Iterator[str]: sentence = sample.instr doc = self.tokenizer(sentence) yield "".join( [ token.text_with_ws for token in doc if not StopWordPerturbation.extended_is_stop(token) ] ) @dataclass class DirectionPerturbation(Perturbation): predictor: Predictor = field( default_factory=lambda: Predictor.from_path( "https://storage.googleapis.com/allennlp-public-models/biaffine-dependency-parser-ptb-2020.04.06.tar.gz" ) ) keywords: Tuple[List[str], ...] = ( ["left"], ["right"], ["upstairs", "up"], ["downstairs", "down"], ["forward", "straight"], ["inside"], ["outside"], ["around"], ) action_verbs: Tuple[str, ...] = ( "turn", "veer", "walk", "go", "exit", "move", "continue", "head", "stop", "enter", ) def __post_init__(self) -> None: self._pattern = "|".join( [f"(?<!\w){word}(?!\w)" for cat in self.keywords for word in cat] ) @staticmethod def _search_real_span(tree: Dict) -> Tuple[int, int]: start = tree["spans"][0]["start"] end = tree["spans"][0]["end"] if "children" in tree: for leaf in tree["children"]: cstart, cend = DirectionPerturbation._search_real_span(leaf) start = min(start, cstart) end = max(end, cend) return start, end @staticmethod def _span_pos_to_span_tok(tokens: List[str], start_pos: int, end_pos: int): """ >>> sentence = 'Take a left and go down the stairs.' 
>>> result = re.search(r"go down", sentence) >>> span_pos_to_span_tok(sentence.split(" "), *result.span()) (4, 5) """ counter = 0 start = -1 end = -1 for i, token in enumerate(tokens): if start < 0 and counter >= start_pos: start = i counter += len(str(token)) + 1 if end < 0 and counter > end_pos - 1: end = i return start, end @staticmethod def _get_parent_attr(tree: Dict, start: int, end: int, dep_attr: str = "ROOT"): tspan = DirectionPerturbation._search_real_span(tree) # print(tree["spans"][0], tree["word"], tspan) if tree["spans"][0]["start"] == start and tree["spans"][0]["end"] == end + 1: return dep_attr if tspan[0] == start and tspan[1] == end + 1: return dep_attr if "children" in tree: for child in tree["children"]: cand_dep_attr = DirectionPerturbation._get_parent_attr( child, start, end, dep_attr=tree["attributes"][0] ) if cand_dep_attr is not None: return cand_dep_attr def segment(self, sentence: str) -> List[TokenPerturbation]: """ We split sentence into sub sentence, as it increases the performances of the dependency parser. """ segments = [] splits = sentence.split(". ") offset = 0 for i, phrase in enumerate(splits): new_segments = self._segment_phrase(phrase) for segment in new_segments: start, end = segment.span start += offset end += offset segment.span = (start, end) segments += new_segments if i != len(splits) - 1: segments[-1].text += ". 
" start, end = segments[-1].span end += 2 segments[-1].span = (start, end) offset = segments[-1].span[1] + 1 return segments def _segment_phrase(self, sentence: str) -> List[TokenPerturbation]: segments = self._get_detection_tokens(sentence) return fill_with_none(sentence, segments) def _get_detection_tokens(self, sentence: str) -> List[TokenPerturbation]: detection_tokens = [] tokens = self.tokenizer.tokenize(sentence) pred = self.predictor.predict(sentence=sentence) # type: ignore tree = pred["hierplane_tree"]["root"] for result in re.finditer(self._pattern, sentence): found = result.group() start, end = result.span() # start += len(found) - len(re.sub("^\W", "", found)) # end -= len(found) - len(re.sub("\W$", "", found)) word_idx, _ = DirectionPerturbation._span_pos_to_span_tok( tokens, start, end ) previous_word = str(tokens[word_idx - 1]).lower() if word_idx > 0 else None attr = DirectionPerturbation._get_parent_attr(tree, start, end) # print(" ", previous_word, found, start, end, attr) if ( attr != "VERB" # and attr != "ROOT" and previous_word not in self.action_verbs ): # print(" ", previous_word, found, start, end, attr) continue for cat_index, cat in enumerate(self.keywords): if found in cat: break if found not in self.keywords[cat_index]: raise RuntimeError(f"Can't find {found} among {self.keywords}") replacements: List[str] = [] for i, words in enumerate(self.keywords): if i != cat_index: replacements += words detection_tokens.append( TokenPerturbation(found, (start, end - 1), "DIRECTION", replacements) ) return detection_tokens @dataclass class LocationPerturbation(Perturbation): # next to is not in the list, because it has several meaning (left? right?) 
patterns_adv = r"(?<!\w)(?:on|in|at|to|into|onto)\s?(?:your|the|th)?\s(?:left|right|bottom|top|front|middle)(?: of| side of)?(?!\w)|behind|underneath|\Wover\W|above" keywords_adv: Optional[Dict] = None patterns_adj = r"(?:left|right)(?:most|side)" keywords_adj: Optional[Dict] = None @staticmethod def _gen_keywords(direction): return [ f"{prep}{conj} {direction}{filler}" for prep, conj, filler in product( ["into", "onto", "on", "in", "at", "to"], [" your", "", " the"], ["", " side of", " of"], ) ] def __post_init__(self): if self.keywords_adv is None: self.keywords_adv = defaultdict(list) self.keywords_adv["top"] = ["over", "above"] self.keywords_adv["bottom"] = ["underneath", "below"] self.keywords_adv["behind"] = ["behind"] for direction in ("left", "right", "bottom", "top", "front", "middle"): self.keywords_adv[direction] += self._gen_keywords(direction) if self.keywords_adj is None: self.keywords_adj = { direction: [f"{direction}{adv}" for adv in ["most", "side"]] for direction in ["left", "right"] } @staticmethod def _pick_replacement(found: str, keywords: Dict[str, str]) -> List[str]: strip = re.sub("^\W|\W$", "", found) strip = re.sub(" +", " ", strip) for cat, words in keywords.items(): if strip in words: break if strip not in words: raise RuntimeError(f"Can't find '{strip}' among '{keywords}'") replacements: List[str] = [] for name, words in keywords.items(): if name != cat: replacements += words return replacements def segment(self, sentence: str) -> List[TokenPerturbation]: if self.keywords_adv is None or self.keywords_adj is None: raise RuntimeError("Not correctly initialized") location_tokens = [] for result in re.finditer(self.patterns_adv, sentence.lower()): found = result.group() start, end = result.span() replace = self._pick_replacement(found, self.keywords_adv) location_tokens.append( TokenPerturbation(found, (start, end - 1), "LOCATION", replace) ) for result in re.finditer(self.patterns_adj, sentence.lower()): found = result.group() start, end = 
result.span() replace = self._pick_replacement(found, self.keywords_adj) location_tokens.append( TokenPerturbation(found, (start, end - 1), "LOCATION", replace) ) return fill_with_none(sentence, location_tokens) @dataclass class NounPhrasePerturbation(Perturbation): num_perturbations: int = 1 predictor: Predictor = field( default_factory=lambda: Predictor.from_path( "/gpfsdswork/projects/rech/vuo/uok79zh/.allennlp/elmo/" # "https://storage.googleapis.com/allennlp-public-models/elmo-constituency-parser-2020.02.10.tar.gz" ) ) # turn is causing a lot of confusion to the parser forbidden_words: Tuple = ("turn",) min_len: int = 2 max_len: int = 6 mode: str = "NP" def _retrieve_noun_phrases( self, sentence: str, tree: Dict, pos: int = 0 ) -> List[TokenPerturbation]: """ Return a dictionary with noun phrases and the spanning positions max_len is a protection against parser failures """ noun_phrases: List[TokenPerturbation] = [] next_char = len(tree["word"]) + pos if next_char < len(sentence) and sentence[next_char].isspace(): tree["word"] += " " # print(tree["word"], pos) # offset the position as we decode this tree inner_pos = 0 for children in tree["children"]: next_char = len(children["word"]) + pos + inner_pos if next_char < len(sentence) and sentence[next_char].isspace(): children["word"] += " " # print( # "---", # children["word"], # f"{pos+inner_pos} ({inner_pos}+{pos}) => {pos+inner_pos+len(children['word']) - 1}", # sentence[pos + inner_pos : pos + inner_pos + len(children["word"])], # ) if children["nodeType"] == "NP": proposal = children["word"] num_tokens = len(self.tokenizer.tokenize(proposal)) if ( "." 
not in proposal and self.min_len <= num_tokens and num_tokens <= self.max_len and all(word not in proposal for word in self.forbidden_words) ): start = tree["word"][inner_pos:].find(proposal) + pos + inner_pos end = start + len(proposal) - 1 noun_phrases.append( TokenPerturbation(proposal, (start, end), self.mode) ) inner_pos += len(children["word"]) continue if "children" in children: start = ( tree["word"][inner_pos:].find(children["word"]) + pos + inner_pos ) noun_phrases += self._retrieve_noun_phrases( sentence, children, pos=start ) inner_pos += len(children["word"]) return noun_phrases def segment(self, sentence: str) -> List[TokenPerturbation]: preds = self.predictor.predict(sentence=sentence) # type: ignore noun_phrases = self._retrieve_noun_phrases( sentence, preds["hierplane_tree"]["root"] ) # sort the noun phrases by start span noun_phrases = sorted(noun_phrases, key=lambda item: item.span[0]) return fill_with_none(sentence, noun_phrases) @dataclass class SwitchPerturbation(NounPhrasePerturbation): num_perturbations: int = 2 def __post_init__(self): assert self.num_perturbations == 2, self.num_perturbations def __call__(self, sample: Sample) -> Iterator[str]: sentence = sample.instr tokens = self.tokenizer.tokenize(sentence.lower().rstrip()) fake_sentence = " ".join([str(token) for token in tokens]) segments = self.segment(fake_sentence) corruptable = [i for i, tok in enumerate(segments) if tok.mode == self.mode] random.shuffle(corruptable) candidates = combinations(corruptable, self.num_perturbations) for (a, b) in candidates: a_text = segments[a].text b_text = segments[b].text a_start = a_text[0] if a_text[0].isspace() else "" a_end = a_text[-1] if a_text[-1].isspace() else "" b_start = b_text[0] if b_text[0].isspace() else "" b_end = b_text[-1] if b_text[-1].isspace() else "" if b_text.strip() == a_text.strip(): continue segments[a].text = a_start + b_text.strip() + a_end segments[b].text = b_start + a_text.strip() + b_end yield "".join([t.text for t 
in segments]) # rollback segments[b].text = b_text segments[a].text = a_text @dataclass class NounPhraseFromVocPerturbation(NounPhrasePerturbation): vocabulary: List = field(default_factory=list) num_cand: int = 10 def __post_init__(self): self.num_cand = min(10, len(self.vocabulary)) def segment(self, sentence: str) -> List[TokenPerturbation]: segments = super().segment(sentence) for segment in segments: if segment.mode == self.mode: words = random.sample(self.vocabulary, self.num_cand) start_ws = segment.text[0] if segment.text[0].isspace() else "" end_ws = segment.text[-1] if segment.text[-1].isspace() else "" segment.cand = [start_ws + cand.strip() + end_ws for cand in words] return segments @dataclass class NounPhraseReplacedPerturbation(NounPhrasePerturbation): replacements: Dict = field(default_factory=dict) def __post_init__(self): self._categories_to_words = defaultdict(list) for name, cat in self.replacements.items(): self._categories_to_words[cat].append(name) def segment(self, sentence: str) -> List[TokenPerturbation]: segments = super().segment(sentence) for segment in segments: if segment.mode != self.mode: continue for w in self.replacements.keys(): if w not in segment.text: continue segment.cand = [] for k, words in self._categories_to_words.items(): if k != self.replacements[w]: segment.cand += [ segment.text.replace(w, cand) for cand in words ] break return segments def perturbations_training( dataset: List[Dict], perturbator: Perturbation, num_samples: int, only_pert: bool, max_variants: int = 10, ) -> List[Dict]: counter = 0 if num_samples == -1: num_samples = len(dataset) perturbed = [] removed_instr = 0 removed_path = 0 for item in tqdm(dataset, total=num_samples): item["perturbations"] = [] for instr in item["instructions"]: it = perturbator(Sample(instr, item["path"], item["scan"], item["heading"])) item["perturbations"].append([stc for _, stc in zip(range(max_variants), it)]) if only_pert: indices = 
list(range(len(item["instructions"])))[::-1]
            # Iterate in reverse so deleting by index stays valid.
            for i in indices:
                if item["perturbations"][i] == []:
                    del item["instructions"][i]
                    del item["perturbations"][i]
                    removed_instr += 1
        if item["instructions"] == []:
            removed_path += 1
            continue
        perturbed.append(item)
        counter += 1
        if num_samples > -1 and counter >= num_samples:
            return perturbed
    print("Removed instr", removed_instr)
    print("Removed path", removed_path)
    print("Kept instr", sum(len(i["instructions"]) for i in perturbed))
    print("Kept path", len(perturbed))
    return perturbed


def perturbations_testing(
    dataset: List[Dict], perturbator: Perturbation, num_samples: int, only_pert: bool
) -> List[Dict]:
    """
    Replace each instruction in-place with its first perturbed variant
    (when the perturbator yields one) and flag it in ``item["corrupted"]``.
    ``num_samples`` is ignored and ``only_pert`` is unsupported here.
    """
    if num_samples != -1:
        print("Ignoring the parameter num_samples")
    if only_pert:
        raise NotImplementedError()
    # NOTE(review): items are mutated in-place; ``perturbed`` aliases ``dataset``.
    perturbed = dataset
    for item in tqdm(dataset):
        item["corrupted"] = [False] * len(item["instructions"])
        for i, instr in enumerate(item["instructions"]):
            it = perturbator(Sample(instr, item["path"], item["scan"], item["heading"]))
            try:
                item["instructions"][i] = next(it)
                item["corrupted"][i] = True
            except StopIteration:
                # Perturbator produced no variant; keep the original instruction.
                pass
    return perturbed


def get_perturbator(
    mode: str,
    num_perturbations: int = 1,
    num_clues: int = 1,
    mask: bool = False,
    bert: bool = False,
) -> Perturbation:
    """
    Build the perturbator selected by ``mode`` (substring match), optionally
    wrapped by MaskPerturbation (``mask``), BertPerturbation (``bert``) and/or
    RemainPerturbation (``"remain" in mode``).
    """
    perturbators: List[Perturbation] = []
    if "direction" in mode:
        perturbators.append(DirectionPerturbation(num_perturbations=num_perturbations))
    elif "location" in mode:
        perturbators.append(LocationPerturbation(num_perturbations=num_perturbations))
    elif "deaf" in mode:
        perturbators.append(DeafPerturbation())
    elif "stop" in mode:
        perturbators.append(StopWordPerturbation())
    elif "object" in mode:
        # Noun phrases containing these words confuse the parser or denote
        # rooms rather than objects, so they are never corrupted.
        forbidden: Tuple = (
            "turn",
            "room",
            "kitchen",
            "cellar",
            "hall",
            "office",
            "garage",
            "cabinet",
        )
        if "switch" in mode:
            perturbators.append(SwitchPerturbation(forbidden_words=forbidden))
        else:
            with open("data/task/noun_phrases.txt") as fid:
                voc = [w.strip() for w in fid.readlines()]
            perturbators.append(
                NounPhraseFromVocPerturbation(
                    forbidden_words=forbidden,
                    vocabulary=voc,
                    num_perturbations=num_perturbations,
                )
            )
    elif "room" in mode:
        forbidden = ("turn",)
        # Each line of rooms.txt is "name, replacement-category".
        with open("data/task/rooms.txt") as fid:
            replacements = {}
            for w in fid.readlines():
                name, replace = w.strip().split(", ")
                replacements[name] = replace
        perturbators.append(
            NounPhraseReplacedPerturbation(
                forbidden_words=forbidden,
                replacements=replacements,
                num_perturbations=num_perturbations,
            )
        )
    elif "swap" in mode:
        perturbators.append(SwitchPerturbation(forbidden_words=("turn",)))
    elif mode == "vilbert":
        perturbators.append(VilbertPerturbation())
    if len(perturbators) == 0:
        raise RuntimeError()
    # Mask/BERT wrapping only supports a single perturbation per sentence.
    if mask or bert:
        assert num_perturbations == 1, num_perturbations
        perturbators = [MaskPerturbation(perturbator=p) for p in perturbators]
    if bert:
        assert num_perturbations == 1, num_perturbations
        perturbators = [BertPerturbation(perturbator=p) for p in perturbators]
    if len(perturbators) == 1:
        perturbator: Perturbation = perturbators[0]
    else:
        perturbator = CombinePerturbation(
            perturbators=perturbators, num_perturbations=num_perturbations
        )
    if "remain" in mode:
        perturbator = RemainPerturbation(perturbator=perturbator, num_clues=num_clues)
    return perturbator


class Arguments(argtyped.Arguments):
    # Command-line options; see argtyped for the parsing convention.
    infile: Path
    outfile: Path
    mode: str
    num_samples: int = -1
    num_perturbations: int = 1
    num_clues: int = 1
    training: bool = False
    mask: bool = False
    only_pert: bool = False
    bert: bool = False


if __name__ == "__main__":
    args = Arguments()
    print("Loading the dataset")
    dataset = read_dataset(args.infile)
    # NOTE(review): ``mode`` is assigned but never used below.
    mode = str(args.mode)
    print("Loading the perturbator")
    perturbator = get_perturbator(
        args.mode, args.num_perturbations, args.num_clues, args.mask, args.bert
    )
    print("Perturbating")
    # Training mode keeps all variants; testing mode replaces instructions
    # in place with a single variant each.
    fn = perturbations_training if args.training else perturbations_testing
    dataset = fn(dataset, perturbator, args.num_samples, args.only_pert)  # type: ignore
    print("Exporting to", args.outfile)
    save_dataset(dataset, args.outfile)
<filename>scripts/perturbate_dataset.py<gh_stars>1-10 from typing import List, Dict, Callable, Optional, Tuple, Iterator, Any, Set from typing_extensions import Literal from itertools import combinations, product import os import re import copy from collections import OrderedDict, defaultdict import string import functools import random from dataclasses import dataclass, field from pathlib import Path import json import argtyped from tqdm.auto import tqdm import networkx as nx from transformers import PreTrainedTokenizer, AutoTokenizer import numpy as np import torch from torch import nn import allennlp from allennlp.predictors.predictor import Predictor from allennlp.data.tokenizers.spacy_tokenizer import SpacyTokenizer as AllenTokenizer import allennlp_models.structured_prediction from allennlp_models.pretrained import load_predictor import nltk from nltk.corpus import stopwords from nltk.corpus import wordnet as wn from nltk.tokenize.treebank import TreebankWordDetokenizer, TreebankWordTokenizer import spacy from spacy.tokenizer import Tokenizer as SpacyTokenizer from spacy.tokens import Token from utils.dataset.pano_features_reader import PanoFeaturesReader from scripts.highlight_sentence import POSHighlighter from vln_bert import VLNBert from vilbert.vilbert import BertConfig from utils.dataset.common import get_headings, load_nav_graphs random.seed(0) nlp = spacy.load("en") # nltk.download('stopwords') def read_dataset(infile: Path): with open(infile) as fid: return json.load(fid) def save_dataset(dataset: Any, outfile: Path): with open(outfile, "w") as fid: return json.dump(dataset, fid, indent=2) @dataclass class Sample: instr: str path: List[str] scan: str heading: float @dataclass class TokenPerturbation: text: str span: Tuple[int, int] mode: str = "NONE" cand: List[str] = field(default_factory=list) def fill_with_none( sentence: str, tokens: List[TokenPerturbation] ) -> List[TokenPerturbation]: filled: List[TokenPerturbation] = [] cursor = 0 for token in 
tokens: start, end = token.span if start > cursor: filled.append( TokenPerturbation(sentence[cursor:start], (cursor, start - 1), "NONE") ) cursor = end + 1 filled.append(token) if cursor < len(sentence): filled.append( TokenPerturbation(sentence[cursor:], (cursor, len(sentence) - 1), "NONE") ) return filled def random_order_cartesian_product(*factors): """ https://stackoverflow.com/a/53895551/4986615 """ amount = functools.reduce(lambda prod, factor: prod * len(list(factor)), factors, 1) index_linked_list = [None, None] for max_index in reversed(range(amount)): index = random.randint(0, max_index) index_link = index_linked_list while index_link[1] is not None and index_link[1][0] <= index: index += 1 index_link = index_link[1] index_link[1] = [index, index_link[1]] items = [] for factor in factors: items.append(factor[index % len(factor)]) index //= len(factor) yield items @dataclass class Perturbation: num_perturbations: int = 1 tokenizer: AllenTokenizer = AllenTokenizer() def segment(self, sentence: str) -> List[TokenPerturbation]: raise NotImplementedError() def __call__(self, sample: Sample) -> Iterator[str]: sentence = sample.instr # We need to add space between each word to avoid a mismatch tokens = self.tokenizer.tokenize(sentence.lower().rstrip()) fake_sentence = " ".join([str(token) for token in tokens]) segments = self.segment(fake_sentence) corruptable = [i for i, tok in enumerate(segments) if tok.mode != "NONE"] random.shuffle(corruptable) candidates = combinations(corruptable, self.num_perturbations) cand_tokens = [list(range(len(segment.cand))) for segment in segments] iterators = {} for candidate in candidates: tokens = [cand_tokens[i] for i in candidate] iterators[candidate] = random_order_cartesian_product(*tokens) while True: if not iterators: return candidate, it = random.choice(list(iterators.items())) try: indexes = next(it) except StopIteration: del iterators[candidate] continue words = [] j = 0 for i, segment in enumerate(segments): if i in 
candidate: words.append(segment.cand[indexes[j]]) j += 1 else: words.append(segment.text) yield "".join(words) @dataclass class MaskPerturbation(Perturbation): """ Candidates are replaced by [MASK] for being replaced later by BERT """ perturbator: Perturbation = field(default_factory=lambda: Perturbation()) def segment(self, sentence: str) -> List[TokenPerturbation]: segments = self.perturbator.segment(sentence) for segment in segments: if len(segment.cand) == 0: continue mask = re.sub(r"^(\W)*.*\w+(\W)*$", r"\1[MASK]\2", segment.cand[0]) segment.cand = [mask] return segments def lm_replacements(masked_sentence: str, predictor: Predictor) -> Iterator[str]: assert "[MASK]" in masked_sentence if not isinstance( predictor, allennlp_models.lm.predictors.masked_language_model.MaskedLanguageModelPredictor, ): raise NotImplementedError() convert_tokens_to_string = ( predictor._dataset_reader._tokenizer.tokenizer.convert_tokens_to_string ) predictions = predictor.predict(masked_sentence) tokens = predictions["tokens"] # import ipdb # ipdb.set_trace() for words in product(*predictions["words"]): cand_tokens = copy.deepcopy(tokens) for i, word in enumerate(words): cand_tokens[cand_tokens.index("[MASK]", i)] = word yield convert_tokens_to_string(cand_tokens[1:-1]) @dataclass class BertPerturbation(Perturbation): """ Candidates are replaced by the best BERT prediction for being replaced later by BERT """ perturbator: Perturbation = field(default_factory=lambda: Perturbation()) predictor: Predictor = field( default_factory=lambda: load_predictor("lm-masked-language-model") ) def __call__(self, sample: Sample) -> Iterator[str]: for masked_sentence in self.perturbator(sample): for fixed_sentence in lm_replacements(masked_sentence, self.predictor): yield fixed_sentence class Graphs: def __init__(self): self._graphs: Dict[str, nx.Graph] = {} def __getitem__(self, scan: str): if scan not in self._graphs: self._graphs[scan] = load_nav_graphs([scan])[scan] return self._graphs[scan] 
def is_punctuation(s: str): return s != "" and s.translate(str.maketrans("", "", string.punctuation + string.whitespace)) == "" def load_vilbert() -> nn.Module: config = BertConfig.from_json_file( "data/config/bert_base_6_layer_6_connect.json" ) model = VLNBert.from_pretrained("data/model_zoos/vlnbert.123mine.bin", config) model = model.cuda() model.eval() return model def synonyms(name: str): return set([l.name() for s in wn.synsets(name.strip()) for l in s.lemmas()]) def hypernyms(name: str): return set([l.name() for s in wn.synsets(name.strip()) for k in s.hypernyms() for l in k.lemmas()]) def same_meaning(a: str, b: str): return b in synonyms(a) or a in synonyms(b) or b in hypernyms(a) or a in hypernyms(b) @dataclass class VilbertPerturbation(Perturbation): """ Candidates are replaced by the best VilBERT prediction for being replaced later by BERT FIXME this perturbator should not be combined directly with any other perturbator """ perturbator: Perturbation = field(default_factory=lambda: Perturbation()) model: nn.Module = field(default_factory=load_vilbert) tokenizer: AutoTokenizer = field( default_factory=lambda: AutoTokenizer.from_pretrained("bert-base-uncased") ) features: PanoFeaturesReader = field( default_factory=lambda: PanoFeaturesReader( "data/matterport-ResNet-101-faster-rcnn-genome.lmdb" ) ) highlighter: POSHighlighter = field(default_factory=POSHighlighter) graphs: Graphs = field(default_factory=Graphs) def get_model_input(self, sample: Sample) -> Tuple[Optional[torch.Tensor], ...]: tokens = self.tokenizer.tokenize(sample.instr) instr_tokens = torch.tensor([self.tokenizer.convert_tokens_to_ids(tokens)]).cuda() segment_ids = torch.zeros_like(instr_tokens) instr_masks = instr_tokens > 0 # get path features features, boxes, probs, masks = self.get_path_features( sample.scan, sample.path, sample.heading ) # convert data into tensors image_features = torch.tensor([features]).float().cuda() image_locations = torch.tensor([boxes]).float().cuda() 
image_masks = torch.tensor([masks]).long().cuda() co_attention_mask = torch.zeros(2, 8 * 101, 60).long() return ( instr_tokens, image_features, image_locations, segment_ids, instr_masks, image_masks, co_attention_mask, None, ) def get_path_features(self, scan_id: str, path: List[str], first_heading: float): """ Get features for a given path. """ headings = get_headings(self.graphs[scan_id], path, first_heading) # for next headings duplicate the last next_headings = headings[1:] + [headings[-1]] path_length = min(len(path), 8) path_features, path_boxes, path_probs, path_masks = [], [], [], [] for path_idx, path_id in enumerate(path[:path_length]): key = scan_id + "-" + path_id # get image features features, boxes, probs = self.features[ key.encode(), headings[path_idx], next_headings[path_idx], ] num_boxes = min(len(boxes), 101) # pad features and boxes (if needed) pad_features = np.zeros((101, 2048)) pad_features[:num_boxes] = features[:num_boxes] pad_boxes = np.zeros((101, 12)) pad_boxes[:num_boxes, :11] = boxes[:num_boxes, :11] pad_boxes[:, 11] = np.ones(101) * path_idx pad_probs = np.zeros((101, 1601)) pad_probs[:num_boxes] = probs[:num_boxes] box_pad_length = 101 - num_boxes pad_masks = [1] * num_boxes + [0] * box_pad_length path_features.append(pad_features) path_boxes.append(pad_boxes) path_probs.append(pad_probs) path_masks.append(pad_masks) # pad path lists (if needed) for path_idx in range(path_length, 8): pad_features = np.zeros((101, 2048)) pad_boxes = np.zeros((101, 12)) pad_boxes[:, 11] = np.ones(101) * path_idx pad_probs = np.zeros((101, 1601)) pad_masks = [0] * 101 path_features.append(pad_features) path_boxes.append(pad_boxes) path_probs.append(pad_probs) path_masks.append(pad_masks) return ( np.vstack(path_features), np.vstack(path_boxes), np.vstack(path_probs), np.hstack(path_masks), ) @torch.no_grad() def vlm_replacement(self, orig_word: str, sample: Sample) -> str: mask_token = self.tokenizer.convert_tokens_to_ids("[MASK]") inputs = 
self.get_model_input(sample) output = self.model(*inputs) instr_token = inputs[0] if instr_token is None: raise RuntimeError() instr_token = instr_token[0] word_idx = (instr_token.cpu() == mask_token).int().argmax() linguistic_predictions = output[2].view(-1, output[2].shape[-1]) values, indices = torch.sort(linguistic_predictions[word_idx], descending=True) for index in indices.tolist(): token = self.tokenizer.convert_ids_to_tokens(index) if ( not token in stopwords.words() and not is_punctuation(token) and same_meaning(token, orig_word.strip()) ): instr_token[word_idx] = index break tokens = self.tokenizer.convert_ids_to_tokens(instr_token) return self.tokenizer.convert_tokens_to_string(tokens) def mask(self, sentence: str) -> Iterator[Tuple[str, str]]: words_of_interest = self.highlighter(sentence) cursor = 0 for word in words_of_interest: position = sentence[cursor:].find(word) if position == -1: continue masked_sentence = sentence[:cursor] + sentence[cursor:].replace( word, "[MASK]", 1 ) cursor += position yield (word, masked_sentence) def __call__(self, sample: Sample) -> Iterator[str]: for orig_word, masked_sentence in self.mask(sample.instr): masked_sample = copy.deepcopy(sample) masked_sample.instr = masked_sentence fixed_sentence = self.vlm_replacement(orig_word, masked_sample) yield fixed_sentence @dataclass class RemainPerturbation(Perturbation): perturbator: Perturbation = field(default_factory=lambda: Perturbation()) num_clues: int = 1 def __call__(self, sample: Sample) -> Iterator[str]: sentence = sample.instr # We need to add space between each word to avoid a mismatch tokens = self.tokenizer.tokenize(sentence.lower().rstrip()) fake_sentence = " ".join([str(token) for token in tokens]) segments = self.perturbator.segment(fake_sentence) corruptable = [i for i, tok in enumerate(segments) if tok.mode != "NONE"] random.shuffle(corruptable) cand_size = len(corruptable) - self.num_clues if cand_size <= 0: return candidates = combinations(corruptable, 
cand_size) for candidate in candidates: yield "".join( [ random.choice(s.cand) if i in candidate else s.text for i, s in enumerate(segments) ] ) @dataclass class CombinePerturbation(Perturbation): perturbators: List[Perturbation] = field(default_factory=list) @staticmethod def add_segments( proposals: List[TokenPerturbation], segments: List[TokenPerturbation] ) -> List[TokenPerturbation]: # Assuming that the segments are sorted merged: List[TokenPerturbation] = [] for segment in segments: if segment.mode != "NONE": merged.append(segment) continue start, end = segment.span subsegment = False for proposal in proposals: if proposal.mode == "NONE": continue if proposal.span[0] < start or proposal.span[1] > end: continue # a proposal is inside a NONE segment --> we divide the NONE segment # keep the part of the segment before if proposal.span[0] > start: length = proposal.span[0] - start merged.append( TokenPerturbation( segment.text[start : start + length], (start, proposal.span[0] - 1), "NONE", ) ) start = proposal.span[1] + 1 subsegment = True # add the proposal merged.append(proposal) # add the end of the current segment if subsegment and merged[-1].span[1] < end: merged.append( TokenPerturbation( segment.text[merged[-1].span[1] + 1 :], (merged[-1].span[1] + 1, end), "NONE", ) ) if not subsegment: merged.append(segment) return merged def segment(self, sentence: str) -> List[TokenPerturbation]: segments: List[TokenPerturbation] = [] for perturbator in self.perturbators: proposals = perturbator.segment(sentence) if segments == []: segments = proposals else: segments = CombinePerturbation.add_segments(proposals, segments) return segments @dataclass class DeafPerturbation(Perturbation): def __call__(self, sample: Sample) -> Iterator[str]: yield "" @dataclass class StopWordPerturbation(Perturbation): tokenizer: SpacyTokenizer = SpacyTokenizer(nlp.vocab) @staticmethod def extended_is_stop(token: Token) -> bool: stop_words = nlp.Defaults.stop_words return token.is_stop or 
token.lower_ in stop_words or token.lemma_ in stop_words def __call__(self, sample: Sample) -> Iterator[str]: sentence = sample.instr doc = self.tokenizer(sentence) yield "".join( [ token.text_with_ws for token in doc if not StopWordPerturbation.extended_is_stop(token) ] ) @dataclass class DirectionPerturbation(Perturbation): predictor: Predictor = field( default_factory=lambda: Predictor.from_path( "https://storage.googleapis.com/allennlp-public-models/biaffine-dependency-parser-ptb-2020.04.06.tar.gz" ) ) keywords: Tuple[List[str], ...] = ( ["left"], ["right"], ["upstairs", "up"], ["downstairs", "down"], ["forward", "straight"], ["inside"], ["outside"], ["around"], ) action_verbs: Tuple[str, ...] = ( "turn", "veer", "walk", "go", "exit", "move", "continue", "head", "stop", "enter", ) def __post_init__(self) -> None: self._pattern = "|".join( [f"(?<!\w){word}(?!\w)" for cat in self.keywords for word in cat] ) @staticmethod def _search_real_span(tree: Dict) -> Tuple[int, int]: start = tree["spans"][0]["start"] end = tree["spans"][0]["end"] if "children" in tree: for leaf in tree["children"]: cstart, cend = DirectionPerturbation._search_real_span(leaf) start = min(start, cstart) end = max(end, cend) return start, end @staticmethod def _span_pos_to_span_tok(tokens: List[str], start_pos: int, end_pos: int): """ >>> sentence = 'Take a left and go down the stairs.' 
>>> result = re.search(r"go down", sentence) >>> span_pos_to_span_tok(sentence.split(" "), *result.span()) (4, 5) """ counter = 0 start = -1 end = -1 for i, token in enumerate(tokens): if start < 0 and counter >= start_pos: start = i counter += len(str(token)) + 1 if end < 0 and counter > end_pos - 1: end = i return start, end @staticmethod def _get_parent_attr(tree: Dict, start: int, end: int, dep_attr: str = "ROOT"): tspan = DirectionPerturbation._search_real_span(tree) # print(tree["spans"][0], tree["word"], tspan) if tree["spans"][0]["start"] == start and tree["spans"][0]["end"] == end + 1: return dep_attr if tspan[0] == start and tspan[1] == end + 1: return dep_attr if "children" in tree: for child in tree["children"]: cand_dep_attr = DirectionPerturbation._get_parent_attr( child, start, end, dep_attr=tree["attributes"][0] ) if cand_dep_attr is not None: return cand_dep_attr def segment(self, sentence: str) -> List[TokenPerturbation]: """ We split sentence into sub sentence, as it increases the performances of the dependency parser. """ segments = [] splits = sentence.split(". ") offset = 0 for i, phrase in enumerate(splits): new_segments = self._segment_phrase(phrase) for segment in new_segments: start, end = segment.span start += offset end += offset segment.span = (start, end) segments += new_segments if i != len(splits) - 1: segments[-1].text += ". 
" start, end = segments[-1].span end += 2 segments[-1].span = (start, end) offset = segments[-1].span[1] + 1 return segments def _segment_phrase(self, sentence: str) -> List[TokenPerturbation]: segments = self._get_detection_tokens(sentence) return fill_with_none(sentence, segments) def _get_detection_tokens(self, sentence: str) -> List[TokenPerturbation]: detection_tokens = [] tokens = self.tokenizer.tokenize(sentence) pred = self.predictor.predict(sentence=sentence) # type: ignore tree = pred["hierplane_tree"]["root"] for result in re.finditer(self._pattern, sentence): found = result.group() start, end = result.span() # start += len(found) - len(re.sub("^\W", "", found)) # end -= len(found) - len(re.sub("\W$", "", found)) word_idx, _ = DirectionPerturbation._span_pos_to_span_tok( tokens, start, end ) previous_word = str(tokens[word_idx - 1]).lower() if word_idx > 0 else None attr = DirectionPerturbation._get_parent_attr(tree, start, end) # print(" ", previous_word, found, start, end, attr) if ( attr != "VERB" # and attr != "ROOT" and previous_word not in self.action_verbs ): # print(" ", previous_word, found, start, end, attr) continue for cat_index, cat in enumerate(self.keywords): if found in cat: break if found not in self.keywords[cat_index]: raise RuntimeError(f"Can't find {found} among {self.keywords}") replacements: List[str] = [] for i, words in enumerate(self.keywords): if i != cat_index: replacements += words detection_tokens.append( TokenPerturbation(found, (start, end - 1), "DIRECTION", replacements) ) return detection_tokens @dataclass class LocationPerturbation(Perturbation): # next to is not in the list, because it has several meaning (left? right?) 
patterns_adv = r"(?<!\w)(?:on|in|at|to|into|onto)\s?(?:your|the|th)?\s(?:left|right|bottom|top|front|middle)(?: of| side of)?(?!\w)|behind|underneath|\Wover\W|above" keywords_adv: Optional[Dict] = None patterns_adj = r"(?:left|right)(?:most|side)" keywords_adj: Optional[Dict] = None @staticmethod def _gen_keywords(direction): return [ f"{prep}{conj} {direction}{filler}" for prep, conj, filler in product( ["into", "onto", "on", "in", "at", "to"], [" your", "", " the"], ["", " side of", " of"], ) ] def __post_init__(self): if self.keywords_adv is None: self.keywords_adv = defaultdict(list) self.keywords_adv["top"] = ["over", "above"] self.keywords_adv["bottom"] = ["underneath", "below"] self.keywords_adv["behind"] = ["behind"] for direction in ("left", "right", "bottom", "top", "front", "middle"): self.keywords_adv[direction] += self._gen_keywords(direction) if self.keywords_adj is None: self.keywords_adj = { direction: [f"{direction}{adv}" for adv in ["most", "side"]] for direction in ["left", "right"] } @staticmethod def _pick_replacement(found: str, keywords: Dict[str, str]) -> List[str]: strip = re.sub("^\W|\W$", "", found) strip = re.sub(" +", " ", strip) for cat, words in keywords.items(): if strip in words: break if strip not in words: raise RuntimeError(f"Can't find '{strip}' among '{keywords}'") replacements: List[str] = [] for name, words in keywords.items(): if name != cat: replacements += words return replacements def segment(self, sentence: str) -> List[TokenPerturbation]: if self.keywords_adv is None or self.keywords_adj is None: raise RuntimeError("Not correctly initialized") location_tokens = [] for result in re.finditer(self.patterns_adv, sentence.lower()): found = result.group() start, end = result.span() replace = self._pick_replacement(found, self.keywords_adv) location_tokens.append( TokenPerturbation(found, (start, end - 1), "LOCATION", replace) ) for result in re.finditer(self.patterns_adj, sentence.lower()): found = result.group() start, end = 
result.span() replace = self._pick_replacement(found, self.keywords_adj) location_tokens.append( TokenPerturbation(found, (start, end - 1), "LOCATION", replace) ) return fill_with_none(sentence, location_tokens) @dataclass class NounPhrasePerturbation(Perturbation): num_perturbations: int = 1 predictor: Predictor = field( default_factory=lambda: Predictor.from_path( "/gpfsdswork/projects/rech/vuo/uok79zh/.allennlp/elmo/" # "https://storage.googleapis.com/allennlp-public-models/elmo-constituency-parser-2020.02.10.tar.gz" ) ) # turn is causing a lot of confusion to the parser forbidden_words: Tuple = ("turn",) min_len: int = 2 max_len: int = 6 mode: str = "NP" def _retrieve_noun_phrases( self, sentence: str, tree: Dict, pos: int = 0 ) -> List[TokenPerturbation]: """ Return a dictionary with noun phrases and the spanning positions max_len is a protection against parser failures """ noun_phrases: List[TokenPerturbation] = [] next_char = len(tree["word"]) + pos if next_char < len(sentence) and sentence[next_char].isspace(): tree["word"] += " " # print(tree["word"], pos) # offset the position as we decode this tree inner_pos = 0 for children in tree["children"]: next_char = len(children["word"]) + pos + inner_pos if next_char < len(sentence) and sentence[next_char].isspace(): children["word"] += " " # print( # "---", # children["word"], # f"{pos+inner_pos} ({inner_pos}+{pos}) => {pos+inner_pos+len(children['word']) - 1}", # sentence[pos + inner_pos : pos + inner_pos + len(children["word"])], # ) if children["nodeType"] == "NP": proposal = children["word"] num_tokens = len(self.tokenizer.tokenize(proposal)) if ( "." 
not in proposal and self.min_len <= num_tokens and num_tokens <= self.max_len and all(word not in proposal for word in self.forbidden_words) ): start = tree["word"][inner_pos:].find(proposal) + pos + inner_pos end = start + len(proposal) - 1 noun_phrases.append( TokenPerturbation(proposal, (start, end), self.mode) ) inner_pos += len(children["word"]) continue if "children" in children: start = ( tree["word"][inner_pos:].find(children["word"]) + pos + inner_pos ) noun_phrases += self._retrieve_noun_phrases( sentence, children, pos=start ) inner_pos += len(children["word"]) return noun_phrases def segment(self, sentence: str) -> List[TokenPerturbation]: preds = self.predictor.predict(sentence=sentence) # type: ignore noun_phrases = self._retrieve_noun_phrases( sentence, preds["hierplane_tree"]["root"] ) # sort the noun phrases by start span noun_phrases = sorted(noun_phrases, key=lambda item: item.span[0]) return fill_with_none(sentence, noun_phrases) @dataclass class SwitchPerturbation(NounPhrasePerturbation): num_perturbations: int = 2 def __post_init__(self): assert self.num_perturbations == 2, self.num_perturbations def __call__(self, sample: Sample) -> Iterator[str]: sentence = sample.instr tokens = self.tokenizer.tokenize(sentence.lower().rstrip()) fake_sentence = " ".join([str(token) for token in tokens]) segments = self.segment(fake_sentence) corruptable = [i for i, tok in enumerate(segments) if tok.mode == self.mode] random.shuffle(corruptable) candidates = combinations(corruptable, self.num_perturbations) for (a, b) in candidates: a_text = segments[a].text b_text = segments[b].text a_start = a_text[0] if a_text[0].isspace() else "" a_end = a_text[-1] if a_text[-1].isspace() else "" b_start = b_text[0] if b_text[0].isspace() else "" b_end = b_text[-1] if b_text[-1].isspace() else "" if b_text.strip() == a_text.strip(): continue segments[a].text = a_start + b_text.strip() + a_end segments[b].text = b_start + a_text.strip() + b_end yield "".join([t.text for t 
in segments]) # rollback segments[b].text = b_text segments[a].text = a_text @dataclass class NounPhraseFromVocPerturbation(NounPhrasePerturbation): vocabulary: List = field(default_factory=list) num_cand: int = 10 def __post_init__(self): self.num_cand = min(10, len(self.vocabulary)) def segment(self, sentence: str) -> List[TokenPerturbation]: segments = super().segment(sentence) for segment in segments: if segment.mode == self.mode: words = random.sample(self.vocabulary, self.num_cand) start_ws = segment.text[0] if segment.text[0].isspace() else "" end_ws = segment.text[-1] if segment.text[-1].isspace() else "" segment.cand = [start_ws + cand.strip() + end_ws for cand in words] return segments @dataclass class NounPhraseReplacedPerturbation(NounPhrasePerturbation): replacements: Dict = field(default_factory=dict) def __post_init__(self): self._categories_to_words = defaultdict(list) for name, cat in self.replacements.items(): self._categories_to_words[cat].append(name) def segment(self, sentence: str) -> List[TokenPerturbation]: segments = super().segment(sentence) for segment in segments: if segment.mode != self.mode: continue for w in self.replacements.keys(): if w not in segment.text: continue segment.cand = [] for k, words in self._categories_to_words.items(): if k != self.replacements[w]: segment.cand += [ segment.text.replace(w, cand) for cand in words ] break return segments def perturbations_training( dataset: List[Dict], perturbator: Perturbation, num_samples: int, only_pert: bool, max_variants: int = 10, ) -> List[Dict]: counter = 0 if num_samples == -1: num_samples = len(dataset) perturbed = [] removed_instr = 0 removed_path = 0 for item in tqdm(dataset, total=num_samples): item["perturbations"] = [] for instr in item["instructions"]: it = perturbator(Sample(instr, item["path"], item["scan"], item["heading"])) item["perturbations"].append([stc for _, stc in zip(range(max_variants), it)]) if only_pert: indices = 
list(range(len(item["instructions"])))[::-1] for i in indices: if item["perturbations"][i] == []: del item["instructions"][i] del item["perturbations"][i] removed_instr += 1 if item["instructions"] == []: removed_path += 1 continue perturbed.append(item) counter += 1 if num_samples > -1 and counter >= num_samples: return perturbed print("Removed instr", removed_instr) print("Removed path", removed_path) print("Kept instr", sum(len(i["instructions"]) for i in perturbed )) print("Kept path", len(perturbed)) return perturbed def perturbations_testing( dataset: List[Dict], perturbator: Perturbation, num_samples: int, only_pert: bool ) -> List[Dict]: if num_samples != -1: print("Ignoring the parameter num_samples") if only_pert: raise NotImplementedError() perturbed = dataset for item in tqdm(dataset): item["corrupted"] = [False] * len(item["instructions"]) for i, instr in enumerate(item["instructions"]): it = perturbator(Sample(instr, item["path"], item["scan"], item["heading"])) try: item["instructions"][i] = next(it) item["corrupted"][i] = True except StopIteration: pass return perturbed def get_perturbator( mode: str, num_perturbations: int = 1, num_clues: int = 1, mask: bool = False, bert: bool = False, ) -> Perturbation: perturbators: List[Perturbation] = [] if "direction" in mode: perturbators.append(DirectionPerturbation(num_perturbations=num_perturbations)) elif "location" in mode: perturbators.append(LocationPerturbation(num_perturbations=num_perturbations)) elif "deaf" in mode: perturbators.append(DeafPerturbation()) elif "stop" in mode: perturbators.append(StopWordPerturbation()) elif "object" in mode: forbidden: Tuple = ( "turn", "room", "kitchen", "cellar", "hall", "office", "garage", "cabinet", ) if "switch" in mode: perturbators.append(SwitchPerturbation(forbidden_words=forbidden)) else: with open("data/task/noun_phrases.txt") as fid: voc = [w.strip() for w in fid.readlines()] perturbators.append( NounPhraseFromVocPerturbation( forbidden_words=forbidden, 
vocabulary=voc, num_perturbations=num_perturbations, ) ) elif "room" in mode: forbidden = ("turn",) with open("data/task/rooms.txt") as fid: replacements = {} for w in fid.readlines(): name, replace = w.strip().split(", ") replacements[name] = replace perturbators.append( NounPhraseReplacedPerturbation( forbidden_words=forbidden, replacements=replacements, num_perturbations=num_perturbations, ) ) elif "swap" in mode: perturbators.append(SwitchPerturbation(forbidden_words=("turn",))) elif mode == "vilbert": perturbators.append(VilbertPerturbation()) if len(perturbators) == 0: raise RuntimeError() if mask or bert: assert num_perturbations == 1, num_perturbations perturbators = [MaskPerturbation(perturbator=p) for p in perturbators] if bert: assert num_perturbations == 1, num_perturbations perturbators = [BertPerturbation(perturbator=p) for p in perturbators] if len(perturbators) == 1: perturbator: Perturbation = perturbators[0] else: perturbator = CombinePerturbation( perturbators=perturbators, num_perturbations=num_perturbations ) if "remain" in mode: perturbator = RemainPerturbation(perturbator=perturbator, num_clues=num_clues) return perturbator class Arguments(argtyped.Arguments): infile: Path outfile: Path mode: str num_samples: int = -1 num_perturbations: int = 1 num_clues: int = 1 training: bool = False mask: bool = False only_pert: bool = False bert: bool = False if __name__ == "__main__": args = Arguments() print("Loading the dataset") dataset = read_dataset(args.infile) mode = str(args.mode) print("Loading the perturbator") perturbator = get_perturbator( args.mode, args.num_perturbations, args.num_clues, args.mask, args.bert ) print("Perturbating") fn = perturbations_training if args.training else perturbations_testing dataset = fn(dataset, perturbator, args.num_samples, args.only_pert) # type: ignore print("Exporting to", args.outfile) save_dataset(dataset, args.outfile)
en
0.821115
# nltk.download('stopwords') https://stackoverflow.com/a/53895551/4986615 # We need to add space between each word to avoid a mismatch Candidates are replaced by [MASK] for being replaced later by BERT # import ipdb # ipdb.set_trace() Candidates are replaced by the best BERT prediction for being replaced later by BERT Candidates are replaced by the best VilBERT prediction for being replaced later by BERT FIXME this perturbator should not be combined directly with any other perturbator # get path features # convert data into tensors Get features for a given path. # for next headings duplicate the last # get image features # pad features and boxes (if needed) # pad path lists (if needed) # We need to add space between each word to avoid a mismatch # Assuming that the segments are sorted # a proposal is inside a NONE segment --> we divide the NONE segment # keep the part of the segment before # add the proposal # add the end of the current segment >>> sentence = 'Take a left and go down the stairs.' >>> result = re.search(r"go down", sentence) >>> span_pos_to_span_tok(sentence.split(" "), *result.span()) (4, 5) # print(tree["spans"][0], tree["word"], tspan) We split sentence into sub sentence, as it increases the performances of the dependency parser. # type: ignore # start += len(found) - len(re.sub("^\W", "", found)) # end -= len(found) - len(re.sub("\W$", "", found)) # print(" ", previous_word, found, start, end, attr) # and attr != "ROOT" # print(" ", previous_word, found, start, end, attr) # next to is not in the list, because it has several meaning (left? right?) 
# "https://storage.googleapis.com/allennlp-public-models/elmo-constituency-parser-2020.02.10.tar.gz" # turn is causing a lot of confusion to the parser Return a dictionary with noun phrases and the spanning positions max_len is a protection against parser failures # print(tree["word"], pos) # offset the position as we decode this tree # print( # "---", # children["word"], # f"{pos+inner_pos} ({inner_pos}+{pos}) => {pos+inner_pos+len(children['word']) - 1}", # sentence[pos + inner_pos : pos + inner_pos + len(children["word"])], # ) # type: ignore # sort the noun phrases by start span # rollback # type: ignore
2.133823
2
Dudo_m_v.py
AirisFiorentini/CFR
0
6628748
<reponame>AirisFiorentini/CFR import numpy as np from pytreemap import TreeMap from typing import List from Kuhn_s_poker_matrix_v import MNode import utils class MDudoNode(MNode): # TODO: inheritance def __init__(self, NUM_ACTIONS: int, isClaimed: List[bool], NUM_SIDES: int = 6, NUM_HANDS: int = 2): self.regretSum = np.zeros((NUM_SIDES, NUM_ACTIONS)) self.strategy = np.zeros((NUM_SIDES, NUM_ACTIONS)) self.strategySum = np.zeros((NUM_SIDES, NUM_ACTIONS)) self.infoSet = "" self.NUM_ACTIONS = NUM_ACTIONS self.NUM_SIDES = NUM_SIDES self.NUM_HANDS = NUM_HANDS self.isClaimed = isClaimed # Get current information set mixed strategy through regret-matching def getStrategy(self, realizationWeight: np.ndarray) -> np.ndarray: normalizingSum = np.zeros(self.NUM_SIDES) for k in range(self.NUM_SIDES): for a in range(self.NUM_ACTIONS): self.strategy[k][a] = self.regretSum[k][a] if self.regretSum[k][a] > 0 else 0 normalizingSum[k] += self.strategy[k][a] for k in range(self.NUM_SIDES): for a in range(self.NUM_ACTIONS): if normalizingSum[k] > 0: self.strategy[k][a] /= normalizingSum[k] else: self.strategy[k][a] = 1.0 / self.NUM_ACTIONS self.strategySum[k][a] += realizationWeight[k] * self.strategy[k][a] return self.strategy # Get average information set mixed strategy across all training iterations def getAverageStrategy(self) -> np.ndarray: avgStrategy = np.zeros((self.NUM_SIDES, self.NUM_ACTIONS)) normalizingSum = np.zeros(self.NUM_SIDES) for k in range(self.NUM_SIDES): normalizingSum[k] += np.sum(self.strategySum[k]) for k in range(self.NUM_SIDES): if normalizingSum[k] > 0: avgStrategy[k] = self.strategySum[k] / normalizingSum[k] else: avgStrategy[k] = 1 / self.NUM_ACTIONS return avgStrategy class MDudoTrainer: # Dudo definitions def __init__(self): # Dudo definitions of 2 6-sided dice self.nodeMap = TreeMap() self.NUM_SIDES = 6 self.NUM_ACTIONS = (2 * self.NUM_SIDES) + 1 self.DUDO = self.NUM_ACTIONS - 1 self.claimNum = ([1] * 6) + ([2] * 6) self.claimRank = [2, 3, 4, 5, 6, 1, 2, 
3, 4, 5, 6, 1] # convert Dudo information set to an integer (binary nums) def infoSetToInt(self, isClaimed: List[bool]) -> int: str_num = '' for i in range(len(isClaimed)): if isClaimed[i]: str_num += '1' else: str_num += '0' return int(str_num, base=2) # convert Dudo claim history to a String def claimHistoryToString(self, isClaimed: List[bool]) -> str: sb = "" for a in range(self.NUM_ACTIONS): if isClaimed[a]: if len(sb) > 0: sb += ',' sb += str(self.claimNum[a]) sb += '*' sb += str(self.claimRank[a]) return sb # info set node class definitions (m_node class) # Counterfactual regret minimization iteration def m_cfr(self, isClaimed: List[bool], p0: np.ndarray, p1: np.ndarray) -> np.ndarray: # history -> isClaimed plays = isClaimed.count(True) player = plays % 2 # return payoff for terminal states if isClaimed[self.DUDO]: doubted = self.NUM_ACTIONS - 2 - isClaimed[self.NUM_ACTIONS - 2::-1].index(True) cN = self.claimNum[doubted] # quantity cR = self.claimRank[doubted] # value realDoubtedRankQuantity = np.zeros((self.NUM_SIDES, self.NUM_SIDES)) for i in range(self.NUM_SIDES): for j in range(self.NUM_SIDES): dice = [i + 1, j + 1] # '1' <- 0 in arrays realDoubtedRankQuantity[i][j] = dice.count(cR) + dice.count(1) if cR != 1 else dice.count(cR) U = np.zeros((self.NUM_SIDES, self.NUM_SIDES)) # payoffs: +1 || -1 for the first player (only!) 
for i in range(self.NUM_SIDES): for j in range(self.NUM_SIDES): if realDoubtedRankQuantity[i][j] >= cN: # for player #0: U[i][j] = 1 else: U[i][j] = -1 if player == 1: return -U else: return U infoSet = str(self.infoSetToInt(isClaimed)) # <Get information set node or create it if nonexistent> node = self.nodeMap.get(infoSet) AfterTrueIndex = self.NUM_ACTIONS - isClaimed[self.NUM_ACTIONS - 1::-1].index(True) if True in isClaimed else 0 if node is None: node = MDudoNode(self.NUM_ACTIONS - AfterTrueIndex if AfterTrueIndex > 0 else 12, isClaimed) node.infoSet = infoSet self.nodeMap.put(infoSet, node) # For each action, recursively call cfr with additional history and probability strategy = node.getStrategy(p0 if player == 0 else p1) util = np.zeros((self.NUM_SIDES, self.NUM_SIDES, self.NUM_ACTIONS)) nodeUtil = np.zeros((self.NUM_SIDES, self.NUM_SIDES)) for a in range(node.NUM_ACTIONS): nextHistory = isClaimed.copy() iter = AfterTrueIndex + a nextHistory[iter] = True if player == 0: util[:, :, a] = self.m_cfr(nextHistory, p0 * strategy[:, a], p1) for i in range(self.NUM_SIDES): nodeUtil[i, :] += util[i, :, a] * strategy[i, a] else: util[:, :, a] = self.m_cfr(nextHistory, p0, p1 * strategy[:, a]) for j in range(self.NUM_SIDES): nodeUtil[:, j] += util[:, j, a] * strategy[j, a] # For each action, compute and accumulate counterfactual regret for a in range(node.NUM_ACTIONS): # self.NUM_ACTIONS regret = util[:, :, a] - nodeUtil if player == 0: node.regretSum[:, a] += np.dot(regret, p1) else: node.regretSum[:, a] += np.dot(p0, -regret) return nodeUtil def train(self, iterations: int): results = [] eps = 0.001 util = np.zeros((6, 6)) for i in range(1, iterations + 1): print("iteration: ", i) startClaims = [False] * self.NUM_ACTIONS util += self.m_cfr(startClaims, np.array([1] * 6), np.array([1] * 6)) cur_res = np.sum(util / iterations / 36) if (i + 1) % 10 == 0: results.append(cur_res) if abs(cur_res - (-7/258)) < eps: results.append(cur_res) break 
utils.save_result_to_file(results, "Dudo_m_v_simple") print("The number of iterations: ", iterations) print(np.sum(util / iterations / 36)) agv = util / iterations / 36 # print(agv) # # for n in self.nodeMap.values(): # print cards + history # # print(n.die, self.claimHistoryToString(n.isClaimed), n.toString(), sep='|') # # print() return self, np.sum(agv) # TODO: describe a node def getNode(self, isClaimed: List[bool]) -> str: infoSet = str(self.infoSetToInt(isClaimed)) if infoSet in self.nodeMap: return self.nodeMap.get(infoSet).strategy else: return MDudoNode(self.NUM_ACTIONS, [False] * 13).toString() def getNodeStrategy(self, die: int, isClaimed: List[bool]) -> str: infoSet = str(self.infoSetToInt(isClaimed)) if infoSet in self.nodeMap: return self.nodeMap.get(infoSet).strategy[die - 1] else: return "Not found" if __name__ == '__main__': TrainRes, _ = MDudoTrainer().train(500) startClaims = [False] * 13 startClaims[2] = True print(TrainRes.getNodeStrategy(2, startClaims))
import numpy as np from pytreemap import TreeMap from typing import List from Kuhn_s_poker_matrix_v import MNode import utils class MDudoNode(MNode): # TODO: inheritance def __init__(self, NUM_ACTIONS: int, isClaimed: List[bool], NUM_SIDES: int = 6, NUM_HANDS: int = 2): self.regretSum = np.zeros((NUM_SIDES, NUM_ACTIONS)) self.strategy = np.zeros((NUM_SIDES, NUM_ACTIONS)) self.strategySum = np.zeros((NUM_SIDES, NUM_ACTIONS)) self.infoSet = "" self.NUM_ACTIONS = NUM_ACTIONS self.NUM_SIDES = NUM_SIDES self.NUM_HANDS = NUM_HANDS self.isClaimed = isClaimed # Get current information set mixed strategy through regret-matching def getStrategy(self, realizationWeight: np.ndarray) -> np.ndarray: normalizingSum = np.zeros(self.NUM_SIDES) for k in range(self.NUM_SIDES): for a in range(self.NUM_ACTIONS): self.strategy[k][a] = self.regretSum[k][a] if self.regretSum[k][a] > 0 else 0 normalizingSum[k] += self.strategy[k][a] for k in range(self.NUM_SIDES): for a in range(self.NUM_ACTIONS): if normalizingSum[k] > 0: self.strategy[k][a] /= normalizingSum[k] else: self.strategy[k][a] = 1.0 / self.NUM_ACTIONS self.strategySum[k][a] += realizationWeight[k] * self.strategy[k][a] return self.strategy # Get average information set mixed strategy across all training iterations def getAverageStrategy(self) -> np.ndarray: avgStrategy = np.zeros((self.NUM_SIDES, self.NUM_ACTIONS)) normalizingSum = np.zeros(self.NUM_SIDES) for k in range(self.NUM_SIDES): normalizingSum[k] += np.sum(self.strategySum[k]) for k in range(self.NUM_SIDES): if normalizingSum[k] > 0: avgStrategy[k] = self.strategySum[k] / normalizingSum[k] else: avgStrategy[k] = 1 / self.NUM_ACTIONS return avgStrategy class MDudoTrainer: # Dudo definitions def __init__(self): # Dudo definitions of 2 6-sided dice self.nodeMap = TreeMap() self.NUM_SIDES = 6 self.NUM_ACTIONS = (2 * self.NUM_SIDES) + 1 self.DUDO = self.NUM_ACTIONS - 1 self.claimNum = ([1] * 6) + ([2] * 6) self.claimRank = [2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1] # convert Dudo 
information set to an integer (binary nums) def infoSetToInt(self, isClaimed: List[bool]) -> int: str_num = '' for i in range(len(isClaimed)): if isClaimed[i]: str_num += '1' else: str_num += '0' return int(str_num, base=2) # convert Dudo claim history to a String def claimHistoryToString(self, isClaimed: List[bool]) -> str: sb = "" for a in range(self.NUM_ACTIONS): if isClaimed[a]: if len(sb) > 0: sb += ',' sb += str(self.claimNum[a]) sb += '*' sb += str(self.claimRank[a]) return sb # info set node class definitions (m_node class) # Counterfactual regret minimization iteration def m_cfr(self, isClaimed: List[bool], p0: np.ndarray, p1: np.ndarray) -> np.ndarray: # history -> isClaimed plays = isClaimed.count(True) player = plays % 2 # return payoff for terminal states if isClaimed[self.DUDO]: doubted = self.NUM_ACTIONS - 2 - isClaimed[self.NUM_ACTIONS - 2::-1].index(True) cN = self.claimNum[doubted] # quantity cR = self.claimRank[doubted] # value realDoubtedRankQuantity = np.zeros((self.NUM_SIDES, self.NUM_SIDES)) for i in range(self.NUM_SIDES): for j in range(self.NUM_SIDES): dice = [i + 1, j + 1] # '1' <- 0 in arrays realDoubtedRankQuantity[i][j] = dice.count(cR) + dice.count(1) if cR != 1 else dice.count(cR) U = np.zeros((self.NUM_SIDES, self.NUM_SIDES)) # payoffs: +1 || -1 for the first player (only!) 
for i in range(self.NUM_SIDES): for j in range(self.NUM_SIDES): if realDoubtedRankQuantity[i][j] >= cN: # for player #0: U[i][j] = 1 else: U[i][j] = -1 if player == 1: return -U else: return U infoSet = str(self.infoSetToInt(isClaimed)) # <Get information set node or create it if nonexistent> node = self.nodeMap.get(infoSet) AfterTrueIndex = self.NUM_ACTIONS - isClaimed[self.NUM_ACTIONS - 1::-1].index(True) if True in isClaimed else 0 if node is None: node = MDudoNode(self.NUM_ACTIONS - AfterTrueIndex if AfterTrueIndex > 0 else 12, isClaimed) node.infoSet = infoSet self.nodeMap.put(infoSet, node) # For each action, recursively call cfr with additional history and probability strategy = node.getStrategy(p0 if player == 0 else p1) util = np.zeros((self.NUM_SIDES, self.NUM_SIDES, self.NUM_ACTIONS)) nodeUtil = np.zeros((self.NUM_SIDES, self.NUM_SIDES)) for a in range(node.NUM_ACTIONS): nextHistory = isClaimed.copy() iter = AfterTrueIndex + a nextHistory[iter] = True if player == 0: util[:, :, a] = self.m_cfr(nextHistory, p0 * strategy[:, a], p1) for i in range(self.NUM_SIDES): nodeUtil[i, :] += util[i, :, a] * strategy[i, a] else: util[:, :, a] = self.m_cfr(nextHistory, p0, p1 * strategy[:, a]) for j in range(self.NUM_SIDES): nodeUtil[:, j] += util[:, j, a] * strategy[j, a] # For each action, compute and accumulate counterfactual regret for a in range(node.NUM_ACTIONS): # self.NUM_ACTIONS regret = util[:, :, a] - nodeUtil if player == 0: node.regretSum[:, a] += np.dot(regret, p1) else: node.regretSum[:, a] += np.dot(p0, -regret) return nodeUtil def train(self, iterations: int): results = [] eps = 0.001 util = np.zeros((6, 6)) for i in range(1, iterations + 1): print("iteration: ", i) startClaims = [False] * self.NUM_ACTIONS util += self.m_cfr(startClaims, np.array([1] * 6), np.array([1] * 6)) cur_res = np.sum(util / iterations / 36) if (i + 1) % 10 == 0: results.append(cur_res) if abs(cur_res - (-7/258)) < eps: results.append(cur_res) break 
utils.save_result_to_file(results, "Dudo_m_v_simple") print("The number of iterations: ", iterations) print(np.sum(util / iterations / 36)) agv = util / iterations / 36 # print(agv) # # for n in self.nodeMap.values(): # print cards + history # # print(n.die, self.claimHistoryToString(n.isClaimed), n.toString(), sep='|') # # print() return self, np.sum(agv) # TODO: describe a node def getNode(self, isClaimed: List[bool]) -> str: infoSet = str(self.infoSetToInt(isClaimed)) if infoSet in self.nodeMap: return self.nodeMap.get(infoSet).strategy else: return MDudoNode(self.NUM_ACTIONS, [False] * 13).toString() def getNodeStrategy(self, die: int, isClaimed: List[bool]) -> str: infoSet = str(self.infoSetToInt(isClaimed)) if infoSet in self.nodeMap: return self.nodeMap.get(infoSet).strategy[die - 1] else: return "Not found" if __name__ == '__main__': TrainRes, _ = MDudoTrainer().train(500) startClaims = [False] * 13 startClaims[2] = True print(TrainRes.getNodeStrategy(2, startClaims))
en
0.630864
# TODO: inheritance # Get current information set mixed strategy through regret-matching # Get average information set mixed strategy across all training iterations # Dudo definitions # Dudo definitions of 2 6-sided dice # convert Dudo information set to an integer (binary nums) # convert Dudo claim history to a String # info set node class definitions (m_node class) # Counterfactual regret minimization iteration # history -> isClaimed # return payoff for terminal states # quantity # value # '1' <- 0 in arrays # payoffs: +1 || -1 for the first player (only!) # for player #0: # <Get information set node or create it if nonexistent> # For each action, recursively call cfr with additional history and probability # For each action, compute and accumulate counterfactual regret # self.NUM_ACTIONS # print(agv) # # for n in self.nodeMap.values(): # print cards + history # # print(n.die, self.claimHistoryToString(n.isClaimed), n.toString(), sep='|') # # print() # TODO: describe a node
2.292665
2
projecto1/experimento2.py
Rachidomar1523/pythonExercicios
0
6628749
<filename>projecto1/experimento2.py import random n = random.randint(0, 10), random.randint(0, 10), random.randint(0, 10), random.randint(0, 10) nc = 0 mais = 0 menos = 0 for r in n: nc += 1 if nc == 1: mais = r menos = r else: if r > mais: mais = r if r < menos: menos = r print(f'''o maior numero digitado foi "{mais}" e o menor numero digitado foi "{menos}"''') print(n)
<filename>projecto1/experimento2.py import random n = random.randint(0, 10), random.randint(0, 10), random.randint(0, 10), random.randint(0, 10) nc = 0 mais = 0 menos = 0 for r in n: nc += 1 if nc == 1: mais = r menos = r else: if r > mais: mais = r if r < menos: menos = r print(f'''o maior numero digitado foi "{mais}" e o menor numero digitado foi "{menos}"''') print(n)
pt
0.979161
o maior numero digitado foi "{mais}" e o menor numero digitado foi "{menos}"
3.81217
4
tests/repositories/pypi/test_pypi_model.py
pomes/valiant
2
6628750
<filename>tests/repositories/pypi/test_pypi_model.py """Tests for the PyPi repo model. Copyright (c) 2020 The Valiant Authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import json from pathlib import Path from typing import Dict import py # https://py.readthedocs.io/en/latest/index.html import pytest from valiant.repositories import RepositoryConfiguration, ValidationError from valiant.repositories.pypi import PyPiPackageMetadata from . 
import ALL_PKG_FILES, DATAFILE_VALIDATION, TEST_FILE_DIR, load_test_json_data def test_empty_package_data() -> None: """Ensures exception when constructor is passed an empty dict.""" with pytest.raises(ValidationError): PyPiPackageMetadata("", {}) @pytest.mark.datafiles(TEST_FILE_DIR / "basic_package.json") def test_json_load_basic( datafiles: py.path, pypi_config: RepositoryConfiguration ) -> None: """Small baseline test.""" pkg = PyPiPackageMetadata( pypi_config.base_url, load_test_json_data(datafiles, "basic_package.json") ) assert pkg.name == "Demo" assert pkg.description == "Basic description" assert pkg.summary == "A short summary." assert pkg.url_documentation == "http://docs.example.com" assert pkg.url_project == "http://project.example.com" assert pkg.url_issue_tracker == "http://bugs.example.com" assert pkg.repository_url == pypi_config.base_url @pytest.mark.datafiles(TEST_FILE_DIR / "fake_package.json") def test_json_load_fake( datafiles: py.path, pypi_config: RepositoryConfiguration ) -> None: """Small baseline test with an unexpected field.""" pkg = PyPiPackageMetadata( pypi_config.base_url, load_test_json_data(datafiles, "fake_package.json") ) assert pkg.name == "Demo" assert pkg.description == "Basic description" assert pkg.summary == "A short summary." assert pkg.url_documentation == "http://docs.example.com" assert pkg.url_project == "http://project.example.com" assert pkg.url_issue_tracker == "http://bugs.example.com" assert pkg.repository_url == pypi_config.base_url @pytest.mark.datafiles(TEST_FILE_DIR / "basic_package_2.json") def test_json_load_basic2( datafiles: py.path, pypi_config: RepositoryConfiguration ) -> None: """Small baseline test.""" pkg = PyPiPackageMetadata( pypi_config.base_url, load_test_json_data(datafiles, "basic_package_2.json") ) assert pkg.name == "Demo 2" assert pkg.description == "Basic description" assert pkg.summary == "A short summary." 
assert pkg.url_documentation == "http://docs.example.com" assert pkg.url_project == "http://project.example.com" assert pkg.url_issue_tracker == "http://bugs.example.com" @pytest.mark.datafiles(TEST_FILE_DIR / "basic_package_3.json") def test_json_load_basic3( datafiles: py.path, pypi_config: RepositoryConfiguration ) -> None: """Small baseline test.""" pkg = PyPiPackageMetadata( pypi_config.base_url, load_test_json_data(datafiles, "basic_package_3.json") ) assert pkg.name == "Demo 2" assert pkg.description == "Basic description" assert pkg.summary == "A short summary." assert pkg.url_documentation == "http://docs.example.com" assert pkg.url_project == "http://project.example.com" assert pkg.url_issue_tracker == "http://bugs.example.com" @pytest.mark.datafiles(TEST_FILE_DIR / "basic_package.json") def test_json_load_basic_to_dict( datafiles: py.path, pypi_config: RepositoryConfiguration ) -> None: """Small baseline test of the string representation.""" pkg = PyPiPackageMetadata( pypi_config.base_url, load_test_json_data(datafiles, "basic_package.json") ) val = pkg.to_dict() assert val["name"] == "Demo" assert val["version"] == "0" assert val["summary"] == "A short summary." @pytest.mark.datafiles(TEST_FILE_DIR / "basic_package.json") def test_json_load_basic_to_json( datafiles: py.path, pypi_config: RepositoryConfiguration ) -> None: """Small baseline test of the string representation.""" pkg = PyPiPackageMetadata( pypi_config.base_url, load_test_json_data(datafiles, "basic_package.json") ) json_val = pkg.to_json() val = json.loads(json_val) assert val["name"] == "Demo" assert val["version"] == "0" assert val["summary"] == "A short summary." 
@pytest.mark.datafiles(TEST_FILE_DIR / "basic_package.json") def test_json_load_basic_repr( datafiles: py.path, pypi_config: RepositoryConfiguration ) -> None: """Small baseline test of the string representation.""" pkg = PyPiPackageMetadata( pypi_config.base_url, load_test_json_data(datafiles, "basic_package.json") ) val = json.loads(str(pkg)) assert val["name"] == "Demo" assert val["version"] == "0" assert val["summary"] == "A short summary." @ALL_PKG_FILES @pytest.mark.parametrize( ("input_file,expected"), DATAFILE_VALIDATION, ) def test_json_load( input_file: str, expected: Dict, datafiles: py.path, pypi_config: RepositoryConfiguration, ) -> None: """Validate loading against sample data from pypi.org. This is different to test_data_load.py as this test is more than just a basic load. The DATAFILE_VALIDATION consttruct lays out a set of attributes to test against for the various inputs. """ source_data = Path(datafiles.join(input_file)) with open(source_data, "r") as f: data = json.load(f) pkg = PyPiPackageMetadata(pypi_config.base_url, data) assert pkg.name == expected["name"] assert pkg.version == expected["version"] assert pkg.license == expected["license"] assert pkg.url_code == expected["url_code"] assert pkg.url_documentation == expected["url_documentation"] assert pkg.url_project == expected["url_project"] assert pkg.url_issue_tracker == expected["url_issue_tracker"] assert pkg.requires_python == expected["requires_python"] # This now gets messy - sorry # TODO: Tidy this up for k in expected["requires_dist"].keys(): assert k in pkg.requires_dist reqs = [pkg._requirement_to_dict(item) for item in pkg.requires_dist[k]] expected_reqs = expected["requires_dist"][k] assert len(reqs) == len(expected["requires_dist"][k]) # Iterate over the requirements in the source dependency for req in reqs: flag = False assert ( k == req["name"] ), "All of the Requirements instances in the list must have the same name" # Inner loop - blech for ereq in expected_reqs: assert 
ereq["name"] == req["name"] print(f"Comparing {req} with {ereq}") if ereq == req: flag = True break assert flag, f"No match for requires_dist item: {ereq}."
<filename>tests/repositories/pypi/test_pypi_model.py """Tests for the PyPi repo model. Copyright (c) 2020 The Valiant Authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import json from pathlib import Path from typing import Dict import py # https://py.readthedocs.io/en/latest/index.html import pytest from valiant.repositories import RepositoryConfiguration, ValidationError from valiant.repositories.pypi import PyPiPackageMetadata from . 
import ALL_PKG_FILES, DATAFILE_VALIDATION, TEST_FILE_DIR, load_test_json_data def test_empty_package_data() -> None: """Ensures exception when constructor is passed an empty dict.""" with pytest.raises(ValidationError): PyPiPackageMetadata("", {}) @pytest.mark.datafiles(TEST_FILE_DIR / "basic_package.json") def test_json_load_basic( datafiles: py.path, pypi_config: RepositoryConfiguration ) -> None: """Small baseline test.""" pkg = PyPiPackageMetadata( pypi_config.base_url, load_test_json_data(datafiles, "basic_package.json") ) assert pkg.name == "Demo" assert pkg.description == "Basic description" assert pkg.summary == "A short summary." assert pkg.url_documentation == "http://docs.example.com" assert pkg.url_project == "http://project.example.com" assert pkg.url_issue_tracker == "http://bugs.example.com" assert pkg.repository_url == pypi_config.base_url @pytest.mark.datafiles(TEST_FILE_DIR / "fake_package.json") def test_json_load_fake( datafiles: py.path, pypi_config: RepositoryConfiguration ) -> None: """Small baseline test with an unexpected field.""" pkg = PyPiPackageMetadata( pypi_config.base_url, load_test_json_data(datafiles, "fake_package.json") ) assert pkg.name == "Demo" assert pkg.description == "Basic description" assert pkg.summary == "A short summary." assert pkg.url_documentation == "http://docs.example.com" assert pkg.url_project == "http://project.example.com" assert pkg.url_issue_tracker == "http://bugs.example.com" assert pkg.repository_url == pypi_config.base_url @pytest.mark.datafiles(TEST_FILE_DIR / "basic_package_2.json") def test_json_load_basic2( datafiles: py.path, pypi_config: RepositoryConfiguration ) -> None: """Small baseline test.""" pkg = PyPiPackageMetadata( pypi_config.base_url, load_test_json_data(datafiles, "basic_package_2.json") ) assert pkg.name == "Demo 2" assert pkg.description == "Basic description" assert pkg.summary == "A short summary." 
assert pkg.url_documentation == "http://docs.example.com" assert pkg.url_project == "http://project.example.com" assert pkg.url_issue_tracker == "http://bugs.example.com" @pytest.mark.datafiles(TEST_FILE_DIR / "basic_package_3.json") def test_json_load_basic3( datafiles: py.path, pypi_config: RepositoryConfiguration ) -> None: """Small baseline test.""" pkg = PyPiPackageMetadata( pypi_config.base_url, load_test_json_data(datafiles, "basic_package_3.json") ) assert pkg.name == "Demo 2" assert pkg.description == "Basic description" assert pkg.summary == "A short summary." assert pkg.url_documentation == "http://docs.example.com" assert pkg.url_project == "http://project.example.com" assert pkg.url_issue_tracker == "http://bugs.example.com" @pytest.mark.datafiles(TEST_FILE_DIR / "basic_package.json") def test_json_load_basic_to_dict( datafiles: py.path, pypi_config: RepositoryConfiguration ) -> None: """Small baseline test of the string representation.""" pkg = PyPiPackageMetadata( pypi_config.base_url, load_test_json_data(datafiles, "basic_package.json") ) val = pkg.to_dict() assert val["name"] == "Demo" assert val["version"] == "0" assert val["summary"] == "A short summary." @pytest.mark.datafiles(TEST_FILE_DIR / "basic_package.json") def test_json_load_basic_to_json( datafiles: py.path, pypi_config: RepositoryConfiguration ) -> None: """Small baseline test of the string representation.""" pkg = PyPiPackageMetadata( pypi_config.base_url, load_test_json_data(datafiles, "basic_package.json") ) json_val = pkg.to_json() val = json.loads(json_val) assert val["name"] == "Demo" assert val["version"] == "0" assert val["summary"] == "A short summary." 
@pytest.mark.datafiles(TEST_FILE_DIR / "basic_package.json") def test_json_load_basic_repr( datafiles: py.path, pypi_config: RepositoryConfiguration ) -> None: """Small baseline test of the string representation.""" pkg = PyPiPackageMetadata( pypi_config.base_url, load_test_json_data(datafiles, "basic_package.json") ) val = json.loads(str(pkg)) assert val["name"] == "Demo" assert val["version"] == "0" assert val["summary"] == "A short summary." @ALL_PKG_FILES @pytest.mark.parametrize( ("input_file,expected"), DATAFILE_VALIDATION, ) def test_json_load( input_file: str, expected: Dict, datafiles: py.path, pypi_config: RepositoryConfiguration, ) -> None: """Validate loading against sample data from pypi.org. This is different to test_data_load.py as this test is more than just a basic load. The DATAFILE_VALIDATION consttruct lays out a set of attributes to test against for the various inputs. """ source_data = Path(datafiles.join(input_file)) with open(source_data, "r") as f: data = json.load(f) pkg = PyPiPackageMetadata(pypi_config.base_url, data) assert pkg.name == expected["name"] assert pkg.version == expected["version"] assert pkg.license == expected["license"] assert pkg.url_code == expected["url_code"] assert pkg.url_documentation == expected["url_documentation"] assert pkg.url_project == expected["url_project"] assert pkg.url_issue_tracker == expected["url_issue_tracker"] assert pkg.requires_python == expected["requires_python"] # This now gets messy - sorry # TODO: Tidy this up for k in expected["requires_dist"].keys(): assert k in pkg.requires_dist reqs = [pkg._requirement_to_dict(item) for item in pkg.requires_dist[k]] expected_reqs = expected["requires_dist"][k] assert len(reqs) == len(expected["requires_dist"][k]) # Iterate over the requirements in the source dependency for req in reqs: flag = False assert ( k == req["name"] ), "All of the Requirements instances in the list must have the same name" # Inner loop - blech for ereq in expected_reqs: assert 
ereq["name"] == req["name"] print(f"Comparing {req} with {ereq}") if ereq == req: flag = True break assert flag, f"No match for requires_dist item: {ereq}."
en
0.780946
Tests for the PyPi repo model. Copyright (c) 2020 The Valiant Authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # https://py.readthedocs.io/en/latest/index.html Ensures exception when constructor is passed an empty dict. Small baseline test. Small baseline test with an unexpected field. Small baseline test. Small baseline test. Small baseline test of the string representation. Small baseline test of the string representation. Small baseline test of the string representation. Validate loading against sample data from pypi.org. This is different to test_data_load.py as this test is more than just a basic load. The DATAFILE_VALIDATION consttruct lays out a set of attributes to test against for the various inputs. # This now gets messy - sorry # TODO: Tidy this up # Iterate over the requirements in the source dependency # Inner loop - blech
1.969062
2