hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
481864b9c51dc4f9d0b6fdb0be5da4ad7dc75287
| 13,309
|
py
|
Python
|
Cogs/Image_manipulation.py
|
AaalbatrossGuy/DeltaDiscordBot
|
04b21d41265bbca4b4b4af09277fb82623327b6c
|
[
"Apache-2.0"
] | 1
|
2021-04-17T09:31:47.000Z
|
2021-04-17T09:31:47.000Z
|
Cogs/Image_manipulation.py
|
AaalbatrossGuy/Delta-Discord-Bot
|
04b21d41265bbca4b4b4af09277fb82623327b6c
|
[
"Apache-2.0"
] | 2
|
2021-05-10T06:10:31.000Z
|
2021-05-10T06:10:51.000Z
|
Cogs/Image_manipulation.py
|
AaalbatrossGuy/Delta-Discord-Bot
|
04b21d41265bbca4b4b4af09277fb82623327b6c
|
[
"Apache-2.0"
] | 1
|
2021-08-02T04:43:54.000Z
|
2021-08-02T04:43:54.000Z
|
# Coding=UTF8
# !python
# !/usr/bin/env python3
import discord
import requests
from discord.ext import commands
from io import BytesIO
from PIL import Image, ImageOps, ImageFilter
from asyncdagpi import Client, ImageFeatures
class ImageManipulation(commands.Cog):
def __init__(self, client):
self.client = client
self.dagpi = Client("MTYyMzUwMzMzMQ.876re9HvmxvFcf41LIotTu2WrHC5VNPc.edc4473be1c68f30")
@commands.command(name="bw_u")
async def black_and_white_user(self, ctx, *, member: discord.Member = None):
member = member or ctx.message.author
avatar_url = member.avatar_url_as(format='jpeg')
image = Image.open(requests.get(url=avatar_url, stream=True).raw).convert("L")
with BytesIO() as image_bytes:
image.save(image_bytes, 'jpeg')
image_bytes.seek(0)
await ctx.channel.send(
file=discord.File(fp=image_bytes, filename="image.jpeg"))
@commands.command(name="negative_u")
async def negative_user(self, ctx, *, member: discord.Member = None):
member = member or ctx.message.author
avatar_url = member.avatar_url_as(format='jpeg')
image = Image.open(requests.get(url=avatar_url, stream=True).raw)
image_inverted = ImageOps.invert(image)
with BytesIO() as image_bytes:
image_inverted.save(image_bytes, 'jpeg')
image_bytes.seek(0)
await ctx.channel.send(
file=discord.File(fp=image_bytes, filename="image.jpeg"))
@commands.command(name="blur_u")
async def blur_user(self, ctx, radius: int, *, member: discord.Member = None):
member = member or ctx.message.author
avatar_url = member.avatar_url_as(format='jpeg')
image = Image.open(requests.get(url=avatar_url, stream=True).raw)
blurred_image = image.filter(ImageFilter.GaussianBlur(radius=int(radius)))
with BytesIO() as image_bytes:
blurred_image.save(image_bytes, 'jpeg')
image_bytes.seek(0)
await ctx.channel.send(
file=discord.File(fp=image_bytes, filename="image.jpeg"))
@commands.command(name="bw_f")
async def black_and_white_file(self, ctx):
image = ctx.message.attachments[0].url
main_image = Image.open(requests.get(url=image, stream=True).raw).convert("L")
with BytesIO() as image_bytes:
main_image.save(image_bytes, 'jpeg')
image_bytes.seek(0)
await ctx.channel.send(
file=discord.File(fp=image_bytes, filename="image.jpeg"))
@commands.command(name="negative_f")
async def negative_file(self, ctx):
image = ctx.message.attachments[0].url
image = Image.open(requests.get(url=image, stream=True).raw).convert("RGB")
main_image = ImageOps.invert(image)
with BytesIO() as image_bytes:
main_image.save(image_bytes, 'jpeg')
image_bytes.seek(0)
await ctx.channel.send(
file=discord.File(fp=image_bytes, filename="image.jpeg"))
@commands.command(name="blur_f")
async def blur_file(self, ctx, radius: int):
image = ctx.message.attachments[0].url
image = Image.open(requests.get(url=image, stream=True).raw)
main_image = image.filter(ImageFilter.GaussianBlur(radius=int(radius)))
with BytesIO() as image_bytes:
main_image.save(image_bytes, 'png')
image_bytes.seek(0)
await ctx.channel.send(
file=discord.File(fp=image_bytes, filename="image.png"))
@commands.command()
async def wasted(self, ctx, *, member:discord.Member = None):
member = member or ctx.message.author
url = member.avatar_url_as(format="png")
base_url = f"https://some-random-api.ml/canvas/wasted?avatar={url}"
await ctx.channel.send(base_url)
@commands.command()
async def trigger(self, ctx, *, member:discord.Member = None):
member = member or ctx.message.author
url= member.avatar_url_as(format="png")
img = await self.dagpi.image_process(ImageFeatures.triggered(), str(url))
file = discord.File(fp=img.image, filename=f"triggered.{img.format}")
await ctx.channel.send(file=file)
@commands.command()
async def magic(self, ctx, *, member:discord.Member = None):
member = member or ctx.message.author
url = member.avatar_url_as(format="png")
img = await self.dagpi.image_process(ImageFeatures.magik(), str(url))
file = discord.File(fp=img.image, filename=f"magic.{img.format}")
await ctx.channel.send(file=file)
@commands.command()
async def pixel(self, ctx, *, member:discord.Member = None):
member = member or ctx.message.author
url = member.avatar_url_as(format="png")
img = await self.dagpi.image_process(ImageFeatures.pixel(), str(url))
file = discord.File(fp=img.image, filename=f'pixel.{img.format}')
await ctx.channel.send(file=file)
@commands.command()
async def angel(self, ctx, *, member:discord.Member = None):
member = member or ctx.message.author
url = member.avatar_url_as(format="png")
img = await self.dagpi.image_process(ImageFeatures.angel(), str(url))
file = discord.File(fp=img.image, filename=f"angel.{img.format}")
await ctx.channel.send(file=file)
@commands.command()
async def devil(self, ctx, *, member:discord.Member = None):
member = member or ctx.message.author
url = member.avatar_url_as(format="png")
img = await self.dagpi.image_process(ImageFeatures.satan(), str(url))
file = discord.File(fp=img.image, filename=f"devil.{img.format}")
await ctx.channel.send(file=file)
@commands.command()
async def windel(self, ctx, *, member:discord.Member = None):
member = member or ctx.message.author
url = member.avatar_url_as(format="png")
img = await self.dagpi.image_process(ImageFeatures.delete(), str(url))
file = discord.File(fp=img.image, filename=f'delete.{img.format}')
await ctx.channel.send(file=file)
@commands.command()
async def hitler(self, ctx, *, member:discord.Member = None):
member = member or ctx.message.author
url = member.avatar_url_as(format="png")
img = await self.dagpi.image_process(ImageFeatures.hitler(), str(url))
file = discord.File(fp=img.image, filename=f'hitler.{img.format}')
await ctx.channel.send(file=file)
@commands.command()
async def stringify(self, ctx, *, member:discord.Member = None):
member = member or ctx.message.author
url = member.avatar_url_as(format="png")
img = await self.dagpi.image_process(ImageFeatures.stringify(), str(url))
file = discord.File(fp=img.image, filename = f"stringify.{img.format}")
await ctx.channel.send(file=file)
#Error Handlers
@black_and_white_user.error
async def bw_user_error_handling(self, ctx, error):
if isinstance(error, commands.MemberNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red()))
@negative_user.error
async def negative_u_error_handling(self, ctx, error):
if isinstance(error, commands.MemberNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red()))
@blur_user.error
async def blur_u_error_handling(self, ctx, error):
if isinstance(error, commands.MemberNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red()))
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Arguments", description="```ini\nMake sure you have run the command providing the [blur radius] and the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_teal()))
@black_and_white_file.error
async def bw_f_error_handling(self, ctx, error):
if isinstance(error, commands.CommandInvokeError):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Attachment", description="```prolog\nMake sure you have run the command providing the File/Image as an Attachment```", timestamp=ctx.message.created_at, color=discord.Color.dark_teal()))
@negative_file.error
async def negative_f_error_handling(self, ctx, error):
if isinstance(error, commands.CommandInvokeError):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Attachment", description="```prolog\nMake sure you have run the command providing the File/Image as an Attachment```", timestamp=ctx.message.created_at, color=discord.Color.dark_teal()))
@blur_file.error
async def blur_f_error_handling(self, ctx, error):
if isinstance(error, commands.CommandInvokeError):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Missing Attachment", description="```prolog\nMake sure you have run the command providing the File/Image as an Attachment```", timestamp=ctx.message.created_at, color=discord.Color.dark_teal()))
@wasted.error
async def wasted_error_handling(self, ctx, error):
if isinstance(error, commands.MemberNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red()))
@trigger.error
async def trigger_error_handling(self, ctx, error):
if isinstance(error, commands.MemberNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red()))
@magic.error
async def magic_error_handling(self, ctx, error):
if isinstance(error, commands.MemberNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red()))
@pixel.error
async def pixel_error_handling(self, ctx, error):
if isinstance(error, commands.MemberNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red()))
@angel.error
async def angel_error_handling(self, ctx, error):
if isinstance(error, commands.MemberNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red()))
@devil.error
async def devil_error_handling(self, ctx, error):
if isinstance(error, commands.MemberNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red()))
@windel.error
async def windel_error_handling(self, ctx, error):
if isinstance(error, commands.MemberNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red()))
@hitler.error
async def hitler_error_handling(self, ctx, error):
if isinstance(error, commands.MemberNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red()))
@stringify.error
async def stringify_error_handling(self, ctx, error):
if isinstance(error, commands.MemberNotFound):
await ctx.send(embed=discord.Embed(title="<:hellno:871582891585437759> Member Not Found", description="```ini\nMake sure you have run the command providing the [username]```", timestamp=ctx.message.created_at, color=discord.Color.dark_red()))
def setup(client):
client.add_cog(ImageManipulation(client))
| 50.992337
| 278
| 0.691637
| 1,721
| 13,309
| 5.248112
| 0.084253
| 0.034322
| 0.030115
| 0.044287
| 0.853078
| 0.848428
| 0.847874
| 0.847874
| 0.837135
| 0.831377
| 0
| 0.028999
| 0.181231
| 13,309
| 260
| 279
| 51.188462
| 0.799853
| 0.004133
| 0
| 0.51269
| 0
| 0.005076
| 0.17858
| 0.041965
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010152
| false
| 0
| 0.030457
| 0
| 0.045685
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
483fd9b56b25aa6f8be62034b345311e1625c4ba
| 52
|
py
|
Python
|
vcst/__init__.py
|
crdavis12/vcst
|
a826fb33251e774247a9a99e308479dcfa51ccc5
|
[
"MIT"
] | 4
|
2021-02-15T22:30:11.000Z
|
2021-02-16T13:46:57.000Z
|
vcst/__init__.py
|
crdavis12/vcst
|
a826fb33251e774247a9a99e308479dcfa51ccc5
|
[
"MIT"
] | null | null | null |
vcst/__init__.py
|
crdavis12/vcst
|
a826fb33251e774247a9a99e308479dcfa51ccc5
|
[
"MIT"
] | 1
|
2021-08-13T19:57:56.000Z
|
2021-08-13T19:57:56.000Z
|
from vcst.ui import VCST
import vcst.monkey_patching
| 26
| 27
| 0.865385
| 9
| 52
| 4.888889
| 0.666667
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096154
| 52
| 2
| 27
| 26
| 0.93617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6fa5822bcab1f027d71366f0cf4cad546814103c
| 24
|
py
|
Python
|
SAPLogger/__init__.py
|
jduncan8142/sap_gui_robot_framework
|
01fd8f59548afd643f37009967a8a5183654fe12
|
[
"MIT"
] | null | null | null |
SAPLogger/__init__.py
|
jduncan8142/sap_gui_robot_framework
|
01fd8f59548afd643f37009967a8a5183654fe12
|
[
"MIT"
] | null | null | null |
SAPLogger/__init__.py
|
jduncan8142/sap_gui_robot_framework
|
01fd8f59548afd643f37009967a8a5183654fe12
|
[
"MIT"
] | null | null | null |
from .SapLogger import *
| 24
| 24
| 0.791667
| 3
| 24
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b5169f2f260c1e9de38741bd0b6a62e4852fe38c
| 1,067
|
py
|
Python
|
venv/lib/python2.7/sre_constants.py
|
sunlum/Deep-Semantic-Space-NST
|
468ac2590385f48e65df12c1a3c9db0ed8d49477
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/sre_constants.py
|
sunlum/Deep-Semantic-Space-NST
|
468ac2590385f48e65df12c1a3c9db0ed8d49477
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/sre_constants.py
|
sunlum/Deep-Semantic-Space-NST
|
468ac2590385f48e65df12c1a3c9db0ed8d49477
|
[
"MIT"
] | null | null | null |
XSym
0041
b52d6938687953531e13366cf0e53e25
/anaconda2/lib/python2.7/sre_constants.py
| 213.4
| 982
| 0.070291
| 10
| 1,067
| 7.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.395062
| 0.924086
| 1,067
| 5
| 982
| 213.4
| 0.518519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d211c79c0804d5dfc97de98de689e8ed18629374
| 99
|
py
|
Python
|
utils/losses/__init__.py
|
ozcell/pytorch-auto-drive
|
f1c2fd223cf7d307a3968fe671d0271b03ced39c
|
[
"BSD-3-Clause"
] | 292
|
2020-10-14T01:04:22.000Z
|
2022-03-31T15:34:59.000Z
|
utils/losses/__init__.py
|
ozcell/pytorch-auto-drive
|
f1c2fd223cf7d307a3968fe671d0271b03ced39c
|
[
"BSD-3-Clause"
] | 33
|
2021-02-17T03:41:16.000Z
|
2022-03-19T12:39:41.000Z
|
utils/losses/__init__.py
|
ozcell/pytorch-auto-drive
|
f1c2fd223cf7d307a3968fe671d0271b03ced39c
|
[
"BSD-3-Clause"
] | 48
|
2020-11-09T05:54:46.000Z
|
2022-03-31T10:32:55.000Z
|
# Implementation based on pytorch 1.6.0
from .lane_seg_loss import *
from .hungarian_loss import *
| 24.75
| 39
| 0.787879
| 16
| 99
| 4.6875
| 0.8125
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035294
| 0.141414
| 99
| 3
| 40
| 33
| 0.847059
| 0.373737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d27dacc7d465c145745fafd2546a33134903d7b6
| 8,911
|
py
|
Python
|
tests/core/pyspec/eth2spec/test/phase1/sanity/test_shard_blocks.py
|
barnabemonnot/eth2.0-specs
|
5e83e60a594c1d855d1396b8e25fbf43af913577
|
[
"CC0-1.0"
] | null | null | null |
tests/core/pyspec/eth2spec/test/phase1/sanity/test_shard_blocks.py
|
barnabemonnot/eth2.0-specs
|
5e83e60a594c1d855d1396b8e25fbf43af913577
|
[
"CC0-1.0"
] | null | null | null |
tests/core/pyspec/eth2spec/test/phase1/sanity/test_shard_blocks.py
|
barnabemonnot/eth2.0-specs
|
5e83e60a594c1d855d1396b8e25fbf43af913577
|
[
"CC0-1.0"
] | null | null | null |
from eth2spec.test.context import (
PHASE0,
ALTAIR,
always_bls,
expect_assertion_error,
spec_state_test,
with_all_phases_except,
only_full_crosslink,
)
from eth2spec.test.helpers.shard_block import (
build_shard_block,
sign_shard_block,
)
from eth2spec.test.helpers.state import next_slot, transition_to_valid_shard_slot, transition_to
def run_shard_blocks(spec, shard_state, signed_shard_block, beacon_parent_state, valid=True):
pre_shard_state = shard_state.copy()
yield 'pre', pre_shard_state
yield 'signed_shard_block', signed_shard_block
yield 'beacon_parent_state', beacon_parent_state
if not valid:
expect_assertion_error(
lambda: spec.shard_state_transition(shard_state, signed_shard_block, beacon_parent_state)
)
yield 'post', None
return
spec.shard_state_transition(shard_state, signed_shard_block, beacon_parent_state)
yield 'post', shard_state
# Verify `process_shard_block`
block = signed_shard_block.message
assert shard_state.slot == block.slot
shard_block_length = len(block.body)
assert shard_state.gasprice == spec.compute_updated_gasprice(pre_shard_state.gasprice, shard_block_length)
if shard_block_length != 0:
shard_state.latest_block_root == block.hash_tree_root()
else:
shard_state.latest_block_root == pre_shard_state.latest_block_root
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
@only_full_crosslink
def test_valid_shard_block(spec, state):
beacon_state = state.copy()
transition_to_valid_shard_slot(spec, beacon_state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, state, shard, slot=beacon_state.slot, signed=True)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state)
#
# verify_shard_block_message
#
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_invalid_shard_parent_root(spec, state):
beacon_state = state.copy()
transition_to_valid_shard_slot(spec, beacon_state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
signed_shard_block.message.shard_parent_root = b'\x12' * 32
sign_shard_block(spec, beacon_state, shard, signed_shard_block)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_invalid_beacon_parent_root(spec, state):
beacon_state = state.copy()
transition_to_valid_shard_slot(spec, beacon_state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
signed_shard_block.message.beacon_parent_root = b'\x12' * 32
sign_shard_block(spec, beacon_state, shard, signed_shard_block)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_invalid_slot(spec, state):
beacon_state = state.copy()
transition_to_valid_shard_slot(spec, beacon_state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
signed_shard_block.message.slot = beacon_state.slot + 1
proposer_index = spec.get_shard_proposer_index(beacon_state, signed_shard_block.message.slot, shard)
sign_shard_block(spec, beacon_state, shard, signed_shard_block, proposer_index=proposer_index)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@only_full_crosslink
def test_invalid_proposer_index(spec, state):
beacon_state = state.copy()
transition_to_valid_shard_slot(spec, beacon_state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
active_validator_indices = spec.get_active_validator_indices(beacon_state, spec.get_current_epoch(beacon_state))
proposer_index = (
(spec.get_shard_proposer_index(beacon_state, signed_shard_block.message.slot, shard) + 1)
% len(active_validator_indices)
)
signed_shard_block.message.proposer_index = proposer_index
sign_shard_block(spec, beacon_state, shard, signed_shard_block, proposer_index=proposer_index)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
@only_full_crosslink
def test_out_of_bound_offset(spec, state):
beacon_state = state.copy()
transition_to_valid_shard_slot(spec, beacon_state)
shard = 0
slot = (
beacon_state.shard_states[shard].slot
+ spec.SHARD_BLOCK_OFFSETS[spec.MAX_SHARD_BLOCKS_PER_ATTESTATION - 1]
+ 1 # out-of-bound
)
transition_to(spec, beacon_state, slot)
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
@only_full_crosslink
def test_invalid_offset(spec, state):
beacon_state = state.copy()
transition_to_valid_shard_slot(spec, beacon_state)
# 4 is not in `SHARD_BLOCK_OFFSETS`
shard = 0
slot = beacon_state.shard_states[shard].slot + 4
assert slot not in spec.SHARD_BLOCK_OFFSETS
transition_to(spec, beacon_state, slot)
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
@only_full_crosslink
def test_empty_block_body(spec, state):
beacon_state = state.copy()
transition_to_valid_shard_slot(spec, beacon_state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, body=b'', signed=True)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
#
# verify_shard_block_signature
#
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
@only_full_crosslink
def test_invalid_signature(spec, state):
beacon_state = state.copy()
transition_to_valid_shard_slot(spec, beacon_state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=False)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
#
# Other cases
#
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
@only_full_crosslink
def test_max_offset(spec, state):
beacon_state = state.copy()
transition_to_valid_shard_slot(spec, beacon_state)
shard = 0
slot = beacon_state.shard_states[shard].slot + spec.SHARD_BLOCK_OFFSETS[spec.MAX_SHARD_BLOCKS_PER_ATTESTATION - 1]
transition_to(spec, beacon_state, slot)
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state)
@with_all_phases_except([PHASE0, ALTAIR])
@spec_state_test
@always_bls
@only_full_crosslink
def test_pending_shard_parent_block(spec, state):
# Block N
beacon_state = state.copy()
transition_to_valid_shard_slot(spec, beacon_state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block_1 = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
_, _, _, _ = run_shard_blocks(spec, shard_state, signed_shard_block_1, beacon_state)
# Block N+1
next_slot(spec, beacon_state)
signed_shard_block_2 = build_shard_block(
spec, beacon_state, shard,
slot=beacon_state.slot, shard_parent_state=shard_state, signed=True
)
assert signed_shard_block_2.message.shard_parent_root == shard_state.latest_block_root
assert signed_shard_block_2.message.slot == signed_shard_block_1.message.slot + 1
yield from run_shard_blocks(spec, shard_state, signed_shard_block_2, beacon_state)
| 35.361111
| 118
| 0.7787
| 1,282
| 8,911
| 4.968019
| 0.074883
| 0.145078
| 0.108023
| 0.078505
| 0.803737
| 0.778772
| 0.769352
| 0.769352
| 0.767624
| 0.754279
| 0
| 0.006649
| 0.139266
| 8,911
| 251
| 119
| 35.501992
| 0.823729
| 0.018068
| 0
| 0.583784
| 0
| 0
| 0.006411
| 0
| 0
| 0
| 0
| 0
| 0.037838
| 1
| 0.064865
| false
| 0
| 0.016216
| 0
| 0.086486
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
96315b12a6b1544dbfafc54f1e0e11bc632fc8c1
| 2,876
|
py
|
Python
|
tests/test_mp3_compression.py
|
jeongyoonlee/audiomentations
|
7f0112ae310989430e0ef7eb32c4116114810966
|
[
"MIT"
] | 930
|
2019-02-14T10:21:22.000Z
|
2022-03-31T03:49:48.000Z
|
tests/test_mp3_compression.py
|
jeongyoonlee/audiomentations
|
7f0112ae310989430e0ef7eb32c4116114810966
|
[
"MIT"
] | 169
|
2019-02-12T21:16:14.000Z
|
2022-03-18T07:53:43.000Z
|
tests/test_mp3_compression.py
|
jeongyoonlee/audiomentations
|
7f0112ae310989430e0ef7eb32c4116114810966
|
[
"MIT"
] | 122
|
2019-02-26T05:12:45.000Z
|
2022-03-24T08:45:51.000Z
|
import unittest
import numpy as np
from audiomentations.augmentations.transforms import Mp3Compression
from audiomentations.core.composition import Compose
class TestMp3Compression(unittest.TestCase):
def test_apply_mp3_compression_pydub(self):
sample_len = 44100
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 44100
augmenter = Compose(
[Mp3Compression(p=1.0, min_bitrate=48, max_bitrate=48, backend="pydub")]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertGreaterEqual(len(samples_out), sample_len)
self.assertLess(len(samples_out), sample_len + 2500)
def test_apply_mp3_compression_lameenc(self):
sample_len = 44100
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 44100
augmenter = Compose(
[Mp3Compression(p=1.0, min_bitrate=48, max_bitrate=48, backend="lameenc")]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertGreaterEqual(len(samples_out), sample_len)
self.assertLess(len(samples_out), sample_len + 2500)
def test_apply_mp3_compression_low_bitrate_pydub(self):
sample_len = 16000
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 16000
augmenter = Compose(
[Mp3Compression(p=1.0, min_bitrate=8, max_bitrate=8, backend="pydub")]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertGreaterEqual(len(samples_out), sample_len)
self.assertLess(len(samples_out), sample_len + 2500)
def test_apply_mp3_compression_low_bitrate_lameenc(self):
sample_len = 16000
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 16000
augmenter = Compose(
[Mp3Compression(p=1.0, min_bitrate=8, max_bitrate=8, backend="lameenc")]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertGreaterEqual(len(samples_out), sample_len)
self.assertLess(len(samples_out), sample_len + 2500)
def test_invalid_argument_combination(self):
with self.assertRaises(AssertionError):
_ = Mp3Compression(min_bitrate=400, max_bitrate=800)
with self.assertRaises(AssertionError):
_ = Mp3Compression(min_bitrate=2, max_bitrate=4)
with self.assertRaises(AssertionError):
_ = Mp3Compression(min_bitrate=64, max_bitrate=8)
| 40.507042
| 86
| 0.695758
| 353
| 2,876
| 5.407932
| 0.181303
| 0.075432
| 0.054479
| 0.079623
| 0.857517
| 0.843897
| 0.843897
| 0.75275
| 0.75275
| 0.75275
| 0
| 0.054434
| 0.207928
| 2,876
| 70
| 87
| 41.085714
| 0.783582
| 0
| 0
| 0.625
| 0
| 0
| 0.008345
| 0
| 0
| 0
| 0
| 0
| 0.267857
| 1
| 0.089286
| false
| 0
| 0.071429
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9669d55e7e8184ccd1b231f9091711578c491c29
| 74,121
|
py
|
Python
|
plotGAN.py
|
spagliarini/canary-vocal-sensorimotor-model
|
36eedd6e8a690526cfcdcd416d9d6ff65643098d
|
[
"MIT"
] | null | null | null |
plotGAN.py
|
spagliarini/canary-vocal-sensorimotor-model
|
36eedd6e8a690526cfcdcd416d9d6ff65643098d
|
[
"MIT"
] | null | null | null |
plotGAN.py
|
spagliarini/canary-vocal-sensorimotor-model
|
36eedd6e8a690526cfcdcd416d9d6ff65643098d
|
[
"MIT"
] | 1
|
2021-12-08T16:13:44.000Z
|
2021-12-08T16:13:44.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue 26 March 18:44:45 2020
@author: Mnemosyne
Vocal learning model results (plots of)
"""
import os
import time
import glob
import pickle
import numpy as np
import matplotlib
import librosa
from matplotlib import rcParams, cm, colors
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.mplot3d import Axes3D
import scipy.io.wavfile as wav
csfont = {'fontname':'Times New Roman'}
from songbird_data_analysis import Song_functions
def magnitude(v):
"""
:param v = (x,y,z): 3D cartesian coordinates - vector
:return m: magnitude (Euclidian norm in this case)
"""
m = np.sqrt(v[0]**2 + v[1]**2 + v[2]**2)
return m
def polar_coord(v):
"""
:param v = (x,y,z): 3D cartesian coordinates - vector
:return r,phi, theta: polar coordinates
"""
r = np.sqrt(v[0]**2 + v[1]**2 + v[2]**2)
phi = np.arctan(v[1]/v[0])
theta = np.arctan(np.sqrt(v[0]**2 + v[1]**2)/v[2])
return r, phi, theta
def arctan_coord(v):
"""
:param v: 3D cartesian coordinates - vector
:return x_new, y_new: 2D vector with x_new = arctan(v0/v2) ane y_new = arctan(v0/v2)
"""
x_new = np.arctan(v[0]/v[1])
y_new = np.arctan(v[0]/v[2])
return x_new, y_new
def arctan_distance(v,w):
"""
:param v, w: vectors of the same size
:return: "angular" distance component by componet - vector
"""
d = np.zeros((np.size(v),))
for i in range(0, np.size(v)):
d[i] = np.arctan(v[i] - w[i])
return d
def create_sphere(cx,cy,cz, r, resolution=360):
'''
create sphere with center (cx, cy, cz) and radius r
'''
phi = np.linspace(0, 2*np.pi, 2*resolution)
theta = np.linspace(0, np.pi, resolution)
theta, phi = np.meshgrid(theta, phi)
r_xy = r*np.sin(theta)
x = cx + np.cos(phi) * r_xy
y = cy + np.sin(phi) * r_xy
z = cz + r * np.cos(theta)
return np.stack([x,y,z])
def plot_auditory_activation(args):
"""
Plot the results of the different auditory activation functions (results from the test function)
"""
# Repertoire
classes = ['A', 'B1', 'B2', 'C', 'D', 'E', 'H', 'J1', 'J2', 'L', 'M', 'N', 'O', 'Q', 'R', 'V']
for sim_counter in range(0, args.N_sim):
for cl in range(0, len(args.classifier_name)):
print(args.classifier_name[cl])
softmax_sum_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_softmax_sum_expl_' + str(sim_counter) + '.npy')
softmax_mean_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_softmax_mean_expl_' + str(sim_counter) + '.npy')
raw_score_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_raw_score_expl_' + str(sim_counter) + '.npy')
raw_mean_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_mean_expl_' + str(sim_counter) + '.npy')
mean_norm_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_mean_norm_expl_' + str(sim_counter) + '.npy')
logistic_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_logistic_expl_' + str(sim_counter) + '.npy')
tanh_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_tanh_expl_' + str(sim_counter) + '.npy')
minmax_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_minmax_expl_' + str(sim_counter) + '.npy')
sign_minmax_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_sign_minmax_expl_' + str(sim_counter) + '.npy')
sign_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_sign_expl_' + str(sim_counter) + '.npy')
square_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_square_expl_' + str(sim_counter) + '.npy')
arctg_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_arctg_expl_' + str(sim_counter) + '.npy')
scaling_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_scaling_expl' + str(sim_counter) + '.npy', allow_pickle=True)
scaling_softmax_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_scaling_softmax_expl' + str(sim_counter) + '.npy', allow_pickle=True)
softmax_MAX_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_softmax_MAX_expl' + str(sim_counter) + '.npy', allow_pickle=True)
max_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_max_expl' + str(sim_counter) + '.npy', allow_pickle=True)
max_norm_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_max_norm_expl' + str(sim_counter) + '.npy', allow_pickle=True)
p95_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_p95_expl' + str(sim_counter) + '.npy', allow_pickle=True)
for i in range(0, np.shape(raw_score_expl)[0]):
for j in range(0, len(classes)):
if p95_expl[i,j] > 1:
p95_expl[i,j] = 1
# Time vector
x_time = np.linspace(0, np.shape(raw_score_expl)[0], np.shape(raw_score_expl)[0])
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(raw_score_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_ylim(0, 1000)
ax[i, j].set_xlabel('MinMax score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_raw_score_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(p95_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h, width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(-0.1, 1)
ax[i, j].set_ylim(0, 1500)
ax[i, j].set_xlabel('p95 score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_p95_expl_pw' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(max_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h, width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_ylim(0, 1000)
ax[i, j].set_xlabel('Max score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_max_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(max_norm_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h, width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_ylim(0, 1000)
ax[i, j].set_xlabel('Max norm score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_max_norm_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(scaling_softmax_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h, width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_ylim(0, 1000)
ax[i, j].set_xlabel('Scaling softmax score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_scaling_softmax_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(softmax_MAX_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h, width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_ylim(0, 1000)
ax[i, j].set_xlabel('Softmax MAX score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_softmax_MAX_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(scaling_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h, width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(-0.1, 1)
ax[i, j].set_ylim(0, 1500)
ax[i, j].set_xlabel('Scaling score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_scaling_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(arctg_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(-1, 1)
ax[i, j].set_xlabel('Arctg score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_arctg_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(square_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(-1, 1)
ax[i, j].set_xlabel('Square root score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_square_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(sign_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(-1, 1)
ax[i, j].set_xlabel('Sign score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_sign_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(minmax_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(-1, 1)
ax[i, j].set_xlabel('Minmax score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_minmax_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(sign_minmax_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i,j].set_ylim(0,800)
ax[i, j].set_xlabel('Sign minmax score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_sign_minmax_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(logistic_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h, width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Logistic score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_logistic_expl_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(tanh_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(-1, 1)
ax[i, j].set_xlabel('Tanh score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_tanh_expl_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(raw_mean_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Raw mean score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_raw_mean_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(mean_norm_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Mean score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_mean_norm_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(softmax_sum_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Soft-max', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_softmax_sum_expl' + str(
sim_counter) + '.' + args.format)
plt.close('all')
for b in range(0, np.size(args.beta)):
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(softmax_mean_expl[b][:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h , width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Raw score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[
cl] + '_softmax_mean_expl_beta_' + str(args.beta[b]) + '_' + str(
sim_counter) + '.' + args.format)
print('Done')
def plot_sensory(args):
"""
Plots of the results obtained from the leanring model (VLM function).
"""
# Colors
color = ['r', 'b', 'k', 'orange', 'magenta', 'purple']
# Repertoire
classes = ['A', 'B1', 'B2', 'C', 'D', 'E', 'H', 'J1', 'J2', 'L', 'M', 'N', 'O', 'Q', 'R', 'V']
p95_mean = np.zeros((len(args.learning_rate), args.n_points + 1, len(classes)))
for lr in range(0, len(args.learning_rate)):
print(args.learning_rate[lr])
for cl in range(0, len(args.classifier_name)):
print(args.classifier_name[cl])
p95_all_sim = []
for sim_counter in range(0, args.N_sim):
p95 = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_p95_sim_' + str(sim_counter) + '.npy')
p95_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_p95_expl_' + str(sim_counter) + '.npy')
# Focus on 200 time steps
p95_focus = p95[0:200, :]
# Remove focus (every N points up to 200 points) - CHECK PLOT
p95_begin = p95[0:200, :]
p95_jump = np.zeros((args.n_points + 1, np.size(args.T_names)))
p95_jump[0:14, :] = p95_begin[0::15, :]
p95_jump[14::, :] = p95[200::, :]
# All sim vector
p95_all_sim.append(p95_jump)
# Time vector
x_time = np.linspace(0, args.MAX_trial, np.shape(p95_jump)[0])
x_time_expl = np.linspace(0, np.shape(p95_expl)[0], np.shape(p95_expl)[0])
x_time_focus = np.linspace(0, np.shape(p95_focus)[0], np.shape(p95_focus)[0])
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time_focus, p95_focus[:, 4 * i + j], 'b')
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, np.shape(p95_focus)[0])
ax[i, j].set_ylabel('Average A', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_p95_FOCUS_sim' + str(
sim_counter) + '.' + args.format)
W_p95 = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_W_p95_sim_' + str(sim_counter) + '.npy')[0:args.MAX_trial, :, :]
# Plot the evolution of the synaptic weights over trials
if np.size(args.T_names) == len(classes):
fig, ax = plt.subplots(4, 4, sharex='col', sharey='row', figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
for k in range(0, args.wavegan_latent_dim):
ax[i, j].plot(x_time_expl, W_p95[:, k, 4 * i + j], color[k])
ax[i, j].set_ylabel('Weights', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i,j].set_ylim(-1,1)
plt.tight_layout()
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + 'Synaptic_weights_evolution_p95' + str(sim_counter) + '.' + args.format)
# Plot activation of the exploration
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time_expl, p95_expl[:, 4 * i + j], 'b')
#ax[i, j].set_ylim(0, 1)
ax[i, j].set_ylabel('Average A', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_p95_expl' + str(
sim_counter) + '.' + args.format)
# Plot activation during learning
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time, p95_all_sim[sim_counter][:, 4 * i + j], 'b')
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, args.MAX_trial-1)
ax[i, j].set_ylabel('Average A', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_p95_sim' + str(
sim_counter) + '.' + args.format)
# [TODO] add comment here when I try this option
if args.example == True:
if sim_counter == 1:
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5), sharey=True, sharex=True)
for lr in range(0, len(args.learning_rate)):
ax.plot(x_time, p95_all_sim[sim_counter][:, 14], 'b')
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.set_xlim(0, args.MAX_trial)
ax.set_xlabel('Time (in number of time steps)', fontsize=15)
ax.set_ylabel('Activation', fontsize=15)
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_R' + '.' + args.format)
plt.close('all')
# Average over multiple simulations
p95_mean_sim = np.mean(p95_all_sim, axis=0)
p95_mean[lr, :, :] = p95_mean_sim
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for sim_counter in range(0, args.N_sim):
for i in range(0, 4):
for j in range(0, 4):
#ax[i, j].plot(x_time, np.ones((np.shape(p95)[0], 1)), 'k')
ax[i, j].plot(x_time, p95_all_sim[sim_counter][:, 4 * i + j], c=color[sim_counter], alpha=.7)
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, args.MAX_trial)
ax[i, j].set_ylabel('Average A', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_p95_sim_ALL' + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for sim_counter in range(0, args.N_sim):
for i in range(0, 4):
for j in range(0, 4):
#ax[i, j].plot(x_time, np.ones((np.shape(p95)[0], 1)), 'k')
ax[i, j].plot(x_time, p95_mean_sim[:, 4 * i + j], c=color[sim_counter], alpha=.7)
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, args.MAX_trial)
ax[i, j].set_ylabel('Average A', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_p95_MEAN' + '.' + args.format)
# Comparison between different learning rates
cfr_lr = ['10e-1', '10e-2']
fig, ax = plt.subplots(4, 4, figsize=(12, 7))
for lr in range(0, len(args.learning_rate)):
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time, p95_mean[lr,:, 4 * i + j], c=color[lr], alpha=.7, label=cfr_lr[lr])
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, args.MAX_trial)
ax[0, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, 0].set_ylabel('Average A', fontsize=8)
ax[0, 0].legend(fontsize=5)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + '_p95_MEAN_all' + '.' + args.format)
np.save(args.data_dir + '/' + 'p95_MEAN_lr_' + str(args.wavegan_latent_dim) + '.npy' ,p95_mean)
plt.close('all')
print('Done')
def cfr_dim13(p95_MEAN, colors, ld, args):
"""
:param p95_MEAN: list of the arrays containing the data (one per latent space condition, two values each - one per learning rate condition)
:return: figure with the comparison (one per leanring rate condition)
"""
x_time = np.linspace(0, args.MAX_trial, 201)
classes = ['A', 'B1', 'B2', 'C', 'D', 'E', 'H', 'J1', 'J2', 'L', 'M', 'N', 'O', 'Q', 'R', 'V']
for lr in range(0, len(args.learning_rate)):
fig, ax = plt.subplots(4, 4, figsize=(12, 7))
for i in range(0, 4):
for j in range(0, 4):
for l in range(0, len(p95_MEAN)):
ax[i, j].plot(x_time, p95_MEAN[l][lr,:, 4 * i + j], c=colors[l], alpha=.7, label=str(ld[l]))
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, args.MAX_trial)
ax[0, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, 0].set_ylabel('Average A', fontsize=8)
ax[0, 0].legend(fontsize=5)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + '_p95_MEAN_lr_' + str(args.learning_rate[lr]) + '.' + args.format)
plt.close('all')
print('Done')
def plot_sensory_test(args):
# Colors
color = ['r', 'b', 'k', 'orange', 'magenta', 'purple']
# Repertoire
classes = ['A', 'B1', 'B2', 'C', 'D', 'E', 'H', 'J1', 'J2', 'L', 'M', 'N', 'O', 'Q', 'R', 'V']
for sim_counter in range(0, args.N_sim):
cfr_class_A_all = []
cfr_class_A_expl_all = []
cfr_class_raw_all = []
cfr_class_expl_all = []
conv = []
for cl in range(0, len(args.classifier_name)):
print(args.classifier_name[cl])
cfr_class_A = []
cfr_class_A_expl = []
cfr_class_raw = []
cfr_class_expl = []
mean_spectrogram_env = []
T = []
for lr in range(0, len(args.learning_rate)):
print(args.learning_rate[lr])
sensory_gen = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_A_sim_' + str(sim_counter) + '.npy')
sensory_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_A_expl_' + str(sim_counter) + '.npy')
sensory_expl_all = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_A_expl_all_' + str(sim_counter) + '.npy')
raw_score = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_raw_score_sim_' + str(sim_counter) + '.npy')
max_score = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_sim_' + str(sim_counter) + '.npy')
max_norm = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_norm_sim_' + str(sim_counter) + '.npy')
max_scaling = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_scaling_sim_' + str(sim_counter) + '.npy')
raw_score_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_raw_score_expl_' + str(sim_counter) + '.npy')
max_score_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_score_expl_' + str(sim_counter) + '.npy')
max_norm_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_norm_expl_' + str(sim_counter) + '.npy')
max_scaling_expl = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_max_scaling_expl_' + str(sim_counter) + '.npy')
#cfr_class_A.append(sensory_gen)
#cfr_class_A_expl.append(sensory_expl)
cfr_class_raw.append(raw_score)
cfr_class_expl.append(raw_score_expl)
# Time vector
x_time = np.linspace(0, args.MAX_trial, np.shape(raw_score)[0])
x_time_expl = np.linspace(0, np.shape(raw_score_expl)[0], np.shape(raw_score_expl)[0])
#
# if args.learning_rate[lr] == 0.01:
# for c in range(0, np.size(args.T_names)):
# loc = np.where(raw_score[:, c] > 0.9)[0]
#
# spectrograms_envelope = []
# for sp in range(0, np.size(loc)):
# samples_aux, sr = librosa.load(
# args.data_dir + '/' + args.sim_name + str(sim_counter) + '/' + args.classifier_name[
# cl] + '_lr' + str(args.learning_rate[lr]) + '_' + args.sim_name + str(
# sim_counter) + '_' + str(
# loc[sp]) + '/' + 'sensory_production_' + args.T_names[c] + '.wav', sr=16000)
# trim = librosa.effects.trim(samples_aux.astype(np.float), top_db=20)
# samples_aux = trim[0]
#
# if samples_aux.size / 16 < 4000:
# aux_size = 4000 - samples_aux.size / 16
# silence = np.zeros((int(round(aux_size / 2) * 16)), )
# samples_aux = np.append(silence, samples_aux)
# samples_aux = np.append(samples_aux, silence)
#
# rawsong = samples_aux.astype(float)
# rawsong = rawsong.flatten()
# amp = Song_functions.smooth_data(rawsong, sr, freq_cutoffs=(500, 7999))
#
# # if args.T_names[c] == 'N':
# # new_song = rawsong[0:np.where(amp > 0.00001)[0][-1]] # new training
# # silence = np.zeros((8000 - np.size(new_song),))
# # new_song = np.append(silence, new_song)
#
# # else:
# new_song = rawsong[np.where(amp > 0.00001)[0][0]::]
# silence = np.zeros((100000 - np.size(new_song),))
# new_song = np.append(new_song, silence)
#
# X = librosa.stft(new_song, n_fft=args.N, hop_length=args.H, win_length=args.N,
# window='hann',
# pad_mode='constant', center=True)
# T_coef = np.arange(X.shape[1]) * args.H / sr * 1000 # save to plot
# spectrograms_envelope.append(np.log(1 + 100 * np.abs(X ** 2)))
#
# mean_spectrogram_env.append(np.mean(spectrograms_envelope, axis=0)) # dimension 16
# T.append(T_coef)
#
# np.save(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
# args.learning_rate[lr]) + 'Mean_spectrogram_envelope', mean_spectrogram_env)
#
# # Mean spectrogram after convergence
# fig, axs = plt.subplots(nrows=4, ncols=4, figsize=(10, 14), sharey=True, sharex=True)
# for i in range(0, 4):
# for j in range(0, 4):
# extent = [0, np.max(T_coef[4 * i + j]), 0, 8000]
# if mean_spectrogram_env[4 * i + j].size > 1:
# axs[i, j].imshow(mean_spectrogram_env[4 * i + j], extent=extent, cmap=args.color,
# aspect='auto', origin='lower',
# norm=colors.PowerNorm(gamma=0.5)) # gamma 0.2 in original data
# axs[i, j].set_title(args.T_names[4 * i + j], fontsize=15)
# # axs[i, j].set_xlim(0,350)
# axs[i, j].spines['top'].set_color('none')
# axs[i, j].spines['right'].set_color('none')
# axs[0, j].set_xlabel('Time (ms)', fontsize=15)
# axs[i, 3].set_ylabel('Frequency (Hz)', fontsize=15)
# plt.tight_layout()
# plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
# args.learning_rate[lr]) + 'Mean_spectrogram_envelope.' + args.format)
#
# W and Delta W
# W = np.load(args.data_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_W_sim_' + str(sim_counter) + '.npy')[0:args.time_limit, :, :]
# Plot the evolution of the synaptic weights over trials
# if np.size(args.T_names) == len(classes):
# fig, ax = plt.subplots(4, 4, sharex='col', sharey='row', figsize=(10, 5))
# for i in range(0, 4):
# for j in range(0, 4):
# for k in range(0, args.wavegan_latent_dim):
# ax[i, j].plot(x_time, W[:, k, 4 * i + j], color[k])
# ax[i, j].set_ylabel('Weights', fontsize=8)
# ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
# ax[i, j].set_title(classes[4 * i + j], fontsize=8)
# plt.tight_layout()
# plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + 'Synaptic_weights_evolution_' + str(
# sim_counter) + '.' + args.format)
# diff = []
# for s in range(0, np.size(args.T_names)):
# diff.append(np.abs(np.diff(W[:, :, s], axis = 0)))
# fig, ax = plt.subplots(4, 4, figsize=(10, 5))
# for i in range(0, 4):
# for j in range(0, 4):
# for w in range(0, args.wavegan_latent_dim):
# ax[i,j].plot(x_time[0:args.time_limit-1], diff[4 * i + j][:, w], 'b')
# ax[i, j].set_ylim(0, np.max(diff))
# ax[i,j].set_ylabel('Delta W', fontsize=8)
# ax[i,j].set_xlabel('Time (in number of time steps)', fontsize=8)
# ax[i, j].set_title(classes[4 * i + j], fontsize=8)
# plt.tight_layout()
# plt.savefig(
# args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr]) + '_diff_all' + str(sim_counter) + '.' + args.format)
if np.size(args.T_names) == len(classes):
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time_expl, np.ones((np.shape(max_score_expl)[0], 1)), 'k')
ax[i, j].plot(x_time_expl, max_score_expl[:, 4 * i + j], 'b')
ax[i, j].set_ylim(0, 1)
ax[i, j].set_ylabel('Max score', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_max_score_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time_expl, np.ones((np.shape(max_norm_expl)[0], 1)), 'k')
ax[i, j].plot(x_time_expl, max_norm_expl[:, 4 * i + j], 'b')
ax[i, j].set_ylim(0, 1)
ax[i, j].set_ylabel('Max-norm score', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_max_norm_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time_expl, np.ones((np.shape(max_scaling_expl)[0], 1)), 'k')
ax[i, j].plot(x_time_expl, max_scaling_expl[:, 4 * i + j], 'b')
ax[i, j].set_ylim(0, 1)
ax[i, j].set_ylabel('Scaling score', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_max_scaling_expl' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time, np.ones((np.shape(max_score)[0], 1)), 'k')
ax[i, j].plot(x_time, max_score[:, 4 * i + j], 'b')
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, args.MAX_trial)
ax[i, j].set_ylabel('Max score', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_max_sim' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time, np.ones((np.shape(max_norm)[0], 1)), 'k')
ax[i, j].plot(x_time, max_norm[:, 4 * i + j], 'b')
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, args.MAX_trial)
ax[i, j].set_ylabel('Max-norm score', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_max_norm_sim' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time, np.ones((np.shape(max_scaling)[0], 1)), 'k')
ax[i, j].plot(x_time, max_scaling[:, 4 * i + j], 'b')
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, args.MAX_trial)
ax[i, j].set_ylabel('Scaling score', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_max_scaling_sim' + str(
sim_counter) + '.' + args.format)
# Sensory response raw score
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time, np.ones((np.shape(raw_score)[0], 1)), 'k')
ax[i, j].plot(x_time, raw_score[:, 4 * i + j], 'b')
ax[i, j].set_ylim(0, 1)
ax[i, j].set_xlim(0, args.MAX_trial)
ax[i, j].set_ylabel('Raw score', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_raw_score_sim' + str(
sim_counter) + '.' + args.format)
raw_score_sum = np.zeros((args.time_limit,))
for t in range(0, args.time_limit):
raw_score_sum[t] = np.sum(raw_score[t, :])
aux_save_raw = []
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
print('Raw_score')
for i in range(0, 4):
for j in range(0, 4):
aux_save_raw.append(np.size(np.where(raw_score_expl[:, 4 * i + j] > 0.9)))
# print(np.size(np.where(raw_score_expl[:, 4 * i + j]>0.9)))
# input()
ax[i, j].plot(x_time_expl, np.ones((np.shape(raw_score_expl)[0], 1)), 'k')
ax[i, j].plot(x_time_expl, raw_score_expl[:, 4 * i + j], 'b')
ax[i, j].set_xlim(0, 300)
ax[i, j].set_ylim(0, 1)
ax[i, j].set_ylabel('Raw_score', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_raw_score_expl' + str(
sim_counter) + '.' + args.format)
if args.learning_rate[lr] == 0.1:
np.save(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_cumulative_raw_score_expl.npy', aux_save_raw)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(raw_score_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h / np.max(h), width=0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Raw score', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_raw_score_expl_hist' + str(sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time, np.ones((np.shape(sensory_gen)[0], 1)), 'k')
ax[i, j].plot(x_time, sensory_gen[:, 4 * i + j], 'b')
ax[i, j].set_ylim(0, 1)
ax[i, j].set_ylabel('Soft-max', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr])+ '_Sensory_response_sim' + str(
sim_counter) + '.' + args.format)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
ax[i, j].plot(x_time_expl, np.ones((np.shape(sensory_expl_all)[0], 1)), 'k')
ax[i, j].plot(x_time_expl, sensory_expl_all[:, 4 * i + j], 'b')
ax[i, j].set_ylim(0, 1)
ax[i, j].set_ylabel('Soft-max', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(
args.learning_rate[lr]) + '_Sensory_response_expl_all' + str(
sim_counter) + '.' + args.format)
plt.close('all')
aux_save_softmax = []
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
print('sensory_expl')
for i in range(0, 4):
for j in range(0, 4):
aux_save_softmax.append(np.size(np.where(sensory_expl[:, 4 * i + j] > 0.9)))
##input()
ax[i, j].plot(x_time_expl, np.ones((np.shape(sensory_expl)[0], 1)), 'k')
ax[i, j].plot(x_time_expl, sensory_expl[:, 4 * i + j], 'b')
ax[i, j].set_xlim(0,300)
ax[i, j].set_ylim(0, 1)
ax[i, j].set_ylabel('Soft-max', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr])+ '_Sensory_response_expl' + str(sim_counter) + '.' + args.format)
if args.learning_rate[lr] == 0.1:
np.save(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr])+ '_cumulative_softmax_expl.npy', aux_save_softmax)
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
h, bins = np.histogram(sensory_expl[:, 4 * i + j], bins=15)
ax[i, j].bar(bins[:-1], h/np.max(h), width = 0.05, color='b', alpha=0.6)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
ax[i, j].set_xlim(0, 1)
ax[i, j].set_xlabel('Soft-max', fontsize=8)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
plt.tight_layout()
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_lr' + str(args.learning_rate[lr])+ '_Sensory_response_expl_hist' + str(sim_counter) + '.' + args.format)
cfr_class_A_all.append(cfr_class_A)
cfr_class_A_expl_all.append(cfr_class_A_expl)
cfr_class_raw_all.append(cfr_class_raw)
cfr_class_expl_all.append(cfr_class_expl)
cfr_lr = ['10e-1', '10e-2']
# CFR classifier sensory response
for cl in range(0, len(args.classifier_name)):
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
for lr in range(0, len(args.learning_rate)):
ax[i, j].plot(x_time, np.ones((np.shape(cfr_class_A_all[cl][lr])[0], 1)), 'k')
ax[i, j].plot(x_time, cfr_class_A_all[cl][lr][:, 4 * i + j], color=color[lr], label = cfr_lr[lr])
ax[i, j].set_ylim(0, 1)
ax[i, j].set_ylabel('Soft-max', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].legend(loc='lower right', fontsize=5)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_CFR_Sensory_response_sim' + str(sim_counter) + '.' + args.format)
# CFR sensory response raw score
fig, ax = plt.subplots(4, 4, figsize=(10, 5))
for i in range(0, 4):
for j in range(0, 4):
for lr in range(0, len(args.learning_rate)):
ax[i, j].plot(x_time, np.ones((np.shape(cfr_class_raw_all[cl][lr])[0], 1)), 'k')
ax[i, j].plot(x_time, cfr_class_raw_all[cl][lr][:, 4 * i + j], color=color[lr], label=cfr_lr[lr])
ax[i, j].set_ylim(0, 1)
ax[i, j].set_ylabel('Raw score', fontsize=8)
ax[i, j].set_xlabel('Time (in number of time steps)', fontsize=8)
ax[i, j].legend(loc='lower right', fontsize=5)
ax[i, j].set_title(classes[4 * i + j], fontsize=8)
ax[i, j].spines['top'].set_color('none')
ax[i, j].spines['right'].set_color('none')
plt.tight_layout()
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[cl] + '_CFR_raw_score_sim' + str(
sim_counter) + '.' + args.format)
# Ex syllable B
fig, axs = plt.subplots(nrows=2, ncols=1, figsize=(10, 5), sharey=True, sharex=True)
for lr in range(0, len(args.learning_rate)):
axs[0].plot(x_time, np.ones((np.shape(cfr_class_expl_all[1][lr])[0], 1)), 'k')
axs[0].plot(x_time, cfr_class_expl_all[1][lr][:, 1], 'b')
axs[0].spines['top'].set_color('none')
axs[0].spines['right'].set_color('none')
axs[0].set_xlim(0, 300)
#axs[0].set_xlabel('Time (in number of time steps)', fontsize=8)
axs[0].legend(loc='lower right', fontsize=5)
axs[0].set_ylabel('Raw score', fontsize=15)
for lr in range(0, len(args.learning_rate)):
axs[1].plot(x_time, np.ones((np.shape(cfr_class_raw_all[1][lr])[0], 1)), 'k')
axs[1].plot(x_time, cfr_class_raw_all[1][lr][:, 1], color=color[lr], label = cfr_lr[lr])
axs[1].spines['top'].set_color('none')
axs[1].spines['right'].set_color('none')
axs[1].set_xlim(0, 300)
axs[1].set_xlabel('Time (in number of time steps)', fontsize=8)
axs[1].legend(loc='lower right', fontsize=5)
axs[1].set_ylabel('Raw score', fontsize=15)
plt.tight_layout()
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'B1_realBIS.' + args.format)
# Ex syllable C
fig, axs = plt.subplots(nrows=2, ncols=1, figsize=(10, 5), sharey=True, sharex=True)
for lr in range(0, len(args.learning_rate)):
axs[0].plot(x_time, np.ones((np.shape(cfr_class_expl_all[0][lr])[0], 3)), 'k')
axs[0].plot(x_time, cfr_class_expl_all[0][lr][:, 3], 'b')
axs[0].set_xlim(0, 300)
axs[0].spines['top'].set_color('none')
axs[0].spines['right'].set_color('none')
#axs[0].set_xlabel('Time (in number of time steps)', fontsize=8)
axs[0].legend(loc='lower right', fontsize=5)
axs[0].set_ylabel('Raw score', fontsize=15)
for lr in range(0, len(args.learning_rate)):
axs[1].plot(x_time, np.ones((np.shape(cfr_class_raw_all[0][lr])[0], 3)), 'k')
axs[1].plot(x_time, cfr_class_raw_all[0][lr][:, 3], color=color[lr], label = cfr_lr[lr])
axs[1].set_xlim(0, 300)
axs[1].spines['top'].set_color('none')
axs[1].spines['right'].set_color('none')
axs[1].set_xlabel('Time (in number of time steps)', fontsize=8)
axs[1].legend(loc='lower right', fontsize=5)
axs[1].set_ylabel('Raw score', fontsize=15)
plt.tight_layout()
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'C_extBIS.' + args.format)
input()
if np.size(args.T_names) == 3:
fig, ax = plt.subplots(args.ns, 1, figsize=(5, 10))
for j in range(0, args.ns):
ax.flat[j].plot(x_time, np.ones((np.shape(sensory_gen)[0], 1)))
ax.flat[j].plot(x_time, sensory_gen[:,j], color[j], label='Syllable '+ args.T_names[j])
ax[j].set_ylabel('Sensory response', fontsize=15)
ax[j].set_xlabel('Time (in number of time steps)', fontsize=15)
plt.legend(loc='lower right', fontsize=15)
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'Sensory_response_sim' + str(sim_counter) + '.' + args.format)
fig, ax = plt.subplots(args.ns, 1, figsize=(5, 10))
for j in range(0, args.ns):
ax.flat[j].plot(x_time, np.ones((np.shape(sensory_expl)[0], 1)))
ax.flat[j].plot(x_time, sensory_expl[:, j], color[j], label='Syllable ' + args.T_names[j])
ax[j].set_ylabel('Sensory response', fontsize=15)
ax[j].set_xlabel('Time (in number of time steps)', fontsize=15)
plt.legend(loc='lower right', fontsize=15)
plt.savefig(
args.data_dir + '/' + args.output_dir + '/' + 'Sensory_response_expl' + str(sim_counter) + '.' + args.format)
print('Done')
def plot_syll(args):
"""
Plot the example of a syllable across time: change the name in syllables variable (just below this comment)
"""
syllables = glob.glob(args.data_dir + '/' + '*R.wav')
counter = 0
while counter < len(syllables):
samples_aux, sr = librosa.load(syllables[counter], sr=16000)
trim = librosa.effects.trim(samples_aux.astype(np.float), top_db=20)
samples_aux = trim[0]
X = librosa.stft(samples_aux, n_fft=args.N, hop_length=args.H, win_length=args.N, window='hann', pad_mode='constant', center=True)
Y = np.log(1 + 100 * np.abs(X) ** 2)
T_coef = np.arange(X.shape[1]) * args.H / sr
K = args.N // 2
F_coef = np.arange(K + 1) * sr / args.N
plt.figure(figsize=(4, 18))
extent = [T_coef[0], T_coef[-1], F_coef[0], F_coef[-1]]
plt.imshow(Y, aspect='auto', origin='lower', extent=extent, cmap=args.color, norm=colors.PowerNorm(gamma=0.5))
plt.xlabel('Time (seconds)')
plt.ylabel('Frequency (Hz)')
plt.title(str(counter))
plt.tight_layout()
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + 'R' + '.' + args.format)
counter = counter + 1
print('Done')
def mean_spectro(learning_rate, sim_counter, ths, args):
"""
:param learning_rate: which learning rate
:param sim_counter: which simulation
:param ths threshold to define activation
:return: mean spectogram for each syllable when it is active more than a threshold
"""
# Load activation function and list of directories
p95 = np.load(args.data_dir + '/' + args.classifier_name[0] + '_lr' + str(learning_rate) + '_p95_sim_' + str(sim_counter) + '.npy')
# Remove focus (every N points up to 200 points)
p95_begin = p95[0:200, :]
p95_jump = np.zeros((args.n_points + 1, np.size(args.T_names)))
p95_jump[0:14, :] = p95_begin[0::15, :]
p95_jump[14::, :] = p95[200::, :]
list = np.zeros((args.n_points + 1,))
aux = np.linspace(0, 3000, 3000).astype(int)
list[0:200] = aux[0::15]
list[-1] = 3000
mean_spectrogram_env = []
T = []
for c in range(0, np.size(args.T_names)):
# Find where the activation threshold is reached/crossed
loc = np.where(p95_jump[:, c] > ths)[0]
spectrograms_envelope = []
for sp in range(0, np.size(loc)):
if loc[sp] < 200:
samples_aux, sr = librosa.load(
args.data_dir + '/' + args.sim_name + str(sim_counter) + '/' + args.classifier_name[
0] + '_lr' + str(learning_rate) + '_' + args.sim_name + str(sim_counter) + '_' + str(
int(list[loc[sp]])) + '/' + '__condition_0_' + str(int(list[loc[sp]])) + '/' + 'sensory_production_condition_0_' + args.T_names[c] + '.wav', sr=16000)
else:
loc[sp] = loc[sp]
samples_aux, sr = librosa.load(
args.data_dir + '/' + args.sim_name + str(sim_counter) + '/' + args.classifier_name[
0] + '_lr' + str(learning_rate) + '_' + args.sim_name + str(sim_counter) + '_' + str(
int(list[loc[sp]])) + '/' + '__condition_0_' + str(int(list[loc[sp]])) + '/' + 'sensory_production_condition_0_' + args.T_names[c] + '.wav', sr=16000)
trim = librosa.effects.trim(samples_aux.astype(np.float), top_db=20)
samples_aux = trim[0]
if samples_aux.size / 16 < 4000:
aux_size = 4000 - samples_aux.size / 16
silence = np.zeros((int(round(aux_size / 2) * 16)), )
samples_aux = np.append(silence, samples_aux)
samples_aux = np.append(samples_aux, silence)
rawsong = samples_aux.astype(float)
rawsong = rawsong.flatten()
amp = Song_functions.smooth_data(rawsong, sr, freq_cutoffs=(500, 7999))
new_song = rawsong[np.where(amp > 0.00001)[0][0]::]
silence = np.zeros((50000 - np.size(new_song),))
new_song = np.append(new_song, silence)
X = librosa.stft(new_song, n_fft=args.N, hop_length=args.H, win_length=args.N, window='hann', pad_mode='constant', center=True)
T_coef = np.arange(X.shape[1]) * args.H / sr * 1000
spectrograms_envelope.append(np.log(1 + 100 * np.abs(X ** 2)))
mean_spectrogram_env.append(np.mean(spectrograms_envelope, axis=0)) # dimension 16
T.append(T_coef)
#np.save(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[0] + '_' + str(sim_counter) + '_lr' + str(args.learning_rate) + 'Mean_spectrogram_envelope', mean_spectrogram_env)
# Mean spectrogram after convergence (plot)
fig, axs = plt.subplots(nrows=4, ncols=4, figsize=(10, 14), sharey=True, sharex=True)
for i in range(0, 4):
for j in range(0, 4):
extent = [0, 300, 0, 8000]
if mean_spectrogram_env[4 * i + j].size > 1:
axs[i, j].imshow(mean_spectrogram_env[4 * i + j], extent=extent, cmap=args.color, aspect='auto', origin='lower', norm=colors.PowerNorm(gamma=0.5)) # gamma 0.2 in original data
axs[i, j].set_title(args.T_names[4 * i + j], fontsize=15)
axs[i, j].set_xlim(0,20)
axs[i, j].spines['top'].set_color('none')
axs[i, j].spines['right'].set_color('none')
axs[0, j].set_xlabel('Time (ms)', fontsize=15)
axs[i, 3].set_ylabel('Frequency (Hz)', fontsize=15)
plt.tight_layout()
plt.savefig(args.data_dir + '/' + args.output_dir + '/' + args.classifier_name[0] + '_' + str(sim_counter) + '_lr' + str(args.learning_rate) + 'Mean_spectrogram_envelope.' + args.format)
print('Done')
if __name__ == '__main__':
import argparse
import glob
import sys
"""
Example how to run it:
>python plotGAN.py --option learning --data_dir experiment --output_dir plots
The output_dir will be created by default inside the data directory.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--option', type=str,
help='What do you want to see? Motor exploration or results after learning?',
choices=['sensory', 'activation_aud', 'syll', 'mean_spectro', 'cfr'])
parser.add_argument('--data_dir', type=str,
help='Data directory where the data are saved',
default=None)
parser.add_argument('--output_dir', type=str,
help='Output directory where to save the plots',
default=None)
simulation_args = parser.add_argument_group('Simulation')
simulation_args.add_argument('--MAX_trial', type=int,
help='Maximal number of trials',
default = 3001)
simulation_args.add_argument('--ns', type=int,
help='number of syllables',
default = 16)
simulation_args.add_argument('--N_sim', type=int, help='Number of instances', default=3)
simulation_args.add_argument('--T_names', type=list, help='Target syllables', default=['A', 'B1', 'B2', 'C', 'D', 'E', 'H', 'J1', 'J2', 'L', 'M', 'N', 'O', 'Q', 'R', 'V']) #['A', 'B1', 'B2', 'C', 'D', 'E', 'H', 'J1', 'J2', 'L', 'M', 'N', 'O', 'Q', 'R', 'V']) #['B1', 'C', 'M'])
simulation_args.add_argument('--sim_name', type=str, help='Sub directory containing the generations per each simulation', default='sensory_prod_sim_')
simulation_args.add_argument('--classifier_name', type=list, help='Which classifier model I want to use. Multiple classifier are allowed', default=['EXT']) #'REAL'
simulation_args.add_argument('--learning_rate', type=list,
help='Learning rate used during learning',
default = [0.1, 0.01]) #[0.1, 0.01]
simulation_args.add_argument('--beta', type=list, help='Type of auditory softmax activation',
default=[0.01, 0.1, 1, 5])
spectro_args = parser. add_argument_group('Spectorgram')
spectro_args.add_argument('--N', type = int, help='Nftt spectrogram librosa', default=256)
spectro_args.add_argument('--H', type = int, help='Hop length spectrogram librosa', default=64)
spectro_args.add_argument('--color', type = str, help='Colormap', default='inferno')
# TODO add reading of the params file, it could be that I need to change in the InverseLearningGAN the way I save
# args.txt. Perhaps using a dict or json instead or in addition.
wavegan_args = parser.add_argument_group('WaveGAN')
wavegan_args.add_argument('--wavegan_latent_dim', type=int,
help='Dimension of the latent space',
default = 2)
plot_args = parser.add_argument_group('Plots')
plot_args.add_argument('--format', type=str, help='Saving format', default='png')
plot_args.add_argument('--time_limit', type=int, help='Print only a certain time', default=100)
plot_args.add_argument('--n_points', type=int, help='How many point to be plot in the figure (=to saved points)', default=200)
plot_args.add_argument('--example', type=str, help='Figure of an example', default=True)
args = parser.parse_args()
# Make output dir
if args.output_dir != None:
if not os.path.isdir(args.data_dir + '/' + args.output_dir):
os.makedirs(args.data_dir + '/' + args.output_dir)
if args.option == 'activation_aud':
plot_auditory_activation(args)
if args.option == 'sensory':
plot_sensory(args)
if args.option == 'syll':
plot_syll(args)
if args.option =='mean_spectro':
learning_rate = 0.01
ths = 0.99
sim_counter = 2
mean_spectro(learning_rate, sim_counter, ths, args)
if args.option =='cfr':
# Latent space conditions
ld = [1, 2, 3, 6]
colors = ['r', 'b', 'gold', 'k']
p95_MEAN =[]
for i in range(0,len(ld)):
p95_MEAN.append(np.load(args.data_dir + '/' + 'p95_MEAN_lr_' + str(ld[i]) + '.npy'))
cfr_dim13(p95_MEAN, colors, ld, args)
| 56.237481
| 281
| 0.490104
| 9,973
| 74,121
| 3.468264
| 0.049333
| 0.023418
| 0.034346
| 0.033797
| 0.832374
| 0.813843
| 0.790974
| 0.77594
| 0.758738
| 0.745295
| 0
| 0.03641
| 0.350441
| 74,121
| 1,318
| 282
| 56.237481
| 0.682009
| 0.108863
| 0
| 0.607685
| 0
| 0
| 0.080228
| 0.005863
| 0
| 0
| 0
| 0.001517
| 0
| 1
| 0.011122
| false
| 0
| 0.016178
| 0
| 0.032356
| 0.013145
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
73a65b4dfefc648d870a970500e2a59002724788
| 4,201
|
py
|
Python
|
test/PR_test/integration_test/trace/io/test_restore_wizard.py
|
hanskrupakar/fastestimator
|
1c3fe89ad8b012991b524a6c48f328b2a80dc9f6
|
[
"Apache-2.0"
] | null | null | null |
test/PR_test/integration_test/trace/io/test_restore_wizard.py
|
hanskrupakar/fastestimator
|
1c3fe89ad8b012991b524a6c48f328b2a80dc9f6
|
[
"Apache-2.0"
] | null | null | null |
test/PR_test/integration_test/trace/io/test_restore_wizard.py
|
hanskrupakar/fastestimator
|
1c3fe89ad8b012991b524a6c48f328b2a80dc9f6
|
[
"Apache-2.0"
] | null | null | null |
import os
import shutil
import tempfile
import unittest
import fastestimator as fe
from fastestimator.backend.load_model import load_model
from fastestimator.backend.save_model import save_model
from fastestimator.test.unittest_util import sample_system_object, sample_system_object_torch
from fastestimator.trace.io import RestoreWizard
from fastestimator.util.data import Data
def get_model_name(system):
model_names = []
for model in system.network.models:
model_names.append(model.model_name)
return model_names
class TestRestoreWizard(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.system_json_path = os.path.join(tempfile.gettempdir(), 'restorewizard')
def setUp(self):
self.data = Data({})
def test_tf_model_on_begin(self):
restore_wizard = RestoreWizard(directory=self.system_json_path)
restore_wizard.system = sample_system_object()
# save state
for model in restore_wizard.system.network.models:
save_model(model, save_dir=restore_wizard.directory, save_optimizer=True)
restore_wizard.system.save_state(json_path=os.path.join(restore_wizard.directory, restore_wizard.system_file))
restore_wizard.on_begin(data=self.data)
with self.subTest('Check the restore files directory'):
self.assertEqual(restore_wizard.directory, self.system_json_path)
with self.subTest('check data dictionary'):
self.assertEqual(self.data['epoch'], 0)
if os.path.exists(self.system_json_path):
shutil.rmtree(self.system_json_path)
def test_tf_model_on_epoch_end(self):
restore_wizard = RestoreWizard(directory=self.system_json_path)
restore_wizard.system = sample_system_object()
restore_wizard.on_epoch_end(data=self.data)
model_names = get_model_name(restore_wizard.system)
with self.subTest('check json exists'):
self.assertTrue(os.path.exists(os.path.join(self.system_json_path, 'system.json')))
with self.subTest('Check if model weights path stored'):
self.assertTrue(os.path.exists(os.path.join(self.system_json_path, model_names[0] + '.h5')))
with self.subTest('Check if model optimizer stored'):
self.assertTrue(os.path.exists(os.path.join(self.system_json_path, model_names[0] + '_opt.pkl')))
if os.path.exists(self.system_json_path):
shutil.rmtree(self.system_json_path)
def test_torch_model_on_begin(self):
restore_wizard = RestoreWizard(directory=self.system_json_path)
restore_wizard.system = sample_system_object_torch()
# save state
for model in restore_wizard.system.network.models:
save_model(model, save_dir=restore_wizard.directory, save_optimizer=True)
restore_wizard.system.save_state(json_path=os.path.join(restore_wizard.directory, restore_wizard.system_file))
restore_wizard.on_begin(data=self.data)
with self.subTest('Check the restore files directory'):
self.assertEqual(restore_wizard.directory, self.system_json_path)
with self.subTest('check data dictionary'):
self.assertEqual(self.data['epoch'], 0)
if os.path.exists(self.system_json_path):
shutil.rmtree(self.system_json_path)
def test_torch_model_on_epoch_end(self):
restore_wizard = RestoreWizard(directory=self.system_json_path)
restore_wizard.system = sample_system_object_torch()
restore_wizard.on_epoch_end(data=self.data)
model_names = get_model_name(restore_wizard.system)
with self.subTest('check json exists'):
self.assertTrue(os.path.exists(os.path.join(self.system_json_path, 'system.json')))
with self.subTest('Check if model weights path stored'):
self.assertTrue(os.path.exists(os.path.join(self.system_json_path, model_names[0] + '.pt')))
with self.subTest('Check if model optimizer stored'):
self.assertTrue(os.path.exists(os.path.join(self.system_json_path, model_names[0] + '_opt.pt')))
if os.path.exists(self.system_json_path):
shutil.rmtree(self.system_json_path)
| 48.848837
| 118
| 0.721019
| 563
| 4,201
| 5.127886
| 0.134991
| 0.117077
| 0.101836
| 0.124697
| 0.80291
| 0.792865
| 0.792865
| 0.792865
| 0.792865
| 0.792865
| 0
| 0.002033
| 0.180195
| 4,201
| 85
| 119
| 49.423529
| 0.836237
| 0.004999
| 0
| 0.60274
| 0
| 0
| 0.080919
| 0
| 0
| 0
| 0
| 0
| 0.136986
| 1
| 0.09589
| false
| 0
| 0.136986
| 0
| 0.260274
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
73aea2e572ef9a68d42419fb03af520a2efe8100
| 41
|
py
|
Python
|
Computable/Computable/Resources/python-lib/sympy/calculus/__init__.py
|
ktraunmueller/Computable
|
5a6a872c4c0f5e122c24c321cd877a949877dcf7
|
[
"MIT"
] | 26
|
2018-02-14T23:52:58.000Z
|
2021-08-16T13:50:03.000Z
|
Computable/Computable/Resources/python-lib/sympy/calculus/__init__.py
|
preslavrachev/Computable
|
2f802ff5a14628e425aae4ec14667d2f98c1fd75
|
[
"MIT"
] | null | null | null |
Computable/Computable/Resources/python-lib/sympy/calculus/__init__.py
|
preslavrachev/Computable
|
2f802ff5a14628e425aae4ec14667d2f98c1fd75
|
[
"MIT"
] | 10
|
2018-08-13T19:38:39.000Z
|
2020-04-19T03:02:00.000Z
|
from .singularities import singularities
| 20.5
| 40
| 0.878049
| 4
| 41
| 9
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.972973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fb57efc4070ea7ba05c646ae9feda566148776b0
| 371
|
py
|
Python
|
week6/task7.py
|
sdanil-ops/stepik-beegeek-python
|
02302ab85d581962a82cbce766b7b284d4c5491e
|
[
"MIT"
] | null | null | null |
week6/task7.py
|
sdanil-ops/stepik-beegeek-python
|
02302ab85d581962a82cbce766b7b284d4c5491e
|
[
"MIT"
] | null | null | null |
week6/task7.py
|
sdanil-ops/stepik-beegeek-python
|
02302ab85d581962a82cbce766b7b284d4c5491e
|
[
"MIT"
] | 1
|
2021-08-18T00:58:27.000Z
|
2021-08-18T00:58:27.000Z
|
# -----------------------------------------------------------
# Copyright (c) 2021. Danil Smirnov
# A positive real number is given. Print its fractional part.
# -----------------------------------------------------------
def get_fractional_part(number: float)-> float:
return float(number - (int(number // 1)))
print(get_fractional_part(float(input())))
| 46.375
| 64
| 0.466307
| 34
| 371
| 4.970588
| 0.647059
| 0.248521
| 0.201183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01548
| 0.12938
| 371
| 8
| 65
| 46.375
| 0.50774
| 0.598383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0.333333
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
fb5f842e93d05e2246aadf7cee2315b4220eef60
| 169
|
py
|
Python
|
Django/src/hello_world/views.py
|
D2MAC-dev/IT_academ
|
142c92a6edfb5dc5563bcabf7b1f21f53065985c
|
[
"Apache-2.0"
] | null | null | null |
Django/src/hello_world/views.py
|
D2MAC-dev/IT_academ
|
142c92a6edfb5dc5563bcabf7b1f21f53065985c
|
[
"Apache-2.0"
] | null | null | null |
Django/src/hello_world/views.py
|
D2MAC-dev/IT_academ
|
142c92a6edfb5dc5563bcabf7b1f21f53065985c
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def hello_world(request):
return HttpResponse("Hello world!!!")
| 21.125
| 41
| 0.775148
| 22
| 169
| 5.909091
| 0.727273
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142012
| 169
| 7
| 42
| 24.142857
| 0.896552
| 0.136095
| 0
| 0
| 0
| 0
| 0.097222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
fb72bc1fbb2c39ea0b678134a07c8e0e1af31e9c
| 71
|
py
|
Python
|
tirelire-account/app/adapters/event_publisher/__init__.py
|
AgRenaud/tirelire
|
0ac42dbf735dea4ecb741057bd037c18657b95c7
|
[
"MIT"
] | null | null | null |
tirelire-account/app/adapters/event_publisher/__init__.py
|
AgRenaud/tirelire
|
0ac42dbf735dea4ecb741057bd037c18657b95c7
|
[
"MIT"
] | null | null | null |
tirelire-account/app/adapters/event_publisher/__init__.py
|
AgRenaud/tirelire
|
0ac42dbf735dea4ecb741057bd037c18657b95c7
|
[
"MIT"
] | null | null | null |
from app.adapters.event_publisher.redis_event_publisher import publish
| 35.5
| 70
| 0.901408
| 10
| 71
| 6.1
| 0.8
| 0.459016
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056338
| 71
| 1
| 71
| 71
| 0.910448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fb81c2c0b62ec6d7e0241c523d5abe42ec980424
| 120
|
py
|
Python
|
tests/test_utilities.py
|
ThePythonator/PyWire3D
|
7ec008b983c1ee90b77ea2e03506e04633f000a4
|
[
"MIT"
] | 2
|
2021-04-23T17:24:43.000Z
|
2021-04-23T17:28:16.000Z
|
tests/test_utilities.py
|
ThePythonator/PyWire3D
|
7ec008b983c1ee90b77ea2e03506e04633f000a4
|
[
"MIT"
] | null | null | null |
tests/test_utilities.py
|
ThePythonator/PyWire3D
|
7ec008b983c1ee90b77ea2e03506e04633f000a4
|
[
"MIT"
] | null | null | null |
from PyWire3D.Utilities.Vector import add
def test_vector_add():
assert add([1,2,3], [2,4,5], [-2,-3,1]) == [1,3,9]
| 30
| 54
| 0.625
| 24
| 120
| 3.041667
| 0.625
| 0.054795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0.133333
| 120
| 4
| 54
| 30
| 0.576923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
83aade3992f856e7b8b0e24aec67431b4fbfaa33
| 30,244
|
py
|
Python
|
crds/tree_builders/python_tree_builder.py
|
philok55/CRDS
|
b7fc3a7f461505d0ba41a7da68da85c3055d98cf
|
[
"Apache-2.0"
] | null | null | null |
crds/tree_builders/python_tree_builder.py
|
philok55/CRDS
|
b7fc3a7f461505d0ba41a7da68da85c3055d98cf
|
[
"Apache-2.0"
] | null | null | null |
crds/tree_builders/python_tree_builder.py
|
philok55/CRDS
|
b7fc3a7f461505d0ba41a7da68da85c3055d98cf
|
[
"Apache-2.0"
] | null | null | null |
"""
Hashed Tree builder for the Python parser.
This is an ANTLR generated parse tree listener, adapted to
walk a Python parse tree, build our hashed AST and store
all its sub trees by size.
"""
from antlr4 import ParseTreeWalker
from antlr4.tree.Tree import TerminalNode
from parsers.python3.Python3Listener import Python3Listener
from parsers.python3.Python3Parser import Python3Parser
from ..hash_tree.hash_tree import HashedNode
class PythonTreeBuilder(Python3Listener):
"""
Parse Tree Listener for the Python language.
Enter- and exit functions generated by ANTLR.
"""
def __init__(self, tree):
super().__init__()
self.tree = tree
self.hashed_tree = None
self.current = None
self.sorted_trees = {}
self.sub_tree_sizes = []
def start(self):
walker = ParseTreeWalker()
walker.walk(self, self.tree)
def print_tree(self, file_name=None):
"""Print the full tree, either to a file or to stdout."""
try:
with open(file_name, 'w') as file:
file.write(str(self.hashed_tree))
except TypeError:
print(self.hashed_tree)
def hash_node(self):
"""
Hash the current node. Should be called on CTX exit,
because it expects the children to be hashed already.
"""
self.current.hash()
def store_subtree(self):
"""
Store the sub tree that has the current node as root.
Sub trees are stored by size in a dictionary (for fast lookup) as follows:
{
<<size>>: [<<subtree>>, <<subtree>>],
<<size>>: [<<subtree>>, <<subtree>>, <<subtree>>]
}
Should be called on CTX exit, because it expects the children to be stored already.
"""
size = self.current.set_subtree_size()
if size in self.sorted_trees:
self.sorted_trees[size].append(self.current)
else:
self.sorted_trees.update({size: [self.current]})
self.sub_tree_sizes.append(size)
def enter_rule(self, ctx):
"""
Function executed on entry of every CTX node (downward pass of traversal).
Here we build the tree that will be hashed.
"""
# Skip 'wrapper' nodes
if ctx.getChildCount() == 1 and not isinstance(ctx.getChild(0), TerminalNode):
return
self.current = self.current.add_child(ctx)
def exit_rule(self, ctx):
"""
Function executed on exit of every CTX node (upward pass of traversal).
Here we have the data of the children, so we can hash the current node
and store it by sub tree size.
"""
# Skip 'wrapper' nodes
if ctx.getChildCount() == 1 and not isinstance(ctx.getChild(0), TerminalNode):
return
self.hash_node()
self.store_subtree()
self.current = self.current.parent
def enterFile_input(self, ctx:Python3Parser.File_inputContext):
"""
File input subtree, this is the root node.
We add this ctx as the root of our hashed tree.
"""
self.hashed_tree = HashedNode(ctx, parser=Python3Parser)
self.current = self.hashed_tree
def exitFile_input(self, ctx:Python3Parser.File_inputContext):
self.hash_node()
self.store_subtree()
# --------------------------------------------------------------------
# Below are all the enter- and exit methods for every ctx type
# --------------------------------------------------------------------
# Enter a parse tree produced by Python3Parser#eval_input.
def enterEval_input(self, ctx:Python3Parser.Eval_inputContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#eval_input.
def exitEval_input(self, ctx:Python3Parser.Eval_inputContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#decorator.
def enterDecorator(self, ctx:Python3Parser.DecoratorContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#decorator.
def exitDecorator(self, ctx:Python3Parser.DecoratorContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#decorators.
def enterDecorators(self, ctx:Python3Parser.DecoratorsContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#decorators.
def exitDecorators(self, ctx:Python3Parser.DecoratorsContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#decorated.
def enterDecorated(self, ctx:Python3Parser.DecoratedContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#decorated.
def exitDecorated(self, ctx:Python3Parser.DecoratedContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#async_funcdef.
def enterAsync_funcdef(self, ctx:Python3Parser.Async_funcdefContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#async_funcdef.
def exitAsync_funcdef(self, ctx:Python3Parser.Async_funcdefContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#funcdef.
def enterFuncdef(self, ctx:Python3Parser.FuncdefContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#funcdef.
def exitFuncdef(self, ctx:Python3Parser.FuncdefContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#parameters.
def enterParameters(self, ctx:Python3Parser.ParametersContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#parameters.
def exitParameters(self, ctx:Python3Parser.ParametersContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#typedargslist.
def enterTypedargslist(self, ctx:Python3Parser.TypedargslistContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#typedargslist.
def exitTypedargslist(self, ctx:Python3Parser.TypedargslistContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#tfpdef.
def enterTfpdef(self, ctx:Python3Parser.TfpdefContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#tfpdef.
def exitTfpdef(self, ctx:Python3Parser.TfpdefContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#varargslist.
def enterVarargslist(self, ctx:Python3Parser.VarargslistContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#varargslist.
def exitVarargslist(self, ctx:Python3Parser.VarargslistContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#vfpdef.
def enterVfpdef(self, ctx:Python3Parser.VfpdefContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#vfpdef.
def exitVfpdef(self, ctx:Python3Parser.VfpdefContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#stmt.
def enterStmt(self, ctx:Python3Parser.StmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#stmt.
def exitStmt(self, ctx:Python3Parser.StmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#simple_stmt.
def enterSimple_stmt(self, ctx:Python3Parser.Simple_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#simple_stmt.
def exitSimple_stmt(self, ctx:Python3Parser.Simple_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#small_stmt.
def enterSmall_stmt(self, ctx:Python3Parser.Small_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#small_stmt.
def exitSmall_stmt(self, ctx:Python3Parser.Small_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#expr_stmt.
def enterExpr_stmt(self, ctx:Python3Parser.Expr_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#expr_stmt.
def exitExpr_stmt(self, ctx:Python3Parser.Expr_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#annassign.
def enterAnnassign(self, ctx:Python3Parser.AnnassignContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#annassign.
def exitAnnassign(self, ctx:Python3Parser.AnnassignContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#testlist_star_expr.
def enterTestlist_star_expr(self, ctx:Python3Parser.Testlist_star_exprContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#testlist_star_expr.
def exitTestlist_star_expr(self, ctx:Python3Parser.Testlist_star_exprContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#augassign.
def enterAugassign(self, ctx:Python3Parser.AugassignContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#augassign.
def exitAugassign(self, ctx:Python3Parser.AugassignContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#del_stmt.
def enterDel_stmt(self, ctx:Python3Parser.Del_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#del_stmt.
def exitDel_stmt(self, ctx:Python3Parser.Del_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#pass_stmt.
def enterPass_stmt(self, ctx:Python3Parser.Pass_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#pass_stmt.
def exitPass_stmt(self, ctx:Python3Parser.Pass_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#flow_stmt.
def enterFlow_stmt(self, ctx:Python3Parser.Flow_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#flow_stmt.
def exitFlow_stmt(self, ctx:Python3Parser.Flow_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#break_stmt.
def enterBreak_stmt(self, ctx:Python3Parser.Break_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#break_stmt.
def exitBreak_stmt(self, ctx:Python3Parser.Break_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#continue_stmt.
def enterContinue_stmt(self, ctx:Python3Parser.Continue_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#continue_stmt.
def exitContinue_stmt(self, ctx:Python3Parser.Continue_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#return_stmt.
def enterReturn_stmt(self, ctx:Python3Parser.Return_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#return_stmt.
def exitReturn_stmt(self, ctx:Python3Parser.Return_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#yield_stmt.
def enterYield_stmt(self, ctx:Python3Parser.Yield_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#yield_stmt.
def exitYield_stmt(self, ctx:Python3Parser.Yield_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#raise_stmt.
def enterRaise_stmt(self, ctx:Python3Parser.Raise_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#raise_stmt.
def exitRaise_stmt(self, ctx:Python3Parser.Raise_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#import_stmt.
def enterImport_stmt(self, ctx:Python3Parser.Import_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#import_stmt.
def exitImport_stmt(self, ctx:Python3Parser.Import_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#import_name.
def enterImport_name(self, ctx:Python3Parser.Import_nameContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#import_name.
def exitImport_name(self, ctx:Python3Parser.Import_nameContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#import_from.
def enterImport_from(self, ctx:Python3Parser.Import_fromContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#import_from.
def exitImport_from(self, ctx:Python3Parser.Import_fromContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#import_as_name.
def enterImport_as_name(self, ctx:Python3Parser.Import_as_nameContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#import_as_name.
def exitImport_as_name(self, ctx:Python3Parser.Import_as_nameContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#dotted_as_name.
def enterDotted_as_name(self, ctx:Python3Parser.Dotted_as_nameContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#dotted_as_name.
def exitDotted_as_name(self, ctx:Python3Parser.Dotted_as_nameContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#import_as_names.
def enterImport_as_names(self, ctx:Python3Parser.Import_as_namesContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#import_as_names.
def exitImport_as_names(self, ctx:Python3Parser.Import_as_namesContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#dotted_as_names.
def enterDotted_as_names(self, ctx:Python3Parser.Dotted_as_namesContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#dotted_as_names.
def exitDotted_as_names(self, ctx:Python3Parser.Dotted_as_namesContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#dotted_name.
def enterDotted_name(self, ctx:Python3Parser.Dotted_nameContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#dotted_name.
def exitDotted_name(self, ctx:Python3Parser.Dotted_nameContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#global_stmt.
def enterGlobal_stmt(self, ctx:Python3Parser.Global_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#global_stmt.
def exitGlobal_stmt(self, ctx:Python3Parser.Global_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#nonlocal_stmt.
def enterNonlocal_stmt(self, ctx:Python3Parser.Nonlocal_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#nonlocal_stmt.
def exitNonlocal_stmt(self, ctx:Python3Parser.Nonlocal_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#assert_stmt.
def enterAssert_stmt(self, ctx:Python3Parser.Assert_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#assert_stmt.
def exitAssert_stmt(self, ctx:Python3Parser.Assert_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#compound_stmt.
def enterCompound_stmt(self, ctx:Python3Parser.Compound_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#compound_stmt.
def exitCompound_stmt(self, ctx:Python3Parser.Compound_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#async_stmt.
def enterAsync_stmt(self, ctx:Python3Parser.Async_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#async_stmt.
def exitAsync_stmt(self, ctx:Python3Parser.Async_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#if_stmt.
def enterIf_stmt(self, ctx:Python3Parser.If_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#if_stmt.
def exitIf_stmt(self, ctx:Python3Parser.If_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#while_stmt.
def enterWhile_stmt(self, ctx:Python3Parser.While_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#while_stmt.
def exitWhile_stmt(self, ctx:Python3Parser.While_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#for_stmt.
def enterFor_stmt(self, ctx:Python3Parser.For_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#for_stmt.
def exitFor_stmt(self, ctx:Python3Parser.For_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#try_stmt.
def enterTry_stmt(self, ctx:Python3Parser.Try_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#try_stmt.
def exitTry_stmt(self, ctx:Python3Parser.Try_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#with_stmt.
def enterWith_stmt(self, ctx:Python3Parser.With_stmtContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#with_stmt.
def exitWith_stmt(self, ctx:Python3Parser.With_stmtContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#with_item.
def enterWith_item(self, ctx:Python3Parser.With_itemContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#with_item.
def exitWith_item(self, ctx:Python3Parser.With_itemContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#except_clause.
def enterExcept_clause(self, ctx:Python3Parser.Except_clauseContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#except_clause.
def exitExcept_clause(self, ctx:Python3Parser.Except_clauseContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#suite.
def enterSuite(self, ctx:Python3Parser.SuiteContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#suite.
def exitSuite(self, ctx:Python3Parser.SuiteContext):
self.exit_rule(ctx)
# XXX: this one causes a weird invalid hash value
# # Enter a parse tree produced by Python3Parser#test.
# def enterTest(self, ctx:Python3Parser.TestContext):
# self.enter_rule(ctx)
# # Exit a parse tree produced by Python3Parser#test.
# def exitTest(self, ctx:Python3Parser.TestContext):
# self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#test_nocond.
def enterTest_nocond(self, ctx:Python3Parser.Test_nocondContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#test_nocond.
def exitTest_nocond(self, ctx:Python3Parser.Test_nocondContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#lambdef.
def enterLambdef(self, ctx:Python3Parser.LambdefContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#lambdef.
def exitLambdef(self, ctx:Python3Parser.LambdefContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#lambdef_nocond.
def enterLambdef_nocond(self, ctx:Python3Parser.Lambdef_nocondContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#lambdef_nocond.
def exitLambdef_nocond(self, ctx:Python3Parser.Lambdef_nocondContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#or_test.
def enterOr_test(self, ctx:Python3Parser.Or_testContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#or_test.
def exitOr_test(self, ctx:Python3Parser.Or_testContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#and_test.
def enterAnd_test(self, ctx:Python3Parser.And_testContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#and_test.
def exitAnd_test(self, ctx:Python3Parser.And_testContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#not_test.
def enterNot_test(self, ctx:Python3Parser.Not_testContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#not_test.
def exitNot_test(self, ctx:Python3Parser.Not_testContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#comparison.
def enterComparison(self, ctx:Python3Parser.ComparisonContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#comparison.
def exitComparison(self, ctx:Python3Parser.ComparisonContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#comp_op.
def enterComp_op(self, ctx:Python3Parser.Comp_opContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#comp_op.
def exitComp_op(self, ctx:Python3Parser.Comp_opContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#star_expr.
def enterStar_expr(self, ctx:Python3Parser.Star_exprContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#star_expr.
def exitStar_expr(self, ctx:Python3Parser.Star_exprContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#expr.
def enterExpr(self, ctx:Python3Parser.ExprContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#expr.
def exitExpr(self, ctx:Python3Parser.ExprContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#xor_expr.
def enterXor_expr(self, ctx:Python3Parser.Xor_exprContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#xor_expr.
def exitXor_expr(self, ctx:Python3Parser.Xor_exprContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#and_expr.
def enterAnd_expr(self, ctx:Python3Parser.And_exprContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#and_expr.
def exitAnd_expr(self, ctx:Python3Parser.And_exprContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#shift_expr.
def enterShift_expr(self, ctx:Python3Parser.Shift_exprContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#shift_expr.
def exitShift_expr(self, ctx:Python3Parser.Shift_exprContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#arith_expr.
def enterArith_expr(self, ctx:Python3Parser.Arith_exprContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#arith_expr.
def exitArith_expr(self, ctx:Python3Parser.Arith_exprContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#term.
def enterTerm(self, ctx:Python3Parser.TermContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#term.
def exitTerm(self, ctx:Python3Parser.TermContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#factor.
def enterFactor(self, ctx:Python3Parser.FactorContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#factor.
def exitFactor(self, ctx:Python3Parser.FactorContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#power.
def enterPower(self, ctx:Python3Parser.PowerContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#power.
def exitPower(self, ctx:Python3Parser.PowerContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#atom_expr.
def enterAtom_expr(self, ctx:Python3Parser.Atom_exprContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#atom_expr.
def exitAtom_expr(self, ctx:Python3Parser.Atom_exprContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#atom.
def enterAtom(self, ctx:Python3Parser.AtomContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#atom.
def exitAtom(self, ctx:Python3Parser.AtomContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#testlist_comp.
def enterTestlist_comp(self, ctx:Python3Parser.Testlist_compContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#testlist_comp.
def exitTestlist_comp(self, ctx:Python3Parser.Testlist_compContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#trailer.
def enterTrailer(self, ctx:Python3Parser.TrailerContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#trailer.
def exitTrailer(self, ctx:Python3Parser.TrailerContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#subscriptlist.
def enterSubscriptlist(self, ctx:Python3Parser.SubscriptlistContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#subscriptlist.
def exitSubscriptlist(self, ctx:Python3Parser.SubscriptlistContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#subscript.
def enterSubscript(self, ctx:Python3Parser.SubscriptContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#subscript.
def exitSubscript(self, ctx:Python3Parser.SubscriptContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#sliceop.
def enterSliceop(self, ctx:Python3Parser.SliceopContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#sliceop.
def exitSliceop(self, ctx:Python3Parser.SliceopContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#exprlist.
def enterExprlist(self, ctx:Python3Parser.ExprlistContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#exprlist.
def exitExprlist(self, ctx:Python3Parser.ExprlistContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#testlist.
def enterTestlist(self, ctx:Python3Parser.TestlistContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#testlist.
def exitTestlist(self, ctx:Python3Parser.TestlistContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#dictorsetmaker.
def enterDictorsetmaker(self, ctx:Python3Parser.DictorsetmakerContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#dictorsetmaker.
def exitDictorsetmaker(self, ctx:Python3Parser.DictorsetmakerContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#classdef.
def enterClassdef(self, ctx:Python3Parser.ClassdefContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#classdef.
def exitClassdef(self, ctx:Python3Parser.ClassdefContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#arglist.
def enterArglist(self, ctx:Python3Parser.ArglistContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#arglist.
def exitArglist(self, ctx:Python3Parser.ArglistContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#argument.
def enterArgument(self, ctx:Python3Parser.ArgumentContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#argument.
def exitArgument(self, ctx:Python3Parser.ArgumentContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#comp_iter.
def enterComp_iter(self, ctx:Python3Parser.Comp_iterContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#comp_iter.
def exitComp_iter(self, ctx:Python3Parser.Comp_iterContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#comp_for.
def enterComp_for(self, ctx:Python3Parser.Comp_forContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#comp_for.
def exitComp_for(self, ctx:Python3Parser.Comp_forContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#comp_if.
def enterComp_if(self, ctx:Python3Parser.Comp_ifContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#comp_if.
def exitComp_if(self, ctx:Python3Parser.Comp_ifContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#encoding_decl.
def enterEncoding_decl(self, ctx:Python3Parser.Encoding_declContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#encoding_decl.
def exitEncoding_decl(self, ctx:Python3Parser.Encoding_declContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#yield_expr.
def enterYield_expr(self, ctx:Python3Parser.Yield_exprContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#yield_expr.
def exitYield_expr(self, ctx:Python3Parser.Yield_exprContext):
self.exit_rule(ctx)
# Enter a parse tree produced by Python3Parser#yield_arg.
def enterYield_arg(self, ctx:Python3Parser.Yield_argContext):
self.enter_rule(ctx)
# Exit a parse tree produced by Python3Parser#yield_arg.
def exitYield_arg(self, ctx:Python3Parser.Yield_argContext):
self.exit_rule(ctx)
| 35.045191
| 91
| 0.721498
| 3,820
| 30,244
| 5.566492
| 0.098429
| 0.056622
| 0.159895
| 0.142212
| 0.838883
| 0.739748
| 0.730719
| 0.58893
| 0.558079
| 0.496802
| 0
| 0.014491
| 0.196832
| 30,244
| 862
| 92
| 35.085847
| 0.860895
| 0.357492
| 0
| 0.459103
| 1
| 0
| 0.000053
| 0
| 0
| 0
| 0
| 0
| 0.005277
| 1
| 0.461741
| false
| 0.005277
| 0.039578
| 0
| 0.509235
| 0.005277
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
83c049ab482b64ac30ab3a754c27060c544e4954
| 34
|
py
|
Python
|
example/__main__.py
|
evrom/python-package
|
d7d0daec13da4ade9b7d2c96097c19dff6ba187a
|
[
"BSD-2-Clause"
] | null | null | null |
example/__main__.py
|
evrom/python-package
|
d7d0daec13da4ade9b7d2c96097c19dff6ba187a
|
[
"BSD-2-Clause"
] | null | null | null |
example/__main__.py
|
evrom/python-package
|
d7d0daec13da4ade9b7d2c96097c19dff6ba187a
|
[
"BSD-2-Clause"
] | null | null | null |
print('Hi from my first package')
| 17
| 33
| 0.735294
| 6
| 34
| 4.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 34
| 1
| 34
| 34
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
83da2d49fb344756ea7c7ddebc0dd9e0c0273252
| 135
|
py
|
Python
|
py_cui/dialogs/__init__.py
|
ne-msft/py_cui
|
b4938dd2c23a422496af7e32a33c2dbfcb348719
|
[
"BSD-3-Clause"
] | 654
|
2020-02-22T00:02:14.000Z
|
2022-03-29T23:10:31.000Z
|
py_cui/dialogs/__init__.py
|
ne-msft/py_cui
|
b4938dd2c23a422496af7e32a33c2dbfcb348719
|
[
"BSD-3-Clause"
] | 133
|
2020-01-28T15:41:05.000Z
|
2022-03-22T19:05:38.000Z
|
py_cui/dialogs/__init__.py
|
ne-msft/py_cui
|
b4938dd2c23a422496af7e32a33c2dbfcb348719
|
[
"BSD-3-Clause"
] | 68
|
2020-02-22T01:43:09.000Z
|
2022-02-22T18:01:43.000Z
|
"""A collection of modules containing dialog-style widgets and popups.
"""
import py_cui.dialogs.form
import py_cui.dialogs.filedialog
| 27
| 70
| 0.807407
| 20
| 135
| 5.35
| 0.8
| 0.149533
| 0.205607
| 0.336449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103704
| 135
| 5
| 71
| 27
| 0.884298
| 0.496296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
83e639b31edf6be0c3d345a07a817355c6ce2dd6
| 3,008
|
py
|
Python
|
backend/pharmacy/api/tests/setup/setup_auth_user.py
|
rahul007-bit/pharmaService
|
73191f64569eae7c7851f5b7bf9187f3f01b7a6e
|
[
"MIT"
] | 4
|
2022-01-28T13:05:07.000Z
|
2022-01-31T12:24:56.000Z
|
backend/pharmacy/api/tests/setup/setup_auth_user.py
|
rahul007-bit/pharmaService
|
73191f64569eae7c7851f5b7bf9187f3f01b7a6e
|
[
"MIT"
] | 6
|
2022-01-30T11:53:31.000Z
|
2022-02-02T06:17:30.000Z
|
backend/pharmacy/api/tests/setup/setup_auth_user.py
|
rahul007-bit/pharmaService
|
73191f64569eae7c7851f5b7bf9187f3f01b7a6e
|
[
"MIT"
] | 3
|
2022-01-28T13:41:03.000Z
|
2022-01-30T12:23:11.000Z
|
# Copyright (C) 2022 by YadavGulshan@Github, < https://github.com/YadavGulshan >.
#
# This file is part of < https://github.com/Yadavgulshan/PharmaService > project,
# and is released under the "BSD 3-Clause License Agreement".
# Please see < https://github.com/YadavGulshan/pharmaService/blob/master/LICENCE >
#
# All rights reserved.
from django.contrib.auth.models import User
from rest_framework.test import APIClient, APIRequestFactory
class setup:
def setup_auth_user(**kwargs):
factory = APIRequestFactory()
client = APIClient()
username = str(
kwargs.get("username") is not None and kwargs.get("username") or "testuser",
)
password = str(
kwargs.get("password") is not None
and kwargs.get("password")
or "top_secret",
)
email = str(
kwargs.get("email") is not None
and kwargs.get("email")
or "testemail@email.com",
)
first_name = str(
kwargs.get("first_name") is not None and kwargs.get("first_name") or "Test",
)
last_name = str(
kwargs.get("last_name") is not None and kwargs.get("last_name") or "User",
)
user = User.objects.create_user(
username=username,
password=password,
email=email,
first_name=first_name,
last_name=last_name,
is_staff=True,
)
response = client.post(
"/api/token/", {"username": username, "password": password}
)
access_token = response.data["access"]
header = "Bearer " + access_token
return factory, client, header
def setup_auth_user_with_no_staff_permission(**kwargs):
factory = APIRequestFactory()
client = APIClient()
username = str(
kwargs.get("username") is not None
and kwargs.get("username")
or "fuckingstaff",
)
password = str(
kwargs.get("password") is not None
and kwargs.get("password")
or "top_secret",
)
email = str(
kwargs.get("email") is not None
and kwargs.get("email")
or "testnonstaff@email.com",
)
first_name = str(
kwargs.get("first_name") is not None and kwargs.get("first_name") or "NotA",
)
last_name = str(
kwargs.get("last_name") is not None and kwargs.get("last_name") or "Staff",
)
User.objects.create_user(
username=username,
password=password,
email=email,
first_name=first_name,
last_name=last_name,
is_staff=False,
)
response = client.post(
"/api/token/", {"username": username, "password": password}
)
access_token = response.data["access"]
header = "Bearer " + access_token
return factory, client, header
| 30.693878
| 88
| 0.563165
| 327
| 3,008
| 5.061162
| 0.259939
| 0.108761
| 0.072508
| 0.072508
| 0.760121
| 0.712991
| 0.712991
| 0.712991
| 0.712991
| 0.712991
| 0
| 0.002469
| 0.326795
| 3,008
| 97
| 89
| 31.010309
| 0.814815
| 0.106715
| 0
| 0.571429
| 0
| 0
| 0.126166
| 0.008212
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025974
| false
| 0.12987
| 0.025974
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
f7d0dd3863090191f908f62cc20def0a30669294
| 308
|
py
|
Python
|
application/auth/__init__.py
|
anasabufarraj/hello_flask_fwd
|
b6f859e904353666542ad960299ad4a2650fc9e2
|
[
"MIT"
] | null | null | null |
application/auth/__init__.py
|
anasabufarraj/hello_flask_fwd
|
b6f859e904353666542ad960299ad4a2650fc9e2
|
[
"MIT"
] | null | null | null |
application/auth/__init__.py
|
anasabufarraj/hello_flask_fwd
|
b6f859e904353666542ad960299ad4a2650fc9e2
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# Copyright (c) 2020. Anas Abu Farraj.
# ------------------------------------------------------------------------------
"""Creating Authentication Blueprint."""
from flask import Blueprint
auth = Blueprint('auth', __name__)
| 34.222222
| 80
| 0.347403
| 17
| 308
| 6.058824
| 0.823529
| 0.252427
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014134
| 0.081169
| 308
| 8
| 81
| 38.5
| 0.349823
| 0.75
| 0
| 0
| 0
| 0
| 0.057971
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
f7dd7e7a72528d78876cf670ecc0742354eeb420
| 1,458
|
py
|
Python
|
tests/vcf_tools/test_header_parser.py
|
Varstation/genmod
|
991a0fca36936b5dde49a95e8ea1d4336288c7c0
|
[
"MIT"
] | 46
|
2015-01-15T17:53:22.000Z
|
2021-08-09T09:35:29.000Z
|
tests/vcf_tools/test_header_parser.py
|
Varstation/genmod
|
991a0fca36936b5dde49a95e8ea1d4336288c7c0
|
[
"MIT"
] | 55
|
2015-06-04T09:09:29.000Z
|
2021-05-20T10:48:18.000Z
|
tests/vcf_tools/test_header_parser.py
|
moonso/genmod
|
99b6c9510ffc67fd54c07eab24de5db7345ef95d
|
[
"MIT"
] | 15
|
2015-02-06T04:08:23.000Z
|
2021-05-04T10:06:58.000Z
|
from genmod.vcf_tools.header_parser import HeaderParser
def test_parse_info():
## GIVEN a header object
head = HeaderParser()
assert 'MQ' not in head.info_dict
info_line = '##INFO=<ID=MQ,Number=1,Type=Float,Description="RMS Mapping Quality">'
## WHEN parsing a correct info line
head.parse_meta_data(info_line)
## THEN assert it is added to the parser
assert 'MQ' in head.info_dict
def test_parse_contig():
## GIVEN a header object
head = HeaderParser()
assert '1' not in head.contig_dict
contig_line = '##contig=<ID=1,length=249250621,assembly=b37>'
## WHEN parsing a correct info line
head.parse_meta_data(contig_line)
## THEN assert it is added to the parser
assert '1' in head.contig_dict
def test_parse_contig_no_length():
## GIVEN a header object
head = HeaderParser()
assert '1' not in head.contig_dict
contig_line = '##contig=<ID=1,assembly=b37>'
## WHEN parsing a correct info line
head.parse_meta_data(contig_line)
## THEN assert it is added to the parser
assert '1' in head.contig_dict
def test_parse_minimal_contig():
## GIVEN a header object
head = HeaderParser()
assert '1' not in head.contig_dict
contig_line = '##contig=<ID=1>'
## WHEN parsing a correct info line
head.parse_meta_data(contig_line)
## THEN assert it is added to the parser
assert '1' in head.contig_dict
| 29.16
| 86
| 0.677641
| 217
| 1,458
| 4.382488
| 0.225806
| 0.050473
| 0.07571
| 0.100946
| 0.801262
| 0.778128
| 0.778128
| 0.736067
| 0.736067
| 0.736067
| 0
| 0.020463
| 0.229081
| 1,458
| 50
| 87
| 29.16
| 0.825623
| 0.254458
| 0
| 0.52
| 0
| 0.04
| 0.156162
| 0.11571
| 0
| 0
| 0
| 0
| 0.32
| 1
| 0.16
| false
| 0
| 0.04
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f7e0d763e95c2c91a6c97fb6e9894ca38163437c
| 39,667
|
py
|
Python
|
tests/python/extra/dummy_test_data.py
|
scottdonaldau/ledger-qrl
|
7a3b933b84065b9db2b775d50205efcdbed2399e
|
[
"Apache-2.0"
] | 2
|
2018-05-28T15:26:21.000Z
|
2018-11-19T08:31:17.000Z
|
tests/python/extra/dummy_test_data.py
|
scottdonaldau/ledger-qrl
|
7a3b933b84065b9db2b775d50205efcdbed2399e
|
[
"Apache-2.0"
] | null | null | null |
tests/python/extra/dummy_test_data.py
|
scottdonaldau/ledger-qrl
|
7a3b933b84065b9db2b775d50205efcdbed2399e
|
[
"Apache-2.0"
] | 6
|
2018-11-15T10:38:47.000Z
|
2022-03-14T19:51:05.000Z
|
from pyledgerqrl.ledgerqrl import *
# DUMMY TEST DATA FOR SEED EQUAL TO ALL ZEROS
expected_leafs_zeroseed = [
b"98E68D7AB40D358B5B0F4DF4C86AAE78B444BD50248C02773CF1965FAEA092AE",
b"702AA3E3184702E7AE6BE645D70F089B1E9793F656FF9515603B5C483867ACAD",
b"C1B09D5C9065BD06323C60FF0DB2ADB00D552D4C4CEC84F956C8D1051E51A191",
b"14D0B0A51890B07BC423CB590C8E377E582AAA6A8F671ECDAD3A53B18E0897A6",
b"35B3195636E7D953454A7019BDB5FBD5E52060511BD2E7AE15D012463B01F524",
b"7202EBF0DF67175E726257AE745EF055BBA68D70EB7738322AB5D40F123EC708",
b"A2D9258A84AF8111D59206B967D9168F4F1DFDCB9A8D5ED2ABF356E48A9DA931",
b"C9809FE61C902B25926BFC2C317973CAE4013D63FF4001EDB438F8D4E1846D7E",
b"839DE15504EC22C788301A3610C996234ACAC4DA4A788025612ABA48B3AAA84C",
b"23B6AE975A6E69600B246DAA47FD26492582B7E2E5CA311E1475CA1E17B50D71",
b"A26468A18D4158E6E92369633F288D049E5C41E0A20A9102ADA3876105AD3CB7",
b"338A8558244F720156AE8F0DC2E2A5CB5E50F5BE510AFB55FB1E2DE8DF2C93CB",
b"D49473FF16339CCAF0A2FC083DEF1CE9148B9DB192E5E98C9790771AD027DF81",
b"F91135E8CC926F1C681F04156E03C7BD5E830CEE6676CD568EC2B3F5060E4366",
b"C192A1C0636C95B105A4E032FECCA80DFCA57C10F588F7024CAFBF45CEEFCF59",
b"EB708000DC18FFD35ED424126A3F3444FCC76C9556A15818FECAF560F87366FE",
b"9B93C865447FC8C1D480D3376387030C512C08A6AECDC642FC3B4FA52773F851",
b"1255D60CCE17DF3EAFD7300B9BD74CDE32656CA9E6E9FF77344138077BA1C992",
b"956D1C05C999FEDB9E0A46AEB6DE709CD1019B7CFC28E3106B7542B4B49C4CD3",
b"0D20DF23770DA0386157495E422A67436A1EF0C5180ECB44D78913E7EDC857BF",
b"8E66C0B26238BC9E12804A83AEF0429E9A666266001A826B5025889B45AE86A3",
b"C436A1CF6BBE7421017480AB7C2D4592EAC6E63C787C0FFE6D2FBF9B26E9CFE4",
b"6DC41C92E0B0AAE73F7C9E4CB8457FC2FC20A6C1DC82DC3F9782E99DBD41306A",
b"8890BC471767F59D328FD2AA672DCCE345A239499EAECC00E55AAD165BB077F4",
b"C852B2ED891E1DBCE5BAC01470AD988C3897C3EBDF1BE1428BBC2DB352BC25C9",
b"088E54F81C39E2F701F09A328ACA6BEA7734232C4F1750BDCBB71E6D8AFA95E4",
b"0B0F29A21F821BB6DDA2100F0753ED44E1BDC3743E168843A227158FC0A762A5",
b"84A748FE7D24AB7683C00A1428B5BB26FB7EC66066C45DF55D5A96FEF03889D6",
b"F0897E14484AEFB04663275B2DD02C2469F7A48A45924134AA906DF2C4DB4DE8",
b"964F80EAAC640F5C2C771ED108E7D783919B22B958A7451737BD06E3CA30275D",
b"99D010F7F40953E629A70EE6E1A346CDF7949B6DC4A29823A2403615FFEE9EDF",
b"386C16EAA196603AE01EEE935B908DCC6417A12C5CE6C8C9D2F51738C84E09A6",
b"D9429573B7F925EF3906B78E6E5CE48741B28A3C102787F047EDF6C5491CDEE4",
b"B1A88F08A11CDD68B8A3E8FCD477398ABD622E28159E0B48DB153E7FA199FFFF",
b"F1CB90E1EB573FFF3E1DC530A04D14C6139BC157FE101BF1B8D1D0B63CA46F25",
b"9F1676B67C21830062EC9F46DD7FC10D8C07BB1DD7C309652F665D525C45218C",
b"6192A71A8DE9DD94266415DD69F94EC650C1D7728810080A198AAF7ACD8BAC93",
b"AE0C2ED2918B840062D8C2B811BF0E41535CBC46792C86A965C122D5D773854E",
b"6AE6D765DE71AAC149CB894498B70BB3564AC899BB20D90940D82AA657E28530",
b"A62C02B002DEF4B1880482298975FDA76B9DC479D83C89D85837B65254E63C9C",
b"D2BAD383B25900503A34FA126ABB19D3AAC6FC110F431929C7EB18E613E101F8",
b"938A65C3A94BA1B3EE02ED8F752D8859BE07EC09B2C582ECF7782C647C5305D9",
b"A65E31FF46065126DDB1C69883DF3A4349F3A1094DE91F1E3A8F82CAABAB76C3",
b"42938AC9DCC88A2AD64E23DF9D8074554616E7B8517916D3B633526D78267EBC",
b"4107F311C639FF2F41D310CF1D846A1F222ED502AC4AF35F37297D018E70F5C6",
b"86DBBD2F5D30F47133CEB5B424E6A20A245C82C63B4F4495A7A4976AE7B72702",
b"3D80136F3C7620042F8201D7BA5986BFB12F30619C8FB29DCDFDE137B6DC623D",
b"968E53F47AD0F199E500CADF88487C8C94A69B02681EF6D7790F14378BC585A7",
b"DCD42CC8A81D1987F0EEC8414F8B3937A6CE24A073DDF5D0318E0BBE0D8F7C70",
b"4A76483FB0CFED4D6CFA8CC15A2B9F5D017F51363209FD0E2A156996985586BF",
b"E94B9AC5C55F4CAEFB74A93D4DE097B2685211CA696D7EE40F02DAE8D5AC49AC",
b"C2342CDD061EDD743535BA32AE9F05D4FE33FC1ACF8A272290AB266067B461E9",
b"11C30452609E1089DA77232C1D6BC184DE73B755580C8549AB7CAAA938C93CCD",
b"605CE8574B0EC929C0FF91CDEABDD54E5B7D638C388CF548B2FD1EA7BD8FF252",
b"B57142A828953478563DDE3B69AA92973559A35BC81D41ABF173B36A213D6E24",
b"72C82AE13211A2632875EAA202A95D83C63285F86637952B07DEC9AE67FD282B",
b"6EB6CF7F82036746601EABD968DDD00CF2D46BBA29115EB71824217B1CA3936E",
b"327C264BF13454B64C11FF1DEEDAA08C6E64C6EE570255D244F4FEBF18A25A37",
b"C0AE9989F72C73A303452B6EA0E210E1582822BB6446A654D4A68480AC1306F0",
b"881FF19D4AF0C3D1D4DCE5C7B1D830DDD3495E1EA090CE3F2DE4034B4CF5D62A",
b"E52615B05190CBBFD78A20EFC6F5A3F7C364FB8B444E151AF8638ABE6DFFD67A",
b"215ED74FB0CA043A3DBED28709D48980E4D4F9CF9EDF36392D509C7A3CF72BF4",
b"F23D9E482239DEDB08F56CA9FD717A215AC679511C127C606EFFF11E43690365",
b"58A61D513128C462CBFED6256027CDC7F5ED5AF303C6F265F23A60A198BA5092",
b"84B2344E4210E49F5E71A9A4C5D755516DD7713974E6EDCD1A60B2D831C115AD",
b"33DD2D7405367C4FB61AC3CA4F66D759E54AD8E791437B9D7BFB0F16225E4526",
b"92E85802331A6F6235E79DC85DF6066CF168488DACD3A27265524A1B52E95B71",
b"0201BCD121337E18F98B5948203D072B715BE3B2C9A6A0E41A9D61C68CA3573C",
b"36EC961F3E435758C503A961B183A11AFEFCB306C052BCAFD3C0564739A16D4B",
b"AFDA1C39F8AB4E30D750B548AF4AA2265867CDAC6BADEE959B93C8482B88D8AE",
b"8784D981743AF2E8CE5CB8C5AAB28C6D8E5B9E8727D8DA9F3EF7D999A9BF3A8D",
b"79186761975568C93E961A292F29FA9FC471A6A84C57324D19FACD8C6F92C58E",
b"8F043A22A9DA0552B840FCE9EB6490E114F0AC5B63296EF4B5E45262A8136415",
b"D62469113C15FAD03C3FDF78643E3FEEEEC32AB95AA6C8AF7F394DA4C0190EB1",
b"5872637B7FC9CB9ED58D0532EE5B27C8390A42152FA6106E9CD49B107C1E23D9",
b"4A7213BFBBE45B23A2D95C3097DB541CBDBF3CA3DB7DC3912A8788E849D8C4E1",
b"CC493D77588B3879D551A778AAEC98FFCB8C409618B62E2EDA6746CC1595008C",
b"67D42022705EF0535DC5909A9D4B0860F64E94519BFC0888C2318B068D0C335B",
b"098D2057B52A027FCAFCA8247DE2F623A7AFB4A22923E58A30A00CBCF7305C19",
b"FDAD9033BF1C0D6F748C784DB6C9B457CBD0EDAB6EEF1751231C27DD46B065B4",
b"20F61D0440B12E329AB5627FF979466574F8BEDA3BD10EA75C0117901483E5A8",
b"8BDC81CB655983B038DDEBB2B17982D53A4F3CF57F346E2B3BCA972200AB8B9C",
b"B205C1112C3CA229574B5AE9833D16501121194C31A66E903A9557FFA6203E73",
b"A35A433705089624FCEFFD293A02EE315737C092175F0CF09A94E90B2871A176",
b"730BF553DA93DC793A1EA7E75746D8D7030EB9CC040727B08AA0924367366C11",
b"250E5F9AF90A5DB5F99C5BE1E157748393B3284F5741803AEF5C741B0256F2FE",
b"030090CEE99EE7D12830D8E183E37E0ABE4E4A3A05F190F4247629281468B65C",
b"D157FAE418816E5A2CA09385AAF830BAA3CFB6AA2D1EFC5957E7364EDFCDC06A",
b"7173298B16BD10FEF6D32F9231FEAB283E1A774207CF759A58B779DBDD127A71",
b"4928ED42E4F66CF86258781BD53DA5491BD0CE1E23F8376617981378C496CF2A",
b"120ADE9CB569E8C850CF4598F802610D4B044DC6E4A75037A920CE776F82E31E",
b"5490563824119BBF4D479AD450096765AFB61812021273BCB9A4148BFCE91C69",
b"E243ABFEFDEF43F6E3FE77F6B15D3E7DC5FD236AF34B4C2E82A0B9A58FE2DDA2",
b"260EB51AF3F14813BDFFD17828947E3B4FFA82B71F246759E1C2E45C6837412B",
b"6139A235893F8374DDC03D63F920A13CAD86BBE87CF3FBD0F3FF47D0F3C72D9B",
b"C6EE73A332E67B6243775C134003382E118A1FBB8CE03C599679ABB8CB8F6EA1",
b"202030D5D533D4AD2084EC8DD16EA54004C70F896A5A9D77C2F609C967641DE7",
b"5A2A7BB5AEF31DA87E8CC03EEFF265750092E43777EF29F797237FCAB200EEAE",
b"90949248546DF96D662FB2275BFC5DC4FC96A3E99CF77D91719EFDAAAD53B263",
b"D15981CC3EE6B405EB84B35F74158EB17ACB6F827728674FCA1787FFAF405627",
b"9C9264190832E5240D306654E5885B7A5B8E2574767C85326E41BCEE97876CF2",
b"7B695F246E7B9B3952DFD1DE1FE6B9C0B6218243478BE0EFEA9BDDDA08BFA948",
b"45EE872A1478159EA6FD43082613298912B2157478C6EC701F57D3FC597986A1",
b"54C8560AFE7C0AB482F8C45E0F977824C6E0504C9C1788DD87F6B5ACB9DC0688",
b"651EE68D20B35886C98D1CE8A1A205E0617FC69F950B5D754EEDF1E56E460A98",
b"7C8E462649F6D45B0C842DA68B30931BDBDA64EE31AC51705A32040A0F502373",
b"ED165BF8E4F3E16AE12A4685FDAE11CD3FF620790F0B7323102A628F30620072",
b"DFF1B7DA12C17D8FCF31C060D15666CEA29A61D4FFB67C185CDE74548BFCE1DF",
b"75B9AEE7C563303037ACC0499A4A8B0915E806AB8A7D586DE3D33AC8C1945E25",
b"CF81BB5F2E4464358DFCFEC77B19F1A653CFFEA66C2D9AE4FA7B0066462FECCE",
b"97149691D954DB1BBE60E1C172C682B7BF8FCCC5D90D7B2453FA9484A6C344F5",
b"683BEECA8BCFB0D3AF0C8257AA8D24AB1AE9633DDA47C46C4BE5D7265916BF46",
b"E7C8077961B5DDE3F1ECD2AA5C0536191D82B4C0213D73A769514A961E9DC712",
b"04AFC43E2E03D355FED5D6AB6338D3832CC029D81C511F2FCF4C54F3A9F91AFB",
b"1521D4D85575B38F934DDBE9D467A2AF9D0CBB30B3496622E780150BFF665516",
b"C59DDF436858EF79F7D6BC907439D353A699F499BCCAA2995AB3B8F6FABD4DAB",
b"5F88CB0F35B3406B2AF8CFF0F1782DF223D338B69F46691ACF6CC376A70C5897",
b"00E78D1595E71BEAB84F0D061BE64B0B1450DAD55706C27CDD32AECEE85DD89B",
b"EF23C42D8ACFDF17C5D61BB04B17E019238353C100C1FE17BFE361D91FC48C6E",
b"294EA90F25A9C5825A994CD48358CC1B090D0ADD69799BFA1A194BF20433CCAB",
b"AC10EB477B08DA4A73C1065C3F83903A341139B88DFE2CAB0C8AEC63DDDB80E5",
b"EDC4A26A1DDF3AB782F31D9F4ED65AA609198F07FF506327E1F6CC5AE15A19C1",
b"002309B845064504D4B99108E608DBA6893872EA65103903D007935A2C5F7B52",
b"4F9C062D4879C276DF24C2173A67BE80CA0223D051946A73023A54DC79030F39",
b"9303A662ACC3C1F08F049F61627730F97F4F6C85847999D98549F5720DFBEC00",
b"99809D673007CCF8F9C639E86BED03B57DB7C862F5142ABE18779BDC6B2F9A2F",
b"1B6B9ED8AFD7DF2BBBA92DB83B96E676E8E641B79869F1377DC46FAFB4E2C7B0",
b"5EE4F8188795588FEBBC07B8D0E622269E572E6227468788131B72D1762DBD82",
b"32409922E56DA6AC78D128F81D46841001513BE73F0D43981878FB64888E9141",
b"B2AE4DC7001F917C10091F34C5A5BFEEF5B940F45CB768F20AB6FEA90B5BDBF1",
b"95097E6C42224935C4E1E2863A2EA9B817724EDD3A71B9664E8E9F1B1901B5E6",
b"4F0095CDA1B880F86E38AD0F37A8C8A0CA0887822E91092D41B8D6D3E6765247",
b"AC7319F857E058F47504671B250551E983B223E6F4606837AC844828B4014B9F",
b"FABC2D995B966A0F9011CF71F13B0A0C5769688492082B0243FCB1FB0905B71A",
b"F6E85B1A2DCE7D4B538CAA458C5143FFB56CDE0C9568492A28294F92E94A367A",
b"BE43076F5402276BD1DB405E53590896AF491BFC2D89FE811EE5D3FDC9ABDD7F",
b"72BF484B93D3CEEC40D1DD138F433DF08D8B5065ACE4DDC62AF3B8FFF7838760",
b"C3A07B82EC09303C226A82B6E851274067B52F534ACFC75B7B50364E4EA591B1",
b"0A108F08B6AE231ABC965EAE135D3BB262C36A01174FD151E3BCA25543A8E524",
b"4A09E7BE6DEB8FE28D1DCE83F598CFCC73AC8A9FD510E29CF627D73B21D7A9BD",
b"2FFE928907DBE21A2D14E529B118F7F9451C1DE2F74E9CA79C3FB3BB2E919F60",
b"A5BB74691FFD51F1BC57A476C4F65BA8EAF7A9178AD71EF32120DCE12A4A0BDE",
b"20C2091CEC51B6283FE3A503691A72B65DED2912199E6A2B42CE53892DD92DC7",
b"DD48C7FA9CCBFC1256E63CED469F634885FE07C09636F7C721DE1D33B29EC121",
b"84758D448A92D130D44AD5B0602ED2D0186AFC8129AACD5EA40BF155826CE440",
b"64D701D10D415914FF6ABBC79414471302CD8C4865BC5FB1C1ED0CF2FCF38D59",
b"97EF41049281535A386F9605712E9D6816D73E25EBB8B47BC4CF96FD9E7D4119",
b"A8DB1E3C35426B343367C13A90C46388DA7C2D5A4E8DBC10FB0875C8CF292D96",
b"55B2B436788DB4D61680679706A92CC93DE07685484706032B0068C4B8459B0C",
b"AC75FA32574F4016EDA2419515A5601B2D1A607E6967C516258BD5DB56E31C3F",
b"25238530101EE7777691161236523BC9E873967EACAD0C12EA75B54B970A6DBA",
b"B48A6CD79DFB39ACDE3AA5B07DB376810F5272273F9D4373FD2D80BB7626D471",
b"C5E1D149C29BC66215FA621B3D280C2656CA40BBF50ACE457C21E24ACC09238D",
b"65917CED60E193BCC84793CB669BB498C1CB80C29323AA0EBEE89DB3EBE3F1F0",
b"FDFB3BF29578B754E2D2C237C062B839B0F4D5CFFB3561A024CFFB0422C27007",
b"11344A8416A18BBDACEBE7E7686FC58391DA68372BE4EF936AECB1F2DC0548D9",
b"D3BE347EF3C29A17F1C202C8ED999A888B90F088DCA156C5806D732893B2859F",
b"69A86EE3176A08A791C67CA12945FE223E3BC1707D1E06E571F7E65B6BD4A929",
b"0F7472696BFDF817AE3189EA627675CB15BC2FE77344EF1F62812A466FF57FB7",
b"99B91F10566900FCA824E567B1D04E37141E1921545328D2EC03D927B25102D8",
b"B49516D55489412ECCDE03F03837B1B3508D0D6071A9A3033EB4A7CFAFB7F2D3",
b"8F49844606558EF385C7BE0E74748095B324CFDD540560D162344522FD263CE6",
b"34F7472FE31E2A679EC44E50B663D3E5D5C741C2C39569236A2785605C54D779",
b"616670AB4BD1F36D6A43D57634712210B20DAFD12E4A08CE2FA0F60460EC2185",
b"B357FA0F19BFC0F8B318FE24AE8759BF227FCD2DBC45C6B9CDC8DB67E22E4037",
b"3149A0FE2DFE5057684F3DC7CBAD51A759583B11B9207F646EA64130B7E457A0",
b"0AC23592ECC274089A23CF9E9BBDA12F5C110A0F7333DE87F9A48D357F83B2BA",
b"AFAA60A03438DD95E08869EE80F344FC830AF08421DA955F427CFAD4C76A1B09",
b"0594D08097B39D05FCA3D224797772338A3640D8589F102BF1C526ACB8EE2323",
b"78CF82DE66C4CFCCF01FACDEFA87179852538B08F810AF199AA7C93A60003D53",
b"035149BD7BCEDF1BC89DCF1E50FB3F7F307A728A1EC0FE3B2825BA600E5D7777",
b"379580777B797498981279DA565B7BE89FB99DE1A19E2E0A562FE54AE926EA52",
b"DD2C1F1F823317B49FE25A7AA96C387419C4B716F4EA0EA2CDA83B665B1F4A22",
b"A4749F00BE2B0A686BAF9604A646BE2A79D4ECAE74F9002D4CAB4170D7DE17F9",
b"461958BA2D5BA48F02AD06E33F0A1779CE3B95DFE79523AEB018CC2A0387C725",
b"3C63E52DB929993D9E41B0AF526D0EC3571C241164D731491078F2A72BF216F4",
b"442A948F666C1FCAB47E44F8AD475B9869751D4FE8ED44372FF48BE2FD6E06AC",
b"605C39A36B8F0EC9DE6A96F329ACBC6037F2CFB14B1BE4E0BED10D234FC74E52",
b"23A9AA52ACE21B174A9C4D80FEAA66E0B56E33B921EF96D4FA6A6CDC8EBCEFEA",
b"D4D167EF275697B9894E71BA055DC1B6F952DF5C24A22ABAAB23E2D3E0AB6744",
b"65965BFE774019ACBE02C3AEDE01312034093C45DC87444918970356C7E7EBBA",
b"3DE99CD9111CC26E3B36A538F025F5F09EA2846737FA22002E398A9EADECFF71",
b"3AB3104BDBF8D71306295BB1763F15AEA9BE14D79F06F17B3128B5D925AA06DA",
b"81EECB52C924F9F268959DC97910E1D106427C5E57AF1BD9A1E3E1C7B6032DF3",
b"06F7F67B7F5DBCFBFD37E6F91D16416881C161E9CBE59E6B53AE229AC19B198F",
b"16FDC34147BD45DEF04F24B8131E71109D3F4534CBE943B8C4B39771AD70788C",
b"048236A016E035FD5318A38365AC066AB47AC3373BDB0C82A18F24FF8DF639C1",
b"191C6673588A58432DC22B8BBA46BAAAEAFC474A7364D7987662B3F6142DD47E",
b"FCAF59B457C5A180339FD8AE7D6465A29EA34A9823AA9879EF9876ACB7CEA842",
b"F65DD75CD4B656E3CE850080685604584629176122458D90DD66D68D004BD89D",
b"EA188CD53B60A35587F95A172753B18A9AAEDDD8C81269C44BF7C3D3886496C6",
b"166B47A4CA6660232F4D57652E719FBC33EB86F3891EA821715C2B8B884C85BF",
b"621F78E1D5DCF58CA4E34B2CF89435296ABD4C8E1A90ED8FDF1D35717DF58BC8",
b"B0C45F45C6B18BC0FC07DCAD6E6383BD7A46CB1ADC00229E1B63B1E317ED6695",
b"6F812C35691A210382EB4693F120ADCEE6742A8E3CDD7809DAC05DD181E0D146",
b"5B5F504B58D189AA99E6FE8DA9BB62174063E82F1587C189DE8E672EE79CED18",
b"C5216BFB92654031745A4AC1B1482272BFFAD695E846EE39CFE9D11FDED80FE6",
b"59855C7C73CA0FFF1B2E6C5FCFB9DE7C7FD7685B875D1D88826CE776778131C2",
b"E8D17213447679D1DB06EC63731A0C345A8F437CD7AD816616E7FBC8AF9236EF",
b"7C7329DCC9CAA41A9258D86F8CB205736945F4686964165999FE705C3E71FBDB",
b"05ACC2F0143A4736355A866FC3FAE43D8514342CB5EE1ADCCFC3B3DAE548B7B0",
b"82946DDE232418402CA40AD2A717E1079F3F8C8E224F56289E419A3D040A5757",
b"E9741624BCC52A823496E97C6079212AD51DDE9D0196FAEBBBCECE549FE04C53",
b"8D21CB0E80EBF4D1473EC6B8EDB20969CE7BD9A9179CDD211603D1F7DDE768DE",
b"4C2D311791EF3151E8F1AA5A1E0B562524ABAF4A6964D13794958B4481041EC1",
b"3A5A9D28B274A9985457C9CBCBE6A30BDC67D8395E2ACEE0A60CF62F1223A7C7",
b"D23C082E8283379281BE145B224D31EE05155CA94DED1D1509BC5483A191DAEB",
b"B8B9310C93F6C46A7C1B830F287CD365F0D603E698A9A74258638D569CD548C0",
b"F97C61B70EDC591A43C8847DE31FB32F65C5CA6749BF42DD02C96CA71F4EB7DC",
b"C660440B803968BD0BBA71EF8E3F6256C64E5824E861B1D3C07AAA41341FD682",
b"980B9C1B27454ADB2685A92A21FA5FF483CAEAB7441D3D7E4535A61333C0CB5A",
b"7C4C9BEE4E53C88812E8F8459F24387B37C0A67AF726EA1C32DA0E85AA5BF6D4",
b"BFDA6578A14FC02E073194E3669EBEC425F79E1D7D678B4CF63B50997168AC2F",
b"128C74C03B9DFD26DD55B40CAFE6B14F72A59A65A6B080C534295A0154BD406A",
b"569B04E5F5C45DE399A55D39CE590BC38C74DE175E6367CDFB7116066409342D",
b"98CD2E5B16060F413ADE4427813199CC44317F72E4C057A34A580EBAE008AB87",
b"1CDD6AB8AE3767FE886ED0CB45615286E3F4C34C0D8114D8728709E2B90FE849",
b"511B75E66AC319CA979E071034E4D147457F6850ECD4FF9F07AB6D6402E3FEE1",
b"178612473FB5CD8487302C6F07DAA58E86AED392F81F2D49CD4F3E69DDE21183",
b"FD66D244F57C34D88975FA372DD04FBE59785273C134D54DDAAACFC5880A4993",
b"293CFDC040B29F57F092AF3D81607C95443F0DB6EB0CCF4EA0072DBD12CBFD6B",
b"F80F005D0007B61B33CA1BDF9A2E34639815605D67679708AC99462B66E10CBC",
b"825BAE98752B9B986F5DC904BFC1ECF34DD83CA975025C88916DAC19569E694A",
b"B5D76B616DC069A2CB9D122829243AE31EE2B54FD84F0C68622D21E49FBB3900",
b"615A64F6B52AE7A1D664EBA3370721F541E188ED5D74F117648C8A96EEB94EA4",
b"0F42150D285B989E28A4917FC9F0A19FCEE805877BC5FBBF347F1F17D43B0EF0",
b"1B95602E44EB5D436573F5D7F680B369C0C3E3FB35E0A939E218D003C61F57F7",
b"F48D9D351D46BA8E3C6F5F907DCCFCFE7F7A411DF1D8B7D96EC96494C689B778",
b"5A341D93EB427FF13453382AF4B9A5C44F1B7F14182C8818011D01C9B7CB1A2A",
b"2370CE9C991335DA53D865E1F621D1A77F93F0A6E156154C99D79AFB55A71AA7",
b"35A8A33BB6F2298E35D6AD6F4063365786A92C48A907783C445E4E6896EB84F8",
b"9B1DF504C6090E0176BDE43D16B80504515375AF0BD5D8804A1F02A67A537C92",
b"F0498D011F24748FDF61F5DF003C677D45334EE6725ABFC8417501C60A89264D",
b"7A69273868C30D0F573A888947591C538DF9C03D530170AE3957E0E94F85BFC3",
b"F8ECFDD3972A632346A5F3D7E2D07B8E30A1E35ABBA29AE1844A1102852F69D3",
b"4302E7D4638C8EA59F205CB9D270DCED3EAA682D220FC28DA8CB7A5EFCE6FBF5",
b"485624FDF605E80C683728DCAF2300865D3B3FD4352AA7FCD23EEFD211FB0083",
b"7C2921A1EA36F6FAFB77F124E849D3B124D7358BDD0ACCA8EAF4E0B95FC328ED",
b"1135E56475D874251A02EEC1D3EB228DB849502D9C678838CC07CEE0F6D1A6F6",
b"1D06C0229F0BBFA93390424CD31F42E3B6ACB346D9BD64D688DD50470777410C",
b"B41A49BA39F4BE6C27456A1CAD56A3F1113A4E3AAFEF7988C4CE2D41B47FF5FD",
b"4CB4894745E9E2B10E8057A0C3248F9B5C776F4C08D66E5EA9C0AF34E6980107",
b"065F3D7912D1976FE0234CCA5643E0EED242516454FFDB40509FA9CCB8423D7B",
b"A979CAB5C0F046B44CCB93057ED480BF61283660F347FE0B77616D944FF0BAEC",
b"922CF2341D5328B2FE3243289E7A8E24DEC128A279C9B5D06A78072BB37C1660",
b"EA75E7FD98EA3AD537BF821863CDCEEBE5C6F435ED47C56AB7B968C80AFA32D6",
b"9583257B0BA79CE340C9BC6933513E6915E1F0A5416BC18A827DF223B22067CE",
b"CE2CD2647D37B29E9BF0050EF83FCA9F962C697D2635EE02DDE1563F18557C9A",
b"D847CFF438C9E9F8984F97806B5BD60F187041860F24D625ACFECE307E2DF5DC",
b"FA7541CC0B96AD1E8F4E594800E9E7EE105ED5F5A32A98A4D5E2D6C689FE60F0",
b"1B27266DAACB69B32DF19202F89A42BEA0D4E81678FC356CB48500F68A3E05FD",
b"144ED325886905B07A82F76B5D434A9805D431E3D0C1306DF6D6E1C27BD7CB95",
b"D57AF1DCF3B5429193319D240AA263567A86F9993AD0801AFDC1709C8FE91907",
b"1CD9CB1BFD39F51C0EDDF166D3C1097D2D7FEDB2B9D2CD92E2E8FE029E05EB0A",
b"AD20CAC4B4EF7E3BD0F29A057538DBBB5CF4CD8B4EDEEFFFB1BC847CD61BFC36",
b"4CDC3C04D7F214CC373D5E67E7EB9EBDC8BEF491D5273E03CFB55D09A3D40320",
]
def test_export_c():
print()
for l in expected_leafs_zeroseed:
print("{", end='')
for idx in range(0, len(l) - 2, 2):
print("0x{}".format(l[idx:idx + 2].decode('ascii')), end='')
print(',', end='')
print("0x{}".format(l[-2:].decode('ascii')), end='')
print("},")
expected_sig_tc0_idx0_R = \
"D1F266CCB592D4695045C0BD5F80B66FCD4C14C0B7B98896F80CC2B0B89F3FC5"
expected_sig_tc0_idx0_wots = \
"088DE12A087C94B16B4D7E91AA728491C559CECAB3335C61D7CD2A26737932A8" \
"4771991B92ACCCAAD3A87CDCEB68762E8B322D8B4DC0CEDB69DF091AC56C23D0" \
"3ED26B1E6D51213D2421933DD9E064851BA49A953EB75B68EEF91E3DE4583671" \
"EB4BFEAD47E50EAD6340C62EC4B46A83578D759AEE1A5DCA7B69A508CD01DFAB" \
"A1488BD06841E9AE7E1246C79D0C155539DA5990EE097C6A067E42BF1925C6DC" \
"504509A11EF692C53DF4DAA875E8E7B9B15F6757139172CB0E8D78B9148B6022" \
"1884581EBCC69F7BD49E62E6545F8F16ED8060D0DF6DB3B36D603BBCA0053EDE" \
"84C73CE9603767D8997979132C93E76CEC2CAD3981F8B1AC45927CDAD3D65B1C" \
"A46B728B9AADB4DFCEA00DD796DC6047EE3E1599D0EC130D6362B4E48B817F0A" \
"7943EC8D2826CAB81597A273F1C57E4E26CF6BAED904A145189ACAFC7416C8C6" \
"36324B1EC3FBECE5644109E5404CE90BE0E525AE84B077F21EF81686CD7A0CE2" \
"CCFE7ABD24BC4F0D005110781190F659FF47E26F0EE1E37F8C322BF7A544EFFD" \
"AC47987CC8D228647CA2808120CB4A0CFDCC9EEBE3F284DC5D2E94A8DC753358" \
"4F24BCFC8291EFE606B917C7FC3F943D73BA88471B6A88979778A8A7EBEE3CD3" \
"C130BCE870B63D153CE9A9105C3F9208277B01573D6BB64F14395E9BBCB0B289" \
"E7579BC7FC0CFD88FFFEC418BBBA03929A7E1E55FB5517A84518C04559EEC901" \
"13680ACA408CB94B18D8EED7ED49CF4E130550B576AD59EFF152FF1BBD8E8E37" \
"35234045500115647D70C986667F7E4B8B9164F18FA4826EB4222D1B0935B986" \
"5905989BA1E6097C938D8850E35F2A4BABF306EF103D670798C74C2359B60DBD" \
"A3E13347D28D8A987DE9B03BB70287D19E841BCF2CF4936C8C4F96537C1FC31C" \
"9BA72EA7BF0B1A94907C739611F2615ACD4119CD243F7002446C10F29B6D2FF9" \
"8F434B51E67FAF94B99DE0C2B75C1DB6C897A2DE464E6A1293C6D59633410728" \
"643C66DC8C0F51384A43B230537950A37681F8A8FCFBA0D9DEE14CB69BC266AF" \
"A467ECEDD2F1D6460204E94DBCB49B44CE6931676FB16052D44AE2FED2FD39EE" \
"B3F8CD9D8AE61B005EA28E8EAA353A273CB99223E7CCD9350ED5369F8C19093C" \
"3F1BEC986CD583C7C9080ED4EBCDADE149A8D76DBCC09632EFE45E978C34818C" \
"849839A7FE73100F56435767EE838BEE246F91F19EFAB7E5FEB60E0B5AE4982E" \
"C949E629EC98C9DBCCFE6BECE6D8693F262A821D5A75A3778741C4BCC85E229E" \
"1EFC880A365201F4728FD70844286A28E376027CC8CD36F332AC60E9FC02FBE0" \
"772612CDD4DC2CD30903A69613651012941B6DD75FF20D7491B57DAF982A0712" \
"A37B3DD050C17AD17D8DB289246B917B06361713BB9ECF71DC55E84773859CFE" \
"F7508EF45BAB7619D5E8DA41DDE5FE5A12298678A6365DAC8EC0B9FC56293B74" \
"78B758B54F36730DE5D8FD174B90631F8FC8790FAAEC2378743CAF3BB4F36FED" \
"15BE543B7B9696D221CA0D957F73655E90EBB8D4546E0B073449198CB3FC0E53" \
"4EF1F81E1B2B65DAB75A967D784AD3F38BDB05CAF710818510DCB040FB3D7906" \
"D0411F9ADDC7E271427435A2451829159322363B2566EDF7C236CA3F350990EB" \
"F33B0D2D3EAA92071242015EC525ABD3411746A64057867C896A226A265E071E" \
"7B14C9EFF30A529D9EF1023D9CB94537F8375CDBF0FA9C303436755183C34B41" \
"E33BA8065F2CCECCD8F9101188619F629CFAAC43188B086BEC31FB60F5A06952" \
"D9C6F63566F5B41F10990C316A975A4838CA0752649585F9D50C9AA6BCFFD10C" \
"2A6BC6A3368272E49A8557D7A8A340521839658A80A65498868B06CB5047D9F6" \
"079D85D7DF5F618CC9200ED3E7AC04216EDA521322BAEB48D509A394EF0EA0B7" \
"A9F5953408D00783609E31BAFDC96B29B8FB3D955842C8FA48BE04C16E5341B0" \
"94E401220C71952DCD478C6DCB73A232DF3BE47DEDE4FBC04D72385DC05337A0" \
"EB23283592C59DB8936F38C703BE5035C55547DE29D98C765FF5753468E49B19" \
"A6D5B525669269907457FED3974C924EA6425C5A340C68BC94B756A2CB981181" \
"175A922A30C43F6A74A41DE2210E0F5C14DF7516CB96DE1C521D69FF59BA7721" \
"368228205AB32A15E2297577111786FFFAF168E63AF371C020FD9788D5A852BC" \
"443B42ED0AA64AEAAC6F58FD0AA14052A7F4FC1FD10087AED48205F8458D6172" \
"44F80E2846CBF9E70765E6DC44FE263053DD3601F2DA713CF02738B8E78D0DB7" \
"571BBB3862570142472BEC9E08DFDE5946C5EAF21CF7DCCDE838D1C8A64464E5" \
"72F72DA65A188B6134008028E3945D853BCCB023BD1BE12F10F138596B21FA85" \
"49EADA74E5C31CDC53C2B4F3A7DA9AA4A487EB2986A3A6B4287EC92CAC63C2CA" \
"04FC9551C4CA869329F2D0179CAF8E9C46B33490276CED0E21D6D2CA3446AE81" \
"CBA14040BCC58E267C861387ED012548CE7F694DD4C5B2C35DEC40DDE997261C" \
"CA90565889420D70BD258F1327EB6DE5F74538E765C2A83BE487641257530475" \
"2C6974D996D42CC43D334ED5CEAA3C4F82A3F65C07A27CD8999CDD011F8569AB" \
"9A19E7AE01D4343B8D761C4A191E2DD8D3F0A27B6E910496B7537157E9622450" \
"89608E94D4BB11F4ED40792A6C2310D3E1ABE8C0EAF4CE1EB00F523A2CAC5FC3" \
"2DA8598607F3B1E2CC795CEC7C7C08E4704ADF75E437A8BE093C6B62C3ABDF73" \
"37E23BFFB1A89672121C1E2E45A7F321FBFE61A78F3E59488EF05EC9B4B3989A" \
"96ED65B6BB50BD95A37DA89113B0BE46369D62455F12BC7FF04131CF4BA24748" \
"CA0B7EE8329BF4EBF29931EE1E654B374792884817F207E31A7CEAA19E3C3417" \
"D04D758EAF8D923C67CAD5F2F5990311CDD9C85DA4668C6DEBE9E3900236B086" \
"92F0FB989B20FDFBD6A686113421FAD8BB1B1E82C673A06F7EF4D9173C6DC6CF" \
"A2E778327F795CCD5CE293ABAA3BE1F0C54CD895265B0681463690F42CD40301" \
"687D7A3D66AB63CA87B7FE19E00966BE7DB78FC259260FD1CE37046D610A012C"
expected_sig_tc0_idx0_auth_path = \
"702AA3E3184702E7AE6BE645D70F089B1E9793F656FF9515603B5C483867ACAD" \
"B155CA848F1E1A4B73ACBC9D4EBD2D78CE27E088D58E32A5AE39701EE1F7197A" \
"793358107631E5430B16FE3D97837BCA9F3923A329835C5A3A961EE685D1394A" \
"3D55101E1919502F6F1F788B3F95700EFE2E177F03E06A4E81EC4FA0183B787D" \
"8A48CA41EED7B59BA6847E6EE2D1587E5FA39FA28B37EFFA35F1C309C4B3DC5E" \
"CEDB1B9C8A05CCC1A3B1A33101FB768C3A86B334EB64C81728C4A81EB5388B4C" \
"70E5DC9FCBAC9FDDD87AAB7FA0D3DF8222B9017033F4BBE9D311E0EEB4460EDF" \
"881CFF9B90BFCE69BC1B4C79C997EDFE4AAA65C88482B1BEBF47940B4977C674"
expected_sig_tc0_idx0 = "00000000" + \
expected_sig_tc0_idx0_R + \
expected_sig_tc0_idx0_wots + \
expected_sig_tc0_idx0_auth_path
###############################################################################33
###############################################################################33
###############################################################################33
###############################################################################33
###############################################################################33
expected_sig_tc1_idx5_R = \
"743EF66B8257AF7BCCF1197C4B93CDCFC6EC805A408841735F80150885A2D60D"
expected_sig_tc1_idx5_wots = \
"833D92FA4C366688C7117E1C4D45E18388E060E6D8007D717B8D764C744FCE07" \
"D4F2B63B77DE68C03ABDF566FBD50C148999680ADAC3DADC0D977A52AB05F46E" \
"6E0D7C5D04FEC8EA8F2D233694872EC823B058D02F7AFBCA5EFC39B11748D63A" \
"2A53EE3FB0BBA02D50CE061382FCE22816CEA661CEE5D6DC847F38AC2C8EC3DA" \
"82C638FAFD0E7E9CC08D8FE469A355CE981BBE7E0432AAF717CBCFC3431330E3" \
"73A6E10A04D68775ECC94F5EB57D295886D2E5CD9B46CC006CC1549C6E9D17E0" \
"BA290825AD76515D3713AC19E3E3B7A8AECBCD827ECA274A82D608B6B5CFD4F5" \
"EBF38FABD855DB7DD525825D78204954C794EECA43F740656A132967590BFD0E" \
"9749DE913496B12F654559DE650A45D96B475284A60491F6ACBCC5FDF79BCB24" \
"B2C14D53628D72BBFCDE7E497C8E308352ED2FBC34C7B39A41492E752B69F9C8" \
"4F53DC134A9752546E1E01F954094D269ECACB59B7D0A60AF806CB8884E82A27" \
"1B82871343C368F22ED2D0971D48E4627D6214036C3771E7548484BB4E4C15C7" \
"F3E850E595EB50BDB7070B0FFBF7340AFDADAC8A12BD1177075273CD3230EC81" \
"7EB0DF5D6F3AFCB91EB1C1449790DD8CC8585835C116B935E1BE6F6289EA42CF" \
"9DF74ECDBEC673816D91816D9C9E1652C7A7887224552B8180B6CAC95868B447" \
"C42BAAEAA6DF9EA9490468E3E5D624CB2B45B837978721FD426C3E937907C7CF" \
"7059854BA7A34E02ECFA49DB48318D0A88569B385B6D1CBFF088D733CF218743" \
"503FEC60CF21B0E0795E9DD0AD885F0655B6F3C4B830B16755EC0B5D7FBEB797" \
"1AF2F485580BA693C3E62FB27F8E7698D7FBD997F918E365F05314AE5CBC5CCA" \
"D4A499CB6383B9766634BACD3959824E0A145DDA8AC3210FEF6A5699502A7B05" \
"21F5A94E8891B660C51708FDBCE9D5268AB678D3B6B463BB8E28A52A78D81272" \
"BEA7F269D4E32FF9EF4124A4FA956A282156EAD51D532C0CFECA4DE4FDD72ED0" \
"CF5643FDA2A1108E09332AE5FE4447DC768FD2B4C6481BFA33DF98B857CAB3D4" \
"940637E891171526DEF2DBC33455B8CF94CFEC6D3345C403FA620C23E7AF6EFB" \
"DE7AC4AD373A7AB0AC4D729401E0DFDBA93BAA0F3F48AB1F45640DC5D233EE7A" \
"B347C932CBFE89944A83466471C44CD7BAD31FBD1ABCD7E4D363E31975EA0512" \
"1BD33E8193F368A3537B6961B1D5EFD9D96A137A4BEBCF92733027789AB633EE" \
"6824418B78E02C031B3ACD55150C2056F7D485A99752BDE9AE850BF76F38372C" \
"39E6646E33A10972A78D184E09F2366C8A27FD84269FEC1F715A3D5320BC04CC" \
"0F81917D8DC970590461FF232B8F8E238B9EDD70C35B4966A866FAB046FD9AB0" \
"4D7A2C7E0296FD5751EEDE331AB433ACC53BC7176BBE18A0F2CC7557E434DC1D" \
"0466E8C8F9FCC79E4D4E518285FF8CC825232D73924AEB59395DDF5E0D273203" \
"F2C0CA1AEE313814B3E694D5D837C47D413935D0B6E3E48FBED043E7A0CE6436" \
"9927843B80BA6053CF29D7144DB306036CDF161B0D96006573456C0AD72743DC" \
"4DA60552D7110FFE30F26BBC8EEAAD9FAC907890E9504F3A5C2B67256A6DD933" \
"93E11D3A48FA02B1B32534BBF8DC71C4D839FA2449DDA375DB3F9AD0C6853719" \
"CB976D11CC5ECFDC6740FCDFE1C5A96884FEF2C9FA76599A372D761DC184589D" \
"3B0D514460FEB23BB2C25560F77B09E9D8DE2B79C849081E1BCD6C233E14F473" \
"1EDE6DAE7DD87656A2CF444A92A57192F34260A7DC8FA01D00AA69C090813811" \
"74DFCA9D3A0D07A2A3CB953FE3619DC12DF05DC1C3C05FC00DD90D70FE9ADA6A" \
"FE749A7F09B4D76AF7B1B05FF7A0918530923163B27D9EA579BA1BE7C8E35DD0" \
"D10DC2B87C63E00AB88D3E6887A78308779A796ADDA9BF4BC7FB45A7F4AB735B" \
"B2C400D2122256A2DC93BF3D1A91843A4136E13A81ADBAAC6F336BCEB81E94FB" \
"4AA99B5671AE8B85F861FD9F13B613A75BC7BDC051DE36B2F58EE81BACB42E49" \
"24F1A57C453BEA23912FE59E10882B34F3731F06FA4CD1E57214FC7F2C6421CD" \
"CEDA5BDD8D6FC7D4C4FACDDEBF8064832D4B9DF457AE110D7149B6343A5FF498" \
"834EF2161A83CA214C0EB1ED1AA5C8E46BCD524DF72673AFE5917D9405E8E576" \
"1FC66433A344FD54F20948329EC6BCF996EE3A454635BF9C0287A32DDAB8C029" \
"548A919CB3D3E6A1B5C9554A26AD5229D61E8DFBB33ECD02221E492214130CF2" \
"8BC0FE13324229DFAA9155EDBB2AFCFD8EA9142B8EB2736DF616556FFE99ADD9" \
"5FEB389B7678CB284B8E4EAFD6CDDC3C88BDA093A906D40000BC5934EF99DED1" \
"A416E3172582691EBAE2EE4E79F13B0675AEAD31B8CB9DF5D869ABDC3F056877" \
"BE5F8BD9134BE075EE06B9F01369218FD4CF88F3E1F0673BFE0402415DE208BB" \
"04A6E2548DAA5DFC239B88F1F8DC4904041A8C6F20DB6769928AA58AD9D61834" \
"2A14864408EE7DE2BB8009B042355B58905F887287C7F69D8B1173352A238C81" \
"B32836E74E1EDD09613F5FD5698A62AE4FBB82F95D2D9636ECD39F8DDFDA55E4" \
"D954E7239D5EA4EF2FCD92BD93AB050468B2C1818ED95F9CBD4143764D0AE5E8" \
"F9ED858ADD2CAB4CC0A9C01730155B2CA1157F8E21B0BEEA874843338B18D0A3" \
"E75E312E0CB4F4BB1087442B16BB2E48E5736D4E43557F8497D5E1EC9F734454" \
"E1F297C9163641B0452C67A4A059645E8825664CEB29E7D20DFC0B416D0AC232" \
"B9C77DB56DA38924BF2CFFE0D078A173C612FD2C3D8A94F955F33A3F22D24CE1" \
"1A36324730048D2CDC9BCE9F666278DCA4EA56F47A42221923384C5FB3B23130" \
"8D516F8C8C881495B569294EDDB1608F64DC95A4C00545113B13318BFE13F8CD" \
"65C7C49B944E85259D368ED484FDECAC39C4E7D5A20AFC1C5EB62ED2DC19B8A9" \
"69FE90374EC2A0C88917F88748CC2BE0316631E05ED4659DC91E13D4B8358E19" \
"1FF32EB2E20CF9A4FC73649B35357B5AA89EA4A4B1E57FD7D2D0F749D243B0A1" \
"D5A361641251C424E143093E3A705476710C224511D6F20E930330864C88B56A" \
"881CFF9B90BFCE69BC1B4C79C997EDFE4AAA65C88482B1BEBF47940B4977C674"
expected_sig_tc1_idx5_auth_path = \
"35B3195636E7D953454A7019BDB5FBD5E52060511BD2E7AE15D012463B01F524" \
"D2A393A64D369A06EBC5429DBEE6C68D17272E3117274A42C402A2C373187FD1" \
"98D965868DE234B7885E40175EC48EE5D9B93831C9859223B71486A2DA618704" \
"3D55101E1919502F6F1F788B3F95700EFE2E177F03E06A4E81EC4FA0183B787D" \
"8A48CA41EED7B59BA6847E6EE2D1587E5FA39FA28B37EFFA35F1C309C4B3DC5E" \
"CEDB1B9C8A05CCC1A3B1A33101FB768C3A86B334EB64C81728C4A81EB5388B4C" \
"70E5DC9FCBAC9FDDD87AAB7FA0D3DF8222B9017033F4BBE9D311E0EEB4460EDF"
expected_sig_tc1_idx5 = "00000005" + \
expected_sig_tc1_idx5_R + \
expected_sig_tc1_idx5_wots + \
expected_sig_tc1_idx5_auth_path
###############################################################################33
###############################################################################33
###############################################################################33
###############################################################################33
###############################################################################33
expected_sig_tc2_idx10_R = \
"616056FA0BE97D18D433CF62604B0442F07E88AC605742C47F4882EC055C7CAE"
expected_sig_tc2_idx10_wots = \
"4B082975BFFA3A2061CDC15928D07E49345018E158C261903BADB72C47C04234" \
"6A3F068BBB1DE92D0E57374A57DA15203FD13AD519FA6773295392B3633BBF97" \
"04622EBEAAFE81A9C4490BD2918E1C3E387FDC0278DF00D2068197424B9C61D6" \
"80D840D26A6FF8BC08850BEEC77EF304395FEA460A2534C2075A68C83FF48E35" \
"2E57B149672DDE53429D3EDD1E0630AF86A8BC468FB3B7A2549F7695FAA23324" \
"7BB14EC6A6C08BC0F47A196029950ABF06370AE492E1CF92CCCA4D067B3FCEC0" \
"155FF81D5CD0FE2E05A8DCF5ACDE018249F463787FBB0D65773329A3E8363B48" \
"E0788F8C5CEE052D94E32C9C2925A10875E028B73EA593AE8FFC48AA4B9EF646" \
"00F97EB881327109BB91A57D806B6AFC2EEF0BCBB31EC76D439110C57F4177C2" \
"9942F291C2262C236F756847B983E137F550EFEEBD57C5C4CAC117BA4D1393D1" \
"C195CDD1FC340023632EE001BE3EEFC1BA3BD44AFA848AFD279E5DB6862A9395" \
"A6E8C4097C675B79BB024B76C9620FD97CBF4A9CDAD012F7936C014E97E6701E" \
"D0B31D2E0CB86D058307E0CED7C86317C8EF37D92E51C7F3C2F0CF16BD535779" \
"AA95145FBC83B4FB8A3A44638F9E0BC3D849E5082025692EED075353C164CC38" \
"7DADC37CC9E42459D078DE45DA76292A7B60D9B8CBC8CEEF802FFFD0998610A8" \
"E07ADEFF24228CC6397D223DB6D47FC55BE13C3C097AA272248E9A10E9AB7868" \
"629AE3D4754257BB88CE644F2DB17C0786F940EB4A1CAFCF7B385042D203081F" \
"8FB5F46CA7816D27563D27951723F35729AD3C9E7006FD51B772955379043B2A" \
"E8D7F91AD96290B86BD669CBD7F29BCB57639304E5C7F1F82C1E3311E3915696" \
"E1AD36C14415F3C1786A307231E4086F1DAFD1A7DB3DEC02232A3A143FE06BD0" \
"06CB24DC9763BB25ED58DFC31DEF5379E3C5CE1084B6A745027850474742892F" \
"C8F014D132256117B11D5C93C757CA126715D3990403FAB3F699D3A478155DD5" \
"3986817564822A64A7BFBFEA3A900B06170A57E327619260DC5A759E2EE94C73" \
"E0002565571D5FB10F164DFA46DC1423DD99F0110171ED7C1EB5FF4A79DB6BFB" \
"BCB88CCAE04FFD6EF1CDB954A54436D213C469DBC21D4F00AAFF3143286F7362" \
"31DA69F079980DFA2A6F70DE3130CFE5D5C9D85ABD827C2677342921D65786D7" \
"040643F6F08C1EAAA1C13F306A5FF2FC71817E905721B3EC6944DE3350F759E1" \
"6F11A8DBF534797C6370E7265EA2E799852F35F810A04A5D7AD148076DEC8282" \
"262924F26CED990D6E14A693B8B6A1DB27B4E2C4196493830A1AA316E1FB5D6E" \
"25C0D4F8E831F81C8404A876FBB6197A5CF34F59E40251156F9DB5EE46B482EE" \
"585DD4BD460E8044F928FE3D4A308B74BE66824D05B2A1EAF83CFA5354E76C5A" \
"47FA5A14F625884D435DF9D575D3C5C6C4AA4220AA960DA1B92E406EE28DC070" \
"B32CF41E9AD9704419D8506CE0E5A09D12FF9E69E10B58E30DF1EF5452CBECAB" \
"147CEA57310D1D7A17DD5B9635806BE06CB5B5A3E7D471960DD7C75AAB11BDDB" \
"F50A0D8C76F57A26210D1D7D4E1450B679F62CC006BDCF89E15FAE21267349DC" \
"20125485F70B9D2DE77B5826F917E9670AF2C0C2E49900F609628B75F0F9E0B9" \
"7DE5ED85BC49BF2720F696344541CD95F784C4A769FA839F36F9D6A15021B833" \
"07C20C5B1999E7B96D5EB1FD1DC263B904180F54AF688D12379B16E6934D4B49" \
"01DA0C00C5C2C065A161897034C1413B938EAFC15E2FD3C2AA29E12A78C0B548" \
"9767F2F1A4A4A5027A39AFF933EBEBF479A9760822898F3AF62BB068750028D9" \
"19AF55F16ABD5EE29116A3EA6D5303ADB5126CE2F4E17603AF14311071C98B12" \
"0B52645041F8BFA4CE2122494E694C9CF3D546A6237CEA319F333C23F1ECD26E" \
"52D50CCAA4DF0A69016091145A0E91DD24335D67199B6B3BECA721F20104B03A" \
"74452E240B8384EF2C1194AE72E5AD41CDED320D2C6313B053E1D652062D30B3" \
"C397DB525D1B94838A8DE01410C766B5ED9A09A1B62FF24A112536F2F5F6774B" \
"ACD7F0F18CF1E5ABF9420ADCA0B880E9FC94DECF2577AF33F2743FCDD6F339A9" \
"6B13FE295B50EA4BDD474B50CD53AAAB1AF5C549279F050B63990C405EA9E1BA" \
"2FF72C205C40CC4DBE00285811F29257311E818242E7A9D4BBF164818D4D2D0A" \
"34C604B40E628561BA148F4206A5ABB8033C016B4EDCF6BE8DC496A6E28378DF" \
"6991596B203C528914E8B89AB5EDA262888E8F43E39D264E4EC02DCFA01CE634" \
"30D32027CB818F2AB72D944DC9E62BF14A14AF3F2E9EC4131B60A9E7BCEE6B09" \
"4729D48D68E76F16A8D6287E20539FE8A2A1FC71862645AFC7577680699DF8F6" \
"B1BB609B667A71B21B09D5172F0EA5C06947510F57F0DDDE984D511CE93E91A3" \
"B52A5E924E78DB943AC1754922AB8DC2F8E467CD863328F2BAE4994B930F6905" \
"1095F9AC11B3FFDF4B5B8B8C43F9E32AA8E7344691F0986CA79FBD73D86ED537" \
"9F535B2FDF90B99B2A3A2EE86EA21B436CD245FE02E37583F87301AC75973AC5" \
"EF4222DC09C2B02F41973B6411982129758E0B4BE56775A4D2A3C6EB33949EF6" \
"D81C166B490C449D6A9BCA27F4F8A3DFC13A23870DEB5735CD5B98907406004E" \
"8D1D7A3555740A29C0353FC70632267C66C1AA65DA0BCEBA89EB8EBFD81F0208" \
"468912B0C34F2C87C98224AC64C28A17BF2D1E2A512CABA74397280CF38E9412" \
"D03CE886B1D08A78E73C3B7FE8DA6F6BC26CCCA35B71553A7DAB1B312F72398D" \
"DC37B740BFF0AB683AF810BF582605158E846E28BC786E1F624DBD2F86E152BF" \
"D66318D618A7AC5E6C0999EA89413523D5093E059274AD3A9DB77C333A69B5E7" \
"B09C145E0D643B242F4FEC8A90A4EE229CEB1918FDD8F243DFFA25545AC9152F" \
"7688DA263C9DAA0E80F59DEC0C95A40D7094A7FA2B2EE93FAD7D19C206B1579C" \
"A8E9B056B03289FC2F09C031F75682E600BCB05055AB01CE105BBE11EF1D5B66" \
"0E5A1B67E34D7B514C75C3048E97D744C7A41627FE5996EA1A75D019274D819A"
expected_sig_tc2_idx10_auth_path = \
"338A8558244F720156AE8F0DC2E2A5CB5E50F5BE510AFB55FB1E2DE8DF2C93CB" \
"F2A33281C1427CD69BD01C8C6BF0A4C6B8F18D384D4FE2F195EFA589C9567272" \
"CBE9C19E5E4B0C3594A732591FCFA3AA37EA3B26477E4DBF839E9B0C3FCED8F5" \
"12F87F06D652C6437CFE0D9E51FA80AED4C5851CEB6F101BB4C9A9572D510E21" \
"8A48CA41EED7B59BA6847E6EE2D1587E5FA39FA28B37EFFA35F1C309C4B3DC5E" \
"CEDB1B9C8A05CCC1A3B1A33101FB768C3A86B334EB64C81728C4A81EB5388B4C" \
"70E5DC9FCBAC9FDDD87AAB7FA0D3DF8222B9017033F4BBE9D311E0EEB4460EDF" \
"881CFF9B90BFCE69BC1B4C79C997EDFE4AAA65C88482B1BEBF47940B4977C674"
expected_sig_tc2_idx10 = "0000000A" + \
expected_sig_tc2_idx10_R + \
expected_sig_tc2_idx10_wots + \
expected_sig_tc2_idx10_auth_path
#########################################################################
#########################################################################
#########################################################################
#########################################################################
#########################################################################
if __name__ == '__main__':
# This script will upload test data to the ledger
dev = LedgerQRL()
##########################
# KEYGEN PHASE 1
answer = dev.send(INS_TEST_PK_GEN_1)
idx = binascii.hexlify(answer[0:4]).upper()
seed = binascii.hexlify(answer[4:36]).upper()
prf_seed = binascii.hexlify(answer[36:68]).upper()
pub_seed = binascii.hexlify(answer[68:100]).upper()
root = binascii.hexlify(answer[100:]).upper()
# ledgerqrl.U2FMODE=False
print(len(answer))
print(seed)
print(prf_seed)
print(pub_seed)
assert seed == "EDA313C95591A023A5B37F361C07A5753A92D3D0427459F34C7895D727D62816"
assert prf_seed == "B3AA2224EB9D823127D4F9F8A30FD7A1A02C6483D9C0F1FD41957B9AE4DFC63A"
assert pub_seed == "3191DA3442686282B3D5160F25CF162A517FD2131F83FBF2698A58F9C46AFC5D"
##########################
# KEYGEN PHASE 2 - UPLOAD
assert len(expected_leafs_zeroseed) == 256
start = time.time()
for i in range(0, 256, 4):
print("====", i)
data = bytearray.fromhex(expected_leafs_zeroseed[i + 0]) + \
bytearray.fromhex(expected_leafs_zeroseed[i + 1]) + \
bytearray.fromhex(expected_leafs_zeroseed[i + 2]) + \
bytearray.fromhex(expected_leafs_zeroseed[i + 3])
answer = dev.send(INS_TEST_WRITE_LEAF, i, 0, data)
assert len(answer) == 0
#########################
# KEYGEN PHASE 3
answer = dev.send(INS_TEST_CALC_PK)
dev.send(INS_PUBLIC_KEY)
assert len(answer) == 67
leaf = binascii.hexlify(answer).upper()
print(leaf)
assert leaf == "000400" \
"106D0856A5198967360B6BDFCA4976A433FA48DEA2A726FDAF30EA8CD3FAD211" \
"3191DA3442686282B3D5160F25CF162A517FD2131F83FBF2698A58F9C46AFC5D"
| 65.673841
| 89
| 0.837623
| 1,102
| 39,667
| 30.033575
| 0.516334
| 0.006979
| 0.004441
| 0.003807
| 0.03674
| 0.028039
| 0.001752
| 0.001752
| 0.001752
| 0
| 0
| 0.54434
| 0.086898
| 39,667
| 603
| 90
| 65.782753
| 0.369437
| 0.004765
| 0
| 0.023636
| 0
| 0
| 0.820105
| 0.818431
| 0
| 1
| 0
| 0
| 0.012727
| 1
| 0.001818
| false
| 0
| 0.001818
| 0
| 0.003636
| 0.021818
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f7f08b52b4286a44491bc14d9f0e9a3ab181ec80
| 25,514
|
py
|
Python
|
cvlib/nn/instconv2d.py
|
AaronLeong/cvlib
|
5afe9804df2c162d8132f18ad0d9c9f7c2220dd0
|
[
"BSD-3-Clause"
] | null | null | null |
cvlib/nn/instconv2d.py
|
AaronLeong/cvlib
|
5afe9804df2c162d8132f18ad0d9c9f7c2220dd0
|
[
"BSD-3-Clause"
] | null | null | null |
cvlib/nn/instconv2d.py
|
AaronLeong/cvlib
|
5afe9804df2c162d8132f18ad0d9c9f7c2220dd0
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules import conv
from torch.nn.modules.utils import _pair
class InstConv2d(conv._ConvNd):
r"""Applies a 2D convolution over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size
:math:`(N, C_{in}, H, W)` and output :math:`(N, C_{out}, H_{out}, W_{out})`
can be precisely described as:
.. math::
\begin{array}{ll}
out(N_i, C_{out_j}) = bias(C_{out_j})
+ \sum_{{k}=0}^{C_{in}-1} weight(C_{out_j}, k) \star input(N_i, k)
\end{array}
where :math:`\star` is the valid 2D `cross-correlation`_ operator,
:math:`N` is a batch size, :math:`C` denotes a number of channels,
:math:`H` is a height of input planes in pixels, and :math:`W` is
width in pixels.
| :attr:`stride` controls the stride for the cross-correlation, a single
number or a tuple.
| :attr:`padding` controls the amount of implicit zero-paddings on both
| sides for :attr:`padding` number of points for each dimension.
| :attr:`dilation` controls the spacing between the kernel points; also
known as the à trous algorithm. It is harder to describe, but this `link`_
has a nice visualization of what :attr:`dilation` does.
| :attr:`groups` controls the connections between inputs and outputs.
`in_channels` and `out_channels` must both be divisible by `groups`.
| At groups=1, all inputs are convolved to all outputs.
| At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels,
and producing half the output channels, and both subsequently
concatenated.
At groups=`in_channels`, each input channel is convolved with its
own set of filters (of size `out_channels // in_channels`).
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
.. note::
Depending of the size of your kernel, several (of the last)
columns of the input might be lost, because it is a valid `cross-correlation`_,
and not a full `cross-correlation`_.
It is up to the user to add proper padding.
.. note::
The configuration when `groups == in_channels` and `out_channels = K * in_channels`
where `K` is a positive integer is termed in literature as depthwise convolution.
In other words, for an input of size :math:`(N, C_{in}, H_{in}, W_{in})`, if you want a
depthwise convolution with a depthwise multiplier `K`,
then you use the constructor arguments
:math:`(in\_channels=C_{in}, out\_channels=C_{in} * K, ..., groups=C_{in})`
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
:math:`H_{out} = floor((H_{in} + 2 * padding[0] - dilation[0] * (kernel\_size[0] - 1) - 1) / stride[0] + 1)`
:math:`W_{out} = floor((W_{in} + 2 * padding[1] - dilation[1] * (kernel\_size[1] - 1) - 1) / stride[1] + 1)`
Attributes:
weight (Tensor): the learnable weights of the module of shape
(out_channels, in_channels,
kernel_size[0], kernel_size[1])
bias (Tensor): the learnable bias of the module of shape (out_channels)
W(Tensor): Spectrally normalized weight
u (Tensor): the right largest singular value of W.
.. _cross-correlation:
https://en.wikipedia.org/wiki/Cross-correlation
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(InstConv2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias)
# self.register_buffer('dropout_mask', torch.Tensor(1, out_channels).normal_())
self.activation = nn.ReLU()
self.weight_size = (1, in_channels) + kernel_size
self.out_channels = out_channels
self.in_channels = in_channels
self.num_features = in_channels * out_channels
#Affine transform parameters
self.dropout_mask = nn.Parameter(torch.Tensor(
out_channels, in_channels))
# self.weight = Parameter(torch.Tensor(
# in_channels, out_channels // groups, *kernel_size))
#Parameter dropout initilization
self.set_dropout_parameters()
def set_dropout_parameters(self):
self.dropout_mask.data.uniform_()
def _inst_dropout(self,input,channel_mask):
# input size n c h w
# print('input', input.size())
# print(channel_mask)
channel_mask = self.activation(channel_mask)
channel_mask = channel_mask.view(1,self.in_channels,1,1).expand(input.size())
# print('channel_mask', channel_mask.size())
# print(channel_mask)
# channel_mask = channel_mask
# print(channel_mask)
# print('channel_mask', channel_mask.size())
# x = F.dropout(input)
return input * channel_mask
# return input.mul_(channel_mask)
def forward(self, input):
# print('weight',self.weight.data)
# print('size',self.weight.size())
# w_ = torch.split(self.weight.data,3,dim=0)
# print('w_',w_[0].size())
# print(self.weight.size())
# print(self.weight[0].view(1,2,3,3).size())
# print(input.size())
x = self._inst_dropout(input, self.dropout_mask[0])
channel_output = F.conv2d(x, self.weight[0].view(self.weight_size), self.bias[0].view(1), self.stride,
self.padding, self.dilation, self.groups)
# self.weight.size(0): output channel size
# self.weight.size(0)
# W_ = torch.split(self.weight,1)
for i in range(1, self.out_channels):
# print(i)
x = self._inst_dropout(input, self.dropout_mask[i])
channel_output = torch.cat([channel_output,F.conv2d(x, self.weight[i].view(self.weight_size), self.bias[i].view(1), self.stride,
self.padding, self.dilation, self.groups)], 1)
# print('channel_output', channel_output.size())
return channel_output
# return F.conv2d(input, self.weight[0].view(1,2,3,3), self.bias[0].view(1), self.stride,
# self.padding, self.dilation, self.groups)
class InstConv2dv2(conv._ConvNd):
r'''
_inst_dropout weight
'''
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(InstConv2dv2, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias)
# self.register_buffer('dropout_mask', torch.Tensor(1, out_channels).normal_())
self.activation = nn.ReLU(True)
self.weight_size = (1, in_channels) + kernel_size
self.out_channels = out_channels
self.in_channels = in_channels
self.num_features = in_channels * out_channels
#Affine transform parameters
self.dropout_mask = nn.Parameter(torch.Tensor(
out_channels, in_channels))
# self.weight = Parameter(torch.Tensor(
# in_channels, out_channels // groups, *kernel_size))
#Parameter dropout initilization
self.set_dropout_parameters()
def set_dropout_parameters(self):
self.dropout_mask.data.fill_(1)
def _inst_dropout(self,input,channel_mask):
# input size n c h w
# print('input', input.size())
# print(channel_mask)
channel_mask = self.activation(channel_mask)
channel_mask = channel_mask.view(1,self.in_channels,1,1).expand(input.size())
# print('channel_mask', channel_mask.size())
# print(channel_mask)
# channel_mask = channel_mask
# print(channel_mask)
# print('channel_mask', channel_mask.size())
# x = F.dropout(input)
return input * channel_mask
# return input.mul_(channel_mask)
def forward(self, input):
# print('weight',self.weight.data)
# print('size',self.weight.size())
# w_ = torch.split(self.weight.data,3,dim=0)
# print('w_',w_[0].size())
# print(self.weight.size())
# print(self.weight[0].view(1,2,3,3).size())
# print(input.size())
x = self._inst_dropout(input, self.dropout_mask[0])
channel_output = F.conv2d(x, self.weight[0].view(self.weight_size), self.bias[0].view(1), self.stride,
self.padding, self.dilation, self.groups)
# self.weight.size(0): output channel size
# self.weight.size(0)
# W_ = torch.split(self.weight,1)
for i in range(1, self.out_channels):
# print(i)
x = self._inst_dropout(input, self.dropout_mask[i])
channel_output = torch.cat([channel_output,F.conv2d(x, self.weight[i].view(self.weight_size), self.bias[i].view(1), self.stride,
self.padding, self.dilation, self.groups)], 1)
print('channel_output', channel_output.size())
return channel_output
# return F.conv2d(input, self.weight[0].view(1,2,3,3), self.bias[0].view(1), self.stride,
# self.padding, self.dilation, self.groups)
class InstConv2dv3(nn.Module):
r'''
__setattr__ conv
'''
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
super(InstConv2dv3, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.activation = nn.ReLU()
self.weight = Parameter(torch.Tensor(
out_channels, in_channels))
for i in range(out_channels):
self.__setattr__('conv_%d'%i,nn.Conv2d(in_channels, 1,kernel_size,stride,padding))
# self.reset_parameters()
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.xavier_uniform_(m.weight)
# nn.init.constant_(m.bias, 0.1)
def reset_parameters(self):
self.weight.data.fill_(1)
def _dropout(self,input,_weight):
# print(_weight.size())
_w0 = self.activation(_weight)
_w1 = _w0.view(1,_weight.size(0),1,1).expand_as(input)
return input.mul_(_w1)
# return input*_weight
def forward(self, input):
ouput = self.__getattr__('conv_0')(input)
for i in range(1, self.out_channels):
x = self._dropout(input, self.weight[i])
ouput = torch.cat((ouput,self.__getattr__('conv_%d'%i)(x)),1)
return ouput
class InstConv2dv5(conv._ConvNd):
r"""Applies a 2D convolution over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size
:math:`(N, C_{in}, H, W)` and output :math:`(N, C_{out}, H_{out}, W_{out})`
can be precisely described as:
.. math::
\begin{array}{ll}
out(N_i, C_{out_j}) = bias(C_{out_j})
+ \sum_{{k}=0}^{C_{in}-1} weight(C_{out_j}, k) \star input(N_i, k)
\end{array}
where :math:`\star` is the valid 2D `cross-correlation`_ operator,
:math:`N` is a batch size, :math:`C` denotes a number of channels,
:math:`H` is a height of input planes in pixels, and :math:`W` is
width in pixels.
| :attr:`stride` controls the stride for the cross-correlation, a single
number or a tuple.
| :attr:`padding` controls the amount of implicit zero-paddings on both
| sides for :attr:`padding` number of points for each dimension.
| :attr:`dilation` controls the spacing between the kernel points; also
known as the à trous algorithm. It is harder to describe, but this `link`_
has a nice visualization of what :attr:`dilation` does.
| :attr:`groups` controls the connections between inputs and outputs.
`in_channels` and `out_channels` must both be divisible by `groups`.
| At groups=1, all inputs are convolved to all outputs.
| At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels,
and producing half the output channels, and both subsequently
concatenated.
At groups=`in_channels`, each input channel is convolved with its
own set of filters (of size `out_channels // in_channels`).
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
.. note::
Depending of the size of your kernel, several (of the last)
columns of the input might be lost, because it is a valid `cross-correlation`_,
and not a full `cross-correlation`_.
It is up to the user to add proper padding.
.. note::
The configuration when `groups == in_channels` and `out_channels = K * in_channels`
where `K` is a positive integer is termed in literature as depthwise convolution.
In other words, for an input of size :math:`(N, C_{in}, H_{in}, W_{in})`, if you want a
depthwise convolution with a depthwise multiplier `K`,
then you use the constructor arguments
:math:`(in\_channels=C_{in}, out\_channels=C_{in} * K, ..., groups=C_{in})`
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
:math:`H_{out} = floor((H_{in} + 2 * padding[0] - dilation[0] * (kernel\_size[0] - 1) - 1) / stride[0] + 1)`
:math:`W_{out} = floor((W_{in} + 2 * padding[1] - dilation[1] * (kernel\_size[1] - 1) - 1) / stride[1] + 1)`
Attributes:
weight (Tensor): the learnable weights of the module of shape
(out_channels, in_channels, kernel_size[0], kernel_size[1])
bias (Tensor): the learnable bias of the module of shape (out_channels)
W(Tensor): Spectrally normalized weight
u (Tensor): the right largest singular value of W.
.. _cross-correlation:
https://en.wikipedia.org/wiki/Cross-correlation
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(InstConv2dv5, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias)
self.register_buffer('dropout_mask',nn.Parameter(torch.Tensor(
out_channels, in_channels, 1, 1)))
# self.dropout_mask = nn.Parameter(torch.Tensor(
# out_channels, in_channels, 1, 1))
# self.weight = Parameter(torch.Tensor(
# in_channels, out_channels // groups, *kernel_size))
self.activation = nn.ReLU(True)
#Parameter dropout initilization
self.set_dropout_parameters()
def set_dropout_parameters(self):
self.dropout_mask.data.fill_(1)
@property
def W_(self):
_m = self.activation(self.self.dropout_mask.data)
w_mask = _m.expand_as(self.weight.data)
return self.weight.data.mul_(w_mask)
def forward(self, input):
return F.conv2d(input, self.W_, self.bias, self.stride,
self.padding, self.dilation, self.groups)
class InstConv2dv4(conv._ConvNd):
r"""Applies a 2D convolution over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size
:math:`(N, C_{in}, H, W)` and output :math:`(N, C_{out}, H_{out}, W_{out})`
can be precisely described as:
.. math::
\begin{array}{ll}
out(N_i, C_{out_j}) = bias(C_{out_j})
+ \sum_{{k}=0}^{C_{in}-1} weight(C_{out_j}, k) \star input(N_i, k)
\end{array}
where :math:`\star` is the valid 2D `cross-correlation`_ operator,
:math:`N` is a batch size, :math:`C` denotes a number of channels,
:math:`H` is a height of input planes in pixels, and :math:`W` is
width in pixels.
| :attr:`stride` controls the stride for the cross-correlation, a single
number or a tuple.
| :attr:`padding` controls the amount of implicit zero-paddings on both
| sides for :attr:`padding` number of points for each dimension.
| :attr:`dilation` controls the spacing between the kernel points; also
known as the à trous algorithm. It is harder to describe, but this `link`_
has a nice visualization of what :attr:`dilation` does.
| :attr:`groups` controls the connections between inputs and outputs.
`in_channels` and `out_channels` must both be divisible by `groups`.
| At groups=1, all inputs are convolved to all outputs.
| At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels,
and producing half the output channels, and both subsequently
concatenated.
At groups=`in_channels`, each input channel is convolved with its
own set of filters (of size `out_channels // in_channels`).
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
.. note::
Depending of the size of your kernel, several (of the last)
columns of the input might be lost, because it is a valid `cross-correlation`_,
and not a full `cross-correlation`_.
It is up to the user to add proper padding.
.. note::
The configuration when `groups == in_channels` and `out_channels = K * in_channels`
where `K` is a positive integer is termed in literature as depthwise convolution.
In other words, for an input of size :math:`(N, C_{in}, H_{in}, W_{in})`, if you want a
depthwise convolution with a depthwise multiplier `K`,
then you use the constructor arguments
:math:`(in\_channels=C_{in}, out\_channels=C_{in} * K, ..., groups=C_{in})`
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
:math:`H_{out} = floor((H_{in} + 2 * padding[0] - dilation[0] * (kernel\_size[0] - 1) - 1) / stride[0] + 1)`
:math:`W_{out} = floor((W_{in} + 2 * padding[1] - dilation[1] * (kernel\_size[1] - 1) - 1) / stride[1] + 1)`
Attributes:
weight (Tensor): the learnable weights of the module of shape
(out_channels, in_channels, kernel_size[0], kernel_size[1])
bias (Tensor): the learnable bias of the module of shape (out_channels)
W(Tensor): Spectrally normalized weight
u (Tensor): the right largest singular value of W.
.. _cross-correlation:
https://en.wikipedia.org/wiki/Cross-correlation
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(InstConv2dv4, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias)
self.dropout_mask = nn.Parameter(torch.Tensor(
out_channels, in_channels,1,1))
self.size = out_channels* in_channels
self.activation = nn.ReLU(True)
#Parameter dropout initilization
self.set_dropout_parameters()
self.m = torch.Tensor(out_channels, in_channels,1,1).fill_(1).cuda(0)
def set_dropout_parameters(self):
self.dropout_mask.data.uniform_()
def getinfo(self):
# print(self.dropout_mask.data)
idx = self.dropout_mask.data<=0
idx = idx.resize_(self.size).type_as(self.m)
# print(idx.size())
# print(idx)
m1 = self.m.clone().resize_(self.size)
# print(m1.size())
# print(m1)
print('idx:',torch.dot(idx,m1),self.dropout_mask.data[0])
@property
def W_(self):
# print(self.dropout_mask.size())
# print(self.m.size())
y = self.dropout_mask * self.m
# print(y.size())
return self.weight * self.activation(y).expand_as(self.weight)
# return self.weight * self.activation(self.dropout_mask.data).expand_as(self.weight)
def forward(self, input):
self.getinfo()
return F.conv2d(input, self.W_, self.bias, self.stride,
self.padding, self.dilation, self.groups)
from torch.autograd import Variable
if __name__ == '__main__':
model = InstConv2dv4(3,1,1,1,0,bias=False)
# print(model)
x = torch.ones([1,3,3,3])
# x = torch.split(x,2,dim=1)
# print('x',x,x[0].size())
x = Variable(x)
print(model(x))
# m1 = torch.Tensor(2,2,1,1).fill_(2)
# m2 = torch.Tensor(2,2,1,1).fill_(2)
# print(m1.resize_(4))
# print(torch.dot(m1.resize_(1),m2.resize_(1)))
| 43.613675
| 140
| 0.625735
| 3,508
| 25,514
| 4.401368
| 0.083238
| 0.038212
| 0.018459
| 0.019041
| 0.906153
| 0.884585
| 0.87513
| 0.872474
| 0.865933
| 0.865609
| 0
| 0.013936
| 0.257506
| 25,514
| 584
| 141
| 43.688356
| 0.801098
| 0.617778
| 0
| 0.60241
| 0
| 0
| 0.01563
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.126506
| false
| 0
| 0.042169
| 0.006024
| 0.259036
| 0.018072
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f7f5886c92cc4500bda948ea7952ec1d46de279f
| 961
|
py
|
Python
|
Examples/Demos.py
|
SimpleITK/SimpleITK-MICCAI-2011-Tutorial
|
c8cffa8888fda71b9e4f2fdb3e10c2c66dba8371
|
[
"CC-BY-3.0"
] | 25
|
2015-03-08T16:24:13.000Z
|
2021-07-23T02:44:04.000Z
|
Examples/Demos.py
|
SimpleITK/SimpleITK-MICCAI-2011-Tutorial
|
c8cffa8888fda71b9e4f2fdb3e10c2c66dba8371
|
[
"CC-BY-3.0"
] | null | null | null |
Examples/Demos.py
|
SimpleITK/SimpleITK-MICCAI-2011-Tutorial
|
c8cffa8888fda71b9e4f2fdb3e10c2c66dba8371
|
[
"CC-BY-3.0"
] | 4
|
2015-01-29T21:29:40.000Z
|
2022-03-11T08:14:07.000Z
|
import IPython.lib.demo as ipd
# To use, run ipython, then
#
# In [1]: %run Demos.py
# In [2]: d = ImageDemo()
# In [3]: d()
# In [4]: d()
def ImageDemo ():
return ipd.ClearIPDemo ( 'BasicTutorial1/Image.py' )
def InputOutputDemo ():
return ipd.ClearIPDemo ( 'BasicTutorial1/InputOutput.py' )
def MemoryManagementDemo ():
return ipd.ClearIPDemo ( 'BasicTutorial1/MemoryManagement.py' )
def FiltersDemo ():
return ipd.ClearIPDemo ( 'BasicTutorial2/Filters.py' )
def MorphologyDemo ():
return ipd.ClearIPDemo ( 'BasicTutorial2/Morphology.py' )
def MeasureRegionsDemo ():
return ipd.ClearIPDemo ( 'InteractiveTutorial/MeasureRegions.py' )
def BorderChangeDemo ():
return ipd.ClearIPDemo ( 'InteractiveTutorial/05-01-BorderChange.py' )
def NumpyDemo ():
return ipd.ClearIPDemo ( 'InteractiveTutorial/05-02-Numpy.py' )
def RidgeDetectionDemo ():
return ipd.ClearIPDemo ( 'InteractiveTutorial/05-04-RidgeDetection.py' )
| 25.972973
| 76
| 0.712799
| 104
| 961
| 6.586538
| 0.442308
| 0.118248
| 0.262774
| 0.227737
| 0.179562
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025862
| 0.155047
| 961
| 36
| 77
| 26.694444
| 0.817734
| 0.098855
| 0
| 0
| 0
| 0
| 0.342657
| 0.342657
| 0
| 0
| 0
| 0
| 0
| 1
| 0.473684
| true
| 0
| 0.052632
| 0.473684
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
f71db1164a4a62b58d179f38b09f4c707a5ebaf0
| 107
|
py
|
Python
|
ckanext-hdx_service_checker/ckanext/hdx_service_checker/tests/test_plugin.py
|
alexandru-m-g/hdx-ckan
|
647f1f23f0505fa195601245b758edcaf4d25985
|
[
"Apache-2.0"
] | 1
|
2020-03-07T02:47:15.000Z
|
2020-03-07T02:47:15.000Z
|
ckanext-hdx_service_checker/ckanext/hdx_service_checker/tests/test_plugin.py
|
datopian/hdx-ckan
|
2d8871c035a18e48b53859fec522b997b500afe9
|
[
"Apache-2.0"
] | null | null | null |
ckanext-hdx_service_checker/ckanext/hdx_service_checker/tests/test_plugin.py
|
datopian/hdx-ckan
|
2d8871c035a18e48b53859fec522b997b500afe9
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for plugin.py."""
import ckanext.hdx_service_checker.plugin as plugin
def test_plugin():
pass
| 21.4
| 51
| 0.747664
| 16
| 107
| 4.8125
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130841
| 107
| 5
| 52
| 21.4
| 0.827957
| 0.186916
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
f75bfc6bd70d285e8df66e56086560c9c887971c
| 30,707
|
py
|
Python
|
tests/test_actionAngleTorus.py
|
turnergarrow/galpy
|
7132eddbf2dab491fe137790e31eacdc604b0534
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_actionAngleTorus.py
|
turnergarrow/galpy
|
7132eddbf2dab491fe137790e31eacdc604b0534
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_actionAngleTorus.py
|
turnergarrow/galpy
|
7132eddbf2dab491fe137790e31eacdc604b0534
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function, division
import os
import sys
import pytest
import warnings
import numpy
from galpy.util import galpyWarning
from test_actionAngle import reset_warning_registry
_TRAVIS= bool(os.getenv('TRAVIS'))
PY2= sys.version < '3'
# Print all galpyWarnings always for tests of warnings
warnings.simplefilter("always",galpyWarning)
#Basic sanity checking: circular orbit should have constant R, zero vR, vT=vc
def test_actionAngleTorus_basic():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import MWPotential, rl, vcirc, \
FlattenedPowerPotential, PlummerPotential
tol= -4.
jr= 10.**-10.
jz= 10.**-10.
aAT= actionAngleTorus(pot=MWPotential)
# at R=1, Lz=1
jphi= 1.
angler= numpy.linspace(0.,2.*numpy.pi,101)
anglephi= numpy.linspace(0.,2.*numpy.pi,101)+1.
anglez= numpy.linspace(0.,2.*numpy.pi,101)+2.
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
assert numpy.all(numpy.fabs(RvR[0]-rl(MWPotential,jphi)) < 10.**tol), \
'circular orbit does not have constant radius for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \
'circular orbit does not have zero radial velocity for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[2]-vcirc(MWPotential,rl(MWPotential,jphi))) < 10.**tol), \
'circular orbit does not have constant vT=vc for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \
'circular orbit does not have zero vertical height for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \
'circular orbit does not have zero vertical velocity for actionAngleTorus'
# at Lz=1.5, using Plummer
tol= -3.25
pp= PlummerPotential(normalize=1.)
aAT= actionAngleTorus(pot=pp)
jphi= 1.5
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
assert numpy.all(numpy.fabs(RvR[0]-rl(pp,jphi)) < 10.**tol), \
'circular orbit does not have constant radius for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \
'circular orbit does not have zero radial velocity for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[2]-vcirc(pp,rl(pp,jphi))) < 10.**tol), \
'circular orbit does not have constant vT=vc for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \
'circular orbit does not have zero vertical height for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \
'circular orbit does not have zero vertical velocity for actionAngleTorus'
# at Lz=0.5, using FlattenedPowerPotential
tol= -4.
fp= FlattenedPowerPotential(normalize=1.)
aAT= actionAngleTorus(pot=fp)
jphi= 0.5
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
assert numpy.all(numpy.fabs(RvR[0]-rl(fp,jphi)) < 10.**tol), \
'circular orbit does not have constant radius for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \
'circular orbit does not have zero radial velocity for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[2]-vcirc(fp,rl(fp,jphi))) < 10.**tol), \
'circular orbit does not have constant vT=vc for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \
'circular orbit does not have zero vertical height for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \
'circular orbit does not have zero vertical velocity for actionAngleTorus'
return None
#Basic sanity checking: close-to-circular orbit should have freq. = epicycle freq.
def test_actionAngleTorus_basic_freqs():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import epifreq, omegac, verticalfreq, rl, \
JaffePotential, PowerSphericalPotential, HernquistPotential
tol= -3.
jr= 10.**-6.
jz= 10.**-6.
jp= JaffePotential(normalize=1.)
aAT= actionAngleTorus(pot=jp)
# at Lz=1
jphi= 1.
om= aAT.Freqs(jr,jphi,jz)
assert numpy.fabs((om[0]-epifreq(jp,rl(jp,jphi)))/om[0]) < 10.**tol, \
'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'
assert numpy.fabs((om[1]-omegac(jp,rl(jp,jphi)))/om[1]) < 10.**tol, \
'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'
assert numpy.fabs((om[2]-verticalfreq(jp,rl(jp,jphi)))/om[2]) < 10.**tol, \
'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'
# at Lz=1.5, w/ different potential
pp= PowerSphericalPotential(normalize=1.)
aAT= actionAngleTorus(pot=pp)
jphi= 1.5
om= aAT.Freqs(jr,jphi,jz)
assert numpy.fabs((om[0]-epifreq(pp,rl(pp,jphi)))/om[0]) < 10.**tol, \
'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'
assert numpy.fabs((om[1]-omegac(pp,rl(pp,jphi)))/om[1]) < 10.**tol, \
'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'
assert numpy.fabs((om[2]-verticalfreq(pp,rl(pp,jphi)))/om[2]) < 10.**tol, \
'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'
# at Lz=0.5, w/ different potential
tol= -2.5 # appears more difficult
hp= HernquistPotential(normalize=1.)
aAT= actionAngleTorus(pot=hp)
jphi= 0.5
om= aAT.Freqs(jr,jphi,jz)
assert numpy.fabs((om[0]-epifreq(hp,rl(hp,jphi)))/om[0]) < 10.**tol, \
'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'
assert numpy.fabs((om[1]-omegac(hp,rl(hp,jphi)))/om[1]) < 10.**tol, \
'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'
assert numpy.fabs((om[2]-verticalfreq(hp,rl(hp,jphi)))/om[2]) < 10.**tol, \
'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'
return None
#Test that orbit from actionAngleTorus is the same as an integrated orbit
def test_actionAngleTorus_orbit():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import MWPotential2014
from galpy.orbit import Orbit
# Set up instance
aAT= actionAngleTorus(pot=MWPotential2014,tol=10.**-5.)
jr,jphi,jz= 0.05,1.1,0.025
# First calculate frequencies and the initial RvR
RvRom= aAT.xvFreqs(jr,jphi,jz,
numpy.array([0.]),
numpy.array([1.]),
numpy.array([2.]))
om= RvRom[1:]
# Angles along an orbit
ts= numpy.linspace(0.,100.,1001)
angler= ts*om[0]
anglephi= 1.+ts*om[1]
anglez= 2.+ts*om[2]
# Calculate the orbit using actionAngleTorus
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
# Calculate the orbit using orbit integration
orb= Orbit([RvRom[0][0,0],RvRom[0][0,1],RvRom[0][0,2],
RvRom[0][0,3],RvRom[0][0,4],RvRom[0][0,5]])
orb.integrate(ts,MWPotential2014)
# Compare
tol= -3.
assert numpy.all(numpy.fabs(orb.R(ts)-RvR[0]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in R'
assert numpy.all(numpy.fabs(orb.vR(ts)-RvR[1]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in vR'
assert numpy.all(numpy.fabs(orb.vT(ts)-RvR[2]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in vT'
assert numpy.all(numpy.fabs(orb.z(ts)-RvR[3]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in z'
assert numpy.all(numpy.fabs(orb.vz(ts)-RvR[4]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in vz'
assert numpy.all(numpy.fabs((orb.phi(ts)-RvR[5]+numpy.pi) % (2.*numpy.pi) -numpy.pi) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in phi'
return None
# Test that actionAngleTorus w/ interp pot gives same freqs as regular pot
# Doesn't work well: TM aborts because our interpolated forces aren't
# consistent enough with the potential for TM's taste, but we test that it at
# at least works somewhat
def test_actionAngleTorus_interppot_freqs():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import LogarithmicHaloPotential, interpRZPotential
lp= LogarithmicHaloPotential(normalize=1.)
ip= interpRZPotential(RZPot=lp,
interpPot=True,
interpDens=True,interpRforce=True,interpzforce=True,
enable_c=True)
aAT= actionAngleTorus(pot=lp)
aATi= actionAngleTorus(pot=ip)
jr,jphi,jz= 0.05,1.1,0.02
om= aAT.Freqs(jr,jphi,jz)
omi= aATi.Freqs(jr,jphi,jz)
assert numpy.fabs((om[0]-omi[0])/om[0]) < 0.2, 'Radial frequency computed using the torus machine does not agree between potential and interpolated potential'
assert numpy.fabs((om[1]-omi[1])/om[1]) < 0.2, 'Azimuthal frequency computed using the torus machine does not agree between potential and interpolated potential'
assert numpy.fabs((om[2]-omi[2])/om[2]) < 0.8, 'Vertical frequency computed using the torus machine does not agree between potential and interpolated potential'
return None
#Test the actionAngleTorus against an isochrone potential: actions
def test_actionAngleTorus_Isochrone_actions():
from galpy.potential import IsochronePotential
from galpy.actionAngle import actionAngleTorus, \
actionAngleIsochrone
ip= IsochronePotential(normalize=1.,b=1.2)
aAI= actionAngleIsochrone(ip=ip)
tol= -6.
aAT= actionAngleTorus(pot=ip,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.])
anglephi= numpy.array([numpy.pi])
anglez= numpy.array([numpy.pi/2.])
# Calculate position from aAT
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
# Calculate actions from aAI
ji= aAI(*RvR)
djr= numpy.fabs((ji[0]-jr)/jr)
dlz= numpy.fabs((ji[1]-jphi)/jphi)
djz= numpy.fabs((ji[2]-jz)/jz)
assert djr < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jr at %f%%' % (djr*100.)
assert dlz < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jr at %f%%' % (dlz*100.)
assert djz < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jr at %f%%' % (djz*100.)
return None
#Test the actionAngleTorus against an isochrone potential: frequencies and angles
def test_actionAngleTorus_Isochrone_freqsAngles():
from galpy.potential import IsochronePotential
from galpy.actionAngle import actionAngleTorus, \
actionAngleIsochrone
ip= IsochronePotential(normalize=1.,b=1.2)
aAI= actionAngleIsochrone(ip=ip)
tol= -6.
aAT= actionAngleTorus(pot=ip,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.1])+numpy.linspace(0.,numpy.pi,101)
angler= angler % (2.*numpy.pi)
anglephi= numpy.array([numpy.pi])+numpy.linspace(0.,numpy.pi,101)
anglephi= anglephi % (2.*numpy.pi)
anglez= numpy.array([numpy.pi/2.])+numpy.linspace(0.,numpy.pi,101)
anglez= anglez % (2.*numpy.pi)
# Calculate position from aAT
RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)
# Calculate actions, frequencies, and angles from aAI
ws= aAI.actionsFreqsAngles(*RvRom[0].T)
dOr= numpy.fabs((ws[3]-RvRom[1]))
dOp= numpy.fabs((ws[4]-RvRom[2]))
dOz= numpy.fabs((ws[5]-RvRom[3]))
dar= numpy.fabs((ws[6]-angler))
dap= numpy.fabs((ws[7]-anglephi))
daz= numpy.fabs((ws[8]-anglez))
dar[dar > numpy.pi]-= 2.*numpy.pi
dar[dar < -numpy.pi]+= 2.*numpy.pi
dap[dap > numpy.pi]-= 2.*numpy.pi
dap[dap < -numpy.pi]+= 2.*numpy.pi
daz[daz > numpy.pi]-= 2.*numpy.pi
daz[daz < -numpy.pi]+= 2.*numpy.pi
assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)
assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Ophi at %f%%' % (numpy.nanmax(dOp)*100.)
assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)
assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for ar at %f' % (numpy.nanmax(dar))
assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for aphi at %f' % (numpy.nanmax(dap))
assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for az at %f' % (numpy.nanmax(daz))
return None
#Test the actionAngleTorus against a Staeckel potential: actions
def test_actionAngleTorus_Staeckel_actions():
from galpy.potential import KuzminKutuzovStaeckelPotential
from galpy.actionAngle import actionAngleTorus, \
actionAngleStaeckel
delta= 1.2
kp= KuzminKutuzovStaeckelPotential(normalize=1.,Delta=delta)
aAS= actionAngleStaeckel(pot=kp,delta=delta,c=True)
tol= -3.
aAT= actionAngleTorus(pot=kp,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.])
anglephi= numpy.array([numpy.pi])
anglez= numpy.array([numpy.pi/2.])
# Calculate position from aAT
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
# Calculate actions from aAI
ji= aAS(*RvR)
djr= numpy.fabs((ji[0]-jr)/jr)
dlz= numpy.fabs((ji[1]-jphi)/jphi)
djz= numpy.fabs((ji[2]-jz)/jz)
assert djr < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Jr at %f%%' % (djr*100.)
assert dlz < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Jr at %f%%' % (dlz*100.)
assert djz < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Jr at %f%%' % (djz*100.)
return None
#Test the actionAngleTorus against an isochrone potential: frequencies and angles
def test_actionAngleTorus_Staeckel_freqsAngles():
from galpy.potential import KuzminKutuzovStaeckelPotential
from galpy.actionAngle import actionAngleTorus, \
actionAngleStaeckel
delta= 1.2
kp= KuzminKutuzovStaeckelPotential(normalize=1.,Delta=delta)
aAS= actionAngleStaeckel(pot=kp,delta=delta,c=True)
tol= -3.
aAT= actionAngleTorus(pot=kp,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.1])+numpy.linspace(0.,numpy.pi,101)
angler= angler % (2.*numpy.pi)
anglephi= numpy.array([numpy.pi])+numpy.linspace(0.,numpy.pi,101)
anglephi= anglephi % (2.*numpy.pi)
anglez= numpy.array([numpy.pi/2.])+numpy.linspace(0.,numpy.pi,101)
anglez= anglez % (2.*numpy.pi)
# Calculate position from aAT
RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)
# Calculate actions, frequencies, and angles from aAI
ws= aAS.actionsFreqsAngles(*RvRom[0].T)
dOr= numpy.fabs((ws[3]-RvRom[1]))
dOp= numpy.fabs((ws[4]-RvRom[2]))
dOz= numpy.fabs((ws[5]-RvRom[3]))
dar= numpy.fabs((ws[6]-angler))
dap= numpy.fabs((ws[7]-anglephi))
daz= numpy.fabs((ws[8]-anglez))
dar[dar > numpy.pi]-= 2.*numpy.pi
dar[dar < -numpy.pi]+= 2.*numpy.pi
dap[dap > numpy.pi]-= 2.*numpy.pi
dap[dap < -numpy.pi]+= 2.*numpy.pi
daz[daz > numpy.pi]-= 2.*numpy.pi
daz[daz < -numpy.pi]+= 2.*numpy.pi
assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)
assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Ophi at %f%%' % (numpy.nanmax(dOp)*100.)
assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)
assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for ar at %f' % (numpy.nanmax(dar))
assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for aphi at %f' % (numpy.nanmax(dap))
assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for az at %f' % (numpy.nanmax(daz))
return None
#Test the actionAngleTorus against a general potential w/ actionAngleIsochroneApprox: actions
def test_actionAngleTorus_isochroneApprox_actions():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus, \
actionAngleIsochroneApprox
aAIA= actionAngleIsochroneApprox(pot=MWPotential2014,b=0.8)
tol= -2.5
aAT= actionAngleTorus(pot=MWPotential2014,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.])
anglephi= numpy.array([numpy.pi])
anglez= numpy.array([numpy.pi/2.])
# Calculate position from aAT
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
# Calculate actions from aAIA
ji= aAIA(*RvR)
djr= numpy.fabs((ji[0]-jr)/jr)
dlz= numpy.fabs((ji[1]-jphi)/jphi)
djz= numpy.fabs((ji[2]-jz)/jz)
assert djr < 10.**tol, 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Jr at %f%%' % (djr*100.)
assert dlz < 10.**tol, 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Jr at %f%%' % (dlz*100.)
assert djz < 10.**tol, 'actionAngleTorus and actionAngleMWPotential2014 applied to MWPotential2014 potential disagree for Jr at %f%%' % (djz*100.)
return None
#Test the actionAngleTorus against a general potential w/ actionAngleIsochrone: frequencies and angles
def test_actionAngleTorus_isochroneApprox_freqsAngles():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus, \
actionAngleIsochroneApprox
aAIA= actionAngleIsochroneApprox(pot=MWPotential2014,b=0.8)
tol= -3.5
aAT= actionAngleTorus(pot=MWPotential2014,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.1])+numpy.linspace(0.,numpy.pi,21)
angler= angler % (2.*numpy.pi)
anglephi= numpy.array([numpy.pi])+numpy.linspace(0.,numpy.pi,21)
anglephi= anglephi % (2.*numpy.pi)
anglez= numpy.array([numpy.pi/2.])+numpy.linspace(0.,numpy.pi,21)
anglez= anglez % (2.*numpy.pi)
# Calculate position from aAT
RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)
# Calculate actions, frequencies, and angles from aAI
ws= aAIA.actionsFreqsAngles(*RvRom[0].T)
dOr= numpy.fabs((ws[3]-RvRom[1]))
dOp= numpy.fabs((ws[4]-RvRom[2]))
dOz= numpy.fabs((ws[5]-RvRom[3]))
dar= numpy.fabs((ws[6]-angler))
dap= numpy.fabs((ws[7]-anglephi))
daz= numpy.fabs((ws[8]-anglez))
dar[dar > numpy.pi]-= 2.*numpy.pi
dar[dar < -numpy.pi]+= 2.*numpy.pi
dap[dap > numpy.pi]-= 2.*numpy.pi
dap[dap < -numpy.pi]+= 2.*numpy.pi
daz[daz > numpy.pi]-= 2.*numpy.pi
daz[daz < -numpy.pi]+= 2.*numpy.pi
assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)
assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Ophi at %f%%' % (numpy.nanmax(dOp)*100.)
assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)
assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for ar at %f' % (numpy.nanmax(dar))
assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for aphi at %f' % (numpy.nanmax(dap))
assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for az at %f' % (numpy.nanmax(daz))
return None
# Test that the frequencies returned by hessianFreqs are the same as those returned by Freqs
def test_actionAngleTorus_hessian_freqs():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus
aAT= actionAngleTorus(pot=MWPotential2014)
jr,jphi,jz= 0.075,1.1,0.05
fO= aAT.Freqs(jr,jphi,jz)[:3]
hO= aAT.hessianFreqs(jr,jphi,jz)[1:4]
assert numpy.all(numpy.fabs(numpy.array(fO)-numpy.array(hO)) < 10.**-8.), 'actionAngleTorus methods Freqs and hessianFreqs return different frequencies'
return None
# Test that the Hessian is approximately symmetric
def test_actionAngleTorus_hessian_symm():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus
aAT= actionAngleTorus(pot=MWPotential2014)
jr,jphi,jz= 0.075,1.1,0.05
h= aAT.hessianFreqs(jr,jphi,jz,tol=0.0001,nosym=True)[0]
assert numpy.all(numpy.fabs((h-h.T)/h) < 0.03), 'actionAngleTorus Hessian is not symmetric'
return None
# Test that the Hessian is approximately correct
def test_actionAngleTorus_hessian_linear():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus
aAT= actionAngleTorus(pot=MWPotential2014)
jr,jphi,jz= 0.075,1.1,0.05
h= aAT.hessianFreqs(jr,jphi,jz,tol=0.0001,nosym=True)[0]
dj= numpy.array([0.02,0.005,-0.01])
do_fromhessian= numpy.dot(h,dj)
O= numpy.array(aAT.Freqs(jr,jphi,jz)[:3])
do= numpy.array(aAT.Freqs(jr+dj[0],jphi+dj[1],jz+dj[2])[:3])-O
assert numpy.all(numpy.fabs((do_fromhessian-do)/O)< 0.001), 'actionAngleTorus Hessian does not return good approximation to dO/dJ'
return None
# Test that the frequencies returned by xvJacobianFreqs are the same as those returned by Freqs
def test_actionAngleTorus_jacobian_freqs():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus
aAT= actionAngleTorus(pot=MWPotential2014)
jr,jphi,jz= 0.075,1.1,0.05
fO= aAT.Freqs(jr,jphi,jz)[:3]
hO= aAT.xvJacobianFreqs(jr,jphi,jz,
numpy.array([0.]),numpy.array([1.]),
numpy.array([2.]))[3:6]
assert numpy.all(numpy.fabs(numpy.array(fO)-numpy.array(hO)) < 10.**-8.), 'actionAngleTorus methods Freqs and xvJacobianFreqs return different frequencies'
return None
# Test that the Hessian returned by xvJacobianFreqs are the same as those returned by hessianFreqs
def test_actionAngleTorus_jacobian_hessian():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus
aAT= actionAngleTorus(pot=MWPotential2014)
jr,jphi,jz= 0.075,1.1,0.05
fO= aAT.hessianFreqs(jr,jphi,jz)[0]
hO= aAT.xvJacobianFreqs(jr,jphi,jz,
numpy.array([0.]),numpy.array([1.]),
numpy.array([2.]))[2]
assert numpy.all(numpy.fabs(numpy.array(fO)-numpy.array(hO)) < 10.**-8.), 'actionAngleTorus methods hessianFreqs and xvJacobianFreqs return different Hessians'
return None
# Test that the xv returned by xvJacobianFreqs are the same as those returned by __call__
def test_actionAngleTorus_jacobian_xv():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus
aAT= actionAngleTorus(pot=MWPotential2014)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.,1.])
anglephi= numpy.array([1.,2.])
anglez= numpy.array([2.,3.])
fO= aAT(jr,jphi,jz,angler,anglephi,anglez)
hO= aAT.xvJacobianFreqs(jr,jphi,jz,angler,anglephi,anglez)[0]
assert numpy.all(numpy.fabs(numpy.array(fO)-numpy.array(hO)) < 10.**-8.), 'actionAngleTorus methods __call__ and xvJacobianFreqs return different xv'
return None
# Test that the determinant of the Jacobian returned by xvJacobianFreqs is close to 1/R (should be 1 for rectangular coordinates, 1/R for cylindrical
def test_actionAngleTorus_jacobian_detone():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus
aAT= actionAngleTorus(pot=MWPotential2014,dJ=0.0001)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.,1.])
anglephi= numpy.array([1.,2.])
anglez= numpy.array([2.,3.])
jf= aAT.xvJacobianFreqs(jr,jphi,jz,angler,anglephi,anglez)
assert numpy.fabs(jf[0][0,0]*numpy.fabs(numpy.linalg.det(jf[1][0]))-1) < 0.01, 'Jacobian returned by actionAngleTorus method xvJacobianFreqs does not have the expected determinant'
assert numpy.fabs(jf[0][1,0]*numpy.fabs(numpy.linalg.det(jf[1][1]))-1) < 0.01, 'Jacobian returned by actionAngleTorus method xvJacobianFreqs does not have the expected determinant'
return None
# Test that Jacobian returned by xvJacobianFreqs is approximately correct
def test_actionAngleTorus_jacobian_linear():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus
aAT= actionAngleTorus(pot=MWPotential2014)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.5])
anglephi= numpy.array([1.])
anglez= numpy.array([2.])
jf= aAT.xvJacobianFreqs(jr,jphi,jz,angler,anglephi,anglez)
xv= aAT(jr,jphi,jz,angler,anglephi,anglez)
dja= 2.*numpy.array([0.001,0.002,0.003,-0.002,0.004,0.002])
xv_direct= aAT(jr+dja[0],jphi+dja[1],jz+dja[2],
angler+dja[3],anglephi+dja[4],anglez+dja[5])
xv_fromjac= xv+numpy.dot(jf[1],dja)
assert numpy.all(numpy.fabs((xv_fromjac-xv_direct)/xv_direct) < 0.01), 'Jacobian returned by actionAngleTorus method xvJacobianFreqs does not appear to be correct'
return None
#Test error when potential is not implemented in C
def test_actionAngleTorus_nocerr():
from galpy.actionAngle import actionAngleTorus
from test_potential import BurkertPotentialNoC
bp= BurkertPotentialNoC()
try:
aAT= actionAngleTorus(pot=bp)
except RuntimeError: pass
else:
raise AssertionError("actionAngleTorus initialization with potential w/o C should have given a RuntimeError, but didn't")
return None
#Test error when potential is not axisymmetric
def test_actionAngleTorus_nonaxierr():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import TriaxialNFWPotential
np= TriaxialNFWPotential(normalize=1.,b=0.9)
try:
aAT= actionAngleTorus(pot=np)
except RuntimeError: pass
else:
raise AssertionError("actionAngleTorus initialization with non-axisymmetric potential should have given a RuntimeError, but didn't")
return None
# Test the Autofit torus warnings
def test_actionAngleTorus_AutoFitWarning():
from galpy.potential import LogarithmicHaloPotential
from galpy.actionAngle import actionAngleTorus
lp= LogarithmicHaloPotential(normalize=1.,q=0.9)
aAT= actionAngleTorus(pot=lp,tol=10.**-8.)
# These should give warnings
jr, jp, jz= 0.27209033, 1.80253892, 0.6078445
ar, ap, az= numpy.array([1.95732492]), numpy.array([6.16753224]), \
numpy.array([4.08233059])
#Turn warnings into errors to test for them
import warnings
with warnings.catch_warnings(record=True) as w:
if PY2: reset_warning_registry('galpy')
warnings.simplefilter("always",galpyWarning)
aAT(jr,jp,jz,ar,ap,az)
# Should raise warning bc of Autofit, might raise others
raisedWarning= False
for wa in w:
raisedWarning= (str(wa.message) == "actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2")
if raisedWarning: break
assert raisedWarning, "actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't"
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always",galpyWarning)
aAT.xvFreqs(jr,jp,jz,ar,ap,az)
# Should raise warning bc of Autofit, might raise others
raisedWarning= False
for wa in w:
raisedWarning= (str(wa.message) == "actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2")
if raisedWarning: break
assert raisedWarning, "actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't"
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always",galpyWarning)
aAT.Freqs(jr,jp,jz)
# Should raise warning bc of Autofit, might raise others
raisedWarning= False
for wa in w:
raisedWarning= (str(wa.message) == "actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2")
if raisedWarning: break
assert raisedWarning, "actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't"
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always",galpyWarning)
aAT.hessianFreqs(jr,jp,jz)
# Should raise warning bc of Autofit, might raise others
raisedWarning= False
for wa in w:
raisedWarning= (str(wa.message) == "actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2")
if raisedWarning: break
assert raisedWarning, "actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't"
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always",galpyWarning)
aAT.xvJacobianFreqs(jr,jp,jz,ar,ap,az)
# Should raise warning bc of Autofit, might raise others
raisedWarning= False
for wa in w:
raisedWarning= (str(wa.message) == "actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2")
if raisedWarning: break
assert raisedWarning, "actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't"
return None
def test_MWPotential_warning_torus():
# Test that using MWPotential throws a warning, see #229
from galpy.actionAngle import actionAngleTorus
from galpy.potential import MWPotential
if PY2: reset_warning_registry('galpy')
warnings.simplefilter("error",galpyWarning)
try:
aAA= actionAngleTorus(pot=MWPotential)
except: pass
else:
raise AssertionError("actionAngleTorus with MWPotential should have thrown a warning, but didn't")
#Turn warnings back into warnings
warnings.simplefilter("always",galpyWarning)
return None
| 52.851979
| 184
| 0.702055
| 4,291
| 30,707
| 5.001631
| 0.083197
| 0.023483
| 0.017147
| 0.024788
| 0.825552
| 0.784922
| 0.765819
| 0.758317
| 0.736138
| 0.704128
| 0
| 0.038165
| 0.175725
| 30,707
| 580
| 185
| 52.943103
| 0.809767
| 0.098023
| 0
| 0.607214
| 0
| 0.01002
| 0.256921
| 0.01281
| 0
| 0
| 0
| 0
| 0.154309
| 1
| 0.044088
| false
| 0.006012
| 0.108216
| 0
| 0.196393
| 0.002004
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f76921b09cc5b8f3074a2c1583e0b02cc663632b
| 945
|
py
|
Python
|
tests/test_kernel.py
|
fallenpegasus/reconbf
|
bfd15bef549f011a3de885c3267d4f718223b798
|
[
"Apache-2.0"
] | 45
|
2016-08-12T21:37:25.000Z
|
2022-03-29T00:21:29.000Z
|
tests/test_kernel.py
|
fallenpegasus/reconbf
|
bfd15bef549f011a3de885c3267d4f718223b798
|
[
"Apache-2.0"
] | 20
|
2016-08-11T07:42:28.000Z
|
2016-09-09T13:33:47.000Z
|
tests/test_kernel.py
|
fallenpegasus/reconbf
|
bfd15bef549f011a3de885c3267d4f718223b798
|
[
"Apache-2.0"
] | 6
|
2016-08-25T06:31:38.000Z
|
2019-09-11T04:29:36.000Z
|
from reconbf.modules import test_kernel
from reconbf.lib.result import Result
from reconbf.lib import utils
import unittest
from mock import patch
class PtraceScope(unittest.TestCase):
def test_no_yama(self):
with patch.object(utils, 'kconfig_option', return_value=None):
res = test_kernel.test_ptrace_scope()
self.assertEqual(res.result, Result.FAIL)
def test_level_0(self):
with patch.object(utils, 'kconfig_option', return_value='y'):
with patch.object(utils, 'get_sysctl_value', return_value='0'):
res = test_kernel.test_ptrace_scope()
self.assertEqual(res.result, Result.FAIL)
def test_level_1(self):
with patch.object(utils, 'kconfig_option', return_value='y'):
with patch.object(utils, 'get_sysctl_value', return_value='1'):
res = test_kernel.test_ptrace_scope()
self.assertEqual(res.result, Result.PASS)
| 36.346154
| 75
| 0.688889
| 125
| 945
| 4.984
| 0.304
| 0.072231
| 0.120385
| 0.160514
| 0.70947
| 0.70947
| 0.70947
| 0.70947
| 0.70947
| 0.632424
| 0
| 0.005326
| 0.205291
| 945
| 25
| 76
| 37.8
| 0.824234
| 0
| 0
| 0.35
| 0
| 0
| 0.08254
| 0
| 0
| 0
| 0
| 0
| 0.15
| 1
| 0.15
| false
| 0.05
| 0.25
| 0
| 0.45
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f78f99f6aaf73533d37148d0695f552552389b54
| 113
|
py
|
Python
|
tf/__init__.py
|
HarshCasper/MNIST-Digit-Recognition
|
41312669b226ee2045c6d5a16b600388fb0d18c8
|
[
"MIT"
] | 2
|
2020-04-18T18:29:43.000Z
|
2020-07-07T15:16:00.000Z
|
tf/__init__.py
|
HarshCasper/MNIST-Digit-Recognition
|
41312669b226ee2045c6d5a16b600388fb0d18c8
|
[
"MIT"
] | null | null | null |
tf/__init__.py
|
HarshCasper/MNIST-Digit-Recognition
|
41312669b226ee2045c6d5a16b600388fb0d18c8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .softmax import *
from .sigmoid import *
from .relu import *
from .conv2d import *
| 16.142857
| 23
| 0.654867
| 15
| 113
| 4.933333
| 0.6
| 0.405405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021978
| 0.19469
| 113
| 6
| 24
| 18.833333
| 0.791209
| 0.185841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e3e87024eb05a79734ec071ee7c7fda2f89b05ee
| 7,916
|
py
|
Python
|
tests/io_components/test_mutate.py
|
detritus3872/kartothek
|
e4155e4ec72decd6d5ee67d6258f7683cc690c01
|
[
"MIT"
] | 171
|
2019-05-02T15:47:20.000Z
|
2022-02-17T15:12:15.000Z
|
tests/io_components/test_mutate.py
|
detritus3872/kartothek
|
e4155e4ec72decd6d5ee67d6258f7683cc690c01
|
[
"MIT"
] | 414
|
2019-05-03T09:24:26.000Z
|
2022-03-30T21:02:40.000Z
|
tests/io_components/test_mutate.py
|
detritus3872/kartothek
|
e4155e4ec72decd6d5ee67d6258f7683cc690c01
|
[
"MIT"
] | 57
|
2019-05-03T08:00:18.000Z
|
2022-02-16T18:38:22.000Z
|
import types
import pandas as pd
import pytest
from kartothek.io_components.merge import align_datasets
from kartothek.io_components.metapartition import MetaPartition
from kartothek.io_components.write import store_dataset_from_partitions
def test_align_datasets_prefix(dataset, evaluation_dataset, store_session):
generator = align_datasets(
left_dataset_uuid=dataset.uuid,
right_dataset_uuid=evaluation_dataset.uuid,
store=store_session,
match_how="prefix",
)
assert isinstance(generator, types.GeneratorType)
list_metapartitions = list(generator)
# Two separate cluster_groups (e.g. cluster_1*)
assert len(list_metapartitions) == 2
mp_list = list_metapartitions[0]
assert len(mp_list) == 3, [mp.label for mp in mp_list]
mp_list = list_metapartitions[1]
assert len(mp_list) == 3, [mp.label for mp in mp_list]
# Test sorting of datasets by length, i.e. order of dataframes is different
generator = align_datasets(
left_dataset_uuid=evaluation_dataset.uuid,
right_dataset_uuid=dataset.uuid,
store=store_session,
match_how="prefix",
)
list_metapartitions = list(generator)
mp_list = list_metapartitions[0]
def test_align_datasets_prefix__equal_number_of_partitions(
dataset, evaluation_dataset, store_session
):
"""
Test a scenario where the simple prefix match algorithm didn't find any
matches in case of equal number of partitions in both datasets.
"""
# Create a reference dataset which matches the problem (equal number of
# partitions and suitable for prefix matching)
mp = MetaPartition(label="cluster_1_1", metadata_version=dataset.metadata_version)
mp2 = MetaPartition(label="cluster_2_1", metadata_version=dataset.metadata_version)
metapartitions = [mp, mp2]
store_dataset_from_partitions(
partition_list=metapartitions,
dataset_uuid="reference_dataset_uuid",
store=store_session,
)
generator = align_datasets(
left_dataset_uuid=dataset.uuid,
right_dataset_uuid="reference_dataset_uuid",
store=store_session,
match_how="prefix",
)
assert isinstance(generator, types.GeneratorType)
list_metapartitions = list(generator)
# Two separate cluster_groups (e.g. cluster_1*)
assert len(list_metapartitions) == 2
mp_list = list_metapartitions[0]
assert len(mp_list) == 2
mp_list = list_metapartitions[1]
assert len(mp_list) == 2
# Test sorting of datasets by length, i.e. order of dataframes is different
generator = align_datasets(
left_dataset_uuid=evaluation_dataset.uuid,
right_dataset_uuid=dataset.uuid,
store=store_session,
match_how="prefix",
)
list_metapartitions = list(generator)
mp_list = list_metapartitions[0]
def test_align_datasets_exact(dataset, evaluation_dataset, store_session):
with pytest.raises(RuntimeError):
list(
align_datasets(
left_dataset_uuid=dataset.uuid,
right_dataset_uuid=evaluation_dataset.uuid,
store=store_session,
match_how="exact",
)
)
generator = align_datasets(
left_dataset_uuid=dataset.uuid,
right_dataset_uuid=dataset.uuid,
store=store_session,
match_how="exact",
)
assert isinstance(generator, types.GeneratorType)
list_metapartitions = list(generator)
# Two separate cluster_groups (e.g. cluster_1*)
assert len(list_metapartitions) == 2
mp_list = list_metapartitions[0]
assert len(mp_list) == 2, [mp.label for mp in mp_list]
assert [mp.label for mp in mp_list] == ["cluster_1", "cluster_1"]
mp_list = list_metapartitions[1]
assert len(mp_list) == 2, [mp.label for mp in mp_list]
assert [mp.label for mp in mp_list] == ["cluster_2", "cluster_2"]
def test_align_datasets_left(dataset, evaluation_dataset, store_session):
generator = align_datasets(
left_dataset_uuid=dataset.uuid,
right_dataset_uuid=evaluation_dataset.uuid,
store=store_session,
match_how="left",
)
assert isinstance(generator, types.GeneratorType)
list_metapartitions = list(generator)
assert len(list_metapartitions) == len(dataset.partitions)
mp_list = list_metapartitions[0]
assert len(mp_list) == 5, [mp.label for mp in mp_list]
expected = ["cluster_1", "cluster_1_1", "cluster_1_2", "cluster_2_1", "cluster_2_2"]
assert [mp.label for mp in mp_list] == expected
mp_list = list_metapartitions[1]
assert len(mp_list) == 5, [mp.label for mp in mp_list]
expected = ["cluster_2", "cluster_1_1", "cluster_1_2", "cluster_2_1", "cluster_2_2"]
assert [mp.label for mp in mp_list] == expected
def test_align_datasets_right(dataset, evaluation_dataset, store_session):
generator = align_datasets(
left_dataset_uuid=dataset.uuid,
right_dataset_uuid=evaluation_dataset.uuid,
store=store_session,
match_how="right",
)
assert isinstance(generator, types.GeneratorType)
list_metapartitions = list(generator)
assert len(list_metapartitions) == len(evaluation_dataset.partitions)
mp_list = list_metapartitions[0]
assert len(mp_list) == 3, [mp.label for mp in mp_list]
expected = ["cluster_1_1", "cluster_1", "cluster_2"]
assert [mp.label for mp in mp_list] == expected
mp_list = list_metapartitions[1]
assert len(mp_list) == 3, [mp.label for mp in mp_list]
expected = ["cluster_1_2", "cluster_1", "cluster_2"]
assert [mp.label for mp in mp_list] == expected
mp_list = list_metapartitions[2]
assert len(mp_list) == 3, [mp.label for mp in mp_list]
expected = ["cluster_2_1", "cluster_1", "cluster_2"]
assert [mp.label for mp in mp_list] == expected
mp_list = list_metapartitions[3]
assert len(mp_list) == 3, [mp.label for mp in mp_list]
expected = ["cluster_2_2", "cluster_1", "cluster_2"]
assert [mp.label for mp in mp_list] == expected
def test_align_datasets_callable(dataset, evaluation_dataset, store_session):
def comp(left, right):
return left == right
with pytest.raises(RuntimeError):
list(
align_datasets(
left_dataset_uuid=dataset.uuid,
right_dataset_uuid=evaluation_dataset.uuid,
store=store_session,
match_how=comp,
)
)
generator = align_datasets(
left_dataset_uuid=dataset.uuid,
right_dataset_uuid=dataset.uuid,
store=store_session,
match_how=comp,
)
assert isinstance(generator, types.GeneratorType)
list_metapartitions = list(generator)
# Two separate cluster_groups (e.g. cluster_1*)
assert len(list_metapartitions) == 2
mp_list = list_metapartitions[0]
assert len(mp_list) == 2, [mp.label for mp in mp_list]
assert [mp.label for mp in mp_list] == ["cluster_1", "cluster_1"]
mp_list = list_metapartitions[1]
assert len(mp_list) == 2, [mp.label for mp in mp_list]
assert [mp.label for mp in mp_list] == ["cluster_2", "cluster_2"]
def test_merge_metapartitions():
df = pd.DataFrame({"P": [1, 1], "L": [1, 2], "TARGET": [1, 2]})
df_2 = pd.DataFrame({"P": [1], "info": "a"})
mp = MetaPartition(label="cluster_1", data={"core": df, "helper": df_2})
df_3 = pd.DataFrame({"P": [1, 1], "L": [1, 2], "PRED": [0.1, 0.2]})
mp2 = MetaPartition(label="cluster_1", data={"predictions": df_3})
merged_mp = MetaPartition.merge_metapartitions(metapartitions=[mp, mp2])
df = pd.DataFrame(
{
"P": [1, 1],
"L": [1, 2],
"TARGET": [1, 2],
"info": ["a", "a"],
"PRED": [0.1, 0.2],
}
)
assert merged_mp.label == "cluster_1"
assert len(merged_mp.data) == 3
| 33.685106
| 88
| 0.674962
| 1,043
| 7,916
| 4.85139
| 0.100671
| 0.06166
| 0.043478
| 0.052174
| 0.83913
| 0.790514
| 0.775494
| 0.775494
| 0.759684
| 0.759684
| 0
| 0.019601
| 0.220187
| 7,916
| 234
| 89
| 33.82906
| 0.800097
| 0.073648
| 0
| 0.62069
| 0
| 0
| 0.066931
| 0.006022
| 0
| 0
| 0
| 0
| 0.218391
| 1
| 0.045977
| false
| 0
| 0.034483
| 0.005747
| 0.086207
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e3f2bf8f47196536debc8420c88881d7e6cae895
| 456
|
py
|
Python
|
src/pyyso/yso/__init__.py
|
cokeBeer/pyyso
|
c29171a5d2aea0d0c524fec4d8d6d0a1084f659f
|
[
"MIT"
] | 2
|
2022-03-18T15:17:25.000Z
|
2022-03-19T05:21:30.000Z
|
src/pyyso/yso/__init__.py
|
cokeBeer/pyyso
|
c29171a5d2aea0d0c524fec4d8d6d0a1084f659f
|
[
"MIT"
] | null | null | null |
src/pyyso/yso/__init__.py
|
cokeBeer/pyyso
|
c29171a5d2aea0d0c524fec4d8d6d0a1084f659f
|
[
"MIT"
] | null | null | null |
from pyyso.yso.urldns import *
from pyyso.yso.cc1 import *
from pyyso.yso.cc2 import *
from pyyso.yso.cc3 import *
from pyyso.yso.cc4 import *
from pyyso.yso.cc5 import *
from pyyso.yso.cc6 import *
from pyyso.yso.cc7 import *
from pyyso.yso.jdk7u21 import *
from pyyso.yso.jdk8u20 import *
from pyyso.yso.clazz import *
from pyyso.yso.cb1v183 import *
from pyyso.yso.cb1v192 import *
from pyyso.yso.jrmpclient import *
from pyyso.yso.beanfactory import *
| 28.5
| 35
| 0.769737
| 75
| 456
| 4.68
| 0.253333
| 0.384615
| 0.512821
| 0.717949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05303
| 0.131579
| 456
| 15
| 36
| 30.4
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e3fc58384246a573c015a511a9234fc0c7928832
| 167
|
py
|
Python
|
augur/datasources/librariesio/test_librariesio_routes.py
|
parthsharma2/augur
|
6d59c8c80f3c21eb97bfa4ea4817908ea9a7d10b
|
[
"MIT"
] | null | null | null |
augur/datasources/librariesio/test_librariesio_routes.py
|
parthsharma2/augur
|
6d59c8c80f3c21eb97bfa4ea4817908ea9a7d10b
|
[
"MIT"
] | null | null | null |
augur/datasources/librariesio/test_librariesio_routes.py
|
parthsharma2/augur
|
6d59c8c80f3c21eb97bfa4ea4817908ea9a7d10b
|
[
"MIT"
] | null | null | null |
import os
import subprocess
import time
from subprocess import Popen
import pytest
import requests
@pytest.fixture(scope="module")
def librariesio_routes():
pass
| 15.181818
| 31
| 0.802395
| 22
| 167
| 6.045455
| 0.681818
| 0.240602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137725
| 167
| 10
| 32
| 16.7
| 0.923611
| 0
| 0
| 0
| 0
| 0
| 0.035928
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| true
| 0.111111
| 0.666667
| 0
| 0.777778
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
5402130600a0e28b26913b99d370aabe9e2fed9f
| 52,837
|
py
|
Python
|
tests/integration/individual_response/test_individual_response.py
|
pricem14pc/eq-questionnaire-runner
|
54cc2947ba181a2673ea1fb7cf6b4acdd609e06b
|
[
"MIT"
] | null | null | null |
tests/integration/individual_response/test_individual_response.py
|
pricem14pc/eq-questionnaire-runner
|
54cc2947ba181a2673ea1fb7cf6b4acdd609e06b
|
[
"MIT"
] | null | null | null |
tests/integration/individual_response/test_individual_response.py
|
pricem14pc/eq-questionnaire-runner
|
54cc2947ba181a2673ea1fb7cf6b4acdd609e06b
|
[
"MIT"
] | null | null | null |
# pylint: disable=too-many-lines
from datetime import datetime
from unittest.mock import MagicMock
from freezegun import freeze_time
from app import settings
from app.publisher.exceptions import PublicationFailed
from tests.integration.integration_test_case import IntegrationTestCase
from tests.integration.questionnaire import THANK_YOU_URL_PATH
@freeze_time("2020-11-25T11:59:00")
class IndividualResponseTestCase(IntegrationTestCase):
def setUp(self):
settings.EQ_INDIVIDUAL_RESPONSE_LIMIT = 2
settings.EQ_INDIVIDUAL_RESPONSE_POSTAL_DEADLINE = datetime.fromisoformat(
"2020-11-25T12:00:00+00:00"
)
# Dummy mobile number from the range published by Ofcom
# https://www.ofcom.org.uk/phones-telecoms-and-internet/information-for-industry/numbering/numbers-for-drama
self.DUMMY_MOBILE_NUMBER = "07700900258"
super().setUp()
self.launchSurvey("test_individual_response", region_code="GB-ENG")
@property
def individual_section_link(self):
return self.getHtmlSoup().find(
"a", {"data-qa": "hub-row-individual-section-1-link"}
)["href"]
@property
def individual_response_link(self):
response_paragraph = self.getHtmlSoup().find(
"p", {"data-qa": "individual-response-url"}
)
if response_paragraph:
return response_paragraph.find_next()["href"]
@property
def individual_response_start_link(self):
submit_button = self.getHtmlSoup().find("a", {"data-qa": "btn-submit"})
return submit_button.attrs["href"]
def get_link(self, index, text):
selector = f"[data-qa='list-item-{text}-{index}-link']"
selected = self.getHtmlSoup().select(selector)
return selected[0].get("href")
def get_who_choice(self, index):
label = (
self.getHtmlSoup()
.select(f"#individual-response-who-answer-{index}-label")[0]
.text.strip()
)
list_item_id = (
self.getHtmlSoup()
.select(f"#individual-response-who-answer-{index}")[0]
.attrs["value"]
)
return {
"label": label,
"list_item_id": list_item_id,
}
def _add_no_household_members(self):
self.get("questionnaire/primary-person-list-collector/")
self.post({"you-live-here": "No"})
self.post({"anyone-else": "No"})
self.post({"any-visitors": "No"})
self.get("questionnaire/")
def _add_primary(self):
self.get("questionnaire/primary-person-list-collector/")
self.post({"you-live-here": "Yes"})
self.post({"first-name": "Marie", "last-name": "Day"})
self.post({"anyone-else": "No"})
self.post({"any-visitors": "No"})
self.get("questionnaire/")
def _add_primary_and_household(self):
self.get("questionnaire/primary-person-list-collector/")
self.post({"you-live-here": "Yes"})
self.post({"first-name": "Marie", "last-name": "Day"})
self.post({"anyone-else": "Yes"})
self.post({"first-name": "John", "last-name": "Doe"})
self.post({"anyone-else": "No"})
self.post({"any-visitors": "No"})
self.get("questionnaire/")
def _add_household_no_primary(self):
self.get("questionnaire/primary-person-list-collector/")
self.post({"you-live-here": "No"})
self.post({"anyone-else": "Yes"})
self.post({"first-name": "Marie", "last-name": "Day"})
self.post({"anyone-else": "No"})
self.post({"any-visitors": "No"})
self.get("questionnaire/")
def _add_household_multiple_members_no_primary(self):
self.get("questionnaire/primary-person-list-collector/")
self.post({"you-live-here": "No"})
self.post({"anyone-else": "Yes"})
self.post({"first-name": "Marie", "middle-names": "Carla", "last-name": "Day"})
self.post({"anyone-else": "Yes"})
self.post({"first-name": "Joe", "middle-names": "David", "last-name": "Day"})
self.post({"anyone-else": "No"})
self.post({"any-visitors": "No"})
self.get("questionnaire/")
def _add_household_members_with_same_names(self):
self.get("questionnaire/primary-person-list-collector/")
self.post({"you-live-here": "No"})
self.post({"anyone-else": "Yes"})
self.post({"first-name": "Marie", "middle-names": "Carla", "last-name": "Day"})
self.post({"anyone-else": "Yes"})
self.post({"first-name": "Joe", "middle-names": "David", "last-name": "Day"})
self.post({"anyone-else": "Yes"})
self.post({"first-name": "Joe", "middle-names": "Eric", "last-name": "Day"})
self.post({"anyone-else": "Yes"})
self.post({"first-name": "Joe", "last-name": "Day"})
self.post({"anyone-else": "No"})
self.post({"any-visitors": "No"})
self.get("questionnaire/")
def _request_individual_response_by_post(self):
self._add_household_no_primary()
self.post()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Post"})
self.post(
{
"individual-response-post-confirm-answer": "Yes, send the access code by post"
}
)
def _request_individual_response_by_text(self):
self._add_household_no_primary()
self.post()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Text message"})
self.post(
{
"individual-response-enter-number-answer": self.DUMMY_MOBILE_NUMBER,
}
)
self.post({"individual-response-text-confirm-answer": "Yes, send the text"})
class TestIndividualResponseOnHubDisabled(IndividualResponseTestCase):
def setUp(self):
super().setUp()
self.launchSurvey(
"test_individual_response_on_hub_disabled", region_code="GB-ENG"
)
def test_show_on_hub_false(self):
self._add_household_no_primary()
self.assertIsNone(self.individual_response_link)
self.assertEqualUrl("questionnaire/")
class TestIndividualResponseErrorStatus(IndividualResponseTestCase):
def test_ir_raises_400_confirm_number_bad_signature(self):
# Given I request an individual response by mobile phone
self._add_household_no_primary()
self.post()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Text message"})
self.post({"individual-response-enter-number-answer": "07970000000"})
# When I try to view the confirm number page with an incorrect mobile number hash
person_id = self.last_url.split("/")[2]
self.get(
f"individual-response/{person_id}/text/confirm-number?journey=hub&mobile_number=bad-signature"
)
# Then a BadRequest error is returned
self.assertBadRequest()
self.assertEqualPageTitle("An error has occurred - Test Individual Response")
def test_ir_raises_400_confirm_number_missing_mobile_param(self):
# Given I request an individual response by mobile phone
self._add_household_no_primary()
self.post()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Text message"})
self.post({"individual-response-enter-number-answer": "07970000000"})
# When I try to view the confirm number page with no mobile number param
person_id = self.last_url.split("/")[2]
self.get(f"individual-response/{person_id}/text/confirm-number?journey=hub")
# Then a BadRequest error is returned
self.assertBadRequest()
self.assertEqualPageTitle("An error has occurred - Test Individual Response")
def test_ir_raises_400_confirmation_bad_signature(self):
# Given I request an individual response by mobile phone
self._add_household_no_primary()
self.post()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Text message"})
self.post({"individual-response-enter-number-answer": "07970000000"})
self.post({"individual-response-text-confirm-answer": "Yes, send the text"})
# When I try to view the confirmation page with an incorrect mobile number hash
self.get(
"individual-response/text/confirmation?journey=hub&mobile_number=bad-signature"
)
# Then a BadRequest error is returned
self.assertBadRequest()
self.assertEqualPageTitle("An error has occurred - Test Individual Response")
def test_ir_raises_400_confirmation_missing_mobile_param(self):
# Given I request an individual response by mobile phone
self._add_household_no_primary()
self.post()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Text message"})
self.post({"individual-response-enter-number-answer": "07970000000"})
self.post({"individual-response-text-confirm-answer": "Yes, send the text"})
# When I try to view the confirmation page with no mobile number param
self.get("individual-response/text/confirmation?journey=hub")
# Then a BadRequest error is returned
self.assertBadRequest()
self.assertEqualPageTitle("An error has occurred - Test Individual Response")
def test_ir_raises_401_without_session(self):
# Given the hub is enabled
# And I add a household member
self._add_household_no_primary()
# When I sign out and navigate to the individual response page
individual_response_link = self.individual_response_link
self.post()
self.get(individual_response_link)
self.sign_out()
self.get(individual_response_link)
# Then I should see the 401 page
self.assertStatusCode(401)
def test_401_after_signout(self):
# Given the hub is enabled
# And I add a household member
self._add_household_no_primary()
# When I sign out and navigate to the individual response page
self.sign_out()
self.get("/individual-response")
# Then I should see the 401 page
self.assertStatusCode(401)
def test_404_invalid_list_item_id(self):
# Given I add a household member
self._add_household_no_primary()
# When I use an invalid id in an individual response url
self.get("/individual-response/not-an-id/how")
# Then I should see the 404 page
self.assertStatusCode(404)
def test_404_when_hub_not_accessible(self):
# Given I try to navigate to the individual response page
self.get("/individual-response")
# Then I should see the 404 page
self.assertStatusCode(404)
def test_404_how_when_hub_not_accessible(self):
# Given I try to navigate to the individual response how page
self.get("/individual-response/fake-id/how")
# Then I should see the 404 page
self.assertStatusCode(404)
def test_404_confirm_when_hub_not_accessible(self):
# Given I try to navigate to the individual response how page
self.get("/individual-response/fake-id/post/confirm-address")
# Then I should see the 404 page
self.assertStatusCode(404)
def test_404_post_confirmation_when_hub_not_accessible(self):
# Given I try to navigate to the individual response how page
self.get("/individual-response/post/confirmation")
# Then I should see the 404 page
self.assertStatusCode(404)
def test_404_individual_response_page_if_primary_id_used(self):
# Given I add a primary person
self._add_primary()
# When I navigate to the how endpoint using the primary person's
# list item id
self.post()
primary_person_id = self.last_url.split("/")[3]
self.get(f"individual-response?list_item_id={primary_person_id}")
# Then I should see the 404 page
self.assertStatusCode(404)
def test_404_individual_response_how_page_if_primary_id_used(self):
# Given I add a primary person
self._add_primary()
# When I navigate to the how endpoint using the primary person's
# list item id
self.post()
primary_person_id = self.last_url.split("/")[3]
self.get(f"individual-response/{primary_person_id}/how")
# Then I should see the 404 page
self.assertStatusCode(404)
def test_404_individual_response_confirm_page_if_primary_id_used(self):
# Given I add a primary person
self._add_primary()
# When I navigate to the how endpoint using the primary person's
# list item id
self.post()
primary_person_id = self.last_url.split("/")[3]
self.get(f"individual-response/{primary_person_id}/post/confirm-address")
# Then I should see the 404 page
self.assertStatusCode(404)
def test_404_individual_response_no_list_items(self):
# Given I add no household members
self._add_no_household_members()
# When I navigate to the how endpoint using a fake id
self.get("/individual-response/no-id/how")
# Then I should see the 404 page
self.assertStatusCode(404)
def test_429_individual_response_limit_exceeded(self):
# Given I successfully request individual responses up to the limit
self._add_household_no_primary()
self.get(self.individual_section_link)
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Text message"})
self.post({"individual-response-enter-number-answer": "07970000000"})
confirm_number_page = self.last_url
self.post({"individual-response-text-confirm-answer": "Yes, send the text"})
self.assertInUrl("/text/confirmation")
self.get(confirm_number_page)
self.post({"individual-response-text-confirm-answer": "Yes, send the text"})
self.assertInUrl("/text/confirmation")
# When I try to request an additional individual response, which would exceed the limit
self.get(confirm_number_page)
self.post({"individual-response-text-confirm-answer": "Yes, send the text"})
# Then I should see a 429 page
self.assertStatusCode(429)
self.assertInBody(
"You have reached the maximum number of individual access codes"
)
def test_500_publish_failed_text(self):
publisher = self._application.eq["publisher"]
publisher.publish = MagicMock(side_effect=PublicationFailed)
# Given I add a household member
self._request_individual_response_by_text()
self.assertStatusCode(500)
self.assertEqualPageTitle(
"Sorry, there was a problem sending the access code - Test Individual Response"
)
self.assertInSelector(self.last_url, "p[data-qa=retry]")
def test_500_publish_failed_post(self):
publisher = self._application.eq["publisher"]
publisher.publish = MagicMock(side_effect=PublicationFailed)
# Given I add a household member
self._request_individual_response_by_post()
self.assertEqualPageTitle(
"Sorry, there was a problem sending the access code - Test Individual Response"
)
self.assertInSelector(self.last_url, "p[data-qa=retry]")
class TestIndividualResponseIndividualSection(IndividualResponseTestCase):
def test_ir_page_titles_render_correctly(self):
# Given I add household members
self._add_household_no_primary()
# When I navigate to the individual response interstitial
self.get(self.individual_section_link)
self.get(self.individual_response_link)
# I should see the correct page title
self.assertEqualPageTitle(
"Cannot answer questions for others in your household: Person 1 - Test Individual Response"
)
def test_ir_guidance_not_displayed_when_primary(self):
# Given I add a primary person
self._add_primary()
# When I navigate to the individual section
self.post()
# Then I should not see the individual response guidance
self.assertInUrl("questionnaire/household/")
self.assertInBody("You will need to know personal details")
self.assertNotInBody("If you can't answer someone else's questions")
def test_ir_guidance_displayed_when_no_primary_person(self):
# Given I don't add a primary person
self._add_household_no_primary()
# When I navigate to the individual section
self.post()
# Then I should see the individual response guidance
self.assertInBody("You will need to know personal details")
self.assertInBody("If you can’t answer questions for this person")
self.assertInBody("Hide")
def test_ir_guidance_not_displayed_on_primary_page_when_primary_and_other_household_members(
self,
):
# Given I add a primary person and a household member
self._add_primary_and_household()
# When I navigate to the first individual section
self.post()
self.post()
self.post()
# Then I should not see the individual response guidance
self.assertInBody("Are you")
self.assertNotInBody("If you can’t answer someone else’s questions")
def test_ir_guidance_displayed_on_non_primary_page_when_primary_and_other_household_members(
self,
):
# Given I add a primary person and a household member
self._add_primary_and_household()
# When I navigate to the first non-primary individual section
self.post()
self.post()
self.post()
self.post({"proxy-answer": "Yes, I am"})
self.post()
# Then I should see the individual response guidance
self.assertInBody("You will need to know personal details such as")
self.assertInBody("If you can’t answer questions for this person")
def test_ir_guidance_not_displayed_on_second_non_primary_interstitial_page(
self,
):
# Given I add a primary person and a household member
self._add_primary_and_household()
# When I navigate to the second interstitial page of non-primary individual section
self.post()
self.post()
self.post()
self.post({"proxy-answer": "Yes, I am"})
self.post()
self.post()
# Then I shouldn't see the individual response guidance
self.assertNotInBody("You will need to know personal details such as")
self.assertNotInBody("If you can’t answer questions for this person")
def test_ir_guidance_displayed_on_remove_person_page(self):
# Given I add a primary person and a household member
self.get("questionnaire/primary-person-list-collector/")
self.post({"you-live-here": "Yes"})
self.post({"first-name": "Marie", "last-name": "Day"})
self.post({"anyone-else": "Yes"})
self.post({"first-name": "John", "last-name": "Doe"})
# When I try to remove the household member
householder_remove_link = self.get_link("2", "remove")
self.get(householder_remove_link)
# Then I should see the individual response guidance
self.assertInBody("If you can’t answer questions for this person")
def test_ir_guidance_not_displayed_on_non_individual_response_list_remove_page(
self,
):
# Given I add a visitor
self.get("questionnaire/primary-person-list-collector/")
self.post({"you-live-here": "No"})
self.post({"anyone-else": "No"})
self.post({"any-visitors": "Yes"})
self.post({"visitor-first-name": "John", "visitor-last-name": "Doe"})
# When I try to remove the visitor
visitor_remove_link = self.get_link("1", "remove")
self.get(visitor_remove_link)
# Then I should not see the individual response guidance
self.assertNotInBody("If you can’t answer questions for this person")
class TestIndividualResponseHubViews(IndividualResponseTestCase):
def test_individual_response_requested(self):
# Given I request an individual response by post
self._request_individual_response_by_post()
# When I navigate to the hub
self.get("/questionnaire")
# Then I should see "Separate census requested" as
# the individual section status
self.assertInBody("Separate census requested")
self.assertInBody("Change or resend")
self.assertIn("/change", self.individual_section_link)
def test_individual_response_not_requested_status_unchanged(self):
# Given I navigate to the confirm page of individual response
# but don't request one
self._add_household_no_primary()
self.post()
self.get(self.individual_response_link)
self.post()
self.post({"individual-response-how-answer": "Post"})
# When I navigate to the hub
self.get("/questionnaire")
# Then I should see "Not started" as the individual section status
self.assertInBody("Not started")
class TestIndividualResponseNavigation(IndividualResponseTestCase):
def test_introduction_page_previous_goes_to_individual_section(self):
# Given I navigate to the individual response introduction page from an
# individual section
self._add_household_no_primary()
self.get(self.individual_section_link)
self.get(self.individual_response_link)
# When I click the previous link
self.previous()
# Then I should be taken back to the individual section
self.assertInUrl("individual-interstitial")
def test_ir_introduction_page_previous_goes_to_remove_page(self):
# Given I navigate to the individual response introduction page from a
# remove person page
self.get("questionnaire/primary-person-list-collector/")
self.post({"you-live-here": "Yes"})
self.post({"first-name": "Marie", "last-name": "Day"})
self.post({"anyone-else": "Yes"})
self.post({"first-name": "John", "last-name": "Doe"})
householder_remove_link = self.get_link("2", "remove")
self.get(householder_remove_link)
# When I start an IR journey then click the previous link
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.previous()
self.previous()
# Then I should be taken back to the remove page
self.assertInUrl("remove-person")
def test_how_page_previous_goes_to_introduction_page(self):
# Given I navigate to the individual response how page from an
# individual response introduction page
self._add_household_no_primary()
self.get(self.individual_section_link)
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
# When I click the previous link
self.previous()
# Then I should be taken back to the individual response introduction page
self.assertInBody("If you can't answer questions for others in your household")
def test_introduction_previous_goes_to_hub(self):
# Given I add a household member
# and navigate to the individual response introduction page
# without a list item id url param
self._add_household_no_primary()
self.get("/individual-response/")
# When I click the previous link
self.previous()
# Then I should be taken to the hub
self.assertInUrl("questionnaire/")
def test_previous_from_how_multiple_people(self):
# Given I add a number of non primary household members
# and select a response from the individual section
# and navigate to the method
self._add_household_multiple_members_no_primary()
self.post()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
person_id = self.last_url.split("/")[2]
# When I choose previous
self.previous()
# Then I should be taken to the previous page
self.assertInUrl(f"/individual-response/?list_item_id={person_id}")
def test_ir_guidance_not_displayed_on_hub_if_survey_complete(self):
# Given the survey had been completed
self._add_primary_and_household()
# When I reach the hub
self.post()
self.post()
self.post()
self.post({"proxy-answer": "Yes, I am"})
self.post()
self.post()
self.post()
self.post({"proxy-answer": "Yes, I am"})
# Then I should not see the individual response guidance
self.assertInBody("Submit survey")
self.assertNotInBody("If you can’t answer someone else’s questions")
def test_ir_after_submission(self):
# Given I complete the questionnaire and submit
self._add_primary_and_household()
self.post()
self.post()
self.post()
self.post({"proxy-answer": "Yes, I am"})
self.post()
self.post()
self.post()
self.post({"proxy-answer": "Yes, I am"})
self.post()
self.assertEqual(THANK_YOU_URL_PATH, self.last_url)
# When I try to get the individual-response response page
self.get("/individual-response/")
# Then I get re-directed to the thank you page
self.assertEqual(THANK_YOU_URL_PATH, self.last_url)
class TestIndividualResponseWho(IndividualResponseTestCase):
def test_who_not_shown_for_primary_only(self):
# Given I add a primary person
self._add_primary()
self.get("/individual-response/who")
# Then I should not be able to reach the member selector
self.assertStatusCode(404)
def test_who_cannot_be_reached_when_single_household(self):
# Given I add a single household member
# and navigate to the individual response from hub
self._add_household_no_primary()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
# Then I should skip the member selector
self.assertInUrl("/how")
def test_goes_to_who_selector(self):
# Given I add a number of non primary household members
# and navigate to the individual response from hub
self._add_household_multiple_members_no_primary()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
# Then I should be taken to the member selector
self.assertInUrl("/who")
def test_previous_returns_to_hub(self):
# Given I add a number of non primary household members
# and navigate to the individual response from hub
self._add_household_no_primary()
self.get(self.individual_response_link)
# When I choose previous
self.previous()
# Then I should be taken to the hub
self.assertInUrl("/questionnaire/")
def test_previous_from_who_returns_to_intro(self):
# Given I add a number of non primary household members
# and navigate beyond the individual response member selector from hub
self._add_household_multiple_members_no_primary()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
# When I choose previous
self.previous()
# Then I should be taken to the response introduction
self.assertInUrl("/individual-response/?journey=hub")
def test_previous_from_how_returns_via_hub_route(self):
# Given I add a number of non primary household members
# and navigate beyond the individual response member selector from hub
self._add_household_multiple_members_no_primary()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
list_item_id = self.get_who_choice(0)["list_item_id"]
self.post({"individual-response-who-answer": list_item_id})
# When I choose previous
self.previous()
# Then I should be taken to the previous page
self.assertInUrl("/individual-response/who?journey=hub")
def test_previous_from_confirm_returns_via_hub_route(self):
# Given I add a number of non primary household members
# and navigate beyond the individual response member selector from hub
self._add_household_multiple_members_no_primary()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
list_item_id = self.get_who_choice(0)["list_item_id"]
self.post({"individual-response-who-answer": list_item_id})
self.post({"individual-response-how-answer": "Post"})
# When I choose previous
self.previous()
# Then I should be taken to the previous page
self.assertInUrl(f"/individual-response/{list_item_id}/how?journey=hub")
class TestIndividualResponseTextHandler(IndividualResponseTestCase):
def test_display_mobile_number_on_confirmation_page(self):
# Given I navigate to the confirmation page
self._add_household_no_primary()
self.get(self.individual_section_link)
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Text message"})
self.post({"individual-response-enter-number-answer": self.DUMMY_MOBILE_NUMBER})
# When I post "Yes, send the text"
self.post({"individual-response-text-confirm-answer": "Yes, send the text"})
# Then I should see the phone number
self.assertInUrl("/text/confirmation")
self.assertInBody(self.DUMMY_MOBILE_NUMBER)
def test_mobile_is_not_shown_in_url(self):
# Given I navigate to the confirmation page
self._add_household_no_primary()
self.get(self.individual_section_link)
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Text message"})
# When I post the number
self.post({"individual-response-enter-number-answer": self.DUMMY_MOBILE_NUMBER})
# Then I should not see the phone number in the url
self.assertNotInUrl(self.DUMMY_MOBILE_NUMBER)
def test_confirmation_page_redirects_to_hub(self):
# Given I navigate to the confirmation page
self._add_household_no_primary()
self.get(self.individual_section_link)
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Text message"})
self.post({"individual-response-enter-number-answer": self.DUMMY_MOBILE_NUMBER})
# When I post "Yes, send the text"
self.post({"individual-response-text-confirm-answer": "Yes, send the text"})
self.post()
self.assertInUrl("/questionnaire")
def test_confirm_number_no_routes_back(self):
# Given I navigate to the confirm number page
self._add_household_no_primary()
self.get(self.individual_section_link)
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Text message"})
self.post({"individual-response-enter-number-answer": self.DUMMY_MOBILE_NUMBER})
# When I post "No"
self.post(
{"individual-response-text-confirm-answer": "No, I need to change it"}
)
# Then I should see the enter number page, populated with the phone number
self.assertInUrl("text/enter-number")
self.assertInBody(self.DUMMY_MOBILE_NUMBER)
def test_confirm_number_previous_link(self):
# Given I navigate to the confirm number page
self._add_household_no_primary()
self.get(self.individual_section_link)
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Text message"})
self.post({"individual-response-enter-number-answer": self.DUMMY_MOBILE_NUMBER})
# When I click the previous link
self.previous()
# Then I should see the enter number page, populated with the phone number
self.assertInUrl("text/enter-number")
self.assertInBody(self.DUMMY_MOBILE_NUMBER)
def test_enter_number_previous_persists_journey(self):
# Given I navigate to the enter number page
self._add_household_no_primary()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Text message"})
# When I click the previous link
self.previous()
# Then the journey param should be in the url
self.assertInUrl("journey=hub")
def test_confirm_number_previous_persists_journey(self):
# Given I navigate to the confirm number page
self._add_household_no_primary()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Text message"})
self.post({"individual-response-enter-number-answer": self.DUMMY_MOBILE_NUMBER})
# When I click the previous link
self.previous()
# Then the journey param should be in the url
self.assertInUrl("journey=hub")
class TestIndividualResponseConfirmationPage(IndividualResponseTestCase):
def test_display_address_on_confirmation_page(self):
# Given I navigate to the confirmation page
self._add_household_no_primary()
self.get(self.individual_section_link)
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Post"})
# When I post "Yes, send the access code by post"
self.post(
{
"individual-response-post-confirm-answer": "Yes, send the access code by post"
}
)
# Then I should see the address
self.assertInUrl("/confirmation")
self.assertInBody("68 Abingdon Road, Goathill")
def test_navigate_back_to_how_page_from_post_page(self):
# Given I navigate to the individual response confirm post page
self._add_household_no_primary()
self.get(self.individual_section_link)
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Post"})
# When I click the previous link
self.previous()
# Then I should be taken back to the how page
self.assertInUrl("/how")
def test_redirect_to_how_page_when_no_send_another_way_selected(self):
# Given I navigate to the /individual-response/<id>/how url
# after adding a household member
self._add_household_no_primary()
self.get(self.individual_section_link)
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Post"})
# When I choose to send the individual response code another way
self.post(
{"individual-response-post-confirm-answer": "No, send it another way"}
)
# Then I should be redirected to the how page
self.assertInUrl("/how")
def test_mandatory_error_rendered_on_confirm_address(self):
# Given I navigate to the /individual-response/<id>/confirm-address url
# after adding a household member
self._add_household_no_primary()
self.get(self.individual_section_link)
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Post"})
# When I post with no data
self.post()
# Then I should see errors rendered correctly
self.assertInUrl("/confirm-address")
self.assertInBody("There is a problem with your answer")
def test_default_routing_uses_text_option(self):
# Given I navigate to the individual response how page
# after adding a household member
self._add_household_no_primary()
self.get(self.individual_section_link)
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
# When I post without selecting a radio button
self.post()
# Then I should see the text 'enter number' page
self.assertInUrl("text/enter-number")
class TestIndividualResponseChange(IndividualResponseTestCase):
def test_hub_change_link_goes_to_change_page(self):
# Given I request an individual response by post
self._request_individual_response_by_post()
# When I navigate to the hub and click on the change individual response link
self.get("/questionnaire")
self.get(self.individual_section_link)
# Then I should see the change individual response page
self.assertInBody("How would you like to answer")
def test_change_page_previous_goes_to_hub(self):
# Given I navigate to the individual response change page
self._request_individual_response_by_post()
self.get("/questionnaire")
self.get(self.individual_section_link)
# When I click the previous link
self.previous()
# Then I should be taken to the hub
self.assertInUrl("questionnaire/")
def test_request_separate_census_option_is_preselected(self):
# Given I request an individual response
self._request_individual_response_by_post()
# When I navigate to the individual response change page
self.get("/questionnaire")
self.get(self.individual_section_link)
# Then the "I would like to request a separate census" option is preselected
checked_radio_input = self.getHtmlSoup().select(
"#individual-response-change-answer-0[checked]"
)
self.assertIsNotNone(checked_radio_input)
def test_request_separate_census_option_goes_to_how_page(self):
# Given I navigate to the individual response change page
self._request_individual_response_by_post()
self.get("/questionnaire")
self.get(self.individual_section_link)
# When I choose the "I would like to request a separate census" option
self.post(
{
"individual-response-change-answer": "I would like to request a separate census for them to complete"
}
)
# Then I should be taken to the how page
self.assertInUrl("/how")
# And the section status should not be updated
self.get("/questionnaire")
self.assertInBody("Change or resend")
def test_answer_own_questions_option_goes_to_hub(self):
# Given I navigate to the individual response change page
self._request_individual_response_by_post()
self.get("/questionnaire")
self.get(self.individual_section_link)
# When I choose the "I will ask them to answer" option
self.post(
{
"individual-response-change-answer": "I will ask them to answer their own questions"
}
)
# Then I should be taken to the hub
self.assertInUrl("/questionnaire")
def test_answer_own_questions_option_updates_section_status(self):
# Given I navigate to the individual response change page
self._request_individual_response_by_post()
self.get("/questionnaire")
self.get(self.individual_section_link)
# When I choose the "I will ask them to answer" option
self.post(
{
"individual-response-change-answer": "I will ask them to answer their own questions"
}
)
# Then the section status should be updated
self.assertNotInBody("Change or resend")
self.assertInBody("Not started")
self.assertInBody("Start section")
def test_answer_own_questions_option_after_starting_section_updates_section_status(
self,
):
# Given start a section and then request an individual response
self._add_household_no_primary()
self.post()
self.post()
self.previous()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
self.post({"individual-response-how-answer": "Post"})
self.post(
{
"individual-response-post-confirm-answer": "Yes, send the access code by post"
}
)
self.post()
# When I navigate to the individual response change page and choose the "I will ask them to answer" option
self.get("/questionnaire")
self.get(self.individual_section_link)
self.post(
{
"individual-response-change-answer": "I will ask them to answer their own questions"
}
)
# Then the section status should be updated
self.assertNotInBody("Change or resend")
self.assertInBody("Partially completed")
self.assertInBody("Continue with section")
def test_i_will_answer_option_goes_to_individual_section(self):
# Given I navigate to the individual response change page
self._request_individual_response_by_post()
self.get("/questionnaire")
self.get(self.individual_section_link)
# When I choose the "I will answer" option
self.post(
{"individual-response-change-answer": "I will answer for {person_name}"}
)
# Then I should be taken to the individual section introduction page
self.assertInBody("You will need to know personal details such as")
# And the section status should be updated
self.assertInUrl("/questionnaire")
self.assertNotInBody("Change or resend")
def test_how_page_previous_goes_to_change_page(self):
# Given I navigate to the individual response how page
self._request_individual_response_by_post()
self.get("/questionnaire")
self.get(self.individual_section_link)
self.post(
{
"individual-response-change-answer": "I would like to request a separate census for them to complete"
}
)
# When I click the previous link
self.previous()
# Then I should be taken to the change page
self.assertInUrl("/change")
def test_post_confirm_previous_previous_goes_to_change_page(self):
# Given I navigate to the individual response post confirm page
self._request_individual_response_by_post()
self.get("/questionnaire")
self.get(self.individual_section_link)
self.post(
{
"individual-response-change-answer": "I would like to request a separate census for them to complete"
}
)
self.post({"individual-response-how-answer": "Post"})
# When I click the previous link twice
self.previous()
self.previous()
# Then I should be taken to the change page
self.assertInUrl("/change")
class TestIndividualResponseSameNames(IndividualResponseTestCase):
def test_who_doesnt_display_middle_names_when_no_same_names(self):
# Given I add some people without same names
self._add_household_multiple_members_no_primary()
# When I navigate to the who page
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
# Then the member selector should not show the middle names for anyone
self.assertNotInBody("Carla")
self.assertNotInBody("David")
def test_who_displays_middle_names_when_same_names_exist(self):
# Given I add some people with same names
self._add_household_members_with_same_names()
# When I navigate to the who page
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
# Then the member selector should show the middle names for everyone that has one
self.assertInBody("Marie Carla Day")
self.assertInBody("Joe David Day")
self.assertInBody("Joe Eric Day")
self.assertInBody("Joe Day")
def test_who_displays_all_names_when_duplicates_exist(self):
# Given I add some people with duplicate names
self.get("questionnaire/primary-person-list-collector/")
self.post({"you-live-here": "No"})
self.post({"anyone-else": "Yes"})
self.post({"first-name": "Marie", "middle-names": "Carla", "last-name": "Day"})
self.post({"anyone-else": "Yes"})
self.post({"first-name": "Marie", "middle-names": "Carla", "last-name": "Day"})
self.post({"anyone-else": "Yes"})
self.post({"first-name": "Joe", "last-name": "Day"})
self.post({"anyone-else": "Yes"})
self.post({"first-name": "Joe", "last-name": "Day"})
self.post({"anyone-else": "No"})
self.post({"any-visitors": "No"})
self.get("questionnaire/")
# When I navigate to the who page
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
# Then everyone should be displayed
self.assertEqual(self.get_who_choice(0)["label"], "Marie Carla Day")
self.assertEqual(self.get_who_choice(1)["label"], "Marie Carla Day")
self.assertEqual(self.get_who_choice(2)["label"], "Joe Day")
self.assertEqual(self.get_who_choice(3)["label"], "Joe Day")
self.assertNotEqual(
self.get_who_choice(0)["list_item_id"],
self.get_who_choice(1)["list_item_id"],
)
self.assertNotEqual(
self.get_who_choice(2)["list_item_id"],
self.get_who_choice(3)["list_item_id"],
)
def test_how_doesnt_display_middle_names_when_not_same_name(self):
# Given I add some people with same names
self._add_household_members_with_same_names()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
# When I choose someone that doesn't have a same name
list_item_id = self.get_who_choice(0)["list_item_id"]
self.post({"individual-response-who-answer": list_item_id})
# Then the how page should not show the middle names
self.assertInBody("Marie Day")
def test_how_displays_middle_names_when_same_name(self):
# Given I add some people with same names
self._add_household_members_with_same_names()
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
# When I choose someone with a same name
list_item_id = self.get_who_choice(1)["list_item_id"]
self.post({"individual-response-who-answer": list_item_id})
# Then the how page should show the middle names
self.assertInBody("Joe David Day")
def test_how_has_correct_list_item_id_when_duplicates_exist(self):
# Given I add some people with duplicate names
self.get("questionnaire/primary-person-list-collector/")
self.post({"you-live-here": "No"})
self.post({"anyone-else": "Yes"})
self.post({"first-name": "Marie", "middle-names": "Carla", "last-name": "Day"})
self.post({"anyone-else": "Yes"})
self.post({"first-name": "Marie", "middle-names": "Carla", "last-name": "Day"})
self.post({"anyone-else": "No"})
self.post({"any-visitors": "No"})
self.get("questionnaire/")
# When I navigate to the who page and select someone
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
list_item_id = self.get_who_choice(0)["list_item_id"]
self.post({"individual-response-who-answer": list_item_id})
# Then I should be on the how page for that person
self.assertInUrl(list_item_id)
class TestIndividualResponseHow(IndividualResponseTestCase):
def test_block_definition_before_postal_deadline(self):
# Given I add a household member
self._add_household_no_primary()
self.post()
# When I navigate to the individual response how page before the postal deadline
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
# Then one of my radio box options should be 'Post'
self.assertInBody("Post")
self.assertInBody(
"We can only send this to an unnamed resident at the registered household address"
)
self.assertInBody("Select how to send access code")
@freeze_time("2020-11-25T12:01:00")
def test_block_definition_after_postal_deadline(self):
# Given I add a household member
self._add_household_no_primary()
self.post()
# When I navigate to the individual response how page after the postal deadline
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
# Then 'Post' should not be one of my radio box options, and I should have a message telling me it's no longer possible
self.assertNotInBody("Post")
self.assertNotInBody(
"We can only send this to an unnamed resident at the registered household address"
)
self.assertNotInBody("Select how to send access code")
self.assertInBody("It is no longer possible to receive an access code by post")
class TestIndividualResponsePostAddressConfirmHandler(IndividualResponseTestCase):
@freeze_time("2020-11-25T12:01:00")
def test_address_confirm_after_postal_deadline(self):
# Given I add a number of non primary household members
self._add_household_multiple_members_no_primary()
# When I try to access the address confirmation page after the postal deadline
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
list_item_id = self.get_who_choice(0)["list_item_id"]
self.get(f"/individual-response/{list_item_id}/post/confirm-address")
# Then I should be redirect to the how page
self.assertInUrl(f"/individual-response/{list_item_id}/how")
@freeze_time("2020-11-25T12:01:00")
def test_address_confirm_after_postal_deadline_post(self):
# Given I add a number of non primary household members
self._add_household_multiple_members_no_primary()
# When I try to post to the address confirmation page after the postal deadline
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
list_item_id = self.get_who_choice(0)["list_item_id"]
self.post(url=f"/individual-response/{list_item_id}/post/confirm-address")
# Then I should be redirect to the how page
self.assertInUrl(f"/individual-response/{list_item_id}/how")
def test_options_request_before_request(self):
# Given I add a number of non primary household members
self._add_household_multiple_members_no_primary()
# When I try to post to the address confirmation page after the postal deadline
self.get(self.individual_response_link)
self.get(self.individual_response_start_link)
list_item_id = self.get_who_choice(0)["list_item_id"]
with self.assertLogs() as logs:
self.options(
url=f"/individual-response/{list_item_id}/post/confirm-address"
)
self.assertStatusOK()
for output in logs.output:
self.assertNotIn("individual-response request", output)
| 39.637659
| 127
| 0.671461
| 6,728
| 52,837
| 5.074019
| 0.060939
| 0.141836
| 0.034155
| 0.065206
| 0.818911
| 0.785078
| 0.75807
| 0.723329
| 0.711729
| 0.69178
| 0
| 0.007563
| 0.23423
| 52,837
| 1,332
| 128
| 39.667417
| 0.836163
| 0.210459
| 0
| 0.659781
| 0
| 0.001215
| 0.222731
| 0.095429
| 0
| 0
| 0
| 0
| 0.153098
| 1
| 0.110571
| false
| 0
| 0.008505
| 0.001215
| 0.140948
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
541b582501eb8c7c56389c1228d23d8bad8881c5
| 188
|
py
|
Python
|
api_site/src/api_x/main/entry/__init__.py
|
webee/pay
|
b48c6892686bf3f9014bb67ed119506e41050d45
|
[
"W3C"
] | 1
|
2019-10-14T11:51:49.000Z
|
2019-10-14T11:51:49.000Z
|
api_site/src/api_x/main/entry/__init__.py
|
webee/pay
|
b48c6892686bf3f9014bb67ed119506e41050d45
|
[
"W3C"
] | null | null | null |
api_site/src/api_x/main/entry/__init__.py
|
webee/pay
|
b48c6892686bf3f9014bb67ed119506e41050d45
|
[
"W3C"
] | null | null | null |
# coding=utf-8
from __future__ import unicode_literals
from flask import Blueprint
main_entry_mod = Blueprint('main_entry', __name__)
from . import views, test_views, deprecated_views
| 18.8
| 50
| 0.808511
| 26
| 188
| 5.307692
| 0.653846
| 0.188406
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006098
| 0.12766
| 188
| 9
| 51
| 20.888889
| 0.835366
| 0.06383
| 0
| 0
| 0
| 0
| 0.057471
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0.5
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
542b6cb8288823e49189371343d114fbb3d34d58
| 220
|
py
|
Python
|
tests/test_problem24.py
|
nolanwrightdev/blind-75-python
|
b92ef3449eb0143c760ddd339897a3f0a2972830
|
[
"MIT"
] | 6
|
2020-02-01T23:29:51.000Z
|
2022-02-20T20:46:56.000Z
|
tests/test_problem24.py
|
nolanwrightdev/blind-75-python
|
b92ef3449eb0143c760ddd339897a3f0a2972830
|
[
"MIT"
] | null | null | null |
tests/test_problem24.py
|
nolanwrightdev/blind-75-python
|
b92ef3449eb0143c760ddd339897a3f0a2972830
|
[
"MIT"
] | null | null | null |
import unittest
from problems.problem24 import solution
class Test(unittest.TestCase):
def test(self):
self.assertEqual(solution('12'), 2)
self.assertEqual(solution('226'), 3)
self.assertEqual(solution('0'), 0)
| 22
| 39
| 0.740909
| 29
| 220
| 5.62069
| 0.586207
| 0.276074
| 0.423313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05641
| 0.113636
| 220
| 9
| 40
| 24.444444
| 0.779487
| 0
| 0
| 0
| 0
| 0
| 0.027273
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0.142857
| false
| 0
| 0.285714
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
54399b963c0c9a223b698f49c60bab45455528d2
| 73
|
py
|
Python
|
py_tdlib/constructors/chat_report_reason_violence.py
|
Mr-TelegramBot/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 24
|
2018-10-05T13:04:30.000Z
|
2020-05-12T08:45:34.000Z
|
py_tdlib/constructors/chat_report_reason_violence.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 3
|
2019-06-26T07:20:20.000Z
|
2021-05-24T13:06:56.000Z
|
py_tdlib/constructors/chat_report_reason_violence.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 5
|
2018-10-05T14:29:28.000Z
|
2020-08-11T15:04:10.000Z
|
from ..factory import Type
class chatReportReasonViolence(Type):
pass
| 12.166667
| 37
| 0.794521
| 8
| 73
| 7.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136986
| 73
| 5
| 38
| 14.6
| 0.920635
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
548abe7e98cffb39f722271730234948a6d2f87a
| 21
|
py
|
Python
|
iahr/commands/audio/__init__.py
|
B1Z0N/iahr
|
0f198a47406726c08018afb17f13ff8c31244eff
|
[
"MIT"
] | 8
|
2020-07-10T08:09:21.000Z
|
2021-06-01T23:47:29.000Z
|
iahr/commands/audio/__init__.py
|
B1Z0N/iahr
|
0f198a47406726c08018afb17f13ff8c31244eff
|
[
"MIT"
] | 1
|
2022-03-12T00:40:59.000Z
|
2022-03-12T00:40:59.000Z
|
iahr/commands/audio/__init__.py
|
B1Z0N/iahr
|
0f198a47406726c08018afb17f13ff8c31244eff
|
[
"MIT"
] | null | null | null |
from . import audio
| 10.5
| 20
| 0.714286
| 3
| 21
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.238095
| 21
| 1
| 21
| 21
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
54b4d7364a58c6cb23342efcec304c441ccc9d06
| 161
|
py
|
Python
|
trains/multiTask/__init__.py
|
iyuge2/MMSA
|
e17a012b07609662a4bdfac8cb8e1f92a9297b41
|
[
"Apache-2.0"
] | 3
|
2020-07-06T06:32:16.000Z
|
2021-12-13T12:59:34.000Z
|
trains/multiTask/__init__.py
|
iyuge2/MMSA
|
e17a012b07609662a4bdfac8cb8e1f92a9297b41
|
[
"Apache-2.0"
] | null | null | null |
trains/multiTask/__init__.py
|
iyuge2/MMSA
|
e17a012b07609662a4bdfac8cb8e1f92a9297b41
|
[
"Apache-2.0"
] | null | null | null |
from trains.multiTask.MLF_DNN import MLF_DNN
from trains.multiTask.MLMF import MLMF
from trains.multiTask.MTFN import MTFN
__all__ = ['MLF_DNN', 'MLMF', 'MTFN']
| 32.2
| 44
| 0.78882
| 25
| 161
| 4.8
| 0.36
| 0.25
| 0.475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10559
| 161
| 5
| 45
| 32.2
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.092593
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
54b6427b3b82ae3c308aa98363f37b6f147255fd
| 33
|
py
|
Python
|
cortext_word/__init__.py
|
kodki/cortext-word
|
a87121eb629f154dc4a4948de9053326941b3e36
|
[
"MIT"
] | null | null | null |
cortext_word/__init__.py
|
kodki/cortext-word
|
a87121eb629f154dc4a4948de9053326941b3e36
|
[
"MIT"
] | null | null | null |
cortext_word/__init__.py
|
kodki/cortext-word
|
a87121eb629f154dc4a4948de9053326941b3e36
|
[
"MIT"
] | null | null | null |
def classify(word):
return word
| 11
| 19
| 0.757576
| 5
| 33
| 5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 33
| 2
| 20
| 16.5
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
b7228a1efd29b5ca5ed9bb4453776a19b638e568
| 11,858
|
py
|
Python
|
fexm/test/test_fuzzer.py
|
fgsect/fexm
|
cf213c9dea3778c09c1d475e6a16b9db78a6f1e6
|
[
"Apache-2.0"
] | 105
|
2018-08-09T22:13:59.000Z
|
2022-03-26T23:24:20.000Z
|
fexm/test/test_fuzzer.py
|
DeadManINDIA/fexm
|
ca6629bbcbf79639871d3ec52bc2a7de9ae453a4
|
[
"Apache-2.0"
] | 13
|
2018-08-23T13:40:04.000Z
|
2022-03-11T23:28:00.000Z
|
fexm/test/test_fuzzer.py
|
DeadManINDIA/fexm
|
ca6629bbcbf79639871d3ec52bc2a7de9ae453a4
|
[
"Apache-2.0"
] | 25
|
2018-08-09T21:56:12.000Z
|
2022-03-22T22:08:12.000Z
|
import json
import unittest
import unittest.mock
import sys
from unittest import mock
import sys
import os
from helpers import utils
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "configfinder/")))
sys.modules[
'configfinder.builder'] = unittest.mock.Mock() # Mocking builder like so:https://stackoverflow.com/questions/8658043/how-to-mock-an-import
sys.modules[
'builder'] = unittest.mock.Mock() # Mocking builder like so:https://stackoverflow.com/questions/8658043/how-to-mock-an-import
sys.modules["config_settings.MAX_TIMEOUT_PER_PACKAGE"] = 1 # unittest.mock.Mock(MAX_TIMEOUT_PER_PACKAGE=1)
import configfinder.fuzzer_wrapper
from configfinder import minimzer
import sh
import shutil
import os
class TestAflFuzzerWrapper(unittest.TestCase):
def setUp(self):
os.makedirs("test_data", exist_ok=True)
self.volume_path = "test_data/test_output_volume"
os.makedirs(self.volume_path, exist_ok=True)
self.jpg_binary_path = "test_data/jpg_binary_main"
aflgcc = sh.Command("afl-gcc")
aflgcc("test/mock_data/input_mock/jpg_binary/main.c", "-o", self.jpg_binary_path)
self.timeout_binary_path = "test_data/timeout_binary_main"
aflgcc("test/mock_data/input_mock/timeout_binary/main.c", "-o", self.timeout_binary_path)
def tearDown(self):
shutil.rmtree("test_data")
def test_multi_core_fuzzing(self):
package_name = "jpg_parser"
binary_path = self.jpg_binary_path
parameter = "@@"
fuzz_duration = 30
seeds_dir = "test/mock_data/mock_seeds/jpg_samples"
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "mockuuid"
fuzzer_wrapper = configfinder.fuzzer_wrapper.AflFuzzWrapper(volume_path=self.volume_path, package=package_name, binary_path=binary_path, parameter=parameter, fuzz_duration=fuzz_duration,
seeds_dir=seeds_dir, afl_config_file_path=os.path.join(self.volume_path, package_name, os.path.basename(binary_path))+".afl_conf")
fuzzer_wrapper.start_fuzzer(cores=4)
self.assertTrue(os.path.exists(os.path.join(fuzzer_wrapper.get_afl_multi_core_config_dict()["output"], fuzzer_wrapper.session_name + "000/fuzzer_stats")))
self.assertGreater(int(utils.get_afl_stats_from_syncdir(fuzzer_wrapper.multicore_dict["output"])["execs_done"]), 0)
def test_multi_core_fuzzing_timeout(self):
package_name = "timeut_jpg_parser"
binary_path = self.timeout_binary_path
parameter = "@@"
fuzz_duration = 20
seeds_dir = "test/mock_data/mock_seeds/jpg_samples"
log_dict = {}
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "mockuuid"
fuzzer_wrapper = configfinder.fuzzer_wrapper.AflFuzzWrapper(volume_path=self.volume_path, package=package_name, binary_path=binary_path, parameter=parameter, fuzz_duration=fuzz_duration,
seeds_dir=seeds_dir, log_dict=log_dict)
self.assertFalse(fuzzer_wrapper.start_fuzzer(cores=4))
print(log_dict)
"""
class TestFuzzingWrapper(unittest.TestCase):
def test_wrong_qemu_invocation(self, ):
if os.path.exists("afl_out"):
shutil.rmtree("afl_out")
aflgcc = sh.Command("afl-gcc")
aflgcc("test/mock_data/input_mock/jpg_binary/main.c", "-o", "test/mock_data/input_mock/jpg_binary/main")
fuzzer_args = ["-Q", "-i", "test/mock_data/mock_seeds", "-o", "afl_out", "--",
"test/mock_data/input_mock/jpg_binary/main", "@@"]
self.assertEqual(
configfinder.fuzzer_wrapper.afl_fuzz_wrapper(fuzzer_args, "test/mock_data/input_mock/jpg_binary/main",
fuzz_duration=6), True)
self.assertEqual(os.path.exists("afl_out/fuzzer_stats"), True)
shutil.rmtree("afl_out")
def test_wrong_nonqemu_invocation(self, ):
if os.path.exists("afl_out"):
shutil.rmtree("afl_out")
gcc = sh.Command("gcc")
command = gcc(
["test/mock_data/input_mock/jpg_binary/main.c", "-o", "test/mock_data/input_mock/jpg_binary/main"],
_out=sys.stdout)
fuzzer_args = ["-i", "test/mock_data/mock_seeds", "-o", "afl_out", "--",
"test/mock_data/input_mock/jpg_binary/main", "@@"]
self.assertEqual(
configfinder.fuzzer_wrapper.afl_fuzz_wrapper(fuzzer_args, "test/mock_data/input_mock/jpg_binary/main",
fuzz_duration=6), True)
self.assertEqual(os.path.exists("afl_out/fuzzer_stats"), True)
shutil.rmtree("afl_out")
def test_fuzzer_normal(self):
volume_path = "test/test_output_volume"
name = "test_package"
shutil.rmtree(volume_path, ignore_errors=True)
os.makedirs(os.path.join(os.path.join(volume_path, name), "main/"))
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "mockuuid"
configfinder.fuzzer_wrapper.prepare_and_start_fuzzer(parameter=None,
seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main",
package=name, volume_path=volume_path,
afl_config_file_name="main.afl_config",
fuzz_duration=10)
with open(os.path.join(os.path.join(volume_path, name), "main.afl_config")) as testaflfp:
aflconfigdict = json.load(testaflfp)
self.assertEqual(aflconfigdict["afl_out_dir"],
"test/test_output_volume/test_package/main/afl_fuzz_mockuuid")
self.assertTrue(os.path.exists(aflconfigdict["afl_out_dir"]))
shutil.rmtree(volume_path, ignore_errors=True)
def test_fuzzer_minimized(self):
volume_path = "test/test_output_volume"
name = "main"
shutil.rmtree(volume_path, ignore_errors=True)
os.makedirs(os.path.join(os.path.join(volume_path, name), "main/"))
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "mockuuidmin"
m = minimzer.minize(parameter="@@", seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main", package=None,
volume_path=volume_path, afl_config_file_name="main.afl_config", tmin_total_time=1000)
uuidmock.return_value = "mockuuid"
configfinder.fuzzer_wrapper.prepare_and_start_fuzzer(parameter="@@",
seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main",
package=None, volume_path=volume_path,
afl_config_file_name="main.afl_config",
fuzz_duration=10)
with open(os.path.join(os.path.join(volume_path, name), "main.afl_config")) as testaflfp:
aflconfigdict = json.load(testaflfp)
self.assertEqual(aflconfigdict["afl_out_dir"],
os.path.join(volume_path, name, "main/afl_fuzz_mockuuid"))
self.assertTrue(os.path.exists(aflconfigdict["afl_out_dir"]))
shutil.rmtree(volume_path, ignore_errors=True)
def test_fuzzer_resume(self):
volume_path = "test/test_output_volume"
name = "test_package"
shutil.rmtree(volume_path, ignore_errors=True)
os.makedirs(os.path.join(os.path.join(volume_path, name), "main/"))
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "mockuuid"
configfinder.fuzzer_wrapper.prepare_and_start_fuzzer(parameter="@@",
seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main",
package=name, volume_path=volume_path,
afl_config_file_name="main.afl_config",
fuzz_duration=15, timeout=1500.0)
with open(os.path.join(os.path.join(volume_path, name), "main.afl_config")) as testaflfp:
aflconfigdict = json.load(testaflfp)
self.assertEqual(aflconfigdict["afl_out_dir"],
"test/test_output_volume/test_package/main/afl_fuzz_mockuuid")
self.assertTrue(os.path.exists(aflconfigdict["afl_out_dir"]))
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "resume"
configfinder.fuzzer_wrapper.resume_fuzzer("test/test_output_volume/test_package/main/afl_fuzz_mockuuid",
binary_path="test/mock_data/input_mock/jpg_binary/main",
parameter="@@", timeout=1500.0, fuzz_duration=10)
shutil.rmtree(volume_path, ignore_errors=True)
def test_fuzzer_minimized_failed(self):
volume_path = "test/test_output_volume"
name = "main"
shutil.rmtree(volume_path, ignore_errors=True)
os.makedirs(os.path.join(os.path.join(volume_path, name), "main/"))
with mock.patch("uuid.uuid4") as uuidmock:
uuidmock.return_value = "mockuuidmin"
m = minimzer.minize(parameter="@@", seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main", package=None,
volume_path=volume_path, afl_config_file_name="main.afl_config", tmin_total_time=1000)
uuidmock.return_value = "mockuuid"
for file in os.listdir(os.path.join(volume_path, name, "main/afl_tmin_mockuuidmin/")):
with open(os.path.join(os.path.join(volume_path, name, "main/afl_tmin_mockuuidmin/", file)),
"w"):
pass
# shutil.rmtree(os.path.join(volume_path,name,"main/afl_tmin_mockuuidmin/"))
configfinder.fuzzer_wrapper.prepare_and_start_fuzzer(parameter=None,
seeds_dir="test/mock_data/mock_seeds/jpg_samples",
binary_path="test/mock_data/input_mock/jpg_binary/main",
package=None, volume_path=volume_path,
afl_config_file_name="main.afl_config",
fuzz_duration=10)
# with open(os.path.join(os.path.join(volume_path, name), "main.afl_config")) as testaflfp:
# aflconfigdict = json.load(testaflfp)
# self.assertEqual(aflconfigdict["afl_out_dir"],
# os.path.join(volume_path, name, "main/afl_fuzz_mockuuid"))
# self.assertTrue(os.path.exists(aflconfigdict["afl_out_dir"]))
shutil.rmtree(volume_path, ignore_errors=True)
"""
| 61.123711
| 198
| 0.595126
| 1,351
| 11,858
| 4.928942
| 0.11399
| 0.066076
| 0.048656
| 0.0434
| 0.814987
| 0.78225
| 0.759874
| 0.759874
| 0.759874
| 0.74816
| 0
| 0.007698
| 0.29887
| 11,858
| 194
| 199
| 61.123711
| 0.79324
| 0.018975
| 0
| 0.280702
| 0
| 0
| 0.157788
| 0.096089
| 0
| 0
| 0
| 0
| 0.052632
| 1
| 0.070175
| false
| 0
| 0.22807
| 0
| 0.315789
| 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3f7a298261c96532b28c2a43e1fbbc4c431d4a60
| 145
|
py
|
Python
|
python/confuse-newbs-with-bad-python.py
|
vcokltfre/examples-of-horrible-code
|
a856f03592afa9fd00e2f7349d05c9dc5dd1449b
|
[
"MIT"
] | null | null | null |
python/confuse-newbs-with-bad-python.py
|
vcokltfre/examples-of-horrible-code
|
a856f03592afa9fd00e2f7349d05c9dc5dd1449b
|
[
"MIT"
] | null | null | null |
python/confuse-newbs-with-bad-python.py
|
vcokltfre/examples-of-horrible-code
|
a856f03592afa9fd00e2f7349d05c9dc5dd1449b
|
[
"MIT"
] | null | null | null |
print("Take an umbrella") if __import__("re").match(r"^y$", input("Is it raining? "), __import__("re").IGNORECASE) else print("Have a nice day")
| 72.5
| 144
| 0.682759
| 23
| 145
| 3.956522
| 0.869565
| 0.175824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 145
| 1
| 145
| 145
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0.365517
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
3f9b44ed051aad9e930ad59af5d8db1eaed5f11a
| 1,484
|
py
|
Python
|
catalyst/data/cv/mixins/tests/test_mixin.py
|
elephantmipt/catalyst
|
6c706e4859ed7c58e5e6a5b7634176bffd0e2465
|
[
"Apache-2.0"
] | 2
|
2019-04-19T21:34:31.000Z
|
2019-05-02T22:50:25.000Z
|
catalyst/data/cv/mixins/tests/test_mixin.py
|
elephantmipt/catalyst
|
6c706e4859ed7c58e5e6a5b7634176bffd0e2465
|
[
"Apache-2.0"
] | null | null | null |
catalyst/data/cv/mixins/tests/test_mixin.py
|
elephantmipt/catalyst
|
6c706e4859ed7c58e5e6a5b7634176bffd0e2465
|
[
"Apache-2.0"
] | 1
|
2020-12-02T18:42:31.000Z
|
2020-12-02T18:42:31.000Z
|
from catalyst import utils
from catalyst.data.cv import BlurMixin, FlareMixin, RotateMixin
jpg_rgb_uri = (
"https://raw.githubusercontent.com/catalyst-team/catalyst-pics/master"
"/test_images/catalyst_icon.jpg"
)
image = utils.imread(jpg_rgb_uri)
def test_blur_mixin():
"""@TODO: Docs. Contribution is welcome."""
global image
image_dump = image.copy()
mixin = BlurMixin()
input = {"image": image_dump} # noqa: WPS125
output = mixin(input)
assert mixin.input_key in output
assert mixin.output_key in output
assert output[mixin.input_key].shape == image_dump.shape
assert 0 <= output[mixin.output_key] < mixin.blur_max
def test_flare_mixin():
"""@TODO: Docs. Contribution is welcome."""
global image
image_dump = image.copy()
mixin = FlareMixin()
input = {"image": image_dump} # noqa: WPS125
output = mixin(input)
assert mixin.input_key in output
assert mixin.output_key in output
assert output[mixin.input_key].shape == image_dump.shape
assert 0 <= output[mixin.output_key]
def test_rotate_mixin():
"""@TODO: Docs. Contribution is welcome."""
global image
image_dump = image.copy()
mixin = RotateMixin()
input = {"image": image_dump} # noqa: WPS125
output = mixin(input)
assert mixin.input_key in output
assert mixin.output_key in output
assert output[mixin.input_key].shape == image_dump.shape
assert 0 <= output[mixin.output_key] < 8
| 25.586207
| 74
| 0.690027
| 198
| 1,484
| 5
| 0.242424
| 0.081818
| 0.084848
| 0.10303
| 0.724242
| 0.724242
| 0.724242
| 0.724242
| 0.724242
| 0.724242
| 0
| 0.010924
| 0.198113
| 1,484
| 57
| 75
| 26.035088
| 0.821008
| 0.1031
| 0
| 0.567568
| 0
| 0
| 0.086062
| 0.022848
| 0
| 0
| 0
| 0.017544
| 0.324324
| 1
| 0.081081
| false
| 0
| 0.054054
| 0
| 0.135135
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3fc7d37cd2ac1afdb041c9e6cfef71731f7f6388
| 295
|
py
|
Python
|
src/git_repo_language_trends/_internal/tests/test_progress.py
|
Enselic/git-repo-language-trend
|
b701138a85f7c7b4e3cde5f6cd29b6d006b493cf
|
[
"MIT"
] | 1
|
2021-07-27T12:08:52.000Z
|
2021-07-27T12:08:52.000Z
|
src/git_repo_language_trends/_internal/tests/test_progress.py
|
Enselic/git-repo-language-trend
|
b701138a85f7c7b4e3cde5f6cd29b6d006b493cf
|
[
"MIT"
] | 5
|
2021-01-24T10:18:26.000Z
|
2021-07-02T09:48:00.000Z
|
src/git_repo_language_trends/_internal/tests/test_progress.py
|
Enselic/git-repo-language-trends
|
b701138a85f7c7b4e3cde5f6cd29b6d006b493cf
|
[
"MIT"
] | null | null | null |
from ..progress import padded_progress
def test_padding():
assert padded_progress(2, 5) == "2/5"
assert padded_progress(2, 50) == " 2/50"
assert padded_progress(2, 500) == " 2/500"
assert padded_progress(20, 500) == " 20/500"
assert padded_progress(200, 500) == "200/500"
| 29.5
| 49
| 0.654237
| 43
| 295
| 4.325581
| 0.325581
| 0.451613
| 0.537634
| 0.33871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168067
| 0.19322
| 295
| 9
| 50
| 32.777778
| 0.613445
| 0
| 0
| 0
| 0
| 0
| 0.098305
| 0
| 0
| 0
| 0
| 0
| 0.714286
| 1
| 0.142857
| true
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3fd577e70501c8dd9d0f21f44b022e77e0d7393d
| 5,954
|
py
|
Python
|
data_augmentation/models.py
|
shikisawamura/nnabla-examples
|
baf4e4cc620dedbf4368683325c0fb868676850d
|
[
"Apache-2.0"
] | null | null | null |
data_augmentation/models.py
|
shikisawamura/nnabla-examples
|
baf4e4cc620dedbf4368683325c0fb868676850d
|
[
"Apache-2.0"
] | null | null | null |
data_augmentation/models.py
|
shikisawamura/nnabla-examples
|
baf4e4cc620dedbf4368683325c0fb868676850d
|
[
"Apache-2.0"
] | 1
|
2020-04-25T06:11:28.000Z
|
2020-04-25T06:11:28.000Z
|
# Copyright (c) 2019 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from args import get_args
import nnabla as nn
import nnabla.communicators as C
from nnabla.ext_utils import get_extension_context
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import numpy as np
def categorical_error(pred, label):
"""
Compute categorical error given score vectors and labels as
numpy.ndarray.
"""
pred_label = pred.argmax(1)
return (pred_label != label.flat).mean()
def resnet18_prediction(image, test=False, ncls=10, nmaps=64, act=F.relu):
"""
Construct ResNet 18
"""
# Residual Unit
def res_unit(x, nmap_out, scope_name, stride=1):
nmap_in = x.shape[1]
with nn.parameter_scope(scope_name):
# Conv -> BN -> Nonlinear
with nn.parameter_scope("conv1"):
h = PF.convolution(x, nmap_out, kernel=(3, 3), pad=(1, 1),
with_bias=False, stride=(stride, stride))
h = PF.batch_normalization(h, batch_stat=not test)
h = act(h)
# Conv -> BN -> Nonlinear
with nn.parameter_scope("conv2"):
h = PF.convolution(h, nmap_out, kernel=(3, 3), pad=(1, 1),
with_bias=False)
h = PF.batch_normalization(h, batch_stat=not test)
# Conv -> BN
if nmap_in != nmap_out:
with nn.parameter_scope("conv3"):
x2 = PF.convolution(x, nmap_out, kernel=(1, 1), pad=(0, 0),
with_bias=False, stride=(stride, stride))
x2 = PF.batch_normalization(x2, batch_stat=not test)
else:
x2 = x
# Residual -> Nonlinear
h = act(F.add2(h, x2))
return h
# Conv -> BN -> Nonlinear
with nn.parameter_scope("conv1"):
h = PF.convolution(image, nmaps, kernel=(3, 3),
pad=(1, 1), with_bias=False)
h = PF.batch_normalization(h, batch_stat=not test)
h = act(h)
h = res_unit(h, nmaps, "conv2-1", 1) # -> 32x32
h = res_unit(h, nmaps, "conv2-2", 1) # -> 32x32
h = res_unit(h, nmaps*2, "conv3-1", 2) # -> 16x16
h = res_unit(h, nmaps*2, "conv3-2", 1) # -> 16x16
h = res_unit(h, nmaps*4, "conv4-1", 2) # -> 8x8
h = res_unit(h, nmaps*4, "conv4-2", 1) # -> 8x8
h = res_unit(h, nmaps*8, "conv5-1", 2) # -> 4x4
h = res_unit(h, nmaps*8, "conv5-2", 1) # -> 4x4
h = F.average_pooling(h, kernel=(4, 4)) # -> 1x1
h = PF.affine(h, 1000, name="bottleneck") # -> 1x1000
h = act(h)
pred = PF.affine(h, ncls)
return pred
def resnet34_prediction(image, test=False, ncls=10, nmaps=64, act=F.relu):
"""
Construct ResNet 34
"""
# Residual Unit
def res_unit(x, nmap_out, scope_name, stride=1):
nmap_in = x.shape[1]
with nn.parameter_scope(scope_name):
# Conv -> BN -> Nonlinear
with nn.parameter_scope("conv1"):
h = PF.convolution(x, nmap_out, kernel=(3, 3), pad=(1, 1),
with_bias=False, stride=(stride, stride))
h = PF.batch_normalization(h, batch_stat=not test)
h = act(h)
# Conv -> BN -> Nonlinear
with nn.parameter_scope("conv2"):
h = PF.convolution(h, nmap_out, kernel=(3, 3), pad=(1, 1),
with_bias=False)
h = PF.batch_normalization(h, batch_stat=not test)
# Conv -> BN
if nmap_in != nmap_out:
with nn.parameter_scope("conv3"):
x2 = PF.convolution(x, nmap_out, kernel=(1, 1), pad=(0, 0),
with_bias=False, stride=(stride, stride))
x2 = PF.batch_normalization(x2, batch_stat=not test)
else:
x2 = x
# Residual -> Nonlinear
h = act(F.add2(h, x2))
return h
# Conv -> BN -> Nonlinear
with nn.parameter_scope("conv1"):
h = PF.convolution(image, nmaps, kernel=(3, 3),
pad=(1, 1), with_bias=False)
h = PF.batch_normalization(h, batch_stat=not test)
h = act(h)
h = res_unit(h, nmaps, "conv2-1", 1) # -> 32x32
h = res_unit(h, nmaps, "conv2-2", 1) # -> 32x32
h = res_unit(h, nmaps, "conv2-3", 1) # -> 32x32
h = res_unit(h, nmaps*2, "conv3-1", 2) # -> 16x16
h = res_unit(h, nmaps*2, "conv3-2", 1) # -> 16x16
h = res_unit(h, nmaps*2, "conv3-3", 1) # -> 16x16
h = res_unit(h, nmaps*2, "conv3-4", 1) # -> 16x16
h = res_unit(h, nmaps*4, "conv4-1", 2) # -> 8x8
h = res_unit(h, nmaps*4, "conv4-2", 1) # -> 8x8
h = res_unit(h, nmaps*4, "conv4-3", 1) # -> 8x8
h = res_unit(h, nmaps*4, "conv4-4", 1) # -> 8x8
h = res_unit(h, nmaps*4, "conv4-5", 1) # -> 8x8
h = res_unit(h, nmaps*4, "conv4-6", 1) # -> 8x8
h = res_unit(h, nmaps*8, "conv5-1", 2) # -> 4x4
h = res_unit(h, nmaps*8, "conv5-2", 1) # -> 4x4
h = res_unit(h, nmaps*8, "conv5-3", 1) # -> 4x4
h = F.average_pooling(h, kernel=(4, 4)) # -> 1x1
h = PF.affine(h, 1000, name="bottleneck") # -> 1x1000
h = act(h)
pred = PF.affine(h, ncls)
return pred
| 39.959732
| 81
| 0.545516
| 846
| 5,954
| 3.732861
| 0.1974
| 0.057631
| 0.060798
| 0.068398
| 0.728626
| 0.728626
| 0.728626
| 0.728626
| 0.721026
| 0.686194
| 0
| 0.065574
| 0.313571
| 5,954
| 148
| 82
| 40.22973
| 0.70712
| 0.194323
| 0
| 0.772277
| 0
| 0
| 0.048562
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049505
| false
| 0
| 0.09901
| 0
| 0.19802
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3fd71134dad8658b768cb71fea04b7a19873cb2d
| 39
|
py
|
Python
|
__init__.py
|
tintin10q/python_json_database_manager
|
691471dc443b8642a694ed98138f0a11ac157fc3
|
[
"MIT"
] | 1
|
2020-09-14T23:05:02.000Z
|
2020-09-14T23:05:02.000Z
|
__init__.py
|
tintin10q/python-json-database-manager
|
691471dc443b8642a694ed98138f0a11ac157fc3
|
[
"MIT"
] | 1
|
2021-09-18T12:32:58.000Z
|
2021-09-18T12:32:58.000Z
|
__init__.py
|
tintin10q/python_json_database_manager
|
691471dc443b8642a694ed98138f0a11ac157fc3
|
[
"MIT"
] | null | null | null |
from .database_manager import Database
| 19.5
| 38
| 0.871795
| 5
| 39
| 6.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3fe9f05e53cdb42df7294bca0afb160976542396
| 27
|
py
|
Python
|
teensy/memzip_files/boot.py
|
lurch/micropython
|
28dfbc2ba2ef41a7810e4e39290031eb2207a0a9
|
[
"MIT"
] | 1
|
2015-06-15T11:52:01.000Z
|
2015-06-15T11:52:01.000Z
|
teensy/memzip_files/boot.py
|
lurch/micropython
|
28dfbc2ba2ef41a7810e4e39290031eb2207a0a9
|
[
"MIT"
] | null | null | null |
teensy/memzip_files/boot.py
|
lurch/micropython
|
28dfbc2ba2ef41a7810e4e39290031eb2207a0a9
|
[
"MIT"
] | null | null | null |
print("Executing boot.py")
| 13.5
| 26
| 0.740741
| 4
| 27
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 27
| 1
| 27
| 27
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
b76f80b1506d021d67f2666d6d260cb0122383d9
| 201
|
py
|
Python
|
Day26/Value_Sum_is_greater_than_Keys_Sum.py
|
tushartrip1010/100_days_code_py
|
ee74b429e98cdd8bdf8661cf987da67c9fee5a3e
|
[
"Apache-2.0"
] | null | null | null |
Day26/Value_Sum_is_greater_than_Keys_Sum.py
|
tushartrip1010/100_days_code_py
|
ee74b429e98cdd8bdf8661cf987da67c9fee5a3e
|
[
"Apache-2.0"
] | null | null | null |
Day26/Value_Sum_is_greater_than_Keys_Sum.py
|
tushartrip1010/100_days_code_py
|
ee74b429e98cdd8bdf8661cf987da67c9fee5a3e
|
[
"Apache-2.0"
] | null | null | null |
def Values_Sum_Greater(Test_Dict):
return sum(list(Test_Dict.keys())) < sum(list(Test_Dict.values()))
Test_Dict = {5: 3, 1: 3, 10: 4, 7: 3, 8: 1, 9: 5}
print(Values_Sum_Greater(Test_Dict))
| 28.714286
| 71
| 0.656716
| 37
| 201
| 3.324324
| 0.486486
| 0.325203
| 0.260163
| 0.325203
| 0.390244
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077381
| 0.164179
| 201
| 6
| 72
| 33.5
| 0.654762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.25
| 0.5
| 0.25
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
b7851659f187f1c885804ccea33d38e781d8fbe0
| 255
|
py
|
Python
|
sparse_decomposition/__init__.py
|
bdpedigo/sparse_matrix_analysis
|
dbdff69b8ec56f60ba96b723a616f442755eacda
|
[
"MIT"
] | 2
|
2021-03-18T14:51:52.000Z
|
2021-03-18T16:05:55.000Z
|
sparse_decomposition/__init__.py
|
bdpedigo/sparse_matrix_analysis
|
dbdff69b8ec56f60ba96b723a616f442755eacda
|
[
"MIT"
] | 1
|
2021-03-18T05:08:25.000Z
|
2021-03-18T16:17:05.000Z
|
sparse_decomposition/__init__.py
|
bdpedigo/sparse_matrix_analysis
|
dbdff69b8ec56f60ba96b723a616f442755eacda
|
[
"MIT"
] | null | null | null |
__author__ = "Benjamin Pedigo"
__email__ = "benjamindpedigo@gmail.com"
__version__ = "0.1.0"
import sparse_decomposition.utils
from sparse_decomposition import *
import sparse_decomposition.decomposition
from sparse_decomposition.decomposition import *
| 25.5
| 48
| 0.835294
| 28
| 255
| 7.035714
| 0.535714
| 0.385787
| 0.253807
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012987
| 0.094118
| 255
| 9
| 49
| 28.333333
| 0.839827
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 0.098039
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.571429
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b7d8f3a5daf398f079cc5bd8adbe788a48e700bb
| 13,238
|
py
|
Python
|
kddirkit/config/args.py
|
JohannesLiu/HNRE-Pytorch
|
395f026e54e02a631db522a828b1e017ffca6e59
|
[
"MIT"
] | 1
|
2021-03-03T14:06:45.000Z
|
2021-03-03T14:06:45.000Z
|
kddirkit/config/args.py
|
JohannesLiu/HNRE-Pytorch
|
395f026e54e02a631db522a828b1e017ffca6e59
|
[
"MIT"
] | null | null | null |
kddirkit/config/args.py
|
JohannesLiu/HNRE-Pytorch
|
395f026e54e02a631db522a828b1e017ffca6e59
|
[
"MIT"
] | null | null | null |
import datetime
import json
import os
import pickle
import sys
import time
import torch
import math
import argparse
class Parser(object):
def __init__(self, config_path, model , is_training = None):
self.config = json.loads(open(config_path,'r').read())
self.is_training = is_training
self.model = model
self._trainParser = argparse.ArgumentParser(description ="training-" + model)
self._testParser = argparse.ArgumentParser(description ="testing-" + model)
self._oneParser = argparse.ArgumentParser(description ="one-" + model)
if self.is_training == True:
self.reset_train_parser()
elif self.is_training == False:
self.reset_test_parser()
else :
self.reset_one_parser()
@property
def trainParser(self):
return self._trainParser
@property
def testParser(self):
return self._testParser
@property
def oneParser(self):
return self._oneParser
def reset_train_parser(self):
# training
self._trainParser.add_argument('--model', help='neural models to encode sentences', type=str,
default=self.model)
self._trainParser.add_argument('--use_baseline', help='baseline or hier', type=bool, default=False)
self._trainParser.add_argument('--mode', help='test mode', type=str, default='pr')
self._trainParser.add_argument('--gpu', help='gpu(s) to use', type=str, default='0')
self._trainParser.add_argument('--no_cuda', action='store_true', default=False,
help='disables CUDA training')
self._trainParser.add_argument('--data_path', help ='path to load data', type=str, default='./data/')
self._trainParser.add_argument('--model_dir', help ='path to store model', type= str, default ='./outputs/ckpt/')
self._trainParser.add_argument('--summary_dir', help ='path to store summary_dir', type=str, default='./outputs/summary')
self._trainParser.add_argument('--batch_size', help ='entity numbers used each training time', type= int, default= 160)
self._trainParser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
self._trainParser.add_argument('--max_epoch', help='maximum of training epochs', type=int, default= 40)
self._trainParser.add_argument('--save_epoch', help='frequency of training epochs', type=int, default=2)
self._trainParser.add_argument('--restore_epoch', help='epoch to continue training', type=int, default=0)
self._trainParser.add_argument('--learning_rate', help='learning rate', type=float, default=0.2)
self._trainParser.add_argument('--weight_decay', help='weight_decay', type=float, default=0.00001)
self._trainParser.add_argument('--keep_prob', help='dropout rate', type=float, default=0.5)
self._trainParser.add_argument('--word_size', help='maximum of relations', type=int, default=self.config['word_size'])
self._trainParser.add_argument('--hidden_size', help='hidden feature size', type=int, default=230)
self._trainParser.add_argument('--pos_size', help='position embedding size', type=int, default=5)
# statistics
self._trainParser.add_argument('--max_length', help='maximum of number of words in one sentence', type=int,
default=self.config['fixlen'])
self._trainParser.add_argument('--pos_num', help='number of position embedding vectors', type=int,
default=self.config['maxlen']*2 +1)
self._trainParser.add_argument('--num_classes', help='maximum of relations', type=int,
default=len(self.config['relation2id']))
self._trainParser.add_argument('--vocabulary_size', help='maximum of relations', type=int,
default=len(self.config['word2id']))
def reset_test_parser(self):
# test_settings
self._testParser.add_argument('--model', help='neural models to encode sentences', type=str, default=self.model)
self._testParser.add_argument('--use_baseline', help='baseline or hier', type=bool, default=False)
self._testParser.add_argument('--mode', help='test mode', type=str, default='pr')
self._testParser.add_argument('--gpu', help='gpu(s) to use', type=str, default='0')
self._testParser.add_argument('--no_cuda', action='store_true', default=False,
help='disables CUDA training')
self._testParser.add_argument('--allow_growth', help='occupying gpu(s) gradually', type=bool, default=True)
self._testParser.add_argument('--checkpoint_path', help='path to store model', type=str, default='./outputs/ckpt/')
self._testParser.add_argument('--logits_path', help='path to store model', type=str, default='./outputs/logits/')
self._testParser.add_argument('--data_path', help='path to load data', type=str, default='./data/')
self._testParser.add_argument('--batch_size',
help='instance(entity pair) numbers to use each training(testing) time', type=int,
default=262)
self._testParser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# training settings
self._testParser.add_argument('--max_epoch', help='maximum of training epochs', type=int, default=30)
self._testParser.add_argument('--save_epoch', help='frequency of training epochs', type=int, default=2)
self._testParser.add_argument('--learning_rate', help='entity numbers used each training time', type=float,
default=0.2)
self._testParser.add_argument('--weight_decay', help='weight_decay', type=float, default=0.00001)
self._testParser.add_argument('--keep_prob', help='dropout rate', type=float, default=1.0)
# test_settings
self._testParser.add_argument('--test_single', help='only test one checkpoint', type=bool, default=True)
self._testParser.add_argument('--test_start_ckpt', help='first epoch to test', type=int, default=1)
self._testParser.add_argument('--test_end_ckpt', help='last epoch to test', type=int, default=30)
self._testParser.add_argument('--test_sleep', help='time units to sleep ', type=float, default=10)
self._testParser.add_argument('--test_use_step', help='test step instead of epoch', type=bool, default=False)
self._testParser.add_argument('--test_start_step', help='first step to test', type=int, default=0 * 1832)
self._testParser.add_argument('--test_end_step', help='last step to test', type=int, default=30 * 1832)
self._testParser.add_argument('--test_step', help='step to add per test', type=int, default=1832)
# parameters
# self._testParser.add_argument('--word_size', help='maximum of relations', type=int, default=self.config['word_size'])
self._testParser.add_argument('--word_size', help='maximum of relations', type=int, default=50)
self._testParser.add_argument('--hidden_size', help='hidden feature size', type=int, default=230)
self._testParser.add_argument('--pos_size', help='position embedding size', type=int, default=5)
# statistics
self._testParser.add_argument('--max_length', help='maximum of number of words in one sentence', type=int,
default=self.config['fixlen'])
self._testParser.add_argument('--pos_num', help='number of position embedding vectors', type=int,
default=self.config['maxlen']*2+1)
self._testParser.add_argument('--num_classes', help='maximum of relations', type=int,
default=len(self.config['relation2id']))
self._testParser.add_argument('--vocabulary_size', help='maximum of relations', type=int,
default=len(self.config['word2id']))
def reset_one_parser(self):
#traning
# overall
self._oneParser.add_argument('--model', help='neural models to encode sentences', type=str,
default=self.model)
self._oneParser.add_argument('--use_baseline', help='baseline or hier', type=bool, default=False)
self._oneParser.add_argument('--mode', help='test mode', type=str, default='pr')
self._oneParser.add_argument('--gpu', help='gpu(s) to use', type=str, default='0')
self._oneParser.add_argument('--no_cuda', action='store_true', default=False,
help='disables CUDA training')
self._oneParser.add_argument('--allow_growth', help='occupying gpu(s) gradually', type=bool, default=True)
self._oneParser.add_argument('--data_path', help ='path to load data', type=str, default='./data/')
self._oneParser.add_argument('--model_dir', help ='path to store model', type= str, default ='./outputs/ckpt/')
self._oneParser.add_argument('--summary_dir', help ='path to store summary_dir', type=str, default='./outputs/summary')
self._oneParser.add_argument('--training_batch_size', help ='entity numbers used each training time', type= int, default= 160)
self._oneParser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
self._oneParser.add_argument('--layer_pattern', help='default, ag-0, ag-1, ag-2', type=str, default='default')
# training
self._oneParser.add_argument('--max_epoch', help='maximum of training epochs', type=int, default= 80)
self._oneParser.add_argument('--save_epoch', help='frequency of training epochs', type=int, default=2)
self._oneParser.add_argument('--restore_epoch', help='epoch to continue training', type=int, default=0)
self._oneParser.add_argument('--learning_rate', help='learning rate', type=float, default=0.2)
self._oneParser.add_argument('--weight_decay', help='weight_decay', type=float, default=0.00001)
self._oneParser.add_argument('--keep_prob', help='dropout rate', type=float, default=0.5)
# parameters
self._oneParser.add_argument('--word_size', help='maximum of relations', type=int, default=self.config['word_size'])
self._oneParser.add_argument('--hidden_size', help='hidden feature size', type=int, default=230)
self._oneParser.add_argument('--pos_size', help='position embedding size', type=int, default=5)
self._oneParser.add_argument('--losses', help='loss_function', type=str, default='cross_entropy')
# statistics
self._oneParser.add_argument('--max_length', help='maximum of number of words in one sentence', type=int,
default=self.config['fixlen'])
self._oneParser.add_argument('--pos_num', help='number of position embedding vectors', type=int,
default=self.config['maxlen']*2 +1)
self._oneParser.add_argument('--num_classes', help='maximum of relations', type=int,
default=len(self.config['relation2id']))
self._oneParser.add_argument('--vocabulary_size', help='maximum of relations', type=int,
default=len(self.config['word2id']))
#testing
#overall
self._oneParser.add_argument('--checkpoint_path', help='path to store model', type=str, default='./outputs/ckpt/')
self._oneParser.add_argument('--logits_path', help='path to store model', type=str, default='./outputs/logits/')
self._oneParser.add_argument('--testing_batch_size',
help='instance(entity pair) numbers to use each training(testing) time', type=int,
default=262)
# test_settings
self._oneParser.add_argument('--test_single', help='only test one checkpoint', type=bool, default=True)
self._oneParser.add_argument('--test_start_ckpt', help='first epoch to test', type=int, default=1)
self._oneParser.add_argument('--test_end_ckpt', help='last epoch to test', type=int, default=30)
self._oneParser.add_argument('--test_sleep', help='time units to sleep ', type=float, default=10)
self._oneParser.add_argument('--test_use_step', help='test step instead of epoch', type=bool, default=False)
self._oneParser.add_argument('--test_start_step', help='first step to test', type=int, default=0 * 1832)
self._oneParser.add_argument('--test_end_step', help='last step to test', type=int, default=30 * 1832)
self._oneParser.add_argument('--test_step', help='step to add per test', type=int, default=1832)
if __name__=="__main__":
args = Parser("./data/config", "trials")
trainParser = args.trainParser
testParser = args.testParser
oneParser = args.oneParser
for key in args.__dict__:
print(f"{key}:{args.__dict__[key]}")
| 65.211823
| 134
| 0.6504
| 1,631
| 13,238
| 5.072961
| 0.108522
| 0.122311
| 0.079526
| 0.107324
| 0.828741
| 0.777133
| 0.754532
| 0.754532
| 0.737249
| 0.737249
| 0
| 0.012512
| 0.209095
| 13,238
| 203
| 135
| 65.211823
| 0.777746
| 0.021076
| 0
| 0.170886
| 0
| 0
| 0.275825
| 0.003631
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044304
| false
| 0
| 0.056962
| 0.018987
| 0.126582
| 0.006329
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4d2e52cf20bd45fe23ce222bc39a726d4846467b
| 162
|
py
|
Python
|
ProgramFlow/immutable.py
|
kumarvgit/python3
|
318c5e7503fafc9c60082fa123e2930bd82a4ec9
|
[
"MIT"
] | null | null | null |
ProgramFlow/immutable.py
|
kumarvgit/python3
|
318c5e7503fafc9c60082fa123e2930bd82a4ec9
|
[
"MIT"
] | null | null | null |
ProgramFlow/immutable.py
|
kumarvgit/python3
|
318c5e7503fafc9c60082fa123e2930bd82a4ec9
|
[
"MIT"
] | null | null | null |
result = True
another_result = result
print(id(result))
print(id(another_result))
# bool is immutable
result = False
print(id(result))
print(id(another_result))
| 16.2
| 25
| 0.765432
| 24
| 162
| 5.041667
| 0.375
| 0.231405
| 0.322314
| 0.297521
| 0.545455
| 0.545455
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 162
| 9
| 26
| 18
| 0.840278
| 0.104938
| 0
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.571429
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
4d4014be116468bafccaa2440d62be5cafec9cc6
| 13,683
|
py
|
Python
|
tests/projects/test_kubernetes.py
|
PeterSulcs/mlflow
|
14c48e7bb1ca6cd6a3c1b249a486cd98bd5e7051
|
[
"Apache-2.0"
] | 10,351
|
2018-07-31T02:52:49.000Z
|
2022-03-31T23:33:13.000Z
|
tests/projects/test_kubernetes.py
|
PeterSulcs/mlflow
|
14c48e7bb1ca6cd6a3c1b249a486cd98bd5e7051
|
[
"Apache-2.0"
] | 3,733
|
2018-07-31T01:38:51.000Z
|
2022-03-31T23:56:25.000Z
|
tests/projects/test_kubernetes.py
|
PeterSulcs/mlflow
|
14c48e7bb1ca6cd6a3c1b249a486cd98bd5e7051
|
[
"Apache-2.0"
] | 2,596
|
2018-07-31T06:38:39.000Z
|
2022-03-31T23:56:32.000Z
|
import yaml
import pytest
from unittest import mock
import kubernetes
from kubernetes.config.config_exception import ConfigException
from mlflow.projects import kubernetes as kb
from mlflow.exceptions import ExecutionException
from mlflow.entities import RunStatus
def test_run_command_creation(): # pylint: disable=unused-argument
"""
Tests command creation.
"""
command = [
"python train.py --alpha 0.5 --l1-ratio 0.1",
"--comment 'foo bar'",
'--comment-bis "bar foo"',
]
command = kb._get_run_command(command)
assert [
"python",
"train.py",
"--alpha",
"0.5",
"--l1-ratio",
"0.1",
"--comment",
"'foo bar'",
"--comment-bis",
"'bar foo'",
] == command
def test_valid_kubernetes_job_spec(): # pylint: disable=unused-argument
"""
Tests job specification for Kubernetes.
"""
custom_template = yaml.safe_load(
"apiVersion: batch/v1\n"
"kind: Job\n"
"metadata:\n"
" name: pi-with-ttl\n"
"spec:\n"
" ttlSecondsAfterFinished: 100\n"
" template:\n"
" spec:\n"
" containers:\n"
" - name: pi\n"
" image: perl\n"
" command: ['perl', '-Mbignum=bpi', '-wle']\n"
" env: \n"
" - name: DUMMY\n"
' value: "test_var"\n'
" restartPolicy: Never\n"
)
project_name = "mlflow-docker-example"
image_tag = "image_tag"
image_digest = "5e74a5a"
command = ["mlflow", "run", ".", "--no-conda", "-P", "alpha=0.5"]
env_vars = {"RUN_ID": "1"}
job_definition = kb._get_kubernetes_job_definition(
project_name=project_name,
image_tag=image_tag,
image_digest=image_digest,
command=command,
env_vars=env_vars,
job_template=custom_template,
)
container_spec = job_definition["spec"]["template"]["spec"]["containers"][0]
assert container_spec["name"] == project_name
assert container_spec["image"] == image_tag + "@" + image_digest
assert container_spec["command"] == command
assert 2 == len(container_spec["env"])
assert container_spec["env"][0]["name"] == "DUMMY"
assert container_spec["env"][0]["value"] == "test_var"
assert container_spec["env"][1]["name"] == "RUN_ID"
assert container_spec["env"][1]["value"] == "1"
def test_run_kubernetes_job():
active_run = mock.Mock()
project_name = "mlflow-docker-example"
image_tag = "image_tag"
image_digest = "5e74a5a"
command = ["python train.py --alpha 0.5 --l1-ratio 0.1"]
env_vars = {"RUN_ID": "1"}
kube_context = "docker-for-desktop"
job_template = yaml.safe_load(
"apiVersion: batch/v1\n"
"kind: Job\n"
"metadata:\n"
" name: pi-with-ttl\n"
" namespace: mlflow\n"
"spec:\n"
" ttlSecondsAfterFinished: 100\n"
" template:\n"
" spec:\n"
" containers:\n"
" - name: pi\n"
" image: perl\n"
" command: ['perl', '-Mbignum=bpi', '-wle']\n"
" restartPolicy: Never\n"
)
with mock.patch("kubernetes.config.load_kube_config") as kube_config_mock:
with mock.patch("kubernetes.client.BatchV1Api.create_namespaced_job") as kube_api_mock:
submitted_run_obj = kb.run_kubernetes_job(
project_name=project_name,
active_run=active_run,
image_tag=image_tag,
image_digest=image_digest,
command=command,
env_vars=env_vars,
job_template=job_template,
kube_context=kube_context,
)
assert submitted_run_obj._mlflow_run_id == active_run.info.run_id
assert submitted_run_obj._job_name.startswith(project_name)
assert submitted_run_obj._job_namespace == "mlflow"
assert kube_api_mock.call_count == 1
args = kube_config_mock.call_args_list
assert args[0][1]["context"] == kube_context
def test_run_kubernetes_job_current_kubecontext():
active_run = mock.Mock()
project_name = "mlflow-docker-example"
image_tag = "image_tag"
image_digest = "5e74a5a"
command = ["python train.py --alpha 0.5 --l1-ratio 0.1"]
env_vars = {"RUN_ID": "1"}
kube_context = None
job_template = yaml.safe_load(
"apiVersion: batch/v1\n"
"kind: Job\n"
"metadata:\n"
" name: pi-with-ttl\n"
" namespace: mlflow\n"
"spec:\n"
" ttlSecondsAfterFinished: 100\n"
" template:\n"
" spec:\n"
" containers:\n"
" - name: pi\n"
" image: perl\n"
" command: ['perl', '-Mbignum=bpi', '-wle']\n"
" restartPolicy: Never\n"
)
with mock.patch("kubernetes.config.load_kube_config") as kube_config_mock:
with mock.patch("kubernetes.config.load_incluster_config") as incluster_kube_config_mock:
with mock.patch("kubernetes.client.BatchV1Api.create_namespaced_job") as kube_api_mock:
submitted_run_obj = kb.run_kubernetes_job(
project_name=project_name,
active_run=active_run,
image_tag=image_tag,
image_digest=image_digest,
command=command,
env_vars=env_vars,
job_template=job_template,
kube_context=kube_context,
)
assert submitted_run_obj._mlflow_run_id == active_run.info.run_id
assert submitted_run_obj._job_name.startswith(project_name)
assert submitted_run_obj._job_namespace == "mlflow"
assert kube_api_mock.call_count == 1
assert kube_config_mock.call_count == 1
assert incluster_kube_config_mock.call_count == 0
def test_run_kubernetes_job_in_cluster():
active_run = mock.Mock()
project_name = "mlflow-docker-example"
image_tag = "image_tag"
image_digest = "5e74a5a"
command = ["python train.py --alpha 0.5 --l1-ratio 0.1"]
env_vars = {"RUN_ID": "1"}
kube_context = None
job_template = yaml.safe_load(
"apiVersion: batch/v1\n"
"kind: Job\n"
"metadata:\n"
" name: pi-with-ttl\n"
" namespace: mlflow\n"
"spec:\n"
" ttlSecondsAfterFinished: 100\n"
" template:\n"
" spec:\n"
" containers:\n"
" - name: pi\n"
" image: perl\n"
" command: ['perl', '-Mbignum=bpi', '-wle']\n"
" restartPolicy: Never\n"
)
with mock.patch("kubernetes.config.load_kube_config") as kube_config_mock:
kube_config_mock.side_effect = ConfigException()
with mock.patch("kubernetes.config.load_incluster_config") as incluster_kube_config_mock:
with mock.patch("kubernetes.client.BatchV1Api.create_namespaced_job") as kube_api_mock:
submitted_run_obj = kb.run_kubernetes_job(
project_name=project_name,
active_run=active_run,
image_tag=image_tag,
image_digest=image_digest,
command=command,
env_vars=env_vars,
job_template=job_template,
kube_context=kube_context,
)
assert submitted_run_obj._mlflow_run_id == active_run.info.run_id
assert submitted_run_obj._job_name.startswith(project_name)
assert submitted_run_obj._job_namespace == "mlflow"
assert kube_api_mock.call_count == 1
assert kube_config_mock.call_count == 1
assert incluster_kube_config_mock.call_count == 1
def test_push_image_to_registry():
image_uri = "dockerhub_account/mlflow-kubernetes-example"
with mock.patch("docker.from_env") as docker_mock:
client = mock.MagicMock()
docker_mock.return_value = client
kb.push_image_to_registry(image_uri)
assert client.images.push.call_count == 1
args = client.images.push.call_args_list
assert args[0][1]["repository"] == image_uri
def test_push_image_to_registry_handling_errors():
image_uri = "dockerhub_account/mlflow-kubernetes-example"
with pytest.raises(ExecutionException):
kb.push_image_to_registry(image_uri)
def test_submitted_run_get_status_killed():
mlflow_run_id = 1
job_name = "job-name"
job_namespace = "job-namespace"
with mock.patch("kubernetes.client.BatchV1Api.delete_namespaced_job") as kube_api_mock:
submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
submitted_run.cancel()
assert RunStatus.KILLED == submitted_run.get_status()
assert kube_api_mock.call_count == 1
args = kube_api_mock.call_args_list
assert args[0][1]["name"] == job_name
assert args[0][1]["namespace"] == job_namespace
def test_submitted_run_get_status_failed():
mlflow_run_id = 1
job_name = "job-name"
job_namespace = "job-namespace"
condition = kubernetes.client.models.V1JobCondition(type="Failed", status="True")
job_status = kubernetes.client.models.V1JobStatus(
active=1,
completion_time=None,
conditions=[condition],
failed=1,
start_time=1,
succeeded=None,
)
job = kubernetes.client.models.V1Job(status=job_status)
with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock:
kube_api_mock.return_value = job
submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
print("status", submitted_run.get_status())
assert RunStatus.FAILED == submitted_run.get_status()
assert kube_api_mock.call_count == 1
args = kube_api_mock.call_args_list
assert args[0][1]["name"] == job_name
assert args[0][1]["namespace"] == job_namespace
def test_submitted_run_get_status_succeeded():
mlflow_run_id = 1
job_name = "job-name"
job_namespace = "job-namespace"
condition = kubernetes.client.models.V1JobCondition(type="Complete", status="True")
job_status = kubernetes.client.models.V1JobStatus(
active=None,
completion_time=None,
conditions=[condition],
failed=None,
start_time=None,
succeeded=1,
)
job = kubernetes.client.models.V1Job(status=job_status)
with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock:
kube_api_mock.return_value = job
submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
print("status", submitted_run.get_status())
assert RunStatus.FINISHED == submitted_run.get_status()
assert kube_api_mock.call_count == 1
args = kube_api_mock.call_args_list
assert args[0][1]["name"] == job_name
assert args[0][1]["namespace"] == job_namespace
def test_submitted_run_get_status_running():
mlflow_run_id = 1
job_name = "job-name"
job_namespace = "job-namespace"
job_status = kubernetes.client.models.V1JobStatus(
active=1, completion_time=None, conditions=None, failed=1, start_time=1, succeeded=1
)
job = kubernetes.client.models.V1Job(status=job_status)
with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock:
kube_api_mock.return_value = job
submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
assert RunStatus.RUNNING == submitted_run.get_status()
assert kube_api_mock.call_count == 1
args = kube_api_mock.call_args_list
print(args)
assert args[0][1]["name"] == job_name
assert args[0][1]["namespace"] == job_namespace
def test_state_transitions():
mlflow_run_id = 1
job_name = "job-name"
job_namespace = "job-namespace"
submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock:
def set_return_value(**kwargs):
job_status = kubernetes.client.models.V1JobStatus(**kwargs)
kube_api_mock.return_value = kubernetes.client.models.V1Job(status=job_status)
set_return_value()
assert RunStatus.SCHEDULED == submitted_run.get_status()
set_return_value(start_time=1)
assert RunStatus.RUNNING == submitted_run.get_status()
set_return_value(start_time=1, failed=1)
assert RunStatus.RUNNING == submitted_run.get_status()
set_return_value(start_time=1, failed=1)
assert RunStatus.RUNNING == submitted_run.get_status()
set_return_value(start_time=1, failed=1, active=1)
assert RunStatus.RUNNING == submitted_run.get_status()
set_return_value(start_time=1, failed=1, succeeded=1)
assert RunStatus.RUNNING == submitted_run.get_status()
set_return_value(start_time=1, failed=1, succeeded=1, completion_time=2)
assert RunStatus.RUNNING == submitted_run.get_status()
condition = kubernetes.client.models.V1JobCondition(type="Complete", status="True")
set_return_value(
conditions=[condition], failed=1, start_time=1, completion_time=2, succeeded=1
)
assert RunStatus.FINISHED == submitted_run.get_status()
| 38.76204
| 99
| 0.63049
| 1,653
| 13,683
| 4.91712
| 0.096189
| 0.05315
| 0.031127
| 0.046506
| 0.839813
| 0.807825
| 0.784203
| 0.746924
| 0.722933
| 0.704355
| 0
| 0.015083
| 0.258642
| 13,683
| 352
| 100
| 38.872159
| 0.786179
| 0.009355
| 0
| 0.664537
| 0
| 0
| 0.202692
| 0.064039
| 0
| 0
| 0
| 0
| 0.166134
| 1
| 0.041534
| false
| 0
| 0.025559
| 0
| 0.067093
| 0.009585
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4d40a140614398fe72ca71d0417abb7a40aafafa
| 892
|
py
|
Python
|
code-metrics-dev/gerar_pipeline/generate_pipeline.py
|
clodonil/audit-aws-pipeline
|
44a41c63fc84096c2327bf6d34909dff1ca3fdab
|
[
"Apache-2.0"
] | null | null | null |
code-metrics-dev/gerar_pipeline/generate_pipeline.py
|
clodonil/audit-aws-pipeline
|
44a41c63fc84096c2327bf6d34909dff1ca3fdab
|
[
"Apache-2.0"
] | null | null | null |
code-metrics-dev/gerar_pipeline/generate_pipeline.py
|
clodonil/audit-aws-pipeline
|
44a41c63fc84096c2327bf6d34909dff1ca3fdab
|
[
"Apache-2.0"
] | null | null | null |
from templates.pipelines import pipeline_success, pipeline_faild
import tools
for y in range(2):
num_pipeline = 2
account = tools.generate_account()
pipeline = tools.generate_name()
execution_id = tools.generate_execution_id()
pipeline_id = tools.generate_execution_id()
region = 'us-east-1'
pipelines = pipeline_success(account, execution_id,pipeline,region, pipeline_id)
tools.save_sqs(pipelines,region)
#for y in range(1):
# num_pipeline = 2
# account = tools.generate_account()
# pipeline = tools.generate_name()
# execution_id = tools.generate_execution_id()
# pipeline_id = tools.generate_execution_id()
# region = 'us-east-1'
# pipelines = pipeline_faild(account, execution_id,pipeline,region, pipeline_id)
# tools.save_sqs(pipelines,region)
# print(pipeline)
#print_pipeline(pipelines)
| 33.037037
| 87
| 0.707399
| 109
| 892
| 5.522936
| 0.238532
| 0.172757
| 0.099668
| 0.159468
| 0.770764
| 0.770764
| 0.770764
| 0.770764
| 0.770764
| 0.770764
| 0
| 0.008333
| 0.192825
| 892
| 27
| 88
| 33.037037
| 0.827778
| 0.463004
| 0
| 0
| 1
| 0
| 0.01919
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4da19bef7c3c3cfa3c2a149adcd851c8bb2815ef
| 199
|
py
|
Python
|
compra/admin.py
|
cor14095/backGaresa
|
6dffcff513c1812a88315d16303b90996f6b98d7
|
[
"MIT"
] | null | null | null |
compra/admin.py
|
cor14095/backGaresa
|
6dffcff513c1812a88315d16303b90996f6b98d7
|
[
"MIT"
] | null | null | null |
compra/admin.py
|
cor14095/backGaresa
|
6dffcff513c1812a88315d16303b90996f6b98d7
|
[
"MIT"
] | 1
|
2021-08-09T00:55:17.000Z
|
2021-08-09T00:55:17.000Z
|
from .models import Purchase
from import_export.admin import ImportExportModelAdmin
from django.contrib import admin
@admin.register(Purchase)
class PurchaseAdmin(ImportExportModelAdmin):
pass
| 22.111111
| 54
| 0.839196
| 22
| 199
| 7.545455
| 0.590909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110553
| 199
| 8
| 55
| 24.875
| 0.937853
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.166667
| 0.666667
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
4dd3b389615242976ab11e19cdad84e07b541d7e
| 127
|
py
|
Python
|
qhal/quantum_simulators/__init__.py
|
abhishekagarwalnpl/QHAL-copy
|
b0dc496ba824b1545fb094e2462c044c8246846e
|
[
"Apache-2.0"
] | 16
|
2021-07-13T20:09:48.000Z
|
2022-01-06T12:07:53.000Z
|
qhal/quantum_simulators/__init__.py
|
abhishekagarwalnpl/QHAL-copy
|
b0dc496ba824b1545fb094e2462c044c8246846e
|
[
"Apache-2.0"
] | 3
|
2021-12-13T15:56:40.000Z
|
2022-03-10T14:55:06.000Z
|
qhal/quantum_simulators/__init__.py
|
abhishekagarwalnpl/QHAL-copy
|
b0dc496ba824b1545fb094e2462c044c8246846e
|
[
"Apache-2.0"
] | 1
|
2021-12-02T14:48:16.000Z
|
2021-12-02T14:48:16.000Z
|
from ._interface_quantum_simulator import IQuantumSimulator
from ._projectq_quantum_simulator import ProjectqQuantumSimulator
| 31.75
| 65
| 0.913386
| 12
| 127
| 9.166667
| 0.666667
| 0.290909
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070866
| 127
| 3
| 66
| 42.333333
| 0.932203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4ddaadefa053eddefca531e1748897a8bf6e9779
| 38
|
py
|
Python
|
src/backoffice/models/__init__.py
|
unikubehq/projects
|
0df69eafa2a0d2664a22c7a5866d4512ac4d57fe
|
[
"Apache-2.0"
] | 1
|
2021-10-05T13:17:03.000Z
|
2021-10-05T13:17:03.000Z
|
src/backoffice/models/__init__.py
|
unikubehq/projects
|
0df69eafa2a0d2664a22c7a5866d4512ac4d57fe
|
[
"Apache-2.0"
] | 48
|
2021-07-06T07:24:36.000Z
|
2022-03-24T08:27:30.000Z
|
src/backoffice/models/__init__.py
|
unikubehq/projects
|
0df69eafa2a0d2664a22c7a5866d4512ac4d57fe
|
[
"Apache-2.0"
] | null | null | null |
from backoffice.models.users import *
| 19
| 37
| 0.815789
| 5
| 38
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4deb5954dfdfc0e9101040fccae613eb804272a4
| 5,969
|
py
|
Python
|
pandapower/test/opf/test_cost_consistency.py
|
mathildebadoual/pandapower
|
9ba4bcb78e84b644d2ba6df0c08e285c54af8ddc
|
[
"BSD-3-Clause"
] | 1
|
2020-10-19T06:39:15.000Z
|
2020-10-19T06:39:15.000Z
|
pandapower/test/opf/test_cost_consistency.py
|
miek770/pandapower
|
de004efc1b7432a633792af4f551f7635a02db47
|
[
"BSD-3-Clause"
] | null | null | null |
pandapower/test/opf/test_cost_consistency.py
|
miek770/pandapower
|
de004efc1b7432a633792af4f551f7635a02db47
|
[
"BSD-3-Clause"
] | null | null | null |
import pandapower as pp
import pytest
from numpy import array
@pytest.fixture()
def base_net():
net = pp.create_empty_network()
pp.create_bus(net, vn_kv=10)
pp.create_bus(net, vn_kv=10)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_kw=200, controllable=False)
pp.create_line_from_parameters(net, 0, 1, 50, name="line", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.runpp(net)
return net
def test_contingency_sgen(base_net):
net = base_net
pp.create_sgen(net, 1, p_kw=-100, q_kvar =0, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50,
min_q_kvar=-50)
# pwl costs
# maximize the sgen feed in by using a positive cost slope
# using a slope of 1
# | /
# | /
# | /
# |/
#-------------------------------------------
# p_min_kw /|
# / |
# / |
pp.create_piecewise_linear_cost(net, 0, "sgen", array([[net.sgen.min_p_kw.at[0], net.sgen.min_p_kw.at[0]], [0, 0]]))
pp.runopp(net)
assert abs(net.res_cost - net.res_sgen.p_kw.at[0]) < 1e-5
# minimize the sgen feed in by using a positive cost slope
# using a slope of 1
# \ |
# \ |
# \ |
# \|
#-------------------------------------------
# p_min_kw |\
# | \
# | \
net.piecewise_linear_cost.f.at[0] *= -1
pp.runopp(net)
assert abs(net.res_cost - net.res_sgen.p_kw.at[0]*-1) < 1e-5
try:
net.piecewise_linear_cost = net.piecewise_linear_cost.drop(index=0)
except:
net.piecewise_linear_cost = net.piecewise_linear_cost.drop(0)
# first using a positive slope as in the case above
pp.create_polynomial_cost(net, 0, "sgen", array([1, 0]))
pp.runopp(net)
assert abs(net.res_cost - net.res_sgen.p_kw.at[0]) < 1e-5
# negative slope as in the case above
net.polynomial_cost.c.at[0] *= -1
pp.runopp(net)
assert abs(net.res_cost - net.res_sgen.p_kw.at[0]*-1) < 1e-5
def test_contingency_load(base_net):
net = base_net
pp.create_load(net, 1, p_kw=-100, q_kvar=0, controllable=True, max_p_kw=150, min_p_kw=5,
max_q_kvar=50,
min_q_kvar=-50)
# pwl costs
# minimze the load by using a positive cost slope
# using a slope of 1
# | /
# | /
# | /
# |/
# -------------------------------------------
# p_min_kw /|
# / |
# / |
pp.create_piecewise_linear_cost(net, 1, "load", array(
[[0, 0],[net.load.max_p_kw.at[1], net.load.max_p_kw.at[1]]]))
pp.runopp(net)
assert abs(net.res_cost - net.res_load.p_kw.at[1]) < 1e-5
# maximize the load in by using a negative cost slope
# using a slope of 1
# \ |
# \ |
# \ |
# \|
# -------------------------------------------
# p_min_kw |\
# | \
# | \
net.piecewise_linear_cost.f.at[0] *= -1
pp.runopp(net)
assert abs(net.res_cost - net.res_load.p_kw.at[1] * -1) < 1e-5
# poly costs
try:
net.piecewise_linear_cost = net.piecewise_linear_cost.drop(index=0)
except:
# legacy fix
net.piecewise_linear_cost = net.piecewise_linear_cost.drop(0)
# first using a positive slope as in the case above
pp.create_polynomial_cost(net, 1, "load", array([1, 0]))
pp.runopp(net)
assert abs(net.res_cost - net.res_load.p_kw.at[1]) < 1e-5
# negative slope as in the case above
net.polynomial_cost.c.at[0] *= -1
pp.runopp(net)
assert abs(net.res_cost - net.res_load.p_kw.at[1]*-1) < 1e-5
def test_contingency_gen(base_net):
net = base_net
pp.create_gen(net, 1, p_kw=-100, vm_pu = 1.05, controllable=True, max_p_kw=-5, min_p_kw=-150, max_q_kvar=50,
min_q_kvar=-50)
# pwl costs
# maximize the sgen feed in by using a positive cost slope
# using a slope of 1
# | /
# | /
# | /
# |/
#-------------------------------------------
# p_min_kw /|
# / |
# / |
pp.create_piecewise_linear_cost(net, 0, "gen", array([[net.gen.min_p_kw.at[0], net.gen.min_p_kw.at[0]], [0, 0]]))
pp.runopp(net)
assert abs(net.res_cost - net.res_gen.p_kw.at[0]) < 1e-5
# minimize the sgen feed in by using a positive cost slope
# using a slope of 1
# \ |
# \ |
# \ |
# \|
#-------------------------------------------
# p_min_kw |\
# | \
# | \
net.piecewise_linear_cost.f.at[0] *= -1
pp.runopp(net)
assert abs(net.res_cost - net.res_gen.p_kw.at[0]*-1) < 1e-5
try:
net.piecewise_linear_cost = net.piecewise_linear_cost.drop(index=0)
except:
# legacy fix
net.piecewise_linear_cost = net.piecewise_linear_cost.drop(0)
# first using a positive slope as in the case above
pp.create_polynomial_cost(net, 0, "gen", array([1, 0]))
pp.runopp(net)
assert abs(net.res_cost - net.res_gen.p_kw.at[0]) < 1e-5
# negative slope as in the case above
net.polynomial_cost.c.at[0] *= -1
pp.runopp(net)
assert abs(net.res_cost - net.res_gen.p_kw.at[0]*-1) < 1e-5
if __name__ == "__main__":
# net = base_net()
# test_contingency_gen(net)
pytest.main(['-s', __file__])
| 31.75
| 120
| 0.499749
| 826
| 5,969
| 3.375303
| 0.128329
| 0.030129
| 0.122669
| 0.118364
| 0.845409
| 0.827834
| 0.814921
| 0.752869
| 0.752869
| 0.735653
| 0
| 0.042984
| 0.33741
| 5,969
| 187
| 121
| 31.919786
| 0.661947
| 0.32719
| 0
| 0.632911
| 0
| 0
| 0.009135
| 0
| 0
| 0
| 0
| 0
| 0.151899
| 1
| 0.050633
| false
| 0
| 0.037975
| 0
| 0.101266
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
12891ae58e94d8a98c7bc4ddb5063b20f031168d
| 218
|
py
|
Python
|
profit/run/__init__.py
|
krystophny/profit
|
c6316c9df7cfaa7b30332fdbbf85ad27175eaf92
|
[
"MIT"
] | 14
|
2019-12-03T14:11:28.000Z
|
2022-03-15T13:44:06.000Z
|
profit/run/__init__.py
|
krystophny/profit
|
c6316c9df7cfaa7b30332fdbbf85ad27175eaf92
|
[
"MIT"
] | 118
|
2019-11-16T19:51:26.000Z
|
2022-03-26T13:52:00.000Z
|
profit/run/__init__.py
|
krystophny/profit
|
c6316c9df7cfaa7b30332fdbbf85ad27175eaf92
|
[
"MIT"
] | 9
|
2020-06-08T07:22:56.000Z
|
2021-03-21T14:12:21.000Z
|
from . import runner
from . import worker
from . import default
from . import zeromq
from . import slurm
from .runner import Runner, RunnerInterface
from .worker import Worker, Interface, Preprocessor, Postprocessor
| 21.8
| 66
| 0.793578
| 27
| 218
| 6.407407
| 0.407407
| 0.289017
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155963
| 218
| 9
| 67
| 24.222222
| 0.940217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
129ed518e52ced7d65db8d014f8bfc22bd02cb2d
| 22
|
py
|
Python
|
src/python/packages/lmwg/__init__.py
|
susburrows/uvcmetrics
|
5a3c1266f3e5e97398a7671b01fa2816fb307c38
|
[
"X11",
"MIT"
] | 3
|
2017-03-03T21:28:06.000Z
|
2017-05-23T02:03:22.000Z
|
src/python/packages/lmwg/__init__.py
|
susburrows/uvcmetrics
|
5a3c1266f3e5e97398a7671b01fa2816fb307c38
|
[
"X11",
"MIT"
] | 192
|
2015-01-05T19:39:56.000Z
|
2017-01-17T22:28:34.000Z
|
src/python/packages/lmwg/__init__.py
|
susburrows/uvcmetrics
|
5a3c1266f3e5e97398a7671b01fa2816fb307c38
|
[
"X11",
"MIT"
] | 6
|
2016-02-26T19:03:46.000Z
|
2017-07-12T16:55:33.000Z
|
from defines import *
| 11
| 21
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
12a41fc12d959ec0e6d34edf267f16ef2c46b5da
| 116
|
py
|
Python
|
ertk/tensorflow/__init__.py
|
bagustris/emotion
|
5bd83d3ca8a6eb930f449b7a990fefd75d0c7d36
|
[
"MIT"
] | 3
|
2020-11-03T14:54:22.000Z
|
2021-04-12T12:23:10.000Z
|
src/ertk/tensorflow/__init__.py
|
agkphysics/emotion
|
36bb9265f9439b10676fb539d5334cce645e49ef
|
[
"MIT"
] | null | null | null |
src/ertk/tensorflow/__init__.py
|
agkphysics/emotion
|
36bb9265f9439b10676fb539d5334cce645e49ef
|
[
"MIT"
] | 2
|
2020-12-03T06:21:59.000Z
|
2021-01-16T04:47:12.000Z
|
from .models import get_tf_model, get_tf_model_fn
from .utils import compile_wrap, init_gpu_memory_growth, test_fit
| 38.666667
| 65
| 0.862069
| 21
| 116
| 4.285714
| 0.761905
| 0.111111
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094828
| 116
| 2
| 66
| 58
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
12eaae7ef685bc6e276b06a4cc0062a494397278
| 496
|
py
|
Python
|
numpy_benchmarks/benchmarks/euclidean_distance_square.py
|
adriendelsalle/numpy-benchmarks
|
5c09448d045726b347e868756f9e1b004d0876ea
|
[
"BSD-3-Clause"
] | 33
|
2015-03-18T23:16:55.000Z
|
2021-12-17T11:00:01.000Z
|
numpy_benchmarks/benchmarks/euclidean_distance_square.py
|
adriendelsalle/numpy-benchmarks
|
5c09448d045726b347e868756f9e1b004d0876ea
|
[
"BSD-3-Clause"
] | 8
|
2015-04-17T15:14:15.000Z
|
2021-02-24T13:34:55.000Z
|
numpy_benchmarks/benchmarks/euclidean_distance_square.py
|
adriendelsalle/numpy-benchmarks
|
5c09448d045726b347e868756f9e1b004d0876ea
|
[
"BSD-3-Clause"
] | 12
|
2015-04-17T12:24:31.000Z
|
2021-01-27T08:06:01.000Z
|
#from: https://stackoverflow.com/questions/50658884/why-this-numba-code-is-6x-slower-than-numpy-code
#setup: import numpy as np; np.random.seed(0); x1 = np.random.random((1, 512)); x2 = np.random.random((10000, 512))
#run: euclidean_distance_square(x1, x2)
#pythran export euclidean_distance_square(float64[1,:], float64[:,:])
import numpy as np
def euclidean_distance_square(x1, x2):
return -2*np.dot(x1, x2.T) + np.sum(np.square(x1), axis=1)[:, np.newaxis] + np.sum(np.square(x2), axis=1)
| 62
| 115
| 0.717742
| 84
| 496
| 4.166667
| 0.5
| 0.068571
| 0.197143
| 0.085714
| 0.154286
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088692
| 0.090726
| 496
| 7
| 116
| 70.857143
| 0.687361
| 0.645161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
12f300552dca0224fff2b16cc466bf29345851ba
| 5,585
|
py
|
Python
|
problem_13.py
|
mc10/project-euler
|
406582facfa64d3ed9668240aa7ffd5529964d36
|
[
"MIT"
] | null | null | null |
problem_13.py
|
mc10/project-euler
|
406582facfa64d3ed9668240aa7ffd5529964d36
|
[
"MIT"
] | null | null | null |
problem_13.py
|
mc10/project-euler
|
406582facfa64d3ed9668240aa7ffd5529964d36
|
[
"MIT"
] | null | null | null |
'''
Problem 13
@author: mat.000
'''
numbers = """37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690"""
number_list = numbers.split()
def sum_of_list(number_list):
sum_of_list = 0
for element in number_list:
sum_of_list += int(element)
return sum_of_list
def first_ten_digits_of_number(number):
string = str(number)
return int(string[:10])
sum_of_list = sum_of_list(number_list)
print("Sum: " + str(sum_of_list))
print("First ten digits: " + str(first_ten_digits_of_number(sum_of_list)))
| 44.325397
| 74
| 0.954342
| 175
| 5,585
| 30.297143
| 0.714286
| 0.007544
| 0.01358
| 0.007356
| 0.020747
| 0
| 0
| 0
| 0
| 0
| 0
| 0.928956
| 0.034736
| 5,585
| 125
| 75
| 44.68
| 0.054535
| 0.005013
| 0
| 0
| 0
| 0
| 0.923049
| 0.901063
| 0
| 1
| 0
| 0
| 0
| 1
| 0.017857
| false
| 0
| 0
| 0
| 0.035714
| 0.017857
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
421763ffd0d7057d7719aa73e6af7c54b29e615d
| 7,572
|
py
|
Python
|
myems-api/core/gsmmodem.py
|
hyh123a/myems
|
669ab8554995a622da595384698d670f9cee61f8
|
[
"MIT"
] | 2
|
2021-02-19T10:22:36.000Z
|
2021-02-19T10:23:22.000Z
|
myems-api/core/gsmmodem.py
|
hyh123a/myems
|
669ab8554995a622da595384698d670f9cee61f8
|
[
"MIT"
] | null | null | null |
myems-api/core/gsmmodem.py
|
hyh123a/myems
|
669ab8554995a622da595384698d670f9cee61f8
|
[
"MIT"
] | 1
|
2022-01-29T14:18:47.000Z
|
2022-01-29T14:18:47.000Z
|
import falcon
import json
import mysql.connector
import config
import base64
import re
class GSMModemCollection:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp):
cnx = mysql.connector.connect(**config.myems_fdd_db)
cursor = cnx.cursor()
query = (" SELECT id, serial_port, baud_rate "
" FROM tbl_gsm_modems ")
cursor.execute(query)
rows = cursor.fetchall()
cursor.close()
cnx.disconnect()
result = list()
if rows is not None and len(rows) > 0:
for row in rows:
meta_result = {"id": row[0],
"serial_port": row[1],
"baud_rate": row[2]}
result.append(meta_result)
resp.body = json.dumps(result)
@staticmethod
def on_post(req, resp):
"""Handles POST requests"""
try:
raw_json = req.stream.read().decode('utf-8')
except Exception as ex:
raise falcon.HTTPError(falcon.HTTP_400, title='API.ERROR', description=ex)
new_values = json.loads(raw_json)
if 'serial_port' not in new_values['data'].keys() or \
not isinstance(new_values['data']['serial_port'], str) or \
len(str.strip(new_values['data']['serial_port'])) == 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_SERIAL_PORT')
serial_port = str.strip(new_values['data']['serial_port'])
if 'baud_rate' not in new_values['data'].keys() or \
not isinstance(new_values['data']['baud_rate'], int) or \
new_values['data']['baud_rate'] <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_BAUD_RATE')
baud_rate = float(new_values['data']['baud_rate'])
cnx = mysql.connector.connect(**config.myems_fdd_db)
cursor = cnx.cursor()
cursor.execute(" SELECT id "
" FROM tbl_gsm_modems "
" WHERE serial_port = %s ", (serial_port,))
if cursor.fetchone() is not None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.BAD_REQUEST',
description='API.GSM_MODEM_SERIAL_PORT_IS_ALREADY_IN_USE')
add_value = (" INSERT INTO tbl_gsm_modems "
" (serial_port, baud_rate) "
" VALUES (%s, %s) ")
cursor.execute(add_value, (serial_port,
baud_rate))
new_id = cursor.lastrowid
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_201
resp.location = '/gsmmodems/' + str(new_id)
class GSMModemItem:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp, id_):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp, id_):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, '400 Bad Request')
cnx = mysql.connector.connect(**config.myems_fdd_db)
cursor = cnx.cursor()
query = (" SELECT id, serial_port, baud_rate "
" FROM tbl_gsm_modems "
" WHERE id = %s ")
cursor.execute(query, (id_,))
row = cursor.fetchone()
cursor.close()
cnx.disconnect()
if row is None:
raise falcon.HTTPError(falcon.HTTP_404, 'API.NOT_FOUND')
result = {"id": row[0],
"serial_port": row[1],
"baud_rate": row[2]}
resp.body = json.dumps(result)
@staticmethod
def on_delete(req, resp, id_):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_GSM_MODEM_ID')
cnx = mysql.connector.connect(**config.myems_fdd_db)
cursor = cnx.cursor()
cursor.execute(" SELECT serial_port "
" FROM tbl_gsm_modems "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.GSM_MODEM_NOT_FOUND')
cursor.execute(" DELETE FROM tbl_gsm_modems WHERE id = %s ", (id_,))
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_204
@staticmethod
def on_put(req, resp, id_):
"""Handles PUT requests"""
try:
raw_json = req.stream.read().decode('utf-8')
except Exception as ex:
raise falcon.HTTPError(falcon.HTTP_400, title='API.EXCEPTION', description=ex)
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_GSM_MODEM_ID')
new_values = json.loads(raw_json)
if 'serial_port' not in new_values['data'].keys() or \
not isinstance(new_values['data']['serial_port'], str) or \
len(str.strip(new_values['data']['serial_port'])) == 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_SERIAL_PORT')
serial_port = str.strip(new_values['data']['serial_port'])
if 'baud_rate' not in new_values['data'].keys() or \
not isinstance(new_values['data']['baud_rate'], int) or \
new_values['data']['baud_rate'] <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_BAUD_RATE')
baud_rate = float(new_values['data']['baud_rate'])
cnx = mysql.connector.connect(**config.myems_fdd_db)
cursor = cnx.cursor()
cursor.execute(" SELECT serial_port "
" FROM tbl_gsm_modems "
" WHERE id = %s ",
(id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.GSM_MODEM_NOT_FOUND')
cursor.execute(" SELECT serial_port "
" FROM tbl_gsm_modems "
" WHERE serial_port = %s AND id != %s ", (serial_port, id_))
if cursor.fetchone() is not None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.BAD_REQUEST',
description='API.GSM_MODEM_SERIAL_PORT_IS_ALREADY_IN_USE')
update_row = (" UPDATE tbl_gsm_modems "
" SET serial_port = %s, baud_rate = %s "
" WHERE id = %s ")
cursor.execute(update_row, (serial_port,
baud_rate,
id_,))
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_200
| 35.716981
| 93
| 0.539884
| 850
| 7,572
| 4.578824
| 0.142353
| 0.074512
| 0.053443
| 0.093525
| 0.819887
| 0.809866
| 0.801387
| 0.799332
| 0.761562
| 0.761562
| 0
| 0.015716
| 0.344559
| 7,572
| 211
| 94
| 35.886256
| 0.768487
| 0.005547
| 0
| 0.690476
| 0
| 0
| 0.175289
| 0.035643
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053571
| false
| 0.011905
| 0.035714
| 0
| 0.10119
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
427d3c9fe0331d3db37a3abf60645dfed45bb815
| 153
|
py
|
Python
|
tests/test_update_branch.py
|
r-ash/naomi_bot
|
ee49d9d236ca031176d555ee0d65eb8c1cd27f99
|
[
"MIT"
] | 1
|
2020-05-07T21:28:08.000Z
|
2020-05-07T21:28:08.000Z
|
tests/test_update_branch.py
|
mrc-ide/naomi_bot
|
ee49d9d236ca031176d555ee0d65eb8c1cd27f99
|
[
"MIT"
] | null | null | null |
tests/test_update_branch.py
|
mrc-ide/naomi_bot
|
ee49d9d236ca031176d555ee0d65eb8c1cd27f99
|
[
"MIT"
] | null | null | null |
import pytest
from naomi_bot.app.update_branch import update_branch
def test_update_branch():
# Some interaction test for the requests?
assert True
| 21.857143
| 53
| 0.810458
| 23
| 153
| 5.173913
| 0.73913
| 0.302521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.143791
| 153
| 7
| 54
| 21.857143
| 0.908397
| 0.254902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
42a4131a19a45d5d33325785739131594629aa68
| 17,732
|
py
|
Python
|
tests/sentry/web/frontend/tests.py
|
pascalw/sentry
|
7cba3d95520c32afba007bd9adf2eb823be0ef66
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/web/frontend/tests.py
|
pascalw/sentry
|
7cba3d95520c32afba007bd9adf2eb823be0ef66
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/web/frontend/tests.py
|
pascalw/sentry
|
7cba3d95520c32afba007bd9adf2eb823be0ef66
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import json
from django.core.urlresolvers import reverse
from sentry.conf import settings
from sentry.constants import MEMBER_USER
from sentry.models import Group, Project, TeamMember, Team, User
from sentry.testutils import TestCase, fixture, before
logger = logging.getLogger(__name__)
class BaseViewTest(TestCase):
pass
class EnvStatusTest(BaseViewTest):
@fixture
def path(self):
return reverse('sentry-admin-status')
def test_requires_auth(self):
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 302)
def test_renders_template(self):
self.login()
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/status/env.html')
class PackageStatusTest(BaseViewTest):
@fixture
def path(self):
return reverse('sentry-admin-packages-status')
def test_requires_auth(self):
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 302)
def test_renders_template(self):
self.login()
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/status/packages.html')
class MailStatusTest(BaseViewTest):
@fixture
def path(self):
return reverse('sentry-admin-mail-status')
def test_requires_auth(self):
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 302)
def test_renders_template(self):
self.login()
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/status/mail.html')
class StatsTest(BaseViewTest):
@fixture
def path(self):
return reverse('sentry-admin-stats')
def test_requires_auth(self):
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 302)
def test_renders_template(self):
self.login()
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/admin/stats.html')
class GroupDetailsTest(BaseViewTest):
@fixture
def path(self):
return reverse('sentry-group', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
})
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/details.html')
assert 'group' in resp.context
assert 'project' in resp.context
assert 'team' in resp.context
assert resp.context['group'] == self.group
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
class GroupListTest(BaseViewTest):
@fixture
def path(self):
return reverse('sentry-stream', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
})
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/group_list.html')
assert 'project' in resp.context
assert 'team' in resp.context
assert 'event_list' in resp.context
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
class GroupEventListTest(BaseViewTest):
@fixture
def path(self):
return reverse('sentry-group-events', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
})
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/event_list.html')
assert 'group' in resp.context
assert 'project' in resp.context
assert 'team' in resp.context
assert 'event_list' in resp.context
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
assert resp.context['group'] == self.group
class GroupTagListTest(BaseViewTest):
@fixture
def path(self):
return reverse('sentry-group-tags', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
})
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/tag_list.html')
assert 'group' in resp.context
assert 'project' in resp.context
assert 'team' in resp.context
assert 'tag_list' in resp.context
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
assert resp.context['group'] == self.group
class GroupEventDetailsTest(BaseViewTest):
@fixture
def path(self):
return reverse('sentry-group-event', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
'event_id': self.event.id,
})
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/groups/details.html')
assert 'group' in resp.context
assert 'project' in resp.context
assert 'team' in resp.context
assert 'event' in resp.context
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
assert resp.context['group'] == self.group
assert resp.context['event'] == self.event
class GroupEventListJsonTest(BaseViewTest):
@fixture
def path(self):
return reverse('sentry-group-events-json', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
})
def test_does_render(self):
self.login()
# HACK: force fixture creation
self.event
resp = self.client.get(self.path)
assert resp.status_code == 200
assert resp['Content-Type'] == 'application/json'
data = json.loads(resp.content)
assert len(data) == 1
assert data[0]['id'] == str(self.event.event_id)
def test_does_not_allow_beyond_limit(self):
self.login()
resp = self.client.get(self.path, {'limit': settings.MAX_JSON_RESULTS + 1})
assert resp.status_code == 400
class GroupEventJsonTest(BaseViewTest):
@fixture
def path(self):
return reverse('sentry-group-event-json', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
'event_id_or_latest': self.event.id,
})
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
assert resp['Content-Type'] == 'application/json'
data = json.loads(resp.content)
assert data['id'] == self.event.event_id
class ManageUsersTest(BaseViewTest):
@fixture
def path(self):
return reverse('sentry-admin-users')
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/admin/users/list.html')
class ReplayTest(BaseViewTest):
@fixture
def path(self):
return reverse('sentry-replay', kwargs={
'team_slug': self.team.slug,
'project_id': self.project.slug,
'group_id': self.group.id,
'event_id': self.event.id,
})
def test_does_render(self):
self.login()
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/events/replay_request.html')
class PermissionBase(TestCase):
"""
These tests simply ensure permission requirements for various views.
"""
@fixture
def admin(self):
user = User(username="admin", email="admin@localhost", is_staff=True, is_superuser=True)
user.set_password('admin')
user.save()
return user
@fixture
def member(self):
user = User(username="member", email="member@localhost")
user.set_password('member')
user.save()
TeamMember.objects.create(
user=user,
team=self.team,
type=MEMBER_USER,
)
return user
@fixture
def nobody(self):
user = User(username="nobody", email="nobody@localhost")
user.set_password('nobody')
user.save()
return user
@fixture
def owner(self):
user = User(username="owner", email="owner@localhost")
user.set_password('owner')
user.save()
Team.objects.create(owner=user, name='foo', slug='foo')
return user
@fixture
def tm(self):
return TeamMember.objects.get(user=self.member, team=self.team)
@fixture
def team(self):
return Team.objects.get(owner=self.owner, slug='foo')
@fixture
def project(self):
project = Project.objects.get(id=1)
project.update(public=False, team=self.team)
return project
def _assertPerm(self, path, template, account=None, want=True):
"""
Requests ``path`` and asserts that ``template`` is
rendered for ``account`` (Anonymous if None) given ``want``
is Trueish.
"""
if account:
self.assertTrue(self.client.login(username=account, password=account))
else:
self.client.logout()
resp = self.client.get(path)
if want:
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, template)
else:
self.assertEquals(resp.status_code, 302)
self.assertTemplateNotUsed(resp, template)
class NewTeamProjectTest(PermissionBase):
template = 'sentry/teams/projects/new.html'
@fixture
def path(self):
return reverse('sentry-new-project', args=[self.team.slug])
def test_admin_can_load(self):
with self.Settings(SENTRY_ALLOW_PROJECT_CREATION=False, SENTRY_ALLOW_TEAM_CREATION=False):
self._assertPerm(self.path, self.template, self.admin.username)
def test_user_cannot_load(self):
with self.Settings(SENTRY_ALLOW_PROJECT_CREATION=False, SENTRY_ALLOW_TEAM_CREATION=False):
self._assertPerm(self.path, self.template, self.nobody.username, False)
def test_anonymous_cannot_load(self):
with self.Settings(SENTRY_ALLOW_PROJECT_CREATION=False, SENTRY_ALLOW_TEAM_CREATION=False):
self._assertPerm(self.path, self.template, None, False)
def test_public_creation_admin_can_load(self):
with self.Settings(SENTRY_ALLOW_PROJECT_CREATION=True, SENTRY_ALLOW_TEAM_CREATION=True):
self._assertPerm(self.path, self.template, self.admin.username)
def test_public_anonymous_cannot_load(self):
with self.Settings(SENTRY_ALLOW_PROJECT_CREATION=True, SENTRY_ALLOW_TEAM_CREATION=True):
self._assertPerm(self.path, self.template, None, False)
class ManageProjectTest(PermissionBase):
template = 'sentry/projects/manage.html'
@fixture
def path(self):
return reverse('sentry-manage-project', kwargs={'team_slug': self.team.slug, 'project_id': self.project.id})
def test_admin_can_load(self):
self._assertPerm(self.path, self.template, self.admin.username)
def test_owner_can_load(self):
self._assertPerm(self.path, self.template, self.owner.username)
def test_anonymous_cannot_load(self):
self._assertPerm(self.path, self.template, None, False)
def test_user_cannot_load(self):
self._assertPerm(self.path, self.template, self.nobody.username, False)
def test_member_cannot_load(self):
self._assertPerm(self.path, self.template, self.member.username, False)
class RemoveProjectTest(PermissionBase):
template = 'sentry/projects/remove.html'
@fixture
def path(self):
return reverse('sentry-remove-project', kwargs={'team_slug': self.team.slug, 'project_id': self.project.id})
def test_admin_cannot_remove_default(self):
with self.Settings(SENTRY_PROJECT=1):
self._assertPerm(self.path, self.template, self.admin.username, False)
def test_owner_cannot_remove_default(self):
with self.Settings(SENTRY_PROJECT=1):
self._assertPerm(self.path, self.template, self.owner.username, False)
def test_anonymous_cannot_remove_default(self):
with self.Settings(SENTRY_PROJECT=1):
self._assertPerm(self.path, self.template, None, False)
def test_user_cannot_remove_default(self):
with self.Settings(SENTRY_PROJECT=1):
self._assertPerm(self.path, self.template, self.nobody.username, False)
def test_member_cannot_remove_default(self):
with self.Settings(SENTRY_PROJECT=1):
self._assertPerm(self.path, self.template, self.member.username, False)
def test_admin_can_load(self):
with self.Settings(SENTRY_PROJECT=2):
self._assertPerm(self.path, self.template, self.admin.username)
def test_owner_can_load(self):
with self.Settings(SENTRY_PROJECT=2):
self._assertPerm(self.path, self.template, self.owner.username)
def test_anonymous_cannot_load(self):
with self.Settings(SENTRY_PROJECT=2):
self._assertPerm(self.path, self.template, None, False)
def test_user_cannot_load(self):
with self.Settings(SENTRY_PROJECT=2):
self._assertPerm(self.path, self.template, self.nobody.username, False)
def test_member_cannot_load(self):
with self.Settings(SENTRY_PROJECT=2):
self._assertPerm(self.path, self.template, self.member.username, False)
class NewTeamMemberTest(PermissionBase):
template = 'sentry/teams/members/new.html'
@fixture
def path(self):
return reverse('sentry-new-team-member', kwargs={'team_slug': self.team.slug})
def test_admin_can_load(self):
self._assertPerm(self.path, self.template, self.admin.username)
def test_owner_can_load(self):
self._assertPerm(self.path, self.template, self.owner.username)
def test_anonymous_cannot_load(self):
self._assertPerm(self.path, self.template, None, False)
def test_user_cannot_load(self):
self._assertPerm(self.path, self.template, self.nobody.username, False)
def test_member_cannot_load(self):
self._assertPerm(self.path, self.template, self.member.username, False)
class EditTeamMemberTest(PermissionBase):
template = 'sentry/teams/members/edit.html'
@fixture
def path(self):
return reverse('sentry-edit-team-member', kwargs={'team_slug': self.team.slug, 'member_id': self.tm.pk})
def test_admin_can_load(self):
self._assertPerm(self.path, self.template, self.admin.username)
def test_owner_can_load(self):
self._assertPerm(self.path, self.template, self.owner.username)
def test_anonymous_cannot_load(self):
self._assertPerm(self.path, self.template, None, False)
def test_user_cannot_load(self):
self._assertPerm(self.path, self.template, self.nobody.username, False)
def test_member_cannot_load(self):
self._assertPerm(self.path, self.template, self.member.username, False)
class RemoveTeamMemberTest(PermissionBase):
template = 'sentry/teams/members/remove.html'
@fixture
def path(self):
return reverse('sentry-remove-team-member', kwargs={'team_slug': self.team.slug, 'member_id': self.tm.pk})
def test_admin_can_load(self):
self._assertPerm(self.path, self.template, self.admin.username)
def test_owner_can_load(self):
self._assertPerm(self.path, self.template, self.owner.username)
def test_anonymous_cannot_load(self):
self._assertPerm(self.path, self.template, None, False)
def test_user_cannot_load(self):
self._assertPerm(self.path, self.template, self.nobody.username, False)
def test_member_cannot_load(self):
self._assertPerm(self.path, self.template, self.member.username, False)
class SentrySearchTest(TestCase):
@before
def login_user(self):
self.login_as(self.user)
@fixture
def path(self):
return reverse('sentry-search', kwargs={'team_slug': self.team.slug, 'project_id': self.project.id})
def test_checksum_query(self):
checksum = 'a' * 32
group = Group.objects.create(
project=self.project,
logger='root',
culprit='a',
checksum=checksum,
message='hi',
)
response = self.client.get(self.path, {'q': '%s$%s' % (checksum, checksum)})
self.assertEquals(response.status_code, 302)
self.assertEquals(response['Location'], 'http://testserver%s' % (reverse('sentry-group', kwargs={
'project_id': group.project.slug,
'team_slug': group.team.slug,
'group_id': group.id,
}),))
| 32.53578
| 116
| 0.655143
| 2,154
| 17,732
| 5.243268
| 0.090994
| 0.045334
| 0.04675
| 0.068178
| 0.767399
| 0.75332
| 0.743581
| 0.740305
| 0.721445
| 0.670799
| 0
| 0.005834
| 0.226709
| 17,732
| 544
| 117
| 32.595588
| 0.817824
| 0.013704
| 0
| 0.657702
| 0
| 0
| 0.095623
| 0.039982
| 0
| 0
| 0
| 0
| 0.268949
| 1
| 0.202934
| false
| 0.01467
| 0.01956
| 0.05379
| 0.356968
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
42b093a753f229fe7d19de68ff7a064b4d4a440d
| 1,517
|
py
|
Python
|
tests/test_c2020.py
|
atollk/flake8-multiline-conditionals-comprehensions
|
7843a0dc970502b4752726d6a52e114e743be50b
|
[
"MIT"
] | 1
|
2020-06-24T06:15:44.000Z
|
2020-06-24T06:15:44.000Z
|
tests/test_c2020.py
|
atollk/flake8-multiline-conditionals-comprehensions
|
7843a0dc970502b4752726d6a52e114e743be50b
|
[
"MIT"
] | null | null | null |
tests/test_c2020.py
|
atollk/flake8-multiline-conditionals-comprehensions
|
7843a0dc970502b4752726d6a52e114e743be50b
|
[
"MIT"
] | null | null | null |
from tests.util import BaseTest
class Test_C2020(BaseTest):
def error_code(self) -> str:
return "C2020"
def test_pass_1(self):
code = """
foo = 1 if 10 < 20 else 0
"""
result = self.run_flake8(code, True)
assert result == []
def test_pass_2(self):
code = """
foo = (1
if True
else 0)
"""
result = self.run_flake8(code, True)
assert result == []
def test_fail_1(self):
code = """
foo = (1 if 10 < 20 else
0)
"""
result = self.run_flake8(code, True)
self.assert_error_at(result, "C2020", 1, 8)
def test_fail_2(self):
code = """
foo = (1 if 10 < 20
else 0)
"""
result = self.run_flake8(code, True)
self.assert_error_at(result, "C2020", 1, 8)
def test_fail_3(self):
code = """
foo = (1 if (10 <
20) else 0)
"""
result = self.run_flake8(code, True)
self.assert_error_at(result, "C2020", 1, 8)
def test_fail_4(self):
code = """
foo = (1 if
10 < 20 else 0)
"""
result = self.run_flake8(code, True)
self.assert_error_at(result, "C2020", 1, 8)
def test_fail_5(self):
code = """
foo = (1
if 10 < 20 else 0)
"""
result = self.run_flake8(code, True)
self.assert_error_at(result, "C2020", 1, 8)
| 24.079365
| 51
| 0.474621
| 192
| 1,517
| 3.578125
| 0.177083
| 0.071325
| 0.112082
| 0.122271
| 0.847162
| 0.847162
| 0.823872
| 0.823872
| 0.823872
| 0.823872
| 0
| 0.098793
| 0.399473
| 1,517
| 62
| 52
| 24.467742
| 0.655324
| 0
| 0
| 0.603774
| 0
| 0
| 0.29532
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 1
| 0.150943
| false
| 0.037736
| 0.018868
| 0.018868
| 0.207547
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
35ff9af094a62d398edca1a4c506c76ce555cac7
| 212
|
py
|
Python
|
dags/rockflow/operators/common.py
|
day253/airflow-dags
|
a98d47396f2c6e0185d528e94d02fa8a8daaef7a
|
[
"Unlicense"
] | 2
|
2022-01-08T17:19:01.000Z
|
2022-02-10T06:41:28.000Z
|
dags/rockflow/operators/common.py
|
RockFlow-AI/airflow-dags
|
8172ed9041231264d491120d0c1f5c973fbed92a
|
[
"Unlicense"
] | null | null | null |
dags/rockflow/operators/common.py
|
RockFlow-AI/airflow-dags
|
8172ed9041231264d491120d0c1f5c973fbed92a
|
[
"Unlicense"
] | 1
|
2021-12-15T09:57:55.000Z
|
2021-12-15T09:57:55.000Z
|
def is_none_us_symbol(symbol: str) -> bool:
return symbol.endswith(".HK") or symbol.endswith(".SZ") or symbol.endswith(".SH")
def is_us_symbol(symbol: str) -> bool:
return not is_none_us_symbol(symbol)
| 30.285714
| 85
| 0.712264
| 34
| 212
| 4.205882
| 0.411765
| 0.167832
| 0.293706
| 0.195804
| 0.559441
| 0.377622
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136792
| 212
| 6
| 86
| 35.333333
| 0.781421
| 0
| 0
| 0
| 0
| 0
| 0.042453
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
c41cfc13016913a149a4600432d8551c94f97681
| 218
|
py
|
Python
|
allauth/socialaccount/providers/agave_provider/urls.py
|
Fuzzwah/django-allauth
|
071cbef1388bb61a563d3e41197bd5b7c26664d2
|
[
"MIT"
] | null | null | null |
allauth/socialaccount/providers/agave_provider/urls.py
|
Fuzzwah/django-allauth
|
071cbef1388bb61a563d3e41197bd5b7c26664d2
|
[
"MIT"
] | null | null | null |
allauth/socialaccount/providers/agave_provider/urls.py
|
Fuzzwah/django-allauth
|
071cbef1388bb61a563d3e41197bd5b7c26664d2
|
[
"MIT"
] | null | null | null |
from allauth.socialaccount.providers.agave_provider.provider import AgaveProvider
from allauth.socialaccount.providers.oauth2_provider.urls import default_urlpatterns
urlpatterns = default_urlpatterns(AgaveProvider)
| 36.333333
| 84
| 0.889908
| 23
| 218
| 8.26087
| 0.521739
| 0.115789
| 0.252632
| 0.347368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004878
| 0.059633
| 218
| 5
| 85
| 43.6
| 0.921951
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c4741f5fb44931386a57c82750f89be3bbaba6e7
| 68
|
py
|
Python
|
torchclas/utils/__init__.py
|
hua1024/OpenClas
|
446b3f6f8cf5cc390c86d6e2674e525aeaa3a552
|
[
"Apache-2.0"
] | null | null | null |
torchclas/utils/__init__.py
|
hua1024/OpenClas
|
446b3f6f8cf5cc390c86d6e2674e525aeaa3a552
|
[
"Apache-2.0"
] | 1
|
2021-05-23T13:47:51.000Z
|
2021-05-24T11:39:32.000Z
|
torchclas/utils/__init__.py
|
hua1024/OpenClas
|
446b3f6f8cf5cc390c86d6e2674e525aeaa3a552
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# @Time : 2020/10/24 11:13
# @Auto : zzf-jeff
| 22.666667
| 29
| 0.544118
| 12
| 68
| 3.083333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.26
| 0.264706
| 68
| 3
| 30
| 22.666667
| 0.48
| 0.882353
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
670c28a77e6b0ea1a70045abc0400f0145aa849c
| 755
|
py
|
Python
|
src/dispatch/plugins/dispatch_test/storage.py
|
roor0/dispatch
|
12c4f567096411abe62abaf61c7c124496764346
|
[
"Apache-2.0"
] | 3,417
|
2020-02-23T22:54:47.000Z
|
2022-03-31T13:01:01.000Z
|
src/dispatch/plugins/dispatch_test/storage.py
|
roor0/dispatch
|
12c4f567096411abe62abaf61c7c124496764346
|
[
"Apache-2.0"
] | 607
|
2020-02-24T14:27:02.000Z
|
2022-03-30T19:15:39.000Z
|
src/dispatch/plugins/dispatch_test/storage.py
|
roor0/dispatch
|
12c4f567096411abe62abaf61c7c124496764346
|
[
"Apache-2.0"
] | 359
|
2020-02-24T19:04:43.000Z
|
2022-03-29T06:48:12.000Z
|
from dispatch.plugins.bases import StoragePlugin
class TestStoragePlugin(StoragePlugin):
title = "Dispatch Test Plugin - Storage"
slug = "test-storage"
def get(self, **kwargs):
return
def create(self, items, **kwargs):
return
def update(self, items, **kwargs):
return
def delete(self, items, **kwargs):
return
def list(self, **kwargs):
return
def add_participant(self, items, **kwargs):
return
def remove_participant(self, items, **kwargs):
return
def add_file(self, **kwargs):
return
def delete_file(self, **kwargs):
return
def move_file(self, **kwargs):
return
def list_files(self, **kwargs):
return
| 18.875
| 50
| 0.6
| 83
| 755
| 5.385542
| 0.349398
| 0.295302
| 0.33557
| 0.212528
| 0.472036
| 0.1566
| 0
| 0
| 0
| 0
| 0
| 0
| 0.288742
| 755
| 39
| 51
| 19.358974
| 0.832402
| 0
| 0
| 0.423077
| 0
| 0
| 0.055629
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.423077
| false
| 0
| 0.038462
| 0.423077
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
675dc106d8faa435cdb16055fb7192d7cb79d363
| 5,819
|
py
|
Python
|
tests/test_crud.py
|
luizklitzke1/cadastro_treinamento
|
96fbfc90be8fa846b614e4d6ea08c7accf2895c4
|
[
"MIT"
] | null | null | null |
tests/test_crud.py
|
luizklitzke1/cadastro_treinamento
|
96fbfc90be8fa846b614e4d6ea08c7accf2895c4
|
[
"MIT"
] | 1
|
2021-03-04T22:31:34.000Z
|
2021-03-06T17:26:04.000Z
|
tests/test_crud.py
|
luizklitzke1/cadastro_treinamento
|
96fbfc90be8fa846b614e4d6ea08c7accf2895c4
|
[
"MIT"
] | 1
|
2021-04-25T14:27:09.000Z
|
2021-04-25T14:27:09.000Z
|
from flask import json, jsonify, request
#Testes básicos para as rotas de CRUD
def test_add_sala(client):
response= client.post('/cadastrar_sala', data=json.dumps( {'nome': 'sala 31313'}), content_type='application/json')
response= client.post('/cadastrar_sala', data=json.dumps( {'nome': 'sala 341'}), content_type='application/json')
data = json.loads(response.get_data(as_text=True))
assert data['resultado'] == 'ok'
def test_editar_sala(client):
response= client.post('/editar_sala/1',data=json.dumps( {"novo_nome": 'tom'}), content_type='application/json')
data = json.loads(response.get_data(as_text=True))
assert data['resultado'] == 'ok'
def test_editar_sala_inexistente(client):
response= client.post('/editar_sala/23',data=json.dumps( {"novo_nome": 'tom'}), content_type='application/json')
data = json.loads(response.get_data(as_text=True))
assert data['resultado'] == 'erro'
def test_apagar_sala(client):
response= client.post('/cadastrar_sala', data=json.dumps( {'nome': 'sala 23'}), content_type='application/json')
response= client.post('/apagar_sala/3',)
data = json.loads(response.get_data(as_text=True))
assert data['resultado'] == 'ok'
def test_apagar_sala_inexistente(client):
response= client.post('/apagar_sala/24',)
data = json.loads(response.get_data(as_text=True))
assert data['resultado'] == 'erro'
def test_add_espaco_cafe(client):
response= client.post('/cadastrar_espaco_cafe', data=json.dumps( {'nome': 'café 123'}), content_type='application/json')
response= client.post('/cadastrar_espaco_cafe', data=json.dumps( {'nome': 'café 321'}), content_type='application/json')
data = json.loads(response.get_data(as_text=True))
assert data['resultado'] == 'ok'
def test_editar_espaco_cafe(client):
response= client.post('/editar_cafe/1',data=json.dumps( {"novo_nome": 'café 22'}), content_type='application/json')
data = json.loads(response.get_data(as_text=True))
assert data['resultado'] == 'ok'
def test_editar_espaco_cafe_inexistente(client):
response= client.post('/editar_cafe/21',data=json.dumps( {"novo_nome": 'tom'}), content_type='application/json')
data = json.loads(response.get_data(as_text=True))
assert data['resultado'] == 'erro'
def test_apagar_espaco_cafe(client):
response= client.post('/cadastrar_espaco_cafe', data=json.dumps( {'nome': 'café 333'}), content_type='application/json')
response= client.post('/apagar_espaco_cafe/3',)
data = json.loads(response.get_data(as_text=True))
assert data['resultado'] == 'ok'
def test_apagar_espaco_cafe_inexistente(client):
response= client.post('/apagar_espaco_cafe/99',)
data = json.loads(response.get_data(as_text=True))
assert data['resultado'] == 'erro'
def test_add_pessoa(client):
dados = {'cpf': '99112585483', 'nome': 'Carlos', 'sobrenome' : 'Silva',
'sala1_id' : 1, 'cafe1_id': 1, 'cafe2_id': 2,
}
response= client.post('/cadastrar_pessoa',
data=json.dumps( dados ),
content_type='application/json')
data = json.loads(response.get_data(as_text=True))
assert data['detalhes'] == 'ok'
def test_editar_pessoa(client):
dados = {'novo_cpf': '34602355196', 'novo_nome': 'Augusto', 'novo_sobrenome' : 'Carara',}
response= client.post('/editar_pessoa/99112585483',
data=json.dumps( dados ),
content_type='application/json')
data = json.loads(response.get_data(as_text=True))
assert data['resultado'] == 'ok'
def test_editar_pessoa_inexistente(client):
dados = {'novo_cpf': '53847856677', 'novo_nome': 'Augusto', 'novo_sobrenome' : 'Carara',}
response= client.post('/editar_pessoa/53847856677',
data=json.dumps( dados ),
content_type='application/json')
data = json.loads(response.get_data(as_text=True))
assert data['resultado'] == 'erro'
def test_apagar_pessoa(client):
dados = {'cpf': '17715550175', 'nome': 'Geralt', 'sobrenome' : 'Rivia',
'sala1_id' : 1, 'cafe1_id': 1, 'cafe2_id': 2,
}
response= client.post('/cadastrar_pessoa',
data=json.dumps( dados ),
content_type='application/json')
response= client.post('/apagar_pessoa/17715550175',)
data = json.loads(response.get_data(as_text=True))
assert data['resultado'] == 'ok'
def test_apagar_pessoa_inexistente(client):
response= client.post('/apagar_pessoa/9585785757',)
data = json.loads(response.get_data(as_text=True))
assert data['resultado'] == 'erro'
def test_add_pessoa_badCPF(client):
dados = {'cpf': 'awdawdawdawdawd', 'nome': 'Jonas', 'sobrenome' : 'Souza',
'sala1_id' : 2, 'cafe1_id': 1, 'cafe2_id': 2,
}
response= client.post('/cadastrar_pessoa',
data=json.dumps( dados ),
content_type='application/json')
data = json.loads(response.get_data(as_text=True))
assert data['resultado'] == 'erro'
def test_add_pessoa_cpf_repetido(client):
dados = {'cpf': '34602355196', 'nome': 'Jonas', 'sobrenome' : 'Souza',
'sala1_id' : 2, 'cafe1_id': 1, 'cafe2_id': 2,
}
response= client.post('/cadastrar_pessoa',
data=json.dumps( dados ),
content_type='application/json')
data = json.loads(response.get_data(as_text=True))
assert data['resultado'] == 'erro'
| 36.597484
| 123
| 0.623819
| 693
| 5,819
| 5.025974
| 0.116883
| 0.075797
| 0.113695
| 0.102498
| 0.883147
| 0.869653
| 0.795579
| 0.77261
| 0.722079
| 0.722079
| 0
| 0.034201
| 0.221172
| 5,819
| 159
| 124
| 36.597484
| 0.734334
| 0.006187
| 0
| 0.53
| 0
| 0
| 0.234307
| 0.036659
| 0
| 0
| 0
| 0
| 0.17
| 1
| 0.17
| false
| 0
| 0.01
| 0
| 0.18
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
67676984a4717306058fb05019e21f6c26df9123
| 3,177
|
py
|
Python
|
pyslam/thirdparty/disk/submodules/unets/tests.py
|
dysdsyd/VO_benchmark
|
a7602edab934419c1ec73618ee655e18026f834f
|
[
"Apache-2.0"
] | 2
|
2021-09-11T09:13:31.000Z
|
2021-11-03T01:39:56.000Z
|
pyslam/thirdparty/disk/submodules/unets/tests.py
|
dysdsyd/VO_benchmark
|
a7602edab934419c1ec73618ee655e18026f834f
|
[
"Apache-2.0"
] | null | null | null |
pyslam/thirdparty/disk/submodules/unets/tests.py
|
dysdsyd/VO_benchmark
|
a7602edab934419c1ec73618ee655e18026f834f
|
[
"Apache-2.0"
] | null | null | null |
import torch, unittest
from unets import thin_setup, fat_setup, Unet, ThinUnetUpBlock, \
ThinUnetDownBlock, AttentionGate
class BaseTests(unittest.TestCase):
def test_inequal_output_asymmetric(self):
unet = Unet(
in_features=3,
down=[16, 32, 64], up=[40, 4]
)
input = torch.zeros(2, 3, 104, 104)
output = unet(input)
self.assertEqual(torch.Size([2, 4, 24, 24]), output.size())
def test_inequal_output_symmetric(self):
unet = Unet(
down=[16, 32, 64], up=[40, 1]
)
input = torch.zeros(2, 1, 104, 104)
output = unet(input)
self.assertEqual(torch.Size([2, 1, 24, 24]), output.size())
class CheckpointedTests(unittest.TestCase):
def test_inequal_output_asymmetric(self):
unet = Unet(
in_features=3,
down=[16, 32, 64], up=[40, 4],
setup={**fat_setup, 'checkpointed': True}
)
input = torch.zeros(2, 3, 104, 104)
output = unet(input)
self.assertEqual(torch.Size([2, 4, 24, 24]), output.size())
class NoBiasTests(unittest.TestCase):
def test_bias(self):
unet = Unet(
in_features=3,
down=[16, 32, 64], up=[40, 4],
)
checker = lambda name_weight: 'bias' in name_weight[0]
bias = any(map(checker, unet.named_parameters()))
self.assertTrue(bias)
def test_no_bias(self):
unet = Unet(
in_features=3,
down=[16, 32, 64], up=[40, 4],
setup={**fat_setup, 'bias': False}
)
checker = lambda name_weight: 'bias' not in name_weight[0]
no_bias = all(map(checker, unet.named_parameters()))
self.assertTrue(no_bias)
class ThinTests(unittest.TestCase):
def test_inequal_output_asymmetric(self):
unet = Unet(
in_features=3,
down=[16, 32, 64],
up=[40, 4],
setup=thin_setup
)
input = torch.zeros(2, 3, 104, 104)
output = unet(input)
self.assertEqual(torch.Size([2, 4, 64, 64]), output.size())
def test_inequal_output_symmetric(self):
unet = Unet(
down=[16, 32, 64],
up=[40, 1],
setup=thin_setup
)
input = torch.zeros(2, 1, 104, 104)
output = unet(input)
self.assertEqual(torch.Size([2, 1, 64, 64]), output.size())
class AttentionTests(unittest.TestCase):
def test_inequal_output_asymmetric(self):
unet = Unet(
in_features=3,
down=[16, 32, 64],
up=[40, 4],
setup={**thin_setup, 'gate': AttentionGate}
)
input = torch.zeros(2, 3, 104, 104)
output = unet(input)
self.assertEqual(torch.Size([2, 4, 64, 64]), output.size())
def test_inequal_output_symmetric(self):
unet = Unet(
down=[16, 32, 64],
up=[40, 1],
setup={**thin_setup, 'gate': AttentionGate}
)
input = torch.zeros(2, 1, 104, 104)
output = unet(input)
self.assertEqual(torch.Size([2, 1, 64, 64]), output.size())
unittest.main()
| 32.090909
| 67
| 0.54926
| 393
| 3,177
| 4.323155
| 0.160305
| 0.037081
| 0.063567
| 0.052972
| 0.824014
| 0.778105
| 0.778105
| 0.727487
| 0.727487
| 0.707475
| 0
| 0.085505
| 0.311615
| 3,177
| 98
| 68
| 32.418367
| 0.691358
| 0
| 0
| 0.647727
| 0
| 0
| 0.010072
| 0
| 0
| 0
| 0
| 0
| 0.102273
| 1
| 0.102273
| false
| 0
| 0.022727
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
67689cab1349c8498f081f3f3ea8328b79af4b39
| 39
|
py
|
Python
|
PyCurrency_Converter/__init__.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 3
|
2021-05-20T22:30:41.000Z
|
2022-01-15T14:20:06.000Z
|
PyCurrency_Converter/__init__.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
PyCurrency_Converter/__init__.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2022-03-03T02:25:28.000Z
|
2022-03-03T02:25:28.000Z
|
from .PyCurrency import convert, codes
| 19.5
| 38
| 0.820513
| 5
| 39
| 6.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 39
| 1
| 39
| 39
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6786564454fc379e514e4f40d50c8e1e8b237f2c
| 33
|
py
|
Python
|
instabot/bot/__init__.py
|
SOUFIANEZAZA/instapro
|
7ab33b035211345db12b75b64bdd7f9edd1dbd2b
|
[
"Apache-2.0"
] | 84
|
2017-04-26T08:42:11.000Z
|
2022-03-14T21:53:05.000Z
|
instabot/bot/__init__.py
|
sudoguy/instapro
|
7a7003cf07fdf992037641f61beee8815be8a0b1
|
[
"Apache-2.0"
] | 10
|
2017-05-15T07:18:51.000Z
|
2020-07-18T10:55:02.000Z
|
instabot/bot/__init__.py
|
sudoguy/instapro
|
7a7003cf07fdf992037641f61beee8815be8a0b1
|
[
"Apache-2.0"
] | 26
|
2017-05-12T15:03:32.000Z
|
2022-02-10T08:04:28.000Z
|
from .bot import Bot
assert Bot
| 8.25
| 20
| 0.757576
| 6
| 33
| 4.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212121
| 33
| 3
| 21
| 11
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
67b1611b9cb8c61d015963090988febb82d98d02
| 143
|
py
|
Python
|
EverPPMS/__init__.py
|
DomiDre/EverPPMS
|
968778fb35a628c8af3065f3e48ec077cdc42180
|
[
"MIT"
] | null | null | null |
EverPPMS/__init__.py
|
DomiDre/EverPPMS
|
968778fb35a628c8af3065f3e48ec077cdc42180
|
[
"MIT"
] | null | null | null |
EverPPMS/__init__.py
|
DomiDre/EverPPMS
|
968778fb35a628c8af3065f3e48ec077cdc42180
|
[
"MIT"
] | null | null | null |
from ._lib import generate_FORC_sequence, generate_IRM_DCD_sequence, get_cmap, closest_idx
from ._forc import FORC
from ._irmdcd import IRMDCD
| 35.75
| 90
| 0.853147
| 22
| 143
| 5.090909
| 0.590909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104895
| 143
| 3
| 91
| 47.666667
| 0.875
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
67c162b3c99c614d1a3dcba1a30bfb1eeaa608d4
| 68
|
py
|
Python
|
ansible-vault-password.py
|
takayukioda/theowner
|
9cd3422ca2059d8eb3fd7c8e96a59a088bc4da15
|
[
"MIT"
] | null | null | null |
ansible-vault-password.py
|
takayukioda/theowner
|
9cd3422ca2059d8eb3fd7c8e96a59a088bc4da15
|
[
"MIT"
] | null | null | null |
ansible-vault-password.py
|
takayukioda/theowner
|
9cd3422ca2059d8eb3fd7c8e96a59a088bc4da15
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
import os
print os.environ['VAULT_PASSWORD']
| 17
| 34
| 0.764706
| 11
| 68
| 4.636364
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016129
| 0.088235
| 68
| 3
| 35
| 22.666667
| 0.806452
| 0.308824
| 0
| 0
| 0
| 0
| 0.304348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.5
| 0.5
| null | null | 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 1
|
0
| 6
|
67c1fd7165f7ccf91f19f7d7d4024476a93c054c
| 1,534
|
py
|
Python
|
pirates/leveleditor/worldData/Vegas.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 3
|
2021-02-25T06:38:13.000Z
|
2022-03-22T07:00:15.000Z
|
pirates/leveleditor/worldData/Vegas.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | null | null | null |
pirates/leveleditor/worldData/Vegas.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-25T06:38:17.000Z
|
2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.leveleditor.worldData.Vegas
from pandac.PandaModules import Point3, VBase3
objectStruct = {'Objects': {'1149705528.16Shochet': {'Type': 'Region', 'Name': 'default', 'Objects': {'1149705583.09Shochet': {'Type': 'Island', 'Name': 'Vegas', 'File': 'VegasIsland', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-410.0, 80.0, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/islands/bilgewater_zero'}}, '1170402213.06Shochet': {'Type': 'Ship Spawn Node', 'Flagship': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Level': '3', 'Pos': Point3(-772.601, -1686.021, -0.0), 'Spawnables': 'Merchant', 'Team': '2', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1170402362.67Shochet': {'Type': 'Ship Spawn Node', 'Flagship': False, 'Hpr': Point3(0.0, 0.0, 0.0), 'Level': '3', 'Pos': Point3(333.601, -1679.88, 0.0), 'Spawnables': 'Warship', 'Team': '1', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}}, 'Visual': {}}}, 'Layers': {}, 'ObjectIds': {'1149705528.16Shochet': '["Objects"]["1149705528.16Shochet"]', '1149705583.09Shochet': '["Objects"]["1149705528.16Shochet"]["Objects"]["1149705583.09Shochet"]', '1170402213.06Shochet': '["Objects"]["1149705528.16Shochet"]["Objects"]["1170402213.06Shochet"]', '1170402362.67Shochet': '["Objects"]["1149705528.16Shochet"]["Objects"]["1170402362.67Shochet"]'}}
| 255.666667
| 1,267
| 0.649283
| 206
| 1,534
| 4.830097
| 0.432039
| 0.046231
| 0.045226
| 0.036181
| 0.21206
| 0.21206
| 0.21206
| 0.21206
| 0.196985
| 0.196985
| 0
| 0.223016
| 0.088005
| 1,534
| 6
| 1,267
| 255.666667
| 0.488206
| 0.137549
| 0
| 0
| 0
| 0
| 0.559515
| 0.208491
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
67d37fbbb32144242c9c8a74bdb7107c62c0e8b3
| 75
|
py
|
Python
|
example/sns/preprocess.py
|
rog-works/lf3py
|
e89937f7aa133ed54d85764f06101ab9abf6b960
|
[
"CNRI-Python"
] | null | null | null |
example/sns/preprocess.py
|
rog-works/lf3py
|
e89937f7aa133ed54d85764f06101ab9abf6b960
|
[
"CNRI-Python"
] | 48
|
2020-12-19T13:47:26.000Z
|
2021-01-07T22:27:56.000Z
|
example/sns/preprocess.py
|
rog-works/lf3py
|
e89937f7aa133ed54d85764f06101ab9abf6b960
|
[
"CNRI-Python"
] | null | null | null |
import os
import sys
sys.path.append(f'{os.getcwd()}/example/sns/vendor')
| 15
| 52
| 0.733333
| 13
| 75
| 4.230769
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 75
| 4
| 53
| 18.75
| 0.797101
| 0
| 0
| 0
| 0
| 0
| 0.426667
| 0.426667
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
db0d440ee553395207fa660c3b8b52d726fe6aae
| 3,893
|
bzl
|
Python
|
example/third_party/org_apache_maven_resolver.bzl
|
wix-playground/rules_maven_third_party
|
ff0b486df194779d7d8e6c9102cd12138e3305c3
|
[
"Apache-2.0"
] | null | null | null |
example/third_party/org_apache_maven_resolver.bzl
|
wix-playground/rules_maven_third_party
|
ff0b486df194779d7d8e6c9102cd12138e3305c3
|
[
"Apache-2.0"
] | null | null | null |
example/third_party/org_apache_maven_resolver.bzl
|
wix-playground/rules_maven_third_party
|
ff0b486df194779d7d8e6c9102cd12138e3305c3
|
[
"Apache-2.0"
] | null | null | null |
load("@rules_maven_third_party//:import_external.bzl", import_external = "import_external")
def dependencies():
import_external(
name = "org_apache_maven_resolver_maven_resolver_api",
artifact = "org.apache.maven.resolver:maven-resolver-api:1.4.0",
artifact_sha256 = "85aac254240e8bf387d737acf5fcd18f07163ae55a0223b107c7e2af1dfdc6e6",
srcjar_sha256 = "be7f42679a5485fbe30c475afa05c12dd9a2beb83bbcebbb3d2e79eb8aeff9c4",
)
import_external(
name = "org_apache_maven_resolver_maven_resolver_connector_basic",
artifact = "org.apache.maven.resolver:maven-resolver-connector-basic:1.4.0",
artifact_sha256 = "4283db771d9265136615637bd22d02929cfd548c8d351f76ecb88a3006b5faf7",
srcjar_sha256 = "556163b53b1f98df263adf1d26b269cd45316a827f169e0ede514ca5fca0c5d1",
deps = [
"@org_apache_maven_resolver_maven_resolver_api",
"@org_apache_maven_resolver_maven_resolver_spi",
"@org_apache_maven_resolver_maven_resolver_util",
"@org_slf4j_slf4j_api",
],
)
import_external(
name = "org_apache_maven_resolver_maven_resolver_impl",
artifact = "org.apache.maven.resolver:maven-resolver-impl:1.4.0",
artifact_sha256 = "004662079feeed66251480ad76fedbcabff96ee53db29c59f6aa564647c5bfe6",
srcjar_sha256 = "b544f134261f813b1a44ffcc97590236d3d6e2519722d55dea395a96fef18206",
deps = [
"@org_apache_maven_resolver_maven_resolver_api",
"@org_apache_maven_resolver_maven_resolver_spi",
"@org_apache_maven_resolver_maven_resolver_util",
"@org_slf4j_slf4j_api",
],
)
import_external(
name = "org_apache_maven_resolver_maven_resolver_spi",
artifact = "org.apache.maven.resolver:maven-resolver-spi:1.4.0",
artifact_sha256 = "8a2985eb28135eae4c40db446081b1533c1813c251bb370756777697e0b7114e",
srcjar_sha256 = "89099a02006b6ce46096d89f021675bf000e96300bcdc0ff439a86d6e322c761",
deps = [
"@org_apache_maven_resolver_maven_resolver_api",
],
)
import_external(
name = "org_apache_maven_resolver_maven_resolver_transport_file",
artifact = "org.apache.maven.resolver:maven-resolver-transport-file:1.4.0",
artifact_sha256 = "94eb9bcc073ac1591002b26a4cf558324b12d8f76b6d5628151d7f87733436f6",
srcjar_sha256 = "17abd750063fa74cbf754e803ba27ca0216b0bebc8e45e1872cd9ed5a1e5e719",
deps = [
"@org_apache_maven_resolver_maven_resolver_api",
"@org_apache_maven_resolver_maven_resolver_spi",
"@org_slf4j_slf4j_api",
],
)
import_external(
name = "org_apache_maven_resolver_maven_resolver_transport_http",
artifact = "org.apache.maven.resolver:maven-resolver-transport-http:1.4.0",
artifact_sha256 = "8dddd83ec6244bde5ef63ae679a0ce5d7e8fc566369d7391c8814206e2a7114f",
srcjar_sha256 = "5af0150a1ab714b164763d1daca4b8fdd1ab6dd445ec3c57e7ec916ccbdf7e4e",
deps = [
"@org_apache_httpcomponents_httpclient",
"@org_apache_httpcomponents_httpcore",
"@org_apache_maven_resolver_maven_resolver_api",
"@org_apache_maven_resolver_maven_resolver_spi",
"@org_apache_maven_resolver_maven_resolver_util",
"@org_slf4j_jcl_over_slf4j",
"@org_slf4j_slf4j_api",
],
)
import_external(
name = "org_apache_maven_resolver_maven_resolver_util",
artifact = "org.apache.maven.resolver:maven-resolver-util:1.4.0",
artifact_sha256 = "e83b6c2de4b8b8d99d3c226f5e447f70df808834824336c360aa615fc4d7beac",
srcjar_sha256 = "74dd3696e2df175db39b944079f7b49941e39e57f98e469f942635a2ba1cae57",
deps = [
"@org_apache_maven_resolver_maven_resolver_api",
],
)
| 43.255556
| 93
| 0.720267
| 335
| 3,893
| 7.856716
| 0.149254
| 0.266717
| 0.143617
| 0.225684
| 0.56535
| 0.520137
| 0.520137
| 0.441109
| 0.340805
| 0.300532
| 0
| 0.202182
| 0.199589
| 3,893
| 89
| 94
| 43.741573
| 0.64249
| 0
| 0
| 0.473684
| 0
| 0.039474
| 0.629848
| 0.605446
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013158
| true
| 0
| 0.105263
| 0
| 0.118421
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e1f2b3785a3998e1cf5648e4fbd5bf5e5e230b3c
| 25
|
py
|
Python
|
bitfinexpy/__init__.py
|
DS-DataMining/bitfinexpy
|
e3d061d79dda1135f6b44abc64f3d76f3a40d989
|
[
"MIT"
] | null | null | null |
bitfinexpy/__init__.py
|
DS-DataMining/bitfinexpy
|
e3d061d79dda1135f6b44abc64f3d76f3a40d989
|
[
"MIT"
] | null | null | null |
bitfinexpy/__init__.py
|
DS-DataMining/bitfinexpy
|
e3d061d79dda1135f6b44abc64f3d76f3a40d989
|
[
"MIT"
] | 1
|
2021-04-19T16:09:49.000Z
|
2021-04-19T16:09:49.000Z
|
from . import bitfinexpy
| 12.5
| 24
| 0.8
| 3
| 25
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e1f6685e9bef9bfc4706d6b78fd7b67f0b8e9142
| 22
|
py
|
Python
|
python/remap.py
|
rmu75/rover-342-retrofit
|
b16a4c39cdb6fb52455afbf89b0094789e6a0719
|
[
"CC0-1.0"
] | null | null | null |
python/remap.py
|
rmu75/rover-342-retrofit
|
b16a4c39cdb6fb52455afbf89b0094789e6a0719
|
[
"CC0-1.0"
] | null | null | null |
python/remap.py
|
rmu75/rover-342-retrofit
|
b16a4c39cdb6fb52455afbf89b0094789e6a0719
|
[
"CC0-1.0"
] | null | null | null |
from stdglue import *
| 11
| 21
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c05631e773972348c3a80ef95fe725196dd80d14
| 235
|
py
|
Python
|
da4py/main/__init__.py
|
BoltMaud/da4py
|
535372c9cbce2f6adfff181d3b2e1b33422fed8a
|
[
"MIT"
] | 2
|
2020-01-22T15:46:20.000Z
|
2020-12-26T19:15:18.000Z
|
da4py/main/__init__.py
|
BoltMaud/da4py
|
535372c9cbce2f6adfff181d3b2e1b33422fed8a
|
[
"MIT"
] | 1
|
2019-10-07T07:08:03.000Z
|
2019-10-07T07:08:03.000Z
|
da4py/main/__init__.py
|
BoltMaud/da4py
|
535372c9cbce2f6adfff181d3b2e1b33422fed8a
|
[
"MIT"
] | 1
|
2019-10-04T13:14:12.000Z
|
2019-10-04T13:14:12.000Z
|
from da4py.main.objects import logToFormulas, pnToFormulas
from da4py.main.conformanceChecking import conformanceArtefacts, distancesToFormulas
from da4py.main.utils import variablesGenerator, formulas
from da4py.main import analytics
| 47
| 84
| 0.876596
| 26
| 235
| 7.923077
| 0.538462
| 0.174757
| 0.252427
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018519
| 0.080851
| 235
| 4
| 85
| 58.75
| 0.935185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c0627c67c2d92b69c29324a3b75edec54bc47c02
| 71
|
py
|
Python
|
encapsulation/restaurant/food/starter.py
|
ivan-yosifov88/python_oop_june_2021
|
7ae6126065abbcce7ce97c86d1150ae307360249
|
[
"MIT"
] | 1
|
2021-08-03T19:14:24.000Z
|
2021-08-03T19:14:24.000Z
|
encapsulation/restaurant/food/starter.py
|
ivan-yosifov88/python_oop_june_2021
|
7ae6126065abbcce7ce97c86d1150ae307360249
|
[
"MIT"
] | null | null | null |
encapsulation/restaurant/food/starter.py
|
ivan-yosifov88/python_oop_june_2021
|
7ae6126065abbcce7ce97c86d1150ae307360249
|
[
"MIT"
] | null | null | null |
from restaurant.food.food import Food
class Starter(Food):
pass
| 10.142857
| 37
| 0.732394
| 10
| 71
| 5.2
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.197183
| 71
| 6
| 38
| 11.833333
| 0.912281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
fbf7e29f0d00ac3f541c7d23d1b6f408ab4e49f8
| 14,979
|
py
|
Python
|
oneflow/compatible_single_client_python/test/ops/test_boxing_v2.py
|
xcnick/oneflow
|
7b786b27069dec35d2493256011e773988c91f56
|
[
"Apache-2.0"
] | null | null | null |
oneflow/compatible_single_client_python/test/ops/test_boxing_v2.py
|
xcnick/oneflow
|
7b786b27069dec35d2493256011e773988c91f56
|
[
"Apache-2.0"
] | null | null | null |
oneflow/compatible_single_client_python/test/ops/test_boxing_v2.py
|
xcnick/oneflow
|
7b786b27069dec35d2493256011e773988c91f56
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.compatible import single_client as flow
from test_util import GenArgList
from oneflow.compatible.single_client import typing as oft
import os
def _test_split_to_split(
test_case, src_device_type, dst_device_type, src_axis, dst_axis,
):
flow.clear_default_session()
flow.config.gpu_device_num(4)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
def build_s2s(input_blob, src_device_num, dst_device_num):
with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
src = flow.identity(
input_blob.with_distribute(flow.distribute.split(src_axis))
)
with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
dst = flow.identity(src.with_distribute(flow.distribute.split(dst_axis)))
return dst
@flow.global_function(function_config=func_config)
def split_to_split_job(input_blob: oft.Numpy.Placeholder((96, 96))):
result_list = []
for i in (1, 2, 3):
for j in (1, 2, 3):
result_list.append(build_s2s(input_blob, i, j))
return tuple(result_list)
x = np.random.rand(96, 96).astype(np.float32)
result_tuple = split_to_split_job(x).get()
for out in result_tuple:
test_case.assertTrue(np.array_equal(x, out.numpy()))
def _test_split_to_split_enable_all_to_all(
test_case, src_device_type, dst_device_type, src_device_num, dst_device_num,
):
flow.clear_default_session()
flow.config.gpu_device_num(4)
flow.config.collective_boxing.nccl_enable_all_to_all(True)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
def build_s2s_all2all(input_blob, src_axis, dst_axis):
with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
src = flow.identity(
input_blob.with_distribute(flow.distribute.split(src_axis))
)
with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
dst = flow.identity(src.with_distribute(flow.distribute.split(dst_axis)))
return dst
@flow.global_function(function_config=func_config)
def split_to_split_all2all_job(input_blob: oft.Numpy.Placeholder((32, 16, 64, 48))):
result_list = []
for i in (0, 1, 2, 3):
for j in (0, 1, 2, 3):
if i == j:
continue
result_list.append(build_s2s_all2all(input_blob, i, j))
return tuple(result_list)
x = np.random.rand(32, 16, 64, 48).astype(np.float32)
result_tuple = split_to_split_all2all_job(x).get()
for out in result_tuple:
test_case.assertTrue(np.array_equal(x, out.numpy()))
def _test_split_to_broadcast(
test_case, src_device_type, dst_device_type, src_axis,
):
flow.clear_default_session()
flow.config.gpu_device_num(4)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
def build_s2b(input_blob, src_device_num, dst_device_num):
with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
src = flow.identity(
input_blob.with_distribute(flow.distribute.split(src_axis))
)
with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
dst = flow.identity(src.with_distribute(flow.distribute.broadcast()))
return dst
@flow.global_function(function_config=func_config)
def split_to_broadcast_job(input_blob: oft.Numpy.Placeholder((96, 96))):
result_list = []
for i in (1, 2, 3):
for j in (1, 2, 3):
result_list.append(build_s2b(input_blob, i, j))
return tuple(result_list)
x = np.random.rand(96, 96).astype(np.float32)
result_tuple = split_to_broadcast_job(x).get()
for out in result_tuple:
test_case.assertTrue(np.array_equal(x, out.numpy()))
def _test_broadcast_to_split(
test_case, src_device_type, dst_device_type, dst_axis,
):
flow.clear_default_session()
flow.config.gpu_device_num(4)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
def build_b2s(input_blob, src_device_num, dst_device_num):
with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
src = flow.identity(input_blob.with_distribute(flow.distribute.broadcast()))
with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
dst = flow.identity(src.with_distribute(flow.distribute.split(dst_axis)))
return dst
@flow.global_function(function_config=func_config)
def broadcast_to_split_job(input_blob: oft.Numpy.Placeholder((96, 96))):
result_list = []
for i in (1, 2, 3):
for j in (1, 2, 3):
result_list.append(build_b2s(input_blob, i, j))
return tuple(result_list)
x = np.random.rand(96, 96).astype(np.float32)
result_tuple = broadcast_to_split_job(x).get()
for out in result_tuple:
test_case.assertTrue(np.array_equal(x, out.numpy()))
def _test_partial_sum_to_split(
test_case, src_device_type, dst_device_type, dst_axis,
):
flow.clear_default_session()
flow.config.gpu_device_num(4)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
def build_p2s(input_blob, src_device_num, dst_device_num):
with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
src = flow.identity(input_blob.with_distribute(flow.distribute.split(0)))
src = flow.math.reduce_sum(src, axis=0)
with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
dst = flow.identity(src.with_distribute(flow.distribute.split(dst_axis)))
return dst
@flow.global_function(function_config=func_config)
def partial_sum_to_split_job(input_blob: oft.Numpy.Placeholder((96, 96, 96))):
result_list = []
for i in (2, 3):
for j in (1, 2, 3):
result_list.append(build_p2s(input_blob, i, j))
return tuple(result_list)
x = np.random.uniform(-1e-5, 1e-5, (96, 96, 96)).astype(np.float32)
result_tuple = partial_sum_to_split_job(x).get()
for out in result_tuple:
test_case.assertTrue(np.allclose(np.sum(x, axis=0), out.numpy()))
def _test_partial_sum_to_broadcast(test_case, src_device_type, dst_device_type):
flow.clear_default_session()
flow.config.gpu_device_num(4)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
def build_p2b(input_blob, src_device_num, dst_device_num):
with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
src = flow.identity(input_blob.with_distribute(flow.distribute.split(0)))
src = flow.math.reduce_sum(src, axis=0)
with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
dst = flow.identity(src.with_distribute(flow.distribute.broadcast()))
return dst
@flow.global_function(function_config=func_config)
def partial_sum_to_broadcast_job(input_blob: oft.Numpy.Placeholder((96, 96, 96))):
result_list = []
for i in (2, 3):
for j in (1, 2, 3):
result_list.append(build_p2b(input_blob, i, j))
return tuple(result_list)
x = np.random.uniform(-1e-5, 1e-5, (96, 96, 96)).astype(np.float32)
result_tuple = partial_sum_to_broadcast_job(x).get()
for out in result_tuple:
test_case.assertTrue(np.allclose(np.sum(x, axis=0), out.numpy()))
def _test_broadcast_to_broadcast(test_case, src_device_type, dst_device_type):
flow.clear_default_session()
flow.config.gpu_device_num(4)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
def build_b2b(input_blob, src_device_num, dst_device_num):
with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
src = flow.identity(input_blob.with_distribute(flow.distribute.broadcast()))
with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
dst = flow.identity(src.with_distribute(flow.distribute.broadcast()))
return dst
@flow.global_function(function_config=func_config)
def broadcast_to_broadcast_job(input_blob: oft.Numpy.Placeholder((96, 96))):
result_list = []
for i in (1, 2, 3):
for j in (1, 2, 3):
result_list.append(build_b2b(input_blob, i, j))
return tuple(result_list)
x = np.random.rand(96, 96).astype(np.float32)
result_tuple = broadcast_to_broadcast_job(x).get()
for out in result_tuple:
test_case.assertTrue(np.array_equal(x, out.numpy()))
def _test_multi_lbi(
test_case, src_device_type, dst_device_type, src_device_num, dst_device_num
):
flow.clear_default_session()
flow.config.gpu_device_num(4)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
@flow.global_function(function_config=func_config)
def multi_lbi_job(x: oft.Numpy.Placeholder((96, 96, 96))):
with flow.scope.placement(src_device_type, "0:0-" + str(src_device_num - 1)):
src_s0 = flow.identity(x.with_distribute(flow.distribute.split(0)))
src_s1 = flow.identity(x.with_distribute(flow.distribute.split(1)))
src_b = flow.identity(x.with_distribute(flow.distribute.split(1)))
(t0_0, t0_1, t0_2) = flow.identity_n((src_s0, src_s1, src_b))
with flow.scope.placement(dst_device_type, "0:0-" + str(dst_device_num - 1)):
t0_0 = t0_0.with_distribute(flow.distribute.split(1))
t0_1 = t0_1.with_distribute(flow.distribute.broadcast())
t0_2 = t0_2.with_distribute(flow.distribute.split(1))
(t1_0, t1_1, t1_2) = flow.identity_n((t0_0, t0_1, t0_2))
return t1_0, t1_1, t1_2
x = np.random.uniform(-1e-5, 1e-5, (96, 96, 96)).astype(np.float32)
r0 = multi_lbi_job(x).get()[0].numpy()
r1 = multi_lbi_job(x).get()[1].numpy()
r2 = multi_lbi_job(x).get()[2].numpy()
test_case.assertTrue(np.array_equal(x, r0))
test_case.assertTrue(np.array_equal(x, r1))
test_case.assertTrue(np.array_equal(x, r2))
@flow.unittest.skip_unless_1n4d()
class TestBoxingV2(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_split_to_split(test_case):
arg_dict = OrderedDict()
arg_dict["src_device_type"] = ["cpu", "gpu"]
arg_dict["dst_device_type"] = ["cpu", "gpu"]
arg_dict["src_axis"] = [0, 1]
arg_dict["dst_axis"] = [0, 1]
for arg in GenArgList(arg_dict):
_test_split_to_split(test_case, *arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_split_to_split_all_to_all(test_case):
arg_dict = OrderedDict()
arg_dict["src_device_type"] = ["gpu"]
arg_dict["dst_device_type"] = ["gpu"]
arg_dict["src_device_num"] = [4]
arg_dict["dst_device_num"] = [4]
for arg in GenArgList(arg_dict):
_test_split_to_split_enable_all_to_all(test_case, *arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_split_to_broadcast(test_case):
arg_dict = OrderedDict()
arg_dict["src_device_type"] = ["cpu", "gpu"]
arg_dict["dst_device_type"] = ["cpu", "gpu"]
arg_dict["src_axis"] = [0, 1]
for arg in GenArgList(arg_dict):
_test_split_to_broadcast(test_case, *arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_broadcast_to_split(test_case):
arg_dict = OrderedDict()
arg_dict["src_device_type"] = ["cpu", "gpu"]
arg_dict["dst_device_type"] = ["cpu", "gpu"]
arg_dict["dst_axis"] = [0, 1]
for arg in GenArgList(arg_dict):
_test_broadcast_to_split(test_case, *arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_partial_sum_to_split(test_case):
arg_dict = OrderedDict()
arg_dict["src_device_type"] = ["cpu", "gpu"]
arg_dict["dst_device_type"] = ["cpu", "gpu"]
arg_dict["dst_axis"] = [0, 1]
for arg in GenArgList(arg_dict):
_test_partial_sum_to_split(test_case, *arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_partial_sum_to_broadcast(test_case):
arg_dict = OrderedDict()
arg_dict["src_device_type"] = ["cpu", "gpu"]
arg_dict["dst_device_type"] = ["cpu", "gpu"]
for arg in GenArgList(arg_dict):
_test_partial_sum_to_broadcast(test_case, *arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_broadcast_to_broadcast(test_case):
arg_dict = OrderedDict()
arg_dict["src_device_type"] = ["cpu", "gpu"]
arg_dict["dst_device_type"] = ["cpu", "gpu"]
for arg in GenArgList(arg_dict):
_test_broadcast_to_broadcast(test_case, *arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_multi_lbi(test_case):
arg_dict = OrderedDict()
arg_dict["src_device_type"] = ["cpu", "gpu"]
arg_dict["dst_device_type"] = ["cpu", "gpu"]
arg_dict["src_device_num"] = [1, 2, 3]
arg_dict["dst_device_num"] = [1, 2, 3]
for arg in GenArgList(arg_dict):
_test_multi_lbi(test_case, *arg)
if __name__ == "__main__":
unittest.main()
| 42.433428
| 88
| 0.678483
| 2,211
| 14,979
| 4.272275
| 0.085482
| 0.050815
| 0.03303
| 0.059284
| 0.897311
| 0.87455
| 0.850201
| 0.829663
| 0.813889
| 0.802033
| 0
| 0.026421
| 0.199012
| 14,979
| 352
| 89
| 42.553977
| 0.760877
| 0.038788
| 0
| 0.657343
| 0
| 0
| 0.056845
| 0.011675
| 0
| 0
| 0
| 0
| 0.034965
| 1
| 0.108392
| false
| 0
| 0.024476
| 0
| 0.188811
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
22185d2926427541f4bc4f131e81136ff4d3e341
| 143
|
py
|
Python
|
satef/engine/__init__.py
|
kostrzmar/SATEF
|
b483b073f1ff3dd797413f212e26114ef93cfe08
|
[
"MIT"
] | null | null | null |
satef/engine/__init__.py
|
kostrzmar/SATEF
|
b483b073f1ff3dd797413f212e26114ef93cfe08
|
[
"MIT"
] | null | null | null |
satef/engine/__init__.py
|
kostrzmar/SATEF
|
b483b073f1ff3dd797413f212e26114ef93cfe08
|
[
"MIT"
] | null | null | null |
from .AbstractEngine import AbstractEngine
from .AbstractEngineFactory import AbstractEngineFactory
from .EngineFactory import EngineFactory
| 23.833333
| 56
| 0.881119
| 12
| 143
| 10.5
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097902
| 143
| 5
| 57
| 28.6
| 0.976744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
22353cd51ba678061bff06895c30eca957b7741c
| 19
|
py
|
Python
|
RK/__init__.py
|
vitesempl/RK-IDE-Python
|
fa948930b711ed831dda2aa04e741c39bf0d8022
|
[
"MIT"
] | null | null | null |
RK/__init__.py
|
vitesempl/RK-IDE-Python
|
fa948930b711ed831dda2aa04e741c39bf0d8022
|
[
"MIT"
] | null | null | null |
RK/__init__.py
|
vitesempl/RK-IDE-Python
|
fa948930b711ed831dda2aa04e741c39bf0d8022
|
[
"MIT"
] | null | null | null |
from . import ide
| 6.333333
| 17
| 0.684211
| 3
| 19
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.263158
| 19
| 2
| 18
| 9.5
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
223cddeed3d47032df6d2fb7258cda20a60a0502
| 25
|
py
|
Python
|
keypointer/__init__.py
|
jhaux/keypointer
|
8b01ea0a8a0b5d6210b84495cd5462f3d42a1966
|
[
"MIT"
] | null | null | null |
keypointer/__init__.py
|
jhaux/keypointer
|
8b01ea0a8a0b5d6210b84495cd5462f3d42a1966
|
[
"MIT"
] | null | null | null |
keypointer/__init__.py
|
jhaux/keypointer
|
8b01ea0a8a0b5d6210b84495cd5462f3d42a1966
|
[
"MIT"
] | null | null | null |
from vid_to_key import *
| 12.5
| 24
| 0.8
| 5
| 25
| 3.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
22702edc59660c27293dd78a8f708aa85a13cedf
| 181
|
py
|
Python
|
muninn_django/tests.py
|
stcorp/muninn-django
|
dc23936cb55cbd0ca6b5b6895f2b2e963888cf96
|
[
"BSD-3-Clause"
] | 1
|
2019-02-08T03:27:20.000Z
|
2019-02-08T03:27:20.000Z
|
muninn_django/tests.py
|
stcorp/muninn-django
|
dc23936cb55cbd0ca6b5b6895f2b2e963888cf96
|
[
"BSD-3-Clause"
] | null | null | null |
muninn_django/tests.py
|
stcorp/muninn-django
|
dc23936cb55cbd0ca6b5b6895f2b2e963888cf96
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright (C) 2018-2020 S[&]T, The Netherlands.
#
from __future__ import absolute_import, division, print_function
from django.test import TestCase
# Create your tests here.
| 18.1
| 64
| 0.762431
| 25
| 181
| 5.28
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051948
| 0.149171
| 181
| 9
| 65
| 20.111111
| 0.805195
| 0.392265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
97dcdf5f0b2537174badc9e4f894adba0c35a1b3
| 47
|
py
|
Python
|
pyxb/bundles/opengis/_smil20.py
|
thorstenb/pyxb
|
634e86f61dfb73a2900f32fc3d819e9c25365a49
|
[
"Apache-2.0"
] | null | null | null |
pyxb/bundles/opengis/_smil20.py
|
thorstenb/pyxb
|
634e86f61dfb73a2900f32fc3d819e9c25365a49
|
[
"Apache-2.0"
] | null | null | null |
pyxb/bundles/opengis/_smil20.py
|
thorstenb/pyxb
|
634e86f61dfb73a2900f32fc3d819e9c25365a49
|
[
"Apache-2.0"
] | null | null | null |
from pyxb.bundles.opengis.raw._smil20 import *
| 23.5
| 46
| 0.808511
| 7
| 47
| 5.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046512
| 0.085106
| 47
| 1
| 47
| 47
| 0.813953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
97de9a3206491e8dbde3a05e6f709dded400e554
| 82
|
py
|
Python
|
toy-conslaw/models/__init__.py
|
IanHawke/toy-conslaw
|
bb93a62f39f4fb4f770bd4ca15105967adf03663
|
[
"MIT"
] | null | null | null |
toy-conslaw/models/__init__.py
|
IanHawke/toy-conslaw
|
bb93a62f39f4fb4f770bd4ca15105967adf03663
|
[
"MIT"
] | 1
|
2017-08-18T09:38:36.000Z
|
2017-08-22T16:03:04.000Z
|
toy-conslaw/models/__init__.py
|
IanHawke/toy-conslaw
|
bb93a62f39f4fb4f770bd4ca15105967adf03663
|
[
"MIT"
] | null | null | null |
__all__ = ["euler_gamma_law", "sr_euler_gamma_law", "sr_mhd", "sr_rmhd", "sr_mf"]
| 41
| 81
| 0.707317
| 14
| 82
| 3.285714
| 0.571429
| 0.434783
| 0.565217
| 0.652174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085366
| 82
| 1
| 82
| 82
| 0.613333
| 0
| 0
| 0
| 0
| 0
| 0.621951
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
97f7db0a3776181e8ffb50ea0317ff124cdd50c1
| 194
|
bzl
|
Python
|
third_party/systemlibs/grpc.bazel.grpc_deps.bzl
|
laoma023012/TensorFlow-practice
|
2b02167307eca3950cc7e49c7c50510ff5ccb92e
|
[
"Apache-2.0"
] | null | null | null |
third_party/systemlibs/grpc.bazel.grpc_deps.bzl
|
laoma023012/TensorFlow-practice
|
2b02167307eca3950cc7e49c7c50510ff5ccb92e
|
[
"Apache-2.0"
] | 58
|
2021-11-22T05:41:28.000Z
|
2022-01-19T01:33:40.000Z
|
third_party/systemlibs/grpc.bazel.grpc_deps.bzl
|
laoma023012/TensorFlow-practice
|
2b02167307eca3950cc7e49c7c50510ff5ccb92e
|
[
"Apache-2.0"
] | null | null | null |
"""Load dependencies needed to compile and test the grpc library as a 3rd-party consumer."""
def grpc_deps():
"""Loads dependencies need to compile and test the grpc library."""
pass
| 24.25
| 92
| 0.71134
| 29
| 194
| 4.724138
| 0.689655
| 0.131387
| 0.175182
| 0.233577
| 0.437956
| 0.437956
| 0.437956
| 0
| 0
| 0
| 0
| 0.00641
| 0.195876
| 194
| 7
| 93
| 27.714286
| 0.871795
| 0.762887
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
3f13aec5e6b87b9524dbbb890b2a837a678f2ee4
| 40
|
py
|
Python
|
feat/face_detectors/Retinaface/__init__.py
|
kenneym/py-feat
|
59a25139ad52914d41ebf7fd63e25357c097b745
|
[
"MIT"
] | 93
|
2021-04-09T02:34:41.000Z
|
2022-03-14T01:18:59.000Z
|
feat/face_detectors/Retinaface/__init__.py
|
kenneym/py-feat
|
59a25139ad52914d41ebf7fd63e25357c097b745
|
[
"MIT"
] | 65
|
2018-02-04T02:39:13.000Z
|
2021-03-25T05:31:03.000Z
|
feat/face_detectors/Retinaface/__init__.py
|
kenneym/py-feat
|
59a25139ad52914d41ebf7fd63e25357c097b745
|
[
"MIT"
] | 31
|
2021-04-12T09:37:22.000Z
|
2022-03-11T17:48:05.000Z
|
from .Retinaface_test import RetinaFace
| 20
| 39
| 0.875
| 5
| 40
| 6.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3f1f77e60d9a1e8991647a513c8ed8d2841c0eca
| 2,457
|
py
|
Python
|
tests/unit/dataactvalidator/test_a8_appropriations.py
|
dael-victoria-reyes/data-act-broker-backend
|
f83c7cad29cac24d95f45a262710dc1564de7dc1
|
[
"CC0-1.0"
] | 1
|
2019-06-22T21:53:16.000Z
|
2019-06-22T21:53:16.000Z
|
tests/unit/dataactvalidator/test_a8_appropriations.py
|
dael-victoria-reyes/data-act-broker-backend
|
f83c7cad29cac24d95f45a262710dc1564de7dc1
|
[
"CC0-1.0"
] | null | null | null |
tests/unit/dataactvalidator/test_a8_appropriations.py
|
dael-victoria-reyes/data-act-broker-backend
|
f83c7cad29cac24d95f45a262710dc1564de7dc1
|
[
"CC0-1.0"
] | null | null | null |
from dataactcore.models.stagingModels import Appropriation
from dataactcore.models.domainModels import SF133
from tests.unit.dataactvalidator.utils import number_of_errors
_FILE = 'a8_appropriations'
_TAS = 'a8_appropriations_tas'
def test_success(database):
""" Tests that SF 133 amount sum for lines 1160, 1180, 1260, 1280 matches Appropriation
budget_authority_appropria_cpe for the specified fiscal year and period """
tas = "".join([_TAS, "_success"])
sf_1 = SF133(line=1160, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier="sys",
main_account_code="000", sub_account_code="000")
sf_2 = SF133(line=1180, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier="sys",
main_account_code="000", sub_account_code="000")
sf_3 = SF133(line=1260, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier="sys",
main_account_code="000", sub_account_code="000")
sf_4 = SF133(line=1280, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier="sys",
main_account_code="000", sub_account_code="000")
ap = Appropriation(job_id=1, row_number=1, tas=tas, budget_authority_appropria_cpe=4)
models = [sf_1, sf_2, sf_3, sf_4, ap]
assert number_of_errors(_FILE, database, models=models) == 0
def test_failure(database):
""" Tests that SF 133 amount sum for lines 1160, 1180, 1260, 1280 does not match Appropriation
budget_authority_appropria_cpe for the specified fiscal year and period """
tas = "".join([_TAS, "_failure"])
sf_1 = SF133(line=1160, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier="sys",
main_account_code="000", sub_account_code="000")
sf_2 = SF133(line=1180, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier="sys",
main_account_code="000", sub_account_code="000")
sf_3 = SF133(line=1260, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier="sys",
main_account_code="000", sub_account_code="000")
sf_4 = SF133(line=1280, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier="sys",
main_account_code="000", sub_account_code="000")
ap = Appropriation(job_id=1, row_number=1, tas=tas, budget_authority_appropria_cpe=1)
models = [sf_1, sf_2, sf_3, sf_4, ap]
assert number_of_errors(_FILE, database, models=models) == 1
| 50.142857
| 99
| 0.701262
| 364
| 2,457
| 4.461538
| 0.192308
| 0.108374
| 0.137931
| 0.064039
| 0.841133
| 0.841133
| 0.841133
| 0.841133
| 0.841133
| 0.841133
| 0
| 0.108309
| 0.177045
| 2,457
| 48
| 100
| 51.1875
| 0.694857
| 0.129833
| 0
| 0.580645
| 0
| 0
| 0.059772
| 0.009962
| 0
| 0
| 0
| 0
| 0.064516
| 1
| 0.064516
| false
| 0
| 0.096774
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3f42c7f437d38f08ff8c6eac4fcc6ff1f1bcaaeb
| 34
|
py
|
Python
|
application/attendance/__init__.py
|
lahdjirayhan/drive-kesma-library-linker
|
7c945e2b8efd8d05a571b563e0738dc3c086263e
|
[
"Unlicense"
] | null | null | null |
application/attendance/__init__.py
|
lahdjirayhan/drive-kesma-library-linker
|
7c945e2b8efd8d05a571b563e0738dc3c086263e
|
[
"Unlicense"
] | null | null | null |
application/attendance/__init__.py
|
lahdjirayhan/drive-kesma-library-linker
|
7c945e2b8efd8d05a571b563e0738dc3c086263e
|
[
"Unlicense"
] | null | null | null |
from .absen import absen_from_line
| 34
| 34
| 0.882353
| 6
| 34
| 4.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 34
| 1
| 34
| 34
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3f6092832626e249cd36b1167c70a2446058b504
| 120
|
py
|
Python
|
app/core/__init__.py
|
erik-at-techsanity/fastapi-template
|
f73b75ee461f0b8774adb41f9fccdab52314a1e3
|
[
"MIT"
] | 3
|
2021-04-20T23:44:52.000Z
|
2022-02-16T02:24:43.000Z
|
app/core/__init__.py
|
erik-at-techsanity/fastapi-template
|
f73b75ee461f0b8774adb41f9fccdab52314a1e3
|
[
"MIT"
] | null | null | null |
app/core/__init__.py
|
erik-at-techsanity/fastapi-template
|
f73b75ee461f0b8774adb41f9fccdab52314a1e3
|
[
"MIT"
] | null | null | null |
# Standard Library Imports
# None
# 3rd-Party Imports
# None
# App-Local Imports
from app.core.config import settings
| 13.333333
| 36
| 0.758333
| 17
| 120
| 5.352941
| 0.764706
| 0.241758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01
| 0.166667
| 120
| 8
| 37
| 15
| 0.9
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
58ad6575643fbea8ef7908bea2b9c6ba9dd310d9
| 18,788
|
py
|
Python
|
PrintTags/print_tags.py
|
MichaelDylan77/PrintTags
|
9a1b4bbbeaee2ac91b0f96c745c7cf0a7823e831
|
[
"MIT"
] | 31
|
2019-02-28T17:30:38.000Z
|
2019-05-19T07:01:03.000Z
|
PrintTags/print_tags.py
|
mdlockyer/PrintTags
|
9a1b4bbbeaee2ac91b0f96c745c7cf0a7823e831
|
[
"MIT"
] | 1
|
2019-02-28T17:34:06.000Z
|
2019-03-01T20:43:53.000Z
|
PrintTags/print_tags.py
|
mdlockyer/PrintTags
|
9a1b4bbbeaee2ac91b0f96c745c7cf0a7823e831
|
[
"MIT"
] | 3
|
2019-09-23T16:06:05.000Z
|
2020-04-04T02:41:47.000Z
|
# -*- coding: utf-8 -*-
from datetime import datetime
from .colors import Colors
from typing import List, Tuple, TextIO, Optional, Callable, Any
def _get_datetime() -> str:
return datetime.now().strftime('%d-%b-%Y %I:%M:%S%p')
def _print_with_color(args: Tuple[Any, ...], color_fn: Callable[[str], str],
add_datetime: bool, prefixes: Tuple[Optional[str], ...],
sep: str, end: str, closed_ok: bool, file: Optional[TextIO],
flush: bool) -> None:
_args: List[str] = [str(arg) for arg in args]
for prefix in reversed(prefixes):
if prefix is None:
continue
# Add a space to the end of the prefix if is doesn't already have one
_args[0] = f'{prefix}{_args[0]}' if prefix.endswith(' ') else f'{prefix} {_args[0]}'
if add_datetime:
_args[0] = f'{_get_datetime()} {_args[0]}'
_args = [color_fn(arg) for arg in _args]
try:
print(*_args, sep=color_fn(sep), end=color_fn(end), file=file, flush=flush)
except ValueError:
if closed_ok:
pass
else:
raise
def black(*args: Any, add_datetime: bool = False, prefix: Optional[str] = None,
sep: str = ' ', end: str = '\n', closed_ok: bool = False,
file: Optional[TextIO] = None, flush: bool = False) -> None:
"""
Prints values in black.
Args:
add_datetime (bool, optional): Whether or not a datetime timestamp should be printed. Default `False`.
prefix (any, optional): A string interpolatable value that should be prepended to the print. Default `None`.
sep (str, optional): String inserted between values, default is a space. Default `' '`.
end (str, optional): String appended after the last value, default is a newline. Default `\n`.
closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be
suppressed. Default `False`.
file: A file-like object (stream, optional): Defaults to the current sys.stdout. Default `None`.
flush (bool, optional): Whether to forcibly flush the stream. Default `False`.
"""
_print_with_color(args, Colors.black, add_datetime, (prefix,), sep, end, closed_ok, file, flush)
def red(*args: Any, add_datetime: bool = False, prefix: Optional[str] = None,
sep: str = ' ', end: str = '\n', closed_ok: bool = False,
file: Optional[TextIO] = None, flush: bool = False) -> None:
"""
Prints values in red.
Args:
add_datetime (bool, optional): Whether or not a datetime timestamp should be printed. Default `False`.
prefix (any, optional): A string interpolatable value that should be prepended to the print. Default `None`.
sep (str, optional): String inserted between values, default is a space. Default `' '`.
end (str, optional): String appended after the last value, default is a newline. Default `\n`.
closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be
suppressed. Default `False`.
file: A file-like object (stream, optional): Defaults to the current sys.stdout. Default `None`.
flush (bool, optional): Whether to forcibly flush the stream. Default `False`.
"""
_print_with_color(args, Colors.red, add_datetime, (prefix,), sep, end, closed_ok, file, flush)
def green(*args: Any, add_datetime: bool = False, prefix: Optional[str] = None,
sep: str = ' ', end: str = '\n', closed_ok: bool = False,
file: Optional[TextIO] = None, flush: bool = False) -> None:
"""
Prints values in green.
Args:
add_datetime (bool, optional): Whether or not a datetime timestamp should be printed. Default `False`.
prefix (any, optional): A string interpolatable value that should be prepended to the print. Default `None`.
sep (str, optional): String inserted between values, default is a space. Default `' '`.
end (str, optional): String appended after the last value, default is a newline. Default `\n`.
closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be
suppressed. Default `False`.
file: A file-like object (stream, optional): Defaults to the current sys.stdout. Default `None`.
flush (bool, optional): Whether to forcibly flush the stream. Default `False`.
"""
_print_with_color(args, Colors.green, add_datetime, (prefix,), sep, end, closed_ok, file, flush)
def yellow(*args: Any, add_datetime: bool = False, prefix: Optional[str] = None,
sep: str = ' ', end: str = '\n', closed_ok: bool = False,
file: Optional[TextIO] = None, flush: bool = False) -> None:
"""
Prints values in yellow.
Args:
add_datetime (bool, optional): Whether or not a datetime timestamp should be printed. Default `False`.
prefix (any, optional): A string interpolatable value that should be prepended to the print. Default `None`.
sep (str, optional): String inserted between values, default is a space. Default `' '`.
end (str, optional): String appended after the last value, default is a newline. Default `\n`.
closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be
suppressed. Default `False`.
file: A file-like object (stream, optional): Defaults to the current sys.stdout. Default `None`.
flush (bool, optional): Whether to forcibly flush the stream. Default `False`.
"""
_print_with_color(args, Colors.yellow, add_datetime, (prefix,), sep, end, closed_ok, file, flush)
def blue(*args: Any, add_datetime: bool = False, prefix: Optional[str] = None,
sep: str = ' ', end: str = '\n', closed_ok: bool = False,
file: Optional[TextIO] = None, flush: bool = False) -> None:
"""
Prints values in blue.
Args:
add_datetime (bool, optional): Whether or not a datetime timestamp should be printed. Default `False`.
prefix (any, optional): A string interpolatable value that should be prepended to the print. Default `None`.
sep (str, optional): String inserted between values, default is a space. Default `' '`.
end (str, optional): String appended after the last value, default is a newline. Default `\n`.
closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be
suppressed. Default `False`.
file: A file-like object (stream, optional): Defaults to the current sys.stdout. Default `None`.
flush (bool, optional): Whether to forcibly flush the stream. Default `False`.
"""
_print_with_color(args, Colors.blue, add_datetime, (prefix,), sep, end, closed_ok, file, flush)
def magenta(*args: Any, add_datetime: bool = False, prefix: Optional[str] = None,
sep: str = ' ', end: str = '\n', closed_ok: bool = False,
file: Optional[TextIO] = None, flush: bool = False) -> None:
"""
Prints values in magenta.
Args:
add_datetime (bool, optional): Whether or not a datetime timestamp should be printed. Default `False`.
prefix (any, optional): A string interpolatable value that should be prepended to the print. Default `None`.
sep (str, optional): String inserted between values, default is a space. Default `' '`.
end (str, optional): String appended after the last value, default is a newline. Default `\n`.
closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be
suppressed. Default `False`.
file: A file-like object (stream, optional): Defaults to the current sys.stdout. Default `None`.
flush (bool, optional): Whether to forcibly flush the stream. Default `False`.
"""
_print_with_color(args, Colors.magenta, add_datetime, (prefix,), sep, end, closed_ok, file, flush)
def cyan(*args: Any, add_datetime: bool = False, prefix: Optional[str] = None,
sep: str = ' ', end: str = '\n', closed_ok: bool = False,
file: Optional[TextIO] = None, flush: bool = False) -> None:
"""
Prints values in cyan.
Args:
add_datetime (bool, optional): Whether or not a datetime timestamp should be printed. Default `False`.
prefix (any, optional): A string interpolatable value that should be prepended to the print. Default `None`.
sep (str, optional): String inserted between values, default is a space. Default `' '`.
end (str, optional): String appended after the last value, default is a newline. Default `\n`.
closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be
suppressed. Default `False`.
file: A file-like object (stream, optional): Defaults to the current sys.stdout. Default `None`.
flush (bool, optional): Whether to forcibly flush the stream. Default `False`.
"""
_print_with_color(args, Colors.cyan, add_datetime, (prefix,), sep, end, closed_ok, file, flush)
def white(*args: Any, add_datetime: bool = False, prefix: Optional[str] = None,
sep: str = ' ', end: str = '\n', closed_ok: bool = False,
file: Optional[TextIO] = None, flush: bool = False) -> None:
"""
Prints values in white.
Args:
add_datetime (bool, optional): Whether or not a datetime timestamp should be printed. Default `False`.
prefix (any, optional): A string interpolatable value that should be prepended to the print. Default `None`.
sep (str, optional): String inserted between values, default is a space. Default `' '`.
end (str, optional): String appended after the last value, default is a newline. Default `\n`.
closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be
suppressed. Default `False`.
file: A file-like object (stream, optional): Defaults to the current sys.stdout. Default `None`.
flush (bool, optional): Whether to forcibly flush the stream. Default `False`.
"""
_print_with_color(args, Colors.white, add_datetime, (prefix,), sep, end, closed_ok, file, flush)
# MARK: Tagged color printouts
def info(*args: Any, tag_text: Optional[str] = 'info', add_datetime: bool = False,
prefix: Optional[str] = None, sep: str = ' ', end: str = '\n', closed_ok: bool = False,
file: Optional[TextIO] = None, flush: bool = False) -> None:
"""
Used for printing basic information.
Args:
tag_text (str, optional): The text content of the tag that will be prepended to the print.
`None` for no tag. Default `'info'`.
add_datetime (bool, optional): Whether or not a datetime timestamp should be printed. Default `False`.
prefix (str, optional): A string interpolatable value that will be prepended to the print. Default `None`.
sep (str, optional): string inserted between values, default is a space. Default `' '`.
end (str, optional): string appended after the last value, default is a newline. Default `'\n'`.
closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be
suppressed. Default `False`.
file (TextIO, optional): defaults to the current sys.stdout. Default `None`.
flush (bool, optional): whether to forcibly flush the stream. Default `False`.
"""
tag: Optional[str] = tag_text if tag_text is None else f'[{tag_text}]'
_print_with_color(args, Colors.cyan, add_datetime, (prefix, tag), sep, end, closed_ok, file, flush)
def success(*args: Any, tag_text: Optional[str] = 'success', add_datetime: bool = False,
prefix: Optional[str] = None, sep: str = ' ', end: str = '\n', closed_ok: bool = False,
file: Optional[TextIO] = None, flush: bool = False) -> None:
"""
Used to indicate successful execution.
Args:
tag_text (str, optional): The text content of the tag that will be prepended to the print.
`None` for no tag. Default `'success'`.
add_datetime (bool, optional): Whether or not a datetime timestamp should be printed. Default `False`.
prefix (str, optional): A string interpolatable value that will be prepended to the print. Default `None`.
sep (str, optional): string inserted between values, default is a space. Default `' '`.
end (str, optional): string appended after the last value, default is a newline. Default `'\n'`.
closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be
suppressed. Default `False`.
file (TextIO, optional): defaults to the current sys.stdout. Default `None`.
flush (bool, optional): whether to forcibly flush the stream. Default `False`.
"""
tag: Optional[str] = tag_text if tag_text is None else f'[{tag_text}]'
_print_with_color(args, Colors.green, add_datetime, (prefix, tag), sep, end, closed_ok, file, flush)
def notice(*args: Any, tag_text: Optional[str] = 'notice', add_datetime: bool = False,
prefix: Optional[str] = None, sep: str = ' ', end: str = '\n', closed_ok: bool = False,
file: Optional[TextIO] = None, flush: bool = False) -> None:
"""
Used to print important information.
Args:
tag_text (str, optional): The text content of the tag that will be prepended to the print.
`None` for no tag. Default `'notice'`.
add_datetime (bool, optional): Whether or not a datetime timestamp should be printed. Default `False`.
prefix (str, optional): A string interpolatable value that will be prepended to the print. Default `None`.
sep (str, optional): string inserted between values, default is a space. Default `' '`.
end (str, optional): string appended after the last value, default is a newline. Default `'\n'`.
closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be
suppressed. Default `False`.
file (TextIO, optional): defaults to the current sys.stdout. Default `None`.
flush (bool, optional): whether to forcibly flush the stream. Default `False`.
"""
tag: Optional[str] = tag_text if tag_text is None else f'[{tag_text}]'
_print_with_color(args, Colors.blue, add_datetime, (prefix, tag), sep, end, closed_ok, file, flush)
def timeout(*args: Any, tag_text: Optional[str] = 'timeout', add_datetime: bool = False,
prefix: Optional[str] = None, sep: str = ' ', end: str = '\n', closed_ok: bool = False,
file: Optional[TextIO] = None, flush: bool = False) -> None:
"""
Used to indicate a timeout.
Args:
tag_text (str, optional): The text content of the tag that will be prepended to the print.
`None` for no tag. Default `'timeout'`.
add_datetime (bool, optional): Whether or not a datetime timestamp should be printed. Default `False`.
prefix (str, optional): A string interpolatable value that will be prepended to the print. Default `None`.
sep (str, optional): string inserted between values, default is a space. Default `' '`.
end (str, optional): string appended after the last value, default is a newline. Default `'\n'`.
closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be
suppressed. Default `False`.
file (TextIO, optional): defaults to the current sys.stdout. Default `None`.
flush (bool, optional): whether to forcibly flush the stream. Default `False`.
"""
tag: Optional[str] = tag_text if tag_text is None else f'[{tag_text}]'
_print_with_color(args, Colors.yellow, add_datetime, (prefix, tag), sep, end, closed_ok, file, flush)
def warn(*args: Any, tag_text: Optional[str] = 'warn', add_datetime: bool = False,
prefix: Optional[str] = None, sep: str = ' ', end: str = '\n', closed_ok: bool = False,
file: Optional[TextIO] = None, flush: bool = False) -> None:
"""
Used to highlight that there may be an issue, or that code has improperly executed.
Args:
tag_text (str, optional): The text content of the tag that will be prepended to the print.
`None` for no tag. Default `'warn'`.
add_datetime (bool, optional): Whether or not a datetime timestamp should be printed. Default `False`.
prefix (str, optional): A string interpolatable value that will be prepended to the print. Default `None`.
sep (str, optional): string inserted between values, default is a space. Default `' '`.
end (str, optional): string appended after the last value, default is a newline. Default `'\n'`.
closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be
suppressed. Default `False`.
file (TextIO, optional): defaults to the current sys.stdout. Default `None`.
flush (bool, optional): whether to forcibly flush the stream. Default `False`.
"""
tag: Optional[str] = tag_text if tag_text is None else f'[{tag_text}]'
_print_with_color(args, Colors.magenta, add_datetime, (prefix, tag), sep, end, closed_ok, file, flush)
def error(*args: Any, tag_text: Optional[str] = 'error', add_datetime: bool = False,
prefix: Optional[str] = None, sep: str = ' ', end: str = '\n', closed_ok: bool = False,
file: Optional[TextIO] = None, flush: bool = False) -> None:
"""
Can be used to print the description or message associated with an exception.
Args:
tag_text (str, optional): The text content of the tag that will be prepended to the print.
`None` for no tag. Default `'error'`.
add_datetime (bool, optional): Whether or not a datetime timestamp should be printed. Default `False`.
prefix (str, optional): A string interpolatable value that will be prepended to the print. Default `None`.
sep (str, optional): string inserted between values, default is a space. Default `' '`.
end (str, optional): string appended after the last value, default is a newline. Default `'\n'`.
closed_ok (bool, optional): Whether or not the ValueError raised by a closed stdout should be
suppressed. Default `False`.
file (TextIO, optional): defaults to the current sys.stdout. Default `None`.
flush (bool, optional): whether to forcibly flush the stream. Default `False`.
"""
tag: Optional[str] = tag_text if tag_text is None else f'[{tag_text}]'
_print_with_color(args, Colors.red, add_datetime, (prefix, tag), sep, end, closed_ok, file, flush)
if __name__ == "__main__":
pass
| 55.585799
| 116
| 0.656323
| 2,551
| 18,788
| 4.760094
| 0.058408
| 0.039858
| 0.065717
| 0.029976
| 0.918389
| 0.913613
| 0.90126
| 0.90126
| 0.90126
| 0.893189
| 0
| 0.000417
| 0.233607
| 18,788
| 337
| 117
| 55.750742
| 0.842906
| 0.655738
| 0
| 0.409091
| 0
| 0
| 0.037718
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0.022727
| 0.034091
| 0.011364
| 0.227273
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
58b0bc3f2f2fab412486f325587c8d8bfc2be1f9
| 85
|
py
|
Python
|
pyffs/automaton_management/__init__.py
|
rominf/pyffs
|
6c805fbfd7771727138b169b32484b53c0b0fad1
|
[
"MIT"
] | 21
|
2018-07-17T13:21:11.000Z
|
2022-03-07T03:00:37.000Z
|
pyffs/automaton_management/__init__.py
|
rominf/pyffs
|
6c805fbfd7771727138b169b32484b53c0b0fad1
|
[
"MIT"
] | 10
|
2016-09-23T20:30:18.000Z
|
2021-03-07T12:56:56.000Z
|
pyffs/automaton_management/__init__.py
|
antoinewdg/pyffs
|
6ac2b6cac67422cbfd34ad0896d6faf35be9ccb9
|
[
"MIT"
] | 3
|
2018-08-21T12:08:36.000Z
|
2020-11-12T19:32:54.000Z
|
from .automaton_manager import manager
from .utils import generate_automaton_to_file
| 28.333333
| 45
| 0.882353
| 12
| 85
| 5.916667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094118
| 85
| 2
| 46
| 42.5
| 0.922078
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
58ce5c82dd9451b64af42f8396ca46490e4b4723
| 99
|
py
|
Python
|
gui/screens/screenmanagement.py
|
tonymorony/DiceCC-GUI
|
89dbdcf9fe762fe673a0c8c90d461efc10ab31e4
|
[
"MIT"
] | 1
|
2018-12-12T12:18:57.000Z
|
2018-12-12T12:18:57.000Z
|
gui/screens/screenmanagement.py
|
tonymorony/ChannelsCC-GUI
|
07df3706f8a250738311773eaf130fd8ebced64a
|
[
"MIT"
] | null | null | null |
gui/screens/screenmanagement.py
|
tonymorony/ChannelsCC-GUI
|
07df3706f8a250738311773eaf130fd8ebced64a
|
[
"MIT"
] | 1
|
2019-01-04T05:52:38.000Z
|
2019-01-04T05:52:38.000Z
|
from kivy.uix.screenmanager import ScreenManager
class ScreenManagement(ScreenManager):
pass
| 16.5
| 48
| 0.818182
| 10
| 99
| 8.1
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131313
| 99
| 5
| 49
| 19.8
| 0.94186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
58cf7bc7e499d9835a529fca885b9cad88f53185
| 6,318
|
py
|
Python
|
api/src/services/github_api.py
|
AlbertSuarez/git-inspect
|
4889210f0a47abe56b0f396ce0ecb08132ef4cf8
|
[
"MIT"
] | 6
|
2019-10-05T17:57:54.000Z
|
2019-10-10T07:37:15.000Z
|
api/src/services/github_api.py
|
AlbertSuarez/git-inspect
|
4889210f0a47abe56b0f396ce0ecb08132ef4cf8
|
[
"MIT"
] | 1
|
2019-10-07T22:05:12.000Z
|
2019-10-09T15:37:03.000Z
|
api/src/services/github_api.py
|
AlbertSuarez/git-inspect
|
4889210f0a47abe56b0f396ce0ecb08132ef4cf8
|
[
"MIT"
] | 2
|
2019-10-07T20:47:07.000Z
|
2019-12-04T19:52:50.000Z
|
import time
import requests
from src import *
from src.helper import log, env
from src.helper.response import get
def get_basic_user_information(username):
for attempt in range(0, GITHUB_API_RETRIES):
try:
endpoint = GITHUB_SINGLE_USER_ENDPOINT.format(username=username)
params = dict(client_id=env.get_github_client_id(), client_secret=env.get_github_client_secret())
response = requests.get(endpoint, params=params, timeout=GITHUB_API_TIMEOUT)
if response.ok:
response = response.json()
return response
except Exception as e:
if attempt < GITHUB_API_RETRIES - 1:
log.warn(f'Attempt number {attempt}: Failed - [{e}]. Retrying...')
time.sleep(GITHUB_API_RTD)
else:
log.error(f'Error in {get_basic_user_information.__name__} function. [{e}]')
return None
def get_repos_from_user(username):
repos_array = []
page_number = 1
while True:
for attempt in range(0, GITHUB_API_RETRIES):
try:
endpoint = GITHUB_USER_REPOS_ENDPOINT.format(username=username)
params = dict(
client_id=env.get_github_client_id(), client_secret=env.get_github_client_secret(),
per_page=GITHUB_PER_PAGE, page=page_number
)
response = requests.get(endpoint, params=params, timeout=GITHUB_API_TIMEOUT)
if response.ok:
response = response.json()
if response:
repos_array.extend(response)
break
else:
return repos_array
except Exception as e:
if attempt < GITHUB_API_RETRIES - 1:
log.warn(f'Attempt number {attempt}: Failed - [{e}]. Retrying...')
time.sleep(GITHUB_API_RTD)
else:
log.error(f'Error in {get_repos_from_user.__name__} function. [{e}]')
return None
page_number += 1
def get_languages(args):
username, repository = args
for attempt in range(0, GITHUB_API_RETRIES):
try:
endpoint = GITHUB_LANGUAGES_ENDPOINT.format(username=username, repository=repository)
params = dict(client_id=env.get_github_client_id(), client_secret=env.get_github_client_secret())
response = requests.get(endpoint, params=params, timeout=GITHUB_API_TIMEOUT)
if response.ok:
response = response.json()
return response
except Exception as e:
if attempt < GITHUB_API_RETRIES - 1:
log.warn(f'Attempt number {attempt}: Failed - [{e}]. Retrying...')
time.sleep(GITHUB_API_RTD)
else:
log.error(f'Error in {get_languages.__name__} function. [{e}]')
return None
def get_topics(args):
username, repository = args
for attempt in range(0, GITHUB_API_RETRIES):
try:
endpoint = GITHUB_TOPICS_ENDPOINT.format(username=username, repository=repository)
params = dict(client_id=env.get_github_client_id(), client_secret=env.get_github_client_secret())
headers = dict(Accept='application/vnd.github.mercy-preview+json')
response = requests.get(endpoint, params=params, headers=headers, timeout=GITHUB_API_TIMEOUT)
if response.ok:
response = response.json()['names']
return response
except Exception as e:
if attempt < GITHUB_API_RETRIES - 1:
log.warn(f'Attempt number {attempt}: Failed - [{e}]. Retrying...')
time.sleep(GITHUB_API_RTD)
else:
log.error(f'Error in {get_topics.__name__} function. [{e}]')
return None
def get_contributors(args):
username, repository = args
for attempt in range(0, GITHUB_API_RETRIES):
try:
endpoint = GITHUB_CONTRIBUTORS_ENDPOINT.format(username=username, repository=repository)
params = dict(client_id=env.get_github_client_id(), client_secret=env.get_github_client_secret())
response = requests.get(endpoint, params=params, timeout=GITHUB_API_TIMEOUT)
if response.ok:
response = response.json()
return response
except Exception as e:
if attempt < GITHUB_API_RETRIES - 1:
log.warn(f'Attempt number {attempt}: Failed - [{e}]. Retrying...')
time.sleep(GITHUB_API_RTD)
else:
log.error(f'Error in {get_contributors.__name__} function. [{e}]')
return None
def get_commit_messages(username):
commits_array = []
page_number = 1
while True:
for attempt in range(0, GITHUB_API_RETRIES):
try:
endpoint = GITHUB_USER_EVENTS_ENDPOINT.format(username=username)
params = dict(
client_id=env.get_github_client_id(), client_secret=env.get_github_client_secret(), page=page_number
)
response = requests.get(endpoint, params=params, timeout=GITHUB_API_TIMEOUT)
if response.ok:
response = response.json()
if response:
for res in response:
if get('type', res, default='') == GITHUB_PUSH_EVENT_TYPE:
res_payload = get('payload', res)
if res_payload:
res_commits = get('commits', res_payload, default=[])
commits_array.extend([get('message', c) for c in res_commits])
break
else:
return commits_array
except Exception as e:
if attempt < GITHUB_API_RETRIES - 1:
log.warn(f'Attempt number {attempt}: Failed - [{e}]. Retrying...')
time.sleep(GITHUB_API_RTD)
else:
log.error(f'Error in {get_commit_messages.__name__} function. [{e}]')
return None
page_number += 1
| 43.572414
| 120
| 0.572808
| 689
| 6,318
| 4.991292
| 0.124819
| 0.062809
| 0.05583
| 0.062809
| 0.80663
| 0.80663
| 0.795289
| 0.761559
| 0.741785
| 0.741785
| 0
| 0.003826
| 0.338082
| 6,318
| 144
| 121
| 43.875
| 0.818508
| 0
| 0
| 0.679389
| 0
| 0
| 0.112061
| 0.033238
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045802
| false
| 0
| 0.038168
| 0
| 0.175573
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
58fa73b334a54ce307232d049c85eaadbb72be3a
| 93
|
py
|
Python
|
archive/2017/tasks/encode_decode/tests.py
|
YAtOff/python0
|
b5af5004131d64dd52d42746eddb72b6c43a13c7
|
[
"Apache-2.0"
] | 6
|
2017-11-08T14:04:39.000Z
|
2019-03-24T22:11:04.000Z
|
archive/2017/tasks/encode_decode/tests.py
|
YAtOff/python0
|
b5af5004131d64dd52d42746eddb72b6c43a13c7
|
[
"Apache-2.0"
] | null | null | null |
archive/2017/tasks/encode_decode/tests.py
|
YAtOff/python0
|
b5af5004131d64dd52d42746eddb72b6c43a13c7
|
[
"Apache-2.0"
] | 7
|
2015-10-27T09:04:58.000Z
|
2019-03-03T14:18:26.000Z
|
import doctest
import encode
import decode
doctest.testmod(encode)
doctest.testmod(decode)
| 11.625
| 23
| 0.827957
| 12
| 93
| 6.416667
| 0.416667
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107527
| 93
| 7
| 24
| 13.285714
| 0.927711
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
450dd3e5a21435a8c74bcdcfdb615c8cd846706f
| 103
|
py
|
Python
|
python-to-C/app/sample.py
|
naumovdk/parsers
|
6c749d36f588114ce055891dab46c9c6be05a4c0
|
[
"MIT"
] | null | null | null |
python-to-C/app/sample.py
|
naumovdk/parsers
|
6c749d36f588114ce055891dab46c9c6be05a4c0
|
[
"MIT"
] | null | null | null |
python-to-C/app/sample.py
|
naumovdk/parsers
|
6c749d36f588114ce055891dab46c9c6be05a4c0
|
[
"MIT"
] | null | null | null |
a = int(input())
if a == 3:
a = 4
elif a == 1:
b = 4
else:
a = 4
elif a == 15:
b = 123
| 10.3
| 16
| 0.38835
| 21
| 103
| 1.904762
| 0.571429
| 0.1
| 0.3
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169492
| 0.427184
| 103
| 9
| 17
| 11.444444
| 0.508475
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
18dfca5b58c683c94eba90d98865c951bed73ade
| 114
|
py
|
Python
|
app/views/__init__.py
|
skj95/EzFood
|
013fab204621bf1746d1e8914c5a9ee14311f79b
|
[
"MIT"
] | 1
|
2021-07-14T19:45:19.000Z
|
2021-07-14T19:45:19.000Z
|
app/views/__init__.py
|
Trung-Jeager-2019/EzFood
|
03ef7253c3d8cfc150e6054d10d91ca7efdca5e6
|
[
"MIT"
] | 11
|
2021-02-08T20:46:16.000Z
|
2022-03-12T00:28:56.000Z
|
app/views/__init__.py
|
Trung-Jeager-2019/EzFood
|
03ef7253c3d8cfc150e6054d10d91ca7efdca5e6
|
[
"MIT"
] | 2
|
2020-03-05T12:30:46.000Z
|
2020-05-16T06:31:56.000Z
|
from .general import *
from .partner import *
from .checkout import *
from .rider import *
from .normal import *
| 16.285714
| 23
| 0.72807
| 15
| 114
| 5.533333
| 0.466667
| 0.481928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 114
| 6
| 24
| 19
| 0.892473
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e14f20dc57887c5d8c7b2305fc97d24b8ee73680
| 43
|
py
|
Python
|
routes/__init__.py
|
texuf/myantname
|
f683a6ca0feb9dd5e2d0f2bb67204ff4193e262a
|
[
"MIT"
] | null | null | null |
routes/__init__.py
|
texuf/myantname
|
f683a6ca0feb9dd5e2d0f2bb67204ff4193e262a
|
[
"MIT"
] | null | null | null |
routes/__init__.py
|
texuf/myantname
|
f683a6ca0feb9dd5e2d0f2bb67204ff4193e262a
|
[
"MIT"
] | null | null | null |
import index
import myname
import species
| 8.6
| 14
| 0.837209
| 6
| 43
| 6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 43
| 5
| 14
| 8.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.