hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
574c00d99f0d289b590626f845e693ddc4a0448f
| 2,686
|
py
|
Python
|
src/app/api/auth0/users.py
|
scraiber/scraiber-api
|
010d0875ba0820e0ec7790d74df8a2955fac360e
|
[
"Apache-2.0"
] | 1
|
2022-03-29T06:41:41.000Z
|
2022-03-29T06:41:41.000Z
|
src/app/api/auth0/users.py
|
scraiber/scraiber-api
|
010d0875ba0820e0ec7790d74df8a2955fac360e
|
[
"Apache-2.0"
] | null | null | null |
src/app/api/auth0/users.py
|
scraiber/scraiber-api
|
010d0875ba0820e0ec7790d74df8a2955fac360e
|
[
"Apache-2.0"
] | null | null | null |
import os
import requests
from pydantic import EmailStr
from typing import List
from fastapi import HTTPException
from app.api.models.auth0 import Auth0User
async def get_user_by_email(email: EmailStr) -> Auth0User:
    """Look up a single Auth0 user by email address.

    :param email: address to search for
    :return: the first matching user
    :raises HTTPException: 404 when the lookup fails or finds nobody
    """
    auth_headers = {'Authorization': 'Bearer ' + os.environ["ACCESS_TOKEN"]}
    query = {'email': email, 'fields': 'email,email_verified,nickname,name,user_id'}
    response = requests.get(
        'https://scraiber.eu.auth0.com/api/v2/users-by-email',
        headers=auth_headers,
        params=query,
    )
    # The endpoint returns a JSON list; anything other than a 200 with at
    # least one entry is treated as "not found".
    if response.status_code == 200 and len(response.json()) > 0:
        return Auth0User(**response.json()[0])
    raise HTTPException(status_code=404, detail="User could not be retrieved")
async def get_user_by_id(id: str) -> Auth0User:
    """Look up a single Auth0 user by its user id.

    :param id: Auth0 user id
    :return: the matching user
    :raises HTTPException: 404 when the lookup fails or returns nothing
    """
    auth_headers = {'Authorization': 'Bearer ' + os.environ["ACCESS_TOKEN"]}
    query = {'fields': 'email,email_verified,nickname,name,user_id'}
    response = requests.get(
        'https://scraiber.eu.auth0.com/api/v2/users/' + id,
        headers=auth_headers,
        params=query,
    )
    if response.status_code == 200 and len(response.json()) > 0:
        return Auth0User(**response.json())
    raise HTTPException(status_code=404, detail="User could not be retrieved")
async def get_user_list_by_email(email_list: List[EmailStr], require_200_status_code: bool = False) -> List[Auth0User]:
    """Resolve a list of email addresses to Auth0 users.

    :param email_list: addresses to look up
    :param require_200_status_code: when True a failed lookup propagates its
        HTTPException; when False failed lookups are skipped best-effort
    :return: the users that could be retrieved
    """
    output_list = []
    for email in email_list:
        try:
            user = await get_user_by_email(email)
        except Exception:
            # Bug fix: this was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Keep the best-effort skip, but
            # only for ordinary exceptions, and only when allowed to.
            if require_200_status_code:
                raise
            continue
        if user:
            output_list.append(user)
    return output_list
async def get_user_list_by_id(id_list: List[str], require_200_status_code: bool = False) -> List[Auth0User]:
    """Resolve a list of Auth0 user ids to users.

    :param id_list: user ids to look up
    :param require_200_status_code: when True a failed lookup propagates its
        HTTPException; when False failed lookups are skipped best-effort
    :return: the users that could be retrieved
    """
    output_list = []
    # Loop variable renamed from `id` to avoid shadowing the builtin.
    for user_id in id_list:
        try:
            user = await get_user_by_id(user_id)
        except Exception:
            # Bug fix: this was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Keep the best-effort skip, but
            # only for ordinary exceptions, and only when allowed to.
            if require_200_status_code:
                raise
            continue
        if user:
            output_list.append(user)
    return output_list
async def delete_user_by_id(id: str) -> bool:
    """Delete an Auth0 user by id.

    :param id: Auth0 user id to delete
    :return: True when Auth0 acknowledges the delete (HTTP 204)
    :raises HTTPException: 404 for any other response status
    """
    auth_headers = {'Authorization': 'Bearer ' + os.environ["ACCESS_TOKEN"]}
    response = requests.delete(
        'https://scraiber.eu.auth0.com/api/v2/users/' + id,
        headers=auth_headers,
    )
    # Guard clause instead of if/else: anything but 204 is a failure.
    if response.status_code != 204:
        raise HTTPException(status_code=404, detail="User could not be deleted")
    return True
| 31.6
| 119
| 0.645197
| 344
| 2,686
| 4.834302
| 0.212209
| 0.060132
| 0.032471
| 0.036079
| 0.82261
| 0.793145
| 0.78172
| 0.754059
| 0.703548
| 0.630186
| 0
| 0.023256
| 0.24758
| 2,686
| 85
| 120
| 31.6
| 0.799604
| 0
| 0
| 0.610169
| 0
| 0
| 0.153703
| 0.031262
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.101695
| 0
| 0.186441
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9e0288b177a01ecfce509c2b87a15442cdd0db85
| 11,016
|
py
|
Python
|
hallo/modules/channel_control/voice.py
|
SpangleLabs/Hallo
|
17145d8f76552ecd4cbc5caef8924bd2cf0cbf24
|
[
"MIT"
] | 1
|
2022-01-27T13:25:01.000Z
|
2022-01-27T13:25:01.000Z
|
hallo/modules/channel_control/voice.py
|
joshcoales/Hallo
|
17145d8f76552ecd4cbc5caef8924bd2cf0cbf24
|
[
"MIT"
] | 75
|
2015-09-26T18:07:18.000Z
|
2022-01-04T07:15:11.000Z
|
hallo/modules/channel_control/voice.py
|
SpangleLabs/Hallo
|
17145d8f76552ecd4cbc5caef8924bd2cf0cbf24
|
[
"MIT"
] | 1
|
2021-04-10T12:02:47.000Z
|
2021-04-10T12:02:47.000Z
|
from hallo.events import EventMode
from hallo.function import Function
import hallo.modules.channel_control.channel_control
from hallo.server import Server
class Voice(Function):
    """
    Gives a user on an irc server "voice" status.
    """

    def __init__(self):
        """
        Constructor
        """
        super().__init__()
        # Name for use in help listing
        self.help_name = "voice"
        # Names which can be used to address the function
        self.names = {"voice", "give voice", "gib voice", "get voice"}
        # Help documentation, if it's just a single line, can be set here
        self.help_docs = (
            "Voice member in given channel, or current channel if no channel given, or command user if "
            "no member given. Format: voice <name> <channel>"
        )

    def run(self, event):
        """
        Parse the command arguments and give voice to the target user.

        Arguments may be empty (voice the caller in the current channel), a
        single channel or user name, or a channel/user pair in either order.
        :param event: Message event which triggered this function
        :return: Response event with the outcome message
        """
        # Get server object
        server_obj = event.server
        # If server isn't IRC type, we can't give voice.
        if server_obj.type != Server.TYPE_IRC:
            return event.create_response(
                "Error, this function is only available for IRC servers."
            )
        # If 0 arguments, voice user who called command.
        line_split = event.command_args.split()
        if len(line_split) == 0:
            # Check that this is a channel
            if event.channel is None:
                return event.create_response(
                    "Error, I can't voice you in a private message, please provide a channel."
                )
            # Give user voice
            return event.create_response(self.give_voice(event.channel, event.user))
        # If 1 argument, see if it's a channel or a user.
        if len(line_split) == 1:
            # If message was sent in private message, it's referring to a channel
            if event.channel is None:
                channel = server_obj.get_channel_by_name(event.command_args)
                if channel is None:
                    return event.create_response(
                        "Error, {} is not known on {}.".format(
                            event.command_args, server_obj.name
                        )
                    )
                return event.create_response(self.give_voice(channel, event.user))
            # See if it's a channel that hallo is in
            test_channel = server_obj.get_channel_by_name(event.command_args)
            if test_channel is not None and test_channel.in_channel:
                return event.create_response(self.give_voice(test_channel, event.user))
            # Argument must be a user?
            target_user = server_obj.get_user_by_name(event.command_args)
            if target_user is None:
                return event.create_response(
                    "Error, {} is not known on {}.".format(
                        event.command_args, server_obj.name
                    )
                )
            return event.create_response(self.give_voice(event.channel, target_user))
        # If 2 arguments, try with first argument as channel
        target_channel = server_obj.get_channel_by_name(line_split[0])
        if target_channel is not None and target_channel.in_channel:
            target_user = server_obj.get_user_by_name(line_split[1])
            if target_user is None:
                return event.create_response(
                    "Error, {} is not known on {}.".format(
                        line_split[1], server_obj.name
                    )
                )
            return event.create_response(self.give_voice(target_channel, target_user))
        # 2 args, try with second argument as channel
        target_user = server_obj.get_user_by_name(line_split[0])
        if target_user is None:
            return event.create_response(
                "Error, {} is not known on {}.".format(line_split[0], server_obj.name)
            )
        target_channel = server_obj.get_channel_by_name(line_split[1])
        if target_channel is None:
            return event.create_response(
                "Error, {} is not known on {}.".format(line_split[1], server_obj.name)
            )
        return event.create_response(self.give_voice(target_channel, target_user))

    def give_voice(self, channel, user):
        """
        Gives voice to a user in a given channel, after checks.
        :param channel: Channel to give user voice in
        :type channel: destination.Channel
        :param user: User to give voice to
        :type user: destination.User
        :return: Response to send to requester
        :rtype: str
        """
        # Check if in channel
        if not channel.in_channel:
            return "Error, I'm not in that channel."
        # Check if user is in channel
        if user not in channel.get_user_list():
            return "Error, {} is not in {}.".format(user.name, channel.name)
        # Check if hallo has op in channel
        if not hallo.modules.channel_control.channel_control.hallo_has_op(channel):
            return "Error, I don't have power to give voice in {}.".format(channel.name)
        # Check that user does not have op in channel
        # (ops are treated as already having voice-level privileges)
        user_membership = channel.get_membership_by_user(user)
        if user_membership.is_voice or user_membership.is_op:
            return "Error, this user already has voice."
        # Build an outbound +v mode change and send it to the server.
        mode_evt = EventMode(
            channel.server, channel, None, "+v {}".format(user.address), inbound=False
        )
        channel.server.send(mode_evt)
        return "Voice status given."
class DeVoice(Function):
    """
    Removes a user on an irc server's "voice" status.
    """

    def __init__(self):
        """
        Constructor
        """
        super().__init__()
        # Name for use in help listing
        self.help_name = "devoice"
        # Names which can be used to address the function
        self.names = {
            "devoice",
            "unvoice",
            "take voice",
            "del voice",
            "delete voice",
            "remove voice",
        }
        # Help documentation, if it's just a single line, can be set here
        self.help_docs = (
            "UnVoice member in given channel, or current channel if no channel given, or command user "
            "if no member given. Format: devoice <name> <channel>"
        )

    def run(self, event):
        """
        Parse the command arguments and take voice from the target user.

        Arguments may be empty (de-voice the caller in the current channel),
        a single channel or user name, or a channel/user pair in either order.
        :param event: Message event which triggered this function
        :return: Response event with the outcome message
        """
        # Get server object
        server_obj = event.server
        # If server isn't IRC type, we can't take voice.
        if server_obj.type != Server.TYPE_IRC:
            return event.create_response(
                "Error, this function is only available for IRC servers."
            )
        # If 0 arguments, take voice from user who called command.
        line_split = event.command_args.split()
        if len(line_split) == 0:
            # Check that this is a channel
            if event.channel is None:
                return event.create_response(
                    "Error, I can't un-voice you in a private message, please provide a channel."
                )
            # Take voice from the command user
            # (comment fixed: previously said "Give user voice")
            return event.create_response(self.take_voice(event.channel, event.user))
        # If 1 argument, see if it's a channel or a user.
        if len(line_split) == 1:
            # If message was sent in private message, it's referring to a channel
            if event.channel is None:
                channel = server_obj.get_channel_by_name(event.command_args)
                if channel is None:
                    return event.create_response(
                        "Error, {} is not known on {}.".format(
                            event.command_args, server_obj.name
                        )
                    )
                return event.create_response(self.take_voice(channel, event.user))
            # See if it's a channel that hallo is in
            test_channel = server_obj.get_channel_by_name(event.command_args)
            if test_channel is not None and test_channel.in_channel:
                return event.create_response(self.take_voice(test_channel, event.user))
            # Argument must be a user?
            target_user = server_obj.get_user_by_name(event.command_args)
            if target_user is None:
                return event.create_response(
                    "Error, {} is not known on {}.".format(
                        event.command_args, server_obj.name
                    )
                )
            return event.create_response(self.take_voice(event.channel, target_user))
        # If 2 arguments, try with first argument as channel
        target_channel = server_obj.get_channel_by_name(line_split[0])
        if target_channel is not None and target_channel.in_channel:
            target_user = server_obj.get_user_by_name(line_split[1])
            if target_user is None:
                return event.create_response(
                    "Error, {} is not known on {}.".format(
                        line_split[1], server_obj.name
                    )
                )
            return event.create_response(self.take_voice(target_channel, target_user))
        # 2 args, try with second argument as channel
        target_user = server_obj.get_user_by_name(line_split[0])
        if target_user is None:
            return event.create_response(
                "Error, {} is not known on {}.".format(line_split[0], server_obj.name)
            )
        target_channel = server_obj.get_channel_by_name(line_split[1])
        if target_channel is None:
            return event.create_response(
                "Error, {} is not known on {}.".format(line_split[1], server_obj.name)
            )
        return event.create_response(self.take_voice(target_channel, target_user))

    def take_voice(self, channel, user):
        """
        Takes voice from a user in a given channel, after checks.
        :param channel: Channel to take voice from user in
        :type channel: destination.Channel
        :param user: User to take voice from
        :type user: destination.User
        :return: Response to send to requester
        :rtype: str
        """
        # Check if in channel
        if not channel.in_channel:
            return "Error, I'm not in that channel."
        # Check if user is in channel
        if user not in channel.get_user_list():
            return "Error, {} is not in {}.".format(user.name, channel.name)
        # Check if hallo has op in channel
        if not hallo.modules.channel_control.channel_control.hallo_has_op(channel):
            return "Error, I don't have power to take voice in {}.".format(channel.name)
        # Check that the user actually has voice to take
        # (comment fixed: previously said "does not have op", copied from Voice)
        user_membership = channel.get_membership_by_user(user)
        if not user_membership.is_voice:
            return "Error, this user doesn't have voice."
        # Bug fix: pass inbound=False, matching Voice.give_voice, so the -v
        # mode change is constructed as an outbound event before sending.
        mode_evt = EventMode(
            channel.server, channel, None, "-v {}".format(user.address), inbound=False
        )
        channel.server.send(mode_evt)
        return "Voice status taken."
| 44.780488
| 104
| 0.589415
| 1,405
| 11,016
| 4.44911
| 0.102491
| 0.040314
| 0.070709
| 0.103983
| 0.909934
| 0.909934
| 0.896817
| 0.895537
| 0.882419
| 0.866421
| 0
| 0.003528
| 0.330973
| 11,016
| 245
| 105
| 44.963265
| 0.84464
| 0.185094
| 0
| 0.60355
| 0
| 0
| 0.142841
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035503
| false
| 0
| 0.023669
| 0
| 0.284024
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f510f755bf763bc5ea2135382d37feb8f477bd68
| 21
|
py
|
Python
|
scripts/__init__.py
|
tsherwen/sparse2spatial
|
6f5240c7641ad7a894476672b78c8184c514bf87
|
[
"MIT"
] | 1
|
2020-01-14T21:40:29.000Z
|
2020-01-14T21:40:29.000Z
|
scripts/__init__.py
|
tsherwen/sparse2spatial
|
6f5240c7641ad7a894476672b78c8184c514bf87
|
[
"MIT"
] | null | null | null |
scripts/__init__.py
|
tsherwen/sparse2spatial
|
6f5240c7641ad7a894476672b78c8184c514bf87
|
[
"MIT"
] | null | null | null |
from . import iodide
| 10.5
| 20
| 0.761905
| 3
| 21
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f55d1d3dd285307956723c1510c15d267d46ff25
| 35
|
py
|
Python
|
datasets/__init__.py
|
bolero2/vggnet-torch
|
912046be3f0581e0217c2cf5b596e6318aad241b
|
[
"Apache-2.0"
] | 2
|
2021-04-23T03:49:30.000Z
|
2021-04-23T03:49:33.000Z
|
datasets/__init__.py
|
bolero2/vggnet-torch
|
912046be3f0581e0217c2cf5b596e6318aad241b
|
[
"Apache-2.0"
] | null | null | null |
datasets/__init__.py
|
bolero2/vggnet-torch
|
912046be3f0581e0217c2cf5b596e6318aad241b
|
[
"Apache-2.0"
] | null | null | null |
from .datasets import CustomDataset
| 35
| 35
| 0.885714
| 4
| 35
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 35
| 1
| 35
| 35
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
195a25eea10a9e38d24f93a48eb47f94dc59439e
| 2,144
|
py
|
Python
|
tests/test_setup_actions.py
|
gladsonvm/pii_filter
|
f7ab757bacede104d76e848997047fc77f7befa4
|
[
"MIT"
] | 1
|
2021-11-03T00:03:46.000Z
|
2021-11-03T00:03:46.000Z
|
tests/test_setup_actions.py
|
gladsonvm/pii_filter
|
f7ab757bacede104d76e848997047fc77f7befa4
|
[
"MIT"
] | null | null | null |
tests/test_setup_actions.py
|
gladsonvm/pii_filter
|
f7ab757bacede104d76e848997047fc77f7befa4
|
[
"MIT"
] | null | null | null |
import unittest
from unittest import mock
from setup_actions import setup_dir
class TestSetupActions(unittest.TestCase):
    """Unit tests for setup_actions.setup_dir with os fully mocked out."""

    # Directory path passed to setup_dir in every test.
    setup_dir_param = '/pwd'
    # Fake directory listing returned by the mocked os.listdir.
    list_dir = ['a', 'b']

    @mock.patch('setup_actions.os')
    def test_setup_action_delete_if_exists(self, os_mock):
        """
        Scenario: Called setup_dir with delete_files_if_exists as True
        Specified directory exists
        """
        # NOTE(review): the docstring/name say delete_files_if_exists is True,
        # but the call below passes False and expects zero os.remove calls —
        # this test is identical to test_setup_action_no_delete. Confirm
        # whether the second argument should be True here.
        print('{}'.format(self._testMethodName))
        os_mock.path.isdir.return_value = True
        os_mock.listdir.return_value = TestSetupActions.list_dir
        return_value = setup_dir(TestSetupActions.setup_dir_param, False)
        assert os_mock.listdir.call_count == 1
        assert os_mock.remove.call_count == 0
        assert return_value == TestSetupActions.setup_dir_param

    @mock.patch('setup_actions.os')
    def test_setup_action_no_delete(self, os_mock):
        """
        Scenario: Called setup_dir with delete_files_if_exists as False
        Specified directory exists
        """
        # Directory exists and deletion is disabled: files are listed but
        # nothing is removed, and the original path is returned.
        print('{}'.format(self._testMethodName))
        os_mock.path.isdir.return_value = True
        os_mock.listdir.return_value = TestSetupActions.list_dir
        return_value = setup_dir(TestSetupActions.setup_dir_param, False)
        assert os_mock.listdir.call_count == 1
        assert os_mock.remove.call_count == 0
        assert return_value == TestSetupActions.setup_dir_param

    @mock.patch('setup_actions.os')
    def test_setup_action_directory_does_not_exist(self, os_mock):
        """
        Scenario: Called setup_dir with delete_files_if_exists as False
        Specified directory does not exist and needs to be created
        """
        # NOTE(review): the name/docstring describe a missing directory, but
        # isdir is mocked to True and one os.remove per listed file is
        # expected — this actually exercises the delete-if-exists path.
        # Confirm intent against setup_dir's implementation.
        print('{}'.format(self._testMethodName))
        os_mock.path.isdir.return_value = True
        os_mock.listdir.return_value = TestSetupActions.list_dir
        return_value = setup_dir(TestSetupActions.setup_dir_param, True)
        assert os_mock.listdir.call_count == 1
        assert os_mock.remove.call_count == len(TestSetupActions.list_dir)
        assert return_value == TestSetupActions.setup_dir_param
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 38.981818
| 74
| 0.702425
| 273
| 2,144
| 5.157509
| 0.216117
| 0.06392
| 0.064631
| 0.12358
| 0.816761
| 0.816761
| 0.816761
| 0.784091
| 0.784091
| 0.754972
| 0
| 0.00296
| 0.21222
| 2,144
| 54
| 75
| 39.703704
| 0.830669
| 0.141325
| 0
| 0.628571
| 0
| 0
| 0.038924
| 0
| 0
| 0
| 0
| 0
| 0.257143
| 1
| 0.085714
| false
| 0
| 0.085714
| 0
| 0.257143
| 0.085714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1994705383f6ec32f22001927e08fd38ae09b74c
| 225
|
py
|
Python
|
test/test_2_validation_check_invalid_email_short_password.py
|
pavelwearevolt/Cross_Auth_TestsAutomatization
|
45c6a25352a1c893ef35a494a76088731db84ba7
|
[
"Apache-2.0"
] | null | null | null |
test/test_2_validation_check_invalid_email_short_password.py
|
pavelwearevolt/Cross_Auth_TestsAutomatization
|
45c6a25352a1c893ef35a494a76088731db84ba7
|
[
"Apache-2.0"
] | null | null | null |
test/test_2_validation_check_invalid_email_short_password.py
|
pavelwearevolt/Cross_Auth_TestsAutomatization
|
45c6a25352a1c893ef35a494a76088731db84ba7
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'pavelkosicin'
def test_validation_check_invalid_email_short_password(app):
    """Entering a one-character email and password must surface the combined
    invalid-email / short-password validation error message."""
    validation_page = app.validation
    validation_page.enter_wrong_data(username="p", password="a")
    validation_page.check_invalid_email_short_password_error_message()
| 32.142857
| 69
| 0.826667
| 29
| 225
| 5.793103
| 0.655172
| 0.178571
| 0.261905
| 0.321429
| 0.47619
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 225
| 6
| 70
| 37.5
| 0.811594
| 0
| 0
| 0
| 0
| 0
| 0.062222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.75
| 0
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
27681ce63af79a0c97c9fda3fe58c1960f252757
| 24,892
|
py
|
Python
|
assets/tests/workers/managed_feeds/test_managed_feeds_manager.py
|
47lining/quickstart-osisoft-pisystem2aws-connector
|
f6bdcb84b3cb271d3498d057474be6833f67b5be
|
[
"Apache-2.0"
] | null | null | null |
assets/tests/workers/managed_feeds/test_managed_feeds_manager.py
|
47lining/quickstart-osisoft-pisystem2aws-connector
|
f6bdcb84b3cb271d3498d057474be6833f67b5be
|
[
"Apache-2.0"
] | null | null | null |
assets/tests/workers/managed_feeds/test_managed_feeds_manager.py
|
47lining/quickstart-osisoft-pisystem2aws-connector
|
f6bdcb84b3cb271d3498d057474be6833f67b5be
|
[
"Apache-2.0"
] | null | null | null |
from io import BytesIO
from operator import itemgetter
from datetime import datetime
from freezegun import freeze_time
from tests.fixtures import *
@freeze_time('2016-01-02 12:00:00')
def test_get_recent_events(managed_feeds_manager, events_status_table):
    """get_recent_events(days) returns the events created within the last
    `days` days, most recently updated first."""

    def event(event_id, timestamp, day, point, event_type):
        # Every seeded event is successful and shares the same message.
        return {
            'id': event_id,
            'update_timestamp': timestamp,
            'create_date': day,
            'pi_point': point,
            'event_type': event_type,
            'is_success': True,
            'message': 'msg',
        }

    # NOTE(review): the two oldest rows use '2015-31-12' / '2015-30-12',
    # which looks like a day/month swap relative to the other dates; the
    # values are kept byte-identical to the original test data.
    seeded = [
        event('1', '2016-01-02 11:12:13', '2016-01-02', 'point1', 'backfill'),
        event('2', '2016-01-02 11:12:14', '2016-01-02', 'point1', 'backfill'),
        event('3', '2016-01-01 11:12:14', '2016-01-01', 'point2', 'backfill'),
        event('4', '2015-31-12 11:12:13', '2015-31-12', 'point1', 'interpolate'),
        event('5', '2015-30-12 11:12:15', '2015-30-12', 'point2', 'interpolate'),
    ]
    for item in seeded:
        events_status_table.put_item(Item=item)

    # Frozen "now" is 2016-01-02 12:00: one day back covers only the two
    # events created on 2016-01-02, newest update first.
    retrieved_events_last_day = managed_feeds_manager.get_recent_events(1)
    assert len(retrieved_events_last_day) == 2
    assert retrieved_events_last_day == [seeded[1], seeded[0]]

    # Two days back additionally includes the 2016-01-01 event.
    retrieved_events_2_days = managed_feeds_manager.get_recent_events(2)
    assert len(retrieved_events_2_days) == 3
    assert retrieved_events_2_days == [seeded[1], seeded[0], seeded[2]]
def test_list_pi_points(managed_feeds_manager, pi_points_dynamo_table):
    """get_pi_points returns every stored point, with its subscription
    status when one has been recorded."""
    for stored_item in (
        {'pi_point': 'point1', 'subscription_status': 'pending'},
        {'pi_point': 'point2', 'subscription_status': 'subscribed'},
        {'pi_point': 'point3'},
    ):
        pi_points_dynamo_table.put_item(Item=stored_item)
    retrieved = sorted(managed_feeds_manager.get_pi_points(),
                       key=itemgetter('pi_point'))
    assert retrieved == [
        {'pi_point': 'point1', 'subscription_status': 'pending'},
        {'pi_point': 'point2', 'subscription_status': 'subscribed'},
        {'pi_point': 'point3'}
    ]
@freeze_time('2016-01-02 11:12:13')
def test_send_subscribe_request(managed_feeds_manager, pi_points_dynamo_table, incoming_queue,
                                sqs_uuid4, events_status_table):
    """Subscribing marks both points pending, records a pending subscribe
    event and enqueues a subscribe message."""
    for point, asset in (('point1', 'asset1'), ('point2', 'asset2')):
        pi_points_dynamo_table.put_item(Item={'pi_point': point, 'asset': asset})
    sqs_uuid4.return_value = '1'

    managed_feeds_manager.send_subscribe_request(['point1', 'point2'])

    stored_points = sorted(pi_points_dynamo_table.scan()['Items'],
                           key=itemgetter('pi_point'))
    stored_events = events_status_table.scan()['Items']
    assert stored_points == [
        {'pi_point': 'point1', 'subscription_status': 'pending',
         'update_timestamp': '2016-01-02T11:12:13', 'asset': 'asset1'},
        {'pi_point': 'point2', 'subscription_status': 'pending',
         'update_timestamp': '2016-01-02T11:12:13', 'asset': 'asset2'}
    ]
    assert stored_events == [
        {'status': 'pending', 'event_type': 'subscribe', 'update_timestamp': '2016-01-02T11:12:13',
         'id': '1', 'pi_points': ['point1', 'point2'], 'create_date': '2016-01-02'}
    ]
    assert incoming_queue.messages == [
        {
            'id': '1',
            'action': 'subscribe',
            'created_at': '2016-01-02T11:12:13',
            'payload': {'points': ['point1', 'point2']}
        }
    ]
@freeze_time('2016-01-02 11:12:13')
def test_handle_subscribe_request(managed_feeds_manager, pi_points_dynamo_table, events_status_table):
    """A fully successful subscribe response flips both points to
    'subscribed' and marks the pending event as success."""
    for point in ('point1', 'point2'):
        pi_points_dynamo_table.put_item(Item={'pi_point': point})
    events_status_table.put_item(Item={'id': '1', 'status': 'pending', 'pi_points': ['point1', 'point2'], 'create_date': '2016-01-02'})

    managed_feeds_manager.handle_subscribe_request('1', {'points': ['point1', 'point2']})

    stored_points = sorted(pi_points_dynamo_table.scan()['Items'],
                           key=itemgetter('pi_point'))
    stored_events = events_status_table.scan()['Items']
    assert stored_points == [
        {'pi_point': 'point1', 'subscription_status': 'subscribed', 'update_timestamp': '2016-01-02T11:12:13'},
        {'pi_point': 'point2', 'subscription_status': 'subscribed', 'update_timestamp': '2016-01-02T11:12:13'}
    ]
    assert stored_events == [
        {'id': '1', 'update_timestamp': '2016-01-02T11:12:13', 'pi_points': ['point1', 'point2'], 'status': 'success',
         'create_date': '2016-01-02'}
    ]
@freeze_time('2016-01-02 11:12:13')
def test_handle_failed_subscribe_request(managed_feeds_manager, pi_points_dynamo_table, events_status_table):
    """A partially failed subscribe response subscribes only the reported
    points, unsubscribes the rest, and records the failure message."""
    for point in ('point1', 'point2'):
        pi_points_dynamo_table.put_item(Item={'pi_point': point, 'subscription_status': 'pending'})
    events_status_table.put_item(Item={'id': '1', 'status': 'pending', 'pi_points': ['point1', 'point2'], 'create_date': '2016-01-02'})

    # Only point1 succeeded; point2's failure is carried in error_message.
    managed_feeds_manager.handle_subscribe_request(
        '1', {'points': ['point1'], 'error_message': 'point2 failed'}
    )

    stored_points = sorted(pi_points_dynamo_table.scan()['Items'],
                           key=itemgetter('pi_point'))
    stored_events = events_status_table.scan()['Items']
    assert stored_points == [
        {'pi_point': 'point1', 'subscription_status': 'subscribed', 'update_timestamp': '2016-01-02T11:12:13'},
        {'pi_point': 'point2', 'subscription_status': 'unsubscribed', 'update_timestamp': '2016-01-02T11:12:13'}
    ]
    assert stored_events == [
        {'id': '1', 'update_timestamp': '2016-01-02T11:12:13', 'pi_points': ['point1', 'point2'],
         'status': 'failure', 'error_message': 'point2 failed', 'create_date': '2016-01-02'}
    ]
@freeze_time('2016-01-02 11:12:13')
def test_send_unsubscribe_request(managed_feeds_manager, incoming_queue, sqs_uuid4,
                                  events_status_table, pi_points_dynamo_table):
    """Unsubscribing marks both points pending, records a pending
    unsubscribe event and enqueues an unsubscribe message."""
    sqs_uuid4.return_value = '1'
    for point in ('point1', 'point2'):
        pi_points_dynamo_table.put_item(Item={'pi_point': point})

    managed_feeds_manager.send_unsubscribe_request(['point1', 'point2'])

    stored_points = sorted(pi_points_dynamo_table.scan()['Items'],
                           key=itemgetter('pi_point'))
    stored_events = events_status_table.scan()['Items']
    assert stored_points == [
        {'pi_point': 'point1', 'subscription_status': 'pending', 'update_timestamp': '2016-01-02T11:12:13'},
        {'pi_point': 'point2', 'subscription_status': 'pending', 'update_timestamp': '2016-01-02T11:12:13'}
    ]
    assert stored_events == [
        {'status': 'pending', 'event_type': 'unsubscribe', 'update_timestamp': '2016-01-02T11:12:13',
         'id': '1', 'pi_points': ['point1', 'point2'], 'create_date': '2016-01-02'}
    ]
    assert incoming_queue.messages == [
        {
            'id': '1',
            'action': 'unsubscribe',
            'created_at': '2016-01-02T11:12:13',
            'payload': {'points': ['point1', 'point2']}
        }
    ]
@freeze_time('2016-01-02 11:12:13')
def test_handle_unsubscribe_request(managed_feeds_manager, pi_points_dynamo_table, events_status_table):
    """A fully successful unsubscribe response flips every point to
    'unsubscribed' and the event to 'success'."""
    pi_points_dynamo_table.put_item(Item={'pi_point': 'point1'})
    pi_points_dynamo_table.put_item(Item={'pi_point': 'point2'})
    events_status_table.put_item(Item={'id': '1', 'status': 'pending', 'pi_points': ['point1', 'point2'], 'create_date': '2016-01-02'})
    payload = {'points': ['point1', 'point2']}  # all requested points succeeded
    managed_feeds_manager.handle_unsubscribe_request('1', payload)
    points = pi_points_dynamo_table.scan()['Items']
    # scan() order is unspecified; sort by key before comparing
    sorted_points = sorted(points, key=itemgetter('pi_point'))
    events = events_status_table.scan()['Items']
    assert sorted_points == [
        {'pi_point': 'point1', 'subscription_status': 'unsubscribed', 'update_timestamp': '2016-01-02T11:12:13'},
        {'pi_point': 'point2', 'subscription_status': 'unsubscribed', 'update_timestamp': '2016-01-02T11:12:13'}
    ]
    assert events == [
        {'id': '1', 'update_timestamp': '2016-01-02T11:12:13', 'pi_points': ['point1', 'point2'], 'status': 'success',
         'create_date': '2016-01-02'}
    ]
@freeze_time('2016-01-02 11:12:13')
def test_handle_failed_unsubscribe_request(managed_feeds_manager, pi_points_dynamo_table, events_status_table):
    """A partial failure: point1 (listed in the payload) becomes
    'unsubscribed', point2 is reverted to 'subscribed', and the event is
    marked 'failure' with the reported error message."""
    pi_points_dynamo_table.put_item(Item={'pi_point': 'point1', 'subscription_status': 'pending'})
    pi_points_dynamo_table.put_item(Item={'pi_point': 'point2', 'subscription_status': 'pending'})
    events_status_table.put_item(Item={'id': '1', 'status': 'pending', 'pi_points': ['point1', 'point2'], 'create_date': '2016-01-02'})
    # only point1 succeeded; point2 failed
    payload = {'points': ['point1'], 'error_message': 'point2 failed', 'create_date': '2016-01-02'}
    managed_feeds_manager.handle_unsubscribe_request('1', payload)
    points = pi_points_dynamo_table.scan()['Items']
    # scan() order is unspecified; sort by key before comparing
    sorted_points = sorted(points, key=itemgetter('pi_point'))
    events = events_status_table.scan()['Items']
    assert sorted_points == [
        {'pi_point': 'point1', 'subscription_status': 'unsubscribed', 'update_timestamp': '2016-01-02T11:12:13'},
        {'pi_point': 'point2', 'subscription_status': 'subscribed', 'update_timestamp': '2016-01-02T11:12:13'}
    ]
    assert events == [
        {'id': '1', 'update_timestamp': '2016-01-02T11:12:13', 'pi_points': ['point1', 'point2'],
         'status': 'failure', 'error_message': 'point2 failed', 'create_date': '2016-01-02'}
    ]
@freeze_time('2016-01-02 11:12:13')
def test_send_sync_pi_points_request(managed_feeds_manager, incoming_queue, sqs_uuid4, events_status_table):
    """Requesting a PI-points sync records a pending 'sync_pi_points' event
    and enqueues a message pointing at a timestamped S3 key."""
    sqs_uuid4.return_value = '1'  # fix the generated event id so asserts are deterministic
    managed_feeds_manager.send_sync_pi_points_request('bucket')
    sync_pi_points = events_status_table.scan()['Items']
    assert incoming_queue.messages == [
        {
            'id': '1',
            'action': 'sync_pi_points',
            'created_at': '2016-01-02T11:12:13',
            'payload': {
                's3_bucket': 'bucket',
                # key embeds the frozen timestamp (YYYYMMDD_HHMMSS)
                's3_key': 'pi_points_sync/20160102_111213/pi_points.json'
            }
        }
    ]
    assert sync_pi_points == [
        {
            'id': '1',
            'update_timestamp': '2016-01-02T11:12:13',
            'create_date': '2016-01-02',
            'event_type': 'sync_pi_points',
            'status': 'pending',
            's3_bucket': 'bucket',
            's3_key': 'pi_points_sync/20160102_111213/pi_points.json'
        }
    ]
@freeze_time('2017-01-02 11:12:13')
def test_handle_sync_pi_points(managed_feeds_manager, pi_points_dynamo_table, events_status_table, s3_resource):
    """Handling a successful sync reconciles the points table against the
    point list stored in S3: points absent from the snapshot (point2,
    point4) are removed, existing points keep their status, and new points
    (point5) are inserted as 'unsubscribed'."""
    pi_points_dynamo_table.put_item(Item={'pi_point': 'point1', 'subscription_status': 'pending'})
    pi_points_dynamo_table.put_item(Item={'pi_point': 'point2', 'subscription_status': 'pending'})
    pi_points_dynamo_table.put_item(Item={'pi_point': 'point3', 'subscription_status': 'subscribed'})
    pi_points_dynamo_table.put_item(Item={'pi_point': 'point4', 'subscription_status': 'subscribed'})
    events_status_table.put_item(
        Item={
            'id': '1',
            'update_timestamp': '2016-01-02T11:12:13',
            'create_date': '2016-01-02',
            'event_type': 'sync_pi_points',
            'status': 'pending',
            's3_bucket': 'bucket',
            's3_key': 'pi_points.json'
        }
    )
    # S3 snapshot of the authoritative point list
    s3_resource.Bucket('bucket').upload_fileobj(
        BytesIO(b'["point1","point3","point5"]'),
        'pi_points.json'
    )
    payload = {
        'is_success': True
    }
    managed_feeds_manager.handle_sync_pi_points('1', payload)
    points = pi_points_dynamo_table.scan()['Items']
    # scan() order is unspecified; sort by key before comparing
    sorted_points = sorted(points, key=itemgetter('pi_point'))
    events = events_status_table.scan()['Items']
    assert events == [
        {
            'id': '1', 'update_timestamp': '2017-01-02T11:12:13', 'event_type': 'sync_pi_points',
            'status': 'success', 's3_bucket': 'bucket', 's3_key': 'pi_points.json', 'create_date': '2016-01-02'
        }
    ]
    assert sorted_points == [
        {'pi_point': 'point1', 'subscription_status': 'pending'},
        {'pi_point': 'point3', 'subscription_status': 'subscribed'},
        {'pi_point': 'point5', 'subscription_status': 'unsubscribed', 'update_timestamp': '2017-01-02T11:12:13'}
    ]
@freeze_time('2016-01-02 11:12:13')
def test_send_sync_af_request(managed_feeds_manager, incoming_queue, events_status_table, sqs_uuid4):
    """Requesting an AF-structure sync records a pending 'sync_af' event and
    enqueues a message with a timestamped, database-scoped S3 key."""
    sqs_uuid4.return_value = '1'  # fix the generated event id so asserts are deterministic
    managed_feeds_manager.send_sync_af_request('bucket', 'database')
    af_structures = events_status_table.scan()['Items']
    assert incoming_queue.messages == [
        {
            'id': '1',
            'action': 'sync_af',
            'created_at': '2016-01-02T11:12:13',
            'payload': {
                'database': 'database',
                's3_bucket': 'bucket',
                # key embeds database name and frozen timestamp
                's3_key': 'af_structure_sync/database/20160102_111213/af_structure.json'
            }
        }
    ]
    assert af_structures == [
        {
            'id': '1',
            'update_timestamp': '2016-01-02T11:12:13',
            'create_date': '2016-01-02',
            'event_type': 'sync_af',
            'status': 'pending',
            's3_bucket': 'bucket',
            's3_key': 'af_structure_sync/database/20160102_111213/af_structure.json',
            'database': 'database'
        }
    ]
@freeze_time('2016-01-02 11:12:13')
def test_handle_sync_af(managed_feeds_manager, events_status_table):
    """A successful sync_af response flips the event status from 'pending'
    to 'success' and refreshes its update_timestamp; all other event fields
    are preserved."""
    events_status_table.put_item(
        Item={
            'id': '1',
            'update_timestamp': '2015-11-02T22:22:22',  # stale; should be overwritten
            'create_date': '2016-01-02',
            'status': 'pending',
            's3_bucket': 's3_bucket_name',
            's3_prefix': 's3_prefix',
            'database': 'db_name'
        }
    )
    payload = {
        "is_success": True
    }
    managed_feeds_manager.handle_sync_af('1', payload)
    af_structures = events_status_table.scan()['Items']
    assert af_structures == [
        {
            'id': '1',
            'update_timestamp': '2016-01-02T11:12:13',
            'create_date': '2016-01-02',
            'status': 'success',
            's3_bucket': 's3_bucket_name',
            's3_prefix': 's3_prefix',
            'database': 'db_name'
        }
    ]
@freeze_time('2016-01-02 11:12:13')
def test_send_backfill_request(managed_feeds_manager, incoming_queue, events_status_table, sqs_uuid4):
    """A date-range backfill request records a pending 'backfill' event and
    enqueues a message carrying the points and from/to bounds."""
    # Use the string id directly, consistent with the other tests in this
    # module (the asserts below expect 'id': '1' as a string).
    sqs_uuid4.return_value = '1'
    managed_feeds_manager.send_backfill_request(
        query_syntax=False,  # explicit from/to range, not a PI time query
        feeds=['point1', 'point2'],
        request_from='2016-01-02T11:12:13',
        request_to='2016-01-02T11:12:13',
        name='name'
    )
    points = events_status_table.scan()['Items']
    assert incoming_queue.messages == [
        {
            'id': '1',
            'action': 'backfill',
            'created_at': '2016-01-02T11:12:13',
            'payload': {
                'points': ['point1', 'point2'],
                'from': '2016-01-02T11:12:13',
                'to': '2016-01-02T11:12:13',
                'use_query_syntax': False,
                'backfill_name': 'name'
            }
        }
    ]
    assert points == [
        {
            'id': '1',
            'pi_points': ['point1', 'point2'],
            'event_type': 'backfill',
            'status': 'pending',
            'update_timestamp': '2016-01-02T11:12:13',
            'create_date': '2016-01-02',
            'name': 'name'
        }
    ]
@freeze_time('2016-01-02 11:12:13')
def test_send_backfill_request_with_query(managed_feeds_manager, incoming_queue, events_status_table, sqs_uuid4):
    """A query-syntax backfill request forwards the PI time query instead of
    a from/to range."""
    # Use the string id directly, consistent with the other tests in this
    # module (the asserts below expect 'id': '1' as a string).
    sqs_uuid4.return_value = '1'
    managed_feeds_manager.send_backfill_request(
        query_syntax=True,
        feeds=['point1', 'point2'],
        query='-1d',  # PI relative-time query
        name='name'
    )
    points = events_status_table.scan()['Items']
    assert incoming_queue.messages == [
        {
            'id': '1',
            'action': 'backfill',
            'created_at': '2016-01-02T11:12:13',
            'payload': {
                'points': ['point1', 'point2'],
                'query': '-1d',
                'use_query_syntax': True,
                'backfill_name': 'name'
            }
        }
    ]
    assert points == [
        {
            'id': '1',
            'pi_points': ['point1', 'point2'],
            'event_type': 'backfill',
            'status': 'pending',
            'update_timestamp': '2016-01-02T11:12:13',
            'create_date': '2016-01-02',
            'name': 'name'
        }
    ]
@freeze_time('2016-01-02 11:12:13')
def test_handle_backfill(managed_feeds_manager, events_status_table):
    """An empty backfill status payload (no failed points) marks the event
    'success' and refreshes its update_timestamp."""
    events_status_table.put_item(
        Item={
            'id': '1',
            'update_timestamp': '1999-11-11T22:22:22',  # stale; should be overwritten
            'create_date': '2016-01-02',
            'pi_points': ['point1'],
            'event_type': 'backfill',
            'status': 'pending',
        }
    )
    managed_feeds_manager.handle_backfill_status('1', {})
    points = events_status_table.scan()['Items']
    assert points == [
        {
            "id": '1',
            "update_timestamp": '2016-01-02T11:12:13',
            'create_date': '2016-01-02',
            'event_type': 'backfill',
            'pi_points': ['point1'],
            'status': 'success',
        }
    ]
@freeze_time('2016-01-02 11:12:13')
def test_handle_backfill_failed(managed_feeds_manager, events_status_table):
    """A backfill status payload listing failed points marks the event
    'failure' and stores the per-point errors as a stringified mapping."""
    events_status_table.put_item(
        Item={
            "id": '1',
            "update_timestamp": '1999-11-11T22:22:22',  # stale; should be overwritten
            "pi_points": ["point1"],
            'event_type': 'backfill',
            'status': 'pending',
        }
    )
    payload = {
        'failed_points': [{
            'point': 'point1',
            'error_message': 'fail'
        }]
    }
    managed_feeds_manager.handle_backfill_status('1', payload)
    points = events_status_table.scan()['Items']
    assert points == [
        {
            "id": '1',
            "update_timestamp": '2016-01-02T11:12:13',
            "pi_points": ["point1"],
            'event_type': 'backfill',
            # failed points are collapsed into a str({point: message}) value
            "error_message": "{'point1': 'fail'}",
            'status': 'failure',
        }
    ]
@freeze_time('2016-01-02 11:12:13')
def test_send_interpolate_request(managed_feeds_manager, incoming_queue, events_status_table, sqs_uuid4):
    """A date-range interpolation request records a pending 'interpolate'
    event and enqueues a message with the interval converted to seconds."""
    # Use the string id directly, consistent with the other tests in this
    # module (the asserts below expect 'id': '1' as a string).
    sqs_uuid4.return_value = '1'
    managed_feeds_manager.send_interpolate_request(
        query_syntax=False,  # explicit from/to range, not a PI time query
        feeds=['point1', 'point2'],
        interval=1,
        interval_unit='seconds',
        request_from='2016-01-02T11:12:13',
        request_to='2016-01-02T11:12:13',
        name='name'
    )
    points = events_status_table.scan()['Items']
    assert incoming_queue.messages == [
        {
            "id": "1",
            "action": 'interpolate',
            'created_at': '2016-01-02T11:12:13',
            "payload": {
                "points": ['point1', 'point2'],
                'from': '2016-01-02T11:12:13',
                'to': '2016-01-02T11:12:13',
                'use_date_query_syntax': False,
                'interval_seconds': 1,  # interval normalized to seconds
                'interpolation_name': 'name'
            }
        }
    ]
    assert points == [
        {
            'id': '1',
            'pi_points': ['point1', 'point2'],
            'event_type': 'interpolate',
            'status': 'pending',
            'update_timestamp': '2016-01-02T11:12:13',
            'create_date': '2016-01-02',
            'name': 'name'
        }
    ]
@freeze_time('2016-01-02 11:12:13')
def test_send_interpolate_request_with_query(managed_feeds_manager, incoming_queue, events_status_table, sqs_uuid4):
    """A query-syntax interpolation request forwards the PI time query
    instead of a from/to range."""
    # Use the string id directly, consistent with the other tests in this
    # module (the asserts below expect 'id': '1' as a string).
    sqs_uuid4.return_value = '1'
    managed_feeds_manager.send_interpolate_request(
        query_syntax=True,
        feeds=['point1', 'point2'],
        interval=1,
        interval_unit='seconds',
        query='-1d',  # PI relative-time query
        name='name'
    )
    points = events_status_table.scan()['Items']
    assert incoming_queue.messages == [
        {
            "id": "1",
            "action": 'interpolate',
            'created_at': '2016-01-02T11:12:13',
            "payload": {
                "points": ['point1', 'point2'],
                'date_query': '-1d',
                'interval_seconds': 1,  # interval normalized to seconds
                'use_date_query_syntax': True,
                'interpolation_name': 'name'
            }
        }
    ]
    assert points == [
        {
            'id': '1',
            'pi_points': ['point1', 'point2'],
            'event_type': 'interpolate',
            'status': 'pending',
            'update_timestamp': '2016-01-02T11:12:13',
            'create_date': '2016-01-02',
            'name': 'name'
        }
    ]
@freeze_time('2016-01-02 11:12:13')
def test_handle_interpolation(managed_feeds_manager, events_status_table):
    """An empty interpolation status payload (no failed points) marks the
    event 'success' and refreshes its update_timestamp."""
    events_status_table.put_item(
        Item={
            'id': '1',
            'pi_points': ['point1'],
            'event_type': 'interpolate',
            'status': 'pending',
            'update_timestamp': '1999-01-02T11:12:13',  # stale; should be overwritten
            'create_date': '2016-01-02'
        }
    )
    managed_feeds_manager.handle_interpolation_status('1', {})
    points = events_status_table.scan()['Items']
    assert points == [
        {
            'id': '1',
            'pi_points': ['point1'],
            'event_type': 'interpolate',
            'status': 'success',
            'update_timestamp': '2016-01-02T11:12:13',
            'create_date': '2016-01-02'
        }
    ]
@freeze_time('2016-01-02 11:12:13')
def test_handle_interpolation_with_failure(managed_feeds_manager, events_status_table):
    """An interpolation status payload listing failed points marks the event
    'failure' and stores the per-point errors as a stringified mapping."""
    events_status_table.put_item(
        Item={
            'id': '1',
            'pi_points': ['point1', 'point2'],
            'event_type': 'interpolate',
            'status': 'pending',
            'update_timestamp': '1999-01-02T11:12:13',  # stale; should be overwritten
            'create_date': '2016-01-02'
        }
    )
    payload = {
        "failed_points": [
            {
                "point": "point1",
                "error_message": "fail"
            }
        ]
    }
    managed_feeds_manager.handle_interpolation_status('1', payload)
    points = events_status_table.scan()['Items']
    assert points == [
        {
            'id': '1',
            'pi_points': ['point1', 'point2'],
            # failed points are collapsed into a str({point: message}) value
            'error_message': "{'point1': 'fail'}",
            'event_type': 'interpolate',
            'status': 'failure',
            'update_timestamp': '2016-01-02T11:12:13',
            'create_date': '2016-01-02'
        }
    ]
| 33.637838
| 135
| 0.569299
| 2,786
| 24,892
| 4.774946
| 0.048816
| 0.04826
| 0.034278
| 0.041344
| 0.916786
| 0.904382
| 0.887319
| 0.859355
| 0.831166
| 0.804405
| 0
| 0.095915
| 0.268279
| 24,892
| 739
| 136
| 33.683356
| 0.634457
| 0
| 0
| 0.628165
| 0
| 0
| 0.323598
| 0.011249
| 0
| 0
| 0
| 0
| 0.060127
| 1
| 0.031646
| false
| 0
| 0.007911
| 0
| 0.039557
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
276f3c12f3d7113cfb3624c87f50d043631d8fc0
| 923
|
py
|
Python
|
tests/test_burn.py
|
boeddeker/ci_sdr
|
e1b5c1f9b25baab91f04eb2c96ed392cf0b313cd
|
[
"MIT"
] | 38
|
2021-01-16T22:59:42.000Z
|
2022-03-06T12:34:33.000Z
|
tests/test_burn.py
|
boeddeker/ci_sdr
|
e1b5c1f9b25baab91f04eb2c96ed392cf0b313cd
|
[
"MIT"
] | 2
|
2021-01-26T16:25:26.000Z
|
2021-05-27T08:07:09.000Z
|
tests/test_burn.py
|
boeddeker/ci_sdr
|
e1b5c1f9b25baab91f04eb2c96ed392cf0b313cd
|
[
"MIT"
] | 7
|
2021-01-18T01:43:38.000Z
|
2021-06-23T12:06:49.000Z
|
import torch
import ci_sdr
def test_burn_single_source():
    """Single-source regression test: CI-SDR of a signal against a copy that
    is perturbed in its first four samples must return a scalar matching the
    recorded reference value."""
    t1 = torch.tensor([1., 2, 4, 7, 1, 3, 7, 8, 0, 3, 4])
    t2 = torch.clone(t1)
    t2[:4] += 2  # perturb the estimate so the SDR is finite
    sdr = ci_sdr.pt.ci_sdr(t1, t2, filter_length=3)
    assert sdr.shape == (), sdr.shape  # scalar output for a single source
    torch.testing.assert_allclose(sdr, 13.592828750610352)
def test_burn_multi_source():
    """Two-source regression test: per-source SDR values must match the
    recorded references, and permutation solving must recover the correct
    assignment when the estimates are swapped."""
    t1 = torch.tensor([
        [1., 2, 4, 7, 1, 3, 7, 8, 0, 3, 4],
        [5., 2, 7, 9, 3, 8, 4, 2, 9, 4, 5],
    ])
    t2 = torch.clone(t1)
    t2[:, :4] += 2  # perturb the estimates so the SDR is finite
    sdr = ci_sdr.pt.ci_sdr(t1, t2, filter_length=3, compute_permutation=False)
    assert sdr.shape == (2,), sdr.shape  # one SDR value per source
    torch.testing.assert_allclose(sdr, [13.592828750610352, 17.48115348815918])
    # With the estimates swapped, permutation solving should map them back
    # to the matching references and yield the same per-source values.
    sdr = ci_sdr.pt.ci_sdr(
        t1, t2[(1, 0), :], filter_length=3, compute_permutation=True)
    assert sdr.shape == (2,), sdr.shape
    torch.testing.assert_allclose(sdr, [13.592828750610352, 17.48115348815918])
| 29.774194
| 79
| 0.611051
| 151
| 923
| 3.596026
| 0.251656
| 0.064457
| 0.044199
| 0.055249
| 0.832413
| 0.742173
| 0.742173
| 0.742173
| 0.707182
| 0.607735
| 0
| 0.194483
| 0.214518
| 923
| 30
| 80
| 30.766667
| 0.554483
| 0
| 0
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.26087
| 1
| 0.086957
| false
| 0
| 0.086957
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
279761e2ea7e6d51aea747789239fced11d3554f
| 36
|
py
|
Python
|
evprediction/__init__.py
|
rohithdesikan/evprediction
|
3ea5a2b3db350397385c9c9835483eb7dfb2773b
|
[
"MIT"
] | 1
|
2021-03-23T01:25:21.000Z
|
2021-03-23T01:25:21.000Z
|
evprediction/__init__.py
|
rohithdesikan/evprediction
|
3ea5a2b3db350397385c9c9835483eb7dfb2773b
|
[
"MIT"
] | null | null | null |
evprediction/__init__.py
|
rohithdesikan/evprediction
|
3ea5a2b3db350397385c9c9835483eb7dfb2773b
|
[
"MIT"
] | null | null | null |
from .models import convert_to_array
| 36
| 36
| 0.888889
| 6
| 36
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 36
| 1
| 36
| 36
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
27b8e6ef6975fc2cafc1f708184d3597791f9866
| 37
|
py
|
Python
|
TagsAsADatabase/__init__.py
|
OrenLeung/AWSTagsAsADatabase
|
5d0fefc541170114fbde7c520ac903efac14d42a
|
[
"MIT"
] | 37
|
2021-08-31T22:14:26.000Z
|
2021-09-30T10:53:38.000Z
|
TagsAsADatabase/__init__.py
|
OrenLeung/AWSTagsAsADatabase
|
5d0fefc541170114fbde7c520ac903efac14d42a
|
[
"MIT"
] | 1
|
2021-09-06T23:44:22.000Z
|
2021-09-06T23:44:22.000Z
|
TagsAsADatabase/__init__.py
|
OrenLeung/AWSTagsAsADatabase
|
5d0fefc541170114fbde7c520ac903efac14d42a
|
[
"MIT"
] | null | null | null |
from .database import DatabaseClient
| 18.5
| 36
| 0.864865
| 4
| 37
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fd6652da1ecad95dc433587280dbaba0f7c0dd48
| 20,450
|
py
|
Python
|
tasks-deploy/broadcast/generate.py
|
chankruze/qctf-school-2018
|
1e732cf264ee0a94bc2fc1fd8cf3a20660d57605
|
[
"MIT"
] | null | null | null |
tasks-deploy/broadcast/generate.py
|
chankruze/qctf-school-2018
|
1e732cf264ee0a94bc2fc1fd8cf3a20660d57605
|
[
"MIT"
] | null | null | null |
tasks-deploy/broadcast/generate.py
|
chankruze/qctf-school-2018
|
1e732cf264ee0a94bc2fc1fd8cf3a20660d57605
|
[
"MIT"
] | null | null | null |
tokens = ['0031834b-ae7c-e116-576f-5bca37d05b78', '01690fff-bad4-8cc7-c9fd-6f1a3617caaf', '01f08ea7-aa8a-751d-2170-fd9d8c6a6d53', '0271ba8e-982e-17f8-0696-df6217651a96', '02c4fe49-f60a-deea-e9a6-62b574351be9', '0308bb28-6e1f-b0f4-c0f1-bab9212069fb', '03af9789-f51f-466e-5030-5129cdb0a556', '04834ee4-d89b-cbb7-922f-7e44d1a2d5df', '05a16ff2-5e48-7dd2-8dbc-3deb3c4c8472', '06f84160-1255-399d-0bae-c0f154cbc11f', '0759ff1d-fa9e-7ec2-0d66-4ed31510d014', '076663c3-cbcd-a3fa-6192-42109ed33dab', '084015d2-0637-d56d-e7b3-3a2b508367ba', '08bdb0b5-e853-9480-108e-148236acf010', '0a484db2-08b3-01c6-77ca-df01c07aae8b', '0a6add03-30f4-26a7-47e4-7b9d7e62f879', '0aa5fae3-2c8e-48b9-8c65-58a606b94da5', '0b2f7ba5-ffd5-c17f-2063-17be2fdbd316', '0b564d2e-8df8-3ed3-134a-755d0dde4942', '0b838597-67db-6c28-bd6c-4d2d793a5c0a', '0bdd9a2b-5935-a215-b420-a60cb3673c26', '0d5a7c7b-a785-1315-ec13-dea7ebf80b06', '0d66c55a-76e3-5336-fa55-dfd3de4548cc', '0d7f4c57-df77-a497-3c67-5d4fee4a40a2', '0dabee82-ff2c-885d-0fd3-05c7561bdfb1', '1014a02f-fcc2-89e9-16f9-f9e394dd8140', '10572fcc-a281-bccd-bfa3-c569b777498b', '10674d89-d0f5-7038-b072-32a1da1a32ff', '10ee27b2-4b23-a9f0-c9cb-49ab34beb461', '11d13fa1-97cf-b649-e057-a95172212121', '1212d428-a2a1-7707-dbc1-9287dd9c0bca', '1219c620-f8a1-5425-758f-aee60b10471a', '1273e11f-7d97-6f5e-f599-2c8e442feabf', '1388cb76-de5a-4b7f-363d-bde45928c37a', '13bbb130-cc8b-7ce9-1838-28da8d003dec', '1463a45c-2e88-4196-d063-d1ead5608c35', '14ba0d1e-ab79-f7c0-de8f-3304fe6208c4', '14d3ad8b-6223-f1ac-8819-176fe738b392', '150cd363-c60f-da5f-1a3c-e8ce1499cb3a', '158f05ab-51f4-a919-f9db-a48e59de25c9', '15b47d54-79bd-8ea0-7984-2e69f6d2d847', '15bacea8-ad78-44b0-3d06-66c02f559aac', '16fc8e64-e93a-df0b-7082-6ff15cfaf62f', '18155208-f948-285f-0879-3cf518cf0ead', '191ba6a1-9782-5572-7f04-9ec8f5d74836', '19422c9c-f126-d26c-6191-e84188535992', '1956cbf9-d58e-773e-86b5-a704c59ba56b', '1a06849b-cc24-e8b9-3646-6a37007c6eff', '1ab7f05c-2957-5e1c-e0b4-ef0d4cce5232', 
'1afab32a-a327-1106-242b-72aa0b4dc1c7', '1b051285-77ae-d173-e1da-6f4489c4875e', '1c50764e-2f14-855c-53d3-48cef2522658', '1cce568c-a22f-a83b-47b6-102764cff1ca', '1eac20ab-48c3-a4a0-c5c1-e552d4f3b726', '1fd21f32-73bc-e0a2-6b17-e6cb8e90e5ee', '1fda9e9f-e0d5-f227-c7af-c78e3d83a064', '2214d499-a213-3ae5-4a24-2e8e5ccc5f17', '222f8364-4ae8-0041-1f4c-4b6c2e485951', '22654f7a-27a0-88a5-3314-559e9ba8365b', '23f8c3b4-288b-7746-ff55-61550d6c1cb9', '240dc978-d06b-1cf8-eafd-e3944d0bb890', '24743bfe-0f50-a803-2e9b-1b596838add9', '24d1e899-1933-bb66-583f-3c834a0643f7', '25da3424-126f-68c4-94f1-e5595fea5f58', '25ee5e09-3662-af84-f578-8bdd1afa1e38', '274ff6f3-151e-cb44-d349-be3fe581fa1a', '27684d6d-b465-efb1-b78b-e68fee452a01', '27714f03-9d83-be62-571f-e666a418bfce', '27f7f6d0-7e49-b5d6-b107-c6b2ec0c8ae0', '289342db-e054-9644-2955-c5f9b30b3b84', '28f92486-19ef-5e4f-c6d3-0bcea31ff32b', '29426581-59e6-afe1-0235-9fe9549db7f5', '2a5fdf59-1bb9-512c-93a3-fcf4860661d7', '2aa95ec5-af7d-a4f5-9c36-0f45bd1fe227', '2b56324b-d8bf-df42-f039-23e7266d230b', '2b68901b-4194-5fcf-2990-d970a3c5470a', '2be38ff8-09c2-3502-5841-1d4f01a01e4f', '2be9c69e-a859-5c58-c185-0c55f4e204b0', '2c7a3fdb-6453-2986-2f4b-12468c733fb2', '2c7cc0b2-dade-af9e-f09b-269751622b28', '2d32bd56-b742-bb83-14d7-76644b1ece4e', '2d89557c-9fe6-482d-7b54-1215aae31eec', '2d9b429d-cdd3-251f-bf69-0219b902141b', '2dbb73c4-7098-ccd9-2cb0-bbf0a44e8670', '2e010087-3d90-ac2c-3684-653657dd92ea', '2e1a3482-e0b5-4462-c81c-fc2bccddb455', '2e2a9ee2-b87b-c6cb-6592-830cf538166d', '2e6dea9c-c73e-c72e-b132-ee73b651cfb5', '2e729e3b-ae4a-4ede-2b7c-76b29a694ab9', '3044192d-30be-9e2f-de03-d6c933481b1d', '308eef56-0df1-fe36-d43d-12d6d32ad581', '3167596a-fcc4-3a03-0273-adf439a5b35c', '326ff8ff-9c18-6095-b26f-6595f053b1b7', '3339f3cf-415f-807d-902f-e5f3f0d4e7c7', '334a72ef-50eb-0a9a-7045-36833cad1f20', '34ec32ca-fcc7-947b-70f3-e8bd59faa9fb', '352a1639-612e-7caa-ad64-a91061ce136d', '353f2e87-4f43-11d1-3fb4-f92538de8bac', '35b3ae28-884d-36d9-758b-811356bdc9b9', 
'35c0e0bc-d8bf-092e-206c-727fd36ec18e', '35fb7c79-bd9a-54d8-22ef-9a33c8d090dd', '365af881-4070-4680-00f7-8f7a4f052f80', '368f4e6e-0e66-cf5b-a5ec-a7b2510c18c7', '372692e2-eea9-1371-594c-ce125faddcd6', '376f6849-6495-b427-b069-4c4b0857b58c', '377d6dc2-620b-002a-ea65-b1f27e536df5', '37def1d1-d74a-be3d-2514-227e9a3a48ce', '37e4c502-d53c-cff8-b44d-b6719a722b22', '380fec3d-6be9-d2b3-ce75-42868b72a77c', '3813b15d-6d7e-b4db-c0a0-3dbfb42686ec', '3825a45a-15a0-3fef-1a6c-3eac50f4cb28', '38313634-9242-77c1-1737-e892fbd0a4ab', '383d69c0-9dcd-3a0d-25d6-b2581c1122d3', '397c049c-dda9-98ee-f749-c52eeb85bb87', '3a156b30-5c6e-f86b-5de1-b070c7598449', '3a1caeda-05b7-aa5c-5bdb-13867577b793', '3ad33107-a8b3-de73-fed0-7d13c7571ce5', '3b09240a-9541-0611-e55d-f70e08a1a946', '3bbbc09b-15a9-7ddd-1b18-1f74bf0a6ca3', '3c55ca30-0304-7add-a372-7487093b0aa8', '3d093fdc-3d16-515a-9bb5-8ad32970a4a9', '3d20a8e5-7910-a180-6006-fba0dd95ae13', '3d8b743d-ab70-1a6a-cc4e-1dd4856e4330', '3da7dc8d-30b8-9ed4-ac78-23e0001bf0dd', '3df8dac8-97b4-8a9b-d7e9-d99a95d55014', '3e3907db-121c-3ee7-2b91-538e4283904e', '3f9eacc7-b992-b060-8d79-ed8975033ec8', '403032a6-0cf1-4a89-c04e-94aad64ee980', '40707b19-5e88-28d4-0d93-df7aae579059', '409786eb-ba6d-375a-e3d5-349463c604a5', '4117e446-42a2-9c0b-4d68-0190fead1acc', '4121dbd2-0a37-c39d-a397-d867957f072c', '420f3611-7b49-e656-f841-51c148468bdd', '4253a514-354f-5564-68f1-98db8926067e', '42c49758-ccc1-d976-8c57-c97d634e8946', '42d13a0a-f23a-e541-05d2-0d0d063a27c1', '43244b9e-6060-2a68-8a62-2b9febc4c3ed', '434e7fa7-9c91-952f-4151-b46e7693ac8d', '435c4bfb-cc85-6365-1713-90bd019bd7c6', '435f6edb-deac-48bd-5e44-38abb51ba7df', '4369bc12-d2c8-73dc-dcba-05b6a03eeba7', '43b7e2f9-0027-e128-f550-edfd94ec1c4a', '442ac487-0c9e-8c5f-f93d-66cd5d8fdb15', '452e54c2-968b-3d96-17a1-ed59f39f1639', '45728065-cdf3-27e8-f03f-e4ba1d58d2ea', '4629af29-6ff1-dfd6-aba0-645a3f9162c6', '46c895fa-3a2a-94f9-9564-feca76004bf0', '49a11105-59a1-2941-6373-91bcb341cf8c', '49d18c30-cd42-be27-e2f3-dae10c045d33', 
'4a8bf00e-516f-0bc6-b9b0-ea207f9de944', '4af11ede-b658-a58c-7167-f8ae12b025f0', '4b123037-beba-69c8-f7c5-723251f43d51', '4be93013-28cc-179b-1f77-4891747a5874', '4bfe0970-b34a-6192-115f-b78ae14061e8', '51698ed8-52d9-0b70-4136-b3c69928a0f2', '51e82cda-ea86-cfc5-1f0f-f9216c35738e', '52190e05-c679-989f-8e20-be43513f7fde', '527c0fa2-6cd5-0d09-c549-da40fa327642', '534eedaf-9b79-49b1-366b-5e1f98f7acd5', '540080e1-f262-e3bf-041a-1ab8c3aee571', '54657139-036e-c363-0707-a5ad98f48169', '54d309ab-62e2-0f9a-0480-899664e877c1', '55f3b2ac-68d4-9ff9-b95f-148bfcb718e1', '5640ce93-6d2b-57b0-957b-3dad34fc830b', '5677242c-eb27-f9c8-0fad-27667f340c24', '56c49a6b-15d2-173f-6ad3-e491be745687', '573fd27e-7473-5367-e3f0-53bab6c9c4cd', '586fc707-9171-1110-1c0d-19c33a2f0008', '58c61a69-b6b5-f87e-788c-794184147e0b', '5975427d-1f82-47bf-e35b-902daf27ae2a', '59776dd6-b26d-4fc3-8077-2c2b9334296a', '59d7fe1f-e086-8cef-34ce-0726db1b24de', '5a3d37cf-9f17-8eaf-5211-42a4496147f1', '5ae9760c-c405-bf63-f9e5-353b134bee45', '5af63001-c510-848b-14a2-49c86035b79d', '5b380a6a-fa5c-babe-4caf-29a1ac982e77', '5b686387-3242-3ea1-9e6d-c08b73f2bae2', '5c2d1b9a-8b9c-b2be-d0a2-c50f92684a04', '5cbd37ec-5e00-a821-ff27-6b48cd480c0b', '5cd51d0d-7755-6e03-63c0-80e5d0840b9a', '5dc4984f-bff4-d10b-3997-615620bf0f72', '5df76e03-bd93-7d3f-a48a-5011436e78b6', '5dfdee31-4bcb-bf3a-8ab5-3af8a303b0bd', '5e50de0c-2276-d0a5-a47e-7283aead8e67', '5f01c7eb-2efa-5d32-987d-0bc1a5f1522d', '5f04454a-c734-891f-2578-b4e78264db0a', '5f697eb9-e23d-cfef-23c7-0c58fc7a8bc3', '5fe69a8c-105e-8227-9ca4-9d3906ad37e3', '603a406c-fa8f-6dec-4def-384830d1ac76', '61386e11-b94c-d24d-1320-1fe17ce8b3ed', '626b9edb-c374-ef86-bceb-b9907b7a0d73', '627b7016-29ec-6318-c9e0-ae90f0b77278', '62ad6ca5-2b55-7dd4-edf6-15ca49181b22', '62bf9676-25b0-c8b2-6a00-e3a46f640755', '63165909-eca0-c4f9-60ac-f5e34fb4f338', '63203a11-57bd-4264-3412-9b14a6c3163d', '63a42a88-0c6c-ece9-775c-14df377a5d01', '6418ecad-c11e-ee4c-1457-4f509afaefdf', '6459e5bc-3d83-1cd0-58ed-7fd1cf0bd573', 
'6590ac98-68f3-bcd2-377a-7d46aef4ed2e', '65ede597-5ce6-e4c8-fe42-ee1741d53b6e', '66087e43-6464-9e2f-3930-9e10c58acad8', '663c3a5a-b027-4d4e-e8ef-77d0bb0034ac', '695eff6a-55bc-c7d4-3a61-363115c4062d', '699d075f-6e71-8f15-fa72-622cc08a5861', '69a2a517-d641-9bf3-df7a-9a2b8580d66e', '6bdf96b5-eeb0-ee82-2c92-ce5461093039', '6c9ff3f1-8830-a840-f5c5-157bd8ff3633', '6cfd31c0-45dc-5a76-e08a-f61eb51db2e2', '6d8b2045-6309-5962-b74f-1a78e0122f50', '6d996969-5642-6612-bd66-20ade8e53de2', '6df4b004-b26c-b9b1-c88e-aa95cf7bc3cf', '6e16af12-f688-41ff-cf46-abfab75f1f0d', '6e755cd6-3868-cba8-3217-870239b7574e', '6ea65946-cbdd-c4b9-bab5-161fb38b5fb1', '6ee50a73-d404-9364-fc1b-84a1cfa6c665', '6f6a448c-0dcb-e5fb-c2ba-bc848285bd81', '6f80d7fb-bf99-8eaa-13fb-4f4348120952', '6ffafef1-db89-efd2-3ec6-7cd247712219', '70e81d36-040e-33d0-b1f0-c4547a8cd893', '71010e1c-f037-8e1b-77eb-a0e78cb30811', '722e81bd-b5c1-cd1d-ac04-d50f7ee6ed6c', '722e8ce6-54e0-5d2e-4ffb-126af4467a71', '723a7504-0114-70ce-37f9-c6ddc5d6947f', '7250e4c9-abca-5223-d3e5-c8e3c7fa45fa', '7278b261-36a9-2c0e-4b5d-7fa67b6373c0', '72971992-ad14-c2db-5feb-286116c8db59', '72cd979c-c497-47a2-8ab0-18a9704b945e', '72ead2c1-791a-b80c-9893-f0038866d0ef', '749c77c0-c5a4-7fdf-7842-1ba794bb9cf7', '74a34662-70bf-679b-1845-481f21fa55b3', '7515be9e-204d-4068-4fb3-8c71bead1b2d', '752b3b73-dc3e-7616-7f62-4b9a3ae98719', '769567dd-22b0-99d7-5ec1-2383490952db', '76eba6c1-5e3c-8d91-069e-2f8b4838dd86', '76f0e599-c9a7-7abe-dc4b-a4df7141a70a', '77adc74b-3a52-75c0-6f8f-2ca3db758a96', '77ff6842-a453-5bfd-49ea-990599b6314f', '782d7722-2c39-6e58-90d1-bbefb06c57c7', '78e16e56-275a-e849-1287-d08253ce4268', '78e1cef9-6d28-0b99-df19-2beb681d3655', '7c174e6b-5f86-dfa9-3bc4-a891762b1cc9', '7c635c46-33fd-304c-0cb4-04602b18ba1f', '7d67a79c-ba7a-9a95-4334-aba8cd8b5f13', '7d79459c-4220-a527-e755-ede6270bdedc', '7da8e34e-427c-62cf-d603-f6f15f7edeff', '7dab1ca7-3491-3b8d-5ad3-881c25ce0ca4', '7deace2c-74c9-0b53-73c6-ea332f4aee0f', '7e00d405-c6ae-f8b1-6b7f-0bfaf9191b70', 
'7e0da420-23f5-d17b-30d3-026c87adcc98', '7e3604c7-b226-b89f-13f9-44bdc4f15abd', '7eb62cb5-9047-f40f-3959-43c4a96693bd', '7f0ee583-9177-4f4e-20ef-0e962edca1e8', '7f71b7e1-c255-8a68-739c-1b485e4f8f48', '808c9341-58df-f35e-40fa-4443118e5adb', '8128d140-7272-3d7f-f492-10e155c19ce8', '81a72733-a925-3a57-280c-05467526dc86', '8289587b-51fb-4c7e-ac3f-a4b40710d951', '82d6cabd-c42c-8e3b-0335-e49f60e604c9', '841c52af-a723-6e1b-022f-a12053c3de87', '84a493ba-74bf-0e7f-62c7-5b30733116a0', '84c80fe1-9a3a-3c9b-324e-846debd113f1', '8520541f-4e4f-9f6c-918e-baf3c4787a1b', '878d18db-0df9-738c-e91b-e74dba258245', '87d7e694-176e-3606-5841-dcbe40c90806', '87dee3ff-0071-37eb-cad2-a4963bae34f8', '899a67bd-da2a-4da6-8017-3090e7bb8388', '8b12c40b-6fc6-6fe6-5340-5ce8f433a583', '8b21fc48-fb2a-edb5-3911-91f23ef248d8', '8c1db319-4384-874c-cbb7-080450da9bed', '8c9b1188-5d6d-fe52-9ffa-58738114b997', '8c9e49f9-4f21-cd0b-021e-d7e91d7f4fd8', '8ca7fcf4-b0d0-9e84-a7bf-29d79a26cd1d', '8e0f464c-bd5e-d867-fbf1-1c3d687f90bc', '8f9e49e8-b7a4-670a-2345-a1a217e27479', '90e10854-7245-84d1-7028-da0f15bdfc6b', '9122d682-c651-166e-e64b-e0270326c38f', '92152192-45d7-a384-f364-066d58c3048c', '92963221-aca5-f68e-62c0-0231be86a962', '935d16ec-284a-4cf6-2001-d0a3b538a945', '93f9b926-3b27-55c0-9cd0-ebb78cf3bb02', '944ab947-9d78-9918-2432-57c9094abc3d', '946fa002-d6e0-8dac-650c-efd369c02cdf', '95159c71-13a7-7042-ee90-db1262a84e9a', '952dbb39-9c5d-1a52-380c-73ac20cb94d4', '9530f31a-7115-054c-95cb-acd4260ad11e', '95429889-8fe6-8c97-2a78-6106250e1f36', '9553e301-c61c-6361-c866-d9ee2d0834fb', '96ac30d7-31f1-e665-9738-afd908412e53', '9790bac7-f2bf-79c1-7162-6bdb317d61ec', '9834e569-aa15-1c32-23eb-4890d7382622', '9839dcbe-7d74-f22d-dbcb-636ed894073d', '985bbd31-87ad-5658-4fb2-3ddc501dd120', '99da325d-e90f-abc1-1313-4c3ef9ecea28', '99f8257a-e3d5-24f5-2cfc-e77475dd6d11', '9ac48286-5483-9e41-142f-8398a151dbad', '9aea55be-2380-c44f-d5ec-c2b7bb238bb1', '9aedd61f-248e-310c-40ff-31b29227fb3a', '9b0e9b90-e3b9-fb77-2971-0d465101f65e', 
'9baee3bb-f027-9774-456f-92c91d8dc7d9', '9c011ba3-fdab-3d05-8d85-b9d27ff01eac', '9c427d8f-2f1d-52aa-0ace-07b7a0beb1b1', '9e35c47c-c7f4-614b-3475-268e06a1ea60', '9edbb114-9d5d-c8a2-b85e-ef10f49c0456', '9ee374c3-08dc-071a-1cc8-09890dfda44b', '9f1a7ee6-12ca-8826-13de-2780c49e09a0', '9fd86c55-ec66-550c-099f-5fd70ae5cca0', '9fe109de-f8f1-4524-9d93-c7863250bd0b', 'a07a2f9a-9cdc-c5fc-75e4-486e75e94edb', 'a0bccc32-aa7a-ae11-6e14-b582793113c7', 'a16cea3a-6837-533b-7afe-f50a2a75e6b6', 'a1cbbe7e-460c-914b-1ff0-3fd541c48b82', 'a48b4e0e-2878-42fb-39d2-d264cdb4ef94', 'a55757fa-5725-e863-a27e-87476346fc01', 'a6674745-6894-7a3c-4b6f-f9a858f6f9c1', 'a698a7e3-4567-2b44-251f-6ae69a9543f0', 'a6ad8bdd-da26-f91b-2020-e4ebe5160e89', 'a85bcce9-fd64-50ad-7b8a-e78a3a899775', 'a8864a41-68cd-e0e4-3c3d-2caddbac3339', 'a8eb735b-97e1-42cd-5ec7-72ce28550e16', 'a913824a-786b-b980-63f2-49e3ac7f4084', 'a9255867-5b6a-1515-2e08-871041de5ef2', 'abb24ff2-405b-ebc2-750a-0072851d3ce6', 'ac1a4d71-df8b-68bc-a3d1-101b32085432', 'ac226f6f-edde-ba4e-ed14-92812156faa0', 'ac30a551-0c6b-d156-6035-3411c1b8b250', 'ac9b9949-6a0a-955b-2ad5-6d3b9e04ec7a', 'acf6a7a1-067f-bc11-4a6a-fc2bb15b6a1c', 'ae0e570b-1e7e-fc3c-9719-3ccb2b3f20b7', 'af24ce28-371c-2920-13b9-b320b53dff2f', 'af59bbe1-c4dd-dc86-8cdd-a7d70ae388c8', 'b05039a6-5a32-7b6e-ecd0-55d72235558d', 'b09aa58f-c709-3d95-14d8-af2454733dfb', 'b16f6a05-76d1-bfd0-331f-61dfc438912a', 'b16fc3ee-21a5-bc89-6782-20177064ba8a', 'b2565f82-1716-8517-fca5-334d6a0d6a89', 'b2e2dea7-da71-ac75-35ee-f0c65cf02530', 'b35fa3c5-3cb1-66fc-cdb9-92236823d087', 'b3863392-a641-2724-bd23-3f31672d1c21', 'b388ec65-2a49-821a-ac37-82eddaa8bdcb', 'b3c93c9a-5c69-076a-0c4a-e0d1aaa3790e', 'b3d3d92c-fe7f-3237-307a-bb1e2d48665c', 'b3e180b1-8750-ffc6-46f0-351f7984176e', 'b426763f-d5b1-0381-7caa-a9e7536d7f90', 'b4a12595-f037-3a62-9c50-5558adc3c192', 'b52dcf93-779f-e067-548d-fc5aa3fe6de8', 'b575e8ff-cf7f-cc4d-08ff-d443d17cac55', 'b685ba7c-c709-7266-cdd8-ca3374e98131', 'b6efd703-18d7-e589-0d27-a2cb90047f15', 
'b73a5f07-9972-f127-831c-c925c1c5a978', 'b8bc38d7-3165-f526-42d0-609dd24fbad9', 'b8f51349-4158-a747-0f6f-44adfdf8a061', 'b9089b68-42d1-853a-1aa1-42192cafe811', 'b9360726-ea8d-0a94-bcc6-64f4b1b0f85b', 'b976ff6f-a77c-86ed-ddf2-ff59b122f63d', 'b9adbc55-3728-80be-8471-d13d372ac31e', 'bad44913-b544-bc29-8ab3-fbc03f67be6a', 'bb04b530-43d4-90c3-bd60-582ef47617ca', 'bb69f506-b8f1-f2d6-14f3-0492c4356438', 'bbc47d44-1216-21f4-e7fd-e6b12ad7ba1c', 'bbeca9f9-84d8-b55a-b5e8-98b860445a2e', 'bccae007-1039-b563-f3d2-2c0f02f64a0e', 'bccec73b-bcc0-24b9-09d7-645719a74486', 'bd176569-a97b-49f0-f878-4dfbd5f0aceb', 'bd386290-6fc3-e822-87d5-eecfc53d59b4', 'bd9965a3-e3f7-6dc4-d1b0-c6c292ed88f1', 'be7a1b79-6444-2ebe-044d-f94fadc52dd5', 'bed3f74c-46f7-3d41-c054-f671786aea5c', 'c0b04ab0-03cb-7a0d-32cb-c60a76988c08', 'c285bac2-bce1-4115-44b9-5e7b7e361fac', 'c3433729-8438-bf46-255e-c88d7dfee4cd', 'c439b768-0315-be4a-1de1-95844f8dc1b4', 'c466bda1-75c7-12d1-ecd9-d101d1b901a9', 'c46d5af9-b979-02fa-4bd9-448097798486', 'c47075bb-91fb-15cb-8568-f13cc81903c1', 'c516f322-4e9d-5ba3-5a26-3ad6b01f3e6f', 'c5267e5a-5189-fe93-a652-242c27790049', 'c5d6c750-acaa-06b7-f77d-a7894e6c5709', 'c692479d-018f-0455-8f9a-4e6cac3a02c0', 'c71c434c-9377-ae95-8bf6-49c3abdacf39', 'c75ac4de-d82a-9968-066a-2f113c570903', 'c7ff7479-f155-34db-4286-fdec281e96bd', 'c90555d2-675b-d603-af48-62f32f894af8', 'c99d528e-f245-b0ef-3b59-a9a72e0db9be', 'ca09086b-3562-8684-6dbe-91acfc2c92ac', 'ca0e6506-d603-b32b-6d8d-147d52879818', 'cab41501-a95a-58a8-6380-63cb408d8566', 'cafcc26c-1db5-2416-642e-d068c04c8a9b', 'cb5c46aa-49dc-9d59-3aa7-fddf2a748873', 'cd0aa638-5db7-1841-2c68-b05e764a1c70', 'cd0ab7a1-f195-13e7-b909-dd4715c6b2c9', 'cd8ad6f2-a7ac-8625-acc4-5292023e990d', 'ceacee23-9479-2558-59f7-f6b9718509ef', 'cecb0710-ccab-b655-9dca-a9b59ab26ce7', 'cf488068-ab6b-4eba-7597-6e93f3fdc50c', 'cf51394d-0696-94ee-9b1e-434c432aa6c3', 'cf7c2e62-43a2-7cca-16a3-aa7e23bcb1c8', 'cf8f734a-57ef-6784-e23e-3902c94c7b12', 'cfd7c05b-0d91-d178-b799-d9110160fd22', 
'd02ec96c-2333-ea60-c43b-0c4484efb746', 'd04394ac-2702-c188-072b-d7e1d0838b51', 'd16e8d01-857e-e993-549c-ccedf93579f8', 'd1a4a7a8-f271-fb12-7979-5f0448c1ac91', 'd2b757f1-51b6-3577-01d5-ac78b811ad09', 'd4652525-7a33-00cc-ce9b-87ead1774a82', 'd5c03419-15a0-8d46-ee27-b0b632bf8f4f', 'd5dfb000-396a-cfd7-1603-6bfe6be3ca35', 'd66601cd-74ee-d67a-5d92-c2144ef06b2f', 'd68275e2-38d6-944d-2671-f7b7717c8996', 'd6d7a141-3b8b-97c4-0d60-d1538a5188cb', 'd7205118-7248-452e-d2a4-1d1e12e82ce3', 'd7878da8-7743-196d-426e-205765b816f6', 'd79695ae-2385-b4c6-4e53-2dba8335dc99', 'd94c1571-aec3-1437-15e8-cc33cdf1b011', 'd9c42a48-babf-b8dc-e3ad-bd5264c3c7f9', 'da0c4fd0-c139-c9d9-68f0-82a62c28d769', 'da170a41-1d46-b306-31dc-403d9f6c6d0f', 'da23a617-c225-6384-5dd2-608b5dbd36e8', 'dab54d7a-ff1d-fa7a-e0ef-d2d87cf9aee1', 'dada6eb7-63cb-748e-9b99-78a2eed49b33', 'dae49475-43cf-f183-4827-7818568c079c', 'daecb04c-7f58-d56a-ff80-9eb1c01a0869', 'db295349-1175-dd43-3c7b-1dab906f4c83', 'db781b73-a530-367e-b424-5e0bfa9a6716', 'db801b62-618c-62b3-7d5d-d2b61cf62991', 'dc509b0d-adaf-45f7-e5d5-d25aa76339f8', 'dc8aeae9-a6f7-b679-5c68-20b97d31e3f8', 'dcd9e281-a154-be24-c6fd-00b419c07f9a', 'ddfabcea-fcfe-9e98-22d7-11a10cc513a3', 'df194a46-61ac-eadd-e947-8aa9ebf9d5d6', 'df6b4dda-699d-fb66-fbfa-0b8ac07c22a7', 'dff4afcd-2cdb-66a4-dd76-364e8821e7af', 'e03cb285-ad2b-9774-ab8e-87d8f840e769', 'e0a62835-8cf5-9585-84be-0ab875110581', 'e2ec7932-a61e-0f95-9aad-b2e5cea263fa', 'e2f2b6a4-d2b0-63e1-3d97-b13721320e27', 'e33669fb-45a3-5783-7a3a-065b5a4dfd47', 'e4814c71-ab52-babd-92c8-6f59379c1a41', 'e4b41736-f01e-8340-e756-b183b8c50bbb', 'e4eba8f9-dc81-b053-2a31-9206ed755080', 'e507c0a7-3f0b-ff4a-7b14-e82a8b8e1029', 'e542eb70-7007-f1b0-ee35-c79f0d6c8459', 'e557117c-3e09-161c-39ed-97898e21f553', 'e5ffcfb6-eae3-d131-bfa7-4355fa923ccd', 'e6800cb4-4198-18d1-9413-416c04eb7be4', 'e6fb49b0-a343-550f-b686-f33f0e43afa1', 'e7b85157-6f2a-399b-90d5-7df04c9eee4a', 'e87deac0-7efc-ec8c-ea9d-e234bb4ab9c0', 'e9a25149-9add-5248-6916-dbea5604857f', 
'e9ed2a8b-3c03-e61e-d721-c19fe7c8a4c0', 'e9ef82f7-915f-abc6-48ec-e9b8cf912df4', 'ea01c6cf-9f8b-b5a9-0d0a-a90acaf8ee7e', 'ea1dc4d5-8f35-f900-6ff4-e935395d01c4', 'eae5479f-7839-f56a-8738-a288ceb14f67', 'ec66e95d-c50a-791f-8518-8fe90987b6b6', 'eca1c090-53c8-1020-8ac5-6ce43157f1a9', 'ed6eec3b-11d1-0378-d760-af8d12a0c8cb', 'ed83c357-0864-0032-25a9-9c8adbb3cd0f', 'edc24549-488a-479f-f321-c8d30abdbf9e', 'edf10a97-04b0-a4f7-b4f2-83c2da7b47eb', 'ee0d94c3-697d-0ba8-3870-8870e1fcfdb9', 'eed8ff50-5530-9b0e-77a9-f47c0ad75204', 'ef1f88d7-b573-58a8-a5c8-c7c038dd2d08', 'ef289a5e-3456-a52e-8ca6-8e084ba5b6f6', 'ef84c541-38e7-803c-23e5-7128d733382c', 'ef9235f6-ead5-8f5e-fb10-f4a76f499a41', 'f066822d-827e-c46c-da4d-f09981518d6b', 'f09ee90d-632a-1a36-c4c7-2b8e5f4796f8', 'f13a7fdc-d9cb-4f77-d590-ea186e991888', 'f1437445-4e4c-5796-dea3-c4433af5147b', 'f177d15f-5330-1ddc-7030-d2be10794948', 'f28a7faf-805d-fa33-19c2-34d02114867f', 'f2e92707-f232-560d-f313-e84df3b3364f', 'f39528e5-a718-250e-0436-f6ac6874ea7b', 'f3e69c09-286a-bdff-bb82-2b1cf10043a5', 'f44f20b4-53ec-76ff-1de3-ddd73d698d33', 'f49131c0-6b1f-6d3f-b0a3-d01b8def0c3b', 'f6b00226-c4bd-2729-1fdd-4958b2b91a80', 'f73304e6-c83c-df2e-43a6-3ba64019d9cc', 'f73e2cb6-8a2e-7d0e-adbf-2728e2d3095e', 'f75fab6f-e6f7-732c-242c-9ddf76991eb6', 'f77d314e-a1c3-f40f-5e3a-b647d641b652', 'f7bbf97e-a571-d34b-f833-7d9789e264f9', 'f8bbabf1-0935-3465-f367-ed703a6dd98f', 'f9359735-bb5e-50f4-5f78-829a9cc79532', 'f97c6fe0-4880-e0cb-17ac-0dd49a1e5180', 'f9a19651-45c3-9a7c-5a4d-3b84d338344b', 'f9ff576c-487a-28b9-228d-8b0f7ef923b0', 'fa095914-4cc9-4e0f-3839-39ef04de136e', 'fab4907d-f9d7-0ea7-5fb2-86929df2203c', 'fae7a96a-b1c9-5355-4963-89c5b4092e71', 'fbebd5cc-73f4-e8fa-69e4-086c2287822b', 'fbf8dbc0-7a21-970f-d0be-d0664a83050c', 'fd1c382e-ed46-6ec6-f59e-d82305e6855f', 'fdd05cf3-b146-4a4a-9dec-4cd1bd5f6f15', 'fe3a7bf1-367a-bfa0-85d0-74810869b0d6', 'fed9eecf-e43d-51d9-987c-8958f4a838e2', 'ff0d4da9-84a2-c419-bccf-39900b39b439', 'ff1ae983-7d8f-4c8a-22e7-1c69dc29ce14', 
'ffcc8620-170d-93f0-0295-6eddd6422bf1']
# Task title shown to participants (Russian; roughly "A strange radio broadcast").
TITLE = "Странная радиопередача"
# Markdown statement shown to a participant; {0} is filled with the
# participant-specific token (see generate below), producing a per-participant
# broadcast URL.
STATEMENT_TEMPLATE = '''
Отбившись от полчища мутантов, ты замечаешь, что у твоего КПК иногда получается ловить [странную радиопередачу](https://broadcast.contest.qctf.ru/{0}).
Рискнешь ли ты узнать, что же в ней такое?
'''
def generate(context):
    """Build the task statement for the participant in *context*.

    The token is chosen deterministically from the module-level ``tokens``
    list by the participant's id, so the same participant always gets the
    same broadcast URL.
    """
    current = context['participant']
    chosen = tokens[current.id % len(tokens)]
    statement_text = STATEMENT_TEMPLATE.format(chosen)
    return TaskStatement(TITLE, statement_text)
| 1,460.714286
| 20,009
| 0.798826
| 2,555
| 20,450
| 6.392955
| 0.98865
| 0.002082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.501483
| 0.027531
| 20,450
| 14
| 20,010
| 1,460.714286
| 0.319857
| 0
| 0
| 0
| 0
| 0.1
| 0.891448
| 0.880153
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fd727a89b01063c083154eb8c4bb8879cdcb30c5
| 31
|
py
|
Python
|
hello.py
|
ankur-prog/profile-rest-api
|
1348be376b5b9ad7395c0c766085105174c10b92
|
[
"MIT"
] | null | null | null |
hello.py
|
ankur-prog/profile-rest-api
|
1348be376b5b9ad7395c0c766085105174c10b92
|
[
"MIT"
] | null | null | null |
hello.py
|
ankur-prog/profile-rest-api
|
1348be376b5b9ad7395c0c766085105174c10b92
|
[
"MIT"
] | null | null | null |
# Simple hello-world script; prints a fixed greeting.
greeting = "hello ankur kushwaha"
print(greeting)
| 15.5
| 30
| 0.741935
| 4
| 31
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0.677419
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
fda8e502613de54ca02fa94cf4cfc85e1abc05af
| 12,035
|
py
|
Python
|
app/toscalib/templates/substitution_rule.py
|
onap/sdc-dcae-d-tosca-lab
|
b0120c1671e8987387ccae4f21793ceb303f471c
|
[
"Apache-2.0"
] | 1
|
2021-10-15T19:47:42.000Z
|
2021-10-15T19:47:42.000Z
|
app/toscalib/templates/substitution_rule.py
|
onap/archive-sdc-dcae-d-tosca-lab
|
b0120c1671e8987387ccae4f21793ceb303f471c
|
[
"Apache-2.0"
] | null | null | null |
app/toscalib/templates/substitution_rule.py
|
onap/archive-sdc-dcae-d-tosca-lab
|
b0120c1671e8987387ccae4f21793ceb303f471c
|
[
"Apache-2.0"
] | 1
|
2021-10-15T19:47:34.000Z
|
2021-10-15T19:47:34.000Z
|
from toscalib.templates.constant import *
import logging
class SubstitutionRule (object):
    """One substitution-mapping rule that rewires a property, attribute,
    capability or requirement of a source node to an item (input, output,
    node property/capability/requirement) of a destination template.

    NOTE(review): the SUB_* markers come from the star-import of
    toscalib.templates.constant — confirm their exact values there.
    """

    def __init__(self, type, item_name, prop_name, value):
        # `type` shadows the builtin, but the parameter name is part of the
        # public interface, so it is kept unchanged.
        self.type = type            # rule kind: SUB_PROPERTY/SUB_ATTRIBUTE/SUB_CAPABILITY/SUB_REQUIREMENT
        self.item = item_name       # capability/requirement name on the source node
        self.property = prop_name   # property name, or a marker such as SUB_CAP_ID/SUB_REQ_ID, or None
        self.value = value          # mapping target, normally a list like [node_or_marker, item(, prop)]

    def _update_pointer(self, src_node, dst_template):
        """Resolve this rule against *dst_template* and set ``sub_pointer``
        links on the matching items of *src_node*.

        Unresolvable or malformed rules only log a warning.  *src_node* may
        be None, in which case only validation/warnings happen.
        """
        # BUGFIX: the original guard used `and`, which let an *empty list*
        # through and then crashed on self.value[0]; a malformed rule is one
        # that is not a list OR has no elements.  Lazy %-formatting also
        # avoids crashing when self.value is not a string.
        if not isinstance(self.value, list) or len(self.value) < 1:
            logging.warning('Incorrect mapping rule for property %s: %s',
                            self.property, self.value)
            return
        if self.type == SUB_PROPERTY:
            # Map a node property either to a template input or to a
            # property of another node in the destination template.
            if self.value[0] == SUB_INPUT:
                if hasattr(dst_template, 'inputs') and self.value[1] in dst_template.inputs:
                    if src_node is not None:
                        src_node.properties[self.property].sub_pointer = dst_template.inputs[self.value[1]]
                        if src_node.properties[self.property].required is True or src_node.properties[self.property].filled is True:
                            dst_template.inputs[self.value[1]].used = True
                elif src_node is not None and src_node.properties[self.property].required is True:
                    # Only complain when the property actually matters.
                    logging.warning( 'Incorrect mapping rule for property '+ self.property+ ': no input named '+ self.value[1])
            elif self.value[0] in dst_template.node_dict:
                target_node = dst_template.node_dict[self.value[0]]
                target_prop_item = target_node._get_property_item(self.value[1])
                if target_prop_item is not None:
                    if src_node is not None:
                        src_prop_item = src_node._get_property_item(self.property)
                        # BUGFIX: check for None *before* touching
                        # .required/.filled (the original dereferenced first
                        # and None-checked afterwards).
                        if src_prop_item is not None:
                            if src_prop_item.required is True or src_prop_item.filled is True:
                                target_prop_item.used = True
                            src_prop_item.sub_pointer = target_prop_item
                else:
                    logging.warning( 'Incorrect mapping rule for property '+ self.property+ ': no property named '+ self.value[1]+ ' in node '+ self.value[0])
            else:
                logging.warning('Incorrect mapping rule for property '+ self.property+ ': no node named '+ self.value[0])
        elif self.type == SUB_ATTRIBUTE:
            # Attributes can only be mapped to template outputs.
            if self.value[0] == SUB_OUTPUT:
                if hasattr(dst_template, 'outputs') and self.value[1] in dst_template.outputs:
                    if src_node is not None:
                        src_node.attributes[self.property].sub_pointer = dst_template.outputs[self.value[1]]
                else:
                    logging.warning( 'Incorrect mapping rule for attribute '+ self.property+ ': no output named '+ self.value[1])
        elif self.type == SUB_CAPABILITY:
            if self.property is None:
                # Whole-capability mapping: link the capability item and each
                # of its properties to the target node's capability.
                if self.value[0] in dst_template.node_dict:
                    target_node = dst_template.node_dict[self.value[0]]
                    target_cap_item = target_node._get_capability_item(self.value[1])
                    if target_cap_item is not None:
                        if src_node is not None:
                            src_cap_item = src_node._get_capability_item(self.item)
                            if src_cap_item is not None:
                                src_cap_item.sub_pointer = target_cap_item
                                for prop_name in src_cap_item.properties.keys():
                                    src_cap_item.properties[prop_name].sub_pointer = target_cap_item.properties[prop_name]
                    else:
                        logging.warning( 'Incorrect mapping rule for capability '+ self.item+ ': no capability named '+ self.value[1]+ ' in node '+ self.value[0])
                else:
                    logging.warning( 'Incorrect mapping rule for capability '+ self.item+ ': no node named '+ self.value[0])
            elif self.property == SUB_CAP_ID:
                # Capability-id mapping: the capability itself points at an
                # output, a node, or a capability/property of a node.
                if self.value[0] == SUB_OUTPUT:
                    if hasattr(dst_template, 'outputs') and self.value[1] in dst_template.outputs:
                        target_node = dst_template.outputs[self.value[1]]
                        if src_node is not None:
                            src_cap_item = src_node._get_capability_item(self.item)
                            if src_cap_item is not None:
                                src_cap_item.sub_pointer = target_node
                elif self.value[0] in dst_template.node_dict:
                    target_node = dst_template.node_dict[self.value[0]]
                    if len(self.value) < 2:
                        target_item = target_node
                    # NOTE(review): this branch reads self.value[2] but only
                    # guarantees len(self.value) > 1 — a two-element rule
                    # naming a capability would raise IndexError; confirm
                    # rule shapes upstream.
                    elif len(self.value) > 1 and self.value[1] in target_node.capabilities :
                        target_item = target_node._get_capability_property(self.value[1], self.value[2])
                    elif self.value[1] in target_node.properties:
                        target_item = target_node._get_property_item(self.value[1])
                    else:
                        target_item = None
                        logging.warning( 'Incorrect mapping rule for capability '+ self.item+ ': no capability/property named '+ self.value[1]+ ' in node '+ self.value[0])
                    if target_item is not None and src_node is not None:
                        src_cap_item = src_node._get_capability_item(self.item)
                        if src_cap_item is not None:
                            src_cap_item.sub_pointer = target_item
            else:
                # Single capability-property mapping: to an input, or to a
                # capability-property / property of another node.
                if self.value[0] == SUB_INPUT:
                    if hasattr(dst_template, 'inputs') and self.value[1] in dst_template.inputs:
                        if src_node is not None:
                            src_cap_prop_item = src_node._get_capability_property(self.item, self.property)
                            # BUGFIX: guard against a missing capability
                            # property, consistent with the sibling branches
                            # below (the original dereferenced unconditionally).
                            if src_cap_prop_item is not None:
                                src_cap_prop_item.sub_pointer = dst_template.inputs[self.value[1]]
                                if src_cap_prop_item.required is True or src_cap_prop_item.filled is True:
                                    dst_template.inputs[self.value[1]].used = True
                    else:
                        logging.warning( 'Incorrect mapping rule for capability '+ self.item+ ': no input named '+ self.value[1])
                elif self.value[0] in dst_template.node_dict:
                    target_node = dst_template.node_dict[self.value[0]]
                    if self.value[1] in target_node.capabilities:
                        target_cap_property = target_node._get_capability_property(self.value[1], self.value[2])
                        if target_cap_property is not None:
                            if src_node is not None:
                                src_cap_prop_item = src_node._get_capability_property(self.item, self.property)
                                if src_cap_prop_item is not None:
                                    src_cap_prop_item.sub_pointer = target_cap_property
                        else:
                            logging.warning( 'Incorrect mapping rule for capability '+ self.item+ ': no property named '+ self.value[2]+ ' in capability '+ self.value[0]+ '->'+ self.value[1])
                    elif self.value[1] in target_node.properties:
                        target_prop_item = target_node._get_property_item(self.value[1])
                        if src_node is not None:
                            src_cap_prop_item = src_node._get_capability_property(self.item, self.property)
                            if src_cap_prop_item is not None:
                                src_cap_prop_item.sub_pointer = target_prop_item
                    else:
                        logging.warning( 'Incorrect mapping rule for capability '+ self.item+ ': no capability/property named '+ self.value[1]+ ' in node '+ self.value[0])
                else:
                    logging.warning( 'Incorrect mapping rule for capability '+ self.item+ ': no node named '+ self.value[0])
        elif self.type == SUB_REQUIREMENT:
            if self.property is None:
                # Whole-requirement mapping to a requirement of another node.
                if self.value[0] in dst_template.node_dict:
                    target_node = dst_template.node_dict[self.value[0]]
                    target_req_item = target_node._get_requirement_item_first(self.value[1])
                    if target_req_item is not None:
                        if src_node is not None:
                            src_req_item = src_node._get_requirement_item_first(self.item)
                            if src_req_item is not None:
                                src_req_item.sub_pointer = target_req_item
                    else:
                        logging.warning( 'Incorrect mapping rule for requirement '+ self.item+ ': no requirement named '+ self.value[1]+ ' in node '+ self.value[0])
                else:
                    logging.warning( 'Incorrect mapping rule for requirement '+ self.item+ ': no node named '+ self.value[0])
            elif self.property == SUB_REQ_ID:
                # Requirement-id mapping to an input or to a node property.
                if self.value[0] == SUB_INPUT:
                    if hasattr(dst_template, 'inputs') and self.value[1] in dst_template.inputs:
                        if src_node is not None:
                            src_req_item = src_node._get_requirement_item_first(self.item)
                            if src_req_item is not None:
                                src_req_item.sub_pointer = dst_template.inputs[self.value[1]]
                                dst_template.inputs[self.value[1]].used = True
                    else:
                        logging.warning( 'Incorrect mapping rule for property '+ self.property+ ': no input named '+ self.value[1])
                elif self.value[0] in dst_template.node_dict:
                    target_node = dst_template.node_dict[self.value[0]]
                    target_prop_item = target_node._get_property_item(self.value[1])
                    if target_prop_item is not None:
                        if src_node is not None:
                            src_req_item = src_node._get_requirement_item_first(self.item)
                            if src_req_item is not None:
                                src_req_item.sub_pointer = target_prop_item
                    else:
                        logging.warning( 'Incorrect mapping rule for requirement '+ self.item+ ': no property named '+ self.value[1]+ ' in node '+ self.value[0])
                else:
                    logging.warning( 'Incorrect mapping rule for requirement '+ self.item+ ': no node named '+ self.value[0])
            else:
                logging.warning( 'Incorrect mapping rule for requirement '+ self.item+ ': wrong property name '+ self.property)
        else:
            logging.warning('Incorrect mapping rule type: '+ self.type)
| 66.861111
| 191
| 0.55995
| 1,460
| 12,035
| 4.373973
| 0.05
| 0.125431
| 0.073599
| 0.039461
| 0.8943
| 0.843094
| 0.811619
| 0.784842
| 0.742875
| 0.710774
| 0
| 0.01108
| 0.355048
| 12,035
| 179
| 192
| 67.234637
| 0.811646
| 0.093311
| 0
| 0.578947
| 0
| 0
| 0.105679
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013158
| false
| 0
| 0.013158
| 0
| 0.039474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fde757cb23d8ce64ef1520ecfe7fa29e1e04b4cf
| 50
|
py
|
Python
|
tt_predictor_backend/__init__.py
|
yuqil725/tt_predictor_backend
|
86d0615ed94b5ad398e675676dd0a3442280c85e
|
[
"Apache-2.0"
] | null | null | null |
tt_predictor_backend/__init__.py
|
yuqil725/tt_predictor_backend
|
86d0615ed94b5ad398e675676dd0a3442280c85e
|
[
"Apache-2.0"
] | null | null | null |
tt_predictor_backend/__init__.py
|
yuqil725/tt_predictor_backend
|
86d0615ed94b5ad398e675676dd0a3442280c85e
|
[
"Apache-2.0"
] | null | null | null |
from . import TT_Predictor
from . import constant
| 16.666667
| 26
| 0.8
| 7
| 50
| 5.571429
| 0.714286
| 0.512821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 50
| 2
| 27
| 25
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e36be2c442a5c81d6805311b2e8c38000917199a
| 218
|
py
|
Python
|
torch_em/data/datasets/__init__.py
|
JoOkuma/torch-em
|
68b723683f9013723a0e4fc8cfef1d6a2a9c9dff
|
[
"MIT"
] | null | null | null |
torch_em/data/datasets/__init__.py
|
JoOkuma/torch-em
|
68b723683f9013723a0e4fc8cfef1d6a2a9c9dff
|
[
"MIT"
] | null | null | null |
torch_em/data/datasets/__init__.py
|
JoOkuma/torch-em
|
68b723683f9013723a0e4fc8cfef1d6a2a9c9dff
|
[
"MIT"
] | null | null | null |
from .cremi import get_cremi_loader
from .dsb import get_dsb_loader
from .isbi2012 import get_isbi_loader
from .platynereis import (get_platynereis_cell_loader,
get_platynereis_nuclei_loader)
| 36.333333
| 56
| 0.770642
| 29
| 218
| 5.37931
| 0.37931
| 0.230769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022857
| 0.197248
| 218
| 5
| 57
| 43.6
| 0.868571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e387185a46c60083bd7611c868ed1a59cd0a66c7
| 586
|
py
|
Python
|
targets/PythonSdk/Archive/PlayFabBaseClasses.py
|
arturogutierrez/SDKGenerator
|
c493eca8ee7381c38eb328a12fb903e1d43568de
|
[
"Apache-2.0"
] | 67
|
2015-03-20T09:52:08.000Z
|
2022-03-22T01:25:47.000Z
|
targets/PythonSdk/Archive/PlayFabBaseClasses.py
|
arturogutierrez/SDKGenerator
|
c493eca8ee7381c38eb328a12fb903e1d43568de
|
[
"Apache-2.0"
] | 340
|
2015-07-23T23:16:24.000Z
|
2022-02-24T17:16:37.000Z
|
targets/PythonSdk/Archive/PlayFabBaseClasses.py
|
arturogutierrez/SDKGenerator
|
c493eca8ee7381c38eb328a12fb903e1d43568de
|
[
"Apache-2.0"
] | 85
|
2015-04-24T20:33:44.000Z
|
2022-03-06T07:35:29.000Z
|
class PlayFabBaseObject:
    """Shared base class for PlayFab request/result objects."""
    pass
class PlayFabRequestCommon(PlayFabBaseObject):
    """Base class for every Api-request object.

    Still a stub: result-specific properties and template where-conditions
    are planned additions to make some code easier to follow.
    """
class PlayFabResultCommon(PlayFabBaseObject):
    """Base class for every Api-result object.

    Still a stub: result-specific properties and template where-conditions
    are planned additions to make some code easier to follow.
    """
| 30.842105
| 75
| 0.726962
| 77
| 586
| 5.532468
| 0.441558
| 0.042254
| 0.107981
| 0.112676
| 0.793427
| 0.793427
| 0.793427
| 0.793427
| 0.793427
| 0.596244
| 0
| 0
| 0.213311
| 586
| 18
| 76
| 32.555556
| 0.924078
| 0.648464
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
8b5c61ce93082153f7a2469fc4dc66f59247681d
| 45
|
py
|
Python
|
xomx/classifiers/__init__.py
|
perrin-isir/xomx
|
9ca0ad56c333ebf4444f38bd9fa59cdd4e533756
|
[
"BSD-3-Clause"
] | 4
|
2021-12-16T21:34:32.000Z
|
2021-12-22T09:25:53.000Z
|
xomx/classifiers/__init__.py
|
perrin-isir/xomx
|
9ca0ad56c333ebf4444f38bd9fa59cdd4e533756
|
[
"BSD-3-Clause"
] | 2
|
2021-12-15T15:51:42.000Z
|
2022-03-31T08:17:26.000Z
|
xomx/classifiers/__init__.py
|
perrin-isir/xomx
|
9ca0ad56c333ebf4444f38bd9fa59cdd4e533756
|
[
"BSD-3-Clause"
] | 2
|
2021-12-14T16:50:39.000Z
|
2022-03-14T09:27:51.000Z
|
from .multiclass import ScoreBasedMulticlass
| 22.5
| 44
| 0.888889
| 4
| 45
| 10
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 0.97561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8b8ef89fb6977d1a129d2717d821f51bd18df2c1
| 52,595
|
py
|
Python
|
cogs/moderation/sanction.py
|
TheophileDiot/Omnitron
|
0c147fc44da151481492da9a80d888e7a6f7dae5
|
[
"MIT"
] | 4
|
2021-09-29T08:28:34.000Z
|
2022-01-15T15:40:43.000Z
|
cogs/moderation/sanction.py
|
TheophileDiot/Omnitron
|
0c147fc44da151481492da9a80d888e7a6f7dae5
|
[
"MIT"
] | 2
|
2021-09-20T12:06:37.000Z
|
2021-10-16T12:50:22.000Z
|
cogs/moderation/sanction.py
|
TheophileDiot/Omnitron
|
0c147fc44da151481492da9a80d888e7a6f7dae5
|
[
"MIT"
] | null | null | null |
from math import ceil
from time import time
from typing import Union
from disnake import (
Embed,
Forbidden,
GuildCommandInteraction,
Member,
User,
)
from disnake.ext.commands import (
bot_has_permissions,
bot_has_guild_permissions,
BucketType,
Cog,
Context,
group,
guild_only,
has_guild_permissions,
max_concurrency,
Range,
slash_command,
)
from bot import Omnitron
from data import DurationType, Utils
class Moderation(Cog, name="moderation.sanction"):
def __init__(self, bot: Omnitron):
self.bot = bot
""" MAIN GROUP """
@group(
pass_context=True,
name="sanction",
aliases=["sanctions", "strike", "strikes"],
usage="(sub-command)",
description="This command manage the server's sanctions",
)
@Utils.check_bot_starting()
@Utils.check_moderator()
@bot_has_permissions(send_messages=True)
async def sanction_group(self, ctx: Context):
"""
This command group manages the server's sanctions
Parameters
----------
ctx: :class:`disnake.ext.commands.Context`
The command context
"""
if ctx.invoked_subcommand is None:
await ctx.send(
embed=self.bot.utils_class.get_embed_from_ctx(
ctx, title="Server's sanction feature"
)
)
@slash_command(
name="sanction",
description="This command manage the server's sanctions",
)
@guild_only()
@Utils.check_bot_starting()
@Utils.check_moderator()
async def sanction_slash_group(self, inter: GuildCommandInteraction):
"""
This slash command group manages the server's polls
Parameters
----------
inter: :class:`disnake.ext.commands.GuildCommandInteraction`
The application command interaction
"""
pass
""" MAIN GROUP'S GROUP(S) """
@sanction_group.group(
pass_context=True,
name="warn",
aliases=["warns"],
brief="⚠️",
usage="(sub-command)",
description="Manages the server's warns",
)
@max_concurrency(1, per=BucketType.guild)
async def sanction_warn_group(self, ctx: Context):
"""
This command group manages the server's warns
Parameters
----------
ctx: :class:`disnake.ext.commands.Context`
The command context
"""
if ctx.invoked_subcommand is None:
await ctx.send(
embed=self.bot.utils_class.get_embed_from_ctx(
ctx, title="Server's warns feature"
)
)
@sanction_slash_group.sub_command_group(
name="warn",
description="Manages the server's warns",
)
@max_concurrency(1, per=BucketType.guild)
async def sanction_warn_slash_group(self, inter: GuildCommandInteraction):
"""
This slash command group manages the server's warns
Parameters
----------
inter: :class:`disnake.ext.commands.GuildCommandInteraction`
The application command interaction
"""
pass
@sanction_group.group(
pass_context=True,
name="mute",
aliases=["mutes"],
brief="🔕️",
usage="(sub-command)",
description="Manages the server's mutes",
)
@max_concurrency(1, per=BucketType.guild)
async def sanction_mute_group(self, ctx: Context):
"""
This command group manages the server's mutes
Parameters
----------
ctx: :class:`disnake.ext.commands.Context`
The command context
"""
if ctx.invoked_subcommand is None:
await ctx.send(
embed=self.bot.utils_class.get_embed_from_ctx(
ctx, title="Server's mute feature"
)
)
@sanction_slash_group.sub_command_group(
name="mute",
description="Manages the server's mutes",
)
@max_concurrency(1, per=BucketType.guild)
async def sanction_mute_slash_group(self, inter: GuildCommandInteraction):
"""
This slash command group manages the server's mutes
Parameters
----------
inter: :class:`disnake.ext.commands.GuildCommandInteraction`
The application command interaction
"""
pass
@sanction_group.group(
pass_context=True,
name="ban",
aliases=["bans"],
brief="🔨",
usage="(sub-command)",
description="Manages the server's bans",
)
async def sanction_ban_group(self, ctx: Context):
"""
This command group manages the server's bans
Parameters
----------
ctx: :class:`disnake.ext.commands.Context`
The command context
"""
if ctx.invoked_subcommand is None:
await ctx.send(
embed=self.bot.utils_class.get_embed_from_ctx(
ctx, title="Server's sanction ban feature"
)
)
@sanction_slash_group.sub_command_group(
name="ban",
description="Manages the server's bans",
)
@max_concurrency(1, per=BucketType.guild)
async def sanction_ban_slash_group(self, inter: GuildCommandInteraction):
"""
This slash command group manages the server's bans
Parameters
----------
inter: :class:`disnake.ext.commands.GuildCommandInteraction`
The application command interaction
"""
pass
""" MAIN GROUP'S COMMAND(S) """
""" KICK """
@sanction_group.command(
name="kick",
brief="⚡",
usage='@member ("reason")',
description="Kicks a member from the server with a reason attached if specified",
)
@has_guild_permissions(kick_members=True)
@bot_has_guild_permissions(kick_members=True)
@max_concurrency(1, per=BucketType.member)
async def sanction_kick_command(
self, ctx: Context, member: Member, *, reason: str = None
):
"""
This command kicks a member from the server with a reason attached if specified
Parameters
----------
ctx: :class:`disnake.ext.commands.Context`
The command context
member: :class:`disnake.Member`
The member you want to kick
reason: :class:`str` optional
The reason attached to the kick
"""
await self.handle_kick(ctx, member, reason)
@sanction_slash_group.sub_command(
name="kick",
description="Kick a member from the server with a reason attached if specified",
)
@has_guild_permissions(kick_members=True)
@bot_has_guild_permissions(kick_members=True)
@max_concurrency(1, per=BucketType.member)
async def sanction_kick_slash_command(
self, inter: GuildCommandInteraction, member: Member, reason: str = None
):
"""
This command kicks a member from the server with a reason attached if specified
Parameters
----------
inter: :class:`disnake.ext.commands.GuildCommandInteraction`
The application command interaction
member: :class:`disnake.Member`
The member you want to kick
reason: :class:`str` optional
The reason attached to the kick
"""
await self.handle_kick(inter, member, reason)
async def handle_kick(
self,
source: Union[Context, GuildCommandInteraction],
member: Member,
reason: str = None,
):
em = Embed(
colour=self.bot.color,
title=f"🚫 - Kick",
description=f"The member {member} has been kicked by {source.author.mention}",
)
em = self.configure_embed(source, em)
if reason:
em.add_field(name="raison:", value=reason, inline=False)
try:
await member.kick(
reason=f"The member {member} has been kicked by {source.author} {f'for the reason: {reason}' if reason else ''}"
)
except Forbidden:
if isinstance(source, Context):
return await source.reply(
f"⛔ - {source.author.mention} - I can't kick the member `{member}`!",
delete_after=20,
)
else:
return await source.response.send_message(
f"⛔ - {source.author.mention} - I can't kick the member `{member}`!",
ephemeral=True,
)
if isinstance(source, Context):
await source.send(embed=em)
else:
await source.response.send_message(embed=em)
""" MAIN GROUP'S WARN COMMAND(S) """
""" WARN ADD """
@sanction_warn_group.command(
name="add",
brief="⚠️",
usage='@member ("reason")',
description="Warns a member with a reason attached if specified",
)
@max_concurrency(1, per=BucketType.member)
async def sanction_warn_add_command(
self, ctx: Context, member: Member, *, reason: str = None
):
"""
This command warns a member with a reason attached if specified
Parameters
----------
ctx: :class:`disnake.ext.commands.Context`
The command context
member: :class:`disnake.Member`
The member you want to warn
reason: :class:`str` optional
The reason attached to the warn
"""
await self.handle_warn_add(ctx, member, reason)
@sanction_warn_slash_group.sub_command(
name="add",
description="Warns a member with a reason attached if specified",
)
@max_concurrency(1, per=BucketType.member)
async def sanction_warn_add_slash_command(
self, inter: GuildCommandInteraction, member: Member, reason: str = None
):
"""
This slash command warns a member with a reason attached if specified
Parameters
----------
inter: :class:`disnake.ext.commands.GuildCommandInteraction`
The application command interaction
member: :class:`disnake.Member`
The member you want to warn
reason: :class:`str` optional
The reason attached to the warn
"""
await self.handle_warn_add(inter, member, reason)
async def handle_warn_add(
self,
source: Union[Context, GuildCommandInteraction],
member: Member,
reason: str = None,
):
if "muted_role" not in self.bot.configs[source.guild.id]:
if isinstance(source, Context):
return await source.reply(
f"⚠️ - {source.author.mention} - The server doesn't have a muted role yet! Please configure one with the command `{self.bot.utils_class.get_guild_pre(source.message)[0]}config muted_role` to set one!",
delete_after=20,
)
else:
return await source.response.send_message(
f"⚠️ - {source.author.mention} - The server doesn't have a muted role yet! Please configure one with the command `{self.bot.utils_class.get_guild_pre(source.author)[0]}config muted_role` to set one!",
ephemeral=True,
)
em = Embed(
colour=self.bot.color,
title=f"🚫 - Warn",
description=f"The user `{member}` has been warned by {source.author.mention}",
)
em = self.configure_embed(source, em)
if reason:
em.add_field(name="reason:", value=reason, inline=False)
self.bot.user_repo.warn_user(
source.guild.id, member.id, time(), f"{source.author}", reason
)
warns = len(self.bot.user_repo.get_warns(source.guild.id, member.id))
em.add_field(
name=f"**Number of warnings of {member}:**",
value=f"{warns}",
inline=False,
)
if warns == 2 or warns == 4:
if source.channel.permissions_for(source.guild.me).manage_roles:
em.add_field(
name="sanction",
value=f"🔇 - Muted {'3H' if warns == 2 else '24H'} - 🔇",
inline=False,
)
try:
await member.add_roles(
self.bot.configs[source.guild.id]["muted_role"]
)
except Forbidden as f:
f.text = f"⚠️ - I don't have the right permissions to add the role `{self.bot.configs[source.guild.id]['muted_role']}` to {member}! (maybe the role is above mine)"
raise
self.bot.user_repo.mute_user(
source.guild.id,
member.id,
10800 if warns == 2 else 86400,
time(),
f"{self.bot.user}",
f"{'2nd' if warns == 2 else '4th'} warn",
)
self.bot.tasks[source.guild.id]["mute_completions"][
member.id
] = self.bot.utils_class.task_launcher(
self.bot.utils_class.mute_completion,
(
self.bot.user_repo.get_user(member.guild.id, member.id),
member.guild.id,
),
count=1,
)
else:
await self.bot.utils_class.send_message_to_mods(
f"⚠️ - I don't have the right permissions to manage roles in this server (i tried to add the muted role to {member} after his {'2nd' if warns == 2 else '4th'} warn)! Required perms: `{', '.join(['MANAGE_ROLES'])}`",
source.guild.id,
)
elif warns == 5:
em.add_field(name="sanction", value="⚠️ - Warning - ⚠", inline=False)
try:
await member.send(
f"⚠ - ️️{member.mention} You are on your 5th warn! The next time you're warn, you will be kicked from this server {source.guild}! - ⚠️"
)
except Forbidden:
if isinstance(source, Context):
await source.send(
f"❌ - ️️{source.author.mention} - Couldn't send the message to {member}, please inform him that on the next warn he will be kicked from the server!"
)
else:
await source.response.send_message(
f"❌ - ️️{source.author.mention} - Couldn't send the message to {member}, please inform him that on the next warn he will be kicked from the server!"
)
elif warns > 5:
em.add_field(name="sanction", value="🚫 - kick - 🚫", inline=False)
try:
await member.kick(reason="6th warn")
except Forbidden:
if isinstance(source, Context):
await source.send(
f"❌ - {source.author.mention} - I don't have the permission to kick members (or I couldn't kick him myself)! (try kicking him yourself then!)"
)
else:
await source.response.send_message(
f"❌ - {source.author.mention} - I don't have the permission to kick members (or I couldn't kick him myself)! (try kicking him yourself then!)"
)
if isinstance(source, Context):
await source.send(embed=em)
else:
await source.response.send_message(embed=em)
""" WARN LIST """
@sanction_warn_group.command(
    name="list",
    brief="ℹ️",
    usage="(@member)",
    description="Shows the list of a member's warns or yours!",
)
async def sanction_warn_list_command(self, ctx: Context, member: Member = None):
    """
    This command shows the list of a member's warns or yours!

    Parameters
    ----------
    ctx: :class:`disnake.ext.commands.Context`
        The command context
    member: :class:`disnake.Member` optional
        The member whose warns should be listed (defaults to the command author)
    """
    # Thin wrapper: the prefix and slash variants share one implementation.
    await self.handle_warn_list(ctx, member)
@sanction_warn_slash_group.sub_command(
    name="list",
    description="Shows the list of a member's warns or yours!",
)
async def sanction_warn_list_slash_command(
    self, inter: GuildCommandInteraction, member: Member = None
):
    """
    This slash command shows the list of a member's warns or yours!

    Parameters
    ----------
    inter: :class:`disnake.ext.commands.GuildCommandInteraction`
        The application command interaction
    member: :class:`disnake.Member` optional
        The member whose warns should be listed (defaults to the command author)
    """
    # Thin wrapper: the prefix and slash variants share one implementation.
    await self.handle_warn_list(inter, member)
async def handle_warn_list(
    self,
    source: Union[Context, GuildCommandInteraction],
    member: Member = None,
):
    """
    Shared implementation for the prefix and slash "warn list" commands.

    Builds an embed with up to 24 warn entries (plus an overflow marker when
    there are more) and sends it back through whichever channel *source*
    provides.
    """
    member = member or source.author

    em = self.configure_embed(
        source,
        Embed(
            colour=self.bot.color,
            title=f"⚠️ - list of previous warns from {member}",
        ),
    )

    warns = self.bot.user_repo.get_warns(source.guild.id, member.id)

    if not warns:
        notice = f"ℹ️ - {source.author.mention} - {f'The member {member}' if member != source.author else 'You'} has never been warned."
        if isinstance(source, Context):
            return await source.reply(notice, delete_after=20)
        return await source.response.send_message(notice, ephemeral=True)

    newline = "\n"
    # Discord embeds cap out at 25 fields; slot 25 becomes an overflow marker.
    for index, warn in enumerate(warns[:25]):
        if index == 24:
            em.add_field(
                name="**Too many warns to display them all**",
                value="...",
                inline=False,
            )
        else:
            em.add_field(
                name=f"**{index + 1}:**",
                value=f"**date :** {warn['at']}{newline}**by :** {warn['by']}{newline}**reason :** {warn['reason'] if 'reason' in warn else 'no reason specified'}",
                inline=True,
            )

    if isinstance(source, Context):
        await source.send(embed=em)
    else:
        await source.response.send_message(embed=em)
""" WARN CLEAR """
@sanction_warn_group.command(
    name="clear",
    brief="🧹",
    usage="(@member)",
    description="Clears the warns of a member!",
)
async def sanction_warn_clear_command(self, ctx: Context, member: Member):
    """
    This command clears the warns of a member!

    Parameters
    ----------
    ctx: :class:`disnake.ext.commands.Context`
        The command context
    member: :class:`disnake.Member`
        The member whose warns should be cleared
    """
    # Thin wrapper: the prefix and slash variants share one implementation.
    await self.handle_warn_clear(ctx, member)
@sanction_warn_slash_group.sub_command(
    name="clear",
    description="Clears the warns of a member!",
)
async def sanction_warn_clear_slash_command(
    self, inter: GuildCommandInteraction, member: Member
):
    """
    This slash command clears the warns of a member!

    Parameters
    ----------
    inter: :class:`disnake.ext.commands.GuildCommandInteraction`
        The application command interaction
    member: :class:`disnake.Member`
        The member whose warns should be cleared
    """
    # Thin wrapper: the prefix and slash variants share one implementation.
    await self.handle_warn_clear(inter, member)
async def handle_warn_clear(
    self,
    source: Union[Context, GuildCommandInteraction],
    member: Member,
):
    """
    Shared implementation for the prefix and slash "warn clear" commands.

    Deletes every warn recorded for *member* in this guild, or informs the
    invoker when there is nothing to clear.
    """
    existing_warns = self.bot.user_repo.get_warns(source.guild.id, member.id)
    is_prefix_command = isinstance(source, Context)

    if not existing_warns:
        notice = f"ℹ️ - {source.author.mention} - {f'The member {member}' if member != source.author else 'You'} has never been warned."
        if is_prefix_command:
            return await source.reply(notice, delete_after=20)
        return await source.response.send_message(notice, ephemeral=True)

    self.bot.user_repo.clear_warns(source.guild.id, member.id)

    confirmation = f"ℹ️ - `{member}`'s warns have been cleared!"
    if is_prefix_command:
        await source.send(confirmation)
    else:
        await source.response.send_message(confirmation)
""" MAIN GROUP'S MUTE COMMAND(S) """
""" MUTE ADD """
@sanction_mute_group.command(
    name="add",
    brief="🔇",
    usage='@member ("reason") (<duration_value> <duration_type>)',
    description="Mutes a member for a certain duration with a reason attached if specified! (default/minimum duration = 10 min) (duration format -> <duration value (more than 0)> <duration type (d, h, m, s)>",
)
@bot_has_permissions(manage_roles=True)
@max_concurrency(1, per=BucketType.member)
async def sanction_mute_add_command(self, ctx: Context, member: Member, *args: str):
    """
    This command mutes a member for a certain duration with a reason attached if specified! (default/minimum duration = 10 min) (duration format -> <duration value (more than 0)> <duration type (d, h, m, s)>

    Parameters
    ----------
    ctx: :class:`disnake.ext.commands.Context`
        The command context
    member: :class:`disnake.Member`
        The member you want to mute
    args: :class:`str` optional
        The other options including a reason if there is one and a duration
    """
    reason = None
    _duration = "10"
    type_duration = "m"

    # The first extra argument is treated as the reason when it is not a
    # compact duration token ("<digits><unit>", e.g. "10m").
    if args and not args[0][:-1].isdigit():
        reason = args[0]

    if reason:
        if len(args) > 2:
            # Duration supplied as two tokens: "<value> <unit>".
            # Fix: slice to exactly two elements — the original unpacked
            # args[1:] and raised ValueError whenever surplus arguments
            # were passed after the duration.
            _duration, type_duration = args[1:3]
        elif len(args) > 1:
            # Duration supplied as one compact token: "<value><unit>".
            _duration = args[1][:-1]
            type_duration = args[1][-1]
    elif args:
        if len(args) > 1:
            # Same fix as above for the no-reason form.
            _duration, type_duration = args[0:2]
        else:
            _duration = args[0][:-1]
            type_duration = args[0][-1]

    if not _duration.isdigit():
        try:
            await ctx.reply(
                f"⚠️ - {ctx.author.mention} - Please provide a valid duration! `{self.bot.utils_class.get_guild_pre(ctx.message)[0]}{f'{ctx.command.parents[0]}' if ctx.command.parents else f'help {ctx.command.qualified_name}'}` to get more help.",
                delete_after=15,
            )
        except Forbidden as f:
            # Surface the missing SEND_MESSAGES permission through the
            # exception so the caller's error handler can report it.
            f.text = f"⚠️ - I don't have the right permissions to send messages in the channel {ctx.channel.mention} (message: `⚠️ - {ctx.author.mention} - Please provide a valid duration! `{self.bot.utils_class.get_guild_pre(ctx.message)[0]}{f'{ctx.command.parents[0]}' if ctx.command.parents else f'help {ctx.command.qualified_name}'}` to get more help.`)! Required perms: `{', '.join(['SEND_MESSAGES'])}`"
            raise
        return

    await self.handle_mute_add(ctx, member, reason, _duration, type_duration)
@sanction_mute_slash_group.sub_command(
    name="add",
    description="Mutes a member for a certain duration with a reason attached if specified!",
)
@bot_has_permissions(manage_roles=True)
@max_concurrency(1, per=BucketType.member)
async def sanction_mute_add_slash_command(
    self,
    inter: GuildCommandInteraction,
    member: Member,
    reason: str = None,
    duration: Range[1, ...] = 10,
    type_duration: DurationType = "m",
):
    """
    This slash command mutes a member for a certain duration with a reason attached if specified!

    Parameters
    ----------
    inter: :class:`disnake.ext.commands.GuildCommandInteraction`
        The application command interaction
    member: :class:`disnake.Member`
        The member you want to mute
    reason: :class:`str` optional
        The reason attached to the mute
    duration: :class:`disnake.ext.commands.Range` optional
        The mute's duration value (defaults to 10)
    type_duration: :class:`Utils.DurationType` optional
        The mute's duration type (defaults to "m")
    """
    # Slash options are already validated by disnake (Range enforces >= 1),
    # so this wrapper forwards straight to the shared implementation.
    await self.handle_mute_add(inter, member, reason, duration, type_duration)
async def handle_mute_add(
    self,
    source: Union[Context, GuildCommandInteraction],
    member: Member,
    reason: str,
    duration: int,
    type_duration: str,
):
    """
    Shared implementation for the prefix and slash "mute add" commands.

    Adds the guild's configured muted role to *member*, records the mute in
    the user repository and launches the task that completes (lifts) the
    mute once the duration elapses. If the member is already muted, an
    embed describing the existing mute is sent instead.

    Parameters
    ----------
    source: :class:`Union[Context, GuildCommandInteraction]`
        The command context or application command interaction
    member: :class:`disnake.Member`
        The member to mute
    reason: :class:`str`
        The reason attached to the mute (may be None)
    duration: :class:`int`
        The mute's duration value
    type_duration: :class:`str`
        The mute's duration unit
    """
    # A muted role must be configured for the guild before muting anyone.
    if "muted_role" not in self.bot.configs[source.guild.id]:
        if isinstance(source, Context):
            return await source.reply(
                f"⚠️ - {source.author.mention} - The server doesn't have a muted role yet! Please configure one with the command `{self.bot.utils_class.get_guild_pre(source.message)[0]}config muted_role` to set one!",
                delete_after=20,
            )
        else:
            # NOTE(review): get_guild_pre receives source.author here but
            # source.message in the Context branch — presumably both are
            # valid prefix lookups; confirm against Utils.get_guild_pre.
            return await source.response.send_message(
                f"⚠️ - {source.author.mention} - The server doesn't have a muted role yet! Please configure one with the command `{self.bot.utils_class.get_guild_pre(source.author)[0]}config muted_role` to set one!",
                ephemeral=True,
            )

    # parse_duration reports invalid values to the invoker itself; a falsy
    # return means the error was already communicated, so just abort.
    duration_s = await self.bot.utils_class.parse_duration(
        int(duration), type_duration, source
    )
    if not duration_s:
        return

    em = Embed(
        colour=self.bot.color,
        title=f"🔇 - Mute",
        description=f"The member `{member}` has been muted by {source.author.mention}",
    )
    em = self.configure_embed(source, em)

    if reason:
        em.add_field(name="reason:", value=reason, inline=False)

    db_user = self.bot.user_repo.get_user(source.guild.id, member.id)

    # Only apply a new mute when the member neither wears the muted role
    # nor is flagged as muted in the repository.
    if (
        self.bot.configs[source.guild.id]["muted_role"] not in member.roles
        or not db_user["muted"]
    ):
        em.description = f"The member `{member}` has been muted by {source.author.mention} for {self.bot.utils_class.duration(duration_s)}"
        await member.add_roles(
            self.bot.configs[source.guild.id]["muted_role"],
            reason="Muted from command sanction.",
        )
        self.bot.user_repo.mute_user(
            source.guild.id,
            member.id,
            duration_s,
            time(),
            f"{source.author}",
            reason,
        )
        # Schedule the one-shot task that unmutes the member when the
        # duration elapses; keyed by member id so it can be cancelled later.
        self.bot.tasks[source.guild.id]["mute_completions"][
            member.id
        ] = self.bot.utils_class.task_launcher(
            self.bot.utils_class.mute_completion,
            (
                self.bot.user_repo.get_user(source.guild.id, member.id),
                source.guild.id,
            ),
            count=1,
        )
    else:
        # Already muted: replace the embed content with the current
        # mute's details instead of stacking a second mute.
        last_mute = self.bot.user_repo.get_last_mute(source.guild.id, member.id)
        em.description = f"The member {member} is already muted"
        em.remove_field(0)
        em.add_field(name="**muted by:**", value=last_mute["by"], inline=True)
        em.add_field(name="**date:**", value=last_mute["at"], inline=True)
        em.add_field(name="**duration:**", value=last_mute["duration"], inline=True)
        em.add_field(
            name="**time remaining:**",
            value=self.bot.utils_class.duration(
                last_mute["duration_s"] - (time() - last_mute["at_s"])
            ),
            inline=True,
        )
        if "reason" in last_mute:
            em.add_field(name="**reason:**", value=last_mute["reason"], inline=True)

    if isinstance(source, Context):
        await source.send(embed=em)
    else:
        await source.response.send_message(embed=em)
""" MUTE LIST """
@sanction_mute_group.command(
    name="list",
    brief="ℹ",
    usage="(@member)",
    description="Shows the list of a member's mutes or yours!",
)
async def sanction_mute_list_command(
    self,
    ctx: Context,
    member: Member = None,
):
    """
    This command shows the list of a member's mutes or yours!

    Parameters
    ----------
    ctx: :class:`disnake.ext.commands.Context`
        The command context
    member: :class:`disnake.Member` optional
        The member whose mutes should be listed (defaults to the command author)
    """
    # Thin wrapper: the prefix and slash variants share one implementation.
    await self.handle_mute_list(ctx, member)
@sanction_mute_slash_group.sub_command(
    name="list",
    description="Shows the list of a member's mutes or yours!",
)
async def sanction_mute_list_slash_command(
    self,
    inter: GuildCommandInteraction,
    member: Member = None,
):
    """
    This slash command shows the list of a member's mutes or yours!

    Parameters
    ----------
    inter: :class:`disnake.ext.commands.GuildCommandInteraction`
        The application command interaction
    member: :class:`disnake.Member` optional
        The member whose mutes should be listed (defaults to the command author)
    """
    # Thin wrapper: the prefix and slash variants share one implementation.
    await self.handle_mute_list(inter, member)
async def handle_mute_list(
    self,
    source: Union[Context, GuildCommandInteraction],
    member: Member = None,
):
    """
    Shared implementation for the prefix and slash "mute list" commands.

    Sends an embed listing up to 24 recorded mutes for *member* (or the
    invoker), with an overflow marker when there are more entries.
    """
    member = member or source.author

    em = self.configure_embed(
        source,
        Embed(
            colour=self.bot.color,
            title=f"🔇 - List of previous mutes of {member}",
        ),
    )

    db_user = self.bot.user_repo.get_user(source.guild.id, member.id)
    mutes = db_user["mutes"] if "mutes" in db_user else []

    if len(mutes) < 1:
        notice = f"ℹ️ - {source.author.mention} - {f'The member {member}' if member != source.author else 'You'} has never been muted."
        if isinstance(source, Context):
            return await source.reply(notice)
        return await source.response.send_message(notice)

    newline = "\n"
    # Discord embeds cap out at 25 fields; slot 25 becomes an overflow marker.
    for index, mute in enumerate(mutes[:25]):
        if index == 24:
            em.add_field(
                name="**Too many mutes to display them all**.",
                value="...",
                inline=False,
            )
        else:
            em.add_field(
                name=f"**{index + 1}:**",
                value=f"**date :** {mute['at']}{newline}**by :** {mute['by']}{newline}**duration :** {mute['duration']}{newline}**reason :** {mute['reason'] if 'reason' in mute else 'no reason specified'}",
                inline=True,
            )

    if isinstance(source, Context):
        await source.send(embed=em)
    else:
        await source.response.send_message(embed=em)
""" MUTE REMOVE """
@sanction_mute_group.command(
    name="remove",
    brief="🔉",
    usage='@member ("reason")',
    description="Unmute a member with a reason attached if specified!",
)
@bot_has_permissions(manage_roles=True)
@max_concurrency(1, per=BucketType.member)
async def sanction_mute_remove_command(
    self,
    ctx: Context,
    member: Member,
    *,
    reason: str = None,
):
    """
    This command unmutes a member with a reason attached if specified!

    Parameters
    ----------
    ctx: :class:`disnake.ext.commands.Context`
        The command context
    member: :class:`disnake.Member`
        The member you want to unmute
    reason: :class:`str` optional
        The reason attached to the unmute (keyword-only so it consumes the
        rest of the message)
    """
    # Thin wrapper: the prefix and slash variants share one implementation.
    await self.handle_mute_remove(ctx, member, reason)
@sanction_mute_slash_group.sub_command(
    name="remove",
    description="Unmute a member with a reason attached if specified!",
)
@bot_has_permissions(manage_roles=True)
@max_concurrency(1, per=BucketType.member)
async def sanction_mute_remove_slash_command(
    self,
    inter: GuildCommandInteraction,
    member: Member,
    *,
    reason: str = None,
):
    """
    This slash command unmutes a member with a reason attached if specified!

    Parameters
    ----------
    inter: :class:`disnake.ext.commands.GuildCommandInteraction`
        The application command interaction
    member: :class:`disnake.Member`
        The member you want to unmute
    reason: :class:`str` optional
        The reason attached to the unmute
    """
    # Thin wrapper: the prefix and slash variants share one implementation.
    await self.handle_mute_remove(inter, member, reason)
async def handle_mute_remove(
    self,
    source: Union[Context, GuildCommandInteraction],
    member: Member,
    reason: str = None,
):
    """
    Shared implementation for the prefix and slash "mute remove" commands.

    Removes the guild's muted role from *member*, clears the muted flag in
    the repository and cancels the pending mute-completion task, if any.

    Parameters
    ----------
    source: :class:`Union[Context, GuildCommandInteraction]`
        The command context or application command interaction
    member: :class:`disnake.Member`
        The member to unmute
    reason: :class:`str` optional
        The reason attached to the unmute (also used for the audit log)
    """
    db_user = self.bot.user_repo.get_user(source.guild.id, member.id)

    if (
        self.bot.configs[source.guild.id]["muted_role"] in member.roles
        or db_user["muted"]
    ):
        await member.remove_roles(
            self.bot.configs[source.guild.id]["muted_role"], reason=reason
        )
        self.bot.user_repo.unmute_user(source.guild.id, member.id)

        # Mutes whose reason is "joined the server" are presumably applied
        # automatically on join without a completion task — only cancel the
        # scheduled unmute for genuine sanction mutes.
        # Fix: guard against a missing "mutes" history and a missing task
        # entry — the original indexed both unconditionally and raised
        # KeyError when either was absent.
        mutes = db_user.get("mutes") or []
        last_mute = mutes[-1] if mutes else {}
        if (
            "reason" in last_mute
            and last_mute["reason"] != "joined the server"
        ):
            completions = self.bot.tasks[source.guild.id]["mute_completions"]
            task = completions.pop(member.id, None)
            if task is not None:
                task.cancel()

        resp = (
            f"🔊 - The member {member} has been unmuted by {source.author.mention}."
        )
    else:
        resp = f"🔊 - {source.author.mention} - The member {member} is not or no longer muted."

    if isinstance(source, Context):
        await source.send(resp)
    else:
        await source.response.send_message(resp)
""" MAIN GROUP'S BAN COMMAND(S) """
""" ADD """
@sanction_ban_group.command(
    name="add",
    brief="🚷",
    usage='@member ("reason") (<duration_value> <duration_type>)',
    description="Bans a member for a certain duration with a reason attached if specified! (minimum duration = 1 day) (duration format -> <duration value (more than 0)> <duration type (d, h, m, s)>",
)
@has_guild_permissions(ban_members=True)
@bot_has_guild_permissions(ban_members=True)
@max_concurrency(1, per=BucketType.member)
async def sanction_ban_add_command(self, ctx: Context, member: Member, *args: str):
    """
    This command bans a member for a certain duration with a reason attached if specified! (minimum duration = 1 day) (duration format -> <duration value (more than 0)> <duration type (d, h, m, s)>

    Parameters
    ----------
    ctx: :class:`disnake.ext.commands.Context`
        The command context
    member: :class:`disnake.Member`
        The member you want to ban
    args: :class:`str` optional
        The other options including a reason if there is one and a duration
    """
    reason = None
    _duration = None
    type_duration = None

    # The first extra argument is treated as the reason when it is not a
    # compact duration token ("<digits><unit>", e.g. "7d").
    if args and not args[0][:-1].isdigit():
        reason = args[0]

    if reason:
        if len(args) > 2:
            # Duration supplied as two tokens: "<value> <unit>".
            # Fix: slice to exactly two elements — the original unpacked
            # args[1:] and raised ValueError whenever surplus arguments
            # were passed after the duration.
            _duration, type_duration = args[1:3]
        elif len(args) > 1:
            # Duration supplied as one compact token: "<value><unit>".
            _duration = args[1][:-1]
            type_duration = args[1][-1]
    elif args:
        if len(args) > 1:
            # Same fix as above for the no-reason form.
            _duration, type_duration = args[0:2]
        else:
            _duration = args[0][:-1]
            type_duration = args[0][-1]

    if _duration and not _duration.isdigit():
        try:
            await ctx.reply(
                f"⚠️ - {ctx.author.mention} - Please provide a valid duration! `{self.bot.utils_class.get_guild_pre(ctx.message)[0]}{f'{ctx.command.parents[0]}' if ctx.command.parents else f'help {ctx.command.qualified_name}'}` to get more help.",
                delete_after=15,
            )
        except Forbidden as f:
            # Surface the missing SEND_MESSAGES permission through the
            # exception so the caller's error handler can report it.
            f.text = f"⚠️ - I don't have the right permissions to send messages in the channel {ctx.channel.mention} (message: `⚠️ - {ctx.author.mention} - Please provide a valid duration! `{self.bot.utils_class.get_guild_pre(ctx.message)[0]}{f'{ctx.command.parents[0]}' if ctx.command.parents else f'help {ctx.command.qualified_name}'}` to get more help.`)! Required perms: `{', '.join(['SEND_MESSAGES'])}`"
            raise
        return

    await self.handle_ban_add(ctx, member, reason, _duration, type_duration)
@sanction_ban_slash_group.sub_command(
    name="add",
    description="Bans a member for a certain duration with a reason attached if specified!",
)
@has_guild_permissions(ban_members=True)
@bot_has_guild_permissions(ban_members=True)
@max_concurrency(1, per=BucketType.member)
async def sanction_ban_add_slash_command(
    self,
    inter: GuildCommandInteraction,
    member: Member,
    reason: str = None,
    duration: Range[1, ...] = 1,
    type_duration: DurationType = "d",
):
    """
    This slash command bans a member for a certain duration with a reason attached if specified!

    Parameters
    ----------
    inter: :class:`disnake.ext.commands.GuildCommandInteraction`
        The application command interaction
    member: :class:`disnake.Member`
        The member you want to ban
    reason: :class:`str` optional
        The reason attached to the ban
    duration: :class:`disnake.ext.commands.Range` optional
        The ban's duration value (defaults to 1)
    type_duration: :class:`Utils.DurationType` optional
        The ban's duration type (defaults to "d")
    """
    # Slash options are already validated by disnake (Range enforces >= 1),
    # so this wrapper forwards straight to the shared implementation.
    await self.handle_ban_add(inter, member, reason, duration, type_duration)
async def handle_ban_add(
    self,
    source: Union[Context, GuildCommandInteraction],
    member: Member,
    reason: str = None,
    duration: int = None,
    type_duration: str = None,
):
    """
    Shared implementation for the prefix and slash "ban add" commands.

    Bans *member*, records the ban in the user repository and — when a
    duration is given — launches the task that lifts the ban once the
    duration elapses.

    Parameters
    ----------
    source: :class:`Union[Context, GuildCommandInteraction]`
        The command context or application command interaction
    member: :class:`disnake.Member`
        The member to ban
    reason: :class:`str` optional
        The reason attached to the ban
    duration: :class:`int` optional
        The ban's duration value (permanent ban when omitted)
    type_duration: :class:`str` optional
        The ban's duration unit
    """
    duration_s = None
    if duration:
        # parse_duration reports invalid values to the invoker itself; a
        # falsy return means the error was already communicated, so abort.
        duration_s = await self.bot.utils_class.parse_duration(
            int(duration), type_duration, source
        )
        if not duration_s:
            return

    em = Embed(
        colour=self.bot.color,
        title=f"🚫 - Ban",
        description=f"The member {member} has been banned by {source.author.mention}",
    )
    em = self.configure_embed(source, em)

    if reason:
        # Fix: the field label was left in French ("raison:") while every
        # other embed in this cog labels the field "reason:".
        em.add_field(name="reason:", value=reason, inline=False)

    try:
        await member.ban(
            reason=f"The member {member} has been banned by {source.author}"
            + (
                f" for {self.bot.utils_class.duration(duration_s)}"
                if duration_s
                else ""
            )
            # Fix: dropped the stray trailing apostrophe that followed
            # {reason} in the audit-log message.
            + (f" for the reason: {reason}" if reason else "")
        )
        self.bot.user_repo.ban_user(
            source.guild.id,
            member.id,
            duration_s,
            time(),
            f"{source.author}",
            reason,
        )
        if duration_s:
            em.description += f" for {self.bot.utils_class.duration(duration_s)}"
            # Schedule the one-shot task that unbans the member when the
            # duration elapses.
            self.bot.utils_class.task_launcher(
                self.bot.utils_class.ban_completion,
                (
                    self.bot.user_repo.get_user(source.guild.id, member.id),
                    source.guild.id,
                ),
                count=1,
            )
    except Forbidden:
        if isinstance(source, Context):
            return await source.reply(
                f"⛔ - {source.author.mention} - I can't ban the member `{member}`!",
                delete_after=20,
            )
        else:
            return await source.response.send_message(
                f"⛔ - {source.author.mention} - I can't ban the member `{member}`!",
                ephemeral=True,
            )
    except AttributeError:
        # member.ban only exists on guild Member objects; a bare User (not
        # in the guild) lands here.
        if isinstance(source, Context):
            return await source.reply(
                f"⛔ - {source.author.mention} - I can't ban the member `{member}` because he is not present in the guild!",
                delete_after=20,
            )
        else:
            return await source.response.send_message(
                f"⛔ - {source.author.mention} - I can't ban the member `{member}` because he is not present in the guild!",
                ephemeral=True,
            )

    if isinstance(source, Context):
        await source.send(embed=em)
    else:
        await source.response.send_message(embed=em)
""" REMOVE """
@sanction_ban_group.command(
    name="remove",
    brief="❤️🩹",
    usage='@user ("reason")',
    description="Unban a user from the server with a reason attached if specified!",
)
@has_guild_permissions(ban_members=True)
@bot_has_guild_permissions(ban_members=True)
@max_concurrency(1, per=BucketType.member)
async def sanction_ban_remove_command(
    self, ctx: Context, user: User, *, reason: str = None
):
    """
    This command unbans a user from the server with a reason attached if specified!

    Parameters
    ----------
    ctx: :class:`disnake.ext.commands.Context`
        The command context
    user: :class:`disnake.User`
        The user you want to unban
    reason: :class:`str` optional
        The reason attached to the unban
    """
    # Thin wrapper: the prefix and slash variants share one implementation.
    await self.handle_ban_remove(ctx, user, reason)
@sanction_ban_slash_group.sub_command(
    name="remove",
    description="Unban a user from the server with a reason attached if specified!",
)
@has_guild_permissions(ban_members=True)
@bot_has_guild_permissions(ban_members=True)
@max_concurrency(1, per=BucketType.member)
async def sanction_ban_remove_slash_command(
    self, inter: GuildCommandInteraction, user: User, reason: str = None
):
    """
    This slash command unbans a user from the server with a reason attached if specified!

    Parameters
    ----------
    inter: :class:`disnake.ext.commands.GuildCommandInteraction`
        The application command interaction
    user: :class:`disnake.User`
        The user you want to unban
    reason: :class:`str` optional
        The reason attached to the unban
    """
    # Thin wrapper: the prefix and slash variants share one implementation.
    await self.handle_ban_remove(inter, user, reason)
async def handle_ban_remove(
    self,
    source: Union[Context, GuildCommandInteraction],
    user: User,
    reason: str = None,
):
    """
    Shared implementation for the prefix and slash "ban remove" commands.

    Verifies that *user* is actually banned, records the unban in the user
    repository, lifts the ban and drops the pending ban-completion entry.
    """
    bans = await source.guild.bans()
    is_prefix_command = isinstance(source, Context)

    if not bans:
        notice = f"ℹ - {source.author.mention} - There is no ban in this server!"
        if is_prefix_command:
            return await source.send(notice, delete_after=20)
        return await source.response.send_message(notice, ephemeral=True)

    if not any(entry.user.id == user.id for entry in bans):
        notice = f"ℹ - {source.author.mention} - The user `{user}` is not banned from the server!"
        if is_prefix_command:
            return await source.send(notice, delete_after=20)
        return await source.response.send_message(notice, ephemeral=True)

    self.bot.user_repo.unban_user(
        source.guild.id, user.id, time(), f"{source.author}", reason
    )
    await source.guild.unban(user, reason=reason)

    confirmation = f"🚫 - The user `{user}` is no longer banned from the server."
    if is_prefix_command:
        await source.send(confirmation)
    else:
        await source.response.send_message(confirmation)

    # Drop the scheduled ban-completion entry, if one was registered.
    if user.id in self.bot.tasks[source.guild.id]["ban_completions"]:
        del self.bot.tasks[source.guild.id]["ban_completions"][user.id]
""" LIST """
@sanction_ban_group.command(
    name="list",
    brief="🙅🏽♂️",
    usage="(@member)",
    description="Lists the server's bans or for a specific member!",
)
@has_guild_permissions(ban_members=True)
@bot_has_guild_permissions(ban_members=True)
@max_concurrency(1, per=BucketType.member)
async def sanction_ban_list_command(self, ctx: Context, member: Member = None):
    """
    This command lists the bans from the server or for a specific member!

    Parameters
    ----------
    ctx: :class:`disnake.ext.commands.Context`
        The command context
    member: :class:`disnake.Member` optional
        The member whose past bans should be listed (server-wide list when omitted)
    """
    # Thin wrapper: the prefix and slash variants share one implementation.
    await self.handle_ban_list(ctx, member)
@sanction_ban_slash_group.sub_command(
    name="list",
    description="Lists the server's bans or for a specific member!",
)
@has_guild_permissions(ban_members=True)
@bot_has_guild_permissions(ban_members=True)
@max_concurrency(1, per=BucketType.member)
async def sanction_ban_list_slash_command(
    self, inter: GuildCommandInteraction, member: Member = None
):
    """
    This slash command lists the bans from the server or for a specific member!

    Parameters
    ----------
    inter: :class:`disnake.ext.commands.GuildCommandInteraction`
        The application command interaction
    member: :class:`disnake.Member` optional
        The member whose past bans should be listed (server-wide list when omitted)
    """
    # Thin wrapper: the prefix and slash variants share one implementation.
    await self.handle_ban_list(inter, member)
async def handle_ban_list(
    self,
    source: Union[Context, GuildCommandInteraction],
    member: Member = None,
):
    """
    Shared implementation for the prefix and slash "ban list" commands.

    With *member*, lists that member's past (lifted) bans stored under the
    repository's "unban" key; without it, lists the guild's current bans.
    Embeds are capped at 24 entries plus an overflow marker.

    Parameters
    ----------
    source: :class:`Union[Context, GuildCommandInteraction]`
        The command context or application command interaction
    member: :class:`disnake.Member` optional
        The member whose past bans should be listed
    """
    if member:
        db_user = self.bot.user_repo.get_user(source.guild.id, member.id)

        if "unban" not in db_user:
            if isinstance(source, Context):
                return await source.send(
                    f"ℹ - {source.author.mention} - {member} has never been ban from the server!",
                    delete_after=20,
                )
            else:
                return await source.response.send_message(
                    f"ℹ - {source.author.mention} - {member} has never been ban from the server!",
                    ephemeral=True,
                )

        em = Embed(
            colour=self.bot.color,
            title=f"🔨 - {member}'s bans",
            description=f"The list of {member}'s old bans",
        )
        em = self.configure_embed(source, em, member)

        # "unban" is a mapping — presumably keyed per unban event; each
        # value carries the lifted ban plus its "original_ban" record.
        bans = db_user["unban"]

        # Discord embeds cap out at 25 fields; slot 25 is an overflow marker.
        for x, key in enumerate(list(bans)[:25]):
            if x == 24:
                em.add_field(
                    name="**Too many bans to display them all**",
                    value="...",
                    inline=False,
                )
            else:
                ban = bans[key]
                em.add_field(
                    name=f"ban `{ceil(ban['original_ban']['at_s'] * 1000)}`:",
                    value=f"**date**: {ban['original_ban']['at']}\n**by**: `{ban['original_ban']['by']}`\n**duration**: {ban['original_ban']['duration']}"
                    + (
                        f"\n**reason**: {ban['original_ban']['reason']}"
                        if "reason" in ban["original_ban"]
                        else ""
                    )
                    # Fix: the backtick opened before {ban['by']} was never
                    # closed, breaking the markdown of every later line.
                    + f"\n\n**unbanned date**: {ban['at']}\n**by**: `{ban['by']}`"
                    + (f"\n**reason**: {ban['reason']}" if "reason" in ban else ""),
                    inline=True,
                )

        if isinstance(source, Context):
            await source.send(embed=em)
        else:
            await source.response.send_message(embed=em)
    else:
        bans = await source.guild.bans()

        if not bans:
            if isinstance(source, Context):
                return await source.send(
                    f"ℹ - {source.author.mention} - There is no ban in this server!",
                    delete_after=20,
                )
            else:
                return await source.response.send_message(
                    f"ℹ - {source.author.mention} - There is no ban in this server!",
                    ephemeral=True,
                )

        em = Embed(
            colour=self.bot.color,
            title=f"🔨 - Server's bans",
            description=f"The list of the server bans",
        )
        em = self.configure_embed(source, em)

        for x, ban in enumerate(bans[:25]):
            if x == 24:
                em.add_field(
                    name="**Too many bans to display them all**",
                    value="...",
                    inline=False,
                )
            else:
                db_user = self.bot.user_repo.get_user(source.guild.id, ban.user.id)
                em.add_field(
                    name=f"{ban.user}",
                    value=f"**date**: {db_user['ban']['at']}\n**by**: `{db_user['ban']['by']}`\n**duration**: {db_user['ban']['duration']}"
                    + (
                        f"\n**reason**: {db_user['ban']['reason']}"
                        if "reason" in db_user["ban"]
                        else ""
                    ),
                    inline=True,
                )

        if isinstance(source, Context):
            await source.send(embed=em)
        else:
            await source.response.send_message(embed=em)
""" METHODS """
def configure_embed(
    self,
    source: Union[Context, GuildCommandInteraction],
    em: Embed,
    member: Member = None,
) -> Embed:
    """
    Decorate *em* with the guild's thumbnail, an author line for *member*
    (or the invoker) and the bot's footer, then return it.
    """
    shown_member = member or source.author

    if source.guild.icon:
        em.set_thumbnail(url=source.guild.icon.url)

    # Only pass icon URLs when an avatar actually exists — disnake rejects
    # None where a URL is expected.
    author_kwargs = {"name": f"{shown_member}"}
    if shown_member.avatar:
        author_kwargs["icon_url"] = shown_member.avatar.url
    em.set_author(**author_kwargs)

    footer_kwargs = {"text": self.bot.user.name}
    if self.bot.user.avatar:
        footer_kwargs["icon_url"] = self.bot.user.avatar.url
    em.set_footer(**footer_kwargs)

    return em
def setup(bot):
    """Extension entry point: registers the Moderation cog on the bot."""
    bot.add_cog(Moderation(bot))
| 36.12294
| 412
| 0.545261
| 5,935
| 52,595
| 4.739848
| 0.051727
| 0.018165
| 0.023639
| 0.024528
| 0.880737
| 0.851872
| 0.81316
| 0.777825
| 0.740073
| 0.725676
| 0
| 0.005131
| 0.344177
| 52,595
| 1,455
| 413
| 36.147766
| 0.807729
| 0
| 0
| 0.618526
| 0
| 0.036853
| 0.2321
| 0.054394
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002988
| false
| 0.007968
| 0.006972
| 0
| 0.039841
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8b9f7122d10564f1b5c53daf870a95fbb7000c16
| 27,478
|
py
|
Python
|
tensorflow_checkpoint_reader/pb/tensorflow/core/profiler/protobuf/xplane_pb2.py
|
shawwn/tensorflow-checkpoint-reader
|
f0e65548411e3bd66a07e36bb1850907a05952d0
|
[
"MIT"
] | 1
|
2021-12-02T15:06:09.000Z
|
2021-12-02T15:06:09.000Z
|
tensorflow_checkpoint_reader/pb/tensorflow/core/profiler/protobuf/xplane_pb2.py
|
shawwn/tensorflow-checkpoint-reader
|
f0e65548411e3bd66a07e36bb1850907a05952d0
|
[
"MIT"
] | null | null | null |
tensorflow_checkpoint_reader/pb/tensorflow/core/profiler/protobuf/xplane_pb2.py
|
shawwn/tensorflow-checkpoint-reader
|
f0e65548411e3bd66a07e36bb1850907a05952d0
|
[
"MIT"
] | null | null | null |
'Generated protocol buffer code.'
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(name='tensorflow/core/profiler/protobuf/xplane.proto', package='tensorflow.profiler', syntax='proto3', serialized_options=b'\xf8\x01\x01', create_key=_descriptor._internal_create_key, serialized_pb=b'\n.tensorflow/core/profiler/protobuf/xplane.proto\x12\x13tensorflow.profiler"j\n\x06XSpace\x12+\n\x06planes\x18\x01 \x03(\x0b2\x1b.tensorflow.profiler.XPlane\x12\x0e\n\x06errors\x18\x02 \x03(\t\x12\x10\n\x08warnings\x18\x03 \x03(\t\x12\x11\n\thostnames\x18\x04 \x03(\t"\xba\x03\n\x06XPlane\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12)\n\x05lines\x18\x03 \x03(\x0b2\x1a.tensorflow.profiler.XLine\x12F\n\x0eevent_metadata\x18\x04 \x03(\x0b2..tensorflow.profiler.XPlane.EventMetadataEntry\x12D\n\rstat_metadata\x18\x05 \x03(\x0b2-.tensorflow.profiler.XPlane.StatMetadataEntry\x12)\n\x05stats\x18\x06 \x03(\x0b2\x1a.tensorflow.profiler.XStat\x1aY\n\x12EventMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x122\n\x05value\x18\x02 \x01(\x0b2#.tensorflow.profiler.XEventMetadata:\x028\x01\x1aW\n\x11StatMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x121\n\x05value\x18\x02 \x01(\x0b2".tensorflow.profiler.XStatMetadata:\x028\x01"\xbb\x01\n\x05XLine\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x12\n\ndisplay_id\x18\n \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x14\n\x0cdisplay_name\x18\x0b \x01(\t\x12\x14\n\x0ctimestamp_ns\x18\x03 \x01(\x03\x12\x13\n\x0bduration_ps\x18\t \x01(\x03\x12+\n\x06events\x18\x04 \x03(\x0b2\x1b.tensorflow.profiler.XEventJ\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x07\x10\x08J\x04\x08\x08\x10\t"\x95\x01\n\x06XEvent\x12\x13\n\x0bmetadata_id\x18\x01 \x01(\x03\x12\x13\n\toffset_ps\x18\x02 \x01(\x03H\x00\x12\x19\n\x0fnum_occurrences\x18\x05 \x01(\x03H\x00\x12\x13\n\x0bduration_ps\x18\x03 \x01(\x03\x12)\n\x05stats\x18\x04 \x03(\x0b2\x1a.tensorflow.profiler.XStatB\x06\n\x04data"\xad\x01\n\x05XStat\x12\x13\n\x0bmetadata_id\x18\x01 \x01(\x03\x12\x16\n\x0cdouble_value\x18\x02 
\x01(\x01H\x00\x12\x16\n\x0cuint64_value\x18\x03 \x01(\x04H\x00\x12\x15\n\x0bint64_value\x18\x04 \x01(\x03H\x00\x12\x13\n\tstr_value\x18\x05 \x01(\tH\x00\x12\x15\n\x0bbytes_value\x18\x06 \x01(\x0cH\x00\x12\x13\n\tref_value\x18\x07 \x01(\x04H\x00B\x07\n\x05value"\x8f\x01\n\x0eXEventMetadata\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x14\n\x0cdisplay_name\x18\x04 \x01(\t\x12\x10\n\x08metadata\x18\x03 \x01(\x0c\x12)\n\x05stats\x18\x05 \x03(\x0b2\x1a.tensorflow.profiler.XStat\x12\x10\n\x08child_id\x18\x06 \x03(\x03">\n\rXStatMetadata\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x13\n\x0bdescription\x18\x03 \x01(\tB\x03\xf8\x01\x01b\x06proto3')
_XSPACE = _descriptor.Descriptor(name='XSpace', full_name='tensorflow.profiler.XSpace', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='planes', full_name='tensorflow.profiler.XSpace.planes', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='errors', full_name='tensorflow.profiler.XSpace.errors', index=1, number=2, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='warnings', full_name='tensorflow.profiler.XSpace.warnings', index=2, number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='hostnames', full_name='tensorflow.profiler.XSpace.hostnames', index=3, number=4, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=71, serialized_end=177)
_XPLANE_EVENTMETADATAENTRY = _descriptor.Descriptor(name='EventMetadataEntry', full_name='tensorflow.profiler.XPlane.EventMetadataEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='key', full_name='tensorflow.profiler.XPlane.EventMetadataEntry.key', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='value', full_name='tensorflow.profiler.XPlane.EventMetadataEntry.value', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=b'8\x01', is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=444, serialized_end=533)
_XPLANE_STATMETADATAENTRY = _descriptor.Descriptor(name='StatMetadataEntry', full_name='tensorflow.profiler.XPlane.StatMetadataEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='key', full_name='tensorflow.profiler.XPlane.StatMetadataEntry.key', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='value', full_name='tensorflow.profiler.XPlane.StatMetadataEntry.value', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=b'8\x01', is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=535, serialized_end=622)
_XPLANE = _descriptor.Descriptor(name='XPlane', full_name='tensorflow.profiler.XPlane', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='id', full_name='tensorflow.profiler.XPlane.id', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='name', full_name='tensorflow.profiler.XPlane.name', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b''.decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='lines', full_name='tensorflow.profiler.XPlane.lines', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='event_metadata', full_name='tensorflow.profiler.XPlane.event_metadata', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='stat_metadata', full_name='tensorflow.profiler.XPlane.stat_metadata', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='stats', full_name='tensorflow.profiler.XPlane.stats', index=5, number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[_XPLANE_EVENTMETADATAENTRY, _XPLANE_STATMETADATAENTRY], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=180, serialized_end=622)
_XLINE = _descriptor.Descriptor(name='XLine', full_name='tensorflow.profiler.XLine', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='id', full_name='tensorflow.profiler.XLine.id', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='display_id', full_name='tensorflow.profiler.XLine.display_id', index=1, number=10, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='name', full_name='tensorflow.profiler.XLine.name', index=2, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b''.decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='display_name', full_name='tensorflow.profiler.XLine.display_name', index=3, number=11, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b''.decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='timestamp_ns', full_name='tensorflow.profiler.XLine.timestamp_ns', index=4, number=3, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='duration_ps', full_name='tensorflow.profiler.XLine.duration_ps', index=5, number=9, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='events', full_name='tensorflow.profiler.XLine.events', index=6, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=625, serialized_end=812)
_XEVENT = _descriptor.Descriptor(name='XEvent', full_name='tensorflow.profiler.XEvent', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='metadata_id', full_name='tensorflow.profiler.XEvent.metadata_id', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='offset_ps', full_name='tensorflow.profiler.XEvent.offset_ps', index=1, number=2, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='num_occurrences', full_name='tensorflow.profiler.XEvent.num_occurrences', index=2, number=5, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='duration_ps', full_name='tensorflow.profiler.XEvent.duration_ps', index=3, number=3, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='stats', full_name='tensorflow.profiler.XEvent.stats', index=4, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[_descriptor.OneofDescriptor(name='data', full_name='tensorflow.profiler.XEvent.data', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[])], serialized_start=815, serialized_end=964)
_XSTAT = _descriptor.Descriptor(name='XStat', full_name='tensorflow.profiler.XStat', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='metadata_id', full_name='tensorflow.profiler.XStat.metadata_id', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='double_value', full_name='tensorflow.profiler.XStat.double_value', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='uint64_value', full_name='tensorflow.profiler.XStat.uint64_value', index=2, number=3, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='int64_value', full_name='tensorflow.profiler.XStat.int64_value', index=3, number=4, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='str_value', full_name='tensorflow.profiler.XStat.str_value', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b''.decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='bytes_value', full_name='tensorflow.profiler.XStat.bytes_value', index=5, number=6, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='ref_value', full_name='tensorflow.profiler.XStat.ref_value', index=6, number=7, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[_descriptor.OneofDescriptor(name='value', full_name='tensorflow.profiler.XStat.value', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[])], serialized_start=967, serialized_end=1140)
_XEVENTMETADATA = _descriptor.Descriptor(name='XEventMetadata', full_name='tensorflow.profiler.XEventMetadata', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='id', full_name='tensorflow.profiler.XEventMetadata.id', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='name', full_name='tensorflow.profiler.XEventMetadata.name', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b''.decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='display_name', full_name='tensorflow.profiler.XEventMetadata.display_name', index=2, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b''.decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='metadata', full_name='tensorflow.profiler.XEventMetadata.metadata', index=3, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b'', message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='stats', full_name='tensorflow.profiler.XEventMetadata.stats', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='child_id', full_name='tensorflow.profiler.XEventMetadata.child_id', index=5, number=6, type=3, cpp_type=2, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=1143, serialized_end=1286)
_XSTATMETADATA = _descriptor.Descriptor(name='XStatMetadata', full_name='tensorflow.profiler.XStatMetadata', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='id', full_name='tensorflow.profiler.XStatMetadata.id', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='name', full_name='tensorflow.profiler.XStatMetadata.name', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b''.decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='description', full_name='tensorflow.profiler.XStatMetadata.description', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b''.decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=1288, serialized_end=1350)
# ---------------------------------------------------------------------------
# Descriptor wiring (machine-generated by protoc; do not hand-edit).
# The FieldDescriptors above are built with message_type=None, so the
# cross-references between message types are patched in here.
# ---------------------------------------------------------------------------
_XSPACE.fields_by_name['planes'].message_type = _XPLANE
_XPLANE_EVENTMETADATAENTRY.fields_by_name['value'].message_type = _XEVENTMETADATA
_XPLANE_EVENTMETADATAENTRY.containing_type = _XPLANE
_XPLANE_STATMETADATAENTRY.fields_by_name['value'].message_type = _XSTATMETADATA
_XPLANE_STATMETADATAENTRY.containing_type = _XPLANE
_XPLANE.fields_by_name['lines'].message_type = _XLINE
_XPLANE.fields_by_name['event_metadata'].message_type = _XPLANE_EVENTMETADATAENTRY
_XPLANE.fields_by_name['stat_metadata'].message_type = _XPLANE_STATMETADATAENTRY
_XPLANE.fields_by_name['stats'].message_type = _XSTAT
_XLINE.fields_by_name['events'].message_type = _XEVENT
_XEVENT.fields_by_name['stats'].message_type = _XSTAT
# XEvent's 'data' oneof: offset_ps and num_occurrences are mutually exclusive.
_XEVENT.oneofs_by_name['data'].fields.append(_XEVENT.fields_by_name['offset_ps'])
_XEVENT.fields_by_name['offset_ps'].containing_oneof = _XEVENT.oneofs_by_name['data']
_XEVENT.oneofs_by_name['data'].fields.append(_XEVENT.fields_by_name['num_occurrences'])
_XEVENT.fields_by_name['num_occurrences'].containing_oneof = _XEVENT.oneofs_by_name['data']
# XStat's 'value' oneof: at most one of the typed value fields may be set.
_XSTAT.oneofs_by_name['value'].fields.append(_XSTAT.fields_by_name['double_value'])
_XSTAT.fields_by_name['double_value'].containing_oneof = _XSTAT.oneofs_by_name['value']
_XSTAT.oneofs_by_name['value'].fields.append(_XSTAT.fields_by_name['uint64_value'])
_XSTAT.fields_by_name['uint64_value'].containing_oneof = _XSTAT.oneofs_by_name['value']
_XSTAT.oneofs_by_name['value'].fields.append(_XSTAT.fields_by_name['int64_value'])
_XSTAT.fields_by_name['int64_value'].containing_oneof = _XSTAT.oneofs_by_name['value']
_XSTAT.oneofs_by_name['value'].fields.append(_XSTAT.fields_by_name['str_value'])
_XSTAT.fields_by_name['str_value'].containing_oneof = _XSTAT.oneofs_by_name['value']
_XSTAT.oneofs_by_name['value'].fields.append(_XSTAT.fields_by_name['bytes_value'])
_XSTAT.fields_by_name['bytes_value'].containing_oneof = _XSTAT.oneofs_by_name['value']
_XSTAT.oneofs_by_name['value'].fields.append(_XSTAT.fields_by_name['ref_value'])
_XSTAT.fields_by_name['ref_value'].containing_oneof = _XSTAT.oneofs_by_name['value']
_XEVENTMETADATA.fields_by_name['stats'].message_type = _XSTAT
# Expose each top-level message descriptor on the file descriptor.
DESCRIPTOR.message_types_by_name['XSpace'] = _XSPACE
DESCRIPTOR.message_types_by_name['XPlane'] = _XPLANE
DESCRIPTOR.message_types_by_name['XLine'] = _XLINE
DESCRIPTOR.message_types_by_name['XEvent'] = _XEVENT
DESCRIPTOR.message_types_by_name['XStat'] = _XSTAT
DESCRIPTOR.message_types_by_name['XEventMetadata'] = _XEVENTMETADATA
DESCRIPTOR.message_types_by_name['XStatMetadata'] = _XSTATMETADATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Generate the concrete message classes from the descriptors and register
# them with the default symbol database; XPlane's map entries become nested
# classes on XPlane.
XSpace = _reflection.GeneratedProtocolMessageType('XSpace', (_message.Message,), {'DESCRIPTOR': _XSPACE, '__module__': 'tensorflow.core.profiler.protobuf.xplane_pb2'})
_sym_db.RegisterMessage(XSpace)
XPlane = _reflection.GeneratedProtocolMessageType('XPlane', (_message.Message,), {'EventMetadataEntry': _reflection.GeneratedProtocolMessageType('EventMetadataEntry', (_message.Message,), {'DESCRIPTOR': _XPLANE_EVENTMETADATAENTRY, '__module__': 'tensorflow.core.profiler.protobuf.xplane_pb2'}), 'StatMetadataEntry': _reflection.GeneratedProtocolMessageType('StatMetadataEntry', (_message.Message,), {'DESCRIPTOR': _XPLANE_STATMETADATAENTRY, '__module__': 'tensorflow.core.profiler.protobuf.xplane_pb2'}), 'DESCRIPTOR': _XPLANE, '__module__': 'tensorflow.core.profiler.protobuf.xplane_pb2'})
_sym_db.RegisterMessage(XPlane)
_sym_db.RegisterMessage(XPlane.EventMetadataEntry)
_sym_db.RegisterMessage(XPlane.StatMetadataEntry)
XLine = _reflection.GeneratedProtocolMessageType('XLine', (_message.Message,), {'DESCRIPTOR': _XLINE, '__module__': 'tensorflow.core.profiler.protobuf.xplane_pb2'})
_sym_db.RegisterMessage(XLine)
XEvent = _reflection.GeneratedProtocolMessageType('XEvent', (_message.Message,), {'DESCRIPTOR': _XEVENT, '__module__': 'tensorflow.core.profiler.protobuf.xplane_pb2'})
_sym_db.RegisterMessage(XEvent)
XStat = _reflection.GeneratedProtocolMessageType('XStat', (_message.Message,), {'DESCRIPTOR': _XSTAT, '__module__': 'tensorflow.core.profiler.protobuf.xplane_pb2'})
_sym_db.RegisterMessage(XStat)
XEventMetadata = _reflection.GeneratedProtocolMessageType('XEventMetadata', (_message.Message,), {'DESCRIPTOR': _XEVENTMETADATA, '__module__': 'tensorflow.core.profiler.protobuf.xplane_pb2'})
_sym_db.RegisterMessage(XEventMetadata)
XStatMetadata = _reflection.GeneratedProtocolMessageType('XStatMetadata', (_message.Message,), {'DESCRIPTOR': _XSTATMETADATA, '__module__': 'tensorflow.core.profiler.protobuf.xplane_pb2'})
_sym_db.RegisterMessage(XStatMetadata)
# Drop the parsed options objects; only the serialized options are kept
# (standard protoc-generated idiom).
DESCRIPTOR._options = None
_XPLANE_EVENTMETADATAENTRY._options = None
_XPLANE_STATMETADATAENTRY._options = None
| 376.410959
| 3,206
| 0.822731
| 3,872
| 27,478
| 5.511105
| 0.060176
| 0.051361
| 0.077464
| 0.068326
| 0.806223
| 0.75041
| 0.68649
| 0.674165
| 0.663667
| 0.662496
| 0
| 0.037674
| 0.041742
| 27,478
| 72
| 3,207
| 381.638889
| 0.772739
| 0.001128
| 0
| 0
| 1
| 0.014085
| 0.221494
| 0.173418
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.056338
| 0
| 0.056338
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8bb4e1884d1469df1f3fa3ea6cecb45212823d86
| 5,764
|
py
|
Python
|
tests/test_metrics_two_stage.py
|
hshaban/epathermostat_nw
|
6fec9402484e1ef7e4e59e2c679d9a8efee99ad6
|
[
"MIT"
] | null | null | null |
tests/test_metrics_two_stage.py
|
hshaban/epathermostat_nw
|
6fec9402484e1ef7e4e59e2c679d9a8efee99ad6
|
[
"MIT"
] | null | null | null |
tests/test_metrics_two_stage.py
|
hshaban/epathermostat_nw
|
6fec9402484e1ef7e4e59e2c679d9a8efee99ad6
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from numpy.testing import assert_allclose
import tempfile
import pytest
from thermostat_nw.exporters import metrics_to_csv
from thermostat_nw.multiple import (
multiple_thermostat_calculate_epa_field_savings_metrics,
)
from .fixtures.two_stage import (
thermostat_hpeb_2_hp_2,
# thermostat_type_2,
thermostat_fu_2_ce_2,
thermostat_furnace_or_boiler_two_stage_none_single_stage,
thermostat_na_2_hp_2,
metrics_hpeb_2_hp_2_data,
)
from thermostat_nw.columns import EXPORT_COLUMNS
import six
@pytest.fixture(scope="session")
def metrics_heat_pump_electric_backup_two_stage_heat_pump_two_stage(
    thermostat_hpeb_2_hp_2,
):
    """Session-scoped savings metrics for the two-stage HP/electric-backup
    thermostat, computed once over the entire dataset."""
    return thermostat_hpeb_2_hp_2.calculate_epa_field_savings_metrics(
        core_cooling_day_set_method="entire_dataset",
        core_heating_day_set_method="entire_dataset",
    )
@pytest.fixture(scope="session")
def metrics_heat_pump_electric_backup_two_stage_heat_pump_two_stage_multiple(
    thermostat_hpeb_2_hp_2,
):
    """Same metrics as the single-thermostat fixture, but produced through
    the multiprocessing code path (session-scoped)."""
    return multiple_thermostat_calculate_epa_field_savings_metrics(
        [thermostat_hpeb_2_hp_2], how="entire_dataset"
    )
RTOL = 1e-3
ATOL = 1e-3
def test_calculate_epa_field_savings_metrics_heat_pump_electric_backup_two_stage_heat_pump_two_stage(
metrics_heat_pump_electric_backup_two_stage_heat_pump_two_stage,
metrics_hpeb_2_hp_2_data,
):
assert len(metrics_heat_pump_electric_backup_two_stage_heat_pump_two_stage) == len(
metrics_hpeb_2_hp_2_data
)
for key in metrics_heat_pump_electric_backup_two_stage_heat_pump_two_stage[
0
].keys():
test_value = metrics_heat_pump_electric_backup_two_stage_heat_pump_two_stage[0][
key
]
target_value = metrics_hpeb_2_hp_2_data[0][key]
if isinstance(test_value, six.string_types):
assert test_value == target_value
else:
assert_allclose(test_value, target_value, rtol=RTOL, atol=ATOL)
for key in metrics_heat_pump_electric_backup_two_stage_heat_pump_two_stage[
1
].keys():
test_value = metrics_heat_pump_electric_backup_two_stage_heat_pump_two_stage[1][
key
]
target_value = metrics_hpeb_2_hp_2_data[1][key]
if isinstance(test_value, six.string_types):
assert test_value == target_value
else:
assert_allclose(test_value, target_value, rtol=RTOL, atol=ATOL)
def test_multiple_thermostat_calculate_epa_field_savings_metrics_heat_pump_electric_backup_two_stage_heat_pump_two_stage(
    metrics_heat_pump_electric_backup_two_stage_heat_pump_two_stage_multiple,
    metrics_hpeb_2_hp_2_data,
):
    """The multiprocessing code path reproduces the saved regression data.

    Strings must match exactly; numeric values are compared with
    assert_allclose using the module-level RTOL/ATOL.
    """
    results = metrics_heat_pump_electric_backup_two_stage_heat_pump_two_stage_multiple
    assert len(results) == len(metrics_hpeb_2_hp_2_data)
    # One loop over all records replaces the previous copy-pasted blocks for
    # indices 0 and 1; six.string_types is just str on Python 3.
    for result, expected in zip(results, metrics_hpeb_2_hp_2_data):
        for key, test_value in result.items():
            target_value = expected[key]
            if isinstance(test_value, str):
                assert test_value == target_value
            else:
                assert_allclose(test_value, target_value, rtol=RTOL, atol=ATOL)
def test_calculate_epa_field_savings_metrics_type_3(thermostat_fu_2_ce_2):
    # Computing savings metrics over the entire dataset for this thermostat
    # type should yield exactly two metric rows.
    result = thermostat_fu_2_ce_2.calculate_epa_field_savings_metrics(
        core_cooling_day_set_method="entire_dataset",
        core_heating_day_set_method="entire_dataset",
    )
    assert len(result) == 2
def test_calculate_epa_field_savings_metrics_type_4(
    thermostat_furnace_or_boiler_two_stage_none_single_stage,
):
    # Heating-only equipment: expect a single metric row over the full dataset.
    result = thermostat_furnace_or_boiler_two_stage_none_single_stage.calculate_epa_field_savings_metrics(
        core_cooling_day_set_method="entire_dataset",
        core_heating_day_set_method="entire_dataset",
    )
    assert len(result) == 1
def test_calculate_epa_field_savings_metrics_type_5(thermostat_na_2_hp_2):
    # Cooling-only heat pump: expect a single metric row over the full dataset.
    result = thermostat_na_2_hp_2.calculate_epa_field_savings_metrics(
        core_cooling_day_set_method="entire_dataset",
        core_heating_day_set_method="entire_dataset",
    )
    assert len(result) == 1
def test_metrics_to_csv(
    metrics_heat_pump_electric_backup_two_stage_heat_pump_two_stage,
):
    """Export metrics to CSV and verify the DataFrame and on-disk header row.

    Fix: the original leaked the open file descriptor returned by
    ``tempfile.mkstemp()`` and never removed the temp file; we now close the
    descriptor immediately and delete the file in a ``finally`` block.
    """
    import os

    fd, fname = tempfile.mkstemp()
    os.close(fd)  # mkstemp returns an OPEN descriptor; close it so it doesn't leak
    try:
        df = metrics_to_csv(
            metrics_heat_pump_electric_backup_two_stage_heat_pump_two_stage, fname
        )
        assert isinstance(df, pd.DataFrame)
        # The first two exported columns are fixed by the export schema.
        assert df.columns[0] == "sw_version"
        assert df.columns[1] == "ct_identifier"
        with open(fname, "r") as f:
            lines = f.readlines()
        # Header row + two metric rows.
        assert len(lines) == 3
        column_heads = lines[0].strip().split(",")
        assert column_heads == EXPORT_COLUMNS
    finally:
        os.remove(fname)  # clean up the temp file even if an assertion fails
| 33.905882
| 121
| 0.752429
| 821
| 5,764
| 4.661389
| 0.124239
| 0.10034
| 0.086229
| 0.132218
| 0.859943
| 0.846355
| 0.829109
| 0.801672
| 0.751241
| 0.738699
| 0
| 0.015618
| 0.189105
| 5,764
| 169
| 122
| 34.106509
| 0.803166
| 0.009542
| 0
| 0.463768
| 0
| 0
| 0.028917
| 0
| 0
| 0
| 0
| 0
| 0.137681
| 1
| 0.057971
| false
| 0
| 0.072464
| 0
| 0.144928
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4744d0f3cec189ea9b45b7b9a494a83a38c01d42
| 213
|
py
|
Python
|
picamera/array/__init__.py
|
Daan4/vision-well-position-controller
|
3926b7a684aee80d159046d7683257f8c23229e8
|
[
"MIT"
] | null | null | null |
picamera/array/__init__.py
|
Daan4/vision-well-position-controller
|
3926b7a684aee80d159046d7683257f8c23229e8
|
[
"MIT"
] | 2
|
2021-09-08T00:43:55.000Z
|
2022-03-11T23:38:43.000Z
|
picamera/array/__init__.py
|
Daan4/vision-well-position-controller
|
3926b7a684aee80d159046d7683257f8c23229e8
|
[
"MIT"
] | null | null | null |
class PiRGBArray:
    """Minimal stand-in for picamera's ``PiRGBArray`` (no camera required).

    Accepts and ignores any constructor arguments; exposes ``array`` (held at
    ``None``) and a no-op ``truncate`` to satisfy callers.
    """

    def __init__(self, *args, **kwargs):
        # The real class exposes the captured frame via .array; the stub
        # simply holds None.
        self.array = None

    def truncate(self, _):
        """No-op replacement for the real buffer truncation."""
        pass
class PiYUVArray:
    """Minimal stand-in for picamera's ``PiYUVArray`` (no camera required).

    Accepts and ignores any constructor arguments; exposes ``array`` held at
    ``None``.
    """

    def __init__(self, *args, **kwargs):
        # The real class exposes the captured frame via .array; the stub
        # simply holds None.
        self.array = None
| 17.75
| 40
| 0.591549
| 24
| 213
| 4.875
| 0.5
| 0.119658
| 0.188034
| 0.25641
| 0.581197
| 0.581197
| 0.581197
| 0.581197
| 0
| 0
| 0
| 0
| 0.286385
| 213
| 11
| 41
| 19.363636
| 0.769737
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0.125
| 0
| 0
| 0.625
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
47d8b144aa563fff9fe032784c58a6825f7c7ef1
| 1,558
|
py
|
Python
|
persons/migrations/0002_auto_20150307_1826.py
|
felix-engelmann/badgecc
|
5bc0ced339f18737e24cc34935a87e96ae14a825
|
[
"MIT"
] | null | null | null |
persons/migrations/0002_auto_20150307_1826.py
|
felix-engelmann/badgecc
|
5bc0ced339f18737e24cc34935a87e96ae14a825
|
[
"MIT"
] | null | null | null |
persons/migrations/0002_auto_20150307_1826.py
|
felix-engelmann/badgecc
|
5bc0ced339f18737e24cc34935a87e96ae14a825
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration (0002) for the ``persons`` app.

    Relaxes several fields to be optional: image/default_image uploads, the
    rights many-to-many relations, and the person's role foreign key all gain
    ``blank=True, null=True``.  Do not hand-edit operation order.
    """

    # Must run after the app's initial schema migration.
    dependencies = [
        ('persons', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='department',
            name='default_image',
            field=models.ImageField(upload_to='', blank=True, null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='department',
            name='rights',
            field=models.ManyToManyField(to='persons.Right', blank=True, null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='person',
            name='extra_rights',
            field=models.ManyToManyField(to='persons.Right', blank=True, null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='person',
            name='image',
            field=models.ImageField(upload_to='', blank=True, null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='person',
            name='role',
            field=models.ForeignKey(to='persons.Role', blank=True, null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='role',
            name='rights',
            field=models.ManyToManyField(to='persons.Right', blank=True, null=True),
            preserve_default=True,
        ),
    ]
| 30.54902
| 84
| 0.56611
| 143
| 1,558
| 6.013986
| 0.27972
| 0.139535
| 0.174419
| 0.202326
| 0.765116
| 0.765116
| 0.703488
| 0.703488
| 0.703488
| 0.703488
| 0
| 0.004669
| 0.31258
| 1,558
| 50
| 85
| 31.16
| 0.798319
| 0.013479
| 0
| 0.681818
| 0
| 0
| 0.102932
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.045455
| 0
| 0.113636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
47df69d6d0e9af3740999c1bf47ce3804f369adc
| 20,377
|
py
|
Python
|
crazyflie_demo/scripts/u_v_controller.py
|
CooperDrones/VIP_Crazyswarm
|
331c8018efa8972d6f115798ea1dfda0dcb095b5
|
[
"MIT"
] | 3
|
2019-09-20T17:21:53.000Z
|
2022-02-07T20:18:27.000Z
|
crazyflie_demo/scripts/u_v_controller.py
|
CooperDrones/VIP_Crazyswarm
|
331c8018efa8972d6f115798ea1dfda0dcb095b5
|
[
"MIT"
] | null | null | null |
crazyflie_demo/scripts/u_v_controller.py
|
CooperDrones/VIP_Crazyswarm
|
331c8018efa8972d6f115798ea1dfda0dcb095b5
|
[
"MIT"
] | 1
|
2021-07-19T22:22:23.000Z
|
2021-07-19T22:22:23.000Z
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist,Vector3,TransformStamped # twist used in cmd_vel
from crazyflie_driver.msg import Hover
from std_msgs.msg import Empty
from vicon_bridge.srv import viconGrabPose
import numpy as np
from scipy.spatial.transform import Rotation
import math
import scipy.interpolate as si
import matplotlib.pyplot as plt
from threading import Thread
import time
class Tester:
    """Crazyflie position/velocity controller driven by Vicon motion capture.

    Publishes Twist commands on 'crazyflie/cmd_vel' at ``self.hz`` and reads
    poses through the '/vicon/grab_vicon_pose' service.  PID structure and
    gains follow section 3.1 of https://arxiv.org/pdf/1608.05786.pdf.

    NOTE(review): ``hover()`` initializes the gains and integrator state that
    ``uPathTracker()`` and ``vPathTracker()`` reuse — call ``hover()`` first,
    as the __main__ script below does.
    """
    def __init__(self, cf_name):
        # cf_name: Vicon object name for this drone (e.g. 'crazyflie4').
        self.cf_name = cf_name
        self.msg = Twist()
        self.hz = 30.0  # control-loop rate [Hz]
        self.rate = rospy.Rate(self.hz)
        self.pub = rospy.Publisher('crazyflie/cmd_vel', Twist, queue_size=0)
        # Blocks until the Vicon pose service is available.
        rospy.wait_for_service('/vicon/grab_vicon_pose')
        self.pose_getter = rospy.ServiceProxy('/vicon/grab_vicon_pose', viconGrabPose)
    def getPose(self, vicon_object):
        """Return the latest pose of *vicon_object* from the Vicon service."""
        self.pose = self.pose_getter(vicon_object, vicon_object, 1)
        self.pose1 = self.pose.pose.pose
        return self.pose1
    def dummyForLoop(self):
        """Publish zeroed commands for 100 cycles to unblock the publisher."""
        # REQUIRED TO OVERCOME INITIAL PUBLISHER BLOCK IMPLEMENTED BY USC
        self.msg.linear = Vector3(0, 0, 0)
        self.msg.angular = Vector3(0, 0, 0)
        for _ in range(100):
            self.pub.publish(self.msg)
            self.rate.sleep()
    def hover(self, x_ref, y_ref, z_ref, circle_radius):
        """Fly to (x_ref, y_ref, z_ref) [m] and return once the drone is
        within ``circle_radius`` of the setpoint on all three axes.

        Side effect: initializes all PID gains and integrator state used by
        the other controllers.
        """
        print('Start hover controller')
        # Followed this paper, section 3.1, for PID controller
        # https://arxiv.org/pdf/1608.05786.pdf
        # Altitude (z) controller gains and initialization
        self.z_feed_forward = 40000. # Eq. 3.1.8 - a bit less since we do not use UWB module
        self.z_kp = 11000. # Table 3.1.3
        self.z_ki = 3500.
        self.z_kd = 9000.
        self.z_error_historical = 0.
        self.thrust_cap_high = 15000 # TODO add caps for all commands
        self.thrust_cap_low = -20000
        self.z_error_before = 0.
        self.z_error_cap = 1.5
        # xy controller gains and initialization
        self.x_kp = 10. # Table 3.1.3
        self.x_ki = 2.
        self.y_kp = -10.
        self.y_ki = -2.
        self.x_error_historical = 0.
        self.y_error_historical = 0.
        self.x_before = 0.
        self.y_before = 0.
        self.x_cap = 15.
        self.y_cap = 15.
        # Yaw rate controller gains
        self.yaw_kp = -4. # Table 3.1.3
        # Set initial reference values
        origin = self.getPose(self.cf_name)
        self.pose_actual = origin
        # Hold yaw constant throughout
        yaw_ref = 0.0
        time_step = (1/self.hz)
        while not rospy.is_shutdown():
            # Get current drone pose
            self.pose_before = self.pose_actual
            self.pose_actual = self.getPose(self.cf_name)
            if math.isnan(self.pose_actual.orientation.x): # If nan is thrown, set to last known position
                self.pose_actual = self.pose_before
            ### Altitude controller ###
            # Get true z value
            self.z_actual = self.pose_actual.position.z
            # Get error
            self.z_error = z_ref - self.z_actual
            # Find integral component (anti-windup: stop integrating past cap)
            if self.z_error_historical <= self.z_error_cap:
                self.z_error_historical += (self.z_error * time_step)
            # Find derivative component
            self.z_error_der = (self.z_error - self.z_error_before) / time_step
            self.z_error_before = self.z_error
            # Sum PID errors and multiply by gains
            self.z_error_scaled = (self.z_error * self.z_kp) + (self.z_error_historical * self.z_ki) \
                + (self.z_error_der * self.z_kd) # Eq. 3.1.7
            # publish to thrust command
            self.msg.linear.z = self.z_feed_forward + self.z_error_scaled
            ### xy position controller ###
            # get true x and y values
            self.x_actual = self.pose_actual.position.x
            self.y_actual = self.pose_actual.position.y
            # Obtain yaw angle from quaternion
            self.quat_actual = [self.pose_actual.orientation.x, self.pose_actual.orientation.y, \
                self.pose_actual.orientation.z, self.pose_actual.orientation.w]
            R = Rotation.from_quat(self.quat_actual)
            self.global_x = R.apply([1, 0, 0]) # project to world x-axis
            self.yaw_angle = np.arctan2(np.cross([1, 0, 0], self.global_x)[2], \
                np.dot(self.global_x, [1, 0, 0]))
            # obtain position error
            self.x_error_world = x_ref - self.x_actual
            self.y_error_world = y_ref - self.y_actual
            # x-position controller (rotate world error into body frame)
            self.x_e = self.x_error_world * np.cos(self.yaw_angle) + self.y_error_world * np.sin(self.yaw_angle)
            self.u = (self.x_actual - self.x_before) / time_step
            self.x_before = self.x_actual
            # y-position controller
            self.y_e = -(self.x_error_world * np.sin(self.yaw_angle)) + self.y_error_world * np.cos(self.yaw_angle)
            self.v = (self.y_actual - self.y_before) / time_step
            self.y_before = self.y_actual
            # Eq. 3.1.11 and Eq. 3.1.12
            self.x_diff = self.x_e - self.u
            self.y_diff = self.y_e - self.v
            # Find integral component - store historical error
            self.x_error_historical += (self.x_diff * time_step)
            self.y_error_historical += (self.y_diff * time_step)
            # Sum PI errors and multiply by gains
            self.x_error_scaled = (self.x_diff * self.x_kp) \
                + (self.x_error_historical * self.x_ki)
            self.y_error_scaled = (self.y_diff * self.y_kp) \
                + (self.y_error_historical * self.y_ki)
            # Cap errors to prevent unstable maneuvers
            # NOTE(review): x and y caps share one if/elif chain, so the y cap
            # is never checked in an iteration where an x cap fires — looks
            # unintended; confirm before relying on the y cap.
            if self.x_error_scaled >= self.x_cap:
                self.x_error_scaled = self.x_cap
            elif self.x_error_scaled <= -self.x_cap:
                self.x_error_scaled = -self.x_cap
            elif self.y_error_scaled >= self.y_cap:
                self.y_error_scaled = self.y_cap
            elif self.y_error_scaled <= -self.y_cap:
                self.y_error_scaled = -self.y_cap
            # Plublish commanded actions
            self.msg.linear.x = self.x_error_scaled
            self.msg.linear.y = self.y_error_scaled
            ### Yaw-rate controller Eq. 3.1.13 ###
            self.yaw_error = yaw_ref - self.yaw_angle
            self.yaw_error_scaled = self.yaw_kp * self.yaw_error
            self.msg.angular.z = self.yaw_error_scaled
            # Kills hover once at stable position
            if (self.x_actual > (x_ref - circle_radius) and self.x_actual < (x_ref + circle_radius)) and \
                (self.y_actual > (y_ref - circle_radius) and self.y_actual < (y_ref + circle_radius)) and \
                (self.z_actual > (z_ref - circle_radius) and self.z_actual < (z_ref + circle_radius)):
                print('Found the hover setpoint!')
                break
            self.pub.publish(self.msg)
            self.rate.sleep()
    def uPathTracker(self, x_ref, y_ref, z_ref, u_ref):
        """Hold forward velocity ``u_ref`` [m/s] along x until x_ref reached.

        Requires ``hover()`` to have been called first to initialize gains and
        integrator state.
        """
        print('Started u controller!')
        # Set initial reference values
        origin = self.getPose(self.cf_name)
        self.pose_actual = origin
        # Hold yaw constant throughout
        yaw_ref = 0
        time_step = (1/self.hz)
        self.x_before = 0
        self.u_kp = 5
        while not rospy.is_shutdown():
            # Get current drone pose
            self.pose_before = self.pose_actual
            self.pose_actual = self.getPose(self.cf_name)
            if math.isnan(self.pose_actual.orientation.x): # If nan is thrown, set to last known position
                self.pose_actual = self.pose_before
            ### Altitude controller ###
            self.z_actual = self.pose_actual.position.z
            self.z_error = z_ref - self.z_actual
            if self.z_error_historical <= self.z_error_cap:
                self.z_error_historical += (self.z_error * time_step)
            self.z_error_der = (self.z_error - self.z_error_before) / time_step
            self.z_error_before = self.z_error
            self.z_error_scaled = (self.z_error * self.z_kp) + (self.z_error_historical * self.z_ki) \
                + (self.z_error_der * self.z_kd) # Eq. 3.1.7
            self.msg.linear.z = self.z_feed_forward + self.z_error_scaled
            ### xy position controller ###
            # get true x and y values
            self.x_actual = self.pose_actual.position.x
            self.y_actual = self.pose_actual.position.y
            # Obtain yaw angle from quaternion
            self.quat_actual = [self.pose_actual.orientation.x, self.pose_actual.orientation.y, \
                self.pose_actual.orientation.z, self.pose_actual.orientation.w]
            R = Rotation.from_quat(self.quat_actual)
            self.global_x = R.apply([1, 0, 0]) # project to world x-axis
            self.yaw_angle = np.arctan2(np.cross([1, 0, 0], self.global_x)[2], \
                np.dot(self.global_x, [1, 0, 0]))
            # obtain position error
            self.x_error_world = x_ref - self.x_actual
            self.y_error_world = y_ref - self.y_actual
            # # x-position controller
            # self.x_e = self.x_error_world * np.cos(self.yaw_angle) + self.y_error_world * np.sin(self.yaw_angle)
            self.u = (self.x_actual - self.x_before) / time_step
            self.x_before = self.x_actual
            # u-velocitty controller
            self.u_error = u_ref - self.u
            self.msg.linear.x = self.u_kp * self.u_error
            print('u is: {}'.format(self.u))
            # y-position controller
            self.y_e = -(self.x_error_world * np.sin(self.yaw_angle)) + self.y_error_world * np.cos(self.yaw_angle)
            self.v = (self.y_actual - self.y_before) / time_step
            self.y_before = self.y_actual
            # Eq. 3.1.11 and Eq. 3.1.12
            # NOTE(review): self.x_e is stale here (its computation above is
            # commented out), so x_diff uses the last value set by a previous
            # controller call — confirm this is intended.
            self.x_diff = self.x_e - self.u
            self.y_diff = self.y_e - self.v
            # Find integral component - store historical error
            self.x_error_historical += (self.x_diff * time_step)
            self.y_error_historical += (self.y_diff * time_step)
            # Sum PI errors and multiply by gains
            self.x_error_scaled = (self.x_diff * self.x_kp) \
                + (self.x_error_historical * self.x_ki)
            self.y_error_scaled = (self.y_diff * self.y_kp) \
                + (self.y_error_historical * self.y_ki)
            # Cap errors to prevent unstable maneuvers
            if self.x_error_scaled >= self.x_cap:
                self.x_error_scaled = self.x_cap
            elif self.x_error_scaled <= -self.x_cap:
                self.x_error_scaled = -self.x_cap
            elif self.y_error_scaled >= self.y_cap:
                self.y_error_scaled = self.y_cap
            elif self.y_error_scaled <= -self.y_cap:
                self.y_error_scaled = -self.y_cap
            # Plublish commanded actions
            # self.msg.linear.x = self.x_error_scaled
            self.msg.linear.y = self.y_error_scaled
            ### Yaw-rate controller Eq. 3.1.13 ###
            self.yaw_error = yaw_ref - self.yaw_angle
            self.yaw_error_scaled = self.yaw_kp * self.yaw_error
            self.msg.angular.z = self.yaw_error_scaled
            # Kills hover once at stable position last statement
            # ensures drone will stay at last point
            offset = 0.05
            if (self.x_actual < x_ref + offset) and (self.x_actual > x_ref - offset):
                print('Found the velocity set point!')
                break
            self.pub.publish(self.msg)
            self.rate.sleep()
    def vPathTracker(self, x_ref, y_ref, z_ref, v_ref):
        """Hold lateral velocity ``v_ref`` [m/s] along y until y_ref reached.

        Requires ``hover()`` to have been called first to initialize gains and
        integrator state.
        """
        print('Started v controller!')
        # Set initial reference values
        origin = self.getPose(self.cf_name)
        self.pose_actual = origin
        # Hold yaw constant throughout
        yaw_ref = 0
        time_step = (1/self.hz)
        self.v_kp = -5
        self.y_before = 0
        while not rospy.is_shutdown():
            # Get current drone pose
            self.pose_before = self.pose_actual
            self.pose_actual = self.getPose(self.cf_name)
            if math.isnan(self.pose_actual.orientation.x): # If nan is thrown, set to last known position
                self.pose_actual = self.pose_before
            ### Altitude controller ###
            self.z_actual = self.pose_actual.position.z
            self.z_error = z_ref - self.z_actual
            if self.z_error_historical <= self.z_error_cap:
                self.z_error_historical += (self.z_error * time_step)
            self.z_error_der = (self.z_error - self.z_error_before) / time_step
            self.z_error_before = self.z_error
            self.z_error_scaled = (self.z_error * self.z_kp) + (self.z_error_historical * self.z_ki) \
                + (self.z_error_der * self.z_kd) # Eq. 3.1.7
            self.msg.linear.z = self.z_feed_forward + self.z_error_scaled
            ### xy position controller ###
            # get true x and y values
            self.x_actual = self.pose_actual.position.x
            self.y_actual = self.pose_actual.position.y
            # Obtain yaw angle from quaternion
            self.quat_actual = [self.pose_actual.orientation.x, self.pose_actual.orientation.y, \
                self.pose_actual.orientation.z, self.pose_actual.orientation.w]
            R = Rotation.from_quat(self.quat_actual)
            self.global_x = R.apply([1, 0, 0]) # project to world x-axis
            self.yaw_angle = np.arctan2(np.cross([1, 0, 0], self.global_x)[2], \
                np.dot(self.global_x, [1, 0, 0]))
            # obtain position error
            self.x_error_world = x_ref - self.x_actual
            self.y_error_world = y_ref - self.y_actual
            # x-position controller
            self.x_e = self.x_error_world * np.cos(self.yaw_angle) + self.y_error_world * np.sin(self.yaw_angle)
            self.u = (self.x_actual - self.x_before) / time_step
            self.x_before = self.x_actual
            # # y-position controller
            # self.y_e = -(self.x_error_world * np.sin(self.yaw_angle)) + self.y_error_world * np.cos(self.yaw_angle)
            self.v = (self.y_actual - self.y_before) / time_step
            self.y_before = self.y_actual
            print('u is: {}'.format(self.v))
            # v-velocitty controller
            self.v_error = v_ref - self.v
            self.msg.linear.y = self.v_kp * self.v_error
            # Eq. 3.1.11 and Eq. 3.1.12
            # NOTE(review): self.y_e is stale here (its computation above is
            # commented out) — same pattern as x_e in uPathTracker; confirm.
            self.x_diff = self.x_e - self.u
            self.y_diff = self.y_e - self.v
            # Find integral component - store historical error
            self.x_error_historical += (self.x_diff * time_step)
            self.y_error_historical += (self.y_diff * time_step)
            # Sum PI errors and multiply by gains
            self.x_error_scaled = (self.x_diff * self.x_kp) \
                + (self.x_error_historical * self.x_ki)
            self.y_error_scaled = (self.y_diff * self.y_kp) \
                + (self.y_error_historical * self.y_ki)
            # Cap errors to prevent unstable maneuvers
            if self.x_error_scaled >= self.x_cap:
                self.x_error_scaled = self.x_cap
            elif self.x_error_scaled <= -self.x_cap:
                self.x_error_scaled = -self.x_cap
            elif self.y_error_scaled >= self.y_cap:
                self.y_error_scaled = self.y_cap
            elif self.y_error_scaled <= -self.y_cap:
                self.y_error_scaled = -self.y_cap
            # Plublish commanded actions
            self.msg.linear.x = self.x_error_scaled
            # self.msg.linear.y = self.y_error_scaled
            ### Yaw-rate controller Eq. 3.1.13 ###
            self.yaw_error = yaw_ref - self.yaw_angle
            self.yaw_error_scaled = self.yaw_kp * self.yaw_error
            self.msg.angular.z = self.yaw_error_scaled
            # Kills hover once at stable position last statement
            # ensures drone will stay at last point
            offset = 0.1
            if (self.y_actual < y_ref + offset) and (self.y_actual > y_ref - offset):
                print('Found the velocity set point!')
                break
            self.pub.publish(self.msg)
            self.rate.sleep()
### Attempt to make threading work to fly multiple drones
# def handler(cf, cf_name):
# try:
# drone1 = Tester(cf_name)
# drone1.dummyForLoop()
# x_ref = 0.0 # m
# y_ref = 0.0 # m
# z_ref = 0.4 # m
# circle_radius = 0.1 # m
# drone1.hover(x_ref, y_ref, z_ref, circle_radius)
# x_ref = -1.0
# y_ref = -0.5
# drone1.hover(x_ref, y_ref, z_ref, circle_radius)
# u_ref = 1.5 # m/s
# x_ref = 1.0
# drone1.uPathTracker(x_ref, y_ref, z_ref, u_ref)
# # u_ref = -2.0 # m/s
# # x_ref = -1.0 # m
# # drone1.uPathTracker(x_ref, y_ref, z_ref, u_ref)
# v_ref = 1.5 # m/s
# y_ref = 0.5 # m
# drone1.vPathTracker(x_ref, y_ref, z_ref, v_ref)
# u_ref = -u_ref # m/s
# x_ref = -x_ref # m
# drone1.uPathTracker(x_ref, y_ref, z_ref, u_ref)
# v_ref = -v_ref # m/s
# y_ref = -y_ref # m/s
# drone1.vPathTracker(x_ref, y_ref, z_ref, v_ref)
# u_ref = -u_ref # m/s
# x_ref = -x_ref # m
# drone1.uPathTracker(x_ref, y_ref, z_ref, u_ref)
# v_ref = -v_ref # m/s
# y_ref = -y_ref # m/s
# drone1.vPathTracker(x_ref, y_ref, z_ref, v_ref)
# u_ref = -u_ref # m/s
# x_ref = -x_ref # m
# drone1.uPathTracker(x_ref, y_ref, z_ref, u_ref)
# v_ref = -v_ref # m/s
# y_ref = -y_ref # m/s
# drone1.vPathTracker(x_ref, y_ref, z_ref, v_ref)
# # land the drone
# z_ref = 0.15
# drone1.hover(x_ref, y_ref, z_ref, circle_radius)
# except Exception as e:
# print(e)
# if __name__ == '__main__':
# rospy.init_node('test')
# cf3 = Tester("crazyflie3")
# cf4 = Tester("crazyflie4")
# # cf3 = Tester("crazyflie5")
# t3 = Thread(target=handler, args=(cf3, "crazyflie3",))
# t4 = Thread(target=handler, args=(cf4, 'crazyflie4',))
# # t3 = Thread(target=handler, args=(cf3,))
# t3.start()
# # time.sleep(20.0)
# t4.start()
# # time.sleep(0.5)
# # t3.start()
if __name__ == "__main__":
    rospy.init_node('test')
    # Works with drone 4 as of 01/28/2020
    # Please do not change script directly!!!
    # Copy all into new file if you would like to edit
    # Flight plan: take off and hover, then alternate forward/lateral velocity
    # tracking legs (flipping sign each leg), and finally descend to land.
    # hover() must run first — it initializes the PID state the path trackers reuse.
    try:
        drone1 = Tester('crazyflie4')
        drone1.dummyForLoop()
        x_ref = 0.0 # m
        y_ref = 0.0 # m
        z_ref = 0.4 # m
        circle_radius = 0.1 # m
        drone1.hover(x_ref, y_ref, z_ref, circle_radius)
        x_ref = -1.0
        y_ref = -0.5
        drone1.hover(x_ref, y_ref, z_ref, circle_radius)
        u_ref = 1.5 # m/s
        x_ref = 1.0
        drone1.uPathTracker(x_ref, y_ref, z_ref, u_ref)
        # u_ref = -2.0 # m/s
        # x_ref = -1.0 # m
        # drone1.uPathTracker(x_ref, y_ref, z_ref, u_ref)
        v_ref = 1.5 # m/s
        y_ref = 0.5 # m
        drone1.vPathTracker(x_ref, y_ref, z_ref, v_ref)
        u_ref = -u_ref # m/s
        x_ref = -x_ref # m
        drone1.uPathTracker(x_ref, y_ref, z_ref, u_ref)
        v_ref = -v_ref # m/s
        y_ref = -y_ref # m/s
        drone1.vPathTracker(x_ref, y_ref, z_ref, v_ref)
        u_ref = -u_ref # m/s
        x_ref = -x_ref # m
        drone1.uPathTracker(x_ref, y_ref, z_ref, u_ref)
        v_ref = -v_ref # m/s
        y_ref = -y_ref # m/s
        drone1.vPathTracker(x_ref, y_ref, z_ref, v_ref)
        u_ref = -u_ref # m/s
        x_ref = -x_ref # m
        drone1.uPathTracker(x_ref, y_ref, z_ref, u_ref)
        v_ref = -v_ref # m/s
        y_ref = -y_ref # m/s
        drone1.vPathTracker(x_ref, y_ref, z_ref, v_ref)
        # land the drone
        z_ref = 0.15
        drone1.hover(x_ref, y_ref, z_ref, circle_radius)
    except Exception as e:
        # Best-effort top-level guard so a controller failure is printed
        # rather than crashing the node silently.
        print(e)
| 38.230769
| 117
| 0.577563
| 2,982
| 20,377
| 3.689135
| 0.095909
| 0.044087
| 0.043632
| 0.019635
| 0.82538
| 0.806654
| 0.791019
| 0.78502
| 0.774566
| 0.767385
| 0
| 0.021396
| 0.318791
| 20,377
| 533
| 118
| 38.230769
| 0.771126
| 0.243166
| 0
| 0.680556
| 0
| 0
| 0.016215
| 0.0029
| 0
| 0
| 0
| 0.001876
| 0
| 1
| 0.020833
| false
| 0
| 0.041667
| 0
| 0.069444
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
47ed068b6dde7dceaa4a58f2289af4ceeea74dd1
| 225
|
py
|
Python
|
hydro_model_builder/model_generator.py
|
openearth/hydro-model-builder
|
cac34ee51ceb4bfe4122c87540e80be4c332cd62
|
[
"MIT"
] | 1
|
2018-06-09T01:40:22.000Z
|
2018-06-09T01:40:22.000Z
|
hydro_model_builder/model_generator.py
|
openearth/hydro-model-builder
|
cac34ee51ceb4bfe4122c87540e80be4c332cd62
|
[
"MIT"
] | 16
|
2018-06-21T08:15:40.000Z
|
2021-11-15T17:47:25.000Z
|
hydro_model_builder/model_generator.py
|
openearth/hydro-model-builder
|
cac34ee51ceb4bfe4122c87540e80be4c332cd62
|
[
"MIT"
] | null | null | null |
from abc import abstractmethod
class ModelGenerator:
    """Interface that concrete model generators must implement.

    Subclasses provide ``get_name`` and ``generate_model``; both hooks are
    marked abstract but the class itself remains instantiable (it does not
    derive from ``abc.ABC``).
    """

    def __init__(self):
        """No state to initialize; present so subclasses can call super()."""

    @abstractmethod
    def get_name(self):
        """Return the generator's name."""

    @abstractmethod
    def generate_model(self, options):
        """Generate a model according to *options*."""
| 13.235294
| 38
| 0.64
| 23
| 225
| 6
| 0.652174
| 0.115942
| 0.318841
| 0.362319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.302222
| 225
| 16
| 39
| 14.0625
| 0.878981
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| false
| 0.3
| 0.1
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
9a4e2ed638678928a5b6b257d2bf4a0595392d12
| 172
|
py
|
Python
|
RigidFoilSimer/__init__.py
|
SoftwareDevEngResearch/RigidFoilUDFGenerator
|
f6ad91b33b897e3e87b8411819be630b50fc4445
|
[
"MIT"
] | 1
|
2021-03-24T05:36:03.000Z
|
2021-03-24T05:36:03.000Z
|
RigidFoilSimer/__init__.py
|
SoftwareDevEngResearch/RigidFoilUDFGenerator
|
f6ad91b33b897e3e87b8411819be630b50fc4445
|
[
"MIT"
] | 1
|
2020-06-11T06:30:33.000Z
|
2020-06-11T06:30:33.000Z
|
RigidFoilSimer/__init__.py
|
SoftwareDevEngResearch/RigidFoilSimulator
|
f6ad91b33b897e3e87b8411819be630b50fc4445
|
[
"MIT"
] | 1
|
2020-04-21T06:37:52.000Z
|
2020-04-21T06:37:52.000Z
|
from . import RigidFoilSimer
from . import Parameters
from . import CFile_Generation
from . import talkToAnsys
from . import processWallshear
from . import organizationTool
| 28.666667
| 30
| 0.831395
| 19
| 172
| 7.473684
| 0.473684
| 0.422535
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133721
| 172
| 6
| 31
| 28.666667
| 0.95302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9a5d3709a588fb479ff1df6eed3c7c4d143c94b3
| 199
|
py
|
Python
|
dali/test/python/autoserialize_test/decorated_function.py
|
L-Net-1992/DALI
|
982224d8b53e1156ae092f73f5a7d600982a1eb9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
dali/test/python/autoserialize_test/decorated_function.py
|
L-Net-1992/DALI
|
982224d8b53e1156ae092f73f5a7d600982a1eb9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
dali/test/python/autoserialize_test/decorated_function.py
|
L-Net-1992/DALI
|
982224d8b53e1156ae092f73f5a7d600982a1eb9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from nvidia.dali.plugin.triton import autoserialize
from nvidia.dali import pipeline_def
@autoserialize
@pipeline_def(batch_size=1, num_threads=1, device_id=0)
def func_under_test():
    # Minimal pipeline definition used by the autoserialize test; the body
    # just returns a constant. (Decorator semantics come from nvidia.dali —
    # presumably @autoserialize marks this pipeline for serialization; verify
    # against the DALI Triton plugin docs.)
    return 42
| 22.111111
| 55
| 0.809045
| 31
| 199
| 4.967742
| 0.709677
| 0.12987
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028249
| 0.110553
| 199
| 8
| 56
| 24.875
| 0.841808
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.333333
| 0.166667
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
d0338bdae5e4ac57dea0834e6caaaf16c559faad
| 29
|
py
|
Python
|
evenOrOdd.py
|
Domene99/Python
|
067cee73398f6b9427ec638802abd3f5ab54448b
|
[
"MIT"
] | null | null | null |
evenOrOdd.py
|
Domene99/Python
|
067cee73398f6b9427ec638802abd3f5ab54448b
|
[
"MIT"
] | null | null | null |
evenOrOdd.py
|
Domene99/Python
|
067cee73398f6b9427ec638802abd3f5ab54448b
|
[
"MIT"
] | null | null | null |
def isEven(x):
    """Return True if integer *x* is even, False otherwise.

    Bug fix: the original returned ``x & 1``, which is 1 (truthy) for ODD
    numbers — the inverse of what the function name promises.  Works for
    negative integers too, since ``x & 1`` is the low bit of two's-complement.
    """
    return (x & 1) == 0
| 14.5
| 14
| 0.62069
| 6
| 29
| 3
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.241379
| 29
| 2
| 15
| 14.5
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
d07fd84417d9a8499bbe8a3588ebaa1646dda37b
| 23,924
|
py
|
Python
|
Day09.py
|
RustyPotato/AdventOfCode2017
|
17f7552231f18f66d2adc2e3554cec8c3d3c3a3a
|
[
"MIT"
] | null | null | null |
Day09.py
|
RustyPotato/AdventOfCode2017
|
17f7552231f18f66d2adc2e3554cec8c3d3c3a3a
|
[
"MIT"
] | 2
|
2018-01-07T09:37:25.000Z
|
2018-01-07T09:43:22.000Z
|
Day09.py
|
RustyPotato/AdventOfCode2017
|
17f7552231f18f66d2adc2e3554cec8c3d3c3a3a
|
[
"MIT"
] | null | null | null |
inputText = "{{{{{{<!>!>,<o!>},<a,\"i!!!>i!!,!>,<<e<i<<>,{{{<!>},<!><!>,<!!!>!!!>!!!>{\"!>},<!!!<!>},<oi!>!,'>}}}},{{<{!!!>},<!!,!!!>!!!>!!e!a!!!!<!>},<!!!>u!>,<!>!!u!!!!!>},<!>!>,<{>}},{<\"a!!!!'!>},<!}\"!>!!<o}i!<>,<<e,<eo!a}!!\"!>,<!>!>!>,<e{\"e\">}},{{<!oi'<!o!>ue>,{<!>},<i!>,<i>,{<'>}}}},{{<\"a,!>},<!!e!>,<e!!}!!o!!!>,<',}>,{{<i!!!!o!!!>!}!!!!!>!a!!!>!>!!a!!!!,!!u!!!>,<>}}}},{{{<{i!!<!!!!e!>>}},{{<!!!>},<ei>,{}}}}}},{{{<eaieia,!{io\"{!!}eu!{{!!e'>,{<>}}},{{{{<o>},<e!!o,!>},<\"u{!>,<!>},<a{}{!u>}},{{{<!!u'ioi''!>},<\"u>},<'oa!!<,'!!\"!!!!!>!!!!!>{!!!!aa>},{{<u}\",!!i!!!>!>!!!>!>!!!>'}!o{!!{>}},{<oiau>,<!!!>!><!>!!<!!!>!!<i!a}!>},<<}a}>}},{<eeo!!}!!!>e!>,<!!!>!!<{!!<i{!>i!>,<>}},{{{},{<!!!!!>},<\"!!!>!>},<o{!>,<,ea>}},{{<},\"a\"oeie!>},<ie'!!oa!!<>}},{}}},{{{}},{{<}e<i!!!!io!>},<\"!!e\"<\">},{{<'!!{!>},<a\">}},{<u!!i!!o{>}},{{<\"oe!!!>\"o{>},{{},<!>,<!!,!}!>!!!!!>>},{}},{{{{<!!{,,}!>!>,<!>},<!{a!<e'!>,<>}},{}},{{{<!!o\"'!!!>\"i{''>},{{},{{<!!!>},<\"}<\",!>>,{{},{{}}}}}}}},{{<u}\"!!!!!!o!!\"!>,<!>,<eiu!!!>,<o>},{<!!<!!e>,<e,<<<u!'>}}}}},{{{{{{}},{}},{{{<!!!>\"!!!!oa!>},<{a<}>,<'i\"a!!!>>},{<\"!\"ea!}e!!!>!>},<}o{>,{<e!>,<!!}\"i!!,!>ie!{{!>},<!>},<'!>},<e!!!>!!!>\">}}},{{},{}},{{{<i!>},<}!>},<{!ae!>,<!>,<!>,<!!!>'i!!>},{}},{<{!!!>!!!>a,!!!}!!!!}!>,}e}<{!!'!!!>!>},<>,<!>,<!>\"\"e<!!u<!!,e>}}}},{{{{{},{<!!o}!>,!>!!!>>},{{{{<,>}},<\",\"!>},<!!!!!!!>!>!i!>,<{!!!>}!>,<!!!>{ou>}}},{{{<!!e<!'>},<!a!>!u!!!>!>,<!!!!!!!>{\"<<!><!!!!u!!!!u>},{<!!!i!!!!!>uo!!!>,<a<>}},{},{{{{<!\"{}!!!>{a{i{a!>,<!!!>'>},<\"{!!!>}!a!!!<!>,<>}},{{{<>},{<!!!>!,{{>,<{!!!>u!oi!\"{''oo!!!o,i}ao,e>},{{<<!!<!>},<!!!,>},<o!>,<}!!!>!e}o!>},<<>}},{}},{{{},<!!!>{!>},<!!!>i!e\"!!!>!!\",!>},<uuo!>},<'!!>},{{<>}}}}},{{{<{\"!>\"!!e!\"}<a!>>,{}},{<ui!>},<e!>!!!}u!<!>,<,{!!o!!!>!>,<!,>},{{{<a!!a!{!!!!u\"!!a,!!o!>,<}u\">}},{{}},{<!>},<!!!<}a!>,<u!!!>\",!!!>\"\"!!ee,!>},<!>},<u>,<,'<u!!!>!!<!!!!\"!!!!!>aa!!>}}},{{{}},{{<a!>!>!>,<>},{<}a{i<!!!>},<'!>,<\"e{!>,!!u,!!!!!>>,<!a,!>!}!o>},{{{<u!!uo\"!>,<a\",!!!>\">},{<,e
oe>}}}},{{<!!!!!>!>},<ue!!!>,,!>u!!!>e'<!!!>!>,<ae!>>,<!!i}!>,<<'i'!!!>a!!u!!!>>},{<,ui{>}},{{{{}},{<{!!!>,<a{}!>o\">,<!!!!!uu\"a}'io!>},<!>\"}e!>},<>}},{},{}}},{{},{<>,{<'{u!>!!!>},<{!!!!!>i\"!!>}},{<!!i!!''!>!!!>!!!!!>!!u>}},{}},{{{<,!>!!!>!>,<a{a!!}!!u,'!>,<}!>},<<!!>,{{{<i!>},<,!!!>>}},<>}},{<<\"a!!\"!>,<!iuao!>!!!>u>,{<i'!!!!<,i!oiea!!!>>}},{{}}},{{{{<,!>,<!>,<ua,u!!{{!>,<!!,ui,!>},<io>},<!o!!!!!!!!!!!!\"uuu\"<{!!!>}>},{{<!!!!!>i>},{<e\"<'!!\"\"e!>,<!\">}}},{{{{},{<!>>,<a!>},<\"!!!>!>e{e<\"<!>,<eu!!!>,!!!>!!i>}},{<}}!!!>!>!!}e!>,<!!!>,<\"}!!e}{>,{<}u>,<u!eeo!>,<au!!!>e,>}}},{{{{},{{{<e,!>}!>o!,<!>},<}!>,<!>e!!aa<\",!!'>}},<{'i!>!!!!u{}!!!>},<u!!!!}>}},{{<i!!!>,<'!{\"!<a!!'!!u!!o!o<!!!>{!!!!ei!>>}}},{{{<aea!!!!{!>!!i!!!!!!!a<}\"i}!>},<iu,a>}},{{<u\"!!<!>a<>}},{<!!!!u{}!>},<!u}u<!>,<oe{!!!>!u}u>,{<\"!!iu!!!>}>}}},{{<{i<<!{\"!i{\"!>,<!!ou>,<oo!>{>},{},{{<e!!{!iaui!!e!!!>>},{<!>,<!>i!!{'u<\"\"u!>o\"{}!i!>}!!,>}}}},{{<{u!!!!!>\"{!>!!!!!>!!!i!>aue{,<,>}},{{{{}}},{<!>,e\"},i!>},<!!oe!!\"a<>,{<\"!!}!>,<u!!!>o!!!!\"!!u<!>{!>,<!<i!!\"!!\">}},{{{{<i<!\"!!\"!!!><i<!><\"!u!>},<a<!i!!!>,<!a!!!>>},{<i{!!!>a<u>}},<!>}o}!>a!!},!!!>!>}!!!>,<>},{<{!>,<i'u!!o}!auiou,!!}!>>}}}},{{{<<!!!>!!!>,<!!!>},<!>,<i>}},{<!>,<!>,<!!!!eu}!>},<!!}!!}>,{<{!!!!!>},<!!!!!!!!!>{!>,<},\">}},{<uu!!}e{!i!>},<>,<}!!o!>!!!>a!!!>!{<'a!>,e!!!>u>}},{{},{{{<>},{<!!!!\"o!>e!!>}}},{{{<!!!>a!!!>'!!a!!{u!!!a>}},<!>},<!>,<u!!,'\"!>!!!>},<>}}},{{{<!!>}}},{{<!!!!!>o!!!>o!!!!eu{\"}!o,e}>}}},{{{{<<!!!!{{!>},<!>,<e\"!!!!!>>}},{{<!}!>},<!!!>,<!!!>>},{{<!!i<>},{{<,\"!>},<>}}}},{{{<!}a!i'!!'i!>>,{{<eii!>},<!>},<!>},<,u<<!>,<ei!u!!!>},<!!a>},<,{!!!!!>,<!!'>}},<!>>},{{{<,!>!>ou!!u!>,<i}'u!!!!i\"!>,<i!>,<{!!>},<!!u\"}}'i!>},<o'!}!!!>e'u!>io!,>},{<!>!!}!>},<!!!>!>},<!>!>,<'!>,<i!>>}},{{}}},{{{<!!!>,<}'{!i}!>\"!!u\"a,a!!!><>}}}},{{{<u!!}\"\"e!ee,{!!,>,<,,!>},<>},{},{{<uo!>>},<!>,<eo'!>},<a!>,<!<ou!!!>,<,<>}},{{{{{<!<!!!>!!!>\"!\"a!>,<!!,!!!>!>},<u{!!i!>},<!eu>}}}}},{{{{},{{<!>,<!>},<{!!!>i!>},<>},{}}},<,uu>},{}}},{{{},{}},{{{
<\"a!>,<<{!>!>},<'!>!>,<<!>,<}'!>!>,<>},{{<o,a!!!>}!!!>>}},{{<eoi!>!>!!!>},<!!!>!!!>}}!ee!!!>!!!>,<!\"!!u!!!>>}}},{{{{<!>,<\"!!{'}!!!!!>'!\"\"!!!>!>u!!o}!>},<>}}},{<!>,<>,{}}},{{<o'<>,{<!>,<!!!>!!}i,!!ai!>},<oo!}!>},<!!>}},{<{!>,<a>,<u!!>}},{{{{}}},{{{}},{<'!>ai,!>},<!!!!!{!>oei!!!>\"!>,<'!>},<o>,{<!>,<i,!!<!>,<!>'!!!!!!!>!!e,!!}o<>}}}}},{{{{<!!!!!!{!>,<!!!>,<}!>,<a!>}!>},<!!!>i!>,<'i>}},{<!!<!>{e'i!!o!!<u>}},{{{<!!a!!'ea!>},<}{a!!!>,<e!>,<!!!>'!!!>u\">},<!>e!>},<u!>,<!i!>!!<\"!!!>,<{u!>,<!!\"\",u>},{<\"}',!ui,!!!!!>>},{}},{{{<e!>,<'!!!>!>,<!!!>},<,eu,>}},{<{\"!>'\"!>,<}!>,<,!!!>}o!!a!>ei'o>,{{}}},{{<!><<}!\",<i!'<!!>},{{},{<!>!>},<!!!>},<>}},{{<<u}!>},<<!\",'\"\">},{<{!!!!,!>!>},<o!!}a,!>},<'o<o!>},<!>,<>}}}}},{{{},<,!,!>},<!\"o!!!>>},{},{{<!>,<!'!!'i!!,o'!!!>,\">}}}},{{{{<<,!!!!!!!!!>},<!!!>!!<!\"a!!!>,'!!!!a!>,<>}},{<!>},<!>,<!!}!!{\"u<{o!>},<!!o,>}},{{},{{{<!>,{!!!'>},{{<\"!>},<!'!{!u>},{<!}o!!!>eai!u}!>},<!>},<<,ae>}}},{<!>,<<!>,<>}},{{<}!'!!o!!!>a'!!!>!>,<\"!!!!!!\"!!!>},<!!!!!!!>>,{}},{<!>!><{>}}},{{{<}!!,\">}}}},{{{},{{{{{{<\"i!>},<',!>\"'!>\"{!!\"!>e!>u>},{<'}>}},{{<,i{>},{<{!aa}u!!!>!!!!ia'!>,<!!\">}}},{<\"!\">}},{},{{<!!!><{o!,'}a!!u!>},<}\"{<>},{<a!{i!>!}!!'ea!>,<!!!!!>!!!><i<!!!>},<!>},<>}}},{{{},{{<!>},<!{e!!!!}\"a!!o!!<o,!!!>!>},<>},<>}},{{}},{{<!!,a,}!!a!!!!!!'!\"a!!!!!\"au>,{<<!!!>!!!!!><!>,<a!!u!!!>!!e!!!!',{{o!>>}}}},{{{{<!!!!!>,!!!>,<!>},<!!<!!!!!!,!!>}},{{{<<!!!>\"u<!!!!,!>,<!!,>,<!!a!>,<>},{<!>uou!>>}},{<\",!!!>,!>>,<a'ia!>},<!,{!!!i!>},<>},{<},!>},<!!!>}!>},<!!!>!>,<o!!ou}>}}},{<>,<''!>,<oi\"!!!>},<<i{{!{e>}}},{{{}}}},{{{{<a,i!>,<a!>},<>,{{<!>!!!>}!oe!<uei'},!!!>},<!!!>u>},{{<!{!>e<>}}}},{},{{{},<!!,}!!!!!>,<i!>u}e}!!u\">},{{{<}o'!>',<!>!>},<{,''>},{{<oe}{u<!>!>>}}},{<!!!>!!!iaiu<!!\"!>,<'!>},<u!!a\">}}}},{},{{<{!,i!!!>,\"i!!<\"!!i<!!!>!>,<>},{{},<{!>,<!!!>u!!!!!>!!}uo!!!!a!>,<{\">},{<>,{<<\"\"oa!>},<}ao{i!a!!!>\">}}}},{{{},{<!!<oa!!\"}!>,<!>!!i!!!!!>,<!>},<!!e!>},<o>}},{{{},<!!!!!>!!u>}}},{{<!>!>},<!!!>\">,{{<!o!'\"!!!!'u!!!>,<\"o}>},<\">}},{
{<i<!>},<!'a!!!>!>!>>},<!\"},}i}!!!>>},{{{<i!>}a<>}}}}},{{{{}}},{{<'!>,<!!\"!>,<!!}!!'!!eui!>},<}>},{{{{<\"!>o'e,eu!!i!!!!{!!o!>>}},{<>}},{<{!<!!{u!>},<>}}},{{{{<>},<!!!>uee!<!!>},{{<!!!>iu}!>>}}},{}}},{{{}},{{{{},{},{<o<!>'ea}>,<!>o,a'!>,<u\"i{e}o{>}},{<''{<!!}!!{i<oa>,<!!,\"iio{}>}},{<!>,<{oi!>},<e!!!>uaa!>},<!!!>u!!>,<a>},{}}},{{{{{}}},{<<!>},<,e!>,<{<a>}},{{{{}},{{},<!>!!!>!>!!!>{!>,<!!!>,<a,{{!!!>,<'!>!!{>}}},{{},{{}}},{{{<\"!a!>,<!>},<,io!!!>!>,<u'!>,<!!!>!!!>},<!}!!!>>},{{<!>,<!>,<'u!!!>,u!i!!\"!!!>\"!!\">}}},{<!!!>!!!>!>},<<>}}}}},{{{{{<'au!>,<!!!>},<!>>}},{<u{!e!,u!!a!e!>,<{ii!>},<!!!>!!>,<!}!!!>{'aou{!>},<o\">}},{{{<!!!>>},<\"!>>}},{{<uo!>!!!e>,<!>!!}!>},<o>},{{},{<o>,{{<!!!>},<<au>},{}}}}},{{{<\"!>!!u!>}i!!!>ua!>},<!!o,e!>},<a,u!>>},<,o!!!>},<!>,<!!i>},{<!>,<!''a!!<<o,!>},<{'!>>,{}}}},{{{{<o!>a!>,<{>}},{}},{{<o}'!>ui!!}'e>,{}},{<\"!!i!!,a!!!!ui,>,{<,!>},<e>}},{{<oa!>!>,<\"\"o,\"oo>}}},{{{{},{}},{},{<!>{!>,<!!!>!!!e!>{e!!>,{<{!!!i{\"!>},<uu!>,<>}}},{{<!!{\"!>,<,!!'>}},{<!>},<\"o\"!>,<io\"!!!!,'>}}},{{<e!!!!{oia}'''}!!!>,<>,{<'>}},{{},<!!,!>},<<e!!!!!!!!!>!>,<ua!<a!!i{,!!{'o<>}}}},{{{<<<!!<!>!>,<}\">},{{{{<!>'{>}}}},{{<,e<>},{<i>}}},{{{{<a}}!>},<o,!>!>},<{!>u}!!!>{a<,!>'>,<o<<i>}}},{{{<o}\"!!,>,<!!!!!!<o>},<!ua!<<!>,<<!>},<!!oa{!>o!>,<!!!!!>\"\">},{{{}},<!u}a!!!!>}},{{{<>}}}},{{{{<!o}a!!!>\"!e!>\">},<!{!!!>!>!>,<!!{\"ie!u!!{>},{{{<!,!'!>,<!!!>},<\"!>},<>,{}},{{<>},<o!!a}!}a!>},<a!!!>!!i!>,<!>},<!>,<!>i>}},{{<{i<\"!!!>{!!aa!!!>!!!>ai!>'ui!{!!!>>,{<<!!!>,<}!!!>!>!!<!!!>{!!!>!iiu>}},{{<i!>},<a<\"!>,<aao}<!!!o!>'e>,<!!{\",!>,!>,<!>},<!>},<io\"u!!!>!>,<a{!>},<'>},{{<!'!!oa!>,<!!!><!!!>!!!!!>},<{<'!o},>},{<!u!>,<{<!>},<{}<!!ee!>,<u\">}},{<uu!o!>,<!,!>,<!u<>,{<'!>},<i'!>,<u\"!!!>!!\"\"'!!a!>!>ui'!>>}}}},{<{!>,<e<!!!>,<i!>>,{}}}},{<'!!!>,<>,<}i\"!>,<!!!!!>!!}!!\"!>},<!>!a!!iu\">},{}}},{{{{{{{<{eee!!!>}!>,<!!,!>'>}},{<!>},<u>}},{{<!!{!!{!e{}'!!<!!ui!>}!!'!!,o!>},<}o>,<!\"!>},<!!!>},<!!!>!{<!>},<!!!>,!u'u}o<>}}},{{<\"'}',!a!!!<!!!!!><!!!!!>,<!>},<!!}!>!!!!u{!au>,{{},
{<,,!!!>,<\"!!>}}}},{{<ioe!>!!!>e!i,,>}}},{{{{{{<!!!>},<!!!>e!oa{}!>},<\"}!,,<!>,<e>}},{{<o>,{<!><<!!!>\">,{<!>,<!>,<'e{e!!!>!!!>},iu!!!>\"!i>}}},{{<,!!!>},<uo!!!>oa\"!a!oa!!!>>,<!!<i!u!>},<i\"!>,<\"e!>,<<!!,{a>}}},{{{<!!!>!>,<!>,<{!!!!!>i!>},<<\"!>!!!!>}}}},{},{{{{},{{{<!>'{'!!!>},<!!>},<!!}>},<!!''{{>},{{{{<<!<iue},u}!!!>!>},<!!!>''>}},<i!!!!!!\"!>!>,!!!!!>},<,\"}!e!!!!!>>},{{}}}}},{{{{{{}},<\"i}<!!!!o,\"}!>>},{{<'a!>},<!!e!>},<>,{<e!>,<!>i,!>},<'<!>},<!>>}},<,!>,<o!>,<>}}},{{},{}}},{}},{{{}},{{},<{<o!>!>i\"'!>!!!!<!>,<!<{!e\">}}},{{<\"!!>,{<!>},<u'!>}{!>},<}!!{!>,<!,>}},{<!o<a<}!!!>>,<!!!>i!o!!!!}\"!!!!u{!>},<'!>!<!>},<>},{{<!!o}!>},<o!{}!!i!!!>!>,<o!>,<!>},<{{a>}}},{{{<u!!o!!!>!!!a!i!>,<!>},<<!!!!<}!!ua!>>}}}},{{},{{},{<i\",}>}}},{{{{<,!!!>!!!!\"}!>},<!>,<!!{!>},<}>},<!>},<!>oeo,!!{o<e>},{{},{<!{!>},<!!!!!>i,>}},{{{{<a!>,<o{ii\"!!i',o!>,<>}},{}},{{{}},<!!!>!>,<!>,<{>}}},{{{},{{{{}},{<!!!'!<!>},<>}},{}}}},{<<!u!!!!!!!!!>!>!!!>!!eiu>,<!>,<<!>},<,!!!>'{'!!\"ao}u!!!>'o>}}},{{<u\"!>},<!!aae<!!!>!!!>},<'>,{<!!o,!>a!!!>,>}}}},{{{{<!>,<\">},{{{},{<>,<o,!\"iei!>!>},<}!!!>!!!!!!!>!>,<>}}},{<!!!>!!!eao!>!!!>i<{o!>,<!!!>},<e>}},{{},{{{}},{{{},{<!!!>!!!>!o!!eu!!<<e<>}},{{{<\"!!!>!>\"!e!>e\"!!eu!>},<e!>},<'>},<!>},<<oa!>{!>},<!!!!i\"i}}!!'<!!!>},<>}},{{{}}}},{{},{{{{<\"iu!!!aoo,!\">,<o!!!>!!<!>!>,<oi!ao!!!>!>,<!!!>,,>},{{<}!>},<!>!!!>},<i'!!!,\">}},{{<!!!>a!!{,!!!\"!>,<\"i<iui!!!>!!!a}!''>}}},{},{{{{},{<iii!>!<,<!e!>}}\"o!!!!!>},<<}!!!>',>}},{<o{!>,<!>!u!>{!}!!e>}},{{<!>eo>}}}}},{{},{<e!>,<a!!!e{a!!e!>!i,'!!!>e\"aa!>,<>}}}},{{{},{{}}},{},{{}}},{}},{{{<!!!>!,u}!>uo!>},<,\"<!>},<a,!>},<>},{{}}},{{{},<<!>},<a{{!>,<e!!<!}{!!{!!!>},<'<>}},{{{},{{<a{o!!!!!>',,!\"!>eia!>},<!>,<{<,>},<e!>,<>},{{<!,>},{<a!!!>,<,'i!!!!,!!!>i!<!!!!<,!>},<,!>},<>}}},{{<!!i!>!!}o!>},<!!!>!!!>e,{!>},<ae!!'>,{{<!>},<>},<!!!>eea!!{!>},<!><!!!>!!!>,<>}},{<i{!!!>!>},<o,,iu!!!>!>,uee'>},{{<o!!<e!>i!!e!!!>>},<e\"io!>},<!a!>,<{!>},<!>e!>eu!!!!!!!!!>,<o!>},<'!!>}},{{{<!!!>'ooeaa!>,<{>}}}}},{{},{<!<{uo!!!!!{!>!uo!!
o<ue\"!!ia,'>}}},{{{{<>}}},{{{{}},{{{<!\"!!<!!!>\">},{}},{{<a!!}u\"i!!!>},<<>}},{{}}}},{{{{{{{{<!!i\"!>},<u!!!a{i!!\">}}},{<!!!>o!!o<!<ui!>!!{o!!u{>}}},{{},{<,!!ueua,{o{!!!>,<<>}},{}},{{<>},<}'!>,<!!ui{o{!!!,!!,ue!!eu!!!>>}}}}},{{},{{{<eo!e!>!!!>!>!>,<i!!a!!!>},<!<!!e!!u,!>,<o,>,{<'!!!>,<<a!!!>,<!>},<o!!{}!!!!'!>,<o}<>}}},{{{<}!!,!!!!!>!>},<u{a!!{o,o>}},{{<>},{<!!i{!!!>!>u>}},{{},{{<,\"!!!>!!{>,<i!>},<<a>},{<!!!>u}'!!e!\"{''o>}},{<!u<oa!!<>,<uu>}}},{{{<}>,{<e!!!>!>},<!>,<!!!{}!!e!>>}},{<u,>},{<a!}\"!,!,!!!,uuiiu>,<!>,!>},<u{}<\"!>,<'!!'!>},<e\"}ui'>}},{{{{{{{<ua!>{iu!!',!>},<!e'e!>,<>}},{{<o!>,>}}}},{<!>!>,<!!!!}e>}}},{}},{{{<<!!<!>},<}{!!auo!!!!!><e!!e'<!!!<>},<!><a'}',eaei!>>},{{},{},{{{<!!euui!!!!!>},<'{oi>}},<o!!!>\"!>!>},<!><u!>,<u!>>}},{{},{{{}}},{{{<o{!u!!u!>,<!>},<!!!!!>!>,<!>},<i!>,<i!>>},<!>!>u!!a>}}}},{}}},{{{{<!>e}'\"oi!!!>a\"!>,<ooo!!}o>},{<'}>},{<\"!>},<<u,{{'ee'e>,<!!!>{!u!i!e!!uee!i!!a\"u{>}},{<{{!,!u!!,!!<\"!!{>,{}}}}},{{{{<e}!!!>!!!>,'!!,>},<>},{{{}}}}}},{{{<>},{<>,<e!>,<!!!>},<a!!'u!!!!>},{<{>}},{{{<eei!!}!>},<>,<,''e{!!{i!!'>},{{<!!!i'!!a!!!>!e}!>,<u!>,<{>}},{<!!!>},<!>!!,a!!'!!}e'!e!>},<!!i!!'u{u!>,<u>,{<,!!a,aoo!>!!,{!>},<i!!!>}!!,o!'>,<>}}},{{},<}u!!a>},{{{{{<'<a!>,<!>},<!>},<!!!!!!!!'},a!!>}}},{{<!}{!>o!'o!>,<!!!><!!ou!>!>},<!e!>!>,<>}}}}},{},{{{<!!!>o!>u!!!>!>>},<>},{{<,,>},<'{ao!i,!!!>e!ua\"{!\"!!!>>},{<\"!>},<a!!!>e\",'!!!>},<'oo!!!>!>>}}},{{{<o''},{!!!>>,{<{!!!!!>}!!!>a!<}!!ea!>!!>,{<!{!!!u!!a<!>,<,!>},<!>,<e!!a>}}}},{{{<oe}'\"e'\"a,!!!>i!!u,!i!i>},{{<!!!>}>},<a{!a!>},<!>,<!!}!>,<a!>a!>>}},{{},<!>,<>},{{<!!!ae!!!>i'o!!\"\">},{<\">}}},{{<!!ia!>},<}!!!>\"i!>!!!!u!!!>\"!{u'!!e'!!\"!!>,<oei!!!><<!!!!i'}i>},{<\"!>!>,<,!!>},{}},{{{{}}},{{},{<,!!!>u>}}}}},{{{{},<}!>ia,!>!>},<i{{}!>},<>},{{<>}}},{{<!!,!!!!!>'<!!ee!>},<,>},{}},{{{{{{<>},{<!>,<!!a>}},{{{<,o!!{!>\"i!>!<!!}{!!'!!\"<!>},<,>}}}}},{{<i}!>},<!>'!!e\"\">,{<},!!u!>,<!>,<\"'!!!>!>>}}},{{<!>!!!>!!!i!o!!{!!e!!\"u!iaa!>,<}o,}ao>}}},{{{<!!u{o>,<!}'o!!!>o}a!!}ui!!!!!>!>},<!>>},{<!!ao>,{<o>}}},{{
{},{<!>},<!'!>},<,,!\"'!>,<>}},{{<\"!>},<!ou>,<o!'!,!>!{,!}e!!!u!}!o>},{<!<!!!>,<{{o<!>},<'>}},{{}}},{{}}},{{},{},{<{!!!>,<i!>},<'!!!!o!>},<o{!!!\"!!!>}!i>,{}}},{{<}!!!>},<!e'a>,{<!!!!\"ea{'i!}<<ui!!!!>}},{{<},!>},<!!!!!>,<'!!!!e!>!>,<\"<!>},<!>},<>,{<<}<>,{}}},{{},<!u,>}},{{<{<!!!>},<!>},<!!!><!>,<!eo,!>},<u,'!>,>},{<>}}}},{{},{{<a\"!!\"!>!!!>o!!!><'{'!!!!!!},i'\"uu>},{{{},<o>},<'<!!!>!!!i}!>}!!!>!>u!\"!!!!}!!!>!\"!>'!>,<>}},{{<i!!!!!>!e!!!!a'!!!!}o'}<!>,<e!,>,<!>,<<ooui!!,!!!!}!!<'o!>>},<o<!!<\"{!o!!a\"!!!!o{<<a!!!!a!!!!!>>}}},{}}},{{{}},{{{},{{<!!!>},<!>},<,!>},<!!!>{!!{i!>},<!>},<!''<!>,<>},<!>},<}!!!>u!i\"<!\"e!>},<>},{{<e''<o!!!>,}{!>},<e>},{{<!!!>>}}}}},{{{<!!o}uoei>},{{<,oi\"a>}}},{{<!!a<i!>{\"{e!>,<!>},<}<,e>}},{<!>,<i!!oio!>,<\"!,o!!!!'!!e',>}},{{<auie\"!!!>\"u{>,{<}!},}!>,<a!!!!!!e{!>!!,,e}<,{!>>,{}}}}},{{{{<\"!>},<\"a!>},<!>!!!!a}\"<}!!!>!!!!!!\"!!!!!>,>},<u>},{{<!!!>,<!{e}\"!!u!!!\"!!!!>}}},{{{<>},{<!!!>iu!!!!{!>},<>}},{{{<!>},<i>,{{<aa}i>}}},{<!!,o!!!>{!!!>,>,<<u!>,<<!!i<i!!!>ui,!!!!!>,<!>},<}!!!>>}},{{},{{{{},{<ii!>e!>e>}},{<!!{<{a!'{!>!o!>},<!!u}!!<{!{>,{<u!!{o>}},{{{{<!>},<!!!>i!!o!>},<!a}i<,a!>!>},<a!'!!!!!>'>}},{}}}},{{<!>},<!>},<!>},<!!\"{<\"!}i>},{{}}}}},{{{{},{<'ua<!!!!o!!!!}}}!>,<!!\"u>}},{{<!!!\"!!!!!!!>},<!!}!!i!>},<!a{!<a'i!!!e',!>,<>},{<{e!!!>!!!>},<{{>}},{{<}e!!}!!!>},<>,{<!!,>,{<>}}}}},{{{},<ao!!u{,oo'!!!!!>!!!>{'!!!>ia!>,<!u,!>},<>},{<!>,<i\"!!!>i>}},{{<!!a\"!!<,!'{!<\"o,>},<!!i!,}!>},<!!!!!!!!{>}},{{{<'u\"o!!!>a!!!>,<!!e{{{!>},<,!}!,{>,<!!!!}\"\"o!!!>!>},<!!!>!!!>a!>,<u!>},<o\"!!}ua!!>}},{},{<!>},<<{}!>{!o\"!>,<!!!!!>!>,<!>},<>}}}},{{{}},{{{<>,{{<}'i!!o}!!{!!!>}!!}<ea!>!>},<{>}}}},{{<u''{!>,<i!>!,!>},<{'o<!!!><!>!!!,>,{}}}}},{{{<!!\">}},{{<e\">,<},!!!!!>uo}}i!>,<!!!}o!>!!o!>,<,!!\">},{<!>,<e<\"{>}},{{<a!><!!!!i!>,<'u!!!>!,u{\"\"!!{}!!!>,<>,{{{{},{<'oea!e!!,oe!>},<o!!!!!'>}},<u!!u!!i}!>,<!!\"a'eu!>,<>},{}}},{<i!i{o!!<ueo{au!>!>},<!}o>}}}}},{{{},{{{{<!>},<,!!!>},<\"o!!!>a!!!>!!,!!!!!>e>,{<!>,<a{aa!!!!!>>}},{{{<>}},{{{},<!!!>{!!\"\"!>,<o!
!,'e!>o!!!>!!i!>,<>}}},{{{<}ua!>},<!!!>},<!!!>ee}a>}},{<u,\",u'!>},<!>,<>}}},{{{}}},{{{<i!!!!!>e\">}},{{{{{}}},{{<i!!!>!{!!!>>}},{{{{<a{!!!>!!!!!!e!>,<\"!!!>},<!>,<i>},<o}'!>,<!>,<e>},{{<!!!>},u<u!!<!>,<i<,{',!ei!!,>},{<ea'!>},<!!!!!!\"e}{!!a,aee,!>,<>}}},{<!!!}'!}u<!>!!'u,aao!>{!>,<>,<'\"'!{!>!!u!!!>ia\"!!!>u!>},<oo>},{{{}}}}},{{<!>,<{!!!>},<!>},<\"}a!!!>!\">},<{!!}u}!!!>>}},{{<i}!!!'e!'!>},<a,{ao'}!!!>!>},<!>},<!>},<>},{<!!!>>}}}},{{{{},<!!!>,<!!!>{},!>},<!!<'<e}!!}!>,<!!!>u}{>},{{},{<>},{}},{{},{{},{}}}}},{{{},{{{},{{{<!>\"!>},!><u!<!>},<!i>},{<!!,u!>!>!i!>},<a>}},{{<'!>,<!>},<!i!!!!!>\"!>\"ou>},<!>!!!!!!!!ae!>,<'!>},<!!'o!>,<,!>},<{!>,<!>,!>},<>},{{<!!!>!!{<!}!>,<!>i\"!>!>,<oo>}}}},{{{<>},<'!>a>},{<!!u!><!!i!!!>>,{{{<!!!!!>!>,<!><{oe<e>},{<a!!!>ea{!>!>,<>}}}},{{<!>,o'!!i\"<\"!>,!>,<i!!>}}},{{{<!>!!>}}}},{{{<o!!!>!>,<a!!o!!!!i!!!!!>!!eu>}}},{{<!>},<!>!>},<}o}>},{{<}o,!e!!!>,<!>,<a!!<!!''a{!!''o>,{<a!!o}}}u!!!>>}},<\"\"u!>,<>},{}}},{{{{{{<<>},{<!!!!u'!>!!e}'\"!>!!!>i{!>},<>}},{}},<<u!!'i!!!>>}},{{{},{{<<!>!i<!!ei<e!>},<a'>},{<}}!>!!!>!{!<<{{e!!!>!!,e\"!!!!{,>}}},<{!!!><'>}},{{{{<!!ui!>o<a>},<i!>},<!!',a!>},<{!>,<!!ui!!ua!>},<>},{{<!>},<o{>,{<!>,<,!>>}},{<!'!!!>!>},<e!>,<{<'!'<!!i!><a!'!!}!!{!>},<>}},{{},{{{<!>},<i!>},<!>,<}!,!>!!<!>uu!!!!u!>},<!e!,u>},<e!aaooi!!!!!>o}!!!>!>,'i}>},<<}a>},{{<>},{}}}}},{{<}}<!!'{\"u,!>io<i!>,<>},{<!>'>}}},{{},{{{},{<a!>,<{'!!!!!>e<!!!!oe}!>},<>}},{{{<!!!><o{!!{!>\"u!>},<}u!!e>}}},{{{{{},<!!!!\"a'!u<!!<!!!>o!>o!!u{{>},{}},{<u,i!!!>,ai!o!>,<uii!>,<!!>,{<,u!!a!!!!uu<e!>},<<i>}},{<,!!{!!i<!!o>,<!>a!!''ue!!\"!>i,!}!>,<,a!<>}}}},{{<!!!!!!'!!!>!!\"u!>,<e'!>,<>,{}},{{<,,u\">},{{<!!e!>},<e!>},<}}o!>},<<>,{{{<!>,<!>,<\"!i!!!>a!!\"o<{>,{<!!!>ueo!\">}},{<eio!>},<!!!>!!!>>,{}}},{<!aa!!!>a!'}a!>e!!,!>,<>}}},<!!!<a!>,<o>}},{<}a!>,<!!!i!>},<!!!>\"<<>,{}}},{{<{{a\"!}{>},{{},{<!!!!!>!>!!!>i!>,<!>ea,>,{<!>,<\"i!>a\"!!<,,ii'>}}},{{<a!!!>\"e!!<!>},<i>,{{{},{{<'!>},<!!!>!!<,<!!eao,,'!>},<!>},<!>},<a\">}}}}},{<!!!>!>},<>}}}}},{{{{<i{'!ie!>},<!>},<a{!!>}},{
<!>,<<\"!!}!!!{!!<<{!'e!!!<a!!!i!!!>!!!!!>>,{{<,!>},<a{!>ia!!\"o!!!!!>}>}}}},{{<\"!>,<a!!!u!!!>>,<!!!>!>,<{!{!!i!!!>,<!!'a!>,<\"i},!>!>,!!o>},{<!>},<{!!i!>,<,,!!!>,<!!!!'iao!!!>>},{<!>!>!a{'!!!>>,<a!>,<auu<>}},{{{{{},<!>!e!!!!ia}{!>},<{o!>,<!>,<{<\"e>},<>},{}},{{<i\"<o<!!\"!!!>},<!!!>,{!!e<!!!!}>},{<ai!>},<i!>},<u!e!>,<!>},<!>,<!>>}}},{{{{<'>}}},{<!!!>},<{e!!<ou!!!>!><o!>,<>,{{{},<\"!!!>!>},<\"a>},<!>}e}}!>!>o!>},<a{e{}>}},{{<>,{<,o{!\"!!!>eu!>},<\"a>}}}}},{{{{{<!!!!i!>},<o}o!>},<<!!!>u!!!>!'!!u'!>,<<!>},<u\">,{}},<}!!!!!>!!!><!!!\">},{<!o!>},<,!>},<}!!ia,!!!a!!\"!!'!!!>ea>,{{{<!>,<\"ea\"!!!>io!!!!!><!!!>},<>}}}},{<!'!!<\"!!!>},<o!!!>i,<e}}ei!!}!!!>,<}>,<<}o!>},<!>},<>}},{},{{},{{<!!!>!>},<eo!!{!>,!>,<e>,<!!}!>},<a!>,<!!!!!,!!<!!u>}},{{<{!>},<o{<{'>}}},{{{<oo\"!>,<>}},{<<!!uou<>,{}}}},{{},{{},{{<u{a!!'!>,<\"a},'<\"o>}}}}}},{{{{{<>},{<}!!a,}<!!<\"}u!!!>!!!>!>,<\"!!!>u!!!>>,<},!!!>oe!>},<{u!>,<'>}},{<!!e\"e{!!{<!!!>},<!e!>,<!>,<!>,<e\"!!!!>}},{{{{{<e!>},<!!!!!>,<!>},<o,}!{{'>,{{<!!uua!>>,<<iou,!>,<!!!>,o!>,<eo{!e<'!!!>>}}},{<!!!!!>,<!>,<>}},{{{<!!<ou!>},<!!!>,>},<}!!i!!!>!!!>},<!!iii!>>},{{{<a}eo''!o\"<!>},<>},{<iouii!>},<u!>},<}!!!><u'!!u!!!>,<u<>}},{{<!>,<!>,<>},<},!!!>ee!>},<!>},<!!>},{}}}},{{{<<''o!>,<}>,{{<!>!>},<!>>}}},{{{<{}!>},<!!a!>{a!!}<<'>},{}}},{{}}},{{{{},{{<!!!>{!!,},{>,{<!!'!!'!>,<ai!e!>,<!!!>>}}},{<!!!>o>}},{{<i!!a}u<!!eou!uo}!>,<!!\"!>,<i>}},{<a'!>},<!>,<!a!!!>!!!>u!!!>\"!!u>,{}}},{{<!!o!>},<a,}>,{<,!i!!!!!!uoi>}},{<>,{<}{,u!!<!!!!\"e'!i!o\"u>}}},{<>,{<!>,<ee}{{!!!>!>!!>}}}},{{}}}},{{<,o>,{<>}}},{{{{{{},{{{<!!\"i!!!>,<e}u>}},{<!>,<>}}},<<!>},<!!}!>},<u!'!>,<!,u>},{{},<\"!!!>,<i}!}!i!'!!!\"!!<!>!!!!}<!!>}},{},{<!!!>!>>,<!!!>{aa!!!!a!}e,\"!>,<}<<!!!a!>},<e<>}},{{},{}}}}}},{{},{{{{{{{<u,\"<e!!>}}},{<>}},<!>,<e!>,<!>},<!>},<a\"eaea!!oa}!!<>},{{<'!>,<!>},<!!ou!>},<<>}},{{<!!!>u!}!>},<au!\"!>!}>},{<i,<o!!!>u>}}}}},{{{{},{<}u\"i!!!>!!a!!u!!a{u'!!e<>},{{<>}}}},{{{{{}}},{<>,<!>{!i,}ie!!u!>,<o}!>,<!>,<>},{{{},{}},{<e'!!u\"ioi!!e!!o!!>}}},{{<\"}!!\"!!!>\"\"\"!>,
<>,{<\"!!!>},<!'!!o!>},<{'a}!!a!!!>eo{!!!!!>!>,<,>}},{<a!!\"a{>,{}},{<!>,<i<\"!!u}<'iu>,<!>!>eo!!!>\",<!!o!!e>}},{{{{{<>}}},{<!!>,<!!,!!!>},<oo!>,<!,!!!>a!>>}},{<!>,<!>>}},{{{{},{<!!e!>!>!>!>},<u!>,<<!>,<!>,<!!!!!>!u>}},{{{{<,o!>a>,<!!!>,}!!i>},{<{>,<\"',e!!!>},<ie!!!>{!>},<!u!>,<}u!>},<a'>},{{},{<>,{}}}},{{<\"!>,<e!u!!o!>u!>},<>},{{{<{!>!!\"i!>,<<\"!>i!!\"a}}<!!!>\"i>}},{{<!!}<!>},<i!>,<u!>},<uo!!!!!!!>'<>,{{}}},{{}}},{<u!>},<}a>,{{<o<eiu!>},<!>\"u>}}}},{<o<o<<a!!u>,<<!>,<>}},{{{{<!\"!!!>o!a<}!>\">},{}},{{},{{<!!o\"!>!!!!{,{\"!!}'!!<!!!>,<>}}}}},{{{{},<!,!>!!!!i!!a}o!!u!!'!>,<{!,ouo,e>},{<!>},<!>,<!ua!!!!!>,<!!'e!>}!>!!!><>,{{<!{!e!!'!!!>o!!!>{<!!!>},<>},{<}{o!<!!!>,a!>!!!>>}}},{{<!'ae{!>},<>},<\"ou\"!>},<!!'{!!!>>}},{{<,>}},{<i!!!>!!!!!>},<!!o!!}<>}}},{{{<}o!!!>},<,}a!}e!!!>'>},{}},{<{\"{!!{!!,>},{{{<!!e,!!,a!>,<>,{}},{<!>},<a{\"ie'<ia!>>,{<!!!>,<\"e{!>,<!>,<e!<i!>,<<!>,<!!}>}},{<oa!!!!'!!!>e{<!!!e'<!!<!>},<!!u!>},<oa\"'o>,{}}},{}}},{{{<!>,<!!\"}oei!!'!>{<'{!!}e<!>},<!}>,<!!a!<<\"u!!a!>,<>},{{},{<!!}a>}},{{},{<a!!!>'!>},<!!!>!!!>u<{!u>},{{{}},{{},{{},{<<!>!>>}}}}}},{{{{<\"u!>},<!<!>>,{}}},{{<\"!>\",,>,{<!!!>'u!>'<{u!>u>}},{{<e!!i'i!!o,!!!>,<i!>,>}},{{{<!!!>!>,<!>},<u}'!!,{!!<!!!>!>,<!>>},{<{,\"!!\"'i>}}}},{{{{{},{<!ee!>i,<!>},<,>}},{}},{{<!!!!,!>>},{{<!!!u,!>,<!}ao{>},<!,!>o<e!>!>},<!'!>},<!!!>!>},<i{{!'>}},{<<\"oie!>},<!>!!u<oo,!!,o>,{}}},{<!!''!>'!!!>\"o,u}>},{{{{}},{{<!>},<oi!>},<<!!!>!!!<\"!>,<}!!!>>},{<!!!!a{>,{<{e!!!>'!!!>'!!e{ue'!!o!!!!!>!!!!!>!>,<<>}}}},{<{!!!>u!!!!u!!!!'>,<}{i!!,o\">},{<\"ea\"\"!>e}!>,<<a,!!!!!>},<\">}}},{{{{<>},{}}}}},{{{<u!!!>!!,!>},<''ue,>}},{<!>'{o!>!!!!!>!!\">,<<}o!o{!>},<ue>},{{<u!>}<!>,<!!!!!>>},<a!>},<{'o!!,,\"'<!e!!!>>}}},{}},{{{<a{!!!>!>},<{!!>}},{<!!,!<a\"!>!!!>},<!!,!!>,{}},{{{<!>},<e!!!!!>uu>}},{{{<,!!!>!!}\"!>},<{'!>!>a}!!a!!'a!>{>}},{{{{}},{{}}},<<!!ooa!'a!!o{>},{{<'>},<i,<!}'{\"!!!>e<!>},<e!!!>!!<<>}},{{<,e{!!!>a!!<!!,!>},<\"!!}iu!!}!>i<>}}}}}},{{{},{{{},{{{<oie>}}}}},{{<!!e!>\"!>i!!>,<e!!!>}io!!!>!>,<,<!>},<!>u!!!>>}}},{{
{{<a<}'!>o'!>i!!a!,!!>}},<e!'>}},{{{{{{<,!>,<!\"!!<!!uo!i}!!''!>,<,'>}}}},{{<'u'!!'\"!aou{e}!!!>,<<>},<{a!>,<e!}<}!>},<!>'!o}>},{}},{{{<ao!!!>e>},{{<>},{}}},{{{}},<,!>,<>},{{{<\"'{i,'!!!>,<\"!>,<u!!!i!>,<!ioo!!!!!!!>!i>}},<a{,}!!!>!!<u{!!!>aui!!}>}}}},{{<!!!>\"!!{<>},{{<>},<\"!>},<{}{e<o}e!!\"\"au,'!>},<!!!>>},{{<!>},<},>},{<u!}a!>!>!!\"iu!!a\"u}>}}},{{{{<!>,<!>},<'a!>,<e!>>},{<!!<>,<!>,<!!!>,<oi'{,>}},{<{\"!<!\"!>},<!a'{!!u!>},<,u,!>,<!>},<!!}}>}}}}},{{{{<u<i!>},<!!!>}\"!>!!i\"e!!!!>}}}},{{{{<o'!>!!<!!<>},{<a!><!>,<!!<e<!!>}}},{{{{<'{i>}},{<e{{!i!!iea>}}},{{}}},{{{}},{{{{{{},{<!{'!>},<}!!!!e{!}<!>,<u}o!>i>}},<}{>}},{{<',!>},<!\"!>},<!!,<!,ee!>e!e}>}}},{{<'e!!'!>}}>,<!!'!>,<i!!e!!!>!}',!!u{iei!<!!>}},{{}},{{{{<!!e,{u!<}!!!'}a>},<{!>},<''i}{!u!!!>!>,<a>}}}}}},{{{{{{{<\"!!!!}u\"o!>},<u!!u!!!>!>},<!!}!>},<!!<e>}},{<e{!!!!o,!>!!!>},<!<!>,<<!>},<{!>},<>}}},{{<'e!!a'!>},<i}>}},{<u<i,>,<,o,}{!!\">}},{},{{{{{<!!!>>,{<!!,}e>}},{<!>},<!>!>},<},!!!!}i!!,!>},<!>ao!!e'<}>}},{{{<}>},<!!!!a!!!>u\">}}},{{<!>},<<ia>,<!>},<o!>!!!>},<oe!>},<a,i>},{}},{{{{},{{{<,'{'}!!!!e!!{>},{<,!a'a!,!!!>>}},<e{!!,\",e!!!!<i!>!!!>,<!!<!>}!!}'>},{<!!i!>,<,!e<!,a>,{<>}}},{{}},{{<>}}},{{<a>},{}},{{{{<\"i{}'!>},<{<i!>,<!>\"{!!!>!!oe{{o>}},{{},{<ae}!>!>!!!>!<}\"!>>}}}},{{{},{<<!>},<{>}},{<a}!}!>,<!!!>},<,{i>,<!a{!>,<!>,<},!!!!!!!>\"ua!!}>}}}}}},{{},{{{},{{{<i>},{<!!a!!'!!!>\"!!!>o{'!!u\"!!\"!!!!!!',>}},{<i}!{e!>},<!!eu!>,!!!>ea>,{}},{{{<,'{\"e,!>},<!o'<,i!!'!!{\"!!!>>},{<!>},<}{!>,<a!>},<!>o!'!>!<a!!!>!!!>{uo>}},<iou!!!>,<e},e\"i!!!>!}'!!{!u\"a>}},{{<ee!oeae}!!!>aue\"!>},<>,<e>},{<o!!o!!!>!!!!!>>},{{<!!!ui,}>}}}},{{{{<!<!!!>,<!>},<{i!!!>i>}},{{<'a}>},<{i\"!!a!a\"!!!!!>'!>,<<!!!!!>!>},<{>},{}},{{{<!!!!!>\"!>,<'!!}e\"a\"a!>,<!<u'!>},<\"}>,{<e!>},<!>!!\"\",!!<!!}e!!u>}},{{{<!>!!!>!>},<,!,!!<i\"{\"}<\"}!!!>!'!a!!<>}}},{{<aae!!!>,<'!<>},{}}},{{{{{<!>},<!!!>>}},{}}},{{<!u!!,!!!>ee<!!!!<!!{!>},<io!>,<>},{<}}iu!>},<e!!a,}<!>i!!!>!>!!>}}},{{{{}},{<a<e<!!!!!>,<!}!>},<!>},<u!!u!>{}'>}}},{{<u!!!ee'{\"e!>!>,<e!!'!!!>!
>,<a!!,\",,!!,>,{<u''\"o!>!>!>oi{!!>}},{{<<i{>},{}},{{{}}}}},{{{},{{<'\"!!>},{<i>}},{{<!!!!!>'\"!!!>i'<oe!e!!iu'!!!!!!!>>}}}},{{{{<\"}!>i!!o!!!>u,!>!i!!!>ii>},<!!,!>,<oai!io!!i!>},<!!!<e<e!!\"!!!>,<>},{{<!>},<<!>},<!!i!!a!\"o!!e!>u!<!'!!!>,<<>},<e!u!!!>aiu<!>!>!!!!e!!!>,<!!'e!>!!>}},{{<!><!>,<!>},<!>},<,u<,}!>},<!!!!e,o'>,{{},{<a>}}},{}},{{{{<\",<!!\"o!>>},{<o\"!!<{}',!><>}},{<!!!>'i!!!>,<i>}}}}},{{{}},{}},{{{<a}ei{o!>,<!!!!ui!'!<{!>,<!>},<!>i>},{<<!!!>,<}}ao!>},<!!!>},<!!!>>}},{{{<!{u!!!!!>!}!!'\"!>,<<\"!>,<!!!>'>},{}},{{{<!,!eu!>'u!!{!!!>e}'o!e!!!!!>o>},{<\"!!!>}!<i!!!>!'!!!uooeo!a!\"!iio!!>}},<o!!!{e\"}!!!>!u!\"{,!>!>!>},<u!'>},{<!>},<a!a}!!!>{!i!!!!auo<i>}},{<\"'!>},<{e!!!>!!uu!>,<!!a>,{<o\"o!>},<!>,<!\"}\"o!{'{,!>},<>}}}},{{{<!!{\"\"\"!!,>},<!i<<>},{{<!!o\"!e>}},{{{<e}!!!}>}},{<<!!!>!>,<!>,<!u!!!!!!,u!!!!!!!!!!!>,<'!>\"!!!>{>}}}},{}}}"
#inputText = "{{{{{{<!>!>,<o!>},<a,\"i!!!>i!!,!>,<<e<i<<>,{{{<!>}"
def value(string):
    """Return the total score of all '{...}' groups in *string*.

    Each group scores its nesting depth (an outermost group scores 1, a
    group directly inside it scores 2, and so on).  *string* is assumed
    to be already cleaned of garbage ('<...>' sections).

    Single pass tracking the current depth — the original re-counted the
    entire prefix for every '}', which is O(n^2); this is O(n) and gives
    identical results (including on unbalanced input, where the running
    depth matches the prefix count exactly).
    """
    totalPoints = 0
    depth = 0
    for ch in string:
        if ch == '{':
            depth += 1
        elif ch == '}':
            # A closing brace scores the depth of the group it closes.
            totalPoints += depth
            depth -= 1
    return totalPoints
# Strip garbage from the puzzle input, then report the garbage size and
# the score of the remaining group structure.  Garbage runs from '<' to
# the next unescaped '>'; inside garbage, '!' cancels (skips) the very
# next character, and every other character counts toward the total.
clean_chars = []
in_garbage = False
garbage_count = 0
pos = 0
while pos < len(inputText):
    ch = inputText[pos]
    if in_garbage:
        if ch == '!':
            pos += 1  # cancel: the character after '!' is skipped entirely
        elif ch == '>':
            in_garbage = False  # end of this garbage section
        else:
            garbage_count += 1  # counted garbage character
    elif ch == '<':
        in_garbage = True  # start of garbage; the '<' itself is not counted
    else:
        clean_chars.append(ch)  # kept in the cleaned stream
    pos += 1
cleaned = "".join(clean_chars)
print(cleaned)
print("It has", garbage_count, "in the trash.")
print("Worth", value(cleaned), "points.")
| 569.619048
| 22,852
| 0.12105
| 1,977
| 23,924
| 1.464846
| 0.075873
| 0.041436
| 0.015539
| 0.006906
| 0.139848
| 0.023826
| 0.010359
| 0
| 0
| 0
| 0
| 0.000467
| 0.016218
| 23,924
| 41
| 22,853
| 583.512195
| 0.122578
| 0.006562
| 0
| 0.205882
| 0
| 0.823529
| 0.597273
| 0.545554
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0
| 0
| 0.058824
| 0.088235
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d0fa599d95c8f28cb21944109ce73c84b51117c1
| 144
|
py
|
Python
|
test/util.py
|
mcfunley/clippingsbot
|
2954d5b5aa854b57d062a98e2133d258f9fd86c7
|
[
"MIT"
] | 1
|
2019-02-06T16:52:05.000Z
|
2019-02-06T16:52:05.000Z
|
test/util.py
|
mcfunley/clippingsbot
|
2954d5b5aa854b57d062a98e2133d258f9fd86c7
|
[
"MIT"
] | null | null | null |
test/util.py
|
mcfunley/clippingsbot
|
2954d5b5aa854b57d062a98e2133d258f9fd86c7
|
[
"MIT"
] | null | null | null |
import os
from unittest.mock import Mock, patch
def patch_env(settings):
    """Return a patcher that redirects ``os.getenv`` to *settings*.

    While the returned patch is active, ``os.getenv(key)`` performs
    ``settings.get(key)`` instead of reading the real environment, so
    missing keys yield ``None`` just like the real ``os.getenv``.
    """
    fake_getenv = Mock(side_effect=settings.get)
    return patch.object(os, 'getenv', fake_getenv)
| 24
| 69
| 0.763889
| 22
| 144
| 4.909091
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 144
| 5
| 70
| 28.8
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
ef829e6ebf0c9b9a46ccbdfb3f3ad348525b3fd2
| 221
|
py
|
Python
|
features/birthday/channel_birthday.py
|
DAgostinateur/Woh-Bot-2.0
|
4e99d97218a59156bacb1669cc1cb6c8807dd5b1
|
[
"MIT"
] | null | null | null |
features/birthday/channel_birthday.py
|
DAgostinateur/Woh-Bot-2.0
|
4e99d97218a59156bacb1669cc1cb6c8807dd5b1
|
[
"MIT"
] | null | null | null |
features/birthday/channel_birthday.py
|
DAgostinateur/Woh-Bot-2.0
|
4e99d97218a59156bacb1669cc1cb6c8807dd5b1
|
[
"MIT"
] | null | null | null |
class ChannelBirthday:
    """A birthday-announcement channel registered for a server.

    Equality is based on ``server_id`` only — ``channel_id`` is
    deliberately ignored, presumably so at most one registration exists
    per server (TODO confirm against calling code).
    """

    def __init__(self, channel_id, server_id):
        self.channel_id = channel_id  # channel the birthday messages go to
        self.server_id = server_id    # owning server; the identity key

    def __eq__(self, other):
        # The original raised AttributeError when compared against an
        # unrelated type; returning NotImplemented lets Python fall back
        # to its default handling (identity / reflected comparison).
        if not isinstance(other, ChannelBirthday):
            return NotImplemented
        return self.server_id == other.server_id

    def __hash__(self):
        # Defining __eq__ alone sets __hash__ to None (unhashable);
        # restore hashing consistent with server_id-based equality.
        return hash(self.server_id)
| 27.625
| 48
| 0.683258
| 30
| 221
| 4.5
| 0.366667
| 0.296296
| 0.192593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 221
| 7
| 49
| 31.571429
| 0.798817
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
efc307aeb48bcf00e68af067976fc7bc722f9559
| 40
|
py
|
Python
|
ImageDenoising/network/__init__.py
|
jiunbae/ITE4053
|
873d53493b7588f67406e0e6ed0e74e5e3f957bc
|
[
"MIT"
] | 5
|
2019-06-20T09:54:04.000Z
|
2021-06-15T04:22:49.000Z
|
ImageDenoising/network/__init__.py
|
jiunbae/ITE4053
|
873d53493b7588f67406e0e6ed0e74e5e3f957bc
|
[
"MIT"
] | null | null | null |
ImageDenoising/network/__init__.py
|
jiunbae/ITE4053
|
873d53493b7588f67406e0e6ed0e74e5e3f957bc
|
[
"MIT"
] | 1
|
2019-04-19T04:52:34.000Z
|
2019-04-19T04:52:34.000Z
|
from .denoising import DenoisingNetwork
| 20
| 39
| 0.875
| 4
| 40
| 8.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.972222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
efce52ef91cf3fcfc9778aef58e87236199e762a
| 219
|
py
|
Python
|
run4it/api/goal/__init__.py
|
andraune/Run4IT_BackEnd
|
a481427a0d1189a1f08c42e7ac1b452af6bbfc8d
|
[
"MIT"
] | 1
|
2022-03-29T06:11:20.000Z
|
2022-03-29T06:11:20.000Z
|
run4it/api/goal/__init__.py
|
andraune/run4it_backend
|
a481427a0d1189a1f08c42e7ac1b452af6bbfc8d
|
[
"MIT"
] | null | null | null |
run4it/api/goal/__init__.py
|
andraune/run4it_backend
|
a481427a0d1189a1f08c42e7ac1b452af6bbfc8d
|
[
"MIT"
] | null | null | null |
from .model import GoalCategory as GoalCategoryModel, Goal as GoalModel
from .resource import ProfileGoalList as ProfileGoalListResource, ProfileGoal as ProfileGoalResource, GoalCategoryList as GoalCategoryListResource
| 73
| 146
| 0.881279
| 21
| 219
| 9.190476
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09589
| 219
| 2
| 147
| 109.5
| 0.974747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
efe17c6646ff867b4f250e99c96fec179820a07a
| 39
|
py
|
Python
|
Nomic.StaticScripts/test1.py
|
vogon/nomic
|
31a30327d0e10b8af7ea5078d060c26ffa4042c3
|
[
"MIT"
] | 1
|
2015-03-22T03:48:56.000Z
|
2015-03-22T03:48:56.000Z
|
Nomic.StaticScripts/test1.py
|
vogon/nomic
|
31a30327d0e10b8af7ea5078d060c26ffa4042c3
|
[
"MIT"
] | null | null | null |
Nomic.StaticScripts/test1.py
|
vogon/nomic
|
31a30327d0e10b8af7ea5078d060c26ffa4042c3
|
[
"MIT"
] | null | null | null |
def test():
    """Smoke test: print a fixed greeting."""
    message = "hey what's up"
    print(message)
| 13
| 26
| 0.564103
| 7
| 39
| 3.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 39
| 2
| 27
| 19.5
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
effedbaa371546861a9cefc903da4f4e96af68eb
| 12,857
|
py
|
Python
|
tests/test_neattext.py
|
Jcharis/neattext
|
9a6b104f478bb33e48f24fc0f6724279564313b7
|
[
"MIT"
] | 32
|
2020-03-18T18:36:54.000Z
|
2022-03-29T03:11:34.000Z
|
tests/test_neattext.py
|
Jcharis/neattext
|
9a6b104f478bb33e48f24fc0f6724279564313b7
|
[
"MIT"
] | 2
|
2020-07-22T11:09:52.000Z
|
2021-03-04T04:34:16.000Z
|
tests/test_neattext.py
|
Jcharis/neattext
|
9a6b104f478bb33e48f24fc0f6724279564313b7
|
[
"MIT"
] | 6
|
2020-09-30T18:08:50.000Z
|
2021-11-01T07:00:38.000Z
|
from neattext import __version__
from neattext import TextCleaner,TextExtractor,TextMetrics,TextFrame
# from neattext.neattext import clean_text,remove_emails,extract_emails,replace_emails,replace_urls,remove_currencies,remove_currency_symbols,extract_currencies
from neattext.functions import *
from neattext.explainer import *
from neattext.pipeline import TextPipeline
# --- Object-API tests: TextCleaner / TextExtractor ---------------------
# Each TextCleaner test builds a cleaner, assigns the sample text, calls
# one remove_* method, and compares the result via str() (the cleaner
# methods appear to return a wrapper object, not a plain string — note
# the str(result) casts below).  The TextExtractor counterparts return
# plain lists.

def test_version():
    # Pin the neattext version this suite was written against.
    assert __version__ == '0.1.3'

def test_remove_emails():
    docx = TextCleaner()
    docx.text = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊."
    result = docx.remove_emails()
    assert str(result) == 'This is the mail ,our WEBSITE is https://example.com 😊.'

def test_extract_emails():
    docx = TextExtractor()
    docx.text = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊."
    result = docx.extract_emails()
    assert result == ['example@gmail.com']

def test_remove_emojis():
    docx = TextCleaner()
    docx.text = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊."
    result = docx.remove_emojis()
    assert str(result) == 'This is the mail example@gmail.com ,our WEBSITE is https://example.com .'

def test_extract_emojis():
    docx = TextExtractor()
    docx.text = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊."
    result = docx.extract_emojis()
    assert result == ['😊']

def test_remove_urls():
    docx = TextCleaner()
    docx.text = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊."
    result = docx.remove_urls()
    assert str(result) == 'This is the mail example@gmail.com ,our WEBSITE is 😊.'

def test_extract_urls():
    docx = TextExtractor()
    docx.text = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊."
    result = docx.extract_urls()
    assert result == ['https://example.com']

def test_remove_currencies():
    docx = TextCleaner()
    docx.text = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊 and it will cost $100 to subscribe."
    result = docx.remove_currencies()
    assert str(result) == 'This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊 and it will cost to subscribe.'

def test_extract_currencies():
    docx = TextExtractor()
    docx.text = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊 and it will cost $100 to subscribe."
    result = docx.extract_currencies()
    assert result == ['$100']

def test_remove_currency_symbols():
    # Removes only the '$' symbol; the amount '100' stays in the text.
    docx = TextCleaner()
    docx.text = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊 and it will cost $100 to subscribe."
    result = docx.remove_currency_symbols()
    assert str(result) == 'This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊 and it will cost 100 to subscribe.'

def test_extract_currency_symbols():
    docx = TextExtractor()
    docx.text = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊 and it will cost $100 to subscribe."
    result = docx.extract_currency_symbols()
    assert result == ['$']

def test_remove_stopwords():
    docx = TextCleaner()
    docx.text = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊."
    result = docx.remove_stopwords()
    assert str(result) == 'mail example@gmail.com ,our WEBSITE https://example.com 😊.'

def test_extract_stopwords():
    # Extraction appears to lowercase and keep duplicates ('is' twice).
    docx = TextExtractor()
    docx.text = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊."
    result = docx.extract_stopwords()
    assert result == ['this', 'is', 'the', 'is']
# --- Function-API tests: neattext.functions ---------------------------
# The module-level helpers take a string and return a plain string (or
# list), unlike the object API above which needs str() casts.

def test_single_fxn_remove_emails():
    t1 = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊."
    result = remove_emails(t1)
    assert result == 'This is the mail ,our WEBSITE is https://example.com 😊.'

def test_single_fxn_extract_emails():
    t1 = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊."
    result = extract_emails(t1)
    assert result == ['example@gmail.com']

def test_single_fxn_clean_text():
    # With stopwords=True the stopwords are removed and text lowercased.
    t1 = "This is the mail example@gmail.com ,our WEBSITE is https://example.com ."
    result = clean_text(t1,stopwords=True)
    assert result == 'mail example@gmail.com ,our website https://example.com .'

def test_single_fxn_clean_text_no_stopword():
    # With stopwords=False only the lowercasing applies.
    t1 = "This is the mail example@gmail.com ,our WEBSITE is https://example.com ."
    result = clean_text(t1,stopwords=False)
    assert result == 'this is the mail example@gmail.com ,our website is https://example.com .'

def test_single_fxn_clean_text_all():
    # Negative check only: default clean_text does NOT strip this far.
    t1 = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊."
    result = clean_text(t1)
    assert result != 'this is the mail our website is '

def test_single_fxn_replace_emails():
    t1 = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊."
    result = replace_emails(t1)
    assert result == 'This is the mail <EMAIL> ,our WEBSITE is https://example.com 😊.'

def test_single_fxn_replace_urls():
    t1 = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊."
    result = replace_urls(t1)
    assert result == 'This is the mail example@gmail.com ,our WEBSITE is <URL> 😊.'

def test_single_fxn_remove_currencies():
    t1 = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊 and it will cost $100 to subscribe."
    result = remove_currencies(t1)
    assert result == 'This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊 and it will cost to subscribe.'

def test_single_fxn_remove_non_ascii():
    # The 'Ø' (non-ASCII) is removed; surrounding ASCII text is kept.
    t1 = "This is the mail example@gmail.com ,our WEBSITE is Ø https://example.com . "
    result = remove_non_ascii(t1)
    assert result == 'This is the mail example@gmail.com ,our WEBSITE is https://example.com . '

def test_single_fxn_remove_bad_quotes():
    # Curly quotes (“ ” ’) are stripped, apparently replaced by spaces.
    t1 = """He “went” home yesterday really ’late’."""
    result = remove_bad_quotes(t1)
    assert result == 'He went home yesterday really late .'

def test_single_fxn_remove_multiple_spaces():
    t1 = 'He went home yesterday really late .'
    result = remove_multiple_spaces(t1)
    assert result == 'He went home yesterday really late .'

def test_multiple_methods_chaining():
    # Cleaner methods return the cleaner itself, so calls chain.
    t1 = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊 and it will cost $100 to subscribe."
    docx = TextCleaner(t1)
    result = docx.remove_emails().remove_urls().remove_emojis()
    assert str(result) == 'This is the mail ,our WEBSITE is and it will cost $100 to subscribe.'

def test_remove_dates():
    docx = TextCleaner()
    docx.text = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊 and it will cost $100 to subscribe 20/12/2005."
    result = docx.remove_dates()
    assert str(result) == "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊 and it will cost $100 to subscribe ."
def test_emojify():
result = emojify('Smiley')
assert result == '😃'
def test_emoji_explainer():
result = emoji_explainer('😃')
assert result == 'SMILING FACE WITH OPEN MOUTH'
def _frame(text):
    """Return a TextFrame preloaded with *text* (shared setup for the tests below)."""
    frame = TextFrame()
    frame.text = text
    return frame


def test_textframe():
    """word_tokens splits on whitespace with punctuation stripped from tokens."""
    frame = _frame("This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊.")
    assert frame.word_tokens() == ['This', 'is', 'the', 'mail', 'examplegmailcom', 'our', 'WEBSITE', 'is', 'httpsexamplecom', '😊']


def test_textframe_remove_html():
    """remove_html_tags strips markup but keeps the inner text."""
    frame = _frame("This is the <h2>example for html tags</h2>")
    assert frame.remove_html_tags().text == "This is the example for html tags"


def test_textframe_remove_stopwords():
    """remove_stopwords(lang='en') drops English stopwords like 'This is the'."""
    frame = _frame("This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊.")
    assert frame.remove_stopwords(lang='en').text == "mail example@gmail.com ,our WEBSITE https://example.com 😊."


def test_textframe_remove_puncts():
    """remove_puncts removes punctuation (expected value mirrors the library's current behaviour; URL '/' and ':' survive)."""
    frame = _frame("This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊.")
    assert frame.remove_puncts().text == "This is the mail example@gmailcom our WEBSITE is https://examplecom 😊"


def test_textframe_remove_hashtags():
    """remove_hashtags drops '#tag' tokens."""
    frame = _frame("This is the tag #jesuslives use wisely ")
    assert frame.remove_hashtags().text == "This is the tag use wisely "


def test_textframe_remove_userhandles():
    """remove_userhandles drops '@handle' tokens."""
    frame = _frame("This is the tag @jesuslives use wisely ")
    assert frame.remove_userhandles().text == "This is the tag use wisely "


def test_textframe_remove_shortwords():
    """remove_shortwords(length=3) drops words of three characters or fewer."""
    frame = _frame("This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊.")
    assert frame.remove_shortwords(length=3).text == "This mail example gmail WEBSITE https example"
def test_single_fxn_remove_shortwords():
    """Functional remove_shortwords drops words of <= length characters."""
    text = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊."
    assert remove_shortwords(text, length=3) == "This mail example gmail WEBSITE https example"


def test_single_fxn_extract_shortwords():
    """Functional extract_shortwords returns exactly the short words dropped."""
    text = "This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊."
    assert extract_shortwords(text, length=3) == ['is', 'the', 'com', 'our', 'is', 'com', '']


def test_single_fxn_extract_pattern():
    """extract_pattern returns every match of a caller-supplied regex."""
    text = "This is the mail example@gmail.com ,our WEBSITE is Ø https://example.com #hello. "
    assert extract_pattern(text, r'#\S+') == ['#hello.']


def test_single_fxn_clean_text_custom_pattern():
    """clean_text lowercases the text and removes spans matching custom_pattern."""
    text = "This is the mail example@gmail.com ,our WEBSITE is https://example.com ."
    assert clean_text(text, stopwords=False, custom_pattern=r'@\w+') == 'this is the mail example .com ,our website is https://example.com .'
# Shared fixture: one message containing an email, a URL, an emoji, visa and
# mastercard numbers, a bitcoin address, and a PO box reference.  Previously
# this literal was copy-pasted into each of the four tests below.
_FINANCE_SAMPLE = """This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊. This is visa 4111 1111 1111 1111 and bitcoin 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2 with mastercard 5500 0000 0000 0004. Send it to PO Box 555, KNU"""


def test_single_fxn_extract_btc_address():
    """extract_btc_address finds the bitcoin address in the sample."""
    assert extract_btc_address(_FINANCE_SAMPLE) == ['1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2']


def test_single_fxn_extract_mastercard_address():
    """extract_mastercard_addr finds the spaced 16-digit mastercard number."""
    assert extract_mastercard_addr(_FINANCE_SAMPLE) == ['5500 0000 0000 0004']


def test_single_fxn_extract_visacard_address():
    """extract_visacard_addr finds the spaced 16-digit visa number."""
    assert extract_visacard_addr(_FINANCE_SAMPLE) == ['4111 1111 1111 1111']
    # NOTE(review): this extra assertion duplicates the dedicated PO-box test
    # below; kept to preserve the original coverage.
    assert extract_postoffice_box(_FINANCE_SAMPLE) == ['PO Box 555']


def test_single_fxn_extract_postoffice_box():
    """extract_postoffice_box finds the 'PO Box 555' reference."""
    assert extract_postoffice_box(_FINANCE_SAMPLE) == ['PO Box 555']


def test_single_fxn_remove_postoffice_box():
    """remove_postoffice_box strips the 'PO Box 555' reference.

    The original assertion used ``!=`` against a string containing an
    inserted ``\\n`` the output can never contain, so it passed for ANY
    return value.  Assert the actual property instead.
    """
    result = remove_postoffice_box(_FINANCE_SAMPLE)
    assert 'PO Box 555' not in result
def test_single_fxn_remove_terms_in_bracket():
    """Default bracket style removes {curly} terms and leaves [square] ones."""
    text = """This is the mail of {London} {Accra} different from [Berlin] [Germany] """
    assert remove_terms_in_bracket(text) == 'This is the mail of different from [Berlin] [Germany] '


def test_single_fxn_remove_terms_in_bracket_square():
    """Passing "[]" switches removal to [square]-bracketed terms instead."""
    text = """This is the mail of {London} {Accra} different from [Berlin] [Germany] """
    assert remove_terms_in_bracket(text, "[]") == 'This is the mail of {London} {Accra} different from '


def test_single_fxn_extract_terms_in_bracket():
    """extract_terms_in_bracket returns the terms found inside {curly} brackets."""
    text = """This is the mail of {London} {Accra} different from [Berlin] [Germany] """
    assert extract_terms_in_bracket(text) == ['London', 'Accra']


def test_txt_cleaning_pipeline():
    """TextPipeline.fit applies its cleaning steps in the given order."""
    sample = """This is the mail example@gmail.com ,our WEBSITE is https://example.com 😊. This is visa 4111 1111 1111 1111 and bitcoin 1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2 with mastercard 5500 0000 0000 0004. Send it to PO Box 555, KNU"""
    pipeline = TextPipeline(steps=[remove_emails, remove_numbers, remove_emojis])
    assert pipeline.fit(sample) == 'This is the mail ,our WEBSITE is https://example.com . This is visa and bitcoin BvBMSEYstWetqTFnAumGFgxJaNVN with mastercard . Send it to PO Box , KNU'
| 43.289562
| 230
| 0.736875
| 1,977
| 12,857
| 4.687405
| 0.078907
| 0.048559
| 0.066041
| 0.085572
| 0.799612
| 0.760117
| 0.734758
| 0.715658
| 0.70519
| 0.68404
| 0
| 0.040226
| 0.145368
| 12,857
| 296
| 231
| 43.435811
| 0.798689
| 0.012289
| 0
| 0.308756
| 0
| 0.092166
| 0.521623
| 0.023631
| 0
| 0
| 0
| 0
| 0.225806
| 1
| 0.221198
| false
| 0
| 0.023041
| 0
| 0.24424
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4bd44b81a1d95cbc5a35ea53184743a1a35999f2
| 27
|
py
|
Python
|
rubika/__init__.py
|
Amircfyt/rubika-1
|
db03f700fa8b2299d395ac9b5709bb09aca7fe89
|
[
"MIT"
] | 23
|
2021-12-06T09:54:01.000Z
|
2022-03-31T19:44:29.000Z
|
rubika/__init__.py
|
Amircfyt/rubika-1
|
db03f700fa8b2299d395ac9b5709bb09aca7fe89
|
[
"MIT"
] | 4
|
2022-01-08T19:27:40.000Z
|
2022-03-30T13:18:23.000Z
|
rubika/__init__.py
|
Amircfyt/rubika-1
|
db03f700fa8b2299d395ac9b5709bb09aca7fe89
|
[
"MIT"
] | 13
|
2021-12-08T14:18:39.000Z
|
2022-03-30T13:20:37.000Z
|
from rubika.client import *
| 27
| 27
| 0.814815
| 4
| 27
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 27
| 1
| 27
| 27
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ef1caca916b465a065f58a73ab925d9c8a88f770
| 108
|
py
|
Python
|
tests/inputs/numpy-lib/44-numpy-arange.py
|
helq/pytropos
|
497ed5902e6e4912249ca0a46b477f9bfa6ae80a
|
[
"MIT"
] | 4
|
2019-10-06T18:01:24.000Z
|
2020-07-03T05:27:35.000Z
|
tests/inputs/numpy-lib/44-numpy-arange.py
|
helq/pytropos
|
497ed5902e6e4912249ca0a46b477f9bfa6ae80a
|
[
"MIT"
] | 5
|
2021-06-07T15:50:04.000Z
|
2021-06-07T15:50:06.000Z
|
tests/inputs/numpy-lib/44-numpy-arange.py
|
helq/pytropos
|
497ed5902e6e4912249ca0a46b477f9bfa6ae80a
|
[
"MIT"
] | null | null | null |
import numpy as np

# reshape accepts the target shape either as separate positional dimensions
# or as a single tuple; both forms below build the same (2, 3, 4) array
# holding the values 0..23.
a = np.arange(24).reshape(2, 3, 4)
b = np.arange(24).reshape((2, 3, 4))
# show_store()
| 15.428571
| 36
| 0.62037
| 22
| 108
| 3
| 0.636364
| 0.242424
| 0.30303
| 0.515152
| 0.606061
| 0.606061
| 0.606061
| 0
| 0
| 0
| 0
| 0.111111
| 0.166667
| 108
| 6
| 37
| 18
| 0.622222
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
32545661d5d5ef6612dfdcbcb699bc2302069cd6
| 1,225
|
py
|
Python
|
2017/day4/day4.py
|
jibarra/advent-of-code
|
9be56354f59c8279e13a4b89348e32fdfffd4677
|
[
"MIT"
] | null | null | null |
2017/day4/day4.py
|
jibarra/advent-of-code
|
9be56354f59c8279e13a4b89348e32fdfffd4677
|
[
"MIT"
] | null | null | null |
2017/day4/day4.py
|
jibarra/advent-of-code
|
9be56354f59c8279e13a4b89348e32fdfffd4677
|
[
"MIT"
] | null | null | null |
def read_file(file_name):
    """Return the entire contents of *file_name* as a string."""
    with open(file_name, "r") as file:
        return file.read()


def parse_input(text):
    """Split raw puzzle input into a list of lines."""
    return text.split("\n")


def _count_valid_passphrases(lines, normalize):
    """Count lines whose space-separated words are unique after *normalize*.

    *normalize* is applied to each word before the uniqueness check:
    the identity for part 1, letter-sorting (anagram detection) for part 2.
    Shared helper replacing the copy-pasted bodies of part1/part2.
    """
    good_passphrases = 0
    for line in lines:
        words = [normalize(word) for word in line.split(" ")]
        # A passphrase is valid when no (normalized) word repeats.
        if len(words) == len(set(words)):
            good_passphrases += 1
    return good_passphrases


def part1():
    """Part 1: a passphrase is valid when no word appears twice."""
    lines = parse_input(read_file("day4_input.txt"))
    print(_count_valid_passphrases(lines, lambda word: word))


def part2():
    """Part 2: a passphrase is valid when no word is an anagram of another."""
    lines = parse_input(read_file("day4_input.txt"))
    print(_count_valid_passphrases(lines, lambda word: ''.join(sorted(word))))


part1()
part2()
| 24.5
| 52
| 0.577959
| 151
| 1,225
| 4.443709
| 0.271523
| 0.134128
| 0.14307
| 0.056632
| 0.736215
| 0.736215
| 0.736215
| 0.736215
| 0.736215
| 0.736215
| 0
| 0.012255
| 0.333878
| 1,225
| 49
| 53
| 25
| 0.810049
| 0
| 0
| 0.585366
| 0
| 0
| 0.026939
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0.292683
| 0
| 0
| 0.146341
| 0.04878
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
085e2c4d60db41704e518d78a9d76dfb7a264d01
| 63
|
py
|
Python
|
mutationplanner/__init__.py
|
cssd2019/mutationplanner
|
2d37e94953ac5df86ab5a5d1651c9f63e8cd4c90
|
[
"MIT"
] | 1
|
2019-03-13T13:19:36.000Z
|
2019-03-13T13:19:36.000Z
|
mutationplanner/__init__.py
|
cssd2019/mutationplanner
|
2d37e94953ac5df86ab5a5d1651c9f63e8cd4c90
|
[
"MIT"
] | 1
|
2019-03-18T12:54:32.000Z
|
2019-03-18T12:54:32.000Z
|
mutationplanner/__init__.py
|
cssd2019/mutationplanner
|
2d37e94953ac5df86ab5a5d1651c9f63e8cd4c90
|
[
"MIT"
] | null | null | null |
from mutationplanner.mutation_analyser import mutation_analyser
| 63
| 63
| 0.936508
| 7
| 63
| 8.142857
| 0.714286
| 0.561404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 63
| 1
| 63
| 63
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0875409ad085a9f3a9307f7c444bf1f1c6fbfec4
| 17,936
|
py
|
Python
|
py_shoco/tests/successor_tables/successor_tables.py
|
MATTHEWFRAZER/py_shoco
|
6a2f38d3ed74c5f6d850e7c6338ca810b9738619
|
[
"MIT"
] | null | null | null |
py_shoco/tests/successor_tables/successor_tables.py
|
MATTHEWFRAZER/py_shoco
|
6a2f38d3ed74c5f6d850e7c6338ca810b9738619
|
[
"MIT"
] | null | null | null |
py_shoco/tests/successor_tables/successor_tables.py
|
MATTHEWFRAZER/py_shoco
|
6a2f38d3ed74c5f6d850e7c6338ca810b9738619
|
[
"MIT"
] | null | null | null |
import ctypes
from py_shoco.constants import MAX_LEADING_CHARACTER_BITS, MAX_SUCCESSOR_BITS
from py_shoco.successor_tables.compression_successor_table import CompressionSuccessorTable
from py_shoco.successor_tables.decompression_successor_table import DecompressionSuccessorTable
from py_shoco.pack import define_pack
# Table geometry derived from the shoco bit-allocation constants.
chars_count = 1 << MAX_LEADING_CHARACTER_BITS  # number of leading characters the tables cover
successors_count = 1 << MAX_SUCCESSOR_BITS  # successor slots tracked per character
pack_count = 3  # number of packing schemes built below (pack, pack1, pack2)
max_successor_len = 7  # presumably the longest successor-encoded run — TODO confirm against py_shoco
min_char = 39  # lowest character code covered by chrs_by_chr_and_successor_id (ASCII 39 = "'")
max_char = 122  # highest character code covered (ASCII 122 = 'z'); table has max_char - min_char rows
chrs_by_chr_id1 = ['e', 'a', 'i', 'o', 't', 'h', 'n', 'r', 's', 'l', 'u', 'c', 'w', 'm', 'd', 'b', 'p', 'f', 'g', 'v', 'y', 'k', '-', 'H', 'M', 'T', '\'', 'B', 'x', 'I', 'W', 'L']
chrs_by_chr_id = (ctypes.c_char * chars_count)()
for i, x in enumerate(chrs_by_chr_id1):
chrs_by_chr_id[i] = ctypes.c_char(ord(x))
chr_ids_by_chr1 = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 26, -1, -1, -1, -1, -1, 22, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 27, -1, -1, -1, -1, -1, 23,
29, -1, -1, 31, 24, -1, -1, -1, -1, -1, -1, 25, -1, -1, 30, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1,
15, 11, 14, 0, 17, 18, 5, 2, -1, 21, 9, 13, 6, 3, 16, -1, 7, 8, 4, 10, 19, 12, 28, 20, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1]
chr_ids_by_chr = (ctypes.c_int8 * 256)()
for i, x in enumerate(chr_ids_by_chr1):
chr_ids_by_chr[i] = ctypes.c_int8(x)
successor_ids_by_chr_id_and_chr_id1 = [
[7, 4, 12, -1, 6, -1, 1, 0, 3, 5, -1, 9, -1, 8, 2, -1, 15, 14, -1, 10, 11, -1, -1, -1, -1, -1, -1, -1, 13, -1, -1, -1],
[-1, -1, 6, -1, 1, -1, 0, 3, 2, 4, 15, 11, -1, 9, 5, 10, 13, -1, 12, 8, 7, 14, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[9, 11, -1, 4, 2, -1, 0, 8, 1, 5, -1, 6, -1, 3, 7, 15, -1, 12, 10, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, 14, 7, 5, -1, 1, 2, 8, 9, 0, 15, 6, 4, 11, -1, 12, 3, -1, 10, -1, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[2, 4, 3, 1, 5, 0, -1, 6, 10, 9, 7, 12, 11, -1, -1, -1, -1, 13, -1, -1, 8, -1, 15, -1, -1, -1, 14, -1, -1, -1, -1, -1],
[0, 1, 2, 3, 4, -1, -1, 5, 9, 10, 6, -1, -1, 8, 15, 11, -1, 14, -1, -1, 7, -1, 13, -1, -1, -1, 12, -1, -1, -1, -1, -1],
[2, 8, 7, 4, 3, -1, 9, -1, 6, 11, -1, 5, -1, -1, 0, -1, -1, 14, 1, 15, 10, 12, -1, -1, -1, -1, 13, -1, -1, -1, -1, -1],
[0, 3, 1, 2, 6, -1, 9, 8, 4, 12, 13, 10, -1, 11, 7, -1, -1, 15, 14, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 6, 3, 4, 1, 2, -1, -1, 5, 10, 7, 9, 11, 12, -1, -1, 8, 14, -1, -1, 15, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 6, 2, 5, 9, -1, -1, -1, 10, 1, 8, -1, 12, 14, 4, -1, 15, 7, -1, 13, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[8, 10, 9, 15, 1, -1, 4, 0, 3, 2, -1, 6, -1, 12, 11, 13, 7, 14, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 3, 6, 0, 4, 2, -1, 7, 13, 8, 9, 11, -1, -1, 15, -1, -1, -1, -1, -1, 10, 5, 14, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[3, 0, 1, 4, -1, 2, 5, 6, 7, 8, -1, 14, -1, -1, 9, 15, -1, 12, -1, -1, -1, 10, 11, -1, -1, -1, 13, -1, -1, -1, -1, -1],
[0, 1, 3, 2, 15, -1, 12, -1, 7, 14, 4, -1, -1, 9, -1, 8, 5, 10, -1, -1, 6, -1, 13, -1, -1, -1, 11, -1, -1, -1, -1, -1],
[0, 3, 1, 2, -1, -1, 12, 6, 4, 9, 7, -1, -1, 14, 8, -1, -1, 15, 11, 13, 5, -1, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 5, 7, 2, 10, 13, -1, 6, 8, 1, 3, -1, -1, 14, 15, 11, -1, -1, -1, 12, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 2, 6, 3, 7, 10, -1, 1, 9, 4, 8, -1, -1, 15, -1, 12, 5, -1, -1, -1, 11, -1, 13, -1, -1, -1, 14, -1, -1, -1, -1, -1],
[1, 3, 4, 0, 7, -1, 12, 2, 11, 8, 6, 13, -1, -1, -1, -1, -1, 5, -1, -1, 10, 15, 9, -1, -1, -1, 14, -1, -1, -1, -1, -1],
[1, 3, 5, 2, 13, 0, 9, 4, 7, 6, 8, -1, -1, 15, -1, 11, -1, -1, 10, -1, 14, -1, 12, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 2, 1, 3, -1, -1, -1, 6, -1, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[1, 11, 4, 0, 3, -1, 13, 12, 2, 7, -1, -1, 15, 10, 5, 8, 14, -1, -1, -1, -1, -1, 9, -1, -1, -1, 6, -1, -1, -1, -1, -1],
[0, 9, 2, 14, 15, 4, 1, 13, 3, 5, -1, -1, 10, -1, -1, -1, -1, 6, 12, -1, 7, -1, 8, -1, -1, -1, 11, -1, -1, -1, -1, -1],
[-1, 2, 14, -1, 1, 5, 8, 7, 4, 12, -1, 6, 9, 11, 13, 3, 10, 15, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[0, 1, 3, 2, -1, -1, -1, -1, -1, -1, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[4, 3, 1, 5, -1, -1, -1, 0, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[2, 8, 4, 1, -1, 0, -1, 6, -1, -1, 5, -1, 7, -1, -1, -1, -1, -1, -1, -1, 10, -1, -1, 9, -1, -1, -1, -1, -1, -1, -1, -1],
[12, 5, -1, -1, 1, -1, -1, 7, 0, 3, -1, 2, -1, 4, 6, -1, -1, -1, -1, 8, -1, -1, 15, -1, 13, 9, -1, -1, -1, -1, -1, 11],
[1, 3, 2, 4, -1, -1, -1, 5, -1, 7, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1, -1, 8, -1, -1],
[5, 3, 4, 12, 1, 6, -1, -1, -1, -1, 8, 2, -1, -1, -1, -1, 0, 9, -1, -1, 11, -1, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[-1, -1, -1, -1, 0, -1, 1, 12, 3, -1, -1, -1, -1, 5, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, 4, -1, -1, 6, -1, 10],
[2, 3, 1, 4, -1, 0, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7, -1, -1, -1, -1, -1, -1, -1, -1, 6, -1, -1],
[5, 1, 3, 0, -1, -1, -1, -1, -1, -1, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, 9, -1, -1, 6, -1, 7]
]
successor_ids_by_chr_id_and_chr_id = ((ctypes.c_int8 * chars_count) * chars_count)()
for i, x in enumerate(successor_ids_by_chr_id_and_chr_id1):
for j, y in enumerate(x):
successor_ids_by_chr_id_and_chr_id[i][j] = ctypes.c_int8(y)
chrs_by_chr_and_successor_id1 = [
['s', 't', 'c', 'l', 'm', 'a', 'd', 'r', 'v', 'T', 'A', 'L', 'e', 'M', 'Y', '-'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['-', 't', 'a', 'b', 's', 'h', 'c', 'r', 'n', 'w', 'p', 'm', 'l', 'd', 'i', 'f'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['u', 'e', 'i', 'a', 'o', 'r', 'y', 'l', 'I', 'E', 'R', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['e', 'a', 'o', 'i', 'u', 'A', 'y', 'E', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['t', 'n', 'f', 's', '\'', 'm', 'I', 'N', 'A', 'E', 'L', 'Z', 'r', 'V', 'R', 'C'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['o', 'a', 'y', 'i', 'u', 'e', 'I', 'L', 'D', '\'', 'E', 'Y', '\x00', '\x00', '\x00', '\x00'],
['r', 'i', 'y', 'a', 'e', 'o', 'u', 'Y', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['h', 'o', 'e', 'E', 'i', 'u', 'r', 'w', 'a', 'H', 'y', 'R', 'Z', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['h', 'i', 'e', 'a', 'o', 'r', 'I', 'y', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['n', 't', 's', 'r', 'l', 'd', 'i', 'y', 'v', 'm', 'b', 'c', 'g', 'p', 'k', 'u'],
['e', 'l', 'o', 'u', 'y', 'a', 'r', 'i', 's', 'j', 't', 'b', 'v', 'h', 'm', 'd'],
['o', 'e', 'h', 'a', 't', 'k', 'i', 'r', 'l', 'u', 'y', 'c', 'q', 's', '-', 'd'],
['e', 'i', 'o', 'a', 's', 'y', 'r', 'u', 'd', 'l', '-', 'g', 'n', 'v', 'm', 'f'],
['r', 'n', 'd', 's', 'a', 'l', 't', 'e', 'm', 'c', 'v', 'y', 'i', 'x', 'f', 'p'],
['o', 'e', 'r', 'a', 'i', 'f', 'u', 't', 'l', '-', 'y', 's', 'n', 'c', '\'', 'k'],
['h', 'e', 'o', 'a', 'r', 'i', 'l', 's', 'u', 'n', 'g', 'b', '-', 't', 'y', 'm'],
['e', 'a', 'i', 'o', 't', 'r', 'u', 'y', 'm', 's', 'l', 'b', '\'', '-', 'f', 'd'],
['n', 's', 't', 'm', 'o', 'l', 'c', 'd', 'r', 'e', 'g', 'a', 'f', 'v', 'z', 'b'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['e', 'n', 'i', 's', 'h', 'l', 'f', 'y', '-', 'a', 'w', '\'', 'g', 'r', 'o', 't'],
['e', 'l', 'i', 'y', 'd', 'o', 'a', 'f', 'u', 't', 's', 'k', 'w', 'v', 'm', 'p'],
['e', 'a', 'o', 'i', 'u', 'p', 'y', 's', 'b', 'm', 'f', '\'', 'n', '-', 'l', 't'],
['d', 'g', 'e', 't', 'o', 'c', 's', 'i', 'a', 'n', 'y', 'l', 'k', '\'', 'f', 'v'],
['u', 'n', 'r', 'f', 'm', 't', 'w', 'o', 's', 'l', 'v', 'd', 'p', 'k', 'i', 'c'],
['e', 'r', 'a', 'o', 'l', 'p', 'i', 't', 'u', 's', 'h', 'y', 'b', '-', '\'', 'm'],
['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['e', 'i', 'o', 'a', 's', 'y', 't', 'd', 'r', 'n', 'c', 'm', 'l', 'u', 'g', 'f'],
['e', 't', 'h', 'i', 'o', 's', 'a', 'u', 'p', 'c', 'l', 'w', 'm', 'k', 'f', 'y'],
['h', 'o', 'e', 'i', 'a', 't', 'r', 'u', 'y', 'l', 's', 'w', 'c', 'f', '\'', '-'],
['r', 't', 'l', 's', 'n', 'g', 'c', 'p', 'e', 'i', 'a', 'd', 'm', 'b', 'f', 'o'],
['e', 'i', 'a', 'o', 'y', 'u', 'r', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'],
['a', 'i', 'h', 'e', 'o', 'n', 'r', 's', 'l', 'd', 'k', '-', 'f', '\'', 'c', 'b'],
['p', 't', 'c', 'a', 'i', 'e', 'h', 'q', 'u', 'f', '-', 'y', 'o', '\x00', '\x00', '\x00'],
['o', 'e', 's', 't', 'i', 'd', '\'', 'l', 'b', '-', 'm', 'a', 'r', 'n', 'p', 'w']
]
chrs_by_chr_and_successor_id = ((ctypes.c_char * successors_count) * (max_char - min_char))()
for i, x in enumerate(chrs_by_chr_and_successor_id1):
for j, y in enumerate(x):
chrs_by_chr_and_successor_id[i][j] = ctypes.c_char(ord(y))
def _build_pack(word, bytes_packed, bytes_unpacked, offsets, masks, header_mask, head):
    """Assemble one Pack record from plain Python values.

    The three packing schemes below differed only in these numbers but were
    built by three copy-pasted stanzas; this helper replaces them.  Field
    semantics (bit offsets/masks per slot, header byte) come from py_shoco's
    define_pack — kept exactly as the original values.
    """
    pack = Pack()
    pack.word = ctypes.c_uint32(word)
    pack.bytes_packed = ctypes.c_uint(bytes_packed)
    pack.bytes_unpacked = ctypes.c_uint(bytes_unpacked)
    pack.offsets = (ctypes.c_int * 8)()
    for i, x in enumerate(offsets):
        pack.offsets[i] = ctypes.c_int(x)
    pack.masks = (ctypes.c_int16 * 8)()
    for i, x in enumerate(masks):
        pack.masks[i] = ctypes.c_int16(x)
    pack.header_mask = ctypes.c_char(header_mask)
    pack.head = ctypes.c_char(head)
    return pack


# Module-level names pack/pack1/pack2/packs are preserved for any importers.
pack = _build_pack(0x80000000, 1, 2,
                   [26, 24, 24, 24, 24, 24, 24, 24],
                   [15, 3, 0, 0, 0, 0, 0, 0],
                   0xc0, 0x80)
pack1 = _build_pack(0xc0000000, 2, 4,
                    [25, 22, 19, 16, 16, 16, 16, 16],
                    [15, 7, 7, 7, 0, 0, 0, 0],
                    0xe0, 0xc0)
pack2 = _build_pack(0xe0000000, 4, 8,
                    [23, 19, 15, 11, 8, 5, 2, 0],
                    [31, 15, 15, 15, 7, 7, 7, 3],
                    0xf0, 0xe0)
packs = [pack, pack1, pack2]

compressor_successor_table = CompressionSuccessorTable(chr_ids_by_chr, successor_ids_by_chr_id_and_chr_id, max_successor_len)
decompressor_successor_table = DecompressionSuccessorTable(chrs_by_chr_id, chrs_by_chr_and_successor_id)
| 83.037037
| 179
| 0.378122
| 3,157
| 17,936
| 2.09281
| 0.039911
| 0.772817
| 1.141517
| 1.498411
| 0.690328
| 0.627214
| 0.575148
| 0.556077
| 0.522779
| 0.508703
| 0
| 0.232023
| 0.192852
| 17,936
| 216
| 180
| 83.037037
| 0.224356
| 0
| 0
| 0.291457
| 0
| 0.211055
| 0.222389
| 0
| 0
| 0
| 0.003011
| 0
| 0
| 1
| 0
| false
| 0
| 0.025126
| 0
| 0.025126
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
08a0af53a12ce1d530ac058b6e05964879c0485e
| 140
|
py
|
Python
|
backend/microservices/auth/core/entities/hash_password.py
|
MuhamedAbdalla/Automatic-Audio-Book-Based-On-Emotion-Detection
|
72130ad037b900461af5be6d80b27ab29c81de5e
|
[
"MIT"
] | 3
|
2021-04-26T00:17:14.000Z
|
2021-07-04T15:30:09.000Z
|
backend/microservices/auth/core/entities/hash_password.py
|
MuhamedAbdalla/Automatic-Audio-Book-Based-On-Emotion-Detection
|
72130ad037b900461af5be6d80b27ab29c81de5e
|
[
"MIT"
] | null | null | null |
backend/microservices/auth/core/entities/hash_password.py
|
MuhamedAbdalla/Automatic-Audio-Book-Based-On-Emotion-Detection
|
72130ad037b900461af5be6d80b27ab29c81de5e
|
[
"MIT"
] | null | null | null |
import hashlib


def hash_password(password: str, salt: str):
    """Return the hex SHA-512 digest of *password* concatenated with *salt*."""
    salted = (password + salt).encode('utf-8')
    digest = hashlib.sha512(salted)
    return digest.hexdigest()
| 23.333333
| 73
| 0.692857
| 18
| 140
| 5.333333
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033898
| 0.157143
| 140
| 5
| 74
| 28
| 0.779661
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.666667
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
|
0
| 6
|
08c0ae5d4e8c0be8f95572574565fc99bb29880e
| 30
|
py
|
Python
|
src/mdscripts/updtopocount/__init__.py
|
awacha/mdscripts
|
831bda06557fa2d5f0899fc2f6552c9e49146cef
|
[
"BSD-3-Clause"
] | null | null | null |
src/mdscripts/updtopocount/__init__.py
|
awacha/mdscripts
|
831bda06557fa2d5f0899fc2f6552c9e49146cef
|
[
"BSD-3-Clause"
] | null | null | null |
src/mdscripts/updtopocount/__init__.py
|
awacha/mdscripts
|
831bda06557fa2d5f0899fc2f6552c9e49146cef
|
[
"BSD-3-Clause"
] | null | null | null |
from .updtopocount import main
| 30
| 30
| 0.866667
| 4
| 30
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
08e58a2022abf43885040f073ee1b745b9bafc81
| 103
|
py
|
Python
|
pygenius/__init__.py
|
aorti017/pygenius
|
e8fef50abccacda4c741b843cc084110fe797fd2
|
[
"BSD-3-Clause"
] | 1
|
2015-06-03T22:03:29.000Z
|
2015-06-03T22:03:29.000Z
|
pygenius/__init__.py
|
aorti017/pygenius
|
e8fef50abccacda4c741b843cc084110fe797fd2
|
[
"BSD-3-Clause"
] | null | null | null |
pygenius/__init__.py
|
aorti017/pygenius
|
e8fef50abccacda4c741b843cc084110fe797fd2
|
[
"BSD-3-Clause"
] | null | null | null |
from artist import *
from extract import *
from search import *
from song import *
from tools import *
| 17.166667
| 21
| 0.757282
| 15
| 103
| 5.2
| 0.466667
| 0.512821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.194175
| 103
| 5
| 22
| 20.6
| 0.939759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3ea96ce324a597831feb64ccafff08eff06fec7f
| 132
|
py
|
Python
|
app/controllers/main/__init__.py
|
loserrain/-flask-base-test
|
3b3c4478973c3e0219cb0a4cbb20d0411163e7ca
|
[
"MIT"
] | 6
|
2019-10-04T22:24:46.000Z
|
2021-07-13T19:15:49.000Z
|
app/controllers/main/__init__.py
|
leynier/flask-base
|
abcbe1774e44bf9d7afd921212662347f0f7adcc
|
[
"MIT"
] | null | null | null |
app/controllers/main/__init__.py
|
leynier/flask-base
|
abcbe1774e44bf9d7afd921212662347f0f7adcc
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
main_blueprint = Blueprint('main', __name__, template_folder='../../views')
from . import controllers
| 22
| 75
| 0.757576
| 15
| 132
| 6.266667
| 0.666667
| 0.276596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 132
| 5
| 76
| 26.4
| 0.803419
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
411342374bbe3f112ac51575f1461b0b0adcf909
| 103
|
py
|
Python
|
plugins/dbnd-luigi/src/dbnd_luigi/__main__.py
|
ipattarapong/dbnd
|
7bd65621c46c73e078eb628f994127ad4c7dbd1a
|
[
"Apache-2.0"
] | 224
|
2020-01-02T10:46:37.000Z
|
2022-03-02T13:54:08.000Z
|
plugins/dbnd-luigi/src/dbnd_luigi/__main__.py
|
ipattarapong/dbnd
|
7bd65621c46c73e078eb628f994127ad4c7dbd1a
|
[
"Apache-2.0"
] | 16
|
2020-03-11T09:37:58.000Z
|
2022-01-26T10:22:08.000Z
|
plugins/dbnd-luigi/src/dbnd_luigi/__main__.py
|
ipattarapong/dbnd
|
7bd65621c46c73e078eb628f994127ad4c7dbd1a
|
[
"Apache-2.0"
] | 24
|
2020-03-24T13:53:50.000Z
|
2022-03-22T11:55:18.000Z
|
from dbnd_luigi.luigi_tracking import dbnd_luigi_run
if __name__ == "__main__":
dbnd_luigi_run()
| 17.166667
| 52
| 0.776699
| 15
| 103
| 4.4
| 0.6
| 0.409091
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145631
| 103
| 5
| 53
| 20.6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.07767
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
411426436030574b8cd963cdea19b9cb6abcce16
| 8,608
|
py
|
Python
|
bert_vectors_chandni.py
|
chandnii7/UsingBERT
|
ef8ebcc282c8dbc5a95529a49e39457ccb0c6639
|
[
"Apache-2.0"
] | null | null | null |
bert_vectors_chandni.py
|
chandnii7/UsingBERT
|
ef8ebcc282c8dbc5a95529a49e39457ccb0c6639
|
[
"Apache-2.0"
] | null | null | null |
bert_vectors_chandni.py
|
chandnii7/UsingBERT
|
ef8ebcc282c8dbc5a95529a49e39457ccb0c6639
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# # Text categorization model using the features derived from BERT
# In[ ]:
import os
import json
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix, classification_report
# In[2]:
ORIGINAL_DATA_DIR = os.path.join("data")
BERT_FEATURE_DIR = "bert_output_data"
# In[3]:
train_df = pd.read_csv(os.path.join(ORIGINAL_DATA_DIR, "lang_id_train.csv"))
print(train_df.shape)
bert_vectors_train = []
with open(os.path.join(BERT_FEATURE_DIR, "train.jsonlines"), "rt") as infile:
for line in infile:
bert_data = json.loads(line)
for t in bert_data["features"]:
# Only extract the [CLS] vector used for classification
if t["token"] == "[CLS]":
# We only use the representation at the final layer of the network
bert_vectors_train.append(t["layers"][0]["values"])
break
print(len(bert_vectors_train))
X_train = np.array(bert_vectors_train)
y_train = train_df["native_language"].values
# In[4]:
eval_df = pd.read_csv(os.path.join(ORIGINAL_DATA_DIR, "lang_id_eval.csv"))
print(eval_df.shape)
bert_vectors_eval = []
with open(os.path.join(BERT_FEATURE_DIR, "eval.jsonlines"), "rt") as infile:
for line in infile:
bert_data = json.loads(line)
for t in bert_data["features"]:
# Only extract the [CLS] vector used for classification
if t["token"] == "[CLS]":
# We only use the representation at the final layer of the network
bert_vectors_eval.append(t["layers"][0]["values"])
break
print(len(bert_vectors_eval))
X_eval = np.array(bert_vectors_eval)
y_eval = eval_df["native_language"].values
# In[5]:
test_df = pd.read_csv(os.path.join(ORIGINAL_DATA_DIR, "lang_id_test.csv"))
print(test_df.shape)
bert_vectors_test = []
with open(os.path.join(BERT_FEATURE_DIR, "test.jsonlines"), "rt") as infile:
for line in infile:
bert_data = json.loads(line)
for t in bert_data["features"]:
# Only extract the [CLS] vector used for classification
if t["token"] == "[CLS]":
# We only use the representation at the final layer of the network
bert_vectors_test.append(t["layers"][0]["values"])
break
print(len(bert_vectors_test))
X_test = np.array(bert_vectors_test)
y_test = test_df["native_language"].values
# # Logistic Regression
# In[6]:
lr_model = LogisticRegression(penalty="l2", C=1.0)
lr_model.fit(X_train, y_train)
print("Training Accuarcy: ", lr_model.score(X_train, y_train))
# In[15]:
# Adding predicted value in the dataframe
test_df['predicted1'] = lr_model.predict(X_test)
# Class list
list_of_languages = sorted(test_df['native_language'].unique())
# Precision, recall, f-measure and support for each class
print("Evaluation for each class")
print(classification_report(y_test,test_df['predicted1'].values,target_names=list_of_languages))
print()
print("**********************************************************************************************")
print()
# Confusion matrix
matrix = confusion_matrix(test_df['native_language'], test_df['predicted1'])
plt.figure(figsize = (10,5))
ax = sns.heatmap(matrix, annot=True, xticklabels=list_of_languages, yticklabels=list_of_languages)
plt.show()
print("**********************************************************************************************")
print()
# Calculate misclassification
test_predicted = test_df.groupby('predicted1').count()['native_language']
test_misclassifications = []
for i in range(len(list_of_languages)):
misclassification = ((200 - matrix[i][i] + (test_predicted[i] - matrix[i][i])) / 2000) * 100
test_misclassifications.append(misclassification)
# Misclassification for each class into one dataframe
evaluation_by_class = pd.DataFrame(columns=['Language', 'Misclassification'])
for i in range(len(list_of_languages)):
evaluation_by_class = evaluation_by_class.append(pd.DataFrame([[list_of_languages[i], test_misclassifications[i]]],
columns=['Language', 'Misclassification']))
print("Misclassification for each class")
print(evaluation_by_class.to_string())
print()
print("**********************************************************************************************")
print()
# Evaluate misclassification between all classes
evaluation_between_classes = pd.DataFrame(columns=['Language', 'Predicted', 'Misclassification'])
for i in list_of_languages:
for j in list_of_languages:
if(i != j):
evaluation_between_classes = evaluation_between_classes.append(pd.DataFrame([[i, j,
matrix[list_of_languages.index(i)][list_of_languages.index(j)]]],
columns=['Language', 'Predicted', 'Misclassification']))
print("Misclassification between each pair of classes")
print(evaluation_between_classes.sort_values(by=['Misclassification']).to_string())
print()
print("**********************************************************************************************")
print()
print("Summary")
print("Total records:", test_df.shape[0])
print("Incorrect predictions:", evaluation_between_classes['Misclassification'].sum())
print("Correct predictions:", (test_df.shape[0] - evaluation_between_classes['Misclassification'].sum()))
# # Neural Network
# In[8]:
nn_model = MLPClassifier(solver='lbfgs')
nn_model.fit(X_train, y_train)
print("Training Accuarcy: ", nn_model.score(X_train, y_train))
# In[16]:
# Adding predicted value in the dataframe
test_df['predicted2'] = nn_model.predict(X_test)
# Class list
list_of_languages = sorted(test_df['native_language'].unique())
# Precision, recall, f-measure and support for each class
print("Evaluation for each class")
print(classification_report(y_test,test_df['predicted2'].values,target_names=list_of_languages))
print()
print("**********************************************************************************************")
print()
# Confusion matrix
matrix = confusion_matrix(test_df['native_language'], test_df['predicted2'])
plt.figure(figsize = (10,5))
ax = sns.heatmap(matrix, annot=True, xticklabels=list_of_languages, yticklabels=list_of_languages)
plt.show()
print("**********************************************************************************************")
print()
# Calculate misclassification
test_predicted = test_df.groupby('predicted2').count()['native_language']
test_misclassifications = []
for i in range(len(list_of_languages)):
misclassification = ((200 - matrix[i][i] + (test_predicted[i] - matrix[i][i])) / 2000) * 100
test_misclassifications.append(misclassification)
# Misclassification for each class into one dataframe
evaluation_by_class = pd.DataFrame(columns=['Language', 'Misclassification'])
for i in range(len(list_of_languages)):
evaluation_by_class = evaluation_by_class.append(pd.DataFrame([[list_of_languages[i], test_misclassifications[i]]],
columns=['Language', 'Misclassification']))
print("Misclassification for each class")
print(evaluation_by_class.to_string())
print()
print("**********************************************************************************************")
print()
# Evaluate misclassification between all classes
evaluation_between_classes = pd.DataFrame(columns=['Language', 'Predicted', 'Misclassification'])
for i in list_of_languages:
for j in list_of_languages:
if(i != j):
evaluation_between_classes = evaluation_between_classes.append(pd.DataFrame([[i, j,
matrix[list_of_languages.index(i)][list_of_languages.index(j)]]],
columns=['Language', 'Predicted', 'Misclassification']))
print("Misclassification between each pair of classes")
print(evaluation_between_classes.sort_values(by=['Misclassification']).to_string())
print()
print("**********************************************************************************************")
print()
print("Summary")
print("Total records:", test_df.shape[0])
print("Incorrect predictions:", evaluation_between_classes['Misclassification'].sum())
print("Correct predictions:", (test_df.shape[0] - evaluation_between_classes['Misclassification'].sum()))
| 35.570248
| 128
| 0.639405
| 1,018
| 8,608
| 5.183694
| 0.169941
| 0.025014
| 0.062536
| 0.019329
| 0.833807
| 0.824711
| 0.824711
| 0.815615
| 0.782263
| 0.766724
| 0
| 0.007581
| 0.157179
| 8,608
| 241
| 129
| 35.717842
| 0.719779
| 0.120702
| 0
| 0.65493
| 0
| 0
| 0.250996
| 0.099867
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.06338
| 0
| 0.06338
| 0.352113
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4117b4e9efbd7b3a48fd36296da56b266566b473
| 2,355
|
py
|
Python
|
test/test_zmove.py
|
adacker10/showdown
|
8ceb1ff46d5c33ec3055928d6ad293224446f63c
|
[
"MIT"
] | 8
|
2019-02-02T01:15:57.000Z
|
2021-12-23T04:43:46.000Z
|
test/test_zmove.py
|
adacker10/showdown
|
8ceb1ff46d5c33ec3055928d6ad293224446f63c
|
[
"MIT"
] | null | null | null |
test/test_zmove.py
|
adacker10/showdown
|
8ceb1ff46d5c33ec3055928d6ad293224446f63c
|
[
"MIT"
] | 6
|
2020-09-11T13:15:05.000Z
|
2022-03-18T15:46:35.000Z
|
import unittest
from sim.battle import Battle
from data import dex
class TestZMove(unittest.TestCase):
def test_zmove(self):
battle = Battle(debug=False, rng=False)
"""tests tackle with STAB and no STAB"""
battle.join(0, [{'species': 'pikachuhoenn', 'item': 'pikashuniumz', 'moves': ['thunderbolt']}])
battle.join(1, [{'species': 'magnemite', 'item': 'normaliumz', 'moves': ['tackle']}])
battle.choose(0, dex.Decision('move', 0, zmove=True))
battle.choose(1, dex.Decision('move', 0, zmove=True))
battle.do_turn()
pikachu = battle.sides[0].pokemon[0]
magnemite = battle.sides[1].pokemon[0]
#damage calcs were done by hand
self.assertEqual(magnemite.hp, magnemite.maxhp-61)
self.assertEqual(pikachu.hp, pikachu.maxhp-42)
def test_zmove_protect(self):
battle = Battle(debug=False, rng=False)
"""tests tackle with STAB and no STAB"""
battle.join(0, [{'species': 'pikachuhoenn', 'item': 'pikashuniumz', 'moves': ['thunderbolt']}])
battle.join(1, [{'species': 'magnemite', 'moves': ['protect']}])
battle.choose(0, dex.Decision('move', 0, zmove=True))
battle.choose(1, dex.Decision('move', 0))
battle.do_turn()
pikachu = battle.sides[0].pokemon[0]
magnemite = battle.sides[1].pokemon[0]
#damage calcs were done by hand
self.assertEqual(magnemite.hp, magnemite.maxhp-15)
def test_zmove_twice(self):
battle = Battle(debug=False, rng=False)
"""tests tackle with STAB and no STAB"""
battle.join(0, [{'species': 'pikachuhoenn', 'item': 'pikashuniumz', 'moves': ['thunderbolt']}])
battle.join(1, [{'species': 'magnemite', 'moves': ['tackle']}])
battle.choose(0, dex.Decision('move', 0, zmove=True))
battle.choose(1, dex.Decision('move', 0))
battle.do_turn()
battle.choose(0, dex.Decision('move', 0, zmove=True))
battle.choose(1, dex.Decision('move', 0))
battle.do_turn()
pikachu = battle.sides[0].pokemon[0]
magnemite = battle.sides[1].pokemon[0]
#damage calcs were done by hand
self.assertEqual(magnemite.hp, magnemite.maxhp-89)
def runTest(self):
self.test_zmove()
self.test_zmove_protect()
self.test_zmove_twice()
| 36.796875
| 103
| 0.614013
| 292
| 2,355
| 4.90411
| 0.202055
| 0.067039
| 0.083799
| 0.089385
| 0.815642
| 0.815642
| 0.815642
| 0.809358
| 0.809358
| 0.809358
| 0
| 0.022913
| 0.221656
| 2,355
| 63
| 104
| 37.380952
| 0.75832
| 0.038217
| 0
| 0.547619
| 0
| 0
| 0.131186
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 1
| 0.095238
| false
| 0
| 0.071429
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
412b6dba6c3bd8525acec50c760e095a9fbed575
| 182
|
py
|
Python
|
joplin/pages/information_page/fixtures/__init__.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 15
|
2018-09-27T07:36:30.000Z
|
2021-08-03T16:01:21.000Z
|
joplin/pages/information_page/fixtures/__init__.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 183
|
2017-11-16T23:30:47.000Z
|
2020-12-18T21:43:36.000Z
|
joplin/pages/information_page/fixtures/__init__.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 12
|
2017-12-12T22:48:05.000Z
|
2021-03-01T18:01:24.000Z
|
from .test_cases.new_contact import new_contact
# You can import any test_case fixture individually
# Or you can load them all with this function
def load_all():
new_contact()
| 22.75
| 51
| 0.78022
| 30
| 182
| 4.533333
| 0.666667
| 0.220588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17033
| 182
| 7
| 52
| 26
| 0.900662
| 0.510989
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f5e1435b8feb482e8d7c7ad37fbb74324d252b1e
| 50
|
py
|
Python
|
lib/models/__init__.py
|
wanghm92/Singlish_parser_tf0.12
|
06e28922ab54f57ade7fb8518ab4d3132286cd01
|
[
"MIT"
] | 18
|
2017-05-17T13:51:08.000Z
|
2021-06-13T14:34:42.000Z
|
lib/models/__init__.py
|
wanghm92/Singlish_parser_tf0.12
|
06e28922ab54f57ade7fb8518ab4d3132286cd01
|
[
"MIT"
] | 1
|
2019-03-15T05:39:49.000Z
|
2019-03-15T06:49:20.000Z
|
lib/models/__init__.py
|
wanghm92/Singlish_parser_tf0.12
|
06e28922ab54f57ade7fb8518ab4d3132286cd01
|
[
"MIT"
] | 7
|
2018-04-24T11:25:03.000Z
|
2021-03-21T16:41:42.000Z
|
from nn import NN
import rnn
from parsers import *
| 16.666667
| 21
| 0.8
| 9
| 50
| 4.444444
| 0.555556
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18
| 50
| 3
| 21
| 16.666667
| 0.97561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f5e8916afd436ba61c176bfbea8bb706f5aa9c0c
| 107
|
py
|
Python
|
terrascript/cloudstack/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/cloudstack/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/cloudstack/__init__.py
|
hugovk/python-terrascript
|
08fe185904a70246822f5cfbdc9e64e9769ec494
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/cloudstack/__init__.py
import terrascript
class cloudstack(terrascript.Provider):
pass
| 15.285714
| 39
| 0.803738
| 11
| 107
| 7.454545
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121495
| 107
| 6
| 40
| 17.833333
| 0.87234
| 0.317757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
eb1741d079b18336bf1a9f94d552e2e10aa3f5f1
| 18
|
py
|
Python
|
python/test_module/run.py
|
seckcoder/lang-learn
|
1e0d6f412bbd7f89b1af00293fd907ddb3c1b571
|
[
"Unlicense"
] | 1
|
2017-10-14T04:23:45.000Z
|
2017-10-14T04:23:45.000Z
|
python/test_module/run.py
|
seckcoder/lang-learn
|
1e0d6f412bbd7f89b1af00293fd907ddb3c1b571
|
[
"Unlicense"
] | null | null | null |
python/test_module/run.py
|
seckcoder/lang-learn
|
1e0d6f412bbd7f89b1af00293fd907ddb3c1b571
|
[
"Unlicense"
] | null | null | null |
import b
import c
| 6
| 8
| 0.777778
| 4
| 18
| 3.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 18
| 2
| 9
| 9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
eb2516b88d0dde92a6a36a934cf12df15aba2ab6
| 162
|
py
|
Python
|
chestxray/__init__.py
|
christian-5-28/aimlx-demos
|
ba63edb80f37b1a8ced70d5e29038eafa3b48b91
|
[
"MIT"
] | 6
|
2017-06-28T10:50:21.000Z
|
2022-01-05T18:28:39.000Z
|
chestxray/__init__.py
|
christian-5-28/aimlx-demos
|
ba63edb80f37b1a8ced70d5e29038eafa3b48b91
|
[
"MIT"
] | 3
|
2017-12-07T16:02:13.000Z
|
2018-09-06T11:39:36.000Z
|
chestxray/__init__.py
|
christian-5-28/aimlx-demos
|
ba63edb80f37b1a8ced70d5e29038eafa3b48b91
|
[
"MIT"
] | 23
|
2017-08-08T09:31:16.000Z
|
2018-10-24T14:31:36.000Z
|
from flask import Blueprint
chestxray = Blueprint('chestxray', __name__, template_folder='templates', static_folder='static')
from . import chestxray_controller
| 32.4
| 97
| 0.814815
| 18
| 162
| 6.944444
| 0.611111
| 0.288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092593
| 162
| 5
| 98
| 32.4
| 0.85034
| 0
| 0
| 0
| 0
| 0
| 0.147239
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
de4ab6afc2604d9aab63823646fd9e4f80e1d88d
| 39
|
py
|
Python
|
ytsclient/__init__.py
|
onlinejudge95/yts-client
|
231fd318c654747a010655666af04d88cc527e43
|
[
"MIT"
] | 1
|
2020-03-15T09:42:38.000Z
|
2020-03-15T09:42:38.000Z
|
ytsclient/__init__.py
|
onlinejudge95/yts-client
|
231fd318c654747a010655666af04d88cc527e43
|
[
"MIT"
] | 53
|
2020-03-15T10:30:55.000Z
|
2022-03-18T18:33:43.000Z
|
ytsclient/__init__.py
|
onlinejudge95/yts-client
|
231fd318c654747a010655666af04d88cc527e43
|
[
"MIT"
] | null | null | null |
from ytsclient.client import YTSClient
| 19.5
| 38
| 0.871795
| 5
| 39
| 6.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
de58a3502bf8f5f03eefe7dc12701270d623da2b
| 37
|
py
|
Python
|
zencad/internal_models/__init__.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | 5
|
2018-04-11T14:11:40.000Z
|
2018-09-12T19:03:36.000Z
|
zencad/internal_models/__init__.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | null | null | null |
zencad/internal_models/__init__.py
|
Spiritdude/zencad
|
4e63b1a6306dd235f4daa2791b10249f7546c95b
|
[
"MIT"
] | null | null | null |
from .knight import knight as knight
| 18.5
| 36
| 0.810811
| 6
| 37
| 5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 37
| 1
| 37
| 37
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
de860fcfa155e2df2abc0d233690fe643602d03b
| 127
|
py
|
Python
|
NvTK/Explainer/__init__.py
|
JiaqiLiZju/NvTK
|
6b887670a03d63c1747d9854ecbbac13cc06461c
|
[
"BSD-3-Clause"
] | null | null | null |
NvTK/Explainer/__init__.py
|
JiaqiLiZju/NvTK
|
6b887670a03d63c1747d9854ecbbac13cc06461c
|
[
"BSD-3-Clause"
] | null | null | null |
NvTK/Explainer/__init__.py
|
JiaqiLiZju/NvTK
|
6b887670a03d63c1747d9854ecbbac13cc06461c
|
[
"BSD-3-Clause"
] | null | null | null |
from .Motif import *
from .MotifVisualize import *
from .Featuremap import *
from .Influence import *
from .Gradiant import *
| 18.142857
| 29
| 0.755906
| 15
| 127
| 6.4
| 0.466667
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.165354
| 127
| 7
| 30
| 18.142857
| 0.90566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dec043b6c89534fea30b759231de8c114fccc098
| 4,417
|
py
|
Python
|
src/utils/data_generator.py
|
coreyjadams/GAN_AE_tutorial
|
d218e314bb24b4263956811b0c5ba2ae7667c24b
|
[
"Apache-2.0"
] | null | null | null |
src/utils/data_generator.py
|
coreyjadams/GAN_AE_tutorial
|
d218e314bb24b4263956811b0c5ba2ae7667c24b
|
[
"Apache-2.0"
] | null | null | null |
src/utils/data_generator.py
|
coreyjadams/GAN_AE_tutorial
|
d218e314bb24b4263956811b0c5ba2ae7667c24b
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import random
import numpy
class mnist_generator(object):
'''
This class takes the mnist dataset and generates multi-digit examples.
The goal here is to create on-the-fly augmented data that is more complex
than just 0 to 9, but also very easy to get access to.
'''
def __init__(self, seed=0):
# Use TF to get the dataset, will download if needed.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.astype(numpy.float32) * (1./256)
x_test = x_test.astype(numpy.float32) * (1./256)
self._x_train_base = x_train
self._y_train_base = y_train
self._x_test_base = x_test
self._y_test_base = y_test
self._base_shape = [28,28]
self._random = random.Random(seed)
def next_train_batch(self, batch_size=10, n_digits=2):
'''
Create a new training batch of a specified number of images,
with the specified number of digits per image.
Parameters
----------
batch_size : int (default = 10)
n_digits : int (default = 2)
Returns
-------
images : ndarray (shape = [batch_size, 28, n_digits*28]
labels : ndarray (shape = [batch_size] )
Examples
--------
# Get a batch with 10 images, each a 2 digit number (Default):
images, labels = generator.next_train_batch()
# Get a batch with 20 images, each a 3 digit number:
images, labels = generator.next_train_batch(20, 3)
'''
# First, allocate memory to hold the output data:
# Data is stored as [B, H, W, C]
images = numpy.zeros([batch_size, self._base_shape[0], n_digits*self._base_shape[1]])
labels = numpy.zeros([batch_size], dtype=numpy.int32)
indexes = numpy.asarray(
self._random.sample(
range(len(self._x_train_base)),
batch_size*n_digits
)
)
indexes = indexes.reshape([batch_size, n_digits])
dims = [10] * n_digits
for b in range(batch_size):
# pick a random number from the train set:
for n in range(n_digits):
i = indexes[b][n]
images[b, :, n*28:(n+1)*28] = self._x_train_base[i]
this_label = [ self._y_train_base[j] for j in indexes[b] ]
labels[b] = numpy.ravel_multi_index(this_label, dims)
return images, labels
def next_test_batch(self, batch_size=10, n_digits=2):
'''
Create a new testing batch of a specified number of images,
with the specified number of digits per image.
Parameters
----------
batch_size : int (default = 10)
n_digits : int (default = 2)
Returns
-------
images : ndarray (shape = [batch_size, 28, n_digits*28]
labels : ndarray (shape = [batch_size] )
Examples
--------
# Get a batch with 10 images, each a 2 digit number (Default):
images, labels = generator.next_train_batch()
# Get a batch with 20 images, each a 3 digit number:
images, labels = generator.next_train_batch(20, 3)
'''
# First, allocate memory to hold the output data:
# Data is stored as [B, H, W, C]
images = numpy.zeros([batch_size, self._base_shape[0], n_digits*self._base_shape[1]])
labels = numpy.zeros([batch_size], dtype=numpy.int32)
indexes = numpy.asarray(
self._random.sample(
range(len(self._x_test_base)),
batch_size*n_digits
)
)
indexes = indexes.reshape([batch_size, n_digits])
dims = [10] * n_digits
for b in range(batch_size):
# pick a random number from the train set:
for n in range(n_digits):
i = indexes[b][n]
images[b, :, n*28:(n+1)*28] = self._x_test_base[i]
this_label = [ self._y_test_base[j] for j in indexes[b] ]
labels[b] = numpy.ravel_multi_index(this_label, dims)
return images, labels
| 33.210526
| 93
| 0.547657
| 572
| 4,417
| 4.027972
| 0.213287
| 0.070313
| 0.023438
| 0.036458
| 0.766493
| 0.747396
| 0.730903
| 0.730903
| 0.730903
| 0.730903
| 0
| 0.027603
| 0.352049
| 4,417
| 133
| 94
| 33.210526
| 0.777428
| 0.354766
| 0
| 0.48
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06
| false
| 0
| 0.06
| 0
| 0.18
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9d05584fd6c7d58104073d3136f969ab63f08c97
| 121
|
py
|
Python
|
maniacal-moths/newsly/news_wrapper/models.py
|
Kushagra-0801/summer-code-jam-2020
|
aae9a678b0b30f20ab3cc6cf2b0606ee1f762ca0
|
[
"MIT"
] | null | null | null |
maniacal-moths/newsly/news_wrapper/models.py
|
Kushagra-0801/summer-code-jam-2020
|
aae9a678b0b30f20ab3cc6cf2b0606ee1f762ca0
|
[
"MIT"
] | null | null | null |
maniacal-moths/newsly/news_wrapper/models.py
|
Kushagra-0801/summer-code-jam-2020
|
aae9a678b0b30f20ab3cc6cf2b0606ee1f762ca0
|
[
"MIT"
] | 1
|
2020-08-04T05:44:34.000Z
|
2020-08-04T05:44:34.000Z
|
from django.db import models
class Article(models.Model):
"""Article gotten from the News Catcher API"""
pass
| 15.125
| 50
| 0.702479
| 17
| 121
| 5
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206612
| 121
| 7
| 51
| 17.285714
| 0.885417
| 0.330579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
19fbb993495450f5945642385188c7860adeb88f
| 46
|
py
|
Python
|
boilerplate/app/models/__init__.py
|
davideasaf/effortless_rest_flask
|
ee96069614aa670837152db36616b847f1cb5f73
|
[
"MIT"
] | 6
|
2019-10-31T17:10:06.000Z
|
2020-07-01T15:18:46.000Z
|
boilerplate/app/models/__init__.py
|
davideasaf/effortless_rest_flask
|
ee96069614aa670837152db36616b847f1cb5f73
|
[
"MIT"
] | 1
|
2019-11-07T20:31:27.000Z
|
2019-11-07T20:31:27.000Z
|
boilerplate/app/models/__init__.py
|
pydatacharlotte/effortless_rest_flask
|
4691d2ffda3f4eebae2ba1f089fdce087750c984
|
[
"MIT"
] | 2
|
2019-11-07T20:26:02.000Z
|
2019-12-09T01:29:32.000Z
|
from .user import User
from .iris import Iris
| 15.333333
| 22
| 0.782609
| 8
| 46
| 4.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 46
| 2
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dfcf7a5dddd1a21cf4e855c96871e40e913e6ae9
| 41
|
py
|
Python
|
daffodil/base_delegate.py
|
igorkramaric/daffodil
|
eefa2b2801e40246cc1deb4ca5940f39c77e3203
|
[
"MIT"
] | null | null | null |
daffodil/base_delegate.py
|
igorkramaric/daffodil
|
eefa2b2801e40246cc1deb4ca5940f39c77e3203
|
[
"MIT"
] | 45
|
2015-05-04T20:59:43.000Z
|
2022-02-08T20:57:12.000Z
|
daffodil/base_delegate.py
|
igorkramaric/daffodil
|
eefa2b2801e40246cc1deb4ca5940f39c77e3203
|
[
"MIT"
] | 4
|
2015-04-20T11:04:06.000Z
|
2021-09-22T14:29:50.000Z
|
from .parser import BaseDaffodilDelegate
| 20.5
| 40
| 0.878049
| 4
| 41
| 9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.972973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a0338379114e39f3e9c73faeed4542636a829d53
| 35
|
py
|
Python
|
ifmodels/ifmodels.py
|
hhelmbre/ifmodels
|
24b81afa262c3905db5bc7d1046e269641948dbf
|
[
"MIT"
] | 1
|
2020-04-08T01:43:04.000Z
|
2020-04-08T01:43:04.000Z
|
ifmodels/ifmodels.py
|
hhelmbre/ifmodels
|
24b81afa262c3905db5bc7d1046e269641948dbf
|
[
"MIT"
] | null | null | null |
ifmodels/ifmodels.py
|
hhelmbre/ifmodels
|
24b81afa262c3905db5bc7d1046e269641948dbf
|
[
"MIT"
] | 1
|
2019-11-20T19:41:10.000Z
|
2019-11-20T19:41:10.000Z
|
#A python file
import numpy as np
| 8.75
| 18
| 0.742857
| 7
| 35
| 3.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.228571
| 35
| 3
| 19
| 11.666667
| 0.962963
| 0.371429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a066489d8018924125f5c431b6037066d0973791
| 54,330
|
py
|
Python
|
species/plot/plot_color.py
|
tomasstolker/SPECIES
|
f74483a334f36cbeafeaf372446ae1ea9f278d95
|
[
"MIT"
] | null | null | null |
species/plot/plot_color.py
|
tomasstolker/SPECIES
|
f74483a334f36cbeafeaf372446ae1ea9f278d95
|
[
"MIT"
] | null | null | null |
species/plot/plot_color.py
|
tomasstolker/SPECIES
|
f74483a334f36cbeafeaf372446ae1ea9f278d95
|
[
"MIT"
] | null | null | null |
"""
Module with functions for creating plots with color-magnitude
diagrams and color-color diagrams.
"""
import warnings
from typing import Dict, List, Optional, Tuple, Union
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colorbar import Colorbar
from matplotlib.ticker import MultipleLocator
from scipy.interpolate import interp1d
from typeguard import typechecked
from species.core import box
from species.data import companions
from species.read import read_filter, read_object
from species.util import dust_util, plot_util
@typechecked
def plot_color_magnitude(
    boxes: list,
    objects: Optional[
        Union[
            List[Tuple[str, str, str, str]],
            List[Tuple[str, str, str, str, Optional[dict], Optional[dict]]],
        ]
    ] = None,
    mass_labels: Optional[Union[List[float],
                                List[Tuple[float, str]],
                                Dict[str, List[Tuple[float, str]]]]] = None,
    teff_labels: Optional[Union[List[float], List[Tuple[float, str]]]] = None,
    companion_labels: bool = False,
    accretion: bool = False,
    reddening: Optional[
        List[Tuple[Tuple[str, str], Tuple[str, float], str, float, Tuple[float, float]]]
    ] = None,
    ism_red: Optional[
        List[Tuple[Tuple[str, str], str, float, Tuple[float, float]]]
    ] = None,
    field_range: Optional[Tuple[str, str]] = None,
    label_x: str = "Color",
    label_y: str = "Absolute magnitude",
    xlim: Optional[Tuple[float, float]] = None,
    ylim: Optional[Tuple[float, float]] = None,
    offset: Optional[Tuple[float, float]] = None,
    legend: Optional[Union[str, dict, Tuple[float, float]]] = "upper left",
    figsize: Optional[Tuple[float, float]] = (4.0, 4.8),
    output: Optional[str] = "color-magnitude.pdf",
) -> None:
    """
    Function for creating a color-magnitude diagram.

    Parameters
    ----------
    boxes : list(species.core.box.ColorMagBox, species.core.box.IsochroneBox)
        Boxes with the color-magnitude and isochrone data from
        photometric libraries, spectral libraries, and/or atmospheric
        models. The synthetic data have to be created with
        :func:`~species.read.read_isochrone.ReadIsochrone.get_color_magnitude`.
        These boxes contain synthetic colors and magnitudes for a
        given age and a range of masses.
    objects : list(tuple(str, str, str, str)),
        list(tuple(str, str, str, str, dict, dict)), None
        Tuple with individual objects. The objects require a tuple with
        their database tag, the two filter names for the color, and the
        filter name for the absolute magnitude. Optionally, a
        dictionary with keyword arguments can be provided for the
        object's marker and label, respectively. For example,
        ``{'marker': 'o', 'ms': 10}`` for the marker and
        ``{'ha': 'left', 'va': 'bottom', 'xytext': (5, 5)})`` for the
        label. The parameter is not used if set to ``None``.
    mass_labels : dict(str, list(tuple(float, str))), None
        Plot labels with masses next to the isochrone data.
        The argument is a dictionary. The keys are the isochrone tags
        and the values are lists of tuples. Each tuple contains the
        mass in :math:`M_\\mathrm{J}` and the position of the label
        ('left' or 'right), for
        example ``{'sonora+0.5': [(10., 'left'), (20., 'right')]}``.
        No labels will be shown if the argument is set to ``None`` or
        if an isochrone tag is not included in the dictionary. The
        tags are stored as the ``iso_tag`` attribute of each
        :class:`~species.core.box.ColorColorBox`.
    teff_labels : list(float), list(tuple(float, str)), None
        Plot labels with temperatures (K) next to the synthetic Planck
        photometry. Alternatively, a list of tuples can be provided
        with the planet mass and position of the label ('left' or
        'right), for example ``[(1000., 'left'), (1200., 'right')]``.
        No labels are shown if set to ``None``.
    companion_labels : bool
        Plot labels with the names of the directly imaged companions.
    accretion : bool
        Plot accreting, directly imaged objects with a different symbol
        than the regular, directly imaged objects. The object names
        from ``objects`` will be compared with the data from
        :func:`~species.data.companions.get_data` to check if a
        companion is accreting or not.
    reddening : list(tuple(tuple(str, str), tuple(str, float),
        str, float, tuple(float, float))), None
        Include reddening arrows by providing a list with tuples. Each
        tuple contains the filter names for the color, the filter name
        and value of the magnitude, the mean particle radius (um), and
        the start position (color, mag) of the arrow in the plot, so
        ``((filter_color_1, filter_color_2), (filter_mag, mag_value),
        composition, radius, (x_pos, y_pos))``. The composition can be
        either ``'Fe'`` or ``'MgSiO3'`` (both with crystalline
        structure). A log-normal size distribution is used with the
        specified mean radius and the geometric standard deviation is
        fixed to 2. Both ``xlim`` and ``ylim`` need to be set for the
        correct rotation of the reddening label. The parameter is not
        used if set to ``None``.
    ism_red : list(tuple(tuple(str, str), str, float,
        tuple(float, float))), None
        List with reddening arrows for ISM extinction. Each item in the
        list is a tuple that itself contain a tuple with the filter
        names for the color, the filter name of the magnitude, the
        visual extinction, and the start position (color, mag) of the
        arrow in the plot, so ``((filter_color_1, filter_color_2),
        filter_mag, A_V, (x_pos, y_pos))``. The parameter is not used
        if the argument is set to ``None``.
    field_range : tuple(str, str), None
        Range of the discrete colorbar for the field dwarfs. The tuple
        should contain the lower and upper value ('early M', 'late M',
        'early L', 'late L', 'early T', 'late T', 'early Y). The full
        range is used if set to ``None``.
    label_x : str
        Label for the x-axis.
    label_y : str
        Label for the y-axis.
    xlim : tuple(float, float), None
        Limits for the x-axis. Not used if set to None.
    ylim : tuple(float, float), None
        Limits for the y-axis. Not used if set to None.
    offset : tuple(float, float), None
        Offset of the x- and y-axis label.
    legend : str, tuple(float, float), dict, None
        Legend position or keyword arguments. No legend
        is shown if set to ``None``.
    figsize : tuple(float, float)
        Figure size.
    output : str
        Output filename for the plot. The plot is shown in an
        interface window if the argument is set to ``None``.

    Returns
    -------
    NoneType
        None
    """
    # Global font/axes styling for the whole figure
    mpl.rcParams["font.serif"] = ["Bitstream Vera Serif"]
    mpl.rcParams["font.family"] = "serif"
    plt.rc("axes", edgecolor="black", linewidth=2.2)
    # model_color = ("#234398", "#f6a432", "black")
    # Color cycle (per model grid) and linestyle cycle (per curve of a grid)
    model_color = ("tab:blue", "tab:orange", "tab:green",
                   "tab:red", "tab:purple", "tab:brown",
                   "tab:pink", "tab:olive", "tab:cyan")
    model_linestyle = ("-", "--", ":", "-.")
    # Sort the input boxes into the four supported categories
    isochrones = []
    planck = []
    models = []
    empirical = []
    for item in boxes:
        if isinstance(item, box.IsochroneBox):
            isochrones.append(item)
        elif isinstance(item, box.ColorMagBox):
            if item.object_type == "model":
                models.append(item)
            elif item.library == "planck":
                planck.append(item)
            else:
                empirical.append(item)
        else:
            raise ValueError(
                f"Found a {type(item)} while only ColorMagBox and IsochroneBox "
                f"objects can be provided to 'boxes'."
            )
    if empirical:
        # Reserve a narrow top axis (ax2) for the discrete
        # spectral-type colorbar of the field-dwarf sample
        plt.figure(1, figsize=figsize)
        gridsp = mpl.gridspec.GridSpec(3, 1, height_ratios=[0.2, 0.1, 4.5])
        gridsp.update(wspace=0.0, hspace=0.0, left=0, right=1, bottom=0, top=1)
        ax1 = plt.subplot(gridsp[2, 0])
        ax2 = plt.subplot(gridsp[0, 0])
    else:
        plt.figure(1, figsize=figsize)
        gridsp = mpl.gridspec.GridSpec(1, 1)
        gridsp.update(wspace=0.0, hspace=0.0, left=0, right=1, bottom=0, top=1)
        ax1 = plt.subplot(gridsp[0, 0])
    ax1.tick_params(
        axis="both",
        which="major",
        colors="black",
        labelcolor="black",
        direction="in",
        width=1,
        length=5,
        labelsize=12,
        top=True,
        bottom=True,
        left=True,
        right=True,
    )
    ax1.tick_params(
        axis="both",
        which="minor",
        colors="black",
        labelcolor="black",
        direction="in",
        width=1,
        length=3,
        labelsize=12,
        top=True,
        bottom=True,
        left=True,
        right=True,
    )
    ax1.xaxis.set_major_locator(MultipleLocator(1.0))
    ax1.yaxis.set_major_locator(MultipleLocator(1.0))
    ax1.xaxis.set_minor_locator(MultipleLocator(0.2))
    ax1.yaxis.set_minor_locator(MultipleLocator(0.2))
    ax1.set_xlabel(label_x, fontsize=14)
    ax1.set_ylabel(label_y, fontsize=14)
    # Magnitudes increase downward, so brighter objects appear at the top
    ax1.invert_yaxis()
    if offset is not None:
        ax1.get_xaxis().set_label_coords(0.5, offset[0])
        ax1.get_yaxis().set_label_coords(offset[1], 0.5)
    else:
        ax1.get_xaxis().set_label_coords(0.5, -0.08)
        ax1.get_yaxis().set_label_coords(-0.12, 0.5)
    if xlim is not None:
        ax1.set_xlim(xlim[0], xlim[1])
    if ylim is not None:
        ax1.set_ylim(ylim[0], ylim[1])
    # --- Synthetic model tracks ---
    if models is not None:
        count = 0
        # model_dict maps a model key to [color_index, curve_index];
        # only the first curve of each model gets a legend label
        model_dict = {}
        for j, item in enumerate(models):
            if item.library == "sonora-bobcat":
                # Distinguish Sonora Bobcat grids by the metallicity
                # suffix (last 4 characters of the isochrone tag)
                model_key = item.library + item.iso_tag[-4:]
            else:
                model_key = item.library
            if model_key not in model_dict:
                model_dict[model_key] = [count, 0]
                count += 1
            else:
                model_dict[model_key] = [
                    model_dict[model_key][0],
                    model_dict[model_key][1] + 1,
                ]
            model_count = model_dict[model_key]
            if model_count[1] == 0:
                label = plot_util.model_name(item.library)
                if item.library == "sonora-bobcat":
                    metal = float(item.iso_tag[-4:])
                    label += f", [M/H] = {metal}"
                if item.library == "zhu2015":
                    # Zhu (2015) tracks are drawn as gray markers; the
                    # annotations presumably show accretion rates — the
                    # sptype attribute is reused to carry those values
                    # (TODO confirm against the data module)
                    ax1.plot(
                        item.color,
                        item.magnitude,
                        marker="x",
                        ms=5,
                        linestyle=model_linestyle[model_count[1]],
                        linewidth=0.6,
                        color="gray",
                        label=label,
                        zorder=0,
                    )
                    xlim = ax1.get_xlim()
                    ylim = ax1.get_ylim()
                    for i, teff_item in enumerate(item.sptype):
                        teff_label = (
                            rf"{teff_item:.0e} $M_\mathregular{{Jup}}^{2}$ yr$^{{-1}}$"
                        )
                        # NOTE(review): y-axis is inverted, so this keeps
                        # labels for points brighter than the lower limit
                        if item.magnitude[i] > ylim[1]:
                            ax1.annotate(
                                teff_label,
                                (item.color[i], item.magnitude[i]),
                                color="gray",
                                fontsize=8,
                                ha="left",
                                va="center",
                                xytext=(item.color[i] + 0.1, item.magnitude[i] + 0.05),
                                zorder=3,
                            )
                else:
                    ax1.plot(
                        item.color,
                        item.magnitude,
                        linestyle=model_linestyle[model_count[1]],
                        lw=1.0,
                        color=model_color[model_count[0]],
                        label=label,
                        zorder=0,
                    )
                    if mass_labels is not None:
                        # Interpolate along the track (indexed by mass,
                        # stored in the sptype attribute) to place labels
                        interp_magnitude = interp1d(item.sptype, item.magnitude)
                        interp_color = interp1d(item.sptype, item.color)
                        if item.iso_tag in mass_labels:
                            m_select = mass_labels[item.iso_tag]
                        else:
                            m_select = []
                        for i, mass_item in enumerate(m_select):
                            if isinstance(mass_item, tuple):
                                mass_val = mass_item[0]
                                mass_pos = mass_item[1]
                            else:
                                mass_val = mass_item
                                mass_pos = "right"
                            # For curves after the first, only label the
                            # low-mass (< 20 Mjup) part to avoid clutter
                            if j == 0 or (j > 0 and mass_val < 20.0):
                                pos_color = interp_color(mass_val)
                                pos_mag = interp_magnitude(mass_val)
                                # if j == 1 and mass_val == 10.:
                                #     mass_ha = "center"
                                #     mass_xytext = (pos_color, pos_mag-0.2)
                                if mass_pos == "left":
                                    mass_ha = "right"
                                    mass_xytext = (pos_color - 0.05, pos_mag)
                                else:
                                    mass_ha = "left"
                                    mass_xytext = (pos_color + 0.05, pos_mag)
                                mass_label = (
                                    str(int(mass_val)) + r" M$_\mathregular{J}$"
                                )
                                xlim = ax1.get_xlim()
                                ylim = ax1.get_ylim()
                                # NOTE(review): assumes ylim[0] > ylim[1]
                                # (inverted y-axis) — confirm when ylim is
                                # supplied by the caller in ascending order
                                if (
                                    xlim[0] + 0.2 < pos_color < xlim[1] - 0.2
                                    and ylim[1] + 0.2 < pos_mag < ylim[0] - 0.2
                                ):
                                    ax1.scatter(
                                        pos_color,
                                        pos_mag,
                                        c=model_color[model_count[0]],
                                        s=15,
                                        edgecolor="none",
                                        zorder=0,
                                    )
                                    ax1.annotate(
                                        mass_label,
                                        (pos_color, pos_mag),
                                        color=model_color[model_count[0]],
                                        fontsize=9,
                                        xytext=mass_xytext,
                                        zorder=3,
                                        ha=mass_ha,
                                        va="center",
                                    )
            else:
                # Subsequent curves of an already-labeled model:
                # thinner line, no legend entry
                ax1.plot(
                    item.color,
                    item.magnitude,
                    linestyle=model_linestyle[model_count[1]],
                    linewidth=0.6,
                    color=model_color[model_count[0]],
                    zorder=0,
                )
    # --- Synthetic Planck photometry ---
    if planck is not None:
        planck_count = 0
        for j, item in enumerate(planck):
            if planck_count == 0:
                label = plot_util.model_name(item.library)
            else:
                label = None
            ax1.plot(
                item.color,
                item.magnitude,
                linestyle="--",
                linewidth=0.8,
                color="gray",
                label=label,
                zorder=0,
            )
            # Temperature labels are only drawn for the first Planck curve
            if teff_labels is not None and planck_count == 0:
                # The sptype attribute carries Teff for Planck boxes
                interp_magnitude = interp1d(item.sptype, item.magnitude)
                interp_color = interp1d(item.sptype, item.color)
                for i, teff_item in enumerate(teff_labels):
                    if isinstance(teff_item, tuple):
                        teff_val = teff_item[0]
                        teff_pos = teff_item[1]
                    else:
                        teff_val = teff_item
                        teff_pos = "right"
                    if j == 0 or (j > 0 and teff_val < 20.0):
                        pos_color = interp_color(teff_val)
                        pos_mag = interp_magnitude(teff_val)
                        if teff_pos == "left":
                            teff_ha = "right"
                            teff_xytext = (pos_color - 0.05, pos_mag)
                        else:
                            teff_ha = "left"
                            teff_xytext = (pos_color + 0.05, pos_mag)
                        teff_label = f"{int(teff_val)} K"
                        xlim = ax1.get_xlim()
                        ylim = ax1.get_ylim()
                        # Only label points safely inside the axes limits
                        if (
                            xlim[0] + 0.2 < pos_color < xlim[1] - 0.2
                            and ylim[1] + 0.2 < pos_mag < ylim[0] - 0.2
                        ):
                            ax1.scatter(
                                pos_color, pos_mag, c="gray", s=15, ec="none", zorder=0
                            )
                            if planck_count == 0:
                                ax1.annotate(
                                    teff_label,
                                    (pos_color, pos_mag),
                                    color="gray",
                                    fontsize=9,
                                    xytext=teff_xytext,
                                    zorder=3,
                                    ha=teff_ha,
                                    va="center",
                                )
            planck_count += 1
    # --- Empirical photometry (field and young samples) ---
    if empirical:
        cmap = plt.cm.viridis
        bounds, ticks, ticklabels = plot_util.field_bounds_ticks(field_range)
        norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
        for item in empirical:
            sptype = item.sptype
            color = item.color
            magnitude = item.magnitude
            names = item.names
            if isinstance(sptype, list):
                sptype = np.array(sptype)
            if item.object_type in ["field", None]:
                # Drop entries without a spectral type
                indices = np.where(sptype != "None")[0]
                sptype = sptype[indices]
                color = color[indices]
                magnitude = magnitude[indices]
                spt_disc = plot_util.sptype_substellar(sptype, color.shape)
                # Remove duplicate colors (np.unique also sorts)
                _, unique = np.unique(color, return_index=True)
                sptype = sptype[unique]
                color = color[unique]
                magnitude = magnitude[unique]
                spt_disc = spt_disc[unique]
                scat = ax1.scatter(
                    color,
                    magnitude,
                    c=spt_disc,
                    cmap=cmap,
                    norm=norm,
                    s=50,
                    alpha=0.7,
                    edgecolor="none",
                    zorder=2,
                )
                # Discrete spectral-type colorbar on the top axis
                cb = Colorbar(
                    ax=ax2,
                    mappable=scat,
                    orientation="horizontal",
                    ticklocation="top",
                    format="%.2f",
                )
                cb.ax.tick_params(
                    width=1, length=5, labelsize=10, direction="in", color="black"
                )
                cb.set_ticks(ticks)
                cb.set_ticklabels(ticklabels)
            elif item.object_type == "young":
                if objects is not None:
                    # Avoid plotting objects twice when they are also
                    # given individually through the 'objects' argument
                    object_names = []
                    for obj_item in objects:
                        object_names.append(obj_item[0])
                    indices = plot_util.remove_color_duplicates(object_names, names)
                    color = color[indices]
                    magnitude = magnitude[indices]
                ax1.plot(
                    color,
                    magnitude,
                    marker="s",
                    ms=4,
                    linestyle="none",
                    alpha=0.7,
                    color="gray",
                    markeredgecolor="black",
                    label="Young/low-gravity",
                    zorder=2,
                )
                # for item in names[indices]:
                #
                #     if item == '2MASSWJ2244316+204343':
                #         item = '2MASS 2244+2043'
                #
                #     kwargs = {'ha': 'left', 'va': 'center', 'fontsize': 8.5,
                #               'xytext': (5., 0.), 'color': 'black'}
                #
                #     ax1.annotate(item, (color, magnitude), zorder=3,
                #                  textcoords='offset points', **kwargs)
    # --- Isochrone curves ---
    if isochrones:
        for item in isochrones:
            ax1.plot(
                item.color, item.magnitude, linestyle="-", linewidth=1.0, color="black"
            )
    # --- Dust reddening arrows ---
    if reddening is not None:
        for item in reddening:
            # Extinction in the two color filters for the given
            # composition and mean grain radius
            ext_1, ext_2 = dust_util.calc_reddening(
                item[0],
                item[1],
                composition=item[2],
                structure="crystalline",
                radius_g=item[3],
            )
            delta_x = ext_1 - ext_2
            delta_y = item[1][1]
            x_pos = item[4][0] + delta_x
            y_pos = item[4][1] + delta_y
            ax1.annotate(
                "",
                (x_pos, y_pos),
                xytext=(item[4][0], item[4][1]),
                fontsize=8,
                arrowprops={"arrowstyle": "->"},
                color="black",
                zorder=3.0,
            )
            x_pos_text = item[4][0] + delta_x / 2.0
            y_pos_text = item[4][1] + delta_y / 2.0
            vector_len = np.sqrt(delta_x ** 2 + delta_y ** 2)
            if item[2] == "MgSiO3":
                dust_species = r"MgSiO$_{3}$"
            elif item[2] == "Fe":
                dust_species = "Fe"
            if (item[3]).is_integer():
                red_label = f"{dust_species} ({item[3]:.0f} µm)"
            else:
                red_label = f"{dust_species} ({item[3]:.1f} µm)"
            text = ax1.annotate(
                red_label,
                (x_pos_text, y_pos_text),
                xytext=(7.0 * delta_y / vector_len, 7.0 * delta_x / vector_len),
                textcoords="offset points",
                fontsize=8.0,
                color="black",
                ha="center",
                va="center",
            )
            # Rotate the label so it runs parallel to the arrow
            # (requires final axes limits, hence transData here)
            ax1.plot([item[4][0], x_pos], [item[4][1], y_pos], "-", color="white")
            sp1 = ax1.transData.transform_point((item[4][0], item[4][1]))
            sp2 = ax1.transData.transform_point((x_pos, y_pos))
            angle = np.degrees(np.arctan2(sp2[1] - sp1[1], sp2[0] - sp1[0]))
            text.set_rotation(angle)
    # --- ISM extinction arrows ---
    if ism_red is not None:
        for item in ism_red:
            # Color filters
            read_filt_0 = read_filter.ReadFilter(item[0][0])
            read_filt_1 = read_filter.ReadFilter(item[0][1])
            # Magnitude filter
            read_filt_2 = read_filter.ReadFilter(item[1])
            mean_wavel = np.array(
                [
                    read_filt_0.mean_wavelength(),
                    read_filt_1.mean_wavelength(),
                    read_filt_2.mean_wavelength(),
                ]
            )
            # ISM extinction with R_V fixed to 3.1
            ext_mag = dust_util.ism_extinction(item[2], 3.1, mean_wavel)
            delta_x = ext_mag[0] - ext_mag[1]
            delta_y = ext_mag[2]
            x_pos = item[3][0] + delta_x
            y_pos = item[3][1] + delta_y
            ax1.annotate(
                "",
                (x_pos, y_pos),
                xytext=(item[3][0], item[3][1]),
                fontsize=8,
                arrowprops={"arrowstyle": "->"},
                color="black",
                zorder=3.0,
            )
            x_pos_text = item[3][0] + delta_x / 2.0
            y_pos_text = item[3][1] + delta_y / 2.0
            vector_len = np.sqrt(delta_x ** 2 + delta_y ** 2)
            if (item[2]).is_integer():
                red_label = fr"A$_\mathregular{{V}}$ = {item[2]:.0f}"
            else:
                red_label = fr"A$_\mathregular{{V}}$ = {item[2]:.1f}"
            text = ax1.annotate(
                red_label,
                (x_pos_text, y_pos_text),
                xytext=(8.0 * delta_y / vector_len, 8.0 * delta_x / vector_len),
                textcoords="offset points",
                fontsize=8.0,
                color="black",
                ha="center",
                va="center",
            )
            # Align the label with the arrow direction in display space
            ax1.plot([item[3][0], x_pos], [item[3][1], y_pos], "-", color="white")
            sp1 = ax1.transData.transform_point((item[3][0], item[3][1]))
            sp2 = ax1.transData.transform_point((x_pos, y_pos))
            angle = np.degrees(np.arctan2(sp2[1] - sp1[1], sp2[0] - sp1[0]))
            text.set_rotation(angle)
    # --- Individual (directly imaged) objects ---
    if objects is not None:
        for i, item in enumerate(objects):
            objdata = read_object.ReadObject(item[0])
            objcolor1 = objdata.get_photometry(item[1])
            objcolor2 = objdata.get_photometry(item[2])
            # Multiple magnitudes per filter: use the first entry only
            if objcolor1.ndim == 2:
                print(
                    f"Found {objcolor1.shape[1]} values for filter {item[1]} of {item[0]}"
                )
                print(
                    f"so using the first value: {objcolor1[0, 0]} +/- {objcolor1[1, 0]} mag"
                )
                objcolor1 = objcolor1[:, 0]
            if objcolor2.ndim == 2:
                print(
                    f"Found {objcolor2.shape[1]} values for filter {item[2]} of {item[0]}"
                )
                print(
                    f"so using the first value: {objcolor2[0, 0]} +/- {objcolor2[1, 0]} mag"
                )
                objcolor2 = objcolor2[:, 0]
            abs_mag, abs_err = objdata.get_absmag(item[3])
            if isinstance(abs_mag, np.ndarray):
                abs_mag = abs_mag[0]
                abs_err = abs_err[0]
            # Color uncertainty from the two filter uncertainties in quadrature
            colorerr = np.sqrt(objcolor1[1] ** 2 + objcolor2[1] ** 2)
            x_color = objcolor1[0] - objcolor2[0]
            companion_data = companions.get_data()
            # NOTE(review): len(item) > 4 relies on 'objects' tuples having
            # either 4 or 6 elements, as enforced by the typechecked
            # signature; a 5-element tuple would raise an IndexError below
            if len(item) > 4 and item[4] is not None:
                kwargs = item[4]
            else:
                kwargs = {
                    "marker": ">",
                    "ms": 6.0,
                    "color": "black",
                    "mfc": "white",
                    "mec": "black",
                    "label": "Direct imaging",
                }
            if (
                accretion
                and item[0] in companion_data
                and companion_data[item[0]]["accretion"]
            ):
                # Accreting companions get a distinct marker
                kwargs["marker"] = "X"
                kwargs["ms"] = 7.0
                kwargs["label"] = "Accreting"
            ax1.errorbar(
                x_color, abs_mag, yerr=abs_err, xerr=colorerr, zorder=3, **kwargs
            )
            if companion_labels:
                if len(item) > 4 and item[5] is not None:
                    kwargs = item[5]
                else:
                    kwargs = {
                        "ha": "left",
                        "va": "bottom",
                        "fontsize": 8.5,
                        "xytext": (5.0, 5.0),
                        "color": "black",
                    }
                ax1.annotate(
                    objdata.object_name,
                    (x_color, abs_mag),
                    zorder=3,
                    textcoords="offset points",
                    **kwargs,
                )
    if output is None:
        print("Plotting color-magnitude diagram...", end="", flush=True)
    else:
        print(f"Plotting color-magnitude diagram: {output}...", end="", flush=True)
    if legend is not None:
        handles, labels = ax1.get_legend_handles_labels()
        # Prevent duplicates
        by_label = dict(zip(labels, handles))
        if handles:
            ax1.legend(
                by_label.values(),
                by_label.keys(),
                loc=legend,
                fontsize=8.5,
                frameon=False,
                numpoints=1,
            )
    print(" [DONE]")
    if output is None:
        plt.show()
    else:
        plt.savefig(output, bbox_inches="tight")
    plt.clf()
    plt.close()
@typechecked
def plot_color_color(
    boxes: list,
    objects: Optional[
        Union[
            List[Tuple[str, Tuple[str, str], Tuple[str, str]]],
            List[
                Tuple[
                    str,
                    Tuple[str, str],
                    Tuple[str, str],
                    Optional[dict],
                    Optional[dict],
                ]
            ],
        ]
    ] = None,
    mass_labels: Optional[Union[List[float],
                                List[Tuple[float, str]],
                                Dict[str, List[Tuple[float, str]]]]] = None,
    teff_labels: Optional[Union[List[float], List[Tuple[float, str]]]] = None,
    companion_labels: bool = False,
    reddening: Optional[
        List[
            Tuple[
                Tuple[str, str],
                Tuple[str, str],
                Tuple[str, float],
                str,
                float,
                Tuple[float, float],
            ]
        ]
    ] = None,
    field_range: Optional[Tuple[str, str]] = None,
    label_x: str = "Color",
    label_y: str = "Color",
    xlim: Optional[Tuple[float, float]] = None,
    ylim: Optional[Tuple[float, float]] = None,
    offset: Optional[Tuple[float, float]] = None,
    legend: Optional[Union[str, dict, Tuple[float, float]]] = "upper left",
    figsize: Optional[Tuple[float, float]] = (4.0, 4.3),
    output: Optional[str] = "color-color.pdf",
) -> None:
    """
    Function for creating a color-color diagram.

    Parameters
    ----------
    boxes : list(species.core.box.ColorColorBox, species.core.box.IsochroneBox)
        Boxes with the color-color from photometric libraries,
        spectral libraries, isochrones, and/or atmospheric models.
    objects : tuple(tuple(str, tuple(str, str), tuple(str, str))),
        tuple(tuple(str, tuple(str, str), tuple(str, str), dict, dict)), None
        Tuple with individual objects. The objects require a tuple
        with their database tag, the two filter names for the first
        color, and the two filter names for the second color.
        Optionally, a dictionary with keyword arguments can be provided
        for the object's marker and label, respectively. For
        example, ``{'marker': 'o', 'ms': 10}`` for the marker
        and ``{'ha': 'left', 'va': 'bottom', 'xytext': (5, 5)})``
        for the label. The parameter is not used if set to ``None``.
    mass_labels : dict(str, list(tuple(float, str))), None
        Plot labels with masses next to the isochrone data.
        The argument is a dictionary. The keys are the isochrone tags
        and the values are lists of tuples. Each tuple contains the
        mass in :math:`M_\\mathrm{J}` and the position of the label
        ('left' or 'right), for
        example ``{'sonora+0.5': [(10., 'left'), (20., 'right')]}``.
        No labels will be shown if the argument is set to ``None`` or
        if an isochrone tag is not included in the dictionary. The
        tags are stored as the ``iso_tag`` attribute of each
        :class:`~species.core.box.ColorColorBox`.
    teff_labels : list(float), list(tuple(float, str)), None
        Plot labels with temperatures (K) next to the synthetic Planck
        photometry. Alternatively, a list of tuples can be provided
        with the planet mass and position of the label ('left' or
        'right), for example ``[(1000., 'left'), (1200., 'right')]``.
        No labels are shown if the argument is set to ``None``.
    companion_labels : bool
        Plot labels with the names of the directly imaged companions.
    reddening : list(tuple(tuple(str, str), tuple(str, str),
        tuple(str, float), str, float, tuple(float, float)), None
        Include reddening arrows by providing a list with tuples.
        Each tuple contains the filter names for the color, the filter
        name for the magnitude, the particle radius (um), and the start
        position (color, mag) of the arrow in the plot, so
        (filter_color_1, filter_color_2, filter_mag, composition,
        radius, (x_pos, y_pos)). The composition can be either 'Fe' or
        'MgSiO3' (both with crystalline structure). The parameter is
        not used if set to ``None``.
    field_range : tuple(str, str), None
        Range of the discrete colorbar for the field dwarfs. The tuple
        should contain the lower and upper value ('early M', 'late M',
        'early L', 'late L', 'early T', 'late T', 'early Y).
        The full range is used if the argument is set to ``None``.
    label_x : str
        Label for the x-axis.
    label_y : str
        Label for the y-axis.
    xlim : tuple(float, float)
        Limits for the x-axis.
    ylim : tuple(float, float)
        Limits for the y-axis.
    offset : tuple(float, float), None
        Offset of the x- and y-axis label.
    legend : str, tuple(float, float), dict, None
        Legend position or dictionary with keyword arguments.
        No legend is shown if the argument is set to ``None``.
    figsize : tuple(float, float)
        Figure size.
    output : str
        Output filename for the plot. The plot is shown in an
        interface window if the argument is set to ``None``.

    Returns
    -------
    NoneType
        None
    """
    # Global font/axes styling for the whole figure
    mpl.rcParams["font.serif"] = ["Bitstream Vera Serif"]
    mpl.rcParams["font.family"] = "serif"
    plt.rc("axes", edgecolor="black", linewidth=2.2)
    # model_color = ("#234398", "#f6a432", "black")
    # Color cycle (per model grid) and linestyle cycle (per curve of a grid)
    model_color = ("tab:blue", "tab:orange", "tab:green",
                   "tab:red", "tab:purple", "tab:brown",
                   "tab:pink", "tab:olive", "tab:cyan")
    model_linestyle = ("-", "--", ":", "-.")
    # Sort the input boxes into the four supported categories
    isochrones = []
    planck = []
    models = []
    empirical = []
    for item in boxes:
        if isinstance(item, box.IsochroneBox):
            isochrones.append(item)
        elif isinstance(item, box.ColorColorBox):
            if item.object_type == "model":
                models.append(item)
            elif item.library == "planck":
                planck.append(item)
            else:
                empirical.append(item)
        else:
            raise ValueError(
                f"Found a {type(item)} while only ColorColorBox and "
                f"IsochroneBox objects can be provided to 'boxes'."
            )
    plt.figure(1, figsize=figsize)
    if empirical:
        # Reserve a narrow top axis (ax2) for the discrete
        # spectral-type colorbar of the field-dwarf sample
        gridsp = mpl.gridspec.GridSpec(3, 1, height_ratios=[0.2, 0.1, 4.0])
    else:
        gridsp = mpl.gridspec.GridSpec(1, 1)
    gridsp.update(wspace=0.0, hspace=0.0, left=0, right=1, bottom=0, top=1)
    if empirical:
        ax1 = plt.subplot(gridsp[2, 0])
        ax2 = plt.subplot(gridsp[0, 0])
    else:
        ax1 = plt.subplot(gridsp[0, 0])
        ax2 = None
    ax1.tick_params(
        axis="both",
        which="major",
        colors="black",
        labelcolor="black",
        direction="in",
        width=1,
        length=5,
        labelsize=12,
        top=True,
        bottom=True,
        left=True,
        right=True,
    )
    ax1.tick_params(
        axis="both",
        which="minor",
        colors="black",
        labelcolor="black",
        direction="in",
        width=1,
        length=3,
        labelsize=12,
        top=True,
        bottom=True,
        left=True,
        right=True,
    )
    ax1.xaxis.set_major_locator(MultipleLocator(0.5))
    ax1.yaxis.set_major_locator(MultipleLocator(0.5))
    ax1.xaxis.set_minor_locator(MultipleLocator(0.1))
    ax1.yaxis.set_minor_locator(MultipleLocator(0.1))
    ax1.set_xlabel(label_x, fontsize=14)
    ax1.set_ylabel(label_y, fontsize=14)
    ax1.invert_yaxis()
    if offset:
        ax1.get_xaxis().set_label_coords(0.5, offset[0])
        ax1.get_yaxis().set_label_coords(offset[1], 0.5)
    else:
        ax1.get_xaxis().set_label_coords(0.5, -0.08)
        ax1.get_yaxis().set_label_coords(-0.12, 0.5)
    if xlim:
        ax1.set_xlim(xlim[0], xlim[1])
    if ylim:
        ax1.set_ylim(ylim[0], ylim[1])
    # --- Synthetic model tracks ---
    if models is not None:
        count = 0
        # model_dict maps a model key to [color_index, curve_index];
        # only the first curve of each model gets a legend label
        model_dict = {}
        for j, item in enumerate(models):
            if item.library == "sonora-bobcat":
                # Distinguish Sonora Bobcat grids by the metallicity
                # suffix (last 4 characters of the isochrone tag)
                model_key = item.library + item.iso_tag[-4:]
            else:
                model_key = item.library
            if model_key not in model_dict:
                model_dict[model_key] = [count, 0]
                count += 1
            else:
                model_dict[model_key] = [
                    model_dict[model_key][0],
                    model_dict[model_key][1] + 1,
                ]
            model_count = model_dict[model_key]
            if model_count[1] == 0:
                label = plot_util.model_name(item.library)
                if item.library == "sonora-bobcat":
                    metal = float(item.iso_tag[-4:])
                    label += f", [M/H] = {metal}"
                if item.library == "zhu2015":
                    # Zhu (2015) tracks as gray markers; annotations
                    # presumably show accretion rates stored in the
                    # sptype attribute — TODO confirm
                    ax1.plot(
                        item.color1,
                        item.color2,
                        marker="x",
                        ms=5,
                        linestyle=model_linestyle[model_count[1]],
                        linewidth=0.6,
                        color="gray",
                        label=label,
                        zorder=0,
                    )
                    xlim = ax1.get_xlim()
                    ylim = ax1.get_ylim()
                    for i, teff_item in enumerate(item.sptype):
                        teff_label = (
                            rf"{teff_item:.0e} $M_\mathregular{{Jup}}^{2}$ yr$^{{-1}}$"
                        )
                        if item.color2[i] < ylim[1]:
                            ax1.annotate(
                                teff_label,
                                (item.color1[i], item.color2[i]),
                                color="gray",
                                fontsize=8,
                                ha="left",
                                va="center",
                                xytext=(item.color1[i] + 0.1, item.color2[i] - 0.05),
                                zorder=3,
                            )
                else:
                    ax1.plot(
                        item.color1,
                        item.color2,
                        linestyle=model_linestyle[model_count[1]],
                        lw=1.0,
                        color=model_color[model_count[0]],
                        label=label,
                        zorder=0,
                    )
                    if mass_labels is not None:
                        # Interpolate along the track (indexed by mass,
                        # stored in the sptype attribute) to place labels
                        interp_color1 = interp1d(item.sptype, item.color1)
                        interp_color2 = interp1d(item.sptype, item.color2)
                        if item.iso_tag in mass_labels:
                            m_select = mass_labels[item.iso_tag]
                        else:
                            m_select = []
                        for i, mass_item in enumerate(m_select):
                            mass_val = mass_item[0]
                            mass_pos = mass_item[1]
                            pos_color1 = interp_color1(mass_val)
                            pos_color2 = interp_color2(mass_val)
                            if mass_pos == "left":
                                mass_ha = "right"
                                mass_xytext = (pos_color1 - 0.05, pos_color2)
                            else:
                                mass_ha = "left"
                                mass_xytext = (pos_color1 + 0.05, pos_color2)
                            mass_label = str(int(mass_val)) \
                                + r" M$_\mathregular{J}$"
                            xlim = ax1.get_xlim()
                            ylim = ax1.get_ylim()
                            # NOTE(review): this bounds test assumes
                            # ylim[0] < ylim[1]; the sibling function
                            # plot_color_magnitude assumes the opposite
                            # (inverted) ordering — confirm intended
                            if (xlim[0] + 0.2 < pos_color1 < xlim[1] - 0.2
                                    and ylim[0] + 0.2 < pos_color2 < ylim[1] - 0.2):
                                ax1.scatter(
                                    pos_color1,
                                    pos_color2,
                                    c=model_color[model_count[0]],
                                    s=15,
                                    edgecolor="none",
                                    zorder=0,
                                )
                                ax1.annotate(
                                    mass_label,
                                    (pos_color1, pos_color2),
                                    color=model_color[model_count[0]],
                                    fontsize=9,
                                    xytext=mass_xytext,
                                    ha=mass_ha,
                                    va="center",
                                    zorder=3,
                                )
                            else:
                                warnings.warn(
                                    f"Please use larger axes limits "
                                    f"to include the mass label for "
                                    f"{mass_val} Mjup.")
            else:
                # Subsequent curves of an already-labeled model:
                # thinner line, no legend entry
                ax1.plot(
                    item.color1,
                    item.color2,
                    linestyle=model_linestyle[model_count[1]],
                    linewidth=0.6,
                    color=model_color[model_count[0]],
                    label=None,
                    zorder=0,
                )
    # --- Synthetic Planck photometry ---
    if planck is not None:
        planck_count = 0
        for j, item in enumerate(planck):
            if planck_count == 0:
                label = plot_util.model_name(item.library)
                ax1.plot(
                    item.color1,
                    item.color2,
                    ls="--",
                    linewidth=0.8,
                    color="gray",
                    label=label,
                    zorder=0,
                )
                if teff_labels is not None:
                    # The sptype attribute carries Teff for Planck boxes
                    interp_color1 = interp1d(item.sptype, item.color1)
                    interp_color2 = interp1d(item.sptype, item.color2)
                    for i, teff_item in enumerate(teff_labels):
                        if isinstance(teff_item, tuple):
                            teff_val = teff_item[0]
                            teff_pos = teff_item[1]
                        else:
                            teff_val = teff_item
                            teff_pos = "right"
                        if j == 0 or (j > 0 and teff_val < 20.0):
                            pos_color1 = interp_color1(teff_val)
                            pos_color2 = interp_color2(teff_val)
                            if teff_pos == "left":
                                teff_ha = "right"
                                teff_xytext = (pos_color1 - 0.05, pos_color2)
                            else:
                                teff_ha = "left"
                                teff_xytext = (pos_color1 + 0.05, pos_color2)
                            teff_label = f"{int(teff_val)} K"
                            xlim = ax1.get_xlim()
                            ylim = ax1.get_ylim()
                            # Only label points safely inside the axes limits
                            if (
                                xlim[0] + 0.2 < pos_color1 < xlim[1] - 0.2
                                and ylim[0] + 0.2 < pos_color2 < ylim[1] - 0.2
                            ):
                                ax1.scatter(
                                    pos_color1,
                                    pos_color2,
                                    c="gray",
                                    s=15,
                                    edgecolor="none",
                                    zorder=0,
                                )
                                ax1.annotate(
                                    teff_label,
                                    (pos_color1, pos_color2),
                                    color="gray",
                                    fontsize=9,
                                    xytext=teff_xytext,
                                    zorder=3,
                                    ha=teff_ha,
                                    va="center",
                                )
            else:
                ax1.plot(
                    item.color1, item.color2, ls="--", lw=0.5, color="gray", zorder=0
                )
            planck_count += 1
    # --- Empirical photometry (field and young samples) ---
    if empirical:
        cmap = plt.cm.viridis
        bounds, ticks, ticklabels = plot_util.field_bounds_ticks(field_range)
        norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
        for item in empirical:
            sptype = item.sptype
            names = item.names
            color1 = item.color1
            color2 = item.color2
            if isinstance(sptype, list):
                sptype = np.array(sptype)
            if item.object_type in ["field", None]:
                # Drop entries without a spectral type
                indices = np.where(sptype != "None")[0]
                sptype = sptype[indices]
                color1 = color1[indices]
                color2 = color2[indices]
                spt_disc = plot_util.sptype_substellar(sptype, color1.shape)
                # Remove duplicate colors (np.unique also sorts)
                _, unique = np.unique(color1, return_index=True)
                sptype = sptype[unique]
                color1 = color1[unique]
                color2 = color2[unique]
                spt_disc = spt_disc[unique]
                scat = ax1.scatter(
                    color1,
                    color2,
                    c=spt_disc,
                    cmap=cmap,
                    norm=norm,
                    s=50,
                    alpha=0.7,
                    edgecolor="none",
                    zorder=2,
                )
                # Discrete spectral-type colorbar on the top axis
                cb = Colorbar(
                    ax=ax2,
                    mappable=scat,
                    orientation="horizontal",
                    ticklocation="top",
                    format="%.2f",
                )
                cb.ax.tick_params(
                    width=1, length=5, labelsize=10, direction="in", color="black"
                )
                cb.set_ticks(ticks)
                cb.set_ticklabels(ticklabels)
            elif item.object_type == "young":
                if objects is not None:
                    # Avoid plotting objects twice when they are also
                    # given individually through the 'objects' argument
                    object_names = []
                    for obj_item in objects:
                        object_names.append(obj_item[0])
                    indices = plot_util.remove_color_duplicates(object_names, names)
                    color1 = color1[indices]
                    color2 = color2[indices]
                ax1.plot(
                    color1,
                    color2,
                    marker="s",
                    ms=4,
                    linestyle="none",
                    alpha=0.7,
                    color="gray",
                    markeredgecolor="black",
                    label="Young/low-gravity",
                    zorder=2,
                )
    # --- Isochrone curves ---
    if isochrones:
        for item in isochrones:
            ax1.plot(
                item.colors[0],
                item.colors[1],
                linestyle="-",
                linewidth=1.0,
                color="black",
            )
    # --- Dust reddening arrows ---
    if reddening is not None:
        for item in reddening:
            # Extinction for each of the two colors separately
            ext_1, ext_2 = dust_util.calc_reddening(
                item[0],
                item[2],
                composition=item[3],
                structure="crystalline",
                radius_g=item[4],
            )
            ext_3, ext_4 = dust_util.calc_reddening(
                item[1],
                item[2],
                composition=item[3],
                structure="crystalline",
                radius_g=item[4],
            )
            delta_x = ext_1 - ext_2
            delta_y = ext_3 - ext_4
            x_pos = item[5][0] + delta_x
            y_pos = item[5][1] + delta_y
            ax1.annotate(
                "",
                (x_pos, y_pos),
                xytext=(item[5][0], item[5][1]),
                fontsize=8,
                arrowprops={"arrowstyle": "->"},
                color="black",
                zorder=3.0,
            )
            x_pos_text = item[5][0] + delta_x / 2.0
            y_pos_text = item[5][1] + delta_y / 2.0
            vector_len = np.sqrt(delta_x ** 2 + delta_y ** 2)
            if item[3] == "MgSiO3":
                dust_species = r"MgSiO$_{3}$"
            elif item[3] == "Fe":
                dust_species = "Fe"
            if item[4].is_integer():
                red_label = f"{dust_species} ({item[4]:.0f} µm)"
            else:
                red_label = f"{dust_species} ({item[4]:.1f} µm)"
            text = ax1.annotate(
                red_label,
                (x_pos_text, y_pos_text),
                xytext=(-7.0 * delta_y / vector_len, 7.0 * delta_x / vector_len),
                textcoords="offset points",
                fontsize=8.0,
                color="black",
                ha="center",
                va="center",
            )
            # Rotate the label so it runs parallel to the arrow
            # (requires final axes limits, hence transData here)
            ax1.plot([item[5][0], x_pos], [item[5][1], y_pos], "-", color="white")
            sp1 = ax1.transData.transform_point((item[5][0], item[5][1]))
            sp2 = ax1.transData.transform_point((x_pos, y_pos))
            angle = np.degrees(np.arctan2(sp2[1] - sp1[1], sp2[0] - sp1[0]))
            text.set_rotation(angle)
    # --- Individual (directly imaged) objects ---
    if objects is not None:
        for i, item in enumerate(objects):
            objdata = read_object.ReadObject(item[0])
            objphot1 = objdata.get_photometry(item[1][0])
            objphot2 = objdata.get_photometry(item[1][1])
            objphot3 = objdata.get_photometry(item[2][0])
            objphot4 = objdata.get_photometry(item[2][1])
            # Multiple magnitudes per filter: use the first entry only
            if objphot1.ndim == 2:
                print(f"Found {objphot1.shape[1]} values for "
                      f"filter {item[1][0]} of {item[0]} "
                      f"so using the first magnitude: "
                      f"{objphot1[0, 0]} +/- {objphot1[1, 0]}")
                objphot1 = objphot1[:, 0]
            if objphot2.ndim == 2:
                print(f"Found {objphot2.shape[1]} values for "
                      f"filter {item[1][1]} of {item[0]} "
                      f"so using the first magnitude: "
                      f"{objphot2[0, 0]} +/- {objphot2[1, 0]}")
                objphot2 = objphot2[:, 0]
            if objphot3.ndim == 2:
                print(f"Found {objphot3.shape[1]} values for "
                      f"filter {item[2][0]} of {item[0]} "
                      f"so using the first magnitude: "
                      f"{objphot3[0, 0]} +/- {objphot3[1, 0]}")
                objphot3 = objphot3[:, 0]
            if objphot4.ndim == 2:
                print(f"Found {objphot4.shape[1]} values for "
                      f"filter {item[2][1]} of {item[0]} "
                      f"so using the first magnitude: "
                      f"{objphot4[0, 0]} +/- {objphot4[1, 0]}")
                objphot4 = objphot4[:, 0]
            color1 = objphot1[0] - objphot2[0]
            color2 = objphot3[0] - objphot4[0]
            # Color uncertainties from the filter uncertainties in quadrature
            error1 = np.sqrt(objphot1[1] ** 2 + objphot2[1] ** 2)
            error2 = np.sqrt(objphot3[1] ** 2 + objphot4[1] ** 2)
            if len(item) > 3 and item[3] is not None:
                kwargs = item[3]
            else:
                kwargs = {
                    "marker": ">",
                    "ms": 6.0,
                    "color": "black",
                    "mfc": "white",
                    "mec": "black",
                    "label": "Direct imaging",
                }
            ax1.errorbar(color1, color2, xerr=error1, yerr=error2, zorder=3, **kwargs)
            if companion_labels:
                if len(item) > 3 and item[4] is not None:
                    kwargs = item[4]
                else:
                    kwargs = {
                        "ha": "left",
                        "va": "bottom",
                        "fontsize": 8.5,
                        "xytext": (5.0, 5.0),
                        "color": "black",
                    }
                ax1.annotate(
                    objdata.object_name,
                    (color1, color2),
                    zorder=3,
                    textcoords="offset points",
                    **kwargs,
                )
    if output is None:
        print("Plotting color-color diagram...", end="", flush=True)
    else:
        print(f"Plotting color-color diagram: {output}...", end="", flush=True)
    # Fix: removed a stray, unconditional get_legend_handles_labels() call
    # whose result was immediately shadowed below (dead code that was also
    # executed when legend is None)
    if legend is not None:
        handles, labels = ax1.get_legend_handles_labels()
        # Prevent duplicates
        by_label = dict(zip(labels, handles))
        if handles:
            ax1.legend(
                by_label.values(),
                by_label.keys(),
                loc=legend,
                fontsize=8.5,
                frameon=False,
                numpoints=1,
            )
    print(" [DONE]")
    if output is None:
        plt.show()
    else:
        plt.savefig(output, bbox_inches="tight")
    plt.clf()
    plt.close()
| 34.916452
| 93
| 0.4522
| 5,731
| 54,330
| 4.168557
| 0.083057
| 0.015069
| 0.016325
| 0.012725
| 0.839221
| 0.805944
| 0.773294
| 0.741356
| 0.699707
| 0.667015
| 0
| 0.035872
| 0.44226
| 54,330
| 1,555
| 94
| 34.938907
| 0.752525
| 0.165286
| 0
| 0.713628
| 0
| 0.003683
| 0.068165
| 0.002151
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001842
| false
| 0
| 0.011971
| 0
| 0.013812
| 0.012891
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a07f514ed648ad66813862f973267290e4437b41
| 27
|
py
|
Python
|
django_toolkit/tests/templatetags/__init__.py
|
alexhayes/django-toolkit
|
b64106392fad596defc915b8235fe6e1d0013b5b
|
[
"MIT"
] | 7
|
2015-06-23T07:36:04.000Z
|
2016-12-24T00:42:50.000Z
|
django_toolkit/tests/templatetags/__init__.py
|
alexhayes/django-toolkit
|
b64106392fad596defc915b8235fe6e1d0013b5b
|
[
"MIT"
] | 5
|
2020-02-12T00:49:28.000Z
|
2021-12-13T19:47:48.000Z
|
django_toolkit/tests/templatetags/__init__.py
|
alexhayes/django-toolkit
|
b64106392fad596defc915b8235fe6e1d0013b5b
|
[
"MIT"
] | 4
|
2015-06-23T07:37:40.000Z
|
2021-04-04T03:53:34.000Z
|
from .url_helpers import *
| 13.5
| 26
| 0.777778
| 4
| 27
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a0a00cb2365348e4354416f978d781eb4937a1c6
| 11,972
|
py
|
Python
|
imitation_cl/plot/trajectories.py
|
sayantanauddy/clfd
|
3c8658bf5722429f48b25a34b0fff90736ea0597
|
[
"MIT"
] | 2
|
2022-02-19T09:08:48.000Z
|
2022-03-03T22:38:13.000Z
|
imitation_cl/plot/trajectories.py
|
sayantanauddy/clfd
|
3c8658bf5722429f48b25a34b0fff90736ea0597
|
[
"MIT"
] | null | null | null |
imitation_cl/plot/trajectories.py
|
sayantanauddy/clfd
|
3c8658bf5722429f48b25a34b0fff90736ea0597
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import torch
#TODO remove
def get_quiver_data(t, y_all, ode_rhs, y_hat, x_lim=None, y_lim=None, L=1):
    """Evaluate the ODE right-hand side on a regular grid for a quiver plot.

    Args:
        t (torch.Tensor): Time steps (returned converted to numpy).
        y_all (torch.Tensor): Demonstrated trajectories; only its ``device``
            is used to place the grid, then it is converted to numpy.
        ode_rhs (callable): ODE RHS, called as ``ode_rhs(None, Z)``.
        y_hat (torch.Tensor): Predicted trajectories (converted to numpy).
        x_lim (list, optional): [min, max] grid extent in x. Defaults to [-2, 2].
        y_lim (list, optional): [min, max] grid extent in y. Defaults to [-2, 2].
        L (int, optional): Number of stacked copies of the grid. Defaults to 1.

    Returns:
        tuple: (F, Z, xs1_, xs2_, N_, t, y_all, y_hat, x_lim, y_lim), with all
        tensors converted to numpy arrays.
    """
    # BUG FIX: the defaults used to be mutable list literals shared across
    # calls; a None sentinel preserves the same values safely.
    x_lim = [-2.0, 2.0] if x_lim is None else x_lim
    y_lim = [-2.0, 2.0] if y_lim is None else y_lim
    N_ = 10  # grid resolution per axis
    min_x, min_y = x_lim[0], y_lim[0]
    max_x, max_y = x_lim[1], y_lim[1]
    t = t.detach().cpu().numpy()
    xs1_, xs2_ = np.meshgrid(np.linspace(min_x, max_x, N_),
                             np.linspace(min_y, max_y, N_))
    Z = np.array([xs1_.T.flatten(), xs2_.T.flatten()]).T
    Z = torch.from_numpy(Z).float().to(y_all.device)
    Z = torch.stack([Z] * L)
    F = ode_rhs(None, Z).detach().cpu().numpy()
    # Soft-normalise arrow lengths by the 4th root of the squared magnitude.
    F /= ((F**2).sum(-1, keepdims=True))**(0.25)
    Z = Z.detach().cpu().numpy()
    y_all = y_all.detach().cpu().numpy()
    y_hat = y_hat.detach().cpu().numpy()
    return (F, Z, xs1_, xs2_, N_, t, y_all, y_hat, x_lim, y_lim)
#TODO remove
def plot_ode_simple2(t, y_all, F, Z, xs1_, xs2_, N_, y_hat, L=1, ax=None,
                     fontsize=10, x_lim=None, y_lim=None):
    """Plot a precomputed vector field with demonstrations and predictions.

    Args:
        t (np.ndarray): Time steps (kept for API parity; not plotted here).
        y_all (np.ndarray): Demonstrated trajectories, shape (demos, T, 2).
        F (np.ndarray): Vector-field values (e.g. from ``get_quiver_data``).
        Z (np.ndarray): Grid points (kept for API parity; unused here).
        xs1_ (np.ndarray): Meshgrid x-coordinates of the quiver grid.
        xs2_ (np.ndarray): Meshgrid y-coordinates of the quiver grid.
        N_ (int): Grid resolution per axis.
        y_hat (np.ndarray or None): Predicted trajectories; if None, only the
            demonstrations are drawn.
        L (int, optional): Unused; kept for API parity. Defaults to 1.
        ax (matplotlib.axes.Axes): Axes to draw on (required).
        fontsize (int, optional): Label/tick font size. Defaults to 10.
        x_lim (list, optional): x-axis limits. Defaults to [-2, 2].
        y_lim (list, optional): y-axis limits. Defaults to [-2, 2].

    Returns:
        tuple(list, list): Legend handles and matching labels (3 entries when
        ``y_hat`` is None, 4 otherwise).
    """
    # Mutable default arguments replaced by a None sentinel (same values).
    x_lim = [-2.0, 2.0] if x_lim is None else x_lim
    y_lim = [-2.0, 2.0] if y_lim is None else y_lim
    linewidth = 1.5
    markersize = 5.0
    alpha = 0.7
    ax.set_xlim(x_lim)
    ax.set_ylim(y_lim)
    ax.set_xlabel('State $x_1$', fontsize=fontsize)
    ax.set_ylabel('State $x_2$', fontsize=fontsize)
    ax.tick_params(axis='x', labelsize=fontsize)
    ax.tick_params(axis='y', labelsize=fontsize)
    for F_ in F:
        h1 = ax.quiver(xs1_, xs2_,
                       F_[:, 0].reshape(N_, N_).T, F_[:, 1].reshape(N_, N_).T,
                       cmap=plt.cm.Blues)
    if y_hat is None:  # only plotting data
        for y_ in y_all:
            h2, = ax.plot(y_[0, 0], y_[0, 1], 'o', fillstyle='none',
                          markersize=markersize, linewidth=linewidth)
            h3, = ax.plot(y_[:, 0], y_[:, 1], '-', color=h2.get_color(),
                          linewidth=linewidth)
        # BUG FIX: the original unconditionally returned [h1, h2, h3, h4],
        # which raised NameError on this branch because h4 is never assigned.
        return [h1, h2, h3], ['Vector field', 'Initial value', 'Demonstration']
    else:  # plotting data and fits, set the color correctly!
        # The initial positions of all demonstrations.
        h2, = ax.plot(y_all[:, 0, 0], y_all[:, 0, 1], 'o', color='firebrick',
                      fillstyle='none', markersize=markersize,
                      linewidth=linewidth)
        # The demonstrations.
        num_demos = y_all.shape[0]
        for demo_idx in range(num_demos):
            h3, = ax.plot(y_all[demo_idx, :, 0], y_all[demo_idx, :, 1],
                          '-', color='firebrick', alpha=alpha,
                          linewidth=linewidth)
        # The predictions.
        for y_hat_ in y_hat:
            h4, = ax.plot(y_hat_[:, 0], y_hat_[:, 1], '-', color='royalblue',
                          alpha=alpha, linewidth=linewidth)
        if y_hat.shape[0] > 1:
            # Re-draw the first demonstration on top for visibility.
            ax.plot(y_all[0, :, 0], y_all[0, :, 1], '-', color='firebrick',
                    alpha=alpha, linewidth=linewidth)
        # Return handles for creating the legend.
        return [h1, h2, h3, h4], ['Vector field', 'Initial value',
                                  'Demonstration', 'Prediction']
def plot_ode_simple(t, y_all, ode_rhs, y_hat=None, L=1, ax=None, fontsize=10, explicit_time=0):
    """Plot the learned vector field together with demonstrations/predictions.

    Args:
        t (torch.Tensor): Time steps.
        y_all (torch.Tensor): Demonstrated trajectories, shape (demos, T, 2).
        ode_rhs (function): ODE RHS of the NODE.
        y_hat (torch.Tensor, optional): Predicted trajectories. Defaults to None.
        L (int, optional): Number of stacked grid copies. Defaults to 1.
        ax (matplotlib.axes.Axes): Axes to draw on (required).
        fontsize (int, optional): Label/tick font size. Defaults to 10.
        explicit_time (int, optional): 1 to evaluate the RHS at the last time
            stamp, 0 to call it with ``None`` as the time. Defaults to 0.

    Returns:
        tuple(list, list): Legend handles and matching labels.

    Raises:
        NotImplementedError: if ``explicit_time`` is neither 0 nor 1.
    """
    N_ = 10
    linewidth = 1.0
    markersize = 5.0
    alpha = 0.7
    # Evaluate the ODE on a regular grid over fixed symmetric limits.
    # (The data-driven min/max computation that used to precede this was
    # dead code: its result was immediately overwritten by +/- limit.)
    limit = 3.0
    min_x, min_y = -limit, -limit
    max_x, max_y = limit, limit
    ax.set_xlim([min_x, max_x])
    ax.set_ylim([min_y, max_y])
    xs1_, xs2_ = np.meshgrid(np.linspace(min_x, max_x, N_),
                             np.linspace(min_y, max_y, N_))
    Z = np.array([xs1_.T.flatten(), xs2_.T.flatten()]).T
    Z = torch.from_numpy(Z).float().to(y_all.device)
    Z = torch.stack([Z] * L)
    if explicit_time == 1:
        # Use the last time stamp for the vector field.
        F = ode_rhs(t[-1], Z).detach().cpu().numpy()
    elif explicit_time == 0:
        F = ode_rhs(None, Z).detach().cpu().numpy()
    else:
        raise NotImplementedError(f'Invalid value of explicit_time={explicit_time} (only 0 or 1 allowed)')
    # Soft-normalise arrow lengths by the 4th root of the squared magnitude.
    F /= ((F**2).sum(-1, keepdims=True))**(0.25)
    Z = Z.detach().cpu().numpy()
    t = t.detach().cpu().numpy()
    y_all = y_all.detach().cpu().numpy()
    ax.set_xlabel('State $x_1$', fontsize=fontsize)
    ax.set_ylabel('State $x_2$', fontsize=fontsize)
    ax.tick_params(axis='x', labelsize=fontsize)
    ax.tick_params(axis='y', labelsize=fontsize)
    for F_ in F:
        h1 = ax.quiver(xs1_, xs2_,
                       F_[:, 0].reshape(N_, N_).T, F_[:, 1].reshape(N_, N_).T,
                       cmap=plt.cm.Blues)
    if y_hat is None:  # only plotting data
        for y_ in y_all:
            h2, = ax.plot(y_[0, 0], y_[0, 1], 'o', fillstyle='none',
                          markersize=markersize, linewidth=linewidth)
            h3, = ax.plot(y_[:, 0], y_[:, 1], '-', color=h2.get_color(),
                          linewidth=linewidth)
    else:  # plotting data and fits, set the color correctly!
        # The initial positions.
        h2, = ax.plot(y_all[:, 0, 0], y_all[:, 0, 1], 'o', color='firebrick',
                      fillstyle='none', markersize=markersize,
                      linewidth=linewidth)
        # The demonstrations.
        num_demos = y_all.shape[0]
        for demo_idx in range(num_demos):
            h3, = ax.plot(y_all[demo_idx, :, 0], y_all[demo_idx, :, 1],
                          '-', color='firebrick', alpha=alpha,
                          linewidth=linewidth)
    if y_hat is None:
        # Quiver legend entries do not render as expected; a dummy scatter
        # with an arrow marker stands in for the vector field.
        h1 = ax.scatter([], [], marker=r'$\rightarrow$', label='Vector Field', color='black', s=100)
        return [h1, h2, h3], ['Vector field', 'Initial value', 'Demonstration']
    else:
        y_hat = y_hat.detach().cpu()
        for y_hat_ in y_hat:
            h4, = ax.plot(y_hat_[:, 0], y_hat_[:, 1], '-', color='royalblue',
                          alpha=alpha, linewidth=linewidth)
        if y_hat.shape[0] > 1:
            # Re-draw the first demonstration on top for visibility.
            ax.plot(y_all[0, :, 0], y_all[0, :, 1], '-', color='firebrick',
                    alpha=alpha, linewidth=linewidth)
        # Dummy scatter for the vector-field legend entry (see above).
        # NOTE(review): this branch uses a left arrow vs. the right arrow
        # above — preserved as-is; confirm whether that is intentional.
        h1 = ax.scatter([], [], marker=r'$\leftarrow$', label='Vector Field', color='black', s=100)
        return [h1, h2, h3, h4], ['Vector field', 'Initial value', 'Demonstration', 'Prediction']
def plot_ode(t, X, ode_rhs, Xhat=None, L=1, return_fig=False):
    """Plot vector field, state-space trajectories and per-state time series.

    Args:
        t (torch.Tensor): Time steps.
        X (torch.Tensor): Data trajectories, shape (N, T, 2).
        ode_rhs (function): ODE RHS, evaluated at the last time stamp.
        Xhat (torch.Tensor, optional): Forward simulations. Defaults to None.
        L (int, optional): Number of stacked grid copies. Defaults to 1.
        return_fig (bool, optional): Whether to return the figure and handles.

    Returns:
        tuple or None: (fig, ax1, h3, h4, h5) when ``return_fig`` is True.
    """
    # BUG FIX: the debug print used to access Xhat.shape unconditionally,
    # crashing whenever Xhat was left at its documented default of None.
    print(t.shape, X.shape, None if Xhat is None else Xhat.shape)
    N_ = 10
    # Grid limits from the data extent.
    min_x, min_y = X.min(dim=0)[0].min(dim=0)[0].detach().cpu().numpy()
    max_x, max_y = X.max(dim=0)[0].max(dim=0)[0].detach().cpu().numpy()
    xs1_, xs2_ = np.meshgrid(np.linspace(min_x, max_x, N_),
                             np.linspace(min_y, max_y, N_))
    Z = np.array([xs1_.T.flatten(), xs2_.T.flatten()]).T
    Z = torch.from_numpy(Z).float().to(X.device)
    Z = torch.stack([Z] * L)
    F = ode_rhs(t[-1], Z).detach().cpu().numpy()
    # Soft-normalise arrow lengths by the 4th root of the squared magnitude.
    F /= ((F**2).sum(-1, keepdims=True))**(0.25)
    Z = Z.detach().cpu().numpy()
    t = t.detach().cpu().numpy()
    X = X.detach().cpu().numpy()
    fig = plt.figure(1, [15, 7.5], constrained_layout=True)
    gs = fig.add_gridspec(3, 3)
    # Left column: state space with the vector field.
    ax1 = fig.add_subplot(gs[:, 0])
    ax1.set_xlabel('State $x_1$', fontsize=17)
    ax1.set_ylabel('State $x_2$', fontsize=17)
    ax1.tick_params(axis='x', labelsize=15)
    ax1.tick_params(axis='y', labelsize=15)
    for F_ in F:
        h1 = ax1.quiver(xs1_, xs2_,
                        F_[:, 0].reshape(N_, N_).T, F_[:, 1].reshape(N_, N_).T,
                        cmap=plt.cm.Blues)
    if Xhat is None:  # only plotting data
        for X_ in X:
            h2, = ax1.plot(X_[0, 0], X_[0, 1], 'o', fillstyle='none',
                           markersize=11.0, linewidth=2.0)
            h3, = ax1.plot(X_[:, 0], X_[:, 1], '-', color=h2.get_color(),
                           linewidth=3.0)
    else:  # plotting data and fits, set the color correctly!
        h2, = ax1.plot(X[0, 0, 0], X[0, 0, 1], 'o', color='firebrick',
                       fillstyle='none', markersize=11.0, linewidth=2.0)
        h3, = ax1.plot(X[0, :, 0], X[0, :, 1], '-', color='firebrick',
                       linewidth=3.0)
    if Xhat is not None and Xhat.ndim == 3:
        # Add a leading sample dimension so the loops below are uniform.
        Xhat = Xhat.unsqueeze(0)
    if Xhat is None:
        plt.legend([h1, h2, h3],
                   ['Vector field', 'Initial value', 'State trajectory'],
                   loc='lower right', fontsize=20, bbox_to_anchor=(1.5, 0.05))
    else:
        Xhat = Xhat.detach().cpu()
        for xhat in Xhat:
            h4, = ax1.plot(xhat[0, :, 0], xhat[0, :, 1], '-',
                           color='royalblue', linewidth=3.0)
        if Xhat.shape[0] > 1:
            ax1.plot(X[0, :, 0], X[0, :, 1], '-', color='firebrick',
                     linewidth=5.0)
        plt.legend([h1, h2, h3, h4],
                   ['Vector field', 'Initial value', 'Data sequence', 'Forward simulation'],
                   loc='lower right', fontsize=20, bbox_to_anchor=(1.5, 0.05))
    # Right column, top row: x_1 over time.
    ax2 = fig.add_subplot(gs[0, 1:])
    if Xhat is None:  # only plotting data
        for X_ in X:
            h4, = ax2.plot(t, X_[:, 0], linewidth=3.0)
    else:  # plotting data and fits, set the color correctly!
        h4, = ax2.plot(t, X[0, :, 0], color='firebrick', linewidth=3.0)
    if Xhat is not None:
        for xhat in Xhat:
            ax2.plot(t, xhat[0, :, 0], color='royalblue', linewidth=3.0)
        if Xhat.shape[0] > 1:
            ax2.plot(t, X[0, :, 0], color='firebrick', linewidth=5.0)
    ax2.set_xlabel('time', fontsize=17)
    ax2.set_ylabel('State $x_1$', fontsize=17)
    # Right column, middle row: x_2 over time.
    ax3 = fig.add_subplot(gs[1, 1:])
    if Xhat is None:  # only plotting data
        for X_ in X:
            h5, = ax3.plot(t, X_[:, 1], linewidth=3.0)
    else:  # plotting data and fits, set the color correctly!
        h5, = ax3.plot(t, X[0, :, 1], color='firebrick', linewidth=3.0)
    if Xhat is not None:
        for xhat in Xhat:
            ax3.plot(t, xhat[0, :, 1], color='royalblue', linewidth=3.0)
        if Xhat.shape[0] > 1:
            ax3.plot(t, X[0, :, 1], color='firebrick', linewidth=5.0)
    ax3.set_xlabel('time', fontsize=17)
    ax3.set_ylabel('State $x_2$', fontsize=17)
    if return_fig:
        return fig, ax1, h3, h4, h5
    else:
        # NOTE(review): the generated filename is unused because the savefig
        # call below is commented out — presumably left for ad-hoc debugging.
        import uuid
        filename = str(uuid.uuid4())
        #plt.savefig(filename)
| 37.4125
| 123
| 0.52698
| 1,707
| 11,972
| 3.54423
| 0.125366
| 0.019835
| 0.041653
| 0.017355
| 0.817025
| 0.778017
| 0.74876
| 0.721653
| 0.70595
| 0.681818
| 0
| 0.045041
| 0.311978
| 11,972
| 320
| 124
| 37.4125
| 0.68945
| 0.139325
| 0
| 0.689076
| 0
| 0
| 0.069704
| 0.002847
| 0
| 0
| 0
| 0.003125
| 0
| 1
| 0.016807
| false
| 0
| 0.02521
| 0
| 0.063025
| 0.004202
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a0c423d4c45e9fdb3b51619f0159891bcadaf77f
| 363
|
py
|
Python
|
function/python/brightics/function/clustering/__init__.py
|
GSByeon/studio
|
782cf484541c6d68e1451ff6a0d3b5dc80172664
|
[
"Apache-2.0"
] | null | null | null |
function/python/brightics/function/clustering/__init__.py
|
GSByeon/studio
|
782cf484541c6d68e1451ff6a0d3b5dc80172664
|
[
"Apache-2.0"
] | null | null | null |
function/python/brightics/function/clustering/__init__.py
|
GSByeon/studio
|
782cf484541c6d68e1451ff6a0d3b5dc80172664
|
[
"Apache-2.0"
] | 1
|
2020-11-19T06:44:15.000Z
|
2020-11-19T06:44:15.000Z
|
from .kmeans import kmeans_train_predict
from .kmeans import kmeans_predict
from .kmeans import kmeans_silhouette_train_predict
from .hierarchical_clustering import hierarchical_clustering
from .hierarchical_clustering import hierarchical_clustering_post
from .gaussian_mixture import gaussian_mixture_train
from .gaussian_mixture import gaussian_mixture_predict
| 45.375
| 65
| 0.903581
| 45
| 363
| 6.911111
| 0.244444
| 0.282958
| 0.154341
| 0.212219
| 0.790997
| 0.604502
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077135
| 363
| 7
| 66
| 51.857143
| 0.928358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cd05b80a2ad541ec6f10ada80c26629fc10bdab4
| 40
|
py
|
Python
|
TestMovimientos/RecurrentTest/parity.py
|
JDanielGar/ConvolutionMovements
|
eca831af8570023650d158bf8171909577dc4ec5
|
[
"Apache-2.0"
] | null | null | null |
TestMovimientos/RecurrentTest/parity.py
|
JDanielGar/ConvolutionMovements
|
eca831af8570023650d158bf8171909577dc4ec5
|
[
"Apache-2.0"
] | null | null | null |
TestMovimientos/RecurrentTest/parity.py
|
JDanielGar/ConvolutionMovements
|
eca831af8570023650d158bf8171909577dc4ec5
|
[
"Apache-2.0"
] | null | null | null |
import theano
import theano.tensor as T
| 13.333333
| 25
| 0.825
| 7
| 40
| 4.714286
| 0.714286
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 40
| 2
| 26
| 20
| 0.970588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
cd2f05d72f4396712fe54b937aad2d84addbfd20
| 263
|
py
|
Python
|
tests/conftest.py
|
sqggles/sqlalchemy_dremio
|
3ea961d52908fabd655c0348573c5abb0c490f12
|
[
"MIT"
] | 13
|
2017-10-20T10:41:20.000Z
|
2021-01-19T21:06:43.000Z
|
tests/conftest.py
|
ahmadimtcs/sqlalchemy_dremio
|
65eb87f22e2fa36073170312ddb3f360b440a9c8
|
[
"MIT"
] | 8
|
2017-10-27T08:42:19.000Z
|
2022-02-22T17:59:37.000Z
|
tests/conftest.py
|
sqggles/sqlalchemy_dremio
|
3ea961d52908fabd655c0348573c5abb0c490f12
|
[
"MIT"
] | 9
|
2017-10-27T07:01:29.000Z
|
2020-01-11T11:42:14.000Z
|
from sqlalchemy.dialects import registry
# Register the Dremio dialect (default and explicit pyodbc driver variant)
# so SQLAlchemy URLs like 'dremio://' and 'dremio+pyodbc://' resolve to
# sqlalchemy_dremio.pyodbc.DremioDialect_pyodbc during the test run.
registry.register("dremio", "sqlalchemy_dremio.pyodbc", "DremioDialect_pyodbc")
registry.register("dremio.pyodbc", "sqlalchemy_dremio.pyodbc", "DremioDialect_pyodbc")
# Pull SQLAlchemy's pytest plugin hooks into this conftest (star import is
# required by SQLAlchemy's dialect-testing framework).
from sqlalchemy.testing.plugin.pytestplugin import *
| 37.571429
| 86
| 0.825095
| 28
| 263
| 7.607143
| 0.428571
| 0.169014
| 0.206573
| 0.328639
| 0.384977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060837
| 263
| 6
| 87
| 43.833333
| 0.862348
| 0
| 0
| 0
| 0
| 0
| 0.406844
| 0.18251
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
26ad31cd06bba78920a896eef91fb019e914fb34
| 121
|
py
|
Python
|
test.py
|
TDMangukiya/django-flatemails
|
dffec267ce35749a82445cf61c7660c3bcc7ec3f
|
[
"BSD-3-Clause"
] | 1
|
2016-01-08T06:00:10.000Z
|
2016-01-08T06:00:10.000Z
|
test.py
|
TDMangukiya/django-flatemails
|
dffec267ce35749a82445cf61c7660c3bcc7ec3f
|
[
"BSD-3-Clause"
] | null | null | null |
test.py
|
TDMangukiya/django-flatemails
|
dffec267ce35749a82445cf61c7660c3bcc7ec3f
|
[
"BSD-3-Clause"
] | null | null | null |
# Test runner: invoke the bundled test project's Django management command.
from testproject.manage import execute_manager, settings
import sys
# Inject the 'test' subcommand after the program name so execute_manager
# behaves as if `manage.py test` had been invoked on the command line.
sys.argv.insert(1, 'test')
execute_manager(settings)
| 24.2
| 56
| 0.826446
| 17
| 121
| 5.764706
| 0.705882
| 0.285714
| 0.44898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009009
| 0.082645
| 121
| 4
| 57
| 30.25
| 0.873874
| 0
| 0
| 0
| 0
| 0
| 0.033058
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
f80672bb133736fa2442e639769aa3be88d7bf76
| 16
|
py
|
Python
|
src/glue_launcher_lambda/__init__.py
|
dwp/dataworks-aws-glue-launcher
|
b918b172a58867815affa3985e45e239af32ba93
|
[
"0BSD"
] | 1
|
2021-08-30T02:58:12.000Z
|
2021-08-30T02:58:12.000Z
|
src/glue_launcher_lambda/__init__.py
|
dwp/dataworks-aws-glue-launcher
|
b918b172a58867815affa3985e45e239af32ba93
|
[
"0BSD"
] | 22
|
2021-08-03T08:33:36.000Z
|
2021-09-24T07:28:33.000Z
|
src/glue_launcher_lambda/__init__.py
|
dwp/dataworks-aws-glue-launcher
|
b918b172a58867815affa3985e45e239af32ba93
|
[
"0BSD"
] | null | null | null |
"Glue Launcher"
| 8
| 15
| 0.75
| 2
| 16
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 16
| 1
| 16
| 16
| 0.857143
| 0.8125
| 0
| 0
| 0
| 0
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f85f1c9e7d33dd050c0ad1913fd0524ad2a6ca53
| 2,803
|
py
|
Python
|
epytope/Data/pssms/smmpmbec/mat/A_02_06_11.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/smmpmbec/mat/A_02_06_11.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/smmpmbec/mat/A_02_06_11.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
A_02_06_11 = {0: {'A': -0.018, 'C': 0.101, 'E': 0.14, 'D': 0.256, 'G': 0.307, 'F': -0.391, 'I': -0.255, 'H': -0.048, 'K': -0.161, 'M': -0.683, 'L': -0.792, 'N': 0.312, 'Q': 0.357, 'P': 0.85, 'S': -0.023, 'R': 0.31, 'T': 0.07, 'W': 0.021, 'V': -0.09, 'Y': -0.262}, 1: {'A': 0.008, 'C': 0.002, 'E': 0.002, 'D': 0.005, 'G': 0.001, 'F': -0.002, 'I': -0.002, 'H': -0.001, 'K': 0.0, 'M': -0.007, 'L': -0.006, 'N': -0.002, 'Q': -0.004, 'P': 0.01, 'S': -0.002, 'R': 0.002, 'T': 0.003, 'W': -0.003, 'V': 0.001, 'Y': -0.004}, 2: {'A': -0.315, 'C': 0.013, 'E': -0.137, 'D': -0.212, 'G': 0.152, 'F': -0.04, 'I': -0.457, 'H': 0.391, 'K': 0.467, 'M': -0.083, 'L': -0.594, 'N': 0.21, 'Q': 0.202, 'P': -0.298, 'S': 0.337, 'R': 0.74, 'T': -0.001, 'W': 0.087, 'V': -0.429, 'Y': -0.032}, 3: {'A': -0.17, 'C': -0.05, 'E': -0.141, 'D': -0.043, 'G': 0.014, 'F': -0.084, 'I': 0.008, 'H': 0.013, 'K': 0.106, 'M': 0.062, 'L': -0.022, 'N': 0.029, 'Q': 0.03, 'P': -0.225, 'S': 0.076, 'R': 0.151, 'T': 0.041, 'W': 0.114, 'V': -0.009, 'Y': 0.1}, 4: {'A': 0.027, 'C': -0.005, 'E': -0.005, 'D': -0.011, 'G': -0.004, 'F': 0.001, 'I': 0.026, 'H': -0.024, 'K': 0.008, 'M': 0.003, 'L': 0.009, 'N': -0.032, 'Q': -0.013, 'P': 0.011, 'S': 0.003, 'R': -0.001, 'T': 0.007, 'W': -0.014, 'V': 0.024, 'Y': -0.009}, 5: {'A': -0.012, 'C': -0.018, 'E': 0.064, 'D': 0.03, 'G': -0.031, 'F': 0.051, 'I': -0.196, 'H': 0.068, 'K': 0.14, 'M': -0.041, 'L': 0.01, 'N': -0.111, 'Q': 0.017, 'P': 0.073, 'S': -0.114, 'R': 0.269, 'T': -0.121, 'W': -0.072, 'V': -0.088, 'Y': 0.082}, 6: {'A': 0.029, 'C': 0.006, 'E': 0.001, 'D': 0.015, 'G': -0.006, 'F': -0.005, 'I': -0.023, 'H': 0.0, 'K': 0.003, 'M': -0.007, 'L': -0.024, 'N': -0.004, 'Q': -0.014, 'P': 0.005, 'S': 0.003, 'R': 0.014, 'T': 0.009, 'W': -0.003, 'V': -0.008, 'Y': 0.01}, 7: {'A': -0.074, 'C': -0.009, 'E': 0.015, 'D': 0.052, 'G': -0.014, 'F': -0.116, 'I': -0.199, 'H': 0.11, 'K': 0.059, 'M': -0.074, 'L': -0.176, 'N': 0.061, 'Q': 0.051, 'P': 0.051, 'S': 0.085, 'R': 0.154, 'T': 0.066, 'W': 
0.032, 'V': -0.088, 'Y': 0.014}, 8: {'A': -0.101, 'C': 0.01, 'E': 0.071, 'D': 0.168, 'G': 0.01, 'F': -0.11, 'I': -0.336, 'H': 0.147, 'K': 0.159, 'M': -0.179, 'L': -0.244, 'N': 0.043, 'Q': 0.065, 'P': 0.05, 'S': 0.043, 'R': 0.246, 'T': 0.053, 'W': 0.048, 'V': -0.221, 'Y': 0.078}, 9: {'A': 0.046, 'C': -0.004, 'E': -0.012, 'D': -0.011, 'G': 0.025, 'F': -0.02, 'I': -0.011, 'H': 0.014, 'K': 0.041, 'M': 0.001, 'L': -0.016, 'N': -0.013, 'Q': -0.009, 'P': -0.042, 'S': 0.027, 'R': 0.04, 'T': 0.012, 'W': -0.038, 'V': -0.007, 'Y': -0.022}, 10: {'A': -0.091, 'C': 0.112, 'E': 0.031, 'D': 0.069, 'G': 0.053, 'F': 0.107, 'I': -0.126, 'H': 0.094, 'K': 0.045, 'M': 0.011, 'L': -0.069, 'N': 0.09, 'Q': -0.027, 'P': -0.095, 'S': 0.078, 'R': -0.051, 'T': -0.056, 'W': 0.219, 'V': -0.31, 'Y': -0.083}, -1: {'con': 4.12927}}
| 2,803
| 2,803
| 0.392793
| 679
| 2,803
| 1.617084
| 0.22975
| 0.020036
| 0.009107
| 0.010929
| 0.076503
| 0
| 0
| 0
| 0
| 0
| 0
| 0.372232
| 0.162326
| 2,803
| 1
| 2,803
| 2,803
| 0.0954
| 0
| 0
| 0
| 0
| 0
| 0.079529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f8ab341a29a0afaba9c5280f52df3d54fccc3406
| 41
|
py
|
Python
|
todoist_costs/__init__.py
|
anatoly-scherbakov/todoist-costs
|
0c44a5c599205659cd23921c5ceba24802b4dd74
|
[
"MIT"
] | null | null | null |
todoist_costs/__init__.py
|
anatoly-scherbakov/todoist-costs
|
0c44a5c599205659cd23921c5ceba24802b4dd74
|
[
"MIT"
] | null | null | null |
todoist_costs/__init__.py
|
anatoly-scherbakov/todoist-costs
|
0c44a5c599205659cd23921c5ceba24802b4dd74
|
[
"MIT"
] | null | null | null |
from todoist_costs.cli import app as cli
| 20.5
| 40
| 0.829268
| 8
| 41
| 4.125
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 41
| 1
| 41
| 41
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f8b60eced6b9d82c0fc002254594f12ca827ca8a
| 5,269
|
py
|
Python
|
client/sym.py
|
symphonyprotocol/openplatform_client
|
7bfbc157244c603c8dcf2ea44a1047addbc39c23
|
[
"Apache-2.0"
] | null | null | null |
client/sym.py
|
symphonyprotocol/openplatform_client
|
7bfbc157244c603c8dcf2ea44a1047addbc39c23
|
[
"Apache-2.0"
] | null | null | null |
client/sym.py
|
symphonyprotocol/openplatform_client
|
7bfbc157244c603c8dcf2ea44a1047addbc39c23
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from .utils import get_md5_Str, get, post, get_timestamp, get_toml_file
import json
class SymClient:
    """Client for the Symphony open-platform HTTP API.

    Wraps company registration/login and the schema/data/label push-pull
    endpoints. Credentials returned by the server are stored on the instance.
    """

    # Company id and API secret, populated by register()/login().
    company_id = ""
    secret = ""

    def __init__(self):
        pass

    def get_comapny_id(self):
        # NOTE: the method name keeps its historical typo for backward
        # compatibility; new code should call get_company_id().
        return self.company_id

    def get_company_id(self):
        """Correctly-spelled alias of :meth:`get_comapny_id`."""
        return self.company_id

    def _signed_url(self, path):
        """Build *path* with the id/timestamp/signature query parameters."""
        return '{path}?id={id}&ts={ts}&sign={sign}'.format(
            path=path, id=self.company_id, ts=get_timestamp(), sign='testsign')

    def register(self, company_name, user_name, password):
        """Register a company and store the returned id and secret.

        :param company_name: company name.
        :param user_name: user name.
        :param password: password (sent as an MD5 digest).
        :raises Exception: if the server rejects the registration.
        """
        body = {
            "name": company_name,
            "user_name": user_name,
            "pwd": get_md5_Str(password)
        }
        data = post('/auth/register', body)
        if data is not None and data['code'] == 200:
            self.company_id = data['result']['id']
            self.secret = data['result']['secret']
        else:
            raise Exception('register failure')

    def login(self, user_name, password):
        """Log in and store the returned company id and secret.

        :param user_name: user name.
        :param password: password (sent as an MD5 digest).
        :raises Exception: if the credentials are rejected.
        """
        body = {
            "user_name": user_name,
            "pwd": get_md5_Str(password)
        }
        data = post('/auth/login', body)
        if data is not None and data['code'] == 200:
            self.company_id = data['result']['id']
            self.secret = data['result']['secret']
        else:
            raise Exception('login failure')

    def _upload_schema(self, path, toml_file, op_name):
        """Upload a TOML schema file to *path*; return the new schema id."""
        toml_dict = get_toml_file(toml_file)
        body = {
            'toml': toml_dict
        }
        print(body)
        data = post(self._signed_url(path), body)
        if data is None or data['code'] != 200:
            raise Exception(op_name + ' failure')
        return data['result']['schema_id']

    def upload_data_label_schema(self, toml_file):
        """Upload a data-label schema; return its schema id."""
        return self._upload_schema('/data/schema', toml_file,
                                   'upload_data_label_schema')

    def upload_model_label_schema(self, toml_file):
        """Upload a model-label schema; return its schema id."""
        return self._upload_schema('/label/schema', toml_file,
                                   'upload_model_label_schema')

    def _pull(self, path, schema_id, start_date, end_date, cursor, op_name):
        """POST a paged pull request to *path*; return (items, next_cursor)."""
        body = {
            "schema_id": schema_id,
            "scope": {
                "start_date": start_date,
                "end_date": end_date,
                "cursor": cursor
            }
        }
        data = post(self._signed_url(path), body)
        if data is not None and data['code'] == 200:
            return data['result']['items'], data['result']['next_cursor']
        raise Exception(op_name + ' failure')

    def request_buffer_data(self, schema_id, start_date, end_date, cursor):
        """Pull a page of buffered data for *schema_id*."""
        return self._pull('/buffer/pull', schema_id, start_date, end_date,
                          cursor, 'request_buffer_data')

    def request_data_label(self, schema_id, start_date, end_date, cursor):
        """Pull a page of data labels for *schema_id*."""
        return self._pull('/data/pull', schema_id, start_date, end_date,
                          cursor, 'request_data_label')

    def request_model_label(self, schema_id, start_date, end_date, cursor):
        """Pull a page of model labels for *schema_id*."""
        return self._pull('/label/pull', schema_id, start_date, end_date,
                          cursor, 'request_model_label')

    def _push(self, path, schema_id, data_dict, op_name):
        """POST *data_dict* under *schema_id*; return 'success' or raise."""
        body = {
            "schema_id": schema_id,
            "data": data_dict
        }
        resp = post(self._signed_url(path), body)
        if resp is not None and resp['code'] == 200:
            return 'success'
        raise Exception(op_name + ' failure')

    def push_data_label(self, schema_id, data_dict):
        """Push a data label; return 'success'."""
        return self._push('/data/push', schema_id, data_dict,
                          'push_data_label')

    def push_model_label(self, schema_id, data_dict):
        """Push a model label; return 'success'.

        BUG FIX: the failure message previously said 'push_data_label'
        (copy-paste error); it now correctly reports this operation.
        """
        return self._push('/label/push', schema_id, data_dict,
                          'push_model_label')
| 35.843537
| 121
| 0.552097
| 636
| 5,269
| 4.361635
| 0.119497
| 0.054795
| 0.046864
| 0.030281
| 0.809301
| 0.802812
| 0.802812
| 0.796323
| 0.796323
| 0.782985
| 0
| 0.00854
| 0.311065
| 5,269
| 147
| 122
| 35.843537
| 0.755647
| 0.028279
| 0
| 0.645669
| 0
| 0
| 0.200945
| 0.073411
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086614
| false
| 0.03937
| 0.015748
| 0.007874
| 0.188976
| 0.015748
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3e080551cbaf113ab404e90334d5fe8ea3292cc9
| 231
|
py
|
Python
|
src/member.py
|
stickittotheman/reminder-robin
|
29560697634be6cd30dbf8312cecc781e8f4906f
|
[
"Apache-2.0"
] | null | null | null |
src/member.py
|
stickittotheman/reminder-robin
|
29560697634be6cd30dbf8312cecc781e8f4906f
|
[
"Apache-2.0"
] | null | null | null |
src/member.py
|
stickittotheman/reminder-robin
|
29560697634be6cd30dbf8312cecc781e8f4906f
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass
import discord
@dataclass
class Member:
    """Lightweight projection of a Discord member used by the reminder bot."""

    # The member's display name (a plain string, per the annotation).
    display_name: str

    @staticmethod
    def from_discord_member(discord_member: "discord.Member"):
        """Build a Member from a discord.Member.

        BUG FIX: the whole discord.Member object used to be stored in the
        str-annotated ``display_name`` field; its ``display_name`` attribute
        is now extracted instead. (The annotation is a string literal so the
        module no longer needs ``discord`` imported at definition time.)
        """
        return Member(display_name=discord_member.display_name)
| 16.5
| 60
| 0.766234
| 27
| 231
| 6.333333
| 0.481481
| 0.304094
| 0.19883
| 0.304094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 231
| 13
| 61
| 17.769231
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0.125
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
3e0abbccddbeea73f561433b668892ac0b007e1c
| 5,909
|
py
|
Python
|
api/migrations/0038_auto_20200110_1333.py
|
IFRCGo/ifrcgo-api
|
c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a
|
[
"MIT"
] | 11
|
2018-06-11T06:05:12.000Z
|
2022-03-25T09:31:44.000Z
|
api/migrations/0038_auto_20200110_1333.py
|
IFRCGo/ifrcgo-api
|
c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a
|
[
"MIT"
] | 498
|
2017-11-07T21:20:13.000Z
|
2022-03-31T14:37:18.000Z
|
api/migrations/0038_auto_20200110_1333.py
|
IFRCGo/ifrcgo-api
|
c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a
|
[
"MIT"
] | 6
|
2018-04-11T13:29:50.000Z
|
2020-07-16T16:52:11.000Z
|
# Generated by Django 2.0.12 on 2020-01-10 13:33
from django.db import migrations
# Sector prefixes sharing the "<prefix>_people_targeted" column-name pattern.
# The same 14 prefixes are renamed on the people-reached model and dropped
# from the FR model below.
_SECTOR_FIELDS = (
    'disaster_risk_reduction',
    'health',
    'livelihoods_and_basic_needs',
    'migration',
    'protection_gender_and_inclusion',
    'raw_disaster_risk_reduction',
    'raw_health',
    'raw_livelihoods_and_basic_needs',
    'raw_migration',
    'raw_protection_gender_and_inclusion',
    'raw_shelter',
    'raw_water_sanitation_and_hygiene',
    'shelter',
    'water_sanitation_and_hygiene',
)


class Migration(migrations.Migration):
    """Rename ``*_people_targeted`` to ``*_people_reached`` on the
    people-reached model and remove the ``*_people_targeted`` columns
    from the FR model."""

    dependencies = [
        ('api', '0037_auto_20200109_0902'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='emergencyoperationsdataset',
            options={'verbose_name': 'Emergency Operations Dataset', 'verbose_name_plural': 'Emergency Operations Datasets'},
        ),
        migrations.AlterModelOptions(
            name='emergencyoperationspeoplereached',
            options={'verbose_name': 'Emergency Operations People Reached', 'verbose_name_plural': 'Emergency Operations People Reached'},
        ),
    ] + [
        migrations.RenameField(
            model_name='emergencyoperationspeoplereached',
            old_name=field + '_people_targeted',
            new_name=field + '_people_reached',
        )
        for field in _SECTOR_FIELDS
    ] + [
        migrations.RemoveField(
            model_name='emergencyoperationsfr',
            name=field + '_people_targeted',
        )
        for field in _SECTOR_FIELDS
    ]
| 39.925676
| 138
| 0.655779
| 485
| 5,909
| 7.503093
| 0.123711
| 0.06925
| 0.094806
| 0.130805
| 0.920583
| 0.831272
| 0.818082
| 0.761748
| 0.658972
| 0.483924
| 0
| 0.007377
| 0.265866
| 5,909
| 147
| 139
| 40.197279
| 0.831489
| 0.007785
| 0
| 0.624113
| 1
| 0
| 0.433885
| 0.401126
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.007092
| 0
| 0.028369
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3e640044a213e00402416b99802a47f05b69f2c8
| 31,580
|
py
|
Python
|
metrics_utils.py
|
mit-quest/necstlab-damage-segmentation
|
d2fef61680594b143ed5914a2ee315eb90852f46
|
[
"MIT"
] | 4
|
2019-10-25T19:35:39.000Z
|
2022-01-13T02:46:05.000Z
|
metrics_utils.py
|
BrendenBarbour/necstlab-damage-segmentation
|
f714fb917b396d0a5d7fa88a646dfa492bcce835
|
[
"MIT"
] | 87
|
2019-10-24T01:42:42.000Z
|
2022-02-09T23:35:42.000Z
|
metrics_utils.py
|
BrendenBarbour/necstlab-damage-segmentation
|
f714fb917b396d0a5d7fa88a646dfa492bcce835
|
[
"MIT"
] | 6
|
2019-11-04T18:45:58.000Z
|
2021-10-30T21:56:06.000Z
|
"""Custom tf.keras / segmentation_models metrics with one-hot variants.

One-hot ("_1H") metric classes threshold predictions, argmax over the channel
axis, and re-expand to one-hot before delegating to the parent metric.
"""
import os
from tensorflow import where as tfwhere, zeros_like as tfzeros_like
from tensorflow.keras.metrics import (Metric as MetricTfKeras, Accuracy as AccuracyTfKeras,
                                      FalsePositives, TruePositives, TrueNegatives, FalseNegatives, Precision, Recall)
import tensorflow.keras.backend as K
from tensorflow.python.keras.utils import metrics_utils as metrics_utils_tf_keras
from tensorflow.python.keras.utils.generic_utils import to_list
from tensorflow.python.ops import init_ops, math_ops
import numpy as np

# NOTE: this environment variable must be set BEFORE segmentation_models is
# imported — do not reorder these two lines.
os.environ['SM_FRAMEWORK'] = 'tf.keras'  # will tell segmentation models to use tensorflow's keras
from segmentation_models.base import Metric as MetricSM, functional

# Additive fudge factor guarding against division by zero in SM-style metrics.
SMOOTH = 1e-5
assert SMOOTH <= 1e-5

# 0.5 is default prediction threshold for most metrics which use a threshold value
# and the threshold value is also effectively ignored for one hot metrics
global_threshold = 0.5
assert 0.0 <= global_threshold <= 1.0

# In summary, to achieve one hot metrics:
# 1. For a metric class who via definition inherits tf.keras.metrics.Metric or tf.keras.metric.MeanMetricWrapper, for
# one hot conversion in which this metric class is inherited by a sub-class one hot version:
# - in tf2, place 1H at __ call __ method or update_state method (or both), followed by corresponding super().
# - in tf1, place 1H at update_state method, followed by corresponding super().
# 2. For a metric class who via definition does NOT inherit tf.keras.metrics.Metric or tf.keras.metric.MeanMetricWrapper
# (e.g., instead, inherits segmentation_models.metrics.Metric), for one hot conversion in which this metric class is
# inherited by a sub-class one hot version (note, the class instance will be treated as a function and automatically
# wrapped with tf.keras.metrics.MeanMetricWrapper during model.compile) :
# - in tf2, place 1H at __ call __ method, followed by corresponding super(). Interestingly in tf2, the result is
# independent of whether or not the update_state method result has a return statement.
# - in tf1, place 1H at __ call __ method, followed by corresponding super().
# one hot classes are intended to act as pass-throughs. 1H (argmax) proceeds after thresholding, as done in infer.
# `MeanMetricWrapper` inheritance in custom metric: do not need to remove 'return' from `def update_state` in tf2.0
class OneHotAccuracyTfKeras(AccuracyTfKeras):
    """Accuracy computed on one-hot-encoded predictions (pass-through)."""

    def __init__(self, name='accuracy_tfkeras_1H', dtype=None):
        super().__init__(name=name, dtype=dtype)

    def _to_one_hot(self, prediction):
        # Zero out sub-threshold activations (module-wide global_threshold),
        # then argmax -> one-hot over the last axis; assumes a BHWC tensor.
        kept = tfwhere(math_ops.greater(prediction, global_threshold),
                       prediction, tfzeros_like(prediction))
        return K.one_hot(K.argmax(kept, axis=-1), K.int_shape(prediction)[-1])

    # Redirect to the parent metric after one-hot conversion.
    def __call__(self, groundtruth, prediction, **kwargs):
        return super().__call__(groundtruth, self._to_one_hot(prediction), **kwargs)
class OneHotFalseNegatives(FalseNegatives):
    """FalseNegatives computed on one-hot-encoded predictions (pass-through)."""

    def __init__(self, thresholds=None, name='FN_1H', dtype=None):
        super().__init__(thresholds=thresholds, name=name, dtype=dtype)

    def _to_one_hot(self, prediction):
        # Zero out sub-threshold activations, then argmax -> one-hot over the
        # last axis; assumes a BHWC tensor.
        kept = tfwhere(math_ops.greater(prediction, self.thresholds),
                       prediction, tfzeros_like(prediction))
        return K.one_hot(K.argmax(kept, axis=-1), K.int_shape(prediction)[-1])

    # Redirect to the parent metric after one-hot conversion.
    def __call__(self, groundtruth, prediction, **kwargs):
        return super().__call__(groundtruth, self._to_one_hot(prediction), **kwargs)

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Deliberately no 'return' here (tf2 Metric subclass convention).
        super().update_state(y_true, y_pred, sample_weight)
class OneHotFalsePositives(FalsePositives):
    """FalsePositives computed on one-hot-encoded predictions (pass-through)."""

    def __init__(self, thresholds=None, name='FP_1H', dtype=None):
        super().__init__(thresholds=thresholds, name=name, dtype=dtype)

    def _to_one_hot(self, prediction):
        # Zero out sub-threshold activations, then argmax -> one-hot over the
        # last axis; assumes a BHWC tensor.
        kept = tfwhere(math_ops.greater(prediction, self.thresholds),
                       prediction, tfzeros_like(prediction))
        return K.one_hot(K.argmax(kept, axis=-1), K.int_shape(prediction)[-1])

    # Redirect to the parent metric after one-hot conversion.
    def __call__(self, groundtruth, prediction, **kwargs):
        return super().__call__(groundtruth, self._to_one_hot(prediction), **kwargs)

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Deliberately no 'return' here (tf2 Metric subclass convention).
        super().update_state(y_true, y_pred, sample_weight)
class OneHotTrueNegatives(TrueNegatives):
    """TrueNegatives computed on one-hot-encoded predictions (pass-through)."""

    def __init__(self, thresholds=None, name='TN_1H', dtype=None):
        super().__init__(thresholds=thresholds, name=name, dtype=dtype)

    def _to_one_hot(self, prediction):
        # Zero out sub-threshold activations, then argmax -> one-hot over the
        # last axis; assumes a BHWC tensor.
        kept = tfwhere(math_ops.greater(prediction, self.thresholds),
                       prediction, tfzeros_like(prediction))
        return K.one_hot(K.argmax(kept, axis=-1), K.int_shape(prediction)[-1])

    # Redirect to the parent metric after one-hot conversion.
    def __call__(self, groundtruth, prediction, **kwargs):
        return super().__call__(groundtruth, self._to_one_hot(prediction), **kwargs)

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Deliberately no 'return' here (tf2 Metric subclass convention).
        super().update_state(y_true, y_pred, sample_weight)
class OneHotTruePositives(TruePositives):
    """TruePositives computed on one-hot-encoded predictions (pass-through)."""

    def __init__(self, thresholds=None, name='TP_1H', dtype=None):
        super().__init__(thresholds=thresholds, name=name, dtype=dtype)

    def _to_one_hot(self, prediction):
        # Zero out sub-threshold activations, then argmax -> one-hot over the
        # last axis; assumes a BHWC tensor.
        kept = tfwhere(math_ops.greater(prediction, self.thresholds),
                       prediction, tfzeros_like(prediction))
        return K.one_hot(K.argmax(kept, axis=-1), K.int_shape(prediction)[-1])

    # Redirect to the parent metric after one-hot conversion.
    def __call__(self, groundtruth, prediction, **kwargs):
        return super().__call__(groundtruth, self._to_one_hot(prediction), **kwargs)

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Deliberately no 'return' here (tf2 Metric subclass convention).
        super().update_state(y_true, y_pred, sample_weight)
class OneHotPrecision(Precision):
    """Precision computed on one-hot-encoded predictions (pass-through)."""

    def __init__(self,
                 thresholds=None,
                 top_k=None,
                 class_id=None,
                 name='precision_1H',
                 dtype=None):
        super().__init__(thresholds=thresholds, top_k=top_k,
                         class_id=class_id, name=name, dtype=dtype)

    def _to_one_hot(self, prediction):
        # Zero out sub-threshold activations, then argmax -> one-hot over the
        # last axis; assumes a BHWC tensor.
        kept = tfwhere(math_ops.greater(prediction, self.thresholds),
                       prediction, tfzeros_like(prediction))
        return K.one_hot(K.argmax(kept, axis=-1), K.int_shape(prediction)[-1])

    # Redirect to the parent metric after one-hot conversion.
    def __call__(self, groundtruth, prediction, **kwargs):
        return super().__call__(groundtruth, self._to_one_hot(prediction), **kwargs)

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Deliberately no 'return' here (tf2 Metric subclass convention).
        super().update_state(y_true, y_pred, sample_weight)
class OneHotRecall(Recall):
    """Recall computed on one-hot-encoded predictions (pass-through)."""

    def __init__(self,
                 thresholds=None,
                 top_k=None,
                 class_id=None,
                 name='recall_1H',
                 dtype=None):
        super().__init__(thresholds=thresholds, top_k=top_k,
                         class_id=class_id, name=name, dtype=dtype)

    def _to_one_hot(self, prediction):
        # Zero out sub-threshold activations, then argmax -> one-hot over the
        # last axis; assumes a BHWC tensor.
        kept = tfwhere(math_ops.greater(prediction, self.thresholds),
                       prediction, tfzeros_like(prediction))
        return K.one_hot(K.argmax(kept, axis=-1), K.int_shape(prediction)[-1])

    # Redirect to the parent metric after one-hot conversion.
    def __call__(self, groundtruth, prediction, **kwargs):
        return super().__call__(groundtruth, self._to_one_hot(prediction), **kwargs)

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Deliberately no 'return' here (tf2 Metric subclass convention).
        super().update_state(y_true, y_pred, sample_weight)
# based on Keras/tf.keras precision and recall class definitions found at (depending on import source):
# keras: https://github.com/keras-team/keras/blob/7a39b6c62d43c25472b2c2476bd2a8983ae4f682/keras/metrics.py#L1154
# tf.keras: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/keras/metrics.py#L1134
class FBetaScore(MetricTfKeras):
    """Abstract base class for F1Score.
    For additional information, see the
    following: https://en.wikipedia.org/wiki/F1_score#Definition
    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.
    If `top_k` is set, we'll calculate precision as how often on average a class
    among the top-k classes with the highest predicted values of a batch entry is
    correct and can be found in the label for that entry.
    If `class_id` is specified, we calculate precision by considering only the
    entries in the batch for which `class_id` is above the threshold and/or in the
    top-k highest predictions, and computing the fraction of them for which
    `class_id` is indeed a correct label."""
    '''
    Arguments
        beta: The F-measure was derived so that F_β "measures the effectiveness of
            retrieval with respect to a user who attaches β times as much importance to recall as precision".
            beta=1 gives F_1 score, and is also known as the Sørensen–Dice coefficient or Dice similarity
            coefficient (DSC).
        thresholds: (Optional) A float value or a python list/tuple of float
            threshold values in [0, 1]. A threshold is compared with prediction
            values to determine the truth value of predictions (i.e., above the
            threshold is `true`, below is `false`). One metric value is generated
            for each threshold value. If neither thresholds nor top_k are set, the
            default is to calculate precision with `thresholds=0.5`.
        top_k: (Optional) Unset by default. An int value specifying the top-k
            predictions to consider when calculating precision.
        class_id: (Optional) Integer class ID for which we want binary metrics.
            This must be in the half-open interval `[0, num_classes)`, where
            `num_classes` is the last dimension of predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
    '''
    def __init__(self,
                 beta=1,
                 thresholds=None,
                 top_k=None,
                 class_id=None,
                 name=None,
                 dtype=None):
        # Default name encodes beta, e.g. 'f1score'.
        name = name or str('f' + str(beta) + 'score')
        super().__init__(name=name, dtype=dtype)
        # Keep the raw constructor value so get_config() round-trips.
        self.init_thresholds = thresholds
        self.beta = beta
        self.top_k = top_k
        self.class_id = class_id
        # With top_k set, NEG_INF effectively disables thresholding.
        default_threshold = 0.5 if top_k is None else metrics_utils_tf_keras.NEG_INF
        self.thresholds = metrics_utils_tf_keras.parse_init_thresholds(
            thresholds, default_threshold=default_threshold)
        # One accumulator slot per threshold value.
        self.true_positives = self.add_weight(
            'true_positives',
            shape=(len(self.thresholds),),
            initializer=init_ops.zeros_initializer)
        self.false_positives = self.add_weight(
            'false_positives',
            shape=(len(self.thresholds),),
            initializer=init_ops.zeros_initializer)
        self.false_negatives = self.add_weight(
            'false_negatives',
            shape=(len(self.thresholds),),
            initializer=init_ops.zeros_initializer)

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulate batch TP/FP/FN counts into the metric variables."""
        # for tf v1, use 'return metrics_...'. for tf v2, use 'metrics_...' (for inherited keras/tf.keras Metric class)
        metrics_utils_tf_keras.update_confusion_matrix_variables(
            {
                metrics_utils_tf_keras.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
                metrics_utils_tf_keras.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
                metrics_utils_tf_keras.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives
            },
            y_true,
            y_pred,
            thresholds=self.thresholds,
            top_k=self.top_k,
            class_id=self.class_id,
            sample_weight=sample_weight)

    def result(self):
        # F_beta = (1 + beta^2)*TP / ((1 + beta^2)*TP + beta^2*FN + FP)
        denominator = ((1 + self.beta * self.beta) * self.true_positives + self.beta * self.beta * self.false_negatives
                       + self.false_positives)
        numerator = (1 + self.beta * self.beta) * self.true_positives
        # div_no_nan returns 0 when the denominator is 0.
        result = math_ops.div_no_nan(numerator, denominator)
        # Scalar for a single threshold, vector otherwise.
        return result[0] if len(self.thresholds) == 1 else result

    def reset_states(self):
        # Zero every accumulator variable (one slot per threshold).
        num_thresholds = len(to_list(self.thresholds))
        K.batch_set_value(
            [(v, np.zeros((num_thresholds,))) for v in self.variables])

    def get_config(self):
        # Serialize the constructor arguments alongside the base config.
        config = {
            'beta': self.beta,
            'thresholds': self.init_thresholds,
            'top_k': self.top_k,
            'class_id': self.class_id
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
class OneHotFBetaScore(FBetaScore):
    """FBetaScore computed on one-hot-encoded predictions (pass-through)."""

    def __init__(self,
                 beta=1,
                 thresholds=None,
                 top_k=None,
                 class_id=None,
                 name=None,
                 dtype=None):
        name = name or str('f' + str(beta) + 'score_1H')
        super().__init__(beta=beta, thresholds=thresholds, top_k=top_k,
                         class_id=class_id, name=name, dtype=dtype)

    def _to_one_hot(self, prediction):
        # Zero out sub-threshold activations, then argmax -> one-hot over the
        # last axis; assumes a BHWC tensor.
        kept = tfwhere(math_ops.greater(prediction, self.thresholds),
                       prediction, tfzeros_like(prediction))
        return K.one_hot(K.argmax(kept, axis=-1), K.int_shape(prediction)[-1])

    # Redirect to the parent metric after one-hot conversion.
    def __call__(self, groundtruth, prediction, **kwargs):
        return super().__call__(groundtruth, self._to_one_hot(prediction), **kwargs)
# based on Keras/tf.keras precision and recall class definitions found at (depending on import source):
# keras: https://github.com/keras-team/keras/blob/7a39b6c62d43c25472b2c2476bd2a8983ae4f682/keras/metrics.py#L1154
# tf.keras: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/keras/metrics.py#L1134
class IoUScore(MetricTfKeras):
    """Computes the mean Intersection-Over-Union metric.
    Intersection-Over-Union is a common evaluation metric for semantic image
    segmentation, which first computes the IOU for each semantic class and then
    computes the average over classes. IOU is defined as follows:
    IOU = true_positive / (true_positive + false_positive + false_negative).
    The predictions are accumulated in a confusion matrix, weighted by
    `sample_weight` and the metric is then calculated from it.
    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.
    If `top_k` is set, we'll calculate precision as how often on average a class
    among the top-k classes with the highest predicted values of a batch entry is
    correct and can be found in the label for that entry.
    If `class_id` is specified, we calculate precision by considering only the
    entries in the batch for which `class_id` is above the threshold and/or in the
    top-k highest predictions, and computing the fraction of them for which
    `class_id` is indeed a correct label."""
    '''
    # Arguments
        thresholds: (Optional) A float value or a python list/tuple of float
            threshold values in [0, 1]. A threshold is compared with prediction
            values to determine the truth value of predictions (i.e., above the
            threshold is `true`, below is `false`). One metric value is generated
            for each threshold value. If neither thresholds nor top_k are set, the
            default is to calculate precision with `thresholds=0.5`.
        top_k: (Optional) Unset by default. An int value specifying the top-k
            predictions to consider when calculating precision.
        class_id: (Optional) Integer class ID for which we want binary metrics.
            This must be in the half-open interval `[0, num_classes)`, where
            `num_classes` is the last dimension of predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
    '''
    def __init__(self,
                 thresholds=None,
                 top_k=None,
                 class_id=None,
                 name='iou_score',
                 dtype=None):
        super().__init__(name=name, dtype=dtype)
        # Keep the raw constructor value so get_config() round-trips.
        self.init_thresholds = thresholds
        self.top_k = top_k
        self.class_id = class_id
        # With top_k set, NEG_INF effectively disables thresholding.
        default_threshold = 0.5 if top_k is None else metrics_utils_tf_keras.NEG_INF
        self.thresholds = metrics_utils_tf_keras.parse_init_thresholds(
            thresholds, default_threshold=default_threshold)
        # One accumulator slot per threshold value.
        self.true_positives = self.add_weight(
            'true_positives',
            shape=(len(self.thresholds),),
            initializer=init_ops.zeros_initializer)
        self.false_positives = self.add_weight(
            'false_positives',
            shape=(len(self.thresholds),),
            initializer=init_ops.zeros_initializer)
        self.false_negatives = self.add_weight(
            'false_negatives',
            shape=(len(self.thresholds),),
            initializer=init_ops.zeros_initializer)

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulate batch TP/FP/FN counts into the metric variables."""
        # for tf v1, use 'return metrics_...'. for tf v2, use 'metrics_...' (for inherited keras/tf.keras Metric class)
        metrics_utils_tf_keras.update_confusion_matrix_variables(
            {
                metrics_utils_tf_keras.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
                metrics_utils_tf_keras.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
                metrics_utils_tf_keras.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives
            },
            y_true,
            y_pred,
            thresholds=self.thresholds,
            top_k=self.top_k,
            class_id=self.class_id,
            sample_weight=sample_weight)

    def result(self):
        # IOU = TP / (TP + FN + FP); div_no_nan returns 0 when denominator is 0.
        denominator = (self.true_positives + self.false_negatives + self.false_positives)
        numerator = self.true_positives
        result = math_ops.div_no_nan(numerator, denominator)
        # Scalar for a single threshold, vector otherwise.
        return result[0] if len(self.thresholds) == 1 else result

    def reset_states(self):
        # Zero every accumulator variable (one slot per threshold).
        num_thresholds = len(to_list(self.thresholds))
        K.batch_set_value(
            [(v, np.zeros((num_thresholds,))) for v in self.variables])

    def get_config(self):
        # Serialize the constructor arguments alongside the base config.
        config = {
            'thresholds': self.init_thresholds,
            'top_k': self.top_k,
            'class_id': self.class_id
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
class OneHotIoUScore(IoUScore):
    """IoUScore computed on one-hot-encoded predictions (pass-through)."""

    def __init__(self,
                 thresholds=None,
                 top_k=None,
                 class_id=None,
                 name='iou_score_1H',
                 dtype=None):
        super().__init__(thresholds=thresholds, top_k=top_k,
                         class_id=class_id, name=name, dtype=dtype)

    def _to_one_hot(self, prediction):
        # Zero out sub-threshold activations, then argmax -> one-hot over the
        # last axis; assumes a BHWC tensor.
        kept = tfwhere(math_ops.greater(prediction, self.thresholds),
                       prediction, tfzeros_like(prediction))
        return K.one_hot(K.argmax(kept, axis=-1), K.int_shape(prediction)[-1])

    # Redirect to the parent metric after one-hot conversion.
    def __call__(self, groundtruth, prediction, **kwargs):
        return super().__call__(groundtruth, self._to_one_hot(prediction), **kwargs)
# VERSION 2 CLASSBINARYACCURACY METHOD, BASED ON KERAS PACKAGE -- ACCUMULATED OVER EPOCH (inherit KERAS.METRIC)
# based on Keras/tf.keras precision and recall class definitions found at (depending on import source):
# keras: https://github.com/keras-team/keras/blob/7a39b6c62d43c25472b2c2476bd2a8983ae4f682/keras/metrics.py#L1154
# tf.keras: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/keras/metrics.py#L1134
class ClassBinaryAccuracyTfKeras(MetricTfKeras):
    r"""
    .. math:: Binary Accuracy = (TN + TP)/(TN+TP+FN+FP) = Number of correct assessments/Number of all assessments,
    for given class for more than one class input, output becomes mean accuracy (similar but not same as categorical)
    # Arguments
        thresholds: (Optional) A float value or a python list/tuple of float
            threshold values in [0, 1]. A threshold is compared with prediction
            values to determine the truth value of predictions (i.e., above the
            threshold is `true`, below is `false`). One metric value is generated
            for each threshold value. If neither thresholds nor top_k are set, the
            default is to calculate precision with `thresholds=0.5`.
        top_k: (Optional) Unset by default. An int value specifying the top-k
            predictions to consider when calculating precision.
        class_id: (Optional) Integer class ID for which we want binary metrics.
            This must be in the half-open interval `[0, num_classes)`, where
            `num_classes` is the last dimension of predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
    """
    def __init__(self,
                 thresholds=None,
                 top_k=None,
                 class_id=None,
                 name='class_all_binary_accuracy_tfkeras',
                 dtype=None):
        super().__init__(name=name, dtype=dtype)
        # Keep the raw constructor value so get_config() round-trips.
        self.init_thresholds = thresholds
        self.top_k = top_k
        self.class_id = class_id
        # With top_k set, NEG_INF effectively disables thresholding.
        default_threshold = 0.5 if top_k is None else metrics_utils_tf_keras.NEG_INF
        self.thresholds = metrics_utils_tf_keras.parse_init_thresholds(
            thresholds, default_threshold=default_threshold)
        # One accumulator slot per threshold value; unlike FBetaScore/IoUScore
        # this metric also tracks true negatives.
        self.true_positives = self.add_weight(
            'true_positives',
            shape=(len(self.thresholds),),
            initializer=init_ops.zeros_initializer)
        self.false_positives = self.add_weight(
            'false_positives',
            shape=(len(self.thresholds),),
            initializer=init_ops.zeros_initializer)
        self.false_negatives = self.add_weight(
            'false_negatives',
            shape=(len(self.thresholds),),
            initializer=init_ops.zeros_initializer)
        self.true_negatives = self.add_weight(
            'true_negatives',
            shape=(len(self.thresholds),),
            initializer=init_ops.zeros_initializer)

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulate batch TP/FP/FN/TN counts into the metric variables."""
        # for tf v1, use 'return metrics_...'. for tf v2, use 'metrics_...' (for inherited keras/tf.keras Metric class)
        metrics_utils_tf_keras.update_confusion_matrix_variables(
            {
                metrics_utils_tf_keras.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
                metrics_utils_tf_keras.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
                metrics_utils_tf_keras.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
                metrics_utils_tf_keras.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives
            },
            y_true,
            y_pred,
            thresholds=self.thresholds,
            top_k=self.top_k,
            class_id=self.class_id,
            sample_weight=sample_weight)

    def result(self):
        # Accuracy = (TP + TN) / (TP + TN + FP + FN); div_no_nan returns 0
        # when the denominator is 0.
        denominator = (self.true_positives + self.false_negatives + self.false_positives + self.true_negatives)
        numerator = self.true_positives + self.true_negatives
        result = math_ops.div_no_nan(numerator, denominator)
        # Scalar for a single threshold, vector otherwise.
        return result[0] if len(self.thresholds) == 1 else result

    def reset_states(self):
        # Zero every accumulator variable (one slot per threshold).
        num_thresholds = len(to_list(self.thresholds))
        K.batch_set_value(
            [(v, np.zeros((num_thresholds,))) for v in self.variables])

    def get_config(self):
        # Serialize the constructor arguments alongside the base config.
        config = {
            'thresholds': self.init_thresholds,
            'top_k': self.top_k,
            'class_id': self.class_id
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
class OneHotClassBinaryAccuracyTfKeras(ClassBinaryAccuracyTfKeras):
    """Class binary accuracy on one-hot-encoded predictions (pass-through)."""

    def __init__(self,
                 thresholds=None,
                 top_k=None,
                 class_id=None,
                 name='class_all_binary_accuracy_tfkeras_1H',
                 dtype=None):
        super().__init__(thresholds=thresholds, top_k=top_k,
                         class_id=class_id, name=name, dtype=dtype)

    def _to_one_hot(self, prediction):
        # Zero out sub-threshold activations, then argmax -> one-hot over the
        # last axis; assumes a BHWC tensor.
        kept = tfwhere(math_ops.greater(prediction, self.thresholds),
                       prediction, tfzeros_like(prediction))
        return K.one_hot(K.argmax(kept, axis=-1), K.int_shape(prediction)[-1])

    # Redirect to the parent metric after one-hot conversion.
    def __call__(self, groundtruth, prediction, **kwargs):
        return super().__call__(groundtruth, self._to_one_hot(prediction), **kwargs)
# VERSION 1 CLASSBINARYACCURACY METHOD, BASED ON SEGMENTATION_MODELS PACKAGE -- AVERAGED OVER EPOCH
# adapted from: s_m.IOUScore() from github.com/qubvel/segmentation_models/blob/master/segmentation_models/metrics.py
class ClassBinaryAccuracySM(MetricSM):
    r"""
    .. math:: Binary Accuracy = (TN + TP)/(TN+TP+FN+FP) = Number of correct assessments/Number of all assessments,
    for given class for more than one class input, output becomes mean accuracy (similar but not same as categorical)
    Args:
        class_weights: 1. or ``np.array`` of class weights (``len(weights) = num_classes``).
        class_indexes: Optional integer or list of integers, classes to consider, if ``None`` all classes are used.
        smooth: value to avoid division by zero
        per_image: if ``True``, metric is calculated as mean over images in batch (B),
            else over whole batch
        threshold: value to round predictions (use ``>`` comparison), if ``None`` prediction will not be round
    Returns:
        A callable ``class_binary_accuracy`` instance. Can be used in ``model.compile(...)`` function.
    Example:
    .. code:: python
        metric = ClassBinaryAccuracy()
        model.compile('SGD', loss=loss, metrics=[metric])
    """
    def __init__(
            self,
            class_weights=None,
            class_indexes=None,
            threshold=None,
            per_image=False,
            smooth=SMOOTH,
            name=None
    ):
        # Name is assigned before super().__init__ so the SM base sees it.
        self.name = name or 'class_all_binary_accuracy_sm'
        super().__init__(name=self.name)
        # Scalar 1 weights all classes equally.
        self.class_weights = class_weights if class_weights is not None else 1
        self.class_indexes = class_indexes
        self.threshold = threshold
        self.per_image = per_image
        self.smooth = smooth

    def __call__(self, gt, pr):
        """Return per-class binary accuracy averaged over the batch."""
        backend = self.submodules['backend']
        # Restrict to the requested channels (all when class_indexes is None).
        gt, pr = functional.gather_channels(gt, pr, indexes=self.class_indexes, **self.submodules)
        # Binarize predictions only when a threshold was supplied.
        pr = functional.round_if_needed(pr, self.threshold, **self.submodules)
        axes = functional.get_reduce_axes(self.per_image, **self.submodules)
        # score calculation (assumed pr are 1-hot in practice)
        tp = backend.sum(gt * pr, axis=axes)
        fp = backend.sum(pr, axis=axes) - tp
        fn = backend.sum(gt, axis=axes) - tp
        tn = backend.sum((-gt + 1) * (-pr + 1), axis=axes)
        # (TP + TN) / (TP + TN + FP + FN), smoothed to avoid division by zero.
        score = (tp + tn) / (tp + tn + fp + fn + self.smooth)
        # score is averaged over whole batch here (unlike Keras, where score is accumulated over batch)
        score = functional.average(score, self.per_image, self.class_weights, **self.submodules)
        return score
class OneHotClassBinaryAccuracySM(ClassBinaryAccuracySM):
    """SM class binary accuracy on one-hot-encoded predictions (pass-through)."""

    def __init__(
            self,
            class_weights=None,
            class_indexes=None,
            threshold=None,
            per_image=False,
            smooth=SMOOTH,
            name=None
    ):
        self.name = name or 'class_all_binary_accuracy_sm_1H'
        super().__init__(class_weights=class_weights,
                         class_indexes=class_indexes,
                         threshold=threshold,
                         per_image=per_image,
                         smooth=smooth,
                         name=self.name)

    def _to_one_hot(self, prediction):
        # Zero out sub-threshold activations (self.threshold), then
        # argmax -> one-hot over the last axis; assumes a BHWC tensor.
        kept = tfwhere(math_ops.greater(prediction, self.threshold),
                       prediction, tfzeros_like(prediction))
        return K.one_hot(K.argmax(kept, axis=-1), K.int_shape(prediction)[-1])

    # Redirect to the parent metric after one-hot conversion.
    def __call__(self, groundtruth, prediction):
        return super().__call__(groundtruth, self._to_one_hot(prediction))
| 49.968354
| 211
| 0.675174
| 4,008
| 31,580
| 5.109032
| 0.102545
| 0.010548
| 0.024711
| 0.025785
| 0.795136
| 0.788836
| 0.781316
| 0.771207
| 0.76647
| 0.7568
| 0
| 0.009924
| 0.240564
| 31,580
| 631
| 212
| 50.047544
| 0.843848
| 0.305668
| 0
| 0.745721
| 0
| 0
| 0.024362
| 0.006635
| 0
| 0
| 0
| 0
| 0.00489
| 1
| 0.110024
| false
| 0
| 0.022005
| 0
| 0.212714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e41357bce7a78c83508d551c6ecc3ac90d404935
| 34
|
py
|
Python
|
relativity/special/__init__.py
|
tdsymonds/relativity
|
89314f4a8b7003ae8ee3718ff5fc518c5bdb2973
|
[
"MIT"
] | null | null | null |
relativity/special/__init__.py
|
tdsymonds/relativity
|
89314f4a8b7003ae8ee3718ff5fc518c5bdb2973
|
[
"MIT"
] | null | null | null |
relativity/special/__init__.py
|
tdsymonds/relativity
|
89314f4a8b7003ae8ee3718ff5fc518c5bdb2973
|
[
"MIT"
] | null | null | null |
from .special_relativity import *
| 17
| 33
| 0.823529
| 4
| 34
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e42c21dd40af470ae8fb2c0a335a702e734146e1
| 816
|
py
|
Python
|
pyQuARC/code/base_validator.py
|
NASA-IMPACT/pyQuARC
|
9c174624a9d3e340cf91c7925aaae2203515e13f
|
[
"Apache-2.0"
] | 9
|
2021-03-12T18:04:25.000Z
|
2022-03-22T01:30:56.000Z
|
pyQuARC/code/base_validator.py
|
NASA-IMPACT/pyQuARC
|
9c174624a9d3e340cf91c7925aaae2203515e13f
|
[
"Apache-2.0"
] | 129
|
2021-04-19T15:42:12.000Z
|
2022-03-28T16:50:39.000Z
|
pyQuARC/code/base_validator.py
|
NASA-IMPACT/pyQuARC
|
9c174624a9d3e340cf91c7925aaae2203515e13f
|
[
"Apache-2.0"
] | 1
|
2022-03-30T20:33:30.000Z
|
2022-03-30T20:33:30.000Z
|
class BaseValidator:
    """
    Base class for all the validators
    """

    def __init__(self):
        pass

    @staticmethod
    def eq(first, second):
        """Return True when the two values compare equal."""
        return first == second

    @staticmethod
    def neq(first, second):
        """Return True when the two values differ."""
        return first != second

    @staticmethod
    def lt(first, second):
        """Return True when *first* is strictly less than *second*."""
        return first < second

    @staticmethod
    def lte(first, second):
        """Return True when *first* is less than or equal to *second*."""
        return first <= second

    @staticmethod
    def gt(first, second):
        """Return True when *first* is strictly greater than *second*."""
        return first > second

    @staticmethod
    def gte(first, second):
        """Return True when *first* is greater than or equal to *second*."""
        return first >= second

    @staticmethod
    def is_in(value, list_of_values):
        """Return True when *value* is a member of *list_of_values*."""
        return value in list_of_values

    @staticmethod
    def compare(first, second, relation):
        """Apply the comparison named by *relation* (e.g. 'lt', 'eq')
        to the two operands and return the result."""
        return getattr(BaseValidator, relation)(first, second)
| 19.902439
| 47
| 0.610294
| 88
| 816
| 5.556818
| 0.352273
| 0.314928
| 0.208589
| 0.269939
| 0.527607
| 0.527607
| 0.527607
| 0
| 0
| 0
| 0
| 0
| 0.306373
| 816
| 40
| 48
| 20.4
| 0.863958
| 0.040441
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.321429
| false
| 0.035714
| 0
| 0.25
| 0.642857
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
e42ea1919db2d7e87a325f8ccfaa7f841c8b1de9
| 34
|
py
|
Python
|
server/problem_sets/gen/gens/relation_analysis/__init__.py
|
iiridescent/problem-sets
|
e906fe7509cd158ecdb5920853636339d4d531c3
|
[
"MIT"
] | null | null | null |
server/problem_sets/gen/gens/relation_analysis/__init__.py
|
iiridescent/problem-sets
|
e906fe7509cd158ecdb5920853636339d4d531c3
|
[
"MIT"
] | 5
|
2021-03-09T10:36:59.000Z
|
2022-02-26T14:36:08.000Z
|
server/problem_sets/gen/gens/relation_analysis/__init__.py
|
vinhowe/problem-sets
|
e906fe7509cd158ecdb5920853636339d4d531c3
|
[
"MIT"
] | null | null | null |
from .relation_analysis import *
| 11.333333
| 32
| 0.794118
| 4
| 34
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147059
| 34
| 2
| 33
| 17
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e47f1a4f67c52e5fe35d0faac3f816d0ac84913c
| 4,063
|
py
|
Python
|
branchpro/tests/test_sliders.py
|
SABS-R3-Epidemiology/branching-process
|
d7dd5f612c45b280b0b369e8e0391ee6dcd84459
|
[
"BSD-3-Clause"
] | 1
|
2021-04-14T09:51:43.000Z
|
2021-04-14T09:51:43.000Z
|
branchpro/tests/test_sliders.py
|
SABS-R3-Epidemiology/branchpro
|
2b18e565f16564dfd537d992721f5f99a41c0f1e
|
[
"BSD-3-Clause"
] | 210
|
2020-10-25T18:49:59.000Z
|
2022-02-20T19:22:07.000Z
|
branchpro/tests/test_sliders.py
|
SABS-R3-Epidemiology/branching-process
|
d7dd5f612c45b280b0b369e8e0391ee6dcd84459
|
[
"BSD-3-Clause"
] | 1
|
2020-10-28T13:48:19.000Z
|
2020-10-28T13:48:19.000Z
|
#
# This file is part of BRANCHPRO
# (https://github.com/SABS-R3-Epidemiology/branchpro.git) which is released
# under the BSD 3-clause license. See accompanying LICENSE.md for copyright
# notice and full license details.
#
import unittest
import numpy as np
import branchpro as bp
class Test_SliderComponent(unittest.TestCase):
    """
    Test the '_SliderComponent' class.
    """

    def test__init__(self):
        # Construction must succeed without arguments.
        bp._SliderComponent()

    def test_add_slider(self):
        """add_slider stores a label/slider pair and honours the
        as_integer and invisible options."""
        sliders = bp._SliderComponent()
        sliders.add_slider('param1', '1', 0, 0, 1, 0.5)
        sliders.add_slider('param2', '2', 0, 0, 15, 1, as_integer=True)
        sliders.add_slider('param3', '3', 0, 0, 1, 0.5, invisible=True)

        # First slider: plain float slider.
        self.assertEqual(sliders._sliders[0].children[0].children, 'param1')
        self.assertEqual(sliders._sliders[0].children[1].id, '1')
        self.assertEqual(sliders._sliders[0].children[1].min, 0)
        self.assertEqual(sliders._sliders[0].children[1].max, 1)
        self.assertEqual(sliders._sliders[0].children[1].value, 0)
        self.assertEqual(
            sliders._sliders[0].children[1].marks,
            {ri: '{:.2f}'.format(ri) for ri in [0, 0.50, 1]}
        )
        self.assertEqual(sliders._sliders[0].children[1].step, 0.5)

        # Second slider: integer mark labels requested via as_integer=True.
        self.assertEqual(sliders._sliders[1].children[0].children, 'param2')
        self.assertEqual(sliders._sliders[1].children[1].id, '2')
        self.assertEqual(sliders._sliders[1].children[1].min, 0)
        self.assertEqual(sliders._sliders[1].children[1].max, 15)
        self.assertEqual(sliders._sliders[1].children[1].value, 0)
        self.assertEqual(
            sliders._sliders[1].children[1].marks,
            {
                # if the slider values need be integers
                ri: '{:.0f}'.format(ri) for ri in np.round(
                    np.linspace(0, 15, 10), 0)
            }
        )
        self.assertEqual(sliders._sliders[1].children[1].step, 1)

        # Third slider: invisible=True hides the component via CSS display.
        self.assertEqual(sliders._sliders[2].children[0].children, 'param3')
        self.assertEqual(sliders._sliders[2].children[1].id, '3')
        self.assertEqual(sliders._sliders[2].children[1].min, 0)
        self.assertEqual(sliders._sliders[2].children[1].max, 1)
        self.assertEqual(sliders._sliders[2].children[1].value, 0)
        # BUG FIX: this assertion previously re-checked sliders._sliders[0]
        # (a copy-paste slip); the marks under test here belong to the
        # third slider.
        self.assertEqual(
            sliders._sliders[2].children[1].marks,
            {ri: '{:.2f}'.format(ri) for ri in [0.00, 0.50, 1.00]}
        )
        self.assertEqual(sliders._sliders[2].children[1].step, 0.5)
        self.assertEqual(sliders._sliders[2].style['display'], 'none')

    def test_get_sliders_div(self):
        """get_sliders_div wraps all registered sliders in one div, in order."""
        sliders = bp._SliderComponent()
        sliders.add_slider('param1', '1', 0, 0, 1, 0.5)
        sliders.add_slider('param2', '2', 0.5, 0, 1, 0.25)
        div = sliders.get_sliders_div().children

        self.assertEqual(div[0].children[0].children, 'param1')
        self.assertEqual(div[0].children[1].id, '1')
        self.assertEqual(div[0].children[1].min, 0)
        self.assertEqual(div[0].children[1].max, 1)
        self.assertEqual(div[0].children[1].value, 0)
        self.assertEqual(
            div[0].children[1].marks,
            {ri: '{:.2f}'.format(ri) for ri in [0, 0.50, 1]}
        )
        self.assertEqual(div[0].children[1].step, 0.5)

        self.assertEqual(div[1].children[0].children, 'param2')
        self.assertEqual(div[1].children[1].id, '2')
        self.assertEqual(div[1].children[1].min, 0)
        self.assertEqual(div[1].children[1].max, 1)
        self.assertEqual(div[1].children[1].value, 0.5)
        self.assertEqual(
            div[1].children[1].marks,
            {ri: '{:.2f}'.format(ri) for ri in [
                0, 0.25, 0.50, 0.75, 1]}
        )
        self.assertEqual(div[1].children[1].step, 0.25)

    def test_slider_ids(self):
        """slider_ids returns the registered ids in insertion order."""
        sliders = bp._SliderComponent()
        sliders.add_slider('param1', '1', 0, 0, 1, 0.5)
        sliders.add_slider('param2', '2', 0.5, 0, 1, 0.25)
        self.assertEqual(sliders.slider_ids(), ['1', '2'])
| 40.63
| 76
| 0.60128
| 554
| 4,063
| 4.319495
| 0.158845
| 0.231926
| 0.21145
| 0.266611
| 0.742583
| 0.724196
| 0.707898
| 0.417468
| 0.253239
| 0.216465
| 0
| 0.06592
| 0.230864
| 4,063
| 99
| 77
| 41.040404
| 0.69984
| 0.070145
| 0
| 0.223684
| 0
| 0
| 0.033813
| 0
| 0
| 0
| 0
| 0
| 0.486842
| 1
| 0.052632
| false
| 0
| 0.039474
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e4bbadad41e783dab257204985c7be2ff37ba99a
| 364
|
py
|
Python
|
queues/priority_queue/queue.py
|
deveshpatel0101/python-data-structures-algorithms
|
3ae54fff1bb340b9f6b6ff4361eac38fb83eebb5
|
[
"MIT"
] | null | null | null |
queues/priority_queue/queue.py
|
deveshpatel0101/python-data-structures-algorithms
|
3ae54fff1bb340b9f6b6ff4361eac38fb83eebb5
|
[
"MIT"
] | null | null | null |
queues/priority_queue/queue.py
|
deveshpatel0101/python-data-structures-algorithms
|
3ae54fff1bb340b9f6b6ff4361eac38fb83eebb5
|
[
"MIT"
] | null | null | null |
from queues.priority_queue.heap import MaxHeap
class PriorityQueue:
    """Priority queue facade delegating storage to a max-heap."""

    def __init__(self):
        # The highest-priority entry always sits at the heap root.
        self.priority_queue = MaxHeap()

    def insert(self, name, priority):
        """Enqueue *name* with the given *priority*."""
        self.priority_queue.insert(name, priority)

    def remove(self):
        """Dequeue and return the highest-priority entry."""
        return self.priority_queue.extractMax()

    def display(self):
        """Return the underlying heap contents."""
        return self.priority_queue.getHeap()
| 22.75
| 50
| 0.692308
| 43
| 364
| 5.651163
| 0.44186
| 0.26749
| 0.279835
| 0.18107
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 364
| 15
| 51
| 24.266667
| 0.84965
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.1
| 0.2
| 0.8
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
e4de7386a1da304a9b42db841eca8ed5d9cd1ff1
| 74
|
py
|
Python
|
wsgi.py
|
hugofer93/aimo-api
|
fe3cc3f169f7a46d4ba68625a7936f37f55b1aad
|
[
"MIT"
] | null | null | null |
wsgi.py
|
hugofer93/aimo-api
|
fe3cc3f169f7a46d4ba68625a7936f37f55b1aad
|
[
"MIT"
] | null | null | null |
wsgi.py
|
hugofer93/aimo-api
|
fe3cc3f169f7a46d4ba68625a7936f37f55b1aad
|
[
"MIT"
] | null | null | null |
from client import app as client_app
from server import app as server_app
| 24.666667
| 36
| 0.837838
| 14
| 74
| 4.285714
| 0.428571
| 0.3
| 0.366667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 74
| 2
| 37
| 37
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
90044e55827169599901b495c26d7cb773c4037c
| 96
|
py
|
Python
|
vn/__init__.py
|
mklasby/mri-variationalnetwork
|
b784fbfcf24d833edb4a41dc70cd863052528f19
|
[
"MIT"
] | 119
|
2017-09-22T01:10:25.000Z
|
2022-03-17T18:44:39.000Z
|
vn/__init__.py
|
mklasby/mri-variationalnetwork
|
b784fbfcf24d833edb4a41dc70cd863052528f19
|
[
"MIT"
] | 11
|
2017-12-26T10:45:18.000Z
|
2021-03-04T17:10:04.000Z
|
vn/__init__.py
|
mklasby/mri-variationalnetwork
|
b784fbfcf24d833edb4a41dc70cd863052528f19
|
[
"MIT"
] | 55
|
2018-06-18T05:37:52.000Z
|
2022-03-14T22:41:27.000Z
|
from .data import *
from .paramdefinitions import *
from .utils import *
from .proxmaps import *
| 24
| 31
| 0.760417
| 12
| 96
| 6.083333
| 0.5
| 0.410959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 96
| 4
| 32
| 24
| 0.901235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
904a9cbdd7928a8cca7fce0d5fcd37b93d756740
| 201
|
py
|
Python
|
tests/test_config.py
|
SauravMaheshkar/MLP-Mixer
|
c854bcc8aece199931bedfd18bf231c20421d26c
|
[
"MIT"
] | 7
|
2021-07-02T03:26:23.000Z
|
2021-11-23T02:42:41.000Z
|
tests/test_config.py
|
SauravMaheshkar/MLP-Mixer
|
c854bcc8aece199931bedfd18bf231c20421d26c
|
[
"MIT"
] | null | null | null |
tests/test_config.py
|
SauravMaheshkar/MLP-Mixer
|
c854bcc8aece199931bedfd18bf231c20421d26c
|
[
"MIT"
] | 2
|
2021-07-13T05:30:53.000Z
|
2021-10-01T21:42:49.000Z
|
from typing import Dict
from mlpmixer_flax.config import configuration, mixer_b16_config
def test_config():
    """Both exported mixer configurations must be dictionaries."""
    for cfg in (configuration, mixer_b16_config):
        assert isinstance(cfg, Dict)
| 20.1
| 64
| 0.800995
| 26
| 201
| 5.961538
| 0.538462
| 0.103226
| 0.180645
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0.144279
| 201
| 9
| 65
| 22.333333
| 0.877907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5fba82220ffe0624c8d68c2885507b0d04643005
| 105
|
py
|
Python
|
ch6/4 interac textbox.py
|
PacktPublishing/Learning-Jupyter
|
734ef16ade5f9874e5187e483746524a675bf915
|
[
"MIT"
] | 11
|
2017-02-02T08:47:32.000Z
|
2021-09-15T18:04:01.000Z
|
ch8/B05207_8.py
|
PacktPublishing/Learning-Jupyter
|
734ef16ade5f9874e5187e483746524a675bf915
|
[
"MIT"
] | 2
|
2016-12-02T04:43:11.000Z
|
2016-12-02T04:43:57.000Z
|
ch6/4 interac textbox.py
|
PacktPublishing/Learning-Jupyter
|
734ef16ade5f9874e5187e483746524a675bf915
|
[
"MIT"
] | 8
|
2016-12-02T04:39:10.000Z
|
2018-04-01T22:58:19.000Z
|
from ipywidgets import interact
def myfunction(x):
    # Identity function: interact() will echo the widget's current value.
    return x

# Build an interactive control bound to myfunction; presumably the string
# default makes ipywidgets render a text box (per interact's widget
# abbreviation rules) — confirm against ipywidgets docs.
interact(myfunction, x= "Hello World ");
| 21
| 40
| 0.742857
| 14
| 105
| 5.571429
| 0.714286
| 0.282051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161905
| 105
| 4
| 41
| 26.25
| 0.886364
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
5fc0fcb2a3151e0d27e22b57c57fd6ccc51cf16c
| 6,789
|
py
|
Python
|
tests/test_taxonomy.py
|
alpae/pyham
|
e30a018794ce39adf5b77df8bc057841eb142a15
|
[
"MIT"
] | 7
|
2019-03-29T18:23:28.000Z
|
2021-12-07T07:41:27.000Z
|
tests/test_taxonomy.py
|
alpae/pyham
|
e30a018794ce39adf5b77df8bc057841eb142a15
|
[
"MIT"
] | 14
|
2019-04-29T07:57:01.000Z
|
2022-03-05T04:00:40.000Z
|
tests/test_taxonomy.py
|
alpae/pyham
|
e30a018794ce39adf5b77df8bc057841eb142a15
|
[
"MIT"
] | 3
|
2019-07-18T12:54:45.000Z
|
2021-04-22T07:25:47.000Z
|
import unittest
from pyham import taxonomy, EvolutionaryConceptError
import os
class HAMTaxonomy(unittest.TestCase):
    """Tests for pyham's Taxonomy: internal-name handling across newick
    strings, newick files, and phyloxml files (with several name tags)."""

    def setUp(self):
        # Inline newick trees: a normal one, one with support values as
        # internal labels, and one with a duplicated leaf name.
        self.newick_str = "((HUMAN, PANTR)Primates,(MOUSE, RATNO)Rodents)Euarchontoglires;"
        self.newick_str_support = "((HUMAN, PANTR)1:0.1,(MOUSE, RATNO)1:0.1)1:0.1;"
        self.newick_str_non_unique = "((HUMAN, HUMAN)Primates,(MOUSE, RATNO)Rodents)Euarchontoglires;"
        # Expected internal-node names: native labels vs. concatenated
        # leaf names (used when use_internal_name=False).
        self.expected_name_native_ns = {"Primates", "Rodents", "Euarchontoglires"}
        self.expected_name_concat_ns = {"HUMAN/PANTR", "MOUSE/RATNO", "HUMAN/PANTR/MOUSE/RATNO"}
        # Tree fixtures shipped next to this test module.
        self.newick_file = os.path.join(os.path.dirname(__file__), './data/simpleEx.nwk')
        self.expected_name_native_nf = {"Primates", "Rodents", "Euarchontoglires", "Mammalia", "Vertebrata"}
        self.expected_name_concat_nf = {"HUMAN/PANTR", "MOUSE/RATNO", "HUMAN/PANTR/MOUSE/RATNO", "HUMAN/PANTR/MOUSE/RATNO/CANFA", "XENTR/HUMAN/PANTR/MOUSE/RATNO/CANFA"}
        self.phyloxml_file = os.path.join(os.path.dirname(__file__), './data/simpleEx.phyloxml')
        self.phyloxml_file_no_int_name = os.path.join(os.path.dirname(__file__), './data/simpleExNoName.phyloxml')
        self.phyloxml_file_no_clade_name = os.path.join(os.path.dirname(__file__), './data/simpleExNoCladeName.phyloxml')
        # NOTE: phyloxml child order differs from the newick file, hence a
        # second set of concatenated names.
        self.expected_name_concat_nf2 = {"PANTR/HUMAN", "RATNO/MOUSE", "RATNO/MOUSE/PANTR/HUMAN",
                                         "RATNO/MOUSE/PANTR/HUMAN/CANFA", "XENTR/RATNO/MOUSE/PANTR/HUMAN/CANFA"}
        # Leaf names: 5-letter species codes vs. scientific names.
        self.set_species_name = {"HUMAN", "PANTR", "MOUSE", "RATNO", "CANFA", "XENTR"}
        self.set_species_sciname = {"Homo Sapiens", "Chimp", "Mus Musculus", "Ratus Norvegicus", "Canis Familiaris", "Xenopus Tropicallis"}

    def test_non_unique_leaf_names(self):
        # Duplicate leaf names must be rejected regardless of the
        # use_internal_name setting.
        with self.assertRaises(KeyError):
            taxonomy.Taxonomy(self.newick_str_non_unique)
        with self.assertRaises(KeyError):
            taxonomy.Taxonomy(self.newick_str_non_unique, use_internal_name=False)

    def test_use_internal_name(self):
        """With use_internal_name=True the native internal labels are kept."""
        # using the normal newick
        t = taxonomy.Taxonomy(self.newick_str, use_internal_name=True)
        observed_name = {node.name for node in t.tree.traverse() if node.is_leaf() is False}
        self.assertSetEqual(self.expected_name_native_ns, observed_name)
        # using the file newick
        t2 = taxonomy.Taxonomy(self.newick_file, use_internal_name=True, tree_format='newick')
        observed_name = {node.name for node in t2.tree.traverse() if node.is_leaf() is False}
        self.assertSetEqual(self.expected_name_native_nf, observed_name)
        # using the file phyloxml with sciname
        t3 = taxonomy.Taxonomy(self.phyloxml_file, use_internal_name=True, tree_format='phyloxml', phyloxml_internal_name_tag='taxonomy_scientific_name', phyloxml_leaf_name_tag='taxonomy_scientific_name')
        observed_name = {node.name for node in t3.tree.traverse() if node.is_leaf() is False}
        self.assertSetEqual(self.expected_name_native_nf, observed_name)
        # using the file phyloxml with clade name
        t4 = taxonomy.Taxonomy(self.phyloxml_file, use_internal_name=True, tree_format='phyloxml',
                               phyloxml_internal_name_tag='clade_name',
                               phyloxml_leaf_name_tag='taxonomy_scientific_name')
        observed_name = {node.name for node in t4.tree.traverse() if node.is_leaf() is False}
        self.assertSetEqual(self.expected_name_native_nf, observed_name)

    def test_dont_use_internal_name(self):
        """With use_internal_name=False internal nodes get concatenated
        leaf names (also when support values occupy the label slot)."""
        # using the normal newick
        t = taxonomy.Taxonomy(self.newick_str, use_internal_name=False)
        observed_name = {node.name for node in t.tree.traverse() if node.is_leaf() is False}
        self.assertSetEqual(self.expected_name_concat_ns, observed_name)
        # using the file newick
        t2 = taxonomy.Taxonomy(self.newick_file, use_internal_name=False, tree_format='newick')
        observed_name = {node.name for node in t2.tree.traverse() if node.is_leaf() is False}
        self.assertSetEqual(self.expected_name_concat_nf, observed_name)
        # using the file phyloxml
        t3 = taxonomy.Taxonomy(self.phyloxml_file, use_internal_name=False, tree_format='phyloxml', phyloxml_leaf_name_tag='taxonomy_code')
        observed_name = {node.name for node in t3.tree.traverse() if node.is_leaf() is False}
        self.assertSetEqual(self.expected_name_concat_nf2, observed_name)
        # using the file phyloxml with phylogeny code
        t6 = taxonomy.Taxonomy(self.phyloxml_file_no_int_name, use_internal_name=False, tree_format='phyloxml', phyloxml_leaf_name_tag='taxonomy_code')
        observed_name = {node.name for node in t6.tree.traverse() if node.is_leaf() is False}
        self.assertSetEqual(self.expected_name_concat_nf2, observed_name)
        # using newick with support values as internal names
        t_support = taxonomy.Taxonomy(self.newick_str_support, use_internal_name=False)
        observed_name = {node.name for node in t_support.tree.traverse() if node.is_leaf() is False}
        self.assertSetEqual(self.expected_name_concat_ns, observed_name)

    def test_all_correct_name(self):
        """Leaf names follow the configured phyloxml leaf-name tag."""
        # using the file newick
        t2 = taxonomy.Taxonomy(self.newick_file, use_internal_name=True, tree_format='newick')
        observed_name = {node.name for node in t2.tree.traverse() if node.is_leaf() is True}
        self.assertSetEqual(self.set_species_name, observed_name)
        # using the file phyloxml with clade name
        t4 = taxonomy.Taxonomy(self.phyloxml_file, use_internal_name=True, tree_format='phyloxml', phyloxml_internal_name_tag='taxonomy_scientific_name', phyloxml_leaf_name_tag='clade_name')
        observed_name = {node.name for node in t4.tree.traverse() if node.is_leaf() is True}
        self.assertSetEqual(self.set_species_name, observed_name)
        # using the file phyloxml with phylogeny sciname
        t5 = taxonomy.Taxonomy(self.phyloxml_file, use_internal_name=True, tree_format='phyloxml', phyloxml_internal_name_tag='taxonomy_scientific_name',
                               phyloxml_leaf_name_tag='taxonomy_scientific_name')
        observed_name = {node.name for node in t5.tree.traverse() if node.is_leaf() is True}
        self.assertSetEqual(self.set_species_sciname, observed_name)
        # using the file phyloxml with phylogeny code
        t6 = taxonomy.Taxonomy(self.phyloxml_file, use_internal_name=True, tree_format='phyloxml', phyloxml_internal_name_tag='taxonomy_scientific_name',
                               phyloxml_leaf_name_tag='taxonomy_code')
        observed_name = {node.name for node in t6.tree.traverse() if node.is_leaf() is True}
        self.assertSetEqual(self.set_species_name, observed_name)
| 59.034783
| 204
| 0.716011
| 908
| 6,789
| 5.070485
| 0.113436
| 0.067767
| 0.052129
| 0.056473
| 0.854692
| 0.789531
| 0.754778
| 0.746308
| 0.722632
| 0.699609
| 0
| 0.005747
| 0.17985
| 6,789
| 114
| 205
| 59.552632
| 0.821121
| 0.064958
| 0
| 0.309859
| 0
| 0.014085
| 0.163455
| 0.08844
| 0
| 0
| 0
| 0
| 0.211268
| 1
| 0.070423
| false
| 0
| 0.042254
| 0
| 0.126761
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3963ed2560c11b7702d30d1e4fadc15f6a33bdf3
| 10,428
|
py
|
Python
|
tests/mechanisms/test_contrastive_hebbian_mechanism.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
tests/mechanisms/test_contrastive_hebbian_mechanism.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
tests/mechanisms/test_contrastive_hebbian_mechanism.py
|
bdsinger/PsyNeuLink
|
71d8a0bb1691ff85061d4ad3de866d9930a69a73
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import psyneulink as pnl
import pytest
import psyneulink.core.components.functions.learningfunctions
import psyneulink.core.components.functions.transferfunctions
class TestContrastiveHebbian:
    """Tests for pnl.ContrastiveHebbianMechanism: scheduling until
    convergence, Hebbian learning of orthogonal input patterns (with and
    without integrator mode), extra output states, and configure_learning."""

    def test_scheduled_contrastive_hebbian(self):
        # Downstream mechanism only runs once the CH mechanism has converged
        # (WhenFinished condition below).
        o = pnl.TransferMechanism()
        m = pnl.ContrastiveHebbianMechanism(
            input_size=2,
            hidden_size=0,
            target_size=2,
            separated=False,
            mode=pnl.SIMPLE_HEBBIAN,
            integrator_mode=True,
            enable_learning=False,
            matrix=[[0,-1],[-1, 0]],
            # auto=0,
            # hetero=-1,
        )
        # set max passes to ensure failure if no convergence instead of infinite loop
        m.max_passes = 1000
        s = pnl.sys(m, o)
        ms = pnl.Scheduler(system=s)
        ms.add_condition(o, pnl.WhenFinished(m))
        s.scheduler_processing = ms
        # m.reinitialize_when=pnl.Never()
        print('matrix:\n', m.afferents[1].matrix)
        results = s.run(inputs=[2, 2], num_trials=4)
        print(results)
        # Each trial must converge to the same value.
        np.testing.assert_allclose(results, [[np.array([2.])], [np.array([2.])], [np.array([2.])], [np.array([2.])]])

    def test_using_Hebbian_learning_of_orthognal_inputs_without_integrator_mode(self):
        '''Same as tests/mechanisms/test_recurrent_transfer_mechanism/test_learning_of_orthognal_inputs
        Tests that ContrastiveHebbianMechanism behaves like RecurrentTransferMechanism with Hebbian LearningFunction
        (allowing for epsilon differences due to the CONVERGENCE CRITERION).
        '''
        size=4
        R = pnl.ContrastiveHebbianMechanism(
            input_size=4,
            hidden_size=0,
            target_size=4,
            mode=pnl.SIMPLE_HEBBIAN,
            enable_learning=True,
            function=psyneulink.core.components.functions.transferfunctions.Linear,
            learning_function=psyneulink.core.components.functions.learningfunctions.Hebbian,
            minus_phase_termination_criterion=.01,
            plus_phase_termination_criterion=.01,
            # auto=0,
            hetero=np.full((size,size),0.0)
        )
        P=pnl.Process(pathway=[R])
        S=pnl.System(processes=[P])
        # First pattern: activity (and learned weights) expected only on
        # the active units 0 and 2.
        inputs_dict = {R:[1,0,1,0]}
        S.run(num_trials=4,
              inputs=inputs_dict)
        # KDM 10/2/18: removing this test from here, as it's kind of unimportant to this specific test
        # and the behavior of the scheduler's time can be a bit odd - should hopefully fix that in future
        # and test in its own module
        # assert S.scheduler_processing.get_clock(S).previous_time.pass_ == 6
        np.testing.assert_allclose(R.output_states[pnl.ACTIVITY_DIFFERENCE_OUTPUT].parameters.value.get(S),
                                   [1.20074767, 0.0, 1.20074767, 0.0])
        np.testing.assert_allclose(R.parameters.plus_phase_activity.get(S), [1.20074767, 0.0, 1.20074767, 0.0])
        np.testing.assert_allclose(R.parameters.minus_phase_activity.get(S), [0.0, 0.0, 0.0, 0.0])
        np.testing.assert_allclose(R.output_states[pnl.CURRENT_ACTIVITY_OUTPUT].parameters.value.get(S), [1.20074767, 0.0, 1.20074767, 0.0])
        np.testing.assert_allclose(
            R.recurrent_projection.get_mod_matrix(S),
            [
                [0.0,       0.0,       0.2399363, 0.0],
                [0.0,       0.0,       0.0,       0.0],
                [0.2399363, 0.0,       0.0,       0.0],
                [0.0,       0.0,       0.0,       0.0]
            ]
        )
        # Reset state so learning of new pattern is "uncontaminated" by activity from previous one
        R.output_state.parameters.value.set([0, 0, 0, 0], S)
        # Second, orthogonal pattern: adds weights between units 1 and 3
        # without disturbing the first pattern's weights.
        inputs_dict = {R:[0,1,0,1]}
        S.run(num_trials=4,
              inputs=inputs_dict)
        np.testing.assert_allclose(
            R.recurrent_projection.get_mod_matrix(S),
            [
                [0.0,       0.0,       0.2399363, 0.0],
                [0.0,       0.0,       0.0,       0.2399363],
                [0.2399363, 0.0,       0.0,       0.0],
                [0.0,       0.2399363, 0.0,       0.0]
            ]
        )
        np.testing.assert_allclose(R.output_states[pnl.ACTIVITY_DIFFERENCE_OUTPUT].parameters.value.get(S), [0.0, 1.20074767, 0.0, 1.20074767])
        np.testing.assert_allclose(R.parameters.plus_phase_activity.get(S), [0.0, 1.20074767, 0.0, 1.20074767])
        np.testing.assert_allclose(R.parameters.minus_phase_activity.get(S), [0.0, 0.0, 0.0, 0.0])

    def test_using_Hebbian_learning_of_orthognal_inputs_with_integrator_mode(self):
        '''Same as tests/mechanisms/test_recurrent_transfer_mechanism/test_learning_of_orthognal_inputs
        Tests that ContrastiveHebbianMechanism behaves like RecurrentTransferMechanism with Hebbian LearningFunction
        (allowing for epsilon differences due to INTEGRATION and convergence criterion).
        '''
        size=4
        R = pnl.ContrastiveHebbianMechanism(
            input_size=4,
            hidden_size=0,
            target_size=4,
            separated=False,
            mode=pnl.SIMPLE_HEBBIAN,
            enable_learning=True,
            function=psyneulink.core.components.functions.transferfunctions.Linear,
            integrator_mode=True,
            integration_rate=0.2,
            learning_function=psyneulink.core.components.functions.learningfunctions.Hebbian,
            minus_phase_termination_criterion=.01,
            plus_phase_termination_criterion=.01,
            # auto=0,
            hetero=np.full((size,size),0.0)
        )
        P=pnl.Process(pathway=[R])
        S=pnl.System(processes=[P])
        inputs_dict = {R:[1,0,1,0]}
        S.run(num_trials=4,
              inputs=inputs_dict)
        # KDM 10/2/18: removing this test from here, as it's kind of unimportant to this specific test
        # and the behavior of the scheduler's time can be a bit odd - should hopefully fix that in future
        # and test in its own module
        # assert S.scheduler_processing.get_clock(S).previous_time.pass_ == 19
        # Values differ slightly from the non-integrator variant because of
        # the integration dynamics (see docstring).
        np.testing.assert_allclose(R.output_states[pnl.ACTIVITY_DIFFERENCE_OUTPUT].parameters.value.get(S),
                                   [1.14142296, 0.0, 1.14142296, 0.0])
        np.testing.assert_allclose(R.parameters.plus_phase_activity.get(S), [1.14142296, 0.0, 1.14142296, 0.0])
        np.testing.assert_allclose(R.parameters.minus_phase_activity.get(S), [0.0, 0.0, 0.0, 0.0])
        np.testing.assert_allclose(R.output_states[pnl.CURRENT_ACTIVITY_OUTPUT].parameters.value.get(S),
                                   [1.1414229612568625, 0.0, 1.1414229612568625, 0.0])
        np.testing.assert_allclose(
            R.recurrent_projection.get_mod_matrix(S),
            [
                [0.0,        0.0,        0.22035998, 0.0],
                [0.0,        0.0,        0.0,        0.0],
                [0.22035998, 0.0,        0.0,        0.0],
                [0.0,        0.0,        0.0,        0.0]
            ]
        )
        # Reset state so learning of new pattern is "uncontaminated" by activity from previous one
        R.output_state.parameters.value.set([0, 0, 0, 0], S)
        inputs_dict = {R:[0,1,0,1]}
        S.run(num_trials=4,
              inputs=inputs_dict)
        np.testing.assert_allclose(
            R.recurrent_projection.get_mod_matrix(S),
            [
                [0.0,        0.0,        0.22035998, 0.0],
                [0.0,        0.0,        0.0,        0.22035998],
                [0.22035998, 0.0,        0.0,        0.],
                [0.0,        0.22035998, 0.0,        0.]
            ]
        )
        np.testing.assert_allclose(R.output_states[pnl.CURRENT_ACTIVITY_OUTPUT].parameters.value.get(S),
                                   [0.0, 1.1414229612568625, 0.0, 1.1414229612568625])
        np.testing.assert_allclose(R.output_states[pnl.ACTIVITY_DIFFERENCE_OUTPUT].parameters.value.get(S),
                                   [ 0.0, 1.14142296, 0.0, 1.14142296])
        np.testing.assert_allclose(R.parameters.plus_phase_activity.get(S), [0.0, 1.14142296, 0.0, 1.14142296])
        np.testing.assert_allclose(R.parameters.minus_phase_activity.get(S), [0.0, 0.0, 0.0, 0.0])

    def test_additional_output_states(self):
        # Requesting the phase outputs adds them to the default three
        # output states (total of 5), with or without `separated`.
        CHL1 = pnl.ContrastiveHebbianMechanism(
            input_size=2, hidden_size=0, target_size=2,
            additional_output_states=[pnl.PLUS_PHASE_OUTPUT, pnl.MINUS_PHASE_OUTPUT])
        assert len(CHL1.output_states)==5
        assert pnl.PLUS_PHASE_OUTPUT in CHL1.output_states.names
        CHL2 = pnl.ContrastiveHebbianMechanism(
            input_size=2, hidden_size=0, target_size=2,
            additional_output_states=[pnl.PLUS_PHASE_OUTPUT, pnl.MINUS_PHASE_OUTPUT],
            separated=False)
        assert len(CHL2.output_states)==5
        assert pnl.PLUS_PHASE_OUTPUT in CHL2.output_states.names

    def test_configure_learning(self):
        o = pnl.TransferMechanism()
        m = pnl.ContrastiveHebbianMechanism(
            input_size=2, hidden_size=0, target_size=2,
            mode=pnl.SIMPLE_HEBBIAN,
            separated=False,
            matrix=[[0,-.5],[-.5,0]]
        )
        # Enabling learning before configure_learning() must warn (no
        # LearningMechanism exists yet).
        with pytest.warns(UserWarning) as record:
            m.learning_enabled = True
        correct_message_found = False
        for warning in record:
            if ("Learning cannot be enabled" in str(warning.message) and
                    "because it has no LearningMechanism" in str(warning.message)):
                correct_message_found = True
                break
        assert correct_message_found
        # After configuring learning, the scheduled run should learn and
        # the output should grow over trials.
        m.configure_learning()
        m.reinitialize_when=pnl.Never()
        s = pnl.sys(m,o)
        ms = pnl.Scheduler(system=s)
        ms.add_condition(o, pnl.WhenFinished(m))
        s.scheduler_processing=ms
        results = s.run(inputs=[2,2], num_trials=4)
        np.testing.assert_allclose(results, [[[2.671875]],
                                             [[2.84093837]],
                                             [[3.0510183]],
                                             [[3.35234623]]])
| 46.972973
| 143
| 0.573552
| 1,285
| 10,428
| 4.490272
| 0.15642
| 0.054073
| 0.058232
| 0.063778
| 0.855806
| 0.817331
| 0.79688
| 0.79688
| 0.782322
| 0.753206
| 0
| 0.091868
| 0.318374
| 10,428
| 221
| 144
| 47.18552
| 0.719893
| 0.138665
| 0
| 0.546512
| 0
| 0
| 0.00786
| 0
| 0
| 0
| 0
| 0
| 0.151163
| 1
| 0.02907
| false
| 0.005814
| 0.02907
| 0
| 0.063953
| 0.011628
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
39a48c520dfbe38c733780908e454fa13abfe96d
| 50
|
py
|
Python
|
ulmo/lcra/__init__.py
|
sblack-usu/ulmo
|
3213bf0302b44e77abdff1f3f66e7f1083571ce8
|
[
"BSD-3-Clause"
] | 123
|
2015-01-29T12:35:52.000Z
|
2021-12-15T21:09:33.000Z
|
ulmo/lcra/__init__.py
|
sblack-usu/ulmo
|
3213bf0302b44e77abdff1f3f66e7f1083571ce8
|
[
"BSD-3-Clause"
] | 107
|
2015-01-05T17:56:22.000Z
|
2021-11-19T22:46:23.000Z
|
ulmo/lcra/__init__.py
|
sblack-usu/ulmo
|
3213bf0302b44e77abdff1f3f66e7f1083571ce8
|
[
"BSD-3-Clause"
] | 49
|
2015-02-15T18:11:34.000Z
|
2022-01-25T14:25:32.000Z
|
from . import hydromet
from . import waterquality
| 16.666667
| 26
| 0.8
| 6
| 50
| 6.666667
| 0.666667
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 50
| 2
| 27
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
39dd137e02a0675da43b7911f7511f247929c0dd
| 123
|
py
|
Python
|
xmpe_l10n_pe_currency/models/__init__.py
|
dsilot/odoo
|
032c138954948c28b8fbef4e7bb9d5ba6921c288
|
[
"MIT"
] | null | null | null |
xmpe_l10n_pe_currency/models/__init__.py
|
dsilot/odoo
|
032c138954948c28b8fbef4e7bb9d5ba6921c288
|
[
"MIT"
] | null | null | null |
xmpe_l10n_pe_currency/models/__init__.py
|
dsilot/odoo
|
032c138954948c28b8fbef4e7bb9d5ba6921c288
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from . import res_config_settings
from . import rer_currency
from . import res_company
| 13.666667
| 34
| 0.666667
| 16
| 123
| 4.875
| 0.6875
| 0.384615
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010638
| 0.235772
| 123
| 8
| 35
| 15.375
| 0.819149
| 0.170732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
39ecfb07cb771f61d8a4d30a913cdd46c0cde8c5
| 73
|
py
|
Python
|
cyk/generate_string/__init__.py
|
azoimide/cyk
|
0dd06fc70136246ae59b783c566889802e50b06c
|
[
"MIT"
] | null | null | null |
cyk/generate_string/__init__.py
|
azoimide/cyk
|
0dd06fc70136246ae59b783c566889802e50b06c
|
[
"MIT"
] | null | null | null |
cyk/generate_string/__init__.py
|
azoimide/cyk
|
0dd06fc70136246ae59b783c566889802e50b06c
|
[
"MIT"
] | null | null | null |
from generate_string import generate_table, generate_string, rand_string
| 36.5
| 72
| 0.890411
| 10
| 73
| 6.1
| 0.6
| 0.459016
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082192
| 73
| 1
| 73
| 73
| 0.910448
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
843268979c554670972ace20eca2f18a6fe30fb0
| 4,985
|
py
|
Python
|
opfython/utils/converter.py
|
gugarosa/opfython
|
19b467a92d85c7c26d231efec770645096827b4e
|
[
"Apache-2.0"
] | 26
|
2018-04-24T20:16:18.000Z
|
2022-03-09T14:03:28.000Z
|
opfython/utils/converter.py
|
gugarosa/opfython
|
19b467a92d85c7c26d231efec770645096827b4e
|
[
"Apache-2.0"
] | 4
|
2020-12-26T14:57:18.000Z
|
2022-03-30T02:34:18.000Z
|
opfython/utils/converter.py
|
gugarosa/opfython
|
19b467a92d85c7c26d231efec770645096827b4e
|
[
"Apache-2.0"
] | 16
|
2019-05-20T15:41:56.000Z
|
2022-03-23T17:59:53.000Z
|
"""Converts OPF binary data to a variety of extensions.
"""
import json as j
import struct
import numpy as np
import opfython.utils.logging as l
logger = l.get_logger(__name__)
def opf2txt(opf_path, output_file=None):
"""Converts a binary OPF file (.dat or .opf) to a .txt file.
Args:
opf_path (str): Path to the binary file.
output_file (str): The path to the output file.
"""
logger.info('Converting file: %s ...', opf_path)
# Defining header format
header_format = '<iii'
# Calculating size to be read
header_size = struct.calcsize(header_format)
with open(opf_path, 'rb') as f:
# Reading binary data and unpacking to desired format
header_data = struct.unpack(header_format, f.read(header_size))
# Retrieving number of samples and features
n_samples = header_data[0]
n_features = header_data[2]
# Defining the file format for each subsequent line
file_format = '<ii'
for _ in range(n_features):
file_format += 'f'
# Calculates the size based on the file format
data_size = struct.calcsize(file_format)
# Creates an empty list to hold the samples
samples = []
for _ in range(n_samples):
# Reading binary data and unpacking to desired format
data = struct.unpack(file_format, f.read(data_size))
# Appending the data to list
# Note that we subtract 1 from `labels` column
samples.append((data[0], data[1] - 1, *data[2:]))
if not output_file:
output_file = opf_path.split('.')[0] + '.txt'
np.savetxt(output_file, samples, delimiter=' ')
logger.info('File converted to %s.', output_file)
def opf2csv(opf_path, output_file=None):
"""Converts a binary OPF file (.dat or .opf) to a .csv file.
Args:
opf_path (str): Path to the binary file.
output_file (str): The path to the output file.
"""
logger.info('Converting file: %s ...', opf_path)
# Defining header format
header_format = '<iii'
# Calculating size to be read
header_size = struct.calcsize(header_format)
with open(opf_path, 'rb') as f:
# Reading binary data and unpacking to desired format
header_data = struct.unpack(header_format, f.read(header_size))
# Retrieving number of samples and features
n_samples = header_data[0]
n_features = header_data[2]
# Defining the file format for each subsequent line
file_format = '<ii'
for _ in range(n_features):
file_format += 'f'
# Calculates the size based on the file format
data_size = struct.calcsize(file_format)
# Creates an empty list to hold the samples
samples = []
for _ in range(n_samples):
# Reading binary data and unpacking to desired format
data = struct.unpack(file_format, f.read(data_size))
# Appending the data to list
# Note that we subtract 1 from `labels` column
samples.append((data[0], data[1] - 1, *data[2:]))
if not output_file:
output_file = opf_path.split('.')[0] + '.csv'
np.savetxt(output_file, samples, delimiter=',')
logger.info('File converted to %s.', output_file)
def opf2json(opf_path, output_file=None):
"""Converts a binary OPF file (.dat or .opf) to a .json file.
Args:
opf_path (str): Path to the binary file.
output_file (str): The path to the output file.
"""
logger.info('Converting file: %s ...', opf_path)
# Defining header format
header_format = '<iii'
# Calculating size to be read
header_size = struct.calcsize(header_format)
with open(opf_path, 'rb') as f:
# Reading binary data and unpacking to desired format
header_data = struct.unpack(header_format, f.read(header_size))
# Retrieving number of samples and features
n_samples = header_data[0]
n_features = header_data[2]
# Defining the file format for each subsequent line
file_format = '<ii'
for _ in range(n_features):
file_format += 'f'
# Calculates the size based on the file format
data_size = struct.calcsize(file_format)
# Creating a JSON structure
json = {
'data': []
}
for _ in range(n_samples):
# Reading binary data and unpacking to desired format
data = struct.unpack(file_format, f.read(data_size))
# Appending the data to JSON structure
json['data'].append({
'id': data[0],
'label': data[1] - 1,
'features': list(data[2:])
})
if not output_file:
output_file = opf_path.split('.')[0] + '.json'
with open(output_file, 'w') as f:
j.dump(json, f)
logger.info('File converted to %s.', output_file)
| 28.163842
| 71
| 0.612036
| 669
| 4,985
| 4.414051
| 0.153961
| 0.071114
| 0.018286
| 0.040637
| 0.899763
| 0.899763
| 0.899763
| 0.899763
| 0.887572
| 0.887572
| 0
| 0.007347
| 0.29007
| 4,985
| 176
| 72
| 28.323864
| 0.82707
| 0.342427
| 0
| 0.680556
| 0
| 0
| 0.064272
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.055556
| 0
| 0.097222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ffdc6eba42261c59ea6ca677054f64b79b22741f
| 2,963
|
py
|
Python
|
base_elements_cubic_matrix.py
|
geraldpe/cistercian_cipher
|
6c0ae9014a7771b1a792cf346cf039a69ae52f96
|
[
"Apache-2.0"
] | null | null | null |
base_elements_cubic_matrix.py
|
geraldpe/cistercian_cipher
|
6c0ae9014a7771b1a792cf346cf039a69ae52f96
|
[
"Apache-2.0"
] | null | null | null |
base_elements_cubic_matrix.py
|
geraldpe/cistercian_cipher
|
6c0ae9014a7771b1a792cf346cf039a69ae52f96
|
[
"Apache-2.0"
] | null | null | null |
"""
the first index is the power of ten and the second index is the digit between 0 and 8
"""
BaseElements = (
#milliers
(
(
(100, 350, 200, 350)
),
(
(100, 250, 200, 250)
),
(
(100, 250, 200, 350)
),
(
(100, 350, 200, 250)
),
(
(100, 350, 200, 350),
(100, 350, 200, 250)
),
(
(100, 250, 100, 350)
),
(
(100, 250, 100, 350),
(100, 350, 200, 350)
),
(
(100, 250, 100, 350),
(100, 250, 200, 250)
),
(
(100, 250, 100, 350),
(100, 250, 200, 250),
(100, 350, 200, 350)
),
),
#centaines
(
(
(200, 350, 300, 350)
),
(
(200, 250, 300, 250)
),
(
(200, 350, 300, 250)
),
(
(200, 250, 300, 350)
),
(
(200, 350, 300, 350),
(200, 250, 300, 350)
),
(
(300, 250, 300, 350)
),
(
(300, 250, 300, 350),
(200, 350, 300, 350)
),
(
(300, 250, 300, 350),
(200, 250, 300, 250)
),
(
(300, 250, 300, 350),
(200, 250, 300, 250),
(200, 350, 300, 350)
),
),
#dizaines
(
(
(100, 50, 200, 50)
),
(
(100, 150, 200, 150)
),
(
(100, 150, 200, 50)
),
(
(100, 50, 200, 150)
),
(
(100, 50, 200, 50),
(100, 50, 200, 150)
),
(
(100, 50, 100, 150)
),
(
(100, 50, 100, 150),
(100, 50, 200, 50)
),
(
(100, 50, 100, 150),
(100, 150, 200, 150)
),
(
(100, 50, 100, 150),
(100, 50, 200, 50),
(100, 150, 200, 150)
),
),
#unités
(
(
(200, 50, 300, 50)
),
(
(200, 150, 300, 150)
),
(
(200, 50, 300, 150)
),
(
(200, 150, 300, 50)
),
(
(200, 50, 300, 50),
(200, 150, 300, 50)
),
(
(300, 50, 300, 150)
),
(
(300, 50, 300, 150),
(200, 50, 300, 50)
),
(
(300, 50, 300, 150),
(200, 150, 300, 150)
),
(
(300, 50, 300, 150),
(200, 150, 300, 150),
(200, 50, 300, 50)
),
)
)
| 20.156463
| 86
| 0.253797
| 248
| 2,963
| 3.032258
| 0.100806
| 0.095745
| 0.071809
| 0.06383
| 0.835106
| 0.81516
| 0.726064
| 0.449468
| 0.211436
| 0.071809
| 0
| 0.543919
| 0.600405
| 2,963
| 147
| 87
| 20.156463
| 0.091216
| 0.039487
| 0
| 0.586957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
080ff0143c4641a1af246af4273424cb8a88fb9a
| 115
|
py
|
Python
|
utils.py
|
CompassMentis/mosaic_tiles
|
d8980cf65965aee77b1e14dc3760b11343a6a4a2
|
[
"MIT"
] | null | null | null |
utils.py
|
CompassMentis/mosaic_tiles
|
d8980cf65965aee77b1e14dc3760b11343a6a4a2
|
[
"MIT"
] | null | null | null |
utils.py
|
CompassMentis/mosaic_tiles
|
d8980cf65965aee77b1e14dc3760b11343a6a4a2
|
[
"MIT"
] | null | null | null |
def within_rect(x, y, rect):
return rect.x <= x <= rect.x + rect.width and rect.y <= y <= rect.y + rect.height
| 38.333333
| 85
| 0.608696
| 22
| 115
| 3.136364
| 0.409091
| 0.217391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208696
| 115
| 2
| 86
| 57.5
| 0.758242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
f26ef64bb830b236ef4d362dac63efb84b4e04f4
| 103
|
py
|
Python
|
suvec/vk_api_impl/requesting/__init__.py
|
ProtsenkoAI/skady-user-vectorizer
|
9114337d4a5cb176f6980e73a93eef90a49b478e
|
[
"MIT"
] | 1
|
2021-05-07T16:48:16.000Z
|
2021-05-07T16:48:16.000Z
|
suvec/vk_api_impl/requesting/__init__.py
|
ProtsenkoAI/skady-user-vectorizer
|
9114337d4a5cb176f6980e73a93eef90a49b478e
|
[
"MIT"
] | null | null | null |
suvec/vk_api_impl/requesting/__init__.py
|
ProtsenkoAI/skady-user-vectorizer
|
9114337d4a5cb176f6980e73a93eef90a49b478e
|
[
"MIT"
] | null | null | null |
from .requests_creator import VkApiRequestsCreator
from .requests import GroupsRequest, FriendsRequest
| 34.333333
| 51
| 0.883495
| 10
| 103
| 9
| 0.7
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087379
| 103
| 2
| 52
| 51.5
| 0.957447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f2bc1b938c3934969ad2e27ebbf5a0ebd472f8f1
| 199
|
py
|
Python
|
energypy/envs/base.py
|
ADGEfficiency/energy-py-3-dev
|
9c5eb32718dec3f8195402d82c1d03d90fd1f5f9
|
[
"MIT"
] | 100
|
2018-09-14T07:58:56.000Z
|
2022-02-24T08:58:36.000Z
|
energypy/envs/base.py
|
ADGEfficiency/energy-py-3-dev
|
9c5eb32718dec3f8195402d82c1d03d90fd1f5f9
|
[
"MIT"
] | 26
|
2018-09-13T00:10:54.000Z
|
2022-02-09T23:29:47.000Z
|
energypy/envs/base.py
|
ADGEfficiency/energy-py-3-dev
|
9c5eb32718dec3f8195402d82c1d03d90fd1f5f9
|
[
"MIT"
] | 19
|
2018-11-12T11:52:25.000Z
|
2021-12-08T12:41:47.000Z
|
class AbstractEnv:
def reset(self):
raise NotImplementedError()
def step(self):
raise NotImplementedError()
def setup_test(self):
raise NotImplementedError()
| 15.307692
| 35
| 0.643216
| 18
| 199
| 7.055556
| 0.555556
| 0.212598
| 0.661417
| 0.488189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.276382
| 199
| 12
| 36
| 16.583333
| 0.881944
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
f2c95050d2d6cfa0362acdde1d57617596f22921
| 2,562
|
py
|
Python
|
epytope/Data/pssms/smm/mat/A_31_01_10.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/smm/mat/A_31_01_10.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/smm/mat/A_31_01_10.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
A_31_01_10 = {0: {'A': -0.182, 'C': -0.104, 'E': 0.472, 'D': 0.324, 'G': 0.273, 'F': -0.017, 'I': -0.056, 'H': -0.06, 'K': -0.62, 'M': -0.335, 'L': -0.121, 'N': 0.147, 'Q': 0.027, 'P': 0.527, 'S': -0.219, 'R': -0.504, 'T': 0.07, 'W': 0.263, 'V': 0.062, 'Y': 0.056}, 1: {'A': 0.008, 'C': 0.228, 'E': 0.188, 'D': 0.495, 'G': -0.113, 'F': -0.473, 'I': -0.214, 'H': -0.111, 'K': 0.352, 'M': -0.545, 'L': -0.081, 'N': 0.616, 'Q': -0.032, 'P': 0.641, 'S': -0.308, 'R': 0.304, 'T': -0.221, 'W': -0.1, 'V': -0.253, 'Y': -0.382}, 2: {'A': 0.177, 'C': -0.096, 'E': 0.442, 'D': 0.298, 'G': 0.191, 'F': -0.134, 'I': -0.191, 'H': -0.033, 'K': -0.091, 'M': -0.337, 'L': -0.098, 'N': 0.103, 'Q': -0.077, 'P': 0.171, 'S': -0.123, 'R': -0.254, 'T': 0.222, 'W': -0.057, 'V': 0.158, 'Y': -0.27}, 3: {'A': -0.013, 'C': -0.017, 'E': 0.189, 'D': 0.051, 'G': -0.056, 'F': -0.171, 'I': -0.02, 'H': -0.072, 'K': 0.002, 'M': -0.038, 'L': 0.039, 'N': -0.034, 'Q': 0.104, 'P': -0.029, 'S': 0.006, 'R': -0.006, 'T': 0.049, 'W': -0.102, 'V': 0.106, 'Y': 0.012}, 4: {'A': 0.088, 'C': -0.22, 'E': 0.299, 'D': 0.341, 'G': 0.134, 'F': -0.319, 'I': -0.159, 'H': -0.12, 'K': 0.008, 'M': -0.081, 'L': -0.083, 'N': 0.114, 'Q': 0.025, 'P': 0.076, 'S': 0.119, 'R': -0.048, 'T': 0.066, 'W': -0.155, 'V': -0.013, 'Y': -0.07}, 5: {'A': 0.077, 'C': -0.209, 'E': 0.218, 'D': 0.316, 'G': 0.06, 'F': -0.246, 'I': -0.188, 'H': -0.008, 'K': -0.086, 'M': 0.079, 'L': 0.014, 'N': 0.138, 'Q': 0.118, 'P': 0.313, 'S': -0.105, 'R': -0.139, 'T': 0.045, 'W': -0.252, 'V': -0.087, 'Y': -0.059}, 6: {'A': 0.092, 'C': -0.025, 'E': 0.281, 'D': 0.191, 'G': 0.076, 'F': -0.14, 'I': 0.085, 'H': -0.111, 'K': -0.06, 'M': -0.051, 'L': -0.109, 'N': 0.016, 'Q': 0.088, 'P': 0.121, 'S': 0.087, 'R': -0.294, 'T': 0.061, 'W': -0.219, 'V': 0.067, 'Y': -0.152}, 7: {'A': -0.013, 'C': -0.179, 'E': 0.124, 'D': 0.377, 'G': 0.267, 'F': -0.36, 'I': -0.07, 'H': -0.047, 'K': 0.146, 'M': -0.213, 'L': 0.016, 'N': 0.069, 'Q': 0.227, 'P': 0.168, 'S': -0.047, 'R': -0.124, 'T': 
0.028, 'W': -0.098, 'V': 0.022, 'Y': -0.293}, 8: {'A': -0.138, 'C': -0.115, 'E': 0.071, 'D': 0.247, 'G': 0.029, 'F': -0.424, 'I': 0.247, 'H': 0.092, 'K': 0.163, 'M': 0.038, 'L': -0.074, 'N': 0.01, 'Q': 0.191, 'P': -0.066, 'S': 0.068, 'R': -0.044, 'T': -0.004, 'W': -0.144, 'V': 0.174, 'Y': -0.319}, 9: {'A': 0.239, 'C': -0.025, 'E': 0.242, 'D': 0.471, 'G': 0.0, 'F': 0.229, 'I': 0.141, 'H': -0.01, 'K': -0.704, 'M': -0.743, 'L': -0.025, 'N': 0.13, 'Q': 0.872, 'P': 0.973, 'S': 0.0, 'R': -1.711, 'T': -0.249, 'W': 0.121, 'V': 0.042, 'Y': 0.006}, -1: {'con': 4.1264}}
| 2,562
| 2,562
| 0.393052
| 618
| 2,562
| 1.624595
| 0.300971
| 0.01992
| 0.00996
| 0.011952
| 0.055777
| 0
| 0
| 0
| 0
| 0
| 0
| 0.372439
| 0.161593
| 2,562
| 1
| 2,562
| 2,562
| 0.094972
| 0
| 0
| 0
| 0
| 0
| 0.079204
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4bb4ab163735775ea8b1860cd375c29233d2a55c
| 4,576
|
py
|
Python
|
skygear/tests/test_action.py
|
IniZio/py-skygear
|
88479678f91e678fd931c28295189bfea2148c79
|
[
"Apache-2.0"
] | 8
|
2016-06-24T03:26:45.000Z
|
2018-05-12T09:06:33.000Z
|
skygear/tests/test_action.py
|
IniZio/py-skygear
|
88479678f91e678fd931c28295189bfea2148c79
|
[
"Apache-2.0"
] | 183
|
2016-03-23T08:03:28.000Z
|
2018-08-14T05:49:45.000Z
|
skygear/tests/test_action.py
|
IniZio/py-skygear
|
88479678f91e678fd931c28295189bfea2148c79
|
[
"Apache-2.0"
] | 24
|
2016-03-21T02:39:39.000Z
|
2020-09-17T12:28:58.000Z
|
# Copyright 2015 Oursky Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest.mock import MagicMock
from .. import action
class TestPushNotification(unittest.TestCase):
def setUp(self):
self.mock_container = MagicMock()
def tearDown(self):
self.mock_container = None
def test_push_device(self):
action.push_device(self.mock_container,
'device01',
{'apns': {'alert': 'hello'}})
self.mock_container.send_action\
.assert_called_once_with('push:device', {
'device_ids': ['device01'],
'notification': {'apns': {'alert': 'hello'}}
})
def test_push_devices(self):
action.push_devices(self.mock_container,
['device01', 'device02', 'device03'],
{'apns': {'alert': 'hello'}})
self.mock_container.send_action\
.assert_called_once_with('push:device', {
'device_ids': ['device01', 'device02', 'device03'],
'notification': {'apns': {'alert': 'hello'}}
})
def test_push_device_with_topic(self):
action.push_device(self.mock_container,
'device01',
{'apns': {'alert': 'hello'}},
topic='io.skygear.example.app')
self.mock_container.send_action\
.assert_called_once_with('push:device', {
'topic': 'io.skygear.example.app',
'device_ids': ['device01'],
'notification': {'apns': {'alert': 'hello'}}
})
def test_push_devices_with_topic(self):
action.push_devices(self.mock_container,
['device01', 'device02', 'device03'],
{'apns': {'alert': 'hello'}},
topic='io.skygear.example.app')
self.mock_container.send_action\
.assert_called_once_with('push:device', {
'topic': 'io.skygear.example.app',
'device_ids': ['device01', 'device02', 'device03'],
'notification': {'apns': {'alert': 'hello'}}
})
def test_push_user(self):
action.push_user(self.mock_container,
'user01',
{'apns': {'alert': 'hello'}})
self.mock_container.send_action\
.assert_called_once_with('push:user', {
'user_ids': ['user01'],
'notification': {'apns': {'alert': 'hello'}}
})
def test_push_users(self):
action.push_users(self.mock_container,
['user01', 'user02', 'user03'],
{'apns': {'alert': 'hello'}})
self.mock_container.send_action\
.assert_called_once_with('push:user', {
'user_ids': ['user01', 'user02', 'user03'],
'notification': {'apns': {'alert': 'hello'}}
})
def test_push_user_with_topic(self):
action.push_user(self.mock_container,
'user01',
{'apns': {'alert': 'hello'}},
topic='io.skygear.example.app')
self.mock_container.send_action\
.assert_called_once_with('push:user', {
'topic': 'io.skygear.example.app',
'user_ids': ['user01'],
'notification': {'apns': {'alert': 'hello'}}
})
def test_push_users_with_topic(self):
action.push_users(self.mock_container,
['user01', 'user02', 'user03'],
{'apns': {'alert': 'hello'}},
topic='io.skygear.example.app')
self.mock_container.send_action\
.assert_called_once_with('push:user', {
'topic': 'io.skygear.example.app',
'user_ids': ['user01', 'user02', 'user03'],
'notification': {'apns': {'alert': 'hello'}}
})
| 40.140351
| 74
| 0.527098
| 446
| 4,576
| 5.206278
| 0.233184
| 0.062016
| 0.131783
| 0.072351
| 0.725668
| 0.710164
| 0.710164
| 0.710164
| 0.701981
| 0.669251
| 0
| 0.023583
| 0.332823
| 4,576
| 113
| 75
| 40.495575
| 0.73698
| 0.119755
| 0
| 0.818182
| 0
| 0
| 0.222222
| 0.043847
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.113636
| false
| 0
| 0.034091
| 0
| 0.159091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
29a8a162f69f40ab9143ade5538090c6726a095e
| 110
|
py
|
Python
|
py/functions.py
|
tyffical/TwilioQuest
|
79518006c5a08ebca386fa9087e522fab4aebd85
|
[
"MIT"
] | null | null | null |
py/functions.py
|
tyffical/TwilioQuest
|
79518006c5a08ebca386fa9087e522fab4aebd85
|
[
"MIT"
] | null | null | null |
py/functions.py
|
tyffical/TwilioQuest
|
79518006c5a08ebca386fa9087e522fab4aebd85
|
[
"MIT"
] | null | null | null |
def hail_friend(name):
print("Hail, " + name + "!")
def add_numbers(num1, num2):
return num1+num2
| 22
| 33
| 0.609091
| 15
| 110
| 4.333333
| 0.666667
| 0.246154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047059
| 0.227273
| 110
| 5
| 34
| 22
| 0.717647
| 0
| 0
| 0
| 0
| 0
| 0.065421
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.25
| 0.75
| 0.25
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
29c7b9725e78dbaf5bb06d6ef1dd9de7a0789f90
| 21
|
py
|
Python
|
models/__init__.py
|
firstoxe/TAPI-Event-monitor
|
9408ca5b85eb936a091f4806785ce2e1f26b14d3
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
firstoxe/TAPI-Event-monitor
|
9408ca5b85eb936a091f4806785ce2e1f26b14d3
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
firstoxe/TAPI-Event-monitor
|
9408ca5b85eb936a091f4806785ce2e1f26b14d3
|
[
"MIT"
] | null | null | null |
from . import Enums
| 10.5
| 20
| 0.714286
| 3
| 21
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.238095
| 21
| 1
| 21
| 21
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
29ce78f091c23d0a77caf2251c382cbaa7843162
| 11,943
|
py
|
Python
|
geometry_utils/pytests/conftest.py
|
django-advance-utils/geometry-utils
|
b749cfdab67d8462cc5d02d566c2b526f7d0b418
|
[
"MIT"
] | null | null | null |
geometry_utils/pytests/conftest.py
|
django-advance-utils/geometry-utils
|
b749cfdab67d8462cc5d02d566c2b526f7d0b418
|
[
"MIT"
] | null | null | null |
geometry_utils/pytests/conftest.py
|
django-advance-utils/geometry-utils
|
b749cfdab67d8462cc5d02d566c2b526f7d0b418
|
[
"MIT"
] | null | null | null |
import math
import pytest
from geometry_utils.three_d.axis_aligned_box3 import AxisAlignedBox3
from geometry_utils.three_d.edge3 import Edge3
from geometry_utils.three_d.matrix4 import Matrix4
from geometry_utils.three_d.path3 import Path3
from geometry_utils.three_d.point3 import Point3
from geometry_utils.three_d.vector3 import Vector3
from geometry_utils.two_d.axis_aligned_box2 import AxisAlignedBox2
from geometry_utils.two_d.edge2 import Edge2
from geometry_utils.two_d.intersection import Intersection
from geometry_utils.two_d.matrix3 import Matrix3
from geometry_utils.two_d.path2 import Path2
from geometry_utils.two_d.point2 import Point2
from geometry_utils.two_d.vector2 import Vector2
'''
Point2
'''
@pytest.fixture(scope="session")
def test_point2_1():
return Point2(1.0, 1.0)
@pytest.fixture(scope="session")
def test_point2_2():
return Point2(1.0, 0.0)
@pytest.fixture(scope="session")
def test_point2_3():
return Point2(1.0, 1.0)
@pytest.fixture(scope="session")
def test_point2_4():
return Point2(0.0, 0.0)
'''
Point3
'''
@pytest.fixture(scope="session")
def test_point3_1():
return Point3(1.0, 1.0, 1.0)
@pytest.fixture(scope="session")
def test_point3_2():
return Point3(1.0, 0.0, 0.0)
@pytest.fixture(scope="session")
def test_point3_3():
return Point3(1.0, 1.0, 1.0)
@pytest.fixture(scope="session")
def test_point3_4():
return Point3(0.0, 0.0, 0.0)
'''
Vector2
'''
@pytest.fixture(scope="session")
def test_2d_string():
return '1, 2'
@pytest.fixture(scope="session")
def test_vector2_1():
return Vector2(1.0, 1.0)
@pytest.fixture(scope="session")
def test_vector2_2():
return Vector2(1.0, 0.0)
@pytest.fixture(scope="session")
def test_vector2_3():
return Vector2(1.0, 1.0)
@pytest.fixture(scope="session")
def test_vector2_4():
return Vector2(0.0, 0.0)
@pytest.fixture(scope="session")
def test_vector2_5():
return Vector2(0.0, 1.0)
@pytest.fixture(scope="session")
def test_vector2_6():
return Vector2(1.0, 0.0)
'''
Vector3
'''
@pytest.fixture(scope="session")
def test_3d_string():
return '1, 2, 3'
@pytest.fixture(scope="session")
def test_vector3_1():
return Vector3(1.0, 1.0, 1.0)
@pytest.fixture(scope="session")
def test_vector3_2():
return Vector3(1.0, 0.0, 0.0)
@pytest.fixture(scope="session")
def test_vector3_3():
return Vector3(1.0, 1.0, 1.0)
@pytest.fixture(scope="session")
def test_vector3_4():
return Vector3(0.0, 0.0, 0.0)
@pytest.fixture(scope="session")
def test_vector3_5():
return Vector3(0.0, 1.0, 0.0)
@pytest.fixture(scope="session")
def test_vector3_6():
return Vector3(1.0, 0.0, 0.0)
'''
Edge2
'''
@pytest.fixture(scope="session")
def test_edge2_1():
return Edge2()
@pytest.fixture(scope="session")
def test_edge2_2():
p1 = Point2(0.0, 0.0)
p2 = Point2(2.0, 2.0)
return Edge2(p1, p2)
@pytest.fixture(scope="session")
def test_edge2_3():
p1 = Point2(2.0, 2.0)
p2 = Point2(4.0, 4.0)
return Edge2(p1, p2)
@pytest.fixture(scope="session")
def test_edge2_4():
p1 = Point2(0.0, 0.0)
p2 = Point2(2.0, 2.0)
return Edge2(p1, p2)
@pytest.fixture(scope="session")
def test_edge2_5():
p1 = Point2(0.0, 0.0)
p2 = Point2(2.0, 0.0)
return Edge2(p1, p2, 1.0, True)
@pytest.fixture(scope="session")
def test_edge2_6():
p1 = Point2(0.0, 0.0)
p2 = Point2(0.0, 0.0)
return Edge2(p1, p2, 5.0, True)
@pytest.fixture(scope="session")
def test_edge2_7():
p1 = Point2(0.0, 0.0)
p2 = Point2(1.0, -1.0)
return Edge2(p1, p2, 1.0, True, True)
@pytest.fixture(scope="session")
def test_circle_points_1():
# this just generates a list of points in a circle at 1 degree increments at a radius of 600
radius = 600.0
circle = []
for i in range(360):
t = ((math.pi * 2) / 360.0) * float(i)
circle.append(Point2((math.sin(t) * radius),
(math.cos(t) * radius)))
return circle
'''
Edge3
'''
@pytest.fixture(scope="session")
def test_edge3_1():
return Edge3()
@pytest.fixture(scope="session")
def test_edge3_2():
p1 = Point3(0.0, 0.0, 0.0)
p2 = Point3(2.0, 2.0, 2.0)
return Edge3(p1, p2)
@pytest.fixture(scope="session")
def test_edge3_3():
p1 = Point3(0.0, 0.0, 0.0)
p2 = Point3(2.0, 0.0, 0.0)
return Edge3(p1, p2, radius=1.0, clockwise=True)
@pytest.fixture(scope="session")
def test_edge3_4():
p1 = Point3(0.0, 0.0, 0.0)
p2 = Point3(0.0, 0.0, 0.0)
return Edge3(p1, p2, radius=1, clockwise=True, large=True)
@pytest.fixture(scope="session")
def test_edge3_5():
p1 = Point3(2.0, 2.0, 2.0)
p2 = Point3(4.0, 4.0, 4.0)
return Edge3(p1, p2)
'''
Matrix3
'''
@pytest.fixture(scope="session")
def test_matrix3_1():
return Matrix3()
@pytest.fixture(scope="session")
def test_matrix3_2():
return Matrix3([[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0]])
@pytest.fixture(scope="session")
def test_matrix3_3():
return Matrix3([[-1, 0, 0], [0, -1, 0], [0, 0, -1]])
'''
Matrix4
'''
@pytest.fixture(scope="session")
def test_matrix4_1():
return Matrix4()
@pytest.fixture(scope="session")
def test_matrix4_2():
return Matrix4([[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0]])
@pytest.fixture(scope="session")
def test_matrix4_3():
return Matrix4([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]])
'''
AxisAlignedBox2
'''
@pytest.fixture(scope="session")
def test_box2_1():
return AxisAlignedBox2(Point2(0.0, 0.0), Point2(2.0, 2.0))
@pytest.fixture(scope="session")
def test_box2_2():
return AxisAlignedBox2(Point2(), Point2())
@pytest.fixture(scope="session")
def test_box2_3():
return AxisAlignedBox2(Point2(0.0, 0.0), Point2(0.0, 0.0))
@pytest.fixture(scope="session")
def test_box2_4():
return AxisAlignedBox2(Point2(0.0, 0.0), Point2(0.0, 0.0))
@pytest.fixture(scope="session")
def test_box2_5():
return AxisAlignedBox2()
'''
AxisAlignedBox3
'''
@pytest.fixture(scope="session")
def test_box3_1():
return AxisAlignedBox3(Point3(0.0, 0.0, 0.0), Point3(2.0, 2.0, 2.0))
@pytest.fixture(scope="session")
def test_box3_2():
return AxisAlignedBox3(Point3(), Point3())
@pytest.fixture(scope="session")
def test_box3_3():
return AxisAlignedBox3(Point3(0.0, 0.0, 0.0), Point3(0.0, 0.0, 0.0))
@pytest.fixture(scope="session")
def test_box3_4():
return AxisAlignedBox3(Point3(0.0, 0.0, 0.0), Point3(0.0, 0.0, 0.0))
@pytest.fixture(scope="session")
def test_box3_5():
return AxisAlignedBox3()
'''
Path2
'''
@pytest.fixture(scope="session")
def path2_1():
path = Path2()
path.list_of_edges = [Edge2(Point2(0.0, 0.0), Point2(1.0, 1.0)),
Edge2(Point2(1.0, 1.0), Point2(2.0, 2.0)),
Edge2(Point2(2.0, 2.0), Point2(0.0, 0.0))]
return path
@pytest.fixture(scope="session")
def path2_2():
path = Path2()
path.list_of_edges = [Edge2(Point2(1.0, 1.0), Point2(2.0, 2.0)),
Edge2(Point2(2.0, 2.0), Point2(3.0, 3.0)),
Edge2(Point2(3.0, 3.0), Point2(4.0, 4.0))]
return path
@pytest.fixture(scope="session")
def path2_3():
path = Path2()
path.list_of_edges = [Edge2(Point2(1.0, 1.0), Point2(2.0, 2.0)),
Edge2(Point2(2.0, 2.0), Point2(3.0, 3.0)),
Edge2(Point2(4.0, 4.0), Point2(5.0, 5.0))]
return path
@pytest.fixture(scope="session")
def path2_4():
path = Path2()
path.list_of_edges = [Edge2(Point2(0.0, 0.0), Point2(1.0, 1.0)),
Edge2(Point2(1.0, 1.0), Point2(2.0, 2.0)),
Edge2(Point2(2.0, 2.0), Point2(0.0, 0.0))]
return path
@pytest.fixture(scope="session")
def path2_5():
path = Path2()
path.list_of_edges = [Edge2(Point2(0.0, 0.0), Point2(1.0, 1.0)),
Edge2(Point2(1.0, 1.0), Point2(2.0, 2.0)),
Edge2(Point2(2.0, 2.0), Point2(3.0, 3.0)),
Edge2(Point2(3.0, 3.0), Point2(0.0, 0.0))]
return path
@pytest.fixture(scope="session")
def path2_6():
path = Path2()
path.list_of_edges = [Edge2(Point2(1.0, 1.0), Point2(1.0, 1.0), 1.0)]
return path
@pytest.fixture(scope="session")
def path2_7():
path = Path2()
path.list_of_edges = [Edge2(Point2(0.0, 0.0), Point2(1.0, 0.0)),
Edge2(Point2(1.0, 0.0), Point2(1.0, 1.0)),
Edge2(Point2(1.0, 1.0), Point2(0.0, 1.0)),
Edge2(Point2(0.0, 1.0), Point2(0.0, 0.0))]
return path
@pytest.fixture(scope="session")
def path2_8():
path = Path2()
path.list_of_edges = [Edge2(Point2(0.0, 0.0), Point2(1.0, 0.0)),
Edge2(Point2(1.0, 0.0), Point2(1.0, 1.0)),
Edge2(Point2(1.0, 1.0), Point2(0.0, 1.0), 0.5),
Edge2(Point2(0.0, 1.0), Point2(0.0, 0.0))]
return path
'''
Path3
'''
@pytest.fixture(scope="session")
def path3_1():
path = Path3()
path.list_of_edges = [Edge3(Point3(0.0, 0.0, 0.0), Point3(1.0, 1.0, 1.0)),
Edge3(Point3(1.0, 1.0, 1.0), Point3(2.0, 2.0, 2.0)),
Edge3(Point3(2.0, 2.0, 2.0), Point3(0.0, 0.0, 0.0))]
return path
@pytest.fixture(scope="session")
def path3_2():
path = Path3()
path.list_of_edges = [Edge3(Point3(1.0, 1.0, 1.0), Point3(2.0, 2.0, 2.0)),
Edge3(Point3(2.0, 2.0, 2.0), Point3(3.0, 3.0, 3.0)),
Edge3(Point3(3.0, 3.0, 3.0), Point3(4.0, 4.0, 4.0))]
return path
@pytest.fixture(scope="session")
def path3_3():
path = Path3()
path.list_of_edges = [Edge3(Point3(1.0, 1.0, 1.0), Point3(2.0, 2.0, 2.0)),
Edge3(Point3(2.0, 2.0, 2.0), Point3(3.0, 3.0, 3.0)),
Edge3(Point3(4.0, 4.0, 4.0), Point3(5.0, 5.0, 5.0))]
return path
@pytest.fixture(scope="session")
def path3_4():
path = Path3()
path.list_of_edges = [Edge3(Point3(0.0, 0.0, 0.0), Point3(1.0, 1.0, 1.0)),
Edge3(Point3(1.0, 1.0, 1.0), Point3(2.0, 2.0, 2.0)),
Edge3(Point3(2.0, 2.0, 2.0), Point3(0.0, 0.0, 0.0))]
return path
@pytest.fixture(scope="session")
def path3_5():
    """Fixture: closed Path3 along (0,0,0)->(1,1,1)->(2,2,2)->(3,3,3)->(0,0,0)."""
    vertices = [
        (0.0, 0.0, 0.0),
        (1.0, 1.0, 1.0),
        (2.0, 2.0, 2.0),
        (3.0, 3.0, 3.0),
        (0.0, 0.0, 0.0),
    ]
    path = Path3()
    path.list_of_edges = [
        Edge3(Point3(*start), Point3(*end))
        for start, end in zip(vertices, vertices[1:])
    ]
    return path
@pytest.fixture(scope="session")
def path3_6():
    """Fixture: Path3 with one edge from (1,1,1) to (1,1,1) carrying radius 1.0."""
    edge = Edge3(Point3(1.0, 1.0, 1.0), Point3(1.0, 1.0, 1.0), radius=1.0)
    path = Path3()
    path.list_of_edges = [edge]
    return path
@pytest.fixture(scope="session")
def path3_7():
    """Fixture: closed unit-square Path3 in the z=0 plane."""
    vertices = [
        (0.0, 0.0, 0.0),
        (1.0, 0.0, 0.0),
        (1.0, 1.0, 0.0),
        (0.0, 1.0, 0.0),
        (0.0, 0.0, 0.0),
    ]
    path = Path3()
    path.list_of_edges = [
        Edge3(Point3(*start), Point3(*end))
        for start, end in zip(vertices, vertices[1:])
    ]
    return path
@pytest.fixture(scope="session")
def path3_8():
    """Fixture: unit-square Path3 (z=0) whose third edge carries radius 0.5."""
    vertices = [
        (0.0, 0.0, 0.0),
        (1.0, 0.0, 0.0),
        (1.0, 1.0, 0.0),
        (0.0, 1.0, 0.0),
        (0.0, 0.0, 0.0),
    ]
    edges = [
        Edge3(Point3(*start), Point3(*end))
        for start, end in zip(vertices, vertices[1:])
    ]
    # Replace the (1,1,0)->(0,1,0) edge with the radiused variant.
    edges[2] = Edge3(Point3(1.0, 1.0, 0.0), Point3(0.0, 1.0, 0.0), radius=0.5)
    path = Path3()
    path.list_of_edges = edges
    return path
'''
Intersection
'''
@pytest.fixture(scope="session")
def intersection1():
    """Fixture: a default-constructed Intersection."""
    intersection = Intersection()
    return intersection
| 22.835564
| 96
| 0.580926
| 2,022
| 11,943
| 3.341741
| 0.037587
| 0.078141
| 0.079473
| 0.062158
| 0.852005
| 0.796803
| 0.770756
| 0.705195
| 0.606482
| 0.577771
| 0
| 0.144003
| 0.227832
| 11,943
| 522
| 97
| 22.87931
| 0.588701
| 0.007536
| 0
| 0.541139
| 0
| 0
| 0.041828
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.21519
| false
| 0
| 0.047468
| 0.129747
| 0.477848
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
29db89b8dd43869b1328085b1dcd9cd55c4c5407
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/future/backports/email/headerregistry.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/future/backports/email/headerregistry.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/future/backports/email/headerregistry.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/64/f6/ef/2ca5c3d0d30b494e235e55477c6c8670b32b166f86415511bbf5c78fcf
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.427083
| 0
| 96
| 1
| 96
| 96
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
29e7f3cbe71358af381d239f2ed5a600882cead9
| 6,552
|
py
|
Python
|
tests/mono_digits_recognizer/test_recognizer.py
|
Alladin9393/pattern-recognition-client
|
1b293eb8af41d46ecf256890743f748dad073ec6
|
[
"MIT"
] | null | null | null |
tests/mono_digits_recognizer/test_recognizer.py
|
Alladin9393/pattern-recognition-client
|
1b293eb8af41d46ecf256890743f748dad073ec6
|
[
"MIT"
] | 4
|
2020-10-22T11:44:18.000Z
|
2020-12-06T18:23:21.000Z
|
tests/mono_digits_recognizer/test_recognizer.py
|
Alladin9393/statprly
|
1b293eb8af41d46ecf256890743f748dad073ec6
|
[
"MIT"
] | null | null | null |
"""
Provide tests for MonoDigitRecognizer.
"""
import json
import random
from os.path import dirname
import numpy
import pytest
from statprly import MonoDigitRecognizer
from statprly.constants import (
LEAST_LIKELY,
MOST_LIKELY,
)
from statprly.errors import ValidationDataError
from statprly.mono_digits_recognizer.standards_provider import StandardsProvider
DIRNAME = dirname(__file__)
ACCURACY = 0.7
def test_recognize_random_digit_with_random_noise_less_than_half():
    """
    Case: recognize random digit with random noise < 0.44 and scale.
    Expect: overall accuracy over `test_cases` randomized trials exceeds
    the `ACCURACY` threshold.
    """
    test_cases = 100
    with open(DIRNAME + "/custom_standardts_data/mock_data_to_recognize.json") as f:
        digit_data_to_recognize = json.load(f)

    standards_provider = StandardsProvider()
    recognizer = MonoDigitRecognizer()

    number_of_success = 0
    # Loop bound reuses `test_cases` so the trial count and the accuracy
    # denominator cannot drift apart (the original hard-coded 100 twice).
    for _ in range(test_cases):
        random_noise = numpy.random.uniform(0, 0.44)
        random_scale = random.randrange(20)
        expected_digit = random.randrange(10)
        digit_with_noise = standards_provider.get_scaled_standard_with_noise(
            digit_data=digit_data_to_recognize[str(expected_digit)],
            vertical_scale=random_scale,
            horizontal_scale=random_scale,
            noise_probability=random_noise,
        )
        digit_with_noise = numpy.array(digit_with_noise)

        recognized_digit = recognizer.recognize(
            digit_to_predict_data=digit_with_noise,
            noise_probability=random_noise,
        )

        number_of_success += int(recognized_digit == expected_digit)

    accuracy = number_of_success / test_cases
    assert accuracy > ACCURACY
def test_recognize_random_digit_with_random_noise_more_than_half():
    """
    Case: recognize random digit with random noise > 0.6 and scale.
    Expect: overall accuracy over `test_cases` randomized trials exceeds
    the `ACCURACY` threshold.
    """
    test_cases = 100
    with open(DIRNAME + "/custom_standardts_data/mock_data_to_recognize.json") as f:
        digit_data_to_recognize = json.load(f)

    standards_provider = StandardsProvider()
    recognizer = MonoDigitRecognizer()

    number_of_success = 0
    # Loop bound reuses `test_cases` so the trial count and the accuracy
    # denominator cannot drift apart (the original hard-coded 100 twice).
    for _ in range(test_cases):
        random_noise = numpy.random.uniform(0.6, 1)
        random_scale = random.randrange(20)
        expected_digit = random.randrange(10)
        digit_with_noise = standards_provider.get_scaled_standard_with_noise(
            digit_data=digit_data_to_recognize[str(expected_digit)],
            vertical_scale=random_scale,
            horizontal_scale=random_scale,
            noise_probability=random_noise,
        )
        digit_with_noise = numpy.array(digit_with_noise)

        recognized_digit = recognizer.recognize(
            digit_to_predict_data=digit_with_noise,
            noise_probability=random_noise,
        )

        number_of_success += int(recognized_digit == expected_digit)

    accuracy = number_of_success / test_cases
    assert accuracy > ACCURACY
def test_recognize_random_digit_with_zero_noise():
    """
    Case: recognize a random digit from the mock standards with `LEAST_LIKELY` noise.
    Expect: the recognized digit equals the chosen digit.
    """
    standards_path = DIRNAME + "/custom_standardts_data/mock_data_to_recognize.json"
    with open(standards_path) as standards_file:
        digit_standards = json.load(standards_file)

    recognizer = MonoDigitRecognizer()
    expected_digit = random.randrange(10)
    digit_data = numpy.array(digit_standards.get(str(expected_digit)))

    recognized_digit = recognizer.recognize(digit_data, LEAST_LIKELY)

    assert recognized_digit == expected_digit
def test_recognize_random_digit_with_hundred_percent_noise():
    """
    Case: recognize a random digit from the inverted standards with `MOST_LIKELY` noise.
    Expect: the recognized digit equals the chosen digit.
    """
    standards_path = DIRNAME + "/custom_standardts_data/inversed_digit_standards.json"
    with open(standards_path) as standards_file:
        digit_standards = json.load(standards_file)

    recognizer = MonoDigitRecognizer()
    expected_digit = random.randrange(10)
    digit_data = numpy.array(digit_standards.get(str(expected_digit)))

    recognized_digit = recognizer.recognize(digit_data, MOST_LIKELY)

    assert recognized_digit == expected_digit
def test_get_digit_probability():
    """
    Case: get the probability of a random digit against the inverted
    standards with `MOST_LIKELY` noise.
    Expect: the returned probability equals `MOST_LIKELY`.
    """
    with open(DIRNAME + "/custom_standardts_data/inversed_digit_standards.json") as f:
        digit_data_to_get_prob = json.loads(f.read())

    recognizer = MonoDigitRecognizer()
    digit_to_get_prob = random.randrange(10)
    digit_to_get_prob_data = numpy.array(
        digit_data_to_get_prob.get(str(digit_to_get_prob)),
    )
    noise = MOST_LIKELY

    digit_prob = recognizer.get_digit_probability(
        digit_to_get_prob_data,
        digit_to_get_prob,
        noise,
    )

    assert digit_prob == MOST_LIKELY
def test_recognize_random_digit_with_invalid_digit_data_type():
    """
    Case: recognize digit with invalid digit data type.
    Expect: `digit_to_predict_data` must be a numpy array data error message.
    """
    standards_path = DIRNAME + "/custom_standardts_data/mock_data_to_recognize.json"
    with open(standards_path) as standards_file:
        digit_standards = json.load(standards_file)

    recognizer = MonoDigitRecognizer()
    target_digit = str(random.randrange(10))

    # The raw JSON value (a plain list, not a numpy array) must be rejected.
    with pytest.raises(ValidationDataError):
        recognizer.recognize(digit_standards.get(target_digit), LEAST_LIKELY)
def test_recognize_random_digit_with_invalid_noise():
    """
    Case: recognize digit with a noise probability outside the valid range.
    Expect: `ValidationDataError` is raised for both negative and
    greater-than-one noise values.
    """
    with open(DIRNAME + "/custom_standardts_data/mock_data_to_recognize.json") as f:
        digit_data_to_recognize = json.loads(f.read())

    recognizer = MonoDigitRecognizer()
    digit_to_recognize = random.randrange(10)
    negative_noise = random.randrange(-100, -1)
    # NOTE(review): randrange(1, 100) can yield exactly 1 — confirm a noise
    # probability of 1 is also rejected, otherwise this test can flake.
    positive_noise = random.randrange(1, 100)

    with pytest.raises(ValidationDataError):
        recognizer.recognize(
            digit_data_to_recognize.get(str(digit_to_recognize)),
            negative_noise,
        )

    with pytest.raises(ValidationDataError):
        recognizer.recognize(
            digit_data_to_recognize.get(str(digit_to_recognize)),
            positive_noise,
        )
| 31.2
| 86
| 0.708181
| 792
| 6,552
| 5.483586
| 0.125
| 0.083583
| 0.062169
| 0.059866
| 0.864379
| 0.841124
| 0.826848
| 0.809348
| 0.785632
| 0.785632
| 0
| 0.01049
| 0.214286
| 6,552
| 209
| 87
| 31.349282
| 0.833139
| 0.106685
| 0
| 0.649635
| 0
| 0
| 0.0632
| 0.0632
| 0
| 0
| 0
| 0
| 0.036496
| 1
| 0.051095
| false
| 0
| 0.065693
| 0
| 0.116788
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d9a92a3ccb431950b465ce2f95668e9d706b8f55
| 1,138
|
py
|
Python
|
root/usr/local/bin/authorized_keys.py
|
daniel-noland/config
|
443d43bb95bab1ec58495615e82d714251ec52df
|
[
"MIT"
] | 1
|
2016-12-08T15:45:14.000Z
|
2016-12-08T15:45:14.000Z
|
root/usr/local/bin/authorized_keys.py
|
daniel-noland/config
|
443d43bb95bab1ec58495615e82d714251ec52df
|
[
"MIT"
] | 5
|
2017-02-12T01:17:12.000Z
|
2017-03-22T21:18:17.000Z
|
root/usr/local/bin/authorized_keys.py
|
daniel-noland/config
|
443d43bb95bab1ec58495615e82d714251ec52df
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
def check_user(userName: str) -> bool:
    """Echo the candidate user name and return True only for "dnoland"."""
    print(userName)
    is_allowed = userName == "dnoland"
    return is_allowed
def check_fingerprint(fingerprint: str) -> bool:
    """Echo the fingerprint and accept it unconditionally.

    Bug fix: the original returned the undefined name ``true``, which raises
    NameError at call time — Python's boolean literal is ``True``.
    """
    print(fingerprint)
    # NOTE(review): accepting every fingerprint looks suspicious for an
    # authorized_keys helper — confirm this is intentional.
    return True
def print_answer() -> None:
    """Log the script's argv to /tmp/sshtest.txt and print the authorized public key.

    Bug fix: the original annotated the return type as ``str`` although the
    function returns nothing; corrected to ``None``.
    """
    # Record every command-line argument, one per line, for later inspection.
    with open("/tmp/sshtest.txt", "w") as f:
        for arg in sys.argv:
            f.write(arg)
            f.write("\n")
    print("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDiMUdAG99iWp52AJmUcqfeqPAYbqBL5tTr4k5TEmtZTA4X6YihnYqtwbDkHA++nrWJxtByLdup3APdtK1RfVuuVhWSaXuAquCfVw/BiII0yKQ5zB7kxFt2KL4QFHzr30KlM0rxyEzMGtOXuyEd7USvug1V0zCgZpWBr3sMgv9SvPd3GaI9Y/a/DnMLk1LFnylwudu5Qr/FKb9zn9gSJn5+sNwITA+9RYf7wRGNlMUqVHcnzvcTUe8DyUeUY6l6PyYenanJ49PgcYd5+0zpoFIX3z778nVasuSubBc8CLjeZAQDLAG3W3dD6ZUbczdEaypaiOHtiOb3cJ38f1W4PpmSipBsO6upmjxteV3rxGvHfExQ3XVM6QL/na1loBES+BK98jdWJgUYA3Wd10pfhuKRqqeg3M3xHBIXnBTzBFhYG+ggotbGy2c9LPU9s4y+iE9tGxL092oGTjmiOScMCrecyOY5xjNon3FtyYFsriO/jl6zdzGOhSgFxtHmaD4GtfRD8m6huG9DvCsoWZ3wLw3KkUmh1pzFpVMSZo8fzbm06PYba0wSl8+DSn1NTfSxaT1KgHMy2IQEY2NCQsipin+veXiMUzUBKDFuhfttv2XcwkxvcN5AXw+bCOD424LlaXfSGAZSgk4PYGBNMs1m0lmZkBjbkNAbSwe4GdsVJTq5w== cardno:000604684577")
# Script entry point: runs on import/execution (no `if __name__ == "__main__"` guard).
print_answer()
| 56.9
| 757
| 0.842707
| 73
| 1,138
| 13.082192
| 0.712329
| 0.016754
| 0.025131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112717
| 0.087873
| 1,138
| 19
| 758
| 59.894737
| 0.807322
| 0.01406
| 0
| 0
| 0
| 0.071429
| 0.686887
| 0.638715
| 0
| 1
| 0
| 0
| 0
| 1
| 0.214286
| false
| 0
| 0.071429
| 0
| 0.428571
| 0.428571
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.