hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
35aebe62666189715c708dfc52a3ff733e471005 | 7,928 | py | Python | main.py | samikshamodi/TwoPassAssembler | fdd2a961fa045efd2aab2d6c9320cc1893824267 | [
"MIT"
] | null | null | null | main.py | samikshamodi/TwoPassAssembler | fdd2a961fa045efd2aab2d6c9320cc1893824267 | [
"MIT"
] | null | null | null | main.py | samikshamodi/TwoPassAssembler | fdd2a961fa045efd2aab2d6c9320cc1893824267 | [
"MIT"
] | null | null | null | opcode_table = {'CLA': '0000', 'LAC': '0001', 'SAC': '0010', 'ADD': '0011', 'SUB': '0100', 'BRZ': '0101',
'BRN': '0110', 'BRP': '0111', 'INP': '1000', 'DSP': '1001', 'MUL': '1010', 'DIV': '1011', 'STP': '1100'}
words = {'CLA': 1, 'LAC': 2, 'SAC': 2, 'ADD': 2, 'SUB': 2, 'BRZ': 2,
'BRN': 2, 'BRP': 2, 'INP': 2, 'DSP': 2, 'MUL': 2, 'DIV': 2, 'STP': 1}
symbol_table = {} # Stores all the labels and variables
declare_table = [] # Stores all the variables that have been declared
global input_file
global location_counter
xyz = 256
def to_binary(data):
return('{:012b}'.format(int(data)))
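# Illustrative behaviour of to_binary (hypothetical values): to_binary(5)
# returns '000000000101' and to_binary(256) returns '000100000000', i.e. a
# zero-padded 12-bit operand/address string.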
def process():
    # Remove comment-only lines and empty lines. Filtering into a new list
    # avoids the bug of calling remove() while iterating, which skips lines.
    input_file[:] = [line for line in input_file
                     if line != '' and not line.startswith('//')]
    # Remove trailing comments, e.g. 'CLA //Clear Accumulator' becomes 'CLA'
    for index, line in enumerate(input_file):
        comment_start = line.find('//')
        if comment_start != -1:
            input_file[index] = line[:comment_start]
    temp = []  # tokenized lines
    for line in input_file:
        # Split on spaces and drop empty tokens, e.g. ['CLA', ''] becomes
        # ['CLA']; filtering avoids removing from the list being iterated
        temp.append([element for element in line.split(' ') if element != ''])
# Removing [] from temp
temp2 = [x for x in temp if x != []]
return temp2
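# Sketch of what process() produces for a hypothetical input file:
#   ['START 100', 'CLA //clear', '', '// full-line comment', 'LAC X']
#   -> [['START', '100'], ['CLA'], ['LAC', 'X']]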
def pass_one():
global location_counter
global xyz
    todelete = [] # invalid instructions are saved here to be deleted later
for line in input_file:
if line[0][-1] == ':': # The line has a label
if line[0][:-1] in symbol_table:
error_file.write(
"\n Symbol defined more than once: " + str(line[0][:-1]))
else:
# Add label to symbol table
symbol_table[line[0][:-1]] = location_counter
location_counter += 1
if len(line) == 2:
if (line[1] != 'CLA' and line[1] != 'STP' and line[1] not in opcode_table):
error_file.write("\n Invalid opcode: " + str(line[1]))
todelete.append(line)
location_counter += 1
continue
else:
if(len(line) <= words[line[1]]):
error_file.write("\n Too few operands: " + str(line))
todelete.append(line)
location_counter += 1
continue
            if(line[1] in opcode_table): # Checking if it is a valid opcode
                if(len(line) > words[line[1]]+1):
                    error_file.write("\n Too many operands: " + str(line))
                # Guard against one-word opcodes (no operand) and duplicates
                if len(line) > 2 and line[2] not in symbol_table:
                    symbol_table[line[2]] = xyz # Add variable to symbol table
                    xyz += 1
else:
error_file.write("\n Invalid opcode: " + str(line[1]))
todelete.append(line)
elif len(line) > 1 and (line[1] == 'DS' or line[1] == 'DC'):
if line[0] in declare_table:
error_file.write(
"\n Symbol defined more than once: " + str(line[0]))
else:
declare_table.append(line[0])
location_counter += 1
else: # There is no label
if len(line) == 1:
if (line[0] != 'CLA' and line[0] != 'STP' and line[0] not in opcode_table):
error_file.write("\n Invalid opcode: " + str(line[0]))
todelete.append(line)
location_counter += 1
continue
else:
if(len(line) < words[line[0]]):
error_file.write("\n Too few operands: " + str(line))
todelete.append(line)
location_counter += 1
continue
            if(line[0] in opcode_table): # Checking if it is a valid opcode
                if(len(line) > words[line[0]]):
                    error_file.write("\n Too many operands: " + str(line))
                location_counter += 1
                # One-word opcodes (CLA/STP) carry no operand to record
                if len(line) > 1 and line[1] not in symbol_table:
                    symbol_table[line[1]] = xyz # Add variable to symbol table
                    xyz += 1
else:
error_file.write("\n Invalid opcode: " + str(line[0]))
todelete.append(line)
    # Removing declarative statements (DS/DC) from the end of input_file;
    # pop() drops the last element, whereas remove() deletes the first match
    while len(input_file[-1]) == 3 and (input_file[-1][1] == 'DS' or input_file[-1][1] == 'DC'):
        input_file.pop()
# Removing todelete from input_file
for i in todelete:
input_file.remove(i)
def pass_two():
for line in input_file:
if(line[0][-1] == ':'): # The line has a label
            if line[1] in opcode_table and (line[1] == 'CLA' or line[1] == 'STP'):
output_file.write("\n"+opcode_table[line[1]])
elif line[1] in opcode_table:
output_file.write("\n"+opcode_table[line[1]])
output_file.write("\t"+to_binary(str(symbol_table[line[2]])))
# Displays error if symbol is used but not defined
if((line[2] not in declare_table) and (line[2] not in symbol_table)):
error_file.write(
"\n Symbol used but not defined: " + str(line[2]))
else: # The line does not have a label
            if line[0] in opcode_table and (line[0] == 'CLA' or line[0] == 'STP'):
output_file.write("\n"+opcode_table[line[0]])
elif line[0] in opcode_table:
output_file.write("\n"+opcode_table[line[0]])
output_file.write("\t"+to_binary(str(symbol_table[line[1]])))
# Displays error if symbol is used but not defined
if((line[1] not in declare_table) and (line[1] not in symbol_table)):
error_file.write(
"\n Symbol used but not defined: " + str(line[1]))
# Erasing output.txt file every time the program is run
open("output.txt", "w").close()
output_file = open("output.txt", "a")
# Erasing error.txt file every time the program is run
open("error.txt", "w").close()
error_file = open("error.txt", "a")
# Takes the file name where the assembly language program is stored
input_file_name = input("Enter input file name: ")
#input_file_name = "input.txt"
try:
input_file = open(input_file_name, "r")
except FileNotFoundError:
print("No file found. Please retry.")
exit()
# Reads the entire input file
input_file = input_file.read()
# Splits the input file at new line and converts it to list
input_file = input_file.split("\n")
print("\n", input_file)
# Removes the comments and empty lines
input_file = process()
# Checks if START is missing. If missing, it reports the error. If present it removes it from input_file list
if input_file[0][0] == 'START':
    location_counter = 0  # default when START carries no load address
    if (len(input_file[0])) > 1:
        location_counter = int(input_file[0][1])
    input_file.remove(input_file[0])
else:
location_counter = 0
error_file.write("\n START statement is missing")
# Checks if END is missing. If missing, it reports the error. If present it removes it from input_file list
if input_file[-1][0] == 'END':
    input_file.pop()
else:
error_file.write("\n END statement is missing")
print("\n", input_file)
# Calls pass_one of the assembler
pass_one()
print("\n Symbol table: ", symbol_table)
print("\n Declare table: ", declare_table)
# Because the address where the program is loaded might overlap with the address where the variable is stored
if(location_counter >= 256):
error_file.write("\n Memory address of instructions exceed 256")
print("\n New: ", input_file)
pass_two()
| 38.115385 | 120 | 0.553103 | 1,064 | 7,928 | 4.012218 | 0.180451 | 0.088545 | 0.044507 | 0.052706 | 0.470602 | 0.433357 | 0.391661 | 0.391661 | 0.372921 | 0.343406 | 0 | 0.030493 | 0.317482 | 7,928 | 207 | 121 | 38.299517 | 0.758455 | 0.187815 | 0 | 0.366667 | 0 | 0 | 0.11446 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026667 | false | 0.026667 | 0 | 0.006667 | 0.033333 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35aee2038df7c186be075a44b6bbca028b2a7fb2 | 4,319 | py | Python | autumn/db/input_data.py | MattSegal/AuTuMN | 49d78d9c07ea3825ac31682a4d124eab9d3365ce | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | autumn/db/input_data.py | MattSegal/AuTuMN | 49d78d9c07ea3825ac31682a4d124eab9d3365ce | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | autumn/db/input_data.py | MattSegal/AuTuMN | 49d78d9c07ea3825ac31682a4d124eab9d3365ce | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | """
Methods for creating an input database
"""
import os
import time
import glob
import pandas as pd
from .. import constants
from .database import Database
def build_input_database():
"""
Builds an input database from source Excel spreadsheets and stores it in the data directory.
"""
# Load input database, where we will store the data.
db_name = get_new_database_name()
database = Database(db_name)
# Load Excel sheets into the database.
excel_glob = os.path.join(constants.EXCEL_PATH, "*.xlsx")
excel_sheets = glob.glob(excel_glob)
for file_path in excel_sheets:
filename = os.path.basename(file_path)
        header_row = HEADERS_LOOKUP.get(filename, 0)
        data_title = OUTPUT_NAME.get(filename, filename)
file_df = pd.read_excel(
pd.ExcelFile(file_path),
header=header_row,
index_col=1,
sheet_name=TAB_OF_INTEREST[filename],
)
print("Reading '%s' tab of '%s' file" % (TAB_OF_INTEREST[filename], filename))
file_df.to_sql(data_title, con=database.engine, if_exists="replace")
# Load CSV files into the database
csv_glob = os.path.join(constants.EXCEL_PATH, "*.csv")
csv_sheets = glob.glob(csv_glob)
for file_path in csv_sheets:
file_title = os.path.basename(file_path).split(".")[0]
file_df = pd.read_csv(file_path)
print("Reading '%s' file" % (file_path))
file_df.to_sql(file_title, con=database.engine, if_exists="replace")
# Add mapped ISO3 code tables that only contain the UN country code
table_names = ["crude_birth_rate", "absolute_deaths", "total_population"]
for table_name in table_names:
print("Creating country code mapped database for", table_name)
        # Map from UN three-digit numeric country codes to ISO3 three-letter alphabetic codes.
map_df = database.db_query(table_name="un_iso3_map")[
["Location code", "ISO3 Alpha-code"]
].dropna()
table_df = database.db_query(table_name=table_name)
table_with_iso = pd.merge(
table_df, map_df, left_on="Country code", right_on="Location code"
)
# Rename columns to avoid using spaces.
table_with_iso.rename(columns={"ISO3 Alpha-code": "iso3"}, inplace=True)
# Remove index column to avoid creating duplicates.
if "Index" in table_with_iso.columns:
table_with_iso = table_with_iso.drop(columns=["Index"])
# Create a new 'mapped' database structure
table_with_iso.to_sql(table_name + "_mapped", con=database.engine, if_exists="replace")
return database
def get_new_database_name():
"""
Get a timestamped name for the new database.
"""
timestamp = int(time.time())
db_name = f"inputs.{timestamp}.db"
return os.path.join(constants.DATA_PATH, db_name)
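# For example (illustrative): if DATA_PATH were "/data" and time.time() returned
# 1560000000, get_new_database_name() would return "/data/inputs.1560000000.db".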
# Mappings for Excel data that is used to populate the input database.
HEADERS_LOOKUP = {
"WPP2019_FERT_F03_CRUDE_BIRTH_RATE.xlsx": 16,
"WPP2019_F01_LOCATIONS.xlsx": 16,
"WPP2019_MORT_F04_1_DEATHS_BY_AGE_BOTH_SEXES.xlsx": 16,
"WPP2019_POP_F07_1_POPULATION_BY_AGE_BOTH_SEXES.xlsx": 16,
"life_expectancy_2015.xlsx": 3,
"rate_birth_2015.xlsx": 3,
}
TAB_OF_INTEREST = {
"WPP2019_FERT_F03_CRUDE_BIRTH_RATE.xlsx": "ESTIMATES",
"WPP2019_MORT_F04_1_DEATHS_BY_AGE_BOTH_SEXES.xlsx": "ESTIMATES",
"WPP2019_POP_F07_1_POPULATION_BY_AGE_BOTH_SEXES.xlsx": "ESTIMATES",
"WPP2019_F01_LOCATIONS.xlsx": "Location",
"coverage_estimates_series.xlsx": "BCG",
"gtb_2015.xlsx": "gtb_2015",
"gtb_2016.xlsx": "gtb_2016",
"life_expectancy_2015.xlsx": "life_expectancy_2015",
"rate_birth_2015.xlsx": "rate_birth_2015",
}
OUTPUT_NAME = {
"WPP2019_FERT_F03_CRUDE_BIRTH_RATE.xlsx": "crude_birth_rate",
"WPP2019_MORT_F04_1_DEATHS_BY_AGE_BOTH_SEXES.xlsx": "absolute_deaths",
"WPP2019_POP_F07_1_POPULATION_BY_AGE_BOTH_SEXES.xlsx": "total_population",
"WPP2019_F01_LOCATIONS.xlsx": "un_iso3_map",
"coverage_estimates_series.xlsx": "bcg",
"gtb_2015.xlsx": "gtb_2015",
"gtb_2016.xlsx": "gtb_2016",
"life_expectancy_2015.xlsx": "life_expectancy_2015",
"rate_birth_2015.xlsx": "rate_birth_2015",
}
| 37.885965 | 118 | 0.700857 | 611 | 4,319 | 4.620295 | 0.261866 | 0.022671 | 0.025505 | 0.029756 | 0.332979 | 0.305349 | 0.274176 | 0.18243 | 0.18243 | 0.18243 | 0 | 0.048837 | 0.194026 | 4,319 | 113 | 119 | 38.221239 | 0.762137 | 0.15536 | 0 | 0.101266 | 0 | 0 | 0.340089 | 0.179067 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025316 | false | 0 | 0.075949 | 0 | 0.126582 | 0.037975 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35b642bc6f7c75bdecd30dabf8b4747b69a3152b | 1,833 | py | Python | r_min.py | ceccoangiolieri/r_on_heroku | f243017b16d5bc894a811b5f8b10a558cbea144c | [
"MIT"
] | null | null | null | r_min.py | ceccoangiolieri/r_on_heroku | f243017b16d5bc894a811b5f8b10a558cbea144c | [
"MIT"
] | null | null | null | r_min.py | ceccoangiolieri/r_on_heroku | f243017b16d5bc894a811b5f8b10a558cbea144c | [
"MIT"
] | null | null | null | import sys
import os
from django.conf import settings
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
settings.configure(
DEBUG=True,
SECRET_KEY='ac!5bu68^vf3_12)m1e&2ls#1uidd_33f)c!j=&&^b_91m7g#+',
ROOT_URLCONF=__name__,
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
),
ALLOWED_HOSTS = [ 'r-in-heroku.herokuapp.com',
'localhost'],
BASE_DIR = BASE_DIR,
STATIC_URL = '/static/',
STATIC_ROOT = os.path.join(BASE_DIR, 'static'),
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [ os.path.join(BASE_DIR, 'templates'), ],
}],
INSTALLED_APPS = [ 'django.contrib.staticfiles', ],
)
from django.conf.urls import url
from django.http import HttpResponse
from django.shortcuts import render
from django.template import Context, loader
import subprocess
def batch_r(str_source):
# call_string = "fakechroot fakeroot chroot /app/.root /usr/bin/" + R CMD BATCH
call_string = os.getenv('R_EXEC_STRING', '') + 'R CMD BATCH'
file_target = os.getenv('R_SCRIPT_FOLDER_PREFIX', '') + str_source
subprocess.call(call_string + ' ' + file_target, shell=True)
return None
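# Illustrative call (assumes the R_* environment variables are unset):
# batch_r('analysis.R') runs "R CMD BATCH analysis.R" in a shell; R CMD BATCH
# writes the session transcript to analysis.Rout alongside the script.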
def index(request):
batch_r('./01-scripts/00_pm-bupar_MAIN.R')
return render(request, 'index.html')
urlpatterns = (
url(r'^$', index),
)
if __name__ == "__main__":
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| 29.095238 | 87 | 0.701037 | 225 | 1,833 | 5.457778 | 0.506667 | 0.057003 | 0.022801 | 0.022801 | 0.027687 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011921 | 0.176214 | 1,833 | 62 | 88 | 29.564516 | 0.801325 | 0.04419 | 0 | 0 | 0 | 0 | 0.242857 | 0.192571 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.234043 | 0 | 0.319149 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35b691796b1d742c51d95feecadbec7b27bf7010 | 774 | py | Python | exploration_scripts/test_tree.py | raymondEhlers/alice-jet-hadron | 8526567935c0339cebb9ef224b09a551a0b96932 | [
"BSD-3-Clause"
] | 1 | 2020-12-29T20:00:06.000Z | 2020-12-29T20:00:06.000Z | exploration_scripts/test_tree.py | raymondEhlers/alice-jet-hadron | 8526567935c0339cebb9ef224b09a551a0b96932 | [
"BSD-3-Clause"
] | 6 | 2019-10-22T22:17:05.000Z | 2020-09-26T00:24:08.000Z | exploration_scripts/test_tree.py | raymondEhlers/alice-jet-hadron | 8526567935c0339cebb9ef224b09a551a0b96932 | [
"BSD-3-Clause"
] | 2 | 2019-07-02T19:33:54.000Z | 2021-01-04T15:14:00.000Z | #!/usr/bin/env python
import IPython
import numpy as np
#import ROOT
DTYPE_BASE = np.dtype([("pT", np.float64), ("eta", np.float64), ("phi", np.float64), ("m", np.float64)])
print(f"DTYPE_BASE: {DTYPE_BASE}")
#status_dtype = DTYPE_EP.descr + [("status_code", np.int32)]
DTYPE_JETS = [(f"{label}_{name}", dtype) for label in ["part", "det"] for name, dtype in DTYPE_BASE.descr]
print(f"DTYPE_JETS: {DTYPE_JETS}")
output_array = np.zeros(1, dtype = DTYPE_JETS)
part_jet = np.array((1, 0.5, 0.5, 0), dtype = DTYPE_BASE)
det_jet = np.array((2, 0.5, 0.75, 1), dtype = DTYPE_BASE)
IPython.embed()
# None of this works...
#temp = np.concatenate(part_jet[:], det_jet[:], axis = 1)
#output_array[:4] = part_jet
#output_array[0] = temp
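# A working alternative (sketch): copy field-by-field into the structured row.
# for name, _ in DTYPE_BASE.descr:
#     output_array[0][f"part_{name}"] = part_jet[name]
#     output_array[0][f"det_{name}"] = det_jet[name]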
print(f"output_array: {output_array}")
| 27.642857 | 106 | 0.673127 | 131 | 774 | 3.793893 | 0.374046 | 0.108652 | 0.018109 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039941 | 0.126615 | 774 | 27 | 107 | 28.666667 | 0.695266 | 0.280362 | 0 | 0 | 0 | 0 | 0.192727 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.272727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35b6cf54a0f3f5d3c05103479a4880c208a943f5 | 3,398 | py | Python | report/Data collection/Gini_Computation.py | joined/IN4334-MiningSoftwareRepositories | 207b0c91b68851320049d1ab902d7028a5523f4e | [
"MIT"
] | 2 | 2018-05-27T07:12:58.000Z | 2022-03-18T02:34:04.000Z | report/Data collection/Gini_Computation.py | joined/IN4334-MiningSoftwareRepositories | 207b0c91b68851320049d1ab902d7028a5523f4e | [
"MIT"
] | null | null | null | report/Data collection/Gini_Computation.py | joined/IN4334-MiningSoftwareRepositories | 207b0c91b68851320049d1ab902d7028a5523f4e | [
"MIT"
] | 3 | 2017-01-11T16:51:41.000Z | 2019-11-07T08:17:38.000Z | #!/usr/bin/env python3
import requests
import sys
import csv
import re
import numpy as np
def gini_index(array):
"""
Calculate the Gini coefficient of a numpy array
"""
array = array.flatten()
if np.amin(array) < 0:
array -= np.amin(array) # values cannot be negative
array += 0.0000001 # values cannot be 0
array = np.sort(array) # values must be sorted
index = np.arange(1, array.shape[0]+1) # index per array element
n = array.shape[0] # number of array elements
return ((np.sum((2 * index - n - 1) * array)) / (n * np.sum(array)))
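# Quick sanity checks for gini_index (illustrative values only):
#   gini_index(np.array([1.0, 1.0, 1.0, 1.0]))  # ~0.0  (perfect equality)
#   gini_index(np.array([0.0, 0.0, 0.0, 1.0]))  # ~0.75 (one contributor dominates)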
input_file = sys.argv[1]
# Store all the projects read from the CSV file in a list
projects = []
with open(input_file, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
# Skip the first line with the header
next(reader)
for row in reader:
# Save the url of the repo and the name in the list
projects.append((row[1], row[3]))
result = []
# Iterate over all the projects and calculate the Gini coefficient
# for each of them, storing the results in the result list
for project_tuple in projects:
project_url, project_name = project_tuple
base_url = project_url + '/contributors'
# Make request to the Github API
r = requests.get(
base_url,
auth=('joined','7fb42c90a8b83b773082e1a337fec4555f65c893'))
contributors = []
# If the project doesn't exist skip to the next one
if r.status_code != 200:
result.append({'project_name': project_name})
continue
cur_contributors = r.json()
# If the response was empty for some reason skip to the next project
if not cur_contributors:
result.append({'project_name': project_name})
continue
# Store the number of contributions of each contributor in a list
contributors = []
for contributor in r.json():
contributors.append(contributor['contributions'])
# If there are more contributors to be downloaded, do it
if 'Link' in r.headers:
# Find first and last page of the results
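        # An illustrative Link header that the regex below parses:
        #   <https://api.github.com/repositories/1/contributors?page=2>; rel="next",
        #   <https://api.github.com/repositories/1/contributors?page=5>; rel="last"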
matches = re.findall(r'<.+?page=(\d+)>', r.headers['Link'])
next_page, last_page = (int(p) for p in matches)
# For each results page add the contributions to the list
for page in range(next_page, last_page + 1):
url = base_url + '?page={}'.format(page)
r = requests.get(
url,
auth=('joined', '7fb42c90a8b83b773082e1a337fec4555f65c893'))
for contributor in r.json():
contributors.append(contributor['contributions'])
# Compute the Gini index from the array with contributions
gini_coeff = gini_index(np.array(contributors, dtype='float64'))
# Store the result in the result list
result.append({
'project_name': project_name,
'gini_index': gini_coeff,
'n_contributions': sum(contributors),
'n_contributors': len(contributors)
})
output_file = sys.argv[2]
# Save the results to the CSV output file
with open(output_file, 'w', newline='') as csvfile:
fieldnames = [
'project_name',
'gini_index',
'n_contributions',
'n_contributors'
]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for project in result:
writer.writerow(project)
| 29.547826 | 76 | 0.644791 | 449 | 3,398 | 4.799555 | 0.32294 | 0.040835 | 0.033411 | 0.032019 | 0.113225 | 0.113225 | 0.097448 | 0.058469 | 0.058469 | 0 | 0 | 0.03194 | 0.253679 | 3,398 | 114 | 77 | 29.807018 | 0.817823 | 0.278105 | 0 | 0.205882 | 0 | 0 | 0.123293 | 0.033099 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014706 | false | 0 | 0.073529 | 0 | 0.102941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35b6fe37214eed491c2c1869e28bae9454adc3c9 | 3,260 | py | Python | lib/galaxy/model/migrate/versions/0035_item_annotations_and_workflow_step_tags.py | yvanlebras/galaxy | 6b8489ca866825bcdf033523120a8b24ea6e6342 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/model/migrate/versions/0035_item_annotations_and_workflow_step_tags.py | yvanlebras/galaxy | 6b8489ca866825bcdf033523120a8b24ea6e6342 | [
"CC-BY-3.0"
] | 2 | 2017-05-18T16:12:55.000Z | 2022-03-08T12:08:43.000Z | lib/galaxy/model/migrate/versions/0035_item_annotations_and_workflow_step_tags.py | yvanlebras/galaxy | 6b8489ca866825bcdf033523120a8b24ea6e6342 | [
"CC-BY-3.0"
] | null | null | null | """
Migration script to (a) create tables for annotating objects and (b) create tags for workflow steps.
"""
import logging
from sqlalchemy import (
Column,
ForeignKey,
Index,
Integer,
MetaData,
Table,
TEXT,
Unicode,
)
from galaxy.model.migrate.versions.util import (
create_table,
drop_table,
)
log = logging.getLogger(__name__)
metadata = MetaData()
# Annotation tables.
HistoryAnnotationAssociation_table = Table(
"history_annotation_association",
metadata,
Column("id", Integer, primary_key=True),
Column("history_id", Integer, ForeignKey("history.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("annotation", TEXT),
Index("ix_history_anno_assoc_annotation", "annotation", mysql_length=200),
)
HistoryDatasetAssociationAnnotationAssociation_table = Table(
"history_dataset_association_annotation_association",
metadata,
Column("id", Integer, primary_key=True),
Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("annotation", TEXT),
Index("ix_history_dataset_anno_assoc_annotation", "annotation", mysql_length=200),
)
StoredWorkflowAnnotationAssociation_table = Table(
"stored_workflow_annotation_association",
metadata,
Column("id", Integer, primary_key=True),
Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("annotation", TEXT),
Index("ix_stored_workflow_ann_assoc_annotation", "annotation", mysql_length=200),
)
WorkflowStepAnnotationAssociation_table = Table(
"workflow_step_annotation_association",
metadata,
Column("id", Integer, primary_key=True),
Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("annotation", TEXT),
Index("ix_workflow_step_ann_assoc_annotation", "annotation", mysql_length=200),
)
# Tagging tables.
WorkflowStepTagAssociation_table = Table(
"workflow_step_tag_association",
metadata,
Column("id", Integer, primary_key=True),
Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
Column("user_tname", Unicode(255), index=True),
Column("value", Unicode(255), index=True),
Column("user_value", Unicode(255), index=True),
)
TABLES = [
HistoryAnnotationAssociation_table,
HistoryDatasetAssociationAnnotationAssociation_table,
StoredWorkflowAnnotationAssociation_table,
WorkflowStepAnnotationAssociation_table,
WorkflowStepTagAssociation_table,
]
def upgrade(migrate_engine):
print(__doc__)
metadata.bind = migrate_engine
metadata.reflect()
for table in TABLES:
create_table(table)
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
for table in TABLES:
drop_table(table)
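# Sketch of how the migration framework is expected to drive this script;
# migrate_engine is normally supplied by the framework, not created by hand:
#
#   from sqlalchemy import create_engine
#   engine = create_engine("sqlite:///universe.db")  # hypothetical database URL
#   upgrade(engine)    # creates the four annotation tables and the tag table
#   downgrade(engine)  # drops them again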
| 30.754717 | 112 | 0.734049 | 356 | 3,260 | 6.449438 | 0.202247 | 0.078397 | 0.08493 | 0.081446 | 0.533537 | 0.497387 | 0.497387 | 0.423345 | 0.423345 | 0.423345 | 0 | 0.007532 | 0.144785 | 3,260 | 105 | 113 | 31.047619 | 0.815997 | 0.041718 | 0 | 0.329268 | 0 | 0 | 0.238523 | 0.125522 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0 | 0.036585 | 0 | 0.060976 | 0.012195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35ba0c62a62b97bb54b61a64a0db354de77fc6e0 | 2,119 | py | Python | cohesity_management_sdk/models/aag_and_databases.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | cohesity_management_sdk/models/aag_and_databases.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | cohesity_management_sdk/models/aag_and_databases.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.protection_source
class AAGAndDatabases(object):
"""Implementation of the 'AAG And Databases.' model.
Specifies an AAG and the database members of the AAG.
Attributes:
aag (ProtectionSource): Specifies a generic structure that represents
a node in the Protection Source tree. Node details will depend on
the environment of the Protection Source.
databases (list of ProtectionSource): Specifies databases found that
are members of the AAG.
"""
# Create a mapping from Model property names to API property names
_names = {
"aag":'aag',
"databases":'databases'
}
def __init__(self,
aag=None,
databases=None):
"""Constructor for the AAGAndDatabases class"""
# Initialize members of the class
self.aag = aag
self.databases = databases
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
aag = cohesity_management_sdk.models.protection_source.ProtectionSource.from_dictionary(dictionary.get('aag')) if dictionary.get('aag') else None
databases = None
        if dictionary.get('databases') is not None:
databases = list()
for structure in dictionary.get('databases'):
databases.append(cohesity_management_sdk.models.protection_source.ProtectionSource.from_dictionary(structure))
# Return an object of this model
return cls(aag,
databases)
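# Example round-trip (illustrative payload keys only):
#   obj = AAGAndDatabases.from_dictionary({'aag': None, 'databases': None})
#   obj.aag        # -> None
#   obj.databases  # -> None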
| 31.626866 | 153 | 0.637093 | 232 | 2,119 | 5.74569 | 0.37069 | 0.026257 | 0.047262 | 0.060765 | 0.141785 | 0.141785 | 0.109527 | 0.109527 | 0.109527 | 0 | 0 | 0.003353 | 0.296366 | 2,119 | 66 | 154 | 32.106061 | 0.890677 | 0.471449 | 0 | 0 | 0 | 0 | 0.047761 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.041667 | 0 | 0.291667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35ba2e227bd917afda89945177f8421005144bc5 | 738 | py | Python | coursedashboards/management/commands/simulate_low_enrollment.py | uw-it-aca/course-dashboards | 0f195f7233fc8e24e9ca0d2624ca288869e133ba | [
"Apache-2.0"
] | 1 | 2018-04-05T19:00:27.000Z | 2018-04-05T19:00:27.000Z | coursedashboards/management/commands/simulate_low_enrollment.py | uw-it-aca/course-dashboards | 0f195f7233fc8e24e9ca0d2624ca288869e133ba | [
"Apache-2.0"
] | 188 | 2017-08-31T23:38:23.000Z | 2022-03-29T18:06:00.000Z | coursedashboards/management/commands/simulate_low_enrollment.py | uw-it-aca/course-dashboards | 0f195f7233fc8e24e9ca0d2624ca288869e133ba | [
"Apache-2.0"
] | null | null | null | import logging
from django.core.management.base import BaseCommand
from coursedashboards.models import (
Course, CourseOffering,
User)
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Changes ESS 102 to have an enrollment of 3"
def handle(self, *args, **options):
ess_102 = Course.objects.get(curriculum="ESS", course_number=102)
bill = User.objects.get(uwnetid="bill")
offerings = CourseOffering.objects.filter(course=ess_102,
course__instructor__user=bill
)
for offering in offerings:
offering.current_enrollment = 3
offering.save()
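# Invoked through Django's management framework; the command name comes from
# this module's file name (sketch):
#   python manage.py simulate_low_enrollment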
| 29.52 | 79 | 0.607046 | 75 | 738 | 5.813333 | 0.626667 | 0.041284 | 0.055046 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027668 | 0.314363 | 738 | 24 | 80 | 30.75 | 0.833992 | 0 | 0 | 0 | 0 | 0 | 0.066396 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.176471 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35bac1e1e4d0c223c777e9290a4c9a28bd90aa8d | 1,511 | py | Python | micro.py | AkiraDemenech/WorldGameDict | bc6e7f86e0591599aee071a114f27ba8fc6b86e1 | [
"CC0-1.0"
] | null | null | null | micro.py | AkiraDemenech/WorldGameDict | bc6e7f86e0591599aee071a114f27ba8fc6b86e1 | [
"CC0-1.0"
] | null | null | null | micro.py | AkiraDemenech/WorldGameDict | bc6e7f86e0591599aee071a114f27ba8fc6b86e1 | [
"CC0-1.0"
] | null | null | null |
def build ():
return
class place:
#__name__ = None
def __init__ (self,*to,**here):
self.links = to
self.actions = here
self.run('__name__')
def __repr__ (self):
return self.__str__()
def __str__ (self):
s = ')'
for a in self.actions:
s = ',%s=%s' %(a,self.actions[a].__repr__()) + s
return self.__class__.__name__ + str(self.links).replace(')',s)
def act (self,action):
try:
return self.__getattribute__(action)
except AttributeError:
try:
a = self.actions[action]
if type(a) == str:
a = eval(a)
# a = a.__call__
			except KeyError:
				return NotImplemented	# unknown action: nothing to cache
			except Exception:
				pass	# eval failed: fall through with the raw string value
self.__setattr__(action,a)
return a
def run (self,action):
try:
a = self.__getattribute__(action)
try:
return a()
			except (AttributeError, TypeError):
				return a	# not callable: return the value itself
# return self.__getattribute__(action)()
except AttributeError:
try:
b = a = self.actions[action]
if type(a) == str:
a = eval(a)
b = a()
# return b
			except (AttributeError, TypeError):
				b = a	# not callable: keep the value as-is
# return a
except KeyError:
return NotImplemented
#except:
# print('An error has occurred calling %s: %s' %(action.__repr__(),self.actions[action].__repr__()))
# return
self.__setattr__(action,a)
return b
# print('Erro')
# if type(a) == function:
# return a()
# return a
def edit (self):
pass
def copy (self):
return place(*self.links,**self.actions)
a = place(12,1,2,art='lambda: print("Artes")',Artes=123,__name__=None)
a.run('art')
print(a.run('Artes'))
print(a.__name__)
print(a) | 19.881579 | 103 | 0.623428 | 208 | 1,511 | 4.163462 | 0.259615 | 0.088915 | 0.04157 | 0.064665 | 0.251732 | 0.196305 | 0.196305 | 0.078522 | 0.078522 | 0.078522 | 0 | 0.005957 | 0.222369 | 1,511 | 76 | 104 | 19.881579 | 0.731064 | 0.174719 | 0 | 0.351852 | 0 | 0 | 0.037247 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0.037037 | 0 | 0.055556 | 0.351852 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35bd4065d44b15a6d86c7e4846bf6823a2d3ba12 | 1,938 | py | Python | References and Tests/Tkinter Videos/ButtonFrame.py | 123prashanth123/Fault-Detection-System | fa59ca81ce4627a42648e654b55cdc505cde2103 | [
"MIT"
] | null | null | null | References and Tests/Tkinter Videos/ButtonFrame.py | 123prashanth123/Fault-Detection-System | fa59ca81ce4627a42648e654b55cdc505cde2103 | [
"MIT"
] | null | null | null | References and Tests/Tkinter Videos/ButtonFrame.py | 123prashanth123/Fault-Detection-System | fa59ca81ce4627a42648e654b55cdc505cde2103 | [
"MIT"
] | 1 | 2021-07-26T08:58:43.000Z | 2021-07-26T08:58:43.000Z | import tkinter as tk
# Tkinter Frame that handles the Buttons
class ButtonFrame(tk.Frame):
def __init__(self, master, VideoWidget=None, *args, **kwargs):
        """
        master: master widget upon which this works
        VideoWidget: Video Capture Frame
        """
        tk.Frame.__init__(self, master, *args, **kwargs)
self.master = master
self.VideoWidget = VideoWidget
self.button_height = 2
self.button_width = 20
# Start Button Setup
self.startButton = tk.Button(self, text="Start",
width=self.button_width, height=self.button_height,
background="#23EF13", activebackground="#9AF592", foreground="black",
relief="raised", command=self.do_start)
self.startButton.grid(row=0, column=0)
# Stop Button Setup
self.stopButton = tk.Button(self, text="Stop",
width=self.button_width, height=self.button_height,
background="#FFC500", activebackground="#FFE99E", foreground="black",
relief="raised", command=self.do_stop)
self.stopButton.grid(row=0, column=1)
# Quit Button Setup
self.quitButton = tk.Button(self, text="Quit",
width=self.button_width, height=self.button_height,
background="red", activebackground="#FCAEAE", foreground="black",
relief="raised", command=self.do_quit)
self.quitButton.grid(row=0, column=2)
# Start Button Callback
def do_start(self):
self.VideoWidget.start()
# Stop Button Callback
def do_stop(self):
self.VideoWidget.stop()
# Quit Button Callback
def do_quit(self):
self.master.master.destroy()
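# Minimal usage sketch. VideoWidget is assumed to expose start()/stop(), and
# the quit button assumes this frame's master is itself nested inside the
# root window (self.master.master):
#   root = tk.Tk()
#   container = tk.Frame(root)
#   container.pack()
#   ButtonFrame(container, VideoWidget=video_widget).pack()  # video_widget: hypothetical
#   root.mainloop()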
| 39.55102 | 105 | 0.553664 | 196 | 1,938 | 5.362245 | 0.306122 | 0.076118 | 0.060894 | 0.045671 | 0.262607 | 0.262607 | 0.262607 | 0.14843 | 0.14843 | 0 | 0 | 0.017309 | 0.344169 | 1,938 | 48 | 106 | 40.375 | 0.809599 | 0.081011 | 0 | 0.103448 | 0 | 0 | 0.050694 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0 | 0.034483 | 0 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35c0f58a33bf948c65617e24ad9c35b320c4e3d9 | 2,451 | py | Python | main/views.py | Tushar8645/Todo-List | 5f45c0c0f7d792f4476da9ce51db6a0039a5f704 | [
"MIT"
] | null | null | null | main/views.py | Tushar8645/Todo-List | 5f45c0c0f7d792f4476da9ce51db6a0039a5f704 | [
"MIT"
] | null | null | null | main/views.py | Tushar8645/Todo-List | 5f45c0c0f7d792f4476da9ce51db6a0039a5f704 | [
"MIT"
] | null | null | null | from django.shortcuts import get_object_or_404, redirect, render
from django.views import View
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.contrib import messages
from django.urls import reverse_lazy
from main.models import List
from main.form import ListForm
def successPage():
return reverse_lazy('main:home_view')
@method_decorator(csrf_exempt, name='dispatch')
class HomeView(View):
template_name = 'main/index.html'
def get(self, request):
all_items = List.objects.all().order_by('-pk')
context = {
'all_items': all_items,
}
return render(request, self.template_name, context)
def post(self, request):
form = ListForm(request.POST or None)
if not form.is_valid():
context = {
'form': form,
}
return render(request, self.template_name, context)
form.save()
messages.success(request, ('Item Has Been Added To List!!!'))
return redirect(successPage())
class DeleteView(View):
def get(self, request, pk):
item = get_object_or_404(List, pk=pk)
item.delete()
messages.success(request, ('Item Has Been Deleted!!!'))
return redirect(successPage())
class CrossOffView(View):
def get(self, request, pk):
item = get_object_or_404(List, pk=pk)
item.completed = True
item.save()
return redirect(successPage())
class UncrossView(View):
def get(self, request, pk):
item = get_object_or_404(List, pk=pk)
item.completed = False
item.save()
return redirect(successPage())
@method_decorator(csrf_exempt, name='dispatch')
class UpdateView(View):
template_name = 'main/update_view.html'
def get(self, request, pk):
item = get_object_or_404(List, pk=pk)
context = {
'item': item,
}
return render(request, self.template_name, context)
def post(self, request, pk):
item = get_object_or_404(List, pk=pk)
form = ListForm(request.POST, instance=item)
if not form.is_valid():
context = {
'form': form,
}
return render(request, self.template_name, context)
form.save()
messages.success(request, ('Item Has Been Edited!!!'))
return redirect(successPage())
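# These class-based views are assumed to be routed in main/urls.py, e.g. (sketch):
#   path('', HomeView.as_view(), name='home_view'),
#   path('delete/<int:pk>/', DeleteView.as_view(), name='delete_view'),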
| 25.010204 | 69 | 0.627499 | 296 | 2,451 | 5.060811 | 0.246622 | 0.032043 | 0.044059 | 0.056075 | 0.538051 | 0.477303 | 0.455274 | 0.399199 | 0.399199 | 0.399199 | 0 | 0.009972 | 0.263566 | 2,451 | 97 | 70 | 25.268041 | 0.819945 | 0 | 0 | 0.477612 | 0 | 0 | 0.068135 | 0.008568 | 0 | 0 | 0 | 0 | 0 | 1 | 0.119403 | false | 0 | 0.119403 | 0.014925 | 0.492537 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35c25f2922eaa06bfb72cbe9ee79962f4e85ac03 | 4,555 | py | Python | taskmage/db/db.py | mozey/taskmage | 6a01c98d71e9e034e31407df7d8b31a082bc91e5 | [
"MIT"
] | null | null | null | taskmage/db/db.py | mozey/taskmage | 6a01c98d71e9e034e31407df7d8b31a082bc91e5 | [
"MIT"
] | null | null | null | taskmage/db/db.py | mozey/taskmage | 6a01c98d71e9e034e31407df7d8b31a082bc91e5 | [
"MIT"
] | null | null | null | # http://stackoverflow.com/questions/6290162/how-to-automatically-reflect-database-to-sqlalchemy-declarative
from sqlalchemy import create_engine, MetaData
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import orm
from contextlib import contextmanager
from collections import OrderedDict
import json
import os
import re
from taskmage import config
from datetime import datetime
from sqlalchemy.ext.declarative import DeclarativeMeta
# The default database path is ~/.taskmage/taskmage.db,
# override this by setting taskmage.data.location=path/to/db/directory
# in ~/.taskmagerc (the database file name is appended to that directory)
home_dir = os.path.expanduser('~')
db_name = "taskmage.db"
if config.testing:
# It's annoying if the test database location is changing all the time.
# db_path = os.path.join(tempfile.gettempdir(), db_name)
# Rather used a fixed location
db_name = "taskmage.testing.db"
app_path = os.path.join(home_dir, ".taskmage")
db_path = os.path.join(app_path, db_name)
# Try to override default database location
try:
db_path_override = re.search(
        r'taskmage\.data\.location=(.*)',
open(os.path.join(home_dir, ".taskmagerc")).read(),
)
if db_path_override:
db_path = os.path.join(db_path_override.group(1), db_name)
except FileNotFoundError as e:
pass
if config.testing:
print("sqlite3", db_path)
timestamp_format = "%Y-%m-%d %H:%M:%S"
# ..............................................................................
# Serialize SqlAlchemy result to JSON
# http://stackoverflow.com/a/10664192/639133
class AlchemyEncoder(json.JSONEncoder):
def default(self, obj):
fields = {}
if isinstance(obj.__class__, DeclarativeMeta):
# a SQLAlchemy class
for field in [x for x in dir(obj) if
not x.startswith('_') and x != 'metadata']:
data = obj.__getattribute__(field)
try:
if isinstance(data, datetime):
data = data.strftime(timestamp_format)
else:
# this will fail on non encode-able values,
# like other classes
json.dumps(data)
fields[field] = data
except TypeError:
fields[field] = None
else:
fields = json.JSONEncoder.default(self, obj)
# Modified to always return data ordered by key
return OrderedDict(sorted(fields.items()))
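# Example (sketch): serialize any mapped row to key-ordered JSON.
#   json.dumps(task_row, cls=AlchemyEncoder)  # task_row: hypothetical query result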
# ..............................................................................
class MyBase:
# From event listeners post link below, doesn't work.
# __abstract__ = True
def __repr__(self):
return json.dumps(self, cls=AlchemyEncoder)
# Used for adding event listeners to all models
# http://stackoverflow.com/a/13979333/639133
    @classmethod
    def _all_subclasses(cls):
        """ Get all subclasses of my_class, descending.
        So, if A is a subclass of B is a subclass of my_class,
        this will include A and B. (Does not include my_class) """
        children = cls.__subclasses__()
        result = []
        while children:
            child = children.pop()  # 'child' avoids shadowing the builtin next()
            result.append(child)
            children.extend(child.__subclasses__())
        return result
# ..............................................................................
Base = declarative_base()
# Create an engine and get the metadata
engine = create_engine(
"sqlite:///{}".format(db_path),
# Write out all sql statements
echo=config.echo,
)
# http://docs.sqlalchemy.org/en/rel_0_9/core/constraints.html
convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
metadata = MetaData(bind=engine, naming_convention=convention)
session_factory = orm.sessionmaker(bind=engine)
# ..............................................................................
# Use get_session when not using threads.
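# Example (sketch; SomeModel stands in for any mapped class):
#   with get_session() as session:
#       rows = session.query(SomeModel).all()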
@contextmanager
def get_session():
try:
db_session = session_factory()
assert(isinstance(db_session, orm.Session))
yield db_session
except Exception as e:
raise e
else:
# This gets executed if there was no exception
pass
finally:
db_session.close()
| 32.077465 | 108 | 0.601976 | 533 | 4,555 | 4.97561 | 0.386492 | 0.0181 | 0.018854 | 0.021116 | 0.073152 | 0.016591 | 0.016591 | 0 | 0 | 0 | 0 | 0.012059 | 0.235346 | 4,555 | 141 | 109 | 32.304965 | 0.749354 | 0.326015 | 0 | 0.114943 | 0 | 0 | 0.100629 | 0.059914 | 0 | 0 | 0 | 0 | 0.011494 | 1 | 0.045977 | false | 0.022989 | 0.126437 | 0.011494 | 0.229885 | 0.011494 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35c47dafeebb19ee587dafd46dfb9444757360e8 | 352 | py | Python | Packs/CommonScripts/Scripts/DumpJSON/DumpJSON.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/CommonScripts/Scripts/DumpJSON/DumpJSON.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/CommonScripts/Scripts/DumpJSON/DumpJSON.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z | import json
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def main():
key = demisto.args()['key']
obj_str = json.dumps(demisto.get(demisto.context(), key))
demisto.setContext('JsonStr', obj_str)
return_results(obj_str)
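# Illustrative behaviour: with incident context {"a": {"b": 1}} and key "a.b",
# demisto.get resolves the dotted path, so JsonStr is set to the string "1".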
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 22 | 61 | 0.6875 | 44 | 352 | 5.136364 | 0.613636 | 0.079646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02069 | 0.176136 | 352 | 15 | 62 | 23.466667 | 0.758621 | 0.059659 | 0 | 0 | 0 | 0 | 0.112805 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.3 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35c65615d99829e1776ae991327d45a23935aff1 | 6,973 | py | Python | release/scripts/addons/io_scene_gltf2/io/exp/gltf2_io_get.py | noorbeast/BlenderSource | 65ebecc5108388965678b04b43463b85f6c69c1d | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2019-03-20T13:10:46.000Z | 2019-05-15T20:00:31.000Z | engine/2.80/scripts/addons/io_scene_gltf2/io/exp/gltf2_io_get.py | byteinc/Phasor | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | [
"Unlicense"
] | null | null | null | engine/2.80/scripts/addons/io_scene_gltf2/io/exp/gltf2_io_get.py | byteinc/Phasor | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | [
"Unlicense"
] | null | null | null | # Copyright 2018 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Imports
#
import os
#
# Globals
#
#
# Functions
#
def get_material_requires_texcoords(glTF, index):
"""Query function, if a material "needs" texture coordinates. This is the case, if a texture is present and used."""
if glTF.materials is None:
return False
materials = glTF.materials
if index < 0 or index >= len(materials):
return False
material = materials[index]
# General
if material.emissive_texture is not None:
return True
if material.normal_texture is not None:
return True
if material.occlusion_texture is not None:
return True
# Metallic roughness
if material.pbr_metallic_roughness is not None and \
material.pbr_metallic_roughness.base_color_texture is not None:
return True
if material.pbr_metallic_roughness is not None and \
material.pbr_metallic_roughness.metallic_roughness_texture is not None:
return True
return False
def get_material_requires_normals(glTF, index):
"""
Query function, if a material "needs" normals. This is the case, if a texture is present and used.
At point of writing, same function as for texture coordinates.
"""
return get_material_requires_texcoords(glTF, index)
def get_material_index(glTF, name):
"""Return the material index in the glTF array."""
if name is None:
return -1
if glTF.materials is None:
return -1
index = 0
for material in glTF.materials:
if material.name == name:
return index
index += 1
return -1
def get_mesh_index(glTF, name):
"""Return the mesh index in the glTF array."""
if glTF.meshes is None:
return -1
index = 0
for mesh in glTF.meshes:
if mesh.name == name:
return index
index += 1
return -1
def get_skin_index(glTF, name, index_offset):
"""Return the skin index in the glTF array."""
if glTF.skins is None:
return -1
skeleton = get_node_index(glTF, name)
index = 0
for skin in glTF.skins:
if skin.skeleton == skeleton:
return index + index_offset
index += 1
return -1
def get_camera_index(glTF, name):
"""Return the camera index in the glTF array."""
if glTF.cameras is None:
return -1
index = 0
for camera in glTF.cameras:
if camera.name == name:
return index
index += 1
return -1
def get_light_index(glTF, name):
"""Return the light index in the glTF array."""
if glTF.extensions is None:
return -1
extensions = glTF.extensions
if extensions.get('KHR_lights_punctual') is None:
return -1
khr_lights_punctual = extensions['KHR_lights_punctual']
if khr_lights_punctual.get('lights') is None:
return -1
lights = khr_lights_punctual['lights']
index = 0
for light in lights:
if light['name'] == name:
return index
index += 1
return -1
def get_node_index(glTF, name):
"""Return the node index in the glTF array."""
if glTF.nodes is None:
return -1
index = 0
for node in glTF.nodes:
if node.name == name:
return index
index += 1
return -1
def get_scene_index(glTF, name):
"""Return the scene index in the glTF array."""
if glTF.scenes is None:
return -1
index = 0
for scene in glTF.scenes:
if scene.name == name:
return index
index += 1
return -1
def get_texture_index(glTF, filename):
"""Return the texture index in the glTF array by a given file path."""
if glTF.textures is None:
return -1
image_index = get_image_index(glTF, filename)
if image_index == -1:
return -1
for texture_index, texture in enumerate(glTF.textures):
if image_index == texture.source:
return texture_index
return -1
def get_image_index(glTF, filename):
"""Return the image index in the glTF array."""
if glTF.images is None:
return -1
image_name = get_image_name(filename)
for index, current_image in enumerate(glTF.images):
if image_name == current_image.name:
return index
return -1
def get_image_name(filename):
"""Return user-facing, extension-agnostic name for image."""
return os.path.splitext(filename)[0]
def get_scalar(default_value, init_value=0.0):
"""Return scalar with a given default/fallback value."""
return_value = init_value
if default_value is None:
return return_value
return_value = default_value
return return_value
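# For example: get_scalar(None, 1.0) returns the fallback 1.0, while
# get_scalar(0.25, 1.0) returns 0.25.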
def get_vec2(default_value, init_value=None):
    """Return vec2 with a given default/fallback value."""
    # A None sentinel avoids the mutable-default pitfall: a list default
    # would be shared across calls and mutated by the loop below.
    return_value = [0.0, 0.0] if init_value is None else init_value
    if default_value is None or len(default_value) < 2:
        return return_value
    index = 0
    for number in default_value:
        return_value[index] = number
        index += 1
        if index == 2:
            return return_value
    return return_value
def get_vec3(default_value, init_value=None):
    """Return vec3 with a given default/fallback value."""
    # A None sentinel avoids the shared mutable-default pitfall (see get_vec2).
    return_value = [0.0, 0.0, 0.0] if init_value is None else init_value
    if default_value is None or len(default_value) < 3:
        return return_value
    index = 0
    for number in default_value:
        return_value[index] = number
        index += 1
        if index == 3:
            return return_value
    return return_value
def get_vec4(default_value, init_value=None):
    """Return vec4 with a given default/fallback value."""
    # A None sentinel avoids the shared mutable-default pitfall (see get_vec2).
    return_value = [0.0, 0.0, 0.0, 1.0] if init_value is None else init_value
    if default_value is None or len(default_value) < 4:
        return return_value
    index = 0
    for number in default_value:
        return_value[index] = number
        index += 1
        if index == 4:
            return return_value
    return return_value
def get_index(elements, name):
"""Return index of a glTF element by a given name."""
if elements is None or name is None:
return -1
index = 0
for element in elements:
if isinstance(element, dict):
if element.get('name') == name:
return index
else:
if element.name == name:
return index
index += 1
return -1
| 21.996845 | 120 | 0.635594 | 968 | 6,973 | 4.456612 | 0.158058 | 0.038943 | 0.041725 | 0.039175 | 0.544506 | 0.470097 | 0.401484 | 0.33032 | 0.248957 | 0.248957 | 0 | 0.017839 | 0.284526 | 6,973 | 316 | 121 | 22.066456 | 0.846863 | 0.228883 | 0 | 0.52381 | 0 | 0 | 0.011041 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.10119 | false | 0 | 0.005952 | 0 | 0.440476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35c6af208eb2abc9e0deb84c61ed25aa5d79412e | 8,402 | py | Python | annofabcli/project_member/put_project_members.py | kurusugawa-computer/annofab-cli | 8edad492d439bc8fe64e9471464f545d07aba8b7 | [
"MIT"
] | 9 | 2019-07-22T23:54:05.000Z | 2020-11-05T06:26:04.000Z | annofabcli/project_member/put_project_members.py | kurusugawa-computer/annofab-cli | 8edad492d439bc8fe64e9471464f545d07aba8b7 | [
"MIT"
] | 389 | 2019-07-03T04:39:11.000Z | 2022-03-28T14:06:11.000Z | annofabcli/project_member/put_project_members.py | kurusugawa-computer/annofab-cli | 8edad492d439bc8fe64e9471464f545d07aba8b7 | [
"MIT"
] | 1 | 2021-08-30T14:22:04.000Z | 2021-08-30T14:22:04.000Z | import argparse
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional
import more_itertools
import numpy
import pandas
import requests
from annofabapi.models import ProjectMemberRole, ProjectMemberStatus
from dataclasses_json import DataClassJsonMixin
import annofabcli
from annofabcli import AnnofabApiFacade
from annofabcli.common.cli import AbstractCommandLineInterface, ArgumentParser, build_annofabapi_resource_and_login
logger = logging.getLogger(__name__)
@dataclass
class Member(DataClassJsonMixin):
"""
    A project member to register.
"""
user_id: str
member_role: ProjectMemberRole
sampling_inspection_rate: Optional[int]
sampling_acceptance_rate: Optional[int]
class PutProjectMembers(AbstractCommandLineInterface):
"""
プロジェクトメンバをCSVで登録する。
"""
@staticmethod
def find_member(members: List[Dict[str, Any]], user_id: str) -> Optional[Dict[str, Any]]:
member = more_itertools.first_true(members, default=None, pred=lambda e: e["user_id"] == user_id)
return member
@staticmethod
def member_exists(members: List[Dict[str, Any]], user_id) -> bool:
return PutProjectMembers.find_member(members, user_id) is not None
def invite_project_member(self, project_id, member: Member, old_project_members: List[Dict[str, Any]]):
old_member = self.find_member(old_project_members, member.user_id)
last_updated_datetime = old_member["updated_datetime"] if old_member is not None else None
request_body = {
"member_status": ProjectMemberStatus.ACTIVE.value,
"member_role": member.member_role.value,
"sampling_inspection_rate": member.sampling_inspection_rate,
"sampling_acceptance_rate": member.sampling_acceptance_rate,
"last_updated_datetime": last_updated_datetime,
}
updated_project_member = self.service.api.put_project_member(
project_id, member.user_id, request_body=request_body
)[0]
return updated_project_member
def delete_project_member(self, project_id, deleted_member: Dict[str, Any]):
request_body = {
"member_status": ProjectMemberStatus.INACTIVE.value,
"member_role": deleted_member["member_role"],
"last_updated_datetime": deleted_member["updated_datetime"],
}
updated_project_member = self.service.api.put_project_member(
project_id, deleted_member["user_id"], request_body=request_body
)[0]
return updated_project_member
def put_project_members(self, project_id: str, members: List[Member], delete: bool = False):
"""
プロジェクトメンバを一括で登録する。
Args:
project_id: プロジェクトメンバの登録先のプロジェクトのプロジェクトID
members: 登録するプロジェクトメンバのList
delete: Trueならば、membersにないメンバを、対象プロジェクトから削除する。
"""
super().validate_project(project_id, [ProjectMemberRole.OWNER])
organization_name = self.facade.get_organization_name_from_project_id(project_id)
organization_members = self.service.wrapper.get_all_organization_members(organization_name)
old_project_members = self.service.wrapper.get_all_project_members(project_id)
project_title = self.facade.get_project_title(project_id)
count_invite_members = 0
# プロジェクトメンバを登録
logger.info(f"{project_title} に、{len(members)} 件のプロジェクトメンバを登録します。")
for member in members:
if member.user_id == self.service.api.login_user_id:
logger.debug(f"ユーザ '{member.user_id}'は自分自身なので、登録しません。")
continue
if not self.member_exists(organization_members, member.user_id):
logger.warning(f"ユーザ '{member.user_id}' は、" f"'{organization_name}' 組織の組織メンバでないため、登録できませんでした。")
continue
message_for_confirm = (
f"ユーザ '{member.user_id}'を、{project_title} プロジェクトのメンバに登録しますか?" f"member_role={member.member_role.value}"
)
if not self.confirm_processing(message_for_confirm):
continue
# メンバを登録
try:
self.invite_project_member(project_id, member, old_project_members)
logger.debug(
f"user_id = {member.user_id}, member_role = {member.member_role.value} のユーザをプ" f"ロジェクトメンバに登録しました。"
)
count_invite_members += 1
except requests.exceptions.HTTPError as e:
logger.warning(e)
logger.warning(
f"プロジェクトメンバの登録に失敗しました。" f"user_id = {member.user_id}, member_role = {member.member_role.value}"
)
logger.info(f"{project_title} に、{count_invite_members} / {len(members)} 件のプロジェクトメンバを登録しました。")
# プロジェクトメンバを削除
if delete:
user_id_list = [e.user_id for e in members]
# 自分自身は削除しないようにする
deleted_members = [
e
for e in old_project_members
if (e["user_id"] not in user_id_list and e["user_id"] != self.service.api.login_user_id)
]
count_delete_members = 0
logger.info(f"{project_title} から、{len(deleted_members)} 件のプロジェクトメンバを削除します。")
for deleted_member in deleted_members:
message_for_confirm = f"ユーザ '{deleted_member['user_id']}'を、" f"{project_title} のプロジェクトメンバから削除しますか?"
if not self.confirm_processing(message_for_confirm):
continue
try:
self.delete_project_member(project_id, deleted_member)
logger.debug(f"ユーザ '{deleted_member['user_id']}' をプロジェクトメンバから削除しました。")
count_delete_members += 1
except requests.exceptions.HTTPError as e:
logger.warning(e)
logger.warning(f"プロジェクトメンバの削除に失敗しました。user_id = '{deleted_member['user_id']}' ")
logger.info(f"{project_title} から {count_delete_members} / {len(deleted_members)} 件の" f"プロジェクトメンバを削除しました。")
@staticmethod
def get_members_from_csv(csv_path: Path) -> List[Member]:
def create_member(e):
return Member(
user_id=e.user_id,
member_role=ProjectMemberRole(e.member_role),
sampling_inspection_rate=e.sampling_inspection_rate,
sampling_acceptance_rate=e.sampling_acceptance_rate,
)
df = pandas.read_csv(
str(csv_path),
sep=",",
header=None,
names=("user_id", "member_role", "sampling_inspection_rate", "sampling_acceptance_rate"),
).replace({numpy.nan: None})
members = [create_member(e) for e in df.itertuples()]
return members
def main(self):
args = self.args
members = self.get_members_from_csv(Path(args.csv))
self.put_project_members(args.project_id, members=members, delete=args.delete)
def main(args):
service = build_annofabapi_resource_and_login(args)
facade = AnnofabApiFacade(service)
PutProjectMembers(service, facade, args).main()
def parse_args(parser: argparse.ArgumentParser):
argument_parser = ArgumentParser(parser)
argument_parser.add_project_id()
parser.add_argument(
"--csv",
type=str,
required=True,
help=(
"プロジェクトメンバが記載されたCVファイルのパスを指定してください。"
"CSVのフォーマットは、「1列目:user_id(required), 2列目:member_role(required), "
"3列目:sampling_inspection_rate, 4列目:sampling_acceptance_rate, ヘッダ行なし, カンマ区切り」です。"
"member_roleは ``owner``, ``worker``, ``accepter``, ``training_data_user`` のいずれかです。"
"sampling_inspection_rate, sampling_acceptance_rate を省略した場合は未設定になります。"
"ただし自分自身は登録しません。"
),
)
parser.add_argument("--delete", action="store_true", help="CSVファイルに記載されていないプロジェクトメンバを削除します。ただし自分自身は削除しません。")
parser.set_defaults(subcommand_func=main)
def add_parser(subparsers: Optional[argparse._SubParsersAction] = None):
subcommand_name = "put"
subcommand_help = "プロジェクトメンバを登録する。"
description = "プロジェクトメンバを登録する。"
epilog = "オーナロールを持つユーザで実行してください。"
parser = annofabcli.common.cli.add_parser(subparsers, subcommand_name, subcommand_help, description, epilog=epilog)
parse_args(parser)
return parser
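A hedged usage sketch for the subcommand above. The CSV follows the documented four-column, headerless format; the user IDs, rates, and project ID are made up, and the `annofabcli project_member put` spelling of the top-level command is assumed from the repository layout:

    # members.csv (no header row; empty trailing columns leave the rates unset)
    alice,owner,,
    bob,worker,80,100

    annofabcli project_member put --project_id prj1 --csv members.csv --delete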
| 38.541284 | 119 | 0.661866 | 911 | 8,402 | 5.812294 | 0.216246 | 0.037394 | 0.031728 | 0.016619 | 0.305194 | 0.231161 | 0.139377 | 0.129178 | 0.117469 | 0.098206 | 0 | 0.001577 | 0.24518 | 8,402 | 217 | 120 | 38.718894 | 0.833333 | 0.028327 | 0 | 0.146497 | 0 | 0 | 0.195938 | 0.104657 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070064 | false | 0 | 0.089172 | 0.012739 | 0.242038 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35cb59bbbb9a78cb660454a64e8dda44348f1716 | 825 | py | Python | Log1/HiPyQt3/HiPyQt31QLabel.py | codenara/PyQt1 | 1550920577188e4d318b47fc69ba5ee243092d88 | [
"MIT"
] | null | null | null | Log1/HiPyQt3/HiPyQt31QLabel.py | codenara/PyQt1 | 1550920577188e4d318b47fc69ba5ee243092d88 | [
"MIT"
] | null | null | null | Log1/HiPyQt3/HiPyQt31QLabel.py | codenara/PyQt1 | 1550920577188e4d318b47fc69ba5ee243092d88 | [
"MIT"
] | null | null | null | # HiPyQt version 3.1
# use QLabel
# use QPushButton

import sys
from PyQt5.QtWidgets import *


class MyWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Hi PyQt")
        self.setGeometry(50, 50, 400, 300)

        # QLabel
        self.label = QLabel("QLabel", self)
        self.label.move(20, 60)
        self.label.resize(150, 30)

        # QPushButton: toggles the label text on each click
        self.button = QPushButton("Label", self)
        self.button.move(20, 20)
        self.button.clicked.connect(self.button_clicked)

    def button_clicked(self):
        if self.label.text() == "":
            self.label.setText("QLabel")
        else:
            self.label.clear()


if __name__ == "__main__":
    app = QApplication(sys.argv)
    myWindow = MyWindow()
    myWindow.show()
    app.exec()
| 22.297297 | 56 | 0.596364 | 95 | 825 | 4.989474 | 0.505263 | 0.113924 | 0.07173 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 0.275152 | 825 | 36 | 57 | 22.916667 | 0.749164 | 0.077576 | 0 | 0 | 0 | 0 | 0.042384 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.086957 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35cbcfc2f688c564b54be33ab28600f85f779b4b | 3,340 | py | Python | fix_space_in_path.py | stefs/fix-space-in-path | 77c8e5d4bbeb430f114d88f0ac559df581bea4fc | [
"MIT"
] | null | null | null | fix_space_in_path.py | stefs/fix-space-in-path | 77c8e5d4bbeb430f114d88f0ac559df581bea4fc | [
"MIT"
] | null | null | null | fix_space_in_path.py | stefs/fix-space-in-path | 77c8e5d4bbeb430f114d88f0ac559df581bea4fc | [
"MIT"
] | null | null | null | import os
from typing import Iterable, List, Tuple


class NameFix(object):
    def __init__(
            self,
            root_directory: str,
            exclude: Iterable[str],
            max_directory_length: int,
            max_filename_length: int
    ) -> None:
        self.root_directory = root_directory
        exclude = [os.path.join(self.root_directory, entry) for entry in exclude]
        exclude = [entry for entry in exclude if os.path.exists(entry)]
        exclude = [os.path.abspath(entry) for entry in exclude]
        self.exclude_dirs = [entry.lower() for entry in exclude if os.path.isdir(entry)]
        self.exclude_files = [entry.lower() for entry in exclude if os.path.isfile(entry)]
        self.max_directory_length = max_directory_length
        self.max_filename_length = max_filename_length
        self.log: List[Tuple[str, str]] = []

    def run(self) -> None:
        # rename things
        self.log.clear()
        for root, directories, files in os.walk(self.root_directory):
            for directory in directories:
                self._rename(broken=os.path.join(root, directory) + os.sep,
                             fixed=os.path.join(root, self._fix_dir(directory)) + os.sep)
            for file in files:
                # skip files on the exclusion list (compare full, lower-cased paths)
                if os.path.join(root, file).lower() in self.exclude_files:
                    continue
                self._rename(broken=os.path.join(root, file),
                             fixed=os.path.join(root, self._fix_file(file)))
        # print log
        width = len(str(len(self.log)))
        print('---- RENAME LOG BEGIN ----')
        for index, (before, after) in enumerate(self.log):
            print(f' {str(index + 1).rjust(width)} | BEFORE | "{before}"')
            print(f' {str().rjust(width)} | AFTER  | "{after}"')
        print('---- RENAME LOG END ----')

    def _fix_file(
            self,
            name: str
    ) -> str:
        # fix filename characters
        name, extension = os.path.splitext(name)
        name = name.strip(' ')
        # fix filename length
        extension = extension.strip(' ')
        max_length = self.max_filename_length - len(extension)
        name = name[:max_length]
        # fix filename characters again (truncation may expose trailing spaces)
        name = name.strip(' ')
        # done
        return name + extension

    def _fix_dir(
            self,
            name: str
    ) -> str:
        return name.strip(' ')[:self.max_directory_length]

    def _rename(
            self,
            broken: str,
            fixed: str
    ) -> None:
        if broken == fixed:
            return
        if any(broken.lower().startswith(entry) for entry in self.exclude_dirs):
            return
        print(f' BROKEN: "{broken}"\n FIX:    "{fixed}"')
        if os.path.exists(fixed):
            print('Cannot fix, target already exists.')
        elif input('Rename this? ') == 'y':
            os.rename(broken, fixed)
            self.log.append((broken, fixed))
            print('Fixed.\n')
        else:
            print('Not fixed.\n')


def main() -> None:
    NameFix(
        root_directory='D:\\',
        exclude=['System Volume Information',
                 '$Recycle.Bin',
                 'RECYCLE?'],
        max_directory_length=255,
        max_filename_length=134
    ).run()


if __name__ == '__main__':
    main()
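A quick illustration of what `_fix_file` does, with a hypothetical 10-character filename limit:

    fixer = NameFix(root_directory='.', exclude=[],
                    max_directory_length=255, max_filename_length=10)
    # ' report .txt' -> name stripped and truncated to fit, extension kept:
    assert fixer._fix_file(' report .txt') == 'report.txt'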
| 34.081633 | 95 | 0.548503 | 385 | 3,340 | 4.615584 | 0.233766 | 0.040518 | 0.033765 | 0.047833 | 0.174451 | 0.116488 | 0.116488 | 0.039392 | 0.039392 | 0 | 0 | 0.003118 | 0.327844 | 3,340 | 97 | 96 | 34.43299 | 0.788419 | 0.030539 | 0 | 0.185185 | 0 | 0 | 0.096565 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.024691 | 0.012346 | 0.17284 | 0.098765 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35d05b3e58e32622310f06450fbef9888f0fd8eb | 2,474 | py | Python | python/keras_small/module_metrics_separate_regions.py | mbarbie1/deepSlice | 5368f02f55ecd709e4746155888528617fc34c09 | [
"Apache-2.0"
] | 2 | 2021-05-17T22:53:21.000Z | 2021-06-25T02:25:58.000Z | python/keras_small/module_metrics_separate_regions.py | mbarbie1/DeepSlice | 5368f02f55ecd709e4746155888528617fc34c09 | [
"Apache-2.0"
] | null | null | null | python/keras_small/module_metrics_separate_regions.py | mbarbie1/DeepSlice | 5368f02f55ecd709e4746155888528617fc34c09 | [
"Apache-2.0"
] | null | null | null | """
A weighted version of categorical_crossentropy for keras (2.0.6). This lets you apply a weight to unbalanced classes.
@url: https://gist.github.com/wassname/ce364fddfc8a025bfab4348cf5de852d
@author: wassname
"""
from keras import backend as K


def weighted_categorical_crossentropy(weights):
    """
    A weighted version of keras.objectives.categorical_crossentropy

    Variables:
        weights: numpy array of shape (C,) where C is the number of classes

    Usage:
        weights = np.array([0.5, 2, 10])  # Class one at 0.5, class 2 twice the normal weights, class 3 10x.
        loss = weighted_categorical_crossentropy(weights)
        model.compile(loss=loss, optimizer='adam')
    """
    weights = K.variable(weights)

    def loss(y_true, y_pred):
        # scale predictions so that the class probas of each sample sum to 1
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        # clip to prevent NaN's and Inf's
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        # calc: per-class cross-entropy, weighted, summed over the class axis
        loss = y_true * K.log(y_pred) * weights
        loss = -K.sum(loss, -1)
        return loss

    return loss


import numpy as np
from keras.activations import softmax
from keras.objectives import categorical_crossentropy

# init tests
samples = 3
maxlen = 4
vocab = 5

y_pred_n = np.random.random((samples, maxlen, vocab)).astype(K.floatx())
y_pred = K.variable(y_pred_n)
y_pred = softmax(y_pred)

y_true_n = np.random.random((samples, maxlen, vocab)).astype(K.floatx())
y_true = K.variable(y_true_n)
y_true = softmax(y_true)

# test 1: with weights of one it matches plain categorical_crossentropy
weights = np.ones(vocab)
loss_weighted = weighted_categorical_crossentropy(weights)(y_true, y_pred).eval(session=K.get_session())
loss = categorical_crossentropy(y_true, y_pred).eval(session=K.get_session())
np.testing.assert_almost_equal(loss_weighted, loss)
print('OK test1')

# test 2: with weights below one it differs from (is smaller than) categorical_crossentropy
weights = np.array([0.1, 0.3, 0.5, 0.3, 0.5])
loss_weighted = weighted_categorical_crossentropy(weights)(y_true, y_pred).eval(session=K.get_session())
loss = categorical_crossentropy(y_true, y_pred).eval(session=K.get_session())
np.testing.assert_array_less(loss_weighted, loss)
print('OK test2')

# same keras version as I tested it on?
import keras
assert keras.__version__.split('.')[:2] == ['2', '0'], 'this was tested on keras 2.0.6 you have %s' % keras.__version__
print('OK version') | 34.84507 | 117 | 0.721504 | 392 | 2,474 | 4.392857 | 0.32398 | 0.043554 | 0.017422 | 0.029036 | 0.304297 | 0.235772 | 0.235772 | 0.235772 | 0.235772 | 0.235772 | 0 | 0.027053 | 0.163298 | 2,474 | 71 | 118 | 34.84507 | 0.804831 | 0.359337 | 0 | 0.171429 | 0 | 0 | 0.046557 | 0 | 0 | 0 | 0 | 0 | 0.085714 | 1 | 0.057143 | false | 0 | 0.142857 | 0 | 0.257143 | 0.085714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
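For reference, the loss implemented above is the per-sample weighted categorical cross-entropy; with class weights $w_c$ it reads, in LaTeX,

    \ell(y, \hat{y}) = -\sum_{c=1}^{C} w_c \, y_c \log \hat{y}_c

With $w_c = 1$ for every class this reduces to the standard categorical cross-entropy (test 1 above), and with all weights below 1 it is strictly smaller (test 2 above).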
35d1681b32652ae64d777846da7fc45306f656ec | 476 | py | Python | ___Python/Angela/PyKurs/p07_file_io/m01_count_files.py | uvenil/PythonKurs201806 | 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | [
"Apache-2.0"
] | null | null | null | ___Python/Angela/PyKurs/p07_file_io/m01_count_files.py | uvenil/PythonKurs201806 | 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | [
"Apache-2.0"
] | null | null | null | ___Python/Angela/PyKurs/p07_file_io/m01_count_files.py | uvenil/PythonKurs201806 | 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
# Count the number of folders inside a folder (including all subfolders)
def count_dirs(path):
    # Determine the direct subfolders of the folder `path`
    subdirs = [subdir for subdir in path.iterdir() if subdir.is_dir()]
    count = 1  # the folder (Spielwiese) itself
    for subdir in subdirs:
        count += count_dirs(subdir)  # recurse into each individual child
    return count


count = count_dirs(Path(r"O:\Spielwiese"))
print(count)
# Iterative solution | 29.75 | 123 | 0.701681 | 64 | 476 | 5.15625 | 0.625 | 0.081818 | 0.078788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002695 | 0.220588 | 476 | 16 | 124 | 29.75 | 0.886792 | 0.371849 | 0 | 0 | 0 | 0 | 0.046595 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.333333 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
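The record's final comment announces an iterative solution that is not included; a minimal sketch of what such a variant could look like (the function name is made up, the semantics match count_dirs above):

    def count_dirs_iterative(path):
        count = 0
        stack = [path]
        while stack:
            current = stack.pop()
            count += 1  # count the current folder itself
            stack.extend(child for child in current.iterdir() if child.is_dir())
        return count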
35d5088961c23d2c2fc51a5d1011b321ec5babe7 | 3,898 | py | Python | dusty/systems/docker/__init__.py | gamechanger/dusty | dd9778e3a4f0c623209e53e98aa9dc1fe76fc309 | [
"MIT"
] | 421 | 2015-06-02T16:29:59.000Z | 2021-06-03T18:44:42.000Z | dusty/systems/docker/__init__.py | gamechanger/dusty | dd9778e3a4f0c623209e53e98aa9dc1fe76fc309 | [
"MIT"
] | 404 | 2015-06-02T20:23:42.000Z | 2019-08-21T16:59:41.000Z | dusty/systems/docker/__init__.py | gamechanger/dusty | dd9778e3a4f0c623209e53e98aa9dc1fe76fc309 | [
"MIT"
] | 16 | 2015-06-16T17:21:02.000Z | 2020-03-27T02:27:09.000Z | import os
import docker
import logging

from ... import constants
from ...log import log_to_client
from ...memoize import memoized
from ...subprocess import check_output_demoted
from ...compiler.spec_assembler import get_specs


def exec_in_container(container, command, *args):
    client = get_docker_client()
    exec_instance = client.exec_create(container['Id'],
                                       ' '.join([command] + list(args)))
    return client.exec_start(exec_instance['Id'])


def get_dusty_images():
    """Returns all images listed in dusty specs (apps + services), in the form
    repository:tag. Tag will be set to latest if no tag is specified in the specs."""
    specs = get_specs()
    dusty_image_names = [spec['image'] for spec in specs['apps'].values() + specs['services'].values() if 'image' in spec]
    dusty_images = set([name if ':' in name else "{}:latest".format(name) for name in dusty_image_names])
    return dusty_images
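For instance, the tag-normalization step behaves like this (the image names are hypothetical):

    # specs listing images 'ubuntu' and 'redis:3'
    get_dusty_images()   # -> {'ubuntu:latest', 'redis:3'}; only the untagged name gets ':latest'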
def get_dusty_container_name(service_name):
    return 'dusty_{}_1'.format(service_name)


@memoized
def get_docker_env():
    env = {}
    output = check_output_demoted(['docker-machine', 'env', constants.VM_MACHINE_NAME, '--shell', 'bash'], redirect_stderr=True)
    for line in output.splitlines():
        if not line.strip().startswith('export'):
            continue
        k, v = line.strip().split()[1].split('=')
        v = v.replace('"', '')
        env[k] = v
    return env


@memoized
def get_docker_client():
    """Ripped off and slightly modified based on docker-py's
    kwargs_from_env utility function."""
    env = get_docker_env()
    host, cert_path, tls_verify = env['DOCKER_HOST'], env['DOCKER_CERT_PATH'], env['DOCKER_TLS_VERIFY']

    params = {'base_url': host.replace('tcp://', 'https://'),
              'timeout': None,
              'version': 'auto'}
    if tls_verify and cert_path:
        params['tls'] = docker.tls.TLSConfig(
            client_cert=(os.path.join(cert_path, 'cert.pem'),
                         os.path.join(cert_path, 'key.pem')),
            ca_cert=os.path.join(cert_path, 'ca.pem'),
            verify=True,
            ssl_version=None,
            assert_hostname=False)
    return docker.Client(**params)


def get_dusty_containers(services, include_exited=False):
    """Get a list of containers associated with the list
    of services. If no services are provided, attempts to
    return all containers associated with Dusty."""
    client = get_docker_client()
    if services:
        containers = [get_container_for_app_or_service(service, include_exited=include_exited) for service in services]
        return [container for container in containers if container]
    else:
        return [container
                for container in client.containers(all=include_exited)
                if any(name.startswith('/dusty') for name in container.get('Names', []))]


def get_container_for_app_or_service(app_or_service_name, raise_if_not_found=False, include_exited=False):
    client = get_docker_client()
    for container in client.containers(all=include_exited):
        if '/{}'.format(get_dusty_container_name(app_or_service_name)) in container['Names']:
            return container
    if raise_if_not_found:
        raise RuntimeError('No running container found for {}'.format(app_or_service_name))
    return None


def get_canonical_container_name(container):
    """Return the canonical container name, which should be
    of the form dusty_<service_name>_1. Containers are returned
    from the Python client with many names based on the containers
    to which they are linked, but simply taking the shortest name
    should be sufficient to get us the canonical one."""
    return sorted(container['Names'], key=lambda name: len(name))[0][1:]


def get_app_or_service_name_from_container(container):
    return get_canonical_container_name(container).split('_')[1]
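A hedged sketch of chaining the helpers above; the 'web' service name is made up and a running Dusty setup is assumed:

    container = get_container_for_app_or_service('web', raise_if_not_found=True)
    print(get_canonical_container_name(container))               # e.g. 'dusty_web_1'
    print(exec_in_container(container, 'cat', '/etc/hostname'))  # run a command inside it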
| 41.913978 | 128 | 0.68394 | 527 | 3,898 | 4.83871 | 0.286528 | 0.018824 | 0.028235 | 0.025098 | 0.127059 | 0.076078 | 0.037647 | 0.037647 | 0.037647 | 0 | 0 | 0.001933 | 0.203694 | 3,898 | 92 | 129 | 42.369565 | 0.819588 | 0.172653 | 0 | 0.073529 | 0 | 0 | 0.078499 | 0 | 0 | 0 | 0 | 0 | 0.014706 | 1 | 0.132353 | false | 0 | 0.117647 | 0.029412 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35d72eb0340c429101b632a5d5b0a00ec70162fd | 17,291 | py | Python | corehq/apps/app_manager/detail_screen.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/app_manager/detail_screen.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | 1 | 2022-03-12T01:03:25.000Z | 2022-03-12T01:03:25.000Z | corehq/apps/app_manager/detail_screen.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | null | null | null | from corehq.apps.app_manager import id_strings
from corehq.apps.app_manager.suite_xml import xml_models as sx
from corehq.apps.app_manager.suite_xml import const
from corehq.apps.app_manager.util import is_sort_only_column
from corehq.apps.app_manager.xpath import (
    CaseXPath,
    CommCareSession,
    IndicatorXpath,
    LedgerdbXpath,
    LocationXpath,
    XPath,
    dot_interpolate,
    UserCaseXPath)
from corehq.apps.hqmedia.models import CommCareMultimedia


CASE_PROPERTY_MAP = {
    # IMPORTANT: if you edit this you probably want to also edit
    # the corresponding map in cloudcare
    # (corehq/apps/cloudcare/static/cloudcare/js/backbone/cases.js)
    'external-id': 'external_id',
    'date-opened': 'date_opened',
    'status': '@status',
    'name': 'case_name',
    'owner_id': '@owner_id',
}


def get_column_generator(app, module, detail, column, sort_element=None,
                         order=None, detail_type=None):
    cls = get_class_for_format(column.format)
    return cls(app, module, detail, column, sort_element, order, detail_type=detail_type)


def get_class_for_format(slug):
    return get_class_for_format._format_map.get(slug, FormattedDetailColumn)
get_class_for_format._format_map = {}


class register_format_type(object):

    def __init__(self, slug):
        self.slug = slug

    def __call__(self, klass):
        get_class_for_format._format_map[self.slug] = klass
        return klass


def get_column_xpath_generator(app, module, detail, column):
    cls = get_class_for_type(column.field_type)
    return cls(app, module, detail, column)


def get_class_for_type(slug):
    return get_class_for_type._type_map.get(slug, BaseXpathGenerator)
get_class_for_type._type_map = {}


class register_type_processor(object):

    def __init__(self, slug):
        self.slug = slug

    def __call__(self, klass):
        get_class_for_type._type_map[self.slug] = klass
        return klass
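The two decorator classes above implement a simple plug-in registry: decorating a class records it in a module-level map keyed by a slug, and the get_class_for_* helpers fall back to a default class for unknown slugs. A minimal standalone sketch of the same pattern (the names here are illustrative, not from CommCare HQ):

    _registry = {}

    class register_slug(object):
        def __init__(self, slug):
            self.slug = slug

        def __call__(self, klass):
            _registry[self.slug] = klass  # record the class under its slug
            return klass                  # leave the class itself unchanged

    @register_slug('date')
    class DateColumn(object):
        pass

    assert _registry['date'] is DateColumn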
class BaseXpathGenerator(object):

    def __init__(self, app, module, detail, column):
        self.app = app
        self.module = module
        self.detail = detail
        self.column = column
        self.id_strings = id_strings

    @property
    def xpath(self):
        return self.column.field


class FormattedDetailColumn(object):

    header_width = None
    template_width = None
    template_form = None

    def __init__(self, app, module, detail, column, sort_element=None,
                 order=None, detail_type=None):
        self.app = app
        self.module = module
        self.detail = detail
        self.detail_type = detail_type
        self.column = column
        self.sort_element = sort_element
        self.order = order
        self.id_strings = id_strings

    @property
    def locale_id(self):
        if not is_sort_only_column(self.column):
            return self.id_strings.detail_column_header_locale(
                self.module, self.detail_type, self.column,
            )
        else:
            return None

    @property
    def header(self):
        header = sx.Header(
            text=sx.Text(locale_id=self.locale_id),
            width=self.header_width
        )
        return header

    variables = None

    @property
    def template(self):
        template = sx.Template(
            text=sx.Text(xpath_function=self.xpath_function),
            form=self.template_form,
            width=self.template_width,
        )
        if self.variables:
            for key, value in sorted(self.variables.items()):
                template.text.xpath.variables.node.append(
                    sx.XpathVariable(name=key, locale_id=value).node
                )
        return template

    @property
    def sort_node(self):
        if not (self.app.enable_multi_sort and self.detail.display == 'short'):
            return
        sort = None

        if self.sort_xpath_function:
            sort = sx.Sort(
                text=sx.Text(xpath_function=self.sort_xpath_function),
                type='string',
            )
        if self.sort_element:
            if not sort:
                # these have to be distinguished for the UI to be able to give
                # user friendly choices
                if self.sort_element.type in ('date', 'plain'):
                    sort_type = 'string'
                else:
                    sort_type = self.sort_element.type
                sort = sx.Sort(
                    text=sx.Text(xpath_function=self.xpath_function),
                    type=sort_type,
                )
            sort.order = self.order
            sort.direction = self.sort_element.direction

            # Flag field as index by making order "-2"
            # this is for the CACHE_AND_INDEX toggle
            # (I know, I know, it's hacky - blame Clayton)
            if sort.type == 'index':
                sort.type = 'string'
                sort.order = -2

        return sort

    @property
    def xpath(self):
        return get_column_xpath_generator(self.app, self.module, self.detail,
                                          self.column).xpath

    XPATH_FUNCTION = u"{xpath}"

    def evaluate_template(self, template):
        if template:
            return template.format(
                xpath=self.xpath,
                app=self.app,
                module=self.module,
                detail=self.detail,
                column=self.column
            )

    @property
    def xpath_function(self):
        return self.evaluate_template(self.XPATH_FUNCTION)

    @property
    def hidden_header(self):
        return sx.Header(
            text=sx.Text(),
            width=0,
        )

    @property
    def hidden_template(self):
        return sx.Template(
            text=sx.Text(xpath_function=self.sort_xpath_function),
            width=0,
        )

    SORT_XPATH_FUNCTION = None

    @property
    def sort_xpath_function(self):
        return self.evaluate_template(self.SORT_XPATH_FUNCTION)

    @property
    def fields(self):
        if self.app.enable_multi_sort:
            yield sx.Field(
                header=self.header,
                template=self.template,
                sort_node=self.sort_node,
            )
        elif self.sort_xpath_function and self.detail.display == 'short':
            yield sx.Field(
                header=self.header,
                template=self.hidden_template,
            )
            yield sx.Field(
                header=self.hidden_header,
                template=self.template,
            )
        else:
            yield sx.Field(
                header=self.header,
                template=self.template,
            )


class HideShortHeaderColumn(FormattedDetailColumn):

    @property
    def header(self):
        if self.detail.display == 'short':
            header = sx.Header(
                text=sx.Text(),
                width=self.template_width
            )
        else:
            header = super(HideShortHeaderColumn, self).header
        return header


class HideShortColumn(HideShortHeaderColumn):

    @property
    def template_width(self):
        if self.detail.display == 'short':
            return 0


@register_format_type('plain')
class Plain(FormattedDetailColumn):
    pass


@register_format_type('date')
class Date(FormattedDetailColumn):

    XPATH_FUNCTION = u"if({xpath} = '', '', format_date(date(if({xpath} = '', 0, {xpath})),'short'))"

    SORT_XPATH_FUNCTION = u"{xpath}"


@register_format_type('time-ago')
class TimeAgo(FormattedDetailColumn):

    XPATH_FUNCTION = u"if({xpath} = '', '', string(int((today() - date({xpath})) div {column.time_ago_interval})))"

    SORT_XPATH_FUNCTION = u"{xpath}"


@register_format_type('phone')
class Phone(FormattedDetailColumn):

    @property
    def template_form(self):
        if self.detail.display == 'long':
            return 'phone'


@register_format_type('enum')
class Enum(FormattedDetailColumn):

    def _make_xpath(self, type):
        if type == 'sort':
            xpath_fragment_template = u"if({xpath} = '{key}', {i}, "
        elif type == 'display':
            xpath_fragment_template = u"if({xpath} = '{key}', ${key_as_var}, "
        else:
            raise ValueError('type must be in sort, display')

        parts = []
        for i, item in enumerate(self.column.enum):
            parts.append(
                xpath_fragment_template.format(
                    key=item.key,
                    key_as_var=item.key_as_variable,
                    xpath=self.xpath,
                    i=i,
                )
            )
        parts.append(u"''")
        parts.append(u")" * len(self.column.enum))
        return ''.join(parts)
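For concreteness: with two enum items keyed `male` and `female` on a column whose xpath is `gender`, `_make_xpath('display')` nests one `if(...)` per item and closes them all at the end, producing a string of the shape

    if(gender = 'male', $vmale, if(gender = 'female', $vfemale, ''))

where `$vmale` / `$vfemale` stand in for whatever `item.key_as_variable` returns (the exact variable spelling is assumed here for illustration).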
    @property
    def xpath_function(self):
        return self._make_xpath(type='display')

    @property
    def sort_xpath_function(self):
        return self._make_xpath(type='sort')

    @property
    def variables(self):
        variables = {}
        for item in self.column.enum:
            v_key = item.key_as_variable
            v_val = self.id_strings.detail_column_enum_variable(
                self.module, self.detail_type, self.column, v_key)
            variables[v_key] = v_val
        return variables


@register_format_type('enum-image')
class EnumImage(Enum):

    template_form = 'image'

    @property
    def header_width(self):
        return self.template_width

    @property
    def template_width(self):
        '''
        Set column width to accommodate the widest image.
        '''
        width = 0
        if self.app.enable_case_list_icon_dynamic_width:
            for i, item in enumerate(self.column.enum):
                for path in item.value.values():
                    map_item = self.app.multimedia_map[path]
                    if map_item is not None:
                        image = CommCareMultimedia.get(map_item.multimedia_id)
                        if image is not None:
                            for media in image.aux_media:
                                width = max(width, media.media_meta['size']['width'])
        if width == 0:
            return '13%'
        return str(width)


@register_format_type('late-flag')
class LateFlag(HideShortHeaderColumn):

    template_width = "11%"

    XPATH_FUNCTION = u"if({xpath} = '', '*', if(today() - date({xpath}) > {column.late_flag}, '*', ''))"


@register_format_type('invisible')
class Invisible(HideShortColumn):
    pass


@register_format_type('filter')
class Filter(HideShortColumn):

    @property
    def fields(self):
        return []


@register_format_type('calculate')
class Calculate(FormattedDetailColumn):

    @property
    def xpath_function(self):
        return dot_interpolate(self.column.calc_xpath, self.xpath)


@register_format_type('address')
class Address(HideShortColumn):

    template_form = 'address'
    template_width = 0


@register_format_type('picture')
class Picture(FormattedDetailColumn):

    template_form = 'image'


@register_format_type('audio')
class Audio(FormattedDetailColumn):

    template_form = 'audio'


@register_format_type('graph')
class Graph(FormattedDetailColumn):

    template_form = "graph"

    @property
    def template(self):
        template = sx.GraphTemplate(
            form=self.template_form,
            graph=sx.Graph(
                type=self.column.graph_configuration.graph_type,
                series=[
                    sx.Series(
                        nodeset=s.data_path,
                        x_function=s.x_function,
                        y_function=s.y_function,
                        radius_function=s.radius_function,
                        configuration=sx.ConfigurationGroup(
                            configs=[
                                # TODO: It might be worth wrapping
                                #       these values in quotes (as appropriate)
                                #       to prevent the user from having to
                                #       figure out why their unquoted colors
                                #       aren't working.
                                sx.ConfigurationItem(id=k, xpath_function=v)
                                for k, v in s.config.iteritems()]
                        )
                    )
                    for s in self.column.graph_configuration.series],
                configuration=sx.ConfigurationGroup(
                    configs=(
                        [
                            sx.ConfigurationItem(id=k, xpath_function=v)
                            for k, v
                            in self.column.graph_configuration.config.iteritems()
                        ] + [
                            sx.ConfigurationItem(
                                id=k,
                                locale_id=self.id_strings.graph_configuration(
                                    self.module,
                                    self.detail_type,
                                    self.column,
                                    k
                                )
                            )
                            for k, v
                            in self.column.graph_configuration.locale_specific_config.iteritems()
                        ]
                    )
                ),
                annotations=[
                    sx.Annotation(
                        x=sx.Text(xpath_function=a.x),
                        y=sx.Text(xpath_function=a.y),
                        text=sx.Text(
                            locale_id=self.id_strings.graph_annotation(
                                self.module,
                                self.detail_type,
                                self.column,
                                i
                            )
                        )
                    )
                    for i, a in enumerate(
                        self.column.graph_configuration.annotations
                    )]
            )
        )

        # TODO: what are self.variables and do I need to care about them here?
        # (see FormattedDetailColumn.template)

        return template


@register_type_processor(const.FIELD_TYPE_ATTACHMENT)
class AttachmentXpathGenerator(BaseXpathGenerator):

    @property
    def xpath(self):
        return const.FIELD_TYPE_ATTACHMENT + "/" + self.column.field_property


@register_type_processor(const.FIELD_TYPE_PROPERTY)
class PropertyXpathGenerator(BaseXpathGenerator):

    @property
    def xpath(self):
        if self.column.model == 'product':
            return self.column.field

        parts = self.column.field.split('/')
        if self.column.model == 'case':
            parts[-1] = CASE_PROPERTY_MAP.get(parts[-1], parts[-1])

        property = parts.pop()
        indexes = parts
        use_relative = property != '#owner_name'
        if use_relative:
            case = CaseXPath('')
        else:
            case = CaseXPath(u'current()')

        if indexes and indexes[0] == 'user':
            case = CaseXPath(UserCaseXPath().case())
        else:
            for index in indexes:
                case = case.index_id(index).case()

        if property == '#owner_name':
            return self.owner_name(case.property('@owner_id'))
        else:
            return case.property(property)

    @staticmethod
    def owner_name(owner_id):
        groups = XPath(u"instance('groups')/groups/group")
        group = groups.select('@id', owner_id)
        return XPath.if_(
            group.count().neq(0),
            group.slash('name'),
            XPath.if_(
                CommCareSession.userid.eq(owner_id),
                CommCareSession.username,
                XPath.string('')
            )
        )


@register_type_processor(const.FIELD_TYPE_INDICATOR)
class IndicatorXpathGenerator(BaseXpathGenerator):

    @property
    def xpath(self):
        indicator_set, indicator = self.column.field_property.split('/', 1)
        instance_id = self.id_strings.indicator_instance(indicator_set)
        return IndicatorXpath(instance_id).instance().slash(indicator)


@register_type_processor(const.FIELD_TYPE_LOCATION)
class LocationXpathGenerator(BaseXpathGenerator):

    @property
    def xpath(self):
        from corehq.apps.locations.util import parent_child
        hierarchy = parent_child(self.app.domain)
        return LocationXpath('commtrack:locations').location(self.column.field_property, hierarchy)


@register_type_processor(const.FIELD_TYPE_LEDGER)
class LedgerXpathGenerator(BaseXpathGenerator):

    @property
    def xpath(self):
        session_case_id = 'case_id_case_{0}'.format(self.module.case_type)
        section = self.column.field_property
        return "if({0} = 0 or {1} = 0 or {2} = 0, '', {3})".format(
            LedgerdbXpath(session_case_id).ledger().count(),
            LedgerdbXpath(session_case_id).ledger().section(section).count(),
            LedgerdbXpath(session_case_id).ledger().section(section).entry(u'current()/@id').count(),
            LedgerdbXpath(session_case_id).ledger().section(section).entry(u'current()/@id')
        )


@register_type_processor(const.FIELD_TYPE_SCHEDULE)
class ScheduleXpathGenerator(BaseXpathGenerator):

    @property
    def xpath(self):
        return "${}".format(self.column.field_property)
| 30.495591 | 115 | 0.571049 | 1,818 | 17,291 | 5.225523 | 0.154015 | 0.030526 | 0.028421 | 0.016842 | 0.357053 | 0.283368 | 0.185789 | 0.150947 | 0.082 | 0.057684 | 0 | 0.002345 | 0.334047 | 17,291 | 566 | 116 | 30.54947 | 0.822666 | 0.040541 | 0 | 0.301843 | 0 | 0.009217 | 0.052682 | 0.005256 | 0 | 0 | 0 | 0.001767 | 0 | 1 | 0.09447 | false | 0.004608 | 0.016129 | 0.034562 | 0.306452 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35d7f8fe31e2b84162f58c490aa5993bec9f6839 | 17,070 | py | Python | automator/MplCanvas.py | C-CINA/zorro | ac13e6bf9900d11f37dc5910b560c84e285976b1 | [
"BSD-2-Clause"
] | 8 | 2016-09-04T01:02:23.000Z | 2020-09-10T18:46:41.000Z | automator/MplCanvas.py | C-CINA/zorro | ac13e6bf9900d11f37dc5910b560c84e285976b1 | [
"BSD-2-Clause"
] | 14 | 2016-08-30T06:11:52.000Z | 2016-09-29T10:17:40.000Z | automator/MplCanvas.py | C-CINA/zorro | ac13e6bf9900d11f37dc5910b560c84e285976b1 | [
"BSD-2-Clause"
] | 3 | 2016-09-04T01:02:29.000Z | 2020-05-25T12:32:45.000Z | # -*- coding: utf-8 -*-
#!/usr/bin/env python
"""
MplCanvas

This is a QWidget that can be used for fast-ish plotting within a Qt GUI interface.
Originally I was going to subclass for different types of plots, but this seems a
little hard with the amount of initialization required to set up the plot properly
within its parent frame, so we will draw based on class members.

Based on:
embedding_in_qt4.py --- Simple Qt4 application embedding matplotlib canvases

Copyright (C) 2005 Florent Rougon
              2006 Darren Dale

This file is an example program for matplotlib. It may be used and
modified with no restriction; raw copies as well as modified versions
may be distributed without limitation.
"""

from __future__ import division, print_function, absolute_import, unicode_literals

import os
import matplotlib
matplotlib.use('Qt4Agg')
try:
    from PySide import QtGui
    matplotlib.rcParams['backend.qt4'] = 'PySide'
    os.environ.setdefault('QT_API', 'pyside')
except ImportError:
    # Import PyQt4 as backup?
    print("MplCanvas: PySide not found.")

from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
# from matplotlib.figure import Figure
import numpy as np
# from itertools import cycle
# from collections import OrderedDict
import skimage.io
from zorro import plot as zplt
import subprocess
import tempfile

# How to design custom controls with PyQT:
# http://doc.qt.digia.com/qq/qq26-pyqtdesigner.html


class MplCanvas(FigureCanvas, object):
    """This is an empty QWidget of type FigureCanvasQTAgg. Uses a zorro_plotting.zorroPlot object to do all
    the live plotting, or it can load graphics files from disk."""

    @property
    def zorroObj(self):
        return self._zorroObj

    @zorroObj.setter
    def zorroObj(self, newZorroObj):
        # print( "Set _zorroObj" )
        if not bool(newZorroObj):
            return

        self._zorroObj = newZorroObj

        # Used for mapping combo box text to files in the zorroObj
        # baseName should be location of the config file
        baseDir = ''
        # if 'config' in self._zorroObj.files:
        #     baseDir = os.path.split( self._zorroObj.files['config'] )[0]

        if 'figBoxMask' in self._zorroObj.files:
            # This isn't here... it's next to sum...
            self.pixmapDict[u'Box Mask'] = os.path.join(baseDir, self._zorroObj.files['figBoxMask'])
        if 'figStats' in self._zorroObj.files:
            self.pixmapDict[u'Statistics'] = os.path.join(baseDir, self._zorroObj.files['figStats'])
        if 'figTranslations' in self._zorroObj.files:
            self.pixmapDict[u'Drift'] = os.path.join(baseDir, self._zorroObj.files['figTranslations'])
        if 'figPixRegError' in self._zorroObj.files:
            self.pixmapDict[u'Drift error'] = os.path.join(baseDir, self._zorroObj.files['figPixRegError'])
        if 'figPeaksigTriMat' in self._zorroObj.files:
            self.pixmapDict[u'Peak significance'] = os.path.join(baseDir, self._zorroObj.files['figPeaksigTriMat'])
        if 'figCorrTriMat' in self._zorroObj.files:
            self.pixmapDict[u'Correlation coefficient'] = os.path.join(baseDir, self._zorroObj.files['figCorrTriMat'])
        if 'figCTFDiag' in self._zorroObj.files:
            self.pixmapDict[u'CTF diagnostic'] = os.path.join(baseDir, self._zorroObj.files['figCTFDiag'])
        if 'figLogisticWeights' in self._zorroObj.files:
            self.pixmapDict[u'Logistic weights'] = os.path.join(baseDir, self._zorroObj.files['figLogisticWeights'])
        if 'figImageSum' in self._zorroObj.files:
            self.pixmapDict[u'Image sum'] = os.path.join(baseDir, self._zorroObj.files['figImageSum'])
        if 'figFFTSum' in self._zorroObj.files:
            self.pixmapDict[u'Fourier mag'] = os.path.join(baseDir, self._zorroObj.files['figFFTSum'])
        if 'figPolarFFTSum' in self._zorroObj.files:
            self.pixmapDict[u'Polar mag'] = os.path.join(baseDir, self._zorroObj.files['figPolarFFTSum'])
        if 'figFiltSum' in self._zorroObj.files:
            self.pixmapDict[u'Dose filtered sum'] = os.path.join(baseDir, self._zorroObj.files['figFiltSum'])
        if 'figFRC' in self._zorroObj.files:
            self.pixmapDict[u'Fourier Ring Correlation'] = os.path.join(baseDir, self._zorroObj.files['figFRC'])

    def __init__(self, parent=None, width=4, height=4, plot_dpi=72, image_dpi=250):
        object.__init__(self)

        self.plotObj = zplt.zorroPlot(width=width, height=height,
                                      plot_dpi=plot_dpi, image_dpi=image_dpi,
                                      facecolor=[0, 0, 0, 0], MplCanvas=self)
        FigureCanvas.__init__(self, self.plotObj.fig)

        self.currPlotFunc = self.plotObj.plotTranslations
        self.cmap = 'gray'
        self._zorroObj = None
        self.plotName = None
        self.live = True  # Whether to re-render the plots with each update event or use a rendered graphics-file loaded from disk
        self.PixmapName = None
        self.Pixmap = None

        # plotFuncs is a hash to function mapping
        # These may need to add the appropriate data to plotDict? I could use functools.partial?
        self.plotFuncs = {}
        self.plotFuncs[""] = None
        self.plotFuncs[u'Statistics'] = self.plotObj.plotStats
        self.plotFuncs[u'Drift'] = self.plotObj.plotTranslations
        self.plotFuncs[u'Drift error'] = self.plotObj.plotPixRegError
        self.plotFuncs[u'Peak significance'] = self.plotObj.plotPeaksigTriMat
        self.plotFuncs[u'Correlation coefficient'] = self.plotObj.plotCorrTriMat
        self.plotFuncs[u'CTF diagnostic'] = self.plotObj.plotCTFDiag
        self.plotFuncs[u'Logistic weights'] = self.plotObj.plotLogisticWeights
        self.plotFuncs[u'Stack'] = self.plotObj.plotStack
        self.plotFuncs[u'Image sum'] = self.plotObj.plotImage
        self.plotFuncs[u'Fourier mag'] = self.plotObj.plotFFT
        self.plotFuncs[u'Polar mag'] = self.plotObj.plotPolarFFT
        self.plotFuncs[u'Cross correlations'] = self.plotObj.plotStack  # TODO
        self.plotFuncs[u'Dose filtered sum'] = self.plotObj.plotImage
        self.plotFuncs[u'Fourier Ring Correlation'] = self.plotObj.plotFRC

        self.liveFuncs = {}
        self.liveFuncs[u'Statistics'] = self.liveStats
        self.liveFuncs[u'Image sum'] = self.liveImageSum
        self.liveFuncs[u'Dose filtered sum'] = self.liveFiltSum
        self.liveFuncs[u'Drift'] = self.liveTranslations
        self.liveFuncs[u'Drift error'] = self.livePixRegError
        self.liveFuncs[u'Peak significance'] = self.livePeaksigTriMat
        self.liveFuncs[u'Correlation coefficient'] = self.livePeaksigTriMat
        self.liveFuncs[u'Logistic weights'] = self.liveLogisticWeights
        self.liveFuncs[u'Fourier Ring Correlation'] = self.liveFRC
        self.liveFuncs[u'CTF diagnostic'] = self.liveCTFDiag

        self.pixmapDict = {}

        # WARNING WITH SPYDER: Make sure PySide is the default in the console
        # self.setSizePolicy(self, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        self.updateGeometry()

    ##### 2DX VIEW #####
    def exportTo2dx(self):
        # Write a params file
        # paramFile = tempfile.mktemp()
        # with open( paramFile, 'w' ):
        #     pass
        # Temporary directory that we can delete? We could use tempfile
        # Invoke
        # subprocess.Popen( "2dx_viewer -p %s %s" % (paramFile) )
        # When to delete paramFile?
        if self.plotName == u'Dose filtered sum':
            realPath = os.path.realpath(self._zorroObj.files['filt'])
            subprocess.Popen("2dx_viewer %s" % (realPath), shell=True)
        elif self.plotName == u'Image sum':
            realPath = os.path.realpath(self._zorroObj.files['sum'])
            subprocess.Popen("2dx_viewer %s" % (realPath), shell=True)
        else:
            print("Unsupported plot function for 2dx_viewer")

    def exportToIms(self):
        if self.plotName == u'Dose filtered sum':
            realPath = os.path.realpath(self._zorroObj.files['filt'])
            subprocess.Popen("ims %s" % (realPath), shell=True)
        elif self.plotName == u'Image sum':
            realPath = os.path.realpath(self._zorroObj.files['sum'])
            subprocess.Popen("ims %s" % (realPath), shell=True)
        else:
            print("Unsupported plot function for ims")

    ##### LIVE VIEW #####
    def livePlot(self, plotName):
        print("called livePlot")
        # Check the plotObj's plotDict for correct fields
        # Do separate sub-functions for each plot type?
        if self._zorroObj is None:
            return

        if plotName in self.liveFuncs:
            self.liveFuncs[plotName]()
        else:
            print("Live function: %s not found." % plotName)
            self.currPlotFunc = self.plotObj.plotEmpty

        # Plot
        self.currPlotFunc()
        self.redraw()

    def liveStats(self):
        self.plotObj.plotDict['pixelsize'] = self._zorroObj.pixelsize
        self.plotObj.plotDict['voltage'] = self._zorroObj.voltage
        self.plotObj.plotDict['c3'] = self._zorroObj.C3
        if len(self._zorroObj.errorDictList) > 0 and 'peaksigTriMat' in self._zorroObj.errorDictList[-1]:
            peaksig = self._zorroObj.errorDictList[-1]['peaksigTriMat']
            peaksig = peaksig[peaksig > 0.0]
            self.plotObj.plotDict['meanPeaksig'] = np.mean(peaksig)
            self.plotObj.plotDict['stdPeaksig'] = np.std(peaksig)
        if np.any(self._zorroObj.CTFInfo['DefocusU']):
            self.plotObj.plotDict['CTFInfo'] = self._zorroObj.CTFInfo
        self.currPlotFunc = self.plotObj.plotStats

    def liveImageSum(self):
        try:
            if not np.any(self._zorroObj.imageSum):  # Try to load it
                self._zorroObj.loadData(stackNameIn=self._zorroObj.files['sum'], target="sum")
            self.plotObj.plotDict['image'] = self._zorroObj.getSumCropToLimits()
            self.plotObj.plotDict['image_cmap'] = self.cmap
            self.currPlotFunc = self.plotObj.plotImage
        except:
            self.currPlotFunc = self.plotObj.plotEmpty

    def liveFiltSum(self):
        try:
            if not np.any(self._zorroObj.filtSum):  # Try to load it
                self._zorroObj.loadData(stackNameIn=self._zorroObj.files['filt'], target="filt")
            self.plotObj.plotDict['image'] = self._zorroObj.getFiltSumCropToLimits()
            self.plotObj.plotDict['image_cmap'] = self.cmap
            self.currPlotFunc = self.plotObj.plotImage
        except:
            self.currPlotFunc = self.plotObj.plotEmpty

    def liveTranslations(self):
        if np.any(self._zorroObj.translations):
            self.plotObj.plotDict['translations'] = self._zorroObj.translations
            try:
                self.plotObj.plotDict['errorX'] = self._zorroObj.errorDictList[0]['errorX']
                self.plotObj.plotDict['errorY'] = self._zorroObj.errorDictList[0]['errorY']
            except:
                pass
            self.currPlotFunc = self.plotObj.plotTranslations
        else:
            self.currPlotFunc = self.plotObj.plotEmpty

    def livePixRegError(self):
        try:
            self.plotObj.plotDict['errorX'] = self._zorroObj.errorDictList[0]['errorX']
            self.plotObj.plotDict['errorY'] = self._zorroObj.errorDictList[0]['errorY']
            self.plotObj.plotDict['errorXY'] = self._zorroObj.errorDictList[0]['errorXY']
            self.currPlotFunc = self.plotObj.plotPixRegError
        except:
            self.currPlotFunc = self.plotObj.plotEmpty

    def livePeaksigTriMat(self):
        try:
            self.plotObj.plotDict['peaksigTriMat'] = self._zorroObj.errorDictList[0]['peaksigTriMat']
            self.plotObj.plotDict['graph_cmap'] = self.cmap
            self.currPlotFunc = self.plotObj.plotPeaksigTriMat
        except:
            self.currPlotFunc = self.plotObj.plotEmpty

    def liveCorrTriMat(self):
        try:
            self.plotObj.plotDict['corrTriMat'] = self._zorroObj.errorDictList[0]['corrTriMat']
            self.plotObj.plotDict['graph_cmap'] = self.cmap
            self.currPlotFunc = self.plotObj.plotCorrTriMat
        except:
            self.currPlotFunc = self.plotObj.plotEmpty

    def liveLogisticWeights(self):
        try:
            if self._zorroObj.weightMode == 'autologistic' or self._zorroObj.weightMode == 'logistic':
                self.plotObj.plotDict['peaksigThres'] = self._zorroObj.peaksigThres
                self.plotObj.plotDict['logisticK'] = self._zorroObj.logisticK
                self.plotObj.plotDict['logisticNu'] = self._zorroObj.logisticNu
            self.plotObj.plotDict['errorXY'] = self._zorroObj.errorDictList[0]["errorXY"]
            self.plotObj.plotDict['peaksigVect'] = self._zorroObj.errorDictList[0]["peaksigTriMat"][self._zorroObj.errorDictList[0]["peaksigTriMat"] > 0.0]

            if 'cdfPeaks' in self._zorroObj.errorDictList[0]:
                self.plotObj.plotDict['cdfPeaks'] = self._zorroObj.errorDictList[0]['cdfPeaks']
                self.plotObj.plotDict['hSigma'] = self._zorroObj.errorDictList[0]['hSigma']

            self.currPlotFunc = self.plotObj.plotLogisticWeights
        except Exception as e:
            print("MplCanvas.liveLogisticWeights received exception " + str(e))
            self.currPlotFunc = self.plotObj.plotEmpty

    def liveFRC(self):
        try:
            self.plotObj.plotDict['FRC'] = self._zorroObj.FRC
            self.plotObj.plotDict['pixelsize'] = self._zorroObj.pixelsize

            if bool(self.zorroObj.doEvenOddFRC):
                self.plotObj.plotDict['labelText'] = "Even-odd frame independent FRC"
            else:
                self.plotObj.plotDict['labelText'] = "Non-independent FRC is not a resolution estimate"

            self.currPlotFunc = self.plotObj.plotFRC
        except:
            self.currPlotFunc = self.plotObj.plotEmpty

    def liveCTFDiag(self):
        try:
            self.plotObj.plotDict['CTFDiag'] = self._zorroObj.CTFDiag
            self.plotObj.plotDict['CTFInfo'] = self._zorroObj.CTFInfo
            self.plotObj.plotDict['pixelsize'] = self._zorroObj.pixelsize
            self.plotObj.plotDict['image_cmap'] = self.cmap
            self.currPlotFunc = self.plotObj.plotCTFDiag
        except:
            self.currPlotFunc = self.plotObj.plotEmpty

    ##### DEAD VIEW #####
    def loadPixmap(self, plotName, filename=None):
        if not bool(filename):
            # Pull the filename from the zorro log
            try:
                # print( plotName )
                filename = self.pixmapDict[plotName]
                print("Pulling figure name: %s" % filename)
            except KeyError:
                self.currPlotFunc = self.plotObj.plotEmpty()
                self.redraw()

        if not bool(filename):  # Probably an unprocessed stack
            return
        if not os.path.isfile(filename):
            raise IOError("automator.MplCanvas.loadPixmap: file not found: %s" % filename)

        self.PixmapName = filename
        self.Pixmap = skimage.io.imread(filename)
        self.plotObj.plotDict['pixmap'] = self.Pixmap
        self.currPlotFunc = self.plotObj.plotPixmap()
        self.redraw()

    def updatePlotFunc(self, plotName, newZorroObj=None):
        # print( "plotName = " + str(plotName) + ", zorroObj = " + str(newZorroObj) )
        try:
            self.plotName = plotName
            self.currPlotFunc = self.plotFuncs[plotName]
        except KeyError:
            raise KeyError("automator.MplCanvas.updatePlotFunc: Plot type not found in plotDict: %s" % plotName)

        self.zorroObj = newZorroObj  # the setter auto-checks validity... the setter isn't working right...
        if bool(self.live):
            self.plotObj.axes2 = None
            self.livePlot(plotName)
        else:
            self.loadPixmap(plotName)

    def redraw(self):
        # self.plotObj.updateCanvas()
        self.draw()
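A hedged sketch of how the canvas is driven from a host GUI; `layout` and `zorroState` are illustrative placeholders for a Qt layout and a loaded zorro object:

    canvas = MplCanvas(width=4, height=4, plot_dpi=72, image_dpi=250)
    layout.addWidget(canvas)
    canvas.live = True                           # render in-process instead of loading saved figures
    canvas.updatePlotFunc(u'Drift', zorroState)  # plot the translation track for this micrograph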
| 45.764075 | 159 | 0.609197 | 1,787 | 17,070 | 5.753777 | 0.227196 | 0.0922 | 0.066524 | 0.060397 | 0.355378 | 0.311321 | 0.280004 | 0.200934 | 0.145886 | 0.145886 | 0 | 0.004769 | 0.287463 | 17,070 | 372 | 160 | 45.887097 | 0.840582 | 0.142472 | 0 | 0.311024 | 0 | 0 | 0.130193 | 0.0067 | 0 | 0 | 0 | 0.002688 | 0 | 1 | 0.074803 | false | 0.011811 | 0.03937 | 0.003937 | 0.137795 | 0.031496 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35dca20f855bd35614745043a3aae47471c2f230 | 12,741 | py | Python | fink_filters/filter_rate_based_kn_candidates/filter.py | tallamjr/fink-filters | 5fff5717eae95dac28d6cff313457d4427b42a86 | [
"Apache-2.0"
] | null | null | null | fink_filters/filter_rate_based_kn_candidates/filter.py | tallamjr/fink-filters | 5fff5717eae95dac28d6cff313457d4427b42a86 | [
"Apache-2.0"
] | 57 | 2020-01-20T09:36:58.000Z | 2022-03-23T15:22:39.000Z | fink_filters/filter_rate_based_kn_candidates/filter.py | tallamjr/fink-filters | 5fff5717eae95dac28d6cff313457d4427b42a86 | [
"Apache-2.0"
] | 2 | 2019-11-17T14:10:07.000Z | 2022-02-22T08:51:25.000Z | # Copyright 2019-2021 AstroLab Software
# Authors: Julien Peloton, Juliette Vlieghe
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import BooleanType
import numpy as np
import pandas as pd
import datetime
import requests
import os
import logging
from scipy.optimize import curve_fit
from astropy.coordinates import SkyCoord
from astropy.coordinates import Angle
from astropy import units as u
from astropy.time import Time
from astroquery.sdss import SDSS
from fink_science.conversion import dc_mag
@pandas_udf(BooleanType(), PandasUDFType.SCALAR)
def rate_based_kn_candidates(
objectId, rfscore, snn_snia_vs_nonia, snn_sn_vs_all, drb,
classtar, jdstarthist, ndethist, cdsxmatch, ra, dec, ssdistnr, cjdc,
cfidc, cmagpsfc, csigmapsfc, cmagnrc, csigmagnrc, cmagzpscic,
cisdiffposc) -> pd.Series:
"""
Return alerts considered as KN candidates.
The cuts are based on Andreoni et al. 2021 https://arxiv.org/abs/2104.06352
If the environment variable KNWEBHOOK is defined and match a webhook url,
the alerts that pass the filter will be sent to the matching Slack channel.
Parameters
----------
objectId: Spark DataFrame Column
Column containing the alert IDs
rfscore, snn_snia_vs_nonia, snn_sn_vs_all: Spark DataFrame Columns
Columns containing the scores for: 'Early SN Ia',
'Ia SN vs non-Ia SN', 'SN Ia and Core-Collapse vs non-SN events'
drb: Spark DataFrame Column
Column containing the Deep-Learning Real Bogus score
classtar: Spark DataFrame Column
Column containing the sextractor score
jdstarthist: Spark DataFrame Column
Column containing earliest Julian dates of epoch [days]
ndethist: Spark DataFrame Column
Column containing the number of prior detections (theshold of 3 sigma)
cdsxmatch: Spark DataFrame Column
Column containing the cross-match values
ra: Spark DataFrame Column
Column containing the right Ascension of candidate; J2000 [deg]
dec: Spark DataFrame Column
Column containing the declination of candidate; J2000 [deg]
ssdistnr: Spark DataFrame Column
distance to nearest known solar system object; -999.0 if none [arcsec]
cjdc, cfidc, cmagpsfc, csigmapsfc, cmagnrc, csigmagnrc, cmagzpscic: Spark DataFrame Columns
Columns containing history of fid, magpsf, sigmapsf, magnr, sigmagnr,
magzpsci, isdiffpos as arrays
Returns
----------
out: pandas.Series of bool
Return a Pandas DataFrame with the appropriate flag:
false for bad alert, and true for good alert.
"""
# Extract last (new) measurement from the concatenated column
jd = cjdc.apply(lambda x: x[-1])
fid = cfidc.apply(lambda x: x[-1])
isdiffpos = cisdiffposc.apply(lambda x: x[-1])
high_drb = drb.astype(float) > 0.9
high_classtar = classtar.astype(float) > 0.4
new_detection = jd.astype(float) - jdstarthist.astype(float) < 14
small_detection_history = ndethist.astype(float) < 20
appeared = isdiffpos.astype(str) == 't'
far_from_mpc = (ssdistnr.astype(float) > 10) | (ssdistnr.astype(float) < 0)
# galactic plane
b = SkyCoord(ra.astype(float), dec.astype(float), unit='deg'
).galactic.b.deg
awaw_from_galactic_plane = np.abs(b) > 10
list_simbad_galaxies = [
"galaxy",
"Galaxy",
"EmG",
"Seyfert",
"Seyfert_1",
"Seyfert_2",
"BlueCompG",
"StarburstG",
"LSB_G",
"HII_G",
"High_z_G",
"GinPair",
"GinGroup",
"BClG",
"GinCl",
"PartofG",
]
keep_cds = \
["Unknown", "Transient", "Fail"] + list_simbad_galaxies
f_kn = high_drb & high_classtar & new_detection & small_detection_history
f_kn = f_kn & cdsxmatch.isin(keep_cds) & appeared & far_from_mpc
f_kn = f_kn & awaw_from_galactic_plane
# Compute rate and error rate, get magnitude and its error
rate = np.zeros(len(fid))
sigma_rate = np.zeros(len(fid))
mag = np.zeros(len(fid))
err_mag = np.zeros(len(fid))
index_mask = np.argwhere(f_kn)
for i, alertID in enumerate(objectId[f_kn]):
# Spark casts None as NaN
maskNotNone = ~np.isnan(np.array(cmagpsfc[f_kn].values[i]))
maskFilter = np.array(cfidc[f_kn].values[i]) == np.array(fid)[f_kn][i]
m = maskNotNone * maskFilter
if sum(m) < 2:
continue
# DC mag (history + last measurement)
mag_hist, err_hist = np.array([
dc_mag(k[0], k[1], k[2], k[3], k[4], k[5], k[6])
for k in zip(
cfidc[f_kn].values[i][m],
cmagpsfc[f_kn].values[i][m],
csigmapsfc[f_kn].values[i][m],
cmagnrc[f_kn].values[i][m],
csigmagnrc[f_kn].values[i][m],
cmagzpscic[f_kn].values[i][m],
cisdiffposc[f_kn].values[i][m],
)
]).T
# remove abnormal values
mask_outliers = mag_hist < 21
if sum(mask_outliers) < 2:
continue
jd_hist = cjdc[f_kn].values[i][m][mask_outliers]
if jd_hist[-1] - jd_hist[0] > 0.5:
# Compute rate
popt, pcov = curve_fit(
lambda x, a, b: a * x + b,
jd_hist,
mag_hist[mask_outliers],
sigma=err_hist[mask_outliers],
)
rate[index_mask[i]] = popt[0]
sigma_rate[index_mask[i]] = pcov[0, 0]
# Grab the last measurement and its error estimate
mag[index_mask[i]] = mag_hist[-1]
err_mag[index_mask[i]] = err_hist[-1]
# filter on rate. rate is 0 where f_kn is already false.
f_kn = pd.Series(np.array(rate) > 0.3)
# check the nature of close objects in SDSS catalog
if f_kn.any():
no_star = []
for i in range(sum(f_kn)):
pos = SkyCoord(
ra=np.array(ra[f_kn])[i] * u.degree,
dec=np.array(dec[f_kn])[i] * u.degree
)
# for a test on "many" objects, you may wait 1s to stay under the
# query limit.
table = SDSS.query_region(pos, fields=['type'],
radius=5 * u.arcsec)
type_close_objects = []
if table is not None:
type_close_objects = table['type']
# types: 0: UNKNOWN, 1: STAR, 2: GALAXY, 3: QSO, 4: HIZ_QSO,
# 5: SKY, 6: STAR_LATE, 7: GAL_EM
to_remove_types = [1, 3, 4, 6]
no_star.append(
len(np.intersect1d(type_close_objects, to_remove_types)) == 0
)
f_kn.loc[f_kn] = np.array(no_star, dtype=bool)
# Simplify notations
if f_kn.any():
# coordinates
b = np.array(b)[f_kn]
ra = Angle(
np.array(ra.astype(float)[f_kn]) * u.degree
).deg
dec = Angle(
np.array(dec.astype(float)[f_kn]) * u.degree
).deg
ra_formatted = Angle(ra * u.degree).to_string(
precision=2, sep=' ', unit=u.hour
)
dec_formatted = Angle(dec * u.degree).to_string(
precision=1, sep=' ', alwayssign=True
)
delta_jd_first = np.array(
jd.astype(float)[f_kn] - jdstarthist.astype(float)[f_kn]
)
# scores
rfscore = np.array(rfscore.astype(float)[f_kn])
snn_snia_vs_nonia = np.array(snn_snia_vs_nonia.astype(float)[f_kn])
snn_sn_vs_all = np.array(snn_sn_vs_all.astype(float)[f_kn])
# time
fid = np.array(fid.astype(int)[f_kn])
jd = np.array(jd)[f_kn]
# measurements
mag = mag[f_kn]
rate = rate[f_kn]
err_mag = err_mag[f_kn]
sigma_rate = sigma_rate[f_kn]
# message for candidates
for i, alertID in enumerate(objectId[f_kn]):
# Time since last detection (independently of the band)
maskNotNone = ~np.isnan(np.array(cmagpsfc[f_kn].values[i]))
jd_hist_allbands = np.array(np.array(cjdc[f_kn])[i])[maskNotNone]
delta_jd_last = jd_hist_allbands[-1] - jd_hist_allbands[-2]
# information to send
dict_filt = {1: 'g', 2: 'r'}
alert_text = """
*New kilonova candidate:* <http://134.158.75.151:24000/{}|{}>
""".format(alertID, alertID)
score_text = """
*Scores:*\n- Early SN Ia: {:.2f}\n- Ia SN vs non-Ia SN: {:.2f}\n- SN Ia and Core-Collapse vs non-SN: {:.2f}
""".format(rfscore[i], snn_snia_vs_nonia[i], snn_sn_vs_all[i])
time_text = """
*Time:*\n- {} UTC\n - Time since last detection: {:.1f} days\n - Time since first detection: {:.1f} days
""".format(Time(jd[i], format='jd').iso, delta_jd_last, delta_jd_first[i])
measurements_text = """
*Measurement (band {}):*\n- Apparent magnitude: {:.2f} ± {:.2f} \n- Rate: ({:.2f} ± {:.2f}) mag/day\n
""".format(dict_filt[fid[i]], mag[i], err_mag[i], rate[i], sigma_rate[i])
radec_text = """
*RA/Dec:*\n- [hours, deg]: {} {}\n- [deg, deg]: {:.7f} {:+.7f}
""".format(ra_formatted[i], dec_formatted[i], ra[i], dec[i])
galactic_position_text = """
*Galactic latitude:*\n- [deg]: {:.7f}""".format(b[i])
tns_text = '*TNS:* <https://www.wis-tns.org/search?ra={}&decl={}&radius=5&coords_unit=arcsec|link>'.format(ra[i], dec[i])
# message formatting
blocks = [
{
"type": "section",
"fields": [
{
"type": "mrkdwn",
"text": alert_text
},
]
},
{
"type": "section",
"fields": [
{
"type": "mrkdwn",
"text": time_text
},
{
"type": "mrkdwn",
"text": score_text
},
{
"type": "mrkdwn",
"text": radec_text
},
{
"type": "mrkdwn",
"text": measurements_text
},
{
"type": "mrkdwn",
"text": galactic_position_text
},
{
"type": "mrkdwn",
"text": tns_text
},
]
},
]
error_message = """
{} is not defined as an environment variable;
if an alert has passed the filter,
the message has not been sent to Slack.
"""
for url_name in ['KNWEBHOOK', 'KNWEBHOOK_FINK']:
if (url_name in os.environ):
requests.post(
os.environ[url_name],
json={
'blocks': blocks,
'username': 'Rate-based kilonova bot'
},
headers={'Content-Type': 'application/json'},
)
else:
log = logging.Logger('Kilonova filter')
log.warning(error_message.format(url_name))
ama_in_env = ('KNWEBHOOK_AMA_RATE' in os.environ)
# Send alerts to amateurs only on Friday
now = datetime.datetime.utcnow()
# Monday is 1 and Sunday is 7
is_friday = (now.isoweekday() == 5)
if (np.abs(b[i]) > 20) & (mag[i] < 20) & is_friday & ama_in_env:
requests.post(
os.environ['KNWEBHOOK_AMA_RATE'],
json={
'blocks': blocks,
'username': 'Rate-based kilonova bot'
},
headers={'Content-Type': 'application/json'},
)
elif not ama_in_env:
log = logging.Logger('Kilonova filter')
log.warning(error_message.format('KNWEBHOOK_AMA_RATE'))
return f_kn
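# Minimal usage sketch (all names hypothetical; in the Fink broker this
# filter is typically applied as a Spark pandas UDF, each argument being a
# pandas Series of the corresponding ZTF alert field):
#   mask = rate_based_kn_filter(objectId, rfscore, ...)  # the filter above
#   kn_candidates = alerts_df[mask.values]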
| 36.823699 | 129 | 0.553018 | 1,590 | 12,741 | 4.292453 | 0.273585 | 0.01978 | 0.014505 | 0.016117 | 0.221978 | 0.157509 | 0.104615 | 0.081758 | 0.064469 | 0.055385 | 0 | 0.016078 | 0.331214 | 12,741 | 345 | 130 | 36.930435 | 0.78465 | 0.238286 | 0 | 0.157447 | 0 | 0.021277 | 0.141146 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.004255 | false | 0.004255 | 0.06383 | 0 | 0.07234 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35dcd8a5e60a27407d41c0cf61c51f9e012a13bf | 53,426 | py | Python | Pythagoras/feature_engineering.py | kaimzhao/Pythagoras | 66669f7bf9da4c18acc280eee557738585cb8d68 | [
"MIT"
] | null | null | null | Pythagoras/feature_engineering.py | kaimzhao/Pythagoras | 66669f7bf9da4c18acc280eee557738585cb8d68 | [
"MIT"
] | null | null | null | Pythagoras/feature_engineering.py | kaimzhao/Pythagoras | 66669f7bf9da4c18acc280eee557738585cb8d68 | [
"MIT"
] | null | null | null | import pandas as pd
from copy import deepcopy
from typing import Optional, Set, List, Dict
from numpy import mean, median
from sklearn import clone
from sklearn.base import BaseEstimator
from sklearn.model_selection import cross_val_score, RepeatedKFold
from Pythagoras.util import *
from Pythagoras.logging import *
from Pythagoras.caching import *
class NotProvidedType:
not_provided_single_instance = None
def __new__(cls):
if cls.not_provided_single_instance is None:
cls.not_provided_single_instance = super().__new__(cls)
return cls.not_provided_single_instance
NotProvided = NotProvidedType()
# Workaround to ensure compatibility with Python <= 3.6:
# those versions do not support postponed evaluation of type
# annotations (PEP 563), so PEstimator is forward-declared here.
pass
class PEstimator(LoggableObject):
""" Abstract base class for all estimators (classes with fit() method).
Warning: This class should not be used directly. Use derived classes
instead.
"""
def __init__(self, * ,random_state = None, **kwargs):
kwargs["reveal_calling_method"] = kwargs.get(
"reveal_calling_method", True)
super().__init__(**kwargs)
self.set_params(random_state=random_state, **kwargs)
def get_params(self, deep=True):
if type(self) == PEstimator:
raise NotImplementedError
return dict(random_state = self.random_state)
def set_params(self, *, random_state = None, **kwards) -> PEstimator:
if type(self) == PEstimator:
raise NotImplementedError
self.random_state = random_state
return self
def _preprocess_X(self, X:pd.DataFrame) -> pd.DataFrame:
if not isinstance(X, pd.DataFrame):
X = pd.DataFrame(data=X, copy=True)
else:
X = deepcopy(X)
X.columns = [str(c) for c in X.columns]
assert len(X), "X can not be empty."
assert len(X.columns) == len(set(X.columns)), (
"Input columns must have unique names.")
X.columns = list(X.columns)
if self.input_can_have_nans is NotProvided:
self.warning("Flag input_can_have_nans was not provided.")
elif not self.input_can_have_nans:
assert X.isna().sum().sum() == 0, "NaN-s are not allowed."
X.sort_index(inplace=True)
return X
def _preprocess_y(self, y:pd.Series) -> pd.Series:
if isinstance(y, pd.Series):
y = deepcopy(y)
else:
y = pd.Series(y, copy=True)
if y.name is None:
y.name = "y_target"
assert y.isna().sum() == 0
y.sort_index(inplace=True)
return y
def start_fitting(self
,X:Any
,y:Any
,write_to_log:bool=True
) -> Tuple[pd.DataFrame,pd.Series]:
if write_to_log:
log_message = f"==> Starting fittig {type(self).__name__} "
log_message += f"using a {type(X).__name__} named < "
log_message += NeatStr.object_names(X, div_ch=" / ")
log_message += f" > with the shape {X.shape}, "
log_message += f"and a {type(y).__name__} named < "
log_message += NeatStr.object_names(y, div_ch=" / ") + " >."
self.info(log_message)
X = self._preprocess_X(X)
if y is not None:
y = self._preprocess_y(y)
assert len(X) == len(y), "X and y must have equal length."
assert set(X.index) == set(y.index)
return (X,y)
@property
def is_fitted_(self) -> bool:
raise NotImplementedError
@property
def input_columns_(self) -> List[str]:
raise NotImplementedError
@property
def input_can_have_nans(self) -> bool:
raise NotImplementedError
@property
def output_can_have_nans(self) -> bool:
raise NotImplementedError
Estimator = Union[BaseEstimator, PEstimator]
def update_param_if_supported(
estimator: Estimator
,param_name:str
,param_value:Any
) -> Estimator:
current_params = estimator.get_params()
if param_name in current_params:
new_params = {**current_params, param_name:param_value}
return type(estimator)(**new_params)
return type(estimator)(**current_params)
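# A minimal sketch (illustrative): rebuild an estimator with a new seed only
# if it actually exposes a `random_state` parameter:
#   est = update_param_if_supported(est, "random_state", 42)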
class PFeatureMaker(PEstimator):
def __init__(self, *, random_state = None, **kwargs):
super().__init__(random_state=random_state, **kwargs)
@property
def output_columns_(self) -> List[str]:
raise NotImplementedError
def start_transforming(self
, X: pd.DataFrame
, write_to_log: bool = True
) -> pd.DataFrame:
if write_to_log:
log_message = f"==> Starting generating features "
log_message += f"using a {type(X).__name__} named < "
log_message += NeatStr.object_names(X, div_ch=" / ")
log_message += f" > with the shape {X.shape}."
self.info(log_message)
assert self.is_fitted_
X = self._preprocess_X(X)
if self.input_columns_ is NotProvided:
self.warning("Attribute input_columns_ was not provided.")
else:
assert set(self.input_columns_) <= set(X)
X = deepcopy(X[self.input_columns_])
return X
def finish_transforming(self
, X: pd.DataFrame
, write_to_log: bool = True
) -> pd.DataFrame:
if write_to_log:
log_message = f"<== {len(X.columns)} features "
log_message += "have been generated/returned."
self.info(log_message)
assert len(X)
assert len(set(X.columns)) == len(X.columns)
if self.output_columns_ is NotProvided:
self.warning("Attribute output_columns_ was not provided.")
else:
assert set(X.columns) == set(self.output_columns_)
if self.output_can_have_nans is NotProvided:
self.warning("Attribute output_can_have_nans was not provided.")
elif not self.output_can_have_nans:
n_NaNs = X.isna().sum().sum()
assert n_NaNs==0, f"{n_NaNs} NaN-s found, while expecting 0"
return X
def fit_transform(self
,X:pd.DataFrame
,y:Optional[pd.Series]=None
) -> pd.DataFrame:
raise NotImplementedError
class NaN_Inducer(PFeatureMaker):
"""A transformer that adds random NaN-s to a dataset
NaN_Inducer introduces random NaN values to the features dataframe
during the fitting process. Later, when the .transform() method is called,
no NaNs are added. In other words, NaNs are only induced during the
training process, while during inference the original data are used
without modification. It is the equivalent of
a dropout layer in neural networks.
Parameters:
----------
min_nan_level : float between 0 and 1, default = 0.05
Determines the minimum level of NaN-s required for each column.
If an input column X[feature] already has more than len(X) * min_nan_level
NaNs, no new NaNs are introduced. Otherwise, NaNs are randomly added
until the column reaches the required level. This process is repeated
for all columns (features) in X.
random_state : int, RandomState instance, default=None
The seed of the pseudo random number generator.
Pass an int for reproducible output across multiple function calls.
When random_state is set to None, it disables file caching functionality
(see documentation for PickleCache for details).
Attributes:
----------
log_df_: Pandas DataFrame
a detailed report of actions, performed by NaN_Inducer during the
.fit_transform() call
transformable_columns_: list of str
names of the columns used by the .fit_transform() method
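Example:
----------
>>> # A minimal sketch (X_train / y_train / X_test are illustrative names):
>>> inducer = NaN_Inducer(min_nan_level=0.1, random_state=42)
>>> X_train_noisy = inducer.fit_transform(X_train, y_train)  # NaNs induced
>>> X_test_clean = inducer.transform(X_test)  # data returned unchanged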
"""
log_df_: Optional[pd.DataFrame]
transformable_columns_: Optional[List[str]]
min_nan_level: float
def __init__(self, *
, min_nan_level: float = 0.05
, random_state = None
, **kwargs) -> None:
super().__init__(**kwargs)
self.set_params(min_nan_level=min_nan_level
, random_state=random_state
, **kwargs)
def get_params(self, deep=True):
params = dict(min_nan_level=self.min_nan_level
, random_state = self.random_state)
return params
def set_params(self, *
, min_nan_level = None
, random_state = None
, **kwargs):
self.min_nan_level = min_nan_level
self.transformable_columns_ = None
self.log_df_ = None
self.random_state = random_state
return self
@property
def is_fitted_(self) -> bool:
return self.transformable_columns_ is not None
@property
def input_can_have_nans(self) -> bool:
return True
@property
def output_can_have_nans(self) -> bool:
return True
@property
def input_columns_(self) -> List[str]:
assert self.is_fitted_
return sorted(self.transformable_columns_)
@property
def output_columns_(self) -> List[str]:
return self.input_columns_
def fit_transform(self
, X: pd.DataFrame
, y: Optional[pd.Series] = None
) -> pd.DataFrame:
assert 0 <= self.min_nan_level < 1
type_of_x = type(X).__name__
self.log_df_ = pd.DataFrame()
(X, y) = self.start_fitting(X, y, write_to_log=False)
assert isinstance(X, pd.DataFrame)
total_nans = int(X.isna().sum().sum())
total_values = X.shape[0] * X.shape[1]
current_nan_level = total_nans / total_values
log_message = f"==> Starting adding random NaNs "
log_message += f"to a copy of a {type_of_x} "
log_message += "named < " + NeatStr.object_names(X, div_ch=" / ")
log_message += f" > with shape {X.shape}, aiming to reach "
log_message += f"{self.min_nan_level:.2%} level for each column. "
log_message += f"Currently the dataset contains {total_nans} NaN-s,"
log_message += f" which is {current_nan_level:.2%}"
log_message += f" of {total_values} values from the dataframe."
self.info(log_message)
self.transformable_columns_ = list(X.columns)
target_n_nans_per_feature = math.ceil(
self.min_nan_level * len(X))
log_line = {}
n_updated_columns = 0
for f in self.transformable_columns_:
a_column = X[f]
n_values = len(a_column)
nans = a_column.isna()
n_nans_before = nans.sum()
if n_nans_before < target_n_nans_per_feature:
n_updated_columns += 1
nans_to_add = target_n_nans_per_feature - n_nans_before
not_nans = a_column[a_column.notna()]
set_to_nan_index = not_nans.sample(
nans_to_add, random_state=self.random_state).index
X.loc[set_to_nan_index, f] = None
n_nans_after = X[f].isna().sum()
assert n_nans_after >= target_n_nans_per_feature
if n_nans_before < target_n_nans_per_feature:
n_nans_added = n_nans_after - n_nans_before
else:
n_nans_added = 0
log_line = {"Feature Name": f
, "# NaN-s Before": n_nans_before
, "# NaN-s Added": n_nans_added
, "# NaN-s After": n_nans_after
, "NaN Level Before": n_nans_before / n_values
, "NaN Level After": n_nans_after / n_values
, "total # of values": n_values}
self.log_df_ = self.log_df_.append(log_line, ignore_index=True)
if len(log_line):
self.log_df_ = self.log_df_[list(log_line)]
self.log_df_.set_index("Feature Name", inplace=True)
for c in [col for col in self.log_df_ if "#" in col]:
self.log_df_[c] = self.log_df_[c].astype(int)
total_nans = int(X.isna().sum().sum())
total_values = X.shape[0] * X.shape[1]
nan_level = total_nans / total_values
log_message = f"<== Returning a new dataframe"
log_message += f" with shape {X.shape}."
log_message += f" NaN-s were added to {n_updated_columns} columns."
log_message += f" The resulting dataset contains {total_nans} NaN-s,"
log_message += f" which is {nan_level:.2%}"
log_message += f" of {total_values} values from the new dataframe."
self.info(log_message)
return self.finish_transforming(X, write_to_log=False)
def transform(self
, X: pd.DataFrame
) -> pd.DataFrame:
X = self.start_transforming(X)
log_message = f"<==Returning exactly the same data with no changes."
self.info(log_message)
return self.finish_transforming(X, write_to_log=False)
class Deduper(PFeatureMaker):
"""A transformer that removes duplicated columns (features)
Deduper identifies duplicated columns in the dataframe seen during
the fitting process and removes them.
The same columns are later removed
when the .transform() method is called.
Parameters:
----------
keep : str, default = "first"
Determines which duplicates to keep.
- first : Drop duplicates except for the first occurrence.
- last : Drop duplicates except for the last occurrence.
allow_nans : bool, default = False
Determines whether NaN values are allowed
in the input/output of the transformer.
random_state : int, RandomState instance, default=None
The seed of the pseudo random number generator.
Random number generator is not directly used by the Deduper;
however, the parameter is present for compatibility with the PEstimator
class. When random_state is set to None,
it disables file caching functionality
(see documentation for PickleCache for details).
Attributes:
----------
columns_to_keep_ : list of str
list of column names to keep
columns_to_drop_ : list of str
list of column names to delete
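Example:
----------
>>> # A minimal sketch (X_train / X_test are illustrative names):
>>> deduper = Deduper(keep="first")
>>> X_unique = deduper.fit_transform(X_train)
>>> X_test_unique = deduper.transform(X_test)  # drops the same columns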
"""
keep: str
allow_nans: bool
columns_to_keep_: List[str]
columns_to_drop_: List[str]
def __init__(self
, keep: str = "first"
, allow_nans: bool = False
, random_state = None
, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.set_params(keep=keep
, allow_nans=allow_nans
, random_state=random_state)
def get_params(self, deep=True):
params = dict(keep=self.keep
, allow_nans=self.allow_nans
, random_state = self.random_state)
return params
def set_params(self , *
, keep = None
, allow_nans = None
, random_state = None
,**kwargs
) -> PFeatureMaker:
self.keep = keep
self.allow_nans = allow_nans
self.random_state = random_state
self.columns_to_keep_ = []
self.columns_to_drop_ = []
return self
@property
def is_fitted_(self) -> bool:
return bool(len(self.columns_to_keep_))
@property
def input_can_have_nans(self) -> bool:
return self.allow_nans
@property
def output_can_have_nans(self) -> bool:
return self.allow_nans
@property
def input_columns_(self) -> List[str]:
assert self.is_fitted_
return sorted(self.columns_to_keep_ + self.columns_to_drop_)
@property
def output_columns_(self) -> List[str]:
return sorted(self.columns_to_keep_)
def fit_transform(self
, X: pd.DataFrame
, y: Optional[pd.Series] = None
) -> pd.DataFrame:
assert self.keep in {"first", "last"}
X, y = self.start_fitting(X, y)
X_dd = X.T.drop_duplicates(keep=self.keep).T
self.columns_to_keep_ = list(X_dd.columns)
self.columns_to_drop_ = list(set(X.columns) - set(X_dd.columns))
log_message = f"{len(self.columns_to_drop_)}"
log_message += f" duplicate features have been removed, "
log_message += f"{len(self.columns_to_keep_)} unique features left."
self.info(log_message)
return self.finish_transforming(X_dd)
def transform(self
, X: pd.DataFrame
) -> pd.DataFrame:
X = self.start_transforming(X)
log_message = f"{len(self.columns_to_drop_)}"
log_message += f" duplicate features have been removed, "
log_message += f"{len(self.columns_to_keep_)} unique features left."
self.info(log_message)
return self.finish_transforming(X[self.output_columns_])
class NumericImputer(PFeatureMaker):
"""A transformer that creates NaN-less versions of numeric columns"""
imputation_aggr_funcs: Optional[List[Any]]
fill_values_: Optional[pd.DataFrame]
def __init__(self, *
, imputation_aggr_funcs= (
np.min, np.max, percentile50, minmode, maxmode)
, random_state = None
, **kwargs
) -> None:
super().__init__(**kwargs)
self.set_params(
imputation_aggr_funcs = imputation_aggr_funcs
, random_state = random_state
, **kwargs)
def get_params(self, deep=True):
params = dict(imputation_aggr_funcs=self.imputation_aggr_funcs
, random_state = self.random_state)
return params
def set_params(self, *
, imputation_aggr_funcs = None
, random_state = None
, **kwargs
) -> PFeatureMaker:
self.imputation_aggr_funcs = imputation_aggr_funcs
self.random_state = random_state
self.fill_values_ = None
return self
@property
def is_fitted_(self) -> bool:
return self.fill_values_ is not None
@property
def input_can_have_nans(self) -> bool:
return True
@property
def output_can_have_nans(self) -> bool:
return False
@property
def input_columns_(self) -> List[str]:
assert self.is_fitted_
return sorted(self.fill_values_.columns)
@property
def output_columns_(self) -> List[str]:
all_columns = []
for col in self.input_columns_:
for f in self.imputation_aggr_funcs:
label = f.__name__
column_name = "fillna_" + label + "(" + col + ")"
all_columns += [column_name]
return sorted(all_columns)
def fit_transform(self
, X: pd.DataFrame
, y: Optional[pd.Series] = None
) -> pd.DataFrame:
for f in self.imputation_aggr_funcs:
assert callable(f)
type_of_X = type(X).__name__
X, y = self.start_fitting(X, y, write_to_log=False)
X_num = X.select_dtypes(include="number")
num_nans = int(X_num.isna().sum().sum())
aggr_func_names = [f.__name__ for f in self.imputation_aggr_funcs]
n_func = len(aggr_func_names)
log_message = f"==> Starting removing NaNs from "
log_message += f"{len(X_num.columns)} numeric columns of a {type_of_X}"
log_message += " named < " + NeatStr.object_names(X, div_ch=" / ")
log_message += f" > with shape {X.shape}. "
log_message += f"Currently, the numeric columns of a dataset"
log_message += f" contain {num_nans} NaN-s. "
log_message += f"Each numeric columns will be replaced with "
log_message += f"{n_func} new ones, with imputation performed "
log_message += f"using the following functions: {aggr_func_names}."
self.info(log_message)
aggregations = {}
for col in X_num:
aggregations[col] = [f(X_num[col]) for f in self.imputation_aggr_funcs]
self.fill_values_ = pd.DataFrame(
data=aggregations, index=aggr_func_names)
self.log_df_ = self.fill_values_
return self.transform(X_num)
def transform(self
, X: pd.DataFrame
) -> pd.DataFrame:
X = self.start_transforming(X, write_to_log=False)
X_num = X.select_dtypes(include="number")[self.input_columns_]
num_nans = X_num.isna().sum().sum()
all_columns = []
for col in X_num.columns:
for f in self.imputation_aggr_funcs:
label = f.__name__
f_val = self.fill_values_.at[label, col]
filled_column = X_num[col].fillna(value=f_val)
filled_column.name = "fillna_" + label + "(" + col + ")"
all_columns += [filled_column]
result = pd.concat(all_columns, axis=1)
log_message = f"<== Returning a new, numeric-only dataframe"
log_message += f" with shape {result.shape}."
log_message += f" {num_nans} original NaN-s were removed"
log_message += f" by applying {len(self.imputation_aggr_funcs)}"
log_message += f" imputation functions."
self.info(log_message)
return self.finish_transforming(result, write_to_log=False)
class NumericFuncTransformer(PFeatureMaker):
"""A transformer that applies math functions to numeric features"""
columns_to_a_transform_: Optional[List[str]]
columns_to_p_transform_: Optional[List[str]]
positive_arg_num_functions: List[Any]
any_arg_num_functions: List[Any]
def __init__(self, *
, positive_arg_num_functions=(power_m1_1p, np.log1p, root_2, power_2)
, any_arg_num_functions=(passthrough, power_3)
, random_state = None
, **kwargs) -> None:
super().__init__(**kwargs)
self.set_params(
positive_arg_num_functions=positive_arg_num_functions
, any_arg_num_functions=any_arg_num_functions
, random_state=random_state, **kwargs)
def get_params(self, deep=True):
params = dict(positive_arg_num_functions=self.positive_arg_num_functions
, any_arg_num_functions=self.any_arg_num_functions
, random_state = self.random_state)
return params
def set_params(self
, positive_arg_num_functions = None
, any_arg_num_functions = None
, random_state = None
, **kwargs
) -> PFeatureMaker:
if positive_arg_num_functions is not None:
for f in positive_arg_num_functions + any_arg_num_functions:
assert callable(f)
self.positive_arg_num_functions = positive_arg_num_functions
self.any_arg_num_functions = any_arg_num_functions
self.random_state = random_state
self.columns_to_p_transform_ = None
self.columns_to_a_transform_ = None
return self
@property
def is_fitted_(self):
result = (self.columns_to_a_transform_ is not None
and self.columns_to_p_transform_ is not None)
return result
@property
def input_can_have_nans(self) -> bool:
return True
@property
def output_can_have_nans(self) -> bool:
return True
@property
def input_columns_(self) -> List[str]:
assert self.is_fitted_
return sorted(set(self.columns_to_a_transform_)
| set(self.columns_to_p_transform_))
@property
def output_columns_(self) -> List[str]:
all_columns = []
for a_func in self.any_arg_num_functions:
f_columns = [a_func.__name__ + "(" + c + ")"
for c in self.columns_to_a_transform_]
all_columns += f_columns
for p_func in self.positive_arg_num_functions:
f_columns = [p_func.__name__ + "(" + c + ")"
for c in self.columns_to_p_transform_]
all_columns += f_columns
return sorted(all_columns)
def fit_transform(self
, X: pd.DataFrame
, y: Optional[pd.Series] = None
) -> pd.DataFrame:
(X, y) = self.start_fitting(X, y)
self.columns_to_p_transform_ = None
self.columns_to_a_transform_ = None
X_numbers = X.select_dtypes(include="number")
assert len(X_numbers.columns)
self.columns_to_a_transform_ = list(X_numbers.columns)
feature_mins = X_numbers.min()
p_transformable_features = feature_mins[feature_mins >= 0]
self.columns_to_p_transform_ = list(p_transformable_features.index)
result = self.transform(X)
return result
def transform(self
, X: pd.DataFrame
) -> pd.DataFrame:
all_funcs = self.positive_arg_num_functions + self.any_arg_num_functions
all_funcs = [f.__name__ for f in all_funcs]
X_numbers = self.start_transforming(
X, write_to_log=False).select_dtypes("number")
log_message = f"==> Starting generating features "
log_message += f"using a {type(X).__name__} named < "
log_message += NeatStr.object_names(X, div_ch=" / ")
log_message += f" > with the shape {X.shape} and the following "
log_message += f"{len(all_funcs)} functions: {all_funcs}."
self.info(log_message)
all_transformations = []
for a_func in self.any_arg_num_functions:
X_new = a_func(X_numbers)
X_new.columns = [a_func.__name__ + "(" + c + ")" for c in X_new]
all_transformations += [X_new]
if len(self.columns_to_p_transform_):
X_positive_numbers = deepcopy(
X_numbers[self.columns_to_p_transform_])
negative_flags = (X_positive_numbers < 0)
below_zero = negative_flags.sum().sum()
X_positive_numbers[negative_flags] = 0
if below_zero > 0:
log_message = f"{below_zero} negative values were found in "
log_message += "the features, scheduled for transformation "
log_message += "via functions that expect positive input "
log_message += "values. Negatives will be replaced "
log_message += "with zeros."
self.warning(log_message)
for p_func in self.positive_arg_num_functions:
X_new = p_func(X_positive_numbers)
X_new.columns = [p_func.__name__ + "(" + c + ")" for c in
X_new]
all_transformations += [X_new]
result = pd.concat(all_transformations, axis=1)
return self.finish_transforming(result)
class CatSelector(PFeatureMaker):
""" Abstract base class that finds categorical features.
Warning: This class should not be used directly. Use derived classes
instead.
"""
min_cat_size: int
max_uniques_per_cat: int
cat_columns_: Optional[Set[str]]
cat_values_: Optional[Dict[str, Set[str]]]
def __init__(self
, *
, min_cat_size: int = 20
, max_uniques_per_cat: int = 100
, random_state = None
, **kwargs) -> None:
super().__init__( **kwargs)
self.set_params(min_cat_size=min_cat_size
, max_uniques_per_cat=max_uniques_per_cat
, random_state = random_state)
def get_params(self, deep=True):
params = dict(min_cat_size = self.min_cat_size
, max_uniques_per_cat = self.max_uniques_per_cat
, random_state = self.random_state)
return params
def set_params(self, *
, min_cat_size = None
, max_uniques_per_cat = None
, random_state = None
, **kwards) -> PFeatureMaker:
self.min_cat_size = min_cat_size
self.max_uniques_per_cat = max_uniques_per_cat
self.random_state = random_state
self.cat_columns_ = None
self.cat_values_ = None
return self
def start_fitting(self
, X: Any
, y: Any
, write_to_log: bool = True
) -> Tuple[pd.DataFrame,pd.Series]:
X, y = super().start_fitting(X, y, write_to_log)
uniques = X.nunique()
uniques = uniques[uniques <= self.max_uniques_per_cat]
self.cat_columns_ = set(uniques.index)
self.cat_values_ = dict()
for c in self.cat_columns_:
uniques = X[c].value_counts()
uniques = uniques[uniques >= self.min_cat_size]
self.cat_values_[c] = set(uniques.index)
if len(self.cat_values_[c]) == 0:
del self.cat_values_[c]
self.cat_columns_ = set(self.cat_values_)
X = deepcopy(X[self.cat_columns_])
for col in X:
nan_idx = ~ X[col].isin(self.cat_values_[col])
X.loc[nan_idx, col] = None
return X,y
def start_transforming(self
, X: pd.DataFrame
, write_to_log: bool = True
) -> pd.DataFrame:
X = super().start_transforming(X, write_to_log)
for col in X:
nan_idx = ~ X[col].isin(self.cat_values_[col])
X.loc[nan_idx, col] = None
return X
class TargetMultiEncoder(CatSelector):
""" A transformer for target-encoding categorical features"""
tme_aggr_funcs: List[Any]
tme_cat_values_: Optional[Dict[str, pd.DataFrame]]
tme_default_values_: Optional[Dict[str, float]]
nan_string:str
def __init__(self, *
, min_cat_size=20
, max_uniques_per_cat=100
, tme_aggr_funcs=(
percentile01
, percentile25
, percentile50
, percentile75
, percentile99
, minmode
, maxmode)
, random_state = None
, **kwargs
) -> None:
super().__init__(**kwargs)
self.set_params(min_cat_size=min_cat_size
, max_uniques_per_cat=max_uniques_per_cat
, tme_aggr_funcs=tme_aggr_funcs
, random_state=random_state)
def get_params(self, deep=True):
params = super().get_params(deep)
params["tme_aggr_funcs"] = self.tme_aggr_funcs
return params
def set_params(self
, min_cat_size=None
, max_uniques_per_cat=None
, tme_aggr_funcs = None
, random_state = None
, **kwargs
) -> CatSelector:
super().set_params(min_cat_size=min_cat_size
, max_uniques_per_cat=max_uniques_per_cat
, random_state=random_state, **kwargs)
self.tme_aggr_funcs = tme_aggr_funcs
self.tme_cat_values_ = None
self.tme_default_values_ = None
self.nan_string: str = "<<<<-----TME-NaN----->>>>"
return self
@property
def is_fitted_(self):
return self.tme_default_values_ is not None
@property
def input_can_have_nans(self) -> bool:
return True
@property
def output_can_have_nans(self) -> bool:
return False
@property
def input_columns_(self) -> List[str]:
return sorted(self.tme_cat_values_)
@property
def output_columns_(self) -> List[str]:
assert self.is_fitted_
return sorted([self.tme_column_name(f, c)
for c in self.tme_cat_values_ for f in
self.tme_aggr_funcs])
def tme_column_name(self, func, column: str) -> str:
if callable(func):
func = func.__name__
name = "targ_enc_" + func + "(" + str(column) + ")"
return name
def convert_X(self, X: pd.DataFrame) -> pd.DataFrame:
assert set(X.columns) == set(self.cat_columns_)
for cat in X:
X[cat] = X[cat].astype("object")
X.fillna(self.nan_string, inplace=True)
for cat in self.cat_values_:
self.cat_values_[cat] |= {self.nan_string}
nan_idx = ~ (X[cat].isin(self.cat_values_[cat]))
X.loc[nan_idx, cat] = self.nan_string
return X
def fit_transform(self
, X: pd.DataFrame
, y: pd.Series
) -> pd.DataFrame:
X, y = self.start_fitting(X, y)
X = self.convert_X(X)
assert len(X) == len(y)
log_message = f"A total of {len(X.columns)} features "
log_message += f"will be encoded using {len(self.tme_aggr_funcs)} "
log_message += f"functions: {[f.__name__ for f in self.tme_aggr_funcs]}."
self.info(log_message)
columns = deepcopy(X.columns)
taget_name = "TAGET_" + y.name + "_TARGET"
assert taget_name not in columns
X[taget_name] = y
self.tme_default_values_ = {}
self.tme_cat_values_ = {}
for f in self.tme_aggr_funcs:
self.tme_default_values_[f] = f(X[target_name])
for col in columns:
v = pd.pivot_table(X[[col, target_name]]
, values=target_name
, index=col
, aggfunc=list(self.tme_aggr_funcs)
, dropna=False)
v = v.astype(float)
n_nans = v.isna().sum().sum()
if n_nans:
log_message = f"Got {n_nans} NaN-s while generating "
log_message += f"target encoding values for {col}."
log_message += " Replacing with default values."
self.warning(log_message)
for i in range(len(self.tme_aggr_funcs)):
a_func = self.tme_aggr_funcs[i]
def_value = self.tme_default_values_[a_func]
v[v.columns[i]] = v[v.columns[i]].fillna(def_value)
v.at[self.nan_string,v.columns[i]] = def_value
v.columns = [
self.tme_column_name(c[0], col) for c in v.columns]
self.tme_cat_values_[col] = v
X.drop(columns=target_name, inplace=True)
result = self.transform(X)
return result
def transform(self
, X: pd.DataFrame
) -> pd.DataFrame:
X = self.start_transforming(X)
X = self.convert_X(X)
columns = deepcopy(X.columns)
for col in X.columns:
index_col_name = "____>>__INDEX_<<_____"+str(id(self))
X[index_col_name] = X.index
X = X.merge(self.tme_cat_values_[col], on=col, how="inner")
X.index = X[index_col_name]
X.drop(columns=index_col_name, inplace = True)
for i in range(len(self.tme_aggr_funcs)):
a_func = self.tme_aggr_funcs[i]
a_column = self.tme_column_name(a_func, col)
def_value = self.tme_default_values_[a_func]
n_nans = X[a_column].isna().sum()
if n_nans:
log_message = f"Found {n_nans} NaN-s in column {a_column}"
log_message += f" after replacing know values"
log_message += f" with targed-encodings,"
log_message += f" filling NaN-s with default value."
self.warning(log_message)
X[a_column] = X[a_column].fillna(def_value)
X.drop(columns=col, inplace=True)
return self.finish_transforming(X)
class LOOMeanTargetEncoder(CatSelector):
"""Leave-One-Out Mean Target Encoder for categorical features"""
encodable_columns_: Optional[Set[str]]
sums_counts_: Optional[Dict[str, Dict[str, float]]]
nan_string:str
def __init__(self
, min_cat_size: int = 20
, max_uniques_per_cat: int = 100
, random_state = None
, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.set_params(min_cat_size=min_cat_size
, max_uniques_per_cat=max_uniques_per_cat
, random_state=random_state)
def get_params(self, deep=True):
params = super().get_params(deep)
return params
def set_params(self
, min_cat_size = None
, max_uniques_per_cat = None
, random_state = None
, **kwargs):
super().set_params(min_cat_size=min_cat_size
, max_uniques_per_cat=max_uniques_per_cat
, random_state= random_state
, **kwargs)
self.sums_counts_ = None
self.encodable_columns_ = None
self.nan_string: str = "<<<<-----LOO-NaN----->>>>"
return self
@property
def is_fitted_(self):
return self.sums_counts_ is not None
@property
def input_can_have_nans(self) -> bool:
return True
@property
def output_can_have_nans(self) -> bool:
return False
@property
def input_columns_(self) -> List[str]:
return sorted(self.encodable_columns_)
@property
def output_columns_(self) -> List[str]:
return sorted(["LOOMean(" + c + ")" for c in self.input_columns_])
def fit_transform(self
, X: pd.DataFrame
, y: pd.Series
) -> pd.DataFrame:
X, y = self.start_fitting(X, y)
# X.fillna(self.nan_string, inplace=True)
self.sums_counts_ = dict()
for c in self.cat_columns_:
self.sums_counts_[c] = dict()
for v in set(self.cat_values_[c] ):
ix = (X[c] == v)
self.sums_counts_[c][v] = (y[ix].sum(), ix.sum())
self.sums_counts_[c][self.nan_string] = (y.sum(),len(y))
X = X[self.cat_columns_]
nontrivial = X.nunique()
nontrivial = nontrivial[nontrivial > 1]
self.encodable_columns_ = set(nontrivial.index)
to_delete = set(self.cat_columns_) - set(self.encodable_columns_)
for c in to_delete:
del self.sums_counts_[c]
for c in self.sums_counts_:
vals = np.full(len(X), np.nan)
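# Leave-one-out mean: a row with category value v is encoded as
# (sum_v - y_i) / (count_v - 1), i.e. the target mean over all *other*
# rows sharing the value, which avoids leaking the row's own target.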
for cat_val, sum_count in self.sums_counts_[c].items():
if cat_val != self.nan_string:
ix = (X[c] == cat_val)
else:
ix = (X[c].isna())
vals[ix] = (sum_count[0] - y[ix]) / (sum_count[1] - 1)
X[c] = vals
X = X[self.encodable_columns_]
X.columns = ["LOOMean(" + c + ")" for c in X.columns]
return self.finish_transforming(X)
def transform(self
, X: pd.DataFrame
) -> pd.DataFrame:
X = self.start_transforming(X)
for c in self.input_columns_:
vals = np.full(len(X), np.nan)
for cat_val, sum_count in self.sums_counts_[c].items():
if cat_val != self.nan_string:
vals[X[c] == cat_val] = sum_count[0] / sum_count[1]
else:
vals[X[c].isna()] = sum_count[0] / sum_count[1]
X[c] = vals
X.columns = ["LOOMean(" + c + ")" for c in X.columns]
self.error(f"X contains {X.isna().sum().sum()} NaNs")
return self.finish_transforming(X)
class DummiesMaker(CatSelector):
""" A tramsformer that creates dummies for categorical features"""
dummy_names_: Optional[str]
def __init__(self
, min_cat_size: int = 20
, max_uniques_per_cat: int = 100
, random_state = None
, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.set_params(min_cat_size=min_cat_size
, max_uniques_per_cat=max_uniques_per_cat
, random_state=random_state)
def get_params(self, deep=True):
params = super().get_params(deep)
return params
def set_params(self
, min_cat_size = None
, max_uniques_per_cat = None
, random_state = None
, **kwargs):
super().set_params(min_cat_size=min_cat_size
, max_uniques_per_cat=max_uniques_per_cat
, random_state=random_state
, **kwargs)
self.dummy_names_ = None
return self
@property
def is_fitted_(self):
return self.dummy_names_ is not None
@property
def input_can_have_nans(self) -> bool:
return True
@property
def output_can_have_nans(self) -> bool:
return False
@property
def input_columns_(self) -> List[str]:
return sorted(self.cat_columns_)
@property
def output_columns_(self) -> List[str]:
return sorted(self.dummy_names_)
def _get_dummies(self, feature: pd.Series) -> pd.DataFrame:
all_dummies = []
new_dummy = feature.isna().astype(int)
new_dummy.name = f"{feature.name}==eNaN"
all_dummies += [new_dummy]
for val in self.cat_values_[feature.name]:
new_dummy = (feature == val).astype(int)
new_dummy.name = f"{feature.name}=={str(val)}"
all_dummies += [new_dummy]
result = pd.concat(all_dummies, axis=1)
return result
def fit_transform(self
, X: pd.DataFrame
, y=None
) -> pd.DataFrame:
X, y = self.start_fitting(X, y)
all_dummies = []
for col in self.cat_columns_:
all_dummies += [self._get_dummies(X[col])]
result = pd.concat(all_dummies, axis=1)
self.dummy_names_ = list(result.columns)
return self.finish_transforming(result)
def transform(self
, X: pd.DataFrame
) -> pd.DataFrame:
X = self.start_transforming(X)
all_dummies = []
for col in self.cat_columns_:
all_dummies += [self._get_dummies(X[col])]
result = pd.concat(all_dummies, axis=1)
return self.finish_transforming(result)
class RectifierSplitter(PFeatureMaker):
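"""A transformer that rectifies numeric features at percentile thresholds
For each numeric column and each percentile threshold, four new columns
are emitted: 0/1 above/below indicators, plus the column clipped at the
threshold from below and from above (hinge-style features).
"""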
split_percentiles: List[int]
is_fitted_flag_: bool
numeric_columns_: Optional[List[str]]
generated_columns_: Optional[List[str]]
percentiles_values_: Optional[Dict[str, List[float]]]
def __init__(self
, split_percentiles=(1, 25, 50, 75, 99)
, random_state = None
, *args
, **kwargs
) -> None:
super().__init__(*args, **kwargs)
self.set_params(
split_percentiles=split_percentiles
,random_state=random_state)
def get_params(self, deep=True):
params = dict(split_percentiles=self.split_percentiles
, random_state = self.random_state)
return params
def set_params(self, *
, split_percentiles = None
, random_state = None
,**kwargs):
self.split_percentiles = split_percentiles
self.random_state = random_state
self.is_fitted_flag_ = False
self.numeric_columns_ = None
self.generated_columns_ = None
self.percentiles_values_ = None
return self
@property
def is_fitted_(self):
return self.is_fitted_flag_
@property
def input_can_have_nans(self) -> bool:
return False
@property
def output_can_have_nans(self) -> bool:
return False
@property
def input_columns_(self) -> List[str]:
assert self.is_fitted_
return sorted(self.numeric_columns_)
@property
def output_columns_(self) -> List[str]:
assert self.is_fitted_
return self.generated_columns_
def fit_transform(self
, X: pd.DataFrame
, y: pd.Series
) -> pd.DataFrame:
assert len(self.split_percentiles)
for p in self.split_percentiles:
assert 0 < p < 100
X, y = self.start_fitting(X, y)
self.percentiles_values_ = dict()
X_num = X.select_dtypes("number")
self.numeric_columns_ = X_num.columns
for col in X_num:
column_percentiles = []
for p in self.split_percentiles:
column_percentiles += [ np.nanpercentile(X_num[col], p)]
self.percentiles_values_[col] = sorted(column_percentiles)
self.is_fitted_flag_ = True
result = self.transform(X, generate_column_names=True)
return result
def transform(self
, X: pd.DataFrame
, generate_column_names=False
) -> pd.DataFrame:
X = self.start_transforming(X)
for col in X:
for threshold in self.percentiles_values_[col]:
above_idx = (X[col] >= threshold).astype(int)
new_col_name = f"{col} >= {threshold}"
X[new_col_name] = above_idx
below_idx = (X[col] < threshold).astype(int)
new_col_name = f"{col} < {threshold}"
X[new_col_name] = below_idx
above_values = above_idx * X[col] + below_idx * threshold
new_col_name = f"{col} if ({col} >= {threshold})"
new_col_name += f" else {threshold}"
X[new_col_name] = above_values
below_values = below_idx * X[col] + above_idx * threshold
new_col_name = f"{col} if ({col} < {threshold})"
new_col_name += f" else {threshold}"
X[new_col_name] = below_values
X.drop(columns=[col], inplace=True)
if generate_column_names:
self.generated_columns_ = sorted(X.columns)
return self.finish_transforming(X)
class FeatureShower(PFeatureMaker):
""" A transformer that creates large number of various new features"""
is_fitted_flag_: bool
def __init__(self, *,
min_nan_level: float = 0.05
, min_cat_size: int = 20
, max_uniques_per_cat: int = 100
, positive_arg_num_functions=(
power_m1_1p, np.log1p, root_2, power_2)
, any_arg_num_functions=(passthrough, power_3)
, imputation_aggr_funcs = (
np.min, np.max, percentile50, minmode, maxmode)
, tme_aggr_funcs = (percentile01
, percentile25
, percentile50
, percentile75
, percentile99
, minmode
, maxmode)
, split_percentiles=(1, 25, 50, 75, 99)
, random_state = None
, **kwargs) -> None:
super().__init__(**kwargs)
self.set_params(
min_nan_level=min_nan_level
, min_cat_size = min_cat_size
, max_uniques_per_cat= max_uniques_per_cat
, positive_arg_num_functions = positive_arg_num_functions
, any_arg_num_functions=any_arg_num_functions
, imputation_aggr_funcs = imputation_aggr_funcs
, tme_aggr_funcs = tme_aggr_funcs
, split_percentiles = split_percentiles
, random_state = random_state
, **kwargs)
def set_params(self, *
, min_nan_level = None
, min_cat_size = None
, max_uniques_per_cat = None
, positive_arg_num_functions = None
, any_arg_num_functions = None
, imputation_aggr_funcs = None
, tme_aggr_funcs = None
, split_percentiles = None
, random_state = None
, deep: bool = False
, **kwargs) -> PFeatureMaker:
self.random_state = random_state
self.nan_inducer = NaN_Inducer(
min_nan_level=min_nan_level
,random_state = random_state)
self.dummies_maker = DummiesMaker(
min_cat_size = min_cat_size
,max_uniques_per_cat = max_uniques_per_cat
,random_state = random_state)
self.numeric_func_trnsf = NumericFuncTransformer(
positive_arg_num_functions = positive_arg_num_functions
,any_arg_num_functions = any_arg_num_functions
,random_state = random_state)
self.numeric_imputer = NumericImputer(
imputation_aggr_funcs = imputation_aggr_funcs
,random_state = random_state)
self.target_multi_encoder = TargetMultiEncoder(
min_cat_size = min_cat_size
,max_uniques_per_cat = max_uniques_per_cat
,tme_aggr_funcs = tme_aggr_funcs
,random_state = random_state)
self.rectifier_splitter = RectifierSplitter(
split_percentiles = split_percentiles
,random_state = random_state)
self.deduper = Deduper(random_state = random_state)
self.is_fitted_flag_ = False
return self
def get_params(self, deep: bool = False) -> Dict[str, Any]:
params = dict(
min_nan_level = self.nan_inducer.min_nan_level
,min_cat_size = self.dummies_maker.min_cat_size
,max_uniques_per_cat = self.dummies_maker.max_uniques_per_cat
,positive_arg_num_functions = self.numeric_func_trnsf.positive_arg_num_functions
,any_arg_num_functions = self.numeric_func_trnsf.any_arg_num_functions
,imputation_aggr_funcs = self.numeric_imputer.imputation_aggr_funcs
,tme_aggr_funcs = self.target_multi_encoder.tme_aggr_funcs
,split_percentiles = self.rectifier_splitter.split_percentiles
,random_state = self.random_state)
return params
@property
def is_fitted_(self):
return self.is_fitted_flag_
@property
def input_columns_(self) -> List[str]:
return self.nan_inducer.input_columns_
@property
def output_columns_(self) -> List[str]:
return self.deduper.output_columns_
@property
def input_can_have_nans(self) -> bool:
return True
@property
def output_can_have_nans(self) -> bool:
return False
def fit_transform(self
, X:pd.DataFrame
, y:pd.Series
) -> pd.DataFrame:
X, y = self.start_fitting(X, y)
X_with_NaNs = self.nan_inducer.fit_transform(X, y)
X_numeric_tr = self.numeric_func_trnsf.fit_transform(X_with_NaNs, y)
X_numeric_no_NaNs = self.numeric_imputer.fit_transform(X_numeric_tr, y)
X_target_encoded_cats = self.target_multi_encoder.fit_transform(
X_with_NaNs, y)
X_dummies = self.dummies_maker.fit_transform(X_with_NaNs, y)
X_full = pd.concat(
[X_numeric_no_NaNs, X_target_encoded_cats, X_dummies], axis=1)
per50_cols = [c for c in X_full.columns if "percentile50" in c]
targ_enc_cols = [c for c in per50_cols if "targ_enc" in c]
passthrough_cols = [c for c in per50_cols if "passthrough" in c]
ps_cols = targ_enc_cols + passthrough_cols
X_rs = self.rectifier_splitter.fit_transform(X_full[ps_cols],y)
X_pre_final = pd.concat([X_full, X_rs], axis = 1)
X_final = self.deduper.fit_transform(X_pre_final, y)
self.is_fitted_flag_ = True
return self.finish_transforming(X_final)
def transform(self, X):
X = self.start_transforming(X)
X_with_NaNs = self.nan_inducer.transform(X)
X_numeric_tr = self.numeric_func_trnsf.transform(X_with_NaNs)
X_numeric_no_NaNs = self.numeric_imputer.transform(X_numeric_tr)
X_target_encoded_cats = self.target_multi_encoder.transform(
X_with_NaNs)
X_dummies = self.dummies_maker.transform(X_with_NaNs)
X_full = pd.concat(
[X_numeric_no_NaNs, X_target_encoded_cats, X_dummies], axis=1)
per50_cols = [c for c in X_full.columns if "percentile50" in c]
targ_enc_cols = [c for c in per50_cols if "targ_enc" in c]
passthrough_cols = [c for c in per50_cols if "passthrough" in c]
ps_cols = targ_enc_cols + passthrough_cols
X_rs = self.rectifier_splitter.transform(X_full[ps_cols])
X_pre_final = pd.concat([X_full, X_rs], axis=1)
X_final = self.deduper.transform(X_pre_final)
return self.finish_transforming(X_final)
| 34.007638 | 92 | 0.586325 | 6,550 | 53,426 | 4.459389 | 0.070687 | 0.038413 | 0.020713 | 0.020816 | 0.642816 | 0.575747 | 0.517854 | 0.468623 | 0.420726 | 0.386388 | 0 | 0.004225 | 0.322203 | 53,426 | 1,570 | 93 | 34.029299 | 0.802386 | 0.063415 | 0 | 0.551784 | 0 | 0 | 0.066052 | 0.008475 | 0 | 0 | 0 | 0 | 0.02698 | 1 | 0.100087 | false | 0.006092 | 0.008703 | 0.030461 | 0.234117 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35dce94a6594c6e9e0222cdb24738ba16887fe9f | 3,932 | py | Python | lefci/api.py | reo-git/lefci | 2c7e2e887e5f047211c99138dabba96df9653122 | [
"MIT"
] | null | null | null | lefci/api.py | reo-git/lefci | 2c7e2e887e5f047211c99138dabba96df9653122 | [
"MIT"
] | 2 | 2021-10-06T16:47:42.000Z | 2022-01-22T12:02:24.000Z | lefci/api.py | reo-git/lefci | 2c7e2e887e5f047211c99138dabba96df9653122 | [
"MIT"
] | null | null | null | from http import HTTPStatus
from flask_restful import Api, Resource, request
from lefci import app, model
from lefci.deploy import deploy
api = Api(app)
state = model.State()
def create_report(message, status=model.Status.OK):
report = model.Report(message, status)
report_with_source = model.ReportBySource(report, 'api')
return report_with_source.encode()
def error_report(message):
return create_report(message, model.Status.ERROR)
class Configs(Resource):
def get(self, name=None):
if name:
return state.get_config(name).encode()
else:
return state.saved_configs
def post(self):
config_raw = request.get_json()['config']
try:
config = model.Config(**config_raw)
state.save_config(config)
except Exception as e:
return error_report(str(e)), HTTPStatus.BAD_REQUEST.value
return create_report('Current configuration saved'), HTTPStatus.OK.value
def put(self, name):
try:
config = state.get_config(name)
except Exception as e:
return error_report(str(e)), HTTPStatus.NOT_FOUND.value
data = request.get_json()
deploy(config, data['server'])
return create_report(f'Configuration {name} deployed'), HTTPStatus.OK.value
def delete(self, name):
try:
state.delete_config(name)
except Exception as e:
return error_report(str(e)), HTTPStatus.NOT_FOUND.value
return create_report(f'Configuration {name} deleted'), HTTPStatus.OK.value
class Trees(Resource):
def get(self, name, uuid=None):
try:
config = state.get_config(name)
except Exception as e:
return error_report(str(e)), HTTPStatus.NOT_FOUND.value
if uuid:
tree = config.find_tree(uuid)
return tree.encode(), HTTPStatus.OK.value
else:
return [tree.encode() for tree in config.log_trees]
def put(self, name, uuid):
try:
config = state.get_config(name)
except Exception as e:
return error_report(str(e)), HTTPStatus.NOT_FOUND.value
node = config.find_tree(uuid)
if node:
data = request.get_json()
node.update_config(**data)
verify_reports = config.verify_node(node)
return verify_reports.encode(), HTTPStatus.OK.value
else:
return error_report(f'No node with {uuid} found!'), HTTPStatus.NOT_FOUND.value
def post(self, name, uuid=None):
try:
config = state.get_config(name)
except Exception as e:
return error_report(str(e)), HTTPStatus.NOT_FOUND.value
parent = config.find_tree(uuid)
child = model.LogTree(parent=parent, **request.get_json())
if parent:
parent.add_tree(child)
else:
config.add_tree(child)
verify_reports = config.verify_node(child)
state.save_config(config)
return verify_reports.encode(), HTTPStatus.OK.value
def delete(self, name, uuid):
try:
config = state.get_config(name)
except Exception as e:
return error_report(str(e)), HTTPStatus.NOT_FOUND.value
tree = config.find_tree(uuid)
if not tree:
return error_report(f'No node with {uuid} found!'), HTTPStatus.NOT_FOUND.value
parent = tree.parent
if parent:
parent.remove_tree(tree)
state.save_config(config)
return create_report(f'Removed tree {uuid} from {parent.id}'), HTTPStatus.OK.value
else:
config.remove_tree(tree)
state.save_config(config)
return create_report(f'Removed tree {uuid} from config'), HTTPStatus.OK.value
api.add_resource(Configs, '/v1/configs', '/v1/configs/<string:name>')
api.add_resource(Trees, '/v1/configs/<string:name>/trees', '/v1/configs/<string:name>/trees/<string:uuid>')
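# Example requests against a local development server (host/port and
# payloads are illustrative):
#   curl http://localhost:5000/v1/configs
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"config": {"name": "demo"}}' http://localhost:5000/v1/configs
#   curl http://localhost:5000/v1/configs/demo/trees
#   curl -X DELETE http://localhost:5000/v1/configs/demo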
| 31.456 | 107 | 0.625636 | 489 | 3,932 | 4.895706 | 0.161554 | 0.045948 | 0.06391 | 0.076859 | 0.524227 | 0.443609 | 0.406015 | 0.370927 | 0.370927 | 0.370927 | 0 | 0.001394 | 0.270346 | 3,932 | 124 | 108 | 31.709677 | 0.833043 | 0 | 0 | 0.458333 | 0 | 0 | 0.076571 | 0.025693 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104167 | false | 0 | 0.041667 | 0.010417 | 0.385417 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35dd012bff0b1ca1dae99f8b4ca18ff2d2ee4e5f | 4,334 | py | Python | ubteacher/modeling/meta_arch/rcnn.py | yulonghui/yingying_boss | f9cf956cb6507ef43f8005c61027f6b54f418224 | [
"MIT"
] | 1 | 2022-03-31T02:31:22.000Z | 2022-03-31T02:31:22.000Z | ubteacher/modeling/meta_arch/rcnn.py | yulonghui/DucTeacher | f9cf956cb6507ef43f8005c61027f6b54f418224 | [
"MIT"
] | null | null | null | ubteacher/modeling/meta_arch/rcnn.py | yulonghui/DucTeacher | f9cf956cb6507ef43f8005c61027f6b54f418224 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
@META_ARCH_REGISTRY.register()
class TwoStagePseudoLabGeneralizedRCNN(GeneralizedRCNN):
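"""GeneralizedRCNN variant for semi-supervised (teacher-student) training.
forward() is routed by `branch`: "supervised" and "unsupervised" compute
RPN and detector losses, "unsup_data_weak" runs inference to produce
pseudo-label proposals without computing losses, and "val_loss" computes
validation losses on held-out data.
"""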
def forward(
self, batched_inputs, branch="supervised", given_proposals=None, val_mode=False
):
if (not self.training) and (not val_mode):
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
# self.domain_label = self.get_domain(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
if branch == "supervised":
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances
)
# # roi_head lower branch
_, detector_losses = self.roi_heads(
images, features, proposals_rpn, gt_instances, branch=branch
)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses, [], [], None
elif branch == "unsupervised":
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances
)
# # roi_head lower branch
_, detector_losses = self.roi_heads(
images, features, proposals_rpn, gt_instances, branch=branch
)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses, [], [], None
elif branch == "unsup_data_weak":
# Region proposal network
proposals_rpn, _ = self.proposal_generator(
images, features, None, compute_loss=False
)
# roi_head lower branch (kept for further development); note that no
# targets are passed to the ROI head when doing this inference
proposals_roih, ROI_predictions = self.roi_heads(
images,
features,
proposals_rpn,
targets=None,
compute_loss=False,
branch=branch,
)
return {}, proposals_rpn, proposals_roih, ROI_predictions
elif branch == "val_loss":
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances, compute_val_loss=True
)
# roi_head lower branch
_, detector_losses = self.roi_heads(
images,
features,
proposals_rpn,
gt_instances,
branch=branch,
compute_val_loss=True,
)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses, [], [], None
# def inference(self, batched_inputs, detected_instances=None, do_postprocess=True):
# assert not self.training
# images = self.preprocess_image(batched_inputs)
# features = self.backbone(images.tensor)
# if detected_instances is None:
# if self.proposal_generator:
# proposals, _ = self.proposal_generator(images, features, None)
# else:
# assert "proposals" in batched_inputs[0]
# proposals = [x["proposals"].to(self.device) for x in batched_inputs]
# results, _ = self.roi_heads(images, features, proposals, None)
# else:
# detected_instances = [x.to(self.device) for x in detected_instances]
# results = self.roi_heads.forward_with_given_boxes(
# features, detected_instances
# )
# if do_postprocess:
# return GeneralizedRCNN._postprocess(
# results, batched_inputs, images.image_sizes
# )
# else:
# return results
| 34.672 | 140 | 0.582141 | 422 | 4,334 | 5.741706 | 0.239336 | 0.059018 | 0.052002 | 0.055716 | 0.523318 | 0.484936 | 0.37144 | 0.355757 | 0.330169 | 0.330169 | 0 | 0.0014 | 0.340563 | 4,334 | 124 | 141 | 34.951613 | 0.846396 | 0.310337 | 0 | 0.454545 | 0 | 0 | 0.024721 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015152 | false | 0 | 0.030303 | 0 | 0.136364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35dd0c4119509c62a1b2d297e9422000072ac852 | 1,673 | py | Python | mobi/management/commands/xlstocsv.py | TCastus/mobilite2-back | fc38d3cbed6ebd958c84b1f4f80db633695ab65e | [
"MIT"
] | 2 | 2021-02-17T18:37:25.000Z | 2021-03-04T05:47:06.000Z | mobi/management/commands/xlstocsv.py | TCastus/mobilite2-back | fc38d3cbed6ebd958c84b1f4f80db633695ab65e | [
"MIT"
] | 24 | 2021-03-09T15:20:20.000Z | 2021-06-07T11:53:34.000Z | mobi/management/commands/xlstocsv.py | TCastus/mobilite2-back | fc38d3cbed6ebd958c84b1f4f80db633695ab65e | [
"MIT"
] | 1 | 2021-02-23T15:31:28.000Z | 2021-02-23T15:31:28.000Z | import xlrd
import csv
from geopy.geocoders import Nominatim
from django.core.management.base import BaseCommand
class Command(BaseCommand):
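"""Convert "Places-Europe-TC.xls" into three CSV files (countries, cities,
universities), geocoding each university with Nominatim to get its
latitude/longitude.
NOTE: as written, this logic runs in the class body at import time; a
Django management command would normally place it in a handle() method.
"""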
loc = ("Places-Europe-TC.xls")
wb = xlrd.open_workbook(loc)
sheet = wb.sheet_by_index(0)
sheet.cell_value(0, 0)
nom = Nominatim(user_agent="CSVToLatLong")
u = open('new_universities', 'w')
c = open('new_cities', 'w')
co = open('new_countries', 'w')
writer_u = csv.writer(u)
writer_c = csv.writer(c)
writer_co = csv.writer(co)
writer_co.writerow(["name", "continent", "ECTSConversion", "id"])
writer_c.writerow(["name", "country", "id"])
writer_u.writerow(["name", "city", "univ_appartment", "latitude", "longitude", "id"])
countrytemp = None
citytemp = None
idCo = 2
idC = 2
for i in range(1, sheet.nrows):
country = sheet.cell_value(i, 0)
city = sheet.cell_value(i, 1)
if country == "":
break
if country != countrytemp:
writer_co.writerow([country, "Europe", "1"])
idCo += 1
if city == "":
break
if city != citytemp:
writer_c.writerow([sheet.cell_value(i, 1), idCo])
idC += 1
countrytemp = country
citytemp = city
try:
print(sheet.cell_value(i, 3))
n = nom.geocode(sheet.cell_value(i, 3))
lat = n.latitude
lon = n.longitude
writer_u.writerow([sheet.cell_value(i, 3), idC, "False", lat, lon])
except AttributeError:
writer_u.writerow([sheet.cell_value(i, 3), idC, "False"])
u.close()
c.close()
co.close()
| 25.738462 | 89 | 0.571429 | 211 | 1,673 | 4.407583 | 0.364929 | 0.077419 | 0.12043 | 0.112903 | 0.16129 | 0.083871 | 0.083871 | 0.083871 | 0.083871 | 0.083871 | 0 | 0.013389 | 0.285714 | 1,673 | 64 | 90 | 26.140625 | 0.764854 | 0 | 0 | 0.040816 | 0 | 0 | 0.104728 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.081633 | 0 | 0.387755 | 0.020408 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35dde0fb942ef7cd76dbe2d4dca170ff7b7392d3 | 3,332 | py | Python | ray/adaptdl_ray/adaptdl/utils.py | odp/adaptdl | 8a0ad47da91ab4b8f5e13c819cb4701a2ebe8ca8 | [
"Apache-2.0"
] | null | null | null | ray/adaptdl_ray/adaptdl/utils.py | odp/adaptdl | 8a0ad47da91ab4b8f5e13c819cb4701a2ebe8ca8 | [
"Apache-2.0"
] | null | null | null | ray/adaptdl_ray/adaptdl/utils.py | odp/adaptdl | 8a0ad47da91ab4b8f5e13c819cb4701a2ebe8ca8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Petuum, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
from collections import Counter, defaultdict
from copy import deepcopy
from ray import tune
from ray.util.placement_group import get_current_placement_group
from adaptdl_ray.adaptdl import config
def pgf_to_allocation(pgf) -> List[str]:
""" Convert a Placement Groups Factory to AdaptDL allocation"""
bundles = pgf._bundles[1:]
allocs, node_keys, num_devices = [], [], []
for bundle in bundles:
node_keys += [k.split(":")[1] for k, v in bundle.items()
if k.startswith("node")]
num_devices += [int(v) for k, v in bundle.items()
if k == config.default_device()]
for node, count in zip(node_keys, num_devices):
allocs += [node] * count
return allocs
def allocation_to_pgf(alloc: List[str], resources_per_node=None):
""" Convert AdaptDL allocation to a Placement Group Factory"""
if not resources_per_node:
resources_per_node = {"CPU": 1.0}
if config.default_device() == "GPU":
resources_per_node["GPU"] = 1.0
def _construct_bundle(node, number_of_instances):
resources = deepcopy(resources_per_node)
resources["CPU"] *= number_of_instances
if "GPU" in resources:
resources["GPU"] *= number_of_instances
if "adaptdl_virtual" not in node:
resources[f"node:{node}"] = 0.01
return resources
assert len(alloc) > 0
resources = [{"CPU": 0.001}]
alloc = Counter(alloc)
for node, res in alloc.items():
resources.append(_construct_bundle(node, res))
return tune.PlacementGroupFactory(resources)
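# Illustrative round trip through the two helpers above (hypothetical node
# IPs; assumes config.default_device() == "CPU" with one device per replica):
#   pgf = allocation_to_pgf(["10.0.0.1", "10.0.0.1", "10.0.0.2"])
#   pgf_to_allocation(pgf)  # -> ["10.0.0.1", "10.0.0.1", "10.0.0.2"]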
def pgf_to_num_replicas(pgf) -> int:
""" Extract the number of replicas of the trial from its PGF"""
return sum(int(bundle.get(config.default_device(), 0))
for bundle in pgf._bundles[1:])
def pgs_to_resources(pgs: List[Dict]) -> Dict:
""" Return node-level resource usage by all PGs in pgs."""
# Note that every bundle is tagged with the node resource
resources = defaultdict(Counter)
for pg in pgs:
for bundle in pg["bundle_cache"][1:]:
# Every bundle has a node resource
node_ip = [k.split(":")[1] for k in bundle.keys()
if k.startswith("node")][0]
for k, v in bundle.items():
resources[node_ip][k] += v
return resources
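# Illustrative return shape (hypothetical placement-group dicts): maps each
# node IP to a Counter of its summed bundle resources, e.g.
#   {"10.0.0.1": Counter({"CPU": 2.0, "node:10.0.0.1": 0.02})}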
def unique_nodes_pg() -> int:
nodes = []
if get_current_placement_group() is None:
return 0
else:
for bundle in get_current_placement_group().bundle_specs:
for resource in bundle:
if "node" in resource:
nodes.append(resource)
return len(set(nodes))
| 36.217391 | 74 | 0.65066 | 456 | 3,332 | 4.627193 | 0.33114 | 0.028436 | 0.037915 | 0.034123 | 0.036967 | 0.028436 | 0.019905 | 0.019905 | 0 | 0 | 0 | 0.011187 | 0.2488 | 3,332 | 91 | 75 | 36.615385 | 0.831802 | 0.265006 | 0 | 0.034483 | 0 | 0 | 0.03029 | 0 | 0 | 0 | 0 | 0 | 0.017241 | 1 | 0.103448 | false | 0 | 0.103448 | 0 | 0.327586 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35de4623800e4b3109de2e8f8dff0b5116b7f2a9 | 1,456 | py | Python | Part2/reply_post.py | johnchower/RedditBot | 951585c462c50ca176f2a55b6f98983e4237875f | [
"MIT"
] | null | null | null | Part2/reply_post.py | johnchower/RedditBot | 951585c462c50ca176f2a55b6f98983e4237875f | [
"MIT"
] | null | null | null | Part2/reply_post.py | johnchower/RedditBot | 951585c462c50ca176f2a55b6f98983e4237875f | [
"MIT"
] | null | null | null | #!/usr/bin/python
import praw
import re
import os
# Create the Reddit instance
reddit = praw.Reddit('tutorial_bot')
# and login
#reddit.login(REDDIT_USERNAME, REDDIT_PASS)
# Have we run this code before? If not, create an empty list
if not os.path.isfile("posts_replied_to.txt"):
posts_replied_to = []
# If we have run the code before, load the list of posts we have replied to
else:
# Read the file into a list and remove any empty values
with open("posts_replied_to.txt", "r") as f:
posts_replied_to = f.read()
posts_replied_to = posts_replied_to.split("\n")
posts_replied_to = list(filter(None, posts_replied_to))
# Scan the 10 hottest submissions in our subreddit (matches limit=10 below)
subreddit = reddit.subreddit('pythonforengineers')
for submission in subreddit.hot(limit=10):
#print(submission.title)
# If we haven't replied to this post before
if submission.id not in posts_replied_to:
# Do a case insensitive search
if re.search("i love python", submission.title, re.IGNORECASE):
# Reply to the post
submission.reply("I am a bot. I am also: not a bot.")
print("Bot replying to : ", submission.title)
# Store the current id into our list
posts_replied_to.append(submission.id)
# Write our updated list back to the file
with open("posts_replied_to.txt", "w") as f:
for post_id in posts_replied_to:
f.write(post_id + "\n")
| 30.978723 | 75 | 0.68544 | 229 | 1,456 | 4.231441 | 0.39738 | 0.130031 | 0.173375 | 0.052632 | 0.0516 | 0.0516 | 0 | 0 | 0 | 0 | 0 | 0.002655 | 0.223901 | 1,456 | 46 | 76 | 31.652174 | 0.854867 | 0.349588 | 0 | 0 | 0 | 0 | 0.17149 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35e25fe73f03f9c588ddbaac800e3a461d03b179 | 26,053 | py | Python | drl_neural_ros/src/CNN.py | natanaelmgomes/drl_ros | 929ae0c99a0ce11f535d2570db0138dd18760065 | [
"MIT"
] | 2 | 2021-03-05T22:14:03.000Z | 2021-11-11T11:19:05.000Z | drl_neural_ros/src/CNN.py | natanaelmgomes/drl_ros | 929ae0c99a0ce11f535d2570db0138dd18760065 | [
"MIT"
] | 1 | 2021-11-11T11:18:43.000Z | 2021-11-12T08:56:33.000Z | drl_neural_ros/src/CNN.py | natanaelmgomes/drl_ros | 929ae0c99a0ce11f535d2570db0138dd18760065 | [
"MIT"
] | 1 | 2021-03-24T20:29:40.000Z | 2021-03-24T20:29:40.000Z | #!/usr/bin/env python3.6
# Python
from collections import OrderedDict
import os
import random
import math
import numpy as np
import argparse
import time
import matplotlib.pyplot as plt
import cv2
import shlex, subprocess
import yaml
# Pytorch
import PIL.Image as Image
#from scipy import ndimage
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
# ROS
import rospy
from integrator.camera import Camera
from integrator.Kinematics import URKinematics
from integrator.supervisor_user import SupervisorUser
from integrator.gripper_user import GripperUser
from integrator.srv import WatchdogService
# this folder
from model import reinforcement_module
from memory import ReplayMemory
from manager import Manager
# other
from skimage.transform import ProjectiveTransform
def check_memory():
t = torch.cuda.get_device_properties(0).total_memory
c = torch.cuda.memory_reserved(0)
a = torch.cuda.memory_allocated(0)
f = c - a # free inside cache
print('total: ' + str(t / 1024 / 1024 / 1024))
    print('reserved: ' + str(c / 1024 / 1024 / 1024))
    print('allocated: ' + str(a / 1024 / 1024 / 1024))
    print('free: ' + str(f / 1024 / 1024 / 1024))
def ImagetoTensor(img):
"""convert a numpy array of shape HWC to CHW tensor"""
img = img.transpose((2, 0, 1)).astype(np.float32)
tensor = torch.from_numpy(img).float()
return tensor/255.0
def choose_action(args, q_values):
"""
The input to the CNN is 2 x 3 x 214 x 214
The output is 112 by 112
ponto a # 0.16, -0.22
ponto b # 0.16, 0.22
ponto c # 0.44, -0.22
ponto d # 0.44, 0.22
The action is epsilon-greedy with decay
"""
sample = random.random()
eps_threshold = args['eps_end'] + (args['eps_start'] - args['eps_end']) * math.exp(-1. * args['epoch'] / args['eps_decay'])
q_values = q_values.cpu().detach().numpy().squeeze()
h, w = q_values.shape
if sample > eps_threshold or args['testing']:
u, v = np.unravel_index(q_values.argmax(), q_values.shape)
rospy.loginfo('Fair attempt')
fair = True
else:
u = int(random.uniform(0, h))
v = int(random.uniform(0, w))
rospy.loginfo('Random attempt')
fair = False
t = ProjectiveTransform()
src = np.asarray([[0, 0], [0, w], [h, 0], [h, w]])
with open('/home/ubuntu/ur_ws/src/integrator/config/coordinates.yaml') as file:
data = yaml.load(file, Loader=yaml.FullLoader)
a = data['coord_a'][0]
b = data['coord_a'][1]
c = data['coord_d'][0]
d = data['coord_d'][1]
dst = np.asarray([[a, b], [a, d], [c, b], [c, d]])
if not t.estimate(src, dst): raise Exception("estimate failed")
a = t((u, v)).squeeze()
x = a[0]
y = a[1]
return x, y, u, v, fair
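# A minimal sketch of the decay schedule used in choose_action, with the
# defaults eps_start=0.9, eps_end=0.05, eps_decay=100 from `args` (illustrative):
#   epoch 0:   eps = 0.05 + 0.85 * exp(0)  ~= 0.90  (almost always random)
#   epoch 100: eps = 0.05 + 0.85 * exp(-1) ~= 0.36
#   epoch 300: eps = 0.05 + 0.85 * exp(-3) ~= 0.09  (almost always greedy)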
def grasp(args, x, y, manager):
"""
    Given a coordinate (x, y), attempt a grasp and return a bool indicating success.
"""
# if args['epoch'] % 10 == 0: manager.robot.go_to_and_wait('stop')
# time.sleep(0.1)
try:
manager.robot.movel([0.21, 0, 0.10])
if not args['simulation']: time.sleep(0.1)
if x < 0.21: manager.robot.movel([x, y, 0.10], speed=0.1)
else: manager.robot.movel([x, y, 0.10])
if not args['simulation']: time.sleep(0.1)
manager.gripper.open_and_wait()
if not args['simulation']: time.sleep(0.1)
z = manager.camera.get_z((x, y))
# z = 0.02
manager.robot.movel([x, y, z], speed=0.1)
if not args['simulation']: time.sleep(0.1)
rst = manager.gripper.close()
manager.robot.movel([x, y, 0.10])
if not args['simulation']: time.sleep(0.1)
if rst:
# manager.robot.go_to_and_wait('drop')
x = random.uniform(manager.min_x, manager.max_x)
y = random.uniform(manager.min_y, manager.max_y)
manager.robot.movel([x,y,0.10])
if not args['simulation']: time.sleep(0.1)
manager.robot.movel([x, y, z + 0.005], speed=0.1)
manager.gripper.open()
if not args['simulation']: time.sleep(1)
manager.robot.movel([0.21, 0, 0.10])
manager.gripper.open()
except Exception as e:
rospy.logerr(e)
rospy.logerr((x,y))
manager.supervisor.service('reset')
call_watchdog()
time.sleep(5)
manager.robot.go_to_and_wait('stop')
return False
# if args['epoch'] % 10 == 0: manager.robot.go_to_and_wait('stop')
return rst
def show_and_save(rgb_raw_, q_values, camera):
plt.figure()
plt.imshow(rgb_raw_)
# cv2.imshow("Image", cv2.cvtColor(rgb_raw_, cv2.COLOR_BGR2RGB))
delta = int(camera.delta / 1.8)
q_values = q_values.cpu().detach().numpy().squeeze()
tmp = cv2.copyMakeBorder(q_values, delta, delta, delta, delta, cv2.BORDER_CONSTANT, None, np.NaN)
prob_plot = cv2.resize(tmp, (rgb_raw_.shape[0], rgb_raw_.shape[1]))
plt.imshow(prob_plot, alpha=0.3)
plt.axis('off')
plt.colorbar()
timestr = time.strftime("%Y%m%d-%H%M%S")
os.chdir('/home/ubuntu')
# cv2.imwrite('Pictures/' + timestr + ' heatmap.png', cv2.cvtColor(self.img_croped, cv2.COLOR_BGR2RGB))
plt.savefig('Pictures/' + timestr + ' heatmap.png', facecolor='w', dpi=300)
# plt.show()
def call_watchdog():
    rospy.wait_for_service('watchdog_service')
    try:
        watchdog_service = rospy.ServiceProxy('watchdog_service', WatchdogService)
        rst = watchdog_service(True)
        watchdog_service.close()
        return rst
    except rospy.ServiceException as e:
        # the original called the proxy outside the try, so a failed
        # ServiceProxy construction left `watchdog_service` unbound
        print("Service call failed: %s" % e)
        return None
def generate(args, manager):
rospy.loginfo('Preparing to generate %s data points for training' % args['epoch_num'])
# prepare simulation env
rospy.loginfo('Init simulation')
manager.supervisor.service('r') # release all objects
# manager.supervisor.service('reset')
time.sleep(3)
manager.robot.go_to_and_wait('stop')
# manager.gripper.close()
# manager.gripper.open()
# manager.gripper.open_and_wait()
manager.supervisor.service('clean')
rospy.loginfo('Simulation set')
args['directory'] = '/home/ubuntu/Documents/data-generated'
manager.memory = ReplayMemory(args)
n = 0
for i in range(args['epoch_num']):
if i%500 == 0: n += 1
rospy.loginfo(' --- --- Iteration %s --- ---' % args['epoch'])
args['epoch'] += 1
rospy.loginfo('Randomizing items')
manager.supervisor.service('prepare' + str(n))
rospy.loginfo('Items set')
rospy.loginfo('Acquire images')
rgb, dep, rgb_raw, dep_raw = manager.get_images() # observation
q_values = torch.tensor(manager.estimar_valores_q()).unsqueeze(0).unsqueeze(0)
img_gen = manager.draw_from_q_values(rgb_raw, q_values.cpu().detach().numpy().squeeze())
# save all data generated
kwargs = {'success': True,
'simulated': True,
'generated': True}
manager.memory.add(rgb, dep, q_values, [rgb_raw, dep_raw, img_gen], kwargs)
rospy.loginfo('Data saved')
def train_with_generated(manager, model, writer=None):
args['directory'] = '/home/ubuntu/Documents/data-generated'
# args['batch_size'] = 20
manager.memory = ReplayMemory(args)
data_loader = DataLoader(manager.memory,
batch_size=args['batch_size'],
shuffle=True)
rospy.loginfo(
'Preparing to trains for {0} epochs with batch size: {1}'.format(len(data_loader), args['batch_size']))
# forward_time = []
# backward_time = []
for k, batch in enumerate(data_loader):
rospy.loginfo('------ Epoch {0} / {1} ------'.format(k, len(data_loader)))
# unpack data
rgb, dep, q_values, kwargs = batch
# = imgs
rgb = rgb.squeeze()
dep = dep.squeeze()
q_values = q_values.squeeze().unsqueeze(1)
# forward
start_time = time.time()
q_values_pred = model(rgb, dep)
seconds = time.time() - start_time
# forward_time.append(seconds)
        if writer is not None:
            writer.add_scalar('Train/Forward', seconds, k)
# rospy.loginfo("---- forward ---- %s seconds ----" % seconds)
# backward
start_time = time.time()
loss = model.criterion(q_values, q_values_pred)
        if writer is not None:
            writer.add_scalar('Train/Loss', loss.item(), k)
rospy.loginfo('LOSS: ' + str(loss.item()))
# perdas.append(loss.item())
model.optimizer.zero_grad()
loss.backward()
model.optimizer.step()
seconds = time.time() - start_time
# backward_time.append(seconds)
        if writer is not None:
            writer.add_scalar('Train/Backward', seconds, k)
return model
def train_with_all_data(manager, model):
args['directory'] = '/home/ubuntu/Documents/data'
# args['batch_size'] = 10
manager.memory = ReplayMemory(args)
data_loader = DataLoader(manager.memory,
batch_size=args['batch_size'],
shuffle=True)
rospy.loginfo(
'Preparing to trains for {0} epochs with batch size: {1}'.format(len(data_loader), args['batch_size']))
forward_time = []
backward_time = []
for k, batch in enumerate(data_loader):
rospy.loginfo('------ Epoch {0} / {1} ------'.format(k, len(data_loader)))
# unpack data
imgs, q_values = batch
rgb, dep = imgs
rgb = rgb.squeeze()
dep = dep.squeeze()
q_values = q_values.squeeze().unsqueeze(1)
# forward
start_time = time.time()
q_values_pred = model(rgb, dep)
seconds = time.time() - start_time
forward_time.append(seconds)
rospy.loginfo("---- forward ---- %s seconds ----" % seconds)
# backward
start_time = time.time()
loss = model.criterion(q_values, q_values_pred)
rospy.loginfo('LOSS: ' + str(loss.item()))
# perdas.append(loss.item())
model.optimizer.zero_grad()
loss.backward()
model.optimizer.step()
seconds = time.time() - start_time
backward_time.append(seconds)
rospy.loginfo("---- backward ---- %s seconds ----" % seconds)
np_forward = np.array(forward_time)
np_backward = np.array(backward_time)
rospy.loginfo(
"-- forward -- mean -- {:2.3f} seconds -- +- {:2.3f} --".format(np_forward.mean(), np_forward.std()))
rospy.loginfo(
"-- backward -- mean -- {:2.3f} seconds -- +- {:2.3f} --".format(np_backward.mean(), np_backward.std()))
return model
def train(args, model, manager, writer):
save = False
# Start training only if certain number of samples is already saved
if len(manager.memory) < args['min_replay_memory']:
return model
rospy.loginfo('Training.')
data_loader = DataLoader(manager.memory,
batch_size=args['batch_size'],
shuffle=True)
for k, batch in enumerate(data_loader):
# rospy.loginfo('------ Epoch {0} / {1} ------'.format(k, len(data_loader)))
# unpack data
# pose, rgb, dep, new_pose, new_rgb, new_dep, q_values, kwargs = batch
rgb, dep, q_values, kwargs = batch
rgb = rgb.squeeze()
dep = dep.squeeze()
# q_values = q_values.squeeze() #.unsqueeze(1)
# new_q_values = new_q_values.squeeze().unsqueeze(1)
# Get current states from minibatch, then query NN model for Q values
# current_states = [rgb, dep, pose]
# with torch.no_grad():
# current_qs_list = model(*current_states)
# forward
start_time = time.time()
q_values_pred = model(rgb, dep)
seconds = time.time() - start_time
writer.add_scalar('Train/Forward', seconds, args['epoch'])
q_values = q_values_pred.clone().detach().to(args['device'])
# update the q values
        # `len(kwargs)` counted dict keys rather than batch samples, and the
        # loop variable shadowed the outer `k` from enumerate; iterate the batch
        for b in range(len(kwargs['success'])):
            u = kwargs['attempt(u,v)'][0][b]
            v = kwargs['attempt(u,v)'][1][b]
            rst = kwargs['success'][b]
            # the grasp reward is applied inside update_q_values via args['grasp_reward']
            q_values[b, :, :, :] = update_q_values(args, q_values[b, :, :, :].detach(), u, v, rst)
# backward
start_time = time.time()
loss = model.criterion(q_values, q_values_pred)
rospy.loginfo('LOSS: ' + str(loss.item()))
model.optimizer.zero_grad()
loss.backward()
model.optimizer.step()
seconds = time.time() - start_time
writer.add_scalar('Train/Loss', loss.item(), args['epoch'])
writer.add_scalar('Train/Backward', seconds, args['epoch'])
            break  # train on a single minibatch per call
if save:
# save the model
rospy.loginfo('Saving the model')
new_dir = os.path.join('/home/ubuntu/Documents/kin-models', time.strftime("%Y%m%d-%H%M%S"))
os.mkdir(new_dir)
file = os.path.join(new_dir, 'model.pt')
torch.save(model, file)
return model
def test_model(args, manager, model):
# return 0
rospy.loginfo(' --- --- Evaluating model --- ---')
rst = []
previous_arg_testing = args['testing']
args['testing'] = True
for i in range(10):
call_watchdog()
# rospy.loginfo('Randomizing items')
if args['simulation']:
manager.supervisor.service('prepare' + str(random.randint(1, 4)))
# rospy.loginfo('Items set')
rgb, dep, rgb_raw, dep_raw = manager.get_images() # observation
q_values_pred = model(rgb, dep)
q_values = q_values_pred.clone().detach()
x, y, u, v, fair = choose_action(args, q_values_pred)
g = grasp(args, x, y, manager)
rst.append(g)
if rst[i]:
rospy.loginfo('Grasp success')
reward = args['grasp_reward']
else:
rospy.loginfo('Grasp fail')
reward = 0
if not args['simulation']: time.sleep(0.2)
# # save all data generated
# update q values
q_values[0,:,:,:] = update_q_values(args, q_values[0,:,:,:].detach(), u, v, rst)
img_pred = manager.draw_from_q_values(rgb_raw, q_values_pred.cpu().detach().numpy().squeeze(), attempt=(v,u))
# img_res = manager.draw_from_q_values(rgb_raw, q_values.cpu().detach().numpy().squeeze())
kwargs = {'success': g,
'simulated': False,
'generated': False,
'attempt(u,v)': (int(u), int(v)),
'attempt(x,y)': (float(x), float(y)),
'model': model.model_name,
'fair attempt': fair}
manager.memory.add(rgb, dep, q_values_pred, [rgb_raw, dep_raw, img_pred], kwargs)
args['testing'] = previous_arg_testing
return np.mean(rst)
def update_q_values(args, q_values, u, v, rst):
"""
    Update the Q-values around the attempted (u, v) coordinate, depending on
    whether the grasp succeeded or failed.
"""
for i in range(q_values.size()[1]):
for j in range(q_values.size()[2]):
distance = np.sqrt((u - i) ** 2 + (v - j) ** 2)
if distance < 20:
value = args['rl_lr'] * (1 / (distance + args['grasp_reward']))
# value = (-distance/20.0) + reward
# print(q_values[0, 0, i, j])
if rst:
q_values[0, i, j] += value
else:
q_values[0, i, j] -= value
# print(q_values[0, 0, i, j])
# torch.clamp
# q_values[k,:,:,:].clamp(min = 0.0, max = 1.0)
if q_values[0, i, j] > 1.0: q_values[0, i, j] = 1.0
if q_values[0, i, j] < 0.0: q_values[0, i, j] = 0.0
return q_values
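# Worked example of the update rule above (illustrative, using the defaults
# rl_lr=0.7 and grasp_reward=1 from `args`): after a successful grasp at
# (u, v), the cell at distance 0 gains 0.7 * 1/(0 + 1) = 0.7, a cell at
# distance 10 gains 0.7 * 1/11 ~= 0.064, and cells at distance >= 20 are
# untouched; a failed grasp subtracts the same amounts. Values are then
# clamped to [0, 1].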
def main(args):
rospy.init_node('Neural', anonymous=False)
if args['simulation']:
# forward_time = []
# backward_time = []
model_names = ['mnasnet',
'resnext',
'mobilenet',
'densenet']
for model_name in model_names:
            # membership test (the original `== 'mnasnet' or 'mobilenet'` was always truthy)
            if model_name in ('mnasnet', 'mobilenet'): args['device'] = torch.device('cuda')
            else: args['device'] = torch.device('cpu')
manager = Manager(args)
rospy.loginfo('Model: ' + model_name)
if args['device'] == torch.device('cuda'):
rospy.loginfo('Running on CUDA')
else:
rospy.loginfo('Running on CPU')
model = reinforcement_module(args, model_name)
writer = SummaryWriter('/home/ubuntu/Documents/Tensorboard5/' + model_name)
args['epoch'] = 1
writer.add_scalar('Test/Acc', test_model(args, manager, model), 0)
for i in range(args['epoch_num']):
manager.robot.go_to_and_wait('stop')
call_watchdog()
rospy.loginfo(' --- --- Epoch %s --- ---' % args['epoch'])
eps_threshold = args['eps_end'] + (args['eps_start'] - args['eps_end']) * math.exp(
-1. * args['epoch'] / args['eps_decay'])
writer.add_scalar('Train/Epsilon', eps_threshold, args['epoch'])
# rospy.loginfo('Randomizing items')
manager.supervisor.service('prepare'+str(random.randint(1, 4)))
# rospy.loginfo('Items set')
rgb, dep, rgb_raw, dep_raw = manager.get_images() # observation
# rospy.loginfo('Images acquired')
with torch.no_grad():
q_values_pred = model(rgb, dep)
q_values = q_values_pred.clone().detach()
# rospy.loginfo('')
# seconds = time.time() - start_time
# forward_time.append(seconds)
# rospy.loginfo("---- forward ---- %s seconds ----" % seconds)
# writer.add_scalar('Train/Forward', seconds, args['epoch'])
q_values = torch.tensor(manager.estimar_valores_q()).unsqueeze(0).unsqueeze(0)
x, y, u, v, fair = choose_action(args, q_values)
# rst = False
# rospy.loginfo('Test the grasp')
# if x > 0.2: continue
rst = grasp(args, x, y, manager)
img_pred = manager.draw_from_q_values(rgb_raw, q_values_pred.cpu().detach().numpy().squeeze(), attempt=(v,u))
img_res = manager.draw_from_q_values(rgb_raw, q_values.cpu().detach().numpy().squeeze())
writer.add_image('Predicted', ImagetoTensor(img_pred), args['epoch'])
writer.add_image('Result', ImagetoTensor(img_res), args['epoch'])
args['epoch'] += 1
if rospy.is_shutdown(): return
rospy.loginfo('Saving the model')
new_dir = os.path.join('/home/ubuntu/Documents/models', time.strftime("%Y%m%d-%H%M%S") + ' ' + model_name)
os.mkdir(new_dir)
file = os.path.join(new_dir, 'model.pt')
torch.save(model, file)
else:
# real robot
base_folder = '/home/ubuntu/Documents/models'
models = [
'20210108-140316 resnext',
'20210108-141440 densenet',
'20210108-135306 mnasnet',
'20210108-140616 mobilenet']
for model_name in models:
if 'mnasnet' in model_name or 'mobilenet' in model_name: args['device'] = torch.device('cuda')
else: args['device'] = torch.device('cpu')
manager = Manager(args)
if args['device'] == torch.device('cuda'):
rospy.loginfo('Running on CUDA')
else:
rospy.loginfo('Running on CPU')
rospy.loginfo('Model: %s' % model_name)
manager.robot.go_to_and_wait('stop')
file = os.path.join(base_folder, model_name)
file = os.path.join(file, 'model.pt')
if args['device'] == torch.device('cuda'):
rospy.loginfo('Running on CUDA')
model = torch.load(file, map_location="cuda:0")
else:
rospy.loginfo('Running on CPU')
model = torch.load(file)
writer = SummaryWriter('/home/ubuntu/Documents/Tensorboard7/' + model_name)
results = []
args['epoch'] = 1
while args['epoch'] < args['epoch_num']:
rospy.loginfo(' --- --- Epoch %s --- ---' % args['epoch'])
# eps_threshold = args['eps_end'] + (args['eps_start'] - args['eps_end']) * math.exp(
# -1. * args['epoch'] / args['eps_decay'])
# writer.add_scalar('Train/Epsilon', eps_threshold, args['epoch'])
rgb, dep, rgb_raw, dep_raw = manager.get_images() # observation
# rospy.loginfo('Images acquired')
q_values_pred = model(rgb, dep)
q_values = q_values_pred.clone().detach().to(args['device'])
x, y, u, v, fair = choose_action(args, q_values_pred)
rst = grasp(args, x, y, manager)
if rst:
rospy.loginfo('Grasp success')
# reward = args['grasp_reward']
else:
rospy.loginfo('Grasp fail')
# reward = 0
#update q values
q_values[0,:,:,:] = update_q_values(args, q_values[0,:,:,:].detach(), u, v, rst)
results.append(rst)
if len(results) == 10:
writer.add_scalar('Test/Acc_10', np.mean(results), args['epoch'])
results = []
writer.add_scalar('Test/Acc_1', int(rst), args['epoch'])
loss = model.criterion(q_values, q_values_pred)
# print(loss.device)
rospy.loginfo('LOSS: ' + str(loss.item()))
model.optimizer.zero_grad()
loss.backward()
model.optimizer.step()
writer.add_scalar('Train/Loss', loss.item(), args['epoch'])
img_pred = manager.draw_from_q_values(rgb_raw, q_values_pred.cpu().detach().numpy().squeeze(), attempt=(v,u))
img_after = manager.draw_from_q_values(rgb_raw, q_values.cpu().detach().numpy().squeeze())
writer.add_image('Predicted', ImagetoTensor(img_pred), args['epoch'])
writer.add_image('Result', ImagetoTensor(img_after), args['epoch'])
cv2.imshow("Predicted", cv2.cvtColor(img_pred, cv2.COLOR_BGR2RGB))
cv2.imshow("Result", cv2.cvtColor(img_after, cv2.COLOR_BGR2RGB))
cv2.imshow("Depth", cv2.cvtColor(manager.camera.dep, cv2.COLOR_BGR2RGB))
cv2.drawMarker(manager.camera.img, manager.camera.ponto_a, (0, 255, 0))
cv2.drawMarker(manager.camera.img, manager.camera.ponto_b, (0, 255, 0))
cv2.drawMarker(manager.camera.img, manager.camera.ponto_c, (0, 255, 0))
cv2.drawMarker(manager.camera.img, manager.camera.ponto_d, (0, 255, 0))
cv2.imshow("Image Full", cv2.cvtColor(manager.camera.img, cv2.COLOR_BGR2RGB))
if cv2.waitKey(1) & 0xFF == ord('q'):
return
# save all data generated
kwargs = {'success': rst,
'simulated': False,
'generated': False,
'attempt(u,v)': (int(u), int(v)),
'attempt(x,y)': (float(x), float(y)),
'fair attempt': fair,
'model': model_name}
manager.memory.add(rgb, dep, q_values, [rgb_raw, dep_raw, img_pred, img_after], kwargs)
args['epoch'] += 1
if rospy.is_shutdown(): return
# save the model
new_dir = os.path.join('/home/ubuntu/Documents/models', time.strftime("%Y%m%d-%H%M%S") + ' ' + model_name)
os.mkdir(new_dir)
file = os.path.join(new_dir, 'model.pt')
torch.save(model, file)
writer.close()
if __name__ == '__main__':
# Parse arguments
parser = argparse.ArgumentParser(description='Deep reinforcement learning in PyTorch.')
parser.add_argument('--real', dest='is_sim', action='store_false', default=True, help='Real or simulated, default is simulated.')
parser.add_argument('--gpu', dest='is_cuda', action='store_true', default=False, help='GPU mode, default is CPU.')
parser.add_argument('--test', dest='is_test', action='store_true', default=False, help='Testing only.')
parser.add_argument('--train', dest='is_train', action='store_true', default=False, help='Training only')
args_parser = parser.parse_args()
# hyperparameters
args = {
        'epoch_num': 100,  # Number of epochs.
        'epoch': 0,  # Current epoch counter.
        'lr': 1e-3,  # Learning rate.
        'rl_lr': 0.7,  # Q-value update rate.
        'weight_decay': 8e-5,  # L2 penalty (regularization).
        'batch_size': 10,  # Batch size.
'gamma' : 0.99,
'eps_start' : 0.9, # initial randomness
'eps_end' : 0.05, # final randomness
'eps_decay' : 100, # exponential decay
'target_update' : 10,
'grasp_reward': 1,
'proportional_reward': 0.25,
'min_replay_memory': 20
}
    # merge the parsed command-line flags into the hyperparameter dict
args['simulation'] = args_parser.is_sim
args['device'] = torch.device('cuda') if args_parser.is_cuda else torch.device('cpu')
args['testing'] = args_parser.is_test
args['training'] = args_parser.is_train
args['kinematic'] = False
main(args) | 37.648844 | 133 | 0.570184 | 3,261 | 26,053 | 4.413677 | 0.139221 | 0.046203 | 0.015285 | 0.015563 | 0.547627 | 0.503717 | 0.466129 | 0.434169 | 0.400195 | 0.385187 | 0 | 0.023285 | 0.284574 | 26,053 | 692 | 134 | 37.648844 | 0.748914 | 0.127125 | 0 | 0.427039 | 0 | 0 | 0.13258 | 0.015514 | 0 | 0 | 0.000177 | 0 | 0 | 1 | 0.027897 | false | 0 | 0.066524 | 0 | 0.120172 | 0.01073 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35e29825a2c22d91ae68ace8eae1daecbd810824 | 653 | py | Python | array/0731_my_calendar_2.py | MartinMa28/Algorithms_review | 3f2297038c00f5a560941360ca702e6868530f34 | [
"MIT"
] | null | null | null | array/0731_my_calendar_2.py | MartinMa28/Algorithms_review | 3f2297038c00f5a560941360ca702e6868530f34 | [
"MIT"
] | null | null | null | array/0731_my_calendar_2.py | MartinMa28/Algorithms_review | 3f2297038c00f5a560941360ca702e6868530f34 | [
"MIT"
] | null | null | null | class MyCalendarTwo:
def __init__(self):
self.calendar = []
self.overlaps = []
    def book(self, start: int, end: int) -> bool:
        # Reject if [start, end) intersects an interval that is already
        # double-booked; two half-open intervals overlap iff a.start < b.end
        # and b.start < a.end.
        for event in self.overlaps:
            if event[0] < end and start < event[1]:
                return False
        # Record the intersection with every existing booking: those spans
        # become double-booked once this event is added.
        for event in self.calendar:
            if event[0] < end and start < event[1]:
                self.overlaps.append((max(start, event[0]), min(end, event[1])))
        self.calendar.append((start, end))
        return True
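# Illustrative booking sequence (hypothetical values) showing why the
# overlaps list is needed:
#   cal = MyCalendarTwo()
#   cal.book(10, 20)  # True
#   cal.book(50, 60)  # True
#   cal.book(10, 40)  # True  -- overlaps [10, 20) once; recorded in overlaps
#   cal.book(5, 15)   # False -- would triple-book [10, 15)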
# Your MyCalendarTwo object will be instantiated and called as such:
# obj = MyCalendarTwo()
# param_1 = obj.book(start,end) | 28.391304 | 80 | 0.558959 | 81 | 653 | 4.444444 | 0.432099 | 0.1 | 0.055556 | 0.077778 | 0.138889 | 0.138889 | 0.138889 | 0.138889 | 0 | 0 | 0 | 0.015909 | 0.326187 | 653 | 23 | 81 | 28.391304 | 0.802273 | 0.180704 | 0 | 0.153846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35e2d510a9ac1b153c443d0037b20f2845299a47 | 6,640 | py | Python | ArrangeWindows.glyphsPlugin/Contents/Resources/plugin.py | Mark2Mark/ArrangeWindows | 4848bff76afc68d433935acb9b0095c6435ef39f | [
"Apache-2.0"
] | null | null | null | ArrangeWindows.glyphsPlugin/Contents/Resources/plugin.py | Mark2Mark/ArrangeWindows | 4848bff76afc68d433935acb9b0095c6435ef39f | [
"Apache-2.0"
] | 1 | 2018-05-02T08:39:36.000Z | 2018-05-02T08:39:36.000Z | ArrangeWindows.glyphsPlugin/Contents/Resources/plugin.py | Mark2Mark/ArrangeWindows | 4848bff76afc68d433935acb9b0095c6435ef39f | [
"Apache-2.0"
] | 1 | 2017-12-30T21:08:30.000Z | 2017-12-30T21:08:30.000Z | # encoding: utf-8
from __future__ import division, print_function, unicode_literals
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# - Run with Option Key to include the MacroPanel.
# - Run with Shift Key to Arrange 2 Fonts to 2 Screens.
#
# --> let me know if you have ideas for improving
# --> Mark Froemberg aka Mark2Mark @ GitHub
# --> www.markfromberg.com
#
# ToDo:
# - Tiles for 3 or 4 fonts
#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from GlyphsApp import *
from GlyphsApp.plugins import *
from AppKit import NSScreen, NSMenuItem, NSEvent, NSAlternateKeyMask, NSShiftKeyMask
import traceback
# class MFWindow(NSWindow):
# def init(self):
# return self
# def animationResizeTime_(self, rect):
# return 0.2
screens = NSScreen.screens()
screenCount = len(screens)
specialWindowName = "Skedge"
class ArrangeWindows(GeneralPlugin):
@objc.python_method
def settings(self):
self.name = Glyphs.localize({
'en': 'Arrange Windows',
'de': 'Fenster anordnen',
'fr': 'Organiser les fenêtres',
'es': 'Organizar ventanas',
})
self.nameAlt = Glyphs.localize({
'en': 'Arrange Windows & Macro Panel',
'de': 'Fenster & Macro Panel anordnen',
'fr': 'Organiser les fenêtres et le panneau des macros',
'es': 'Organizar ventanas y el panel de macros',
})
self.nameAltScreens = Glyphs.localize({
'en': 'Arrange Windows Across Screens',
'de': 'Verteile Fenster auf Monitore',
'fr': 'Organiser les fenêtres à travers les écrans',
'es': 'Organizar ventanas en pantallas',
})
@objc.python_method
def start(self):
try:
# new API in Glyphs 2.3.1-910
targetMenu = WINDOW_MENU # EDIT_MENU # SCRIPT_MENU
## Without the separator, it overwrites the `Kerning` menu entry, if put in WINDOW_MENU
separator = NSMenuItem.separatorItem()
Glyphs.menu[targetMenu].append(separator)
newMenuItem = NSMenuItem(self.name, self.doArrangeWindows_)
# Alt 1
newMenuItemAlt = NSMenuItem(self.nameAlt, self.doArrangeWindows_)
newMenuItemAlt.setKeyEquivalentModifierMask_(NSAlternateKeyMask)
newMenuItemAlt.setAlternate_(True) # A Boolean value that marks the menu item as an alternate to the previous menu item.
# Alt 2
if screenCount == 2:
newMenuItemAltScreens = NSMenuItem(self.nameAltScreens, self.doArrangeWindowsOnScreens_)
newMenuItemAltScreens.setKeyEquivalentModifierMask_(NSShiftKeyMask)
newMenuItemAltScreens.setAlternate_(True) # A Boolean value that marks the menu item as an alternate to the previous menu item.
Glyphs.menu[targetMenu].append(newMenuItem)
Glyphs.menu[targetMenu].append(newMenuItemAlt)
if screenCount == 2:
Glyphs.menu[targetMenu].append(newMenuItemAltScreens)
except:
print(traceback.format_exc())
# mainMenu = Glyphs.mainMenu()
# s = objc.selector(self.doArrangeWindows,signature='v@:@')
# newMenuItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(self.name, s, "")
# newMenuItem.setTarget_(self)
# mainMenu.itemWithTag_(5).submenu().addItem_(newMenuItem)
@objc.python_method
def distribute(self, allWindows, screenWidth, screenHeight):
amount = len(allWindows)
for i, window in enumerate(allWindows):
# Optional: deminiaturize:
# if window.isMiniaturized():
# window.deminiaturize_(True)
			share = screenWidth / amount - 1  # each window's width, leaving a 1 px gap
			point = screenWidth / amount * i  # x-origin of window i
			newRect = ((point, 0), (share, screenHeight))
			# window = MFWindow.alloc().init() ## Subclass, don't do that!
			# window.animationResizeTime_(newRect)
			window.setFrame_display_animate_(newRect, True, True)  # window.setFrameOrigin_((point, 0))
# window.animator().setAlphaValue_(0.0)
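	# Example geometry (hypothetical): three windows on a 1920 px wide screen
	# get widths of 1920/3 - 1 = 639 px at x-origins 0, 640 and 1280.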
def doArrangeWindows_(self, sender):
screenHeight = NSScreen.mainScreen().frame().size.height
screenWidth = NSScreen.mainScreen().frame().size.width
optionKeyFlag = 524288
optionKeyPressed = NSEvent.modifierFlags() & optionKeyFlag == optionKeyFlag
includeMacroPanel = False
if optionKeyPressed:
includeMacroPanel = True
if includeMacroPanel:
#allWindows = [x for x in Glyphs.windows() if x.class__().__name__ == "GSWindow" and x.document() or x.class__().__name__ == "GSMacroWindow"] # A: Without special window
allWindows = [x for x in Glyphs.windows() if x.class__().__name__ == "GSWindow" and x.document() or x.class__().__name__ == "GSMacroWindow" or specialWindowName in x.title() ] # B: With special window
Glyphs.showMacroWindow()
else:
#allWindows = [x for x in Glyphs.windows() if x.class__().__name__ == "GSWindow" and x.document()] # A: Without special window
allWindows = [x for x in Glyphs.windows() if x.class__().__name__ == "GSWindow" and x.document() or specialWindowName in x.title() ] # B: With special window
macroWindow = [x for x in Glyphs.windows() if x.class__().__name__ == "GSMacroWindow"][0]
macroWindow.close()
self.distribute(allWindows, screenWidth, screenHeight)
### just for debugging:
# for x in Glyphs.windows():
# className = x.class__().__name__
# if className == "GSWindow":
# print x.document()
# help(x)
#######################
def doArrangeWindowsOnScreens_(self, sender):
allWindows = [x for x in Glyphs.windows() if x.class__().__name__ == "GSWindow" and x.document()]
macroWindow = [x for x in Glyphs.windows() if x.class__().__name__ == "GSMacroWindow"][0]
if screenCount == len(allWindows) == 2: # only limited to exactly 2
macroWindow.close()
w1, w2 = allWindows[0], allWindows[1]
s1, s2 = screens[0].frame(), screens[1].frame()
s1Rect = ((s1.origin.x, s1.origin.x), (s1.size.width, s1.size.height))
w1.setFrame_display_animate_(s1Rect, True, True)
s2Rect = ((s2.origin.x, s2.origin.x), (s2.size.width, s2.size.height))
w2.setFrame_display_animate_(s2Rect, True, True)
else:
Message(
title = Glyphs.localize({
'en': "Wrong Number of Fonts",
'de': 'Falsche Anzahl Schriften',
'fr': 'Nombre des polices incorrecte',
'es': 'Numero de fuentes incorrecto',
}),
message = Glyphs.localize({
'en': "You need exactly two fonts to be open.",
'de': 'Es müssen genau zwei Schriftdateien geöffnet sein.',
'fr': 'Il faut que exactement deux fichiers .glyphs sont ouverts.',
'es': 'Exactamente dos archivos de fuentes deben estar abiertos.',
}),
OKButton = "OK",
)
@objc.python_method
def __file__(self):
"""Please leave this method unchanged"""
return __file__
| 36.685083 | 203 | 0.67003 | 752 | 6,640 | 5.767287 | 0.361702 | 0.013834 | 0.023057 | 0.022135 | 0.21697 | 0.178003 | 0.178003 | 0.178003 | 0.178003 | 0.157713 | 0 | 0.010061 | 0.161747 | 6,640 | 180 | 204 | 36.888889 | 0.769134 | 0.309036 | 0 | 0.165049 | 0 | 0 | 0.169962 | 0 | 0 | 0 | 0 | 0.005556 | 0 | 1 | 0.058252 | false | 0 | 0.048544 | 0 | 0.126214 | 0.019417 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35e6244ec35b2b3a8cc358404a59e3b5cf013e08 | 15,810 | py | Python | RestPy/Modules/StatisticsMgmt.py | NickKeating/IxNetwork | 0a54c0b8d1a1664d2826ad20a826ef384c48432f | [
"MIT"
] | 46 | 2018-01-24T06:43:45.000Z | 2022-03-17T07:27:08.000Z | RestPy/Modules/StatisticsMgmt.py | NickKeating/IxNetwork | 0a54c0b8d1a1664d2826ad20a826ef384c48432f | [
"MIT"
] | 104 | 2018-03-16T18:16:29.000Z | 2022-03-17T07:16:43.000Z | RestPy/Modules/StatisticsMgmt.py | NickKeating/IxNetwork | 0a54c0b8d1a1664d2826ad20a826ef384c48432f | [
"MIT"
] | 58 | 2018-01-23T05:54:20.000Z | 2022-03-30T22:55:20.000Z | import re, time
class Statistics(object):
def __init__(self, ixNetObj):
self.ixNetObj = ixNetObj
def getStatView(self, caption):
"""
Get a statistics view.
:param caption: <str>: The statistics view caption name.
Example: Protocols Summary, Flow Statistics, etc.
Return
The statistics view object attributes.
"""
viewResults = []
counterStop = 60
for counter in range(1, counterStop+1):
print('\nWaiting for statview: {0}\n'.format(caption))
viewResults = self.ixNetObj.Statistics.View.find(Caption=caption)
if counter < counterStop and len(viewResults) == 0:
print('\n{0} is not ready yet. Wait {1}/{2} seconds\n'.format(caption, counter, counterStop))
time.sleep(1)
continue
if counter < counterStop and len(viewResults) != 0:
print('\n{0} is ready\n'.format(caption))
return viewResults
if counter == counterStop and len(viewResults) == 0 :
raise Exception('\nAPI server failed to provide stat views')
def getStatViewResults(self, statViewName=False, getColumnCaptions=False, getPageValues=False,
rowValuesLabel=None, getTotalPages=False, timeout=60):
"""
Wait for a statistic view to be ready with stats. Cannot assume the stats are ready.
For example, if startAllProtocols was executed, protocol summary stats may not be ready
provided by the API server.
This function takes in statViewName as a mandatory parameter.
Note:
Getting stats is always a two step process. You normally need to get the statview and then
get the stat page values. You must verify each seperately for readiness.
        :param statViewName: <Mandatory>: The name of the stat view, such as:
Protocols Summary, Port Statistics, Flow Statistics, Traffic Item Statistics, etc.
:param getColumnCaptions: <bool>: Optional: Returns the statViewName column caption names in a list.
:param getPageValues: <bool>: Optional: Returns the statViewName page values in a list.
:param rowValuesLabel: <str>: Optional: Return the stats for just the row's label name.
:param getTotalPages: <bool>: Optional: Return the total amount of pages for the statview.
Example 1:
# Wait for statViewName='Protocols Summary' to be ready and return the data.
results = self.getStatView(caption='Protocols Summary')
Example 2:
# Wait for each statViewName to be ready.
# Then get the column captions, which are the names of the stats
# and get the page values, which are the stat values for each caption.
columnCaptions = self.getStatViewResults(statViewName='Protocols Summary', getColumnCaptions=True)
pageValues = self.getStatViewResults(statViewName='Protocols Summary', getPageValues=True)
Example 3:
columnCaptions= statObj.getStatViewResults(statViewName='Traffic Item Statistics', getColumnCaptions=True)
trafficItemStats = statObj.getStatViewResults(statViewName='Traffic Item Statistics',
rowValuesLabel=trafficItemName)
txFramesIndex = columnCaptions.index('Tx Frames')
rxFramesIndex = columnCaptions.index('Rx Frames')
"""
# Verify for statViewName readiness first
self.getStatView(caption=statViewName)
viewResults = []
counterStop = timeout
for counter in range(1, counterStop+1):
if getColumnCaptions:
print('\nWaiting for {0} Data.ColumnCaptions\n'.format(statViewName))
viewResults = self.ixNetObj.Statistics.View.find(Caption=statViewName)[0].Data.ColumnCaptions
deeperView = 'Data.ColumnCaptions'
if getPageValues:
print('\nWaiting for {0} Data.PageValues\n'.format(statViewName))
viewResults = self.ixNetObj.Statistics.View.find(Caption=statViewName)[0].Data.PageValues
deeperView = 'Data.PageValues'
if getTotalPages:
print('\nWaiting for {0} Data.TotalPages\n'.format(statViewName))
return self.ixNetObj.Statistics.View.find(Caption=statViewName)[0].Data.TotalPages
if rowValuesLabel is not None:
print('\nWaiting for {0} Data.GetRowValues\n'.format(statViewName))
viewResults = self.ixNetObj.Statistics.View.find(Caption=statViewName)[0].GetRowValues(Arg2=rowValuesLabel)
deeperView = 'GetRowValues'
if counter < counterStop and len(viewResults) == 0:
print('\n{0} {1}: is not ready yet.\n\tWait {2}/{3} seconds\n'.format(statViewName, deeperView,
counter, counterStop))
time.sleep(1)
continue
if counter < counterStop and len(viewResults) != 0:
print('\n{0} {1}: is ready\n'.format(statViewName, deeperView))
return viewResults
if counter == counterStop and len(viewResults) == 0 :
raise Exception('\nAPI server failed to provide stat views for {0} {1}'.format(statViewName, deeperView))
def verifyAllProtocolSessions(self, timeout=90):
"""
        Verify that all configured protocol sessions in the Protocols Summary view come up.
"""
# Verify for Protocols Summary stats readiness
self.getStatView(caption='Protocols Summary')
columnCaptions = self.getStatViewResults(statViewName='Protocols Summary', getColumnCaptions=True, timeout=timeout)
counterStop = timeout
for counter in range(1, counterStop+1):
pageValues = self.getStatViewResults(statViewName='Protocols Summary', getPageValues=True, timeout=timeout)
print('\n%-16s %-14s %-16s %-23s' % \
(columnCaptions[0], columnCaptions[1], columnCaptions[2], columnCaptions[3]))
print('%s' % '-' * 70)
sessionDownFlag = 0
sessionNotStartedFlag = 0
sessionFailedFlag = 0
for pageValue in pageValues:
pageValue = pageValue[0]
protocol = pageValue[0]
sessionsUp = int(pageValue[1])
sessionsDown = int(pageValue[2])
sessionsNotStarted = int(pageValue[3])
print('%-16s %-14s %-16s %-23s' % (protocol, sessionsUp, sessionsDown, sessionsNotStarted))
if sessionsNotStarted != 0:
                    sessionNotStartedFlag = 1
if counter < counterStop and sessionsDown != 0:
sessionDownFlag = 1
if counter == counterStop and sessionsDown != 0:
sessionFailedFlag = 1
if sessionNotStartedFlag == 1:
if counter < timeout:
sessionNotStartedFlag = 0
                    print('Protocol sessions are not started yet. Waiting {0}/{1} seconds'.format(counter, timeout))
time.sleep(1)
continue
if counter == timeout:
raise Exception('Protocol session is not started')
            # raise before the down-flag `continue`, otherwise the final
            # iteration would continue out of the loop without reporting
            if sessionFailedFlag == 1:
                raise Exception('Protocol session failed to come up')
            if sessionDownFlag == 1:
                print('\nWaiting {0}/{1} seconds'.format(counter, timeout))
                time.sleep(1)
                continue
            if counter < counterStop and sessionDownFlag == 0:
                print('\nProtocol sessions are all up')
                break
def getStatsByRowLabelName(self, statViewName=None, rowLabelName='all', timeout=90):
"""
This is an internal helper function for: getTrafficItemStats, getPortStatistics, getProtocolsSummary,
getGlobalProtocolStatistics, getDataPlanePortStatistics.
These stats are identified by a label name for each row shown in the GUI.
The label name is the first column value shown in the GUI.
:param statViewName: 'Port Statistics', 'Traffic Item Statistics', 'Protocols Summary', 'Port CPU Statistics'
'Global Protocol Statistics', 'Data Plane Statistics'
:param rowLabelName: <str|list|all>: If you look at the IxNetwork GUI for any of the statViewName listed above,
their rowLabelName is the first in the column stats.
If you're just getting one specific stat, pass in the rowLabelName.
If you want to get multiple stats, pass in a list of rowLabelName.
Defaults to return all the row of stats.
Return
A dict: stats
"""
columnNames = self.getStatViewResults(statViewName=statViewName, getColumnCaptions=True)
totalPages = self.getStatViewResults(statViewName=statViewName, getTotalPages=True)
stats = {}
if type(rowLabelName) == list or rowLabelName == 'all':
for pageNumber in range(1, totalPages+1):
self.ixNetObj.Statistics.View.find(Caption=statViewName)[0].Data.CurrentPage = pageNumber
statViewValues = self.getStatViewResults(statViewName=statViewName, getPageValues=True)
if type(rowLabelName) == list:
# Get the specified list of traffic item's stats
for eachViewStats in statViewValues:
currentRowLabelName = eachViewStats[0][0]
if currentRowLabelName in rowLabelName:
stats[currentRowLabelName] = {}
for columnName, statValue in zip(columnNames, eachViewStats[0]):
stats[currentRowLabelName][columnName] = statValue
else:
# Get all the traffic items
for eachViewStat in statViewValues:
currentRowLabelName = eachViewStat[0][0]
stats[currentRowLabelName] = {}
for columnName, statValue in zip(columnNames, eachViewStat[0]):
stats[currentRowLabelName][columnName] = statValue
else:
# Get just one traffic item stat
statViewValues = self.getStatViewResults(statViewName=statViewName, rowValuesLabel=rowLabelName, timeout=timeout)
if statViewValues == 'kVoid':
raise Exception('No such port name found. Verify for typo: {}'.format(rowLabelName))
stats[rowLabelName] = {}
for columnName, statValue in zip(columnNames, statViewValues):
stats[rowLabelName][columnName] = statValue
return stats
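    # Illustrative call chain (hypothetical port name and column caption;
    # requires a live ixNetObj session):
    #   statObj = Statistics(ixNetObj)
    #   stats = statObj.getPortStatistics(rowLabelName='192.168.70.128/Card01/Port01')
    #   txFrames = stats['192.168.70.128/Card01/Port01']['Frames Tx.']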
def getFlowStatistics(self, timeout=90):
"""
Get Flow Statistics and put each row in a list.
Return
A dict of Flow Statistics: flowStatistics[rowNumber][columnName] = value
"""
columnNames = self.getStatViewResults(statViewName='Flow Statistics', getColumnCaptions=True)
totalPages = self.getStatViewResults(statViewName='Flow Statistics', getTotalPages=True)
flowStatistics = {}
rowNumber = 1
for pageNumber in range(1, totalPages+1):
self.ixNetObj.Statistics.View.find(Caption='Flow Statistics')[0].Data.CurrentPage = pageNumber
pageValues = self.getStatViewResults(statViewName='Flow Statistics', getPageValues=True, timeout=timeout)
for eachRowValue in pageValues:
flowStatistics[rowNumber] = {}
for columnName, rowValue in zip(columnNames, eachRowValue[0]):
flowStatistics[rowNumber][columnName] = rowValue
rowNumber += 1
return flowStatistics
def getTrafficItemStats(self, trafficItemName='all', timeout=90):
"""
Get Traffic Item statistics.
:param trafficItemName: <str|list>: The Traffic Item name.
If you're just getting one traffic item stat, pass in a string name.
If you want to get multiple traffic item stats, pass in a list.
Defaults to return all Traffic Item stats.
Return
A dict of all the TrafficItem statistics
"""
return self.getStatsByRowLabelName(statViewName='Traffic Item Statistics', rowLabelName=trafficItemName, timeout=timeout)
def getPortStatistics(self, rowLabelName='all', timeout=90):
"""
Get port statistics.
:param rowLabelName: <str|list>: Format: '192.168.70.128/Card01/Port01'
If you're just getting one stat, pass in a rowLabelName.
If you want to get multiple port stats, pass in a list of rowLabelName.
Defaults to return all stats.
Return
dict
"""
        # anchored regex so this view does not also match 'Data Plane Port Statistics'
        return self.getStatsByRowLabelName(statViewName='^Port Statistics$', rowLabelName=rowLabelName, timeout=timeout)
def getPortCpuStatistics(self, rowLabelName='all', timeout=90):
"""
Get port cpu statistics.
:param rowLabelName: <str|list>: Format: '192.168.70.128/Card01/Port01'
If you're just getting one port stat, pass in a rowLabelName.
If you want to get multiple port stats, pass in a list of rowLabelName.
Defaults to return all stats.
Return
A dict of Port statistics in rows: portStatistics[statName]
"""
return self.getStatsByRowLabelName(statViewName='Port CPU Statistics', rowLabelName=rowLabelName, timeout=timeout)
def getGlobalProtocolStatistics(self, rowLabelName='all', timeout=90):
"""
Get global protocol statistics.
:param rowLabelName: <str|list>: Format: '192.168.70.128/Card01/Port01'
If you're just getting one protocol stat, pass in a string rowLabelName.
If you want to get multiple protocol stats, pass in a list of rowLabelName.
Defaults to return all stats.
Return
dict
"""
return self.getStatsByRowLabelName(statViewName='Global Protocol Statistics', rowLabelName=rowLabelName, timeout=timeout)
def getDataPlanePortStatistics(self, rowLabelName='all', timeout=90):
"""
Get data plane port statistics.
:param rowLabelName: <str|list>: The port name
If you're just getting one port stat, pass in the port name.
If you want to get multiple port stats, pass in a list of port names.
Defaults to return all stats.
Return
dict
"""
        return self.getStatsByRowLabelName(statViewName='Data Plane Port Statistics', rowLabelName=rowLabelName, timeout=timeout)
def getProtocolsSummary(self, protocolLabelName='all', timeout=90):
"""
Get protocols summary statistics.
:param protocolLabelName: <str|list>: The protocol label name: BGP Peer, IPv4, etc.
Return
dict
"""
        return self.getStatsByRowLabelName(statViewName='Protocols Summary', rowLabelName=protocolLabelName, timeout=timeout)
| 46.775148 | 129 | 0.601708 | 1,544 | 15,810 | 6.158679 | 0.160622 | 0.023557 | 0.039331 | 0.021769 | 0.438742 | 0.367441 | 0.303081 | 0.255653 | 0.207067 | 0.189505 | 0 | 0.016116 | 0.320999 | 15,810 | 337 | 130 | 46.913947 | 0.869679 | 0.344086 | 0 | 0.22449 | 0 | 0.006803 | 0.105352 | 0.002217 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081633 | false | 0 | 0.006803 | 0 | 0.170068 | 0.102041 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35e850936545e13bf86122a0daa80161775d468f | 2,392 | py | Python | ldndctools/misc/types.py | cwerner/ldndctools | b4c411e90d9e430dbd61da1ef565e740d71dee8a | [
"Apache-2.0"
] | 1 | 2020-11-14T06:33:38.000Z | 2020-11-14T06:33:38.000Z | ldndctools/misc/types.py | cwerner/ldndctools | b4c411e90d9e430dbd61da1ef565e740d71dee8a | [
"Apache-2.0"
] | 38 | 2019-05-24T11:12:20.000Z | 2022-03-31T15:04:27.000Z | ldndctools/misc/types.py | cwerner/ldndctools | b4c411e90d9e430dbd61da1ef565e740d71dee8a | [
"Apache-2.0"
] | null | null | null | from enum import Enum
from typing import Dict, Iterable, Optional
try:
from dataclasses import dataclass
except ImportError:
    print(
        "Dataclasses required. Install Python >= 3.7 or the dataclasses package"
        " from PyPI."
    )
class BetterEnum(Enum):
"""a better enum type that also allows checking for members"""
@classmethod
def contains(cls, name):
return name in cls.__members__
@classmethod
def names(cls):
return [x for x in cls.__members__]
@classmethod
def members(cls):
return [x for x in cls]
class RES(BetterEnum):
LR = "Low-res [0.5°]"
MR = "Medium-res [0.25°]"
HR = "High-res [0.083°]"
@dataclass
class BoundingBox:
x1: float = -180
x2: float = 180
y1: float = -90
y2: float = 90
NODATA = -99.99
# map from ISRIC-WISE fields and units to LDNDC:
# (ldndc name, conversion factor, significant digits)
nmap = {
"TOTC": ("corg", 0.001, 5),
"TOTN": ("norg", 0.001, 6),
"PHAQ": ("ph", 1, 2),
"BULK": ("bd", 1, 2),
"CFRAG": ("scel", 0.01, 2),
"SDTO": ("sand", 0.01, 2),
"STPC": ("silt", 0.01, 2),
"CLPC": ("clay", 0.01, 2),
"TopDep": ("topd", 1, 0),
"BotDep": ("botd", 1, 0),
}
@dataclass
class LayerData:
depth: int = -1
split: int = -1
ph: float = NODATA
scel: float = NODATA
bd: float = NODATA
sks: float = NODATA
norg: float = NODATA
corg: float = NODATA
clay: float = NODATA
wcmin: float = NODATA
wcmax: float = NODATA
sand: float = NODATA
silt: float = NODATA
iron: float = NODATA
def as_dict(self, ignore: Optional[Iterable[str]] = None) -> Dict[str, str]:
precision = dict((x[0], x[2]) for x in nmap.values())
precision["depth"] = 0
precision["split"] = 0
precision["wcmin"] = 1
precision["wcmax"] = 1
precision["sks"] = 2
precision["iron"] = 5
out = {}
        for field, field_type in self.__annotations__.items():
            value = getattr(self, field)
            if value == NODATA:
                # the original compared the field *name* to NODATA, which is never true
                out[field] = f"{value:.2f}"
            elif field_type is int:
                # the original used isinstance(field_type, int), which is never
                # true for the annotation object itself
                out[field] = str(value)
            else:
                out[field] = f"{value:.{precision[field]}f}"
if ignore:
for key in ignore:
out.pop(key, None)
return out
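# Example (hypothetical values): LayerData(depth=200, ph=6.51).as_dict(ignore=["iron"])
# returns {'depth': '200', 'split': '-1', 'ph': '6.51', ...}, with unset fields
# rendered as '-99.99' and every value formatted to its declared precision.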
| 23.223301 | 85 | 0.548913 | 309 | 2,392 | 4.210356 | 0.423948 | 0.10146 | 0.012298 | 0.035357 | 0.065334 | 0.029208 | 0.029208 | 0 | 0 | 0 | 0 | 0.044713 | 0.30811 | 2,392 | 102 | 86 | 23.45098 | 0.739577 | 0.061037 | 0 | 0.063291 | 0 | 0 | 0.123269 | 0.012506 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050633 | false | 0 | 0.050633 | 0.037975 | 0.468354 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
35e8c507805bbd492b9c6987b0d5a199fc69f47d | 1,646 | py | Python | 1_Sintaxis_basica/4.calculadora/calculadora.py | igijon/sge_2022 | 48228dad24c3d9fbcd7b0975c28095c40b15c4c3 | [
"MIT"
] | null | null | null | 1_Sintaxis_basica/4.calculadora/calculadora.py | igijon/sge_2022 | 48228dad24c3d9fbcd7b0975c28095c40b15c4c3 | [
"MIT"
] | null | null | null | 1_Sintaxis_basica/4.calculadora/calculadora.py | igijon/sge_2022 | 48228dad24c3d9fbcd7b0975c28095c40b15c4c3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
salir = False
while not salir:
    operacion = input("Enter a valid operation: +, -, *, /, ^ (-1 to quit) ")
    if operacion not in ('+', '-', '*', '/', '^', '-1'):
        print("Error: the operation is not valid")
    elif operacion == '-1':
        salir = True
    else:
        # reset for every round; the original set this flag once, so later
        # rounds silently reused the first pair of operands
        operandos = False
        while not operandos:
            try:
                # float() raises ValueError on non-numeric input, which the
                # original dead type-check could never catch
                operando1 = float(input("Enter an operand: "))
                operando2 = float(input("Enter the second operand: "))
                operandos = True
            except ValueError:
                print("Error: the operands must be numeric")
        if operacion == '+': resultado = operando1 + operando2
        elif operacion == '-': resultado = operando1 - operando2
        elif operacion == '*': resultado = operando1 * operando2
        elif operacion == '/': resultado = operando1 / operando2
        else: resultado = operando1 ** operando2
        print("The result of %.2f %s %.2f is %.2f" % (operando1, operacion, operando2, resultado)) | 63.307692 | 149 | 0.585662 | 179 | 1,646 | 5.385475 | 0.251397 | 0.116183 | 0.143154 | 0.129668 | 0.534232 | 0.534232 | 0.534232 | 0.534232 | 0.534232 | 0.462656 | 0 | 0.039344 | 0.258809 | 1,646 | 25 | 150 | 65.84 | 0.75082 | 0.012151 | 0 | 0.086957 | 0 | 0 | 0.235077 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.304348 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea0149706edc9f62005c8edc5428f45bdab9cddc | 16,841 | py | Python | fmri_power_analysis_comparison_multiple_datasets.py | BlissChapman/SyntheticStatistics | 4c85d89f926a1d7944d675d4f4c3d3fe77fc45c6 | [
"MIT"
] | 3 | 2018-04-28T14:14:30.000Z | 2018-08-06T23:49:26.000Z | fmri_power_analysis_comparison_multiple_datasets.py | BlissChapman/SyntheticStatistics | 4c85d89f926a1d7944d675d4f4c3d3fe77fc45c6 | [
"MIT"
] | null | null | null | fmri_power_analysis_comparison_multiple_datasets.py | BlissChapman/SyntheticStatistics | 4c85d89f926a1d7944d675d4f4c3d3fe77fc45c6 | [
"MIT"
] | null | null | null | import matplotlib
matplotlib.use('Agg')
import argparse
import pickle
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns
import shutil
from brainpedia.brainpedia import Brainpedia
from brainpedia.fmri_processing import invert_preprocessor_scaling
from utils.multiple_comparison import bootstrap_rejecting_voxels_mask, fmri_power_calculations
from nilearn import plotting
from utils.sampling import *
# Parse arguments
parser = argparse.ArgumentParser(description="Compare classical two sample t test to non-parametric tests for real and synthetic fMRI brain imaging datasets.")
parser.add_argument('real_dataset_1_dir', help='the directory containing the first real fMRI dataset')
parser.add_argument('real_dataset_1_cache_dir', help='the directory to use as a cache for real dataset 1 preprocessing')
parser.add_argument('syn_dataset_1_dir', help='the directory containing the synthetic fMRI dataset generated from a model trained on real dataset 1')
parser.add_argument('syn_dataset_1_cache_dir', help='the directory to use as a cache for synthetic dataset 1 preprocessing')
parser.add_argument('dataset_1_label', help='the label to use when describing contents of dataset 1')
parser.add_argument('real_dataset_2_dir', help='the directory containing the second real fMRI dataset')
parser.add_argument('real_dataset_2_cache_dir', help='the directory to use as a cache for real dataset 2 preprocessing')
parser.add_argument('syn_dataset_2_dir', help='the directory containing the synthetic fMRI dataset generated from a model trained on real dataset 2')
parser.add_argument('syn_dataset_2_cache_dir', help='the directory to use as a cache for synthetic dataset 2 preprocessing')
parser.add_argument('dataset_2_label', help='the label to use when describing contents of dataset 2')
parser.add_argument('output_dir', help='the directory to save power analysis results')
args = parser.parse_args()
OUTPUT_DATA_DIR = args.output_dir + 'data/'
# Setup output directory
# shutil.rmtree(args.output_dir, ignore_errors=True)
os.makedirs(args.output_dir, exist_ok=True)
os.makedirs(OUTPUT_DATA_DIR, exist_ok=True)
# Load datasets
DOWNSAMPLE_SCALE = 0.25
MULTI_TAG_LABEL_ENCODING = False
real_dataset_1_brainpedia = Brainpedia(data_dirs=[args.real_dataset_1_dir],
cache_dir=args.real_dataset_1_cache_dir,
scale=DOWNSAMPLE_SCALE,
multi_tag_label_encoding=MULTI_TAG_LABEL_ENCODING)
syn_dataset_1_brainpedia = Brainpedia(data_dirs=[args.syn_dataset_1_dir],
cache_dir=args.syn_dataset_1_cache_dir,
scale=DOWNSAMPLE_SCALE,
multi_tag_label_encoding=MULTI_TAG_LABEL_ENCODING)
real_dataset_2_brainpedia = Brainpedia(data_dirs=[args.real_dataset_2_dir],
cache_dir=args.real_dataset_2_cache_dir,
scale=DOWNSAMPLE_SCALE,
multi_tag_label_encoding=MULTI_TAG_LABEL_ENCODING)
syn_dataset_2_brainpedia = Brainpedia(data_dirs=[args.syn_dataset_2_dir],
cache_dir=args.syn_dataset_2_cache_dir,
scale=DOWNSAMPLE_SCALE,
multi_tag_label_encoding=MULTI_TAG_LABEL_ENCODING)
real_dataset_1, _ = real_dataset_1_brainpedia.all_data()
syn_dataset_1, _ = syn_dataset_1_brainpedia.all_data()
real_dataset_2, _ = real_dataset_2_brainpedia.all_data()
syn_dataset_2, _ = syn_dataset_2_brainpedia.all_data()
# Trim real datasets to the same length
real_dataset_length = min(len(real_dataset_1), len(real_dataset_2))
real_dataset_1 = np.array(real_dataset_1[:real_dataset_length])
real_dataset_2 = np.array(real_dataset_2[:real_dataset_length])
# Trim synthetic datasets to the same length
syn_dataset_length = min(len(syn_dataset_1), len(syn_dataset_2))
syn_dataset_1 = np.array(syn_dataset_1[:syn_dataset_length])
syn_dataset_2 = np.array(syn_dataset_2[:syn_dataset_length])
# Plot examples from datasets
real_dataset_1_img = invert_preprocessor_scaling(
real_dataset_1[0].squeeze(), real_dataset_1_brainpedia.preprocessor)
real_dataset_2_img = invert_preprocessor_scaling(
real_dataset_2[0].squeeze(), real_dataset_2_brainpedia.preprocessor)
syn_dataset_1_img = invert_preprocessor_scaling(
    syn_dataset_1[0].squeeze(), syn_dataset_1_brainpedia.preprocessor)
syn_dataset_2_img = invert_preprocessor_scaling(
    syn_dataset_2[0].squeeze(), syn_dataset_2_brainpedia.preprocessor)
figure, axes = plt.subplots(nrows=6, ncols=1, figsize=(15, 40))
plotting.plot_glass_brain(real_dataset_1_img, threshold='auto',
title="[REAL {0}]".format(args.dataset_1_label), axes=axes[0])
plotting.plot_glass_brain(syn_dataset_1_img, threshold='auto',
title="[SYN {0}]".format(args.dataset_1_label), axes=axes[1])
plotting.plot_glass_brain(real_dataset_2_img, threshold='auto',
title="[REAL {0}]".format(args.dataset_2_label), axes=axes[2])
plotting.plot_glass_brain(syn_dataset_2_img, threshold='auto',
title="[SYN {0}]".format(args.dataset_2_label), axes=axes[3])
# Compute statistical significance weights of each voxel in non-visual vs
# visual
num_trials = 5
k = 10
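# num_trials: independent repetitions per sample size; k appears to be the
# number of bootstrap resamples used within each power calculation.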
real_rejecting_voxels_mask = bootstrap_rejecting_voxels_mask(
real_dataset_1.squeeze(), real_dataset_2.squeeze(), k=k)
# Compute power for various n
n = np.linspace(10, 100, num=18)
fdr_test_p_values_for_n = np.zeros((len(n), num_trials, k))
syn_fdr_test_p_values_for_n = np.zeros((len(n), num_trials, k))
mmd_test_p_values_for_n = np.zeros((len(n), num_trials, k))
syn_mmd_test_p_values_for_n = np.zeros((len(n), num_trials, k))
fdr_test_power_for_n = np.zeros((len(n), num_trials))
syn_fdr_test_power_for_n = np.zeros((len(n), num_trials))
mmd_test_power_for_n = np.zeros((len(n), num_trials))
syn_mmd_test_power_for_n = np.zeros((len(n), num_trials))
percent_rejecting_voxels_syn_for_n = np.zeros((len(n), k))
percent_rejecting_voxels_real_for_n = np.zeros((len(n), k))
wtp_syn_for_n = np.zeros((len(n), k))
wtn_syn_for_n = np.zeros((len(n), k))
wfp_syn_for_n = np.zeros((len(n), k))
wfn_syn_for_n = np.zeros((len(n), k))
wtp_real_for_n = np.zeros((len(n), k))
wtn_real_for_n = np.zeros((len(n), k))
wfp_real_for_n = np.zeros((len(n), k))
wfn_real_for_n = np.zeros((len(n), k))
for i in range(len(n)):
# Determine sample sizes to draw from synthetic and real datasets
# Note: there is limited real data. When there is none left, simply use the
# max available amount of data.
syn_n = int(n[i])
real_n = min(real_dataset_1.shape[0], int(n[i]))
for t in range(num_trials):
fdr_real_p_val, mmd_p_val, fdr_real_power, mmd_power, percent_rejecting_voxels_real, wtp_real, wtn_real, wfp_real, wfn_real = fmri_power_calculations(
real_dataset_1, real_dataset_2, real_n, real_n, real_rejecting_voxels_mask, k=k)
fdr_syn_p_val, mmd_syn_p_val, fdr_syn_power, mmd_syn_power, percent_rejecting_voxels_syn, wtp_syn, wtn_syn, wfp_syn, wfn_syn = fmri_power_calculations(
syn_dataset_1, syn_dataset_2, syn_n, syn_n, real_rejecting_voxels_mask, k=k)
fdr_test_p_values_for_n[i][t][:] = fdr_real_p_val[:]
syn_fdr_test_p_values_for_n[i][t][:] = fdr_syn_p_val[:]
mmd_test_p_values_for_n[i][t][:] = mmd_p_val[:]
syn_mmd_test_p_values_for_n[i][t][:] = mmd_syn_p_val[:]
fdr_test_power_for_n[i][t] = fdr_real_power
syn_fdr_test_power_for_n[i][t] = fdr_syn_power
mmd_test_power_for_n[i][t] = mmd_power
syn_mmd_test_power_for_n[i][t] = mmd_syn_power
percent_rejecting_voxels_syn_for_n[i][:] = percent_rejecting_voxels_syn
percent_rejecting_voxels_real_for_n[i][:] = percent_rejecting_voxels_real
wtp_syn_for_n[i][:] = wtp_syn[:]
wtn_syn_for_n[i][:] = wtn_syn[:]
wfp_syn_for_n[i][:] = wfp_syn[:]
wfn_syn_for_n[i][:] = wfn_syn[:]
wtp_real_for_n[i][:] = wtp_real[:]
wtn_real_for_n[i][:] = wtn_real[:]
wfp_real_for_n[i][:] = wfp_real[:]
wfn_real_for_n[i][:] = wfn_real[:]
print("PERCENT COMPLETE: {0:.2f}%\r".format(
100 * float(i + 1) / float(len(n))), end='')
# Bisection search for the smallest p-value offset (beta) at which the
# synthetic tests never reject more often than the real tests at level alpha
def compute_beta(real_pvals, syn_pvals, alpha=0.05, k=50):
    low = 0.0
    high = 1.0
    for _ in range(k):
        beta = (low + high) / 2.0
        syn_reject_too_often = False
        for ni in range(real_pvals.shape[0]):
            for trial in range(real_pvals.shape[1]):
                avg_real_rejection = np.mean(real_pvals[ni][trial] < alpha)
                avg_syn_rejection = np.mean(syn_pvals[ni][trial] + beta < alpha)
                if avg_syn_rejection > avg_real_rejection:
                    syn_reject_too_often = True
        if syn_reject_too_often:
            low = beta
        else:
            high = beta
    return beta
computed_fdr_beta = compute_beta(fdr_test_p_values_for_n, syn_fdr_test_p_values_for_n)
computed_mmd_beta = compute_beta(mmd_test_p_values_for_n, syn_mmd_test_p_values_for_n)
fdr_beta = 0.049997 # avg = 0.0443541875
mmd_beta = 0.0277111875
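# NOTE: the hardcoded fdr_beta/mmd_beta above override the computed values;
# computed_fdr_beta and computed_mmd_beta are only reported in the plot labels.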
# shape: (len(n), num_trials, k)
conservative_syn_fdr_test_p_values_for_n = np.copy(
syn_fdr_test_p_values_for_n) + fdr_beta
conservative_syn_fdr_test_power = np.mean(
conservative_syn_fdr_test_p_values_for_n < 0.05, axis=2)
conservative_syn_mmd_test_p_values_for_n = np.copy(
syn_mmd_test_p_values_for_n) + mmd_beta
conservative_syn_mmd_test_power = np.mean(
conservative_syn_mmd_test_p_values_for_n < 0.05, axis=2)
# Save p-values
pickle.dump(fdr_test_p_values_for_n, open('{0}[fmri_power_analysis]_[{1}_VS_{2}]_fdr_p_vals_real.pkl'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), "wb"))
pickle.dump(syn_fdr_test_p_values_for_n, open('{0}[fmri_power_analysis]_[{1}_VS_{2}]_fdr_p_vals_syn.pkl'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), "wb"))
pickle.dump(conservative_syn_fdr_test_p_values_for_n, open('{0}[fmri_power_analysis]_[{1}_VS_{2}]_fdr_p_vals_syncon.pkl'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), "wb"))
pickle.dump(mmd_test_p_values_for_n, open('{0}[fmri_power_analysis]_[{1}_VS_{2}]_mmd_power_real.pkl'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), "wb"))
pickle.dump(syn_mmd_test_p_values_for_n, open('{0}[fmri_power_analysis]_[{1}_VS_{2}]_mmd_power_syn.pkl'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), "wb"))
pickle.dump(conservative_syn_mmd_test_p_values_for_n, open('{0}[fmri_power_analysis]_[{1}_VS_{2}]_mmd_power_syncon.pkl'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), "wb"))
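# The pickles just written are immediately reloaded below; this looks like a
# save/load round-trip check, since the reloaded arrays are not used further.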
fdr_test_p_values_for_n = pickle.load( open('{0}[fmri_power_analysis]_[{1}_VS_{2}]_fdr_p_vals_real.pkl'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), "rb") )
syn_fdr_test_p_values_for_n = pickle.load( open('{0}[fmri_power_analysis]_[{1}_VS_{2}]_fdr_p_vals_syn.pkl'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), "rb") )
conservative_syn_fdr_test_p_values_for_n = pickle.load( open('{0}[fmri_power_analysis]_[{1}_VS_{2}]_fdr_p_vals_syncon.pkl'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), "rb") )
mmd_test_p_values_for_n = pickle.load( open('{0}[fmri_power_analysis]_[{1}_VS_{2}]_mmd_power_real.pkl'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), "rb") )
syn_mmd_test_p_values_for_n = pickle.load( open('{0}[fmri_power_analysis]_[{1}_VS_{2}]_mmd_power_syn.pkl'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), "rb") )
conservative_syn_mmd_test_p_values_for_n = pickle.load( open('{0}[fmri_power_analysis]_[{1}_VS_{2}]_mmd_power_syncon.pkl'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), "rb") )
# Save power
np.save('{0}[fmri_power_analysis]_[{1}_VS_{2}]_fdr_power_real.npy'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), fdr_test_power_for_n)
np.save('{0}[fmri_power_analysis]_[{1}_VS_{2}]_fdr_power_syn.npy'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), syn_fdr_test_power_for_n)
np.save('{0}[fmri_power_analysis]_[{1}_VS_{2}]_fdr_power_syncon.npy'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), conservative_syn_fdr_test_power)
np.save('{0}[fmri_power_analysis]_[{1}_VS_{2}]_mmd_power_real.npy'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), mmd_test_power_for_n)
np.save('{0}[fmri_power_analysis]_[{1}_VS_{2}]_mmd_power_syn.npy'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), syn_mmd_test_power_for_n)
np.save('{0}[fmri_power_analysis]_[{1}_VS_{2}]_mmd_power_syncon.npy'.format(
OUTPUT_DATA_DIR, args.dataset_1_label, args.dataset_2_label), conservative_syn_mmd_test_power)
# Plot curve of n vs FDR corrected t test power
sns.tsplot(data=fdr_test_power_for_n.T, time=n, ci=[
68, 95], color='blue', condition='REAL', ax=axes[4])
sns.tsplot(data=syn_fdr_test_power_for_n.T, time=n, ci=[
68, 95], color='orange', condition='SYN', ax=axes[4])
sns.tsplot(data=conservative_syn_fdr_test_power.T, time=n, ci=[
68, 95], color='green', condition='SYN CONSERVATIVE', ax=axes[4])
axes[4].set_title('Sample Size vs FDR Corrected T Test Power')
axes[4].set_xlabel('Sample Size, Applied Beta = %f, Computed Beta = %f' % (fdr_beta, computed_fdr_beta))
axes[4].set_ylabel('Power')
axes[4].set_ylim([-0.1, 1.1])
axes[4].legend(loc="upper right")
# Plot curve of n vs MMD test power
sns.tsplot(data=mmd_test_power_for_n.T, time=n, ci=[
68, 95], color='blue', condition='REAL', ax=axes[5])
sns.tsplot(data=syn_mmd_test_power_for_n.T, time=n, ci=[
68, 95], color='orange', condition='SYN', ax=axes[5])
sns.tsplot(data=conservative_syn_mmd_test_power.T, time=n, ci=[
68, 95], color='green', condition='SYN CONSERVATIVE', ax=axes[5])
axes[5].set_title('Sample Size vs MMD Test Power')
axes[5].set_xlabel('Sample Size, Applied Beta = %f, Computed Beta = %f' % (mmd_beta, computed_mmd_beta))
axes[5].set_ylabel('Power')
axes[5].set_ylim([-0.1, 1.1])
axes[5].legend(loc="upper right")
# # Plot curve of percent rejecting voxels
# sns.tsplot(data=percent_rejecting_voxels_real_for_n.T, time=n, ci=[
# 68, 95], color='blue', condition='REAL', ax=axes[6])
# sns.tsplot(data=percent_rejecting_voxels_syn_for_n.T, time=n, ci=[
# 68, 95], color='orange', condition='SYN', ax=axes[6])
# axes[6].set_title('Sample Size vs Percent Significant Voxels')
# axes[6].set_xlabel('Sample Size')
# axes[6].set_ylabel('% Sig Voxels')
# axes[6].legend(loc="upper right")
# # Plot curve of n vs rejection overlaps
# # True Positive
# sns.tsplot(data=wtp_real_for_n.T, time=n, ci=[
# 68, 95], color='blue', condition='REAL', ax=axes[7])
# sns.tsplot(data=wtp_syn_for_n.T, time=n, ci=[
# 68, 95], color='orange', condition='SYN', ax=axes[7])
# axes[7].set_title('Sample Size vs Weighted True Positive')
# axes[7].set_xlabel('Sample Size')
# axes[7].set_ylabel('W_TP')
# axes[7].legend(loc="upper right")
# # True Negative
# sns.tsplot(data=wtn_real_for_n.T, time=n, ci=[
# 68, 95], color='blue', condition='REAL', ax=axes[8])
# sns.tsplot(data=wtn_syn_for_n.T, time=n, ci=[
# 68, 95], color='orange', condition='SYN', ax=axes[8])
# axes[8].set_title('Sample Size vs Weighted True Negatives')
# axes[8].set_xlabel('Sample Size')
# axes[8].set_ylabel('W_TN')
# axes[8].legend(loc="upper right")
# # False Positive
# sns.tsplot(data=wfp_real_for_n.T, time=n, ci=[
# 68, 95], color='blue', condition='REAL', ax=axes[9])
# sns.tsplot(data=wfp_syn_for_n.T, time=n, ci=[
# 68, 95], color='orange', condition='SYN', ax=axes[9])
# axes[9].set_title('Sample Size vs Weighted False Positives')
# axes[9].set_xlabel('Sample Size')
# axes[9].set_ylabel('W_FP')
# axes[9].legend(loc="upper right")
# # False Negative
# sns.tsplot(data=wfn_real_for_n.T, time=n, ci=[
# 68, 95], color='blue', condition='REAL', ax=axes[10])
# sns.tsplot(data=wfn_syn_for_n.T, time=n, ci=[
# 68, 95], color='orange', condition='SYN', ax=axes[10])
# axes[10].set_title('Sample Size vs Weighted False Negatives')
# axes[10].set_xlabel('Sample Size')
# axes[10].set_ylabel('W_FN')
# axes[10].legend(loc="upper right")
# Save results
figure.subplots_adjust(hspace=0.5)
figure.savefig('{0}[fmri_power_analysis]_[{1}_VS_{2}].pdf'.format(
args.output_dir, args.dataset_1_label, args.dataset_2_label), format='pdf')
| 49.825444 | 159 | 0.724304 | 2,792 | 16,841 | 3.975645 | 0.095272 | 0.027748 | 0.02973 | 0.037838 | 0.725676 | 0.648108 | 0.546577 | 0.496126 | 0.432883 | 0.395856 | 0 | 0.02685 | 0.146369 | 16,841 | 337 | 160 | 49.973294 | 0.74527 | 0.166023 | 0 | 0.117117 | 0 | 0 | 0.178513 | 0.082671 | 0 | 0 | 0 | 0 | 0 | 1 | 0.004505 | false | 0.009009 | 0.058559 | 0 | 0.067568 | 0.004505 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea0255f4254151444f15b7ab3e3996d9fc19d782 | 5,096 | py | Python | swift/codegen/test/test_cppgen.py | Semmle/ql | 4a025053cc9acdda596565ff30f2e3ff36301b04 | [
"MIT"
] | 643 | 2018-08-03T11:16:54.000Z | 2020-04-27T23:10:55.000Z | swift/codegen/test/test_cppgen.py | DirtyApexAlpha/codeql | 4c59b0d2992ee0d90cc2f46d6a85ac79e1d57f21 | [
"MIT"
] | 1,880 | 2018-08-03T11:28:32.000Z | 2020-04-28T13:18:51.000Z | swift/codegen/test/test_cppgen.py | DirtyApexAlpha/codeql | 4c59b0d2992ee0d90cc2f46d6a85ac79e1d57f21 | [
"MIT"
] | 218 | 2018-08-03T11:16:58.000Z | 2020-04-24T02:24:00.000Z | import sys
from swift.codegen.generators import cppgen
from swift.codegen.lib import cpp
from swift.codegen.test.utils import *
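# Unit tests for the C++ generator: generated class lists and trap names,
# topological ordering of class hierarchies, schema-to-C++ type mapping, and
# escaping of reserved or underscore-suffixed field names.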
output_dir = pathlib.Path("path", "to", "output")
@pytest.fixture
def generate(opts, renderer, input):
opts.cpp_output = output_dir
opts.cpp_namespace = "test_namespace"
opts.trap_affix = "TestTrapAffix"
opts.cpp_include_dir = "my/include/dir"
def ret(classes):
input.classes = classes
generated = run_generation(cppgen.generate, opts, renderer)
assert set(generated) == {output_dir / "TestTrapAffixClasses.h"}
generated = generated[output_dir / "TestTrapAffixClasses.h"]
assert isinstance(generated, cpp.ClassList)
assert generated.namespace == opts.cpp_namespace
assert generated.trap_affix == opts.trap_affix
assert generated.include_dir == opts.cpp_include_dir
return generated.classes
return ret
def test_empty(generate):
assert generate([]) == []
def test_empty_class(generate):
assert generate([
schema.Class(name="MyClass"),
]) == [
cpp.Class(name="MyClass", final=True, trap_name="MyClasses")
]
def test_two_class_hierarchy(generate):
base = cpp.Class(name="A")
assert generate([
schema.Class(name="A", derived={"B"}),
schema.Class(name="B", bases={"A"}),
]) == [
base,
cpp.Class(name="B", bases=[base], final=True, trap_name="Bs"),
]
def test_complex_hierarchy_topologically_ordered(generate):
a = cpp.Class(name="A")
b = cpp.Class(name="B")
c = cpp.Class(name="C", bases=[a])
d = cpp.Class(name="D", bases=[a])
e = cpp.Class(name="E", bases=[b, c, d], final=True, trap_name="Es")
f = cpp.Class(name="F", bases=[c], final=True, trap_name="Fs")
assert generate([
schema.Class(name="F", bases={"C"}),
schema.Class(name="B", derived={"E"}),
schema.Class(name="D", bases={"A"}, derived={"E"}),
schema.Class(name="C", bases={"A"}, derived={"E", "F"}),
schema.Class(name="E", bases={"B", "C", "D"}),
schema.Class(name="A", derived={"C", "D"}),
]) == [a, b, c, d, e, f]
@pytest.mark.parametrize("type,expected", [
("a", "a"),
("string", "std::string"),
("boolean", "bool"),
("MyClass", "TestTrapAffixLabel<MyClassTag>"),
])
@pytest.mark.parametrize("property_cls,optional,repeated,trap_name", [
(schema.SingleProperty, False, False, None),
(schema.OptionalProperty, True, False, "MyClassProps"),
(schema.RepeatedProperty, False, True, "MyClassProps"),
(schema.RepeatedOptionalProperty, True, True, "MyClassProps"),
])
def test_class_with_field(generate, type, expected, property_cls, optional, repeated, trap_name):
assert generate([
schema.Class(name="MyClass", properties=[property_cls("prop", type)]),
]) == [
cpp.Class(name="MyClass",
fields=[cpp.Field("prop", expected, is_optional=optional,
is_repeated=repeated, trap_name=trap_name)],
trap_name="MyClasses",
final=True)
]
def test_class_with_predicate(generate):
assert generate([
schema.Class(name="MyClass", properties=[schema.PredicateProperty("prop")]),
]) == [
cpp.Class(name="MyClass",
fields=[cpp.Field("prop", "bool", trap_name="MyClassProp", is_predicate=True)],
trap_name="MyClasses",
final=True)
]
@pytest.mark.parametrize("name",
["start_line", "start_column", "end_line", "end_column", "index", "num_whatever", "width"])
def test_class_with_overridden_unsigned_field(generate, name):
assert generate([
schema.Class(name="MyClass", properties=[
schema.SingleProperty(name, "bar")]),
]) == [
cpp.Class(name="MyClass",
fields=[cpp.Field(name, "unsigned")],
trap_name="MyClasses",
final=True)
]
def test_class_with_overridden_underscore_field(generate):
assert generate([
schema.Class(name="MyClass", properties=[
schema.SingleProperty("something_", "bar")]),
]) == [
cpp.Class(name="MyClass",
fields=[cpp.Field("something", "bar")],
trap_name="MyClasses",
final=True)
]
@pytest.mark.parametrize("name", cpp.cpp_keywords)
def test_class_with_keyword_field(generate, name):
assert generate([
schema.Class(name="MyClass", properties=[
schema.SingleProperty(name, "bar")]),
]) == [
cpp.Class(name="MyClass",
fields=[cpp.Field(name + "_", "bar")],
trap_name="MyClasses",
final=True)
]
if __name__ == '__main__':
sys.exit(pytest.main([__file__] + sys.argv[1:]))
| 34.666667 | 116 | 0.574765 | 547 | 5,096 | 5.195612 | 0.213894 | 0.08867 | 0.073892 | 0.070373 | 0.432794 | 0.320901 | 0.286418 | 0.258269 | 0.20197 | 0.089374 | 0 | 0.000268 | 0.266484 | 5,096 | 146 | 117 | 34.90411 | 0.760032 | 0 | 0 | 0.308333 | 0 | 0 | 0.113619 | 0.02237 | 0 | 0 | 0 | 0 | 0.116667 | 1 | 0.091667 | false | 0 | 0.033333 | 0 | 0.141667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea0273ed1a033832802b82b46320e1ae06cc6785 | 2,890 | py | Python | capture.py | cipriantarta/python-capture | b6e8fdb1a75b2d8d77e161a3ff7b7e0f4eec0bb7 | [
"BSD-3-Clause"
] | 5 | 2016-03-22T15:47:47.000Z | 2021-04-22T23:41:55.000Z | capture.py | cipriantarta/python-capture | b6e8fdb1a75b2d8d77e161a3ff7b7e0f4eec0bb7 | [
"BSD-3-Clause"
] | null | null | null | capture.py | cipriantarta/python-capture | b6e8fdb1a75b2d8d77e161a3ff7b7e0f4eec0bb7 | [
"BSD-3-Clause"
] | 3 | 2019-11-29T12:31:42.000Z | 2020-08-21T05:00:25.000Z | import os
import socket
import subprocess
import time
import sys
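# Streamer captures video from the default AVFoundation device via ffmpeg,
# encodes it as WebM (VP8 video / Vorbis audio), and pushes the encoded bytes
# to a TCP server, reconnecting automatically if the connection drops.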
class Streamer:
command = 'ffmpeg ' \
'-y ' \
'-f avfoundation ' \
'-r 30 ' \
'-pixel_format bgr0 ' \
'-s 640x480 ' \
'-video_device_index 0 ' \
'-i ":0" ' \
'-c:v libvpx ' \
'-b:v 1M ' \
'-c:a libvorbis ' \
'-b:a 96k ' \
'-deadline realtime ' \
'-flags +global_header ' \
'-cpu-used 1 ' \
'-threads 8 ' \
'-f segment ' \
'-f webm ' \
'-'
__connected = False
def __init__(self, host, port):
self.server = None
self.host = host
self.port = port
def connect(self):
while True:
try:
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Connecting to {}:{}...'.format(self.host, self.port))
self.server.connect((self.host, self.port))
self.__connected = True
print('Connected.\n')
break
except ConnectionRefusedError:
print('Connection refused. Retrying in 3 seconds.\n')
time.sleep(3)
def stream(self):
p = subprocess.Popen(self.command.split(), stdin=open(os.devnull), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print('Streaming...\n')
while True:
data = p.stdout.read(1024)
if len(data) == 0:
err = p.stderr.readlines()
if len(err) > 0:
print('Error')
print(''.join([l.decode() for l in err]))
break
try:
self.server.send(data)
except BrokenPipeError:
print('Disconnected from server. Reconnecting in 3 seconds\n')
time.sleep(3)
self.connect()
print('Streaming...\n')
def close(self):
if not self.connected:
return
print('Disconnected.')
self.server.close()
@property
def connected(self):
return self.__connected
if __name__ == '__main__':
try:
host = sys.argv[1]
except IndexError:
host = 'localhost'
try:
port = int(sys.argv[2])
except IndexError:
port = 8889
except ValueError:
print('Invalid port.')
exit(1)
streamer = Streamer(host, port)
try:
streamer.connect()
streamer.stream()
except KeyboardInterrupt:
print('Exiting...')
except OSError as e:
print(e)
exit(1)
finally:
streamer.close()
| 27.788462 | 123 | 0.457785 | 272 | 2,890 | 4.775735 | 0.455882 | 0.038491 | 0.027714 | 0.024634 | 0.063125 | 0.032333 | 0.032333 | 0 | 0 | 0 | 0 | 0.020619 | 0.429412 | 2,890 | 103 | 124 | 28.058252 | 0.767132 | 0 | 0 | 0.186813 | 0 | 0 | 0.156799 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054945 | false | 0 | 0.054945 | 0.010989 | 0.164835 | 0.131868 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea036a45dec9a7f3f1a9dc4e2a76626c4578aa28 | 1,714 | py | Python | course/lesson09/task01/copy_file.py | mstepovanyy/python-training | 0a6766674855cbe784bc1195774016aee889ad6c | [
"MIT",
"Unlicense"
] | null | null | null | course/lesson09/task01/copy_file.py | mstepovanyy/python-training | 0a6766674855cbe784bc1195774016aee889ad6c | [
"MIT",
"Unlicense"
] | null | null | null | course/lesson09/task01/copy_file.py | mstepovanyy/python-training | 0a6766674855cbe784bc1195774016aee889ad6c | [
"MIT",
"Unlicense"
] | null | null | null | #!/usr/bin/python3
"""
File Copy
---------
Write a simple program that reads content from one file and writes it to
another file. All possible I/O and OS errors shall be handled gracefully (e.g.
nonexisting input file, insufficient permissions etc) and an appropriate
diagnostic information shall be printed to standard error. If a read of an
input file fails - no subsequent write shall be done. An output file shall be
written only if it does not exist, otherwise an error shall occur (think of
concurrency problems associated with this part of a task).
An application shall return an appropriate exit code identifying success or
failure to fulfill the requested operation.
"""
import sys
def copy_file(in_file, out_file):
    try:
        with open(in_file, mode="r", encoding="utf-8") as in_fd:
            # Mode "x" creates the output file atomically and raises if it
            # already exists, avoiding the check-then-open race.
            with open(out_file, mode="x", encoding="utf-8") as out_fd:
                try:
                    # Iterate over lines of the file (readline() would have
                    # iterated over the characters of the first line only).
                    for line in in_fd:
                        try:
                            out_fd.write(line)
                        except IOError as w_err:
                            print("Cannot write to file {}, error: {}".format(out_file, w_err), file=sys.stderr)
                            return 1
                except IOError as r_error:
                    print("Cannot read from file {}, error: {}".format(in_file, r_error), file=sys.stderr)
                    return 1
    except FileExistsError:
        print("Output file already exists: {}".format(out_file), file=sys.stderr)
        return 1
    except IOError as err:
        print("Cannot open file: ({}): {}".format(type(err), err), file=sys.stderr)
        return 1
    except Exception as u_error:
        print("Cannot copy file, due to error: {}".format(u_error), file=sys.stderr)
        return 1
    return 0
if __name__ == "__main__":
    exit(copy_file("../../alice.txt", "./alice.txt"))
| 38.088889 | 95 | 0.620187 | 241 | 1,714 | 4.294606 | 0.46888 | 0.033816 | 0.043478 | 0.027053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003236 | 0.27888 | 1,714 | 44 | 96 | 38.954545 | 0.834142 | 0.394399 | 0 | 0.136364 | 0 | 0 | 0.198251 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.045455 | 0 | 0.090909 | 0.227273 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea04ef043020cf60b894c466cdbd3b903949299f | 1,134 | pyde | Python | mode/examples/Basics/Objects/MultipleConstructors/MultipleConstructors.pyde | timgates42/processing.py | 78a237922c2a928b83f4ad579dbf8d32c0099890 | [
"Apache-2.0"
] | 1,224 | 2015-01-01T22:09:23.000Z | 2022-03-29T19:43:56.000Z | mode/examples/Basics/Objects/MultipleConstructors/MultipleConstructors.pyde | timgates42/processing.py | 78a237922c2a928b83f4ad579dbf8d32c0099890 | [
"Apache-2.0"
] | 253 | 2015-01-14T03:45:51.000Z | 2022-02-08T01:18:19.000Z | mode/examples/Basics/Objects/MultipleConstructors/MultipleConstructors.pyde | timgates42/processing.py | 78a237922c2a928b83f4ad579dbf8d32c0099890 | [
"Apache-2.0"
] | 225 | 2015-01-13T18:38:33.000Z | 2022-03-30T20:27:39.000Z | '''
Multiple Constructors
A class can have multiple constructors that assign the fields in different ways.
Sometimes it's beneficial to specify every aspect of an object's data by assigning
parameters to the fields, but other times it might be appropriate to define only
one or a few.
In Python, as there is no method overloading, one can provide different ways of creating
instances by setting default values for parameters in the __init__ method.
'''
def setup():
size(640, 360)
background(204)
noLoop()
global spots
spots = (Spot(),
Spot(x=120, y=70),
Spot(width / 2, height / 2, 120),
Spot(radius=10),
)
def draw():
for sp in spots:
sp.display()
class Spot:
def __init__(self, x=None, y=None, radius=40):
if x is None:
self.x = width / 4
else:
self.x = x
if y is None:
self.y = height / 2
else:
self.y = y
self.radius = radius
self.diam = radius * 2
def display(self):
ellipse(self.x, self.y, self.diam, self.diam)
| 23.142857 | 88 | 0.594356 | 162 | 1,134 | 4.111111 | 0.524691 | 0.03003 | 0.03003 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03381 | 0.321869 | 1,134 | 48 | 89 | 23.625 | 0.83225 | 0.396825 | 0 | 0.074074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0 | 0 | 0.185185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea07b3ab76e1fb7c25770a29fa465f8fd71ec436 | 2,165 | py | Python | mythril/analysis/modules/deprecated_ops.py | afuch05/mythril-classic | de562335a62703429e085de7088a3e543f84e94f | [
"MIT"
] | 6 | 2021-02-13T05:03:32.000Z | 2021-09-19T14:57:58.000Z | mythril/analysis/modules/deprecated_ops.py | cryptobarbossa/mythril-classic | 5dd544d301238db2bc536d7cee69b96e9a15e9c4 | [
"MIT"
] | null | null | null | mythril/analysis/modules/deprecated_ops.py | cryptobarbossa/mythril-classic | 5dd544d301238db2bc536d7cee69b96e9a15e9c4 | [
"MIT"
] | 2 | 2020-05-26T15:03:20.000Z | 2021-07-29T09:09:05.000Z | from mythril.analysis.report import Issue
from mythril.analysis.swc_data import TX_ORIGIN_USAGE
from mythril.analysis.modules.base import DetectionModule
import logging
"""
MODULE DESCRIPTION:
Check for constraints on tx.origin (i.e., access to some functionality is restricted to a specific origin).
"""
class DeprecatedOperationsModule(DetectionModule):
def __init__(self):
super().__init__(
name="Deprecated Operations",
swc_id=TX_ORIGIN_USAGE,
hooks=["ORIGIN"],
description=(
"Check for constraints on tx.origin (i.e., access to some "
"functionality is restricted to a specific origin)."
),
)
def execute(self, statespace):
logging.debug("Executing module: DEPRECATED OPCODES")
issues = []
for k in statespace.nodes:
node = statespace.nodes[k]
for state in node.states:
instruction = state.get_current_instruction()
if instruction["opcode"] == "ORIGIN":
description = (
"The function `{}` retrieves the transaction origin (tx.origin) using the ORIGIN opcode. "
"Use msg.sender instead.\nSee also: "
"https://solidity.readthedocs.io/en/develop/security-considerations.html#tx-origin".format(
node.function_name
)
)
issue = Issue(
contract=node.contract_name,
function_name=node.function_name,
address=instruction["address"],
title="Use of tx.origin",
bytecode=state.environment.code.bytecode,
_type="Warning",
swc_id=TX_ORIGIN_USAGE,
description=description,
gas_used=(state.mstate.min_gas_used, state.mstate.max_gas_used),
)
issues.append(issue)
return issues
detector = DeprecatedOperationsModule()
| 33.307692 | 115 | 0.549192 | 201 | 2,165 | 5.771144 | 0.482587 | 0.055172 | 0.049138 | 0.051724 | 0.196552 | 0.165517 | 0.165517 | 0.165517 | 0.165517 | 0.165517 | 0 | 0 | 0.368591 | 2,165 | 64 | 116 | 33.828125 | 0.848574 | 0 | 0 | 0.090909 | 0 | 0.022727 | 0.205027 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea07d921536786bfcb98114a0624026ec534ceeb | 8,156 | py | Python | game.py | wangz49777/color-game | f291aa8750e4efb5f23a281213c1be79a0332b96 | [
"MIT"
] | 2 | 2022-03-22T08:18:31.000Z | 2022-03-22T08:18:33.000Z | game.py | Velours-mop/color-game | f291aa8750e4efb5f23a281213c1be79a0332b96 | [
"MIT"
] | null | null | null | game.py | Velours-mop/color-game | f291aa8750e4efb5f23a281213c1be79a0332b96 | [
"MIT"
] | 1 | 2022-03-22T08:18:16.000Z | 2022-03-22T08:18:16.000Z | import tkinter as tk
from random import choice, sample
import tkinter.messagebox
from PIL import Image, ImageTk
import sys
import os
import global_value
colors = ['red', 'blue', 'yellow', 'green', 'white', 'purple']
def resource_path(relative_path):
    if getattr(sys, 'frozen', False):  # running as a frozen (PyInstaller) bundle?
base_path = sys._MEIPASS
else:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
def mouse_press(event, color):
if global_value.is_over:
return
global_value.tool[color].config(image=global_value.press[color])
def del_label(event, label):
if label in global_value.res:
index = global_value.res.index(label)
global_value.row = min(global_value.row, index)
global_value.res[index] = 0
label.place_forget()
def add_label(event, color):
if global_value.is_over:
return
if global_value.row >= 4:
pass
else:
if event.x > 0 and event.x < 90 and event.y > 0 and event.y < 90:
lab_img = tkinter.Label(canvas, bd=0, bg='Gray', image=global_value.ball[color])
lab_img.place(x=global_value.col * 110 + 40, y=global_value.row * 79 + 147, anchor='nw')
lab_img.color = color
lab_img.bind('<Button-3>', lambda e, l=lab_img: del_label(e, l))
global_value.res[global_value.row] = lab_img
while global_value.row < 4 and global_value.res[global_value.row] != 0:
global_value.row += 1
global_value.tool[color].config(image=global_value.release[color])
def cal_show():
if 0 in global_value.res:
return
global_value.labels += global_value.res
res_color = []
correct = 0
sub_correct = 0
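    # Mastermind-style scoring: 'correct' counts exact position+color matches,
    # while 'sub_correct' counts colors that are present but misplaced.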
for i in range(4):
res_color.append(global_value.res[i].color)
if global_value.answer[i] == global_value.res[i].color:
correct += 1
print('guess:', res_color)
for e in global_value.answer:
if e in res_color:
sub_correct += 1
res_color.remove(e)
sub_correct -= correct
print('correct:', correct, '\nsub_correct:', sub_correct)
for i, color in enumerate([1] * correct + [0] * sub_correct):
image = red_tin if color else white_tin
tin = tkinter.Label(canvas, bd=0, bg='Gray', image=image)
tin.place(x=global_value.col * 110 + 39 + i % 2 * 33, y=40 + int(i / 2) * 33, anchor='nw')
global_value.res_tin.append(tin)
if correct == 4:
        tkinter.messagebox.showinfo(title='Congratulations', message='You guessed it!')
show_answer()
tk.Label(canvas, bd=0, bg='dimgrey', image=cup_img).place(x=960, y=40, anchor='nw')
global_value.col += 1
global_value.frame.place_forget()
global_value.res = [0] * 4
global_value.row = 0
if global_value.col > 7:
        tkinter.messagebox.showinfo(title='Sorry', message='Game over')
show_answer()
global_value.is_over = True
else:
global_value.frame.place(x=110 * global_value.col + 20, y=120, anchor='nw')
def show_answer():
global_value.frame.place(x=950, y=120, anchor='nw')
for i, color in enumerate(global_value.answer):
res_img = tk.Label(canvas, bd=0, bg='Gray', image=global_value.ball[color])
res_img.place(x=970, y=i * 79 + 147, anchor='nw')
global_value.labels.append(res_img)
def restart():
if is_duplicate.get():
global_value.answer = []
for i in range(4):
global_value.answer.append(choice(colors))
else:
global_value.answer = sample(colors, 4)
print('answer:', global_value.answer)
global_value.res = list(set(global_value.res))
if 0 in global_value.res:
global_value.res.remove(0)
global_value.labels += global_value.res
global_value.is_over = False
global_value.row = 0
global_value.col = 0
global_value.res = [0] * 4
global_value.frame.place_forget()
global_value.frame.place(x=110 * global_value.col + 20, y=120, anchor='nw')
tk.Label(canvas, bd=0, bg='dimgrey', image=cup_white_img).place(x=960, y=40, anchor='nw')
while len(global_value.labels):
lab = global_value.labels.pop()
lab.place_forget()
while len(global_value.res_tin):
tin = global_value.res_tin.pop()
tin.place_forget()
def set_is_duplicate():
restart()
def show_help():
    tkinter.messagebox.showinfo(title='Help', message='Red: color and position correct\nWhite: color correct, position wrong\nRight click: clear')
if __name__ == '__main__':
window = tk.Tk()
    window.title('Guess the Colors')
window.iconbitmap(resource_path(os.path.join('img', 'game.ico')))
window.geometry('1080x600')
canvas = tk.Canvas(window, bg='dimgrey', height=600, width=1080)
    tk.Label(canvas, bd=0, fg='white', bg='dimgrey', text='Answer', font=('黑体', 25)).place(x=960, y=70, anchor='nw')
    is_duplicate = tk.IntVar()
    tk.Checkbutton(canvas, fg='white', bg='dimgrey', text='Allow repeats', variable=is_duplicate, onvalue=1, offvalue=0,
                   command=set_is_duplicate).place(x=750, y=525, anchor='nw')
bg_img = ImageTk.PhotoImage(Image.open(resource_path(os.path.join('img', 'bg.png'))))
res_img = ImageTk.PhotoImage(Image.open(resource_path(os.path.join('img', 'result.png'))))
red_tin = ImageTk.PhotoImage(Image.open(resource_path(os.path.join('img', 'red_label.png'))))
white_tin = ImageTk.PhotoImage(Image.open(resource_path(os.path.join('img', 'white_label.png'))))
tool_img = ImageTk.PhotoImage(Image.open(resource_path(os.path.join('img', 'tool.png'))))
frame_img = ImageTk.PhotoImage(Image.open(resource_path(os.path.join('img', 'frame.png'))))
cup_img = ImageTk.PhotoImage(Image.open(resource_path(os.path.join('img', 'cup.png'))))
cup_white_img = ImageTk.PhotoImage(Image.open(resource_path(os.path.join('img', 'cup_white.png'))))
for i in range(8):
tk.Label(canvas, bd=0, bg='dimgrey', image=bg_img).place(x=110 * i + 20, y=120, anchor='nw')
tk.Label(canvas, bd=0, bg='dimgrey', image=res_img).place(x=110 * i + 30, y=30, anchor='nw')
tk.Label(canvas, bd=0, bg='dimgrey', image=bg_img).place(x=950, y=120, anchor='nw')
tk.Label(canvas, bd=0, bg='dimgrey', image=tool_img).place(x=50, y=480, anchor='nw')
global_value.frame = tkinter.Label(canvas, bd=0, bg='dimgrey', image=frame_img)
restart()
canvas.pack()
for i, color in enumerate(colors):
img_release = Image.open(resource_path(os.path.join('img', color + '_release.png')))
img = Image.open(resource_path(os.path.join('img', color + '.png')))
img_press = Image.open(resource_path(os.path.join('img', color + '_press.png')))
global_value.ball[color] = ImageTk.PhotoImage(img)
global_value.release[color] = ImageTk.PhotoImage(img_release)
global_value.press[color] = ImageTk.PhotoImage(img_press)
t = tk.Label(canvas, bd=0, image=global_value.release[color])
t.bind('<Button-1>', lambda e, c=color: mouse_press(e, c))
t.bind('<ButtonRelease-1>', lambda e, c=color: add_label(e, c))
t.place(x=120 + 90 * i, y=485, anchor='nw')
global_value.tool[color] = t
    tk.Button(window, width=10, height=1, activebackground='gray', fg='white', bg='gray', text='Guess', command=cal_show).place(x=850, y=505, anchor='nw')
    tk.Button(window, width=10, height=1, activebackground='gray', fg='white', bg='gray', text='Help', command=show_help).place(x=950, y=505, anchor='nw')
    tk.Button(window, width=10, height=1, activebackground='gray', fg='white', bg='gray', text='Show answer', command=show_answer).place(x=850, y=545, anchor='nw')
    tk.Button(window, width=10, height=1, activebackground='gray', fg='white', bg='gray', text='Restart', command=restart).place(x=950, y=545, anchor='nw')
window.mainloop()
| 41.612245 | 141 | 0.618073 | 1,174 | 8,156 | 4.15247 | 0.162692 | 0.157949 | 0.054564 | 0.034462 | 0.474667 | 0.399385 | 0.343179 | 0.312615 | 0.26359 | 0.232205 | 0 | 0.033567 | 0.229279 | 8,156 | 195 | 142 | 41.825641 | 0.740057 | 0.002084 | 0 | 0.198758 | 0 | 0 | 0.065749 | 0.005162 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055901 | false | 0.012422 | 0.043478 | 0 | 0.124224 | 0.018634 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea097256a6592e5569664755f10c07c9659112a7 | 918 | py | Python | hashing/majority_element_ii.py | elenaborisova/A2SV-interview-prep | 02b7166a96d22221cd6adaedf14f845537f0752d | [
"MIT"
] | null | null | null | hashing/majority_element_ii.py | elenaborisova/A2SV-interview-prep | 02b7166a96d22221cd6adaedf14f845537f0752d | [
"MIT"
] | null | null | null | hashing/majority_element_ii.py | elenaborisova/A2SV-interview-prep | 02b7166a96d22221cd6adaedf14f845537f0752d | [
"MIT"
] | null | null | null | import collections
# Time: O(n); Space: O(n)
def majority_element(nums):
n = len(nums)
freq = collections.Counter(nums)
return [el for el, fr in freq.items() if fr > n / 3]
# Boyer-Moore Voting Algorithm; Time: O(n); Space: O(1)
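# At most two values can each occur more than n/3 times, so two candidate
# slots with counters suffice; the final pass verifies the surviving candidates.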
def majority_element2(nums):
count1 = count2 = 0
candidate1 = candidate2 = None
for n in nums:
if n == candidate1:
count1 += 1
elif n == candidate2:
count2 += 1
elif count1 == 0:
candidate1, count1 = n, 1
elif count2 == 0:
candidate2, count2 = n, 1
else:
# Fully pairing (votes cancel each other out)
count1 -= 1
count2 -= 1
return [n for n in (candidate1, candidate2)
if nums.count(n) > len(nums) // 3]
# Test cases:
print(majority_element2([3, 2, 3]))
print(majority_element2([1]))
print(majority_element2([1, 2]))
| 24.157895 | 57 | 0.561002 | 122 | 918 | 4.180328 | 0.385246 | 0.12549 | 0.123529 | 0.043137 | 0.047059 | 0 | 0 | 0 | 0 | 0 | 0 | 0.064 | 0.319172 | 918 | 37 | 58 | 24.810811 | 0.752 | 0.14488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.04 | 0 | 0.2 | 0.12 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea0b65a3b96306a2dbc535281a6c2a13988cbd15 | 10,238 | py | Python | src/fc100_dataset.py | zihangJiang/Adaptive-Attention | 45eeb8fd629a81eebb3c8a8b869551f4f8738325 | [
"Apache-2.0"
] | 22 | 2021-04-06T11:54:50.000Z | 2022-03-18T03:27:31.000Z | src/fc100_dataset.py | zihangJiang/Adaptive-Attention | 45eeb8fd629a81eebb3c8a8b869551f4f8738325 | [
"Apache-2.0"
] | 1 | 2021-06-01T15:26:44.000Z | 2021-06-01T17:21:02.000Z | src/fc100_dataset.py | zihangJiang/Adaptive-Attention | 45eeb8fd629a81eebb3c8a8b869551f4f8738325 | [
"Apache-2.0"
] | 1 | 2021-06-29T06:07:16.000Z | 2021-06-29T06:07:16.000Z | # coding=utf-8
from __future__ import print_function
import torch.utils.data as data
import numpy as np
import torch
import os
import argparse
import csv
import glob
import cv2
from shutil import copyfile
from tqdm import tqdm
from copy import deepcopy
from torchvision import transforms
from torchvision.datasets.utils import download_url, check_integrity
from PIL import Image
import pickle
'''
Inspired by https://github.com/pytorch/vision/pull/46
and
https://github.com/y2l/mini-imagenet-tools
'''
IMG_CACHE = {}
def unpickle(file):
    with open(file, 'rb') as fo:
        data = pickle.load(fo, encoding='latin1')
    return data
def save_pickle(dicts,file):
with open(file, 'wb') as fo:
pickle.dump(dicts, fo)
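# FC100 re-splits CIFAR-100 by superclass into disjoint few-shot sets:
# 12 superclasses for train, 4 for val, 4 for test (see super_class_split).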
class FC100Generator(object):
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
def __init__(self, input_args, download=True):
self.input_args = input_args
self.image_dir = self.input_args.image_dir
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
self.data = []
self.labels = []
self.super_labels = []
self.filenames = []
for fentry in self.train_list+self.test_list:
f = fentry[0]
file = os.path.join(self.image_dir, self.base_folder, f)
fo = open(file, 'rb')
entry = pickle.load(fo, encoding='latin1')
self.data.append(entry['data'])
self.super_labels += entry['coarse_labels']
self.labels += entry['fine_labels']
self.filenames += entry['filenames']
fo.close()
self.data = np.concatenate(self.data)
self.data = self.data.reshape((60000, 3, 32, 32))
self.data = self.data.transpose((0, 2, 3, 1))
#img = Image.fromarray(data, 'RGB')
def download(self):
import tarfile
if self._check_integrity():
print('Files already downloaded and verified')
return
root = self.image_dir
download_url(self.url, root, self.filename, self.tgz_md5)
# extract file
cwd = os.getcwd()
tar = tarfile.open(os.path.join(root, self.filename), "r:gz")
os.chdir(root)
tar.extractall()
tar.close()
os.chdir(cwd)
def _check_integrity(self):
root = self.image_dir
for fentry in (self.train_list + self.test_list):
filename, md5 = fentry[0], fentry[1]
fpath = os.path.join(root, self.base_folder, filename)
if not check_integrity(fpath, md5):
return False
return True
def process_original_files(self):
self.processed_img_dir = '../dataset/FC100/processed_images'
split_lists = ['train', 'val', 'test']
super_class_split = {'train':[1, 2, 3, 4, 5, 6, 9, 10, 15, 17, 18, 19], 'val':[8, 11, 13, 16], 'test':[0,7,12,14]}
if not os.path.exists(self.processed_img_dir):
os.makedirs(self.processed_img_dir)
# split data
# idxs = {'train':[], 'val':[], 'test':[]}
# data = {'train':[], 'val':[], 'test':[]}
# label = {'train':[], 'val':[], 'test':[]}
# filenames = {'train':[], 'val':[], 'test':[]}
for idx, super_label in tqdm(enumerate(self.super_labels)):
for stage in split_lists:
if super_label in super_class_split[stage]:
file_dir = os.path.join(self.processed_img_dir,stage,str(self.labels[idx]))
file_path = os.path.join(file_dir,self.filenames[idx])
if not os.path.exists(file_dir):
os.makedirs(file_dir)
                        # self.data is HWC RGB; OpenCV expects BGR when writing.
                        cv2.imwrite(file_path, cv2.cvtColor(self.data[idx], cv2.COLOR_RGB2BGR))
# data[stage].append(self.data[idx:idx+1])
# label[stage].append(self.labels[idx])
# filenames[stage].append(self.filenames[idx])
# train_pickle = {'data':np.concatenate(data['train']), 'label':label['train'], 'filenames':filenames['train']}
# val_pickle = {'data':np.concatenate(data['val']), 'label':label['val'], 'filenames':filenames['val']}
# test_pickle = {'data':np.concatenate(data['test']), 'label':label['test'], 'filenames':filenames['test']}
# save_pickle(train_pickle, self.processed_img_dir+'/train')
# save_pickle(val_pickle, self.processed_img_dir+'/val')
# save_pickle(test_pickle, self.processed_img_dir+'/test')
class FC100Dataset(data.Dataset):
processed_folder = 'processed_images'
def __init__(self, mode='train', root='../FC100', transform=None, target_transform=None):
'''
The items are (filename,category). The index of all the categories can be found in self.idx_classes
Args:
- root: the directory where the dataset will be stored
- transform: how to transform the input
- target_transform: how to transform the target
- download: need to download the dataset
'''
super(FC100Dataset, self).__init__()
self.root = root
self.transform = transform
self.image_size = 32
self.mode = mode
        if transform is None:
            if self.mode != 'train':
self.transform = transforms.Compose([
transforms.Resize(self.image_size),
transforms.CenterCrop(self.image_size),
transforms.ToTensor(),
transforms.Normalize(mean=[x/255.0 for x in [120.39586422, 115.59361427, 104.54012653]],
std=[x/255.0 for x in [70.68188272, 68.27635443, 72.54505529]])
])
else:
self.transform = transforms.Compose([
transforms.Resize(self.image_size),
transforms.RandomCrop(self.image_size, padding=4),
transforms.RandomHorizontalFlip(),
lambda x: np.asarray(x),
transforms.ToTensor(),
transforms.Normalize(mean=[x/255.0 for x in [120.39586422, 115.59361427, 104.54012653]],
std=[x/255.0 for x in [70.68188272, 68.27635443, 72.54505529]])
])
self.target_transform = target_transform
if not self._check_exists():
raise RuntimeError('Dataset not found.')
self.classes = sorted(os.listdir(os.path.join(self.root, self.processed_folder, mode)))
self.all_items = find_items(os.path.join(
self.root, self.processed_folder, mode), self.classes)
self.idx_classes = index_classes(self.all_items)
paths, self.y = zip(*[self.get_path_label(pl)
for pl in range(len(self))])
self.x = paths
def __getitem__(self, idx):
file_path = self.x[idx]
x = Image.open(file_path).convert('RGB')
#x = self.x[idx]
if self.transform:
x = self.transform(deepcopy(x))
return x, self.y[idx]
def __len__(self):
return len(self.all_items)
    def switch_image_size(self, size=0):
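        # NOTE: the 84/224 toggle below appears inherited from mini-ImageNet
        # tooling; FC100/CIFAR images are natively 32x32 (set in __init__).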
if self.image_size == 84:
self.image_size = 224
else:
self.image_size = 84
if size>0:
self.image_size = size
        if self.mode != 'train':
self.transform = transforms.Compose([
transforms.Resize(self.image_size),
transforms.CenterCrop(self.image_size),
transforms.ToTensor(),
transforms.Normalize(mean=[x/255.0 for x in [120.39586422, 115.59361427, 104.54012653]],
std=[x/255.0 for x in [70.68188272, 68.27635443, 72.54505529]])
])
else:
self.transform = transforms.Compose([
#transforms.Pad(16,padding_mode='reflect'),
transforms.RandomCrop(self.image_size, padding=8),
transforms.RandomHorizontalFlip(),
lambda x: np.asarray(x),
transforms.ToTensor(),
transforms.Normalize(mean=[x/255.0 for x in [120.39586422, 115.59361427, 104.54012653]],
std=[x/255.0 for x in [70.68188272, 68.27635443, 72.54505529]])
])
def get_path_label(self, index):
filename = self.all_items[index][0]
img = str.join('/', [self.all_items[index][2], filename])
target = self.idx_classes[self.all_items[index][1]]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def _check_exists(self):
return os.path.exists(os.path.join(self.root, self.processed_folder))
def find_items(root_dir, classes):
retour = []
for (root, dirs, files) in sorted(os.walk(root_dir)):
for f in sorted(files):
r = root.split('/')
lr = len(r)
label = r[lr - 1]
            # CIFAR filenames end in .png, so also accept that extension.
            if label in classes and f.endswith(("jpg", "png")):
retour.extend([(f, label, root)])
print("== Dataset: Found %d items " % len(retour))
return retour
def index_classes(items):
idx = {}
for i in items:
if (not i[1] in idx):
idx[i[1]] = len(idx)
print("== Dataset: Found %d classes" % len(idx))
return idx
if __name__=='__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--image_dir', type=str, default = '../dataset/FC100', help='untar cifar dir')
parser.add_argument('--image_resize', type=int, default=84)
args = parser.parse_args()
dataset_generator = FC100Generator(args)
dataset_generator.process_original_files() | 39.836576 | 122 | 0.57814 | 1,233 | 10,238 | 4.662612 | 0.218978 | 0.025048 | 0.027135 | 0.011132 | 0.286659 | 0.211515 | 0.199339 | 0.199339 | 0.192903 | 0.180379 | 0 | 0.062491 | 0.293514 | 10,238 | 257 | 123 | 39.836576 | 0.732338 | 0.117308 | 0 | 0.181818 | 0 | 0.005051 | 0.068829 | 0.017179 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070707 | false | 0 | 0.085859 | 0.010101 | 0.252525 | 0.020202 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea0cf5b063e13f5306d4b7781b36e6b749f9e1f5 | 7,359 | py | Python | tests/test_baseSampler.py | clementchadebec/benchmark_VAE | 943e231f9e5dfa40b4eec14d4536f1c229ad9be1 | [
"Apache-2.0"
] | 143 | 2021-10-17T08:43:33.000Z | 2022-03-31T11:10:53.000Z | tests/test_baseSampler.py | eknag/benchmark_VAE | 8b727f29a68aff7771c4c97aff15f75f88320e1f | [
"Apache-2.0"
] | 6 | 2022-01-21T17:40:09.000Z | 2022-03-16T13:09:22.000Z | tests/test_baseSampler.py | eknag/benchmark_VAE | 8b727f29a68aff7771c4c97aff15f75f88320e1f | [
"Apache-2.0"
] | 18 | 2021-12-16T15:17:08.000Z | 2022-03-15T01:30:13.000Z | import os
import numpy as np
import pytest
import torch
from imageio import imread
from pythae.models import BaseAE, BaseAEConfig
from pythae.samplers import BaseSampler, BaseSamplerConfig
PATH = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def dummy_data():
    ### 3 imgs from mnist that are used to simulate generated ones
return torch.load(os.path.join(PATH, "data/mnist_clean_train_dataset_sample")).data
@pytest.fixture(params=[torch.rand(3, 10, 20), torch.rand(1, 2, 2)])
def img_tensors(request):
return request.param
@pytest.fixture
def model_sample():
return BaseAE((BaseAEConfig(input_dim=(1, 28, 28))))
@pytest.fixture()
def sampler_sample(tmpdir, model_sample):
tmpdir.mkdir("dummy_folder")
return BaseSampler(model=model_sample, sampler_config=BaseSamplerConfig())
class Test_BaseSampler_saving:
def test_save_config(self, tmpdir, sampler_sample):
sampler = sampler_sample
dir_path = os.path.join(tmpdir, "dummy_folder")
sampler.save(dir_path)
sampler_config_file = os.path.join(dir_path, "sampler_config.json")
assert os.path.isfile(sampler_config_file)
generation_config_rec = BaseSamplerConfig.from_json_file(sampler_config_file)
assert generation_config_rec.__dict__ == sampler_sample.sampler_config.__dict__
def test_save_image_tensor(self, img_tensors, tmpdir, sampler_sample):
sampler = sampler_sample
dir_path = os.path.join(tmpdir, "dummy_folder")
img_path = os.path.join(dir_path, "test_img.png")
sampler.save_img(img_tensors, dir_path, "test_img.png")
assert os.path.isdir(dir_path)
assert os.path.isfile(img_path)
rec_img = torch.tensor(imread(img_path)) / 255.0
assert 1 >= rec_img.max() >= 0
# class Test_Sampler_Set_up:
# @pytest.fixture(
# params=[
# BaseSamplerConfig(
# batch_size=1
# ), # (target full batch number, target last full batch size, target_batch_number)
# BaseSamplerConfig(),
# ]
# )
# def sampler_config(self, tmpdir, request):
# return request.param
#
# def test_sampler_set_up(self, model_sample, sampler_config):
# sampler = BaseSampler(model=model_sample, sampler_config=sampler_config)
#
# assert sampler.batch_size == sampler_config.batch_size
# assert sampler.samples_per_save == sampler_config.samples_per_save
# class Test_RHVAE_Sampler:
# @pytest.fixture(
# params=[
# RHVAESamplerConfig(batch_size=1, mcmc_steps_nbr=15, samples_per_save=5),
# RHVAESamplerConfig(batch_size=2, mcmc_steps_nbr=15, samples_per_save=1),
# RHVAESamplerConfig(
# batch_size=3, n_lf=1, eps_lf=0.01, mcmc_steps_nbr=10, samples_per_save=5
# ),
# RHVAESamplerConfig(
# batch_size=3, n_lf=1, eps_lf=0.01, mcmc_steps_nbr=10, samples_per_save=3
# ),
# RHVAESamplerConfig(
# batch_size=10,
# n_lf=1,
# eps_lf=0.01,
# mcmc_steps_nbr=10,
# samples_per_save=3,
# ),
# ]
# )
# def rhvae_sampler_config(self, tmpdir, request):
# tmpdir.mkdir("dummy_folder")
# request.param.output_dir = os.path.join(tmpdir, "dummy_folder")
# return request.param
#
# @pytest.fixture(
# params=[
# np.random.randint(1, 15),
# np.random.randint(1, 15),
# np.random.randint(1, 15),
# ]
# )
# def samples_number(self, request):
# return request.param
#
# @pytest.fixture(
# params=[
# RHVAE(RHVAEConfig(input_dim=784, latent_dim=2)),
# RHVAE(RHVAEConfig(input_dim=784, latent_dim=3)),
# ]
# )
# def rhvae_sample(self, request):
# return request.param
#
# def test_hmc_sampling(self, rhvae_sample, rhvae_sampler_config):
#
# # simulates a trained model
# # rhvae_sample.centroids_tens = torch.randn(20, rhvae_sample.latent_dim)
# # rhvae_sample.M_tens = torch.randn(20, rhvae_sample.latent_dim, rhvae_sample.latent_dim)
#
# sampler = RHVAESampler(model=rhvae_sample, sampler_config=rhvae_sampler_config)
#
# out = sampler.hmc_sampling(rhvae_sampler_config.batch_size)
#
# assert out.shape == (rhvae_sampler_config.batch_size, rhvae_sample.latent_dim)
#
# assert sampler.eps_lf == rhvae_sampler_config.eps_lf
#
# assert all(
# [
# not torch.equal(out[i], out[j])
# for i in range(len(out))
# for j in range(i + 1, len(out))
# ]
# )
#
# def test_sampling_loop_saving(
# self, tmpdir, rhvae_sample, rhvae_sampler_config, samples_number
# ):
#
# sampler = RHVAESampler(model=rhvae_sample, sampler_config=rhvae_sampler_config)
# sampler.sample(samples_number=samples_number)
#
# generation_folder = os.path.join(tmpdir, "dummy_folder")
# generation_folder_list = os.listdir(generation_folder)
#
# assert f"generation_{sampler._sampling_signature}" in generation_folder_list
#
# data_folder = os.path.join(
# generation_folder, f"generation_{sampler._sampling_signature}"
# )
# files_list = os.listdir(data_folder)
#
# full_data_file_nbr = int(samples_number / rhvae_sampler_config.samples_per_save)
# last_file_data_nbr = samples_number % rhvae_sampler_config.samples_per_save
#
# if last_file_data_nbr == 0:
# expected_num_of_data_files = full_data_file_nbr
# else:
# expected_num_of_data_files = full_data_file_nbr + 1
#
# assert len(files_list) == 1 + expected_num_of_data_files
#
# assert "sampler_config.json" in files_list
#
# assert all(
# [
# f"generated_data_{rhvae_sampler_config.samples_per_save}_{i}.pt"
# in files_list
# for i in range(full_data_file_nbr)
# ]
# )
#
# if last_file_data_nbr > 0:
# assert (
# f"generated_data_{last_file_data_nbr}_{expected_num_of_data_files-1}.pt"
# in files_list
# )
#
# data_rec = []
#
# for i in range(full_data_file_nbr):
# data_rec.append(
# torch.load(
# os.path.join(
# data_folder,
# "generated_data_"
# f"{rhvae_sampler_config.samples_per_save}_{i}.pt",
# )
# )
# )
#
# if last_file_data_nbr > 0:
# data_rec.append(
# torch.load(
# os.path.join(
# data_folder,
# f"generated_data_"
# f"{last_file_data_nbr}_{expected_num_of_data_files-1}.pt",
# )
# )
# )
#
# data_rec = torch.cat(data_rec)
# assert data_rec.shape[0] == samples_number
#
# # check sampler_config
#
# sampler_config_rec = RHVAESamplerConfig.from_json_file(
# os.path.join(data_folder, "sampler_config.json")
# )
#
# assert sampler_config_rec.__dict__ == rhvae_sampler_config.__dict__
#
| 31.719828 | 98 | 0.616116 | 881 | 7,359 | 4.788876 | 0.163451 | 0.098602 | 0.055463 | 0.021332 | 0.484949 | 0.37497 | 0.287035 | 0.249822 | 0.199573 | 0.182034 | 0 | 0.015807 | 0.277891 | 7,359 | 231 | 99 | 31.857143 | 0.778133 | 0.717217 | 0 | 0.153846 | 0 | 0 | 0.060701 | 0.019362 | 0 | 0 | 0 | 0 | 0.128205 | 1 | 0.153846 | false | 0 | 0.179487 | 0.076923 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea0d5010ae8b0826a88469d3b3e189859e3e409a | 1,199 | py | Python | 78.py | tsbxmw/leetcode | e751311b8b5f2769874351717a22c35c19b48a36 | [
"MIT"
] | null | null | null | 78.py | tsbxmw/leetcode | e751311b8b5f2769874351717a22c35c19b48a36 | [
"MIT"
] | null | null | null | 78.py | tsbxmw/leetcode | e751311b8b5f2769874351717a22c35c19b48a36 | [
"MIT"
] | null | null | null | # 78. Subsets
# Given an array of integers nums with no duplicate elements, return all
# possible subsets of the array (the power set).
#
# Note: the solution set must not contain duplicate subsets.
#
# Example:
#
# Input: nums = [1,2,3]
# Output:
# [
#   [3],
#   [1],
#   [2],
#   [1,2,3],
#   [1,3],
#   [2,3],
#   [1,2],
#   []
# ]

# Warm-up: a plain permutation-style DFS first
class Solution1:
    def subsets(self, nums):
        ln = len(nums)
        if ln == 0:
            return [[]]
        self.rev = []

        def dfs(nums, temp):
            if not nums:
                self.rev.append(temp)
                return
            for i, num in enumerate(nums):
                temp.append(num)
                dfs(nums[:i] + nums[i + 1:], temp[:])
                temp.pop()

        dfs(nums, [])
        print(self.rev)
        return self.rev


# Adapt the permutation idea by changing when results are collected: no
# ordering is actually needed here, we only look at which elements are taken.
class Solution:
    def subsets(self, nums):
        ln = len(nums)
        if ln == 0:
            return [[]]

        def dfs(temp, nums, i):
            if i == ln:
                return temp
            v = [x[:] for x in temp]   # copy the current subsets
            for t in temp:
                t.append(nums[i])      # extend every existing subset with nums[i]
            temp = temp + v            # keep both extended and untouched subsets
            return dfs(temp, nums, i + 1)

        return dfs([[]], nums, 0)


if __name__ == "__main__":
    s = Solution()
    print(s.subsets([1,2,3,4,5])) | 18.734375 | 49 | 0.421184 | 148 | 1,199 | 3.358108 | 0.337838 | 0.020121 | 0.018109 | 0.072435 | 0.152918 | 0.152918 | 0.152918 | 0.152918 | 0.152918 | 0.152918 | 0 | 0.039829 | 0.413678 | 1,199 | 64 | 50 | 18.734375 | 0.667141 | 0.160133 | 0 | 0.235294 | 0 | 0 | 0.008114 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0 | 0 | 0.382353 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
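As an aside, the same power set can be produced without recursion; a minimal bitmask sketch (not part of the original file):

def subsets_bitmask(nums):
    # bit j of mask decides whether nums[j] joins the subset
    n = len(nums)
    return [[nums[j] for j in range(n) if mask >> j & 1]
            for mask in range(1 << n)]

assert len(subsets_bitmask([1, 2, 3])) == 8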
ea0ea19ad973cdb83f0e2cc01080232ca0ea3f83 | 12,531 | py | Python | tenable_io/api/exports.py | lanz/Tenable.io-SDK-for-Python | e81a61c369ac103d1524b0898153a569536a131e | [
"MIT"
] | 90 | 2017-02-02T18:36:17.000Z | 2022-02-05T17:58:50.000Z | tenable_io/api/exports.py | lanz/Tenable.io-SDK-for-Python | e81a61c369ac103d1524b0898153a569536a131e | [
"MIT"
] | 64 | 2017-02-03T00:54:00.000Z | 2020-08-06T14:06:50.000Z | tenable_io/api/exports.py | lanz/Tenable.io-SDK-for-Python | e81a61c369ac103d1524b0898153a569536a131e | [
"MIT"
] | 49 | 2017-02-03T01:01:00.000Z | 2022-02-25T13:25:28.000Z | from json import loads

from tenable_io.api.base import BaseApi, BaseRequest
from tenable_io.api.models import AssetsExport, ExportsAssetsStatus, ExportsVulnsStatus, VulnsExport
from tenable_io.util import payload_filter


class ExportsApi(BaseApi):

    def vulns_request_export(self, exports_vulns):
        """Export all vulnerabilities in the user's container that match the request criteria.

        :param exports_vulns: An instance of :class:`ExportsVulnsRequest`.
        :raise TenableIOApiException: When API error is encountered.
        :return: The export UUID.
        """
        response = self._client.post('vulns/export', payload=exports_vulns)
        return loads(response.text).get('export_uuid')

    def vulns_export_status(self, export_uuid):
        """Returns the status of your export request (QUEUED, PROCESSING, FINISHED, ERROR).
        Chunks are processed in parallel and may not complete in order.

        :param export_uuid: The export UUID.
        :raise TenableIOApiException: When API error is encountered.
        :return: An instance of `ExportsVulnsStatus`.
        """
        response = self._client.get('vulns/export/%(export_uuid)s/status',
                                    path_params={'export_uuid': export_uuid})
        return ExportsVulnsStatus.from_json(response.text)

    def vulns_chunk(self, export_uuid, chunk_id):
        """Retrieve vulnerability chunk by ID.

        :param export_uuid: The export request UUID.
        :param chunk_id: The chunk ID.
        :raise TenableIOApiException: When API error is encountered.
        :return: A list of :class:`tenable_io.api.models.VulnsExport` instances.
        """
        response = self._client.get('vulns/export/%(export_uuid)s/chunks/%(chunk_id)s',
                                    path_params={'export_uuid': export_uuid, 'chunk_id': chunk_id})
        return VulnsExport.from_json_list(response.text)

    def vulns_download_chunk(self, export_uuid, chunk_id, stream=True, chunk_size=1024):
        """Download vulnerability chunk by ID.

        :param export_uuid: The export request UUID.
        :param chunk_id: The chunk ID.
        :raise TenableIOApiException: When API error is encountered.
        :return: The downloaded file.
        """
        response = self._client.get('vulns/export/%(export_uuid)s/chunks/%(chunk_id)s',
                                    path_params={'export_uuid': export_uuid, 'chunk_id': chunk_id},
                                    stream=stream)
        return response.iter_content(chunk_size=chunk_size)

    def assets_request_export(self, exports_assets):
        """Exports all assets in your container that match the request criteria.

        :param exports_assets: An instance of :class:`ExportsAssetsRequest`.
        :raise TenableIOApiException: When API error is encountered.
        :return: The UUID for the export request.
        """
        response = self._client.post('assets/export', payload=exports_assets)
        return loads(response.text).get('export_uuid')

    def assets_export_status(self, export_uuid):
        """Returns the status of your export request. Chunks are processed in serial and will complete in order.

        :param export_uuid: The UUID for the export request.
        :raise TenableIOApiException: When API error is encountered.
        :return: An instance of `ExportsAssetsStatus`.
        """
        response = self._client.get('assets/export/%(export_uuid)s/status',
                                    path_params={'export_uuid': export_uuid})
        return ExportsAssetsStatus.from_json(response.text)

    def assets_chunk(self, export_uuid, chunk_id):
        """Retrieve chunk by ID. Chunks are available for export for up to 24 hours after they have been created. A
        404 is returned for expired chunks.

        :param export_uuid: The UUID for the export request.
        :param chunk_id: The ID of the asset chunk you want to export.
        :raise TenableIOApiException: When API error is encountered.
        :return: A list of :class:`tenable_io.api.models.AssetsExport` instances.
        """
        response = self._client.get('assets/export/%(export_uuid)s/chunks/%(chunk_id)s',
                                    path_params={'export_uuid': export_uuid, 'chunk_id': chunk_id})
        return AssetsExport.from_json_list(response.text)

    def assets_download_chunk(self, export_uuid, chunk_id, stream=True, chunk_size=1024):
        """Download chunk by ID. Chunks are available for download for up to 24 hours after they have been created. A
        404 is returned for expired chunks.

        :param export_uuid: The UUID for the export request.
        :param chunk_id: The ID of the asset chunk you want to export.
        :raise TenableIOApiException: When API error is encountered.
        :return: The downloaded file.
        """
        response = self._client.get('assets/export/%(export_uuid)s/chunks/%(chunk_id)s',
                                    path_params={'export_uuid': export_uuid, 'chunk_id': chunk_id},
                                    stream=stream)
        return response.iter_content(chunk_size=chunk_size)


class ExportsAssetsRequest(BaseRequest):

    def __init__(self, chunk_size, filters=None):
        """Request for ExportsApi.assets_request_export.

        :param chunk_size: Specifies the number of assets per exported chunk. Range is 100-10000. If you specify a
            value outside of that range, a 400 error is returned.
        :type chunk_size: int
        :param filters: Specifies filters for exported assets. To return all assets, omit the filters object. If your
            request specifies multiple filters, the system combines the filters using the AND search operator.
        :type filters: dict
        :param filters.created_at: Returns all assets created later than the date specified. The specified date must
            be in the Unix timestamp format.
        :type filters.created_at: long
        :param filters.updated_at: Returns all assets updated later than the date specified. The specified date must
            be in the Unix timestamp format.
        :type filters.updated_at: long
        :param filters.terminated_at: Returns all assets terminated later than the date specified. The specified date
            must be in the Unix timestamp format.
        :type filters.terminated_at: long
        :param filters.deleted_at: Returns all assets deleted later than the date specified. The specified date must
            be in the Unix timestamp format.
        :type filters.deleted_at: long
        :param filters.first_scan_time: Returns all assets with a first scan time later than the date specified. The
            specified date must be in the Unix timestamp format.
        :type filters.first_scan_time: long
        :param filters.last_authenticated_scan_time: Returns all assets with a last credentialed scan time later than
            the date specified. The specified date must be in the Unix timestamp format.
        :type filters.last_authenticated_scan_time: long
        :param filters.last_assessed: Returns all assets with a last assessed time later than the date specified. An
            asset is considered assessed if it has been scanned by a credentialed or non-credentialed scan. The
            specified date must be in the Unix timestamp format.
        :type filters.last_assessed: long
        :param filters.servicenow_sysid: If true, returns all assets that have a ServiceNow Sys ID, regardless of
            value. If false, returns all assets that do not have a ServiceNow Sys ID.
        :type filters.servicenow_sysid: bool
        :param filters.sources: Returns assets that have the specified source. An asset source is the entity that
            reported the asset details. Sources can include sensors, connectors, and API imports. If your request
            specifies multiple sources, this request returns all assets that have been seen by any of the specified
            sources.
        :type filters.sources: list
        :param filters.has_plugin_results: If true, returns all assets that have plugin results. If false, returns all
            assets that do not have plugin results. An asset may not have plugin results if the asset details
            originated from a connector, an API import, or a discovery scan, rather than a vulnerabilities scan.
        :type filters.has_plugin_results: bool
        :param filters.tag.<category>: Returns all assets with the specified tags. The filter is defined as "tag",
            a period ("."), and the tag category name. The value of the filter is a list of tag values.
            ex. 'tag.City': ['Chicago', 'LA']
        :type filters.tag.<category>: list<str>
        """
        self.chunk_size = chunk_size
        self.filters = filters

    def as_payload(self, filter_=None):
        payload = super(ExportsAssetsRequest, self).as_payload(filter_)
        if u'filters' in payload:
            payload[u'filters'] = payload_filter(payload[u'filters'], filter_) or None
        return payload_filter(payload, filter_)


class ExportsVulnsRequest(BaseRequest):

    FILTERS_SEVERITIES = [u'info', u'low', u'medium', u'high', u'critical']
    FILTERS_STATES = [u'open', u'reopened', u'fixed']

    def __init__(self, num_assets=None, filters=None):
        """Request for ExportsApi.vulns_request_export.

        :param num_assets: Specifies the number of assets per exported chunk. Default is 50. Range is 50-5000. If you
            specify a value outside of that range, the system uses the lower or upper bound value.
        :type num_assets: int
        :param filters.severity: Defaults to all severity levels. Supported values are [info, low, medium, high,
            critical].
        :type filters.severity: list
        :param filters.state: The state of the vulnerabilities to include in the export. If not provided, default
            states are OPEN and REOPENED. Acceptable values are [OPEN, REOPENED, FIXED]. Case insensitive.
        :type filters.state: list
        :param filters.plugin_family: The plugin family of the exported vulnerabilities. This filter is case
            sensitive.
        :type filters.plugin_family: list
        :param filters.since: The start date (in Unix time) for the range of new or updated vulnerability data you
            want to export. If your request omits this parameter, exported data includes all vulnerabilities,
            regardless of date.
        :type filters.since: int
        :param filters.tag.<category>: Returns all assets with the specified tags. The filter is defined as "tag",
            a period ("."), and the tag category name. The value of the filter is a list of tag values.
            ex. 'tag.City': ['Chicago', 'LA']
        :type filters.tag.<category>: list<str>
        :param filters.cidr_range: Restricts search for vulnerabilities to assets assigned an IP address within the
            specified CIDR range. For example, 0.0.0.0/0 restricts the search to 0.0.0.1 and 255.255.255.254.
        :type filters.cidr_range: str
        :param filters.first_found: The start date (in Unix time) for the range of vulnerability data you want to
            export, based on when a scan first found a vulnerability on an asset.
        :type filters.first_found: int
        :param filters.last_found: The start date (in Unix time) for the range of vulnerability data you want to
            export, based on when a scan last found a vulnerability on an asset.
        :type filters.last_found: int
        :param filters.last_fixed: The start date (in Unix time) for the range of vulnerability data you want to
            export, based on when the vulnerability state was changed to fixed.
        :type filters.last_fixed: int
        """
        if filters and u'severity' in filters and filters[u'severity']:
            for severity in filters[u'severity']:
                assert severity in self.FILTERS_SEVERITIES
        if filters and u'state' in filters and filters[u'state']:
            for state in filters[u'state']:
                assert state.lower() in self.FILTERS_STATES
        self.num_assets = num_assets
        self.filters = filters

    def as_payload(self, filter_=None):
        payload = super(ExportsVulnsRequest, self).as_payload(filter_)
        if u'filters' in payload:
            payload[u'filters'] = payload_filter(payload[u'filters'], filter_) or None
        return payload_filter(payload, filter_)
| 56.445946 | 120 | 0.679036 | 1,664 | 12,531 | 5.003005 | 0.155048 | 0.040841 | 0.026907 | 0.031712 | 0.595916 | 0.558438 | 0.531652 | 0.492132 | 0.451772 | 0.403363 | 0 | 0.006165 | 0.249222 | 12,531 | 221 | 121 | 56.701357 | 0.87872 | 0.603064 | 0 | 0.412698 | 0 | 0 | 0.133752 | 0.066499 | 0 | 0 | 0 | 0 | 0.031746 | 1 | 0.190476 | false | 0 | 0.063492 | 0 | 0.492063 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
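A minimal polling sketch driving the classes above (here `api` stands for an already-constructed ExportsApi; the status attributes `status` and `chunks_available` and the u'FINISHED'/u'ERROR' values are assumptions inferred from the docstrings, not verified against tenable_io.api.models):

import time

request = ExportsVulnsRequest(num_assets=500, filters={u'severity': [u'high', u'critical']})
export_uuid = api.vulns_request_export(request)         # start the export
status = api.vulns_export_status(export_uuid)
while status.status not in (u'FINISHED', u'ERROR'):     # poll until done (assumed values)
    time.sleep(5)
    status = api.vulns_export_status(export_uuid)
vulns = [v for chunk_id in status.chunks_available      # assumed attribute
         for v in api.vulns_chunk(export_uuid, chunk_id)]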
ea1054b0ab6dec5f3969af9fb09813f3865dd860 | 3,853 | py | Python | Ori_data_cleaning/build_kongpai.py | wuyifan2233/Tencent_WWF | 2b248a810295f95cb0483837cb8cb8797c144821 | [
"MIT"
] | 2 | 2021-07-08T01:52:15.000Z | 2021-07-29T08:46:06.000Z | Ori_data_cleaning/build_kongpai.py | wuyifan2233/Tencent_WWF | 2b248a810295f95cb0483837cb8cb8797c144821 | [
"MIT"
] | null | null | null | Ori_data_cleaning/build_kongpai.py | wuyifan2233/Tencent_WWF | 2b248a810295f95cb0483837cb8cb8797c144821 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
import os
import pandas as pd
import shutil
import numpy as np
from tqdm import tqdm
import pyfastcopy
import random


def stat_count(dir):
    csv_list = os.listdir(dir)
    df_store = pd.DataFrame(columns=['Categories', 'Path', 'Frames'])
    modify_class = ['misssing', 'wufashibei', 'gongzuorengyuan', 'qitarenyuan', 'konpai', 'gongzuorenyuan', 'hongzuiya']
    modified_class = ['missing', 'wufashibie', 'person', 'person', 'kongpai', 'person', 'hongzuishanya']
    # drop_class = ['cuowu', 'wufashibie', 'kongpai', 'missing', 'banchunlu', 'yanyang:konpai']
    drop_class = ['cuowu', 'wufashibie', 'missing', 'banchunlu', 'yanyang:konpai']
    for csv in csv_list:
        df = pd.read_csv(dir + csv)
        for a, b in zip(modify_class, modified_class):
            df.loc[df['Categories'] == a, 'Categories'] = b
        cat_list = df['Categories'].values
        for i, cate in enumerate(cat_list):
            if cate not in df_store['Categories'].values and cate not in drop_class:
                df_store = df_store.append([{'Categories': cate}], ignore_index=True)
                index = df_store[df_store.Categories == cate].index.tolist()[0]
                df_store.loc[index, 'Path'] = [df.loc[i, 'Path']]
                df_store.loc[index, 'Frames'] = df.loc[i, 'Frames']
            elif cate in df_store['Categories'].values and cate not in drop_class:
                index = df_store[df_store.Categories == cate].index.tolist()[0]
                # ['[path]', '[path2]', '[path3]']
                df_store.loc[index, 'Path'] += [df.loc[i, 'Path']]
                df_store.loc[index, 'Frames'] += df.loc[i, 'Frames']
    df_store = df_store.sort_values(by="Frames", ascending=False)
    df_store = df_store.reset_index().drop(['index'], axis=1)
    return df_store


def main():
    random.seed(2021)
    new_df = stat_count('F:\All_CSV\csv/')[:1]
    for cate, file_list in zip(new_df['Categories'].values, new_df['Path'].values):
        image_folder = 'E:/WWF_Det/WWF_Data/Raw_Data/empty/' + cate + '/images/'
        video_folder = 'E:/WWF_Det/WWF_Data/Raw_Data/empty/' + cate + '/videos/'
        if not os.path.exists(image_folder):
            os.makedirs(image_folder)
        if not os.path.exists(video_folder):
            os.makedirs(video_folder)
        count_image = 0
        count_video = 0
        all_source_list = []
        for mini_list in file_list:
            source_list = mini_list[1:-1].split(',')
            for s_item in source_list:
                source = s_item.strip()[1:-1]
                if source.lower().strip().endswith('.jpg') or source.lower().strip().endswith('.png'):
                    count_image += 1
                    target = image_folder + '%05d' % (count_image) + os.path.splitext(source)[1]
                    if not os.path.exists(target):
                        source = source.replace('E:', 'F:\Raw_Dataset', 1)
                        k = 0
                elif source.lower().strip().endswith('.mov') or source.lower().strip().endswith('.avi') or source.lower().strip().endswith('.mp4'):
                    count_video += 1
                    target = video_folder + '%05d' % (count_video) + os.path.splitext(source)[1]
                    if not os.path.exists(target):
                        source = source.replace('E:', 'F:\Raw_Dataset', 1)
                        all_source_list.append(source)
                        k = 0
                else:
                    print((os.path.splitext(source)[1])[-3:])
        random.shuffle(all_source_list)
        num_vid = 0
        for source in tqdm(all_source_list[:3000]):
            num_vid += 1
            target = video_folder + '%05d' % (num_vid) + os.path.splitext(source)[1]
            shutil.copyfile(source, target)
            # print(source,)
        print(num_vid)


if __name__ == "__main__":
    main() | 40.989362 | 147 | 0.572541 | 487 | 3,853 | 4.342916 | 0.264887 | 0.059574 | 0.021277 | 0.033097 | 0.343262 | 0.250591 | 0.250591 | 0.250591 | 0.250591 | 0.250591 | 0 | 0.014601 | 0.271217 | 3,853 | 94 | 148 | 40.989362 | 0.738604 | 0.038412 | 0 | 0.111111 | 0 | 0 | 0.132937 | 0.018914 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.097222 | 0 | 0.138889 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
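The extension dispatch inside main() boils down to a small classifier; a sketch with illustrative names only (not part of the original file):

import os

IMAGE_EXTS = ('.jpg', '.png')
VIDEO_EXTS = ('.mov', '.avi', '.mp4')

def media_kind(path):
    ext = os.path.splitext(path)[1].lower()
    if ext in IMAGE_EXTS:
        return 'image'
    if ext in VIDEO_EXTS:
        return 'video'
    return 'other'

assert media_kind('clips/IMG_0001.MP4') == 'video'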
ea10b7c8da447d8bce0ff5a3d3f67a953fdcd34c | 1,322 | py | Python | gdown/extractall.py | ricocf/gdown | 442411be2fe10d9045103212031e51c50a8366cd | [
"MIT"
] | 1,856 | 2015-10-25T04:36:12.000Z | 2022-03-31T18:30:12.000Z | gdown/extractall.py | ricocf/gdown | 442411be2fe10d9045103212031e51c50a8366cd | [
"MIT"
] | 118 | 2017-05-08T11:43:59.000Z | 2022-03-26T01:19:45.000Z | gdown/extractall.py | ricocf/gdown | 442411be2fe10d9045103212031e51c50a8366cd | [
"MIT"
] | 190 | 2017-11-29T14:57:30.000Z | 2022-03-31T15:43:46.000Z | import os.path as osp
import tarfile
import zipfile


def extractall(path, to=None):
    """Extract archive file.

    Parameters
    ----------
    path: str
        Path of archive file to be extracted.
    to: str, optional
        Directory to which the archive file will be extracted.
        If None, it will be set to the parent directory of the archive file.
    """
    if to is None:
        to = osp.dirname(path)

    if path.endswith(".zip"):
        opener, mode = zipfile.ZipFile, "r"
    elif path.endswith(".tar"):
        opener, mode = tarfile.open, "r"
    elif path.endswith(".tar.gz") or path.endswith(".tgz"):
        opener, mode = tarfile.open, "r:gz"
    elif path.endswith(".tar.bz2") or path.endswith(".tbz"):
        opener, mode = tarfile.open, "r:bz2"
    else:
        raise ValueError(
            "Could not extract '%s' as no appropriate "
            "extractor is found" % path
        )

    def namelist(f):
        if isinstance(f, zipfile.ZipFile):
            return f.namelist()
        return [m.path for m in f.members]

    def filelist(f):
        files = []
        for fname in namelist(f):
            fname = osp.join(to, fname)
            files.append(fname)
        return files

    with opener(path, mode) as f:
        f.extractall(path=to)

    return filelist(f)
| 26.44 | 76 | 0.575643 | 174 | 1,322 | 4.373563 | 0.385057 | 0.094612 | 0.063075 | 0.074901 | 0.137976 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002181 | 0.306354 | 1,322 | 49 | 77 | 26.979592 | 0.827699 | 0.186082 | 0 | 0 | 0 | 0 | 0.097396 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.09375 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
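For instance, assuming a file model.tar.gz exists in the working directory, the helper above can be called as follows; it returns the paths of the extracted members:

files = extractall("model.tar.gz")             # extract next to the archive
files = extractall("model.tar.gz", to="/tmp")  # or into an explicit directory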
ea1450ae2573bde817bc54a366ca3a062b2daf73 | 3,553 | py | Python | scenes/validators.py | jordifierro/pachatary-api | c03ad67ceb856068daa6d082091372eb1ed3d009 | [
"MIT"
] | 3 | 2018-12-05T16:44:59.000Z | 2020-08-01T14:12:32.000Z | scenes/validators.py | jordifierro/pachatary-api | c03ad67ceb856068daa6d082091372eb1ed3d009 | [
"MIT"
] | 6 | 2020-06-03T15:56:59.000Z | 2022-02-10T07:23:55.000Z | scenes/validators.py | jordifierro/pachatary-api | c03ad67ceb856068daa6d082091372eb1ed3d009 | [
"MIT"
] | null | null | null | from pachatary.exceptions import InvalidEntityException, EntityDoesNotExistException


class SceneValidator:

    MIN_TITLE_LENGHT = 1
    MAX_TITLE_LENGHT = 80
    MIN_LATITUDE = -90
    MAX_LATITUDE = +90
    MIN_LONGITUDE = -180
    MAX_LONGITUDE = +180

    def __init__(self, experience_repo):
        self.experience_repo = experience_repo

    def validate_scene(self, scene):
        if scene.title is None:
            raise InvalidEntityException(source='title', code='empty_attribute', message='Title cannot be empty')
        if type(scene.title) is not str:
            raise InvalidEntityException(source='title', code='wrong_type', message='Title must be string')
        if len(scene.title) < SceneValidator.MIN_TITLE_LENGHT or len(scene.title) > SceneValidator.MAX_TITLE_LENGHT:
            raise InvalidEntityException(source='title', code='wrong_size',
                                         message='Title must be between 1 and 80 chars')
        if scene.description is not None and type(scene.description) is not str:
            raise InvalidEntityException(source='description', code='wrong_type', message='Description must be string')
        if scene.latitude is None:
            raise InvalidEntityException(source='latitude', code='empty_attribute', message='Latitude cannot be empty')
        if not isinstance(scene.latitude, (int, float, complex)):
            raise InvalidEntityException(source='latitude', code='wrong_type', message='Latitude must be numeric')
        if scene.latitude < SceneValidator.MIN_LATITUDE or scene.latitude > SceneValidator.MAX_LATITUDE:
            raise InvalidEntityException(source='latitude', code='wrong_size',
                                         message='Latitude must be between -90 and +90')
        if scene.longitude is None:
            raise InvalidEntityException(source='longitude', code='empty_attribute',
                                         message='Longitude cannot be empty')
        if not isinstance(scene.longitude, (int, float, complex)):
            raise InvalidEntityException(source='longitude', code='wrong_type', message='Longitude must be numeric')
        if scene.longitude < SceneValidator.MIN_LONGITUDE or scene.longitude > SceneValidator.MAX_LONGITUDE:
            raise InvalidEntityException(source='longitude', code='wrong_size',
                                         message='Longitude must be between -180 and +180')
        if scene.experience_id is None:
            raise InvalidEntityException(source='experience_id', code='empty_attribute',
                                         message='Experience id cannot be empty')
        try:
            self.experience_repo.get_experience(scene.experience_id)
        except EntityDoesNotExistException:
            raise InvalidEntityException(source='experience_id', code='does_not_exist',
                                         message='Experience does not exist')

        return True


class ScenePermissionsValidator:

    def __init__(self, scene_repo, experience_permissions_validator):
        self.scene_repo = scene_repo
        self.experience_permissions_validator = experience_permissions_validator

    def validate_permissions(self, logged_person_id, has_permissions_to_modify_scene):
        scene = self.scene_repo.get_scene(id=has_permissions_to_modify_scene)
        return self.experience_permissions_validator.validate_permissions(
            logged_person_id=logged_person_id,
            has_permissions_to_modify_experience=scene.experience_id)
| 52.25 | 119 | 0.678019 | 378 | 3,553 | 6.15873 | 0.185185 | 0.139175 | 0.170103 | 0.056701 | 0.361684 | 0.277062 | 0.059278 | 0 | 0 | 0 | 0 | 0.009673 | 0.243456 | 3,553 | 67 | 120 | 53.029851 | 0.856399 | 0 | 0 | 0 | 0 | 0 | 0.162398 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075472 | false | 0 | 0.018868 | 0 | 0.283019 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
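A minimal sketch of exercising SceneValidator from a test (the Scene namedtuple and the stub repository are hypothetical stand-ins, not part of this module):

from collections import namedtuple

Scene = namedtuple('Scene', 'title description latitude longitude experience_id')

class StubExperienceRepo:
    def get_experience(self, experience_id):
        return object()  # pretend every experience exists

validator = SceneValidator(StubExperienceRepo())
assert validator.validate_scene(Scene('Beach day', None, 41.4, 2.2, '7')) is True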
ea14539ed3d68442c5de0f061063d94c4ecff6d4 | 2,064 | py | Python | day7.py | beyonddream/aoc2021 | f571247d5da702d26259626294057d5cec96cacf | [
"MIT"
] | null | null | null | day7.py | beyonddream/aoc2021 | f571247d5da702d26259626294057d5cec96cacf | [
"MIT"
] | null | null | null | day7.py | beyonddream/aoc2021 | f571247d5da702d26259626294057d5cec96cacf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3

import sys
import math


def solve():
    with open("inputs/day7.txt") as file:
        data = [line for line in file]
    solve_part_1(data)
    solve_part_2(data)


def solve_part_1(data):
    crab_positions = list(map(int, filter(None, data[0].split(','))))
    sorted_crab_positions = sorted(crab_positions)
    no_of_crabs = len(sorted_crab_positions)

    def get_total_fuel_cost(aligned_pos_idx, positions):
        total_cost = 0
        for idx, pos in enumerate(positions):
            total_cost += abs(positions[idx] - positions[aligned_pos_idx])
        return total_cost

    total_fuel_cost = 0
    if no_of_crabs % 2 == 0:
        mid = no_of_crabs // 2
        total_fuel_cost = min(get_total_fuel_cost(mid, sorted_crab_positions),
                              get_total_fuel_cost(mid - 1, sorted_crab_positions))
    else:
        mid = no_of_crabs // 2
        total_fuel_cost = get_total_fuel_cost(mid, sorted_crab_positions)

    print("The total fuel spent to align to a position is {}".format(total_fuel_cost))
    return total_fuel_cost


def solve_part_2(data):
    crab_positions = list(map(int, filter(None, data[0].split(','))))

    def get_total_fuel_cost(max_aligned_pos, positions):
        min_total_cost = math.inf
        for aligned_pos in range(max_aligned_pos + 1):
            total_cost = 0
            for idx, pos in enumerate(positions):
                diff = abs(positions[idx] - aligned_pos)
                total_cost += (diff * (diff + 1)) // 2
            min_total_cost = min(min_total_cost, total_cost)
        return min_total_cost

    total_fuel_cost = 0
    avg_crab_position = round(sum(crab_positions) / len(crab_positions))
    # try all positions from 0 to max = avg_crab_position (inclusive) and find
    # the minimum of all of these
    total_fuel_cost = get_total_fuel_cost(avg_crab_position, crab_positions)
    print("The total fuel spent to align to a position is {}".format(total_fuel_cost))
    return total_fuel_cost


if __name__ == '__main__':
    solve()
| 33.836066 | 78 | 0.662306 | 301 | 2,064 | 4.189369 | 0.255814 | 0.121332 | 0.154639 | 0.07613 | 0.479778 | 0.434576 | 0.398097 | 0.375099 | 0.283902 | 0.222046 | 0 | 0.013462 | 0.244186 | 2,064 | 60 | 79 | 34.4 | 0.794872 | 0.059109 | 0 | 0.347826 | 0 | 0 | 0.01292 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.043478 | 0 | 0.23913 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
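To make the part-two cost model concrete: moving a crab by diff positions costs 1 + 2 + ... + diff = diff * (diff + 1) / 2 fuel, which is what the inner loop above accumulates. A tiny standalone check (made-up numbers, not puzzle input):

diff = 4
assert diff * (diff + 1) // 2 == 1 + 2 + 3 + 4  # == 10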
ea18f624223828cedfb2d32e37e5f10a2d5bf537 | 15,235 | py | Python | pyspawner/sandbox.py | CJWorkbench/pyspawner | 73f31320e925a4a035624699df14a6a1630e8e54 | [
"MIT"
] | 2 | 2020-09-23T06:21:35.000Z | 2022-01-18T13:27:55.000Z | pyspawner/sandbox.py | CJWorkbench/pyspawner | 73f31320e925a4a035624699df14a6a1630e8e54 | [
"MIT"
] | 1 | 2021-02-07T13:19:20.000Z | 2021-02-15T18:51:06.000Z | pyspawner/sandbox.py | CJWorkbench/pyspawner | 73f31320e925a4a035624699df14a6a1630e8e54 | [
"MIT"
] | null | null | null | import errno
import os
import pkg_resources
import sys
import textwrap
from dataclasses import dataclass, field
from pathlib import Path
from typing import FrozenSet, Optional

import pyroute2

from . import c

seccomp_bpf_bytes = pkg_resources.resource_stream(
    __name__, "sandbox-seccomp.bpf"
).read()


@dataclass(frozen=True)
class NetworkConfig:
    """
    Network configuration that lets children access the Internet.

    Pyspawner will create a veth interface that may be used to route traffic
    from the child to the Internet via network address translation (NAT).

    *You must write the iptables rules yourself! pyspawner does not invoke
    iptables!* The intent is for you to set up iptables rules once, and then
    reuse the same rules for every clone.

    One iptables rule to route network traffic from a child process to the
    Internet::

        iptables -t nat -A POSTROUTING -s [child_ipv4_address] -j SNAT --to-source=[our IP address]

    You should also firewall the traffic to secure the rest of your network
    from sandboxed processes. See ``tests/setup-sandbox.sh`` for a minimal
    set of iptables rules.

    We do not yet support IPv6, because Kubernetes support is shaky. Follow
    https://github.com/kubernetes/kubernetes/issues/62822.

    Here's how networking works. When cloning, the child process gets a new,
    anonymous network namespace. pyspawner creates a veth pair, and it passes
    the "child" veth interface to the child process. The child process brings
    up its network interface and can only see the public Internet.

    After the child dies, the Linux kernel will delete the network interface.
    (There's a bit of a race here: the interface may exist a few milliseconds
    after the child dies. Pyspawner will explicitly ensure the interface is
    deleted before creating it.)

    Beware if running multiple children at once that all access the Internet.
    Each must have a unique interface name and IP addresses.

    The default values match those in ``tests/setup-sandbox.sh``. Don't
    edit one without editing the other.
    """

    kernel_veth_name: str = "veth-pyspawn"
    """
    Name of veth interface run by the kernel.

    Maximum length is 15 characters. Any longer gives NetlinkError 34.

    This name must not conflict with any other network device in the kernel's
    container.
    """

    child_veth_name: str = "veth-pyspawn-c"
    """
    Name of veth interface run by the child.

    Maximum length is 15 characters. Any longer gives NetlinkError 34.

    This name must not conflict with any other network device in the kernel's
    container. (The kernel creates this device before sending it into the
    child's network namespace.)
    """

    kernel_ipv4_address: str = "192.168.123.1"
    """
    IPv4 address of the kernel.

    This must not conflict with any other IP address in the kernel's container.

    This should be a private address. Be sure it doesn't conflict with your
    network's addresses. Kubernetes uses 10.0.0.0/8; Docker uses 172.16.0.0/12.
    The hard-coded "192.168.123/24" should be safe for Docker and Kubernetes.

    The child will use this address as its default gateway.
    """

    child_ipv4_address: str = "192.168.123.2"
    """
    IPv4 address of the child.

    The kernel will maintain iptables rules to route from this IP address to
    the public Internet.

    This must be in the same `/24` network block as `kernel_ipv4_address`.
    """


@dataclass(frozen=True)
class SandboxConfig:
    chroot_dir: Optional[Path] = None
    """
    Setting for "chroot" security layer.

    If `chroot_dir` is set, it must point to a directory on the filesystem.

    Remember that we call setuid() to an extreme UID (>65535) by default:
    that means the child will only be able to read files that are
    world-readable (i.e., "chmod o+r").

    (TODO `chroot_dir` should use pivot_root, for security. When Kubernetes
    lets us modify our mount namespace in an unprivileged container, switch
    to pivot_root.)
    """

    network: Optional[NetworkConfig] = None
    """
    If set, network configuration so child processes can access the Internet.

    If None, child processes have no network interfaces.

    :type: pyspawner.NetworkConfig
    """

    skip_sandbox_except: FrozenSet[str] = field(default_factory=frozenset)
    """
    Security layers to enable in child processes. (DO NOT USE IN PRODUCTION.)

    MUST BE EXACTLY `frozenset()`. Other values are only for unit tests. See
    `protocol.SpawnChild` for details.

    By default, child processes are sandboxed: user code should not be able to
    access the rest of the system. (In particular, it should not be able to
    access parent-process state; influence parent-process behavior in any way
    but its stdout, stderr and exit code; or communicate with any internal
    services.)

    Our layers of sandbox security overlap: for instance, we (a) restrict the
    user code to run as non-root _and_ (b) disallow root from escaping its
    chroot. We can't test layer (b) unless we disable layer (a); and that's
    what this feature is for.

    By default, all sandbox features are enabled. To enable only a subset, set
    `skip_sandbox_except` to a `frozenset()` with one or more of the following
    strings:

    * "drop_capabilities": limit root's capabilities
    * "setuid": become an anonymous, non-root user
    * "no_new_privs": prevent setuid-root programs from gaining capabilities
    * "seccomp": filter system calls
    """


def sandbox_child_from_pyspawner(child_pid: int, config: SandboxConfig) -> None:
    """
    Sandbox the child process from the pyspawner side of things.

    The child must wait for this to complete before it embarks upon its own
    sandboxing adventure.
    """
    _write_namespace_uidgid(child_pid)
    if config.network is not None:
        _setup_network_namespace_from_pyspawner(config.network, child_pid)


def sandbox_child_self(config: SandboxConfig) -> None:
    """
    Sandbox our own process.

    This must not be called before pyspawner finishes calling
    sandbox_child_from_pyspawner().
    """
    _Sandbox(config).run()


@dataclass(frozen=True)
class _Sandbox:
    config: SandboxConfig

    def _should_sandbox(self, feature: str) -> bool:
        """
        Return `True` if we should call a particular sandbox function.

        This should _always_ return `True` on production code. The function
        only exists to help with unit testing.
        """
        if self.config.skip_sandbox_except:
            # test code only
            return feature in self.config.skip_sandbox_except
        else:
            # production code
            return True

    def run(self) -> None:
        """
        Prevent child code from interacting with the rest of our system.

        Tasks with rationale ('[x]' means, "unit-tested"):

        [x] bring up external network
        [x] wait for pyspawner to write uid_map
        [x] close `sock` (so "pyspawner" does not misbehave)
        [x] drop capabilities (like cap_sys_admin)
        [x] set seccomp filter
        [x] setuid to 1000
        [x] use chroot (so children can't see other files)
        """
        if self.config.network is not None:
            _install_network(self.config.network)
        if self._should_sandbox("no_new_privs"):
            _set_no_new_privs()
        if self.config.chroot_dir is not None:
            _chroot(self.config.chroot_dir)
        if self._should_sandbox("setuid"):
            _setuid()
        if self._should_sandbox("drop_capabilities"):
            _drop_capabilities()
        if self._should_sandbox("seccomp"):
            _install_seccomp(seccomp_bpf_bytes)


def _write_namespace_uidgid(child_pid: int) -> None:
    """
    Write /proc/child_pid/uid_map and /proc/child_pid/gid_map.

    Why call this? Because otherwise, the called code can do it for us. That
    would mean root in the child would be equal to root in the parent -- so the
    child could, for instance, modify files owned outside of it.

    ref: man user_namespaces(7).
    """
    Path(f"/proc/{child_pid}/uid_map").write_text("0 100000 65536")
    Path(f"/proc/{child_pid}/setgroups").write_text("deny")
    Path(f"/proc/{child_pid}/gid_map").write_text("0 100000 65536")


def _setup_network_namespace_from_pyspawner(
    config: NetworkConfig, child_pid: int
) -> None:
    """
    Send new veth device to `child_pid`'s network namespace.

    See `_network()` for the child's logic. Read the `NetworkConfig`
    docstring to understand how the network namespace works.
    """
    with pyroute2.IPRoute() as ipr:
        # Avoid a race: what if another forked process already created this
        # interface?
        #
        # If that's the case, assume the other process has already exited
        # (because [2019-11-11] we only run one networking-enabled child at a
        # time). So the veth device is about to be deleted anyway.
        try:
            ipr.link("del", ifname=config.kernel_veth_name)
        except pyroute2.NetlinkError as err:
            if err.code == errno.ENODEV:
                pass  # common case -- the device doesn't exist
            else:
                if err.code == errno.EPERM:
                    sys.stderr.write(
                        textwrap.dedent(
                            r"""
                            *** pyspawner failed to use netlink. ***

                            Are you using pyspawner in Docker? Docker
                            containers don't have CAP_NET_ADMIN by default. To
                            use pyspawner you'll need to relax this
                            restriction:

                                docker run \
                                    --cap-add NET_ADMIN \
                                    ...
                            """
                        )
                    )
                raise

        # Create kernel_veth + child_veth veth pair
        ipr.link(
            "add",
            ifname=config.kernel_veth_name,
            peer=config.child_veth_name,
            kind="veth",
        )

        # Bring up kernel_veth
        kernel_veth_index = ipr.link_lookup(ifname=config.kernel_veth_name)[0]
        ipr.addr(
            "add",
            index=kernel_veth_index,
            address=config.kernel_ipv4_address,
            prefixlen=24,
        )
        ipr.link("set", index=kernel_veth_index, state="up")

        # Send child_veth to child namespace
        child_veth_index = ipr.link_lookup(ifname=config.child_veth_name)[0]
        ipr.link("set", index=child_veth_index, net_ns_pid=child_pid)


def _chroot(root: Path) -> None:
    """
    Enter a restricted filesystem, so absolute paths are relative to `root`.

    Why call this? So the user can't read files from our filesystem (which
    include our secrets and our users' secrets); and the user can't *write*
    files to our filesystem (which might inject code into a parent process).

    SECURITY: entering a chroot is not enough. To prevent this process from
    accessing files outside the chroot, this process must drop its ability to
    chroot back _out_ of the chroot. Use _drop_capabilities().

    SECURITY: TODO: switch from chroot to pivot_root. pivot_root makes it far
    harder for root to break out of the jail. It needs a process-specific mount
    namespace. But on Kubernetes (and Docker), we'd need so many privileges to
    pivot_root that we'd be _decreasing_ security. Find out how to do it with
    fewer privileges.

    For now, since we don't use a separate mount namespace, chroot doesn't
    add much "security" in the case of privilege escalation: root will be able
    to escape the chroot. (Even root doesn't have permission to read our
    secrets, though.) Chroot isn't to allay evildoers: it's so child-code
    developers see the filesystem tree we want them to see.
    """
    os.chroot(str(root))
    os.chdir("/")


def _install_network(config: NetworkConfig) -> None:
    """
    Set up networking, assuming pyspawner passed us a network interface.

    Set the IP address of the veth interface, then bring it up.
    Also bring up the "lo" interface.

    This requires CAP_NET_ADMIN. Use the "drop_capabilities" sandboxing step
    afterwards to prevent further fiddling.
    """
    with pyroute2.IPRoute() as ipr:
        lo_index = ipr.link_lookup(ifname="lo")[0]
        ipr.link("set", index=lo_index, state="up")

        veth_index = ipr.link_lookup(ifname=config.child_veth_name)[0]
        ipr.addr(
            "add", index=veth_index, address=config.child_ipv4_address, prefixlen=24
        )
        ipr.link("set", index=veth_index, state="up")
        ipr.route("add", gateway=config.kernel_ipv4_address)


def _drop_capabilities():
    """
    Drop all capabilities in the caller.

    Also, set the process "securebits" to prevent regaining capabilities.

    Why call this? So if user code manages to setuid to root (which should be
    impossible), it still won't have permission to call dangerous kernel code.
    (For example: after dropping privileges, "pivot_root" will fail with
    EPERM, even for root.)

    ref: http://people.redhat.com/sgrubb/libcap-ng/
    ref: man capabilities(7)
    """
    # straight from man capabilities(7):
    # "An application can use the following call to lock itself, and all of
    # its descendants, into an environment where the only way of gaining
    # capabilities is by executing a program with associated file capabilities"
    c.libc_prctl_set_securebits()
    # And now, _drop_ the capabilities (and we can never gain them again)
    # Drop the Bounding set...
    c.libc_prctl_capbset_drop_all_capabilities()
    # ... and drop permitted/effective/inheritable capabilities
    c.libcap_cap_set_proc_empty_capabilities()


def _set_no_new_privs():
    """
    Prevent a setuid bit on a file from restoring capabilities.
    """
    c.libc_prctl_pr_set_no_new_privs(1)


def _install_seccomp(bpf_bytes):
    """
    Install a whitelist filter to prevent unwanted syscalls.

    Why call this? Two reasons:

    1. Redundancy: if there's a Linux bug, there's a good chance our seccomp
       filter may prevent an attacker from exploiting it.
    2. Speculative execution: seccomp implicitly prevents _all_ syscalls from
       exploiting Spectre-type CPU security bypasses.

    Docker comes with seccomp by default, making seccomp mostly redundant. But
    Kubernetes 1.14 still doesn't use seccomp, and [2019-11-07] that's what we
    use on prod.

    To maintain our whitelist, read `docker/seccomp/README.md`. The compiled
    file, for x86-64, belongs in `cjwkernel/pyspawner/sandbox-seccomp.bpf`.

    Requires `no_new_privs` sandbox (or CAP_SYS_ADMIN).
    """
    c.libc_prctl_pr_set_seccomp_mode_filter(bpf_bytes)


def _setuid():
    """
    Drop root: switch to UID 1000.

    Why call this? Because Linux gives special capabilities to root (even after
    we drop privileges).

    ref: man setresuid(2)
    """
    os.setresuid(1000, 1000, 1000)
    os.setresgid(1000, 1000, 1000)
| 35.931604 | 99 | 0.673843 | 2,134 | 15,235 | 4.703374 | 0.261012 | 0.01355 | 0.005978 | 0.007572 | 0.129023 | 0.079207 | 0.054598 | 0.042244 | 0.034871 | 0.034871 | 0 | 0.015303 | 0.253692 | 15,235 | 423 | 100 | 36.016548 | 0.867458 | 0.428815 | 0 | 0.097345 | 0 | 0 | 0.057532 | 0.016908 | 0 | 0 | 0 | 0.004728 | 0 | 1 | 0.106195 | false | 0.00885 | 0.088496 | 0 | 0.309735 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
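A minimal sketch of composing the configuration objects above (the chroot path is a hypothetical placeholder, and the iptables rules described in the NetworkConfig docstring must still be installed separately):

from pathlib import Path

config = SandboxConfig(
    chroot_dir=Path("/var/lib/pyspawner/chroot"),  # hypothetical path
    network=NetworkConfig(),  # defaults match tests/setup-sandbox.sh
)
# Pyspawner side, after clone():
#     sandbox_child_from_pyspawner(child_pid, config)
# Child side, once the parent has finished:
#     sandbox_child_self(config)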
ea19dad1a607d6de6f67ce076763aed124736fa3 | 2,079 | py | Python | tests/laser/smt/bitvecfunc_test.py | yxliang01/mythril-classic | 2348c75a5816cb4201ba680b3e0a062d4e467dbc | [
"MIT"
] | 8 | 2018-05-15T01:39:48.000Z | 2020-09-14T03:56:54.000Z | tests/laser/smt/bitvecfunc_test.py | yxliang01/mythril-classic | 2348c75a5816cb4201ba680b3e0a062d4e467dbc | [
"MIT"
] | 21 | 2019-04-12T17:54:51.000Z | 2021-11-04T18:47:45.000Z | tests/laser/smt/bitvecfunc_test.py | yxliang01/mythril-classic | 2348c75a5816cb4201ba680b3e0a062d4e467dbc | [
"MIT"
] | 2 | 2018-05-11T01:10:29.000Z | 2018-05-15T17:35:37.000Z | from mythril.laser.smt import Solver, symbol_factory, bitvec

import z3
import pytest
import operator


@pytest.mark.parametrize(
    "operation,expected",
    [
        (operator.add, z3.unsat),
        (operator.sub, z3.unsat),
        (operator.and_, z3.sat),
        (operator.or_, z3.sat),
        (operator.xor, z3.unsat),
    ],
)
def test_bitvecfunc_arithmetic(operation, expected):
    # Arrange
    s = Solver()
    input_ = symbol_factory.BitVecVal(1, 8)
    bvf = symbol_factory.BitVecFuncSym("bvf", "sha3", 256, input_=input_)
    x = symbol_factory.BitVecSym("x", 256)
    y = symbol_factory.BitVecSym("y", 256)

    # Act
    s.add(x != y)
    s.add(operation(bvf, x) == operation(y, bvf))

    # Assert
    assert s.check() == expected


@pytest.mark.parametrize(
    "operation,expected",
    [
        (operator.eq, z3.sat),
        (operator.ne, z3.unsat),
        (operator.lt, z3.unsat),
        (operator.le, z3.sat),
        (operator.gt, z3.unsat),
        (operator.ge, z3.sat),
        (bitvec.UGT, z3.unsat),
        (bitvec.UGE, z3.sat),
        (bitvec.ULT, z3.unsat),
        (bitvec.ULE, z3.sat),
    ],
)
def test_bitvecfunc_bitvecfunc_comparison(operation, expected):
    # Arrange
    s = Solver()
    input1 = symbol_factory.BitVecSym("input1", 256)
    input2 = symbol_factory.BitVecSym("input2", 256)
    bvf1 = symbol_factory.BitVecFuncSym("bvf1", "sha3", 256, input_=input1)
    bvf2 = symbol_factory.BitVecFuncSym("bvf2", "sha3", 256, input_=input2)

    # Act
    s.add(operation(bvf1, bvf2))
    s.add(input1 == input2)

    # Assert
    assert s.check() == expected


def test_bitvecfunc_bitvecfuncval_comparison():
    # Arrange
    s = Solver()
    input1 = symbol_factory.BitVecSym("input1", 256)
    input2 = symbol_factory.BitVecVal(1337, 256)
    bvf1 = symbol_factory.BitVecFuncSym("bvf1", "sha3", 256, input_=input1)
    bvf2 = symbol_factory.BitVecFuncVal(12345678910, "sha3", 256, input_=input2)

    # Act
    s.add(bvf1 == bvf2)

    # Assert
    assert s.check() == z3.sat
    assert s.model().eval(input2.raw) == 1337
| 25.048193 | 80 | 0.626263 | 250 | 2,079 | 5.088 | 0.256 | 0.132862 | 0.058962 | 0.042453 | 0.413522 | 0.334906 | 0.262579 | 0.22327 | 0.22327 | 0.22327 | 0 | 0.062772 | 0.22607 | 2,079 | 82 | 81 | 25.353659 | 0.727781 | 0.026936 | 0 | 0.267857 | 0 | 0 | 0.045206 | 0 | 0 | 0 | 0 | 0 | 0.071429 | 1 | 0.053571 | false | 0 | 0.071429 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
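The intuition behind these expected sat/unsat results can be reproduced in plain z3 by modeling the hash as an uninterpreted function (a standalone sketch, independent of mythril's wrappers):

import z3

sha3 = z3.Function("sha3", z3.BitVecSort(256), z3.BitVecSort(256))
a, b = z3.BitVecs("a b", 256)
s = z3.Solver()
s.add(a == b, sha3(a) != sha3(b))
assert s.check() == z3.unsat  # equal inputs force equal hashes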
ea1b5d298881a7bbf8744fbfd0e0e7504f7a5613 | 6,151 | py | Python | airsim_ros_images/publish_images.py | blakermchale/airsim_ros_images | 9a8566f2afeaf4c3c80895a262e9b9644080d28a | [
"MIT"
] | null | null | null | airsim_ros_images/publish_images.py | blakermchale/airsim_ros_images | 9a8566f2afeaf4c3c80895a262e9b9644080d28a | [
"MIT"
] | null | null | null | airsim_ros_images/publish_images.py | blakermchale/airsim_ros_images | 9a8566f2afeaf4c3c80895a262e9b9644080d28a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from airsim import MultirotorClient, ImageType, ImageRequest, ImageResponse
from airsim import CameraInfo as SimCameraInfo
import os
import json

import numpy as np

import rclpy
from rclpy.node import Node
from tf2_ros.transform_broadcaster import TransformBroadcaster
from cv_bridge import CvBridge

from sensor_msgs.msg import Image, CameraInfo
from std_msgs.msg import Header
from geometry_msgs.msg import PoseStamped, Vector3, Quaternion, TransformStamped


class ImagePublisher(Node):
    def __init__(self, rate=60):
        super().__init__("airsim_images")

        # Create timer for calling publish at predefined rate
        self.create_timer(1 / rate, self.publish)

        # AirSim variables
        self._airsim_client = MultirotorClient(ip=os.environ["WSL_HOST_IP"])
        # self._camera_name = "front_center"
        self._camera_name = "bottom_center"
        self._camera_frame_id = "realsense"
        self._vehicle_name = self.get_namespace().split("/")[1]

        # ROS Publishers
        # self._pub_ir = self.create_publisher(Image, "ir/image_raw", 1)
        self._pub_color = self.create_publisher(Image, "color/image_raw", 1)
        # self._pub_depth = self.create_publisher(Image, "depth/image_raw", 1)
        # self._pub_info_ir = self.create_publisher(CameraInfo, "ir/camera_info", 1)
        self._pub_info_color = self.create_publisher(CameraInfo, "color/camera_info", 1)
        # self._pub_depth = self.create_publisher(Image, "depth/image_raw", 1)

        # TF related variables
        self.br = TransformBroadcaster(self)

        # CV
        self.bridge = CvBridge()

        # Internal variables
        self._cam_info_msgs = {}

        self.get_logger().info("Initialized image publisher")

    def publish(self):
        """Publish images from AirSim to ROS"""
        responses = self._airsim_client.simGetImages([
            # uncompressed RGB array bytes
            ImageRequest(self._camera_name, ImageType.Scene, compress=False),
            # # infrared uncompressed image
            # ImageRequest(self._camera_name, ImageType.Infrared, compress=False),
            # # floating point uncompressed image
            # ImageRequest(self._camera_name, ImageType.DepthPlanner, pixels_as_float=True, compress=False),
        ], self._vehicle_name)
        color_response = responses[0]
        # ir_response = responses[1]
        # depth_response = responses[2]

        header = Header()
        header.stamp = self.get_clock().now().to_msg()
        # TODO: implement parameter for frame id, also decide on if each separate image type should have a different frame id
        # This may mean we should load the ids via ros parameters
        header.frame_id = self._camera_frame_id

        # Handle cam info if it has not been found yet
        if self._vehicle_name not in self._cam_info_msgs.keys():
            self._cam_info_msgs[self._vehicle_name] = {}
            cam_info = self._airsim_client.simGetCameraInfo(self._camera_name, self._vehicle_name)
            d_params = self._airsim_client.simGetDistortionParams(self._camera_name, self._vehicle_name)
            self.get_logger().info(f"{d_params}")
            self.get_logger().info(f"""
                HFOV: {cam_info.fov},
                PROJ: {cam_info.proj_mat}
            """)
            # TODO: implement multiple cameras for each lens on realsense and update this method
            self._cam_info_msgs[self._vehicle_name]["color"] = construct_info(header, cam_info, color_response.height, color_response.width)
            # self._cam_info_msgs[self._vehicle_name]["ir"] = self._cam_info_msgs[self._vehicle_name]["color"]

        image_color = construct_image(header, color_response, "bgr8")
        # image_ir = construct_image(header, ir_response, "rgb8")
        # image_depth = construct_image(header, depth_response, "rgb8")

        # TODO: use camera pose from airsim
        tfmsg = TransformStamped()
        translation = Vector3(x=0., y=0., z=0.)
        tfmsg.transform.translation = translation
        tfmsg.transform.rotation = Quaternion(x=0., y=0., z=0., w=1.)
        tfmsg.child_frame_id = self._camera_frame_id
        tf_header = Header()
        tf_header.stamp = header.stamp
        tfmsg.header = tf_header
        tfmsg.header.frame_id = "world"
        self.br.sendTransform(tfmsg)

        self._pub_color.publish(image_color)
        # self._pub_ir.publish(image_ir)
        # self._pub_depth.publish(image_depth)
        self._pub_info_color.publish(self._cam_info_msgs[self._vehicle_name]["color"])
        # self._pub_info_ir.publish(self._cam_info_msgs[self._vehicle_name]["ir"])


def construct_info(header: Header, info: SimCameraInfo, height: int, width: int) -> CameraInfo:
    msg = CameraInfo()

    Tx = 0.0  # Assumed for now since we are not using stereo
    hfov = np.deg2rad(info.fov)
    # https://github.com/microsoft/AirSim-NeurIPS2019-Drone-Racing/issues/86
    f = width / (2 * np.tan(0.5 * hfov))
    Fx = Fy = f
    cx = width / 2
    cy = height / 2
    K = np.array([
        [Fx, 0.0, cx],
        [0.0, Fy, cy],
        [0.0, 0.0, 1]
    ]).flatten()
    R = np.array([
        [0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0]
    ]).flatten()
    P = np.array([
        [Fx, 0.0, cx, Tx],
        [0.0, Fy, cy, 0.0],
        [0.0, 0.0, 1.0, 0.0]
    ]).flatten()

    msg.header = header
    msg.height = height
    msg.width = width
    msg.k = K
    msg.r = R
    msg.p = P
    msg.binning_x = 0
    msg.binning_y = 0
    return msg


def construct_image(header: Header, response: ImageResponse, encoding: str) -> Image:
    msg = Image()
    msg.header = header
    msg.encoding = encoding
    msg.height = response.height
    msg.width = response.width
    msg.data = response.image_data_uint8 if response.image_type != ImageType.DepthPlanar else response.image_data_float
    msg.is_bigendian = 0
    msg.step = response.width * 3
    return msg


def main(args=None):
    rclpy.init(args=args)
    image_publisher = ImagePublisher()
    rclpy.spin(image_publisher)


if __name__ == "__main__":
    main()
| 36.182353 | 140 | 0.652739 | 808 | 6,151 | 4.727723 | 0.27104 | 0.016754 | 0.018063 | 0.019895 | 0.203141 | 0.157853 | 0.119634 | 0.084555 | 0.031414 | 0.031414 | 0 | 0.018538 | 0.237035 | 6,151 | 169 | 141 | 36.39645 | 0.79544 | 0.262071 | 0 | 0.085714 | 0 | 0 | 0.050667 | 0 | 0 | 0 | 0 | 0.005917 | 0 | 1 | 0.047619 | false | 0 | 0.114286 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
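A quick numeric check of the pinhole relation used in construct_info, f = width / (2 * tan(hfov / 2)), with hypothetical camera values:

import numpy as np

width, hfov_deg = 640, 90.0  # hypothetical camera
f = width / (2 * np.tan(0.5 * np.deg2rad(hfov_deg)))
assert round(f) == 320  # tan(45 deg) == 1, so f == width / 2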
ea1bd7be16c58bca190cad2b191c360588270911 | 1,471 | py | Python | mysite/restaurants/migrations/0001_initial.py | leixiayang/django-python | 8faa84867af5645d3d3d8e67fe8020be4dc68551 | [
"Apache-2.0"
] | 54 | 2015-07-13T14:23:01.000Z | 2021-08-05T10:51:00.000Z | mysite/restaurants/migrations/0001_initial.py | leixiayang/django-python | 8faa84867af5645d3d3d8e67fe8020be4dc68551 | [
"Apache-2.0"
] | 32 | 2015-07-16T08:58:00.000Z | 2020-04-30T09:41:57.000Z | mysite/restaurants/migrations/0001_initial.py | leixiayang/django-python | 8faa84867af5645d3d3d8e67fe8020be4dc68551 | [
"Apache-2.0"
] | 31 | 2015-07-13T15:32:01.000Z | 2022-02-19T17:19:51.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Food',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=20)),
                ('price', models.DecimalField(max_digits=3, decimal_places=0)),
                ('comment', models.CharField(max_length=50, blank=True)),
                ('is_spicy', models.BooleanField(default=False)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Restaurant',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=20)),
                ('phone_number', models.CharField(max_length=15)),
                ('address', models.CharField(max_length=50, blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='food',
            name='restaurant',
            field=models.ForeignKey(to='restaurants.Restaurant'),
            preserve_default=True,
        ),
    ]
| 32.688889 | 114 | 0.539089 | 132 | 1,471 | 5.840909 | 0.469697 | 0.097276 | 0.116732 | 0.155642 | 0.459144 | 0.373541 | 0.373541 | 0.28275 | 0.28275 | 0.28275 | 0 | 0.013092 | 0.324949 | 1,471 | 44 | 115 | 33.431818 | 0.763343 | 0.014276 | 0 | 0.447368 | 0 | 0 | 0.072514 | 0.015193 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.131579 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea1c3186f01033ae7c69457a3e8c0a9198f56332 | 1,758 | py | Python | stage5/01-sys-tweaks/files/uploadRomi.py | RyanHir/WPILibPi | 36788aae0bdaaee27a540357d111f4adf07b3973 | [
"BSD-3-Clause"
] | 59 | 2018-11-25T22:48:40.000Z | 2020-03-21T17:01:13.000Z | stage5/01-sys-tweaks/files/uploadRomi.py | RyanHir/WPILibPi | 36788aae0bdaaee27a540357d111f4adf07b3973 | [
"BSD-3-Clause"
] | 94 | 2018-12-21T20:30:13.000Z | 2020-11-14T04:03:44.000Z | stage5/01-sys-tweaks/files/uploadRomi.py | RyanHir/WPILibPi | 36788aae0bdaaee27a540357d111f4adf07b3973 | [
"BSD-3-Clause"
] | 35 | 2018-12-21T22:47:22.000Z | 2020-11-08T16:25:51.000Z | #!/usr/bin/env python3 -u

# This file uploads to the Romi using a USB cable

import time
import serial
import os
import sys
import getopt
import subprocess


def main(argv):
    try:
        opts, args = getopt.getopt(argv, "hp:f:", ["port=", "file="])
    except getopt.GetoptError as err:
        print(err)
        print("Example: uploadRomi.py -p /dev/ttyACM0 -f firmware/.pio/build/a-star32U4/firmware.hex")
        sys.exit(1)

    # Set Defaults
    usbport = '/dev/ttyACM0'
    hexfile = '$NVM_BIN/../lib/node_modules/@wpilib/wpilib-ws-robot-romi/firmware/.pio/build/a-star32U4/firmware.hex'

    for opt, arg in opts:
        if opt == "-h":
            print("uploadRomi.py -p <full_port_path> -f <file_path>")
            sys.exit(1)
        if opt in ("-p", "--port"):
            usbport = arg
        if opt in ("-f", "--file"):
            hexfile = arg

    print("Beginning binary upload to Romi ...")

    # baudrate of 1200 resets the Arduino to boot mode for 8 seconds
    brate = 1200
    print("Resetting Romi to boot mode (should see quickly flashing yellow LED)")
    conn = {}
    try:
        conn = serial.Serial(port=usbport, baudrate=1200)
    except serial.SerialException as err:
        print(err)
        sys.exit(1)

    if not conn.isOpen():
        print("Problem connecting to port " + usbport)
        sys.exit(1)
    conn.close()

    # Allow Romi to go into boot mode
    sys.stdout.flush()
    time.sleep(1)

    # Upload binary to Romi
    print("Running imaging tool")
    sys.stdout.flush()
    sys.exit(subprocess.call(['avrdude', '-v', '-q', '-patmega32u4', '-cavr109', '-P' + usbport, '-b57600', '-D', '-Uflash:w:' + hexfile + ':i'], stderr=sys.stdout.fileno()))


if __name__ == "__main__":
    main(sys.argv[1:])
| 28.819672 | 174 | 0.606371 | 241 | 1,758 | 4.369295 | 0.493776 | 0.033238 | 0.030389 | 0.024691 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02939 | 0.245165 | 1,758 | 60 | 175 | 29.3 | 0.76413 | 0.114903 | 0 | 0.232558 | 0 | 0.046512 | 0.316979 | 0.093609 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.139535 | 0 | 0.162791 | 0.186047 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
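For reference, the script's own help text implies invocations of the form (port and firmware paths are examples only):

uploadRomi.py -p /dev/ttyACM0 -f firmware/.pio/build/a-star32U4/firmware.hex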
ea1f5d1c553507a34defc6fd3af850a35533f58b | 18,585 | py | Python | src/deepex/data/re_data.py | HaoyunHong/deepex | da8b6f1ec87e7cb826f287f2f4d7630e4cce3a74 | [
"Apache-2.0"
] | 51 | 2021-09-25T04:38:27.000Z | 2022-03-28T07:53:30.000Z | src/deepex/data/re_data.py | HaoyunHong/deepex | da8b6f1ec87e7cb826f287f2f4d7630e4cce3a74 | [
"Apache-2.0"
] | 11 | 2021-09-29T17:27:32.000Z | 2022-03-31T09:56:14.000Z | src/deepex/data/re_data.py | HaoyunHong/deepex | da8b6f1ec87e7cb826f287f2f4d7630e4cce3a74 | [
"Apache-2.0"
] | 4 | 2021-09-29T01:25:56.000Z | 2022-03-15T11:36:45.000Z | import logging
import os
import time
from zipfile import ZipFile
from bisect import bisect, bisect_left
from html.parser import HTMLParser
from dataclasses import dataclass, field
from filelock import FileLock
from typing import List, Optional, Tuple, Dict, NewType, Any
import xml.etree.ElementTree as ET
import re
from collections import namedtuple
import json
import math
import itertools
from tqdm import tqdm

import spacy
from spacy.lang.en import English
import numpy as np
import torch
from torch.utils.data.dataset import Dataset

from .text_handler import TextHandler, re_pronouns

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())

Entity = namedtuple('Entity', 'name, span, score')


@dataclass
class InputExample:
    docid: str
    text: str
    offset: int


@dataclass(frozen=True)
class InputFeatures:
    docid: str
    offset: int
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    special_tokens_mask: Optional[List[int]] = None
    entity_ids: List[Entity] = None
    head_entity_ids: List[Entity] = None
    tail_entity_ids: List[Entity] = None
    relation_entity_ids: List[Entity] = None
    text: str = ""


class SequentialDataset(Dataset):
    def __init__(self, filepaths,
                 tokenizer,
                 mention_generator,
                 max_seq_length,
                 overwrite_cache: Optional[bool] = False):
        if len(filepaths) == 0:
            self.features = []
        else:
            logger.addHandler(logging.FileHandler(os.path.join('/'.join(filepaths[0].split('/')[:-2]),
                                                               'run_kbp_{}_{}.log'.format(tokenizer.__class__.__name__,
                                                                                          mention_generator.__class__.__name__))))
            self.features = []
            for filepath in filepaths:
                dataset = REDataset(tokenizer,
                                    mention_generator,
                                    max_seq_length,
                                    overwrite_cache)
                self.features.extend(dataset.features)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]


class REDataset:
    def __init__(
        self,
        filedir,
        index,
        tokenizer,
        mention_generator,
        max_seq_length,
        example_batch_size=2048,
        overwrite_cache: Optional[bool] = False,
    ):
        self.filedir = filedir
        self.index = index
        self.max_seq_length = max_seq_length
        self.overwrite_cache = overwrite_cache
        self.use_coref = False
        self.text_handler = TextHandler(index=self.index, use_coref=self.use_coref, DIR=filedir)
        self.processor = Processor(tokenizer, self.text_handler, mention_generator, example_batch_size)

    def generate_batched_datasets(self):
        for i, self.features in enumerate(
                tqdm(self.processor._convert_batch_examples_to_features(
                    self.filedir, self.index, self.overwrite_cache,
                    max_length=self.max_seq_length, use_coref=self.use_coref
                ), desc='process feature files...')):
            logger.debug('features size {}'.format(len(self.features)))
            yield DatasetWrapper(self.features)


class DatasetWrapper(Dataset):
    def __init__(
        self,
        features,
    ):
        self.features = features

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
class Processor:
def __init__(self, tokenizer, text_handler, mention_generator, example_batch_size=2048):
self.tokenizer = tokenizer
self.text_handler = text_handler
self.mention_generator = mention_generator
self.example_batch_size = example_batch_size
self.examples = []
self.features = []
def overlap_span(self, span0, span1, tokenizer):
return span1[1] > span0[0] and span1[0] < span0[1]
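# The spans behave as half-open intervals, so touching endpoints do not overlap.
# Sketch: overlap_span((3, 7), (5, 9), tok) -> True; overlap_span((3, 7), (7, 9), tok) -> False.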
def _create_batch_examples(self):
last_dir_name = None
file_cnt = 0
for i, (text, offset, dir_name, filename) in enumerate(tqdm(self.text_handler, desc='create batch examples...')):
logger.debug('text: {}'.format(text))
logger.debug('offset: {}'.format(offset))
logger.debug('dir_name: {}'.format(dir_name))
logger.debug('filename: {}'.format(filename))
if last_dir_name != dir_name:
file_cnt += 1
last_dir_name = dir_name
self.examples.append(InputExample(docid=dir_name, text=text, offset=offset))
if (i+1) % self.example_batch_size == 0:
logger.debug('processed number of sentences/samples {}'.format(i+1))
yield self.examples
self.examples = []
logger.debug('cleaned example size {}'.format(len(self.examples)))
if len(self.examples) != 0:
yield self.examples
self.examples = []
def _convert_to_coref(self, name, span):
coref = self.text_handler.get_coref(span)
if coref and self.text_handler.cur_text[coref[1][0]:coref[1][1]].strip(' ').lower() in re_pronouns:
logger.debug('org name: {}'.format(name))
name = coref[0].strip('\n')
logger.debug('coref name: {}'.format(name))
logger.debug('org span: {}'.format(str(span)))
span = coref[1]
logger.debug('coref span: {}'.format(str(span)))
return name, span
def _convert_batch_examples_to_features(self, filedir, index, overwrite_cache, use_coref=False,
max_length: Optional[int] = None):
for i, self.examples in enumerate(tqdm(self._create_batch_examples(), desc='convert batch examples to features...')):
logger.debug('example size {}'.format(len(self.examples)))
cached_features_file = os.path.join(
filedir,
"cached_{}_{}_{}_{}_{}_{}_{}".format(
index, self.tokenizer.__class__.__name__, self.mention_generator.__class__.__name__, max_length, i,
use_coref, self.example_batch_size
),
)
cached_mentions_file = os.path.join(
filedir,
"cachedmentions_{}_{}_{}_{}_{}_{}_{}".format(
index, self.tokenizer.__class__.__name__, self.mention_generator.__class__.__name__, max_length, i,
use_coref, self.example_batch_size
),
)
lock_path = cached_features_file + ".lock"
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not overwrite_cache:
start = time.time()
try:
if os.path.getsize(cached_features_file) == 0:
self.features = []
logger.debug(
f"Skipping features from cached file {cached_features_file} [took %.3f s]", time.time() - start
)
else:
self.features = torch.load(cached_features_file)
logger.debug(
f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
)
except Exception:  # avoid a bare except; fall back to rebuilding the features below
self.features = []
else:
logger.debug(f"Creating features from dataset file at {index} {i}")
if max_length is None:
max_length = self.tokenizer.max_len
batch_encoding = self.tokenizer.batch_encode_plus(
[example.text for example in self.examples],
max_length=max_length,
padding="max_length",
truncation=True,
return_special_tokens_mask=True,
return_offsets_mapping=True
)
all_mentions = {}
for i in range(len(self.examples)):
inputs = {k: batch_encoding[k][i] for k in batch_encoding}
mentions = self.mention_generator.get_mentions_raw_text(self.examples[i].text,extra=(self.examples[i].docid,self.examples[i].offset))
all_mentions[(self.examples[i].docid,self.examples[i].offset)] = mentions
logger.debug(('candidate entities: {}'.format(str(mentions['candidate_entities']))))
entity_ids = []
for j, encoding_span in enumerate(batch_encoding['offset_mapping'][i]):
if encoding_span[0] == 0 and encoding_span[1] == 0:
entity_ids.append(Entity(name='$NIL$', span=[-1, -1], score=1.0))
continue
has_entity = False
logger.debug('encoding_span: {} name: {}'.format(encoding_span,
self.tokenizer.convert_ids_to_tokens(
batch_encoding['input_ids'][i][j])))
for m, (name, raw_span) in enumerate(
zip(mentions['candidate_entities'], mentions['candidate_positions'])):
if raw_span[0] == -1 and raw_span[1] == -1:
continue
logger.debug('raw_span: {} name: {}'.format(raw_span, name))
if self.overlap_span(encoding_span, raw_span, self.tokenizer):
char_span = [raw_span[0] + self.examples[i].offset,
raw_span[1] + self.examples[i].offset]
char_name = name[0]
if use_coref:
char_name, char_span = self._convert_to_coref(char_name, char_span)
entity_ids.append(Entity(name=char_name, span=char_span,
score=1.0))
has_entity = True
break
if not has_entity:
entity_ids.append(Entity(name='$NIL$', span=[-1, -1], score=1.0))
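# The three loops that follow repeat the same span-alignment logic for the head,
# tail and relation candidate mentions produced by the mention generator.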
head_entity_ids = []
for j, encoding_span in enumerate(batch_encoding['offset_mapping'][i]):
if encoding_span[0] == 0 and encoding_span[1] == 0:
head_entity_ids.append(Entity(name='$NIL$', span=[-1, -1], score=1.0))
continue
has_entity = False
logger.debug('encoding_span: {} name: {}'.format(encoding_span,
self.tokenizer.convert_ids_to_tokens(
batch_encoding['input_ids'][i][j])))
for m, (name, raw_span) in enumerate(
zip(mentions['head_candidate_entities'], mentions['head_candidate_positions'])):
if raw_span[0] == -1 and raw_span[1] == -1:
continue
logger.debug('raw_span: {} name: {}'.format(raw_span, name))
if self.overlap_span(encoding_span, raw_span, self.tokenizer):
char_span = [raw_span[0] + self.examples[i].offset,
raw_span[1] + self.examples[i].offset]
char_name = name[0]
if use_coref:
char_name, char_span = self._convert_to_coref(char_name, char_span)
head_entity_ids.append(Entity(name=char_name, span=char_span,
score=1.0))
has_entity = True
break
if not has_entity:
head_entity_ids.append(Entity(name='$NIL$', span=[-1, -1], score=1.0))
tail_entity_ids = []
for j, encoding_span in enumerate(batch_encoding['offset_mapping'][i]):
if encoding_span[0] == 0 and encoding_span[1] == 0:
tail_entity_ids.append(Entity(name='$NIL$', span=[-1, -1], score=1.0))
continue
has_entity = False
logger.debug('encoding_span: {} name: {}'.format(encoding_span,
self.tokenizer.convert_ids_to_tokens(
batch_encoding['input_ids'][i][j])))
for m, (name, raw_span) in enumerate(
zip(mentions['tail_candidate_entities'], mentions['tail_candidate_positions'])):
if raw_span[0] == -1 and raw_span[1] == -1:
continue
logger.debug('raw_span: {} name: {}'.format(raw_span, name))
if self.overlap_span(encoding_span, raw_span, self.tokenizer):
char_span = [raw_span[0] + self.examples[i].offset,
raw_span[1] + self.examples[i].offset]
char_name = name[0]
if use_coref:
char_name, char_span = self._convert_to_coref(char_name, char_span)
tail_entity_ids.append(Entity(name=char_name, span=char_span,
score=1.0))
has_entity = True
break
if not has_entity:
tail_entity_ids.append(Entity(name='$NIL$', span=[-1, -1], score=1.0))
relation_entity_ids = []
for j, encoding_span in enumerate(batch_encoding['offset_mapping'][i]):
if encoding_span[0] == 0 and encoding_span[1] == 0:
relation_entity_ids.append(Entity(name='$NIL$', span=[-1, -1], score=1.0))
continue
has_entity = False
logger.debug('encoding_span: {} name: {}'.format(encoding_span,
self.tokenizer.convert_ids_to_tokens(
batch_encoding['input_ids'][i][j])))
for m, (name, raw_span) in enumerate(
zip(mentions['relation_candidate_entities'], mentions['relation_candidate_positions'])):
if raw_span[0] == -1 and raw_span[1] == -1:
continue
logger.debug('raw_span: {} name: {}'.format(raw_span, name))
if self.overlap_span(encoding_span, raw_span, self.tokenizer):
char_span = [raw_span[0] + self.examples[i].offset,
raw_span[1] + self.examples[i].offset]
char_name = name[0]
if use_coref:
char_name, char_span = self._convert_to_coref(char_name, char_span)
relation_entity_ids.append(Entity(name=char_name, span=char_span,
score=1.0))
has_entity = True
break
if not has_entity:
relation_entity_ids.append(Entity(name='$NIL$', span=[-1, -1], score=1.0))
inputs['docid'] = self.examples[i].docid
inputs['entity_ids'] = entity_ids
inputs['head_entity_ids'] = head_entity_ids
inputs['tail_entity_ids'] = tail_entity_ids
inputs['relation_entity_ids'] = relation_entity_ids
inputs['offset'] = self.examples[i].offset
inputs['text'] = self.examples[i].text
inputs.pop('offset_mapping')
feature = InputFeatures(**inputs)
self.features.append(feature)
start = time.time()
if len(self.features) == 0:
logger.debug(
f"Empty features to cached file {cached_features_file} [took %.3f s]", time.time() - start
)
torch.save(self.features, cached_features_file)
torch.save(all_mentions, cached_mentions_file)
logger.debug(
"Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
)
yield self.features
self.features = []
logger.debug('cleaned features size {}'.format(len(self.features))) | 52.948718 | 157 | 0.480764 | 1,774 | 18,585 | 4.767756 | 0.120068 | 0.026484 | 0.024592 | 0.029794 | 0.511587 | 0.458383 | 0.436746 | 0.416883 | 0.396075 | 0.396075 | 0 | 0.011523 | 0.425666 | 18,585 | 351 | 158 | 52.948718 | 0.780869 | 0 | 0 | 0.455108 | 0 | 0 | 0.07312 | 0.014904 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040248 | false | 0 | 0.068111 | 0.01548 | 0.188854 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea20424faf2db5df2cbdac22b27cbf60154a081d | 4,983 | py | Python | redux/mods/games/Blackjack.py | PanjaCo/Redux-Bot | 15f4410b3cff137785028b0df4e27258ecad1a04 | [
"MIT"
] | 1 | 2018-02-18T04:05:18.000Z | 2018-02-18T04:05:18.000Z | redux/mods/games/Blackjack.py | iPanja/Redux-Bot | 15f4410b3cff137785028b0df4e27258ecad1a04 | [
"MIT"
] | null | null | null | redux/mods/games/Blackjack.py | iPanja/Redux-Bot | 15f4410b3cff137785028b0df4e27258ecad1a04 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from random import shuffle
import time
class Blackjack:
def __init__(self, bot):
self.bot = bot;
self.state = 3
def setup(self):
self.deck = []
self.deck = self.newDeck()
self.pHand = []
self.dHand = []
self.game_msg = None
self.game_channel = None
self.state = 1 # 1=Player, 2=Dealer, 3=GameOver, 4=CalcWinner
self.assignHands()
@staticmethod
def newDeck():
nDeck = [];
for suit in ["H", "D", "S", "C"]:
for card in [2, 3, 4, 5, 6, 7, 8, 9, "T", "J", "Q", "K", "A"]:  # bug fix: "A" was missing from the deck even though calc() below scores aces
nDeck.append(suit + str(card));
shuffle(nDeck);
return nDeck;
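# Sketch of the deck encoding (with the Ace fix above): 52 shuffled two-character
# codes, suit then rank, e.g. 'H2', 'DT', 'SA', 'CK'.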
def assignHands(self):
for hand in [self.pHand, self.dHand]:
for i in range(0, 2):
hand.append(self.deck[0]);
self.deck.pop(0);
@staticmethod
def calc(hand):
total = 0;
for card in hand:
try:
total += int(card[1])
except ValueError:
if (card[1] in ["T", "J", "Q", "K"]):
total += 10;
elif (card[1] == "A"):
if (not (total + 11 > 21)):
total += 11;
else:
total += 1;
else:
print("Card Type Error")
return total;
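# Worked examples for calc() (hand codes as produced by newDeck()):
#   Blackjack.calc(['HT', 'SA']) == 21   # ten + ace counted as 11
#   Blackjack.calc(['H9', 'D9', 'C9']) == 27   # a bust total is simply returned as-is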
def winner(self):
pCalc = self.calc(self.pHand);
dCalc = self.calc(self.dHand);
if (pCalc > 21):
return 1;
elif (dCalc > 21):
return 0;
elif (pCalc > dCalc):
return 0;
elif (dCalc > pCalc):
return 1;
elif (pCalc == dCalc):
return 2;
else:
print("winner: error");
def hit(self, hand):
hand.append(self.deck[0]);
self.deck.pop(0);
async def display(self):
embed = discord.Embed(title="Blackjack", description="You vs AI", color=0x00ff00)
temp = ""
for card in self.pHand:
temp += card + ", "
embed.add_field(name="Your Hand: " + str(self.calc(self.pHand)), value=temp)
temp = ""
calc = ""
if self.state == 1:
temp = self.dHand[0] + ", ?"
calc = "?"
else:
for card in self.dHand:
temp += card + ", "
calc = str(self.calc(self.dHand))
embed.add_field(name="Dealer's Hand: " + calc, value=temp)
temp = ""
if self.state == 1:
temp = "Select an option"
elif self.state == 2:
temp = "The dealer is taking his turn..."
elif self.state == 4:
self.state = 3
temp = "Game Over - You have "
if self.winner() == 0:
temp += "won"
elif self.winner() == 1:
temp += "lost"
else:
temp += "tied"
else:
temp = "ERROR, uh..."
embed.add_field(name="Status", value=temp, inline=False)
self.game_msg = await self.bot.edit_message(self.game_msg, new_content=".", embed=embed)
if(self.state == 3):
self.game_msg = None
@commands.command(pass_context=True)
@commands.cooldown(1, 2, commands.BucketType.server)
async def blackjack(self, ctx, choice:str):
if(choice == "new"):
if self.state != 3:
await self.bot.send_message(ctx.message.channel, "A game is currently in progress... use '$blackjack reset' to confirm this action")
return
self.setup()
self.game_channel = ctx.message.channel
self.game_msg = await self.bot.send_message(ctx.message.channel, "The game is being created...")
time.sleep(2)
await self.display()
elif(choice == "hit" or choice == "1") and self.state == 1:
self.hit(self.pHand);
if self.calc(self.pHand) > 21:
self.state = 4
await self.display()
elif(choice == "stay" or choice == "2") and self.state == 1:
while(self.state != 3):
dCalc = self.calc(self.dHand);
if(dCalc <= 16):
self.hit(self.dHand)
else:
self.state = 4;
await self.display()
elif(choice == "reset"):
self.setup()
self.game_channel = ctx.message.channel
self.game_msg = await self.bot.send_message(ctx.message.channel, "The game is being created...")
time.sleep(2)
await self.display()
#Cleanup Command
await self.bot.delete_message(ctx.message)
def setup(bot):
try:
bot.add_cog(Blackjack(bot))
print("[Blackjack Module Loaded]")
except Exception as e:
print(" >> Blackjack Module: {0}".format(e)) | 31.339623 | 148 | 0.483845 | 578 | 4,983 | 4.129758 | 0.266436 | 0.052786 | 0.02765 | 0.021366 | 0.236699 | 0.219103 | 0.189359 | 0.189359 | 0.142438 | 0.116464 | 0 | 0.023794 | 0.384307 | 4,983 | 159 | 149 | 31.339623 | 0.754237 | 0.01184 | 0 | 0.316547 | 0 | 0 | 0.080252 | 0 | 0 | 0 | 0.001625 | 0 | 0 | 1 | 0.057554 | false | 0.007194 | 0.028777 | 0 | 0.151079 | 0.028777 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea20e7919ca4f34d0ed56d89fa6fb0115c0dca23 | 4,409 | py | Python | RaspberryPi/DisplayIPAddressDaemon.py | maxheadroom/helpers | 45b2b418ea06445cde142fb606b137664e6e397f | [
"MIT"
] | null | null | null | RaspberryPi/DisplayIPAddressDaemon.py | maxheadroom/helpers | 45b2b418ea06445cde142fb606b137664e6e397f | [
"MIT"
] | null | null | null | RaspberryPi/DisplayIPAddressDaemon.py | maxheadroom/helpers | 45b2b418ea06445cde142fb606b137664e6e397f | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# radio.py, version 3.4 (RGB LCD Pi Plate version)
# September 14, 2013
# Edited by Dylan Leite
# Written by Sheldon Hartling for Usual Panic
# BSD license, all text above must be included in any redistribution
#
#
# based on code from Kyle Prier (http://wwww.youtube.com/meistervision)
# and AdaFruit Industries (https://www.adafruit.com)
# Kyle Prier - https://www.dropbox.com/s/w2y8xx7t6gkq8yz/radio.py
# AdaFruit - https://github.com/adafruit/Adafruit-Raspberry-Pi-Python-Code.git, Adafruit_CharLCDPlate
#
# dependencies
from Adafruit_I2C import Adafruit_I2C
from Adafruit_MCP230xx import Adafruit_MCP230XX
from Adafruit_CharLCDPlate import Adafruit_CharLCDPlate
from datetime import datetime
from subprocess import *
from time import sleep, strftime
from Queue import Queue
from threading import Thread
import smbus
import os
# standard python libs
import logging
import time
import subprocess
# third party libs
from daemon import runner
class DisplayIPAddressDaemon:
# initialize the LCD plate
# use busnum = 0 for raspi version 1 (256MB)
# and busnum = 1 for raspi version 2 (512MB)
LCD = ""
# lcd = ""
# Define a queue to communicate with worker thread
LCD_QUEUE = ""
# Globals
astring = ""
setscroll = ""
# Buttons
NONE = 0x00
SELECT = 0x01
RIGHT = 0x02
DOWN = 0x04
UP = 0x08
LEFT = 0x10
UP_AND_DOWN = 0x0C
LEFT_AND_RIGHT = 0x12
def __init__(self):
self.LCD = Adafruit_CharLCDPlate(busnum = 0)
# self.lcd = Adafruit_CharLCDPlate()
self.LCD_QUEUE = Queue()
self.stdin_path = '/dev/null'
self.stdout_path = '/dev/tty'
self.stderr_path = '/dev/tty'
self.pidfile_path = '/var/run/testdaemon.pid'
self.pidfile_timeout = 5
# ----------------------------
# WORKER THREAD
# ----------------------------
# Define a function to run in the worker thread
def update_lcd(self,q):
while True:
msg = q.get()
# if we're falling behind, skip some LCD updates
while not q.empty():
q.task_done()
msg = q.get()
self.LCD.setCursor(0,0)
self.LCD.message(msg)
q.task_done()
return
# ----------------------------
# MAIN LOOP
# ----------------------------
def run(self):
global astring, setscroll
# Setup AdaFruit LCD Plate
self.LCD.begin(16,2)
self.LCD.clear()
self.LCD.backlight(self.LCD.ON)
# Create the worker thread and make it a daemon
worker = Thread(target=self.update_lcd, args=(self.LCD_QUEUE,))
worker.setDaemon(True)
worker.start()
self.display_ipaddr()
def delay_milliseconds(self, milliseconds):
seconds = milliseconds / float(1000) # divide milliseconds by 1000 for seconds
sleep(seconds)
# ----------------------------
# DISPLAY TIME AND IP ADDRESS
# ----------------------------
def display_ipaddr(self):
show_eth0 = "ip addr show eth0 | cut -d/ -f1 | awk '/inet/ {printf \"e%15.15s\", $2}'"
ipaddr = self.run_cmd(show_eth0)
self.LCD.backlight(self.LCD.ON)
i = 29
muting = False
keep_looping = True
while (keep_looping):
# Every 1/2 second, update the time display
i += 1
#if(i % 10 == 0):
if(i % 5 == 0):
self.LCD_QUEUE.put(datetime.now().strftime('%b %d %H:%M:%S\n')+ ipaddr, True)
# Every 3 seconds, update ethernet or wi-fi IP address
if(i == 60):
ipaddr = self.run_cmd(show_eth0)
i = 0
self.delay_milliseconds(99)
# ----------------------------
def run_cmd(self,cmd):
p = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT)
output = p.communicate()[0]
return output
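# Usage sketch: run_cmd("hostname") returns the command's stdout (stderr merged in),
# e.g. "raspberrypi\n"; display_ipaddr() above feeds it the awk pipeline in show_eth0.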
app = DisplayIPAddressDaemon()
logger = logging.getLogger("DisplayIPAddressDaemonLog")
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler = logging.FileHandler("/var/log/testdaemon.log")
handler.setFormatter(formatter)
logger.addHandler(handler)
daemon_runner = runner.DaemonRunner(app)
#This ensures that the logger file handle does not get closed during daemonization
daemon_runner.daemon_context.files_preserve=[handler.stream]
daemon_runner.do_action()
if __name__ == "__main__":
app = DisplayIPAddressDaemon()
app.run()
| 25.783626 | 103 | 0.634384 | 564 | 4,409 | 4.859929 | 0.448582 | 0.0332 | 0.008756 | 0.019701 | 0.035753 | 0.035753 | 0 | 0 | 0 | 0 | 0 | 0.02848 | 0.219551 | 4,409 | 170 | 104 | 25.935294 | 0.768091 | 0.329326 | 0 | 0.133333 | 0 | 0 | 0.080673 | 0.024374 | 0 | 0 | 0.010985 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.166667 | 0 | 0.4 | 0.011111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea26bdc3985dbf4452d8b80379dfcd779150d0ce | 1,703 | py | Python | app.py | hculpan/StarTradingCompany | 70f4ea42ad08253bdb9e26c770922883e44bdaa0 | [
"MIT"
] | null | null | null | app.py | hculpan/StarTradingCompany | 70f4ea42ad08253bdb9e26c770922883e44bdaa0 | [
"MIT"
] | null | null | null | app.py | hculpan/StarTradingCompany | 70f4ea42ad08253bdb9e26c770922883e44bdaa0 | [
"MIT"
] | null | null | null | import pygame
import random
from StarTradingCompany import MainScene
class MainApp:
def main_loop(self, width, height, fps):
random.seed()
pygame.init()
pygame.font.init()
screen = pygame.display.set_mode(
(width, height), pygame.SCALED)
pygame.display.set_caption("Star Trading Company")
clock = pygame.time.Clock()
no_keys_pressed = pygame.key.get_pressed()
active_scene = MainScene.MainScene(width, height)
while active_scene is not None:
# Event filtering
filtered_events = []
for event in pygame.event.get():
pressed_keys = no_keys_pressed
quit_attempt = False
if event.type == pygame.QUIT:
quit_attempt = True
elif event.type == pygame.KEYDOWN:
pressed_keys = pygame.key.get_pressed()
alt_pressed = pressed_keys[pygame.K_LALT] or \
pressed_keys[pygame.K_RALT]
if event.key == pygame.K_ESCAPE:
quit_attempt = True
elif event.key == pygame.K_F4 and alt_pressed:
quit_attempt = True
if quit_attempt and active_scene.Terminate():
pygame.quit()
return  # bug fix: leave the loop after quitting; later pygame calls would otherwise raise
filtered_events.append(event)
active_scene.ProcessInput(filtered_events, pressed_keys)
active_scene.Update()
active_scene.Render(screen)
active_scene = active_scene.next
pygame.display.flip()
clock.tick(fps)
app = MainApp()
app.main_loop(1200, 1071, 30)
| 29.877193 | 72 | 0.559014 | 180 | 1,703 | 5.083333 | 0.411111 | 0.096175 | 0.04918 | 0.04153 | 0.052459 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010138 | 0.362889 | 1,703 | 56 | 73 | 30.410714 | 0.83318 | 0.008808 | 0 | 0.075 | 0 | 0 | 0.011862 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.075 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea29bdea317026f374e6ba072090f874f716bbff | 5,243 | py | Python | polarishub_flask/server/__init__.py | Christian0210/polarishub_flask | 9ff616baaa7cb9c8c451d8d9c64f3c06b09b062b | [
"MIT"
] | 7 | 2019-08-29T13:38:46.000Z | 2020-07-01T15:04:35.000Z | polarishub_flask/server/__init__.py | Christian0210/polarishub_flask | 9ff616baaa7cb9c8c451d8d9c64f3c06b09b062b | [
"MIT"
] | null | null | null | polarishub_flask/server/__init__.py | Christian0210/polarishub_flask | 9ff616baaa7cb9c8c451d8d9c64f3c06b09b062b | [
"MIT"
] | 5 | 2019-08-29T03:15:24.000Z | 2019-09-29T07:18:24.000Z | import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
os.chdir(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from polarishub_flask.server.parser import printv
# printv(sys.path)
from flask import Flask, request, abort, send_file, render_template, redirect, url_for
from polarishub_flask.server import network  # bug fix: the module is referenced as "network" below, so the old "as server" alias broke every checkIP call
from polarishub_flask.server import file_handler
from polarishub_flask.server import myqrcode
import json
from polarishub_flask.server import help
os_name = os.name
platform = sys.platform
# printv("os_name:", os_name)
printv ("platform:", platform)
printv ("cwd:", os.getcwd())
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__, instance_relative_config=True, static_url_path='/static')
app.config.from_mapping(
SECRET_KEY='dev',
# DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# load the test config if passed in
app.config.from_mapping(test_config)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
# a simple page that says hello
@app.route('/hello')
def hello():
return 'Hello, World!'
@app.route('/')
def main():
if network.checkIP(request.remote_addr):
# From Host
return redirect("/files/")
else:
# From client
return redirect("/files/")
@app.route('/files/', defaults = {"filename":""})
@app.route('/files/<path:filename>', methods=['GET', 'POST'])
def file(filename):
if ".." in filename:
return abort(403)
printv("files/" + filename)
local_path = os.path.join(os.getcwd(), 'files', filename)
if platform=="win32":
local_path = local_path.replace("/", "\\")
printv (local_path)
is_admin = network.checkIP(request.remote_addr)
if os.path.isfile(local_path):
return send_file(local_path)
elif os.path.isdir(local_path):
return render_template('index.html', cwd = local_path.replace('\\', "\\\\") if platform=="win32" else local_path,
dirs = file_handler.get_dir(local_path), is_admin = is_admin,
user_settings = file_handler.get_settings(), ip = network.get_host_ip())
else:
abort(404)
@app.route('/opendir')
def opendir():
if network.checkIP(request.remote_addr):
local_path = request.values.get('dir')
printv(local_path)
if platform == "win32":
os.system("explorer {}".format(local_path))
elif platform == "darwin":
os.system("open {}".format(local_path))
else:
os.system("nautilus {}".format(local_path))
return "Success"
else:
return abort(403)
@app.route('/settings')
def open_setting():
if network.checkIP(request.remote_addr):
return render_template("settings.html", user_settings = file_handler.get_settings())
else:
return abort(403)
@app.route('/temp/<path:temppath>')
def temp(temppath):
file_path = os.path.join(os.getcwd(), 'temp', temppath)
return send_file(file_path)
@app.route('/qr', methods = ['POST'])
def qr():
file_path = request.form["filepath"]
# file_path = request.form.get('filepath')
printv(file_path, hash(file_path))
file_name = str(hash(file_path)) + ".png"
printv(file_name)
network_path = "http://{}:{}".format(network.get_host_ip(), request.host[request.host.find(":")+1:]) + file_path
printv("network_path", network_path)
return render_template("qrcode.html", filepath=myqrcode.generateCode(network_path, file_name)[1], filename=file_path, user_settings = file_handler.get_settings())
@app.route("/about")
def about():
return redirect('/static/about.html')
# return render_template("about.html")
@app.route('/update_settings', methods = ["POST"])
def update_settings():
if network.checkIP(request.remote_addr):
if file_handler.update_settings(request.form):
return redirect("/")
else:
return abort(500)
else:
return abort(403)
@app.route('/halt')
def halt():
if network.checkIP(request.remote_addr):
printv("Halting")
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
return "PolarisHub shutting down..."
else:
return abort(403)
@app.route('/help')
def help_page():
return redirect('/static/help.html')
# return render_template('help.html', help_content = help.help_content)
return app | 35.187919 | 170 | 0.609002 | 624 | 5,243 | 4.942308 | 0.24359 | 0.043774 | 0.040856 | 0.052529 | 0.209792 | 0.16537 | 0.019455 | 0 | 0 | 0 | 0 | 0.007748 | 0.261492 | 5,243 | 149 | 171 | 35.187919 | 0.78874 | 0.087164 | 0 | 0.215517 | 0 | 0 | 0.100147 | 0.014037 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0.008621 | 0.077586 | 0.025862 | 0.353448 | 0.086207 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea2c17521d98033f81eeb71545bc9b118af0c891 | 3,934 | py | Python | playlist/views.py | Arvind-4/Membership- | 09b26cd503f77d1be0d577052bd20233e7790446 | [
"MIT"
] | 2 | 2022-01-21T11:28:43.000Z | 2022-01-21T18:35:25.000Z | playlist/views.py | Arvind-4/Membership- | 09b26cd503f77d1be0d577052bd20233e7790446 | [
"MIT"
] | null | null | null | playlist/views.py | Arvind-4/Membership- | 09b26cd503f77d1be0d577052bd20233e7790446 | [
"MIT"
] | null | null | null | from django.shortcuts import redirect, render
from django.http import Http404, HttpResponse
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from videos.models import Video
from .forms import (
PlayListCreateForm,
)
from .models import Playlist
# Create your views here.
@login_required
def playlist_create_view(request, *args, **kwargs):
form = PlayListCreateForm(request.POST or None)
context = {
'form': form
}
if form.is_valid():
title = form.cleaned_data.get('title')
obj = Playlist.objects.create(
user_id=request.user.id,
title=title
)
if request.htmx:
context['object'] = obj
return render(request, 'playlist/snippits/list-inline.html', context)
# return redirect('playlist-list')
return render(request, 'playlist/create-view.html', context=context)
@login_required
def playlist_edit_view(request, db_id, user_id):
qs = Playlist.objects.filter(db_id=db_id, user_id=user_id)
if not qs.exists():
raise Http404
initial_data = {
'title': qs.first().title
}
obj_old = qs.first()
form = PlayListCreateForm(request.POST or None, initial=initial_data)
context = {
'form': form,
'object': obj_old
}
if form.is_valid():
new_title = form.cleaned_data.get('title')
obj = qs.first()
obj.title = new_title
obj.save()
if request.htmx:
context['message'] = True
context['object'] = obj
return render(request, 'playlist/snippits/list-inline.html', context)
return render(request, 'playlist/edit-view.html', context)
@login_required
def playlist_list_view(request, *args, **kwargs):
qs = Playlist.objects.filter(user_id=request.user.id)
if qs.exists():
obj = qs
else:
obj = []
context = {
'object_list': list(obj) or []
}
return render(request, 'playlist/list-view.html', context=context)
@login_required
def playlist_detail_view(request, user_id, db_id, *args, **kwargs):
obj = Playlist.objects.filter(user_id=user_id, db_id=db_id)
if not obj.exists():
raise Http404
context = {
'object': obj.first(),
'video_object_list': obj.first().get_videos()
}
return render(request, 'playlist/detail-view.html', context)
@login_required
def playlist_delete_view(request, user_id, db_id, *args, **kwargs):
qs = Playlist.objects.filter(db_id=db_id, user_id=user_id)
deleted = False
if qs.exists():
qs.first().delete()
deleted = True
if deleted:
return HttpResponse('')
else:
raise Http404
@login_required
def playlist_add_videos(request, user_id, db_id, *args, **kwargs):
obj = Playlist.objects.filter(user_id=user_id, db_id=db_id)
if not obj.exists():
raise Http404
context = {
'object': obj.first(),
'object_list': list(Video.objects.filter(user_id=request.user.id))
}
if request.method == 'POST':
video_list = request.POST.getlist('playlist_videos')
# exists_flag = obj.first().exists_or_not(obj=obj.first(), url_extracted=video_list)
saved, qs_object = obj.first().add_video_to_playlist(obj=obj.first(), value=video_list)
if request.htmx:
context['video_object_list'] = qs_object.get_videos()
return render(request, 'playlist/snippits/detail-inline.html', context=context)
return render(request, 'playlist/add-video.html', context=context)
@login_required
def playlist_delete_video(request, user_id, db_id, host_id, *args, **kwargs):
obj = Playlist.objects.filter(user_id=user_id, db_id=db_id)
if not obj.exists():
raise Http404
obj.first().host_ids.remove(host_id)
obj.first().save()
return HttpResponse('') | 23.698795 | 95 | 0.647941 | 502 | 3,934 | 4.900398 | 0.169323 | 0.05122 | 0.029268 | 0.087805 | 0.495122 | 0.454878 | 0.380894 | 0.306911 | 0.227642 | 0.227642 | 0 | 0.005943 | 0.230046 | 3,934 | 166 | 96 | 23.698795 | 0.806207 | 0.035333 | 0 | 0.4 | 0 | 0 | 0.094409 | 0.058808 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea2d226e49a7677d73e470c6c2bbffb106fedcbc | 1,551 | py | Python | checkov/terraform/checks/data/BaseCloudsplainingIAMCheck.py | antonblr/checkov | 9415c6593c537945c08f7a19f28bdd8b96966f67 | [
"Apache-2.0"
] | 3 | 2021-04-19T17:17:21.000Z | 2021-09-06T06:31:09.000Z | checkov/terraform/checks/data/BaseCloudsplainingIAMCheck.py | antonblr/checkov | 9415c6593c537945c08f7a19f28bdd8b96966f67 | [
"Apache-2.0"
] | 16 | 2021-03-09T07:38:38.000Z | 2021-06-09T03:53:55.000Z | checkov/terraform/checks/data/BaseCloudsplainingIAMCheck.py | antonblr/checkov | 9415c6593c537945c08f7a19f28bdd8b96966f67 | [
"Apache-2.0"
] | 1 | 2022-01-06T08:04:56.000Z | 2022-01-06T08:04:56.000Z | import json
import logging
from abc import abstractmethod
from cloudsplaining.scan.policy_document import PolicyDocument
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.common.multi_signature import multi_signature
from checkov.terraform.checks.data.base_check import BaseDataCheck
from checkov.terraform.checks.utils.iam_terraform_document_to_policy_converter import \
convert_terraform_conf_to_iam_policy
class BaseCloudsplainingIAMCheck(BaseDataCheck):
def __init__(self, name, id):
super().__init__(name=name, id=id, categories=CheckCategories.IAM, supported_data=['aws_iam_policy_document'])
def scan_data_conf(self, conf):
key = 'statement'
if key in conf.keys():
try:
converted_conf = convert_terraform_conf_to_iam_policy(conf)
policy = PolicyDocument(converted_conf)
violations = self.cloudsplaining_analysis(policy)
except Exception as e:
# this might occur with templated iam policies where ARN is not in place or similar
logging.debug("could not run cloudsplaining analysis on policy {}", conf)
return CheckResult.UNKNOWN
if violations:
logging.debug("detailed cloudsplainging finding: {}", json.dumps(violations))
return CheckResult.FAILED
return CheckResult.PASSED
@multi_signature()
@abstractmethod
def cloudsplaining_analysis(self, policy):
raise NotImplementedError()
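# Minimal subclass sketch. The check name/id are hypothetical, and the
# allows_privilege_escalation attribute is assumed from cloudsplaining's
# PolicyDocument API; returning a truthy violation list fails the check above.
# class CloudSplainingExampleCheck(BaseCloudsplainingIAMCheck):
#     def __init__(self):
#         super().__init__(name="Ensure IAM policy does not allow privilege escalation",
#                          id="CKV_AWS_EXAMPLE")
#     def cloudsplaining_analysis(self, policy):
#         return policy.allows_privilege_escalation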
| 41.918919 | 118 | 0.715023 | 171 | 1,551 | 6.263158 | 0.479532 | 0.041083 | 0.031746 | 0.048553 | 0.05789 | 0.05789 | 0 | 0 | 0 | 0 | 0 | 0 | 0.220503 | 1,551 | 36 | 119 | 43.083333 | 0.885856 | 0.052224 | 0 | 0 | 0 | 0 | 0.080381 | 0.015668 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0.033333 | 0.266667 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea2d3728b55efd8c1486270c3cd39997ebe31614 | 4,229 | py | Python | pygks/ae_backup.py | sbxzy/pygks_package | 9e2c4910ee0eb83e6fa710f97aa39dde285bc761 | [
"BSD-3-Clause"
] | null | null | null | pygks/ae_backup.py | sbxzy/pygks_package | 9e2c4910ee0eb83e6fa710f97aa39dde285bc761 | [
"BSD-3-Clause"
] | null | null | null | pygks/ae_backup.py | sbxzy/pygks_package | 9e2c4910ee0eb83e6fa710f97aa39dde285bc761 | [
"BSD-3-Clause"
] | null | null | null | from numpy import array, matrix, diag, exp, inner, nan_to_num
from numpy.core.umath_tests import inner1d
from numpy import argmin  # array is already imported above
class GKS:
"""Gaussian kernel smoother to transform any clustering method into regression. setN is the list containing numpy arrays which are the weights of clustering centors.
populations is a list of integers of cluster populations. standard_variances is the list of real
numbers meaning the standard variances of the dataset along each dimension. smooth is None or real number.
While set to None, an SSL procedure will be employed. For details, see the responses() method."""
sv_kernel = None
setN = None #:Weights of the clustering centers, after instance initialization, it will be a list data structure.
Y = 1 #:Number of response variables.
percentages = None #:Distribution of the cluster populations.
xdim = None #:Dimension of the explanatory variables.
ydim = None #:Dimension of the response variables.
__global = True
smooth = None #:Smooth parameter.
__S = 0.0
K = 5 #: Number of clustering centers for smooth parameter calculation.
def __init__(self, setN, populations, standard_variances, Y_number, smooth = None, K = 5):
if len(setN[0])!=len(standard_variances):
print('ill GKS initialization')
else:
self.sv_kernel = matrix(diag(array(standard_variances)[:-1*Y_number]**-1.0))
self.setN = []
self.Y = []
for each in setN:
self.setN.append(each[:-1*Y_number])
self.Y.append(each[-1*Y_number:])
self.Y = matrix(self.Y).T
self.percentages = array(populations) / float(sum(populations))
self.setN = array(self.setN)
self.xdim = float(len(setN[0]) - Y_number)
self.ydim = float(Y_number)
self.smooth = smooth
self.K = K
def response_1s(self, point):
dif_vectors = self.setN - point
dif_and_varianced = array(matrix(dif_vectors)*self.sv_kernel)
dif_traces = inner1d(dif_and_varianced , dif_vectors)
weights = exp(-0.5*self.__S*dif_traces)
results = (self.Y*(matrix(self.percentages * weights).T))/(inner(self.percentages, weights))
return array(results.T)[0]
def responses(self, points, prototypes = None):
"""points is a list or array of numpy arrays, and this method returns the regression results
of the dataset points. If the smooth parameter is initialized as None, the prototypes parameter
will be required as a list or array of clustering centers in the form of numpy arrays, which is genertated
by the user chosen clustering method on the same dataset to the one specified by points variable."""
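# Usage sketch (data names are hypothetical): with smooth=None the clustering
# centers must be passed so the smoothing constant can be estimated from them:
#   gks = GKS(setN, populations, standard_variances, Y_number=1, smooth=None)
#   predictions = gks.responses(test_points, prototypes=cluster_centers)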
if self.smooth == None:
self.K = min(self.K, len(prototypes))  # bug fix: bound K by the number of prototypes, not the list object
accumulated_traces = 0.0
for point in prototypes:
dif_vectors = self.setN - point
dif_and_varianced = array(matrix(dif_vectors)*self.sv_kernel)
dif_traces = inner1d(dif_and_varianced , dif_vectors)
nn_index = argmin(dif_traces)
accumulated_traces += float(dif_traces[nn_index])
for i in range(self.K - 1):
dif_traces[nn_index] = float('inf')
nn_index = argmin(dif_traces)
accumulated_traces += float(dif_traces[nn_index])
self.__S = len(self.setN)*self.xdim/accumulated_traces
if self.__S < 0.0:
self.__S = 0.0
else:
self.__S = len(self.setN)**(-2.0*self.smooth)
results = []
if self.ydim == 1:
for each in points:
results.append(self.response_1s(each)[0])
else:
for each in points:
results.append(self.response_1s(each))
return results
if __name__ == '__main__':
testgks = GKS([array([1, 2, 2,3]), array([2, 3, 1,5])], array([1, 2]), array([1, 2, 3,1]), 2, smooth = -0.4)
print(testgks.response_1s(array([1,2])))
print(testgks.responses([array([1,2]),array([2,0])]))
| 49.752941 | 169 | 0.622133 | 566 | 4,229 | 4.510601 | 0.254417 | 0.028202 | 0.013709 | 0.018801 | 0.212299 | 0.188797 | 0.188797 | 0.170779 | 0.170779 | 0.170779 | 0 | 0.018385 | 0.279735 | 4,229 | 84 | 170 | 50.345238 | 0.819764 | 0.279735 | 0 | 0.217391 | 0 | 0 | 0.011026 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.043478 | 0 | 0.275362 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea3147dd6326d0821a546f281552207e03f911e6 | 1,887 | py | Python | supports/pyload/src/pyload/plugins/downloaders/ZDF.py | LuckyNicky/pycrawler | 4b3fe2f6e8e51f236d95a64a89a44199e4e97743 | [
"Apache-2.0"
] | 1 | 2020-04-02T17:03:39.000Z | 2020-04-02T17:03:39.000Z | supports/pyload/src/pyload/plugins/downloaders/ZDF.py | LuckyNicky/pycrawler | 4b3fe2f6e8e51f236d95a64a89a44199e4e97743 | [
"Apache-2.0"
] | null | null | null | supports/pyload/src/pyload/plugins/downloaders/ZDF.py | LuckyNicky/pycrawler | 4b3fe2f6e8e51f236d95a64a89a44199e4e97743 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import xml.etree.ElementTree as etree
from ..base.downloader import BaseDownloader
# Based on zdfm by Roland Beermann (http://github.com/enkore/zdfm/)
class ZDF(BaseDownloader):
__name__ = "ZDF Mediathek"
__type__ = "downloader"
__version__ = "0.89"
__status__ = "testing"
__pyload_version__ = "0.5"
__pattern__ = r"http://(?:www\.)?zdf\.de/ZDFmediathek/\D*(\d+)\D*"
__config__ = [("enabled", "bool", "Activated", True)]
__description__ = """ZDF.de downloader plugin"""
__license__ = "GPLv3"
__authors__ = []
XML_API = "http://www.zdf.de/ZDFmediathek/xmlservice/web/beitragsDetails?id={}"
@staticmethod
def video_key(video):
return (
int(video.findtext("videoBitrate", "0")),
any(f.text == "progressive" for f in video.iter("facet")),
)
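# Sort key used in process() below: candidates are ordered by numeric videoBitrate,
# with progressive-delivery variants winning ties, and sorted(...)[-1] takes the best.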
@staticmethod
def video_valid(video):
return (
video.findtext("url").startswith("http")
and video.findtext("url").endswith(".mp4")
and video.findtext("facets/facet").startswith("progressive")
)
@staticmethod
def get_id(url):
return int(re.search(r"\D*(\d{4,})\D*", url).group(1))
def process(self, pyfile):
id = self.get_id(pyfile.url)
url = self.XML_API.format(id)
xml = etree.fromstring(self.load(url, decode=False))
status = xml.findtext("./status/statuscode")
if status != "ok":
self.fail(self._("Error retrieving manifest"))
video = xml.find("video")
title = video.findtext("information/title")
pyfile.name = title.encode("ascii", errors="replace").decode("ascii")  # keep the name a str after stripping non-ASCII characters
target_url = sorted(
(v for v in video.iter("formitaet") if self.video_valid(v)),
key=self.video_key,
)[-1].findtext("url")
self.download(target_url)
| 29.030769 | 83 | 0.598834 | 220 | 1,887 | 4.9 | 0.504545 | 0.060297 | 0.018553 | 0.022263 | 0.044527 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008374 | 0.240594 | 1,887 | 64 | 84 | 29.484375 | 0.743894 | 0.046105 | 0 | 0.108696 | 0 | 0 | 0.208125 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.065217 | 0.065217 | 0.478261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea34bbffa972622391b1b4e8cdbd765dead3a998 | 2,335 | py | Python | test/testderivatives.py | aaiijmrtt/net | 92594b0bb65fc721eabfedcfccfc797ea5a475c7 | [
"MIT"
] | null | null | null | test/testderivatives.py | aaiijmrtt/net | 92594b0bb65fc721eabfedcfccfc797ea5a475c7 | [
"MIT"
] | null | null | null | test/testderivatives.py | aaiijmrtt/net | 92594b0bb65fc721eabfedcfccfc797ea5a475c7 | [
"MIT"
] | null | null | null | import sys, os, numpy, unittest
import net
class DerivativesTestCase(unittest.TestCase):
conformists = None
def setUp(self):
self.conformists = [net.Step, net.Sigmoid, net.HardHyperbolicTangent, net.RectifiedLinearUnit, net.ParametricRectifiedLinearUnit, net.HardShrink, net.SoftShrink, net.SoftPlus, net.ShiftScale, net.HyperbolicTangent, net.SoftSign]
self.rebels = [net.SoftMax]
def testconformists(self):
epsilon = 0.0001
delta = 0.0000001
for conformist in self.conformists:
for i in range(1, 100):
conformer = conformist(i)
inputvector = numpy.random.rand(i, 1)
conformer.feedforward(inputvector)
derivativevector = conformer.backpropagate(numpy.ones((i, 1), dtype = float))
deltavector = numpy.empty((i, 1), dtype = float)
for j in range(i):
epsilonvector = numpy.zeros((i, 1), dtype = float)
epsilonvector[j][0] = epsilon
deltavector[j][0] = numpy.divide(numpy.subtract(conformer.feedforward(numpy.add(inputvector, epsilonvector)), conformer.feedforward(numpy.subtract(inputvector, epsilonvector))), 2.0 * epsilon)[j][0]
self.assertTrue(numpy.linalg.norm(numpy.subtract(deltavector, derivativevector)) < delta, 'backpropagate derivative error in class %s' %conformist)
conformer = None
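# The loop above is a central finite-difference gradient check: for each coordinate j
# it compares the analytic backpropagated derivative against
# (f(x + eps*e_j) - f(x - eps*e_j)) / (2*eps), which approximates df/dx_j with O(eps^2) error.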
def testrebels(self):
epsilon = 0.0001
delta = 0.05
for rebel in self.rebels:
for i in range(500, 525):
rebeler = rebel(i)
inputvector = numpy.random.rand(i, 1)
rebeler.feedforward(inputvector)
derivativevector = rebeler.backpropagate(numpy.ones((i, 1), dtype = float))
deltavector = numpy.empty((i, 1), dtype = float)
for j in range(i):
epsilonvector = numpy.zeros((i, 1), dtype = float)
epsilonvector[j][0] = epsilon
deltavector[j][0] = numpy.divide(numpy.subtract(rebeler.feedforward(numpy.add(inputvector, epsilonvector)), rebeler.feedforward(numpy.subtract(inputvector, epsilonvector))), 2.0 * epsilon)[j][0]
self.assertTrue(numpy.linalg.norm(numpy.subtract(deltavector, derivativevector)) < delta, 'backpropagate derivative error in class %s' %rebel)
rebeler = None
def tearDown(self):
self.conformists = None
self.rebels = None
if __name__ == '__main__':
numpy.random.seed(1)
suite = unittest.TestLoader().loadTestsFromTestCase(DerivativesTestCase)
unittest.TextTestRunner(verbosity = 9).run(suite)
| 43.240741 | 230 | 0.728051 | 285 | 2,335 | 5.936842 | 0.291228 | 0.009456 | 0.024823 | 0.042553 | 0.515366 | 0.464539 | 0.438534 | 0.404255 | 0.404255 | 0.404255 | 0 | 0.025513 | 0.143897 | 2,335 | 53 | 231 | 44.056604 | 0.82091 | 0 | 0 | 0.26087 | 0 | 0 | 0.0394 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 1 | 0.086957 | false | 0 | 0.043478 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea3513af163ee4858ccb9507633f85bb7430d30a | 2,192 | py | Python | utils/relisten.py | LegNBass/dead_discord | 7254f035a424e101f5c58c914505720dcbe7cb72 | [
"MIT"
] | null | null | null | utils/relisten.py | LegNBass/dead_discord | 7254f035a424e101f5c58c914505720dcbe7cb72 | [
"MIT"
] | null | null | null | utils/relisten.py | LegNBass/dead_discord | 7254f035a424e101f5c58c914505720dcbe7cb72 | [
"MIT"
] | null | null | null | import requests
import discord
class RelistenAPI:
base_url = "https://api.relisten.net"
api_prefix = "api/v2"
headers = {
"accept": "application/json"
}
def __init__(self, artist='grateful-dead'):
self.artist = artist
@property
def artists(self):
return requests.get(
f"{self.base_url}/{self.api_prefix}/artists",
headers=self.headers
).json()
def show(self, show_date):
response = requests.get(
f"{self.base_url}/{self.api_prefix}/artists/{self.artist}/shows/{show_date}",
headers=self.headers
)
if response.status_code == 200:
try:
source = next(iter(
response.json().get("sources", [])
))
return 200, source
except StopIteration:
# bug fix: callers unpack (code, payload); treat a show with no sources as not found
return 404, {}
else:
return response.status_code, response.json()
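# Return-shape sketch (payload keys inferred from format_show below):
#   RelistenAPI().show('1977-05-08') -> (200, {'description': ..., 'links': [...], 'sets': [...]})
# or (status_code, error_json) for a non-200 response.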
def format_show(self, date):
code, sources = self.show(date)
if code == 200:
url = sources['links'][0]['url']
description = sources['description']
tracks = [
track for _set in sources['sets'] for track in _set['tracks']
]
# print(tracks)
embed = discord.Embed(
title=date,
description=description,
url=url
)
for ix, track in enumerate(tracks, 1):
embed.add_field(
name=ix,
value=f"[{track['title']}]({track['mp3_url'].replace('mp3', 'shn').replace('download', 'details')})",
inline=False
)
return embed
if __name__ == "__main__":
# Shell entrypoint for testing
import sys
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'show',
help="The date of the show in YYYY-MM-DD format"
)
args = parser.parse_args()
api = RelistenAPI()
try:
sys.stdout.write(
json.dumps(api.show(args.show)[1])
)
except IOError:
pass
| 26.731707 | 121 | 0.511861 | 222 | 2,192 | 4.923423 | 0.418919 | 0.019213 | 0.021958 | 0.029277 | 0.078683 | 0.078683 | 0.078683 | 0.078683 | 0.078683 | 0.078683 | 0 | 0.010885 | 0.37135 | 2,192 | 81 | 122 | 27.061728 | 0.782293 | 0.019161 | 0 | 0.058824 | 0 | 0.014706 | 0.16721 | 0.088961 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0.014706 | 0.073529 | 0.014706 | 0.264706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea37fec737000033eb642b2ce9d97f9adec17274 | 638 | py | Python | pyleecan/Methods/Slot/Slot/comp_height.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 95 | 2019-01-23T04:19:45.000Z | 2022-03-17T18:22:10.000Z | pyleecan/Methods/Slot/Slot/comp_height.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 366 | 2019-02-20T07:15:08.000Z | 2022-03-31T13:37:23.000Z | pyleecan/Methods/Slot/Slot/comp_height.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 74 | 2019-01-24T01:47:31.000Z | 2022-02-25T05:44:42.000Z | # -*- coding: utf-8 -*-
from numpy import array
def comp_height(self, Ndisc=200):
"""Compute the height of the Slot.
Caution, the bottom of the Slot is an Arc
Parameters
----------
self : Slot
A Slot object
Ndisc : int
Number of point to discretize the lines
Returns
-------
Htot: float
Height of the slot [m]
"""
Rbo = self.get_Rbo()
surf = self.get_surface()
point_list = surf.discretize(Ndisc)
point_list = array(point_list)
if self.is_outwards():
return max(abs(point_list)) - Rbo
else:
return Rbo - min(abs(point_list))
| 18.764706 | 47 | 0.584639 | 86 | 638 | 4.232558 | 0.546512 | 0.123626 | 0.074176 | 0.082418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008949 | 0.299373 | 638 | 33 | 48 | 19.333333 | 0.805369 | 0.409091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea38bdcf0423c414d15169911b768a941eadfe88 | 6,087 | py | Python | happy_control/Lie_tools.py | ViktorRusakov/napalm-control | a8bcb6d91f23e29464302ad0c3a0d7a04a756b5c | [
"MIT"
] | null | null | null | happy_control/Lie_tools.py | ViktorRusakov/napalm-control | a8bcb6d91f23e29464302ad0c3a0d7a04a756b5c | [
"MIT"
] | null | null | null | happy_control/Lie_tools.py | ViktorRusakov/napalm-control | a8bcb6d91f23e29464302ad0c3a0d7a04a756b5c | [
"MIT"
] | null | null | null | import pickle
import sympy as sym
import numpy as np
from functools import reduce
from itertools import groupby
def lie_bracket(element_1, element_2):
"""
Unfolds a Lie bracket. It is assumed that the second element is homogeneous (the bracket grows to the left).
Returns a string encoding the result of unfolding: each addend is represented as a sequence of indices
(separated by '.'), the addends are separated by '|', and each addend carries a sign (assumed
to be '+' when no sign is written).
Example 1:
lie_bracket('1.2', '3') = [[xi_{1}, xi_{2}], xi_{3}] = '1.2.3|-2.1.3|-3.1.2|3.2.1', where
'1.2.3|-2.1.3|-3.1.2|3.2.1' = xi_{123} - xi_{213} - xi_{312} + xi_{321}
Example 2:
lie_bracket('1.0|-1.1', '3') = [xi_{10} - xi_{11}, xi_3] = '1.0.3|-1.1.3|-3.1.0|3.1.1'
"""
if '.' not in element_1:
# if the first element is homogeneous we know the result already
return '|'.join([element_1 + '.' + element_2, '-' + element_2 + '.' + element_1])
elif '|' not in element_1:
# if the first element is another Lie bracket we need to unfold it first
element_1 = element_1.split('.')
element_1 = lie_bracket(element_1[0], element_1[1])
moments = element_1.split('|')
first_addend = [m + '.' + element_2 for m in moments]
moments = [m.replace('-', '') if m.startswith('-') else '-' + m for m in moments]
second_addend = ['-' + element_2 + '.' + m[1:] if m.startswith('-') else element_2 + '.' + m for m in moments]
res = '|'.join(first_addend + second_addend)
return res
def unfold_lie_bracket(lie_element):
"""
Generalization of lie_bracket function - unfolds brackets with nested brackets.
"""
if len(lie_element) in [1, 2]:
return lie_element
elif ']' not in lie_element:
moment_1, moment_2 = lie_element.split('.')
return lie_bracket(moment_1, moment_2)
else:
bracket = lie_element.split(']')
res = reduce(lambda x, y: lie_bracket(x, y), bracket)
return res
def calculate_lie_elements(max_order):
"""
Calculates Lie elements up to max_order (without including Jacobi identity). Returns a dictionary where key
represents order and value is a dictionary where key is an encoded Lie element and value is its representation
in R^p (as numpy array).
Example:
max_order = 3
res = calculate_lie_elements(max_order) =>
res = {
'1': {
'0': np.array([[1]])
}
'2': {
'1': np.array([[1], [0]])
}
'3': {
'2': np.array([[1], [0], [0], [0]]),
'0.1': np.array([[0], [1], [-1], [0]]),
'1.0': np.array([[0], [-1], [1], [0]])
}
}
Lie element encoding example:
1) '1.2' = [xi_{1}, xi_{2}]
2) '1.2]3]5' = [[[xi_{1}, xi_{2}], xi_{3}], xi_{5}]
"""
res = {}
with open('api/moments_grading.pickle', 'rb') as f:
moments = pickle.load(f)
for order in range(1, max_order + 1):
order_moments = moments[order]
dim = len(order_moments)
lie_elements = {}
for index_set in order_moments.keys():
if '.' not in index_set:
# homogeneous element can be added already
lie_elements[index_set] = order_moments[index_set]
continue
else:
index_set = index_set.split('.')
if index_set[0] == index_set[1]:
continue
# find element of current length from already obtained Lie elements to check for antisymmetry
# (for outer left elements of the bracket, additionally the other ones have to match)
with_current_length = filter(lambda x: len(x) == len(index_set), lie_elements.keys())
for value in with_current_length:
if index_set[:2] == value[:2][::-1] and index_set[2:] == value[2:]:
break
else:
if len(index_set) == 2:
as_bracket = '.'.join(index_set)
else:
as_bracket = index_set[0] + '.' + ']'.join(index_set[1:])
lie_repr = np.zeros((dim, 1), dtype=int)
lie_unfolded = unfold_lie_bracket(as_bracket)
lie_unfolded = lie_unfolded.split('|')
for moment in lie_unfolded:
if moment.startswith('-'):
lie_repr -= order_moments[moment[1:]]
else:
lie_repr += order_moments[moment]
lie_elements[as_bracket] = lie_repr
res[order] = lie_elements
with open('api/lie_elements.pickle', 'wb') as f:
pickle.dump(res, f)
return res
def get_basis_lie_elements(max_order):
"""
Constructs a basis of graded Lie algebra up to max_order.
Returns a dictionary where key represents the order of the grading and value is basis data of that grading
represented as a dictionary where key is encoded Lie element and value (dictionary with key 'repr')
is its representation in R^p (as numpy array).
"""
res = {}
with open('api/lie_elements.pickle', 'rb') as f:
lie_elements = pickle.load(f)
for order in range(1, max_order + 1):
grouped = [list(g) for k, g in groupby(lie_elements[order].items(), key=lambda x: len(x[0]))]
basis_elements = {}
for group in grouped:
lie, cols = zip(*group)
mat = np.concatenate(cols).reshape((-1, len(cols)), order='F')
_, inds = sym.Matrix(mat).rref()
for ind in inds:
basis_elements[lie[ind]] = {
'repr': cols[ind]
}
res[order] = basis_elements
with open('api/lie_basis_new.pickle', 'wb') as lb:
pickle.dump(res, lb)
return res
class LieElementsNotFound(Exception):
pass
class SystemIsTooDeep(Exception):
pass
| 36.668675 | 118 | 0.563332 | 839 | 6,087 | 3.93683 | 0.214541 | 0.036331 | 0.00545 | 0.004844 | 0.210718 | 0.12413 | 0.069634 | 0.069634 | 0.069634 | 0.029064 | 0 | 0.035579 | 0.307376 | 6,087 | 165 | 119 | 36.890909 | 0.747865 | 0.35354 | 0 | 0.193182 | 0 | 0 | 0.036344 | 0.025655 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0.022727 | 0.056818 | 0 | 0.204545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea3bcedddcdfe8755222fa63f0a7e9ba5c1c7a2d | 1,948 | py | Python | toy/routing.py | osantana/toy | a87687582aacb4172da76dc5b9c578d362e73e28 | [
"Unlicense"
] | 16 | 2019-02-12T19:50:11.000Z | 2022-03-26T18:08:28.000Z | toy/routing.py | osantana/toy | a87687582aacb4172da76dc5b9c578d362e73e28 | [
"Unlicense"
] | null | null | null | toy/routing.py | osantana/toy | a87687582aacb4172da76dc5b9c578d362e73e28 | [
"Unlicense"
] | null | null | null | import re
from .exceptions import InvalidRouteHandlerException
from . import handlers
class Route:
def __init__(self, path, handler):
if not path:
raise ValueError('Invalid path')
self.path = path
if not callable(handler):
raise InvalidRouteHandlerException('Handlers must be callable objects')
self.handler = handler
self.pattern = re.compile(path)
self.path_arguments = {}
def match(self, path):
match = self.pattern.search(path)
if not match:
return
self.path_arguments.update(match.groupdict())
return self.path_arguments
def __repr__(self):
return f'<Route {self.path} {self.handler.__class__.__name__}>'
def __eq__(self, other):
return self.pattern == other.pattern and self.handler == other.handler
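# Usage sketch (the handler is one of the defaults imported above): named groups
# in the regex become path arguments; match() returns None when the pattern misses.
#   route = Route(r'/users/(?P<id>\d+)', handlers.not_found_handler)
#   route.match('/users/42')  # -> {'id': '42'}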
class Routes:
def __init__(
self,
routes=None,
not_found=handlers.not_found_handler,
internal_error=handlers.internal_error_handler,
unauthorized=handlers.unauthorized_handler,
unsupported_media_type=handlers.unsupported_media_type_handler,
):
if routes is None:
routes = []
self._routes = routes
self.not_found = not_found
self.internal_error = internal_error
self.unauthorized = unauthorized
self.unsupported_media_type = unsupported_media_type
def __len__(self):
return len(self._routes)
def __getitem__(self, item):
return self._routes[item]
def add(self, route: Route):
if [r for r in self._routes if r == route]:
raise ValueError('Duplicated route/handler')
self._routes.append(route)
def add_route(self, path, handler):
self.add(Route(path, handler))
def match(self, path):
return [route for route in self._routes if route.match(path) is not None]
| 27.43662 | 83 | 0.63809 | 224 | 1,948 | 5.272321 | 0.245536 | 0.060965 | 0.067739 | 0.03387 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.277207 | 1,948 | 70 | 84 | 27.828571 | 0.838778 | 0 | 0 | 0.039216 | 0 | 0 | 0.062628 | 0.017454 | 0 | 0 | 0 | 0 | 0 | 1 | 0.196078 | false | 0 | 0.058824 | 0.098039 | 0.431373 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea3c6c0c55b41cc197fbc3da88c6a70b229f2089 | 6,503 | py | Python | tools/dist/advisory.py | timgates42/subversion | 0f088f530747140c6783c2eeb77ceff8e8613c42 | [
"Apache-2.0"
] | 3 | 2017-01-03T03:20:56.000Z | 2018-12-24T22:05:09.000Z | tools/dist/advisory.py | timgates42/subversion | 0f088f530747140c6783c2eeb77ceff8e8613c42 | [
"Apache-2.0"
] | 3 | 2016-06-12T17:02:25.000Z | 2019-02-03T11:08:18.000Z | tools/dist/advisory.py | timgates42/subversion | 0f088f530747140c6783c2eeb77ceff8e8613c42 | [
"Apache-2.0"
] | 3 | 2017-01-21T00:15:13.000Z | 2020-11-04T07:23:50.000Z | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Send GPG-signed security advisory e-mails from an @apache.org address
to a known list of recipients, or write the advisory text in a form
suitable for publishing on http://subversion.apache.org/.
Usage: cd to the root directory of the advisory descriptions, then:
$ ${TRUNK_WC}/tools/dist/advisory.py send \
--username=<ASF-username> \
--revision=<dist-dev-revision-number>
--release-versions=<target-releases> \
--release-date=<expected-release-date> <CVE-number>...
or
$ ${TRUNK_WC}/tools/dist/advisory.py test \
(... --username, etc. as above)
or
$ ${TRUNK_WC}/tools/dist/advisory.py generate \
--destination=${SITE_WC}/publish/security \
<CVE-number>...
"""
from __future__ import absolute_import
import os
import sys
import argparse
import datetime
import getpass
import re
import security.parser
import security.adviser
import security.mailer
import security.mailinglist
ROOTDIR = os.path.abspath(os.getcwd())
NOTICE_TEMPLATE = 'notice-template.txt'
MAILING_LIST = 'pre-notifications.txt'
def parse_args(argv):
parser = argparse.ArgumentParser(
prog=os.path.basename(__file__), add_help=True,
description="""\
Send GPG-signed security advisory e-mails from an @apache.org address
to a known list of recipients, or write the advisory text in a form
suitable for publishing on http://subversion.apache.org/.
""")
parser.add_argument(
'command', action='store',
choices=['send', 'test', 'generate'],
help=('send: send mail; '
'test: write the mail to standard output; '
'generate: write an advisory for the website'))
parser.add_argument(
'--username', action='store', required=False,
help='the @apache.org username of the sender')
parser.add_argument(
'--revision', action='store', required=False, type=int,
help=('revision on dist.a.o./repos/dist/dev/subversion '
'in which the patched tarballs are available'))
parser.add_argument(
'--release-versions', action='store', required=False,
help=('comma-separated list of future released versions '
'that will contain the fix(es)'))
parser.add_argument(
'--release-date', action='store', required=False,
help=('expected release date for the above mentioned'
' versions (in ISO format, YYYY-MM-DD)'))
parser.add_argument(
'--destination', action='store', required=False,
help=('the directory where the website advisory should be '
'written; usually ${SITE_WC}/publish/security'))
parser.add_argument('cve', nargs='+')
return parser.parse_args(argv)
def check_root():
if not os.path.isfile(os.path.join(ROOTDIR, NOTICE_TEMPLATE)):
sys.stderr.write('Missing file: ' + NOTICE_TEMPLATE + '\n')
sys.exit(1)
if not os.path.isfile(os.path.join(ROOTDIR, MAILING_LIST)):
sys.stderr.write('Missing file: ' + MAILING_LIST + '\n')
sys.exit(1)
def check_sendmail(args):
if (not (args.username and args.revision
and args.release_versions
and args.release_date and args.cve)
or args.destination):
sys.stderr.write(
'The "' + args.command + '" command requires the '
'following options:\n'
' --username, --revision, --release-versions, --release-date\n'
' and a list of CVE numbers.\n')
sys.exit(1)
args.release_versions = re.split(r'\s*,\s*', args.release_versions)
args.release_date = datetime.datetime.strptime(args.release_date,
'%Y-%m-%d')
def sendmail(really_send, args):
notice_template = os.path.join(ROOTDIR, NOTICE_TEMPLATE)
mailing_list = os.path.join(ROOTDIR, MAILING_LIST)
sender = args.username + '@apache.org'
notification = security.parser.Notification(ROOTDIR, *args.cve)
    mailer = security.mailer.Mailer(notification,
                                    sender,
notice_template,
args.release_date,
args.revision,
*args.release_versions)
message = mailer.generate_message()
recipients = security.mailinglist.MailingList(mailing_list)
if (not really_send):
sys.stdout.write(message.as_string())
return
password = getpass.getpass('Password for ' + args.username
+ ' at mail-relay.apache.org: ')
mailer.send_mail(message, args.username, password,
recipients=recipients)
def check_generate(args):
if (not (args.destination and args.cve)
or args.username or args.revision
or args.release_versions
or args.release_date):
sys.stderr.write(
'The "generate" command requires the '
'--destination option '
'and a list of CVE numbers.\n')
sys.exit(1)
if not os.path.isdir(args.destination):
sys.stderr.write(args.destination + ' is not a directory')
sys.exit(1)
def generate(args):
notification = security.parser.Notification(ROOTDIR, *args.cve)
    security.adviser.generate(notification, args.destination)
def main():
check_root()
args = parse_args(sys.argv[1:])
if args.command in ('send', 'test'):
check_sendmail(args)
sendmail(args.command == 'send', args)
elif args.command == 'generate':
check_generate(args)
generate(args)
if __name__ == '__main__':
main()
| 35.535519 | 76 | 0.64155 | 803 | 6,503 | 5.113325 | 0.286426 | 0.02679 | 0.028982 | 0.029226 | 0.239162 | 0.192401 | 0.150511 | 0.111544 | 0.106673 | 0.090112 | 0 | 0.002036 | 0.24481 | 6,503 | 182 | 77 | 35.730769 | 0.834046 | 0.231739 | 0 | 0.145299 | 0 | 0 | 0.247075 | 0.021178 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059829 | false | 0.025641 | 0.094017 | 0 | 0.17094 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea3cf3fc91e6aa38eae6b36bc6e389761fea9f65 | 826 | py | Python | commands/ckpt_fixer.py | Vichoko/aidio | df1c26047574fbe0a7b103ebc26687bc04739229 | [
"MIT"
] | 2 | 2019-08-20T04:46:11.000Z | 2021-02-16T13:19:13.000Z | commands/ckpt_fixer.py | Vichoko/aidio | df1c26047574fbe0a7b103ebc26687bc04739229 | [
"MIT"
] | null | null | null | commands/ckpt_fixer.py | Vichoko/aidio | df1c26047574fbe0a7b103ebc26687bc04739229 | [
"MIT"
] | null | null | null | """
Remove metric records on state_dict.
"""
import argparse
from pathlib import Path
import torch
state_dict_keys_to_remove = ['test_acc.total', 'test_acc.correct',
'val_acc.total', 'val_acc.correct',
'train_acc.total', 'train_acc.correct', ]
def main():
parser = argparse.ArgumentParser(description='')
parser.add_argument('--src_path', help='', )
parser.add_argument('--dest_path', help='', )
args = parser.parse_args()
src_path = Path(args.src_path)
dest_path = Path(args.dest_path)
ckpt = torch.load(src_path)
print('info: state_dict keys: {}'.format(ckpt['state_dict'].keys()))
for k in state_dict_keys_to_remove:
del ckpt['state_dict'][k]
torch.save(ckpt, dest_path)
if __name__ == '__main__':
main()
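# Example invocation (file names are illustrative placeholders):
#   python ckpt_fixer.py --src_path model.ckpt --dest_path model_fixed.ckpt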
| 25.8125 | 72 | 0.634383 | 108 | 826 | 4.509259 | 0.407407 | 0.110883 | 0.106776 | 0.061602 | 0.086242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.220339 | 826 | 31 | 73 | 26.645161 | 0.756211 | 0.043584 | 0 | 0 | 0 | 0 | 0.209987 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.2 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea3e73581d306dbbc6b608074bb212c082015ef9 | 4,086 | py | Python | databuilder/models/table_stats.py | jacobhjkim/amundsendatabuilder | 26a0d0a4ffe5bf004507c9d1598a5f08b30ecdf0 | [
"Apache-2.0"
] | 3 | 2021-02-09T13:52:03.000Z | 2022-02-26T02:36:02.000Z | databuilder/models/table_stats.py | jacobhjkim/amundsendatabuilder | 26a0d0a4ffe5bf004507c9d1598a5f08b30ecdf0 | [
"Apache-2.0"
] | 1 | 2021-02-08T23:21:04.000Z | 2021-02-08T23:21:04.000Z | databuilder/models/table_stats.py | youcandanch/amundsendatabuilder | f02c823c655d8fbfd32c334d7e72a3f3520e063a | [
"Apache-2.0"
] | 2 | 2021-02-23T18:23:35.000Z | 2022-03-18T15:12:25.000Z | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from typing import List, Optional
from databuilder.models.graph_node import GraphNode
from databuilder.models.graph_relationship import GraphRelationship
from databuilder.models.graph_serializable import GraphSerializable
from databuilder.models.table_metadata import ColumnMetadata
class TableColumnStats(GraphSerializable):
"""
Hive table stats model.
Each instance represents one row of hive watermark result.
"""
LABEL = 'Stat'
KEY_FORMAT = '{db}://{cluster}.{schema}' \
'/{table}/{col}/{stat_name}/'
STAT_Column_RELATION_TYPE = 'STAT_OF'
Column_STAT_RELATION_TYPE = 'STAT'
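    # For illustration, with the default db/cluster arguments below the key
    # expands to e.g. 'hive://gold.my_schema/my_table/my_col/avg/' (schema,
    # table, column and stat names here are made-up placeholders).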
def __init__(self,
table_name: str,
col_name: str,
stat_name: str,
stat_val: str,
start_epoch: str,
end_epoch: str,
db: str = 'hive',
cluster: str = 'gold',
schema: str = None
) -> None:
if schema is None:
self.schema, self.table = table_name.split('.')
else:
self.table = table_name
self.schema = schema
self.db = db
self.col_name = col_name
self.start_epoch = start_epoch
self.end_epoch = end_epoch
self.cluster = cluster
self.stat_name = stat_name
self.stat_val = str(stat_val)
self._node_iter = iter(self.create_nodes())
self._relation_iter = iter(self.create_relation())
def create_next_node(self) -> Optional[GraphNode]:
# return the string representation of the data
try:
return next(self._node_iter)
except StopIteration:
return None
def create_next_relation(self) -> Optional[GraphRelationship]:
try:
return next(self._relation_iter)
except StopIteration:
return None
def get_table_stat_model_key(self) -> str:
return TableColumnStats.KEY_FORMAT.format(db=self.db,
cluster=self.cluster,
schema=self.schema,
table=self.table,
col=self.col_name,
stat_name=self.stat_name)
def get_col_key(self) -> str:
# no cluster, schema info from the input
return ColumnMetadata.COLUMN_KEY_FORMAT.format(db=self.db,
cluster=self.cluster,
schema=self.schema,
tbl=self.table,
col=self.col_name)
def create_nodes(self) -> List[GraphNode]:
"""
Create a list of Neo4j node records
:return:
"""
node = GraphNode(
key=self.get_table_stat_model_key(),
label=TableColumnStats.LABEL,
attributes={
'stat_val': self.stat_val,
'stat_name': self.stat_name,
'start_epoch': self.start_epoch,
'end_epoch': self.end_epoch,
}
)
results = [node]
return results
def create_relation(self) -> List[GraphRelationship]:
"""
Create a list of relation map between table stat record with original hive table
:return:
"""
relationship = GraphRelationship(
start_key=self.get_table_stat_model_key(),
start_label=TableColumnStats.LABEL,
end_key=self.get_col_key(),
end_label=ColumnMetadata.COLUMN_NODE_LABEL,
type=TableColumnStats.STAT_Column_RELATION_TYPE,
reverse_type=TableColumnStats.Column_STAT_RELATION_TYPE,
attributes={}
)
results = [relationship]
return results
| 36.810811 | 88 | 0.543808 | 405 | 4,086 | 5.251852 | 0.234568 | 0.030089 | 0.039492 | 0.036671 | 0.172073 | 0.134462 | 0.078984 | 0.053597 | 0.053597 | 0.053597 | 0 | 0.001183 | 0.379589 | 4,086 | 110 | 89 | 37.145455 | 0.83787 | 0.094469 | 0 | 0.142857 | 0 | 0 | 0.031259 | 0.014385 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.059524 | 0.02381 | 0.297619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea3ee4e1bdd317fda22a50af0455e0acc8e76f27 | 1,035 | py | Python | Code/test.py | alefrancia/100-Days-Of-ML-Code-ale | 0d184cc0ff037f646c2e4521d211e3f3c66a8025 | [
"MIT"
] | null | null | null | Code/test.py | alefrancia/100-Days-Of-ML-Code-ale | 0d184cc0ff037f646c2e4521d211e3f3c66a8025 | [
"MIT"
] | null | null | null | Code/test.py | alefrancia/100-Days-Of-ML-Code-ale | 0d184cc0ff037f646c2e4521d211e3f3c66a8025 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
dataset = pd.read_csv('datasets/Data.csv')
X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, 3].values
# from sklearn.preprocessing import Imputer
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy="mean")
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
onehotencoder = OneHotEncoder(handle_unknown='ignore')
X = onehotencoder.fit_transform(X).toarray()
labelencoder_Y = LabelEncoder()
Y = labelencoder_Y.fit_transform(Y)
# from sklearn.cross_validation import train_test_split  (moved to sklearn.model_selection)
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.fit_transform(X_test)
| 28.75 | 88 | 0.774879 | 154 | 1,035 | 4.993506 | 0.318182 | 0.085826 | 0.06762 | 0.117035 | 0.041612 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014039 | 0.105314 | 1,035 | 35 | 89 | 29.571429 | 0.816415 | 0.09372 | 0 | 0 | 0 | 0 | 0.028877 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.272727 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea3ee4ff8e93d44e1b64924e800002226b791936 | 1,822 | py | Python | xendbg/gdbserver/protocol.py | nspin/pyxendbg | c3d39e35e3319188558c8b8fd5cedf812ea7d15a | [
"MIT"
] | null | null | null | xendbg/gdbserver/protocol.py | nspin/pyxendbg | c3d39e35e3319188558c8b8fd5cedf812ea7d15a | [
"MIT"
] | null | null | null | xendbg/gdbserver/protocol.py | nspin/pyxendbg | c3d39e35e3319188558c8b8fd5cedf812ea7d15a | [
"MIT"
] | null | null | null | import re
from xendbg.gdbserver.handler import handle
ack_re = re.compile(br'\+(?P<rest>.*)')
packet_re = re.compile(br'\$(?P<content>[^#]*)#(?P<checksum>[0-9a-f]{2})(?P<rest>.*)')
# breakin_re = re.compile(br'\x03(?P<rest>.*)')
def checksum(content):
return '{:02x}'.format(sum(content) % 256).encode('ascii')
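# Sanity check for the framing handled below: GDB remote serial packets look
# like b'$<payload>#<checksum>', the checksum being the payload byte sum
# modulo 256 rendered as two hex digits. E.g. sum(b'OK') == 154 == 0x9a:
assert checksum(b'OK') == b'9a'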
def protocol(send_raw, recv_raw, config):
ack_mode = True
expecting_ack = True
def send_packet(content):
nonlocal ack_mode
nonlocal expecting_ack
raw = b'$' + content + b'#' + checksum(content)
send_raw(raw)
if ack_mode:
expecting_ack = True
def packets():
nonlocal ack_mode
nonlocal expecting_ack
buf = b''
while True:
chunk = recv_raw()
if len(chunk) == 0:
if len(buf) != 0:
raise Exception('connection closed mid-packet:', buf)
return
buf += chunk
if expecting_ack:
m = ack_re.fullmatch(buf)
if m is None:
raise Exception('was expecting ack:', buf)
expecting_ack = False
buf = m['rest']
m = packet_re.fullmatch(buf)
if m is not None:
content = m['content']
if checksum(content) != m['checksum']:
raise Exception('invalid checksum:', buf)
buf = m['rest']
# Protocol expects ack after QStartNoAckMode and its response
if ack_mode:
send_raw(b'+')
if content == b'QStartNoAckMode':
send_packet(b'OK')
ack_mode = False
else:
yield content
handle(send_packet, packets(), config)
| 31.964912 | 86 | 0.50494 | 203 | 1,822 | 4.408867 | 0.330049 | 0.093855 | 0.036872 | 0.043575 | 0.151955 | 0.12067 | 0 | 0 | 0 | 0 | 0 | 0.010573 | 0.377058 | 1,822 | 56 | 87 | 32.535714 | 0.777974 | 0.057629 | 0 | 0.212766 | 0 | 0.021277 | 0.110852 | 0.033839 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0 | 0.042553 | 0.021277 | 0.170213 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea3f5d3a03c426f5d688ac516967d864ca74a145 | 11,708 | py | Python | src/cars/Car.py | remi2257/little-car-ai | 006f2f515d46dd9e94457c191f017a9f3d749fa8 | [
"MIT"
] | 2 | 2020-11-07T15:29:42.000Z | 2022-01-18T08:59:00.000Z | src/cars/Car.py | remi2257/little-car-ai | 006f2f515d46dd9e94457c191f017a9f3d749fa8 | [
"MIT"
] | null | null | null | src/cars/Car.py | remi2257/little-car-ai | 006f2f515d46dd9e94457c191f017a9f3d749fa8 | [
"MIT"
] | null | null | null | import math
import pygame
from math import cos, sin, radians, exp
from src.const import *
from src.objects.LIDAR import LIDAR
from .CarCommands import CommandGas, CommandDir
# Todo : should be moved !
weight_on_road = 10
boost_checkpoint = 250
class Car:
def __init__(self, track):
# INIT VARIABLES
self._track = track
self._theta = 0.0
self._speed = 0.0
self._speed_max = track.speed_max
self._x_speed = 0.0
self._y_speed = 0.0
self._rest_pos_x = 0.0
self._rest_pos_y = 0.0
self._n_speed = 0.0
self._fitness = 0
self._bonus_checkpoints = 0
# Set startPosition
self._x_init = track.init_car_x
self._y_init = track.init_car_y
# Count time outside road to penalize
self._time_outside_road = 0
self._on_road = True
self._last_dir_cmd = CommandDir.NONE
self._last_gas_cmd = CommandGas.OFF
# GEN CAR IMAGE
self._img = self._gen_car_img(path_audi)
if track.start_direction == Direction.RIGHT:
self._img = pygame.transform.rotate(self._img, -90.0)
self._actual_img = self._img
# SET POSITION
self._position_car = self._actual_img.get_rect()
self._position_car = self._position_car.move(self._x_init, self._y_init)
# GEN LIDAR
self._lidar_case_size = self._track.lidar_case_size
self._lidar_grid_car_x = width_grid_LIDAR // 2
self._lidar_grid_car_y = height_grid_LIDAR - offset_y_LIDAR - 1
self._lidar = LIDAR()
self._refresh_lidar()
# Checkpoints
self._checkpoints = self.set_checkpoints()
# Is aptly named
def actualize_direction_and_gas(self, new_commands):
for command in new_commands:
self.actualize_direction_or_gas(command)
def actualize_direction_or_gas(self, new_command):
if isinstance(new_command, CommandGas):
self.calculate_new_speed(new_command)
self._last_gas_cmd = new_command
elif isinstance(new_command, CommandDir):
self.calculate_new_angle(new_command)
self._actual_img = pygame.transform.rotate(self._img, self._theta)
new_rect = self._actual_img.get_rect()
self._position_car = new_rect.move(self.new_pos_after_turn(new_rect))
self._last_dir_cmd = new_command
def calculate_new_speed(self, command):
if not self._on_road:
if (1 - exp(-self._n_speed / n0_speed)) > 0.3:
# If speed at more than 30% of the maximum
# self.n_speed = self.n_speed - 4
self._speed = max(self._speed * 0.70, 0)
self.recalculate_n_speed()
if command == CommandGas.OFF:
self._speed = max(self._speed * 0.97, 0)
self.recalculate_n_speed()
elif command == CommandGas.BRAKE and self._speed > 1:
self._speed = max(self._speed * 0.90, 0)
self.recalculate_n_speed()
else:
if command == CommandGas.BRAKE:
self._n_speed = max(self._n_speed - 2.0, -n0_speed / 2)
else: # if command == CommandGas.ON
if self._on_road:
self._n_speed = min(self._n_speed + 1.0, max_n_speed)
else:
self._n_speed = min(self._n_speed + .3, max_n_speed)
if self._n_speed > 0:
self._speed = self._speed_max * (1 - exp(-self._n_speed / n0_speed))
else:
self._speed = - 0.5 * self._speed_max * (1 - exp(self._n_speed / n0_speed))
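        # Worked example of the forward curve above: at n_speed == n0_speed
        # the car reaches speed_max * (1 - e**-1), roughly 63% of top speed;
        # each further n0_speed of throttle closes ~63% of the remaining gap.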
def recalculate_n_speed(self):
        # Natural log inverts the exp-based curves in calculate_new_speed()
        # (log2 is not the inverse of exp); the reverse branch mirrors the
        # -0.5 factor of the backing-up curve.
        if self._speed > 0:
            self._n_speed = -n0_speed * math.log(1 - (self._speed / self._speed_max))
        else:
            self._n_speed = n0_speed * math.log(1 + 2 * (self._speed / self._speed_max))
def reduce_all_speeds(self, fact):
self._speed = max(self._speed - fact, 0)
self._x_speed = max(self._x_speed - fact, 0)
self._y_speed = max(self._y_speed - fact, 0)
def calculate_new_angle(self, command):
if command == CommandDir.LEFT:
self._theta += car_step_angle
elif command == CommandDir.RIGHT:
self._theta -= car_step_angle
drift_fact = min(drift_factor_cst * math.pow(self._speed / self._speed_max, 2), drift_factor_max)
self._x_speed = drift_fact * self._x_speed + (1 - drift_fact) * round(self._speed * cos(radians(self._theta)),
6)
self._y_speed = drift_fact * self._y_speed + (1 - drift_fact) * round(-self._speed * sin(radians(self._theta)),
6)
def move_car(self):
        # pygame rects only move by whole pixels, so the fractional part of
        # each step is carried over to the next frame via math.modf() below.
# print(50 * "*")
# print("Before : {} / {}".format(self.rest_pos_x, self.rest_pos_y))
self._rest_pos_x, x_move_int = math.modf(self._x_speed + self._rest_pos_x)
self._rest_pos_y, y_move_int = math.modf(self._y_speed + self._rest_pos_y)
# print("After : {} / {}".format(self.rest_pos_x, self.rest_pos_y))
self._position_car = self._position_car.move(x_move_int,
y_move_int)
def move_car_and_refresh_lidar(self):
# Car
self.move_car()
# Lidar
self._refresh_lidar()
self._on_road = self._lidar.is_practicable(self._lidar_grid_car_y, self._lidar_grid_car_x)
def get_position_left_top(self):
return tuple([self._position_car.x,
self._position_car.y])
def get_position_center(self):
return tuple([self._position_car.centerx,
self._position_car.centery])
def new_pos_after_turn(self, new_rect):
return tuple([self._position_car.centerx - new_rect.w // 2,
self._position_car.centery - new_rect.h // 2])
def reset_car(self):
self._theta = 0.0
self._speed = 0.0
self._x_speed = 0.0
self._y_speed = 0.0
self._rest_pos_x = 0.0
self._rest_pos_y = 0.0
self._n_speed = 0.0
self._fitness = 0
self._bonus_checkpoints = 0
self._time_outside_road = 0
self._actual_img = pygame.transform.rotate(self._img, self._theta)
new_rect = self._actual_img.get_rect()
self._position_car = new_rect.move(self._x_init,
self._y_init)
self._refresh_lidar()
self.reset_checkpoints()
def _refresh_lidar(self):
car_x, car_y = self.get_position_center()
for i in range(height_grid_LIDAR):
for j in range(width_grid_LIDAR):
dx_rel_grid = (j - self._lidar_grid_car_x) * self._lidar_case_size
dy_rel_grid = (i - self._lidar_grid_car_y) * self._lidar_case_size
                # /!\ theta = 0° points the car left, but the lidar map points up; hence the -90° offset below
dx_rel = dx_rel_grid * cos(radians(self._theta - 90.0)) + dy_rel_grid * sin(radians(self._theta - 90.0))
dy_rel = -dx_rel_grid * sin(radians(self._theta - 90.0)) + dy_rel_grid * cos(
radians(self._theta - 90.0))
true_x = round(dx_rel + car_x)
true_y = round(dy_rel + car_y)
true_x_grid = true_x // self._track.case_size
true_y_grid = true_y // self._track.case_size
if 0 < true_x_grid < self._track.grid_w and 0 < true_y_grid < self._track.grid_h:
road_type = self._track.get_road_name(true_y_grid, true_x_grid)
else:
road_type = "xx"
# is_practicable = track_part_1w_practicable[corresponding_square]
is_practicable = "x" not in road_type
self._lidar.refresh_case(i, j, road_type, is_practicable, [true_x, true_y])
# ----Fitness & Checkpoints---#
# Use some functions to calculate new fitness
def refresh_fitness_v1(self):
if self._on_road:
self._fitness += max(self._speed, 0) * weight_on_road / FPS_MAX_init
self._time_outside_road = max(0, self._time_outside_road - 0.1)
else:
self._time_outside_road += 1
self._fitness -= 40 * (max(self._speed, 0) + weight_on_road + self._time_outside_road) / FPS_MAX_init
def refresh_fitness_v2(self): # With Checkpoint
if self._on_road:
self._fitness += max(self._speed, 0) * weight_on_road / FPS_MAX_init
self._time_outside_road = max(0, self._time_outside_road - 0.1)
if self._checkpoints:
# Check if on checkpoint
x, y = self.get_position_center()
x_grid = x // self._track.case_size
y_grid = y // self._track.case_size
for checkpoint in self._checkpoints:
if not checkpoint[1]:
continue
if y_grid == checkpoint[0][0] and x_grid == checkpoint[0][1]:
self._fitness += boost_checkpoint
self._bonus_checkpoints += boost_checkpoint
checkpoint[1] = False
# print("ON CHECKPOINT")
break
                # TODO: do not reset the checkpoints right away, otherwise the bonus gets counted twice
if not any([cp[1] for cp in self._checkpoints]):
self.reset_checkpoints()
# print("RESET CHECKPOINT")
else:
self._time_outside_road += 1
self._fitness -= max(self._speed, 0) * self._time_outside_road * weight_on_road / FPS_MAX_init
def set_checkpoints(self):
my_list = []
for checkpoint in self._track.checkpoints:
x, y = self.get_position_center()
x_grid = x // self._track.case_size
y_grid = y // self._track.case_size
if x_grid != checkpoint[1] or y_grid != checkpoint[0]:
my_list.append([checkpoint, True])
else:
my_list.append([checkpoint, False])
return my_list
def reset_checkpoints(self):
for checkpoint in self._checkpoints:
checkpoint[1] = True
def _gen_car_img(self, path_img):
img = pygame.image.load(path_img).convert_alpha()
width = img.get_width()
height = img.get_height()
ratio = float(width / height)
car_len = self._track.car_size
if ratio < 1:
width = car_len
height = int(car_len * ratio)
else:
height = car_len
width = int(car_len / ratio)
img_resize = pygame.transform.scale(img, (height, width))
return img_resize
@property
def last_dir_cmd(self):
return self._last_dir_cmd
@property
def last_gas_cmd(self):
return self._last_gas_cmd
@property
def fitness(self):
return self._fitness
@property
def bonus_checkpoints(self):
return self._bonus_checkpoints
@property
def actual_img(self):
return self._actual_img
@property
def lidar_grid_car_x(self):
return self._lidar_grid_car_x
@property
def lidar_grid_car_y(self):
return self._lidar_grid_car_y
def lidar_is_practicable(self, i, j):
return self._lidar.is_practicable(i, j)
def lidar_get_true_pos(self, i, j):
return self._lidar.get_true_pos(i, j)
| 36.024615 | 120 | 0.58712 | 1,556 | 11,708 | 4.02635 | 0.134961 | 0.023943 | 0.025539 | 0.030327 | 0.396488 | 0.312051 | 0.249481 | 0.20431 | 0.174621 | 0.166959 | 0 | 0.017592 | 0.320294 | 11,708 | 324 | 121 | 36.135802 | 0.76954 | 0.071831 | 0 | 0.296943 | 0 | 0 | 0.000277 | 0 | 0 | 0 | 0 | 0.003086 | 0 | 1 | 0.122271 | false | 0 | 0.026201 | 0.052402 | 0.213974 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea41696464b913f754d33896eb9a31ef2a7cf1a9 | 19,207 | py | Python | tpDcc/tools/datalibrary/widgets/listview.py | tpDcc/tpDcc-tools-datalibrary | fe867ac35a59d13300af20a998dccdabc2e145ba | [
"MIT"
] | null | null | null | tpDcc/tools/datalibrary/widgets/listview.py | tpDcc/tpDcc-tools-datalibrary | fe867ac35a59d13300af20a998dccdabc2e145ba | [
"MIT"
] | null | null | null | tpDcc/tools/datalibrary/widgets/listview.py | tpDcc/tpDcc-tools-datalibrary | fe867ac35a59d13300af20a998dccdabc2e145ba | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains library item tree view implementation
"""
from __future__ import print_function, division, absolute_import
import logging
import traceback
from Qt.QtCore import Qt, Signal, QPoint, QRect, QSize, QMimeData
from Qt.QtWidgets import QListView, QAbstractItemView, QRubberBand
from Qt.QtGui import QFont, QColor, QPixmap, QPalette, QPainter, QBrush, QDrag
from tpDcc.libs.qt.core import contexts as qt_contexts
from tpDcc.tools.datalibrary.core import consts
from tpDcc.tools.datalibrary.widgets import mixinview
LOGGER = logging.getLogger('tpDcc-tools-datalibrary')
class ViewerListView(mixinview.ViewerViewWidgetMixin, QListView):
DEFAULT_DRAG_THRESHOLD = consts.LIST_DEFAULT_DRAG_THRESHOLD
itemMoved = Signal(object)
itemDropped = Signal(object)
itemClicked = Signal(object)
itemDoubleClicked = Signal(object)
def __init__(self, *args, **kwargs):
QListView.__init__(self, *args, **kwargs)
mixinview.ViewerViewWidgetMixin.__init__(self)
self.setSpacing(5)
self.setMouseTracking(True)
self.setSelectionRectVisible(True)
self.setViewMode(QListView.IconMode)
self.setResizeMode(QListView.Adjust)
self.setSelectionMode(QListView.ExtendedSelection)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setAcceptDrops(True)
self.setDragEnabled(True)
self.setDragDropMode(QAbstractItemView.DragDrop)
self._tree_widget = None
self._rubber_band = None
self._rubber_band_start_pos = None
self._rubber_band_color = QColor(Qt.white)
self._custom_sort_order = list()
self._drag = None
self._drag_start_pos = None
self._drag_start_index = None
self._drop_enabled = True
self.clicked.connect(self._on_index_clicked)
self.doubleClicked.connect(self._on_index_double_clicked)
# ============================================================================================================
# OVERRIDES
# ============================================================================================================
def startDrag(self, event):
"""
        Overrides base QListView startDrag function
:param event: QEvent
"""
if not self.dragEnabled():
return
if self._drag_start_pos and hasattr(event, 'pos'):
item = self.item_at(event.pos())
if item and item.drag_enabled():
self._drag_start_index = self.indexAt(event.pos())
point = self._drag_start_pos - event.pos()
dt = self.drag_threshold()
if point.x() > dt or point.y() > dt or point.x() < -dt or point.y() < -dt:
items = self.selected_items()
mime_data = self.mime_data(items)
pixmap = self._drag_pixmap(item, items)
hotspot = QPoint(pixmap.width() * 0.5, pixmap.height() * 0.5)
self._drag = QDrag(self)
self._drag.setPixmap(pixmap)
self._drag.setHotSpot(hotspot)
self._drag.setMimeData(mime_data)
self._drag.start(Qt.MoveAction)
def endDrag(self):
"""
Function that ends current drag
"""
self._drag_start_pos = None
self._drag_start_index = None
if self._drag:
del self._drag
self._drag = None
def dragEnterEvent(self, event):
"""
        Overrides base QListView dragEnterEvent function
:param event: QDragEvent
"""
mimedata = event.mimeData()
if (mimedata.hasText() or mimedata.hasUrls()) and self.drop_enabled():
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
"""
        Overrides base QListView dragMoveEvent function
:param event: QDragEvent
"""
mimedata = event.mimeData()
if (mimedata.hasText() or mimedata.hasUrls()) and self.drop_enabled():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
"""
        Overrides base QListView dropEvent function
:param event: QDropEvent
"""
item = self.item_at(event.pos())
        selected_items = self.selected_items()
if selected_items and item:
if self.tree_widget().is_sort_by_custom_order():
self.move_items(selected_items, item)
else:
LOGGER.info('You can only re-order items when sorting by custom order')
if item:
item.drop_event(event)
self.itemDropped.emit(event)
# ============================================================================================================
# OVERRIDES - MIXIN
# ============================================================================================================
def mousePressEvent(self, event):
"""
Overrides base QListView mousePressEvent function
:param event: QMouseEvent
"""
item = self.item_at(event.pos())
if not item:
self.clearSelection()
mixinview.ViewerViewWidgetMixin.mousePressEvent(self, event)
if event.isAccepted():
QListView.mousePressEvent(self, event)
if item:
# NOTE: This causes viewer tree widget selectionChanged signal to be emitted multiple times.
# NOTE: This causes that item preview widgets are created twice when selecting an item in the viewer.
# NOTE: For this reason, we block tree widgets signals before selecting the item
with qt_contexts.block_signals(self.tree_widget()):
item.setSelected(True)
self.endDrag()
self._drag_start_pos = event.pos()
is_left_button = self.mouse_press_button() == Qt.LeftButton
is_item_draggable = item and item.drag_enabled()
is_selection_empty = not self.selected_items()
if is_left_button and (is_selection_empty or not is_item_draggable):
self.rubber_band_start_event(event)
def mouseMoveEvent(self, event):
"""
Overrides base QListView mouseMoveEvent function
:param event: QMouseEvent
"""
if not self.is_dragging_items():
is_left_button = self.mouse_press_button() == Qt.LeftButton
if is_left_button and self.rubber_band().isHidden() and self.selected_items():
self.startDrag(event)
else:
mixinview.ViewerViewWidgetMixin.mouseMoveEvent(self, event)
QListView.mouseMoveEvent(self, event)
if is_left_button:
self.rubber_band_move_event(event)
def mouseReleaseEvent(self, event):
"""
        Overrides base QListView mouseReleaseEvent function
:param event: QMouseEvent
"""
item = self.item_at(event.pos())
items = self.selected_items()
mixinview.ViewerViewWidgetMixin.mouseReleaseEvent(self, event)
if item not in items:
if event.button() != Qt.MidButton:
QListView.mouseReleaseEvent(self, event)
elif not items:
QListView.mouseReleaseEvent(self, event)
self.endDrag()
self.rubber_band().hide()
# ============================================================================================================
# BASE
# ============================================================================================================
def scroll_to_item(self, item, pos=None):
"""
Ensures that the item is visible
:param item: LibraryItem
:param pos: QPoint or None
"""
index = self.index_from_item(item)
pos = pos or QAbstractItemView.PositionAtCenter
self.scrollTo(index, pos)
# ============================================================================================================
# TREE WIDGET
# ============================================================================================================
def tree_widget(self):
"""
Return the tree widget that contains the items
:return: LibraryTreeWidget
"""
return self._tree_widget
def set_tree_widget(self, tree_widget):
"""
Set the tree widget that contains the items
:param tree_widget: LibraryTreeWidget
"""
self._tree_widget = tree_widget
self.setModel(tree_widget.model())
self.setSelectionModel(tree_widget.selectionModel())
def items(self):
"""
Return all the items
:return: list(LibraryItem)
"""
return self.tree_widget().items()
def row_at(self, pos):
"""
Returns the row for the given pos
:param pos: QPoint
:return:
"""
return self.tree_widget().row_at(pos)
def item_at(self, pos):
"""
Returns a pointer to the item at the coordinates p
The coordinates are relative to the tree widget's viewport
:param pos: QPoint
:return: LibraryItem
"""
index = self.indexAt(pos)
return self.item_from_index(index)
def selected_item(self):
"""
Returns the last selected non-hidden item
:return: QTreeWidgetItem
"""
return self.tree_widget().selected_item()
def selected_items(self):
"""
Returns a list of all selected non-hidden items
:return: list(QTreeWidgetItem)
"""
return self.tree_widget().selectedItems()
def insert_item(self, row, item):
"""
Inserts the item at row in the top level in the view
:param row: int
:param item: QTreeWidgetItem
"""
self.tree_widget().insertTopLevelItem(row, item)
def take_items(self, items):
"""
Removes and returns the items from the view
:param items: list(QTreeWidgetItem)
:return: list(QTreeWidgetItem)
"""
for item in items:
row = self.tree_widget().indexOfTopLevelItem(item)
self.tree_widget().takeTopLevelItem(row)
return items
def set_indexes_selected(self, indexes, value):
"""
Set the selected state for the given indexes
:param indexes: list(QModelIndex)
:param value: bool
"""
items = self.items_from_indexes(indexes)
self.set_items_selected(items, value)
def set_items_selected(self, items, value):
"""
Sets the selected state for the given items
:param items: list(LibraryItem)
:param value: bool
"""
with qt_contexts.block_signals(self.tree_widget()):
try:
for item in items:
item.setSelected(value)
except Exception:
LOGGER.error(str(traceback.format_exc()))
def move_items(self, items, item_at):
"""
Moves the given items to the position at the given row
:param items: list(LibraryItem)
:param item_at: LibraryItem
"""
scroll_value = self.verticalScrollBar().value()
self.tree_widget().move_items(items, item_at)
self.itemMoved.emit(items[-1])
self.verticalScrollBar().setValue(scroll_value)
def index_from_item(self, item):
"""
Returns QModelIndex associated with the given item
:param item: LibraryItem
:return: QModelIndex
"""
return self.tree_widget().indexFromItem(item)
def item_from_index(self, index):
"""
Return a pointer to the LibraryItem associated with the given model index
:param index: QModelIndex
:return: LibraryItem
"""
return self.tree_widget().itemFromIndex(index)
def items_from_urls(self, urls):
"""
Returns items from the given URL objects
:param urls: list(QUrl)
:return: DataItem
"""
items = list()
for url in urls:
item = self.item_from_url(url)
if item:
items.append(item)
return items
def item_from_url(self, url):
"""
Returns the item from the given url object
:param url: QUrl
:return: DataItem
"""
return self.item_from_path(url.path())
def items_from_paths(self, paths):
"""
Returns the items from the given paths
:param paths: list(str)
:return: QUrl
"""
items = list()
for path in paths:
item = self.item_from_path(path)
if item:
items.append(item)
return items
def item_from_path(self, path):
"""
Returns the item from the given path
:param path: str
:return: DataItem
"""
for item in self.items():
item_path = item.url().path()
if item_path and path == item_path:
return item
return None
# ============================================================================================================
# DRAG & DROP
# ============================================================================================================
def drop_enabled(self):
"""
Returns whether drop functionality is enabled or not
:return: bool
"""
return self._drop_enabled
def set_drop_enabled(self, flag):
"""
Sets whether drop functionality is enabled or not
:param flag: bool
"""
self._drop_enabled = flag
def drag_threshold(self):
"""
Returns current drag threshold
:return: float
"""
return self.DEFAULT_DRAG_THRESHOLD
def is_dragging_items(self):
"""
Returns whether the user is currently dragging items or not
:return: bool
"""
return bool(self._drag)
def mime_data(self, items):
"""
Returns drag mime data
:param items: list(LibraryItem)
:return: QMimeData
"""
mimedata = QMimeData()
urls = [item.url() for item in items]
text = '\n'.join([item.mime_text() for item in items])
mimedata.setUrls(urls)
mimedata.setText(text)
return mimedata
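        # For illustration: dragging two items whose mime_text() values are
        # 'a' and 'b' yields QMimeData with text() == 'a\nb' and urls()
        # mirroring each item.url(); drop targets can rebuild the item list
        # from either side (see items_from_urls() below).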
# ============================================================================================================
# RUBBER BAND
# ============================================================================================================
def create_rubber_band(self):
"""
Creates a new instance of the selection rubber band
:return: QRubberBand
"""
rubber_band = QRubberBand(QRubberBand.Rectangle, self)
palette = QPalette()
color = self.rubber_band_color()
palette.setBrush(QPalette.Highlight, QBrush(color))
rubber_band.setPalette(palette)
return rubber_band
def rubber_band(self):
"""
        Returns the selection rubber band for this widget
:return: QRubberBand
"""
if not self._rubber_band:
self.setSelectionRectVisible(False)
self._rubber_band = self.create_rubber_band()
return self._rubber_band
def rubber_band_color(self):
"""
Returns the rubber band color for this widget
:return: QColor
"""
return self._rubber_band_color
def set_rubber_band_color(self, color):
"""
Sets the color for the rubber band
:param color: QColor
"""
self._rubber_band = None
self._rubber_band_color = color
def rubber_band_start_event(self, event):
"""
Triggered when the user presses an empty area
:param event: QMouseEvent
"""
self._rubber_band_start_pos = event.pos()
rect = QRect(self._rubber_band_start_pos, QSize())
rubber_band = self.rubber_band()
rubber_band.setGeometry(rect)
rubber_band.show()
def rubber_band_move_event(self, event):
"""
Triggered when the user moves the mouse over the current viewport
:param event: QMouseEvent
"""
if self.rubber_band() and self._rubber_band_start_pos:
rect = QRect(self._rubber_band_start_pos, event.pos())
rect = rect.normalized()
self.rubber_band().setGeometry(rect)
# ============================================================================================================
# INTERNAL
# ============================================================================================================
def _drag_pixmap(self, item, items):
"""
Internal function that shows the pixmap for the given item during drag operation
:param item: LibraryItem
:param items: list(LibraryItem)
:return: QPixmap
"""
rect = self.visualRect(self.index_from_item(item))
pixmap = QPixmap()
pixmap = pixmap.grabWidget(self, rect)
if len(items) > 1:
custom_width = 35
custom_padding = 5
custom_text = str(len(items))
custom_x = pixmap.rect().center().x() - float(custom_width * 0.5)
custom_y = pixmap.rect().top() + custom_padding
custom_rect = QRect(custom_x, custom_y, custom_width, custom_width)
painter = QPainter(pixmap)
painter.setRenderHint(QPainter.Antialiasing)
painter.setPen(Qt.NoPen)
painter.setBrush(self.viewer().background_selected_color())
painter.drawEllipse(custom_rect.center(), float(custom_width * 0.5), float(custom_width * 0.5))
font = QFont('Serif', 12, QFont.Light)
painter.setFont(font)
painter.setPen(self.viewer().text_selected_color())
painter.drawText(custom_rect, Qt.AlignCenter, str(custom_text))
return pixmap
# ============================================================================================================
# CALLBACKS
# ============================================================================================================
def _on_index_clicked(self, index):
"""
Callback function that is called when the user clicks on an item
:param index: QModelIndex
"""
item = self.item_from_index(index)
item.clicked()
self.set_items_selected([item], True)
self.itemClicked.emit(item)
def _on_index_double_clicked(self, index):
"""
Callback function that is called when the user double clicks on an item
:param index: QModelIndex
"""
item = self.item_from_index(index)
self.set_items_selected([item], True)
item.double_clicked()
self.itemDoubleClicked.emit(item)
| 31.694719 | 117 | 0.543864 | 1,919 | 19,207 | 5.269932 | 0.177176 | 0.038564 | 0.029071 | 0.013844 | 0.239395 | 0.158904 | 0.135964 | 0.102739 | 0.084248 | 0.075546 | 0 | 0.001385 | 0.285729 | 19,207 | 605 | 118 | 31.747107 | 0.735768 | 0.28802 | 0 | 0.189591 | 0 | 0 | 0.007276 | 0.00188 | 0 | 0 | 0 | 0 | 0 | 1 | 0.156134 | false | 0 | 0.033457 | 0 | 0.297398 | 0.003717 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea42ff0ab964584587be1485aebf77b2100a9ba1 | 26,043 | py | Python | api/File.py | variski/utu-vm-site | 8a2eeaeac019fad0663caca035820c288e3d8849 | [
"MIT"
] | null | null | null | api/File.py | variski/utu-vm-site | 8a2eeaeac019fad0663caca035820c288e3d8849 | [
"MIT"
] | null | null | null | api/File.py | variski/utu-vm-site | 8a2eeaeac019fad0663caca035820c288e3d8849 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Turku University (2019) Department of Future Technologies
# Course Virtualization / Website
# Class for Course Virtualization site downloadables
#
# File.py - Jani Tammi <jasata@utu.fi>
#
# 2019-12-07 Initial version.
# 2019-12-28 Add prepublish(), JSONFormSchema()
# 2019-12-28 Add publish()
# 2020-08-30 Fix owner check in update()
# 2020-09-23 Add decode_bytemultiple()
#
#
# TODO: remove _* -columns from result sets.
#
import os
import json
import time
import logging
import sqlite3
import flask
from flask import g
from application import app
from .Exception import *
from .DataObject import DataObject
from .OVFData import OVFData
from .Teacher import Teacher
# Pylint doesn't understand app.logger ...so we disable all these warnings
# pylint: disable=maybe-no-member
# Extends api.DataObject
class File(DataObject):
class DefaultDict(dict):
"""Returns for missing key, value for key '*' is returned or raises KeyError if default has not been set."""
def __missing__(self, key):
if key == '*':
raise KeyError("Key not found and default ('*') not set!")
else:
return self['*']
# Translate file.downloadable_to <-> sso.role ACL
# (who can access if file.downloadable_to says...)
# Default value '*' is downloadable to noone.
_downloadable_to2acl = DefaultDict({
'teacher': ['teacher'],
'student': ['student', 'teacher'],
'anyone': ['anonymous', 'student', 'teacher'],
'*': []
})
# Translate current sso.role into a list of file.downloadable_to -values
# (what can I access with my role...)
_role2acl = DefaultDict({
'teacher': ['anyone', 'student', 'teacher'],
'student': ['anyone', 'student'],
'*': ['anyone']
})
# Columns that must not be updated (by client)
_readOnly = ['id', 'name', 'size', 'sha1', 'created']
def __init__(self):
self.cursor = g.db.cursor()
# Init super for table name 'file'
super().__init__(self.cursor, 'file')
def schema(self):
"""Return file -table database schema in JSON.
Possible responses:
500 InternalError - Other processing error
200 OK"""
try:
# Do not send owner data to client
schema = super().schema(['owner'])
# Set readonly columns
for col, attribute in schema.items():
if col in self._readOnly:
attribute['readonly'] = True
except Exception as e:
app.logger.exception("error creating JSON")
raise InternalError(
"schema() error while generating schema JSON", str(e)
) from None
#
# Return schema
#
return (200, {"schema": schema})
def search(
self,
file_type: str = None,
downloadable_to: str = None,
owner: str = None
):
"""Argument 'file_type' as per column file.type, 'role' as column file.downloadable_to, 'owner' as per column file.owner."""
app.logger.debug(
f"search(type='{file_type}', downloadable_to='{downloadable_to}', owner='{owner}')"
)
self.sql = f"SELECT * FROM {self.table_name}"
where = [] # SQL WHERE conditions and bind symbols ('?')
bvars = [] # list of bind variables to match the above
if file_type is not None:
where.append("type = ?")
bvars.append(file_type)
if downloadable_to is not None:
acl = self._role2acl[downloadable_to]
where.append(
f"downloadable_to IN ({','.join(['?'] * len(acl))})"
)
bvars.extend(acl)
if owner is not None:
where.append("owner = ?")
bvars.append(owner)
#
# Create WHERE clause
#
if where:
self.sql += " WHERE " + " AND ".join(where)
app.logger.debug("SQL: " + self.sql)
try:
self.cursor.execute(self.sql, bvars)
except sqlite3.Error as e:
app.logger.exception(
f"'{self.table_name}' -table query failed! ({self.sql})"
)
raise
else:
cursor = self.cursor
data = [dict(zip([key[0] for key in cursor.description], row)) for row in cursor]
finally:
self.cursor.close()
if app.config.get("DEBUG", False):
return (
200,
{
"data" : data,
"query" : {
"sql" : self.sql,
"variables" : bvars
}
}
)
else:
return (200, {"data": data})
def prepublish(self, filepath, owner) -> tuple:
"""Arguments 'filepath' must be an absolute path to the VM image and 'owner' must be an /active/ UID in the 'teacher' table.
Extract information from the file and prepopulate 'file' table row. On success, returns the 'file' table ID value.
Returns:
(200, "{ 'id': <file.id> }")
Exceptions:
404, "Not Found" NotFound()
406, "Not Acceptable" InvalidArgument()
409, "Conflict" Conflict()
500, "Internal Server Error") InternalError()
"""
self.filepath = filepath
self.filedir, self.filename = os.path.split(self.filepath)
_, self.filesuffix = os.path.splitext(self.filename)
# Specified file must exist
if not File.exists(self.filepath):
raise NotFound(f"File '{self.filepath}' does not exist!")
# Check that the teacher is active
if not Teacher(owner).active:
raise InvalidArgument(f"Teacher '{owner}' is not active!")
app.logger.debug("File and owner checks completed!")
# Build a dictionary where keys match 'file' -table column names
# Populate with values either from .OVA or other
#
try:
if self.filesuffix == '.ova':
attributes = File.__ova_attributes(self.filepath)
else:
attributes = File.__img_attributes(self.filepath)
# Cannot be inserted without owner
attributes['owner'] = owner
# File size in bytes
attributes['size'] = os.stat(self.filepath).st_size
except Exception as e:
app.logger.exception("Unexpected error reading file attributes!")
raise InternalError(
"prepublish() error while reading file attributes", str(e)
) from None
app.logger.debug("OVA/IMG attribute collection successful!")
#
# Data collected, insert a row
#
try:
self.sql = f"INSERT INTO file ({','.join(attributes.keys())}) "
self.sql += f"VALUES (:{',:'.join(attributes.keys())})"
self.cursor.execute(self.sql, attributes)
# Get AUTOINCREMENT PK
file_id = self.cursor.lastrowid
self.cursor.connection.commit()
except sqlite3.IntegrityError as e:
self.cursor.connection.rollback()
app.logger.exception("sqlite3.IntegrityError" + self.sql + str(e))
raise Conflict("SQLite3 integrity error", str(e)) from None
except Exception as e:
self.cursor.connection.rollback()
app.logger.exception("Unexpected error while inserting 'file' row!" + str(e))
raise InternalError(
"prepublish() error while inserting", str(e)
) from None
#
# Return with ID
#
if app.config.get("DEBUG", False):
return (
200,
{
"id" : file_id,
"query" : {
"sql" : self.sql,
"variables" : attributes
}
}
)
else:
return (200, {"id": file_id})
# TODO ####################################################################
def publish(self, file_id: int) -> tuple:
"""Moves a file from upload folder to download folder and makes it accessible/downloadable."""
return (200, { "data": "OK" })
# TODO!! ##################################################################
def create(self, request) -> tuple:
"""POST method handler - INSERT new row."""
if not request.json:
raise InvalidArgument("API Request has no JSON payload!")
try:
# Get JSON data as dictionary
            data = request.json  # Flask has already parsed the JSON payload
except Exception as e:
app.logger.exception("Error getting JSON data")
raise InvalidArgument(
"Argument parsing error",
{'request.json' : request.json, 'exception' : str(e)}
) from None
try:
self.sql = f"INSERT INTO {self.table_name} "
self.sql += f"({','.join(data.keys())}) "
self.sql += f"VALUES (:{',:'.join(data.keys())})"
except Exception as e:
app.logger.exception("Error parsing SQL")
raise InternalError(
"Error parsing SQL",
{'sql': self.sql or '', 'exception' : str(e)}
)
# TO BE COMPLETED!!!! #################################################
def fetch(self, id):
"""Retrieve and return a table row. There are no restrictions for retrieving and viewing file data (but update() and create() methods do require a role)."""
self.sql = f"SELECT * FROM {self.table_name} WHERE id = ?"
try:
# ? bind vars want a list argument
self.cursor.execute(self.sql, [id])
except sqlite3.Error as e:
            app.logger.exception(
                f"'{self.table_name}' -table query failed! ({self.sql})"
            )
raise
else:
# list of tuples
result = self.cursor.fetchall()
if len(result) < 1:
raise NotFound(
f"File (ID: {id}) not found!",
{ 'sql': self.sql }
)
# Create data dictionary from result
data = dict(zip([c[0] for c in self.cursor.description], result[0]))
finally:
self.cursor.close()
if app.config.get("DEBUG", False):
return (
200,
{
"data" : data,
"query" : {
"sql" : self.sql,
"variables" : {'id': id}
}
}
)
else:
return (200, {"data": data})
def update(self, id, request, owner):
# 2nd argument must be the URI Parameter /api/file/<int:id>.
# Second copy is expected to be found within the request data
# and it has to match with the URI parameter.
"""
PATCH method routine - UPDATE record
Possible results:
404 Not Found raise NotFound()
406 Not Acceptable raise InvalidArgument()
200 OK
{
'id' : <int>
}
"""
app.logger.debug("this.primarykeys: " + str(self.primarykeys))
app.logger.debug("fnc arg id: " + str(id))
if not request.json:
raise InvalidArgument("API Request has no JSON payload!")
else:
data = request.json # json.loads(request.json)
        # This is a horrible solution; client code should take care of this!
data['id'] = int(data['id'])
app.logger.debug(data)
# Extract POST data into dict
try:
#
# Primary key checking
#
if not data[self.primarykeys[0]]:
raise ValueError(
f"Primary key '{self.primarykeys[0]}' not in dataset!"
)
if not id:
raise ValueError(
f"Primary key value for '{self.primarykeys[0]}' cannot be None!"
)
if data[self.primarykeys[0]] != id:
raise ValueError(
"Primary key '{self.primarykeys[0]}' values do not match! One provided as URI parameter, one included in the data set."
)
#
# Check ownership
#
result = self.cursor.execute(
"SELECT owner FROM file WHERE id = ?",
[id]
).fetchall()
if len(result) != 1:
raise ValueError(
f"File (id: {id}) does not exist!"
)
else:
if result[0][0] != owner:
raise ValueError(
f"User '{owner}' not the owner of file {id}, user '{result[0][0]}' is!"
)
except Exception as e:
app.logger.exception("Prerequisite failure!")
raise InvalidArgument(
"Argument parsing error",
{'request.json' : request.json, 'exception' : str(e)}
) from None
app.logger.debug("Prerequisites OK!")
#
# Handle byte-size variables ('disksize' and 'ram')
# "2 GB" (etc) -> 2147483648 and so on... except when the string makes
        # no sense. Then it is used as-is instead.
#
if "ram" in data:
data['ram'] = File.decode_bytemultiple(data['ram'])
if "disksize" in data:
data['disksize'] = File.decode_bytemultiple(data['disksize'])
#
# Generate SQL
#
try:
# columns list, without primary key(s)
cols = [ c for c in data.keys() if c not in self.primarykeys ]
# Remove read-only columns, in case someone injected them
cols = [ c for c in cols if c not in self._readOnly ]
app.logger.debug(f"Columns: {','.join(cols)}")
self.sql = f"UPDATE {self.table_name} SET "
self.sql += ",".join([ c + ' = :' + c for c in cols ])
self.sql += " WHERE "
self.sql += " AND ".join([k + ' = :' + k for k in self.primarykeys])
except Exception as e:
raise InternalError(
"SQL parsing error",
{'sql' : self.sql or '', 'exception' : str(e)}
) from None
app.logger.debug("SQL: " + self.sql)
#
# Execute Statement
#
try:
self.cursor.execute(self.sql, data)
#
# Number of updated rows must be one
#
if self.cursor.rowcount != 1:
nrows = self.cursor.rowcount
g.db.rollback()
if nrows > 1:
raise InternalError(
"Error! Update affected more than one row!",
{'sql': self.sql or '', 'data': data}
)
else:
raise NotFound(
"Entity not found - nothing was updated!",
{'sql': self.sql or '', 'data': data}
)
except sqlite3.Error as e:
# TODO: Check what actually caused the issue
raise InvalidArgument(
"UPDATE failed!",
{'sql': self.sql or '', 'exception': str(e)}
) from None
finally:
g.db.commit()
self.cursor.close()
# Return id
return (200, {'data': {'id' : id}})
def delete(self, vm_id, user_id):
"""Delete file with the given id.
Checks that:
- file exists
- user is the owner of the file
(TODO) users with admin rights should be able to delete others' files
Possible return values:
200: OK (Delete query sent)
403: User is not allowed to delete the file
404: Specified file does not exist
404: Database record not found
        500: An exception occurred (and was logged)"""
#
# Check that the file exists and get filename
#
self.sql = "SELECT name FROM file WHERE id = ?"
try:
result = self.cursor.execute(self.sql,[vm_id]).fetchall()
if len(result) != 1:
app.logger.debug(f"VM with id '{vm_id}' not found")
return (404, { "data": "Not found" })
filename = result[0][0]
app.logger.debug("fetched: " + filename)
folder = app.config.get("DOWNLOAD_FOLDER")
filepath = os.path.join(folder, filename)
if not File.exists(filepath):
app.logger.error(
f"File '{filepath}' does not exist!"
)
return (404, {"data": "File not found"})
except Exception as e:
app.logger.exception(
f"Exception while checking if '{filepath}' exists!"
)
return (500, {"data": "Internal Server Error"})
#
# Check ownership
#
self.sql = "SELECT owner FROM file WHERE id = ?"
try:
result = self.cursor.execute(self.sql, [vm_id]).fetchall()
if len(result) != 1:
app.logger.debug(f"VM with id '{vm_id}' not found")
return (404, { "data": "Not found" })
owner = result[0][0]
app.logger.debug("fetched: " + owner)
if owner != user_id:
app.logger.debug(
f"User '{user_id}' tried to delete '{filename}', owner: '{owner}' (denied)"
)
return (403, { "data": "Forbidden" })
except Exception as e:
app.logger.exception(
f"Exception while checking file '{vm_id}' ownership!"
)
return (500, {"data": "Internal Server Error"})
#
# If all checks have been passed, delete database row and file
#
self.sql = "DELETE FROM file WHERE id = ?"
try:
self.cursor.execute(self.sql, [vm_id])
self.cursor.connection.commit()
# Check that the row has been deleted before deleting file
self.sql = "SELECT name FROM file WHERE id = ?"
result = self.cursor.execute(self.sql,[vm_id]).fetchall()
if len(result) != 0:
app.logger.exception(
f"Exception while trying to delete '{filepath}'"
)
return (500, {"data": "Internal Server Error"})
else:
os.remove(filepath)
except Exception as e:
app.logger.exception(
f"Exception while trying to delete '{filepath}'"
)
return (500, {"data": "Internal Server Error"})
app.logger.debug(f"File '{vm_id}' deleted")
return (200, { "data": "OK" })
def download(self, filename: str, role: str) -> tuple:
"""Checks that the file exists, has a database record and can be downloaded by the specified role.
Possible return values:
200: OK (Download started by Nginx/X-Accel-Redirect)
401: Role not allowed to download the file
404: Specified file does not exist
404: Database record not found
        500: An exception occurred (and was logged)"""
#
# Check that the file exists
#
try:
folder = app.config.get("DOWNLOAD_FOLDER")
filepath = os.path.join(folder, filename)
if not File.exists(filepath):
app.logger.error(
f"File '{filepath}' does not exist!"
)
return "File not found", 404
except Exception as e:
app.logger.exception(
f"Exception while checking if '{filepath}' exists!"
)
return "Internal Server Error", 500
#
# Retrieve information on to whom is it downloadable to
#
self.sql = "SELECT downloadable_to FROM file WHERE name = ?"
try:
self.cursor.execute(self.sql, [filename])
            # fetchone() returns a single tuple, or None when no row matched
            result = self.cursor.fetchone()
            if result is None:
app.logger.error(
f"No database record for existing file '{filepath}'"
)
return "File Not Found", 404
except Exception as e:
# All other exceptions
app.logger.exception("Error executing a query!")
return "Internal Server Error", 500
#
# Send file
#
        # 'X-Accel-Redirect' is an Nginx feature: the header is intercepted
        # by Nginx, and the file pointed to by that directive is then
        # streamed to the client, freeing the Flask thread for the next request.
#
# More important is the fact that this header allows serving files
# that are not in the request pointed location (URL), letting the
# application code verify access privileges and/or change the
# content (specify a different file in X-Accel-Redirect).
#
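        # A minimal sketch of the Nginx side of this contract (the location
        # name and root below are assumptions, not taken from this project):
        #
        #   location /protected-downloads/ {
        #       internal;               # only reachable via X-Accel-Redirect
        #       alias /srv/downloads/;  # filesystem path the redirect maps to
        #   }
        #
        # Flask then replies with an empty body plus the header
        # 'X-Accel-Redirect: /protected-downloads/<filename>', and Nginx
        # serves the file itself.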
try:
# Filepath for X-Accel-Redirect
abs_url_path = os.path.join(
app.config.get("DOWNLOAD_URLPATH"),
filename
)
allowlist = self._downloadable_to2acl[result[0]]
if role in allowlist:
response = flask.Response("")
response.headers['Content-Type'] = ""
response.headers['X-Accel-Redirect'] = abs_url_path
app.logger.debug(
f"Returning response with header X-Accel-Redirect = {response.headers['X-Accel-Redirect']}"
)
return response
else:
app.logger.info(
f"User with role '{role}' attempted to download '{filepath}' that is downloadable to '{allowlist}' (file.downloadable_to: '{result[0]}') (DENIED!)"
)
return "Unauthorized!", 401
except Exception as e:
app.logger.exception(
f"Exception while permission checking role '{role}' (downloadable_to:) '{result[0]}' and/or sending download"
)
return "Internal Server Error", 500
@staticmethod
def decode_bytemultiple(value: str):
mult = {
"KB" : 1024,
"MB" : 1048576,
"GB" : 1073741824,
"TB" : 1099511627776,
"PB" : 2214416418340864
}
try:
# Assume bytes first
return int(value, 10)
        except (ValueError, TypeError):
for k, v in mult.items():
if value.strip().upper().endswith(k):
try:
return int(float(value[:-2].replace(',', '.')) * v)
#return int(value[:-2], 10) * v
                    except ValueError:
app.logger.debug(f"Unable to convert '{value}'")
return value
# multiple not found, return as-is
return value
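    # A quick sanity check of the expected behaviour (inputs are made-up
    # examples; values are easy to verify by hand):
    #   decode_bytemultiple("4096")  -> 4096          (plain byte count)
    #   decode_bytemultiple("1,5GB") -> 1610612736    (1.5 * 1024**3)
    #   decode_bytemultiple("10 MB") -> 10485760      (strip/upper handle spacing)
    #   decode_bytemultiple("oops")  -> "oops"        (unparseable, returned as-is)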
@staticmethod
def exists(file: str) -> bool:
"""Accepts path/file or file and tests if it exists (as a file)."""
        # os.path.isfile() already returns False for nonexistent paths
        return os.path.isfile(file)
@staticmethod
def __ova_attributes(file: str) -> dict:
# Establish defaults
filedir, filename = os.path.split(file)
attributes = {
'name': filename,
'label': filename,
'size': 0,
'type': 'vm'
}
#
        # Extract the XML from the .OVF file inside the .OVA tar archive
        # into the variable 'xmlstring'
#
try:
import tarfile
            # Extract .OVF - exactly one should exist
            ovf = None
            with tarfile.open(file, "r") as ova:
                for tarinfo in ova.getmembers():
                    if os.path.splitext(tarinfo.name)[1].lower() == '.ovf':
                        ovf = tarinfo
                        break
                if not ovf:
                    raise ValueError(".OVF file not found!")
                xmlstring = ova.extractfile(ovf).read().decode("utf-8")
except Exception as e:
app.logger.exception(f"Error extracting .OVF from '{filename}'")
return attributes
#
# Read OVF XML
#
try:
ovfdata = OVFData(xmlstring, app.logger)
if ovfdata.cpus:
attributes['cores'] = ovfdata.cpus
if ovfdata.ram:
attributes['ram'] = ovfdata.ram
if ovfdata.name:
attributes['label'] = ovfdata.name
if ovfdata.description:
attributes['description'] = ovfdata.description
if ovfdata.disksize:
attributes['disksize'] = ovfdata.disksize
if ovfdata.ostype:
attributes['ostype'] = ovfdata.ostype
except Exception as e:
app.logger.exception("Error reading OVF XML!")
return attributes
@staticmethod
def __img_attributes(file: str) -> dict:
# Images and .ZIP archives (for pendrives)
filedir, filename = os.path.split(file)
# Establish defaults
attributes = {
'name': filename,
'label': filename,
'size': 0,
'type': 'vm'
}
return attributes
# EOF | 35.336499 | 167 | 0.50551 | 2,731 | 26,043 | 4.789088 | 0.185646 | 0.028901 | 0.024773 | 0.020644 | 0.304152 | 0.255065 | 0.207279 | 0.191299 | 0.161557 | 0.13235 | 0 | 0.017955 | 0.381945 | 26,043 | 737 | 168 | 35.336499 | 0.794607 | 0.203663 | 0 | 0.408898 | 0 | 0.010593 | 0.188858 | 0.016394 | 0.002119 | 0 | 0 | 0.005427 | 0 | 1 | 0.03178 | false | 0 | 0.027542 | 0 | 0.144068 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea44f9316cc6ceed6c452cfed7b6eb629ac29954 | 504 | py | Python | title_plugin/title_plugin.py | dersteps/pylint | ea5e072d5fb230439fe6b143533db0bf30a3808d | [
"MIT"
] | null | null | null | title_plugin/title_plugin.py | dersteps/pylint | ea5e072d5fb230439fe6b143533db0bf30a3808d | [
"MIT"
] | null | null | null | title_plugin/title_plugin.py | dersteps/pylint | ea5e072d5fb230439fe6b143533db0bf30a3808d | [
"MIT"
] | null | null | null | config_map = {}
def execute(soup):
ret_map = {
"info": [],
"warn": [],
"error": [],
"config":config_map
}
if soup is None:
return ret_map
title = soup.title
if title is None:
ret_map["error"].append("Site has no title")
elif title.string is None or len(title.string) == 0:
ret_map["warn"].append("Site's title is empty")
else:
ret_map["info"].append("Site's title is '%s'" % title.string)
return ret_map
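# Sketch of how this plugin would be driven (assuming BeautifulSoup is the
# parser used by the host application; 'bs4' and the sample HTML below are
# illustrative):
#   from bs4 import BeautifulSoup
#   report = execute(BeautifulSoup("<html><title>Hi</title></html>", "html.parser"))
#   report["info"]  # -> ["Site's title is 'Hi'"]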
| 21.913043 | 69 | 0.543651 | 69 | 504 | 3.855072 | 0.376812 | 0.135338 | 0.075188 | 0.120301 | 0.135338 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002857 | 0.305556 | 504 | 22 | 70 | 22.909091 | 0.757143 | 0 | 0 | 0.111111 | 0 | 0 | 0.178571 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea45bd8846d91252671a1052a0b12bf6421c21c2 | 1,432 | py | Python | edflow/problem2_solution/trainer.py | theRealSuperMario/GPN19_ML_Workflow_Overview | bf3cd0710040fda95e187df944a1a2244c611cd2 | [
"MIT"
] | 2 | 2019-05-31T20:29:00.000Z | 2019-06-01T12:59:54.000Z | edflow/problem2_solution/trainer.py | theRealSuperMario/GPN19_ML_Workflow_Overview | bf3cd0710040fda95e187df944a1a2244c611cd2 | [
"MIT"
] | 19 | 2020-01-28T22:44:32.000Z | 2022-03-11T23:49:01.000Z | edflow/problem2_solution/trainer.py | theRealSuperMario/GPN19 | bf3cd0710040fda95e187df944a1a2244c611cd2 | [
"MIT"
] | null | null | null | from edflow.iterators.tf_trainer import TFBaseTrainer
import tensorflow as tf
def loss(logits, labels):
"""Calculates the loss from the logits and the labels.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size].
Returns:
loss: Loss tensor of type float.
"""
labels = tf.to_int64(labels)
return tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
class Trainer(TFBaseTrainer):
def get_restore_variables(self):
''' nothing fancy here '''
return super().get_restore_variables()
def initialize(self, checkpoint_path = None):
''' in this case, we do not need to initialize anything special '''
return super().initialize(checkpoint_path)
def make_loss_ops(self):
probs = self.model.outputs["probs"]
logits = self.model.logits
targets = self.model.inputs["target"]
correct = tf.nn.in_top_k(probs, tf.cast(targets, tf.int32), k=1)
acc = tf.reduce_mean(tf.cast(correct, tf.float32))
ce = loss(logits, targets)
        # losses are applied per model: each key below is matched against the
        # model's variable names, and the matching variables are updated with
        # the loss provided here
losses = dict()
losses["model"] = ce
# metrics for logging
self.log_ops["acc"] = acc
self.log_ops["ce"] = ce
return losses | 31.822222 | 104 | 0.655028 | 189 | 1,432 | 4.851852 | 0.481481 | 0.029444 | 0.041439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00831 | 0.243715 | 1,432 | 45 | 105 | 31.822222 | 0.838412 | 0.302374 | 0 | 0 | 0 | 0 | 0.02199 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea470acab94dc069688079b6819c173a204b6a95 | 3,744 | py | Python | pcen/pcen.py | daemon/pytorch-pcen | 942c519ad46450ea55cdbfc4afd91d0881927de7 | [
"MIT"
] | 64 | 2019-01-11T17:31:43.000Z | 2022-03-23T03:14:52.000Z | pcen/pcen.py | daemon/pytorch-pcen | 942c519ad46450ea55cdbfc4afd91d0881927de7 | [
"MIT"
] | 4 | 2019-01-13T15:12:14.000Z | 2021-03-02T18:56:02.000Z | pcen/pcen.py | daemon/pytorch-pcen | 942c519ad46450ea55cdbfc4afd91d0881927de7 | [
"MIT"
] | 18 | 2019-04-28T11:34:07.000Z | 2022-02-17T05:43:36.000Z | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .f2m import F2M
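# pcen() below implements per-channel energy normalization (PCEN) over the
# time axis of a (batch, time, frequency) tensor:
#   M[t] = (1 - s) * M[t-1] + s * E[t]                    (IIR smoother)
#   PCEN[t] = (E[t] / (M[t] + eps)**alpha + delta)**r - delta**r
# The 'training' flag selects autograd-friendly math; otherwise in-place ops
# are used for speed. 'last_state' lets the smoother resume across chunks.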
def pcen(x, eps=1E-6, s=0.025, alpha=0.98, delta=2, r=0.5, training=False, last_state=None, empty=True):
frames = x.split(1, -2)
m_frames = []
if empty:
last_state = None
for frame in frames:
if last_state is None:
last_state = frame
m_frames.append(frame)
continue
if training:
m_frame = ((1 - s) * last_state).add_(s * frame)
else:
m_frame = (1 - s) * last_state + s * frame
last_state = m_frame
m_frames.append(m_frame)
M = torch.cat(m_frames, 1)
if training:
pcen_ = (x / (M + eps).pow(alpha) + delta).pow(r) - delta ** r
else:
pcen_ = x.div_(M.add_(eps).pow_(alpha)).add_(delta).pow_(r).sub_(delta ** r)
return pcen_, last_state
class StreamingPCENTransform(nn.Module):
def __init__(self, eps=1E-6, s=0.025, alpha=0.98, delta=2, r=0.5, trainable=False,
use_cuda_kernel=False, **stft_kwargs):
super().__init__()
self.use_cuda_kernel = use_cuda_kernel
if trainable:
self.s = nn.Parameter(torch.Tensor([s]))
self.alpha = nn.Parameter(torch.Tensor([alpha]))
self.delta = nn.Parameter(torch.Tensor([delta]))
self.r = nn.Parameter(torch.Tensor([r]))
else:
self.s = s
self.alpha = alpha
self.delta = delta
self.r = r
self.eps = eps
self.trainable = trainable
self.stft_kwargs = stft_kwargs
self.register_buffer("last_state", torch.zeros(stft_kwargs["n_mels"]))
mel_keys = {"n_mels", "sr", "f_max", "f_min", "n_fft"}
mel_keys = set(stft_kwargs.keys()).intersection(mel_keys)
mel_kwargs = {k: stft_kwargs[k] for k in mel_keys}
stft_keys = set(stft_kwargs.keys()) - mel_keys
self.n_fft = stft_kwargs["n_fft"]
self.stft_kwargs = {k: stft_kwargs[k] for k in stft_keys}
self.f2m = F2M(**mel_kwargs)
self.reset()
def reset(self):
self.empty = True
def forward(self, x):
x = torch.stft(x, self.n_fft, **self.stft_kwargs).norm(dim=-1, p=2)
x = self.f2m(x.permute(0, 2, 1))
        if self.use_cuda_kernel:
            # NOTE: pcen_cuda_kernel is not defined or imported in this file;
            # it is assumed to come from a separately built CUDA extension.
            x, ls = pcen_cuda_kernel(x, self.eps, self.s, self.alpha, self.delta, self.r, self.trainable, self.last_state, self.empty)
else:
x, ls = pcen(x, self.eps, self.s, self.alpha, self.delta, self.r, self.training and self.trainable, self.last_state, self.empty)
self.last_state = ls.detach()
self.empty = False
return x
if __name__ == "__main__":
import time
import librosa
import librosa.display
import matplotlib.pyplot as plt
transform = StreamingPCENTransform(n_mels=40, n_fft=480, hop_length=160).cuda()
x = torch.tensor(librosa.core.load("yes.wav", sr=16000)[0]).unsqueeze(0).cuda()
n = 200
# Non-streaming
a = time.perf_counter()
for _ in range(n):
y = transform(x)
transform.reset()
b = time.perf_counter()
print("{:.2} ms per second of audio.".format((b - a) / n * 1000))
# Streaming in chunks of 1600
x_chunks = x.split(1600, 1)
a = time.perf_counter()
for _ in range(n):
y_chunks = list(map(transform, x_chunks))
transform.reset()
b = time.perf_counter()
print("{:.2} ms per second of audio.".format((b - a) / n * 1000))
librosa.display.specshow(y[0].cpu().numpy().T)
plt.title("Non-streaming")
plt.show()
librosa.display.specshow(torch.cat(y_chunks, 1)[0].cpu().numpy().T)
plt.title("Streaming")
plt.show()
| 34.036364 | 140 | 0.594017 | 555 | 3,744 | 3.830631 | 0.23964 | 0.0508 | 0.024459 | 0.041392 | 0.273754 | 0.239887 | 0.206961 | 0.175917 | 0.15334 | 0.126999 | 0 | 0.028675 | 0.264156 | 3,744 | 109 | 141 | 34.348624 | 0.743013 | 0.010951 | 0 | 0.193548 | 0 | 0 | 0.037568 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043011 | false | 0 | 0.096774 | 0 | 0.172043 | 0.021505 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea47161f390bdabf959a10a356a7a46b61116e0f | 1,465 | py | Python | pyplanter/devices/water_pump.py | nielse63/PiPlanter | 94ed5265fd4d9b4183edd4a67047d976ee5cdd72 | [
"MIT"
] | null | null | null | pyplanter/devices/water_pump.py | nielse63/PiPlanter | 94ed5265fd4d9b4183edd4a67047d976ee5cdd72 | [
"MIT"
] | 118 | 2021-03-08T11:04:41.000Z | 2022-03-31T11:07:05.000Z | pyplanter/devices/water_pump.py | nielse63/PiPlanter | 94ed5265fd4d9b4183edd4a67047d976ee5cdd72 | [
"MIT"
] | null | null | null | import time
from datetime import datetime
from gpiozero import OutputDevice
from pyplanter.constants import GPIOPins
from pyplanter.logger import logger
"""
resources:
- https://gpiozero.readthedocs.io/en/stable/api_output.html#outputdevice
- https://github.com/ankitr42/gardenpi/blob/master/pumpcontroller.py
- https://www.randomgarage.com/2018/12/raspberry-pi-automated-irrigation-system.html
"""
class WaterPump:
def __init__(self):
# our relay module that controls the pump
self.device = OutputDevice(
GPIOPins.water_pump, active_high=True, initial_value=False
)
@property
def is_running(self) -> bool:
return self.device.value == 1
def start(self):
logger.info("Starting water pump")
if self.is_running:
return
try:
self.device.on()
except Exception as error:
logger.error(error)
raise error
def stop(self):
logger.info("Stopping water pump")
try:
self.device.off()
except Exception as error:
logger.error(error)
raise error
def run(self, timeout: int = 15) -> None:
if self.is_running:
return
self.start()
time.sleep(timeout)
self.stop()
now = datetime.now()
logger.info(f"Watered plants at {now}")
if __name__ == "__main__":
water_pump = WaterPump()
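    # NOTE: start() turns the pump on indefinitely; run(timeout=15) would
    # water for a bounded interval and then stop automatically.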
water_pump.start()
| 24.016393 | 84 | 0.624573 | 172 | 1,465 | 5.197674 | 0.517442 | 0.050336 | 0.03132 | 0.033557 | 0.161074 | 0.114094 | 0.114094 | 0.114094 | 0.114094 | 0.114094 | 0 | 0.010368 | 0.275768 | 1,465 | 60 | 85 | 24.416667 | 0.832234 | 0.026621 | 0 | 0.3 | 0 | 0 | 0.058624 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0.025 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea4746c75070b5d5b5b939ca050e3547087fc2e2 | 9,718 | py | Python | scripts/vep/s10_vep_stat.py | dbmi-bgm/cgap-annotation-server | 05d022f254b5e3057abf13aa9c8bdae5eb8b6e3a | [
"MIT"
] | 1 | 2021-05-27T14:27:47.000Z | 2021-05-27T14:27:47.000Z | scripts/vep/s10_vep_stat.py | dbmi-bgm/cgap-annotation-server | 05d022f254b5e3057abf13aa9c8bdae5eb8b6e3a | [
"MIT"
] | 8 | 2020-02-11T20:06:10.000Z | 2020-09-28T20:03:17.000Z | scripts/vep/s10_vep_stat.py | dbmi-bgm/cgap-annotation-server | 05d022f254b5e3057abf13aa9c8bdae5eb8b6e3a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# s10_vep_stat.py
# made by Daniel Minseok Kwon
# 2020-02-05 11:55:01
#########################
import sys
import os
SVRNAME = os.uname()[1]
if "MBI" in SVRNAME.upper():
sys_path = "/Users/pcaso/bin/python_lib"
elif SVRNAME == "T7":
sys_path = "/ms1/bin/python_lib"
else:
sys_path = "/home/mk446/bin/python_lib"
sys.path.append(sys_path)
import tabix
def s10_vep_stat_splitrun(chrom, spos, epos, k):
cnt = {}
cnt_each = {}
cnt_merge = {}
    vep = vepfile.replace('#CHROM#', chrom)
    print(vep)
    tb = tabix.open(vep)
i = 0
recs = tb.query(chrom, spos, epos)
for arr in recs:
i += 1
i_cnt_tags = {}
i_cnt_tag = {}
i_total_tags = 0
i_total_tag = 0
for transcriptinfo in arr[5].split(','):
arr2 = transcriptinfo.split('|')
tags = arr2[1]
try:
i_cnt_tags[tags] += 1
except KeyError:
i_cnt_tags[tags] = 1
i_total_tags += 1
for tag in tags.split('&'):
try:
i_cnt_tag[tag] += 1
except KeyError:
i_cnt_tag[tag] = 1
i_total_tag += 1
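        # Fold this variant's tag counts into the global totals as fractions,
        # so each variant contributes exactly 1.0 to 'cnt' (and to 'cnt_each')
        # no matter how many transcripts it overlaps.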
for tags in i_cnt_tags.keys():
try:
cnt[tags] += i_cnt_tags[tags] / i_total_tags
except KeyError:
cnt[tags] = i_cnt_tags[tags] / i_total_tags
for tag in i_cnt_tag.keys():
try:
cnt_each[tag] += i_cnt_tag[tag] / i_total_tag
except KeyError:
cnt_each[tag] = i_cnt_tag[tag] / i_total_tag
arrtag = list(i_cnt_tag.keys())
mtag = '&'.join(sorted(arrtag))
try:
cnt_merge[mtag] += 1
except KeyError:
cnt_merge[mtag] = 1
if i % 10000 == 0:
print(i, chrom, arr[1], len(cnt.keys()))
# break
cnt['snv'] = i
cnt_each['snv'] = i
cnt_merge['snv'] = i
print(cnt)
print(cnt_each)
print(cnt_merge)
file_util.jsonSave(vep + '.' + k + '.stat_tag.json', cnt_each)
file_util.jsonSave(vep + '.' + k + '.stat_tags.json', cnt)
file_util.jsonSave(vep + '.' + k + '.stat_mergedtag.json', cnt_merge)
print('Saved', vep + '.' + k + '.stat_tag.json')
def s10_vep_stat(chrom):
cnt = {}
cnt_each = {}
    vep = vepfile.replace('#CHROM#', chrom)
print(vep)
i = 0
for line in file_util.gzopen(vep):
line = line.decode('UTF-8')
if line[0] != '#':
arr = line.split('\t')
i += 1
for transcriptinfo in arr[5].split(','):
arr2 = transcriptinfo.split('|')
tags = arr2[1]
try:
cnt[tags] += 1
except KeyError:
cnt[tags] = 1
for tag in tags.split('&'):
try:
cnt_each[tag] += 1
except KeyError:
cnt_each[tag] = 1
if i % 10000 == 0:
print(i, chrom, arr[1], len(cnt.keys()))
# break
print(cnt)
print(cnt_each)
file_util.jsonSave(vep + '.stat_tag.json', cnt_each)
file_util.jsonSave(vep + '.stat_tags.json', cnt)
def save_stat(jsontype='chrom'):
cnt_each = {}
cnt_merge = {}
cnt = {}
mtagsmap = {}
tagsmap = {}
tagmap = {}
for chrom in seq_util.MAIN_CHROM_LIST:
        vep = vepfile.replace('#CHROM#', chrom)
if jsontype == 'chrom':
cnt_merge[chrom] = file_util.jsonOpen(vep + '.stat_mergedtag.json')
cnt_each[chrom] = file_util.jsonOpen(vep + '.stat_tag.json')
cnt[chrom] = file_util.jsonOpen(vep + '.stat_tags.json')
else:
cnt_each[chrom] = {}
cnt_merge[chrom] = {}
cnt[chrom] = {}
seq_util.load_refseq_info('b38d')
chrlen = seq_util.CHROM_LEN['b38d'][chrom]
flag = True
spos = 1
k = 0
while flag:
k += 1
epos = spos + bsize - 1
if epos > chrlen:
epos = chrlen
k_cnt_merge = {}
k_cnt_each = {}
k_cnt = {}
if file_util.is_exist(vep + '.' + str(k) + '.stat_tag.json'):
# print(vep + '.' + str(k) + '.stat_tag.json')
k_cnt_merge = file_util.jsonOpen(vep + '.' + str(k) + '.stat_mergedtag.json')
k_cnt_each = file_util.jsonOpen(vep + '.' + str(k) + '.stat_tag.json')
k_cnt = file_util.jsonOpen(vep + '.' + str(k) + '.stat_tags.json')
else:
cmd = "python /home/mk446/bio/mutanno/SRC/scripts/precal_vep/s10_vep_stat.py " + chrom
cmd += " " + str(spos)
cmd += " " + str(epos)
cmd += " " + str(k)
print(cmd)
for f1 in k_cnt_merge.keys():
try:
cnt_merge[chrom][f1] += k_cnt_merge[f1]
except KeyError:
cnt_merge[chrom][f1] = k_cnt_merge[f1]
for f1 in k_cnt_each.keys():
try:
cnt_each[chrom][f1] += k_cnt_each[f1]
except KeyError:
cnt_each[chrom][f1] = k_cnt_each[f1]
for f1 in k_cnt.keys():
try:
cnt[chrom][f1] += k_cnt[f1]
except KeyError:
cnt[chrom][f1] = k_cnt[f1]
spos += bsize
if epos >= chrlen or spos >= chrlen:
break
for tags in cnt[chrom].keys():
tagsmap[tags] = 1
for tag in cnt_each[chrom].keys():
tagmap[tag] = 1
for tag in cnt_merge[chrom].keys():
mtagsmap[tag] = 1
mtagslist = list(mtagsmap.keys())
tagslist = list(tagsmap.keys())
taglist = list(tagmap.keys())
f = open(statfile, 'w')
f.write("## VEPmergedtag\n")
cont = ['VEP_tag']
cont.append('chr' + '\tchr'.join(seq_util.MAIN_CHROM_LIST))
cont.append('Total')
header = '\t'.join(cont) + '\n'
f.write(header)
for tag in sorted(mtagslist):
cont = [tag]
total = 0
for chrom in seq_util.MAIN_CHROM_LIST:
try:
c1 = cnt_merge[chrom][tag]
except KeyError:
c1 = 0
total += c1
cont.append(str_util.comma(c1))
cont.append(str_util.comma(total))
f.write('\t'.join(cont) + '\n')
f.write("\n\n\n########################\n")
f.write("## VEPtags\n")
cont = ['VEP_tag']
cont.append('chr' + '\tchr'.join(seq_util.MAIN_CHROM_LIST))
cont.append('Total')
header = '\t'.join(cont) + '\n'
f.write(header)
for tags in sorted(tagslist):
cont = [tags]
total = 0
for chrom in seq_util.MAIN_CHROM_LIST:
try:
c1 = cnt[chrom][tags]
except KeyError:
c1 = 0
total += c1
cont.append(str_util.comma(c1))
cont.append(str_util.comma(total))
f.write('\t'.join(cont) + '\n')
f.write("\n\n\n########################\n")
f.write("## VEPtag\n")
cont = ['VEP_tag']
cont.append('chr' + '\tchr'.join(seq_util.MAIN_CHROM_LIST))
cont.append('Total')
header = '\t'.join(cont) + '\n'
f.write(header)
for tag in sorted(taglist):
cont = [tag]
total = 0
for chrom in seq_util.MAIN_CHROM_LIST:
try:
c1 = cnt_each[chrom][tag]
except KeyError:
c1 = 0
total += c1
cont.append(str_util.comma(c1))
cont.append(str_util.comma(total))
f.write('\t'.join(cont) + '\n')
f.close()
print("Saved",statfile)
def run():
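    # Only prints the per-chromosome commands (intended to be launched
    # externally, e.g. in parallel); nothing is executed here.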
for chrom in seq_util.MAIN_CHROM_LIST:
cmd = "python /home/mk446/bio/mutanno/SRC/scripts/precal_vep/s10_vep_stat.py " + chrom
print(cmd)
def run_more_split():
seq_util.load_refseq_info('b38d')
for chrom in seq_util.MAIN_CHROM_LIST:
chrlen = seq_util.CHROM_LEN['b38d'][chrom]
flag = True
spos = 1
k = 0
while flag:
k += 1
epos = spos + bsize - 1
if epos > chrlen:
epos = chrlen
cmd = "python /home/mk446/bio/mutanno/SRC/scripts/precal_vep/s10_vep_stat.py " + chrom
cmd += " " + str(spos)
cmd += " " + str(epos)
cmd += " " + str(k)
print(cmd)
spos += bsize
if epos >= chrlen or spos >= chrlen:
break
if __name__ == "__main__":
import proc_util
import file_util
import seq_util
import str_util
bsize = 1000000
vepfile = "/home/mk446/bio/mutanno/DATASOURCE/ANNOT/VEP/hg38/v99_SNV/vep.99.hg38.#CHROM#.tsi.gz"
statfile = "/home/mk446/bio/mutanno/DATASOURCE/ANNOT/VEP/hg38/v99_SNV/vep.99.hg38.tag_stat"
if len(sys.argv) == 1:
# run()
# save_stat()
# run_more_split()
save_stat('split')
elif len(sys.argv) == 2:
chrom = sys.argv[1]
s10_vep_stat(chrom)
else:
chrom = sys.argv[1]
spos = int(sys.argv[2])
epos = int(sys.argv[3])
k = sys.argv[4]
s10_vep_stat_splitrun(chrom, spos, epos, k)
| 30.086687 | 106 | 0.481581 | 1,197 | 9,718 | 3.723475 | 0.131161 | 0.037694 | 0.022212 | 0.032309 | 0.633834 | 0.580435 | 0.523222 | 0.48508 | 0.407 | 0.360332 | 0 | 0.029067 | 0.376929 | 9,718 | 322 | 107 | 30.180124 | 0.707019 | 0.020375 | 0 | 0.553903 | 0 | 0.007435 | 0.100169 | 0.04903 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018587 | false | 0 | 0.026022 | 0 | 0.04461 | 0.055762 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea4949bf66190d2757274fe5dfcbe84fddbc6ff5 | 5,579 | py | Python | src/graphics/tguim/scrollablegraphicsitem.py | facade-technologies-inc/facile | 4c9134dced71734641fed605e152880cd9ddefe3 | [
"MIT"
] | 2 | 2020-09-17T20:51:18.000Z | 2020-11-03T15:58:10.000Z | src/graphics/tguim/scrollablegraphicsitem.py | facade-technologies-inc/facile | 4c9134dced71734641fed605e152880cd9ddefe3 | [
"MIT"
] | 97 | 2020-08-26T05:07:08.000Z | 2022-03-28T16:01:49.000Z | src/graphics/tguim/scrollablegraphicsitem.py | facade-technologies-inc/facile | 4c9134dced71734641fed605e152880cd9ddefe3 | [
"MIT"
] | null | null | null | """
..
/------------------------------------------------------------------------------\
| -- FACADE TECHNOLOGIES INC. CONFIDENTIAL -- |
|------------------------------------------------------------------------------|
| |
| Copyright [2019] Facade Technologies Inc. |
| All Rights Reserved. |
| |
| NOTICE: All information contained herein is, and remains the property of |
| Facade Technologies Inc. and its suppliers if any. The intellectual and |
| and technical concepts contained herein are proprietary to Facade |
| Technologies Inc. and its suppliers and may be covered by U.S. and Foreign |
| Patents, patents in process, and are protected by trade secret or copyright |
| law. Dissemination of this information or reproduction of this material is |
| strictly forbidden unless prior written permission is obtained from Facade |
| Technologies Inc. |
| |
\------------------------------------------------------------------------------/
This module contains the ScrollableGraphicsItem class.
"""
from PySide2.QtWidgets import QGraphicsItem, QApplication, QGraphicsView, QGraphicsScene, QGraphicsRectItem
from PySide2.QtGui import QColor, QWheelEvent, Qt, QPen
class ScrollableGraphicsItem(QGraphicsRectItem):
MARGIN = 60 # left and right margin for scrolling
def __init__(self, parent=None):
QGraphicsRectItem.__init__(self, parent)
self.setFlag(QGraphicsItem.ItemClipsChildrenToShape)
self._maxX = 0
# create empty invisible child
self._ghostContainer = QGraphicsRectItem(self)
self._ghostContainer.setFlag(QGraphicsItem.ItemHasNoContents)
self._ghostContainer.setPos(0, 0)
self.contents = [] # all items that we can scroll between
def addItemToContents(self, item):
# assert(item not in self.contents)
# add the item
self.contents.append(item)
item.setParentItem(self._ghostContainer)
# set the position of the item
cumulativeX = self.parentItem().scenePos().x() + self.parentItem().getWindowGraphics().width()
y = self.parentItem().getWindowGraphics().scenePos().y() + self.boundingRect().height()/2 - item.height()/2
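        # NOTE: 'item' was already appended to self.contents above, so the
        # list is never empty here and the else branch below is unreachable.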
if self.contents:
item.prepareGeometryChange()
for i, curItem in enumerate(self.contents):
item.setPos(ScrollableGraphicsItem.MARGIN * (i+1) + cumulativeX, y)
cumulativeX += curItem.width()
self._maxX = cumulativeX + item.width() + ScrollableGraphicsItem.MARGIN*3
else:
self._ghostContainer.setPos(self.scenePos().x(), self.scenePos().y())
item.setPos(ScrollableGraphicsItem.MARGIN, y)
def removeItemFromContents(self, item):
assert(item in self.contents)
# permanently remove the item
self.contents.remove(item)
self.scene().removeItem(item)
self.refreshContents()
def refreshContents(self):
"""
Updates the contents after a change
"""
# remove all other items temporarily
items = self.contents[:]
self.contents = []
for item in items:
self.scene().removeItem(item)
# Add items again to put them in the correct positions
for item in items:
self.addItemToContents(item)
def getMaxX(self):
"""
Gets the max X value
"""
return self._maxX
def getGhost(self):
return self._ghostContainer
def ghostCanGoLeft(self):
br = self.boundingRect()
cbr = self.childrenBoundingRect() # because of clipping, this doesn't go beyond the bounding rect
return cbr.x() + cbr.width() > br.x() + br.width() - ScrollableGraphicsItem.MARGIN
def ghostCanGoRight(self):
return self._ghostContainer.scenePos().x() <= self.scenePos().x()
def wheelEvent(self, event: QWheelEvent) -> None:
oldY = self._ghostContainer.pos().y()
if event.delta() > 0:
if self.ghostCanGoRight():
for i in range(1, 17):
self._ghostContainer.setPos(self._ghostContainer.pos().x() + 1, oldY)
else:
if self.ghostCanGoLeft():
for i in range(1, 17):
self._ghostContainer.setPos(self._ghostContainer.pos().x() - 1, oldY)
def paint(self, painter, option, widget):
pen = QPen(Qt.transparent)
painter.setPen(pen)
if __name__ == "__main__":
app = QApplication()
# create view and scene
view = QGraphicsView()
scene = QGraphicsScene()
view.setScene(scene)
# create scrollable item
scrollableItem = ScrollableGraphicsItem()
scene.addItem(scrollableItem)
scrollableItem.setRect(-250, -50, 500, 100)
# scrollableItem.setBrush(QColor(0, 255, 0))
# create nested items
width = 50
height = 50
buffer = 10
for i in range(15):
item = QGraphicsRectItem(0, 0, width, height)
scrollableItem.addItemToContents(item)
item.setBrush(QColor(255, 0, 0))
view.show()
app.exec_()
| 37.193333 | 115 | 0.560853 | 524 | 5,579 | 5.910305 | 0.379771 | 0.069745 | 0.033904 | 0.027123 | 0.076849 | 0.065224 | 0.041976 | 0.041976 | 0.041976 | 0.041976 | 0 | 0.01426 | 0.308657 | 5,579 | 149 | 116 | 37.442953 | 0.788696 | 0.36261 | 0 | 0.128205 | 0 | 0 | 0.002296 | 0 | 0 | 0 | 0 | 0 | 0.012821 | 1 | 0.128205 | false | 0 | 0.025641 | 0.025641 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea4a0f0736aa62d549c1f95e3b7b79cd22096c1e | 4,200 | py | Python | ghtrack/RequestInit.py | zinaLacina/github-track | 1a1754ff9e6bfffa1f7eab6030cc38ad71715117 | [
"MIT"
] | null | null | null | ghtrack/RequestInit.py | zinaLacina/github-track | 1a1754ff9e6bfffa1f7eab6030cc38ad71715117 | [
"MIT"
] | null | null | null | ghtrack/RequestInit.py | zinaLacina/github-track | 1a1754ff9e6bfffa1f7eab6030cc38ad71715117 | [
"MIT"
] | null | null | null | import json
import logging
from urllib import parse
import requests
from ghtrack.GhTrackException import UnknownApiQueryException
from ghtrack.Util import Util
class RequestInit:
"""This class initialize the requests object with default and required values"""
def __init__(self, token, apiUrl="https://api.github.com/repos/"):
"""Constructor.
token of github for unlimited queries if not provided you can not query more than 60 times
apiUrl the base api url
:param token: Personal github token .
:param apiUrl: The base api url.
"""
self.__tokenHeader = {
"Accept": "application/vnd.github.v3+json"
}
if token:
self.__tokenHeader["Authorization"] = f"token {token}"
self.__apiUrl = apiUrl
"""
This method return the api data in json format for all data not older than the provided param
:param url: str the :owner/:repo_name
:param parameters: str not recommended for now
:param body: str not recommended for now
:param old: int determines how old the data should be
:rtype: :tuple:
"""
def dataRequest(self, url, parameters=None, body="", old: int = 7):
if parameters is None:
parameters = dict()
headers, output = self.__statusCheckedRequest(url, parameters, body)
# output = [row for row in output if Util.oneWeekOld(row["created_at"], old)]
# output = list(filter(lambda row: Util.oneWeekOld(row["created_at"], old), dict(output)))
# page = 2
# while "link" in headers and "next" in headers["link"]:
# parameters["page"] = page
# headers, newOutput = self.__statusCheckedRequest(url, parameters, body)
# output += newOutput
# page += 1
return output
"""
This method check the status of request, you can determine if the repo exists.
:param url: str the :owner/:repo_name
:param parameters: str not recommended for now
:param input: str not recommended for now
:rtype: :int:
"""
def __statusCheckedRequest(self, url, parameters, input):
status, headers, output = self.__jsonRequest(url, parameters, input)
if status < 200 or status >= 300:
raise UnknownApiQueryException(status, output, headers)
return headers, output
"""
This method check the status of request, you can determine if the repo exists.
:param url: str the :owner/:repo_name
:param parameters: str not recommended for now
:param input: str not recommended for now
:rtype: :int:
"""
def statusRequest(self, url, parameters, input):
status, headers, output = self.__jsonRequest(url, parameters, input)
return status
"""
    This method returns the API data in JSON format, along with the status and headers
:param url: str the :owner/:repo_name
:param parameters: str not recommended for now
:param input: str not recommended for now
:rtype: :tuple:
"""
def __jsonRequest(self, url, parameters, input) -> tuple:
fullUrl = self.getCompleteUrl(url, parameters)
try:
response = requests.get(
url=fullUrl,
headers=self.__tokenHeader
)
status = response.status_code
headers = dict(response.headers)
output = response.json()
return status, headers, output
except Exception as ex:
logging.info(f"Exception during json conversion {ex}")
return 200, dict(), []
"""
    This method gives you the absolute url of the public repo you are querying
:param url: str the :owner/:repo_name
:param parameters: str not recommended for now
:rtype: :str:
"""
def getCompleteUrl(self, url, parameters=None):
if parameters is None or len(parameters) == 0:
return f"{self.__apiUrl}{url}"
else:
return url + "?" + parse.urlencode(parameters)
"""
    If you have passed a GitHub token, this method returns the request
    headers, including the token.
    :rtype: :class:`dict`
"""
def getToken(self):
return self.__tokenHeader
| 35 | 102 | 0.630714 | 507 | 4,200 | 5.153846 | 0.285996 | 0.049751 | 0.058553 | 0.068886 | 0.398393 | 0.38385 | 0.31305 | 0.31305 | 0.31305 | 0.283965 | 0 | 0.005288 | 0.279524 | 4,200 | 119 | 103 | 35.294118 | 0.858229 | 0.154048 | 0 | 0.041667 | 0 | 0 | 0.071156 | 0.014327 | 0 | 0 | 0 | 0 | 0 | 1 | 0.145833 | false | 0 | 0.125 | 0.020833 | 0.458333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea4b86dd598e3fa8a985e752ee537d70f29f4407 | 5,681 | py | Python | py/probe_info_service/app_engine/protorpc_utils.py | arccode/factory | a1b0fccd68987d8cd9c89710adc3c04b868347ec | [
"BSD-3-Clause"
] | 3 | 2022-01-06T16:52:52.000Z | 2022-03-07T11:30:47.000Z | py/probe_info_service/app_engine/protorpc_utils.py | arccode/factory | a1b0fccd68987d8cd9c89710adc3c04b868347ec | [
"BSD-3-Clause"
] | null | null | null | py/probe_info_service/app_engine/protorpc_utils.py | arccode/factory | a1b0fccd68987d8cd9c89710adc3c04b868347ec | [
"BSD-3-Clause"
] | 1 | 2021-10-24T01:47:22.000Z | 2021-10-24T01:47:22.000Z | # Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import enum
import http
import logging
import uuid
# pylint: disable=wrong-import-order
import flask
from google.protobuf import symbol_database
# pylint: enable=wrong-import-order
# Referenced from https://grpc.github.io/grpc/core/md_doc_statuscodes.html
class RPCCanonicalErrorCode(enum.Enum):
PERMISSION_DENIED = (7, http.HTTPStatus.FORBIDDEN)
INTERNAL = (13, http.HTTPStatus.INTERNAL_SERVER_ERROR)
NOT_FOUND = (5, http.HTTPStatus.NOT_FOUND)
FAILED_PRECONDITION = (9, http.HTTPStatus.BAD_REQUEST)
ABORTED = (10, http.HTTPStatus.CONFLICT)
UNIMPLEMENTED = (12, http.HTTPStatus.NOT_IMPLEMENTED)
class ProtoRPCException(Exception):
"""RPC exceptions with addition information to set error status/code in stubby
requests."""
def __init__(self, code, detail=None):
super(ProtoRPCException, self).__init__()
self.code = code
self.detail = detail
class _ProtoRPCServiceBaseMeta(type):
"""Metaclass for ProtoRPC classes.
This metaclass customizes class creation flow to parse and convert the
service descriptor object into a friendly data structure for information
looking up in runtime.
"""
# pylint: disable=return-in-init
def __init__(cls, name, bases, attrs, **kwargs):
service_descriptor = attrs.get('SERVICE_DESCRIPTOR')
if service_descriptor:
sym_db = symbol_database.Default()
for method_desc in service_descriptor.methods:
method = getattr(cls, method_desc.name, None)
rpc_method_spec = getattr(method, 'rpc_method_spec', None)
if rpc_method_spec:
rpc_method_spec.request_type = sym_db.GetSymbol(
method_desc.input_type.full_name)
rpc_method_spec.response_type = sym_db.GetSymbol(
method_desc.output_type.full_name)
return super().__init__(name, bases, attrs, **kwargs)
class ProtoRPCServiceBase(metaclass=_ProtoRPCServiceBaseMeta):
"""Base class of a ProtoRPC Service.
Sub-class must override `SERVICE_DESCRIPTOR` to the correct descriptor
instance. To implement the service's methods, author should define class
methods with the same names and decorates it with `ProtoRPCServiceMethod`.
The method will be called with only one argument in type of the request
message defined in the protobuf file, and the return value should be in
type of the response message defined in the protobuf file as well.
"""
SERVICE_DESCRIPTOR = None
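  # A sketch of a conforming subclass (names are made up; the descriptor
  # would come from protobuf-generated *_pb2 code):
  #
  #   class PingService(ProtoRPCServiceBase):
  #     SERVICE_DESCRIPTOR = ping_pb2.DESCRIPTOR.services_by_name['PingService']
  #
  #     @ProtoRPCServiceMethod
  #     def Ping(self, request):  # request: ping_pb2.PingRequest
  #       return ping_pb2.PingResponse(message=request.message)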
class _ProtoRPCServiceMethodSpec:
"""Placeholder for spec of a ProtoRPC method."""
def __init__(self, request_type, response_type):
self.request_type = request_type
self.response_type = response_type
def ProtoRPCServiceMethod(method):
"""Decorator for ProtoRPC methods.
It wraps the target method with type-checking assertions as well as attaching
additional a spec information placeholder.
"""
def wrapper(self, request):
assert isinstance(request, wrapper.rpc_method_spec.request_type)
logging.info("Request:\n%s", request)
response = method(self, request)
assert isinstance(response, wrapper.rpc_method_spec.response_type)
logging.info("Response:\n%s", response)
return response
# Since the service's descriptor will be parsed when the class is created,
# which is later than the invocation time of this decorator, here it just
# place the placeholder with dummy contents.
wrapper.rpc_method_spec = _ProtoRPCServiceMethodSpec(None, None)
return wrapper
class _ProtoRPCServiceFlaskAppViewFunc:
"""A helper class to handle ProtoRPC POST requests on flask apps."""
def __init__(self, service_inst):
self._service_inst = service_inst
def __call__(self, method_name):
method = getattr(self._service_inst, method_name, None)
rpc_method_spec = getattr(method, 'rpc_method_spec', None)
if not rpc_method_spec:
return flask.Response(status=404)
try:
request_msg = rpc_method_spec.request_type.FromString(
flask.request.get_data())
response_msg = method(request_msg)
response_raw_body = response_msg.SerializeToString()
except ProtoRPCException as ex:
rpc_code, http_code = ex.code.value
resp = flask.Response(status=http_code)
resp.headers['RPC-Canonical-Code'] = rpc_code
if ex.detail:
resp.headers['RPC-Error-Detail'] = ex.detail
return resp
except Exception:
logging.exception('Caught exception from RPC method %r.', method_name)
return flask.Response(status=http.HTTPStatus.INTERNAL_SERVER_ERROR)
response = flask.Response(response=response_raw_body)
response.headers['Content-type'] = 'application/octet-stream'
return response
def RegisterProtoRPCServiceToFlaskApp(app_inst, path, service_inst,
service_name=None):
"""Register the given ProtoRPC service to the given flask app.
Args:
app_inst: Instance of `flask.Flask`.
path: Root URL of the service.
service_inst: The ProtoRPC service to register, must be a subclass of
`ProtoRPCServiceBase`.
service_name: Specify the name of the service. Default to
`service_inst.SERVICE_DESCRIPTOR.name`.
"""
service_name = service_name or service_inst.SERVICE_DESCRIPTOR.name
endpoint_name = '__protorpc_service_view_func_' + str(uuid.uuid1())
view_func = _ProtoRPCServiceFlaskAppViewFunc(service_inst)
app_inst.add_url_rule('%s/%s.<method_name>' % (path, service_name),
endpoint=endpoint_name, view_func=view_func,
methods=['POST'])
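# Example wiring (hypothetical service), exposing PingService under /api:
#   app = flask.Flask(__name__)
#   RegisterProtoRPCServiceToFlaskApp(app, '/api', PingService())
#   # clients then POST serialized requests to /api/PingService.<method>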
| 36.651613 | 80 | 0.737546 | 734 | 5,681 | 5.502725 | 0.322888 | 0.028968 | 0.038623 | 0.014855 | 0.11587 | 0.055459 | 0.026244 | 0.026244 | 0.026244 | 0.026244 | 0 | 0.003657 | 0.181834 | 5,681 | 154 | 81 | 36.88961 | 0.865318 | 0.330752 | 0 | 0.04878 | 0 | 0 | 0.062315 | 0.014297 | 0 | 0 | 0 | 0 | 0.02439 | 1 | 0.097561 | false | 0 | 0.073171 | 0 | 0.414634 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea4c3af27eed2a2a310312f2a2351a3fd05c33b7 | 740 | py | Python | tests/test_config.py | druttka/iotedgedev | c1014993410f220cb8646e5bbdc7d87d064e27c5 | [
"MIT"
] | 111 | 2018-04-09T18:24:30.000Z | 2022-03-29T12:12:50.000Z | tests/test_config.py | druttka/iotedgedev | c1014993410f220cb8646e5bbdc7d87d064e27c5 | [
"MIT"
] | 314 | 2018-04-09T19:59:27.000Z | 2022-03-28T12:13:45.000Z | tests/test_config.py | druttka/iotedgedev | c1014993410f220cb8646e5bbdc7d87d064e27c5 | [
"MIT"
] | 45 | 2018-04-09T21:52:23.000Z | 2022-03-23T12:48:01.000Z | import os
import pytest
from iotedgedev.telemetryconfig import TelemetryConfig
pytestmark = pytest.mark.unit
def test_firsttime(request):
config = TelemetryConfig()
def clean():
config_path = config.get_config_path()
if os.path.exists(config_path):
os.remove(config_path)
request.addfinalizer(clean)
clean()
config = TelemetryConfig()
assert config.get(config.DEFAULT_DIRECT, config.FIRSTTIME_SECTION) == 'yes'
assert config.get(config.DEFAULT_DIRECT, config.TELEMETRY_SECTION) is None
config.check_firsttime()
assert config.get(config.DEFAULT_DIRECT, config.FIRSTTIME_SECTION) == 'no'
assert config.get(config.DEFAULT_DIRECT, config.TELEMETRY_SECTION) == 'yes'
| 25.517241 | 79 | 0.72973 | 87 | 740 | 6.034483 | 0.356322 | 0.085714 | 0.142857 | 0.16 | 0.426667 | 0.426667 | 0.426667 | 0.426667 | 0.426667 | 0 | 0 | 0 | 0.172973 | 740 | 28 | 80 | 26.428571 | 0.857843 | 0 | 0 | 0.111111 | 0 | 0 | 0.010811 | 0 | 0 | 0 | 0 | 0 | 0.222222 | 1 | 0.111111 | false | 0 | 0.166667 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea4d14424a3513c41ee8d5ff96fe7901f3943811 | 1,132 | py | Python | projects/microparrot/application.py | TimParrish/methods | c90f734172e7ca2b1a7094c35664498411b3b165 | [
"MIT"
] | 9 | 2019-01-15T16:03:56.000Z | 2019-05-30T01:00:49.000Z | projects/microparrot/application.py | TimParrish/methods | c90f734172e7ca2b1a7094c35664498411b3b165 | [
"MIT"
] | 34 | 2019-01-30T19:02:38.000Z | 2019-04-23T21:20:36.000Z | projects/microparrot/application.py | TimParrish/methods | c90f734172e7ca2b1a7094c35664498411b3b165 | [
"MIT"
] | 27 | 2019-01-15T23:37:21.000Z | 2019-12-26T20:18:24.000Z | from flask import Flask
from flask import jsonify
from flask import request
# A very basic API created using Flask that has two possible routes for requests.
application = Flask(__name__)
# The service basepath has a short response just to ensure that healthchecks
# sent to the service root will receive a healthy response.
@application.route("/")
def entry():
return '''<h1>Project 6 by Joshua Stephenson-Losey</h1>
<h3>You can ask the parrot to speak by typing /parrot?say={what you want to hear} into the url</h3>
<p>for example: project6a.us-east-2.elasticbeanstalk.com/parrot?say=I%20Can%20Talk!</p>
<a href="/parrot?say=I%20Can%20Talk!">Ask the parrot to say I Can Talk!</a>'''
@application.route("/parrot")
def parrot():
repeat = request.args.get('say')
return jsonify(Request=repeat, Responce=repeat)
# return '''<h1>You told me to say: {}</h1>
# <p>Hope you enjoyed hearing it from me</p>'''.format(repeat)
# Run the service on the local server it has been deployed to,
# listening on port 8080.
if __name__ == "__main__":
application.run(host="0.0.0.0", port=8080) | 37.733333 | 103 | 0.704064 | 179 | 1,132 | 4.385475 | 0.530726 | 0.034395 | 0.057325 | 0.035669 | 0.053503 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031016 | 0.174028 | 1,132 | 30 | 104 | 37.733333 | 0.808556 | 0.355124 | 0 | 0 | 0 | 0.1875 | 0.479945 | 0.179806 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.1875 | 0.0625 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea4fad6b72a5329aaf5a7b7cc4dc78470a4798dc | 1,171 | py | Python | linear_regression/lr_sklearn.py | tkornuta/python-sandbox | 00e03cd3f49ebb014611d67aad886aaff04c058f | [
"Apache-2.0"
] | null | null | null | linear_regression/lr_sklearn.py | tkornuta/python-sandbox | 00e03cd3f49ebb014611d67aad886aaff04c058f | [
"Apache-2.0"
] | null | null | null | linear_regression/lr_sklearn.py | tkornuta/python-sandbox | 00e03cd3f49ebb014611d67aad886aaff04c058f | [
"Apache-2.0"
] | null | null | null | # Copyright (C) tkornuta, 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.linear_model import LinearRegression
import numpy as np
ints_str_lst = ["95 85", "85 95", "80 70", "70 65", "60 70"]
# Read ints.
x = []
y = []
for ints_str in ints_str_lst:
ints = [int(x) for x in ints_str.split()]
x.append( [ ints[0] ] )
y.append( ints[1] )
print("x = ", x)
print("y = ", y)
# Fit linear regression model.
lm = LinearRegression()
lm.fit(x, y)
# Y = ax+b
# Print coefficients.
a = lm.coef_[0]
b = lm.intercept_
print("a =", a)
print("b =", b)
# Print value for 80.
x1 = np.asarray([80]).reshape(-1, 1)
print(lm.predict(x1))
print(a*80 + b)
| 23.897959 | 74 | 0.681469 | 194 | 1,171 | 4.06701 | 0.520619 | 0.076046 | 0.032953 | 0.040558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043113 | 0.187874 | 1,171 | 48 | 75 | 24.395833 | 0.78654 | 0.546541 | 0 | 0 | 0 | 0 | 0.076471 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0.3 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea551c83f92115408f1dcea47e0080ac15ae1ae4 | 1,611 | py | Python | artbot_scraper/spiders/sarah_cottier_spider.py | coreymcdermott/artbot | 848e85d0be0c58b7803d4bd1631a0cef63abb72d | [
"MIT"
] | 3 | 2016-03-04T02:53:05.000Z | 2021-12-02T20:50:11.000Z | artbot_scraper/spiders/sarah_cottier_spider.py | coreymcdermott/artbot | 848e85d0be0c58b7803d4bd1631a0cef63abb72d | [
"MIT"
] | 14 | 2020-02-11T21:53:12.000Z | 2022-03-11T23:16:12.000Z | artbot_scraper/spiders/sarah_cottier_spider.py | coreymcdermott/artbot | 848e85d0be0c58b7803d4bd1631a0cef63abb72d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re
from dateutil import parser
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from artbot_scraper.items import EventItem
from pytz import timezone
class SarahCottierGallery(CrawlSpider):
name = 'Sarah Cottier Gallery'
allowed_domains = ['sarahcottiergallery.com']
start_urls = ['http://www.sarahcottiergallery.com/exhibition.htm']
rules = (Rule(LinkExtractor(allow=('exhibition/.+', )), callback='parse_exhibition'),)
download_delay = 16
def parse_exhibition(self, response):
item = EventItem()
item['url'] = response.url
item['venue'] = self.name
item['image'] = None
# If JavaScript is exectued, image could be extracted with
# response.xpath('//span[contains(@id, "media_holder")]/img/@src').extract_first()
alt = response.xpath('//a[contains(@href,"' + response.url + '")]/img/@alt').extract_first()
match = re.search('(?P<title>[\w+\s+]*)(?P<start>\d+[\s+\w+]*)[\s\-]*(?P<end>\d+\s+\w+,\s+\d+)', alt)
if (match):
tz = timezone('Australia/Sydney')
item['end'] = tz.localize(parser.parse(match.group('end')))
item['start'] = tz.localize(parser.parse(match.group('start')))
item['title'] = match.group('title').strip()
else:
# Can't extract end, start, and title - Dump whole string into title, fix manually via admin.
item['title'] = alt
yield item
| 41.307692 | 109 | 0.588454 | 183 | 1,611 | 5.131148 | 0.535519 | 0.00639 | 0.00639 | 0.00852 | 0.066028 | 0.066028 | 0 | 0 | 0 | 0 | 0 | 0.002492 | 0.252638 | 1,611 | 38 | 110 | 42.394737 | 0.777409 | 0.155804 | 0 | 0 | 0 | 0.037037 | 0.213284 | 0.072325 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.222222 | 0 | 0.481481 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea5575eee5bc79e96867d4d42bf0613952e702b5 | 989 | py | Python | mergecsv.py | Neuromancer2701/GIS_parser | e34b77d2837743a1795b0c188638dbdf5b69c101 | [
"MIT"
] | null | null | null | mergecsv.py | Neuromancer2701/GIS_parser | e34b77d2837743a1795b0c188638dbdf5b69c101 | [
"MIT"
] | null | null | null | mergecsv.py | Neuromancer2701/GIS_parser | e34b77d2837743a1795b0c188638dbdf5b69c101 | [
"MIT"
] | null | null | null | import csv
from collections import OrderedDict
csv_read = "/opt/repos/GIS_parser/Bedford_County_Parcels.csv"
csv_zipcodes = "/opt/repos/GIS_parser/Parcels_ZipCodes.csv"
objectkey = "OBJECTID"
postalkey = "PostalCode"
objectkey2 = '\xef\xbb\xbfOBJECTID'
zipDictionary = OrderedDict()
with open(csv_zipcodes, 'rb') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
zipDictionary[row[objectkey]] = row[postalkey]
rows = []
with open(csv_read, 'rb') as csvfile:
    baseDictionary = csv.DictReader(csvfile)
    fieldnames = baseDictionary.fieldnames
    for row in baseDictionary:
        # Direct dictionary lookup instead of scanning every zip entry per row
        key = row[objectkey2]
        if key in zipDictionary:
            row[postalkey] = zipDictionary.pop(key)
        rows.append(row)
with open("Bedford_County_Parcels_zipcodes.csv", 'wb') as csvfile:
    # DictWriter needs fieldnames, and rows must be buffered because the
    # source file is already closed at this point
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    writer.writerows(rows)
ea5652edf18cbbb9562350ad05e163b6a8f1df9f | 1,205 | py | Python | IO.py | artur-szpot/python-modules | fad840131c67a8815d0f6055311f84d26f6882dd | [
"MIT"
] | null | null | null | IO.py | artur-szpot/python-modules | fad840131c67a8815d0f6055311f84d26f6882dd | [
"MIT"
] | null | null | null | IO.py | artur-szpot/python-modules | fad840131c67a8815d0f6055311f84d26f6882dd | [
"MIT"
] | null | null | null | """
A set of utilitarian functions to facilitate cooperation with the file system.
"""
import os
def get_all_file_paths(directory):
""" Return a list of full file paths inside a given directory. """
file_paths = []
for root, directories, files in os.walk(directory):
for filename in files:
filepath = os.path.join(root, filename)
file_paths.append(filepath)
return file_paths
def create_directory(path):
""" Create a directory structure if it doesn't exist. """
success = 1
if not os.path.isdir(path):
paths = path.split('/')
if len(paths) == 1:
            paths = path.split('\\')
for i in range(len(paths)):
if not create_single_directory('/'.join(paths[:i+1])):
success = 0
return success
def create_single_directory(path):
"""
Create a directory if it doesn't exist.
Cannot generate nested structure - use create_directory if unsure.
"""
success = 1
if not os.path.isdir(path):
try:
os.mkdir(path)
except OSError:
print('Failed to create {} directory.'.format(path))
success = 0
return success
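# Example (paths are illustrative):
#   create_directory('output/run1/logs')      # builds the nested dirs, 1 on success
#   for p in get_all_file_paths('output'):    # walk everything under 'output'
#       print(p)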
| 29.390244 | 78 | 0.605809 | 155 | 1,205 | 4.632258 | 0.412903 | 0.062674 | 0.052925 | 0.05571 | 0.200557 | 0.077994 | 0.077994 | 0.077994 | 0 | 0 | 0 | 0.007001 | 0.288797 | 1,205 | 40 | 79 | 30.125 | 0.830805 | 0.245643 | 0 | 0.296296 | 0 | 0 | 0.03908 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.037037 | 0 | 0.259259 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea574854da0114ed29a0d46d9bd0603aef04f210 | 1,429 | py | Python | config.py | dr-yali/Bone-MRI | 54c50b2da26190575ad0913f715bc15a7dbd857f | [
"MIT"
] | null | null | null | config.py | dr-yali/Bone-MRI | 54c50b2da26190575ad0913f715bc15a7dbd857f | [
"MIT"
] | null | null | null | config.py | dr-yali/Bone-MRI | 54c50b2da26190575ad0913f715bc15a7dbd857f | [
"MIT"
] | null | null | null | #git clone https://github.com/ANTsX/ANTsPy
#cd ANTsPy
#python setup.py install
import os
import logging
class Config(object):
IMAGE_SIZE = 200
TRIALS = 1
BATCH_SIZE = 64
EPOCHS = 1
PATIENCE = 200
VALIDATION_SPLIT = 0.2
TEST_SPLIT = 0.1
OUTCOME_BIAS = "pos"
EXPERTS = "experts.csv"
DEVELOPMENT = True
DEBUG = True
PRINT_SQL = False
SECRET = "example secret key"
LOG_LEVEL = logging.DEBUG
RAW_NRRD_ROOT = "raw_data/"
RAW_FEATURES = "features.csv"
DATA = "data_dir/"
PREPROCESSED_DIR = os.path.join(DATA, "preprocessed")
TRAIN_DIR = os.path.join(DATA, "train")
TEST_DIR = os.path.join(DATA, "test")
VALIDATION_DIR = os.path.join(DATA, "validation")
CROSSVAL_DIR = os.path.join(DATA, "crossval")
FIGURES = "figures/"
NOTEBOOKS = "notebooks/"
FEATURES_DIR = "features/"
NRRD_FEATURES = os.path.join(FEATURES_DIR, "nrrd-features.pkl")
FEATURES = os.path.join(FEATURES_DIR, "training-features.pkl")
PREPROCESS = os.path.join(FEATURES_DIR, "preprocess.pkl")
INPUT_FORM = "t2"
OUTPUT = "output"
DB_URL = "sqlite:///{}/results.db".format(OUTPUT)
MODEL_DIR = os.path.join(OUTPUT, "models")
STDOUT_DIR = os.path.join(OUTPUT, "stdout")
STDERR_DIR = os.path.join(OUTPUT, "stderr")
MAIN_TEST_HOLDOUT = 0.2
NUMBER_OF_FOLDS = 4
SPLIT_TRAINING_INTO_VALIDATION = 0.1
config = Config()
| 23.816667 | 67 | 0.659902 | 192 | 1,429 | 4.734375 | 0.4375 | 0.072607 | 0.121012 | 0.114411 | 0.243124 | 0.063806 | 0 | 0 | 0 | 0 | 0 | 0.017778 | 0.212736 | 1,429 | 59 | 68 | 24.220339 | 0.790222 | 0.051085 | 0 | 0 | 0 | 0 | 0.169254 | 0.03252 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.04878 | 0 | 0.97561 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea59c81561ac953193bac07bd5b7c3661a03bbc5 | 740 | py | Python | patamaen/features/environment.py | i3thuan5/Patamaen | 7e61cfdc68d83d5f8dd1a23d596287fd28291827 | [
"MIT"
] | null | null | null | patamaen/features/environment.py | i3thuan5/Patamaen | 7e61cfdc68d83d5f8dd1a23d596287fd28291827 | [
"MIT"
] | null | null | null | patamaen/features/environment.py | i3thuan5/Patamaen | 7e61cfdc68d83d5f8dd1a23d596287fd28291827 | [
"MIT"
] | null | null | null | from behave import use_fixture
from behave.fixture import fixture
import behave_webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
def before_all(context):
use_fixture(browser_chrome, context, timeout=10)
def after_step(context, step):
context.browser.get_screenshot_as_file(
'behave_steps/{}.png'.format(step)
)
@fixture
def browser_chrome(context, timeout=30, **kwargs):
# -- SETUP-FIXTURE PART:
context.browser = behave_webdriver.Remote(
command_executor='http://localhost:4444/wd/hub',
desired_capabilities=DesiredCapabilities.CHROME
)
context.browser.implicitly_wait(30)
yield
# -- CLEANUP-FIXTURE PART:
context.browser.quit()
| 26.428571 | 78 | 0.743243 | 87 | 740 | 6.137931 | 0.517241 | 0.104869 | 0.074906 | 0.101124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016051 | 0.158108 | 740 | 27 | 79 | 27.407407 | 0.841091 | 0.063514 | 0 | 0 | 0 | 0 | 0.068116 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.210526 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea5a66150c00be0346b8da75018daa7f0d2f15fe | 10,129 | py | Python | dodge/level.py | MoyTW/7DRL2016_Rewrite | 99e092dcb8797a25caa3c8a989a574efae19e4d4 | [
"MIT"
] | 2 | 2020-05-10T02:16:28.000Z | 2021-04-05T21:54:10.000Z | dodge/level.py | MoyTW/7DRL2016_Rewrite | 99e092dcb8797a25caa3c8a989a574efae19e4d4 | [
"MIT"
] | null | null | null | dodge/level.py | MoyTW/7DRL2016_Rewrite | 99e092dcb8797a25caa3c8a989a574efae19e4d4 | [
"MIT"
] | null | null | null | import dodge.components as components
from dodge.constants import ComponentType, EventParam, EventType, Factions
from dodge.fov import FOVMap
from dodge.entity import Entity
from dodge.event import Event
from dodge.paths import LinePath
import math
class Tile(object):
def __init__(self, blocked, block_sight=None):
self.blocked = blocked
self.explored = False
if block_sight is not None:
self.block_sight = block_sight
else:
self.block_sight = blocked
class Zone(object):
def __init__(self, x, y, w, h, name):
self.x1 = x
self.y1 = y
self.x2 = x + w
self.y2 = y + h
self.name = 'Zone ' + str(name)
self.encounter = None
self.summary = None
def build_summary(self, level, has_intel):
raise NotImplementedError()
class Level(object):
def __init__(self, width, height, config):
self._width = width
self._height = height
self.config = config
self._num_added = 0
def is_edge(tx, ty): return tx == 0 or ty == 0 or tx == width - 1 or ty == height - 1
self._tiles = [[Tile(True, True) if is_edge(x, y) else Tile(False) for y in range(height)]
for x in range(width)]
self.zones = []
self._entities = {}
self.fov_map = None # type: FOVMap
self.rebuild_fov()
def rebuild_fov(self):
self.fov_map = FOVMap(self.width, self.height)
for y in range(self.height):
for x in range(self.width):
self.fov_map.set_tile_properties(x, y, not self[x][y].block_sight, not self[x][y].blocked)
# Allow by-index access
def __getitem__(self, index):
return self._tiles[index]
@property
def width(self):
return self._width
@property
def height(self):
return self._height
def add_entity(self, entity: Entity):
if entity.has_component(ComponentType.POSITION):
entity.add_order = self._num_added
self._num_added += 1
self._entities[entity.eid] = entity
else:
raise ValueError('Cannot add an entity to the level if it has no position!')
def remove_entity(self, entity):
self._entities.pop(entity.eid)
def has_entity_with_id(self, eid):
return eid in self._entities
def get_entity_by_id(self, eid) -> Entity:
return self._entities[eid]
# TODO: Don't do full scan every time
    def get_entities_in_position(self, x, y, blocks_only=False) -> List[Entity]:
        """Return all entities located at tile (x, y); with blocks_only=True,
        only those whose position component blocks movement."""
have_pos = self.entities_with_component(ComponentType.POSITION)
in_pos = []
for entity in have_pos:
pos = entity.get_component(ComponentType.POSITION)
            if x == pos.x and y == pos.y and (not blocks_only or pos.blocks):
in_pos.append(entity)
return in_pos
def get_player_entity(self):
return self.entities_with_component(ComponentType.PLAYER)[0]
def get_player_position(self):
player_position = self.get_player_entity().get_component(ComponentType.POSITION)
return player_position.x, player_position.y
# TODO: When you actually invoke this, don't full scan every time
def get_entities_in_area(self, x1, y1, x2, y2):
"""Returns all entities in (x1-x2, y1-y2), inclusive."""
have_pos = self.entities_with_component(ComponentType.POSITION)
in_area = []
for entity in have_pos:
pos = entity.get_component(ComponentType.POSITION)
if x1 <= pos.x <= x2 and y1 <= pos.y <= y2:
in_area.append(entity)
return in_area
def get_entities_in_radius(self, x, y, radius):
in_area = self.get_entities_in_area(x - radius, y - radius, x + radius, y + radius)
in_radius = []
for entity in in_area:
position = entity.get_component(ComponentType.POSITION)
dx = x - position.x
dy = y - position.y
if math.sqrt(dx ** 2 + dy ** 2) <= radius:
in_radius.append(entity)
return in_radius
def entities_with_component(self, component_type):
return [e for e in self._entities.values() if e.has_component(component_type)]
def entities_with_components(self, component_types):
return [e for e in self._entities.values() if e.has_components(component_types)]
def in_fov(self, x, y):
return self.fov_map.in_fov(x, y)
def set_blocked(self, x, y, blocked):
self[x][y].blocked = blocked
self.fov_map.set_tile_properties(x, y, not self[x][y].block_sight, not self[x][y].blocked)
def is_walkable(self, x, y, terrain_only=False):
terrain_walkable = self.fov_map.is_walkable(x, y)
if terrain_only:
return terrain_walkable
else:
entities_in_pos = self.get_entities_in_position(x, y)
an_entity_blocks = False
for entity in entities_in_pos:
if entity.get_component(ComponentType.POSITION).blocks:
an_entity_blocks = True
break
            return terrain_walkable and not an_entity_blocks
def recompute_fov(self):
# Assumes only 1 player-controlled unit
player = self.get_player_entity()
position = player.get_component(ComponentType.POSITION)
self.fov_map.recompute_fov(position.x, position.y, self.config.VISION_RADIUS, self.config.FOV_LIGHT_WALLS,
self.config.FOV_ALGO)
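# --- Hedged usage sketch (assumes a config object exposing VISION_RADIUS,
# FOV_LIGHT_WALLS and FOV_ALGO, as consumed by recompute_fov above, and a
# player_entity carrying PLAYER and POSITION components):
#
#   level = Level(width=80, height=50, config=config)
#   level.add_entity(player_entity)
#   level.recompute_fov()
#   if level.is_walkable(5, 6):
#       pass  # e.g. move the player one tile down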
class SillyLevelBuilder:
def build_zone(self, zone_params):
raise NotImplementedError()
@staticmethod
def build_level(game_state, level_params):
laser_render_info = components.RenderInfo(' ', (0, 0, 0)) # TODO: Make configurable
cutting_laser = Entity(eid='cutter',
name='cutting laser',
components=[components.Weapon(event_stack=game_state.event_stack,
projectile_name='laser',
path=LinePath,
power=10,
speed=0,
targeting_radius=3,
render_info=laser_render_info),
components.Mountable('turret')]) # TODO: Constant-ify
test_item = Entity(eid='test_item', name='test_item', components=[
components.Item(),
components.HealUse(game_state.event_stack, 9999)
])
game_state.player = Entity(eid='player',
name='player',
components=[
components.Inventory(game_state.event_stack, 26),
components.Faction(Factions.ASSASSIN),
components.Player(game_state.event_stack, target_faction=Factions.DEFENDER),
components.Mountings(['turret']), # TODO: Constant-ify
components.Actor(game_state.event_stack, 100),
components.Destructible(game_state.event_stack, 100, 0),
components.Position(game_state.event_stack, 5, 5, True),
components.Renderable('@', (255, 255, 255))])
mount_laser = Event(EventType.MOUNT_ITEM, {EventParam.HANDLER: game_state.player,
EventParam.ITEM: cutting_laser})
game_state.player.handle_event(mount_laser)
add_item = Event(EventType.ADD_ITEM_TO_INVENTORY, {EventParam.HANDLER: game_state.player,
EventParam.ITEM: test_item})
game_state.player.handle_event(add_item)
test_enemy = Entity(eid='test_enemy',
name='test_enemy',
components=[components.Mountings(['turret']), # TODO: Constant-ify
components.Faction(Factions.DEFENDER),
components.AI(game_state.event_stack),
components.Actor(game_state.event_stack, 100),
components.Destructible(game_state.event_stack, 100, 0),
components.Position(game_state.event_stack, 10, 10, True),
components.Renderable('E', (0, 255, 0))])
game_state.event_stack.push(Event(EventType.ACTIVATE, {EventParam.HANDLER: test_enemy}))
cannon = Entity(eid='cannon',
name='cannon',
components=[components.Weapon(event_stack=game_state.event_stack,
projectile_name='shell',
path=LinePath,
power=10,
speed=30,
targeting_radius=8,
render_info=components.RenderInfo('.', (255, 0, 0))),
components.Mountable('turret')])
mount_cannon = Event(EventType.MOUNT_ITEM, {EventParam.HANDLER: test_enemy, EventParam.ITEM: cannon})
test_enemy.handle_event(mount_cannon)
# TODO: This should be in a proper level gen!
game_state.level.add_entity(game_state.player)
game_state.level.add_entity(test_enemy)
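# --- Hedged usage sketch: build_level mutates the passed-in game state, so a
# caller (names assumed for illustration) would do something like:
#
#   SillyLevelBuilder.build_level(game_state, level_params=None)
#   assert game_state.level.has_entity_with_id('player')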
| 44.03913 | 115 | 0.554941 | 1,134 | 10,129 | 4.742504 | 0.181658 | 0.036817 | 0.033842 | 0.045928 | 0.285422 | 0.216995 | 0.205281 | 0.163258 | 0.150986 | 0.130532 | 0 | 0.012726 | 0.356106 | 10,129 | 229 | 116 | 44.231441 | 0.811868 | 0.047487 | 0 | 0.147541 | 0 | 0 | 0.018713 | 0 | 0 | 0 | 0 | 0.008734 | 0 | 1 | 0.142077 | false | 0 | 0.038251 | 0.054645 | 0.284153 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea5bb724137ff9750e6ee50b65c5082e10c0727e | 64,192 | py | Python | code/functions/plot_tools.py | manuhuth/ModellingVaccineAllocations | adf784e6badc73a6ca1adb707ec5ae8d99bca183 | [
"MIT"
] | null | null | null | code/functions/plot_tools.py | manuhuth/ModellingVaccineAllocations | adf784e6badc73a6ca1adb707ec5ae8d99bca183 | [
"MIT"
] | null | null | null | code/functions/plot_tools.py | manuhuth/ModellingVaccineAllocations | adf784e6badc73a6ca1adb707ec5ae8d99bca183 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.interpolate import CubicHermiteSpline as cbs
from matplotlib.gridspec import GridSpec
from numpy import trapz
import matplotlib as mpl
from scipy.ndimage import uniform_filter1d
mpl.rcParams["axes.spines.top"] = True
mpl.rcParams["axes.spines.right"] = True
def plot_bars_deaths(
dict_output,
ax=None,
case="initalEqual_vacEqual",
total=False,
unit=10 ** 6,
title="Equal initial states; Equal vaccines",
xlabel="Areas",
ylabel="Number of deaths",
label_optimal="Optimal",
label_Pareto="Pareto",
label_population="Population",
ylim=None,
):
# preprocess
output = dict_output[case]
appended_df = output["optimal_strategies"].append(output["pareto_frontier"])
country_names = [
x for x in appended_df.columns if "country" in x and not ("_" in x)
]
add_row = dict(
zip(
country_names + ["fval"],
output["population_based"] + [np.sum(output["population_based"])],
)
)
appended_df = appended_df.append(add_row, ignore_index=True)
unrestricted_min = appended_df.iloc[np.argmin(appended_df["fval"])][
["fval"] + country_names
]
pareto_df = appended_df[appended_df["countryA"] <= output["population_based"][0]]
for i in range(len(country_names)):
pareto_df = pareto_df[
pareto_df[country_names[i]] <= output["population_based"][i]
]
pareto_optimal = pareto_df.iloc[np.argmin(pareto_df["fval"])][
["fval"] + country_names
]
population_based = pd.Series(
[np.sum(output["population_based"])] + output["population_based"],
index=["fval"] + country_names,
)
    if total is True:
        X = ["Total"] + country_names
        unrestricted = np.round(list(unrestricted_min / unit), 2)
        pareto = np.round(list(pareto_optimal / unit), 2)
        population = np.round(list(population_based / unit), 2)
        X_axis = np.arange(len(X))
        if ax is None:
            fig, ax = plt.subplots()
        rects1 = ax.bar(X_axis - 0.2, unrestricted, 0.2, label=label_optimal)
        rects2 = ax.bar(X_axis, pareto, 0.2, label=label_Pareto)
        # Plot the population-based numbers, not a copy of the Pareto bars.
        rects3 = ax.bar(X_axis + 0.2, population, 0.2, label=label_population)
ax.set_xticks(X_axis)
ax.set_xticklabels(X)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.legend(
loc="upper center",
bbox_to_anchor=(0.5, -0.14),
fancybox=True,
shadow=True,
ncol=2,
)
elif total is False:
X = ["Total"] + country_names
unrestricted = np.round(
list(((unrestricted_min - population_based) / population_based) * 100), 2
)
pareto = np.round(
list(((pareto_optimal - population_based) / population_based) * 100), 2
)
X_axis = np.arange(len(X))
if ax is None:
fig, ax = plt.subplots()
rects1 = ax.bar(
X_axis - 0.2, unrestricted, 0.4, label=label_optimal, edgecolor="black"
)
rects2 = ax.bar(
X_axis + 0.2, pareto, 0.4, label=label_Pareto, edgecolor="black"
)
ax.set_xticks(X_axis)
ax.set_xticklabels(X)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.bar_label(rects1, padding=5, fmt="%.1f%%")
ax.bar_label(rects2, padding=5, fmt="%.1f%%")
mini = np.min([unrestricted, pareto])
maxi = np.max([unrestricted, pareto])
ax.set_ylim([1.2 * mini, 1.2 * maxi])
# ax.legend(
# loc="upper center",
# bbox_to_anchor=(0.5, -0.14),
# fancybox=True,
# shadow=True,
# ncol=2,
# )
if not (ylim is None):
ax.set_ylim(ylim)
return ax
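# --- Hedged usage sketch for plot_bars_deaths (dict_output layout as consumed
# above: per-case dicts holding "optimal_strategies", "pareto_frontier" and
# "population_based"; the output file name is illustrative):
#
#   fig, ax = plt.subplots()
#   plot_bars_deaths(dict_output, ax=ax, case="initalEqual_vacEqual",
#                    total=False, ylim=[-10, 10])
#   fig.savefig("bars_deaths.png")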
def plot_bars_deaths2(
dict_output,
ax=None,
case="initalEqual_vacEqual",
total=False,
unit=10 ** 6,
title="Equal initial states; Equal vaccines",
xlabel="Areas",
ylabel="Number of deaths",
label_optimal="Optimal",
label_Pareto="Pareto",
label_population="Population",
ylim=None,
):
# preprocess
output = dict_output[case]
appended_df = output["optimal_strategies"].append(output["pareto_frontier"])
country_names = [
x for x in appended_df.columns if "country" in x and not ("_" in x)
]
appended_df = output["all_strategies"]
unrestricted_min = appended_df.iloc[np.argmin(appended_df["fval"])][
["fval"] + country_names
]
pareto_df = output["pareto_improvements"]
pareto_optimal = pareto_df.iloc[np.argmin(pareto_df["fval"])][
["fval"] + country_names
]
population_based = pd.Series(
[np.sum(output["population_based"])] + output["population_based"],
index=["fval"] + country_names,
)
    if total is True:
        X = ["Total"] + country_names
        unrestricted = list(unrestricted_min / unit)
        pareto = list(pareto_optimal / unit)
        population = list(population_based / unit)
        X_axis = np.arange(len(X))
        if ax is None:
            fig, ax = plt.subplots()
        ax.bar(X_axis - 0.2, unrestricted, 0.2, label=label_optimal)
        ax.bar(X_axis, pareto, 0.2, label=label_Pareto)
        # Plot the population-based numbers, not a copy of the Pareto bars.
        ax.bar(X_axis + 0.2, population, 0.2, label=label_population)
ax.set_xticks(X_axis)
ax.set_xticklabels(X)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.legend(
loc="upper center",
bbox_to_anchor=(0.5, -0.14),
fancybox=True,
shadow=True,
ncol=2,
)
elif total is False:
X = ["Total"] + country_names
unrestricted = list(
((unrestricted_min - population_based) / population_based) * 100
)
pareto = list(((pareto_optimal - population_based) / population_based) * 100)
X_axis = np.arange(len(X))
if ax is None:
fig, ax = plt.subplots()
ax.bar(X_axis - 0.2, unrestricted, 0.4, label=label_optimal)
ax.bar(X_axis + 0.2, pareto, 0.4, label=label_Pareto)
ax.set_xticks(X_axis)
ax.set_xticklabels(X)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
# for i in range(len(X)):
# ax.annotate(np.round(unrestricted[0],4), (-0.25 + X_axis[0], unrestricted[0] - 0.1 ))
# ax.legend(
# loc="upper center",
# bbox_to_anchor=(0.5, -0.14),
# fancybox=True,
# shadow=True,
# ncol=2,
# )
if not (ylim is None):
ax.set_ylim(ylim)
return ax
def plot_pareto_front(
dict_output,
case,
fig=None,
ax=None,
size_optimal=10,
size_points=3,
title="",
alpha=0.3,
color_pareto="C2",
linewidth_pareto=0.8,
xlabel="Country A",
ylabel="Country B",
color_min="C0",
color_pareto_improvement="C1",
):
pareto_x = dict_output[case]["pareto_frontier"]["countryA"]
pareto_y = dict_output[case]["pareto_frontier"]["countryB"]
pareto = dict_output[case]["population_based"]
output = dict_output[case]
appended_df = output["optimal_strategies"].append(output["pareto_frontier"])
country_names = [
x for x in appended_df.columns if "country" in x and not ("_" in x)
]
add_row = dict(
zip(
country_names + ["fval"],
output["population_based"] + [np.sum(output["population_based"])],
)
)
appended_df = appended_df.append(add_row, ignore_index=True)
amin = np.argmin(appended_df["fval"])
pareto_optimal_df = appended_df[
(appended_df["countryA"] <= pareto[0]) & (appended_df["countryB"] <= pareto[1])
].reset_index()
pareto_amin = np.argmin(pareto_optimal_df["fval"])
if ax is None:
fig, ax = plt.subplots()
im = ax.scatter(
pareto_x,
pareto_y,
c=pareto_x + pareto_y,
s=size_points,
)
minA = np.min(pareto_x)
minB = np.min(pareto_y)
ax.fill_between(
x=[minA, pareto[0]],
y1=[pareto[1], pareto[1]],
y2=[minB, minB],
alpha=alpha,
color=color_pareto,
)
ax.hlines(
appended_df["countryB"][amin],
xmin=minA,
xmax=appended_df["countryA"][amin],
linewidth=linewidth_pareto,
linestyle="dashed",
color=color_min,
)
ax.vlines(
appended_df["countryA"][amin],
ymin=minB,
ymax=appended_df["countryB"][amin],
linewidth=linewidth_pareto,
linestyle="dashed",
color=color_min,
)
ax.hlines(
pareto_optimal_df["countryB"][pareto_amin],
xmin=minA,
xmax=pareto_optimal_df["countryA"][pareto_amin],
linewidth=linewidth_pareto,
linestyle="dashed",
color=color_pareto_improvement,
)
ax.vlines(
pareto_optimal_df["countryA"][pareto_amin],
ymin=minB,
ymax=pareto_optimal_df["countryB"][pareto_amin],
linewidth=linewidth_pareto,
linestyle="dashed",
color=color_pareto_improvement,
)
cbar = fig.colorbar(im, ax=ax)
cbar.formatter.set_powerlimits((0, 0))
ax.scatter(
appended_df["countryA"][amin],
appended_df["countryB"][amin],
color="firebrick",
s=size_optimal,
)
ax.scatter(
pareto_optimal_df["countryA"][pareto_amin],
pareto_optimal_df["countryB"][pareto_amin],
color="seagreen",
s=size_optimal,
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.ticklabel_format(axis="both", style="sci", scilimits=(0, 0))
ax.set_title(title)
return ax
def plot_pareto_front2(
dict_output,
case,
fig=None,
ax=None,
size_optimal=10,
size_points=3,
title="",
alpha=0.3,
color_pareto="C0",
linewidth_pareto=0.8,
xlabel="Country A",
ylabel="Country B",
):
pareto_x = dict_output[case]["pareto_frontier"]["countryA"]
pareto_y = dict_output[case]["pareto_frontier"]["countryB"]
pareto = dict_output[case]["population_based"]
output = dict_output[case]
appended_df = output["all_strategies"]
amin = np.argmin(appended_df["fval"])
pareto_optimal_df = output["pareto_improvements"]
pareto_amin = np.argmin(pareto_optimal_df["fval"])
if ax is None:
fig, ax = plt.subplots()
im = ax.scatter(
pareto_x,
pareto_y,
c=pareto_x + pareto_y,
s=size_points,
)
minA = np.min(pareto_x)
minB = np.min(pareto_y)
ax.fill_between(
x=[minA, pareto[0]],
y1=[pareto[1], pareto[1]],
y2=[minB, minB],
alpha=alpha,
color=color_pareto,
)
ax.hlines(
appended_df["countryB"][amin],
xmin=minA,
xmax=appended_df["countryA"][amin],
linewidth=linewidth_pareto,
linestyle="dashed",
color="firebrick",
)
ax.vlines(
appended_df["countryA"][amin],
ymin=minB,
ymax=appended_df["countryB"][amin],
linewidth=linewidth_pareto,
linestyle="dashed",
color="firebrick",
)
ax.hlines(
pareto_optimal_df["countryB"][pareto_amin],
xmin=minA,
xmax=pareto_optimal_df["countryA"][pareto_amin],
linewidth=linewidth_pareto,
linestyle="dashed",
color="seagreen",
)
ax.vlines(
pareto_optimal_df["countryA"][pareto_amin],
ymin=minB,
ymax=pareto_optimal_df["countryB"][pareto_amin],
linewidth=linewidth_pareto,
linestyle="dashed",
color="seagreen",
)
cbar = fig.colorbar(im, ax=ax)
cbar.formatter.set_powerlimits((0, 0))
ax.scatter(
appended_df["countryA"][amin],
appended_df["countryB"][amin],
color="firebrick",
s=size_optimal,
)
ax.scatter(
pareto_optimal_df["countryA"][pareto_amin],
pareto_optimal_df["countryB"][pareto_amin],
color="seagreen",
s=size_optimal,
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.ticklabel_format(axis="both", style="sci", scilimits=(0, 0))
ax.set_title(title)
return ax
# From Lorenzos Code------------------------------------------------------------------------
def finite_differences(xx, yy):
dd = []
fd = onesidedFD(yy[0], yy[1], xx[1] - xx[0])
dd.append(fd)
for i in range(1, len(xx) - 1):
dd.append(
centeredFD(
yy[i - 1], yy[i], yy[i + 1], xx[i] - xx[i - 1], xx[i + 1] - xx[i]
)
)
fd = onesidedFD(yy[-2], yy[-1], xx[-1] - xx[-2])
dd.append(fd)
return np.asarray(dd)
def onesidedFD(y0, y1, h):
return (y1 - y0) * 1 / h
def centeredFD(ym1, y0, yp1, hm, hp):
if hm == hp:
return (yp1 - ym1) / (2 * hm)
else:
return ((yp1 - y0) / hp + (y0 - ym1) / hm) / 2
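# --- Hedged sanity check (illustrative, not part of the original module):
# centered differences are exact for quadratics, so y = t**2 on a uniform grid
# recovers dy/dt = 2*t at the interior points (the endpoints are one-sided and
# only first-order accurate):
#
#   xx = np.linspace(0.0, 1.0, 5)
#   dd = finite_differences(xx, xx ** 2)
#   assert np.allclose(dd[1:-1], 2 * xx[1:-1])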
# ----------------------------------------------------------------------------------------
def get_spline(array, periods, length, total_length, grid_points=6000, transform=True):
    y = np.array(np.log(array / (1 - array)))
    x = np.linspace(0, periods * length, periods + 1)
    fd = finite_differences(x, y)
    spline = cbs(x, y, fd)
    grid = np.linspace(0, total_length, grid_points)
    spline_vals = spline(grid)
    if transform:
        # Map the logit-space spline values back into (0, 1).
        return 1 / (1 + np.exp(-spline_vals))
    return spline_vals
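# --- Hedged usage sketch: get_spline interpolates the control points through
# a logit transform, so the returned curve stays strictly inside (0, 1); the
# numbers below are illustrative only:
#
#   yy = pd.Series([0.5, 0.6, 0.4])
#   curve = get_spline(yy, periods=2, length=14, total_length=28,
#                      grid_points=100)
#   assert ((curve > 0) & (curve < 1)).all()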
def plot_best_strategy(
dict_output,
vac_interest,
case,
x_scatter=None,
fig=None,
ax=None,
periods=8,
length=14,
total_length=140,
grid_points=6000,
col_unconstrained="C0",
label_unconstrained="Optimal",
col_pareto="C1",
label_pareto="Pareto optimal",
xlabel="Time",
ylabel="% of vaccine in Country A",
title="",
linewidth=1,
s_scatter=4,
label_scatter="",
plot=None,
n_vacc=1,
x_total=16,
y_total=0.2,
scale_total=1,
add_additional=None,
):
pareto = dict_output[case]["population_based"]
output = dict_output[case]
appended_df = output["optimal_strategies"].append(output["pareto_frontier"])
country_names = [
x for x in appended_df.columns if "country" in x and not ("_" in x)
]
if not(add_additional is None):
for index in add_additional["integers"]:
for index2 in ["country A"]:
for index3 in ["vac1", "vac2"]:
name = f"yy_{index2}_{index3}_{index}"
appended_df[name] = add_additional["number"]
pars = [x for x in appended_df.columns if "yy_" in x]
pars1 = [x for x in pars if "vac1" in x]
pars2 = [x for x in pars if "vac2" in x]
add_row = dict(
zip(
country_names + ["fval"],
output["population_based"] + [np.sum(output["population_based"])],
)
)
appended_df = appended_df.append(add_row, ignore_index=True)
amin = np.argmin(appended_df["fval"])
pareto_optimal_df = appended_df[
(appended_df["countryA"] <= pareto[0]) & (appended_df["countryB"] <= pareto[1])
].reset_index()
pareto_amin = np.argmin(pareto_optimal_df["fval"])
optimal_vacc_strategy1 = appended_df[pars1].iloc[amin]
optimal_pareto_strategy1 = pareto_optimal_df[pars1].iloc[pareto_amin]
if optimal_pareto_strategy1.isnull().values.any():
optimal_pareto_strategy1 = pd.Series(
np.repeat(0.5, len(optimal_pareto_strategy1)),
index=pareto_optimal_df[pars1].iloc[pareto_amin].index,
)
if len(pars2) > 0:
optimal_vacc_strategy2 = appended_df[pars2].iloc[amin]
optimal_pareto_strategy2 = pareto_optimal_df[pars2].iloc[pareto_amin]
if optimal_pareto_strategy2.isnull().values.any():
optimal_pareto_strategy2 = pd.Series(
np.repeat(0.5, len(optimal_pareto_strategy2)),
index=pareto_optimal_df[pars2].iloc[pareto_amin].index,
)
if vac_interest == "vac1":
vac_pareto = get_spline(
optimal_pareto_strategy1,
periods=periods,
length=length,
total_length=total_length,
grid_points=grid_points,
)
vac_unconstrained = get_spline(
optimal_vacc_strategy1,
periods=periods,
length=length,
total_length=total_length,
grid_points=grid_points,
)
scatter_vac_pareto = optimal_pareto_strategy1
scatter_vac_unconstr = optimal_vacc_strategy1
else:
vac_pareto = get_spline(
optimal_pareto_strategy2,
periods=periods,
length=length,
total_length=total_length,
grid_points=grid_points,
)
vac_unconstrained = get_spline(
optimal_vacc_strategy2,
periods=periods,
length=length,
total_length=total_length,
grid_points=grid_points,
)
scatter_vac_pareto = optimal_pareto_strategy2
scatter_vac_unconstr = optimal_vacc_strategy2
if ax is None:
fig, ax = plt.subplots()
if plot == "pareto" or plot is None:
ax.plot(
np.linspace(0, total_length, grid_points) / 7,
vac_pareto*n_vacc,
color=col_pareto,
linewidth=linewidth,
label=label_pareto,
)
ax.fill_between(np.linspace(0, total_length, grid_points) / 7, vac_pareto*n_vacc,
np.repeat(0, len(vac_pareto))*n_vacc, color=col_pareto, alpha = 0.3)
ax.plot(np.linspace(0, total_length, grid_points) / 7,
np.repeat(0.5, len(vac_pareto))*n_vacc,
color="black", linestyle="dashed", label="Population \nallocation")
time = np.linspace(0, total_length, grid_points) / 7
area = trapz(vac_pareto*n_vacc, dx=(time[1] - time[0]))
ax.text(x_total, y_total, f"Total doses: \n{np.round(area, 2)}",
horizontalalignment="center",
verticalalignment="center",
bbox=dict(facecolor='none', edgecolor='black', boxstyle='round,pad=1'))
if plot == "optimal" or plot is None:
ax.plot(
np.linspace(0, total_length, grid_points) / 7,
vac_unconstrained*n_vacc,
color=col_unconstrained,
linewidth=linewidth,
label=label_unconstrained,
)
ax.plot(np.linspace(0, total_length, grid_points) / 7,
np.repeat(0.5, len(vac_unconstrained))*n_vacc,
color="black", linestyle="dashed", label="Population \nallocation")
        time = np.linspace(0, total_length, grid_points) / 7
area = trapz(vac_unconstrained*n_vacc, dx=(time[1] - time[0]))
ax.fill_between(np.linspace(0, total_length, grid_points) / 7, vac_unconstrained*n_vacc,
np.repeat(0, len(vac_unconstrained))*n_vacc, color=col_unconstrained,
alpha = 0.3)
ax.text(x_total, y_total, f"Total doses: \n{np.round(area, 2)}",
horizontalalignment="center",
verticalalignment="center",
bbox=dict(facecolor='none', edgecolor='black', boxstyle='round,pad=1'))
#ax.scatter(x_scatter, scatter_vac_pareto, s=s_scatter, label=label_scatter,
# color=col_pareto)
#ax.scatter(x_scatter, scatter_vac_unconstr, s=s_scatter, label=label_scatter,
# color=col_pareto)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_ylim([-0.05, 1.05])
return ax
def plot_distance_curves(
dict_use,
max_index=None,
linewidth=1,
color_optimal="C0",
color_pareto="C1",
color_pop="C2",
label_optimal="Optimal",
label_pareto="pareto",
label_pop="pop",
var="fval",
relative=True,
ax=None,
x_label="Distance parameter",
y_label="% deaths compared to the Population based strategy",
title="",
vline=None,
vline_color="C3",
vline_width=4,
vline_label="Previous parameter",
v_ymin=0,
v_ymax=-10,
ylim=None,
):
if ax is None:
fig, ax = plt.subplots()
optimal = (
dict_use["optimal"]
.sort_values(by=["distance"])[var][
(dict_use["optimal"]["fval"] > 0)
& (dict_use["optimal"]["countryA"] > 0)
& (dict_use["optimal"]["countryB"] > 0)
& (dict_use["pareto"]["fval"] > 0)
& (dict_use["pareto"]["countryA"] > 0)
& (dict_use["pareto"]["countryB"] > 0)
]
.reset_index(drop=True)
)
pareto = (
dict_use["pareto"]
.sort_values(by=["distance"])[var][
(dict_use["pareto"]["fval"] > 0)
& (dict_use["pareto"]["countryA"] > 0)
& (dict_use["pareto"]["countryB"] > 0)
& (dict_use["optimal"]["fval"] > 0)
& (dict_use["optimal"]["countryA"] > 0)
& (dict_use["optimal"]["countryB"] > 0)
]
.reset_index(drop=True)
)
pop = (
dict_use["pop_based"]
.sort_values(by=["distance"])[var][
(dict_use["pop_based"]["fval"] > 0)
& (dict_use["pop_based"]["countryA"] > 0)
& (dict_use["pop_based"]["countryB"] > 0)
& (dict_use["pareto"]["fval"] > 0)
& (dict_use["pareto"]["countryA"] > 0)
& (dict_use["pareto"]["countryB"] > 0)
& (dict_use["optimal"]["fval"] > 0)
& (dict_use["optimal"]["countryA"] > 0)
& (dict_use["optimal"]["countryB"] > 0)
]
.reset_index(drop=True)
)
distance = 1 - dict_use["optimal"].sort_values(by=["distance"])["distance"][
(dict_use["pareto"]["fval"] > 0)
& (dict_use["pareto"]["countryA"] > 0)
& (dict_use["pareto"]["countryB"] > 0)
& (dict_use["optimal"]["fval"] > 0)
& (dict_use["optimal"]["countryA"] > 0)
& (dict_use["optimal"]["countryB"] > 0)
].reset_index(drop=True)
if max_index is None:
max_index = len(pop)
a = (optimal[0:max_index] - pop[0:max_index]) / pop[0:max_index]
b = (pareto[0:max_index] - pop[0:max_index]) / pop[0:max_index]
if relative is True:
ax.plot(
distance[0:max_index],
a * 100,
color=color_optimal,
linewidth=linewidth,
label=label_optimal,
)
ax.plot(
distance[0:max_index],
b * 100,
color=color_pareto,
linewidth=linewidth,
label=label_pareto,
)
elif relative is False:
ax.plot(
distance[0:max_index],
optimal[0:max_index],
color=color_optimal,
linewidth=linewidth,
label=label_optimal,
)
ax.plot(
distance[0:max_index],
pareto[0:max_index],
color=color_pareto,
linewidth=linewidth,
label=label_pareto,
)
ax.plot(
distance[0:max_index],
pop[0:max_index],
color=color_pop,
linewidth=linewidth,
label=label_pop,
)
ax.set_title(title)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
if not (vline is None):
ax.vlines(
vline,
ymin=v_ymin,
ymax=v_ymax,
color=vline_color,
linewidth=vline_width,
linestyles="dashed",
label=vline_label,
)
if not (ylim is None):
ax.set_ylim(ylim)
return ax
# ---------------------------------------------------------------------------------------------------------------------
def plot_bars_multiple(
ax,
unconstr_deaths,
pop_deaths,
constr_deaths,
label_optimal="Optimal strategy",
label_Pareto="Pareto strategy",
X=["Total"] + ["Belgium", "France", "Germany", "United \nKingdom"],
xlabel="Countries",
ylabel="Difference in %",
title="Number of deaths per strategy and country compared to population strategy",
color_good="seagreen",
color_bad="firebrick",
alpha=0.3,
label_good="Improvement",
label_bad="Deterioration",
xlim=None,
):
unrestricted = (unconstr_deaths / pop_deaths - 1) * 100
pareto = (constr_deaths / pop_deaths - 1) * 100
minimum = np.min([pareto, unrestricted]) * 1.05
maximum = np.max([pareto, unrestricted]) * 1.05
X_axis = np.arange(len(X))
ax.bar(X_axis - 0.3, unrestricted, 0.3, label=label_optimal, edgecolor="grey")
ax.bar(X_axis, pareto, 0.3, label=label_Pareto, edgecolor="grey")
ax.set_xticks(X_axis - 0.1)
ax.set_xticklabels(X)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
a = None
if not (xlim is None):
ax.set_xlim(xlim)
a = xlim[1]
if a is None:
a = len(X)
# ax.fill_between([-0.4, a + 0.2], [0, 0], [minimum,minimum], step="pre", alpha=alpha, color = color_good,
# label=label_good)
# ax.fill_between([-0.4, a + 0.2], [maximum, maximum], [0,0], step="pre", alpha=alpha, color = color_bad,
# label=label_bad)
if not (xlim is None):
ax.set_xlim(xlim)
ax.set_ylim([minimum, maximum])
ax.legend()
def plot_trajectories_aggregated(
ax,
length,
pop_trajectory,
unconstr_trajectory,
constr_trajectory,
labels=["Population", "Optimal", "Pareto"],
colors=["C0", "C1", "C2"],
alphas=[0.6, 0.4, 0.2],
xlabel="Weeks",
ylabel="Infected individuals \nin millions",
title="Total number of infected individuals",
target="infectious",
scale=10 ** 6,
fill_between=False,
plot_legend=True,
):
index_axis = np.array(list(pop_trajectory.reset_index(drop=True).index))
x_axis = index_axis / index_axis[-1] * length / 7
trajectories = [unconstr_trajectory, constr_trajectory, pop_trajectory]
sum_infectious = {}
for index in range(len(trajectories)):
df = trajectories[index]
if len(target) == 1:
states_infectious = [x for x in df.columns if target[0] in x]
if len(target) == 2:
states_infectious = [
x for x in df.columns if (target[0] in x) and (target[1] in x)
]
sum_infectious = df[states_infectious].sum(axis=1)
if labels[index] == "Population":
linestyle = "dashed"
alpha = 0.8
else:
linestyle = "solid"
alpha = 1
ax.plot(
x_axis,
sum_infectious / scale,
label=labels[index],
color=colors[index],
linestyle=linestyle,
alpha=alpha,
)
if fill_between is True:
ax.fill_between(
x_axis,
sum_infectious / scale,
np.repeat(0, len(x_axis)),
step="pre",
alpha=alphas[index],
color=colors[index],
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
if plot_legend is True:
ax.legend()
def plot_trajectories_aggregated_vac(
ax,
length,
pop_trajectory,
unconstr_trajectory,
constr_trajectory,
labels=["Population", "Optimal", "Pareto"],
colors=["C0", "C1", "C2"],
alphas=[0.6, 0.4, 0.2],
xlabel="Weeks",
ylabel="",
title="",
scale=10 ** 6,
fill_between=False,
plot_legend=True,
):
index_axis = np.array(list(pop_trajectory.reset_index(drop=True).index))
x_axis = index_axis / index_axis[-1] * length / 7
trajectories = [unconstr_trajectory, constr_trajectory, pop_trajectory]
for index in range(len(trajectories)):
df = trajectories[index]
states_vaccinated = [
x
for x in df.columns
if (("vac1" in x) or ("vac2" in x) or ("recoverede" in x))
and not ("dead" in x)
]
states_alive = [x for x in df.columns if not ("dead" in x)]
sum_vaccinated = df[states_vaccinated].sum(axis=1)
sum_alive = df[states_alive].sum(axis=1)
prop_vac = sum_vaccinated / sum_alive
ax.plot(
x_axis,
prop_vac,
label=labels[index],
color=colors[index],
)
        if fill_between is True:
            ax.fill_between(
                x_axis,
                prop_vac,
                np.repeat(0, len(x_axis)),
                step="pre",
                alpha=alphas[index],
                color=colors[index],
            )
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
if plot_legend is True:
ax.legend()
def plot_vac_allocated(
ax,
colors,
time,
dict_out,
index_vac,
index_areas,
areas,
scale,
countries,
col_vac1="C7",
col_vac2="C8",
label_vac1="mRNA",
label_vac2="Vector",
vac=["vac1", "vac2"],
types=["unconstrained", "constrained", "pop"],
ylabel="% received",
xlabel="Weeks",
labels=["Optimal", "Pareto", "Population"],
alphas=[0.1, 0.1, 0.1],
axvline_x=40,
ylim=[-0.05, 0.9],
title="Vaccine received in ",
total=True,
spline_xx=None,
numb_xx=4,
s=5,
):
for index_type in range(len(types)):
vac_available = dict_out["vaccine"][vac[index_vac]]
name = f"{types[index_type]}_{areas[index_areas]}_{vac[index_vac]}"
vac_prop = dict_out["allocated_best"][name]
vac_allocated = vac_available * vac_prop
if total is True:
y = vac_allocated / scale
else:
y = vac_prop
if types[index_type] == "pop":
alpha = 0.8
linestyle = "dashed"
else:
alpha = 1
linestyle = "solid"
ax.plot(
time / 7,
y,
color=colors[index_type],
label=labels[index_type],
linestyle=linestyle,
alpha=alpha,
)
if not (spline_xx is None):
if types[index_type] != "pop":
xx = np.array(list(spline_xx.values()))
y_index = xx / xx[-1] * (len(y) - 1)
                ax.scatter(
                    xx[0:numb_xx] / 7,
                    # Round to integer labels before positional lookup.
                    y.loc[np.round(y_index[0:numb_xx]).astype(int)],
                    s=s,
                )
# y_lim = [ax.get_yticks()[0], ax.get_yticks()[-1]]
# ax.fill_between([mini, axvline_x], y_lim,
# color="grey", step="pre", alpha=0.5)
# ax.fill_between([mini, 60], y_lim,
# color="grey", step="pre", alpha=0.2)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
ax.set_title(title + f"{countries[index_areas]}")
# ax.axvline(axvline_x ,0, 1, color = "firebrick",
# linestyle = "dashed", label = "Last optimitaion \npoint", linewidth = 0.7)
if [index_vac, index_areas] == [0, 0]:
ax.legend()
# ----------------------------------------------------------------------------------------------------
def plot_four_country_overview(
spline_xx,
vaccine_inflow,
number_yy,
length,
interventionA,
interventionB,
end_data,
start_population,
infectious_t0,
recovered_t0,
delta,
omega,
countries,
areas,
par_R,
number_xx_R,
total_grid,
df_inf_true,
df_infected,
grid_data,
grid_sim,
scale,
text_x,
text_y,
ylim,
text_str,
text_lockdown_x,
text_lockdown_y=0.02,
text_lockdown_str="Constant \nNPIs",
color_prop=["C4", "C5", "C6", "C7"],
label_vac1="mRNA",
label_vac2="vector",
color_vac1="C7",
color_vac2="C8",
title_vac="Available vaccines",
title_setup="Set-up",
position_start_vac=[0, 0.5],
height_start_vac=0.3,
letter_size=16,
letter_y=1.06,
size=(18, 16),
):
linspace = []
for j in range(len(spline_xx.values()) - 1):
new = list(np.linspace(0, list(spline_xx.values())[j + 1], 1000))
linspace += new
vaccine_available = pd.DataFrame(
{
"vac1": np.repeat(
np.array(list(vaccine_inflow.values())[0 : (number_yy - 1)]), 1000
),
"vac2": np.repeat(
np.array(
list(vaccine_inflow.values())[(number_yy - 1) : (2 * number_yy)]
),
1000,
),
"t": np.linspace(0, length, len(linspace)),
}
)
fig = plt.figure(constrained_layout=True, figsize=size)
gs = GridSpec(3, 4, figure=fig)
count_plot = 97
ax = fig.add_subplot(gs[0, :1])
ax.set_xlim([0, 60])
ax.set_ylim([0, 1])
ax.get_yaxis().set_visible(False)
# ax.spines['left'].set_visible(False)
# ax.spines['bottom'].set_position('center')
ax.text(
-0.05,
letter_y,
chr(count_plot),
horizontalalignment="center",
verticalalignment="center",
transform=ax.transAxes,
weight="bold",
size=letter_size,
)
color_tl = "seagreen"
ax.fill_between(
[0, length / 7], [1, 1], [0.8, 0.8], step="pre", alpha=0.6, color=color_tl
)
ax.text(length / 7 / 2, 0.9, "Alpha variant", ha="center", va="center")
ax.fill_between(
[interventionA["t"] / 7, length / 7],
[0.8, 0.8],
[0.6, 0.6],
step="pre",
alpha=0.5,
color=color_tl,
)
ax.text(
((length - interventionA["t"]) / 2 + interventionA["t"]) / 7,
0.7,
"Delta variant",
ha="center",
va="center",
)
for i in range(3):
key1 = f"xx{i}"
key2 = f"xx{i+1}"
ax.fill_between(
[spline_xx[key1] / 7, spline_xx[key2] / 7],
[0.6, 0.6],
[0.4, 0.4],
step="pre",
alpha=0.4,
color=color_tl,
)
ax.text(
((spline_xx[key2] - spline_xx[key1]) / 2 + spline_xx[key1]) / 7,
0.5,
f"Spline {i+1}",
ha="center",
va="center",
)
ax.fill_between(
[0, end_data / 7], [0.4, 0.4], [0.2, 0.2], step="pre", alpha=0.3, color=color_tl
)
ax.text(end_data / 7 / 2, 0.3, "Optimize vaccinations", ha="center", va="center")
ax.fill_between(
[end_data / 7, length / 7],
[0.4, 0.4],
[0.2, 0.2],
step="pre",
alpha=0.3,
color=color_tl,
)
ax.text(
((length - end_data) / 2 + end_data) / 7,
0.3,
"Pop. based \nallocation",
ha="center",
va="center",
)
ax.fill_between(
[0, length / 7], [0.2, 0.2], [0.0, 0.0], step="pre", alpha=0.2, color=color_tl
)
ax.text(length / 7 / 2, 0.1, "NPIs active", ha="center", va="center")
ax.set_xlabel("Weeks")
ax.set_title("Time course")
# ax = fig.add_subplot(gs[0, :2])
# ax.set_title(title_setup)
# ax.get_xaxis().set_visible(False)
# ax.get_yaxis().set_visible(False)
# ax.table(cellText=[[1,1],[2,2]], loc='upper center',
# rowLabels=['Alpha \nvariant','Delta \nvariant'],
# colLabels=['Vaccine mRNA','Vaccine \n vector'],
# colLoc="center", rowLoc = "center", colWidths=[0.2,0.2],
# )
count_plot += 1
ax = fig.add_subplot(gs[0, 1])
susceptible = (
start_population["susceptible_countryA_vac0_t0"]
+ start_population["susceptible_countryB_vac0_t0"]
+ start_population["susceptible_countryC_vac0_t0"]
+ start_population["susceptible_countryD_vac0_t0"]
)
infected = (
infectious_t0["infectious_countryA_vac0_virus1_t0"]
+ infectious_t0["infectious_countryB_vac0_virus1_t0"]
+ infectious_t0["infectious_countryC_vac0_virus1_t0"]
+ infectious_t0["infectious_countryD_vac0_virus1_t0"]
)
recovered = (
recovered_t0["recovered_countryA_vac0_virus1_t0"]
+ recovered_t0["recovered_countryB_vac0_virus1_t0"]
+ recovered_t0["recovered_countryC_vac0_virus1_t0"]
+ recovered_t0["recovered_countryD_vac0_virus1_t0"]
)
sums = {"susceptible": susceptible, "infectious": infected, "recovered": recovered}
dicts = [start_population, infectious_t0, recovered_t0]
category_names = ["Belgium", "France", "Germany", "Uk"]
states = ["susceptible", "infectious", "recovered"]
results = {}
for i in range(len(states)):
d = dicts[i]
ph_str = ""
if states[i] != "susceptible":
ph_str = "_virus1"
results[states[i].capitalize()] = np.round(
np.array(
[
d[f"{states[i]}_countryA_vac0{ph_str}_t0"],
d[f"{states[i]}_countryB_vac0{ph_str}_t0"],
d[f"{states[i]}_countryC_vac0{ph_str}_t0"],
d[f"{states[i]}_countryD_vac0{ph_str}_t0"],
]
)
/ sums[states[i]],
2,
)
labels = list(results.keys())
data = np.array(list(results.values()))
data_cum = data.cumsum(axis=1)
category_colors = plt.get_cmap("RdYlGn")(np.linspace(0.15, 0.85, data.shape[1]))
ax.invert_yaxis()
ax.xaxis.set_visible(False)
ax.set_xlim(0, np.sum(data, axis=1).max())
ax.set_title("Relative initial populations")
for i, (colname, color) in enumerate(zip(category_names, category_colors)):
widths = data[:, i]
starts = data_cum[:, i] - widths
rects = ax.barh(
labels, widths, left=starts, height=0.5, label=colname, color=color
)
r, g, b, _ = color
text_color = "black" if r * g * b < 0.5 else "darkgrey"
if colname != "Belgium":
ax.bar_label(
rects,
label_type="center",
fmt="%.2f%%",
color=text_color,
fontsize="small",
padding=0,
)
# ax.legend(ncol=2, fontsize="small")
ax.legend(ncol=len(category_names), bbox_to_anchor=(0, -0.2), loc="lower left")
ax.text(
-0.05,
letter_y,
chr(count_plot),
horizontalalignment="center",
verticalalignment="center",
transform=ax.transAxes,
weight="bold",
size=letter_size,
)
count_plot += 1
ax = fig.add_subplot(gs[0, 2])
ax.plot(
vaccine_available["t"] / 7,
vaccine_available["vac1"] / scale,
color=color_vac1,
label=label_vac1,
)
ax.set_ylabel("Doses per day \nin millions")
ax.fill_between(
vaccine_available["t"] / 7,
vaccine_available["vac1"] / scale,
color=color_vac1,
step="pre",
alpha=0.25,
)
ax.plot(
vaccine_available["t"] / 7,
vaccine_available["vac2"] / scale,
color=color_vac2,
label=label_vac2,
)
ax.fill_between(
vaccine_available["t"] / 7,
vaccine_available["vac2"] / scale,
color=color_vac2,
step="pre",
alpha=0.25,
)
ax.set_xlabel("Weeks")
ax.set_title(title_vac)
ax.legend()
ax.text(
-0.05,
letter_y,
chr(count_plot),
horizontalalignment="center",
verticalalignment="center",
transform=ax.transAxes,
weight="bold",
size=letter_size,
)
count_plot += 1
ax = fig.add_subplot(gs[0, 3])
barWidth = 0.25
# set heights of bars
vaccine1 = [
delta["delta_vac1_virus1"],
delta["delta_vac1_virus2"],
omega["omega_vac1_virus1"],
omega["omega_vac1_virus2"],
]
vaccine2 = [
delta["delta_vac2_virus1"],
delta["delta_vac2_virus2"],
omega["omega_vac2_virus1"],
omega["omega_vac2_virus2"],
]
# Set position of bar on X axis
r1 = np.array([0, 0.75, 3, 3.75])
r2 = np.array([x + barWidth for x in r1])
# Make the plot
ax.bar(
r1, vaccine1, color="#727272", width=barWidth, edgecolor="white", label="mRNA"
)
ax.bar(
r2, vaccine2, color="#cd7058", width=barWidth, edgecolor="white", label="vector"
)
midpoints = (r1 + r2) / 2
# Add xticks on the middle of the group bars
# ax.set_xlabel('group', fontweight='bold')
ax.set_xticks(midpoints)
ax.set_xticklabels(["Alpha", "Delta", "Alpha", "Delta"])
point1 = (midpoints[1] - midpoints[0]) / 2 + midpoints[0]
point2 = (midpoints[3] - midpoints[2]) / 2 + midpoints[2]
ax.text(point1, 1.1, "Infection \nprotection", ha="center", va="center")
ax.text(point2, 1.1, "Death \nprotection", ha="center", va="center")
ax.set_ylim([0, 1.2])
ax.set_yticks(np.linspace(0, 1, 6))
ax.set_ylabel("Reduction in %")
ax.set_title("Vaccine infection and \ndeath reduction")
ax.legend(loc="center")
ax.text(
-0.05,
letter_y,
chr(count_plot),
horizontalalignment="center",
verticalalignment="center",
transform=ax.transAxes,
weight="bold",
size=letter_size,
)
for j in range(len(countries)):
ax = fig.add_subplot(gs[1, j])
count_plot += 1
ax.text(
-0.05,
letter_y,
chr(count_plot),
horizontalalignment="center",
verticalalignment="center",
transform=ax.transAxes,
weight="bold",
size=letter_size,
)
if j == 0:
ax.set_ylabel("Degree of NPIs")
country = areas[j]
array = np.array([par_R[x] for x in par_R.keys() if country in x])
spline_R = get_spline(
array,
periods=number_xx_R - 1,
length=length / number_xx_R,
total_length=length,
grid_points=total_grid,
transform=True,
)
ax.set_xlabel("Weeks")
ax.plot(grid_data / 7, 1 - spline_R[0 : (len(grid_data))], color="C2")
ax.plot(
grid_sim / 7,
1 - spline_R[(len(grid_data) + 1) : (total_grid)],
color="C2",
linestyle="dotted",
)
ax.text(text_x, text_y, text_str)
ax.text(text_lockdown_x, text_lockdown_y, text_lockdown_str)
ax.set_ylim(ylim)
ax.set_title(countries[j].capitalize())
ax.fill_between(
grid_data / 7,
1 - spline_R[0 : (len(grid_data))],
step="pre",
alpha=0.4,
color="C2",
)
ax.fill_between(
grid_sim / 7,
1 - spline_R[(len(grid_data) + 1) : (total_grid)],
step="pre",
alpha=0.25,
color="C2",
)
ax.axvline(
list(df_inf_true.index)[-1] / 7,
0,
1,
color="firebrick",
linestyle="dashed",
label="Last optimized \nspline point",
linewidth="0.7",
)
if j == 0:
ax.legend(loc="upper right")
for j in range(len(countries)):
ax = fig.add_subplot(gs[2, j])
count_plot += 1
ax.text(
-0.05,
letter_y,
chr(count_plot),
horizontalalignment="center",
verticalalignment="center",
transform=ax.transAxes,
weight="bold",
size=letter_size,
)
if j == 0:
ax.set_ylabel("Active cases \nin millions")
ax.plot(
grid_data / 7,
df_infected.loc[0 : (len(grid_data) - 1), areas[j]] / scale,
label="Simulated",
color="C0",
)
ax.plot(
grid_sim / 7,
df_infected.loc[len(grid_data) : (total_grid), areas[j]] / scale,
color="C0",
linestyle="dotted",
)
ax.plot(
np.array(df_inf_true.index) / 7,
df_inf_true[countries[j]] / scale,
label="Data",
color="C1",
)
ax.set_title(countries[j].capitalize())
ax.set_xlabel("Weeks")
ax.axvline(
list(df_inf_true.index)[-1] / 7,
0,
1,
color="firebrick",
linestyle="dashed",
label="Last optimized \nspline point",
linewidth="0.7",
)
if j == 0:
ax.legend()
fig.savefig(
"/home/manuel/Documents/VaccinationDistribution/paper/images/infected_compare",
bbox_inches="tight",
)
return vaccine_available
def stacked_bar(results, category_names, ax, ylabel=True, legend=True, map_col = 'RdYlGn'):
"""
Parameters
----------
results : dict
A mapping from question labels to a list of answers per category.
It is assumed all lists contain the same number of entries and that
it matches the length of *category_names*.
category_names : list of str
The category labels.
"""
labels = list(results.keys())
data = np.array(list(results.values()))
data_cum = data.cumsum(axis=1)
category_colors1 = plt.get_cmap("Set1")(
np.linspace(0.15, 0.85, 5))[1]
category_colors2 = plt.get_cmap("Set2")(
np.linspace(0.15, 0.85, 5))[0]
category_colors=[category_colors1, category_colors2]
ax.invert_yaxis()
ax.xaxis.set_visible(False)
ax.set_xlim(0, np.sum(data, axis=1).max())
for i, (colname, color) in enumerate(zip(category_names, category_colors)):
widths = data[:, i]
starts = data_cum[:, i] - widths
rects = ax.barh(labels, widths, left=starts, height=0.5,
label=colname, color=color)
#r, g, b, _ = color
text_color = 'black'
ax.bar_label(rects, label_type='center', color=text_color)
if ylabel is False:
ax.yaxis.set_visible(False)
if legend is True:
ax.legend(ncol=len(category_names), bbox_to_anchor=(0.5, 0.48),
loc='center', fontsize='small')
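# --- Hedged usage sketch for stacked_bar (values are illustrative; each list
# holds one share per category):
#
#   fig, ax = plt.subplots()
#   stacked_bar({"Deaths": [0.4, 0.6], "Doses": [0.55, 0.45]},
#               ["Country A", "Country B"], ax)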
def plot_bars_vac(ax, vaccine1, vaccine2, title, barWidth=0.25):
r1 = np.array([0, 0.75, 3, 3.75])
r2 = np.array([x + barWidth for x in r1])
# Make the plot
ax.bar(
r1, vaccine1, color="#727272", width=barWidth, edgecolor="white", label="Vaccine \none"
)
if not(vaccine2 is None):
ax.bar(
r2, vaccine2, color="#cd7058", width=barWidth, edgecolor="white", label="VAccine \ntwo"
)
midpoints = (r1 + r2) / 2
# Add xticks on the middle of the group bars
# ax.set_xlabel('group', fontweight='bold')
ax.set_xticks(midpoints)
ax.set_xticklabels(["Wild \ntype", "Mutant", "Wild \ntype", "Mutant"])
point1 = (midpoints[1] - midpoints[0]) / 2 + midpoints[0]
point2 = (midpoints[3] - midpoints[2]) / 2 + midpoints[2]
ax.text(point1, 1.2, "Infection \nprotection", ha="center", va="center")
ax.text(point2, 1.2, "Death \nprotection", ha="center", va="center")
ax.set_ylim([0, 1.2])
ax.set_yticks(np.linspace(0, 1, 6))
ax.set_ylabel(title)
#ax.set_title("Vaccine infection and \ndeath reduction")
ax.legend(loc="center")
#---------------------------------------------------------------------------------------------------------------
def plot_horizontal_bars(results, category_names, ax, category_colors = ["C0", "C1"], bbox_to_anchor=(0.6, 0)):
"""
Parameters
----------
results : dict
A mapping from question labels to a list of answers per category.
It is assumed all lists contain the same number of entries and that
it matches the length of *category_names*.
category_names : list of str
The category labels.
"""
labels = list(results.keys())
data = np.array(list(results.values()))
data_cum = data.cumsum(axis=1)
ax.invert_yaxis()
ax.xaxis.set_visible(False)
ax.set_xlim(0, np.sum(data, axis=1).max())
for i, (colname, color) in enumerate(zip(category_names, category_colors)):
widths = data[:, i]
starts = data_cum[:, i] - widths
rects = ax.barh(labels, widths, left=starts, height=0.5,
label=colname, color=color, alpha=0.6)
#r, g, b, _ = color
text_color = 'black' # 'darkgrey'
ax.bar_label(rects, label_type='center', color=text_color, alpha=1)
ax.legend(ncol=len(category_names), bbox_to_anchor=bbox_to_anchor,
fontsize='small')
return ax
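# --- Hedged usage sketch for plot_horizontal_bars; each value list has one
# entry per category (the numbers are illustrative):
#
#   fig, ax = plt.subplots()
#   plot_horizontal_bars({"Optimal": [1.2, 0.8], "Population": [1.0, 1.0]},
#                        ["Country A", "Country B"], ax)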
def plot_horizontal_bars_annotated(ax,
dict_use,
scale = 10**5,
color_vline = "black",
linestyle_vline = "dashed", title="",
category_names=["Country A", "Country B"], category_colors = ["C0", "C1", "C2", "C3"]):
all_results = dict_use["all_strategies"]
argmin_global = np.argmin(all_results["fval"])
global_optimum = all_results.iloc[argmin_global]
pareto_results = dict_use["pareto_improvements"]
argmin_Pareto = np.argmin(pareto_results["fval"])
    pareto_optimum = pareto_results.iloc[argmin_Pareto]
optimal = np.round([global_optimum["countryA"]/scale, global_optimum["countryB"]/scale],2)
population = np.round(np.array(dict_use["population_based"])/scale,2)
pareto = np.round([pareto_optimum["countryA"]/scale, pareto_optimum["countryB"]/scale],2)
results = {
'Optimal \nStrategy': optimal,
'Population \nStrategy': population,
'Pareto Optimal \nStrategy': pareto,
}
plot_horizontal_bars(results, category_names, ax, category_colors = category_colors)
ax.axvline(x=np.sum(population), color = color_vline, linestyle =linestyle_vline)
y_ticks = ax.get_yticks()
#ax.annotate(f"{np.round((np.sum(optimal) / np.sum(population) - 1)*100, 2)}",
# xy=(np.sum(optimal), y_ticks[0]), xycoords='data',
# xytext=(np.sum(population), y_ticks[0]), #textcoords='offset points',
# ha="center", fontsize = 4, va = "center",
# arrowprops=dict(arrowstyle="->"),)
par_opt_list = [optimal, pareto]
for index in range(len(par_opt_list)):
if index == 1:
tick = 2
else:
tick = index
ax.arrow(np.sum(population), y_ticks[tick], np.sum(par_opt_list[index]) - np.sum(population), 0,
length_includes_head=True, head_width=0.1, head_length=0.05)
ax.text(np.sum(population), y_ticks[tick], f"{np.round((np.sum(par_opt_list[index])/np.sum(population) - 1)*100,2)}%",
va = "center")
ax.set_title(title)
def plot_horizontal_bars_annotated_many(ax,
global_optimum, population_based, pareto_optimum,
scale = 10**5,
color_vline = "black",
linestyle_vline = "dashed", title="",
category_names=["Country A", "Country B"],
category_colors = ["C0", "C1", "C2", "C3"], bbox_to_anchor=(0.6, 0)):
optimal = np.round(global_optimum/scale,2)
population = np.round(population_based/scale,2)
pareto = np.round(pareto_optimum/scale, 2)
results = {
'Optimal \nStrategy': optimal,
'Population \nStrategy': population,
'Pareto Optimal \nStrategy': pareto,
}
plot_horizontal_bars(results, category_names, ax, category_colors = category_colors, bbox_to_anchor=bbox_to_anchor)
ax.axvline(x=np.sum(population), color = color_vline, linestyle =linestyle_vline)
y_ticks = ax.get_yticks()
#ax.annotate(f"{np.round((np.sum(optimal) / np.sum(population) - 1)*100, 2)}",
# xy=(np.sum(optimal), y_ticks[0]), xycoords='data',
# xytext=(np.sum(population), y_ticks[0]), #textcoords='offset points',
# ha="center", fontsize = 4, va = "center",
# arrowprops=dict(arrowstyle="->"),)
par_opt_list = [optimal, pareto]
for index in range(len(par_opt_list)):
if index == 1:
tick = 2
else:
tick = index
ax.arrow(np.sum(population), y_ticks[tick], np.sum(par_opt_list[index]) - np.sum(population), 0,
length_includes_head=True, head_width=0.1, head_length=0.05)
ax.text(np.sum(population), y_ticks[tick], f"{np.round((np.sum(par_opt_list[index])/np.sum(population) - 1)*100,2)}%",
va = "center")
ax.set_title(title)
def compute_incidences(trajectories,
viruses = ["virus1", "virus2"],
countries = ["countryA", "countryB"],
time = np.linspace(0, 140, 6000),
lambda1 = 0.1,
habitant_scale = 0.1,days=7):
infected = {}
for index in range(len(viruses)):
df_help = pd.DataFrame(np.nan, index=range(trajectories.shape[0]), columns=countries)
for index_country in range(len(countries)):
cols = [x for x in trajectories.columns if "infectious" in x and viruses[index] in x and countries[index_country] in x]
df_help[countries[index_country]] = trajectories[cols].sum(axis=1)
infected[viruses[index]] = df_help
incidences = {}
for index in range(len(viruses)):
df_incidence = pd.DataFrame(np.nan, index=range(trajectories.shape[0]), columns=countries)
for index_country in range(len(countries)):
time_course = infected[viruses[index]][countries[index_country]]
for index_time in range(1, len(time_course)-1):
delta_t = time[index_time+1] - time[index_time]
newly_infected = time_course[index_time + 1] - (1 - lambda1*delta_t) * time_course[index_time]
#if newly_infected < 0:
# newly_infected=0
df_incidence.loc[index_time, countries[index_country]] = newly_infected
incidences[viruses[index]] = df_incidence
delta_t = time[1] - time[0]
length_7_days = int(days / delta_t)
seven_day_incidences = {}
    for index in range(len(viruses)):
        df_7_day_incidence = pd.DataFrame(np.nan, index=range(trajectories.shape[0]), columns=countries)
        for index_country in range(len(countries)):
            time_course_incidence = incidences[viruses[index]]
            # Burn-in of 300 grid points before a full trailing window is reported.
            for index_time in range(300, len(time_course_incidence) - 1):
                df_7_day_incidence.loc[index_time, countries[index_country]] = time_course_incidence.loc[(index_time - length_7_days):index_time, countries[index_country]].sum()
        seven_day_incidences[viruses[index]] = df_7_day_incidence * habitant_scale
return seven_day_incidences
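# --- Hedged usage sketch: compute_incidences expects a trajectory DataFrame
# whose "infectious_*" columns encode country and virus, and returns one
# 7-day-incidence DataFrame per virus, keyed like the `viruses` argument:
#
#   incidences = compute_incidences(trajectories_df)
#   incidences["virus1"]["countryA"].plot()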
def plot_incidences(incidence, time,
countries = ["countryA", "countryB"],
ax =None,
label_countries = ["Country A", "Country B"],
colors = ["C0", "C1"],
alpha = 0.3):
viruses = list(incidence.keys())
for index_country in range(len(countries)):
sum_infections = 0
for index in range(len(viruses)):
sum_infections += incidence[viruses[index]][countries[index_country]]
ax.plot(time/7, sum_infections, label = label_countries[index_country],
color=colors[index_country])
ax.fill_between(time/7, np.repeat(0, len(sum_infections)), sum_infections,
color=colors[index_country], alpha = alpha)
def plot_incidences_country(ax, trajectories, time, incidences, viruses = ["virus1", "virus2"],
index_country = "countryA",
label_type = ["Optimal", "Pareto optimal", "Population\nbased"],
colors = ["C0", "C1", "black"], alpha=0.3):
for key in range(len(incidences.keys())):
incidence = incidences[trajectories[key]]
sum_infections = 0
for index in range(len(viruses)):
sum_infections += incidence[viruses[index]][index_country]
if "pop" in trajectories[key]:
ax.plot(time/7, sum_infections, label = label_type[key],
color=colors[key], linestyle = "dashed")
else:
ax.plot(time/7, sum_infections, label = label_type[key],
color=colors[key])
ax.fill_between(time/7, np.repeat(0, len(sum_infections)), sum_infections,
color=colors[key], alpha = alpha)
def compute_incidences_countries(dicts, time, trajectories, name = "initalUnequal_vacUnequal_nvacc_60000",
viruses=["virus1", "virus2"],
countries = ["countryA", "countryB"],
lambda1 = 0.1,
habitant_scale = 0.01):
incidences = {}
for index in range(len(trajectories)):
incidences[trajectories[index]] = compute_incidences(trajectories = dicts[name][trajectories[index]],
viruses = viruses,
countries = countries,
time =time,
lambda1 =lambda1,
habitant_scale = habitant_scale,)
return incidences
def compute_deceased(results, country):
pareto_deceased = []
optimal_deceased = []
pop_deceased = []
for index in range(len(results)):
result = results[index]
pareto_result = result["trajectories_pareto"]
optimal_result = result["trajectories_best"]
population_result = result["trajectories_pop"]
cols = [x for x in pareto_result.columns if "dead" in x and country in x]
deceased_pareto = list(pareto_result[cols].sum(axis=1))
deceased_optimal = list(optimal_result[cols].sum(axis=1))
deceased_pop = list(population_result[cols].sum(axis=1))
pareto_deceased.append(deceased_pareto[-1])
optimal_deceased.append(deceased_optimal[-1])
pop_deceased.append(deceased_pop[-1])
out = {"pareto" : pareto_deceased,
"optimal": optimal_deceased,
"pop" : pop_deceased,}
return out
def compute_splines_from_results(type_opti = "pareto_improvements",
vac = "vac2",
periods = 10,
length = 14,
total_length = 140,
grid_points = 6000,
add_additional = {"integers" : [9,10],
"number" : 0.5},n_vaccs=None,results=None, ):
fractions = []
for index in range(len(n_vaccs)):
result = results[index]
n_vacc = n_vaccs[index]
df_optimal = result[type_opti]
        if not (add_additional is None):
            # Pad the optimisation result with fixed control points for the
            # extra spline indices.
            for extra_index in add_additional["integers"]:
                for index2 in ["countryA"]:
                    for index3 in ["vac1", "vac2"]:
                        name = f"yy_{index2}_{index3}_{extra_index}"
                        df_optimal[name] = add_additional["number"]
cols = [x for x in df_optimal.columns if vac in x]
argmin = np.argmin(df_optimal["fval"])
yy_points = pd.Series(list(df_optimal.iloc[argmin][cols]))
time = np.linspace(0, total_length, grid_points) / 7
spline = get_spline(yy_points,
periods=periods,
length=length,
total_length=total_length,
grid_points=grid_points,
)
area = trapz(spline*n_vacc/2, dx=(time[1] - time[0]))
total_area = n_vacc*10
fraction_country_A = area/total_area
fractions.append(fraction_country_A)
return fractions
def compute_splines_from_results_initials(initials, results, type_opti = "pareto_improvements",
vac = "vac2",
periods = 10,
length = 14,
total_length = 140,
grid_points = 6000,
add_additional = {"integers" : [9,10],
"number" : 0.5},):
fractions = []
for index in range(len(initials)):
result = results[index]
n_vacc = 60000
df_optimal = result[type_opti]
        if not (add_additional is None):
            for extra_index in add_additional["integers"]:
                for index2 in ["countryA"]:
                    for index3 in ["vac1", "vac2"]:
                        name = f"yy_{index2}_{index3}_{extra_index}"
                        df_optimal[name] = add_additional["number"]
cols = [x for x in df_optimal.columns if vac in x]
argmin = np.argmin(df_optimal["fval"])
yy_points = pd.Series(list(df_optimal.iloc[argmin][cols]))
time = np.linspace(0, total_length, grid_points) / 7
spline = get_spline(yy_points,
periods=periods,
length=length,
total_length=total_length,
grid_points=grid_points,
)
area = trapz(spline*n_vacc/2, dx=(time[1] - time[0]))
total_area = n_vacc*10
fraction_country_A = area/total_area
fractions.append(fraction_country_A)
return fractions
def compute_splines_from_Rval_results(results, type_opti = "pareto_improvements",
vac = "vac2",
periods = 10,
length = 14,
total_length = 140,
grid_points = 6000,
add_additional = {"integers" : [9,10],
"number" : 0.5},):
fractions = []
for index in range(len(results.keys())):
result = results[index]
n_vacc = 60000
df_optimal = result[type_opti]
        if not (add_additional is None):
            for extra_index in add_additional["integers"]:
                for index2 in ["countryA"]:
                    for index3 in ["vac1", "vac2"]:
                        name = f"yy_{index2}_{index3}_{extra_index}"
                        df_optimal[name] = add_additional["number"]
cols = [x for x in df_optimal.columns if vac in x]
argmin = np.argmin(df_optimal["fval"])
yy_points = pd.Series(list(df_optimal.iloc[argmin][cols]))
time = np.linspace(0, total_length, grid_points) / 7
spline = get_spline(yy_points,
periods=periods,
length=length,
total_length=total_length,
grid_points=grid_points,
)
area = trapz(spline*n_vacc/2, dx=(time[1] - time[0]))
total_area = n_vacc*10
fraction_country_A = area/total_area
fractions.append(fraction_country_A)
return fractions
| 33.071613 | 177 | 0.550084 | 7,624 | 64,192 | 4.432975 | 0.073452 | 0.013167 | 0.007693 | 0.012427 | 0.703731 | 0.663402 | 0.619818 | 0.587892 | 0.56073 | 0.546054 | 0 | 0.028008 | 0.312516 | 64,192 | 1,940 | 178 | 33.08866 | 0.737826 | 0.059836 | 0 | 0.565244 | 0 | 0.00122 | 0.097505 | 0.015677 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017073 | false | 0 | 0.004878 | 0.00061 | 0.033537 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea5cafe79d0fc3ca37a1c223fbd84a13133c1a53 | 9,138 | py | Python | sysinv/cgts-client/cgts-client/cgtsclient/v1/imemory_shell.py | albailey/config | 40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9 | [
"Apache-2.0"
] | 10 | 2020-02-07T18:57:44.000Z | 2021-09-11T10:29:34.000Z | sysinv/cgts-client/cgts-client/cgtsclient/v1/imemory_shell.py | albailey/config | 40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9 | [
"Apache-2.0"
] | 1 | 2021-01-14T12:01:55.000Z | 2021-01-14T12:01:55.000Z | sysinv/cgts-client/cgts-client/cgtsclient/v1/imemory_shell.py | albailey/config | 40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9 | [
"Apache-2.0"
] | 10 | 2020-10-13T08:37:46.000Z | 2022-02-09T00:21:25.000Z | # Copyright (c) 2013-2014 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
from cgtsclient.common import utils
from cgtsclient import exc
from cgtsclient.v1 import ihost as ihost_utils
def _print_imemory_show(imemory):
    fields = ['memtotal_mib',
              'platform_reserved_mib',
              'memavail_mib',
              'hugepages_configured',
              'vswitch_hugepages_size_mib',
              'vswitch_hugepages_nr',
              'vswitch_hugepages_avail',
              'vswitch_hugepages_reqd',
              'vm_hugepages_nr_4K',
              'vm_pending_as_percentage',
              'vm_hugepages_nr_2M',
              'vm_hugepages_nr_2M_pending',
              'vm_hugepages_avail_2M',
              'vm_hugepages_nr_1G',
              'vm_hugepages_nr_1G_pending',
              'vm_hugepages_avail_1G',
              'uuid', 'ihost_uuid', 'inode_uuid',
              'created_at', 'updated_at']
    labels = ['Memory: Usable Total (MiB)',
              ' Platform (MiB)',
              ' Available (MiB)',
              'Huge Pages Configured',
              'vSwitch Huge Pages: Size (MiB)',
              ' Total',
              ' Available',
              ' Required',
              'Application Pages (4K): Total',
              'Application Huge Pages Pending As Percentage',
              'Application Huge Pages (2M): Total',
              ' Total Pending',
              ' Available',
              'Application Huge Pages (1G): Total',
              ' Total Pending',
              ' Available',
              'uuid', 'ihost_uuid', 'inode_uuid',
              'created_at', 'updated_at']
    data = [(f, getattr(imemory, f, '')) for f in fields]
    for d in data:
        # Drop the 2M "pending" row entirely when no update is pending;
        # the first ' Total Pending' label belongs to the 2M block.
        if d[0] == 'vm_hugepages_nr_2M_pending':
            if d[1] is None:
                fields.remove(d[0])
                labels.pop(labels.index(' Total Pending'))
        # Same for the 1G "pending" row; its label is the *last*
        # ' Total Pending' occurrence, hence the reversed search.
        if d[0] == 'vm_hugepages_nr_1G_pending':
            if d[1] is None:
                fields.remove(d[0])
                labels.pop(len(labels) - labels[::-1].index(' Total Pending') - 1)
    data = [(f, getattr(imemory, f, '')) for f in fields]
    utils.print_tuple_list(data, labels)
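
# --- Hedged illustration (not part of the original module) ------------------
# utils.print_tuple_list above receives positionally paired (field, value)
# tuples and a parallel label list; an illustrative (made-up) slice of that
# data would look like:
_example_data = [('memtotal_mib', 12000), ('platform_reserved_mib', 4000)]
_example_labels = ['Memory: Usable Total (MiB)', ' Platform (MiB)']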
@utils.arg('hostnameorid',
           metavar='<hostname or id>',
           help="Name or ID of host")
@utils.arg('numa_node',
           metavar='<processor>',
           help="processor")
def do_host_memory_show(cc, args):
    """Show memory attributes."""
    ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
    inodes = cc.inode.list(ihost.uuid)
    imemorys = cc.imemory.list(ihost.uuid)
    for m in imemorys:
        for n in inodes:
            if m.inode_uuid == n.uuid:
                if int(n.numa_node) == int(args.numa_node):
                    _print_imemory_show(m)
                    return
    else:
        # for/else: reached only when no matching processor caused a return
        raise exc.CommandError('Processor not found: host %s processor %s' %
                               (ihost.hostname, args.numa_node))
@utils.arg('hostnameorid',
           metavar='<hostname or id>',
           help="Name or ID of host")
def do_host_memory_list(cc, args):
    """List memory nodes."""
    ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
    inodes = cc.inode.list(ihost.uuid)
    imemorys = cc.imemory.list(ihost.uuid)
    for m in imemorys:
        for n in inodes:
            if m.inode_uuid == n.uuid:
                m.numa_node = n.numa_node
                break
    fields = ['numa_node',
              'memtotal_mib',
              'platform_reserved_mib',
              'memavail_mib',
              'hugepages_configured',
              'vswitch_hugepages_size_mib',
              'vswitch_hugepages_nr',
              'vswitch_hugepages_avail',
              'vswitch_hugepages_reqd',
              'vm_hugepages_nr_4K',
              'vm_pending_as_percentage',
              'vm_hugepages_nr_2M',
              'vm_hugepages_avail_2M',
              'vm_hugepages_nr_2M_pending',
              'vm_hugepages_nr_1G',
              'vm_hugepages_avail_1G',
              'vm_hugepages_nr_1G_pending',
              'vm_hugepages_use_1G']
    field_labels = ['processor',
                    'mem_total(MiB)',
                    'mem_platform(MiB)',
                    'mem_avail(MiB)',
                    'hugepages(hp)_configured',
                    'vs_hp_size(MiB)',
                    'vs_hp_total',
                    'vs_hp_avail',
                    'vs_hp_reqd',
                    'app_total_4K',
                    'app_hp_as_percentage',
                    'app_hp_total_2M',
                    'app_hp_avail_2M',
                    'app_hp_pending_2M',
                    'app_hp_total_1G',
                    'app_hp_avail_1G',
                    'app_hp_pending_1G',
                    'app_hp_use_1G']
    utils.print_list(imemorys, fields, field_labels, sortby=1)
@utils.arg('hostnameorid',
           metavar='<hostname or id>',
           help="Name or ID of host")
@utils.arg('numa_node',
           metavar='<processor>',
           help="processor")
@utils.arg('-m', '--platform_reserved_mib',
           metavar='<Platform Reserved MiB>',
           help='The amount of platform memory (MiB) for the numa node')
@utils.arg('-2M', '--hugepages_nr_2M_pending',
           metavar='<2M hugepages number>',
           help='The number of 2M application huge pages for the numa node')
@utils.arg('-1G', '--hugepages_nr_1G_pending',
           metavar='<1G hugepages number>',
           help='The number of 1G application huge pages for the numa node')
@utils.arg('-f', '--function',
           metavar='<function>',
           choices=['vswitch', 'application'],
           default='application',
           help='The Memory Function.')
def do_host_memory_modify(cc, args):
    """Modify platform reserved and/or application huge page memory attributes for worker nodes."""
    rwfields = ['platform_reserved_mib',
                'hugepages_nr_2M_pending',
                'hugepages_nr_1G_pending',
                'function']
    ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
    user_specified_fields = dict((k, v) for (k, v) in vars(args).items()
                                 if k in rwfields and v is not None)
    inodes = cc.inode.list(ihost.uuid)
    imemorys = cc.imemory.list(ihost.uuid)
    mem = None
    for m in imemorys:
        for n in inodes:
            if m.inode_uuid == n.uuid:
                if int(n.numa_node) == int(args.numa_node):
                    mem = m
                    break
        if mem:
            break
    if mem is None:
        raise exc.CommandError('Processor not found: host %s processor %s' %
                               (ihost.hostname, args.numa_node))
    function = user_specified_fields.get('function')
    vswitch_hp_size_mib = None
    percent_2M = None
    percent_1G = None
    patch = []
    for (k, v) in user_specified_fields.items():
        if k == 'function':
            continue
        if function == 'vswitch':
            # vSwitch pages are requested as a single "required" count plus an
            # explicit page size, so both CLI options map onto the same path.
            if k == 'hugepages_nr_2M_pending':
                vswitch_hp_size_mib = 2
                k = 'vswitch_hugepages_reqd'
            elif k == 'hugepages_nr_1G_pending':
                vswitch_hp_size_mib = 1024
                k = 'vswitch_hugepages_reqd'
        else:
            # Application pages may be given as absolute counts or percentages.
            if k == 'hugepages_nr_2M_pending':
                k = 'vm_hugepages_nr_2M_pending'
                percent_2M = "False"
                if str(v).endswith('%'):
                    percent_2M = "True"
                    v = v.rstrip("%")
                v = int(v)
            elif k == 'hugepages_nr_1G_pending':
                k = 'vm_hugepages_nr_1G_pending'
                percent_1G = "False"
                if str(v).endswith('%'):
                    percent_1G = "True"
                    v = v.rstrip("%")
                v = int(v)
        patch.append({'op': 'replace', 'path': '/' + k, 'value': v})
    if patch:
        if (percent_2M == "True" and percent_1G == "False") or \
                (percent_2M == "False" and percent_1G == "True"):
            raise exc.CommandError('2MB hugepage and 1GB hugepage values must both be '
                                   'percent or not percent. (2M as percentage: %s, 1G as '
                                   'percentage: %s)' % (percent_2M, percent_1G))
        if vswitch_hp_size_mib:
            patch.append({'op': 'replace', 'path': '/vswitch_hugepages_size_mib',
                          'value': vswitch_hp_size_mib})
        if percent_2M is not None or percent_1G is not None:
            patch.append({'op': 'replace', 'path': '/vm_pending_as_percentage',
                          'value': percent_2M if percent_2M is not None else percent_1G})
        imemory = cc.imemory.update(mem.uuid, patch)
        _print_imemory_show(imemory)
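
# --- Hedged illustration (not part of the original module) ------------------
# The patch built by do_host_memory_modify is a JSON-patch style list of
# replace operations; with illustrative values (not real host data), a
# non-percentage 2M request would look like:
_example_patch = [
    {'op': 'replace', 'path': '/platform_reserved_mib', 'value': 8000},
    {'op': 'replace', 'path': '/vm_hugepages_nr_2M_pending', 'value': 512},
    {'op': 'replace', 'path': '/vm_pending_as_percentage', 'value': "False"},
]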
| 36.552 | 99 | 0.52309 | 1,012 | 9,138 | 4.455534 | 0.162055 | 0.05855 | 0.040364 | 0.035485 | 0.512974 | 0.47882 | 0.408073 | 0.394988 | 0.357507 | 0.306942 | 0 | 0.016079 | 0.367039 | 9,138 | 249 | 100 | 36.698795 | 0.763485 | 0.030751 | 0 | 0.490385 | 0 | 0 | 0.306134 | 0.09914 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019231 | false | 0 | 0.014423 | 0 | 0.038462 | 0.024038 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea5d7abacc432b49216aa2f8e37ada38b4b43fc5 | 6,672 | py | Python | venv/Lib/site-packages/pylint/lint/parallel.py | AnxhelaMehmetaj/is219_flask | 1e88579f14a96c9826e9452b3c7f8e6477577ef7 | [
"BSD-3-Clause"
] | null | null | null | venv/Lib/site-packages/pylint/lint/parallel.py | AnxhelaMehmetaj/is219_flask | 1e88579f14a96c9826e9452b3c7f8e6477577ef7 | [
"BSD-3-Clause"
] | null | null | null | venv/Lib/site-packages/pylint/lint/parallel.py | AnxhelaMehmetaj/is219_flask | 1e88579f14a96c9826e9452b3c7f8e6477577ef7 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
# Copyright (c) https://github.com/PyCQA/pylint/blob/main/CONTRIBUTORS.txt
import collections
import functools
import warnings
from typing import (
    TYPE_CHECKING,
    Any,
    DefaultDict,
    Iterable,
    List,
    Sequence,
    Tuple,
    Union,
)

import dill

from pylint import reporters
from pylint.lint.utils import _patch_sys_path
from pylint.message import Message
from pylint.typing import FileItem, MessageLocationTuple
from pylint.utils import LinterStats, merge_stats

try:
    import multiprocessing
except ImportError:
    multiprocessing = None  # type: ignore[assignment]

if TYPE_CHECKING:
    from pylint.lint import PyLinter
# PyLinter object used by worker processes when checking files using multiprocessing
# should only be used by the worker processes
_worker_linter = None
def _get_new_args(message):
    location = (
        message.abspath,
        message.path,
        message.module,
        message.obj,
        message.line,
        message.column,
    )
    return (message.msg_id, message.symbol, location, message.msg, message.confidence)
def _worker_initialize(
    linter: bytes, arguments: Union[None, str, Sequence[str]] = None
) -> None:
    """Function called to initialize a worker for a Process within a multiprocessing Pool.

    :param linter: A linter-class (PyLinter) instance pickled with dill
    :param arguments: File or module name(s) to lint and to be added to sys.path
    """
    global _worker_linter  # pylint: disable=global-statement
    _worker_linter = dill.loads(linter)

    # On the worker process side the messages are just collected and passed back to
    # parent process as _worker_check_file function's return value
    _worker_linter.set_reporter(reporters.CollectingReporter())
    _worker_linter.open()

    # Patch sys.path so that each argument is importable just like in single job mode
    _patch_sys_path(arguments or ())
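
# --- Hedged illustration (not part of the original module) ------------------
# The linter crosses the process boundary as a dill pickle (dill, unlike the
# stdlib pickle, can also serialize lambdas and closures); a self-contained
# toy round-trip of the same mechanism, reusing the `dill` import above:
_blob = dill.dumps({"jobs": 4, "bump": lambda x: x + 1})
_restored = dill.loads(_blob)
assert _restored["bump"](1) == 2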
def _worker_check_single_file(
    file_item: FileItem,
) -> Tuple[
    int, Any, str, Any, List[Tuple[Any, ...]], LinterStats, Any, DefaultDict[Any, List]
]:
    if not _worker_linter:
        raise Exception("Worker linter not yet initialised")
    _worker_linter.open()
    _worker_linter.check_single_file_item(file_item)
    mapreduce_data = collections.defaultdict(list)
    for checker in _worker_linter.get_checkers():
        try:
            data = checker.get_map_data()
        except AttributeError:
            continue
        mapreduce_data[checker.name].append(data)
    msgs = [_get_new_args(m) for m in _worker_linter.reporter.messages]
    _worker_linter.reporter.reset()
    if _worker_linter.current_name is None:
        warnings.warn(
            (
                "In pylint 3.0 the current_name attribute of the linter object should be a string. "
                "If unknown it should be initialized as an empty string."
            ),
            DeprecationWarning,
        )
    return (
        id(multiprocessing.current_process()),
        _worker_linter.current_name,
        file_item.filepath,
        _worker_linter.file_state.base_name,
        msgs,
        _worker_linter.stats,
        _worker_linter.msg_status,
        mapreduce_data,
    )
def _merge_mapreduce_data(linter, all_mapreduce_data):
    """Merges map/reduce data across workers, invoking relevant APIs on checkers."""
    # First collate the data and prepare it, so we can send it to the checkers for
    # validation. The intent here is to collect all the mapreduce data for all checker-
    # runs across processes - that will then be passed to a static method on the
    # checkers to be reduced and further processed.
    collated_map_reduce_data = collections.defaultdict(list)
    for linter_data in all_mapreduce_data.values():
        for run_data in linter_data:
            for checker_name, data in run_data.items():
                collated_map_reduce_data[checker_name].extend(data)

    # Send the data to checkers that support/require consolidated data
    original_checkers = linter.get_checkers()
    for checker in original_checkers:
        if checker.name in collated_map_reduce_data:
            # Assume that if the check has returned map/reduce data that it has the
            # reducer function
            checker.reduce_map_data(linter, collated_map_reduce_data[checker.name])
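
# --- Hedged illustration (not part of the original module) ------------------
# The collation above is a nested defaultdict(list).extend; the same pattern
# on toy data (checker name -> collected map data), reusing the module's
# `collections` import:
_collated = collections.defaultdict(list)
for _run in ({"similarities": [1]}, {"similarities": [2]}):
    for _name, _data in _run.items():
        _collated[_name].extend(_data)
assert _collated["similarities"] == [1, 2]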
def check_parallel(
    linter: "PyLinter",
    jobs: int,
    files: Iterable[FileItem],
    arguments: Union[None, str, Sequence[str]] = None,
) -> None:
    """Use the given linter to lint the files with given amount of workers (jobs).

    This splits the work filestream-by-filestream. If you need to do work across
    multiple files, as in the similarity-checker, then inherit from MapReduceMixin and
    implement the map/reduce mixin functionality.
    """
    # The linter is inherited by all the pool's workers, i.e. the linter
    # is identical to the linter object here. This is required so that
    # a custom PyLinter object can be used.
    initializer = functools.partial(_worker_initialize, arguments=arguments)
    with multiprocessing.Pool(
        jobs, initializer=initializer, initargs=[dill.dumps(linter)]
    ) as pool:
        linter.open()
        all_stats = []
        all_mapreduce_data = collections.defaultdict(list)

        # Maps each file to be worked on by a single _worker_check_single_file() call,
        # collecting any map/reduce data by checker module so that we can 'reduce' it
        # later.
        for (
            worker_idx,  # used to merge map/reduce data across workers
            module,
            file_path,
            base_name,
            messages,
            stats,
            msg_status,
            mapreduce_data,
        ) in pool.imap_unordered(_worker_check_single_file, files):
            linter.file_state.base_name = base_name
            linter.set_current_module(module, file_path)
            for msg in messages:
                msg = Message(
                    msg[0], msg[1], MessageLocationTuple(*msg[2]), msg[3], msg[4]
                )
                linter.reporter.handle_message(msg)
            all_stats.append(stats)
            all_mapreduce_data[worker_idx].append(mapreduce_data)
            linter.msg_status |= msg_status

    _merge_mapreduce_data(linter, all_mapreduce_data)
    linter.stats = merge_stats([linter.stats] + all_stats)

    # Insert stats data to local checkers.
    for checker in linter.get_checkers():
        if checker is not linter:
            checker.stats = linter.stats
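
# --- Hedged usage sketch (not part of the original module; the names below
# are assumptions for illustration, not taken from this file) ----------------
# A caller hands check_parallel a configured PyLinter plus FileItem entries:
#
#     from pylint.lint import PyLinter
#     from pylint.typing import FileItem
#     linter = PyLinter()
#     items = [FileItem("pkg.mod", "pkg/mod.py", "pkg.mod")]
#     check_parallel(linter, jobs=2, files=items, arguments=["pkg"])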
| 36.064865 | 100 | 0.684952 | 860 | 6,672 | 5.140698 | 0.3 | 0.046143 | 0.023524 | 0.019 | 0.113549 | 0.065596 | 0.05112 | 0.018095 | 0 | 0 | 0 | 0.001779 | 0.241757 | 6,672 | 184 | 101 | 36.26087 | 0.872109 | 0.307404 | 0 | 0.0625 | 0 | 0 | 0.039112 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039063 | false | 0 | 0.101563 | 0 | 0.15625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea5e6b4b4392c0dc1ba2d1246c10bb825415c5c5 | 2,350 | py | Python | scan.py | carlos2606/FileSystemScanner | ab102979d36090392244c387b02e4df8f2ad95b7 | [
"MIT"
] | null | null | null | scan.py | carlos2606/FileSystemScanner | ab102979d36090392244c387b02e4df8f2ad95b7 | [
"MIT"
] | null | null | null | scan.py | carlos2606/FileSystemScanner | ab102979d36090392244c387b02e4df8f2ad95b7 | [
"MIT"
] | null | null | null | import os
import grp
import itertools
import multiprocessing
import schedule
import time
from pwd import getpwuid
from datetime import datetime as dt
def getsize(filename):
    return os.path.getsize(filename)

def getname(root, filename):
    return os.path.join(root, filename)

def getctime(path):
    return dt.fromtimestamp(os.path.getctime(path)).strftime('%Y-%m-%d %H:%M:%S')

def getmtime(path):
    return dt.fromtimestamp(os.path.getmtime(path)).strftime('%Y-%m-%d %H:%M:%S')

def getatime(path):
    return dt.fromtimestamp(os.path.getatime(path)).strftime('%Y-%m-%d %H:%M:%S')

def find_owner(filename):
    return getpwuid(os.stat(filename).st_uid).pw_name

def find_group(path):
    gid = os.stat(path).st_gid
    group = grp.getgrgid(gid)[0]
    return group

def get_access_bits(path):
    bits = oct(os.stat(path)[0])[-3:]
    return bits
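
# --- Hedged illustration (not part of the original script) ------------------
# get_access_bits keeps the last three octal digits of st_mode, which are the
# rwx permission bits; e.g. for a typical 0o644 regular file:
assert oct(0o100644)[-3:] == '644'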
def worker(path):
    '''
    Gathers data from one file
    '''
    realpath = path.split('/')[:-1]
    try:
        data = {
            'file': path.split('/')[-1],
            'path': '/'.join(realpath),
            'changedTime': getctime(path),
            'modifiedTime': getmtime(path),
            'accessedTime': getatime(path),
            'size': os.path.getsize(path),
            'owner': find_owner(path),
            'group': find_group(path),
            'accessBits': get_access_bits(path)
        }
        print(data)
    except Exception:
        # a file may vanish or become unreadable between the walk and the
        # stat calls; skip it rather than abort the whole scan
        pass
def scanner():
    '''
    Parallel file system walk using multiple processes.
    Each process will run a worker.
    '''
    path = input("Please enter a directory to be scanned: \n")
    if os.path.exists(path):
        with multiprocessing.Pool(8) as pool:  # pool of 8 worker processes
            walk = os.walk(path, followlinks=False)
            fn_gen = itertools.chain.from_iterable((os.path.join(root, file)
                                                    for file in files)
                                                   for root, dirs, files
                                                   in walk)
            # parallel processing of every regular file found by the walk
            results = pool.map(
                worker, [j for j in fn_gen if os.path.isfile(j)])
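
# --- Hedged illustration (not part of the original script) ------------------
# The generator in scanner() flattens os.walk output into a single stream of
# paths via chain.from_iterable; the same flattening on toy data, reusing the
# `itertools` import above:
_flat = list(itertools.chain.from_iterable([[1, 2], [3]]))
assert _flat == [1, 2, 3]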
def scan():
    schedule.every(3).seconds.do(scanner)
    while True:
        schedule.run_pending()
        time.sleep(1)

if __name__ == '__main__':
    scan()
| 23.737374 | 81 | 0.569787 | 291 | 2,350 | 4.522337 | 0.391753 | 0.041033 | 0.027356 | 0.056991 | 0.118541 | 0.118541 | 0.047872 | 0.047872 | 0.047872 | 0 | 0 | 0.005468 | 0.299574 | 2,350 | 98 | 82 | 23.979592 | 0.794046 | 0.064255 | 0 | 0 | 0 | 0 | 0.078595 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.177419 | false | 0.016129 | 0.129032 | 0.096774 | 0.435484 | 0.016129 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ea5e8f2bec1c9a1d594343ebe94f7d8aeabcd271 | 762 | py | Python | python/sender.py | dargkonide/pykins | 4bbdd799ca15cf8e92f80340a2899f770a05bdb0 | [
"MIT"
] | null | null | null | python/sender.py | dargkonide/pykins | 4bbdd799ca15cf8e92f80340a2899f770a05bdb0 | [
"MIT"
] | null | null | null | python/sender.py | dargkonide/pykins | 4bbdd799ca15cf8e92f80340a2899f770a05bdb0 | [
"MIT"
] | null | null | null | from threading import Thread
from exe.proto import *
from queue import Queue
from time import sleep
import traceback
def sleeper(data,qoutput,wait=10):
sleep(wait)
qoutput.put(data)
def work(data):
while 1:
try:
host,msg=data['send'].get()
ip=data['x']['host'].get(host.split('.')[0])
z=data['connects'].get(ip)
if not z:
print(f"Node {host} is offline, waiting ...")
Thread(target=sleeper,args=((host,msg),data['send'])).start()
continue
# print(f'send: {msg}')
send(z[0],msg)
except:
with open('err.log','a') as ff:
traceback.print_exc()
traceback.print_exc(file=ff) | 29.307692 | 77 | 0.528871 | 97 | 762 | 4.134021 | 0.546392 | 0.034913 | 0.054863 | 0.074813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009671 | 0.321522 | 762 | 26 | 78 | 29.307692 | 0.765957 | 0.027559 | 0 | 0 | 0 | 0 | 0.087838 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.217391 | 0 | 0.304348 | 0.130435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |