Schema (113 columns, name and dtype):

| Field | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
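Read programmatically, each row pairs repository metadata with the file `content` and the derived signals. A minimal loading sketch, assuming the rows are stored as JSON Lines with exactly these field names (the file name is hypothetical):

```python
import json

# Hypothetical dump of the rows below, one JSON object per line.
with open("code_quality_signals.jsonl") as fh:
    records = [json.loads(line) for line in fh]

for rec in records:
    # Field names come from the schema table above.
    print(rec["hexsha"][:12], rec["lang"], rec["size"], "bytes,",
          rec["qsc_code_num_lines_quality_signal"], "lines")
```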
**hexsha:** 538a493d99ff3d905d532327c5a14418aa3d3b7e · **size:** 10,614 · **ext:** py · **lang:** Python
- max_stars: scripts/biotimesql.py in Jay-Iam/retriever @ 26e321cdb86fcb4cb78184c4bf5c0c6902a97d2c, licenses ["MIT"], count null, events null
- max_issues: scripts/biotimesql.py in Jay-Iam/retriever @ 26e321cdb86fcb4cb78184c4bf5c0c6902a97d2c, licenses ["MIT"], count 1, events 2019-02-23T14:11:34.000Z to 2019-02-28T21:18:51.000Z
- max_forks: scripts/biotimesql.py in harshitbansal05/retriever @ a5b849ee5ed3cc8a92f8aff93e5ec2ba54599213, licenses ["MIT"], count 1, events 2020-01-06T11:37:54.000Z to 2020-01-06T11:37:54.000Z

**content:**
```python
# -*- coding: utf-8 -*-
#retriever
import csv
from pkg_resources import parse_version
from retriever.lib.models import Table
from retriever.lib.templates import Script
try:
from retriever.lib.defaults import VERSION
try:
from retriever.lib.tools import open_fr, open_fw, open_csvw
except ImportError:
from retriever.lib.scripts import open_fr, open_fw
except ImportError:
from retriever import open_fr, open_fw, VERSION
class main(Script):
def __init__(self, **kwargs):
Script.__init__(self, **kwargs)
self.title = "Commercial Fisheries Monthly Trade Data by Product, Country/Association"
self.name = "biotimesql"
self.retriever_minimum_version = "2.2.0"
self.urls = {
"sql_file": "https://zenodo.org/record/2602708/files/BioTIMESQL02_04_2018.sql?download=1",
}
self.version = "1.0.1"
self.ref = "https://zenodo.org/record/1095628#.WskN7dPwYyn"
self.citation = "Dornelas M, Antão LH, Moyes F, et al. BioTIME: A database of biodiversity time series for the Anthropocene. Global Ecology & Biogeography. 2018; 00:1 - 26. https://doi.org/10.1111/geb.12729."
self.description = "The BioTIME database has species identities and abundances in ecological assemblages through time."
self.keywords = ["Time series", "Anthropocene", "Global"]
self.licenses = [{"name": "CC BY 4.0"}]
self.encoding = "latin1"
if parse_version(VERSION) <= parse_version("2.0.0"):
self.shortname = self.name
self.name = self.title
self.tags = self.keywords
def download(self, engine=None, debug=False):
Script.download(self, engine, debug)
engine = self.engine
original_sql_file = "BioTIMESQL02_04_2018.sql"
engine.download_file(self.urls["sql_file"], original_sql_file)
sql_data = open_fr(self.engine.format_filename(original_sql_file))
set_open = False
csv_writer = None
csv_file = None
table_name = None
NULL = None
for line in sql_data:
table_indicator = "-- Table structure for table "
if line.startswith(table_indicator):
st = line[len(table_indicator):].replace("`", "")
table_name = st.strip()
# Close the previous table's file, if any, before opening the next one.
if set_open:
    csv_file.close()
    set_open = False
out_file = "{name}.csv".format(name=table_name)
csv_file = open_fw(engine.format_filename(out_file))
csv_writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
set_open = True
if line.startswith("INSERT INTO `{table_name}`".format(table_name=table_name)):
row_val = line[line.index("VALUES (") + 8:-3]
table_rows = row_val.replace("\r\n","").split("),(")
for i_row in table_rows:
v = eval('[' + str(i_row) + ']')
csv_writer.writerows([v])
if csv_file:
csv_file.close()
# Create abundance table
table = Table("ID_ABUNDANCE", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_ABUNDANCE", ("int",)),
("ABUNDANCE_TYPE", ("char", "100")),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("abundance.csv"))
# Create allrawdata table
table = Table("allrawdata", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_ALL_RAW_DATA", ("int",)),
("ABUNDANCE", ("double",)),
("BIOMASS", ("double",)),
("ID_SPECIES", ("int",)),
("SAMPLE_DESC", ("char", 200)),
("PLOT", ("char", 150)),
("LATITUDE", ("double",)),
("LONGITUDE", ("double",)),
("DEPTH", ("double",)),
("DAY", ("int",)),
("MONTH", ("int",)),
("YEAR", ("int",)),
("STUDY_ID", ("int",)),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("allrawdata.csv"))
# Create biomass table
table = Table("biomass", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [("ID_BIOMASS", ("int",)), ("BIOMASS_TYPE", ("char", "100"))]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("biomass.csv"))
# Create citation1 table
table = Table("citation1", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_CITATION1", ("int",)),
("STUDY_ID", ("int",)),
("CITATION_LINE", ("char",)),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("citation1.csv"))
# Create contacts table
table = Table("contacts", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_CONTACTS", ("int",)),
("STUDY_ID", ("int",)),
("CONTACT_1", ("char", 500)),
("CONTACT_2", ("char", 500)),
("CONT_1_MAIL", ("char", 60)),
("CONT_2_MAIL", ("char", 60)),
("LICENSE", ("char", 200)),
("WEB_LINK", ("char", 200)),
("DATA_SOURCE", ("char", 250)),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("contacts.csv"))
# Create countries table
table = Table("countries", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [("COUNT_ID", ("int",)), ("COUNTRY_NAME", ("char", 200))]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("countries.csv"))
# Create curation table
table = Table("curation", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_CURATION", ("int",)),
("STUDY_ID", ("int",)),
("LINK_ID", ("int",)),
("COMMENTS", ("char",)),
("DATE_STUDY_ADDED", ("char", 50)),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("curation.csv"))
# Create datasets table
table = Table("datasets", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_DATASETS", ("int",)),
("STUDY_ID", ("int",)),
("TAXA", ("char", 50)),
("ORGANISMS", ("char", 200)),
("TITLE", ("char",800)),
("AB_BIO", ("char", 2)),
("HAS_PLOT", ("char", 10)),
("DATA_POINTS", ("char",)),
("START_YEAR", ("char",)),
("END_YEAR", ("char",)),
("CENT_LAT", ("double",)),
("CENT_LONG", ("double",)),
("NUMBER_OF_SPECIES", ("char",)),
("NUMBER_OF_SAMPLES", ("char",)),
("NUMBER_LAT_LONG", ("char",)),
("TOTAL", ("char",)),
("GRAIN_SIZE_TEXT", ("char",)),
("GRAIN_SQ_KM", ("double",)),
("AREA_SQ_KM", ("double",)),
("AB_TYPE", ("char", )),
("BIO_TYPE", ("char",)),
("SAMPLE_TYPE", ("char",)),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("datasets.csv"))
# Create downloads table
table = Table("downloads", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("D_ID", ("int",)),
("STUDY", ("char", 25)),
("NAME", ("char", 150)),
("EMAIL", ("char", 150)),
("COUNTRY", ("char", 200)),
("ROLE", ("char", 150)),
("PURPOSE", ("char", 500)),
("LOCATION", ("char", 250)),
("DATE_STAMP", ("char",)),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("downloads.csv"))
# Create methods table
table = Table("methods", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_METHODS", ("int",)),
("STUDY_ID", ("int",)),
("METHODS", ("char",)),
("SUMMARY_METHODS", ("char", 500)),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("methods.csv"))
# Create sample table
table = Table("sample", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_SAMPLE", ("int",)),
("ID_TREAT", ("int",)),
("SAMPLE_DESC_NAME", ("char", 200)),
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("sample.csv"))
# Create site table
table = Table("site", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_SITE", ("int",)),
("STUDY_ID", ("int",)),
("REALM", ("char", 11)),
("CLIMATE", ("char", 20)),
("GENERAL_TREAT", ("char", 200)),
("TREATMENT", ("char", 200)),
("TREAT_COMMENTS", ("char", 250)),
("TREAT_DATE", ("char", 100)),
("CEN_LATITUDE", ("double",)),
("CEN_LONGITUDE", ("double",)),
("HABITAT", ("char", 100)),
("PROTECTED_AREA", ("char", 50)),
("AREA", ("double",)),
("BIOME_MAP", ("char", 500))
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("site.csv"))
# Create species table
table = Table("species", delimiter=",", header_rows=0, contains_pk=False)
table.columns = [
("ID_SPECIES", ("int",)),
("GENUS", ("char", 100)),
("SPECIES", ("char", 100)),
("GENUS_SPECIES", ("char", 100))
]
engine.table = table
engine.create_table()
engine.insert_data_from_file(engine.format_filename("species.csv"))
SCRIPT = main()
```
**avg_line_length:** 39.022059 · **max_line_length:** 216 · **alphanum_fraction:** 0.531939 · **effective:** 1 · **hits:** 0
**Quality signals** (qsc_*_quality_signal, 41 values, schema order): 1,105 | 10,614 | 4.885068 | 0.224434 | 0.072249 | 0.055576 | 0.048166 | 0.326973 | 0.316969 | 0.316969 | 0.316969 | 0.316969 | 0.299555 | 0 | 0.026086 | 0.299322 | 10,614 | 271 | 217 | 39.166052 | 0.699745 | 0.029489 | 0 | 0.233766 | 0 | 0.004329 | 0.209917 | 0.002334 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008658 | false | 0 | 0.04329 | 0 | 0.056277 | 0
**Raw qsc_* values** (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
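The script above splits each `INSERT ... VALUES (...),(...)` line and evaluates the tuples with `eval`, relying on the `NULL = None` binding in scope. A safer sketch of the same row-splitting step using `ast.literal_eval` (illustrative, not part of the original script; the naive `NULL` substitution assumes the token never occurs inside a quoted string):

```python
import ast

def rows_from_insert(line):
    """Yield value lists from a MySQL INSERT ... VALUES (...),(...); line."""
    row_val = line[line.index("VALUES (") + 8:-3]          # strip ');\n'
    for chunk in row_val.replace("\r\n", "").split("),("):
        # literal_eval only accepts literals, unlike eval.
        yield ast.literal_eval("[" + chunk.replace("NULL", "None") + "]")

line = "INSERT INTO `biomass` VALUES (1,'Weight'),(2,NULL);\n"
print(list(rows_from_insert(line)))   # [[1, 'Weight'], [2, None]]
```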
**hexsha:** 538b8d9cb91e4b908b2574c10cefedcf90ea344f · **size:** 6,356 · **ext:** py · **lang:** Python
- max_stars: day5.py in PLCoster/adventofcode2019 @ 7aad1503dcf80b127b21191850ad9c93f91a602a, licenses ["MIT"], count 1, events 2019-12-09T21:26:22.000Z to 2019-12-09T21:26:22.000Z
- max_issues: day5.py in PLCoster/adventofcode2019 @ 7aad1503dcf80b127b21191850ad9c93f91a602a, licenses ["MIT"], count null, events null
- max_forks: day5.py in PLCoster/adventofcode2019 @ 7aad1503dcf80b127b21191850ad9c93f91a602a, licenses ["MIT"], count null, events null

**content:**
```python
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 2 11:06:59 2019
@author: Paul
"""
def read_data(filename):
"""
Reads csv file into a list, and converts to ints
"""
data = []
f = open(filename, 'r')
for line in f:
data += line.strip('\n').split(',')
int_data = [int(i) for i in data]
f.close()
return int_data
def run_intcode(program, input_int):
"""
Takes data, list of ints to run int_code on.
Returns list of ints after intcode program has been run.
Running the Intcode program reads the integers sequentially in sets of 4:
data[i] == Parameter Mode + Opcode (last two digits)
data[i+1] == Entry 1
data[i+2] == Entry 2
data[i+3] == Entry 3
If Opcode == 1, the values at the index locations given by entries 1 and 2
are summed and stored at the index location of entry 3.
If Opcode == 2, the values at the index locations given by entries 1 and 2
are multiplied and stored at the index location of entry 3.
If Opcode == 3, the single integer (input) is saved to the position given
by entry 1.
If Opcode == 4, the program outputs the value of its only parameter. E.g. 4,50
would output the value at address 50.
If Opcode == 5 and entry 1 is not 0, the intcode position moves to the index
stored at entry 2. Otherwise it does nothing.
If Opcode == 6 and entry 1 is 0, the intcode position moves to the index
stored at entry 2. Otherwise it does nothing.
If Opcode == 7 and entry 1 < entry 2, store 1 in position given by third param,
otherwise store 0 at position given by third param.
If Opcode == 8 and entry 1 == entry 2, store 1 in position given by third param,
otherwise store 0 at position given by third param.
If Opcode == 99, the program is completed and will stop running.
Parameters are digits to the left of the opcode, read left to right:
Parameter 0 -> Position mode - the entry is treated as an index location
Parameter 1 -> Immediate mode - the entry is treated as a value
"""
data = program[:]
answer = -1
params = [0, 0, 0]
param_modes = ['', '', '']
i = 0
while i < len(program):
#print("i = ", i)
# Determine Opcode and parameter codes:
opcode_str = "{:0>5d}".format(data[i])
opcode = int(opcode_str[3:])
param_modes[0] = opcode_str[2]
param_modes[1] = opcode_str[1]
param_modes[2] = opcode_str[0]
#print(opcode_str)
for j in range(2):
if param_modes[j] == '0':
try:
params[j] = data[data[i+j+1]]
except IndexError:
continue
else:
try:
params[j] = data[i+j+1]
except IndexError:
continue
#print(params, param_modes)
# If opcode is 1, add relevant entries:
if opcode == 1:
data[data[i+3]] = params[0] + params[1]
i += 4
# If opcode is 2, multiply the relevant entries:
elif opcode == 2:
data[data[i+3]] = params[0] * params[1]
i += 4
# If opcode is 3, store input value at required location.
elif opcode == 3:
data[data[i+1]] = input_int
i += 2
# If opcode is 4, print out the input stored at specified location.
elif opcode == 4:
answer = data[data[i+1]]
print("Program output: ", data[data[i+1]])
i += 2
# If the opcode is 5 and the next parameter !=0, jump forward
elif opcode == 5:
if params[0] != 0:
i = params[1]
else:
i += 3
# If the opcode is 6 and next parameter is 0, jump forward
elif opcode == 6:
if params[0] == 0:
i = params[1]
else:
i += 3
# If the opcode is 7, carry out less than comparison and store 1/0 at loc 3
elif opcode == 7:
if params[0] < params[1]:
data[data[i+3]] = 1
else:
data[data[i+3]] = 0
i += 4
# If the opcode is 8, carry out equality comparison and store 1/0 at loc 3
elif opcode == 8:
if params[0] == params[1]:
data[data[i+3]] = 1
else:
data[data[i+3]] = 0
i += 4
# If the opcode is 99, halt the intcode
elif opcode == 99:
print("Program ended by halt code")
break
# If opcode is anything else something has gone wrong!
else:
print("Problem with the Program")
break
return data, answer
program = read_data("day5input.txt")
#print(program)
result1, answer1 = run_intcode(program, 1)
#print(result1)
print("Part 1: Answer is: ", answer1)
result2, answer2 = run_intcode(program, 5)
#print(result2)
print("Part 2: Answer is: ", answer2)
#test_program = [1002,4,3,4,33]
#test_program2 = [3,0,4,0,99]
#test_program3 = [1101,100,-1,4,0]
#test_program4 = [3,9,8,9,10,9,4,9,99,-1,8] # 1 if input = 8, 0 otherwise
#test_program5 = [3,9,7,9,10,9,4,9,99,-1,8] # 1 if input < 8, 0 otherwise
#test_program6 = [3,3,1108,-1,8,3,4,3,99] # 1 if input = 8, 0 otherwise
#test_program7 = [3,3,1107,-1,8,3,4,3,99] # 1 if input < 8, 0 otherwise
#test_program8 = [3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9] # 0 if input = 0, 1 otherwise
#test_program9 = [3,3,1105,-1,9,1101,0,0,12,4,12,99,1] # 0 if input = 0, 1 otherwise
#test_program10 = [3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,
#36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,999,1105,1,46,1101,1000,1,20,4,20,
#1105,1,46,98,99] # 999 if input < 8, 1000 if input = 8, 1001 if input > 8
```
**avg_line_length:** 34.73224 · **max_line_length:** 92 · **alphanum_fraction:** 0.522498 · **effective:** 1 · **hits:** 0
**Quality signals** (qsc_*_quality_signal, 41 values, schema order): 938 | 6,356 | 3.506397 | 0.221748 | 0.024324 | 0.027364 | 0.018243 | 0.378535 | 0.364853 | 0.343569 | 0.297355 | 0.297355 | 0.297355 | 0 | 0.105316 | 0.369572 | 6,356 | 182 | 93 | 34.923077 | 0.715498 | 0.512429 | 0 | 0.358974 | 0 | 0 | 0.048188 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0 | 0 | 0 | 0.051282 | 0.064103
**Raw qsc_* values** (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
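The commented test programs at the end document expected behaviour; a quick sanity check, assuming the file's functions are importable as a module named `day5` (which would require guarding the module-level puzzle-input code behind `if __name__ == '__main__':`):

```python
from day5 import run_intcode

# 3,9,8,9,10,9,4,9,99,-1,8 outputs 1 iff the input equals 8 (position mode).
_, out_eq = run_intcode([3, 9, 8, 9, 10, 9, 4, 9, 99, -1, 8], 8)
assert out_eq == 1

# 3,9,7,9,10,9,4,9,99,-1,8 outputs 1 iff the input is less than 8.
_, out_lt = run_intcode([3, 9, 7, 9, 10, 9, 4, 9, 99, -1, 8], 3)
assert out_lt == 1
```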
**hexsha:** 538cf8a863a1cdd537656657d4741a5309d4d759 · **size:** 8,079 · **ext:** py · **lang:** Python
- max_stars: test/test_purchasing.py in jacob22/accounting @ e2fceea880e3f056703ba97b6cf52b73cd7af93b, licenses ["Apache-2.0"], count null, events null
- max_issues: test/test_purchasing.py in jacob22/accounting @ e2fceea880e3f056703ba97b6cf52b73cd7af93b, licenses ["Apache-2.0"], count null, events null
- max_forks: test/test_purchasing.py in jacob22/accounting @ e2fceea880e3f056703ba97b6cf52b73cd7af93b, licenses ["Apache-2.0"], count null, events null

**content:**
```python
# -*- coding: utf-8 -*-
# Copyright 2019 Open End AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
if sys.version_info >= (3, 0):
PYT3 = True
import urllib.request
import urllib.parse
else:
PYT3 = False
import urllib2
import urlparse
import contextlib
import json
import os
import py
import subprocess
import time
import uuid
from . import support
here = os.path.dirname(__file__)
class Container(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def do_purchase(products, emailaddress):
params = {
'data': [
{'items': [{'product': product} for product in products],
'buyerName': 'Kalle Anka',
'buyerEmail': emailaddress}
]
}
if PYT3:
req = urllib.request.Request(urllib.parse.urljoin(support.url, '/rest/purchase'),
json.dumps(params).encode('ascii'),
{'Content-Type': 'application/json'})
data = json.load(urllib.request.urlopen(req))
else:
req = urllib2.Request(urlparse.urljoin(support.url, '/rest/purchase'),
json.dumps(params),
{'Content-Type': 'application/json'})
data = json.load(urllib2.urlopen(req))
return Container(id=data['purchase'],
invoice=data['invoiceUrl'],
buyerEmail=emailaddress)
def check_mail(client, mailssh, purchase, mailtype):
client.run('sendmail -qf')
message, = mailssh.find_and_delete_mail(None, 'TO', purchase.buyerEmail)
msg, headers = mailssh.parse(message)
assert headers['X-OE-MailType'] == [mailtype]
assert purchase.invoice in msg
return msg, headers
@contextlib.contextmanager
def check_mails(client, mailssh, purchase):
check_mail(client, mailssh, purchase, 'order-confirmation')
yield
check_mail(client, mailssh, purchase, 'full-payment-confirmation')
def gen_pg(client, org, id_args=[1, 1]):
cmd = 'python /root/accounting/members/paymentgen.py %s %s %s' % (
org.id, id_args[0], id_args[1])
id_args[0] += 1
id_args[1] += 1000
stdin, stdout, stderr = client.exec_command('PYTHONPATH=/root/accounting ' +
cmd)
return stdout.read()
def upload_pg(tmpdir, ssh, pgdata):
pgfile = tmpdir.join('pgfile')
pgfile.write(pgdata)
dest = uuid.uuid4()
with ssh(username='nordea') as client:
sftp = client.open_sftp()
sftp.put(str(pgfile), 'incoming/%s' % dest, confirm=False)
@py.test.mark.usefixtures('cluster', 'clean_db', 'bootstrapped', 'mailssh',
'ssh', 'org', 'emailaddress')
def test_full_plusgiro_payment(mailssh, ssh, org, emailaddress, tmpdir):
purchase = do_purchase([org.product], emailaddress)
with ssh() as client:
with check_mails(client, mailssh, purchase):
pgdata = gen_pg(client, org)
upload_pg(tmpdir, ssh, pgdata)
@py.test.mark.usefixtures('cluster', 'clean_db', 'bootstrapped', 'mailssh',
'ssh', 'org', 'emailaddress')
def test_partial_plusgiro_payment(ssh, mailssh, org, emailaddress,
tmpdir):
purchase = do_purchase([org.product], emailaddress)
with ssh() as client:
with check_mails(client, mailssh, purchase):
pgdata1 = gen_pg(client, org)
pgdata2 = gen_pg(client, org)
pgdata3 = gen_pg(client, org)
# The sum is 66666 (öre). It is probably unique in the fake pgfile,
# so we can simply replace it in order to make partial payments.
if PYT3:
partial_payment1 = pgdata1.replace(b'66666', b'22222') # pay 222.22 SEK
partial_payment2 = pgdata2.replace(b'66666', b'33333') # pay 333.33 SEK
final_payment = pgdata3.replace(b'66666', b'11111') # final 111.11 SEK
else:
partial_payment1 = pgdata1.replace('66666', '22222') # pay 222.22 SEK
partial_payment2 = pgdata2.replace('66666', '33333') # pay 333.33 SEK
final_payment = pgdata3.replace('66666', '11111') # final 111.11 SEK
upload_pg(tmpdir, ssh, partial_payment1)
msg, headers = check_mail(client, mailssh, purchase,
'partial-payment-confirmation')
assert '222,22' in msg # amount paid
assert '444,44' in msg # amount remaining
upload_pg(tmpdir, ssh, partial_payment2)
msg, headers = check_mail(client, mailssh, purchase,
'partial-payment-confirmation')
assert '333,33' in msg # amount paid
assert '111,11' in msg # amount remaining
upload_pg(tmpdir, ssh, final_payment)
@py.test.mark.usefixtures('cluster', 'clean_db', 'bootstrapped', 'mailssh',
'nodes', 'ssh', 'org', 'emailaddress')
def test_swish_payment(nodes, ssh, mailssh, org, emailaddress):
#py.test.skip('Skip swish tests until certificates work')
purchase = do_purchase([org.product], emailaddress)
with ssh() as client:
with check_mails(client, mailssh, purchase):
print(purchase.invoice)
if PYT3:
parsed = urllib.parse.urlparse(purchase.invoice)
_, _, purchase, _ = parsed.path.split('/')
path = '/providers/swish/charge/%s/%s' % (org.swish_provider, purchase)
url = urllib.parse.urlunparse((parsed.scheme, parsed.netloc, path,
'', '', ''))
data = {'phone': '1231181189'}
req = urllib.request.Request(url, json.dumps(data).encode('ascii'),
{'Content-Type': 'application/json'})
response = json.load(urllib.request.urlopen(req))
else:
parsed = urlparse.urlparse(purchase.invoice)
_, _, purchase, _ = parsed.path.split('/')
path = '/providers/swish/charge/%s/%s' % (org.swish_provider, purchase)
url = urlparse.urlunparse((parsed.scheme, parsed.netloc, path,
'', '', ''))
data = {'phone': '1231181189'}
req = urllib2.Request(url, json.dumps(data),
{'Content-Type': 'application/json'})
response = json.load(urllib2.urlopen(req))
print(response)
assert response['status'] == 'CREATED'
path = '/providers/swish/poll/%s/%s' % (org.swish_provider,
response['id'])
if PYT3:
url = urllib.parse.urlunparse((parsed.scheme, parsed.netloc, path,
'', '', ''))
else:
url = urlparse.urlunparse((parsed.scheme, parsed.netloc, path,
'', '', ''))
for _ in range(20):
if PYT3:
req = urllib.request.Request(url)
response = json.load(urllib.request.urlopen(req))
else:
req = urllib2.Request(url)
response = json.load(urllib2.urlopen(req))
print(response)
if response['status'] == 'PAID':
break
time.sleep(1)
```
**avg_line_length:** 39.409756 · **max_line_length:** 89 · **alphanum_fraction:** 0.564179 · **effective:** 1 · **hits:** 0
**Quality signals** (qsc_*_quality_signal, 41 values, schema order): 868 | 8,079 | 5.163594 | 0.300691 | 0.026104 | 0.042169 | 0.024543 | 0.510933 | 0.421464 | 0.403614 | 0.376841 | 0.296073 | 0.231147 | 0 | 0.034464 | 0.317614 | 8,079 | 204 | 90 | 39.602941 | 0.778523 | 0.112143 | 0 | 0.344156 | 0 | 0 | 0.116305 | 0.03233 | 0 | 0 | 0 | 0 | 0.045455 | 1 | 0.058442 | false | 0 | 0.084416 | 0 | 0.168831 | 0.019481
**Raw qsc_* values** (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
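The PYT3 branching repeated through this test file can usually be collapsed with a single compatibility import; a minimal sketch of that pattern (not taken from the original suite):

```python
import json

try:  # Python 3
    from urllib.request import Request, urlopen
    from urllib.parse import urljoin
except ImportError:  # Python 2
    from urllib2 import Request, urlopen
    from urlparse import urljoin

def post_json(base_url, path, payload):
    """POST a JSON payload and decode the JSON response."""
    req = Request(urljoin(base_url, path),
                  json.dumps(payload).encode('ascii'),
                  {'Content-Type': 'application/json'})
    return json.load(urlopen(req))
```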
**hexsha:** 538d31ed98e59299719777fcb1330ca052cef24d · **size:** 1,455 · **ext:** py · **lang:** Python
- max_stars: iot/downstream/fog_processes.py in SENERGY-Platform/senergy-connector @ 7198f6b2ec08b3c09c53755f259a2711921fdcbe, licenses ["Apache-2.0"], count null, events null
- max_issues: iot/downstream/fog_processes.py in SENERGY-Platform/senergy-connector @ 7198f6b2ec08b3c09c53755f259a2711921fdcbe, licenses ["Apache-2.0"], count null, events null
- max_forks: iot/downstream/fog_processes.py in SENERGY-Platform/senergy-connector @ 7198f6b2ec08b3c09c53755f259a2711921fdcbe, licenses ["Apache-2.0"], count null, events null

**content:**
```python
"""
Copyright 2020 InfAI (CC SES)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ("Router", )
from ..util import conf, get_logger, mqtt
import threading
import cc_lib
logger = get_logger(__name__.split(".", 1)[-1])
class Router(threading.Thread):
def __init__(self, client: cc_lib.client.Client, mqtt_client: mqtt.Client):
super().__init__(name="downstream-fog-processes-router", daemon=True)
self.__cc = client
self.__mqtt = mqtt_client
def run(self) -> None:
try:
while True:
envelope = self.__cc.receive_fog_processes()
logger.debug(envelope)
self.__mqtt.publish(
"{}/{}".format(conf.MQTTClient.fog_processes_pub_topic, envelope.sub_topic),
envelope.message,
qos=conf.MQTTClient.qos
)
except Exception as ex:
logger.error(ex)
```
**avg_line_length:** 31.630435 · **max_line_length:** 96 · **alphanum_fraction:** 0.648797 · **effective:** 1 · **hits:** 0
**Quality signals** (qsc_*_quality_signal, 41 values, schema order): 186 | 1,455 | 4.88172 | 0.580645 | 0.066079 | 0.028634 | 0.035242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00932 | 0.262543 | 1,455 | 45 | 97 | 32.333333 | 0.836906 | 0.380756 | 0 | 0 | 0 | 0 | 0.049826 | 0.035921 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.136364 | 0 | 0.272727 | 0
**Raw qsc_* values** (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
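Router runs as a daemon thread that forwards each fog-process envelope from the platform client to MQTT; a hedged wiring sketch using mocks in place of the real cc_lib and MQTT clients (assumes the connector package is importable):

```python
from unittest import mock
from iot.downstream.fog_processes import Router

# Placeholder clients; the RuntimeError ends the otherwise infinite loop.
cc_client = mock.Mock()
cc_client.receive_fog_processes.side_effect = RuntimeError("stop")
mqtt_client = mock.Mock()

router = Router(client=cc_client, mqtt_client=mqtt_client)
router.start()           # executes Router.run() on a daemon thread
router.join(timeout=1)   # run() catches the exception and logs it
```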
**hexsha:** 538daa45b22d9013e84ef526505b8753b513ae7f · **size:** 2,522 · **ext:** py · **lang:** Python
- max_stars: day07/test.py in mpirnat/aoc2016 @ 1aec59aca01541d0d1c30f85d4668959c82fa35c, licenses ["MIT"], count null, events null
- max_issues: day07/test.py in mpirnat/aoc2016 @ 1aec59aca01541d0d1c30f85d4668959c82fa35c, licenses ["MIT"], count null, events null
- max_forks: day07/test.py in mpirnat/aoc2016 @ 1aec59aca01541d0d1c30f85d4668959c82fa35c, licenses ["MIT"], count null, events null

**content:**
```python
#!/usr/bin/env python
import unittest
from day07 import has_abba, get_abba_allowed_strings, get_abba_disallowed_strings
from day07 import supports_tls, count_tls_addresses
from day07 import find_abas, supports_ssl, count_ssl_addresses
class TestFindingABBASequences(unittest.TestCase):
cases = (
('abba', True),
('oxyyxo', True),
('aaaa', False),
('abcd', False),
)
def test_finds_abba_sequences(self):
for text, expected in self.cases:
self.assertEqual(has_abba(text), expected)
class TestGettingAllowedChunks(unittest.TestCase):
cases = (
('abba[mnop]qrst[abcd]defg', ['abba', 'qrst', 'defg']),
)
def test_finds_allowed_substrings(self):
for text, expected in self.cases:
self.assertEqual(get_abba_allowed_strings(text), expected)
class TestGettingDisallowedChunks(unittest.TestCase):
cases = (
('abba[mnop]qrst[abcd]defg', ['mnop', 'abcd']),
)
def test_finds_disallowed_substrings(self):
for text, expected in self.cases:
self.assertEqual(get_abba_disallowed_strings(text), expected)
class TestCheckingTLSAddresses(unittest.TestCase):
cases = (
('abba[mnop]qrst', True),
('abcd[bddb]xyyx', False),
('aaaa[qwer]tyui', False),
('ioxxoj[asdfgh]zxcvbn', True),
)
def test_finds_tls_addresses(self):
for text, expected in self.cases:
self.assertEqual(supports_tls(text), expected)
def test_counts_tls_addresses(self):
data = [x[0] for x in self.cases]
self.assertEqual(count_tls_addresses(data), 2)
class TestFindingABASequences(unittest.TestCase):
cases = (
('aba', ['aba']),
('xyxxyx', ['xyx']),
('aaakekeke', ['eke', 'kek']),
('zazbzbzbcdb', ['bzb', 'zaz', 'zbz']),
)
def test_finds_aba_sequences(self):
for text, expected in self.cases:
self.assertEqual(find_abas(text), expected)
class TestCheckingSSLAddresses(unittest.TestCase):
cases = (
('aba[bab]xyz', True),
('xyx[xyx]xyx', False),
('aaa[kek]eke', True),
('zazbz[bzb]cdb', True),
)
def test_finds_ssl_addresses(self):
for text, expected in self.cases:
self.assertEqual(supports_ssl(text), expected)
def test_counts_ssl_addresses(self):
data = [x[0] for x in self.cases]
self.assertEqual(count_ssl_addresses(data), 3)
if __name__ == '__main__':
unittest.main()
```
**avg_line_length:** 27.714286 · **max_line_length:** 81 · **alphanum_fraction:** 0.635607 · **effective:** 1 · **hits:** 0
**Quality signals** (qsc_*_quality_signal, 41 values, schema order): 292 | 2,522 | 5.284247 | 0.273973 | 0.093325 | 0.057032 | 0.077771 | 0.407647 | 0.375243 | 0.353856 | 0.353856 | 0.300713 | 0.300713 | 0 | 0.005157 | 0.231166 | 2,522 | 90 | 82 | 28.022222 | 0.790614 | 0.00793 | 0 | 0.212121 | 0 | 0 | 0.10076 | 0.019192 | 0 | 0 | 0 | 0 | 0.121212 | 1 | 0.121212 | false | 0 | 0.060606 | 0 | 0.363636 | 0
**Raw qsc_* values** (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
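The test cases pin down the ABBA rule exactly; one implementation consistent with them (a sketch; the real day07 module may differ):

```python
def has_abba(text):
    """True if text contains a four-char palindrome xyyx with x != y."""
    return any(a == d and b == c and a != b
               for a, b, c, d in zip(text, text[1:], text[2:], text[3:]))

assert has_abba('abba') and has_abba('oxyyxo')
assert not has_abba('aaaa') and not has_abba('abcd')
```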
**hexsha:** 538e1ba9c8f2894b4bdf8950c5cd9a8fa42ed826 · **size:** 4,787 · **ext:** py · **lang:** Python
- max_stars: rlnets/PG.py in HTRPOCODES/HTRPO-v2 @ 7e085e8077e6caa38d192bbd33b41c49b36ad6a6, licenses ["MIT"], count 7, events 2020-02-24T15:05:20.000Z to 2021-08-24T02:27:13.000Z
- max_issues: rlnets/PG.py in ZhangHanbo/Deep-Reinforcement-Learning-Package @ 10ab418fcb4807747ebe162920f3df1e80b80a2a, licenses ["MIT"], count null, events null
- max_forks: rlnets/PG.py in ZhangHanbo/Deep-Reinforcement-Learning-Package @ 10ab418fcb4807747ebe162920f3df1e80b80a2a, licenses ["MIT"], count 1, events 2020-04-11T13:08:23.000Z to 2020-04-11T13:08:23.000Z

**content:**
```python
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from basenets.MLP import MLP
from basenets.Conv import Conv
from torch import nn
class FCPG_Gaussian(MLP):
def __init__(self,
n_inputfeats,
n_actions,
sigma,
n_hiddens = [30],
nonlinear = F.tanh,
usebn = False,
outactive = None,
outscaler = None,
initializer = "orthogonal",
initializer_param = {"gain":np.sqrt(2), "last_gain": 0.1}
):
self.n_actions = n_actions
super(FCPG_Gaussian, self).__init__(
n_inputfeats, # input dim
n_actions, # output dim
n_hiddens, # hidden unit number list
nonlinear,
usebn,
outactive,
outscaler,
initializer,
initializer_param=initializer_param,
)
self.logstd = nn.Parameter(torch.log(sigma * torch.ones(n_actions) + 1e-8))
def forward(self, x, other_data=None):
x = MLP.forward(self, x, other_data)
# for exploration, we need to make sure that the std is not too low.
logstd = torch.clamp(self.logstd, min = np.log(0.1))
return x, logstd.expand_as(x), torch.exp(logstd).expand_as(x)
def cuda(self, device = None):
self.logstd.cuda()
return self._apply(lambda t: t.cuda(device))
class FCPG_Softmax(MLP):
def __init__(self,
n_inputfeats, # input dim
n_actions, # output dim
n_hiddens = [10], # hidden unit number list
nonlinear = F.tanh,
usebn = False,
outactive = F.softmax,
outscaler = None,
initializer = "orthogonal",
initializer_param = {"gain":np.sqrt(2), "last_gain": 0.1}
):
self.n_actions = n_actions
super(FCPG_Softmax, self).__init__(
n_inputfeats, # input dim
n_actions, # output dim
n_hiddens, # hidden unit number list
nonlinear,
usebn,
outactive,
outscaler,
initializer,
initializer_param=initializer_param,
)
def forward(self, x, other_data=None):
x = MLP.forward(self, x, other_data)
# for exploration, and similar to e-greedy
x = x + 0.01 / self.n_actions
x = x / torch.sum(x, dim = -1, keepdim=True).detach()
return x
class ConvPG_Softmax(Conv):
def __init__(self,
n_inputfeats, # input dim
n_actions, # output dim
k_sizes = [8, 4, 3],
channels = [8, 16, 16],
strides = [4, 2, 2],
fcs = [32, 32, 32], # hidden unit number list
nonlinear = F.relu,
usebn = False,
outactive = F.softmax,
outscaler = None,
initializer="xavier",
initializer_param={}
):
self.n_actions = n_actions
super(ConvPG_Softmax, self).__init__(
n_inputfeats, # input dim
n_actions, # output dim
k_sizes,
channels,
strides,
fcs,
nonlinear,
usebn,
outactive,
outscaler,
initializer,
initializer_param=initializer_param,
)
def forward(self, x, other_data=None):
x = Conv.forward(self, x, other_data)
# for exploration, and similar to e-greedy
x = x + 0.01 / self.n_actions
x = x / torch.sum(x, dim=-1, keepdim=True).detach()
return x
# TODO: support multi-layer value function in which action is concat before the final layer
class FCVALUE(MLP):
def __init__(self,
n_inputfeats,
n_hiddens = [30],
nonlinear = F.tanh,
usebn = False,
outactive = None,
outscaler = None,
initializer="orthogonal",
initializer_param={"gain":np.sqrt(2), "last_gain": 0.1}
):
super(FCVALUE, self).__init__(
n_inputfeats,
1,
n_hiddens,
nonlinear,
usebn,
outactive,
outscaler,
initializer,
initializer_param=initializer_param,
)
```
**avg_line_length:** 34.192857 · **max_line_length:** 91 · **alphanum_fraction:** 0.48569 · **effective:** 1 · **hits:** 0
**Quality signals** (qsc_*_quality_signal, 41 values, schema order): 483 | 4,787 | 4.621118 | 0.248447 | 0.053763 | 0.032258 | 0.045699 | 0.694444 | 0.694444 | 0.653226 | 0.630376 | 0.59543 | 0.561828 | 0 | 0.01652 | 0.430959 | 4,787 | 139 | 92 | 34.438849 | 0.802863 | 0.091707 | 0 | 0.68254 | 0 | 0 | 0.017329 | 0 | 0 | 0 | 0 | 0.007194 | 0 | 1 | 0.063492 | false | 0 | 0.055556 | 0 | 0.18254 | 0
**Raw qsc_* values** (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
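A hedged usage sketch for FCPG_Softmax (assuming the basenets package is importable; the observation size and action count are illustrative):

```python
import torch
from rlnets.PG import FCPG_Softmax

policy = FCPG_Softmax(n_inputfeats=4, n_actions=2, n_hiddens=[10])
obs = torch.randn(1, 4)                # batch of one 4-dim observation
probs = policy(obs)                    # forward() smooths and renormalizes
action = torch.multinomial(probs, 1)   # sample an action from the policy
```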
**hexsha:** 538e7c69b579d9dbd9a344fd3df293fc4cfca562 · **size:** 10,057 · **ext:** py · **lang:** Python
- max_stars: tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py in m4rkl1u/tensorflow @ 90a8825c7ae9719e8969d45040b4155b0e7de130, licenses ["Apache-2.0"], count 2, events 2018-12-05T10:58:40.000Z to 2019-01-24T11:36:01.000Z
- max_issues: tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py in m4rkl1u/tensorflow @ 90a8825c7ae9719e8969d45040b4155b0e7de130, licenses ["Apache-2.0"], count null, events null
- max_forks: tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py in m4rkl1u/tensorflow @ 90a8825c7ae9719e8969d45040b4155b0e7de130, licenses ["Apache-2.0"], count 2, events 2019-02-26T16:21:15.000Z to 2020-12-04T17:48:17.000Z

**content:**
```python
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseTensorsMap."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
# pylint: disable=protected-access
add_sparse_to_tensors_map = sparse_ops._add_sparse_to_tensors_map
add_many_sparse_to_tensors_map = sparse_ops._add_many_sparse_to_tensors_map
take_many_sparse_from_tensors_map = (
sparse_ops._take_many_sparse_from_tensors_map)
# pylint: enable=protected-access
class SparseTensorsMapTest(test.TestCase):
def _SparseTensorPlaceholder(self, dtype=None):
if dtype is None:
dtype = dtypes.int32
return sparse_tensor_lib.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtype), array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_3x4(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2],
[2, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([3, 4]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_1x1x1(self):
ind = np.array([[0, 0, 0]]).astype(np.int64)
val = np.array([0]).astype(np.int32)
shape = np.array([3, 4, 5]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def testAddTakeMany(self):
with self.session(graph=ops.Graph(), use_gpu=False) as sess:
sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
handle0 = add_sparse_to_tensors_map(sp_input0, shared_name="a")
handle1 = add_sparse_to_tensors_map(sp_input1, shared_name="a")
self.assertEqual(handle0.get_shape(), ())
handles_concat = array_ops.stack([handle0, handle1])
sp_out = take_many_sparse_from_tensors_map(
sparse_map_op=handle0.op, sparse_handles=handles_concat)
combined_indices, combined_values, combined_shape = self.evaluate(sp_out)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], sp_input0[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], sp_input1[0])
self.assertAllEqual(combined_values[:6], sp_input0[1])
self.assertAllEqual(combined_values[6:], sp_input1[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
def testFeedAddTakeMany(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
handle = add_sparse_to_tensors_map(sp_input)
handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
sparse_handles = ops.convert_to_tensor(
[handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=sparse_handles)
combined_indices, combined_values, combined_shape = self.evaluate(
sp_roundtrip)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], input1_val[0])
self.assertAllEqual(combined_values[:6], input0_val[1])
self.assertAllEqual(combined_values[6:], input1_val[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
def testAddManyTakeManyRoundTrip(self):
with self.session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
handles = add_many_sparse_to_tensors_map(sparse_tensor)
roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handles.op, sparse_handles=handles)
handles_value, roundtrip_value = sess.run(
[handles, roundtrip],
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertEqual(handles_value.shape, (4,))
self.assertAllEqual(roundtrip_value.indices, indices_value)
self.assertAllEqual(roundtrip_value.values, values_value)
self.assertAllEqual(roundtrip_value.dense_shape, shape_value)
def testDeserializeFailsInconsistentRank(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_1x1x1()
handle = add_sparse_to_tensors_map(sp_input)
handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
handle_concat = ops.convert_to_tensor(
[handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=handle_concat)
with self.assertRaisesOpError(
r"Inconsistent rank across SparseTensors: rank prior to "
r"SparseTensor\[1\] was: 3 but rank of SparseTensor\[1\] is: 4"):
self.evaluate(sp_roundtrip)
def testTakeManyFailsWrongInputOp(self):
with self.session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6(np.arange(6))
handle = add_sparse_to_tensors_map(input_val)
handle_value = self.evaluate(handle)
bad_handle = handle_value + 10
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=[handle_value, bad_handle])
with self.assertRaisesOpError(r"Unable to find SparseTensor: 10"):
self.evaluate(sp_roundtrip)
class BenchmarkSparseTensorsMapVsSerialization(test.Benchmark):
def benchmarkVeryLarge2DFloatSparseTensor(self):
np.random.seed(127)
num_elements = 10000
batch_size = 64
indices_batch = np.random.randint(
batch_size, size=num_elements, dtype=np.int64)
indices_value = np.arange(num_elements, dtype=np.int64)
indices = np.asarray(
sorted(zip(indices_batch, indices_value)), dtype=np.int64)
values = ["feature_value_for_embedding_lookup"] * num_elements
shape = np.asarray([batch_size, num_elements], dtype=np.int64)
with session.Session(config=benchmark.benchmark_config()) as sess:
with ops.device("/cpu:0"):
indices = variables.Variable(indices)
values = variables.Variable(values)
shape = variables.Variable(shape)
st = sparse_tensor_lib.SparseTensor(indices, values, shape)
st_handles = add_many_sparse_to_tensors_map(st)
st_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=st_handles.op, sparse_handles=st_handles)
st_roundtrip_op = st_roundtrip.values.op
st_serialized = sparse_ops.serialize_many_sparse(st)
st_deserialized = sparse_ops.deserialize_many_sparse(
st_serialized, dtype=values.dtype)
st_deserialized_op = st_deserialized.values.op
variables.global_variables_initializer().run()
st_roundtrip_values = self.evaluate(st_roundtrip)
st_deserialized_values = self.evaluate(st_deserialized)
np.testing.assert_equal(st_roundtrip_values.values,
st_deserialized_values.values)
np.testing.assert_equal(st_roundtrip_values.indices,
st_deserialized_values.indices)
np.testing.assert_equal(st_roundtrip_values.dense_shape,
st_deserialized_values.dense_shape)
self.run_op_benchmark(
sess,
st_roundtrip_op,
min_iters=2000,
name="benchmark_very_large_2d_float_st_tensor_maps")
self.run_op_benchmark(
sess,
st_deserialized_op,
min_iters=2000,
name="benchmark_very_large_2d_float_st_serialization")
if __name__ == "__main__":
test.main()
```
**avg_line_length:** 42.079498 · **max_line_length:** 80 · **alphanum_fraction:** 0.704484 · **effective:** 1 · **hits:** 0
**Quality signals** (qsc_*_quality_signal, 41 values, schema order): 1,308 | 10,057 | 5.127676 | 0.17737 | 0.028329 | 0.054272 | 0.029521 | 0.521545 | 0.460713 | 0.382138 | 0.331892 | 0.331892 | 0.280602 | 0 | 0.031189 | 0.187034 | 10,057 | 238 | 81 | 42.256303 | 0.789139 | 0.083424 | 0 | 0.237288 | 0 | 0 | 0.031332 | 0.01349 | 0 | 0 | 0 | 0 | 0.135593 | 1 | 0.056497 | false | 0 | 0.073446 | 0 | 0.163842 | 0.00565
**Raw qsc_* values** (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
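The round trip the tests exercise is: write SparseTensors into the shared tensors map, then reassemble them as one batched SparseTensor. A condensed sketch of that flow, using the module-private aliases defined at the top of the file (TF1 graph mode, as in the tests):

```python
# Condensed version of testAddTakeMany, using this file's aliases.
sp = sparse_tensor_lib.SparseTensor(
    indices=[[0, 0]], values=[7], dense_shape=[1, 1])
handle = add_sparse_to_tensors_map(sp, shared_name="demo")
sp_batch = take_many_sparse_from_tensors_map(
    sparse_map_op=handle.op,
    sparse_handles=array_ops.stack([handle]))  # adds a minibatch dimension
```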
**hexsha:** 538f0d9adeec1b1a9f1d17d56827c035463ad1c5 · **size:** 1,412 · **ext:** py · **lang:** Python
- max_stars: ceph/tests/conftest.py in remicalixte/integrations-core @ b115e18c52820fe1a92495f538fdc14ddf83cfe1, licenses ["BSD-3-Clause"], count 1, events 2021-03-24T13:00:14.000Z to 2021-03-24T13:00:14.000Z
- max_issues: ceph/tests/conftest.py in remicalixte/integrations-core @ b115e18c52820fe1a92495f538fdc14ddf83cfe1, licenses ["BSD-3-Clause"], count null, events null
- max_forks: ceph/tests/conftest.py in remicalixte/integrations-core @ b115e18c52820fe1a92495f538fdc14ddf83cfe1, licenses ["BSD-3-Clause"], count null, events null

**content:**
```python
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import pytest
from datadog_checks.dev import docker_run
from datadog_checks.dev.conditions import CheckDockerLogs
from datadog_checks.dev.subprocess import run_command
from .common import BASIC_CONFIG, HERE
E2E_METADATA = {
'start_commands': [
'apt-get update',
'apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y docker.io',
],
'docker_volumes': ['/var/run/docker.sock:/var/run/docker.sock'],
}
@pytest.fixture(scope="session")
def dd_environment():
compose_file = os.path.join(HERE, 'compose', 'docker-compose.yaml')
# We need a custom condition to wait a bit longer
with docker_run(
compose_file=compose_file,
conditions=[
CheckDockerLogs(compose_file, 'spawning ceph --cluster ceph -w', wait=5),
CheckDockerLogs(compose_file, 'Running on http://0.0.0.0:5000/'),
],
):
# Clean the disk space warning
run_command(
['docker', 'exec', 'dd-test-ceph', 'ceph', 'tell', 'mon.*', 'injectargs', '--mon_data_avail_warn', '5']
)
# Wait a bit for the change to take effect
condition = CheckDockerLogs(compose_file, 'Cluster is now healthy')
condition()
yield BASIC_CONFIG, E2E_METADATA
```
**avg_line_length:** 32.837209 · **max_line_length:** 115 · **alphanum_fraction:** 0.659348 · **effective:** 1 · **hits:** 0
**Quality signals** (qsc_*_quality_signal, 41 values, schema order): 183 | 1,412 | 4.961749 | 0.562842 | 0.072687 | 0.056167 | 0.066079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015288 | 0.212465 | 1,412 | 42 | 116 | 33.619048 | 0.801259 | 0.160057 | 0 | 0.068966 | 0 | 0.034483 | 0.312977 | 0.108567 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.206897 | 0 | 0.241379 | 0
**Raw qsc_* values** (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
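dd_environment yields the instance config plus the E2E metadata; a hedged sketch of a test consuming it directly (in practice the datadog_checks e2e tooling drives this fixture, so this is purely illustrative):

```python
def test_environment_metadata(dd_environment):
    config, metadata = dd_environment       # BASIC_CONFIG, E2E_METADATA
    assert 'docker_volumes' in metadata
```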
**hexsha:** 538f4e290b42893ff7be5c3f3a19a555501eb1e6 · **size:** 3,025 · **ext:** py · **lang:** Python
- max_stars: federation/hostmeta/fetchers.py in weex/federation @ 01357aacb04b076442ce5f803a0fc65df5a74d09, licenses ["BSD-3-Clause"], count 93, events 2016-11-26T10:52:13.000Z to 2022-01-15T20:07:35.000Z
- max_issues: federation/hostmeta/fetchers.py in weex/federation @ 01357aacb04b076442ce5f803a0fc65df5a74d09, licenses ["BSD-3-Clause"], count 75, events 2016-10-18T10:15:44.000Z to 2019-10-05T22:16:32.000Z
- max_forks: federation/hostmeta/fetchers.py in weex/federation @ 01357aacb04b076442ce5f803a0fc65df5a74d09, licenses ["BSD-3-Clause"], count 9, events 2017-04-08T08:03:45.000Z to 2021-09-13T22:00:48.000Z

**content:**
```python
import json
from typing import Dict, Optional
import requests
from federation.hostmeta.parsers import (
parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, parse_mastodon_document,
parse_matrix_document, parse_misskey_document)
from federation.utils.network import fetch_document
HIGHEST_SUPPORTED_NODEINFO_VERSION = 2.1
def fetch_mastodon_document(host):
doc, status_code, error = fetch_document(host=host, path='/api/v1/instance')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_mastodon_document(doc, host)
def fetch_matrix_document(host: str) -> Optional[Dict]:
doc, status_code, error = fetch_document(host=host, path='/_matrix/federation/v1/version')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_matrix_document(doc, host)
def fetch_misskey_document(host: str, mastodon_document: Dict=None) -> Optional[Dict]:
try:
response = requests.post(f'https://{host}/api/meta') # ¯\_(ツ)_/¯
except Exception:
return
try:
doc = response.json()
except json.JSONDecodeError:
return
if response.status_code == 200:
return parse_misskey_document(doc, host, mastodon_document=mastodon_document)
def fetch_nodeinfo_document(host):
doc, status_code, error = fetch_document(host=host, path='/.well-known/nodeinfo')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
url, highest_version = '', 0.0
if doc.get('0'):
# Buggy NodeInfo from certain old Hubzilla versions
url = doc.get('0', {}).get('href')
elif isinstance(doc.get('links'), dict):
# Another buggy NodeInfo from certain old Hubzilla versions
url = doc.get('links').get('href')
else:
for link in doc.get('links'):
version = float(link.get('rel').split('/')[-1])
if highest_version < version <= HIGHEST_SUPPORTED_NODEINFO_VERSION:
url, highest_version = link.get('href'), version
if not url:
return
doc, status_code, error = fetch_document(url=url)
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_nodeinfo_document(doc, host)
def fetch_nodeinfo2_document(host):
doc, status_code, error = fetch_document(host=host, path='/.well-known/x-nodeinfo2')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_nodeinfo2_document(doc, host)
def fetch_statisticsjson_document(host):
doc, status_code, error = fetch_document(host=host, path='/statistics.json')
if not doc:
return
try:
doc = json.loads(doc)
except json.JSONDecodeError:
return
return parse_statisticsjson_document(doc, host)
```
**avg_line_length:** 28.809524 · **max_line_length:** 110 · **alphanum_fraction:** 0.668099 · **effective:** 1 · **hits:** 0
**Quality signals** (qsc_*_quality_signal, 41 values, schema order): 375 | 3,025 | 5.221333 | 0.208 | 0.067416 | 0.042901 | 0.110827 | 0.480592 | 0.433606 | 0.417773 | 0.417773 | 0.417773 | 0.395812 | 0 | 0.006879 | 0.231074 | 3,025 | 104 | 111 | 29.086538 | 0.83405 | 0.038678 | 0 | 0.506024 | 0 | 0 | 0.056129 | 0.025826 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072289 | false | 0 | 0.060241 | 0 | 0.325301 | 0
**Raw qsc_* values** (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
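Every fetcher repeats the same fetch, decode, parse shape; the repetition could be factored into a helper like the sketch below (an illustration, not part of the module):

```python
import json
from typing import Dict, Optional

from federation.utils.network import fetch_document

def fetch_json(host: str, path: str) -> Optional[Dict]:
    """Shared fetch -> json.loads step used by the fetchers above."""
    doc, status_code, error = fetch_document(host=host, path=path)
    if not doc:
        return None
    try:
        return json.loads(doc)
    except json.JSONDecodeError:
        return None
```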
**hexsha:** 538fd4b4cff424f1346a608bba50033518ef9ea5 · **size:** 2,582 · **ext:** py · **lang:** Python
- max_stars: features/analysis_features.py in iag0g0mes/t2_fis_driving_style @ 7f62ac3e67e65e7bd1273a2f845eb05820e95b70, licenses ["Apache-2.0"], count 5, events 2021-04-20T16:03:37.000Z to 2022-03-11T00:13:11.000Z
- max_issues: features/analysis_features.py in iag0g0mes/t2_fis_driving_style @ 7f62ac3e67e65e7bd1273a2f845eb05820e95b70, licenses ["Apache-2.0"], count 1, events 2021-04-21T02:35:38.000Z to 2021-04-21T12:54:14.000Z
- max_forks: features/analysis_features.py in iag0g0mes/t2fis_driving_style @ 7f62ac3e67e65e7bd1273a2f845eb05820e95b70, licenses ["Apache-2.0"], count null, events null

**content:**
```python
import numpy as np
from typing import Any, Dict, List, Tuple, NoReturn
import argparse
import os
def parse_arguments() -> Any:
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir",
default="",
type=str,
help="Directory where the features (npy files) are saved",
)
parser.add_argument("--mode",
required=True,
type=str,
help="train/val/test/sample",
choices=['train', 'test', 'val','sample'])
parser.add_argument("--obs_len",
default=2,
type=int,
help="Observed length of the trajectory in seconds",
choices=[1,2,3,4,5])
parser.add_argument("--filter",
default='ekf',
type=str,
help="Filter to process the data noise. (ekf/none/ekf-savgol/savgol",
choices=['ekf', 'none', 'ekf-savgol', 'savgol'])
return parser.parse_args()
def stats(traj:np.ndarray) -> NoReturn:
#central tendency : mean
#dispersion : std
#bounds : min max
#quantile : 0.25, 0.5, 0.75
labels = ['mean_v', 'mean_acc', 'mean_deac', 'std_jy']
for i, l in zip(range(0, traj.shape[1]), labels):
t = traj[:, i]
_mean = round(np.mean(t),2)
_std = round(np.std(t),2)
_min = round(np.min(t),2)
_max = round(np.max(t),2)
_q25 = round(np.quantile(t, 0.25),2)
_q50 = round(np.quantile(t, 0.5),2)
_q75 = round(np.quantile(t, 0.75),2)
print (f'Feature: {l}')
print ('\tmean:{} | std:{} | min:{} | max:{} | q25:{} | q50:{} | q75:{}'.format(_mean,
_std, _min, _max, _q25, _q50, _q75))
if __name__ == '__main__':
#_filters = ['none', 'ekf', 'savgol', 'ekf-savgol']
#_modes = ['train', 'val', 'test', 'sample']
#_obs_len = [2,5]
#seg = _obs_len[0]
#mode = _modes[3]
#filter_name = _filters[0]
args = parse_arguments()
if args.mode == 'test':
args.obs_len = 2
assert os.path.exists(args.data_dir),\
f'[Analysis][main][ERROR] data_dir not found!({args.data_dir})'
data_file = 'features_{}_{}s_{}.npy'.format(args.mode,
args.obs_len,
args.filter)
assert os.path.exists(os.path.join(args.data_dir, data_file)),\
f'[Analysis][main][ERROR] data_file not found!({data_file})'
print ('[Analysis] loading dataset....')
# (m, 4)
# [mean_v, mean_acc, mean_deac, std_jy]
data = np.load(os.path.join(args.data_dir,data_file))
print ('[Analysis] mode:{} | filter:{} | obs_len:{}'.format(args.mode,
args.filter,
args.obs_len))
print ('[Analysis] data shape:{}'.format(data.shape))
print ('[Analysis] stats:')
stats(data)
```
**avg_line_length:** 23.907407 · **max_line_length:** 88 · **alphanum_fraction:** 0.606119 · **effective:** 1 · **hits:** 0
**Quality signals** (qsc_*_quality_signal, 41 values, schema order): 371 | 2,582 | 4.043127 | 0.331536 | 0.028 | 0.045333 | 0.032 | 0.201333 | 0.096 | 0.072 | 0.072 | 0 | 0 | 0 | 0.027171 | 0.201782 | 2,582 | 107 | 89 | 24.130841 | 0.700631 | 0.134392 | 0 | 0.04918 | 0 | 0.016393 | 0.281066 | 0.062811 | 0 | 0 | 0 | 0 | 0.032787 | 1 | 0.032787 | false | 0 | 0.065574 | 0 | 0.114754 | 0.098361
**Raw qsc_* values** (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
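stats() prints mean, std, bounds, and quartiles for each of the four feature columns; a quick synthetic check (random data, illustrative only):

```python
import numpy as np

# Synthetic (m, 4) feature matrix in the expected layout:
# [mean_v, mean_acc, mean_deac, std_jy]
rng = np.random.default_rng(0)
stats(rng.normal(size=(100, 4)))   # one summary block per feature
```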
**hexsha:** 538fed081c6f7c33b40d25f1c7cac9cd82761148 · **size:** 2,916 · **ext:** py · **lang:** Python
- max_stars: python-watcher-2.0.0/watcher/tests/notifications/test_service_notifications.py in scottwedge/OpenStack-Stein @ 7077d1f602031dace92916f14e36b124f474de15, licenses ["Apache-2.0"], count null, events null
- max_issues: python-watcher-2.0.0/watcher/tests/notifications/test_service_notifications.py in scottwedge/OpenStack-Stein @ 7077d1f602031dace92916f14e36b124f474de15, licenses ["Apache-2.0"], count 5, events 2019-08-14T06:46:03.000Z to 2021-12-13T20:01:25.000Z
- max_forks: python-watcher-2.0.0/watcher/tests/notifications/test_service_notifications.py in scottwedge/OpenStack-Stein @ 7077d1f602031dace92916f14e36b124f474de15, licenses ["Apache-2.0"], count 2, events 2020-03-15T01:24:15.000Z to 2020-07-22T20:34:26.000Z

**content:**
```python
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2017 Servionica
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import freezegun
import mock
import oslo_messaging as om
from watcher.common import rpc
from watcher import notifications
from watcher.objects import service as w_service
from watcher.tests.db import base
from watcher.tests.objects import utils
@freezegun.freeze_time('2016-10-18T09:52:05.219414')
class TestActionPlanNotification(base.DbTestCase):
def setUp(self):
super(TestActionPlanNotification, self).setUp()
p_get_notifier = mock.patch.object(rpc, 'get_notifier')
m_get_notifier = p_get_notifier.start()
self.addCleanup(p_get_notifier.stop)
self.m_notifier = mock.Mock(spec=om.Notifier)
def fake_get_notifier(publisher_id):
self.m_notifier.publisher_id = publisher_id
return self.m_notifier
m_get_notifier.side_effect = fake_get_notifier
def test_service_failed(self):
service = utils.get_test_service(mock.Mock(),
created_at=datetime.datetime.utcnow())
state = w_service.ServiceStatus.FAILED
notifications.service.send_service_update(mock.MagicMock(),
service,
state,
host='node0')
notification = self.m_notifier.warning.call_args[1]
payload = notification['payload']
self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id)
self.assertDictEqual({
'watcher_object.data': {
'last_seen_up': '2016-09-22T08:32:06Z',
'name': 'watcher-service',
'sevice_host': 'controller',  # sic: key spelling kept as-is to match the payload under test
'status_update': {
'watcher_object.data': {
'old_state': 'ACTIVE',
'state': 'FAILED'
},
'watcher_object.name': 'ServiceStatusUpdatePayload',
'watcher_object.namespace': 'watcher',
'watcher_object.version': '1.0'
}
},
'watcher_object.name': 'ServiceUpdatePayload',
'watcher_object.namespace': 'watcher',
'watcher_object.version': '1.0'
},
payload
)
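# --- Editor's hedged aside (not part of the upstream suite) ---
# Mock.call_args is an (args, kwargs) pair, so call_args[1] above is the
# keyword arguments of the most recent call; the same access in isolation:
#   m = mock.Mock()
#   m.warning('event.update', payload={'k': 'v'})
#   assert m.warning.call_args[1] == {'payload': {'k': 'v'}}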
| 37.384615
| 79
| 0.607339
| 318
| 2,916
| 5.41195
| 0.484277
| 0.051133
| 0.037769
| 0.018594
| 0.087159
| 0.059268
| 0.059268
| 0.059268
| 0.059268
| 0
| 0
| 0.024558
| 0.301783
| 2,916
| 77
| 80
| 37.87013
| 0.820727
| 0.196845
| 0
| 0.113208
| 0
| 0
| 0.177128
| 0.061909
| 0
| 0
| 0
| 0
| 0.037736
| 1
| 0.056604
| false
| 0
| 0.169811
| 0
| 0.264151
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
539267e2204960bd72eacaf1dd33c30f2edce8d2
| 1,270
|
py
|
Python
|
dca_models/deform_offsets_module.py
|
vatsalag99/Deformable-Channel-Attention
|
d904135fd7be45331a16d9cb84e44f8e1ff5c07e
|
[
"MIT"
] | 1
|
2020-12-01T20:57:09.000Z
|
2020-12-01T20:57:09.000Z
|
dca_models/deform_offsets_module.py
|
vatsalag99/Deformable-Channel-Attention
|
d904135fd7be45331a16d9cb84e44f8e1ff5c07e
|
[
"MIT"
] | null | null | null |
dca_models/deform_offsets_module.py
|
vatsalag99/Deformable-Channel-Attention
|
d904135fd7be45331a16d9cb84e44f8e1ff5c07e
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from torch.nn.parameter import Parameter
from einops import rearrange, reduce, repeat
class dca_offsets_layer(nn.Module):
"""Constructs a Offset Generation module.
"""
def __init__(self, channel, n_offsets):
super(dca_offsets_layer, self).__init__()
self.channel = channel
self.n_offsets = n_offsets
def covariance_features(self, x):
"""
Takes in a feature map and returns the unnormalized covariance matrix
"""
m_batchsize, C, height, width = x.size()
        x = (x - x.mean(dim=1, keepdim=True)) / (x.std(dim=1, keepdim=True) + 1e-5)  # parenthesized: the original divided only the mean by std
proj_query = x.view(m_batchsize, C, -1)
proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
return energy
def forward(self, x):
m_batchsize, C, height, width = x.size()
cov_matrix = self.covariance_features(x).reshape(m_batchsize, C, 1, C)
_, locations = torch.topk(cov_matrix, self.n_offsets, dim=1)
delta = torch.stack(self.n_offsets*[torch.arange(0, self.channel)], dim=0)
delta = torch.stack(m_batchsize * [delta], dim=0)
        offsets = locations.squeeze() - delta.to(x.device)  # device-agnostic; the original hard-coded .cuda()
return offsets
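# --- Editor's hedged usage sketch (not from the original repository) ---
# Shape check on random data; the sizes are arbitrary. With the
# device-agnostic subtraction above this also runs on CPU.
if __name__ == "__main__":
    layer = dca_offsets_layer(channel=16, n_offsets=4)
    x = torch.randn(2, 16, 8, 8)
    print(layer(x).shape)  # expected: torch.Size([2, 4, 16])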
| 35.277778
| 82
| 0.640157
| 178
| 1,270
| 4.38764
| 0.393258
| 0.076825
| 0.070423
| 0.046095
| 0.112676
| 0.112676
| 0.069142
| 0
| 0
| 0
| 0
| 0.014508
| 0.240157
| 1,270
| 35
| 83
| 36.285714
| 0.794819
| 0.088976
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.166667
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53990709c9653095e01a4f58d04ac79451da6d42
| 3,921
|
py
|
Python
|
src/syft/lib/__init__.py
|
godormad/PySyft
|
fcb3374b6318dcccf377175fb8db6f70e9e1d1e3
|
[
"Apache-2.0"
] | null | null | null |
src/syft/lib/__init__.py
|
godormad/PySyft
|
fcb3374b6318dcccf377175fb8db6f70e9e1d1e3
|
[
"Apache-2.0"
] | null | null | null |
src/syft/lib/__init__.py
|
godormad/PySyft
|
fcb3374b6318dcccf377175fb8db6f70e9e1d1e3
|
[
"Apache-2.0"
] | null | null | null |
# stdlib
import importlib
import sys
from typing import Any
from typing import Any as TypeAny
from typing import Dict as TypeDict
from typing import Optional
# third party
from packaging import version
# syft relative
from ..ast.globals import Globals
from ..lib.python import create_python_ast
from ..lib.torch import create_torch_ast
from ..lib.torchvision import create_torchvision_ast
from ..logger import critical
from ..logger import traceback_and_raise
from .misc import create_union_ast
class VendorLibraryImportException(Exception):
pass
def vendor_requirements_available(vendor_requirements: TypeDict[str, TypeAny]) -> bool:
# see if python version is supported
if "python" in vendor_requirements:
python_reqs = vendor_requirements["python"]
PYTHON_VERSION = sys.version_info
min_version = python_reqs.get("min_version", None)
if min_version is not None:
if PYTHON_VERSION < min_version:
traceback_and_raise(
VendorLibraryImportException(
f"Unable to load {vendor_requirements['lib']}."
+ f"Python: {PYTHON_VERSION} < {min_version}"
)
)
# see if torch version is supported
if "torch" in vendor_requirements:
torch_reqs = vendor_requirements["torch"]
# third party
import torch
TORCH_VERSION = version.parse(torch.__version__.split("+")[0])
min_version = torch_reqs.get("min_version", None)
if min_version is not None:
if TORCH_VERSION < version.parse(min_version):
traceback_and_raise(
VendorLibraryImportException(
f"Unable to load {vendor_requirements['lib']}."
+ f"Torch: {TORCH_VERSION} < {min_version}"
)
)
return True
def load_lib(lib: str, options: Optional[TypeDict[str, TypeAny]] = None) -> None:
    # avoid a shared mutable default; treat a missing options dict as empty
    options = options if options is not None else {}
    try:
        _ = importlib.import_module(lib)
        vendor_ast = importlib.import_module(f"syft.lib.{lib}")
        PACKAGE_SUPPORT = getattr(vendor_ast, "PACKAGE_SUPPORT", None)
        if PACKAGE_SUPPORT is not None:
            # update only when present; the original called .update()
            # unconditionally and raised AttributeError when the attribute was missing
            PACKAGE_SUPPORT.update(options)
if PACKAGE_SUPPORT is not None and vendor_requirements_available(
vendor_requirements=PACKAGE_SUPPORT
):
update_ast = getattr(vendor_ast, "update_ast", None)
if update_ast is not None:
global lib_ast
update_ast(ast_or_client=lib_ast)
for _, client in lib_ast.registered_clients.items():
update_ast(ast_or_client=client)
# cache the constructor for future created clients
lib_ast.loaded_lib_constructors[lib] = update_ast
except VendorLibraryImportException as e:
critical(e)
except Exception as e:
critical(f"Unable to load package support for: {lib}. {e}")
# now we need to load the relevant frameworks onto the node
def create_lib_ast(client: Optional[Any] = None) -> Globals:
python_ast = create_python_ast(client=client)
torch_ast = create_torch_ast(client=client)
torchvision_ast = create_torchvision_ast(client=client)
# numpy_ast = create_numpy_ast()
lib_ast = Globals(client=client)
lib_ast.add_attr(attr_name="syft", attr=python_ast.attrs["syft"])
lib_ast.add_attr(attr_name="torch", attr=torch_ast.attrs["torch"])
lib_ast.add_attr(attr_name="torchvision", attr=torchvision_ast.attrs["torchvision"])
    # leave the misc creation for last, as it needs the fully resolved AST
    # to properly generate unions
union_misc_ast = getattr(getattr(create_union_ast(lib_ast, client), "syft"), "lib")
misc_root = getattr(getattr(lib_ast, "syft"), "lib")
misc_root.add_attr(attr_name="misc", attr=union_misc_ast.attrs["misc"])
return lib_ast
lib_ast = create_lib_ast(None)
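# --- Editor's hedged usage sketch (not from the original repository) ---
# load_lib() imports the vendor package plus its syft.lib.<name> AST module,
# checks PACKAGE_SUPPORT requirements, then grafts the new AST onto lib_ast
# and every registered client. The library name below is illustrative only:
#   load_lib("numpy")
#   load_lib("numpy", options={"flag": True})  # hypothetical options dict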
| 35.972477
| 88
| 0.665902
| 486
| 3,921
| 5.117284
| 0.226337
| 0.033776
| 0.025734
| 0.024125
| 0.185364
| 0.133092
| 0.10776
| 0.10776
| 0.10776
| 0.10776
| 0
| 0.00034
| 0.250446
| 3,921
| 108
| 89
| 36.305556
| 0.845866
| 0.090283
| 0
| 0.105263
| 0
| 0
| 0.100703
| 0.016315
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039474
| false
| 0.013158
| 0.276316
| 0
| 0.355263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5399748c26ec62ec3b268e3e29283c1ccc28b398
| 8,742
|
py
|
Python
|
scripts/griffin_GC_counts.py
|
GavinHaLab/Griffin
|
83942189c0e3e62ac533d6b6a5ffd7d2dfd2d4b3
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-09-08T05:43:15.000Z
|
2021-09-08T05:43:15.000Z
|
scripts/griffin_GC_counts.py
|
GavinHaLab/Griffin
|
83942189c0e3e62ac533d6b6a5ffd7d2dfd2d4b3
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
scripts/griffin_GC_counts.py
|
GavinHaLab/Griffin
|
83942189c0e3e62ac533d6b6a5ffd7d2dfd2d4b3
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pysam
import os
import pandas as pd
import numpy as np
import time
import argparse
import sys
from multiprocessing import Pool
# In[ ]:
# ##arguments for testing
# bam_file_path = '/fh/scratch/delete90/ha_g/realigned_bams/cfDNA_MBC_ULP_hg38/realign_bam_paired_snakemake-master/results/MBC_1041_1_ULP/MBC_1041_1_ULP_recalibrated.bam'
# bam_file_name = 'MBC_1041_1_ULP'
# mapable_path = '../../downloads/genome/repeat_masker.mapable.k50.Umap.hg38.bedGraph'
# ref_seq_path = '/fh/fast/ha_g/grp/reference/GRCh38/GRCh38.fa'
# chrom_sizes_path = '/fh/fast/ha_g/grp/reference/GRCh38/hg38.standard.chrom.sizes'
# out_dir = './tmp/'
# map_q = 20
# size_range = [15,500]
# CPU = 4
# In[ ]:
parser = argparse.ArgumentParser()
parser.add_argument('--bam_file', help='sample_bam_file', required=True)
parser.add_argument('--bam_file_name', help='sample name (does not need to match actual file name)', required=True)
parser.add_argument('--mapable_regions', help='highly mapable regions to be used in GC correction, bedGraph or bed format', required=True)
parser.add_argument('--ref_seq',help='reference sequence (fasta format)',required=True)
parser.add_argument('--chrom_sizes',help='path to chromosome sizes for the reference seq',required=True)
parser.add_argument('--out_dir',help='folder for GC bias results',required=True)
parser.add_argument('--map_q',help='minimum mapping quality for reads to be considered',type=int,required=True)
parser.add_argument('--size_range',help='range of read sizes to be included',nargs=2, type=int, required=True)
parser.add_argument('--CPU',help='number of CPU for parallelizing', type=int, required=True)
args = parser.parse_args()
bam_file_path = args.bam_file
bam_file_name = args.bam_file_name
mapable_path = args.mapable_regions
ref_seq_path = args.ref_seq
chrom_sizes_path = args.chrom_sizes
out_dir = args.out_dir
map_q = args.map_q
size_range = args.size_range
CPU = args.CPU
# In[ ]:
print('arguments provided:')
print('\tbam_file_path = "'+bam_file_path+'"')
print('\tbam_file_name = "'+bam_file_name+'"')
print('\tmapable_regions = "'+mapable_path+'"')
print('\tref_seq_path = "'+ref_seq_path+'"')
print('\tchrom_sizes_path = "'+chrom_sizes_path+'"')
print('\tout_dir = "'+out_dir+'"')
print('\tmap_q = '+str(map_q))
print('\tsize_range = '+str(size_range))
print('\tCPU = '+str(CPU))
# In[ ]:
mapable_name = mapable_path.rsplit('/',1)[1].rsplit('.',1)[0]
out_file = out_dir +'/'+mapable_name+'/GC_counts/'+ bam_file_name+'.GC_counts.txt'
print('out_file',out_file)
# In[ ]:
#create a directory for the GC data
if not os.path.exists(out_dir +'/'+mapable_name):
os.mkdir(out_dir +'/'+mapable_name)
if not os.path.exists(out_dir +'/'+mapable_name+'/GC_counts/'):
os.mkdir(out_dir +'/'+mapable_name+'/GC_counts/')
# In[ ]:
#import filter
mapable_intervals = pd.read_csv(mapable_path, sep='\t', header=None)
#remove non standard chromosomes and X and Y
chroms = ['chr'+str(m) for m in range(1,23)]
mapable_intervals = mapable_intervals[mapable_intervals[0].isin(chroms)]
print('chroms:', chroms)
print('number_of_intervals:',len(mapable_intervals))
sys.stdout.flush()
# In[ ]:
def collect_reads(sublist):
#create a dict for holding the frequency of each read length and GC content
GC_dict = {}
for length in range(size_range[0],size_range[1]+1):
GC_dict[length]={}
for num_GC in range(0,length+1):
GC_dict[length][num_GC]=0
#import the bam file
#this needs to be done within the loop otherwise it gives a truncated file warning
bam_file = pysam.AlignmentFile(bam_file_path, "rb")
print('sublist intervals:',len(sublist))
#this might also need to be in the loop
#import the ref_seq
ref_seq=pysam.FastaFile(ref_seq_path)
for i in range(len(sublist)):
chrom = sublist.iloc[i][0]
start = sublist.iloc[i][1]
end = sublist.iloc[i][2]
if i%5000==0:
print('interval',i,':',chrom,start,end,'seconds:',np.round(time.time()-start_time))
sys.stdout.flush()
        #fetch any read that overlaps the interval (don't need to extend the interval because the fetch function does this automatically)
fetched = bam_file.fetch(chrom,start,end)
for read in fetched:
#use both fw (positive template length) and rv (negative template length) reads
if (read.is_reverse==False and read.template_length>=size_range[0] and read.template_length<=size_range[1]) or (read.is_reverse==True and -read.template_length>=size_range[0] and -read.template_length<=size_range[1]):
#qc filters, some longer fragments are considered 'improper pairs' but I would like to keep these
if read.is_paired==True and read.mapping_quality>=map_q and read.is_duplicate==False and read.is_qcfail==False:
if read.is_reverse==False:
read_start = read.reference_start
read_end = read.reference_start+read.template_length
elif read.is_reverse==True:
read_end = read.reference_start + read.reference_length
read_start = read_end + read.template_length
fragment_seq = ref_seq.fetch(read.reference_name,read_start,read_end)
#tally up the GC content
fragment_seq=fragment_seq.replace('g','G').replace('c','C').replace('a','A').replace('t','T').replace('n','N')
# #################
# ##logic check####
# #################
# if read.is_reverse==False:
# if fragment_seq[0:read.reference_length]==read.query_sequence and len(fragment_seq)==read.template_length:
# print('fw match',read.reference_length)
# else:
# print(fragment_seq[0:read.reference_length],read.reference_length,'fw')
# print(read.query_sequence,len(read.query_sequence),'fw')
# print(len(fragment_seq),read.template_length)
# print('\n')
# elif read.is_reverse==True:
# if fragment_seq[-read.reference_length:]==read.query_sequence and len(fragment_seq)==-read.template_length:
# print('rv match',read.reference_length)
# else:
# print(fragment_seq[-read.reference_length:],read.reference_length,'rv')
# print(read.query_sequence,len(read.query_sequence),'rv')
# print(len(fragment_seq),read.template_length)
# print('\n')
# #################
#split and convert to numpy array
fragment_seq = np.array(list(fragment_seq))
#replace with values
fragment_seq[(fragment_seq=='G') | (fragment_seq=='C')]=1
fragment_seq[(fragment_seq=='A') | (fragment_seq=='T')]=0
fragment_seq[(fragment_seq=='N')]=np.random.randint(2) #choose a random 0 or 1 for N (so that you always get an integer) #should be very rare if the filter is done right
fragment_seq = fragment_seq.astype(int)
num_GC = int(fragment_seq.sum())
GC_dict[abs(read.template_length)][num_GC]+=1
print('done')
return(GC_dict)
# In[ ]:
start_time = time.time()
p = Pool(processes=CPU) #use the available CPU
sublists = np.array_split(mapable_intervals,CPU) #split the list into sublists, one per CPU
GC_dict_list = p.map(collect_reads, sublists, 1)
# In[ ]:
all_GC_df = pd.DataFrame()
for i,GC_dict in enumerate(GC_dict_list):
GC_df = pd.DataFrame()
for length in GC_dict.keys():
current = pd.Series(GC_dict[length]).reset_index()
current = current.rename(columns={'index':'num_GC',0:'number_of_fragments'})
current['length']=length
current = current[['length','num_GC','number_of_fragments']]
        GC_df = GC_df.append(current, ignore_index=True)  # note: DataFrame.append was removed in pandas 2.0; pd.concat([GC_df, current]) is the modern equivalent
GC_df = GC_df.set_index(['length','num_GC'])
all_GC_df[i] = GC_df['number_of_fragments']
del(GC_df,GC_dict)
all_GC_df = all_GC_df.sum(axis=1)
all_GC_df = pd.DataFrame(all_GC_df).rename(columns = {0:'number_of_fragments'})
all_GC_df = all_GC_df.reset_index()
all_GC_df.to_csv(out_file,sep='\t',index=False)
# In[ ]:
print('done')
# In[ ]:
# In[ ]:
# In[ ]:
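# --- Editor's hedged usage sketch (not part of the original script) ---
# All paths below are illustrative assumptions, not repository files:
#   python scripts/griffin_GC_counts.py \
#       --bam_file sample.bam --bam_file_name sample \
#       --mapable_regions mapable.bedGraph --ref_seq GRCh38.fa \
#       --chrom_sizes hg38.chrom.sizes --out_dir ./results \
#       --map_q 20 --size_range 15 500 --CPU 4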
| 33.366412
| 241
| 0.636811
| 1,220
| 8,742
| 4.332787
| 0.240164
| 0.049943
| 0.037457
| 0.031782
| 0.260121
| 0.19126
| 0.146046
| 0.125615
| 0.081347
| 0.052213
| 0
| 0.011662
| 0.22512
| 8,742
| 261
| 242
| 33.494253
| 0.768674
| 0.309883
| 0
| 0.036697
| 0
| 0
| 0.152726
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009174
| false
| 0
| 0.073395
| 0
| 0.082569
| 0.155963
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5399b6c7047b5726e42c8b72d0dc40c3dfb01acf
| 4,372
|
py
|
Python
|
task2/04-task2-upload-dim-tables.py
|
canovasjm/InterviewProject_JuanCanovas
|
6ff385c66664328cea0678454560e89e44851e24
|
[
"MIT"
] | null | null | null |
task2/04-task2-upload-dim-tables.py
|
canovasjm/InterviewProject_JuanCanovas
|
6ff385c66664328cea0678454560e89e44851e24
|
[
"MIT"
] | null | null | null |
task2/04-task2-upload-dim-tables.py
|
canovasjm/InterviewProject_JuanCanovas
|
6ff385c66664328cea0678454560e89e44851e24
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 1 18:17:07 2021
@author: jm
"""
# %% required libraries
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
# %% connect to DB
# create connection using pymssql
engine = create_engine('mssql+pymssql://sa:<YourStrong@Passw0rd>@localhost:1433/rga')
connection = engine.connect()
# %% read data sets from where I will build the dimension tables
# read employee roster data
employee_roster = pd.read_excel("datasources/Employee_Roster_Data.xlsx", sheet_name = 'Sheet1')
# read skills data
skills = pd.read_excel("datasources/skills.xlsx", sheet_name = "Sheet1")
# read hours data
hours = pd.read_excel("datasources/hours.xlsx", sheet_name = "Sheet1")
# %% dimensions created from source employee_roster
# %% create DIM_Currency
# get unique values
currencies = sorted(employee_roster['Currency'].unique())
# create a data frame
DIM_Currency = pd.DataFrame({'id_currency': (np.arange(len(currencies)) + 1), 'currency': currencies})
# send data frame to DB
DIM_Currency.to_sql('DIM_Currency', con = connection, if_exists = 'append', index = False)
# %% create DIM_Department
# get unique values
departments = sorted(pd.concat([employee_roster['Department'], skills['Department']], axis = 0).unique())
# create a data frame
DIM_Department = pd.DataFrame({'id_department': (np.arange(len(departments)) + 1), 'department': departments})
# send data frame to DB
DIM_Department.to_sql('DIM_Department', con = connection, if_exists = 'append', index = False)
# %% create DIM_Gender
# get unique values
genders = sorted(pd.concat([employee_roster['Gender'], skills['Gender']], axis = 0).unique())
# create a data frame
DIM_Gender = pd.DataFrame({'id_gender': (np.arange(len(genders)) + 1), 'gender': genders})
# send data frame to DB
DIM_Gender.to_sql('DIM_Gender', con = connection, if_exists = 'append', index = False)
# %% create DIM_User
# check if 'UserId' values in 'skills' are in 'User_ID' in 'employee_roster'
# we get 20134 'True' values, meaning that all 'UserId' in 'skills' are already
# in 'User_ID' in employee_roster
users_check_1 = np.isin(skills['UserId'], employee_roster['User_ID']).sum()
# check if 'UserId' values in 'hours' are in 'User_ID' in 'employee_roster'
# we get 7659 'True' values, meaning that NOT all 'UserId' in 'hours' are already
# in 'User_ID' in employee_roster
users_check_2 = np.isin(hours['UserId'], employee_roster['User_ID']).sum()
# get unique values
users = sorted(pd.concat([employee_roster['User_ID'], skills['UserId'], hours['UserId']], axis = 0).unique())
# create a data frame to use pd.merge()
df_users = pd.DataFrame({'User_ID': users})
# left join 'df_users' with 'employee_roster' on 'User_ID'
users_final = pd.merge(df_users, employee_roster, on = 'User_ID', how ='left')
# select only columns I need
users_final = users_final[['User_ID', 'Email_ID', 'Fullname']]
# rename columns
users_final.rename(columns = {'User_ID': 'id_user', 'Email_ID': 'id_email', 'Fullname': 'fullname'}, inplace = True)
# send data frame to DB
users_final.to_sql('DIM_User', con = connection, if_exists = 'append', index = False)
# %% dimensions created from source skills
# %% create DIM_AttributeGroup
# get unique values
att_group = sorted(skills['Attribute Group'].unique())
# create a data frame
DIM_AttributeGroup = pd.DataFrame({'id_att_group': (np.arange(len(att_group)) + 1), 'attribute_group': att_group})
# send data frame to DB
DIM_AttributeGroup.to_sql('DIM_AttributeGroup', con = connection, if_exists = 'append', index = False)
# %% create DIM_AttributeSubGroup
# get unique values
att_sub_group = sorted(skills['Attribute Sub-Group'].unique())
# create a data frame
DIM_AttributeSubGroup = pd.DataFrame({'id_att_sub_group': (np.arange(len(att_sub_group)) + 1), 'attribute_sub_group': att_sub_group})
# send data frame to DB
DIM_AttributeSubGroup.to_sql('DIM_AttributeSubGroup', con = connection, if_exists = 'append', index = False)
# %% create DIM_AttributeName
# get unique values
att_name = sorted(skills['Attribute Name'].unique())
# create a data frame
DIM_AttributeName = pd.DataFrame({'id_att_name': (np.arange(len(att_name)) + 1), 'attribute_name': att_name})
# send data frame to DB
DIM_AttributeName.to_sql('DIM_AttributeName', con = connection, if_exists = 'append', index = False)
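# --- Editor's hedged refactoring sketch (not part of the original script) ---
# The per-dimension steps above repeat one pattern: sort the unique values,
# build an id/value frame, append it to the DB. One possible helper
# (function and argument names are the editor's assumptions):
def upload_dimension(series, table_name, id_col, value_col, con):
    values = sorted(series.unique())
    frame = pd.DataFrame({id_col: np.arange(len(values)) + 1, value_col: values})
    frame.to_sql(table_name, con=con, if_exists='append', index=False)

# e.g. upload_dimension(skills['Attribute Name'], 'DIM_AttributeName',
#                       'id_att_name', 'attribute_name', connection)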
| 34.698413
| 133
| 0.730101
| 631
| 4,372
| 4.873217
| 0.212361
| 0.072846
| 0.028618
| 0.038699
| 0.347317
| 0.273496
| 0.212358
| 0.14374
| 0.124228
| 0.028618
| 0
| 0.010772
| 0.12946
| 4,372
| 125
| 134
| 34.976
| 0.797162
| 0.335087
| 0
| 0
| 0
| 0
| 0.23317
| 0.056802
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.029412
| 0.088235
| 0
| 0.088235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
539a58166d003e0486119a3a4445a376e8149b19
| 6,897
|
py
|
Python
|
cogs/server.py
|
vikasbaghel1001/Kanna-Chan
|
6f74978cb73b66cdb0952351a7e84a9e4ef4ebeb
|
[
"MIT"
] | 5
|
2021-10-17T07:29:42.000Z
|
2022-03-23T11:01:58.000Z
|
cogs/server.py
|
vikasbaghel1001/Kanna-Chan
|
6f74978cb73b66cdb0952351a7e84a9e4ef4ebeb
|
[
"MIT"
] | 1
|
2021-10-17T08:14:09.000Z
|
2021-10-17T08:14:09.000Z
|
cogs/server.py
|
vikasbaghel1001/Kanna-Chan
|
6f74978cb73b66cdb0952351a7e84a9e4ef4ebeb
|
[
"MIT"
] | 4
|
2021-07-12T04:20:22.000Z
|
2021-10-01T03:29:50.000Z
|
import discord
from discord.ext import commands
arrow = "<a:right:877425183839891496>"
kwee = "<:kannawee:877036162122924072>"
kdance = "<a:kanna_dance:877038778798207016>"
kbored = "<:kanna_bored:877036162827583538>"
ksmug = "<:kanna_smug:877038777896427560>"
heart = "<a:explosion_heart:877426228775227392>"
class Server(commands.Cog):
def __init__(self, client):
self.client = client
self.kana_id = 857835279259664403
@commands.command()
@commands.is_owner()
async def sabout(self, ctx):
kana = self.client.get_user(self.kana_id)
about_file = discord.File("./images/about_server.png")
await ctx.send(file = about_file)
emb = discord.Embed(title=f"{kdance} ABOUT SERVER {kdance}",description = f"{arrow} **DRAGON LOLI'S HOME** is the official Server of the bot **Kanna Chan**. It's a friendly community meant for having fun, chilling and spending time with others.\n{arrow} This server has cute emotes and a lot of fun events are about to be done here! So, stay tuned!", color=0xfc74c6)
emb.add_field(
name=f"{kwee} __ROLES__",
value=f"{arrow} <@&876800883441156138> The highest role supposed to be only for Kanna Chan.\n{arrow} <@&876817811396263946> Admins of the Server and have the highest power and authority after owner.\n{arrow} <@&876818242058997791> Moderators of the server meant to moderate the chat and maintain a positive environment in community.\n{arrow} <@&876801038420701196> Developer(s) of Kanna Chan have this role.\n{arrow} <@&876804164661944340> All other users who join this server get this role by default. They have image and embed perms by deault.\n{arrow} **PS: APART FROM THESE SELF-ROLES ARE ALSO AVAIALBLE FOR MEMBERS.**",
inline=False
)
emb.add_field(
name=f"{ksmug} __CHANNELS__",
value=f"{arrow} <#877030933847490691> Read the rules here.\n{arrow} <#877031867440832574> Channel for grabbing self-roles.\n{arrow} <#876798564704084011> The general chat for the server.\n{arrow} <#876798809819189249> Bot Commands should be executed here.\n{arrow} <#876798696078065694> You can give suggestions for improving Kanna Chan here.\n{arrow} <#876798720254029864> You can report BUGS here if you find any in Kanna Chan.\n{arrow} <#876798750876651530> For any other support or query use this channel.\n{arrow} **P.S: YOU CAN PING ANY STAFF MEMBER OR DEVELOPER WHILE REPORTING BUG OR IN CASE OF ANY QUERY.**",
inline=False
)
emb.set_footer(
text="Kanna Chan",
icon_url=kana.avatar_url
)
await ctx.send(embed=emb)
@commands.command()
@commands.is_owner()
async def rule(self, ctx):
kana = self.client.get_user(self.kana_id)
rule_file = discord.File("./images/rules.png")
await ctx.send(file=rule_file)
emb = discord.Embed(title=f"{kbored} RULES {kbored}", color=0xfc74c6)
emb.add_field(
name=f"{heart} **Be respectful**",
value=f"You must respect all users, regardless of your liking towards them. Treat others the way you want to be treated.",
inline=False
)
emb.add_field(
name=f"{heart} **No Inappropriate Language**",
value=f"{arrow} The use of profanity should be kept to a minimum. However, any derogatory language towards any user is prohibited.",
inline=False
)
emb.add_field(
name=f"{heart} **No spamming**",
value=f"{arrow} Don't send a lot of small messages right after each other. Do not disrupt chat by spamming.",
inline=False
)
emb.add_field(
name=f"{heart} **No pornographic/adult/other NSFW material**",
value=f"{arrow} This is a community server and not meant to share this kind of material.",
inline=False
)
emb.add_field(
name=f"{heart} **No advertisements**",
value=f"{arrow} We do not tolerate any kind of advertisements, whether it be for other communities or streams. You can post your content in the media channel if it is relevant and provides actual value (Video/Art)",
inline=False
)
emb.add_field(
name=f"{heart} **No offensive names and profile pictures**",
value=f"{arrow} You will be asked to change your name or picture if the staff deems them inappropriate.",
inline=False
)
emb.add_field(
name=f"{heart} **Server Raiding**",
value=f"{arrow} Raiding or mentions of raiding are not allowed.",
inline=False
)
emb.add_field(
name=f"{heart} **Direct & Indirect Threats**",
value=f"{arrow} Threats to other users of DDoS, Death, DoX, abuse, and other malicious threats are absolutely prohibited and disallowed.",
inline=False
)
emb.add_field(
name=f"{heart} **Follow the Discord Community Guidelines**",
value=f"{arrow} You can find them here: https://discordapp.com/guidelines",
inline=False
)
emb.add_field(
name=f"{heart} **VOICE CHANNELS**",
value=f"{arrow} Do not join voice chat channels without permission of the people already in there.",
inline=False
)
emb.add_field(
name=f"{heart} **DECISIONS AND ISSUES**",
value = f"{arrow} ***The Admins and Mods will Mute/Kick/Ban per discretion. If you feel mistreated DM an Admin and we will resolve the issue.***",
inline=False
)
emb.add_field(
name=f"{heart} **CHANGES**",
value = f"{arrow} ***Your presence in this server implies accepting these rules, including all further changes. These changes might be done at any time without notice, it is your responsibility to check for them.***",
inline=False
)
emb.set_footer(
text="Kanna Chan",
icon_url=kana.avatar_url
)
await ctx.send(embed=emb)
@commands.Cog.listener()
async def on_member_join(self, member):
if member.guild.id == 876798564704084008:
if member.bot:
return
else:
member_role = member.guild.get_role(876804164661944340)
await member.add_roles(member_role)
desc = f"{member.name} Thanks for joining Kanna's Server. The server is currently under construction, Thanks for being an **early supporter**!! If you need any kind of help or support just ping any staff member or DM `aSHish#1198`. Have a nice stay in the server :)"
await member.send(desc)
else:
return
def setup(client):
client.add_cog(Server(client))
print(">> Server Utility loaded")
| 54.307087
| 636
| 0.641438
| 915
| 6,897
| 4.774863
| 0.346448
| 0.019226
| 0.035248
| 0.048066
| 0.204166
| 0.186313
| 0.173724
| 0.138705
| 0.094759
| 0.055848
| 0
| 0.076276
| 0.258663
| 6,897
| 127
| 637
| 54.307087
| 0.778212
| 0
| 0
| 0.380165
| 0
| 0.090909
| 0.568136
| 0.07263
| 0
| 0
| 0.00232
| 0
| 0
| 1
| 0.016529
| false
| 0
| 0.016529
| 0
| 0.057851
| 0.008264
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
539b84ee2616f61a9bf370a8a3b1b21465720328
| 10,016
|
py
|
Python
|
paho/mqtt/subscribe.py
|
RandomGamer342/TTM4115-plantsensor
|
e63c34160d284bb6fd26563eeba949d54026348b
|
[
"MIT"
] | 8
|
2017-01-17T02:25:08.000Z
|
2019-07-24T13:39:55.000Z
|
python/lib/python3.4/site-packages/paho/mqtt/subscribe.py
|
nidiascampos/smartgreen
|
d574d90918702ac3bd383ed77d673f871576c5b0
|
[
"Apache-2.0"
] | 5
|
2018-11-20T16:57:21.000Z
|
2019-03-17T19:59:52.000Z
|
python/lib/python3.4/site-packages/paho/mqtt/subscribe.py
|
nidiascampos/smartgreen
|
d574d90918702ac3bd383ed77d673f871576c5b0
|
[
"Apache-2.0"
] | 9
|
2017-01-19T03:56:05.000Z
|
2020-03-10T04:03:20.000Z
|
# Copyright (c) 2016 Roger Light <roger@atchoo.org>
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# and Eclipse Distribution License v1.0 which accompany this distribution.
#
# The Eclipse Public License is available at
# http://www.eclipse.org/legal/epl-v10.html
# and the Eclipse Distribution License is available at
# http://www.eclipse.org/org/documents/edl-v10.php.
#
# Contributors:
# Roger Light - initial API and implementation
"""
This module provides some helper functions to allow straightforward subscribing
to topics and retrieving messages. The two functions are simple(), which
returns one or messages matching a set of topics, and callback() which allows
you to pass a callback for processing of messages.
"""
import paho.mqtt.client as paho
import paho.mqtt as mqtt
import ssl
def _on_connect(c, userdata, flags, rc):
"""Internal callback"""
if rc != 0:
raise mqtt.MQTTException(paho.connack_string(rc))
if type(userdata['topics']) is list:
for t in userdata['topics']:
c.subscribe(t, userdata['qos'])
else:
c.subscribe(userdata['topics'], userdata['qos'])
def _on_message_callback(c, userdata, message):
"""Internal callback"""
userdata['callback'](c, userdata['userdata'], message)
def _on_message_simple(c, userdata, message):
"""Internal callback"""
if userdata['msg_count'] == 0:
return
# Don't process stale retained messages if 'retained' was false
if userdata['retained'] == False and message.retain == True:
return
userdata['msg_count'] = userdata['msg_count'] - 1
if userdata['messages'] is None and userdata['msg_count'] == 0:
userdata['messages'] = message
c.disconnect()
return
userdata['messages'].append(message)
if userdata['msg_count'] == 0:
c.disconnect()
def callback(callback, topics, qos=0, userdata=None, hostname="localhost",
port=1883, client_id="", keepalive=60, will=None, auth=None, tls=None,
protocol=paho.MQTTv311, transport="tcp"):
"""Subscribe to a list of topics and process them in a callback function.
This function creates an MQTT client, connects to a broker and subscribes
to a list of topics. Incoming messages are processed by the user provided
callback. This is a blocking function and will never return.
callback : function of the form "on_message(client, userdata, message)" for
processing the messages received.
topics : either a string containing a single topic to subscribe to, or a
list of topics to subscribe to.
qos : the qos to use when subscribing. This is applied to all topics.
userdata : passed to the callback
hostname : a string containing the address of the broker to connect to.
Defaults to localhost.
port : the port to connect to the broker on. Defaults to 1883.
client_id : the MQTT client id to use. If "" or None, the Paho library will
generate a client id automatically.
keepalive : the keepalive timeout value for the client. Defaults to 60
seconds.
will : a dict containing will parameters for the client: will = {'topic':
"<topic>", 'payload':"<payload">, 'qos':<qos>, 'retain':<retain>}.
Topic is required, all other parameters are optional and will
default to None, 0 and False respectively.
Defaults to None, which indicates no will should be used.
auth : a dict containing authentication parameters for the client:
auth = {'username':"<username>", 'password':"<password>"}
Username is required, password is optional and will default to None
if not provided.
Defaults to None, which indicates no authentication is to be used.
tls : a dict containing TLS configuration parameters for the client:
dict = {'ca_certs':"<ca_certs>", 'certfile':"<certfile>",
'keyfile':"<keyfile>", 'tls_version':"<tls_version>",
'ciphers':"<ciphers">}
ca_certs is required, all other parameters are optional and will
default to None if not provided, which results in the client using
the default behaviour - see the paho.mqtt.client documentation.
Defaults to None, which indicates that TLS should not be used.
transport : set to "tcp" to use the default setting of transport which is
raw TCP. Set to "websockets" to use WebSockets as the transport.
"""
if qos < 0 or qos > 2:
raise ValueError('qos must be in the range 0-2')
callback_userdata = {
'callback':callback,
'topics':topics,
'qos':qos,
'userdata':userdata}
client = paho.Client(client_id=client_id,
userdata=callback_userdata, protocol=protocol, transport=transport)
client.on_message = _on_message_callback
client.on_connect = _on_connect
if auth is not None:
username = auth['username']
try:
password = auth['password']
except KeyError:
password = None
client.username_pw_set(username, password)
if will is not None:
will_topic = will['topic']
try:
will_payload = will['payload']
except KeyError:
will_payload = None
try:
will_qos = will['qos']
except KeyError:
will_qos = 0
try:
will_retain = will['retain']
except KeyError:
will_retain = False
client.will_set(will_topic, will_payload, will_qos, will_retain)
if tls is not None:
ca_certs = tls['ca_certs']
try:
certfile = tls['certfile']
except KeyError:
certfile = None
try:
keyfile = tls['keyfile']
except KeyError:
keyfile = None
try:
tls_version = tls['tls_version']
except KeyError:
            tls_version = ssl.PROTOCOL_SSLv23
try:
ciphers = tls['ciphers']
except KeyError:
ciphers = None
client.tls_set(ca_certs, certfile, keyfile, tls_version=tls_version,
ciphers=ciphers)
client.connect(hostname, port, keepalive)
client.loop_forever()
def simple(topics, qos=0, msg_count=1, retained=True, hostname="localhost", port=1883,
client_id="", keepalive=60, will=None, auth=None, tls=None,
protocol=paho.MQTTv311, transport="tcp"):
"""Subscribe to a list of topics and return msg_count messages.
This function creates an MQTT client, connects to a broker and subscribes
to a list of topics. Once "msg_count" messages have been received, it
disconnects cleanly from the broker and returns the messages.
topics : either a string containing a single topic to subscribe to, or a
list of topics to subscribe to.
qos : the qos to use when subscribing. This is applied to all topics.
msg_count : the number of messages to retrieve from the broker.
if msg_count == 1 then a single MQTTMessage will be returned.
if msg_count > 1 then a list of MQTTMessages will be returned.
retained : If set to True, retained messages will be processed the same as
non-retained messages. If set to False, retained messages will
be ignored. This means that with retained=False and msg_count=1,
the function will return the first message received that does
not have the retained flag set.
hostname : a string containing the address of the broker to connect to.
Defaults to localhost.
port : the port to connect to the broker on. Defaults to 1883.
client_id : the MQTT client id to use. If "" or None, the Paho library will
generate a client id automatically.
keepalive : the keepalive timeout value for the client. Defaults to 60
seconds.
will : a dict containing will parameters for the client: will = {'topic':
"<topic>", 'payload':"<payload">, 'qos':<qos>, 'retain':<retain>}.
Topic is required, all other parameters are optional and will
default to None, 0 and False respectively.
Defaults to None, which indicates no will should be used.
auth : a dict containing authentication parameters for the client:
auth = {'username':"<username>", 'password':"<password>"}
Username is required, password is optional and will default to None
if not provided.
Defaults to None, which indicates no authentication is to be used.
tls : a dict containing TLS configuration parameters for the client:
dict = {'ca_certs':"<ca_certs>", 'certfile':"<certfile>",
'keyfile':"<keyfile>", 'tls_version':"<tls_version>",
'ciphers':"<ciphers">}
ca_certs is required, all other parameters are optional and will
default to None if not provided, which results in the client using
the default behaviour - see the paho.mqtt.client documentation.
Defaults to None, which indicates that TLS should not be used.
transport : set to "tcp" to use the default setting of transport which is
raw TCP. Set to "websockets" to use WebSockets as the transport.
"""
if msg_count < 1:
raise ValueError('msg_count must be > 0')
# Set ourselves up to return a single message if msg_count == 1, or a list
# if > 1.
if msg_count == 1:
messages = None
else:
messages = []
userdata = {'retained':retained, 'msg_count':msg_count, 'messages':messages}
callback(_on_message_simple, topics, qos, userdata, hostname, port,
client_id, keepalive, will, auth, tls, protocol, transport)
return userdata['messages']
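# --- Editor's hedged usage sketch (not part of the upstream module) ---
# Broker hostname and topics are illustrative assumptions; the calls are
# left commented because both block until messages arrive.
if __name__ == "__main__":
    def print_msg(client, userdata, message):
        # invoked once per message received on the subscribed topics
        print("%s %s" % (message.topic, message.payload))
    # msg = simple("paho/test/topic", hostname="localhost")   # one message, then disconnect
    # callback(print_msg, ["paho/test/topic"], hostname="localhost")  # never returns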
| 38.523077
| 92
| 0.648862
| 1,304
| 10,016
| 4.921012
| 0.172546
| 0.02244
| 0.01122
| 0.012155
| 0.506311
| 0.490416
| 0.485429
| 0.47904
| 0.467508
| 0.467508
| 0
| 0.009149
| 0.26887
| 10,016
| 259
| 93
| 38.671815
| 0.867131
| 0.597644
| 0
| 0.278351
| 0
| 0
| 0.088998
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051546
| false
| 0.030928
| 0.030928
| 0
| 0.123711
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
539b8675dc9b20bffab7e413aa5943d934069113
| 1,561
|
py
|
Python
|
py/2017/day24/aoc_day_24.py
|
cs-cordero/advent-of-code
|
614b8f78b43c54ef180a7dc411a0d1366a62944f
|
[
"MIT"
] | null | null | null |
py/2017/day24/aoc_day_24.py
|
cs-cordero/advent-of-code
|
614b8f78b43c54ef180a7dc411a0d1366a62944f
|
[
"MIT"
] | null | null | null |
py/2017/day24/aoc_day_24.py
|
cs-cordero/advent-of-code
|
614b8f78b43c54ef180a7dc411a0d1366a62944f
|
[
"MIT"
] | 2
|
2019-12-01T15:33:27.000Z
|
2020-12-14T05:37:23.000Z
|
from collections import defaultdict
def solution():
starting_components = d[0]
best_scores = []
for component in starting_components:
n_a, n_b = get_ports(component)
nxt_port = n_a if n_b == 0 else n_b
best_scores.append(recurse(component, set(), nxt_port, 0))
print("fuck", max(best_scores))
def recurse(component, seen, next_port, level):
seen.add(component)
c_a, c_b = get_ports(component)
next_components = d[next_port] - seen
my_score = sum(get_ports(component))
scores = []
for next_component in next_components:
n_a, n_b = get_ports(next_component)
nxt_port = n_a if n_b in (c_a, c_b) else n_b
score, reclevel = recurse(next_component, seen.copy(), nxt_port, level + 1)
scores.append((score, reclevel))
scores = sorted(scores, key=lambda x: (x[1], x[0]), reverse=True)
print(component, level, scores)
return my_score + (scores[0][0] if scores else 0), scores[0][1] if scores else level
def get_ports(component):
return map(int, component.split("/"))
if __name__ == "__main__":
d = defaultdict(set)
# with open('aoc_day_24_sample.txt') as f:
with open("aoc_day_24_input.txt") as f:
sample = f.readlines()
# sample = [
# '0/1',
# '1/2',
# '1/3',
# '1/4',
# '5/0',
# '2/5',
# '3/6',
# '4/500'
# ]
for component in sample:
a, b = map(int, component.split("/"))
d[a].add(component)
d[b].add(component)
solution()
| 27.875
| 88
| 0.59385
| 228
| 1,561
| 3.842105
| 0.307018
| 0.013699
| 0.077626
| 0.02968
| 0.136986
| 0.100457
| 0.100457
| 0.050228
| 0
| 0
| 0
| 0.028746
| 0.264574
| 1,561
| 55
| 89
| 28.381818
| 0.734321
| 0.090967
| 0
| 0
| 0
| 0
| 0.024165
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0
| 0.029412
| 0.029412
| 0.176471
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
539eb7f2ba00a494348f5e2c2412e8b083606e64
| 1,048
|
py
|
Python
|
live-plotting.py
|
rmhsawyer/EC601-Final-Project-Mapping_User_Face_To_Emoji
|
05a61dca25ef6dc6827e3389a753eb65a09c1813
|
[
"Apache-2.0"
] | null | null | null |
live-plotting.py
|
rmhsawyer/EC601-Final-Project-Mapping_User_Face_To_Emoji
|
05a61dca25ef6dc6827e3389a753eb65a09c1813
|
[
"Apache-2.0"
] | 22
|
2017-11-10T21:37:20.000Z
|
2017-12-05T22:36:50.000Z
|
live-plotting.py
|
rmhsawyer/EC601-Final-Project
|
05a61dca25ef6dc6827e3389a753eb65a09c1813
|
[
"Apache-2.0"
] | 3
|
2017-10-30T20:07:18.000Z
|
2017-12-03T00:47:18.000Z
|
#draw the predictions from real-time.py
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
style.use('fivethirtyeight')
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
def animate(i):
    with open('emotion.txt', 'r') as f:  # close the file each frame instead of leaking handles
        graph_data = f.read()
    lines = graph_data.split('\n')
xs = []
y_angry = []
y_fear = []
y_happy = []
y_sad = []
y_surprise = []
y_neutral = []
for line in lines:
if len(line) > 1:
time, angry, fear, happy, sad, surprise, neutral = line.split(',')
xs.append(time)
y_angry.append(angry)
y_fear.append(fear)
y_happy.append(happy)
y_sad.append(sad)
y_surprise.append(surprise)
y_neutral.append(neutral)
ax1.clear()
ax1.plot(xs, y_angry)
ax1.plot(xs, y_fear)
ax1.plot(xs, y_happy)
ax1.plot(xs, y_sad)
ax1.plot(xs, y_surprise)
ax1.plot(xs, y_neutral)
ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()
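# --- Editor's hedged note (not part of the original script) ---
# animate() expects emotion.txt to contain one comma-separated sample per
# line, matching the unpack above:
#   time,angry,fear,happy,sad,surprise,neutral
# e.g. "0.5,0.10,0.02,0.70,0.05,0.03,0.10" (values here are made up)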
| 24.952381
| 78
| 0.605916
| 147
| 1,048
| 4.176871
| 0.380952
| 0.034202
| 0.087948
| 0.09772
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020672
| 0.26145
| 1,048
| 41
| 79
| 25.560976
| 0.77261
| 0.03626
| 0
| 0
| 0
| 0
| 0.029732
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.085714
| 0
| 0.114286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53a13df64d25ae2c757b6265afa2baab533adc4f
| 3,122
|
py
|
Python
|
libs/Rack.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 22
|
2015-01-16T01:36:32.000Z
|
2020-06-08T00:46:18.000Z
|
libs/Rack.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 8
|
2015-12-28T18:56:19.000Z
|
2019-04-01T17:33:48.000Z
|
libs/Rack.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 13
|
2015-01-13T20:56:22.000Z
|
2022-02-23T06:01:17.000Z
|
from KeyValueTree import KeyValueTree
from truth.models import KeyValue as TruthKeyValue, Truth
from systems.models import KeyValue as KeyValue
from django.test.client import RequestFactory
from api_v2.keyvalue_handler import KeyValueHandler
import json
factory = RequestFactory()
class Rack:
rack_name = None
tree = None
kv = None
ru = None
width = None
systems = []
ethernet_patch_panel_24 = []
ethernet_patch_panel_48 = []
def __init__(self, rack_name):
self.systems = []
self.rack_name = rack_name
self.kv = Truth.objects.select_related('truth_key_value').get(name=self.rack_name)
self.system_list = KeyValue.objects.select_related('system').filter(value__contains="truth:%s" % (self.rack_name))
self.ethernet_patch_panel_24 = self._get_ethernet_patch_panels(self.kv, 'ethernet', 24)
self.ethernet_patch_panel_48 = self._get_ethernet_patch_panels(self.kv, 'ethernet', 48)
        h = KeyValueHandler()
for s in self.system_list:
request = factory.get('/api/v2/keyvalue/?keystore=%s' % (s.system.hostname), follow=True)
tree = h.read(request)
system_ru = self._get_system_ru(tree)
system_image = self._get_system_image(tree)
system_slot = self._get_system_slot(tree)
self.systems.append({
"system_name":s.system.hostname,
"system_id":s.system.id,
"system_ru":system_ru,
"system_image":system_image,
'system_slot':system_slot,
'operating_system':str(s.system.operating_system),
'server_model': str(s.system.server_model),
'oob_ip': str(s.system.oob_ip),
})
self.systems = sorted(self.systems, key=lambda k: k['system_slot'])
        try:
            self.ru = self.kv.keyvalue_set.get(key='rack_ru').value
        except Exception:  # default rack height when no 'rack_ru' key exists
            self.ru = 42
        try:
            self.width = self.kv.keyvalue_set.get(key='rack_width').value
        except Exception:  # default rack width when no 'rack_width' key exists
            self.width = 30
def _get_ethernet_patch_panels(self, tree, type, port_count):
ret = []
for i in tree.keyvalue_set.all():
match_string = "%i_port_%s_patch_panel" % (port_count, type)
if str(i.key) == match_string:
ret.append(i.value)
return ret
def _get_system_ru(self, tree):
for i in tree.iterkeys():
try:
if 'system_ru' in i.split(':'):
return tree[i]
except:
pass
return 4
def _get_system_image(self, tree):
for i in tree.iterkeys():
try:
if 'system_image' in i.split(':'):
return tree[i]
except:
pass
return None
def _get_system_slot(self, tree):
for i in tree.iterkeys():
try:
if 'system_slot' in i.split(':'):
return tree[i]
except:
pass
return 1
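# --- Editor's hedged usage sketch (not from the original repository) ---
# Rack aggregates truth/system key-values for a single rack; inside a
# configured Django project it would be used roughly like this (the rack
# name is an assumption):
#   rack = Rack('rack1')
#   print(rack.ru, rack.width)
#   print([s['system_name'] for s in rack.systems])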
| 34.688889
| 122
| 0.575593
| 380
| 3,122
| 4.476316
| 0.236842
| 0.053498
| 0.042328
| 0.023516
| 0.221046
| 0.205761
| 0.205761
| 0.174015
| 0.126984
| 0.065256
| 0
| 0.009447
| 0.321909
| 3,122
| 89
| 123
| 35.078652
| 0.794048
| 0
| 0
| 0.234568
| 0
| 0
| 0.078475
| 0.016336
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061728
| false
| 0.037037
| 0.08642
| 0
| 0.345679
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53a2e756b6afda167f3e4ff4e520ec037aac6965
| 9,526
|
py
|
Python
|
poem.py
|
xcollantes/poetry-generator
|
456c9702f0105b49b8c3edbb55043a10efbf359b
|
[
"MIT"
] | null | null | null |
poem.py
|
xcollantes/poetry-generator
|
456c9702f0105b49b8c3edbb55043a10efbf359b
|
[
"MIT"
] | null | null | null |
poem.py
|
xcollantes/poetry-generator
|
456c9702f0105b49b8c3edbb55043a10efbf359b
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
import datetime
import os
import random
import sys
import uuid
import base64
import yaml
import re
try:
import en
except:
print("DOWNLOD NODECUBE")
print("""wget https://www.nodebox.net/code/data/media/linguistics.zip
unzip linguistics.zip""")
VERSION = "1.1"
THEME_PROB = 0
class bnfDictionary:
def __init__(self, file):
        self.grammar = yaml.safe_load(open(file, 'r'))  # safe_load: plain data only, avoids yaml.load's arbitrary-object construction
self.poemtype = "<poem>"
def generate(self, key, num):
gram = self.grammar[key]
if len(gram)==1:
i = 0
else:
i = random.randint(0, len(gram) - 1)
string = ""
if "<" not in gram[i]:
string = gram[i]
else:
for word in gram[i].split():
if "<" not in word:
string = string + word + " "
else:
if "verb" in word and word != '<adverb>':
if "pverb" in word or "mushy" in self.poemtype:
v = self.generate("<pverb>", 1).strip()
elif "nverb" in word:
v = self.generate("<nverb>", 1).strip()
                        else:
                            v = self.generate("<verb>", 1).strip()  # re-enabled: without this branch, v is unbound for a plain <verb> token
if random.randint(1, 100) < THEME_PROB:
v = self.generate("<theme-verb>", 1).strip()
if "verb-inf" in word:
string = string + \
en.verb.present_participle(v) + " "
elif "verb-pr" in word:
string = string + \
en.verb.present(
v, person=3, negate=False) + " "
elif "verb-past" in word:
string = string + en.verb.past(v) + " "
else:
string = string + v + " "
elif "noun" in word:
if "pnoun" in word or "mushy" in self.poemtype:
v = self.generate("<pnoun>", 1).strip()
elif "nnoun" in word:
v = self.generate("<nnoun>", 1).strip()
else:
v = self.generate("<noun>", 1).strip()
if random.randint(1, 100) < THEME_PROB:
v = self.generate("<theme-noun>", 1).strip()
if "pl" in word:
v = en.noun.plural(v)
string = string + v + " "
elif "person" in word:
v = self.generate("<person>", 1).strip()
if "pl" in word:
v = en.noun.plural(v)
string = string + v + " "
elif "adj" in word:
if "mushy" in self.poemtype:
v = self.generate("<padj>",1)
else:
if random.randint(1, 100) < THEME_PROB:
v = self.generate("<theme-adj>", 1).strip()
else:
v = self.generate(word, 1).strip()
string = string + v + " "
elif "fruit" in word:
v = self.generate("<fruit>", 1).strip()
if "pl" in word:
v = en.noun.plural(v)
string = string + self.generate(word, 1) + " "
elif "person" in word:
v = self.generate("<fruit>", 1).strip()
if "pl" in word:
v = en.noun.plural(v)
string = string + self.generate(word, 1) + " "
else:
if "-pl" in word:
v = en.noun.plural(self.generate(word.replace("-pl",""),1))
else:
v = self.generate(word, 1)
string = string + v + " "
return string
def generatePretty(self, key, seed_str):
if seed_str == None:
seed_str = str(uuid.uuid4()).split("-")[0]
random.seed(uuid.uuid5(uuid.NAMESPACE_DNS,seed_str).int)
#tool = language_check.LanguageTool('en-US')
self.poemtype = key
if key == "<mushypoem>":
key = "<poem>"
poem = self.generate(key, 1)
poem = poem.replace(" ,", ",")
puncuation = [".", ".", ".", ".", "!", "?"]
dontbreaks = ["of", "behind", "the", "when", "what", "why", "who", ",",
"your", "by", "like", "to", "you", "your", "a", "are", "become", "newline"]
capitalize = False
breaks = 0
poem2 = []
foundFirstBreak = False
for word in poem.replace("\n", "newline").split():
poem2.append(word.lower())
if random.randint(1, 100) < 2 and "newline" not in word and foundFirstBreak:
isgood = True
for dontbreak in list(dontbreaks + puncuation):
if dontbreak == word.lower():
isgood = False
if isgood:
poem2.append("newline")
if "newline" in word:
foundFirstBreak = True
poem3 = []
beforeFirstBreak = True
for word in poem2:
if "newline" in word:
breaks += 1
beforeFirstBreak = False
else:
breaks = 0
if beforeFirstBreak or word == "i" or "i'" in word:
word = word.capitalize()
poem3.append(word)
capitalize = False
else:
if breaks > 1:
capitalize = True
if capitalize == True and "newline" not in word:
word = word.capitalize()
capitalize = False
for punc in list(set(puncuation)):
if punc in word:
capitalize = True
poem3.append(word)
if random.randint(1, 100) < 0 and "newline" not in word:
isgood = True
for dontbreak in list(dontbreaks + puncuation):
if dontbreak == word.lower():
isgood = False
if isgood:
poem3.append(random.choice(puncuation))
capitalize = True
# noPunc = True
# for punc in list(set(puncuation)):
# if punc in word:
# noPunc = False
# if noPunc:
# poem3.append(random.choice(puncuation))
newPoem = " ".join(poem3)
newPoem = newPoem.replace(" a a", " an a")
newPoem = newPoem.replace("newline .", ". newline")
newPoem = newPoem.replace("newline ?", "? newline")
newPoem = newPoem.replace("newline !", "! newline")
newPoem = newPoem.replace("newline ,", ", newline")
newPoem = newPoem.replace("newline", "\n")
newPoem = newPoem.replace(" \n \n", "\n\n")
newPoem = newPoem.replace("\n \n ", "\n\n")
newPoem = newPoem.replace(" '", "'")
for punc in list(set(puncuation)):
newPoem = newPoem.replace(" " + punc, punc)
for punc in list(set(puncuation)):
newPoem = newPoem.replace(" " + punc, punc)
for punc in list(set(puncuation)):
newPoem = newPoem.replace(" " + punc, punc)
newPoem = newPoem.replace(" ,", ",")
newPoem = newPoem.replace("?.", "?")
newPoem = newPoem.replace(".?", ".")
newPoem = newPoem.replace(",.", ",")
newPoem = newPoem.replace("!.", "!")
newPoem = newPoem.replace("..", ".")
newPoem = newPoem.replace("..", ".")
newPoem = newPoem.replace("..", ".")
title = newPoem.split("\n")[0]
newTitle = title.replace(".", "")
newPoem = newPoem.replace(title, "<h1>" + newTitle + "</h1>")
newPoem2 = ""
firstLine = False
secondLine = False
for line in newPoem.split("\n"):
if len(line) > 0:
if firstLine and not secondLine:
newPoem2 = newPoem2 + "<p>\n"
secondLine = True
if firstLine == False:
firstLine = True
newPoem2 = newPoem2 + line + " \n"
if firstLine and secondLine:
newPoem2 = newPoem2 + line + " <br />\n"
else:
newPoem2 = newPoem2 + " <br />\n"
newPoem2 = newPoem2 + "</p>"
return newPoem2,seed_str
bnf = bnfDictionary('brain.yaml')
def generate_poem(poemtype, hex_seed=None):
p,seed_str = bnf.generatePretty('<' + poemtype + '>',hex_seed)
return p,seed_str
if __name__ == '__main__':
poemtype = 'poem'
if 'mushy' in sys.argv[1:]:
poemtype = 'mushypoem'
p,seed_str=generate_poem(poemtype)
print(("*"*30 + "\n"*5))
filtered = []
for line in re.sub("<.*?>", " ", p).split("\n"):
if len(line.strip()) > 0:
filtered.append(line.strip())
else:
filtered.append("pause")
print(p)
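# --- Editor's hedged sketch of the expected brain.yaml shape (illustrative) ---
# generate() looks up each "<key>" and picks one expansion from its list,
# recursing into any "<tokens>" the expansion contains. These entries are
# made up, not the real grammar:
#   "<poem>": ["<adj> <noun> <verb-pr> beneath the <noun>"]
#   "<adj>": ["quiet", "amber"]
#   "<noun>": ["river", "stone"]
#   "<verb>": ["drift", "gleam"]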
| 39.526971
| 97
| 0.43607
| 909
| 9,526
| 4.524752
| 0.182618
| 0.039387
| 0.107221
| 0.054461
| 0.460491
| 0.385363
| 0.356188
| 0.324824
| 0.319232
| 0.319232
| 0
| 0.0165
| 0.43376
| 9,526
| 240
| 98
| 39.691667
| 0.746014
| 0.025089
| 0
| 0.339535
| 0
| 0
| 0.076426
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018605
| false
| 0
| 0.051163
| 0
| 0.088372
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53a4815531cf8a3d91a379873dd45b934995baa1
| 20,346
|
py
|
Python
|
src/ncstyler/console.py
|
starofrainnight/ncstyler
|
d13a6fa330b955db1cb9aa7a6ff1751ec41e82eb
|
[
"MIT"
] | null | null | null |
src/ncstyler/console.py
|
starofrainnight/ncstyler
|
d13a6fa330b955db1cb9aa7a6ff1751ec41e82eb
|
[
"MIT"
] | null | null | null |
src/ncstyler/console.py
|
starofrainnight/ncstyler
|
d13a6fa330b955db1cb9aa7a6ff1751ec41e82eb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
import CppHeaderParser
import re
import sys
import yaml
import copy
import six
import os.path
import traceback
class CppDefine(dict):
def __init__(self):
self["name"] = None
self["parameters"] = []
self["line_number"] = -1
class CppDefineParameter(dict):
def __init__(self):
self["name"] = None
self["line_number"] = -1
class CppNamespace(dict):
def __init__(self):
self["name"] = None
self["line_number"] = -1
class CppFileName(dict):
def __init__(self):
self["name"] = None
self["line_number"] = -1
class Application(object):
def __init__(self):
        description = '''A styler that targets only the naming conventions of
        source code'''
parser = argparse.ArgumentParser(description=description)
parser.add_argument("-c", "--config",
help="Configuration file path (In YAML format)",
required=True)
parser.add_argument("-o", "--output", help="Output file path")
parser.add_argument("-d", "--debug", action='store_true', help="Print trace stack")
parser.add_argument("file_path", help="Source file path")
self.__args = parser.parse_args()
# If user does not specific output path, we default it to input file
# path
if self.__args.output is None:
self.__args.output = self.__args.file_path
        self.__config = yaml.safe_load(open(self.__args.config))  # safe_load avoids constructing arbitrary objects from the config
old_base = self.__config["_base_"]
self.__config["_base_"] = {
"re":"[a-zA-Z0-9_]+",
"error": "",
}
self.__config["_base_"].update(old_base)
def parse_define(self, adefine):
matched = re.match(r"[^\w]*(\w+)(?:\(([^\)]*)\)|\s*).*", adefine)
name = matched.group(1)
parameters = []
if matched.group(2) is not None:
parameter_names = matched.group(2).split(',')
for parameter_name in parameter_names:
aparameter = CppDefineParameter()
aparameter["name"] = parameter_name.strip()
parameters.append(aparameter)
result = CppDefine()
result["name"] = name
result["parameters"] = parameters
return result
def _is_special_method(self, amethod):
if isinstance(amethod, six.string_types):
amethod_name = amethod
else:
amethod_name = amethod["name"]
founded = re.findall(r"(?:^|[^\w]+)operator[^\w]+", amethod_name)
        if len(founded) <= 0:
            # The "debug" key only exists on parsed method dicts, not on
            # plain strings.
            if (not isinstance(amethod, six.string_types)
                    and re.match(r"(?:^|.*\W)operator\W.*",
                                 amethod["debug"]) is not None):
                return True
            return False
        return True
def _get_argument_name(self, an_argument):
if isinstance(an_argument, six.string_types):
return an_argument
if len(an_argument["name"]) > 0:
return an_argument["name"]
        # If it's a member-function pointer with "Return (Class::*name)(...)"
        # style
matched = re.match(r"^\w+\s*\(\w*::\*(\w+)\)\(.*$", an_argument["type"])
if matched is None:
# with normal "function" style
matched = re.match(r"[^\(]*\([^\)]*\W(\w+)\W.*\).*", an_argument["type"])
if matched is None:
return ""
else:
return matched.group(1)
def _get_config(self, name):
override_table = {
"class": "_base_",
"function": "_base_",
"variant": "_base_",
"namespace": "_base_",
"define": "_base_",
"filename": "_base_", # Special config use to define filename rule
"argument": "variant",
"static_variant": "variant",
"global_variant": "variant",
"function_argument": "argument",
"class_method_argument": "function_argument",
"struct_method_argument": "class_method_argument",
"define_function_argument": "function_argument",
"define_function": "function",
"class_method": "function",
"struct_method": "class_method",
"class_variant": "variant",
"struct_variant": "class_variant",
"typedef": "class",
"struct": "class",
"enum": "class",
"enum_value": "define",
"union": "struct",
}
my_config = dict()
if name in override_table:
base_name = override_table[name]
my_config.update(self._get_config(base_name))
if name in self.__config:
my_config.update(self.__config[name])
return my_config
def _is_valid_variable(self, cpp_variable):
if cpp_variable["type"] == "return":
return False
if len(cpp_variable["type"]) <= 0:
return False
return True
def _get_cpp_method_re(self, name):
prefix = "operator"
if not name.startswith(prefix):
return re.escape(name)
# Operator methods
chars = []
for achar in name[len(prefix):]:
chars.append("\\s*")
if achar.isalnum():
chars.append(achar)
else:
chars.append("\\")
chars.append(achar)
return "operator%s" % ''.join(chars)
def _validate_codes_of_cpp_method(self, cpp_method):
start_line_index = cpp_method["line_number"] - 1
# Extract cpp method codes
rest_lines = self._source_lines[start_line_index:]
content = '\n'.join(rest_lines)
code_lines = []
name_re = self._get_cpp_method_re(cpp_method["name"])
name_start_pos = re.search(name_re, content).span()[0]
parameters_start_pos = content.index('(', name_start_pos)
parameters_stop_pos = content.index(')', parameters_start_pos)
stack = []
        try:
            i = content.index('{', parameters_stop_pos + 1)
        except ValueError:
            return
        try:
            semicolon_pos = content.index(';', parameters_stop_pos + 1)
            if semicolon_pos <= i:
                return
        except ValueError:
            # No semicolon found; nothing to skip.
            pass
skipped_lines = cpp_method["line_number"] + content.count("\n", 0, i) - 2
stack.append(i)
i += 1
first_i = i
last_i = 0
is_finding_block_comment = False
is_finding_single_comment = False
while (len(stack) > 0) and (i < len(content)):
c = content[i]
if is_finding_block_comment:
# If finding block comment, then skip all other searching
if (c == "*") and (content[i + 1] == "/"):
is_finding_block_comment = False
elif (c == "/") and (content[i + 1] == "*"):
is_finding_block_comment = True
elif is_finding_single_comment:
# If finding single comment, then skip all other searching
if c == "\n":
is_finding_single_comment = False
elif (c == "/") and (content[i + 1] == "/"):
is_finding_single_comment = True
elif c == "{":
stack.append(i)
elif c == "}":
last_i = i
del stack[len(stack) - 1]
i += 1
if len(stack) <= 0:
content = content[first_i:last_i]
founded = re.findall(r"\w+\W+(\w+)\s*=[^=]", content)
for aname in founded:
avariant = dict()
avariant["name"] = aname
avariant["line_number"] = cpp_method["line_number"]
self._validate_name(avariant, "variant")
def _validate_name(self, cpp_object, name_re):
cpp_object_name = ""
if isinstance(cpp_object, six.string_types):
cpp_object_name = cpp_object
cpp_object = dict()
cpp_object["name"] = cpp_object_name
cpp_object["line_number"] = -1
elif "name" in cpp_object:
cpp_object_name = cpp_object["name"]
if ('<' in cpp_object_name) and ("debug" in cpp_object):
matched = re.match(r".*?(\w+)\W+$", cpp_object["debug"])
if matched is not None:
cpp_object_name = matched.group(1)
else:
return
# Parse union like names
splitted = cpp_object_name.split()
if len(splitted) > 1:
cpp_object_name = splitted[-1]
        if '...' in cpp_object_name:
            # No valid name here; skip the check.
            return
        if len(cpp_object_name) <= 0:
            # No valid name here; skip the check.
            return
matched = re.match(self._get_config(name_re)["re"], cpp_object_name)
if matched is None:
filename = os.path.basename(self.__args.file_path)
error_message = self._get_config(name_re)["error"]
if len(error_message) > 0:
error_message = "%s %s" % (
' '.join([rule_name.capitalize() for rule_name in name_re.split("_")]),
error_message)
if self.__args.debug:
traceback.print_stack()
raise SyntaxError("%s:%s:error: Name '%s' isn't matched with rule : %s! %s" % (
filename,
cpp_object["line_number"],
cpp_object_name,
name_re,
error_message))
def _get_class_realname(self, class_name):
return re.match(r"(\w+).*", class_name).group(1)
def _validate_cpp_object(self, cpp_object):
cpp_object_type = type(cpp_object)
if cpp_object_type == CppDefine:
if len(cpp_object["parameters"]) <= 0:
# Normal Define Name
self._validate_name(cpp_object, "define")
else:
# Function Liked Define Name
self._validate_name(cpp_object, "define_function")
for aparameter in cpp_object["parameters"]:
self._validate_name(aparameter, "define_function_argument")
elif cpp_object_type == CppHeaderParser.CppClass:
if "struct" in cpp_object["declaration_method"]:
class_re = "struct"
class_method_re = "struct_method"
class_method_argument_re = "struct_method_argument"
class_variant_re = "struct_variant"
else:
class_re = "class"
class_method_re = "class_method"
class_method_argument_re = "class_method_argument"
class_variant_re = "class_variant"
self._validate_name(cpp_object, class_re)
for amethod in cpp_object.get_all_methods():
matched = re.match(r".*typedef\W[^\(]*\([^\)]*\W(\w+)\W.*\).*", amethod["debug"])
if matched is None:
self._validate_codes_of_cpp_method(amethod)
if not self._is_special_method(amethod):
if ((amethod["name"] != self._get_class_realname(cpp_object["name"]))
and (not amethod.get("constructor", False))
and (not amethod.get("destructor", False))):
try:
self._validate_name(amethod, class_method_re)
except SyntaxError:
is_need_reraise = True
try:
self._validate_name(amethod, "define_function")
is_need_reraise = False
except SyntaxError:
pass
if is_need_reraise:
raise
for aparameter in amethod["parameters"]:
an_object = dict()
an_object["line_number"] = aparameter["line_number"]
if (aparameter["type"].endswith("::*")
and (")" in aparameter["name"])):
an_object["name"] = re.match(r"(\w+).*", aparameter["name"]).group(1)
try:
self._validate_name(an_object,
class_method_re)
except SyntaxError:
is_need_reraise = True
try:
self._validate_name(amethod, "define_function")
is_need_reraise = False
except SyntaxError:
pass
if is_need_reraise:
raise
else:
an_object["name"] = self._get_argument_name(aparameter)
self._validate_name(an_object,
class_method_argument_re)
else:
self._validate_name(
{"name":matched.group(1), "line_number":amethod["line_number"]},
"typedef")
for access_specifier in CppHeaderParser.supportedAccessSpecifier:
for amember in cpp_object["properties"][access_specifier]:
is_skip_validate = False
if ("type" in amember) and (amember["type"] is not None):
internal_predeclares = ["class", "struct", "union"]
if amember["type"] in internal_predeclares:
is_skip_validate = True
if not is_skip_validate:
if amember["static"]:
self._validate_name(amember, "static_variant")
else:
self._validate_name(amember, class_variant_re)
for amember in cpp_object["structs"][access_specifier]:
self._validate_cpp_object(amember)
for amember in cpp_object["enums"][access_specifier]:
self._validate_cpp_object(amember)
elif cpp_object_type == CppHeaderParser.CppStruct:
self._validate_name(cpp_object, "struct")
elif cpp_object_type == CppHeaderParser.CppEnum:
self._validate_name(cpp_object, "enum")
line_number = -1
if "line_number" in cpp_object:
line_number = cpp_object["line_number"]
for amember in cpp_object["values"]:
                # Use the parent's line number if the enum value lacks its
                # own line number
if "line_number" not in amember:
amember["line_number"] = line_number
self._validate_name(amember, "enum_value")
elif cpp_object_type == CppHeaderParser.CppVariable:
if cpp_object["type"] != "return":
if cpp_object["static"]:
self._validate_name(cpp_object, "static_variant")
elif cpp_object["type"] not in ["class", "struct", "union"]:
if not cpp_object["type"].endswith("::"):
                        # Don't parse a variable implemented outside of a
                        # template class; it was already parsed when parsing
                        # the class.
self._validate_name(cpp_object, "global_variant")
elif cpp_object_type == CppHeaderParser.CppMethod:
            # Exclude the "main" function while parsing global functions
while True:
# FIXME: Parse special case : "struct RArraySize <T ( & ) [ N ]> {"
if "debug" in cpp_object:
if re.match(r".*\>\s*{$", cpp_object["debug"]) is not None:
break
self._validate_codes_of_cpp_method(cpp_object)
if cpp_object["name"] == "main":
break
if self._is_special_method(cpp_object):
break
if (cpp_object["class"] is None) or (len(cpp_object["class"]) <= 0):
if ">" in cpp_object["name"]:
regex = r"^[^<:]*?(?:(\w+)::)?(\w+)\s*<"
matched = re.search(regex, cpp_object["debug"])
if matched.group(1) is not None:
cpp_object["class"] = matched.group(1)
cpp_object["name"] = matched.group(2)
self._validate_name(cpp_object, "class_method")
elif len(cpp_object["returns"]) > 0:
                        # If a function has no return value (not even "void"),
                        # it may be a macro invocation.
                        # FIXME: We just ignore this situation:
                        # Code snippet: static RSignal<void(int)> sReceived;
if "<" not in cpp_object["name"]:
self._validate_name(cpp_object, "function")
break
if self._get_class_realname(cpp_object["class"]) == cpp_object["name"]:
                # Constructors/destructors share the class name
break
self._validate_name(cpp_object, "class_method")
break
elif cpp_object_type == CppHeaderParser.CppUnion:
self._validate_name(cpp_object, "union")
elif cpp_object_type == CppNamespace:
self._validate_name(cpp_object, "namespace")
elif cpp_object_type == CppFileName:
self._validate_name(cpp_object, "filename")
def exec_(self):
try:
with open(self.__args.file_path, "r") as source_file:
# For later parse by _validate_codes_of_cpp_method()
self._source_lines = source_file.readlines()
parsed_info = CppHeaderParser.CppHeader(self.__args.file_path)
# Verify File Names
filename = os.path.basename(self.__args.file_path)
cpp_object = CppFileName()
cpp_object["name"] = filename
self._validate_cpp_object(cpp_object)
# Verify Define Names
for define_text in parsed_info.defines:
self._validate_cpp_object(self.parse_define(define_text))
# Verify Function Names
for cpp_object in parsed_info.functions:
self._validate_cpp_object(cpp_object)
# Verify Class Names
for cpp_object in parsed_info.classes_order:
self._validate_cpp_object(cpp_object)
# Verify Struct Names
for cpp_object in parsed_info.structs_order:
self._validate_cpp_object(cpp_object)
# Verify Enum Names
for cpp_object in parsed_info.enums:
self._validate_cpp_object(cpp_object)
# Verify Variable Names
for cpp_object in parsed_info.variables:
# Avoid checking member variable inside function body.
if '{' not in cpp_object['type']:
self._validate_cpp_object(cpp_object)
for namespace in parsed_info.namespaces:
cpp_object = CppNamespace()
cpp_object["name"] = namespace
self._validate_cpp_object(cpp_object)
            # Verify Typedef Names
for cpp_object in parsed_info.typedefs:
self._validate_cpp_object(cpp_object)
except SyntaxError as e:
print(str(e))
return 1
except CppHeaderParser.CppHeaderParser.CppParseError as e:
            # CppHeaderParser can't parse this file, but we let it pass;
            # that's CppHeaderParser's problem, not the source file's.
print(str(e))
return 0
return 0
def main():
a = Application()
sys.exit(a.exec_())
if __name__ == "__main__":
# Execute only if run as a script
main()
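

# --- Usage sketch (hedged; not part of the original file). The rule names and
# --- regexes below are illustrative; each rule only needs "re" and "error".
#
# naming.yaml:
#     _base_:
#         re: "[a-zA-Z0-9_]+"
#         error: "contains invalid characters."
#     class:
#         re: "[A-Z][a-zA-Z0-9]*"
#         error: "should be CamelCase."
#
# Run:  python console.py -c naming.yaml my_header.h
# exec_() returns 0 on success and 1 when a name violates its rule.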
# ---- file: theone/wsgi/server.py | repo: laozijiaojiangnan/TheOne @ 73c1e7cee545c2eb2b2118f2dbf2d4d0c56e3824 | license: Apache-2.0 | size: 1,527 bytes | lang: Python | blob: 53a4ae1a747ba84b0abf192cd72d5b27b2b5e891 ----
import typing as t
from http.server import HTTPServer, BaseHTTPRequestHandler
from . import response as resp
class WsgiServer(HTTPServer):
pass
class WsgiHandel(BaseHTTPRequestHandler):
def handle(self) -> None:
handle_response = SimpleHandler(self.wfile)
handle_response.send()
class SimpleHandler:
def __init__(self, wfile):
self._response = resp.Response.create_empty() # type: resp.Response
self.sender = wfile
def send(self):
"""像浏览器发送包
node: 下面分成了三次发送,因为合在发送会有 bug,不确定问题,暂时先这样
"""
line = f"{self._response.line.version} {self._response.line.code} {self._response.line.code}\r\n"
self.sender.write(bytes(line, 'utf-8'))
self.add_header(key='Content-Length', value=len(self._response.body.content))
headers = "".join(
[f"{h.key}:{h.value}\r\n" for h in self._response.headers]
)
print(f'headers: {headers}')
self.sender.write(bytes(headers, 'utf-8'))
body = f"\r\n{self._response.body.content}"
self.sender.write(bytes(body, 'utf-8'))
def add_header(self, key: str, value: t.Any) -> t.List[resp.Headers]:
"""添加请求头键值对
Args:
key: 键
value: 值
Return:
存在的所有键值对信息
"""
if self._response is None:
self._response = resp.Response.create_empty()
h = resp.Headers(key=key, value=value)
self._response.headers.append(h)
return self._response.headers
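

# --- Usage sketch (hedged; not part of the original file). Address and port
# --- are illustrative; serve_forever() blocks until interrupted.
def _serve(port=8000):
    server = WsgiServer(("127.0.0.1", port), WsgiHandel)
    server.serve_forever()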
# ---- file: pytorch_translate/attention/multihead_attention.py | repo: dzhulgakov/translate @ 018d3eed8d93ff32e86c912e68045c7a3f4ed0b7 | license: BSD-3-Clause | size: 4,462 bytes | lang: Python | 1 star (2019-06-14) | blob: 53a59bcf9df24d2abf9133b0c94be6aa674beda0 ----
#!/usr/bin/env python3
from fairseq.modules import multihead_attention as fair_multihead
from pytorch_translate.attention import (
BaseAttention,
attention_utils,
register_attention,
)
@register_attention("multihead")
class MultiheadAttention(BaseAttention):
"""
Multiheaded Scaled Dot Product Attention
Implements equation:
MultiHead(Q, K, V) = Concat(head_1,...,head_h)W^O
where head_i = Attention(QW_i^Q, KW_i^K, VW_i^V)
Similarly to the above, d_k = d_v = d_model / h
In this implementation, keys and values are both set to encoder output
Inputs
init:
decoder_hidden_state_dim : dimensionality of decoder hidden state
context_dim : dimensionality of encoder output
kwargs :
nheads : integer # of attention heads
unseen_mask: if True, only attend to previous sequence positions
src_lengths_mask: if True, mask padding based on src_lengths
forward:
decoder_state : [batch size, d_model]
source_hids : [sequence length, batch size, d_model]
src_lengths : [batch size]
forward:
query : [sequence length, batch size, d_model]
key: [sequence length, batch size, d_model]
value: [sequence length, batch size, d_model]
Output
result : [batch_size, d_model]
"""
def __init__(
self,
decoder_hidden_state_dim,
context_dim,
*,
nheads=1,
unseen_mask=False,
src_length_mask=True
):
super().__init__(decoder_hidden_state_dim, context_dim)
assert decoder_hidden_state_dim == context_dim
d_model = decoder_hidden_state_dim # for brevity
assert d_model % nheads == 0
if unseen_mask:
raise NotImplementedError(
"Unseen mask not supported with sequential decoding"
)
self._fair_attn = fair_multihead.MultiheadAttention(d_model, nheads)
self.use_src_length_mask = src_length_mask
def forward(self, decoder_state, source_hids, src_lengths, squeeze=True):
"""
Computes MultiheadAttention with respect to either a vector
or a tensor
Inputs:
decoder_state: (bsz x decoder_hidden_state_dim) or
(bsz x T x decoder_hidden_state_dim)
source_hids: srclen x bsz x context_dim
src_lengths: bsz x 1, actual sequence lengths
squeeze: Whether or not to squeeze on the time dimension.
Even if decoder_state.dim() is 2 dimensional an
explicit time step dimension will be unsqueezed.
Outputs:
[batch_size, max_src_len] if decoder_state.dim() == 2 & squeeze
or
[batch_size, 1, max_src_len] if decoder_state.dim() == 2 & !squeeze
or
[batch_size, T, max_src_len] if decoder_state.dim() == 3 & !squeeze
or
[batch_size, T, max_src_len] if decoder_state.dim() == 3 & squeeze & T != 1
or
[batch_size, max_src_len] if decoder_state.dim() == 3 & squeeze & T == 1
"""
batch_size = decoder_state.shape[0]
if decoder_state.dim() == 3:
query = decoder_state
elif decoder_state.dim() == 2:
query = decoder_state.unsqueeze(1)
else:
raise ValueError("decoder state must be either 2 or 3 dimensional")
query = query.transpose(0, 1)
value = key = source_hids
src_len_mask = None
if src_lengths is not None and self.use_src_length_mask:
# [batch_size, 1, seq_len]
src_len_mask_int = attention_utils.create_src_lengths_mask(
batch_size=batch_size, src_lengths=src_lengths
)
src_len_mask = src_len_mask_int != 1
attn, attn_weights = self._fair_attn.forward(
query, key, value, key_padding_mask=src_len_mask, need_weights=True
)
# attn.shape = T X bsz X embed_dim
# attn_weights.shape = bsz X T X src_len
attn_weights = attn_weights.transpose(0, 2)
# attn_weights.shape = src_len X T X bsz
if squeeze:
attn = attn.squeeze(0)
# attn.shape = squeeze(T) X bsz X embed_dim
attn_weights = attn_weights.squeeze(1)
# attn_weights.shape = src_len X squeeze(T) X bsz
return attn, attn_weights
return attn, attn_weights
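

# --- Usage sketch (hedged; not part of the original file). Assumes torch and
# --- fairseq are installed; the dimensions are illustrative and satisfy
# --- d_model % nheads == 0.
def _demo_attention():
    import torch
    layer = MultiheadAttention(decoder_hidden_state_dim=8, context_dim=8,
                               nheads=2)
    decoder_state = torch.rand(4, 8)      # bsz x d_model
    source_hids = torch.rand(5, 4, 8)     # srclen x bsz x d_model
    src_lengths = torch.tensor([5, 4, 3, 2])
    attn, attn_weights = layer(decoder_state, source_hids, src_lengths)
    # With a 2-D decoder_state and squeeze=True: attn is bsz x d_model and
    # attn_weights is src_len x bsz.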
# ---- file: problems/Kelvin_Helmholtz/problem.py | repo: sddyates/mars @ a56735bd344b7337151fb419b1c832b0c702ea69 | license: MIT | size: 2,904 bytes | lang: Python | 1 star, 3 issues, 1 fork | blob: 53a892c5198d37c345b5950774654f861533af79 ----
from mars import main_loop
import numpy as np
from mars.settings import *
class Problem:
"""
Synopsis
--------
User class for the Kelvin-Helmholtz instability
Args
----
None
Methods
-------
initialise
Set all variables in each cell to initialise the simulation.
internal_bc
Specify the internal boundary for the simulation.
TODO
----
None
"""
def __init__(self):
self.parameter = {
'Name':'Kelvin Helmholtz instability.',
'Dimensions':'2D',
'x1 min':-0.5,
'x1 max':0.5,
'x2 min':-0.5,
'x2 max':0.5,
'x3 min':-0.5,
'x3 max':0.5,
'resolution x1':256,
'resolution x2':256,
'resolution x3':0,
'cfl':0.3,
'initial dt':1.0e-5,
'max dt increase':1.5,
'initial t': 0.0,
'max time': 5.0,
'save frequency': 2.5e-2,
'output type': ['numpy'],
'output primitives': True,
'print to file':False,
'profiling': True,
'restart file':None,
'gamma':1.4,
'density unit':1.0,
'length unit':1.0,
'velocity unit':1.0,
'optimisation': 'numba',
'riemann':'hllc',
'reconstruction':'linear',
'limiter':'minmod',
'time stepping':'RK2',
'method':'hydro',
'lower x1 boundary':'reciprocal',
'upper x1 boundary':'reciprocal',
'lower x2 boundary':'reciprocal',
'upper x2 boundary':'reciprocal',
'lower x3 boundary':'reciprocal',
'upper x3 boundary':'reciprocal',
'internal boundary':False
}
def initialise(self, V, g, l):
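        # rho, prs, vx1 and vx2 are array indices brought in by
        # `from mars.settings import *` above.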
if self.parameter['Dimensions'] == '2D':
Y, X = np.meshgrid(g.x1, g.x2, indexing='ij')
if self.parameter['Dimensions'] == '3D':
Z, Y, X = np.meshgrid(g.x1, g.x2, g.x3, indexing='ij')
yp = 0.25
dens_1 = 2.0
dens_2 = 1.0
pres = 2.0
vel_1 = 0.5
vel_2 = 0.0
amp = 0.001
vx1_per = (np.random.random(V.shape)*2.0 - 1)*amp
vx2_per = (np.random.random(V.shape)*2.0 - 1)*amp
region_1 = np.absolute(Y) < yp
region_2 = np.absolute(Y) > yp
V[rho, region_1] = dens_1
V[prs, region_1] = pres
V[vx1, region_1] = vel_1 + vx1_per[vx1, region_1]
V[vx2, region_1] = vel_2 + vx2_per[vx2, region_1]
V[rho, region_2] = dens_2
V[prs, region_2] = pres
V[vx1, region_2] = -vel_1 + vx1_per[vx1, region_2]
V[vx2, region_2] = vel_2 + vx2_per[vx2, region_2]
def internal_bc(self):
return None
if __name__ == "__main__":
main_loop(Problem())
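

# --- Usage sketch (hedged; not part of the original file): parameters can be
# --- overridden before handing the problem to the solver, e.g. for a quick run:
#
#     problem = Problem()
#     problem.parameter['resolution x1'] = 64
#     problem.parameter['resolution x2'] = 64
#     problem.parameter['max time'] = 1.0
#     main_loop(problem)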
# ---- file: tests/test_minhash.py | repo: azachar/pyminhash @ 8a595fb25fe7172ea31d604fe8a40b8c11f1b8af | license: MIT | size: 2,001 bytes | lang: Python | blob: 53aaad486aeb5cf94c98b45787e68241bed70175 ----
import pytest
from pyminhash import MinHash
from pyminhash.datasets import load_data
def test__sparse_vector():
df = load_data()
myMinHasher = MinHash(10)
res = myMinHasher._sparse_vectorize(df, 'name')
assert res.columns.tolist() == ['name', 'sparse_vector']
assert res['sparse_vector'].dtype == 'object'
def test__create_hashing_parameters():
n_hashes = 10
myMinHasher = MinHash(n_hash_tables=n_hashes)
res = myMinHasher._create_hashing_parameters()
assert len(res) == n_hashes
assert res.dtype == 'int64'
assert min(res) >= 0
assert min(res) <= myMinHasher.max_token_value
def test__create_minhash():
n_hashes = 10
myMinHasher = MinHash(n_hash_tables=n_hashes)
doc = [59, 65, 66, 67, 118, 150, 266]
res = myMinHasher._create_minhash(doc)
assert len(res) == n_hashes
def test__create_minhash_signatures():
df = load_data()
myMinHasher = MinHash(3)
df = myMinHasher._sparse_vectorize(df, 'name')
df = myMinHasher._create_minhash_signatures(df)
for col in ['hash_0', 'hash_1', 'hash_2']:
assert col in df.columns
assert df[col].dtype == 'int64'
def test_fit_predict():
df = load_data()
myMinHasher = MinHash(10)
res = myMinHasher.fit_predict(df, 'name')
assert res.columns.tolist() == ['row_number_1', 'row_number_2', 'name_1', 'name_2', 'jaccard_sim']
assert res['jaccard_sim'].dtype == 'float'
def test_fit_predict_accuracy():
def jaccard(x, y):
x_tokens = set(x.split())
y_tokens = set(y.split())
return len(x_tokens.intersection(y_tokens)) / len(x_tokens.union(y_tokens))
df = load_data()
myMinHasher = MinHash(1000)
res = myMinHasher.fit_predict(df, 'name')
assert len(res) == 1727
res['jaccard_real'] = res.apply(lambda row: jaccard(row['name_1'], row['name_2']), axis=1)
res['diff'] = res['jaccard_real'] - res['jaccard_sim']
assert abs(res['diff'].mean()) < 0.02
assert res['diff'].std() < 0.1
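

# --- Usage sketch (hedged; not part of the original test file). fit_predict
# --- pairs up records and reports an approximate Jaccard similarity per pair.
def _demo_minhash():
    df = load_data()
    hasher = MinHash(n_hash_tables=100)
    pairs = hasher.fit_predict(df, 'name')
    print(pairs.sort_values('jaccard_sim', ascending=False).head())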
# ---- file: openpicle/caravel.py | repo: DX-MON/OpenPICle @ c036333f807b1b4959af22bde8c4cac553ef162f | license: BSD-3-Clause | size: 1,325 bytes | lang: Python | blob: 53ac58babeeeae8a59ad21aa748c5f201e132f9d ----
# SPDX-License-Identifier: BSD-3-Clause
from amaranth import Elaboratable, Module, Signal, ResetInserter, EnableInserter
__all__ = (
'PIC16Caravel',
)
class PIC16Caravel(Elaboratable):
def elaborate(self, platform):
from .pic16 import PIC16
from .soc.busses.qspi import QSPIBus
m = Module()
reset = Signal()
busy_n = Signal(reset = 1)
m.submodules.qspiFlash = qspiFlash = QSPIBus(resourceName = ('spi_flash_4x', 0))
m.submodules.pic = pic = ResetInserter(reset)(EnableInserter(busy_n)(PIC16()))
run = platform.request('run', 0)
pBus = platform.request('p_bus', 0)
addr = pBus.addr.o
dataIn = pBus.data.i
dataOut = pBus.data.o
dataDir = pBus.data.oe
read = pBus.read
write = pBus.write
with m.If(qspiFlash.complete | reset):
m.d.sync += busy_n.eq(1)
with m.Elif(pic.iBus.read):
m.d.sync += busy_n.eq(0)
m.d.comb += [
reset.eq(~qspiFlash.ready),
run.o.eq(qspiFlash.ready & busy_n),
qspiFlash.address[0].eq(0),
qspiFlash.address[1:].eq(pic.iBus.address),
pic.iBus.data.eq(qspiFlash.data),
qspiFlash.read.eq(pic.iBus.read),
addr.eq(pic.pBus.address),
read.eq(pic.pBus.read),
pic.pBus.readData.eq(dataIn),
write.eq(pic.pBus.write),
dataOut.eq(pic.pBus.writeData),
dataDir.eq(pic.pBus.write),
]
return m
def get_ports(self):
return []
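

# --- Usage sketch (hedged; not part of the original file). PIC16Caravel expects
# --- a platform that exposes 'run', 'p_bus' and 'spi_flash_4x' resources:
#
#     platform = CaravelPlatform()    # hypothetical board definition
#     platform.build(PIC16Caravel())  # amaranth's standard build entry point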
# ---- file: cogs/stats.py | repo: est73/raid-shack @ 727b79a50a0ff5a5fc1cdfe03d51ba6703343b2e | license: MIT | size: 2,615 bytes | lang: Python | blob: 53ad1ae14a311f840335b9dec9f60aa2cc4425a1 ----
from discord.ext import commands
import discord
class Stats(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.has_permissions(manage_channels=True)
async def stats(self, ctx):
members = await ctx.guild.fetch_members(limit=None).flatten()
member_count = 0
member_role_count = 0
instinct_count = 0
mystic_count = 0
valor_count = 0
ign_count = 0
tc_count = 0
level_count = 0
country_count = 0
profile_count = 0
for member in members:
if not member.bot:
member_count += 1
for role in member.roles:
if role.name == "Member":
member_role_count += 1
if role.name == "instinct":
instinct_count += 1
if role.name == "mystic":
mystic_count += 1
if role.name == "valor":
valor_count += 1
if role.name == "ign":
ign_count += 1
if role.name == "tc":
tc_count += 1
if role.name == "level":
level_count += 1
if role.name == "country":
country_count += 1
if role.name == "profile":
profile_count += 1
values = [f'Members: {member_count}',
f'Members Role: {member_role_count}',
f'Members on Team Instinct: {instinct_count}',
f'Members on Team Mystic: {mystic_count}',
f'Members on Team Valor: {valor_count}',
f'Members with IGN set: {ign_count}',
f'Members with TC set: {tc_count}',
f'Members with level set: {level_count}',
f'Members with country set: {country_count}',
f'Members with completed Nexus Profiles: {profile_count}']
embed = discord.Embed(color=discord.Color.green())
embed.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)
embed.add_field(name='Server Stats:', value='\n'.join(values), inline=False)
await ctx.send(embed=embed)
@stats.error
async def permission_error(self, ctx, error):
if isinstance(error, commands.MissingPermissions):
await ctx.send("Sorry, you can't run this command")
else:
raise error
def setup(bot):
bot.add_cog(Stats(bot))
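

# --- Usage sketch (hedged; not part of the original file). fetch_members
# --- requires the privileged members intent to be enabled for the bot.
def _demo_bot():
    import discord
    intents = discord.Intents.default()
    intents.members = True
    bot = commands.Bot(command_prefix="!", intents=intents)
    setup(bot)          # registers the Stats cog
    # bot.run("TOKEN")  # hypothetical token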
# ---- file: inquire/agents/dempref.py | repo: HARPLab/inquire @ fa74eb10e5391a0f226753668a31527c68fc6962 | license: BSD-3-Clause | size: 35,783 bytes | lang: Python | blob: 53b14303d9879fe4fc46ca016bb6d34bfedbf48e ----
"""
An agent which uses demonstrations and preferences.
Code adapted from Learning Reward Functions
by Integrating Human Demonstrations and Preferences.
"""
import itertools
import os
import time
from pathlib import Path
from typing import Dict, List
import arviz as az
from inquire.agents.agent import Agent
from inquire.environments.environment import Environment
from inquire.interactions.feedback import Query, Trajectory
from inquire.interactions.modalities import Preference
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm
import pymc3.distributions.transforms as tr
import scipy.optimize as opt
import theano.tensor as tt
class DemPref(Agent):
"""A preference-querying agent seeded with demonstrations.
Note: We instantiate the agent according to arguments corresponding to
    what the original paper's codebase designates as their main experiment.
"""
def __init__(
self,
weight_sample_count: int,
trajectory_sample_count: int,
trajectory_length: int,
interaction_types: list = [],
w_dim: int = 4,
which_param_csv: int = 0,
visualize: bool = False,
):
"""Initialize the agent.
Note we needn't maintain a domain's start state; that's handled in
inquire/tests/evaluation.py and the respective domain.
"""
self._weight_sample_count = weight_sample_count
self._trajectory_sample_count = trajectory_sample_count
self._trajectory_length = trajectory_length
self._interaction_types = interaction_types
self._visualize = visualize
"""
Get the pre-defined agent parameters
"""
self._dempref_agent_parameters = self.read_param_csv(which_param_csv)
"""
        Instance attributes from the original codebase's 'runner.py' object.
        Note that some variable names are modified to be consistent with the
        Inquire parlance.
"""
self.domain_name = self._dempref_agent_parameters["domain"][0]
self.teacher_type = self._dempref_agent_parameters["teacher_type"][0]
self.n_demos = self._dempref_agent_parameters["n_demos"][0]
self.gen_demos = self._dempref_agent_parameters["gen_demos"][0]
self.opt_iter_count = self._dempref_agent_parameters["opt_iter_count"][
0
]
self.trim_start = self._dempref_agent_parameters["trim_start"][0]
self.query_option_count = self._dempref_agent_parameters[
"query_option_count"
][0]
self.update_func = self._dempref_agent_parameters["update_func"][0]
self.trajectory_length = self._dempref_agent_parameters[
"trajectory_length"
][0]
self.incl_prev_query = self._dempref_agent_parameters[
"incl_prev_query"
][0]
self.gen_scenario = self._dempref_agent_parameters["gen_scenario"][0]
self.n_pref_iters = self._dempref_agent_parameters["n_pref_iters"][0]
self.epsilon = self._dempref_agent_parameters["epsilon"][0]
"""
Instantiate the DemPref-specific sampler and query generator:
"""
self._sampler = None
self._w_samples = None
self._query_generator = None
self._first_q_session = True
self._q_session_index = 0
self._query_index = 0
self._w_dim = w_dim
assert (
self.update_func == "pick_best"
or self.update_func == "approx"
or self.update_func == "rank"
), ("Update" " function must be one of the provided options")
if self.incl_prev_query and self.teacher_type == "term":
assert (
self.n_demos > 0
), "Cannot include previous query if no demonstration is provided"
self.n_samples_summ = self._dempref_agent_parameters["n_samples_summ"][
0
]
self.n_samples_exp = self._dempref_agent_parameters["n_samples_exp"][0]
self.beta_demo = self._dempref_agent_parameters["beta_demo"][0]
self.beta_pref = self._dempref_agent_parameters["beta_pref"][0]
self.beta_teacher = self._dempref_agent_parameters["beta_teacher"][0]
"""If we want to save data as they did in DemPref:"""
self.first_q_session = True
self.q_session_index = 0
self.query_index = 0
self.config = [
self.teacher_type,
self.n_demos,
self.trim_start,
self.query_option_count,
self.update_func,
self.trajectory_length,
self.incl_prev_query,
self.gen_scenario,
self.n_pref_iters,
self.epsilon,
self.n_samples_summ,
self.n_samples_exp,
self.beta_demo,
self.beta_pref,
self.beta_teacher,
]
self.df = pd.DataFrame(columns=["run #", "pref_iter", "type", "value"])
def initialize_weights(self, domain: Environment) -> np.ndarray:
"""Randomly initialize weights for gradient descent."""
self.reset()
return self.w_samples
def reset(self) -> None:
"""Prepare for new query session."""
if self._sampler is not None:
self._sampler.clear_pref()
self._sampler = self.DemPrefSampler(
query_option_count=self.query_option_count,
dim_features=self._w_dim,
update_func=self.update_func,
beta_demo=self.beta_demo,
beta_pref=self.beta_pref,
visualize=self._visualize,
)
self.w_samples = self._sampler.sample(N=self.n_samples_summ)
"""If we want to save data as they did in DemPref:"""
mean_w = np.mean(self.w_samples, axis=0)
mean_w = mean_w / np.linalg.norm(mean_w)
var_w = np.var(self.w_samples, axis=0)
# Make sure to properly index data:
if self.first_q_session:
self.first_q_session = False
else:
self.q_session_index += 1
data = [
[self.q_session_index, 0, "mean", mean_w],
[self.q_session_index, 0, "var", var_w],
]
self.df = self.df.append(
pd.DataFrame(
data, columns=["run #", "pref_iter", "type", "value"]
),
ignore_index=True,
)
def generate_query(
self,
domain: Environment,
query_state: int,
curr_w: np.ndarray,
verbose: bool = False,
) -> list:
"""Generate query using approximate gradients.
Code adapted from DemPref's ApproxQueryGenerator.
"""
if self._query_generator is None:
self._query_generator = self.DemPrefQueryGenerator(
dom=domain,
num_queries=self.query_option_count,
trajectory_length=self.trajectory_length,
num_expectation_samples=self.n_samples_exp,
include_previous_query=self.incl_prev_query,
generate_scenario=self.gen_scenario,
update_func=self.update_func,
beta_pref=self.beta_pref,
)
if self.incl_prev_query:
if len(self.demos) > 0:
self.random_scenario_index = np.random.randint(len(self.demos))
else:
self.random_scenario_index = 0
last_query_choice = self.all_query_choices[
self.random_scenario_index
]
# Generate query_options while ensuring that features of query_options
# are epsilon apart:
query_diff = 0
print("Generating query_options")
while query_diff <= self.epsilon:
if self.incl_prev_query:
if last_query_choice.null:
query_options = self._query_generator.generate_query_options(
self.w_samples, blank_traj=True
)
else:
query_options = self._query_generator.generate_query_options(
self.w_samples, last_query_choice
)
else:
query_options = self._query_generator.generate_query_options(
self.w_samples
)
query_diffs = []
for m in range(len(query_options)):
for n in range(m):
query_diffs.append(
np.linalg.norm(
domain.features_from_trajectory(
query_options[m].trajectory
)
- domain.features_from_trajectory(
query_options[n].trajectory
)
)
)
query_diff = max(query_diffs)
query = Query(
query_type=Preference,
task=None,
start_state=query_state,
trajectories=query_options,
)
return query
def update_weights(
self, current_weights: np.ndarray, domain: Environment, feedback: list
) -> np.ndarray:
"""Update the model's learned weights.
::inputs:
::current_weights: Irrelevant for DemPref; useful to other agents
::domain: The task's environment
::feedback: A list of the human feedback received to this point.
DemPref utilizes only the most recent
"""
if feedback == []:
# No feedback yet received
return self.w_samples
else:
# Use the most recent Choice in feedback:
query_options = feedback[-1].choice.options
choice = feedback[-1].choice.selection
choice_index = query_options.index(choice)
if self.incl_prev_query:
self.all_query_choices[self.random_scenario_index] = choice
# Create dictionary map from rankings to query-option features;
# load into sampler:
features = [
domain.features_from_trajectory(x.trajectory)
for x in query_options
]
phi = {k: features[k] for k in range(len(query_options))}
self._sampler.load_prefs(phi, choice_index)
self.w_samples = self._sampler.sample(N=self.n_samples_summ)
# Return the new weights from the samples:
mean_w = np.mean(self.w_samples, axis=0)
mean_w = mean_w / np.linalg.norm(mean_w)
return np.array(mean_w, copy=True).reshape(1, -1)
def read_param_csv(self, which_csv: int = 0) -> dict:
"""Read an agent-parameterization .csv.
::inputs:
:creation_index: A time-descending .csv file index.
                        e.g. if creation_index = 0, use the dempref_agent.csv
                        most recently created.
"""
data_path = Path.cwd() / Path("../inquire/agents/")
# Sort the .csvs in descending order by time of creation:
all_files = np.array(list(Path.iterdir(data_path)))
all_csvs = all_files[
np.argwhere([f.suffix == ".csv" for f in all_files])
]
all_csvs = np.array([str(f[0]).strip() for f in all_csvs])
sorted_csvs = sorted(all_csvs, key=os.path.getmtime)
sorted_csvs = [Path(c) for c in sorted_csvs]
# Select the indicated .csv and convert it to a dictionary:
chosen_csv = sorted_csvs[-which_csv]
df = pd.read_csv(chosen_csv)
params_dict = df.to_dict()
return params_dict
def process_demonstrations(
self, trajectories: list, domain: Environment
) -> None:
"""Generate demonstrations to seed the querying process."""
self.demos = trajectories
phi_demos = [
domain.features_from_trajectory(x.trajectory) for x in self.demos
]
self._sampler.load_demo(np.array(phi_demos))
self.cleaned_demos = self.demos
if self.incl_prev_query:
self.all_query_choices = [d for d in self.cleaned_demos]
class DemPrefSampler:
"""Sample trajectories for querying.
Code adapted from original DemPref agent.
"""
def __init__(
self,
query_option_count: int,
dim_features: int,
update_func: str = "approx",
beta_demo: float = 0.1,
beta_pref: float = 1.0,
visualize: bool = False,
):
"""
Initialize the sampler.
:param query_option_count: Number of queries.
:param dim_features: Dimension of feature vectors.
:param update_func: options are "rank", "pick_best", and
"approx". To use "approx", query_option_count
must be 2; will throw an assertion error
otherwise
:param beta_demo: parameter measuring irrationality of teacher in
providing demonstrations
:param beta_pref: parameter measuring irrationality of teacher in
selecting preferences
"""
self.query_option_count = query_option_count
self.dim_features = dim_features
self.update_func = update_func
self.beta_demo = beta_demo
self.beta_pref = beta_pref
self._visualize = visualize
if self.update_func == "approx":
assert (
self.query_option_count == 2
), "Cannot use approximation to update function if query_option_count > 2"
elif not (
self.update_func == "rank" or self.update_func == "pick_best"
):
raise Exception(
update_func + " is not a valid update function."
)
# feature vectors from demonstrated trajectories
self.phi_demos = np.zeros((1, self.dim_features))
# a list of np.arrays containing feature difference vectors and
# which encode the ranking from the preference
# queries
self.phi_prefs = []
def load_demo(self, phi_demos: np.ndarray):
"""
Load the demonstrations into the Sampler.
:param demos: a Numpy array containing feature vectors for each
demonstration; has dimension
n_dem -by- self.dim_features
"""
self.phi_demos = phi_demos
def load_prefs(self, phi: Dict, rank):
"""
Load the results of a preference query into the Sampler.
:param phi: a dictionary mapping rankings
(0,...,query_option_count-1) to feature vectors
"""
result = []
if self.update_func == "rank":
result = [None] * len(rank)
for i in range(len(rank)):
result[i] = phi[rank[i]]
elif self.update_func == "approx":
result = phi[rank] - phi[1 - rank]
elif self.update_func == "pick_best":
result, tmp = [phi[rank] - phi[rank]], []
for key in sorted(phi.keys()):
if key != rank:
tmp.append(phi[key] - phi[rank])
result.extend(tmp)
self.phi_prefs.append(np.array(result))
def clear_pref(self):
"""Clear all preference information from the sampler."""
self.phi_prefs = []
def sample(self, N: int, T: int = 1, burn: int = 1000) -> np.ndarray:
"""Return N samples from the distribution.
The distribution is defined by applying update_func on the
demonstrations and preferences observed thus far.
:param N: number of w_samples to draw.
:param T: if greater than 1, all samples except each T^{th}
sample are discarded
:param burn: how many samples before the chain converges;
these initial samples are discarded
:return: list of w_samples drawn
"""
"""Define model for MCMC.
NOTE the DemPref codebase creates a sampler via PyMC3 version 3.5;
this codebase adapts their model to PyMC3 version 3.11.2.
We use the NUTS sampling algorithm (an extension of
Hamilitonian Monte Carlo MCMC): https://arxiv.org/abs/1111.4246.
"""
# Define update function:
if self.update_func == "approx":
def update_function(distribution):
result = tt.sum(
[
-tt.nnet.relu(
-self.beta_pref
* tt.dot(self.phi_prefs[i], distribution)
)
for i in range(len(self.phi_prefs))
]
) + tt.sum(
self.beta_demo * tt.dot(self.phi_demos, distribution)
)
return result
elif self.update_func == "pick_best":
def update_function(distribution):
result = tt.sum(
[
-tt.log(
tt.sum(
tt.exp(
self.beta_pref
* tt.dot(
self.phi_prefs[i], distribution
)
)
)
)
for i in range(len(self.phi_prefs))
]
) + tt.sum(
self.beta_demo * tt.dot(self.phi_demos, distribution)
)
return result
elif self.update_func == "rank":
def update_function(distribution):
result = (
tt.sum( # sum across different queries
[
tt.sum( # sum across different terms in PL-update
-tt.log(
[
tt.sum( # sum down different feature-differences in a single term in PL-update
tt.exp(
self.beta_pref
* tt.dot(
self.phi_prefs[i][
j:, :
]
- self.phi_prefs[i][j],
distribution,
)
)
)
for j in range(
self.query_option_count
)
]
)
)
for i in range(len(self.phi_prefs))
]
)
+ tt.sum(
self.beta_demo
* tt.dot(self.phi_demos, distribution)
),
)
return result
self.update_function = update_function
while True:
test_value = np.random.uniform(
low=-1, high=1, size=self.dim_features
)
test_value = test_value / np.linalg.norm(test_value)
norm = (test_value ** 2).sum()
if norm <= 1:
break
# Get a sampling trace (and avoid Bad Initial Energy):
while True:
trace = self.get_trace(test_value)
if trace is not None:
break
if self._visualize:
az.plot_trace(trace)
plt.show()
input("Press enter to continue")
az.plot_energy(trace)
plt.show()
input("Press enter to continue")
az.plot_posterior(trace)
plt.show()
input("Press enter to continue")
all_samples = trace.sel(
draw=slice(burn, None)
).posterior.rv_x.values
all_samples = all_samples.reshape(
all_samples.shape[0] * all_samples.shape[1], -1
)
w_samples = np.array([r / np.linalg.norm(r) for r in all_samples])
return w_samples
def get_trace(self, test_val: np.ndarray) -> az.InferenceData:
"""Create an MCMC trace."""
# model accumulates the objects defined within the proceeding
# context:
model = pm.Model()
with model:
# Add random-variable x to model:
rv_x = pm.Uniform(
name="rv_x",
shape=self.dim_features,
lower=-1,
upper=1,
testval=test_val,
)
# Define the prior as the unit ball centered at 0:
def sphere(w):
"""Determine if w is part of the unit ball."""
w_sum = pm.math.sqr(w).sum()
result = tt.switch(
pm.math.gt(w_sum, 1.0),
-100,
# -np.inf,
self.update_function(w),
)
return result
try:
# Potential is a "potential term" defined as an "additional
# tensor...to be added to the model logp"(PyMC3 developer
# guide). In this instance, the potential is effectively
# the model's log-likelihood.
p = pm.Potential("sphere", sphere(rv_x))
trace = pm.sample(
10000,
tune=5000,
return_inferencedata=True,
init="adapt_diag",
)
except (
pm.SamplingError,
pm.parallel_sampling.ParallelSamplingError,
):
return None
return trace
class DemPrefQueryGenerator:
"""Generate queries.
Code adapted from original DemPref agent.
"""
def __init__(
self,
dom: Environment,
num_queries: int,
trajectory_length: int,
num_expectation_samples: int,
include_previous_query: bool,
generate_scenario: bool,
update_func: str,
beta_pref: float,
) -> None:
"""
Initialize the approx query generation.
Note: this class generates queries using approx gradients.
::original inputs:
:dom: the domain to generate queries on
:num_queries: number of queries to generate at each time step
:trajectory_length: the length of each query
:num_expectation_samples: number of w_samples to use in
approximating the objective
function
:include_previous_query: boolean for whether one of the
queries is the previously selected
query
:generate_scenario: boolean for whether we want to generate
the scenario -- i.e., other agents'
behavior
:update_func: the update_func used; the options are
"pick_best", "approx", and "rank"
:beta_pref: the rationality parameter for the teacher
selecting her query
::Inquire-specific inputs:
:start_state: The state from which a trajectory begins.
"""
assert (
num_queries >= 1
), "QueryGenerator.__init__: num_queries must be at least 1"
assert (
trajectory_length >= 1
), "QueryGenerator.__init__: trajectory_length must be at least 1"
assert (
num_expectation_samples >= 1
), "QueryGenerator.__init__: num_expectation_samples must be \
at least 1"
self.domain = dom
self.num_queries = num_queries
self.trajectory_length = trajectory_length
self.num_expectation_samples = num_expectation_samples
self.include_previous_query = include_previous_query
self.generate_scenario = (
generate_scenario # Currently must be False
)
assert (
self.generate_scenario is False
), "Cannot generate scenario when using approximate gradients"
self.update_func = update_func
self.beta_pref = beta_pref
self.num_new_queries = (
self.num_queries - 1
if self.include_previous_query
else self.num_queries
)
def generate_query_options(
self,
w_samples: np.ndarray,
last_query_choice: Trajectory = None,
blank_traj: bool = False,
) -> List[Trajectory]:
"""
Generate self.num_queries number of queries.
This function produces query options that (locally) maximize the
maximum volume removal objective.
:param w_samples: Samples of w
:param last_query_choice: The previously selected query. Only
required if self.incl_prev_query is
True
:param blank_traj: True is last_query_choice is blank. (Only
True if not using Dempref but using incl_prev_)
:return: a list of trajectories (queries)
"""
start = time.perf_counter()
def func(controls: np.ndarray, *args) -> float:
"""Minimize via L_BFGS.
:param controls: an array, concatenated to contain the control
input for all queries
:param args: the first argument is the domain, and the second
is the samples that will be used to approximate
the objective function
:return: the value of the objective function for the given set
of controls
"""
domain = args[0]
w_samples = args[1]
controls = np.array(controls)
controls_set = [
controls[i * z : (i + 1) * z]
for i in range(self.num_new_queries)
]
features_each_q_option = np.zeros(
(domain.w_dim, self.num_new_queries)
)
for i, c in enumerate(controls_set):
features_each_q_option[
:, i
] = domain.features_from_trajectory(
c, controls_as_input=True
)
if self.include_previous_query and not blank_traj:
features_each_q_option = np.append(
features_each_q_option,
domain.features_from_trajectory(last_query_choice),
axis=1,
)
if self.update_func == "pick_best":
return -objective(features_each_q_option, w_samples)
elif self.update_func == "approx":
return -approx_objective(features_each_q_option, w_samples)
else:
return -rank_objective(features_each_q_option, w_samples)
def objective(features: List, w_samples: np.ndarray) -> float:
"""
Maximize the volume removal objective.
:param features: a list containing the feature values of each
query
:param w_samples: samples of w, used to approximate the
objective
:return: the value of the objective function, evaluated on the
given queries' features
"""
volumes_removed = []
for i in range(len(features)):
feature_diff = np.array(
[f - features[i] for f in features]
) # query_option_count x feature_size
weighted_feature_diff = (
np.sum(np.dot(feature_diff, w_samples.T), axis=1)
/ w_samples.shape[0]
) # query_option_count x 1 -- summed across w_samples
v_removed = 1.0 - 1.0 / np.sum(
np.exp(self.beta_pref * weighted_feature_diff)
)
volumes_removed.append(v_removed)
return np.min(volumes_removed)
def approx_objective(
features: np.ndarray, w_samples: np.ndarray
) -> float:
"""
Approximate the maximum volume removal objective.
:param features: the feature values of each query option
:param w_samples: w_samples of w used to approximate the
objective
:return: the value of the objective function, evaluated on the
given queries' features
"""
if features.shape[0] > features.shape[1]:
features = features.T
volumes_removed = []
for i in range(len(features)):
feature_diff = (
features[i] - features[1 - i]
) # 1 x feature_size
weighted_feature_diff = (
np.sum(np.dot(feature_diff, w_samples.T))
/ w_samples.shape[0]
) # 1 x 1 -- summed across w_samples
v_removed = 1.0 - np.minimum(
1.0, np.exp(self.beta_pref * weighted_feature_diff)
)
volumes_removed.append(v_removed)
return np.min(volumes_removed)
def rank_objective(features, w_samples) -> float:
"""
The ranking maximum volume removal objective function.
Note: This objective uses the Plackett-Luce model of
teacher behavior.
                Cannot be used when incl_prev_query is True and no DemPref
                demonstrations are provided.
:param features: a list containing the feature values of each
query
:param w_samples: samples of w, used to approximate the
objective
:return: the value of the objective function, evaluated on the
given queries' features
"""
# features: query_option_count x feature_size
# w_samples: n_samples x feature_size
exp_rewards = (
np.sum(np.dot(features, w_samples.T), axis=1)
/ w_samples.shape[0]
) # query_option_count x 1 -- summed across w_samples
volumes_removed = []
rankings = itertools.permutations(
list(range(self.num_queries))
) # iterating over all possible rankings
for rank in rankings:
exp_rewards_sorted = [None] * len(rank)
for i in range(len(rank)):
exp_rewards_sorted[rank[i]] = exp_rewards[i]
value, i = 1, 0
for i in range(len(rank) - 1):
value *= 1.0 / np.sum(
np.exp(
self.beta_pref
* (
np.array(exp_rewards_sorted[i:])
- exp_rewards_sorted[i]
)
)
)
volumes_removed.append(1 - value)
return np.min(volumes_removed)
# The following optimization is w.r.t. volume removal; the domain's
# optimization is w.r.t. the linear combination of weights and
# features; this difference is a trait of the DemPref codebase.
z = self.trajectory_length * self.domain.control_size
lower_input_bound = [
x[0] for x in self.domain.control_bounds
] * self.trajectory_length
upper_input_bound = [
x[1] for x in self.domain.control_bounds
] * self.trajectory_length
opt_res = opt.fmin_l_bfgs_b(
func,
x0=np.random.uniform(
low=self.num_new_queries * lower_input_bound,
high=self.num_new_queries * upper_input_bound,
size=(self.num_new_queries * z),
),
args=(self.domain, w_samples),
bounds=self.domain.control_bounds
* self.num_new_queries
* self.trajectory_length,
approx_grad=True,
)
query_options_controls = [
opt_res[0][i * z : (i + 1) * z]
for i in range(self.num_new_queries)
]
end = time.perf_counter()
print(f"Finished computing queries in {end - start}s")
# Note the domain was reset w/ appropriate seed before beginning
# this query session; domain.run(c) will thus reset to appropriate
# state:
raw_trajectories = [
self.domain.run(c) for c in query_options_controls
]
raw_phis = [
self.domain.features_from_trajectory(t)
for t in raw_trajectories
]
query_options_trajectories = [
Trajectory(raw_trajectories[i], raw_phis[i])
for i in range(len(raw_trajectories))
]
if self.include_previous_query and not blank_traj:
return [last_query_choice] + query_options_trajectories
else:
return query_options_trajectories
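

# --- Usage sketch (hedged; not part of the original file). `domain`,
# --- `start_state` and `demos` stand in for a concrete inquire Environment, a
# --- start state, and a list of demonstration Trajectory objects; the loop
# --- shows how the agent's public methods fit together.
def _demo_dempref(domain, start_state, demos):
    agent = DemPref(weight_sample_count=100, trajectory_sample_count=10,
                    trajectory_length=10, w_dim=4)
    weights = agent.initialize_weights(domain)
    agent.process_demonstrations(demos, domain)
    feedback = []
    for _ in range(agent.n_pref_iters):
        query = agent.generate_query(domain, start_state, weights)
        # ...append the teacher's Choice over `query` to `feedback` here...
        weights = agent.update_weights(weights, domain, feedback)
    return weights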
# ---- file: virtual/lib/python3.8/site-packages/dns/zonefile.py | repo: Lenus254/personal_blog @ aac38e4b5372c86efa8e24db2e051fef8e5feef8 | license: Unlicense | size: 24,347 bytes | lang: Python | 1 star (2022-01-27) | blob: 53b40880bc916c9f0a3ace8c04060a57ded76e7b ----
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Zones."""
import re
import sys
import dns.exception
import dns.name
import dns.node
import dns.rdataclass
import dns.rdatatype
import dns.rdata
import dns.rdtypes.ANY.SOA
import dns.rrset
import dns.tokenizer
import dns.transaction
import dns.ttl
import dns.grange
class UnknownOrigin(dns.exception.DNSException):
"""Unknown origin"""
class CNAMEAndOtherData(dns.exception.DNSException):
"""A node has a CNAME and other data"""
def _check_cname_and_other_data(txn, name, rdataset):
rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset)
node = txn.get_node(name)
if node is None:
# empty nodes are neutral.
return
node_kind = node.classify()
if node_kind == dns.node.NodeKind.CNAME and \
rdataset_kind == dns.node.NodeKind.REGULAR:
raise CNAMEAndOtherData('rdataset type is not compatible with a '
'CNAME node')
elif node_kind == dns.node.NodeKind.REGULAR and \
rdataset_kind == dns.node.NodeKind.CNAME:
raise CNAMEAndOtherData('CNAME rdataset is not compatible with a '
'regular data node')
# Otherwise at least one of the node and the rdataset is neutral, so
# adding the rdataset is ok
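# Illustration of the rules above (hedged, not in the original source):
# adding an A rdataset to a name that already holds a CNAME raises
# CNAMEAndOtherData, while types such as NSEC are classified as neutral and
# may coexist with a CNAME at the same node.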
class Reader:
"""Read a DNS zone file into a transaction."""
def __init__(self, tok, rdclass, txn, allow_include=False,
allow_directives=True, force_name=None,
force_ttl=None, force_rdclass=None, force_rdtype=None,
default_ttl=None):
self.tok = tok
(self.zone_origin, self.relativize, _) = \
txn.manager.origin_information()
self.current_origin = self.zone_origin
self.last_ttl = 0
self.last_ttl_known = False
if force_ttl is not None:
default_ttl = force_ttl
if default_ttl is None:
self.default_ttl = 0
self.default_ttl_known = False
else:
self.default_ttl = default_ttl
self.default_ttl_known = True
self.last_name = self.current_origin
self.zone_rdclass = rdclass
self.txn = txn
self.saved_state = []
self.current_file = None
self.allow_include = allow_include
self.allow_directives = allow_directives
self.force_name = force_name
self.force_ttl = force_ttl
self.force_rdclass = force_rdclass
self.force_rdtype = force_rdtype
self.txn.check_put_rdataset(_check_cname_and_other_data)
def _eat_line(self):
while 1:
token = self.tok.get()
if token.is_eol_or_eof():
break
def _get_identifier(self):
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
return token
def _rr_line(self):
"""Process one line from a DNS zone file."""
token = None
# Name
if self.force_name is not None:
name = self.force_name
else:
if self.current_origin is None:
raise UnknownOrigin
token = self.tok.get(want_leading=True)
if not token.is_whitespace():
self.last_name = self.tok.as_name(token, self.current_origin)
else:
token = self.tok.get()
if token.is_eol_or_eof():
# treat leading WS followed by EOL/EOF as if they were EOL/EOF.
return
self.tok.unget(token)
name = self.last_name
if not name.is_subdomain(self.zone_origin):
self._eat_line()
return
if self.relativize:
name = name.relativize(self.zone_origin)
# TTL
if self.force_ttl is not None:
ttl = self.force_ttl
self.last_ttl = ttl
self.last_ttl_known = True
else:
token = self._get_identifier()
ttl = None
try:
ttl = dns.ttl.from_text(token.value)
self.last_ttl = ttl
self.last_ttl_known = True
token = None
except dns.ttl.BadTTL:
if self.default_ttl_known:
ttl = self.default_ttl
elif self.last_ttl_known:
ttl = self.last_ttl
self.tok.unget(token)
# Class
if self.force_rdclass is not None:
rdclass = self.force_rdclass
else:
token = self._get_identifier()
try:
rdclass = dns.rdataclass.from_text(token.value)
except dns.exception.SyntaxError:
raise
except Exception:
rdclass = self.zone_rdclass
self.tok.unget(token)
if rdclass != self.zone_rdclass:
raise dns.exception.SyntaxError("RR class is not zone's class")
# Type
if self.force_rdtype is not None:
rdtype = self.force_rdtype
else:
token = self._get_identifier()
try:
rdtype = dns.rdatatype.from_text(token.value)
except Exception:
raise dns.exception.SyntaxError(
"unknown rdatatype '%s'" % token.value)
try:
rd = dns.rdata.from_text(rdclass, rdtype, self.tok,
self.current_origin, self.relativize,
self.zone_origin)
except dns.exception.SyntaxError:
# Catch and reraise.
raise
except Exception:
# All exceptions that occur in the processing of rdata
# are treated as syntax errors. This is not strictly
# correct, but it is correct almost all of the time.
# We convert them to syntax errors so that we can emit
# helpful filename:line info.
(ty, va) = sys.exc_info()[:2]
raise dns.exception.SyntaxError(
"caught exception {}: {}".format(str(ty), str(va)))
if not self.default_ttl_known and rdtype == dns.rdatatype.SOA:
# The pre-RFC2308 and pre-BIND9 behavior inherits the zone default
# TTL from the SOA minttl if no $TTL statement is present before the
# SOA is parsed.
self.default_ttl = rd.minimum
self.default_ttl_known = True
if ttl is None:
# if we didn't have a TTL on the SOA, set it!
ttl = rd.minimum
# TTL check. We had to wait until now to do this as the SOA RR's
# own TTL can be inferred from its minimum.
if ttl is None:
raise dns.exception.SyntaxError("Missing default TTL value")
self.txn.add(name, ttl, rd)
def _parse_modify(self, side):
# Here we catch everything in '{' '}' in a group so we can replace it
# with ''.
is_generate1 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+),(.)}).*$")
is_generate2 = re.compile(r"^.*\$({(\+|-?)(\d+)}).*$")
is_generate3 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+)}).*$")
# Sometimes there are modifiers in the hostname. These come after
# the dollar sign. They are in the form: ${offset[,width[,base]]}.
# Make names
g1 = is_generate1.match(side)
if g1:
mod, sign, offset, width, base = g1.groups()
if sign == '':
sign = '+'
g2 = is_generate2.match(side)
if g2:
mod, sign, offset = g2.groups()
if sign == '':
sign = '+'
width = 0
base = 'd'
g3 = is_generate3.match(side)
if g3:
mod, sign, offset, width = g3.groups()
if sign == '':
sign = '+'
base = 'd'
if not (g1 or g2 or g3):
mod = ''
sign = '+'
offset = 0
width = 0
base = 'd'
if base != 'd':
raise NotImplementedError()
return mod, sign, offset, width, base
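# Illustration (assumed input, not in the original source): for a side
# containing "${-20,3,d}", _parse_modify returns
#     mod='{-20,3,d}', sign='-', offset='20', width='3', base='d'
# Offsets and widths matched by the regexes come back as strings; the
# caller converts them with int().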
def _generate_line(self):
# range lhs [ttl] [class] type rhs [ comment ]
"""Process one line containing the GENERATE statement from a DNS
zone file."""
if self.current_origin is None:
raise UnknownOrigin
token = self.tok.get()
# Range (required)
try:
start, stop, step = dns.grange.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except Exception:
raise dns.exception.SyntaxError
# lhs (required)
try:
lhs = token.value
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except Exception:
raise dns.exception.SyntaxError
# TTL
try:
ttl = dns.ttl.from_text(token.value)
self.last_ttl = ttl
self.last_ttl_known = True
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.ttl.BadTTL:
if not (self.last_ttl_known or self.default_ttl_known):
raise dns.exception.SyntaxError("Missing default TTL value")
if self.default_ttl_known:
ttl = self.default_ttl
elif self.last_ttl_known:
ttl = self.last_ttl
# Class
try:
rdclass = dns.rdataclass.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.exception.SyntaxError:
raise dns.exception.SyntaxError
except Exception:
rdclass = self.zone_rdclass
if rdclass != self.zone_rdclass:
raise dns.exception.SyntaxError("RR class is not zone's class")
# Type
try:
rdtype = dns.rdatatype.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except Exception:
raise dns.exception.SyntaxError("unknown rdatatype '%s'" %
token.value)
# rhs (required)
rhs = token.value
# The code currently only supports base 'd', so the last value
# in the tuple _parse_modify returns is ignored
lmod, lsign, loffset, lwidth, _ = self._parse_modify(lhs)
rmod, rsign, roffset, rwidth, _ = self._parse_modify(rhs)
for i in range(start, stop + 1, step):
# +1 because bind is inclusive and python is exclusive
if lsign == '+':
lindex = i + int(loffset)
elif lsign == '-':
lindex = i - int(loffset)
if rsign == '-':
rindex = i - int(roffset)
elif rsign == '+':
rindex = i + int(roffset)
lzfindex = str(lindex).zfill(int(lwidth))
rzfindex = str(rindex).zfill(int(rwidth))
name = lhs.replace('$%s' % (lmod), lzfindex)
rdata = rhs.replace('$%s' % (rmod), rzfindex)
self.last_name = dns.name.from_text(name, self.current_origin,
self.tok.idna_codec)
name = self.last_name
if not name.is_subdomain(self.zone_origin):
self._eat_line()
return
if self.relativize:
name = name.relativize(self.zone_origin)
try:
rd = dns.rdata.from_text(rdclass, rdtype, rdata,
self.current_origin, self.relativize,
self.zone_origin)
except dns.exception.SyntaxError:
# Catch and reraise.
raise
except Exception:
# All exceptions that occur in the processing of rdata
# are treated as syntax errors. This is not strictly
# correct, but it is correct almost all of the time.
# We convert them to syntax errors so that we can emit
# helpful filename:line info.
(ty, va) = sys.exc_info()[:2]
raise dns.exception.SyntaxError("caught exception %s: %s" %
(str(ty), str(va)))
self.txn.add(name, ttl, rd)
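# A hedged illustration of the BIND-style $GENERATE syntax handled above:
#     $GENERATE 1-3 host-$ A 10.0.0.$
# expands to host-1 A 10.0.0.1, host-2 A 10.0.0.2 and host-3 A 10.0.0.3;
# ${offset,width,base} modifiers shift and zero-pad the substituted index.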
def read(self):
"""Read a DNS zone file and build a zone object.
@raises dns.zone.NoSOA: No SOA RR was found at the zone origin
@raises dns.zone.NoNS: No NS RRset was found at the zone origin
"""
try:
while 1:
token = self.tok.get(True, True)
if token.is_eof():
if self.current_file is not None:
self.current_file.close()
if len(self.saved_state) > 0:
(self.tok,
self.current_origin,
self.last_name,
self.current_file,
self.last_ttl,
self.last_ttl_known,
self.default_ttl,
self.default_ttl_known) = self.saved_state.pop(-1)
continue
break
elif token.is_eol():
continue
elif token.is_comment():
self.tok.get_eol()
continue
elif token.value[0] == '$' and self.allow_directives:
c = token.value.upper()
if c == '$TTL':
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError("bad $TTL")
self.default_ttl = dns.ttl.from_text(token.value)
self.default_ttl_known = True
self.tok.get_eol()
elif c == '$ORIGIN':
self.current_origin = self.tok.get_name()
self.tok.get_eol()
if self.zone_origin is None:
self.zone_origin = self.current_origin
self.txn._set_origin(self.current_origin)
elif c == '$INCLUDE' and self.allow_include:
token = self.tok.get()
filename = token.value
token = self.tok.get()
if token.is_identifier():
new_origin =\
dns.name.from_text(token.value,
self.current_origin,
self.tok.idna_codec)
self.tok.get_eol()
elif not token.is_eol_or_eof():
raise dns.exception.SyntaxError(
"bad origin in $INCLUDE")
else:
new_origin = self.current_origin
self.saved_state.append((self.tok,
self.current_origin,
self.last_name,
self.current_file,
self.last_ttl,
self.last_ttl_known,
self.default_ttl,
self.default_ttl_known))
self.current_file = open(filename, 'r')
self.tok = dns.tokenizer.Tokenizer(self.current_file,
filename)
self.current_origin = new_origin
elif c == '$GENERATE':
self._generate_line()
else:
raise dns.exception.SyntaxError(
"Unknown zone file directive '" + c + "'")
continue
self.tok.unget(token)
self._rr_line()
except dns.exception.SyntaxError as detail:
(filename, line_number) = self.tok.where()
if detail is None:
detail = "syntax error"
ex = dns.exception.SyntaxError(
"%s:%d: %s" % (filename, line_number, detail))
tb = sys.exc_info()[2]
raise ex.with_traceback(tb) from None
class RRsetsReaderTransaction(dns.transaction.Transaction):
def __init__(self, manager, replacement, read_only):
assert not read_only
super().__init__(manager, replacement, read_only)
self.rdatasets = {}
def _get_rdataset(self, name, rdtype, covers):
return self.rdatasets.get((name, rdtype, covers))
def _get_node(self, name):
rdatasets = []
for (rdataset_name, _, _), rdataset in self.rdatasets.items():
if name == rdataset_name:
rdatasets.append(rdataset)
if len(rdatasets) == 0:
return None
node = dns.node.Node()
node.rdatasets = rdatasets
return node
def _put_rdataset(self, name, rdataset):
self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset
def _delete_name(self, name):
# First remove any changes involving the name
remove = []
for key in self.rdatasets:
if key[0] == name:
remove.append(key)
if len(remove) > 0:
for key in remove:
del self.rdatasets[key]
def _delete_rdataset(self, name, rdtype, covers):
try:
del self.rdatasets[(name, rdtype, covers)]
except KeyError:
pass
def _name_exists(self, name):
for (n, _, _) in self.rdatasets:
if n == name:
return True
return False
def _changed(self):
return len(self.rdatasets) > 0
def _end_transaction(self, commit):
if commit and self._changed():
rrsets = []
for (name, _, _), rdataset in self.rdatasets.items():
rrset = dns.rrset.RRset(name, rdataset.rdclass, rdataset.rdtype,
rdataset.covers)
rrset.update(rdataset)
rrsets.append(rrset)
self.manager.set_rrsets(rrsets)
def _set_origin(self, origin):
pass
class RRSetsReaderManager(dns.transaction.TransactionManager):
def __init__(self, origin=dns.name.root, relativize=False,
rdclass=dns.rdataclass.IN):
self.origin = origin
self.relativize = relativize
self.rdclass = rdclass
self.rrsets = []
def writer(self, replacement=False):
assert replacement is True
return RRsetsReaderTransaction(self, True, False)
def get_class(self):
return self.rdclass
def origin_information(self):
if self.relativize:
effective = dns.name.empty
else:
effective = self.origin
return (self.origin, self.relativize, effective)
def set_rrsets(self, rrsets):
self.rrsets = rrsets
def read_rrsets(text, name=None, ttl=None, rdclass=dns.rdataclass.IN,
default_rdclass=dns.rdataclass.IN,
rdtype=None, default_ttl=None, idna_codec=None,
origin=dns.name.root, relativize=False):
"""Read one or more rrsets from the specified text, possibly subject
to restrictions.
*text*, a file object or a string, is the input to process.
*name*, a string, ``dns.name.Name``, or ``None``, is the owner name of
the rrset. If not ``None``, then the owner name is "forced", and the
input must not specify an owner name. If ``None``, then any owner names
are allowed and must be present in the input.
*ttl*, an ``int``, string, or ``None``. If not ``None``, then the TTL is
forced to be the specified value and the input must not specify a TTL.
If ``None``, then a TTL may be specified in the input. If it is not
specified, then the *default_ttl* will be used.
*rdclass*, a ``dns.rdataclass.RdataClass``, string, or ``None``. If
not ``None``, then the class is forced to the specified value, and the
input must not specify a class. If ``None``, then the input may specify
a class that matches *default_rdclass*. Note that it is not possible to
return rrsets with differing classes; specifying ``None`` for the class
simply allows the user to optionally type a class as that may be convenient
when cutting and pasting.
*default_rdclass*, a ``dns.rdataclass.RdataClass`` or string. The class
of the returned rrsets.
*rdtype*, a ``dns.rdatatype.RdataType``, string, or ``None``. If not
``None``, then the type is forced to the specified value, and the
input must not specify a type. If ``None``, then a type must be present
for each RR.
*default_ttl*, an ``int``, string, or ``None``. If not ``None``, then if
the TTL is not forced and is not specified, then this value will be used.
If ``None``, and the TTL is neither forced nor specified in the input, an
error will occur.
*idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
is used. Note that codecs only apply to the owner name; dnspython does
not do IDNA for names in rdata, as there is no IDNA zonefile format.
*origin*, a string, ``dns.name.Name``, or ``None``, is the origin for any
relative names in the input, and also the origin to relativize to if
*relativize* is ``True``.
*relativize*, a bool. If ``True``, names are relativized to the *origin*;
if ``False`` then any relative names in the input are made absolute by
appending the *origin*.
"""
if isinstance(origin, str):
origin = dns.name.from_text(origin, dns.name.root, idna_codec)
if isinstance(name, str):
name = dns.name.from_text(name, origin, idna_codec)
if isinstance(ttl, str):
ttl = dns.ttl.from_text(ttl)
if isinstance(default_ttl, str):
default_ttl = dns.ttl.from_text(default_ttl)
if rdclass is not None:
rdclass = dns.rdataclass.RdataClass.make(rdclass)
default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass)
if rdtype is not None:
rdtype = dns.rdatatype.RdataType.make(rdtype)
manager = RRSetsReaderManager(origin, relativize, default_rdclass)
with manager.writer(True) as txn:
tok = dns.tokenizer.Tokenizer(text, '<input>', idna_codec=idna_codec)
reader = Reader(tok, default_rdclass, txn, allow_directives=False,
force_name=name, force_ttl=ttl, force_rdclass=rdclass,
force_rdtype=rdtype, default_ttl=default_ttl)
reader.read()
return manager.rrsets
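# A minimal usage sketch based on the signature above (the record text is
# illustrative):
#
#     rrsets = read_rrsets('www 3600 IN A 10.0.0.1\n',
#                          origin='example.', relativize=True)
#
# This parses a single A rrset whose owner name is relativized to the
# origin, i.e. 'www'.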
| 38.9552
| 83
| 0.548897
| 2,826
| 24,347
| 4.611465
| 0.14862
| 0.017726
| 0.045887
| 0.042971
| 0.40907
| 0.311234
| 0.262431
| 0.251381
| 0.218462
| 0.206952
| 0
| 0.004008
| 0.364686
| 24,347
| 624
| 84
| 39.017628
| 0.838505
| 0.207541
| 0
| 0.410835
| 0
| 0
| 0.027668
| 0.004629
| 0
| 0
| 0
| 0
| 0.004515
| 1
| 0.054176
| false
| 0.004515
| 0.031603
| 0.006772
| 0.133183
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53b4099090d815c2fccdfff9285d6d8c4361e95f
| 11,719
|
py
|
Python
|
swift/common/daemon.py
|
fossabot/swift-1
|
63fc013b8b96484cede0e9901ad54676b8c93298
|
[
"Apache-2.0"
] | null | null | null |
swift/common/daemon.py
|
fossabot/swift-1
|
63fc013b8b96484cede0e9901ad54676b8c93298
|
[
"Apache-2.0"
] | null | null | null |
swift/common/daemon.py
|
fossabot/swift-1
|
63fc013b8b96484cede0e9901ad54676b8c93298
|
[
"Apache-2.0"
] | 1
|
2020-03-09T19:58:52.000Z
|
2020-03-09T19:58:52.000Z
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import sys
import time
import signal
from re import sub
import eventlet.debug
from eventlet.hubs import use_hub
from swift.common import utils
class Daemon(object):
"""
Daemon base class
A daemon has a run method that accepts a ``once`` kwarg and will dispatch
to :meth:`run_once` or :meth:`run_forever`.
A subclass of Daemon must implement :meth:`run_once` and
:meth:`run_forever`.
A subclass of Daemon may override :meth:`get_worker_args` to dispatch
arguments to individual child process workers and :meth:`is_healthy` to
perform context specific periodic wellness checks which can reset worker
arguments.
Implementations of Daemon do not know *how* to daemonize, or execute
multiple daemonized workers; they simply provide the behavior of the daemon
and context specific knowledge about how workers should be started.
"""
def __init__(self, conf):
self.conf = conf
self.logger = utils.get_logger(conf, log_route='daemon')
def run_once(self, *args, **kwargs):
"""Override this to run the script once"""
raise NotImplementedError('run_once not implemented')
def run_forever(self, *args, **kwargs):
"""Override this to run forever"""
raise NotImplementedError('run_forever not implemented')
def run(self, once=False, **kwargs):
if once:
self.run_once(**kwargs)
else:
self.run_forever(**kwargs)
def post_multiprocess_run(self):
"""
Override this to do something after running using multiple worker
processes. This method is called in the parent process.
This is probably only useful for run-once mode since there is no
"after running" in run-forever mode.
"""
pass
def get_worker_args(self, once=False, **kwargs):
"""
For each worker yield a (possibly empty) dict of kwargs to pass along
to the daemon's :meth:`run` method after fork. The number of elements
returned from this method determines the number of processes created.
If the returned iterable is empty, the strategy will fall back to the
run-inline strategy.
:param once: False if the worker(s) will be daemonized, True if the
worker(s) will be run once
:param kwargs: plumbed through via command line argparser
:returns: an iterable of dicts, each element represents the kwargs to
be passed to a single worker's :meth:`run` method after fork.
"""
return []
def is_healthy(self):
"""
This method is called very frequently on the instance of the daemon
held by the parent process. If it returns False, all child workers are
terminated, and new workers will be created.
:returns: a boolean, True only if all workers should continue to run
"""
return True
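# A minimal subclass sketch under the contract documented above (the class
# and its behavior are illustrative, not part of swift):
#
#     class HelloDaemon(Daemon):
#         def run_once(self, *args, **kwargs):
#             self.logger.info('hello, once')
#
#         def run_forever(self, *args, **kwargs):
#             while True:
#                 self.logger.info('hello')
#                 time.sleep(30)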
class DaemonStrategy(object):
"""
This is the execution strategy for using subclasses of Daemon. The default
behavior is to invoke the daemon's :meth:`Daemon.run` method from within
the parent process. When the :meth:`Daemon.run` method returns the parent
process will exit.
However, if the Daemon returns a non-empty iterable from
:meth:`Daemon.get_worker_args`, the daemon's :meth:`Daemon.run` method will
be invoked in child processes, with the arguments provided from the parent
process's instance of the daemon. If a child process exits it will be
restarted with the same options, unless it was executed in once mode.
:param daemon: an instance of a :class:`Daemon` (has a `run` method)
:param logger: a logger instance
"""
def __init__(self, daemon, logger):
self.daemon = daemon
self.logger = logger
self.running = False
# only used by multi-worker strategy
self.options_by_pid = {}
self.unspawned_worker_options = []
def setup(self, **kwargs):
utils.validate_configuration()
utils.drop_privileges(self.daemon.conf.get('user', 'swift'))
utils.clean_up_daemon_hygiene()
utils.capture_stdio(self.logger, **kwargs)
def kill_children(*args):
self.running = False
self.logger.info('SIGTERM received')
signal.signal(signal.SIGTERM, signal.SIG_IGN)
os.killpg(0, signal.SIGTERM)
os._exit(0)
signal.signal(signal.SIGTERM, kill_children)
self.running = True
def _run_inline(self, once=False, **kwargs):
"""Run the daemon"""
self.daemon.run(once=once, **kwargs)
def run(self, once=False, **kwargs):
"""Daemonize and execute our strategy"""
self.setup(**kwargs)
try:
self._run(once=once, **kwargs)
except KeyboardInterrupt:
self.logger.notice('User quit')
finally:
self.cleanup()
self.running = False
def _fork(self, once, **kwargs):
pid = os.fork()
if pid == 0:
signal.signal(signal.SIGHUP, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.daemon.run(once, **kwargs)
self.logger.debug('Forked worker %s finished', os.getpid())
# do not return from this stack, nor execute any finally blocks
os._exit(0)
else:
self.register_worker_start(pid, kwargs)
return pid
def iter_unspawned_workers(self):
while True:
try:
per_worker_options = self.unspawned_worker_options.pop()
except IndexError:
return
yield per_worker_options
def spawned_pids(self):
return list(self.options_by_pid.keys())
def register_worker_start(self, pid, per_worker_options):
self.logger.debug('Spawned worker %s with %r', pid, per_worker_options)
self.options_by_pid[pid] = per_worker_options
def register_worker_exit(self, pid):
self.unspawned_worker_options.append(self.options_by_pid.pop(pid))
def ask_daemon_to_prepare_workers(self, once, **kwargs):
self.unspawned_worker_options = list(
self.daemon.get_worker_args(once=once, **kwargs))
def abort_workers_if_daemon_would_like(self):
if not self.daemon.is_healthy():
self.logger.debug(
'Daemon needs to change options, aborting workers')
self.cleanup()
return True
return False
def check_on_all_running_workers(self):
for p in self.spawned_pids():
try:
pid, status = os.waitpid(p, os.WNOHANG)
except OSError as err:
if err.errno not in (errno.EINTR, errno.ECHILD):
raise
self.logger.notice('Worker %s died', p)
else:
if pid == 0:
# child still running
continue
self.logger.debug('Worker %s exited', p)
self.register_worker_exit(p)
def _run(self, once, **kwargs):
self.ask_daemon_to_prepare_workers(once, **kwargs)
if not self.unspawned_worker_options:
return self._run_inline(once, **kwargs)
for per_worker_options in self.iter_unspawned_workers():
if self._fork(once, **per_worker_options) == 0:
return 0
while self.running:
if self.abort_workers_if_daemon_would_like():
self.ask_daemon_to_prepare_workers(once, **kwargs)
self.check_on_all_running_workers()
if not once:
for per_worker_options in self.iter_unspawned_workers():
if self._fork(once, **per_worker_options) == 0:
return 0
else:
if not self.spawned_pids():
self.logger.notice('Finished %s', os.getpid())
break
time.sleep(0.1)
self.daemon.post_multiprocess_run()
return 0
def cleanup(self):
for p in self.spawned_pids():
try:
os.kill(p, signal.SIGTERM)
except OSError as err:
if err.errno not in (errno.ESRCH, errno.EINTR, errno.ECHILD):
raise
self.register_worker_exit(p)
self.logger.debug('Cleaned up worker %s', p)
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
"""
Loads settings from conf, then instantiates daemon ``klass`` and runs the
daemon with the specified ``once`` kwarg. The section_name will be derived
from the daemon ``klass`` if not provided (e.g. ObjectReplicator =>
object-replicator).
:param klass: Class to instantiate, subclass of :class:`Daemon`
:param conf_file: Path to configuration file
:param section_name: Section name from conf file to load config from
:param once: Passed to daemon :meth:`Daemon.run` method
"""
# very often the config section_name is based on the class name
# the None singleton will be passed through to readconf as is
if section_name == '':
section_name = sub(r'([a-z])([A-Z])', r'\1-\2',
klass.__name__).lower()
try:
conf = utils.readconf(conf_file, section_name,
log_name=kwargs.get('log_name'))
except (ValueError, IOError) as e:
# The message will be printed to stderr
# and results in an exit code of 1.
sys.exit(e)
use_hub(utils.get_hub())
# once on the command line (i.e. daemonize=false) will override the config
once = once or not utils.config_true_value(conf.get('daemonize', 'true'))
# pre-configure logger
if 'logger' in kwargs:
logger = kwargs.pop('logger')
else:
logger = utils.get_logger(conf, conf.get('log_name', section_name),
log_to_console=kwargs.pop('verbose', False),
log_route=section_name)
# optional nice/ionice priority scheduling
utils.modify_priority(conf, logger)
# disable fallocate if desired
if utils.config_true_value(conf.get('disable_fallocate', 'no')):
utils.disable_fallocate()
# set utils.FALLOCATE_RESERVE if desired
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))
# By default, disable eventlet printing stacktraces
eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no'))
eventlet.debug.hub_exceptions(eventlet_debug)
# Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
# some platforms. This locks in reported times to UTC.
os.environ['TZ'] = 'UTC+0'
time.tzset()
logger.notice('Starting %s', os.getpid())
try:
DaemonStrategy(klass(conf), logger).run(once=once, **kwargs)
except KeyboardInterrupt:
logger.info('User quit')
logger.notice('Exited %s', os.getpid())
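# A hedged usage sketch (config path and class are illustrative): run the
# HelloDaemon sketched above once; the section name is derived from the
# class name as 'hello-daemon'.
#
#     run_daemon(HelloDaemon, '/etc/swift/hello.conf', once=True)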
| 36.621875
| 79
| 0.63572
| 1,526
| 11,719
| 4.757536
| 0.247051
| 0.025069
| 0.019835
| 0.017906
| 0.184711
| 0.1427
| 0.085399
| 0.051791
| 0.033333
| 0.033333
| 0
| 0.003415
| 0.275365
| 11,719
| 319
| 80
| 36.736677
| 0.851507
| 0.364707
| 0
| 0.252941
| 0
| 0
| 0.057796
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135294
| false
| 0.005882
| 0.052941
| 0.005882
| 0.264706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53b5ca21f061bcccc9e7720c97265d2e56f05552
| 1,305
|
py
|
Python
|
backend/api/v1/auth_module/auth_api.py
|
aroraenterprise/projecteos
|
e1fb0438af8cb59b77792523c6616c480b23a6f8
|
[
"MIT"
] | null | null | null |
backend/api/v1/auth_module/auth_api.py
|
aroraenterprise/projecteos
|
e1fb0438af8cb59b77792523c6616c480b23a6f8
|
[
"MIT"
] | null | null | null |
backend/api/v1/auth_module/auth_api.py
|
aroraenterprise/projecteos
|
e1fb0438af8cb59b77792523c6616c480b23a6f8
|
[
"MIT"
] | null | null | null |
"""
Project: flask-rest
Author: Saj Arora
Description: Handle auth endpoints such as auth/signup, auth/login
"""
from api.v1 import make_json_ok_response, SageController, SageMethod
from api.v1.fundamentals import helper
from .auth_controller import AuthController
def sage_auth_signup_function(self, resource, **kwargs):
_UserModel = resource.get_account_model()
args = helper.parse_args_for_model(_UserModel)
user = _UserModel(**args) # user has been created
user.put() # save to get a key for the user
result, params = AuthController.create_unique_for_user(user.key)
if not result: # not successful
user.key.delete()
raise params # this holds the error message
else:
return params # this holds accesskey and refresh token
def sage_auth_authenticate_function(self, resource, **kwargs):
result, params = AuthController.authenticate_client()
if not result: # not successful
raise params # this holds the error message
else:
return params # this holds the refresh token and the access token
auth_controller = {
'signup': SageController(sage_auth_signup_function, SageMethod.POST, authenticate=False),
'authenticate': SageController(sage_auth_authenticate_function, SageMethod.POST, authenticate=False)
}
| 36.25
| 104
| 0.744828
| 166
| 1,305
| 5.680723
| 0.451807
| 0.033934
| 0.063627
| 0.057264
| 0.26087
| 0.127253
| 0.127253
| 0.127253
| 0.127253
| 0.127253
| 0
| 0.001869
| 0.180077
| 1,305
| 36
| 105
| 36.25
| 0.879439
| 0.256705
| 0
| 0.333333
| 0
| 0
| 0.018828
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.125
| 0
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53b6650eb89817fbb23a4d021878f43cb942eb48
| 538
|
py
|
Python
|
QuGraphy/state.py
|
Mohamed-ShehabEldin/QuGraphy
|
c43fe7128f91e7bd383393f5ff16ff613077e8d7
|
[
"Apache-2.0"
] | null | null | null |
QuGraphy/state.py
|
Mohamed-ShehabEldin/QuGraphy
|
c43fe7128f91e7bd383393f5ff16ff613077e8d7
|
[
"Apache-2.0"
] | null | null | null |
QuGraphy/state.py
|
Mohamed-ShehabEldin/QuGraphy
|
c43fe7128f91e7bd383393f5ff16ff613077e8d7
|
[
"Apache-2.0"
] | null | null | null |
# This file contains functions related to vector states.
from .density import *  # we may use some functions and dependencies from there
def row2col(vec):
if np.ndim(vec)==1:
col=[]
for element in vec:
col.append([element])
return col
else:
return vec
def check_state(state):
state = row2col(state)  # assign the result so 1-D inputs are converted
if np.shape(state)[1] > 1:
raise Exception("invalid state, not a vector!")
if schmidt_inner(state, state) != 1:
raise Exception("invalid state, not normalized!")
| 25.619048
| 79
| 0.633829
| 74
| 538
| 4.581081
| 0.608108
| 0.023599
| 0.088496
| 0.129794
| 0.176991
| 0.176991
| 0
| 0
| 0
| 0
| 0
| 0.015228
| 0.267658
| 538
| 21
| 80
| 25.619048
| 0.845178
| 0.208178
| 0
| 0
| 0
| 0
| 0.136471
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.066667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53b8d7ac852024e1d3318cbf747bac9b0ef35d8a
| 28,857
|
py
|
Python
|
RMtools_1D/do_RMsynth_1D.py
|
lh-astro/RM-Tools
|
ac64cc41b2f696f21ee7dd001303cbad1ff71114
|
[
"MIT"
] | null | null | null |
RMtools_1D/do_RMsynth_1D.py
|
lh-astro/RM-Tools
|
ac64cc41b2f696f21ee7dd001303cbad1ff71114
|
[
"MIT"
] | null | null | null |
RMtools_1D/do_RMsynth_1D.py
|
lh-astro/RM-Tools
|
ac64cc41b2f696f21ee7dd001303cbad1ff71114
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#=============================================================================#
# #
# NAME: do_RMsynth_1D.py #
# #
# PURPOSE: API for runnning RM-synthesis on an ASCII Stokes I, Q & U spectrum.#
# #
# MODIFIED: 16-Nov-2018 by J. West #
# MODIFIED: 23-October-2019 by A. Thomson #
# #
#=============================================================================#
# #
# The MIT License (MIT) #
# #
# Copyright (c) 2015 - 2018 Cormac R. Purcell #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
#=============================================================================#
import sys
import os
import time
import traceback
import json
import math as m
import numpy as np
import matplotlib.pyplot as plt
from RMutils.util_RM import do_rmsynth
from RMutils.util_RM import do_rmsynth_planes
from RMutils.util_RM import get_rmsf_planes
from RMutils.util_RM import measure_FDF_parms
from RMutils.util_RM import measure_qu_complexity
from RMutils.util_RM import measure_fdf_complexity
from RMutils.util_misc import nanmedian
from RMutils.util_misc import toscalar
from RMutils.util_misc import create_frac_spectra
from RMutils.util_misc import poly5
from RMutils.util_misc import MAD
from RMutils.util_plotTk import plot_Ipqu_spectra_fig
from RMutils.util_plotTk import plot_rmsf_fdf_fig
from RMutils.util_plotTk import plot_complexity_fig
from RMutils.util_plotTk import CustomNavbar
from RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax
if sys.version_info.major == 2:
print('RM-tools will no longer run with Python 2! Please use Python 3.')
exit()
C = 2.997924538e8 # Speed of light [m/s]
#-----------------------------------------------------------------------------#
def run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None,
nSamples=10.0, weightType="variance", fitRMSF=False,
noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False,
debug=False, verbose=False, log=print,units='Jy/beam', prefixOut="prefixOut", args=None):
"""Run RM synthesis on 1D data.
Args:
data (list): Contains frequency and polarization data as either:
[freq_Hz, I, Q, U, dI, dQ, dU]
freq_Hz (array_like): Frequency of each channel in Hz.
I (array_like): Stokes I intensity in each channel.
Q (array_like): Stokes Q intensity in each channel.
U (array_like): Stokes U intensity in each channel.
dI (array_like): Error in Stokes I intensity in each channel.
dQ (array_like): Error in Stokes Q intensity in each channel.
dU (array_like): Error in Stokes U intensity in each channel.
or
[freq_Hz, q, u, dq, du]
freq_Hz (array_like): Frequency of each channel in Hz.
q (array_like): Fractional Stokes Q intensity (Q/I) in each channel.
u (array_like): Fractional Stokes U intensity (U/I) in each channel.
dq (array_like): Error in fractional Stokes Q intensity in each channel.
du (array_like): Error in fractional Stokes U intensity in each channel.
Kwargs:
polyOrd (int): Order of polynomial to fit to Stokes I spectrum.
phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2).
dPhi_radm2 (float): Faraday depth channel size (rad/m^2).
nSamples (float): Number of samples across the RMSF.
weightType (str): Can be "variance" or "uniform"
"variance" -- Weight by uncertainty in Q and U.
"uniform" -- Weight uniformly (i.e. with 1s)
fitRMSF (bool): Fit a Gaussian to the RMSF?
noStokesI (bool): True if the data does not include Stokes I.
phiNoise_radm2 (float): ????
nBits (int): Precision of floating point numbers.
showPlots (bool): Show plots?
debug (bool): Turn on debugging messages & plots?
verbose (bool): Verbosity.
log (function): Which logging function to use.
units (str): Units of data.
Returns:
mDict (dict): Summary of RM synthesis results.
aDict (dict): Data output by RM synthesis.
"""
# Sanity checks
if not os.path.exists(args.dataFile[0]):
print("File does not exist: '%s'." % args.dataFile[0])
sys.exit()
prefixOut, ext = os.path.splitext(args.dataFile[0])
# Default data types
dtFloat = "float" + str(nBits)
dtComplex = "complex" + str(2*nBits)
# freq_Hz, I, Q, U, dI, dQ, dU
try:
if verbose: log("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ')
(freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data
if verbose: log("... success.")
except Exception:
if verbose: log("...failed.")
# freq_Hz, q, u, dq, du
try:
if verbose: log("> Trying [freq_Hz, q, u, dq, du]", end=' ')
(freqArr_Hz, QArr, UArr, dQArr, dUArr) = data
if verbose: log("... success.")
noStokesI = True
except Exception:
if verbose: log("...failed.")
if debug:
log(traceback.format_exc())
sys.exit()
if verbose: log("Successfully read in the Stokes spectra.")
# If no Stokes I present, create a dummy spectrum = unity
if noStokesI:
if verbose: log("Warn: no Stokes I data in use.")
IArr = np.ones_like(QArr)
dIArr = np.zeros_like(QArr)
# Convert to GHz for convenience
freqArr_GHz = freqArr_Hz / 1e9
dQUArr = (dQArr + dUArr)/2.0
# Fit the Stokes I spectrum and create the fractional spectra
IModArr, qArr, uArr, dqArr, duArr, fitDict = \
create_frac_spectra(freqArr = freqArr_GHz,
IArr = IArr,
QArr = QArr,
UArr = UArr,
dIArr = dIArr,
dQArr = dQArr,
dUArr = dUArr,
polyOrd = polyOrd,
verbose = True,
debug = debug)
# Plot the data and the Stokes I model fit
if verbose: log("Plotting the input data and spectral index fit.")
freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
IModHirArr = poly5(fitDict["p"])(freqHirArr_Hz/1e9)
specFig = plt.figure(figsize=(12.0, 8))
plot_Ipqu_spectra_fig(freqArr_Hz = freqArr_Hz,
IArr = IArr,
qArr = qArr,
uArr = uArr,
dIArr = dIArr,
dqArr = dqArr,
duArr = duArr,
freqHirArr_Hz = freqHirArr_Hz,
IModArr = IModHirArr,
fig = specFig,
units = units)
# Use the custom navigation toolbar (does not work on Mac OS X)
# try:
# specFig.canvas.toolbar.pack_forget()
# CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
# except Exception:
# pass
# Display the figure
# if not plt.isinteractive():
# specFig.show()
# DEBUG (plot the Q, U and average RMS spectrum)
if debug:
rmsFig = plt.figure(figsize=(12.0, 8))
ax = rmsFig.add_subplot(111)
ax.plot(freqArr_Hz/1e9, dQUArr, marker='o', color='k', lw=0.5,
label='rms <QU>')
ax.plot(freqArr_Hz/1e9, dQArr, marker='o', color='b', lw=0.5,
label='rms Q')
ax.plot(freqArr_Hz/1e9, dUArr, marker='o', color='r', lw=0.5,
label='rms U')
xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9
ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05,
np.max(freqArr_Hz)/1e9 + xRange*0.05)
ax.set_xlabel('$\\nu$ (GHz)')
ax.set_ylabel('RMS '+units)
ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra")
# rmsFig.show()
#-------------------------------------------------------------------------#
# Calculate some wavelength parameters
lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0)
dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))
lambdaSqRange_m2 = ( np.nanmax(lambdaSqArr_m2) -
np.nanmin(lambdaSqArr_m2) )
dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))
dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))
# Set the Faraday depth range
fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2
if dPhi_radm2 is None:
dPhi_radm2 = fwhmRMSF_radm2 / nSamples
if phiMax_radm2 is None:
phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2
phiMax_radm2 = max(phiMax_radm2, fwhmRMSF_radm2*10.) # Force the minimum phiMax to 10 FWHM
# Faraday depth sampling. Zero always centred on middle channel
nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0)
startPhi_radm2 = - (nChanRM-1.0) * dPhi_radm2 / 2.0
stopPhi_radm2 = + (nChanRM-1.0) * dPhi_radm2 / 2.0
phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM)
phiArr_radm2 = phiArr_radm2.astype(dtFloat)
if verbose: log("PhiArr = %.2f to %.2f by %.2f (%d chans)." % (phiArr_radm2[0],
phiArr_radm2[-1],
float(dPhi_radm2),
nChanRM))
# Calculate the weighting as 1/sigma^2 or all 1s (uniform)
if weightType=="variance":
weightArr = 1.0 / np.power(dQUArr, 2.0)
else:
weightType = "uniform"
weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)
if verbose: log("Weight type is '%s'." % weightType)
startTime = time.time()
# Perform RM-synthesis on the spectrum
dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ = qArr,
dataU = uArr,
lambdaSqArr_m2 = lambdaSqArr_m2,
phiArr_radm2 = phiArr_radm2,
weightArr = weightArr,
nBits = nBits,
verbose = verbose,
log = log)
# Calculate the Rotation Measure Spread Function
RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \
get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2,
phiArr_radm2 = phiArr_radm2,
weightArr = weightArr,
mskArr = ~np.isfinite(qArr),
lam0Sq_m2 = lam0Sq_m2,
double = True,
fitRMSF = fitRMSF,
fitRMSFreal = False,
nBits = nBits,
verbose = verbose,
log = log)
fwhmRMSF = float(fwhmRMSFArr)
# ALTERNATE RM-SYNTHESIS CODE --------------------------------------------#
#dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \
# do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr)
#-------------------------------------------------------------------------#
endTime = time.time()
cputime = (endTime - startTime)
if verbose: log("> RM-synthesis completed in %.2f seconds." % cputime)
# Determine the Stokes I value at lam0Sq_m2 from the Stokes I model
# Multiply the dirty FDF by Ifreq0 to recover the PI
freq0_Hz = C / m.sqrt(lam0Sq_m2)
Ifreq0 = poly5(fitDict["p"])(freq0_Hz/1e9)
dirtyFDF *= (Ifreq0) # FDF is in fracpol units initially, convert back to flux
# Calculate the theoretical noise in the FDF. NOTE: this older formula is only strictly correct for variance weights!
weightArr = np.where(np.isnan(weightArr), 0.0, weightArr)
dFDFth = np.sqrt( np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) / (np.sum(weightArr))**2 )
# Measure the parameters of the dirty FDF
# Use the theoretical noise to calculate uncertainties
mDict = measure_FDF_parms(FDF = dirtyFDF,
phiArr = phiArr_radm2,
fwhmRMSF = fwhmRMSF,
dFDF = dFDFth,
lamSqArr_m2 = lambdaSqArr_m2,
lam0Sq = lam0Sq_m2)
mDict["Ifreq0"] = toscalar(Ifreq0)
mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]])
mDict["IfitStat"] = fitDict["fitStatus"]
mDict["IfitChiSqRed"] = fitDict["chiSqRed"]
mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2)
mDict["freq0_Hz"] = toscalar(freq0_Hz)
mDict["fwhmRMSF"] = toscalar(fwhmRMSF)
mDict["dQU"] = toscalar(nanmedian(dQUArr))
mDict["dFDFth"] = toscalar(dFDFth)
mDict["units"] = units
if fitDict["fitStatus"] >= 128:
log("WARNING: Stokes I model contains negative values!")
elif fitDict["fitStatus"] >= 64:
log("Caution: Stokes I model has low signal-to-noise.")
#Add information on nature of channels:
good_channels=np.where(np.logical_and(weightArr != 0,np.isfinite(qArr)))[0]
mDict["min_freq"]=float(np.min(freqArr_Hz[good_channels]))
mDict["max_freq"]=float(np.max(freqArr_Hz[good_channels]))
mDict["N_channels"]=good_channels.size
mDict["median_channel_width"]=float(np.median(np.diff(freqArr_Hz)))
# Measure the complexity of the q and u spectra
mDict["fracPol"] = mDict["ampPeakPIfit"]/(Ifreq0)
mD, pD = measure_qu_complexity(freqArr_Hz = freqArr_Hz,
qArr = qArr,
uArr = uArr,
dqArr = dqArr,
duArr = duArr,
fracPol = mDict["fracPol"],
psi0_deg = mDict["polAngle0Fit_deg"],
RM_radm2 = mDict["phiPeakPIfit_rm2"])
mDict.update(mD)
# Debugging plots for spectral complexity measure
if debug:
tmpFig = plot_complexity_fig(xArr=pD["xArrQ"],
qArr=pD["yArrQ"],
dqArr=pD["dyArrQ"],
sigmaAddqArr=pD["sigmaAddArrQ"],
chiSqRedqArr=pD["chiSqRedArrQ"],
probqArr=pD["probArrQ"],
uArr=pD["yArrU"],
duArr=pD["dyArrU"],
sigmaAdduArr=pD["sigmaAddArrU"],
chiSqReduArr=pD["chiSqRedArrU"],
probuArr=pD["probArrU"],
mDict=mDict)
if args.saveOutput:  # bare saveOutput is the module-level function, always truthy
if verbose: print("Saving debug plots:")
outFilePlot = prefixOut + ".debug-plots.pdf"
if verbose: print("> " + outFilePlot)
tmpFig.savefig(outFilePlot, bbox_inches = 'tight')
else:
tmpFig.show()
#add array dictionary
aDict = dict()
aDict["phiArr_radm2"] = phiArr_radm2
aDict["phi2Arr_radm2"] = phi2Arr_radm2
aDict["RMSFArr"] = RMSFArr
aDict["freqArr_Hz"] = freqArr_Hz
aDict["weightArr"]=weightArr
aDict["dirtyFDF"]=dirtyFDF
if verbose:
# Print the results to the screen
log()
log('-'*80)
log('RESULTS:\n')
log('FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"]))
log('Pol Angle = %.4g (+/-%.4g) deg' % (mDict["polAngleFit_deg"],
mDict["dPolAngleFit_deg"]))
log('Pol Angle 0 = %.4g (+/-%.4g) deg' % (mDict["polAngle0Fit_deg"],
mDict["dPolAngle0Fit_deg"]))
log('Peak FD = %.4g (+/-%.4g) rad/m^2' % (mDict["phiPeakPIfit_rm2"],
mDict["dPhiPeakPIfit_rm2"]))
log('freq0_GHz = %.4g ' % (mDict["freq0_Hz"]/1e9))
log('I freq0 = %.4g %s' % (mDict["Ifreq0"],units))
log('Peak PI = %.4g (+/-%.4g) %s' % (mDict["ampPeakPIfit"],
mDict["dAmpPeakPIfit"],units))
log('QU Noise = %.4g %s' % (mDict["dQU"],units))
log('FDF Noise (theory) = %.4g %s' % (mDict["dFDFth"],units))
log('FDF Noise (Corrected MAD) = %.4g %s' % (mDict["dFDFcorMAD"],units))
log('FDF Noise (rms) = %.4g %s' % (mDict["dFDFrms"],units))
log('FDF SNR = %.4g ' % (mDict["snrPIfit"]))
log('sigma_add(q) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddQ"],
mDict["dSigmaAddPlusQ"],
mDict["dSigmaAddMinusQ"]))
log('sigma_add(u) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddU"],
mDict["dSigmaAddPlusU"],
mDict["dSigmaAddMinusU"]))
log()
log('-'*80)
# Plot the RM Spread Function and dirty FDF
if showPlots or args.saveOutput:
fdfFig = plt.figure(figsize=(12.0, 8))
plot_rmsf_fdf_fig(phiArr = phiArr_radm2,
FDF = dirtyFDF,
phi2Arr = phi2Arr_radm2,
RMSFArr = RMSFArr,
fwhmRMSF = fwhmRMSF,
vLine = mDict["phiPeakPIfit_rm2"],
fig = fdfFig,
units = units)
# Use the custom navigation toolbar
# try:
# fdfFig.canvas.toolbar.pack_forget()
# CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window)
# except Exception:
# pass
# Display the figure
# fdfFig.show()
# Pause if plotting enabled
if showPlots:
plt.show()
elif args.saveOutput or debug:
if verbose: print("Saving RMSF and dirty FDF plot:")
outFilePlot = prefixOut + ".RMSF-dirtyFDF-plots.pdf"
if verbose: print("> " + outFilePlot)
fdfFig.savefig(outFilePlot, bbox_inches = 'tight')
# #if verbose: print "Press <RETURN> to exit ...",
# input()
return mDict, aDict
def readFile(dataFile, nBits, verbose=True, debug=False):
"""
Read the I, Q & U data from the ASCII file.
Inputs:
dataFile (str): relative or absolute path to file.
nBits (int): number of bits to store the data as.
verbose (bool): Print verbose messages to terminal?
debug (bool): Print full traceback in case of failure?
Returns:
data (list of arrays): List containing the columns found in the file.
If Stokes I is present, this will be [freq_Hz, I, Q, U, dI, dQ, dU],
else [freq_Hz, q, u, dq, du].
"""
# Default data types
dtFloat = "float" + str(nBits)
dtComplex = "complex" + str(2*nBits)
# Output prefix is derived from the input file name
# Read the data-file. Format=space-delimited, comments="#".
if verbose: print("Reading the data file '%s':" % dataFile)
# freq_Hz, I, Q, U, dI, dQ, dU
try:
if verbose: print("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ')
(freqArr_Hz, IArr, QArr, UArr,
dIArr, dQArr, dUArr) = \
np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
if verbose: print("... success.")
data=[freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr]
except Exception:
if verbose: print("...failed.")
# freq_Hz, q, u, dq, du
try:
if verbose: print("> Trying [freq_Hz, q, u, dq, du]", end=' ')
(freqArr_Hz, QArr, UArr, dQArr, dUArr) = \
np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
if verbose: print("... success.")
data=[freqArr_Hz, QArr, UArr, dQArr, dUArr]
noStokesI = True
except Exception:
if verbose: print("...failed.")
if debug:
print(traceback.format_exc())
sys.exit()
if verbose: print("Successfully read in the Stokes spectra.")
return data
def saveOutput(outdict, arrdict, prefixOut, verbose):
# Save the dirty FDF, RMSF and weight array to ASCII files
if verbose: print("Saving the dirty FDF, RMSF weight arrays to ASCII files.")
outFile = prefixOut + "_FDFdirty.dat"
if verbose:
print("> %s" % outFile)
np.savetxt(outFile, list(zip(arrdict["phiArr_radm2"], arrdict["dirtyFDF"].real, arrdict["dirtyFDF"].imag)))
outFile = prefixOut + "_RMSF.dat"
if verbose:
print("> %s" % outFile)
np.savetxt(outFile, list(zip(arrdict["phi2Arr_radm2"], arrdict["RMSFArr"].real, arrdict["RMSFArr"].imag)))
outFile = prefixOut + "_weight.dat"
if verbose:
print("> %s" % outFile)
np.savetxt(outFile, list(zip(arrdict["freqArr_Hz"], arrdict["weightArr"])))
# Save the measurements to a "key=value" text file
outFile = prefixOut + "_RMsynth.dat"
if verbose:
print("Saving the measurements on the FDF in 'key=val' and JSON formats.")
print("> %s" % outFile)
FH = open(outFile, "w")
for k, v in outdict.items():
FH.write("%s=%s\n" % (k, v))
FH.close()
outFile = prefixOut + "_RMsynth.json"
if verbose:
print("> %s" % outFile)
json.dump(dict(outdict), open(outFile, "w"))
#-----------------------------------------------------------------------------#
def main():
"""
Start the function to perform RM-synthesis if called from the command line.
"""
import argparse
# Help string to be shown using the -h option
descStr = """
Run RM-synthesis on Stokes I, Q and U spectra (1D) stored in an ASCII
file. The Stokes I spectrum is first fit with a polynomial and the
resulting model used to create fractional q = Q/I and u = U/I spectra.
The ASCII file should contain the following columns, in a space-separated format:
[freq_Hz, I, Q, U, I_err, Q_err, U_err]
OR
[freq_Hz, Q, U, Q_err, U_err]
To get outputs, one or more of the following flags must be set: -S, -p, -v.
"""
epilog_text="""
Outputs with -S flag:
_FDFdirty.dat: Dirty FDF/RM Spectrum [Phi, Q, U]
_RMSF.dat: Computed RMSF [Phi, Q, U]
_RMsynth.dat: list of derived parameters for RM spectrum
(approximately equivalent to -v flag output)
_RMsynth.json: dictionary of derived parameters for RM spectrum
_weight.dat: Calculated channel weights [freq_Hz, weight]
"""
# Parse the command line options
parser = argparse.ArgumentParser(description=descStr,epilog=epilog_text,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("dataFile", metavar="dataFile.dat", nargs=1,
help="ASCII file containing Stokes spectra & errors.")
parser.add_argument("-t", dest="fitRMSF", action="store_true",
help="fit a Gaussian to the RMSF [False]")
parser.add_argument("-l", dest="phiMax_radm2", type=float, default=None,
help="absolute max Faraday depth sampled [Auto].")
parser.add_argument("-d", dest="dPhi_radm2", type=float, default=None,
help="width of Faraday depth channel [Auto].\n(overrides -s NSAMPLES flag)")
parser.add_argument("-s", dest="nSamples", type=float, default=10,
help="number of samples across the RMSF lobe [10].")
parser.add_argument("-w", dest="weightType", default="variance",
help="weighting [inverse variance] or 'uniform' (all 1s).")
parser.add_argument("-o", dest="polyOrd", type=int, default=2,
help="polynomial order to fit to I spectrum [2].")
parser.add_argument("-i", dest="noStokesI", action="store_true",
help="ignore the Stokes I spectrum [False].")
parser.add_argument("-b", dest="bit64", action="store_true",
help="use 64-bit floating point precision [False (uses 32-bit)]")
parser.add_argument("-p", dest="showPlots", action="store_true",
help="show the plots [False].")
parser.add_argument("-v", dest="verbose", action="store_true",
help="verbose output [False].")
parser.add_argument("-S", dest="saveOutput", action="store_true",
help="save the arrays and plots [False].")
parser.add_argument("-D", dest="debug", action="store_true",
help="turn on debugging messages & plots [False].")
parser.add_argument("-U", dest="units", type=str, default="Jy/beam",
help="Intensity units of the data. [Jy/beam]")
args = parser.parse_args()
# Sanity checks
if not os.path.exists(args.dataFile[0]):
print("File does not exist: '%s'." % args.dataFile[0])
sys.exit()
prefixOut, ext = os.path.splitext(args.dataFile[0])
dataDir, dummy = os.path.split(args.dataFile[0])
# Set the floating point precision
nBits = 32
if args.bit64:
nBits = 64
verbose=args.verbose
data = readFile(args.dataFile[0],nBits, verbose=verbose, debug=args.debug)
# Run RM-synthesis on the spectra
mDict, aDict = run_rmsynth(data = data,
polyOrd = args.polyOrd,
phiMax_radm2 = args.phiMax_radm2,
dPhi_radm2 = args.dPhi_radm2,
nSamples = args.nSamples,
weightType = args.weightType,
fitRMSF = args.fitRMSF,
noStokesI = args.noStokesI,
nBits = nBits,
showPlots = args.showPlots,
debug = args.debug,
verbose = verbose,
units = args.units,
prefixOut = prefixOut,
args = args,
)
if args.saveOutput:
saveOutput(mDict, aDict, prefixOut, verbose)
#-----------------------------------------------------------------------------#
if __name__ == "__main__":
main()
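# Example invocation (file name illustrative), using the flags defined above:
#
#     python do_RMsynth_1D.py spectrum.dat -S -v
#
# reads the Stokes spectra from spectrum.dat, prints the derived parameters
# (-v) and saves the _FDFdirty.dat, _RMSF.dat, _weight.dat and _RMsynth.*
# outputs (-S).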
| 45.159624
| 111
| 0.524517
| 3,171
| 28,857
| 4.678966
| 0.208767
| 0.019411
| 0.018872
| 0.011862
| 0.270742
| 0.220395
| 0.145582
| 0.116061
| 0.10494
| 0.096583
| 0
| 0.017803
| 0.349863
| 28,857
| 638
| 112
| 45.230408
| 0.77304
| 0.278026
| 0
| 0.216418
| 0
| 0.002488
| 0.194327
| 0.001178
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00995
| false
| 0
| 0.062189
| 0
| 0.077114
| 0.059701
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53b93c021c611ea7b35c2a4e8768e23aee0fabe0
| 1,449
|
py
|
Python
|
netket/utils/jax.py
|
gpescia/MyNetKet
|
958510966a5870d9d491de0628903cf1fc210921
|
[
"Apache-2.0"
] | 1
|
2022-01-31T15:19:09.000Z
|
2022-01-31T15:19:09.000Z
|
netket/utils/jax.py
|
gpescia/MyNetKet
|
958510966a5870d9d491de0628903cf1fc210921
|
[
"Apache-2.0"
] | 26
|
2021-08-06T15:27:57.000Z
|
2022-03-30T16:55:18.000Z
|
netket/utils/jax.py
|
gpescia/MyNetKet
|
958510966a5870d9d491de0628903cf1fc210921
|
[
"Apache-2.0"
] | 1
|
2021-04-25T15:47:32.000Z
|
2021-04-25T15:47:32.000Z
|
# Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
from . import struct
def get_afun_if_module(mod_or_fun) -> Callable:
"""Returns the apply function if it's a module. Does nothing otherwise."""
if hasattr(mod_or_fun, "apply"):
return mod_or_fun.apply
else:
return mod_or_fun
@struct.dataclass
class WrappedApplyFun:
"""Wraps a callable to be a module-like object with the method `apply`."""
apply: Callable
"""The wrapped callable."""
def __repr__(self):
return f"{type(self).__name__}(apply={self.apply}, hash={hash(self)})"
def wrap_afun(mod_or_fun):
"""Wraps a callable to be a module-like object with the method `apply`.
Does nothing if it already has an apply method.
"""
if hasattr(mod_or_fun, "apply"):
return mod_or_fun
else:
return WrappedApplyFun(mod_or_fun)
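# A minimal usage sketch of the helpers above:
#
#     f = lambda params, x: x           # a bare apply function
#     wrapped = wrap_afun(f)            # -> WrappedApplyFun(apply=f)
#     get_afun_if_module(wrapped) is f  # True: unwraps to the callable
#     wrap_afun(wrapped) is wrapped     # True: already has .apply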
| 30.829787
| 78
| 0.712215
| 218
| 1,449
| 4.605505
| 0.490826
| 0.039841
| 0.063745
| 0.038845
| 0.177291
| 0.177291
| 0.177291
| 0.177291
| 0.177291
| 0.177291
| 0
| 0.006897
| 0.199448
| 1,449
| 46
| 79
| 31.5
| 0.858621
| 0.574879
| 0
| 0.352941
| 0
| 0
| 0.126812
| 0.074275
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.117647
| 0.058824
| 0.705882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53b95578f3b9aa9d904006c7f7edb3a1fb45bd48
| 10,933
|
py
|
Python
|
geetools/batch/featurecollection.py
|
Kungreye/gee_tools
|
d0712ac78410250c41503ca08075f536d58d2ef3
|
[
"MIT"
] | null | null | null |
geetools/batch/featurecollection.py
|
Kungreye/gee_tools
|
d0712ac78410250c41503ca08075f536d58d2ef3
|
[
"MIT"
] | null | null | null |
geetools/batch/featurecollection.py
|
Kungreye/gee_tools
|
d0712ac78410250c41503ca08075f536d58d2ef3
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import ee
from . import utils
import json
import csv
from .. import tools
def fromShapefile(filename, crs=None, start=None, end=None):
""" Convert an ESRI file (.shp and .dbf must be present) to a
ee.FeatureCollection
At the moment only works for shapes with less than 1000 records and doesn't
handle complex shapes.
:param filename: the name of the filename. If the shape is not in the
same path than the script, specify a path instead.
:type filename: str
:param start:
:return: the FeatureCollection
:rtype: ee.FeatureCollection
"""
import shapefile
wgs84 = ee.Projection('EPSG:4326')
# read the filename
reader = shapefile.Reader(filename)
fields = reader.fields[1:]
field_names = [field[0] for field in fields]
field_types = [field[1] for field in fields]
types = dict(zip(field_names, field_types))
features = []
projection = utils.getProjection(filename) if not crs else crs
# catch a string with format "EPSG:XXX"
if isinstance(projection, str):
if 'EPSG:' in projection:
projection = projection.split(':')[1]
projection = 'EPSG:{}'.format(projection)
# filter records with start and end
start = start if start else 0
if not end:
records = reader.shapeRecords()
end = len(records)
else:
end = end + 1
if (end-start)>1000:
msg = "Can't process more than 1000 records at a time. Found {}"
raise ValueError(msg.format(end-start))
for i in range(start, end):
# atr = dict(zip(field_names, sr.record))
sr = reader.shapeRecord(i)
atr = {}
for fld, rec in zip(field_names, sr.record):
fld_type = types[fld]
if fld_type == 'D':
value = ee.Date(rec.isoformat()).millis().getInfo()
elif fld_type in ['C', 'N', 'F']:
value = rec
else:
continue
atr[fld] = value
geom = sr.shape.__geo_interface__
if projection is not None:
geometry = ee.Geometry(geom, projection) \
.transform(wgs84, 1)
else:
geometry = ee.Geometry(geom)
feat = ee.Feature(geometry, atr)
features.append(feat)
return ee.FeatureCollection(features)
def fromGeoJSON(filename=None, data=None, crs=None):
""" Create a list of Features from a GeoJSON file. Return a python tuple
with ee.Feature inside. This is due to failing when attempting to create a
FeatureCollection (Broken Pipe ERROR) out of the list. You can try creating
it yourself casting the result of this function to a ee.List or using it
directly as a FeatureCollection argument.
:param filename: the name of the file to load
:type filename: str
:param crs: a coordinate reference system in EPSG format. If not specified
it will try to get it from the geoJSON, and if not there it will rise
an error
:type: crs: str
:return: a tuple of features.
"""
if filename:
with open(filename, 'r') as geoj:
content = geoj.read()
geodict = json.loads(content)
else:
geodict = data
features = []
# Get crs from GeoJSON
if not crs:
filecrs = geodict.get('crs')
if filecrs:
name = filecrs.get('properties').get('name')
splitcrs = name.split(':')
cleancrs = [part for part in splitcrs if part]
try:
if cleancrs[-1] == 'CRS84':
crs = 'EPSG:4326'
elif cleancrs[-2] == 'EPSG':
crs = '{}:{}'.format(cleancrs[-2], cleancrs[-1])
else:
raise ValueError('{} not recognized'.format(name))
except IndexError:
raise ValueError('{} not recognized'.format(name))
else:
crs = 'EPSG:4326'
for n, feat in enumerate(geodict.get('features')):
properties = feat.get('properties')
geom = feat.get('geometry')
ty = geom.get('type')
coords = geom.get('coordinates')
if ty == 'GeometryCollection':
ee_geom = utils.GEOMETRY_TYPES.get(ty)(geom, opt_proj=crs)
else:
if ty == 'Polygon':
coords = utils.removeZ(coords) if utils.hasZ(coords) else coords
ee_geom = utils.GEOMETRY_TYPES.get(ty)(coords, proj=ee.Projection(crs))
ee_feat = ee.feature.Feature(ee_geom, properties)
features.append(ee_feat)
return tuple(features)
def fromKML(filename=None, data=None, crs=None, encoding=None):
""" Create a list of Features from a KML file. Return a python tuple
with ee.Feature inside. This is due to failing when attempting to create a
FeatureCollection (Broken Pipe ERROR) out of the list. You can try creating
it yourself casting the result of this function to a ee.List or using it
directly as a FeatureCollection argument.
:param filename: the name of the file to load
:type filename: str
:param crs: a coordinate reference system in EPSG format. If not specified
it will try to get it from the geoJSON, and if not there it will rise
an error
:type: crs: str
:return: a tuple of features.
"""
geojsondict = utils.kmlToGeoJsonDict(filename, data, encoding)
features = geojsondict['features']
for feat in features:
# remove styleUrl
prop = feat['properties']
if 'styleUrl' in prop:
prop.pop('styleUrl')
# remove Z value if needed
geom = feat['geometry']
ty = geom['type']
if ty == 'GeometryCollection':
geometries = geom['geometries']
for g in geometries:
c = g['coordinates']
utils.removeZ(c)
else:
coords = geom['coordinates']
utils.removeZ(coords)
return fromGeoJSON(data=geojsondict, crs=crs)
def toDict(collection, split_at=4000):
""" Get the FeatureCollection as a dict object """
size = collection.size()
condition = size.gte(4999)
def greater():
size = collection.size()
seq = tools.ee_list.sequence(0, size, split_at)
limits = ee.List.zip(seq.slice(1), seq)
def over_limits(n):
n = ee.List(n)
ini = ee.Number(n.get(0))
end = ee.Number(n.get(1))
return ee.FeatureCollection(collection.toList(ini, end))
return limits.map(over_limits)
collections = ee.List(
ee.Algorithms.If(condition,
greater(),
ee.List([collection])))
collections_size = collections.size().getInfo()
col = ee.FeatureCollection(collections.get(0))
content = col.getInfo()
feats = content['features']
for i in range(1, collections_size):  # collection 0 was already consumed above
c = ee.FeatureCollection(collections.get(i))
content_c = c.getInfo()
feats_c = content_c['features']
feats = feats + feats_c
content['features'] = feats
return content
def toGeoJSON(collection, name, path=None, split_at=4000):
""" Export a FeatureCollection to a GeoJSON file
:param collection: The collection to export
:type collection: ee.FeatureCollection
:param name: name of the resulting file
:type name: str
:param path: The path where to save the file. If None, will be saved
in the current folder
:type path: str
:param split_at: limit to avoid an EE Exception
:type split_at: int
:return: A GeoJSON (.geojson) file.
:rtype: file
"""
import json
import os
if not path:
path = os.getcwd()
# name
fname = name if name.endswith('.geojson') else name + '.geojson'
content = toDict(collection, split_at)
with open(os.path.join(path, fname), 'w') as thefile:
thefile.write(json.dumps(content))
return thefile
def toCSV(collection, filename, split_at=4000):
""" Alternative to download a FeatureCollection as a CSV """
d = toDict(collection, split_at)
fields = list(d['columns'].keys())
fields.append('geometry')
features = d['features']
ext = filename[-4:]
if ext != '.csv':
filename += '.csv'
with open(filename, 'w') as thecsv:
writer = csv.DictWriter(thecsv, fields)
writer.writeheader()
# write rows
for feature in features:
properties = feature['properties']
fid = feature['id']
geom = feature['geometry']['type']
# match fields
properties['system:index'] = fid
properties['geometry'] = geom
# write row
writer.writerow(properties)
return thecsv
def toLocal(collection, filename, filetype=None, selectors=None, path=None):
""" Download a FeatureCollection to a local file a CSV or geoJSON file.
This uses a different method than `toGeoJSON` and `toCSV`
:param filetype: The filetype of download, either CSV or JSON.
Defaults to CSV.
:param selectors: The selectors that should be used to determine which
attributes will be downloaded.
:param filename: The name of the file to be downloaded
"""
if not filetype:
filetype = 'CSV'
url = collection.getDownloadURL(filetype, selectors, filename)
thefile = utils.downloadFile(url, filename, filetype, path)
return thefile
def toAsset(table, assetPath, name=None, create=True, verbose=False, **kwargs):
""" This function can create folders and ImageCollections on the fly.
The rest is the same to Export.image.toAsset. You can pass the same
params as the original function
:param table: the feature collection to upload
:type table: ee.FeatureCollection
:param assetPath: path to upload the image (only PATH, without
filename)
:type assetPath: str
:param name: filename for the image (AssetID will be assetPath + name)
:type name: str
:return: the tasks
:rtype: ee.batch.Task
"""
# Check if the user is specified in the asset path
is_user = (assetPath.split('/')[0] == 'users')
if not is_user:
user = ee.batch.data.getAssetRoots()[0]['id']
assetPath = "{}/{}".format(user, assetPath)
if create:
# Recursively create the path
path2create = assetPath # '/'.join(assetPath.split('/')[:-1])
utils.createAssets([path2create], 'Folder', True)
# Asset ID (Path + name)
assetId = '/'.join([assetPath, name])
# Description
description = utils.matchDescription(name)
# Init task
task = ee.batch.Export.table.toAsset(table, assetId=assetId,
description=description, **kwargs)
task.start()
if verbose:
print('Exporting {} to {}'.format(name, assetPath))
return task
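A hedged end-to-end sketch of the loaders and exporters above; it assumes an authenticated Earth Engine session, and 'my_table.geojson' is an illustrative filename, not from the source:
import ee
ee.Initialize()  # requires prior Earth Engine authentication
feats = fromGeoJSON('my_table.geojson')   # tuple of ee.Feature
fc = ee.FeatureCollection(list(feats))    # cast manually, per the docstring advice
toGeoJSON(fc, 'exported', path='/tmp')    # writes /tmp/exported.geojson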
| 32.346154
| 83
| 0.611269
| 1,364
| 10,933
| 4.869501
| 0.232405
| 0.007528
| 0.006775
| 0.012045
| 0.18368
| 0.177356
| 0.154622
| 0.14589
| 0.132189
| 0.132189
| 0
| 0.009228
| 0.286381
| 10,933
| 338
| 84
| 32.346154
| 0.842092
| 0.30815
| 0
| 0.11828
| 0
| 0
| 0.06733
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053763
| false
| 0
| 0.043011
| 0
| 0.150538
| 0.005376
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53bdcb0790280882aedd07e5cb2cef0159140f96
| 7,236
|
py
|
Python
|
backend/chart/application/service/employees.py
|
toshi-click/chart_app
|
10577d7835554a93688ae0c58ecb25fbe2925bec
|
[
"BSD-3-Clause"
] | null | null | null |
backend/chart/application/service/employees.py
|
toshi-click/chart_app
|
10577d7835554a93688ae0c58ecb25fbe2925bec
|
[
"BSD-3-Clause"
] | 7
|
2020-10-25T05:34:54.000Z
|
2020-12-02T11:31:44.000Z
|
backend/chart/application/service/employees.py
|
toshi-click/chart_app
|
10577d7835554a93688ae0c58ecb25fbe2925bec
|
[
"BSD-3-Clause"
] | 1
|
2021-04-30T16:51:43.000Z
|
2021-04-30T16:51:43.000Z
|
import logging
from django.db import transaction, connection
from django.utils import timezone
from django.utils.timezone import localtime
from chart.application.enums.department_type import DepartmentType
from chart.application.enums.gender_type import GenderType
from chart.application.service.app_logic_base import AppLogicBaseService
from chart.models import Employees, Departments
"""
Class for operating on the employees table.
"""
class EmployeesService(AppLogicBaseService):
def __init__(self):
super().__init__()
@staticmethod
@transaction.atomic()
def create_employees():
"""
Create Employees records
"""
service = EmployeesService()
for emp_no in range(1, 11):
if Employees.objects.filter(emp_no=emp_no, delete_flag=0).count() == 0:
if emp_no <= 5:
department_no = DepartmentType.SALES.value
else:
department_no = DepartmentType.MARKETING.value
select_model = Departments.objects.filter(department_no=department_no).values("id").first()
# Register the data
service._regist_employees(select_model['id'], emp_no)
@staticmethod
@transaction.atomic()
def create_departments():
"""
Create Departments records
"""
service = EmployeesService()
# Delete all existing data
# Run the delete command because a ForeignKey is defined
Departments.objects.all().delete()
for department_type in DepartmentType:
department_no = department_type.value
if Departments.objects.filter(department_no=department_no, delete_flag=0).count() == 0:
# Register the data
service._regist_departments(department_no, department_type.en_name)
@staticmethod
@transaction.atomic()
def update_employees():
"""
Update Employees records
"""
service = EmployeesService()
# Narrow down the rows with filter
# gt: greater than (>), lt: less than (<)
for employees_item in Employees.objects.filter(emp_no__gt=1, emp_no__lt=3, delete_flag=0):
employees_id = employees_item.id
select_model = Departments.objects.filter(department_no=DepartmentType.PRODUCTION.value).values(
"id").first()
department_id = select_model['id']
department_date_from = 20190903
# Update the data
service._update_employees_department(employees_id, department_id, department_date_from)
# Narrow down the rows with filter
# gte: greater than or equal (>=), lte: less than or equal (<=)
for employees_item in Employees.objects.filter(emp_no__gte=7, emp_no__lte=9, delete_flag=0):
employees_id = employees_item.id
select_model = Departments.objects.filter(department_no=DepartmentType.SALES.value).values("id").first()
department_id = select_model['id']
department_date_from = 20190905
# Update the data
service._update_employees_department(employees_id, department_id, department_date_from)
@staticmethod
def select_employees():
"""
Search Employees records
"""
# Specifying table_name__field_name produces an INNER JOIN
# A query is issued every time the referenced table is accessed
for employees_item in Employees.objects.filter(department__department_no=DepartmentType.SALES.value,
delete_flag=0):
logging.debug("reference:emp_no={}".format(employees_item.emp_no))
logging.debug("reference:department_no={}".format(employees_item.department.department_no))
logging.debug("reference:department_name={}".format(employees_item.department.department_name))
logging.debug("reference:first_name={}".format(employees_item.first_name))
logging.debug("reference:last_name={}".format(employees_item.last_name))
# select_related fetches and caches the referenced rows
# Only one query is issued
for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).select_related("department"):
logging.debug("select_related:emp_no={}".format(employees_item.emp_no))
logging.debug("select_related:first_name={}".format(employees_item.first_name))
logging.debug("select_related:last_name={}".format(employees_item.last_name))
logging.debug("select_related:department_no={}".format(employees_item.department.department_no))
logging.debug("select_related:department_name={}".format(employees_item.department.department_name))
# prefetch_related fetches and caches the referenced rows
# Two queries are issued and joined on the ForeignKey
for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).prefetch_related(
"department__employees_set"):
logging.debug("prefetch_related:emp_no={}".format(employees_item.emp_no))
logging.debug("prefetch_related:first_name={}".format(employees_item.first_name))
logging.debug("prefetch_related:last_name={}".format(employees_item.last_name))
logging.debug("prefetch_related:department_no={}".format(employees_item.department.department_no))
logging.debug("prefetch_related:department_name={}".format(employees_item.department.department_name))
@staticmethod
@transaction.atomic()
def truncate_employees():
"""
Truncate the employees table
"""
cursor = connection.cursor()
cursor.execute('TRUNCATE TABLE {0}'.format(Employees._meta.db_table))
def _regist_employees(self, department_id, emp_no):
"""
Register an employees record
"""
self.regist_model = Employees()
self.regist_model.emp_no = emp_no
self.regist_model.department_id = department_id
self.regist_model.first_name = "first_name_" + str(emp_no).zfill(3)
self.regist_model.last_name = "last_name_" + str(emp_no).zfill(3)
self.regist_model.gender = GenderType.MAN.value
self.regist_model.department_date_from = "20190902"
self.regist_model.delete_flag = 0
self.regist_model.regist_dt = localtime(timezone.now())
self.regist_model.update_dt = localtime(timezone.now())
self.regist_model.save()
return self.regist_model.id
def _regist_departments(self, department_no, department_name):
"""
Register a departments record
"""
self.regist_model = Departments()
self.regist_model.department_no = department_no
self.regist_model.department_name = department_name
self.regist_model.delete_flag = 0
self.regist_model.regist_dt = localtime(timezone.now())
self.regist_model.update_dt = localtime(timezone.now())
self.regist_model.save()
def _update_employees_department(self, employees_id, department_id, department_date_from):
"""
Update department assignment information
"""
self.update_model = Employees()
self.update_model.pk = employees_id
self.update_model.department_id = department_id
self.update_model.department_date_from = department_date_from
self.update_model.update_dt = localtime(timezone.now())
self.update_model.save(update_fields=['department_id', 'department_date_from', 'update_dt'])
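A hedged sketch of how this service class might be driven, assuming Django settings are configured and migrations have been applied:
EmployeesService.create_departments()  # seed Departments from DepartmentType
EmployeesService.create_employees()    # emp_no 1..10, split between SALES and MARKETING
EmployeesService.update_employees()    # reassign departments via emp_no range filters
EmployeesService.select_employees()    # log JOIN / select_related / prefetch_related queries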
| 43.590361
| 116
| 0.674268
| 769
| 7,236
| 6.024707
| 0.161248
| 0.023743
| 0.061515
| 0.044679
| 0.564213
| 0.472696
| 0.45068
| 0.394561
| 0.382258
| 0.320958
| 0
| 0.008906
| 0.224157
| 7,236
| 165
| 117
| 43.854545
| 0.816352
| 0.065644
| 0
| 0.25
| 0
| 0
| 0.08424
| 0.064328
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086538
| false
| 0
| 0.076923
| 0
| 0.182692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53c0dd2b4f081d4c8d070b26922f68bf139eaa76
| 4,138
|
py
|
Python
|
.travis/manage_daily_builds.py
|
loonwerks/AGREE
|
58640ab89aaa3c72ccca0b8c80cf96d1815981da
|
[
"BSD-3-Clause"
] | 5
|
2020-12-28T15:41:04.000Z
|
2021-07-31T09:07:28.000Z
|
.travis/manage_daily_builds.py
|
loonwerks/AGREE
|
58640ab89aaa3c72ccca0b8c80cf96d1815981da
|
[
"BSD-3-Clause"
] | 89
|
2020-01-27T17:16:00.000Z
|
2022-03-31T09:57:25.000Z
|
.travis/manage_daily_builds.py
|
loonwerks/AGREE
|
58640ab89aaa3c72ccca0b8c80cf96d1815981da
|
[
"BSD-3-Clause"
] | 5
|
2020-02-25T00:33:21.000Z
|
2021-01-02T07:23:11.000Z
|
#!/usr/bin/env python3
'''
Copyright (c) 2021, Collins Aerospace.
Developed with the sponsorship of Defense Advanced Research Projects Agency (DARPA).
Permission is hereby granted, free of charge, to any person obtaining a copy of this data,
including any software or models in source or binary form, as well as any drawings, specifications,
and documentation (collectively "the Data"), to deal in the Data without restriction, including
without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Data, and to permit persons to whom the Data is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Data.
THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
'''
import os
import re
import sys
from github3 import GitHub
from pprint import pformat
GITHUB_API = 'https://api.github.com/repos'
GITHUB_RELEASES = 'releases'
AUTH_TOKEN = os.environ['GH_TOKEN'] if 'GH_TOKEN' in os.environ.keys() else None
REPOSITORY_OWNER = 'loonwerks'
REPOSITORY_REPO = 'AGREE'
PRODUCT_ASSET_PATTERN = re.compile(r'com.rockwellcollins.atc.agree.repository-\d+\.\d+\.\d+(-(\d{12}))?-.*')
def manage_daily_builds(sname):
print('Managing builds matching %s' % (sname))
# obtain git handle
gh = GitHub(GITHUB_API, token=AUTH_TOKEN)
repository = gh.repository(REPOSITORY_OWNER, REPOSITORY_REPO)
# get list of releases
releases = repository.releases()
# extract keys and sort by build date
release_keys = {x.id : x.created_at for x in releases if sname in x.name}
sorted_keys = sorted(release_keys.items(), reverse=True, key=lambda x: x[1])
print('%s' % (pformat(sorted_keys)))
# filter to obtain the keys to delete
delete_keys = [v[0] for v in sorted_keys[2:]]
print('Deleting releases: %s' % (pformat(delete_keys)))
# iterate, deleting the releases and corresponding tags
for rel in releases:
print('examining rel %d from %s...' % (rel.id, str(rel.created_at)))
if rel.id in delete_keys and rel.tag_name is not None:
print(' deleting release id %d and tag %s.' % (rel.id, rel.tag_name))
rel_tag_ref = repository.ref('tags/%s' % (rel.tag_name))
rel.delete()
if rel_tag_ref is not None:
print(' deleting tag %s' % (rel_tag_ref.ref))
rel_tag_ref.delete()
else:
# Look for stale files in the release
assets = rel.assets()
print('In release %s found assets:' % (rel.name))
for asset in assets:
match = PRODUCT_ASSET_PATTERN.search(asset.name)
print(' asset named %s matches %s' % (asset.name, match.group(1) if match is not None else 'None'))
build_times = sorted([PRODUCT_ASSET_PATTERN.search(x.name).group(1) for x in assets if PRODUCT_ASSET_PATTERN.search(x.name)])
latest_build_time = build_times[-1] if build_times else None
print('Latest build time is %s' % (latest_build_time))
for asset in assets:
match = PRODUCT_ASSET_PATTERN.search(asset.name)
# print(' asset named %s matches %s' % (asset.name, match.group(1) if match is not None else 'None'))
if match is not None:
asset_build_time = match.group(1)
if asset_build_time != latest_build_time:
print('deleting stale asset %s' % (asset.name))
asset.delete()
if __name__ == '__main__':
manage_daily_builds(sys.argv[1])
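A hedged invocation sketch; it needs GH_TOKEN set in the environment and the github3 package installed, and 'Nightly' is an illustrative name substring, not from the source:
# From a shell:
#   GH_TOKEN=... python3 .travis/manage_daily_builds.py Nightly
# Or directly from Python:
manage_daily_builds('Nightly')  # keeps the two newest matching releases, deletes the rest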
| 48.682353
| 137
| 0.678347
| 601
| 4,138
| 4.559068
| 0.357737
| 0.020438
| 0.034672
| 0.036496
| 0.133577
| 0.111679
| 0.089781
| 0.089781
| 0.089781
| 0.089781
| 0
| 0.005322
| 0.22813
| 4,138
| 84
| 138
| 49.261905
| 0.852536
| 0.386177
| 0
| 0.083333
| 0
| 0
| 0.152079
| 0.027327
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020833
| false
| 0
| 0.104167
| 0
| 0.125
| 0.229167
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53c1b1b92893f74554831ae30476aefdb5464370
| 5,743
|
py
|
Python
|
tests/crowdsourcing/tasks/turn_annotations_static/test_turn_annotations_static_analysis.py
|
KaihuiLiang/ParlAI
|
fb5c92741243756516fa50073d34e94ba0b6981e
|
[
"MIT"
] | null | null | null |
tests/crowdsourcing/tasks/turn_annotations_static/test_turn_annotations_static_analysis.py
|
KaihuiLiang/ParlAI
|
fb5c92741243756516fa50073d34e94ba0b6981e
|
[
"MIT"
] | 1
|
2020-11-12T02:20:02.000Z
|
2020-11-12T02:20:02.000Z
|
tests/crowdsourcing/tasks/turn_annotations_static/test_turn_annotations_static_analysis.py
|
MoPei/ParlAI
|
321bc857f2765cd76d5134531a802442ac4c9f5c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Test components of specific crowdsourcing tasks.
"""
import json
import os
import unittest
import pandas as pd
import parlai.utils.testing as testing_utils
try:
from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import (
TurnAnnotationsStaticResultsCompiler,
)
from parlai.crowdsourcing.utils.tests import check_stdout
class TestAnalysis(unittest.TestCase):
"""
Test the analysis code for the static turn annotations task.
"""
def test_compile_results(self):
"""
Test compiling results on a dummy set of data.
"""
with testing_utils.tempdir() as tmpdir:
# Define expected stdout
# Paths
analysis_samples_folder = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'analysis_samples'
)
analysis_outputs_folder = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'test_turn_annotations_static_analysis',
)
expected_stdout_path = os.path.join(
analysis_outputs_folder, 'test_stdout.txt'
)
temp_gold_annotations_path = os.path.join(
tmpdir, 'gold_annotations.json'
)
# Save a file of gold annotations
gold_annotations = {
"1_0_5": {
"bucket_0": False,
"bucket_1": False,
"bucket_2": False,
"bucket_3": False,
"bucket_4": False,
"none_all_good": True,
},
"1_1_5": {
"bucket_0": False,
"bucket_1": False,
"bucket_2": False,
"bucket_3": False,
"bucket_4": True,
"none_all_good": False,
},
"2_0_5": {
"bucket_0": False,
"bucket_1": True,
"bucket_2": False,
"bucket_3": False,
"bucket_4": False,
"none_all_good": False,
},
"2_1_5": {
"bucket_0": False,
"bucket_1": False,
"bucket_2": False,
"bucket_3": False,
"bucket_4": True,
"none_all_good": False,
},
}
with open(temp_gold_annotations_path, 'w') as f:
json.dump(gold_annotations, f)
# Run compilation of results
parser = TurnAnnotationsStaticResultsCompiler.setup_args()
parser.set_defaults(
**{
'results_folders': analysis_samples_folder,
'output_folder': tmpdir,
'onboarding_in_flight_data_file': os.path.join(
analysis_samples_folder, 'onboarding_in_flight.jsonl'
),
'gold_annotations_file': temp_gold_annotations_path,
}
)
args = parser.parse_args([])
with testing_utils.capture_output() as output:
compiler = TurnAnnotationsStaticResultsCompiler(vars(args))
compiler.NUM_SUBTASKS = 3
compiler.NUM_ANNOTATIONS = 3
compiler.compile_results()
actual_stdout = output.getvalue()
# Check the output against what it should be
check_stdout(
actual_stdout=actual_stdout,
expected_stdout_path=expected_stdout_path,
)
# Check that the saved results file is what it should be
sort_columns = ['hit_id', 'worker_id', 'conversation_id', 'turn_idx']
expected_results_path = os.path.join(
analysis_outputs_folder, 'expected_results.csv'
)
expected_results = (
pd.read_csv(expected_results_path)
.drop('folder', axis=1)
.sort_values(sort_columns)
.reset_index(drop=True)
)
# Drop the 'folder' column, which contains a system-dependent path string
actual_results_rel_path = [
obj for obj in os.listdir(tmpdir) if obj.startswith('results')
][0]
actual_results_path = os.path.join(tmpdir, actual_results_rel_path)
actual_results = (
pd.read_csv(actual_results_path)
.drop('folder', axis=1)
.sort_values(sort_columns)
.reset_index(drop=True)
)
if not actual_results.equals(expected_results):
raise ValueError(
f'\n\n\tExpected results:\n{expected_results.to_csv()}'
f'\n\n\tActual results:\n{actual_results.to_csv()}'
)
except ImportError:
pass
if __name__ == "__main__":
unittest.main()
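A hedged sketch of driving the compiler outside unittest, mirroring the defaults set in the test above; the folder paths are illustrative:
parser = TurnAnnotationsStaticResultsCompiler.setup_args()
parser.set_defaults(results_folders='/path/to/results', output_folder='/tmp/out')
opt = vars(parser.parse_args([]))
TurnAnnotationsStaticResultsCompiler(opt).compile_results()  # writes results to /tmp/out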
| 37.292208
| 93
| 0.482675
| 520
| 5,743
| 5.034615
| 0.315385
| 0.063025
| 0.026738
| 0.02139
| 0.247517
| 0.224217
| 0.224217
| 0.189076
| 0.189076
| 0.189076
| 0
| 0.01182
| 0.440188
| 5,743
| 153
| 94
| 37.535948
| 0.802488
| 0.105346
| 0
| 0.247788
| 0
| 0
| 0.120805
| 0.040861
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00885
| false
| 0.00885
| 0.070796
| 0
| 0.088496
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53c38f978d506f03ad72b1b6b50a34e76cbf6a7b
| 3,937
|
py
|
Python
|
applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/messaging/rabbitmq_plugin.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | 1
|
2020-10-14T00:06:54.000Z
|
2020-10-14T00:06:54.000Z
|
applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/messaging/rabbitmq_plugin.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | null | null | null |
applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/messaging/rabbitmq_plugin.py
|
mith1979/ansible_automation
|
013dfa67c6d91720b787fadb21de574b6e023a26
|
[
"Apache-2.0"
] | 2
|
2015-08-06T07:45:48.000Z
|
2017-01-04T17:47:16.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Chatham Financial <oss@chathamfinancial.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rabbitmq_plugin
short_description: Adds or removes plugins to RabbitMQ
description:
- Enables or disables RabbitMQ plugins
version_added: "1.1"
author: Chris Hoffman
options:
names:
description:
- Comma-separated list of plugin names
required: true
default: null
aliases: [name]
new_only:
description:
- Only enable missing plugins
- Does not disable plugins that are not in the names list
required: false
default: "no"
choices: [ "yes", "no" ]
state:
description:
- Specify if plugins are to be enabled or disabled
required: false
default: enabled
choices: [enabled, disabled]
prefix:
description:
- Specify a custom install prefix for RabbitMQ
required: false
version_added: "1.3"
default: null
'''
EXAMPLES = '''
# Enables the rabbitmq_management plugin
- rabbitmq_plugin: names=rabbitmq_management state=enabled
'''
class RabbitMqPlugins(object):
def __init__(self, module):
self.module = module
if module.params['prefix']:
self._rabbitmq_plugins = module.params['prefix'] + "/sbin/rabbitmq-plugins"
else:
self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True)
def _exec(self, args, run_in_check_mode=False):
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
cmd = [self._rabbitmq_plugins]
rc, out, err = self.module.run_command(cmd + args, check_rc=True)
return out.splitlines()
return list()
def get_all(self):
return self._exec(['list', '-E', '-m'], True)
def enable(self, name):
self._exec(['enable', name])
def disable(self, name):
self._exec(['disable', name])
def main():
arg_spec = dict(
names=dict(required=True, aliases=['name']),
new_only=dict(default='no', type='bool'),
state=dict(default='enabled', choices=['enabled', 'disabled']),
prefix=dict(required=False, default=None)
)
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
names = module.params['names'].split(',')
new_only = module.params['new_only']
state = module.params['state']
rabbitmq_plugins = RabbitMqPlugins(module)
enabled_plugins = rabbitmq_plugins.get_all()
enabled = []
disabled = []
if state == 'enabled':
if not new_only:
for plugin in enabled_plugins:
if plugin not in names:
rabbitmq_plugins.disable(plugin)
disabled.append(plugin)
for name in names:
if name not in enabled_plugins:
rabbitmq_plugins.enable(name)
enabled.append(name)
else:
for plugin in enabled_plugins:
if plugin in names:
rabbitmq_plugins.disable(plugin)
disabled.append(plugin)
changed = len(enabled) > 0 or len(disabled) > 0
module.exit_json(changed=changed, enabled=enabled, disabled=disabled)
# import module snippets
from ansible.module_utils.basic import *
main()
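A hedged sketch of the module's core flow outside Ansible; it only runs where the rabbitmq-plugins binary exists, and in real use the AnsibleModule instance is constructed by Ansible itself, as in main() above:
module = AnsibleModule(argument_spec=dict(), supports_check_mode=True)  # normally built by Ansible
plugins = RabbitMqPlugins(module)
enabled = plugins.get_all()  # e.g. ['rabbitmq_management', ...]
if 'rabbitmq_management' not in enabled:
    plugins.enable('rabbitmq_management')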
| 30.053435
| 88
| 0.654559
| 494
| 3,937
| 5.103239
| 0.3583
| 0.06545
| 0.01547
| 0.02261
| 0.13566
| 0.125347
| 0.069814
| 0.043633
| 0.043633
| 0
| 0
| 0.00404
| 0.245618
| 3,937
| 130
| 89
| 30.284615
| 0.844781
| 0.18796
| 0
| 0.210526
| 0
| 0
| 0.328194
| 0.014789
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063158
| false
| 0
| 0.010526
| 0.010526
| 0.115789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53c47f75ab180de02752f1ea49f9b87157a860e1
| 2,406
|
py
|
Python
|
napari/layers/shapes/mesh.py
|
marshuang80/napari
|
10f1d0f39fe9ccd42456c95458e2f23b59450f02
|
[
"BSD-3-Clause"
] | null | null | null |
napari/layers/shapes/mesh.py
|
marshuang80/napari
|
10f1d0f39fe9ccd42456c95458e2f23b59450f02
|
[
"BSD-3-Clause"
] | null | null | null |
napari/layers/shapes/mesh.py
|
marshuang80/napari
|
10f1d0f39fe9ccd42456c95458e2f23b59450f02
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
class Mesh:
"""Contains meshses of shapes that will ultimately get rendered.
Attributes
----------
vertices : np.ndarray
Qx2 array of vertices of all triangles for shapes including edges and
faces
vertices_centers : np.ndarray
Qx2 array of centers of vertices of triangles for shapes. For vertices
corresponding to faces these are the same as the actual vertices. For
vertices corresponding to edges these values should be added to a
scaled `vertices_offsets` to get the actual vertex positions.
The scaling corresponds to the width of the edge
vertices_offsets : np.ndarray
Qx2 array of offsets of vertices of triangles for shapes. For vertices
corresponding to faces these are 0. For vertices corresponding to
edges these values should be scaled and added to the
`vertices_centers` to get the actual vertex positions.
The scaling corresponds to the width of the edge
vertices_index : np.ndarray
Qx2 array of the index (0, ..., N-1) of each shape that each vertex
corresponds and the mesh type (0, 1) for face or edge.
triangles : np.ndarray
Px3 array of vertex indices that form the mesh triangles
triangles_index : np.ndarray
Px2 array of the index (0, ..., N-1) of each shape that each triangle
corresponds and the mesh type (0, 1) for face or edge.
triangles_colors : np.ndarray
Px4 array of the rgba color of each triangle
triangles_z_order : np.ndarray
Length P array of the z order of each triangle. Must be a permutation
of (0, ..., P-1)
Extended Summary
----------------
_types : list
Length two list of the different mesh types corresponding to faces and
edges
"""
_types = ['face', 'edge']
def __init__(self):
self.clear()
def clear(self):
"""Resets mesh data
"""
self.vertices = np.empty((0, 2))
self.vertices_centers = np.empty((0, 2))
self.vertices_offsets = np.empty((0, 2))
self.vertices_index = np.empty((0, 2), dtype=int)
self.triangles = np.empty((0, 3), dtype=np.uint32)
self.triangles_index = np.empty((0, 2), dtype=int)
self.triangles_colors = np.empty((0, 4))
self.triangles_z_order = np.empty((0), dtype=int)
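A small sketch of the empty-state invariants documented in the class docstring above:
mesh = Mesh()
assert mesh.vertices.shape == (0, 2)      # no triangle vertices yet
assert mesh.triangles.dtype == np.uint32  # vertex indices are uint32
mesh.clear()                              # resets every array to its empty shape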
| 38.806452
| 79
| 0.646301
| 343
| 2,406
| 4.469388
| 0.265306
| 0.046967
| 0.041748
| 0.029354
| 0.499674
| 0.454664
| 0.413568
| 0.413568
| 0.413568
| 0.302674
| 0
| 0.020138
| 0.277639
| 2,406
| 61
| 80
| 39.442623
| 0.86191
| 0.694929
| 0
| 0
| 0
| 0
| 0.01406
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53c5eb302f7f03de564020dfecea1ce909aa994c
| 12,916
|
py
|
Python
|
configs/docker-ubuntu-img/para.py
|
MarioCarrilloA/stx-packaging
|
56cf32c4d65ba20f9317102d922ce946a800527d
|
[
"Apache-2.0"
] | 1
|
2019-06-02T00:28:03.000Z
|
2019-06-02T00:28:03.000Z
|
configs/docker-ubuntu-img/para.py
|
MarioCarrilloA/stx-packaging
|
56cf32c4d65ba20f9317102d922ce946a800527d
|
[
"Apache-2.0"
] | 11
|
2019-04-05T16:04:54.000Z
|
2019-08-23T19:24:49.000Z
|
configs/docker-ubuntu-img/para.py
|
MarioCarrilloA/stx-packaging
|
56cf32c4d65ba20f9317102d922ce946a800527d
|
[
"Apache-2.0"
] | 5
|
2019-02-18T23:11:30.000Z
|
2019-04-29T07:42:31.000Z
|
#!/usr/bin/python3
# vim:se tw=0 sts=4 ts=4 et ai:
"""
Copyright © 2014 Osamu Aoki
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import os
import pwd
import sys
import time
import debmake.read
###########################################################################
# undefined environment variable -> ''
def env(var):
try:
return os.environ[var]
except KeyError:
return ''
#######################################################################
# Initialize parameters
#######################################################################
def para(para):
debmail = env('DEBEMAIL')
if not debmail:
#debmail = os.getlogin() + '@localhost'
debmail = pwd.getpwuid(os.getuid())[0] + '@localhost'
debfullname = env('DEBFULLNAME')
if not debfullname:
# os.getlogin may not work well: #769392
#debfullname = pwd.getpwnam(os.getlogin())[4].split(',')[0]
debfullname = pwd.getpwuid(os.getuid())[4].split(',')[0]
#######################################################################
# command line setting
#######################################################################
p = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description = '''\
{0}: make Debian source package Version: {1}
{2}
{0} helps to build the Debian package from the upstream source.
Normally, this is done as follows:
* The upstream tarball is downloaded as the package-version.tar.gz file.
* It is untared to create many files under the package-version/ directory.
* {0} is invoked in the package-version/ directory possibly without any arguments.
* Files in the package-version/debian/ directory are manually adjusted.
* dpkg-buildpackage (usually from its wrapper debuild or pdebuild) is invoked in the package-version/ directory to make debian packages.
Argument may need to be quoted to protect from the shell.
'''.format(
para['program_name'],
para['program_version'],
para['program_copyright']),
epilog='See debmake(1) manpage for more.')
ck = p.add_mutually_exclusive_group()
ck.add_argument(
'-c',
'--copyright',
action = 'count',
default = 0,
help = 'scan source for copyright+license text and exit')
ck.add_argument(
'-k',
'--kludge',
action = 'count',
default = 0,
help = 'compare debian/copyright with the source and exit')
sp = p.add_mutually_exclusive_group()
sp.add_argument(
'-n',
'--native',
action = 'store_true',
default = False,
help = 'make a native source package without .orig.tar.gz')
sp.add_argument(
'-a',
'--archive',
type = str,
action = 'store',
default = '',
help = 'use the upstream source tarball directly (-p, -u, -z: overridden)',
metavar = 'package-version.tar.gz')
sp.add_argument(
'-d',
'--dist',
action = 'store_true',
default = False,
help = 'run "make dist" equivalent first to generate upstream tarball and use it')
sp.add_argument(
'-t',
'--tar',
action = 'store_true',
default = False,
help = 'run "tar" to generate upstream tarball and use it')
p.add_argument(
'-p',
'--package',
action = 'store',
default = '',
help = 'set the Debian package name',
metavar = 'package')
p.add_argument(
'-u',
'--upstreamversion',
action = 'store',
default = '',
help = 'set the upstream package version',
metavar = 'version')
p.add_argument(
'-r',
'--revision',
action = 'store',
default = '',
help = 'set the Debian package revision',
metavar = 'revision')
p.add_argument(
'-z',
'--targz',
action = 'store',
default = '',
help = 'set the tarball type, extension=(tar.gz|tar.bz2|tar.xz)',
metavar = 'extension')
p.add_argument(
'-b',
'--binaryspec',
action = 'store',
default = '',
help = 'set binary package specs as comma separated list of "binarypackage":"type" pairs, e.g., in full form "foo:bin,foo-doc:doc,libfoo1:lib,libfoo1-dbg:dbg,libfoo-dev:dev" or in short form ",-doc,libfoo1,libfoo1-dbg, libfoo-dev". Here, "binarypackage" is the binary package name; and optional "type" is chosen from "bin", "data", "dbg", "dev", "doc", "lib", "perl", "python", "python3", "ruby", and "script". If "type" is not specified but obvious, it is set by "binarypackage". Otherwise it is set to "bin" for the compiled ELF binary.',
metavar = 'binarypackage[:type]')
p.add_argument(
'-e',
'--email',
action = 'store',
default = debmail,
help = 'set e-mail address',
metavar = 'foo@example.org')
p.add_argument(
'-f',
'--fullname',
action = 'store',
default = debfullname,
help = 'set the fullname',
metavar = '"firstname lastname"')
# p.add_argument(
# '-g',
# '--gui',
# action = 'store_true',
# default = False,
# help = 'run GUI configuration')
#
# -h : used by argparse for --help
ep = p.add_mutually_exclusive_group()
ep.add_argument(
'-i',
'--invoke',
default = '',
action = 'store',
help = 'invoke package build tool',
metavar = '[debuild|pdebuild|...]')
ep.add_argument(
'-j',
'--judge',
action = 'store_true',
default = False,
help = 'run "dpkg-depcheck" to judge build dependencies and identify file paths')
p.add_argument(
'-l',
'--license',
default = '',
action = 'store',
help = 'add formatted license to debian/copyright',
metavar = '"license_file"')
p.add_argument(
'-m',
'--monoarch',
action = 'store_true',
default = False,
help = 'force packages to be non-multiarch')
p.add_argument(
'-o',
'--option',
default = '',
action = 'store',
help = 'read optional parameters from "file"',
metavar = '"file"')
p.add_argument(
'-q',
'--quitearly',
action = 'store_true',
default = False,
help='quit early before creating files in the debian directory')
p.add_argument(
'-s',
'--spec',
action = 'store_true',
default = False,
help = 'use upstream spec')
p.add_argument(
'-v',
'--version',
action = 'store_true',
default = False,
help = 'show version information')
p.add_argument(
'-w',
'--with',
action = 'store',
default = '',
dest = 'withargs',
help = 'set additional "dh --with" option arguments',
metavar = 'args')
p.add_argument(
'-x',
'--extra',
default = '',
action = 'store',
help = 'generate extra configuration files as templates',
metavar = '[01234]')
p.add_argument(
'-y',
'--yes',
action = 'count',
default = 0,
help = '"force yes" for all prompts')
p.add_argument(
'-L',
'--local',
action = 'store_true',
default = False,
help='generate configuration files for the local package')
p.add_argument(
'-P',
'--pedantic',
action = 'store_true',
default = False,
help='pedantically check auto-generated files')
p.add_argument(
'-T',
'--tutorial',
action = 'store_true',
default = False,
help='output tutorial comment lines in template files')
args = p.parse_args()
#######################################################################
# Set parameter values
#######################################################################
############################################# -a
if args.archive:
para['archive'] = True
para['tarball'] = args.archive
else:
para['archive'] = False
para['tarball'] = ''
#############################################
para['binaryspec'] = args.binaryspec # -b
para['copyright'] = min(args.copyright, 6) # -c
if para['copyright'] >=4:
para['copyright'] = 3 - para['copyright']
# 0: debian/copyright, +/-1: simple, +/-2: standard +/-3: extensive
para['dist'] = args.dist # -d
para['email'] = args.email # -e
para['fullname'] = args.fullname # -f
# para['gui'] = args.gui # -g
para['invoke'] = args.invoke # -i
para['judge'] = args.judge # -j
if para['judge']:
para['override'].update({'judge'})
para['kludge'] = args.kludge # -k
############################################# -l
# --license: args.license -> para['license'] as set
if args.license == '':
para['license'] = set({'[Cc][Oo][Pp][Yy][Ii][Nn][Gg]*',
'[Ll][Ii][Cc][Ee][Nn][Ss][Ee]*'}) # default
else:
para['license'] = set(args.copyright.split(','))
#############################################
para['monoarch'] = args.monoarch # -m
para['native'] = args.native # -n
para['package'] = args.package.lower() # -p
#############################################
para['quitearly'] = args.quitearly # -q
para['revision'] = args.revision # -r
para['spec'] = args.spec # -s
para['tar'] = args.tar # -t
para['version'] = args.upstreamversion # -u
para['print_version'] = args.version # -v
############################################# -w
# --with: args.withargs -> para['dh_with'] as set
if args.withargs == '':
para['dh_with'] = set() # default is empty set
else:
para['dh_with'] = set(args.withargs.split(','))
#############################################
para['extra'] = args.extra # -x
para['yes'] = min(args.yes, 2) # -y
# 0: ask, 1: yes, 2: no
para['targz'] = args.targz # -z
para['local'] = args.local # -L
para['pedantic'] = args.pedantic # -P
para['tutorial'] = args.tutorial # -T
############################################# -o
if args.option:
exec(debmake.read.read(args.option))
#######################################################################
# return command line parameters
#######################################################################
return para
#######################################################################
# Test code
#######################################################################
if __name__ == '__main__':
seed = {'program_name': 'debmake', 'program_version': '0', 'program_copyright': '', 'override': set()}
for p, v in para(seed).items():
print("para['{}'] = \"{}\"".format(p,v))
| 38.440476
| 554
| 0.477083
| 1,273
| 12,916
| 4.788688
| 0.298507
| 0.050525
| 0.03937
| 0.043307
| 0.149934
| 0.108596
| 0.058727
| 0.013451
| 0
| 0
| 0
| 0.005667
| 0.316894
| 12,916
| 335
| 555
| 38.555224
| 0.685141
| 0.148421
| 0
| 0.309434
| 0
| 0.007547
| 0.341297
| 0.029913
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007547
| false
| 0
| 0.022642
| 0
| 0.041509
| 0.007547
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53c6b101ead41851286a75be3bcca965a4128b2f
| 6,164
|
py
|
Python
|
build/lib/jet_django/views/model.py
|
lukejamison/jet-dasboard
|
5dce66b6ea2f107d7120e5e0256346d2d3bc8ed9
|
[
"MIT"
] | 193
|
2018-08-27T06:10:48.000Z
|
2022-03-08T13:04:55.000Z
|
build/lib/jet_django/views/model.py
|
lukejamison/jet-dasboard
|
5dce66b6ea2f107d7120e5e0256346d2d3bc8ed9
|
[
"MIT"
] | 23
|
2018-10-21T15:05:41.000Z
|
2020-12-20T15:18:58.000Z
|
build/lib/jet_django/views/model.py
|
lukejamison/jet-dasboard
|
5dce66b6ea2f107d7120e5e0256346d2d3bc8ed9
|
[
"MIT"
] | 38
|
2018-10-31T16:19:25.000Z
|
2022-02-10T05:08:24.000Z
|
from django.core.exceptions import NON_FIELD_ERRORS
from rest_framework import status, viewsets, serializers
from rest_framework.decorators import list_route
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer
from jet_django.filters.model_aggregate import AggregateFilter
from jet_django.filters.model_group import GroupFilter
from jet_django.pagination import CustomPageNumberPagination
from jet_django.permissions import HasProjectPermissions, ModifyNotInDemo
from jet_django.serializers.reorder import reorder_serializer_factory
class AggregateSerializer(serializers.Serializer):
y_func = serializers.IntegerField()
def __init__(self, *args, **kwargs):
if 'y_func_serializer' in kwargs:
self.fields['y_func'] = kwargs.pop('y_func_serializer')
super().__init__(*args, **kwargs)
class GroupSerializer(serializers.Serializer):
group = serializers.CharField()
y_func = serializers.IntegerField()
def __init__(self, *args, **kwargs):
if 'group_serializer' in kwargs:
self.fields['group'] = kwargs.pop('group_serializer')
if 'y_func_serializer' in kwargs:
self.fields['y_func'] = kwargs.pop('y_func_serializer')
super().__init__(*args, **kwargs)
def model_viewset_factory(build_model, build_filter_class, build_serializer_class, build_detail_serializer_class, build_queryset, build_actions, ordering_field):
ReorderSerializer = reorder_serializer_factory(build_queryset, ordering_field)
class Viewset(viewsets.ModelViewSet):
model = build_model
queryset = build_queryset
pagination_class = CustomPageNumberPagination
filter_class = build_filter_class
authentication_classes = ()
permission_classes = (HasProjectPermissions, ModifyNotInDemo)
def get_serializer_class(self):
if self.action == 'aggregate':
return AggregateSerializer
elif self.action == 'group':
return GroupSerializer
elif self.action == 'retrieve':
return build_detail_serializer_class
else:
return build_serializer_class
@list_route(methods=['get'])
def aggregate(self, request):
queryset = self.filter_queryset(self.get_queryset())
y_func = request.GET['_y_func'].lower()
y_column = request.GET.get('_y_column', 'id')
y_field = self.model._meta.get_field(y_column)
y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field)
y_serializer = y_serializer_class(**y_serializer_kwargs)
queryset = AggregateFilter().filter(queryset, {
'y_func': y_func,
'y_column': y_column
})
serializer = self.get_serializer(
queryset,
y_func_serializer=y_serializer
)
return Response(serializer.data)
@list_route(methods=['get'])
def group(self, request):
queryset = self.filter_queryset(self.get_queryset())
x_column = request.GET['_x_column']
x_lookup_name = request.GET.get('_x_lookup')
y_func = request.GET['_y_func'].lower()
y_column = request.GET.get('_y_column', 'id')
x_field = self.model._meta.get_field(x_column)
x_lookup = x_field.class_lookups.get(x_lookup_name)
y_field = self.model._meta.get_field(y_column)
if x_lookup:
x_field = x_lookup('none').output_field
x_serializer_class, x_serializer_kwargs = ModelSerializer().build_standard_field(x_column, x_field)
x_serializer = x_serializer_class(**x_serializer_kwargs)
y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field)
y_serializer = y_serializer_class(**y_serializer_kwargs)
queryset = GroupFilter().filter(queryset, {
'x_column': x_column,
'x_lookup': x_lookup,
'y_func': y_func,
'y_column': y_column
})
serializer = self.get_serializer(
queryset,
many=True,
group_serializer=x_serializer,
y_func_serializer=y_serializer
)
return Response(serializer.data)
def get_serializer(self, *args, **kwargs):
"""
Return the serializer instance that should be used for validating and
deserializing input, and for serializing output.
"""
serializer_class = self.get_serializer_class()
kwargs['context'] = self.get_serializer_context()
return serializer_class(*args, **kwargs)
@list_route(methods=['post'])
def reorder(self, request):
serializer = ReorderSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
@list_route(methods=['post'])
def reset_order(self, request):
i = 1
for instance in build_queryset:
setattr(instance, ordering_field, i)
instance.save()
i += 1
return Response({})
for action in build_actions:
def route(self, request, action=action):  # bind the current action to avoid late-binding in the loop
form = action(data=request.data)
if not form.is_valid():
return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
queryset = form.filer_queryset(self.get_queryset())
try:
result = form.save(queryset)
except Exception as e:
return Response({NON_FIELD_ERRORS: str(e)}, status=status.HTTP_400_BAD_REQUEST)
return Response({'action': form._meta.name, 'result': result})
decorator = list_route(methods=['post'])
route = decorator(route)
setattr(Viewset, action._meta.name, route)
return Viewset
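A hedged sketch of calling the factory; `Book`, `BookFilterSet`, and the serializer classes are illustrative placeholders for app-specific definitions, not names from the source:
BookViewset = model_viewset_factory(
    build_model=Book,
    build_filter_class=BookFilterSet,
    build_serializer_class=BookSerializer,
    build_detail_serializer_class=BookDetailSerializer,
    build_queryset=Book.objects.all(),
    build_actions=[],           # no custom actions
    ordering_field='ordering',  # integer field used by reorder/reset_order
)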
| 36.91018
| 161
| 0.638384
| 663
| 6,164
| 5.612368
| 0.182504
| 0.024187
| 0.024187
| 0.018275
| 0.387799
| 0.343187
| 0.294007
| 0.277882
| 0.277882
| 0.202634
| 0
| 0.001787
| 0.273524
| 6,164
| 166
| 162
| 37.13253
| 0.829165
| 0.019143
| 0
| 0.336066
| 0
| 0
| 0.046318
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081967
| false
| 0
| 0.081967
| 0
| 0.368852
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53c796e3204469330950f66fd76505dd80903be6
| 8,086
|
py
|
Python
|
davenetgame/dispatch/dispatcher.py
|
davefancella/davenetgame
|
f16c36539a3898ab4a021e63feef7fe497e5bc69
|
[
"Apache-2.0"
] | null | null | null |
davenetgame/dispatch/dispatcher.py
|
davefancella/davenetgame
|
f16c36539a3898ab4a021e63feef7fe497e5bc69
|
[
"Apache-2.0"
] | null | null | null |
davenetgame/dispatch/dispatcher.py
|
davefancella/davenetgame
|
f16c36539a3898ab4a021e63feef7fe497e5bc69
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
'''
Copyright 2016 Dave Fancella
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import threading, time
from davenetgame.dispatch.base import DispatcherBase
from davenetgame.protocol import connection
## @file dispatcher
#
# This file contains the standard, generic EventDispatcher class. It's the one you use if
# the library doesn't support your preferred game engine, or if you'd rather manage the library
# independently of your game engine.
## This is the standard EventDispatcher.
class EventDispatcher(DispatcherBase):
pass
## This is a special server-oriented EventDispatcher that provides for an interactive console
# on the server when run in a terminal. This is probably most useful for testing the library,
# though it's not unheard of for a server to run in a terminal and have a console.
class EventDispatcherServer(DispatcherBase):
__console = None
__consolecommands = None
def __init__(self, **args):
super().__init__(**args)
self.__console = ConsoleInput()
self.__consolecommands = []
# Register the standard commands available to every game server.
self.RegisterCommand('show', self.consoleShow, "show (connections)", "Show whatever you want to see.")
self.RegisterCommand('help', self.consoleHelp, "help [command]", "print this helpful text. Alternately, type in a command to see its helpful text.")
self.RegisterCommand('quit', self.consoleQuit, "quit", "Quit the server.")
def Start(self):
self.__console.Start()
super().Start()
def Update(self, timestep):
try:
while self.__console.HasPending():
msg = self.__console.pop()
args = msg.split(" ")
command = args.pop(0)
command = command.lower()
# Ignore simple presses of enter
if command == '':
continue
foundcommand = False
for a in self.__consolecommands:
if a.command() == command:
a.callback(*args)
foundcommand = True
if not foundcommand:
print("Command not recognized: " + command)
except:
pass
super().Update(timestep)
## @name Console API
#
# These methods give access to the built-in server console and the various commands that
# can be created.
#@{
## Console command: show
def consoleShow(self, *args):
if len(args) != 1:
print("Usage: show (connections)")
else:
if args[0] == "connections":
if len(self.GetConnections() ) == 0:
print("There are no connections at this time.")
else:
for a in self.GetConnections():
print("{0:3}: {1:40} {2:10} {3:4}".format(a.id(), str(a), connection.statuslist[a.Status()][1], int(a.GetConnectionPing() * 1000) ) )
else:
print("Unknown thing to show: " + args[0])
## Console command: help
def consoleHelp(self, *args):
if len(args) > 0:
for a in self.__consolecommands:
if a.command() == args[0]:
print("%10s : %s" % (args[0], a.helplong() ))
print("%13s %s" % (" ", a.helpshort() ))
print
else:
print("Command not found.")
else:
for a in self.__consolecommands:
print("%10s : %s" % (a.command(), a.helplong() ))
print("%13s %s" % (" ", a.helpshort() ))
print()
## Console command: quit
def consoleQuit(self, *args):
print("Quit signaled from console.")
self.Stop()
self.__console.Stop()
## Call to register console commands with the server. The library implements a number of standard
# commands, but games may need their own commands. In that case, you will need your own callbacks.
def RegisterCommand(self, command, callback, helpshort, helplong):
self.__consolecommands.append(ConsoleCommand(
command = command,
callback = callback,
helpshort = helpshort,
helplong = helplong
)
)
#@}
## This class implements console commands. To create a new console command, simply make an instance of
# this class, giving all the keyword arguments in the constructor.
# @param 'command' : the name of the command, what the user types to use it.
# @param 'callback' : a function that will process the command when the user types it.
# @param 'helpshort' : short help text, usually one line of text, preferably not more than 50 characters.
# In output, it will be prepended with "Usage: "
# @param 'helplong' : long help text, can be as long as needed, as many lines as needed. Do not put
# line endings, however. Those will be added as needed. You may put line endings to
# signify paragraph breaks, if need be.
class ConsoleCommand(object):
__command = None
__callback = None
__helpshort = None
__helplong = None
def __init__(self, **args):
# Ensure the command is always lowercase
self.__command = args['command'].strip().lower()
self.__callback = args['callback']
self.__helpshort = args['helpshort']
self.__helplong = args['helplong']
def callback(self, *args):
self.__callback(*args)
def command(self):
return self.__command
def helpshort(self):
return self.__helpshort
def helplong(self):
return self.__helplong
## This class makes the console input non-blocking.
class ConsoleInput(threading.Thread):
## This is the lock that must be called to avoid thread collisions
__lock = None
## This is a queue of commands, unparsed.
__pcommands = None
def __init__(self, **args):
threading.Thread.__init__(self, **args)
self.__lock = threading.RLock()
self.__pcommands = []
## Call to start the console input thread.
def Start(self):
self.__continue = True
self.start()
## Stops the console input thread. Because input() blocks, the thread may not exit until the
# next line is read from stdin. If blocking is "True", then the call will block until the
# thread has shut down.
def Stop(self, blocking=False):
self.__continue = False
if blocking:
self.join()
## Returns true if there are pending lines from stdin to work with
def HasPending(self):
return len(self.__pcommands) > 0
## Starts the console input. Don't call this directly, instead call Start().
def run(self):
while self.__continue:
try:
msg = input(': ')
except EOFError:
# stdin closed (e.g. Ctrl-D): stop reading commands
break
self.__lock.acquire()
self.__pcommands.append(msg.strip())
self.__lock.release()
time.sleep(0.01)
## Pops the first item off the commands list and returns it.
def pop(self):
theCommand = None
if len(self.__pcommands) > 0:
self.__lock.acquire()
theCommand = self.__pcommands.pop(0)
self.__lock.release()
return theCommand
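# Minimal standalone sketch of the non-blocking input pattern above (assumes
# an interactive terminal; the 0.01s sleep keeps the polling loop cheap):
#
#   console = ConsoleInput()
#   console.Start()
#   while running:
#       while console.HasPending():
#           line = console.pop()
#           dispatch(line)          # hypothetical dispatcher
#   console.Stop(blocking=True)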
| 34.703863
| 157
| 0.589661
| 937
| 8,086
| 4.983991
| 0.33191
| 0.013705
| 0.010278
| 0.008565
| 0.065953
| 0.029122
| 0.029122
| 0.029122
| 0
| 0
| 0
| 0.008734
| 0.320307
| 8,086
| 232
| 158
| 34.853448
| 0.840975
| 0.36186
| 0
| 0.184
| 0
| 0
| 0.086183
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144
| false
| 0.016
| 0.024
| 0.024
| 0.312
| 0.112
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53c8f59b4f5c675f0331d7886d8de3f13a17f272
| 322
|
py
|
Python
|
03_Estrutura_de_Repeticao/13_potenciacao.py
|
gabrieldcpadilha/ListaDeExercicios-PythonBrasil
|
a92d477468bde5eac8987a26ea79af2ffeb6ad81
|
[
"MIT"
] | null | null | null |
03_Estrutura_de_Repeticao/13_potenciacao.py
|
gabrieldcpadilha/ListaDeExercicios-PythonBrasil
|
a92d477468bde5eac8987a26ea79af2ffeb6ad81
|
[
"MIT"
] | 10
|
2020-08-19T04:31:52.000Z
|
2020-09-21T22:48:29.000Z
|
03_Estrutura_de_Repeticao/13_potenciacao.py
|
gabrieldcpadilha/ListaDeExercicios-PythonBrasil
|
a92d477468bde5eac8987a26ea79af2ffeb6ad81
|
[
"MIT"
] | null | null | null |
base = int(input('Digite o valor da base: '))  # read the base value
expoente = 0
# keep asking until the exponent is positive
while expoente <= 0:
expoente = int(input('Digite o valor do expoente: '))
if expoente <= 0:
print('O expoente tem que ser positivo')  # the exponent must be positive
potencia = 1
for c in range(1, expoente + 1):
potencia *= base
print(f'{base}^{expoente} = {potencia}')
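# Equivalent one-liner with the built-in power operator (the loop above
# spells out the repeated multiplication): potencia = base ** expoente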
| 21.466667
| 57
| 0.624224
| 47
| 322
| 4.276596
| 0.510638
| 0.134328
| 0.139303
| 0.149254
| 0.199005
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024194
| 0.229814
| 322
| 14
| 58
| 23
| 0.78629
| 0
| 0
| 0
| 0
| 0
| 0.354037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53cb133ef9cebb74671b9c48466b895d83fd6371
| 1,313
|
py
|
Python
|
accounting/accounting/doctype/journal_entry/journal_entry.py
|
noahjacob/Accounting
|
6be90c4f82867156532ca71b1faa9d017e3269af
|
[
"MIT"
] | 1
|
2021-04-05T06:22:16.000Z
|
2021-04-05T06:22:16.000Z
|
accounting/accounting/doctype/journal_entry/journal_entry.py
|
mohsinalimat/Accounting
|
6be90c4f82867156532ca71b1faa9d017e3269af
|
[
"MIT"
] | null | null | null |
accounting/accounting/doctype/journal_entry/journal_entry.py
|
mohsinalimat/Accounting
|
6be90c4f82867156532ca71b1faa9d017e3269af
|
[
"MIT"
] | 2
|
2021-04-05T06:22:17.000Z
|
2021-04-10T06:05:36.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Noah Jacob and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import flt
from accounting.accounting.general_ledger import make_gl_entry, make_reverse_gl_entry
class JournalEntry(Document):
def validate(self):
calc_total_debit_credit(self)
if self.difference:
frappe.throw("The total debit and credit must be equal. The current difference is {}".format(self.difference))
if self.total_credit == 0 or self.total_debit == 0:
frappe.throw('Total Cannot be Zero')
if not self.accounts:
frappe.throw('Account Entries are required')
else:
self.title = self.accounts[0].account
def on_submit(self):
for entry in self.accounts:
make_gl_entry(self, entry.account, entry.debit, entry.credit)
def on_cancel(self):
# cancel gl entry
make_reverse_gl_entry(self,self.doctype,self.name)
def calc_total_debit_credit(self):
self.total_debit, self.total_credit, self.difference = 0, 0, 0
for entry in self.accounts:
self.total_debit = flt(self.total_debit) + flt(entry.debit)
self.total_credit = flt(self.total_credit) + flt(entry.credit)
self.difference = flt(self.total_debit) - flt(self.total_credit)
| 29.840909
| 113
| 0.760853
| 198
| 1,313
| 4.873737
| 0.353535
| 0.093264
| 0.07772
| 0.062176
| 0.207254
| 0.111917
| 0
| 0
| 0
| 0
| 0
| 0.009717
| 0.137852
| 1,313
| 44
| 114
| 29.840909
| 0.842756
| 0.101295
| 0
| 0.074074
| 0
| 0
| 0.10034
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.148148
| false
| 0
| 0.185185
| 0
| 0.37037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53cd4bfd1a117d3dcaa2d01161d38a59434bcf2f
| 5,608
|
py
|
Python
|
sources/datasets/client_dataset_definitions/client_dataset.py
|
M4rukku/impact_of_non_iid_data_in_federated_learning
|
c818db03699c82e42217d56f8ddd4cc2081c8bb1
|
[
"MIT"
] | null | null | null |
sources/datasets/client_dataset_definitions/client_dataset.py
|
M4rukku/impact_of_non_iid_data_in_federated_learning
|
c818db03699c82e42217d56f8ddd4cc2081c8bb1
|
[
"MIT"
] | null | null | null |
sources/datasets/client_dataset_definitions/client_dataset.py
|
M4rukku/impact_of_non_iid_data_in_federated_learning
|
c818db03699c82e42217d56f8ddd4cc2081c8bb1
|
[
"MIT"
] | null | null | null |
import functools
import gc
from abc import ABC
from sources.datasets.client_dataset_definitions.client_dataset_loaders.client_dataset_loader import ClientDatasetLoader, DatasetComponents
from sources.datasets.client_dataset_definitions.client_dataset_processors.client_dataset_processor import ClientDatasetProcessor
from sources.utils.exception_definitions import OutsideOfContextError
def throw_error_outside_context(func):
@functools.wraps(func)
def wrapper_decorator(self, *args, **kwargs):
if not self.within_context:
raise OutsideOfContextError(
"""Error: Tried to access client Dataset outside of context
manager. This might lead to data leaks and bad use of
memory. Please wrap the usage of ClientDataset.dataset_x
inside a "with statement". """)
else:
value = func(self, *args, **kwargs)
return value
return wrapper_decorator
class ClientDataset(ABC):
def __init__(self,
client_identifier: str,
client_dataset_loader: ClientDatasetLoader,
client_dataset_processor: ClientDatasetProcessor,
):
self.client_identifier = client_identifier
self.client_dataset_loader = client_dataset_loader
self.client_dataset_processor = client_dataset_processor
self._train_data = None
self._test_data = None
self._validation_data = None
self.within_context = False
def process_x(self, raw_x_batch):
"""Pre-processes each batch of features
before being fed to the model."""
return self.client_dataset_processor.process_x(raw_x_batch)
def process_y(self, raw_y_batch):
"""Pre-processes each batch of labels before being fed to the model."""
return self.client_dataset_processor.process_y(raw_y_batch)
def _lazy_initialise_data(self, data, dataset_component: DatasetComponents):
if data is None:
data = self.client_dataset_loader.load_dataset(self.client_identifier,
dataset_component)
return self.process_x(data["x"]), self.process_y(data["y"])
else:
return data
@property
@throw_error_outside_context
def training_data(self):
"""Returns the Training Data as pair of arrays containing the samples x,
and classification y"""
self._train_data = self._lazy_initialise_data(self._train_data,
DatasetComponents.TRAIN)
return self._train_data
@property
@throw_error_outside_context
def training_data_x(self):
"""Returns the Training Data as an array of samples"""
self._train_data = self._lazy_initialise_data(self._train_data,
DatasetComponents.TRAIN)
return self._train_data[0]
@property
@throw_error_outside_context
def training_data_y(self):
"""Returns the Classifications for the Training Data as array"""
self._train_data = self._lazy_initialise_data(self._train_data,
DatasetComponents.TRAIN)
return self._train_data[1]
@property
@throw_error_outside_context
def test_data(self):
"""Returns the Training Data as pair of arrays containing the samples x,
and classification y"""
self._test_data = self._lazy_initialise_data(self._test_data,
DatasetComponents.TEST)
return self._test_data
@property
@throw_error_outside_context
def test_data_x(self):
"""Returns the Test Data as an array of samples"""
self._test_data = self._lazy_initialise_data(self._test_data,
DatasetComponents.TEST)
return self._test_data[0]
@property
@throw_error_outside_context
def test_data_y(self):
"""Returns the Classifications for the Test Data as array"""
self._test_data = self._lazy_initialise_data(self._test_data,
DatasetComponents.TEST)
return self._test_data[1]
@property
@throw_error_outside_context
def validation_data(self):
"""Returns the Validation Data as pair of arrays containing the
samples x,
and classification y"""
self._validation_data = self._lazy_initialise_data(
self._validation_data, DatasetComponents.VALIDATION)
return self._validation_data
@property
@throw_error_outside_context
def validation_data_x(self):
"""Returns the Validation Data as an array of samples"""
self._validation_data = self._lazy_initialise_data(
self._validation_data, DatasetComponents.VALIDATION)
return self._validation_data[0]
@property
@throw_error_outside_context
def validation_data_y(self):
"""Returns the Classifications for the Validation Data as array"""
self._validation_data = self._lazy_initialise_data(
self._validation_data, DatasetComponents.VALIDATION)
return self._validation_data[1]
def __enter__(self):
self.within_context = True
return self  # enable "with dataset as ds:" bindings
def __exit__(self, exc_type, exc_value, exc_traceback):
self.within_context = False
self._train_data = None
self._test_data = None
self._validation_data = None
gc.collect()
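# Hypothetical usage sketch (a concrete loader/processor pair is assumed;
# SomeClientDataset is an illustrative subclass name, not part of this module):
#
#   dataset = SomeClientDataset(client_identifier="client_0",
#                               client_dataset_loader=loader,
#                               client_dataset_processor=processor)
#   with dataset:
#       x = dataset.training_data_x   # lazily loaded inside the context
#   # on exit the cached splits are dropped and gc.collect() reclaims them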
| 38.675862
| 139
| 0.652461
| 629
| 5,608
| 5.475358
| 0.17806
| 0.053426
| 0.041521
| 0.069686
| 0.625726
| 0.62079
| 0.584204
| 0.565041
| 0.407085
| 0.379791
| 0
| 0.001494
| 0.283702
| 5,608
| 144
| 140
| 38.944444
| 0.855863
| 0.129993
| 0
| 0.45098
| 0
| 0
| 0.000442
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.058824
| 0
| 0.382353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53ce7501b9e972d2df63aa7b92834c10ac73f623
| 2,377
|
py
|
Python
|
src/rmt/kinematics.py
|
mfrigerio17/robot-model-tools
|
97e25d5c4d1386c503d37a70b57400022c5b7ca0
|
[
"BSD-3-Clause"
] | 2
|
2020-06-16T09:23:46.000Z
|
2021-01-20T09:11:43.000Z
|
src/rmt/kinematics.py
|
mfrigerio17/robot-model-tools
|
97e25d5c4d1386c503d37a70b57400022c5b7ca0
|
[
"BSD-3-Clause"
] | null | null | null |
src/rmt/kinematics.py
|
mfrigerio17/robot-model-tools
|
97e25d5c4d1386c503d37a70b57400022c5b7ca0
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import numpy
import kgprim.motions as motions
import kgprim.ct.frommotions as frommotions
import kgprim.ct.repr.mxrepr as mxrepr
import motiondsl.motiondsl as motdsl
logger = logging.getLogger(__name__)
class RobotKinematics:
'''The composition of the constant poses and the joint poses of a robot.
This class is a simple aggregation of the geometry model and the joint-poses
model. By merging the two, this class has access to the full robot
kinematics.
Thanks to gr.motions.ConnectedFramesInspector, an arbitrary relative pose
between two frames on the robot can be obtained.
'''
def __init__(self, geometry, jointPoses):
self.robotGeometry = geometry
self.jointPoses = jointPoses
self.baseFrame = geometry.framesModel.linkFrames[ geometry.connectivityModel.base ]
allPoses = geometry.posesModel.mergeModel( jointPoses.jointPosesModel )
self.framesConnectivity = motions.ConnectedFramesInspector(allPoses)
def base_H_ee(kinematics, framename):
if framename not in kinematics.robotGeometry.framesModel.framesByName:
logger.error("Could not find frame '{0}' in model '{1}'".format(framename, kinematics.robotGeometry.robotName))
return None
ee = kinematics.robotGeometry.framesModel.framesByName[ framename ]
if not kinematics.framesConnectivity.hasRelativePose(ee, kinematics.baseFrame):
logger.error("Frame '{0}' and the base frame do not seem to be connected".format(framename))
return None
poseSpec = kinematics.framesConnectivity.getPoseSpec(ee, kinematics.baseFrame)
cotr = frommotions.toCoordinateTransform(poseSpec)
H = mxrepr.hCoordinatesSymbolic(cotr)
q = numpy.zeros( len(H.variables) )
H = H.setVariablesValue( valueslist=q )
return H
def serializeToMotionDSLModel(robotKinematics, ostream):
header ='''
Model {modelname}
Convention = currentFrame
'''.format(modelname=robotKinematics.robotGeometry.robotName)
ostream.write(header)
for jp in robotKinematics.jointPoses.poseSpecByJoint.values():
text = motdsl.poseSpecToMotionDSLSnippet( jp )
ostream.write(text)
ostream.write('\n')
for cp in robotKinematics.robotGeometry.byPose.values() :
text = motdsl.poseSpecToMotionDSLSnippet( cp )
ostream.write(text)
ostream.write('\n')
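# Hypothetical usage sketch (the geometry/jointPoses models and the frame
# name "ee_link" are assumptions):
#
#   kin = RobotKinematics(geometry, jointPoses)
#   H = base_H_ee(kin, "ee_link")   # 4x4 homogeneous transform, evaluated at q = 0
#   if H is not None:
#       print(H)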
| 34.955882
| 119
| 0.738746
| 264
| 2,377
| 6.613636
| 0.439394
| 0.034364
| 0.016037
| 0.018328
| 0.033219
| 0.033219
| 0
| 0
| 0
| 0
| 0
| 0.001538
| 0.179218
| 2,377
| 67
| 120
| 35.477612
| 0.893388
| 0.147665
| 0
| 0.142857
| 0
| 0
| 0.075226
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0
| 0.309524
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53d0271d7e3d9c0d0f41f088e5b38f2630dec774
| 5,318
|
py
|
Python
|
pcdet/utils/box_coder_utils.py
|
Nuri-benbarka/PCDet
|
8da66ead3bb1120db2fa919187948c8c134e85ae
|
[
"Apache-2.0"
] | 7
|
2020-11-28T03:38:51.000Z
|
2021-12-31T07:44:19.000Z
|
pcdet/utils/box_coder_utils.py
|
Nuri-benbarka/PCDet
|
8da66ead3bb1120db2fa919187948c8c134e85ae
|
[
"Apache-2.0"
] | null | null | null |
pcdet/utils/box_coder_utils.py
|
Nuri-benbarka/PCDet
|
8da66ead3bb1120db2fa919187948c8c134e85ae
|
[
"Apache-2.0"
] | 1
|
2021-04-01T15:54:21.000Z
|
2021-04-01T15:54:21.000Z
|
import numpy as np
import torch
from . import common_utils
class ResidualCoder(object):
def __init__(self, code_size=7):
super().__init__()
self.code_size = code_size
@staticmethod
def encode_np(boxes, anchors):
"""
:param boxes: (N, 7 + ?) x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param anchors: (N, 7 + ?)
:return:
"""
box_ndim = anchors.shape[-1]
xa, ya, za, wa, la, ha, ra, *cas = np.split(anchors, box_ndim, axis=-1)
xg, yg, zg, wg, lg, hg, rg, *cgs = np.split(boxes, box_ndim, axis=-1)
# need to convert boxes to z-center format
zg = zg + hg / 2
za = za + ha / 2
diagonal = np.sqrt(la ** 2 + wa ** 2)  # BEV anchor diagonal (~4.3 for a typical car anchor)
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / ha  # normalized by anchor height (~1.6 for a typical car anchor)
lt = np.log(lg / la)
wt = np.log(wg / wa)
ht = np.log(hg / ha)
rt = rg - ra
cts = [g - a for g, a in zip(cgs, cas)]
return np.concatenate([xt, yt, zt, wt, lt, ht, rt, *cts], axis=-1)
@staticmethod
def decode_np(box_encodings, anchors):
"""
:param box_encodings: (N, 7 + ?) x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param anchors: (N, 7 + ?)
:return:
"""
box_ndim = anchors.shape[-1]
xa, ya, za, wa, la, ha, ra, *cas = np.split(anchors, box_ndim, axis=-1)
xt, yt, zt, wt, lt, ht, rt, *cts = np.split(box_encodings, box_ndim, axis=-1)
# need to convert box_encodings to z-bottom format
za = za + ha / 2
diagonal = np.sqrt(la ** 2 + wa ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * ha + za
lg = np.exp(lt) * la
wg = np.exp(wt) * wa
hg = np.exp(ht) * ha
rg = rt + ra
zg = zg - hg / 2
cgs = [t + a for t, a in zip(cts, cas)]
return np.concatenate([xg, yg, zg, wg, lg, hg, rg, *cgs], axis=-1)
@staticmethod
def encode_torch(boxes, anchors):
"""
:param boxes: (N, 7 + ?) x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param anchors: (N, 7 + ?)
:return:
"""
xa, ya, za, wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1)
xg, yg, zg, wg, lg, hg, rg, *cgs = torch.split(boxes, 1, dim=-1)
za = za + ha / 2
zg = zg + hg / 2
diagonal = torch.sqrt(la ** 2 + wa ** 2)
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / ha
lt = torch.log(lg / la)
wt = torch.log(wg / wa)
ht = torch.log(hg / ha)
rt = rg - ra
cts = [g - a for g, a in zip(cgs, cas)]
return torch.cat([xt, yt, zt, wt, lt, ht, rt, *cts], dim=-1)
@staticmethod
def decode_torch(box_encodings, anchors):
"""
:param box_encodings: (N, 7 + ?) x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param anchors: (N, 7 + ?)
:return:
"""
xa, ya, za, wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1)
xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)
za = za + ha / 2
diagonal = torch.sqrt(la ** 2 + wa ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * ha + za
lg = torch.exp(lt) * la
wg = torch.exp(wt) * wa
hg = torch.exp(ht) * ha
rg = rt + ra
zg = zg - hg / 2
cgs = [t + a for t, a in zip(cts, cas)]
return torch.cat([xg, yg, zg, wg, lg, hg, rg, *cgs], dim=-1)
def decode_with_head_direction_torch(self, box_preds, anchors, dir_cls_preds,
num_dir_bins, dir_offset, dir_limit_offset, use_binary_dir_classifier=False):
"""
:param box_preds: (batch_size, N, 7 + ?), x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param anchors: (batch_size, N, 7 + ?), x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param dir_cls_preds: (batch_size, H, W, num_anchors_per_locations*2)
:return:
"""
batch_box_preds = self.decode_torch(box_preds, anchors)
if dir_cls_preds is not None:
dir_cls_preds = dir_cls_preds.view(box_preds.shape[0], box_preds.shape[1], -1)
if use_binary_dir_classifier:
dir_labels = torch.max(dir_cls_preds, dim=-1)[1]
opp_labels = (batch_box_preds[..., -1] > 0) ^ dir_labels.byte()
batch_box_preds[..., -1] += torch.where(
opp_labels,
torch.tensor(np.pi).type_as(batch_box_preds),
torch.tensor(0.0).type_as(batch_box_preds)
)
else:
dir_labels = torch.max(dir_cls_preds, dim=-1)[1]
period = (2 * np.pi / num_dir_bins)
dir_rot = common_utils.limit_period_torch(
batch_box_preds[..., 6] - dir_offset, dir_limit_offset, period
)
batch_box_preds[..., 6] = dir_rot + dir_offset + period * dir_labels.to(batch_box_preds.dtype)
return batch_box_preds
if __name__ == '__main__':
pass
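# Round-trip sanity sketch: decode(encode(boxes)) should reproduce boxes.
# The values below are illustrative (x, y, z, w, l, h, r) rows, not from the source:
#
#   anchors = np.array([[0.0, 0.0, -1.0, 1.6, 3.9, 1.56, 0.0]])
#   boxes = np.array([[0.5, -0.3, -0.9, 1.7, 4.1, 1.5, 0.1]])
#   enc = ResidualCoder.encode_np(boxes, anchors)
#   assert np.allclose(ResidualCoder.decode_np(enc, anchors), boxes)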
| 35.691275
| 118
| 0.507334
| 808
| 5,318
| 3.196782
| 0.158416
| 0.04336
| 0.045296
| 0.009292
| 0.58343
| 0.538908
| 0.538908
| 0.510647
| 0.469609
| 0.443283
| 0
| 0.018792
| 0.349568
| 5,318
| 148
| 119
| 35.932432
| 0.727956
| 0.172057
| 0
| 0.458333
| 0
| 0
| 0.001907
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0.010417
| 0.03125
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53d12a0522be9c1f94c8076c489fd23a012f880f
| 15,175
|
py
|
Python
|
utils/utils.py
|
jainajinkya/deep_bingham
|
2ea85b3ea2af579eab36567091b88a1bbf4a627b
|
[
"MIT"
] | null | null | null |
utils/utils.py
|
jainajinkya/deep_bingham
|
2ea85b3ea2af579eab36567091b88a1bbf4a627b
|
[
"MIT"
] | null | null | null |
utils/utils.py
|
jainajinkya/deep_bingham
|
2ea85b3ea2af579eab36567091b88a1bbf4a627b
|
[
"MIT"
] | null | null | null |
""" Utilities for learning pipeline."""
from __future__ import print_function
import copy
import dill
import hashlib
import itertools
import third_party.deep_bingham.bingham_distribution as ms
import math
import numpy as np
import os
import scipy
import scipy.integrate as integrate
import scipy.special
import sys
import torch
from pathos.multiprocessing import ProcessingPool as Pool
from pathos.multiprocessing import cpu_count
def convert_euler_to_quaternion(roll, yaw, pitch):
"""Converts roll, yaw, pitch to a quaternion.
"""
# roll (z), yaw (y), pitch (x)
cy = math.cos(math.radians(roll) * 0.5)
sy = math.sin(math.radians(roll) * 0.5)
cp = math.cos(math.radians(yaw) * 0.5)
sp = math.sin(math.radians(yaw) * 0.5)
cr = math.cos(math.radians(pitch) * 0.5)
sr = math.sin(math.radians(pitch) * 0.5)
w = cy * cp * cr + sy * sp * sr
x = cy * cp * sr - sy * sp * cr
y = sy * cp * sr + cy * sp * cr
z = sy * cp * cr - cy * sp * sr
quat = np.array([w, x, y, z])
quat = quat / np.linalg.norm(quat)
return quat
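# Quick sanity check: zero angles give the identity rotation,
# convert_euler_to_quaternion(0, 0, 0) -> array([1., 0., 0., 0.])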
def radians(degree_tensor):
"""
Method to convert a torch tensor of angles in degree format to radians.
Arguments:
degree_tensor (torch.Tensor): Tensor consisting of angles in degree format.
Returns:
radian_tensor (torch.Tensor): Tensor consisting of angles in radian format.
"""
radian_tensor = degree_tensor/180 * math.pi
return radian_tensor
def generate_coordinates(coords):
"""
A function that returns all possible triples of coords
Parameters:
coords: a numpy array of coordinates
Returns:
x: the first coordinate of possible triples
y: the second coordinate of possible triples
z the third coordinate of possible triples
"""
x = coords.reshape(-1, 1).repeat(1, len(coords) * len(coords)).flatten()
y = coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords))
z = coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords))
return x, y, z
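# e.g. for coords = torch.tensor([0., 1.]) this yields all 8 ordered triples:
# x = [0,0,0,0,1,1,1,1], y = [0,0,1,1,0,0,1,1], z = [0,1,0,1,0,1,0,1]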
def ensure_dir_exists(path):
""" Checks if a directory exists and creates it otherwise. """
if not os.path.exists(path):
os.makedirs(path)
def load_lookup_table(path):
"""
Loads lookup table from dill serialized file.
Returns a table-specific tuple. For the Bingham case, the tuple contains:
table_type (str):
options (dict): The options used to generate the lookup table.
res_tensor (numpy.ndarray): The actual lookup table data.
coords (numpy.ndarray): Coordinates at which lookup table was evaluated.
For the von Mises case, it contains:
options (dict): The options used to generate the lookup table.
res_tensor (numpy.ndarray): The actual lookup table data.
"""
assert os.path.exists(path), "Lookup table file not found."
with open(path, "rb") as dillfile:
return dill.load(dillfile)
def eaad_von_mises(kappas, integral_options=None):
""" Expected Absolute Angular Deviation of Bingham Random Vector
Arguments:
kappas: Von Mises kappa parameters for roll, pitch, yaw.
integral_options: Options to pass on to the scipy integrator for
computing the eaad and the bingham normalization constant.
"""
def aad(quat_a, quat_b):
acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b)))
diff_ang = 2.0 * acos_val
return diff_ang
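# aad(q_a, q_b) = 2 * arccos(|<q_a, q_b>|): the absolute angular distance
# between the two rotations; the |.| accounts for the q/-q double cover.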
if integral_options is None:
integral_options = {"epsrel": 1e-2, "epsabs": 1e-2}
param_mu = np.array([0., 0., 0.]) # radians
quat_mu = convert_euler_to_quaternion(
math.degrees(param_mu[0]), math.degrees(param_mu[1]),
math.degrees(param_mu[2])
)
param_kappa = kappas
direct_norm_const = 8.0 * (np.pi ** 3) \
* scipy.special.iv(0, param_kappa[0]) \
* scipy.special.iv(0, param_kappa[1]) \
* scipy.special.iv(0, param_kappa[2])
def integrand_aad(phi1, phi2, phi3):
return np.exp(param_kappa[0] * np.cos(phi1)) \
* np.exp(param_kappa[1] * np.cos(phi2)) \
* np.exp(param_kappa[2] * np.cos(phi3)) \
* aad(quat_mu,
convert_euler_to_quaternion(
math.degrees(phi1), math.degrees(phi2),
math.degrees(phi3)
))
eaad_int = integrate.tplquad(
integrand_aad,
0.0, 2.0 * np.pi, # phi3
lambda x: 0.0, lambda x: 2. * np.pi, # phi2
lambda x, y: 0.0, lambda x, y: 2. * np.pi, # phi1
**integral_options
)
return eaad_int[0]/direct_norm_const
def eaad_bingham(bingham_z, integral_options=None):
""" Expected Absolute Angular Deviation of Bingham Random Vector
Arguments:
bingham_z: Bingham dispersion parameter in the format expected by the
manstats BinghamDistribution class.
integral_options: Options to pass on to the scipy integrator for
computing the eaad and the bingham normalization constant.
"""
def aad(quat_a, quat_b):
# acos_val = np.arccos(np.dot(quat_a, quat_b))
# diff_ang = 2 * np.min([acos_val, np.pi - acos_val])
acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b)))
diff_ang = 2 * acos_val
return diff_ang
if integral_options is None:
integral_options = {"epsrel": 1e-4, "epsabs": 1e-4}
bd = ms.BinghamDistribution(
np.eye(4), bingham_z,
{"norm_const_mode": "numerical",
"norm_const_options": integral_options}
)
def integrand_transformed(x):
# To avoid unnecessary divisions, this term does not contain the
# normalization constant. At the end, the result of the integration is
# divided by it.
return aad(x, bd.mode) \
* np.exp(np.dot(x, np.dot(np.diag(bingham_z), x)))
def integrand(phi1, phi2, phi3):
sp1 = np.sin(phi1)
sp2 = np.sin(phi2)
return integrand_transformed(np.array([
sp1 * sp2 * np.sin(phi3),
sp1 * sp2 * np.cos(phi3),
sp1 * np.cos(phi2),
np.cos(phi1)
])) * (sp1 ** 2.) * sp2
eaad_int = integrate.tplquad(
integrand,
0.0, 2.0 * np.pi, # phi3
lambda x: 0.0, lambda x: np.pi, # phi2
lambda x, y: 0.0, lambda x, y: np.pi, # phi1
**integral_options
)
return eaad_int[0] / bd.norm_const
def build_bd_lookup_table(table_type, options, path=None):
"""
Builds a lookup table for interpolating the bingham normalization
constant. If a lookup table with the given options already exists, it is
loaded and returned instead of building a new one.
Arguments:
table_type: Type of lookup table used. May be 'uniform' or 'nonuniform'
options: Dict containing type-specific options.
If type is "uniform" this dict must contain:
"bounds" = Tuple (lower_bound, upper_bound) representing bounds.
"num_points" = Number of points per dimension.
If type is "nonuniform" this dict must contain a key "coords" which
is a numpy arrays representing the coordinates at which the
interpolation is evaluated.
path: absolute path for the lookup table (optional). The default is to
create a hash based on the options and to use this for constructing
a file name and placing the file in the precomputed folder.
"""
hash_obj = hashlib.sha256()
hash_obj.update(table_type.encode('utf-8'))
hash_obj.update(dill.dumps(options))
config_hash = hash_obj.hexdigest()
if not path:
path = os.path.dirname(__file__) \
+ "/../precomputed/lookup_{}.dill".format(config_hash)
# Load existing table or create new one.
if os.path.exists(path):
with open(path, "rb") as dillfile:
(serialized_type, serialized_options, res_table, coords) \
= dill.load(dillfile)
hash_obj = hashlib.sha256()
hash_obj.update(serialized_type)
hash_obj.update(dill.dumps(serialized_options))
file_config_hash = hash_obj.hexdigest()
assert file_config_hash == config_hash, \
"Serialized lookup table does not match given type & options."
elif table_type == "uniform":
# Number of points per axis.
(lbound, rbound) = options["bounds"]
num_points = options["num_points"]
assert num_points > 1, \
"Grid must have more than one point per dimension."
nc_options = {"epsrel": 1e-3, "epsabs": 1e-7}
coords = np.linspace(lbound, rbound, num_points)
res_table = _compute_bd_lookup_table(coords, nc_options)
with open(path, "wb") as dillfile:
dill.dump((table_type, options, res_table, coords), dillfile)
elif table_type == "nonuniform":
nc_options = {"epsrel": 1e-3, "epsabs": 1e-7}
coords = options["coords"]
res_table = _compute_bd_lookup_table(coords, nc_options)
with open(path, "wb") as dillfile:
dill.dump((table_type, options, res_table, coords), dillfile)
else:
sys.exit("Unknown lookup table type")
return res_table
def build_vm_lookup_table(options, path=None):
"""
Builds a lookup table for interpolating the bingham normalization
constant. If a lookup table with the given options already exists, it is
loaded and returned instead of building a new one.
Arguments:
options: Dict containing table options. It must contain a key "coords"
which is a numpy array representing the coordinates at which the
interpolation is evaluated.
path: absolute path for the lookup table (optional). The default is to
create a hash based on the options and to use this for constructing
a file name and placing the file in the precomputed folder.
"""
hash_obj = hashlib.sha256()
hash_obj.update(dill.dumps(options))
config_hash = hash_obj.hexdigest()
if not path:
path = os.path.dirname(__file__) \
+ "/../precomputed/lookup_{}.dill".format(config_hash)
# Load existing table or create new one.
if os.path.exists(path):
with open(path, "rb") as dillfile:
(serialized_options, res_table) \
= dill.load(dillfile)
hash_obj = hashlib.sha256()
hash_obj.update(dill.dumps(serialized_options))
file_config_hash = hash_obj.hexdigest()
assert file_config_hash == config_hash, \
"Serialized lookup table does not match given type & options."
else:
coords = options["coords"]
res_table = _compute_vm_lookup_table(coords)
with open(path, "wb") as dillfile:
dill.dump((options, res_table), dillfile)
return res_table
def _compute_bd_lookup_table(coords, nc_options):
num_points = len(coords)
pool = Pool(max(cpu_count()//2, 1))
def nc_wrapper(idx):
pt_idx = point_indices[idx]
# Indexing pt_idx in the order 2,1,0 vs. 0,1,2 has no impact
# on the result as the Bingham normalization constant is agnostic to it.
# However, the numpy integration that is used to compute it, combines
# numerical 2d and 1d integration which is why the order matters for the
# actual computation time.
#
# TODO: Make pymanstats choose best order automatically.
norm_const = ms.BinghamDistribution.normalization_constant(
np.array(
[coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], 0.]),
"numerical", nc_options)
print("Computing NC for Z=[{}, {}, {}, 0.0]: {}".format(
coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]],
norm_const))
return norm_const
point_indices = list(itertools.combinations_with_replacement(
range(0, num_points), 3))
results = pool.map(nc_wrapper, range(len(point_indices)))
res_tensor = -np.ones((num_points, num_points, num_points))
for idx_pos, pt_idx in enumerate(point_indices):
res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos]
return res_tensor
class AverageMeter(object):
"""Computes and stores the averages over a numbers or dicts of numbers.
For the dict, this class assumes that no new keys are added during
the computation.
"""
def __init__(self):
self.last_val = 0
self.avg = 0
self.count = 0
def update(self, val, n=1):
self.last_val = val
n = float(n)
if type(val) == dict:
if self.count == 0:
self.avg = copy.deepcopy(val)
else:
for key in val:
self.avg[key] *= self.count / (self.count + n)
self.avg[key] += val[key] * n / (self.count + n)
else:
self.avg *= self.count / (self.count + n)
self.avg += val * n / (self.count + n)
self.count += n
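# Typical usage sketch:
#   meter = AverageMeter()
#   meter.update({"loss": 0.50, "acc": 0.90}, n=32)
#   meter.update({"loss": 0.30, "acc": 0.95}, n=32)
#   meter.avg   # -> {"loss": 0.40, "acc": 0.925}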
def _compute_vm_lookup_table(coords):
num_points = len(coords)
pool = Pool()
def nc_wrapper(idx):
cur_pt_idx = point_indices[idx]
log_norm_const = np.log(8.0) + (3. * np.log(np.pi)) \
+ np.log(scipy.special.iv(0, coords[cur_pt_idx[0]])) \
+ np.log(scipy.special.iv(0, coords[cur_pt_idx[1]])) \
+ np.log(scipy.special.iv(0, coords[cur_pt_idx[2]]))
print("Computing NC for kappas=[{}, {}, {}]: {}".format(
coords[cur_pt_idx[2]], coords[cur_pt_idx[1]], coords[cur_pt_idx[0]],
log_norm_const))
return log_norm_const
point_indices = list(itertools.combinations_with_replacement(
range(0, num_points), 3))
results = pool.map(nc_wrapper, range(len(point_indices)))
res_tensor = -np.ones((num_points, num_points, num_points))
for idx_pos, pt_idx in enumerate(point_indices):
res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos]
return res_tensor
def vec_to_bingham_z_many(y):
z = -torch.exp(y).cumsum(1)[:, [2, 1, 0]].unsqueeze(0)
return z
def vec_to_bingham_z(y):
z = -torch.exp(y).cumsum(0)[[2, 1, 0]].unsqueeze(0)
# debugging aid: the entries of z should come out sorted ascending
if not all(z[0][:-1] <= z[0][1:]):
print(z)
return z
| 34.805046
| 83
| 0.623394
| 2,148
| 15,175
| 4.244413
| 0.161546
| 0.029067
| 0.01053
| 0.018427
| 0.561259
| 0.51563
| 0.490183
| 0.480641
| 0.451903
| 0.434244
| 0
| 0.019759
| 0.266293
| 15,175
| 435
| 84
| 34.885057
| 0.799084
| 0.276705
| 0
| 0.370968
| 0
| 0
| 0.049261
| 0.005651
| 0
| 0
| 0
| 0.002299
| 0.016129
| 1
| 0.08871
| false
| 0
| 0.064516
| 0.008065
| 0.233871
| 0.016129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53d21a61b1f0af656cef94761b86e69e5114d1b2
| 8,108
|
py
|
Python
|
cli_ui.py
|
obatsis/Distributed-NTUA
|
0bf39163b64aaefb2576be01337e0ec6e026ce6d
|
[
"MIT"
] | null | null | null |
cli_ui.py
|
obatsis/Distributed-NTUA
|
0bf39163b64aaefb2576be01337e0ec6e026ce6d
|
[
"MIT"
] | null | null | null |
cli_ui.py
|
obatsis/Distributed-NTUA
|
0bf39163b64aaefb2576be01337e0ec6e026ce6d
|
[
"MIT"
] | null | null | null |
import requests
import os
from PyInquirer import style_from_dict, Token, prompt
import sys
import utils.config as config
import utils.ends as ends
from utils.colorfy import *
from auto.testing import test_trans
import time
import json
style = style_from_dict({
Token.QuestionMark: '#E91E63 bold',
Token.Selected: '#673AB7 bold',
Token.Instruction: '#0bf416',
Token.Answer: '#2196f3 bold',
Token.Question: '#0bf416 bold',
})
def client(ip, port):
os.system('clear')
cyan('What a beautiful day to enter the cult...')
baseURL = 'http://' + ip + ':' + port
while True:
print('----------------------------------------------------------------------')
method_q = {
'type': 'list',
'name': 'method',
'message': 'Select action:',
'choices': ['Network Overlay', \
'Insert a Song', \
'Search for a Song', \
'Delete a Song', \
'Depart from Chord', \
'Run automated test', \
'Help', \
'Exit']
}
method_a = prompt(method_q, style=style)['method']
os.system('clear')
if method_a == 'Depart from Chord':
print(cyan("Preparing Node to depart from Chord..."))
try:
response = requests.get(baseURL + ends.c_depart)
if response.status_code == 200:
if response.text == "Left the Chord":
print(response.text)
print(green("Node is out of Toychord network"))
else:
print(red(response.text))
else:
print(red("Got a bad response status code " + str(response.status_code)))
except:
print(red("Could not establish connection with Node. Node didnt depart..."))
print(red("Unfortunately exiting..."))
break
elif method_a == 'Insert a Song':
print('Insert a Title-Value pair for the song you wish to insert')
fetch_q = [
{
'type': 'input',
'name': 'key',
'message': 'Song Title:',
'filter': lambda val: str(val)
},
{
'type': 'input',
'name': 'value',
'message': 'Value:',
'filter': lambda val: str(val)
}
]
fetch_a = prompt(fetch_q, style=style)
print(cyan("Inserting Song: ") + fetch_a['key'] + cyan("..."))
try:
response = requests.post(baseURL + ends.c_insert ,data={'key':fetch_a['key'],'value':fetch_a['value']})
if response.status_code == 200:
print(cyan("Inserted by node with id: ") + green(response.text.split(" ")[0]))
else:
print(red("Got a bad response status code " + str(response.status_code)))
except:
print(red("Could not establish connection with Node. Song wasnt inserted..."))
print(red("Unfortunately exiting..."))
exit(0)
continue
elif method_a == 'Delete a Song':
print('Insert the Song Title you wish to delete')
fetch_q = [
{
'type': 'input',
'name': 'key',
'message': 'Song Title:',
'filter': lambda val: str(val)
}]
fetch_a = prompt(fetch_q, style=style)
print(cyan("Deleting Song: ") + fetch_a['key'] + cyan("..."))
try:
response = requests.post(baseURL + ends.c_delete ,data={'key':fetch_a['key']})
if response.status_code == 200 and response.text.split(" ")[1] != "@!@":
# print(cyan("Deleting Song: ") + green(response.text.split(" ")[1]) + )
print(cyan("Deleted by node with id: ") + green(response.text.split(" ")[0]))
else:
print(yellow("Song doesn't exist in the Chord"))
print(yellow("Couldn't delete it"))
except:
print(red("Could not establish connection with Node. Song wasnt deleted..."))
print(red("Unfortunately exiting..."))
exit(0)
continue
elif method_a == 'Search for a Song':
print('Insert the Song Title you wish to Search or * to get all songs of the Chord')
fetch_q = [
{
'type': 'input',
'name': 'key',
'message': 'Song Title:',
'filter': lambda val: str(val)
}]
fetch_a = prompt(fetch_q, style=style)
if fetch_a['key'] == "*":
print(cyan("Fetching all the songs of the Chord..."))
try:
response = requests.get(baseURL + ends.c_query_star)
if response.status_code == 200:
nodes_list = json.loads(response.text)
# print(green(response.text))
# print(cyan()))
for node in nodes_list["res"]:
print(header("\n" + node["uid"]) + " " + underline(node["ip"] + ":" + node["port"]))
for song in node["song"]:
print(" -" + green(song["key"]) + " " + song["value"])
else:
print(yellow("Something went Wrong...") + response.status_code)
except:
print(red("Could not establish connection with Node. Couldnt search for song..."))
print(red("Unfortunately exiting..."))
exit(0)
else:
print(cyan("Searching Song: ") + fetch_a['key'] + cyan("..."))
try:
response = requests.post(baseURL + ends.c_query ,data={'key':fetch_a['key']})
if response.status_code == 200 and response.text.split(" ")[1] != "@!@":
print("Song found in node with id: ",green(response.text.split(" ")[0]))
print("Song value: " + green(response.text.split(" ")[1]))
else:
print(yellow("Song doesnt exist in the Chord"))
except:
print(red("Could not establish connection with Node. Couldnt search for song..."))
print(red("Unfortunately exiting..."))
exit(0)
continue
elif method_a == 'Network Overlay':
print(cyan("Initiating Network Overlay..."))
try:
response = requests.get(baseURL + ends.c_overlay)
if response.status_code == 200:
nodes_list = json.loads(response.text)
print('\n')
for node in nodes_list["res"]:
print(green(node["ip"] + ":" + node["port"]), end = '')
if node != nodes_list["res"][-1]:
print(" -> ", end = '')
print('\n')
else:
print(red("Got a bad response status code " + str(response.status_code)))
except:
print(red("Could not establish connection with Node..."))
print(red("Unfortunately exiting..."))
exit(0)
continue
elif method_a == 'Help':
print('-------------------------------- Help --------------------------------\n')
overlayHelp=header("Overlay: ") + cyan("This function recreates and prints the current network topology (e.g. Node1 -> Node2 -> ...)\n")
insertHelp=header("Insert Song: ") + cyan("This function expects a Song Title and a Song Value and inserts them in the Chord\n")
queryHelp=header("Search Song: ") + cyan("This function expects a Song Title and returns the Node in which the song is stored and the value of the song\n")
deleteHelp=header("Delete Song: ") + cyan("This function expects a Song Title and returns the Node which deleted the song\n")
departHelp=header("Depart: ") + cyan("This function makes the node connected to this cli leave the Chord\n")
autoTests=header("Run automated tests: ") + cyan("This function expects a test number (1=insert, 2=query, 3=requests), runs the test and returns the chord throughput")
print( " -",overlayHelp,"\n"
" -",insertHelp,"\n",
"-",queryHelp,"\n",
"-",deleteHelp,"\n",
"-",departHelp,"\n",
"-",autoTests,"\n",
)
continue
elif method_a == 'Run automated test':
print('Select which test you wish to run (1 = insert, 2 = query, 3 = requests)')
fetch_q = [
{
'type': 'input',
'name': 'test_n',
'message': 'Test:',
'filter': lambda val: str(val)
}
]
fetch_a = prompt(fetch_q, style=style)
test_number = fetch_a['test_n'] if fetch_a['test_n'] else 's'
if test_number not in ('1', '2', '3'):
print(yellow("Wrong test number (give 1, 2 or 3)"))
continue
print(cyan("Running automated test: ") + ("insert" if test_number == '1' else ("query" if test_number == '2' else "requests")) + cyan("..."))
print(blue(test_trans(test_number)))
print(cyan("Done!"))
continue
elif method_a == 'Exit':
os.system('clear')
break
else:
os.system('clear')
continue
if __name__ == '__main__':
if len(sys.argv) < 3:
print("!! you must tell me the port. Ex. -p 5000 !!")
exit(0)
if sys.argv[1] in ("-p", "-P"):
my_port = sys.argv[2]
my_ip = os.popen('ip addr show ' + config.NETIFACE + ' | grep "\<inet\>" | awk \'{ print $2 }\' | awk -F "/" \'{ print $1 }\'').read().strip()
client(my_ip, my_port)
| 34.21097
| 170
| 0.604465
| 1,073
| 8,108
| 4.492078
| 0.201305
| 0.026556
| 0.048548
| 0.024896
| 0.473859
| 0.442116
| 0.426556
| 0.408714
| 0.385685
| 0.370954
| 0
| 0.012168
| 0.209423
| 8,108
| 236
| 171
| 34.355932
| 0.739782
| 0.013937
| 0
| 0.429907
| 0
| 0.009346
| 0.365912
| 0.017019
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004673
| false
| 0
| 0.046729
| 0
| 0.051402
| 0.242991
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53d70d3013eebf509bd463bbe169adf9205bf22b
| 4,367
|
py
|
Python
|
api_youtube.py
|
OnoArnaldo/PythonApiYoutube
|
8507eac234cd3d05a223db3beebd10412505bcf8
|
[
"MIT"
] | 2
|
2019-11-15T16:46:36.000Z
|
2020-11-30T07:34:26.000Z
|
api_youtube.py
|
OnoArnaldo/PythonApiYoutube
|
8507eac234cd3d05a223db3beebd10412505bcf8
|
[
"MIT"
] | null | null | null |
api_youtube.py
|
OnoArnaldo/PythonApiYoutube
|
8507eac234cd3d05a223db3beebd10412505bcf8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import json
import urllib2
import codecs
BASE_DIR = os.path.dirname(__file__)
BASE_URL = 'https://www.googleapis.com/youtube/v3/'
API_CHANNELS = 'channels'
API_PLAYLIST = 'playlistItems'
API_KEY = 'YOUR KEY'
CHANNELS = [
'videosimprovaveis',
'nerdologia',
'Kurzgesagt',
'1veritasium',
'minutephysics',
'xadrezverbal',
'estevaoslow',
'Vsauce',
'braincraftvideo',
'CienciaTodoDia',
]
class UrlEncoder(object):
API_URL = ''
def __init__(self, **kwargs):
self.args = kwargs
def _parms(self):
args = []
for k, v in self.args.items():
args.append(k + '=' + str(v))
return '&'.join(args)
def get(self):
parms = '?' + self._parms() if len(self.args) else ''
return self.API_URL + parms
def set(self, key, value):
if value:
self.args[key] = value
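# Usage sketch (hypothetical values; note that dict ordering of the query
# parameters is not guaranteed on Python 2):
#
#   UrlEncoder.API_URL = BASE_URL + API_CHANNELS
#   enc = UrlEncoder(part='contentDetails')
#   enc.set('key', API_KEY)
#   enc.get()  # -> 'https://www.googleapis.com/youtube/v3/channels?part=contentDetails&key=...'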
class ApiChannel(object):
URL = BASE_URL + API_CHANNELS
FILE_NAME = os.path.join(BASE_DIR, 'channels.json')
def __init__(self, channels):
self.encoder = self.build_encoder(API_KEY)
self.channels = channels
def run(self):
data = self.generate_data()
self.save(data)
def generate_data(self):
encoder = self.encoder
ret = {}
for channel in self.channels:
encoder.set('forUsername', channel)
data = self.get_data(encoder.get())
ret[channel] = self.get_playlist_id(data)
return ret
def get_data(self, url):
url = urllib2.urlopen(url)
data = url.read()
return json.loads(data)
def get_playlist_id(self, data):
items = data.get('items')
content = items[0].get('contentDetails')
playlists = content.get('relatedPlaylists')
return playlists.get('uploads')
def save(self, data):
with open(self.FILE_NAME, 'w') as f:
f.write(json.dumps(data))
f.close()
def build_encoder(self, api_key):
UrlEncoder.API_URL = self.URL
encoder = UrlEncoder()
encoder.set('key', api_key)
encoder.set('part', 'contentDetails')
return encoder
class ApiPlayList(object):
URL = BASE_URL + API_PLAYLIST
FILE_NAME = os.path.join(BASE_DIR, 'playlist.txt')
def __init__(self, channels):
self.channels = channels
self.encoder = self.build_encoder(API_KEY)
def run(self):
data = self.generate_data()
self.save(data)
def generate_data(self):
encoder = self.encoder
channels = self.channels
ret = []
for key in channels:
encoder.set('playlistId', channels[key])
data = self.get_data(encoder.get())
ret += [[key] + self.get_info(data)]
return ret
def get_info(self, data):
items = data.get('items')
snippet = items[0].get('snippet')
title = snippet.get('title')
published_at = snippet.get('publishedAt')
description = snippet.get('description')
return [title, published_at, description]
def save(self, data):
fname = os.path.join(BASE_DIR, 'last_update.txt')
with codecs.open(fname, 'w', encoding='utf-8') as f:
for key, title, published_at, description in sorted(data, key=lambda x: x[2]):
f.write('{}: {} - {}\n'.format(published_at[:10], key, title))
f.close()
def get_data(self, url):
url = urllib2.urlopen(url)
data = url.read()
return json.loads(data)
def build_encoder(self, api_key):
UrlEncoder.API_URL = self.URL
encoder = UrlEncoder()
encoder.set('key', api_key)
encoder.set('part', 'snippet')
encoder.set('maxResults', '1')
return encoder
@classmethod
def import_channels(cls, fname):
with open(fname, 'r') as f:
text = f.read()
f.close()
return json.loads(text)
if __name__ == '__main__':
args = sys.argv[1:]
if '-channel' in args:
channel = ApiChannel(CHANNELS)
channel.run()
if '-playlist' in args:
channels = ApiPlayList.import_channels(ApiChannel.FILE_NAME)
play_list = ApiPlayList(channels)
play_list.run()
| 24.672316
| 90
| 0.587589
| 520
| 4,367
| 4.773077
| 0.242308
| 0.032232
| 0.024174
| 0.016922
| 0.342466
| 0.293715
| 0.27357
| 0.230862
| 0.197824
| 0.197824
| 0
| 0.004473
| 0.283261
| 4,367
| 176
| 91
| 24.8125
| 0.788498
| 0.004809
| 0
| 0.343511
| 0
| 0
| 0.097376
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145038
| false
| 0
| 0.061069
| 0
| 0.351145
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53d750a045a189f59e633e7a1ce562b90e7d821b
| 2,744
|
py
|
Python
|
python_and_ebpf/train.py
|
be4r/ssh-miner-detection
|
47003db1d9f72ae44d5a27e92d0109d5111bec35
|
[
"MIT"
] | null | null | null |
python_and_ebpf/train.py
|
be4r/ssh-miner-detection
|
47003db1d9f72ae44d5a27e92d0109d5111bec35
|
[
"MIT"
] | null | null | null |
python_and_ebpf/train.py
|
be4r/ssh-miner-detection
|
47003db1d9f72ae44d5a27e92d0109d5111bec35
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from sklearn.tree import DecisionTreeClassifier
import pickle
import numpy as np
no = [b'runc:[2:INIT]', b'containerssh-ag', b'apt',b'dpkg']
class model:
def __init__(self):
self.d = DecisionTreeClassifier()
def load(self, filename = 'model.p'):
try:
f = open(filename, 'rb')
self.d = pickle.load(f)
if not isinstance(self.d, DecisionTreeClassifier):
self.d = None  # was "d = None", which only rebound a local name
f.close()
except:
return
def save(self, filename = 'model.p'):
f = open(filename, 'wb')
pickle.dump(self.d, f)
f.close()
def fit(self, x, y):
self.d.fit(x, y)
def predict(self, x):
return self.d.predict(x)
def accuracy(self, y_pred, y_ref):
return sum(np.array(y_pred) == np.array(y_ref)) / len(y_ref)
def f1(self, y_pred, y_ref):
tp = np.sum((np.array(y_pred) == 1) * (np.array(y_ref) == 1))
fp = np.sum((np.array(y_pred) == 1) * (np.array(y_ref) == 0))
fn = np.sum((np.array(y_pred) == 0) * (np.array(y_ref) == 1))
# F1 = tp / (tp + (fp + fn) / 2); the counts must be summed, not left as masks
return tp / (tp + (fp + fn) / 2)
def ngrams(array, size = 25, overlacing = False):
res = [array[i:i+size] for i in range(0, len(array)//size * size, 1 if overlacing else size)]
# the sanity check below was dead code after an early return; run it before returning
if sum([len(i) == size for i in res]) != len(res):
raise Exception('wtf')
return res
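# e.g. ngrams([1, 2, 3, 4, 5, 6, 7], size=3) -> [[1, 2, 3], [4, 5, 6]]
# (the trailing partial window is dropped; overlacing=True slides by 1 instead)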
def gen_train(a, is_miner):
#x1,y1,x2,y2 = train_test_split(x,y,0.05)
x = ngrams(a)
y = [1 if is_miner else 0,] * len(x)
return x,y
def train_on_logs(*filenames, is_miner):
classifier = model()
#classifier.load()
x, y = [], []
for id, filename in enumerate(filenames):
l = []
with open(filename, 'r') as f:
l = eval(''.join(f))
codes = []
for i in l:
if i[0] not in no:
codes.append(i[1])
x_, y_ = gen_train(codes, is_miner[id])
x.append(x_)
y.append(y_)
print(x,y)
#classifier.fit(x,y)
#classifier.save()
def predict_on_logs(*filenames, is_miner):
classifier = model()
classifier.load()
x, y = [], []
for id, filename in enumerate(filenames):
l = []
with open(filename, 'r') as f:
l = eval(''.join(f))
codes = []
for i in l:
if i[0] not in no:
codes.append(i[1])
x_, y_ = gen_train(codes, is_miner[id])
x.append(x_)
y.append(y_)
y_pred = classifier.predict(x)
print("Accuracy: ", classifier.accuracy(y_pred, y))
print("F1: ",classifier.f1(y_pred, y))
def predict_on_trace(trace, A = 0.9):
classifier = model()
classifier.load()
# the original referenced undefined "filenames"/"is_miner"; build the
# feature windows directly from the given trace instead
codes = []
for i in trace:
if i[0] not in no:
codes.append(i[1])
x = ngrams(codes)
y_pred = classifier.predict(x)
acc = sum(np.array(y_pred)) / len(y_pred)
return acc > A
| 24.283186
| 95
| 0.622085
| 478
| 2,744
| 3.453975
| 0.217573
| 0.018171
| 0.053301
| 0.04361
| 0.513022
| 0.470018
| 0.470018
| 0.470018
| 0.470018
| 0.411872
| 0
| 0.016719
| 0.193513
| 2,744
| 112
| 96
| 24.5
| 0.729327
| 0.041545
| 0
| 0.460674
| 0
| 0
| 0.027429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.134831
| false
| 0
| 0.033708
| 0.022472
| 0.258427
| 0.033708
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53d94f243224facafe883070b86bd959182c98e6
| 9,455
|
py
|
Python
|
repokid/tests/test_roledata.py
|
tomdev/repokid
|
e1a4839290bafccfaa304d87bbdeae85b9dc80aa
|
[
"Apache-2.0"
] | null | null | null |
repokid/tests/test_roledata.py
|
tomdev/repokid
|
e1a4839290bafccfaa304d87bbdeae85b9dc80aa
|
[
"Apache-2.0"
] | null | null | null |
repokid/tests/test_roledata.py
|
tomdev/repokid
|
e1a4839290bafccfaa304d87bbdeae85b9dc80aa
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from mock import patch
import repokid.utils.roledata
from repokid.role import Role
from repokid.tests.test_repokid_cli import ROLE_POLICIES, ROLES
AARDVARK_DATA = {
"arn:aws:iam::123456789012:role/all_services_used": [
{"lastAuthenticated": int(time.time()) * 1000,
"serviceNamespace": "iam"},
{"lastAuthenticated": int(time.time()) * 1000,
"serviceNamespace": "s3"}],
"arn:aws:iam::123456789012:role/unused_ec2": [
{"lastAuthenticated": int(time.time()) * 1000,
"serviceNamespace": "iam"},
{"lastAuthenticated": 0,
"serviceNamespace": "ec2"}],
"arn:aws:iam::123456789012:role/young_role": [
{"lastAuthenticated": int(time.time()) * 1000,
"serviceNamespace": "iam"},
{"lastAuthenticated": int(time.time()) * 1000,
"serviceNamespace": "s3"}]
}
class TestRoledata(object):
@patch('repokid.utils.roledata.expand_policy')
@patch('repokid.utils.roledata.get_actions_from_statement')
@patch('repokid.utils.roledata.all_permissions')
def test_get_role_permissions(self, mock_all_permissions, mock_get_actions_from_statement, mock_expand_policy):
test_role = Role(ROLES[0])
all_permissions = ['ec2:associateaddress', 'ec2:attachvolume', 'ec2:createsnapshot', 's3:createbucket',
's3:getobject']
# empty policy to make sure we get the latest
test_role.policies = [{'Policy': ROLE_POLICIES['all_services_used']}, {'Policy': ROLE_POLICIES['unused_ec2']}]
mock_all_permissions.return_value = all_permissions
mock_get_actions_from_statement.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms']
mock_expand_policy.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms']
permissions = repokid.utils.roledata._get_role_permissions(test_role)
assert permissions == set(ROLE_POLICIES['unused_ec2']['ec2_perms'])
@patch('repokid.hooks.call_hooks')
def test_get_repoable_permissions(self, mock_call_hooks):
minimum_age = 1
repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES = ['service_2']
repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS = ['service_1:action_3', 'service_1:action_4']
hooks = {}
permissions = ['service_1:action_1', 'service_1:action_2', 'service_1:action_3', 'service_1:action_4',
'service_2:action_1', 'service_3:action_1', 'service_3:action_2', 'service_4:action_1',
'service_4:action_2']
# service_1 and service_2 both used more than a day ago, which is outside of our test filter for age
aa_data = [{'serviceNamespace': 'service_1', 'lastAuthenticated': (time.time() - 90000) * 1000},
{'serviceNamespace': 'service_2', 'lastAuthenticated': (time.time() - 90000) * 1000},
{'serviceNamespace': 'service_3', 'lastAuthenticated': time.time() * 1000}]
no_repo_permissions = {'service_4:action_1': time.time() - 1, 'service_4:action_2': time.time() + 1000}
repoable_decision = repokid.utils.roledata.RepoablePermissionDecision()
repoable_decision.repoable = True
mock_call_hooks.return_value = {'potentially_repoable_permissions': {'service_1:action_1': repoable_decision,
'service_1:action_2': repoable_decision,
'service_4:action_1': repoable_decision}}
repoable_permissions = repokid.utils.roledata._get_repoable_permissions(None, 'test_name', permissions, aa_data,
no_repo_permissions, minimum_age,
hooks)
# service_1:action_3 and action_4 are unsupported actions, service_2 is an unsupported service, service_3
# was used too recently, service_4 action 2 is in no_repo_permissions and not expired
assert repoable_permissions == set(['service_1:action_1', 'service_1:action_2', 'service_4:action_1'])
@patch('repokid.utils.roledata._get_role_permissions')
@patch('repokid.utils.roledata._get_repoable_permissions')
@patch('repokid.hooks.call_hooks')
def test_calculate_repo_scores(self, mock_call_hooks, mock_get_repoable_permissions, mock_get_role_permissions):
roles = [Role(ROLES[0]), Role(ROLES[1]), Role(ROLES[2])]
roles[0].disqualified_by = []
roles[0].aa_data = 'some_aa_data'
# disqualified by a filter
roles[1].policies = [{'Policy': ROLE_POLICIES['unused_ec2']}]
roles[1].disqualified_by = ['some_filter']
roles[1].aa_data = 'some_aa_data'
# no AA data
roles[2].policies = [{'Policy': ROLE_POLICIES['all_services_used']}]
roles[2].disqualified_by = []
roles[2].aa_data = None
hooks = {}
mock_get_role_permissions.side_effect = [['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy',
'ec2:AllocateHosts', 'ec2:AssociateAddress'],
['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'],
['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']]
mock_call_hooks.return_value = set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])
mock_get_repoable_permissions.side_effect = [set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])]
minimum_age = 90
repokid.utils.roledata._calculate_repo_scores(roles, minimum_age, hooks)
assert roles[0].repoable_permissions == 2
assert roles[0].repoable_services == ['iam']
assert roles[1].repoable_permissions == 0
assert roles[1].repoable_services == []
assert roles[2].repoable_permissions == 0
assert roles[2].repoable_services == []
def test_get_repoed_policy(self):
policies = ROLE_POLICIES['all_services_used']
repoable_permissions = set(['iam:addroletoinstanceprofile', 'iam:attachrolepolicy', 's3:createbucket'])
rewritten_policies, empty_policies = repokid.utils.roledata._get_repoed_policy(policies, repoable_permissions)
assert rewritten_policies == {'s3_perms': {'Version': '2012-10-17',
'Statement': [{'Action': ['s3:deletebucket'],
'Resource': ['*'],
'Effect': 'Allow'}]}}
assert empty_policies == ['iam_perms']
def test_find_newly_added_permissions(self):
old_policy = ROLE_POLICIES['all_services_used']
new_policy = ROLE_POLICIES['unused_ec2']
new_perms = repokid.utils.roledata.find_newly_added_permissions(old_policy, new_policy)
assert new_perms == set(['ec2:allocatehosts', 'ec2:associateaddress'])
def test_convert_repoable_perms_to_perms_and_services(self):
all_perms = ['a:j', 'a:k', 'b:l', 'c:m', 'c:n']
repoable_perms = ['b:l', 'c:m']
expected_repoed_services = ['b']
expected_repoed_permissions = ['c:m']
assert (repokid.utils.roledata._convert_repoable_perms_to_perms_and_services(all_perms, repoable_perms) ==
(expected_repoed_permissions, expected_repoed_services))
def test_convert_repoed_service_to_sorted_perms_and_services(self):
repoed_services = ['route53', 'ec2', 's3:abc', 'dynamodb:def', 'ses:ghi', 'ses:jkl']
expected_services = ['ec2', 'route53']
expected_permissions = ['dynamodb:def', 's3:abc', 'ses:ghi', 'ses:jkl']
assert repokid.utils.roledata._convert_repoed_service_to_sorted_perms_and_services(repoed_services) == (
expected_permissions, expected_services
)
def test_get_epoch_authenticated(self):
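        # Millisecond epochs (13 digits) are normalized to seconds, second epochs
        # (10 digits) pass through unchanged, and anything shorter is rejected.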
        assert repokid.utils.roledata._get_epoch_authenticated(1545787620000) == (1545787620, True)
        assert repokid.utils.roledata._get_epoch_authenticated(1545787620) == (1545787620, True)
        assert repokid.utils.roledata._get_epoch_authenticated(154578762) == (None, False)
def test_filter_scheduled_repoable_perms(self):
assert repokid.utils.roledata._filter_scheduled_repoable_perms(
['a:b', 'a:c', 'b:a'], ['a:c', 'b']) == ['a:c', 'b:a']
assert repokid.utils.roledata._filter_scheduled_repoable_perms(
['a:b', 'a:c', 'b:a'], ['a', 'b']) == ['a:b', 'a:c', 'b:a']
assert repokid.utils.roledata._filter_scheduled_repoable_perms(
['a:b', 'a:c', 'b:a'], ['a:b', 'a:c']) == ['a:b', 'a:c']
| 51.950549
| 120
| 0.639662
| 1,068
| 9,455
| 5.368914
| 0.189139
| 0.046041
| 0.076735
| 0.0361
| 0.425009
| 0.295954
| 0.252006
| 0.154168
| 0.103767
| 0.069585
| 0
| 0.036081
| 0.237864
| 9,455
| 181
| 121
| 52.237569
| 0.759645
| 0.100793
| 0
| 0.12
| 0
| 0
| 0.242156
| 0.069946
| 0
| 0
| 0
| 0
| 0.152
| 1
| 0.072
| false
| 0
| 0.04
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53da2e6911920cb3cc789891eed24c27f4a325c6
| 1,838
|
py
|
Python
|
DL_Scripts/image_recognition.py
|
Matnay/KPIT_Deep_Learning
|
14f3815fc2829db9bede86c31f23e721f6423f79
|
[
"MIT"
] | 1
|
2020-05-01T15:28:12.000Z
|
2020-05-01T15:28:12.000Z
|
DL_Scripts/image_recognition.py
|
Matnay/KPIT_Deep_Learning
|
14f3815fc2829db9bede86c31f23e721f6423f79
|
[
"MIT"
] | null | null | null |
DL_Scripts/image_recognition.py
|
Matnay/KPIT_Deep_Learning
|
14f3815fc2829db9bede86c31f23e721f6423f79
|
[
"MIT"
] | null | null | null |
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import String
from cv_bridge import CvBridge
import cv2
import numpy as np
import tensorflow as tf
import classify_image
class RosTensorFlow:
def __init__(self):
classify_image.maybe_download_and_extract()
self._session = tf.Session()
classify_image.create_graph()
self._cv_bridge = CvBridge()
self._sub = rospy.Subscriber('/usb_cam/image_raw', Image, self.callback, queue_size=1)
self._pub = rospy.Publisher('result', String, queue_size=1)
self.score_threshold = rospy.get_param('~score_threshold', 0.1)
self.use_top_k = rospy.get_param('~use_top_k', 5)
def callback(self, image_msg):
cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
# copy from
# classify_image.py
        image_data = cv2.imencode('.jpg', cv_image)[1].tobytes()
# Creates graph from saved GraphDef.
softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')
predictions = self._session.run(
softmax_tensor, {'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = classify_image.NodeLookup()
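        # Indices of the top-k class scores, ordered from highest to lowest.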
top_k = predictions.argsort()[-self.use_top_k:][::-1]
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
if score > self.score_threshold:
rospy.loginfo('%s (score = %.5f)' % (human_string, score))
self._pub.publish(human_string)
def main(self):
rospy.spin()
if __name__ == '__main__':
classify_image.setup_args()
rospy.init_node('rostensorflow')
tensor = RosTensorFlow()
tensor.main()
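# Usage sketch (hedged: assumes a running ROS master, a usb_cam publisher on
# /usb_cam/image_raw, and that this script lives in a package on the ROS path;
# <your_package> is a placeholder):
#   $ rosrun <your_package> image_recognition.py
# Labels that clear score_threshold are published as std_msgs/String on "result".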
| 36.039216
| 94
| 0.661589
| 237
| 1,838
| 4.805907
| 0.383966
| 0.068481
| 0.018437
| 0.024583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009887
| 0.229597
| 1,838
| 50
| 95
| 36.76
| 0.794492
| 0.057127
| 0
| 0
| 0
| 0
| 0.072917
| 0.012153
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.2
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53dd0a97f61bddb70bdbb1861eb823497caf7e52
| 21,202
|
py
|
Python
|
plugins/grouputils.py
|
aviskumar/speedo
|
758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/grouputils.py
|
aviskumar/speedo
|
758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/grouputils.py
|
aviskumar/speedo
|
758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa
|
[
"BSD-3-Clause"
] | 3
|
2021-10-12T08:17:01.000Z
|
2021-12-21T01:17:54.000Z
|
# Copyright (C) 2020-2021 by TeamSpeedo@Github, < https://github.com/TeamSpeedo >.
#
# This file is part of < https://github.com/TeamSpeedo/FridayUserBot > project,
# and is released under the "GNU v3.0 License Agreement".
# Please see < https://github.com/TeamSpeedo/blob/master/LICENSE >
#
# All rights reserved.
import asyncio
import os
import time
from asyncio import sleep
from pyrogram.types import ChatPermissions
import pyrogram
from main_start.core.decorators import speedo_on_cmd
from main_start.helper_func.basic_helpers import (
edit_or_reply,
edit_or_send_as_file,
get_text,
get_user,
is_admin_or_owner,
)
from main_start.helper_func.logger_s import LogIt
from main_start.helper_func.plugin_helpers import (
convert_to_image,
convert_vid_to_vidnote,
generate_meme,
)
@speedo_on_cmd(
["silentpin"],
only_if_admin=True,
cmd_help={
"help": "Pin Message Without Sending Notification To Members!",
"example": "{ch}silentpin (reply to message)",
},
)
async def spin(client, message):
engine = message.Engine
if not message.reply_to_message:
await edit_or_reply(message, engine.get_string("REPLY_TO_PIN"))
try:
await client.pin_chat_message(
message.chat.id,
message.reply_to_message.message_id,
disable_notification=True,
)
except BaseException as e:
await edit_or_reply(
message, engine.get_string("UNABLE_TO_PIN").format(e)
)
return
await edit_or_reply(message, engine.get_string("PINNED"))
@speedo_on_cmd(
["pinloud", "pin"],
only_if_admin=True,
cmd_help={
"help": "Pin Message With Sending Notification To Members!",
"example": "{ch}pin (reply to messages)",
},
)
async def lpin(client, message):
engine = message.Engine
if not message.reply_to_message:
await edit_or_reply(message, engine.get_string("REPLY_TO_PIN"))
try:
await client.pin_chat_message(
message.chat.id, message.reply_to_message.message_id
)
except BaseException as e:
await edit_or_reply(
message, engine.get_string("UNABLE_TO_PIN").format(e)
)
return
await edit_or_reply(message, engine.get_string("PINNED"))
@speedo_on_cmd(
["unpin", "rmpins"],
only_if_admin=True,
cmd_help={"help": "Unpin All Pinned Messages!", "example": "{ch}rmpins"},
)
async def dpins(client, message):
engine = message.Engine
await client.unpin_all_chat_messages(message.chat.id)
await edit_or_reply(message, engine.get_string("UNPINNED"))
@speedo_on_cmd(
["adminlist", "admins"],
cmd_help={"help": "Get Adminlist Of Chat!", "example": "{ch}adminlist"},
)
async def midhunadmin(client, message):
engine = message.Engine
mentions = ""
starky = get_text(message) or message.chat.id
pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
try:
X = await client.get_chat_members(starky, filter="administrators")
ujwal = await client.get_chat(starky)
except BaseException as e:
await pablo.edit(engine.get_string("CANT_FETCH_ADMIN").format("Admins", e))
return
for midhun in X:
if not midhun.user.is_deleted:
link = f'✱ <a href="tg://user?id={midhun.user.id}">{midhun.user.first_name}</a>'
userid = f"<code>{midhun.user.id}</code>"
mentions += f"\n{link} {userid}"
holy = ujwal.username or ujwal.id
messag = f"""
<b>Admins in {ujwal.title} | {holy}</b>
{mentions}
"""
await edit_or_send_as_file(
messag,
pablo,
client,
f"`AdminList Of {holy}!`",
"admin-lookup-result",
"html",
)
@speedo_on_cmd(
["botlist", "bot"],
group_only=True,
cmd_help={"help": "Get List Of Bots In Chat!", "example": "{ch}botlist"},
)
async def bothub(client, message):
engine = message.Engine
buts = "**Bot List** \n\n"
starky = get_text(message) or message.chat.id
pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
try:
bots = await client.get_chat_members(starky, filter="bots")
except BaseException as e:
await pablo.edit(engine.get_string("CANT_FETCH_ADMIN").format("Bots", e))
return
for nos, ujwal in enumerate(bots, start=1):
buts += f"{nos}〉 [{ujwal.user.first_name}](tg://user?id={ujwal.user.id}) \n"
await pablo.edit(buts)
@speedo_on_cmd(
["zombies", "delusers"],
cmd_help={
"help": "Remove Deleted Accounts In The Group/Channel!",
"example": "{ch}zombies",
},
)
async def ujwalzombie(client, message):
engine = message.Engine
pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
if len(message.text.split()) == 1:
dm = 0
da = 0
dc = 0
async for member in client.iter_chat_members(message.chat.id):
if member.user.is_deleted:
await sleep(1)
if member.status == "member":
dm += 1
elif member.status == "administrator":
da += 1
elif member.status == "creator":
dc += 1
text = "**Zombies Report!** \n\n"
if dm > 0:
text += engine.get_string("TOTAL_ZOMBIES_USERS").format(dm)
if da > 0:
text += engine.get_string("TOTAL_ZOMBIES_ADMINS").format(da)
if dc > 0:
text += engine.get_string("GRP_OWNER_IS_ZOMBIE")
d = dm + da + dc
if d > 0:
text += (engine.get_string("WIPE_THEM"))
await pablo.edit(text)
else:
await pablo.edit(engine.get_string("NO_ZOMBIES"))
return
sgname = message.text.split(None, 1)[1]
if sgname.lower().strip() == "clean":
me = client.me
lol = await is_admin_or_owner(message, me.id)
if not lol:
await pablo.edit(engine.get_string("NOT_ADMIN"))
return
s = 0
f = 0
async for member in client.iter_chat_members(message.chat.id):
if member.user.is_deleted:
try:
await client.kick_chat_member(message.chat.id, member.user.id)
s += 1
except:
f += 1
text = ""
if s > 0:
text += engine.get_string("REMOVED_ZOMBIES").format(s)
if f > 0:
text += (engine.get_string("FAILED_ZOMBIES").format(f))
await pablo.edit(text)
@speedo_on_cmd(
["ban", "bun"],
only_if_admin=True,
group_only=True,
cmd_help={
"help": "Ban Replied User or provide his ID!",
"example": "{ch}ban (reply to user message OR provide his ID)",
},
)
async def ban_world(client, message):
engine = message.Engine
bun = await edit_or_reply(message, engine.get_string("PROCESSING"))
me_m = client.me
me_ = await message.chat.get_member(int(me_m.id))
if not me_.can_restrict_members:
await bun.edit(engine.get_string("NOT_ADMIN"))
return
text_ = get_text(message)
userk, reason = get_user(message, text_)
if not userk:
await bun.edit(engine.get_string("TO_DO").format("Ban"))
return
try:
user_ = await client.get_users(userk)
except BaseException as e:
await bun.edit(engine.get_string("USER_MISSING").format(e))
return
userz = user_.id
if not reason:
reason = "Not Specified!"
if userz == me_m.id:
await bun.edit(engine.get_string("TF_DO_IT").format("Ban"))
return
try:
user_ = await client.get_users(userz)
except BaseException as e:
await bun.edit(engine.get_string("USER_MISSING").format(e))
return
try:
await client.kick_chat_member(message.chat.id, int(user_.id))
except BaseException as e:
await bun.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Ban", e))
return
b = f"**#Banned** \n**User :** [{user_.first_name}](tg://user?id={user_.id}) \n**Chat :** `{message.chat.title}` \n**Reason :** `{reason}`"
await bun.edit(b)
log = LogIt(message)
await log.log_msg(client, b)
@speedo_on_cmd(
["unban", "unbun"],
only_if_admin=True,
group_only=True,
cmd_help={
"help": "UnBan Replied User or provide his ID!",
"example": "{ch}unban (reply to user message OR Provide his id)",
},
)
async def unban_world(client, message):
engine = message.Engine
unbun = await edit_or_reply(message, engine.get_string("PROCESSING"))
me_m = client.me
me_ = await message.chat.get_member(int(me_m.id))
if not me_.can_restrict_members:
await unbun.edit(engine.get_string("NOT_ADMIN"))
return
text_ = get_text(message)
userm, reason = get_user(message, text_)
if not userm:
await unbun.edit(
engine.get_string("TO_DO").format("Un-Ban")
)
return
try:
user_ = await client.get_users(userm)
except BaseException as e:
await unbun.edit(engine.get_string("USER_MISSING").format(e))
return
userz = user_.id
if not reason:
reason = "Not Specified!"
if userz == me_m.id:
await unbun.edit(engine.get_string("TF_DO_IT").format("Un-Ban"))
return
try:
await client.unban_chat_member(message.chat.id, int(user_.id))
except BaseException as e:
await unbun.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Un-Ban", e))
ub = f"**#UnBanned** \n**User :** [{user_.first_name}](tg://user?id={user_.id}) \n**Chat :** `{message.chat.title}` \n**Reason :** `{reason}`"
await unbun.edit(ub)
log = LogIt(message)
await log.log_msg(client, ub)
@speedo_on_cmd(
["promote", "prumote"],
only_if_admin=True,
group_only=True,
cmd_help={
"help": "Promote Replied user or provide his ID!",
"example": "{ch}promote (reply to user message OR provide his ID)",
},
)
async def ujwal_mote(client, message):
engine = message.Engine
pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
me_m = client.me
me_ = await message.chat.get_member(int(me_m.id))
if not me_.can_promote_members:
await pablo.edit(engine.get_string("NOT_ADMIN"))
return
asplit = get_text(message)
userl, Res = get_user(message, asplit)
if not userl:
await pablo.edit(
engine.get_string("TO_DO").format("Promote")
)
return
try:
user = await client.get_users(userl)
except BaseException as e:
await pablo.edit(engine.get_string("USER_MISSING").format(e))
return
userz = user.id
if not Res:
Res = "Admeme"
if userz == me_m.id:
await pablo.edit(engine.get_string("TF_DO_IT").format("Promote"))
return
try:
await client.promote_chat_member(
message.chat.id,
user.id,
can_change_info=me_.can_change_info,
can_delete_messages=me_.can_delete_messages,
can_restrict_members=me_.can_restrict_members,
can_invite_users=me_.can_invite_users,
can_pin_messages=me_.can_pin_messages,
can_promote_members=me_.can_promote_members,
)
except BaseException as e:
await pablo.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Promote", e))
return
p = f"**#Promote** \n**User :** [{user.first_name}](tg://user?id={user.id}) \n**Chat :** `{message.chat.title}` \n**Title :** `{Res}`"
await pablo.edit(p)
log = LogIt(message)
await log.log_msg(client, p)
try:
if Res:
await client.set_administrator_title(message.chat.id, user.id, Res)
except:
pass
@speedo_on_cmd(
["demote", "demute"],
only_if_admin=True,
group_only=True,
cmd_help={
"help": "Demote Replied user or provide his ID!",
"example": "{ch}demote (reply to user message OR provide his ID)",
},
)
async def ujwal_demote(client, message):
engine = message.Engine
pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
me_m = client.me
await message.chat.get_member(int(me_m.id))
asplit = get_text(message)
usero = get_user(message, asplit)[0]
if not usero:
await pablo.edit(
engine.get_string("TO_DO").format("Demote")
)
return
try:
user = await client.get_users(usero)
except BaseException as e:
await pablo.edit(engine.get_string("USER_MISSING").format(e))
return
userz = user.id
if userz == me_m.id:
await pablo.edit(engine.get_string("TF_DO_IT").format("Demote"))
return
try:
await client.promote_chat_member(
message.chat.id,
user.id,
is_anonymous=False,
can_change_info=False,
can_post_messages=False,
can_edit_messages=False,
can_delete_messages=False,
can_restrict_members=False,
can_invite_users=False,
can_pin_messages=False,
can_promote_members=False,
)
except BaseException as e:
await pablo.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Demote", e))
return
d = f"**#Demote** \n**User :** [{user.first_name}](tg://user?id={user.id}) \n**Chat :** `{message.chat.title}`"
await pablo.edit(d)
log = LogIt(message)
await log.log_msg(client, d)
@speedo_on_cmd(
["mute"],
only_if_admin=True,
group_only=True,
cmd_help={
"help": "Mute Replied user or provide his ID!",
"example": "{ch}mute (reply to user message OR provide his ID)",
},
)
async def ujwal_mute(client, message):
engine = message.Engine
pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
me_m = client.me
me_ = await message.chat.get_member(int(me_m.id))
if not me_.can_restrict_members:
await pablo.edit(engine.get_string("NOT_ADMIN"))
return
asplit = get_text(message)
userf = get_user(message, asplit)[0]
if not userf:
await pablo.edit(
engine.get_string("TO_DO").format("Mute")
)
return
try:
user = await client.get_users(userf)
except BaseException as e:
await pablo.edit(engine.get_string("USER_MISSING").format(e))
return
userz = user.id
if userz == me_m.id:
await pablo.edit(engine.get_string("TF_DO_IT").format("Mute"))
return
try:
await client.restrict_chat_member(
message.chat.id, user.id, ChatPermissions(can_send_messages=False)
)
except BaseException as e:
await pablo.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Mute", e))
return
m = f"**#Muted** \n**User :** [{user.first_name}](tg://user?id={user.id}) \n**Chat :** `{message.chat.title}`"
await pablo.edit(m)
log = LogIt(message)
await log.log_msg(client, m)
@speedo_on_cmd(
["unmute"],
only_if_admin=True,
group_only=True,
cmd_help={
"help": "Unmute Replied user or provide his ID!",
"example": "{ch}Unmute (reply to user message OR provide his ID)",
},
)
async def ujwal_unmute(client, message):
engine = message.Engine
pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
me_m = client.me
me_ = await message.chat.get_member(int(me_m.id))
if not me_.can_restrict_members:
await pablo.edit(engine.get_string("NOT_ADMIN"))
return
asplit = get_text(message)
userf = get_user(message, asplit)[0]
if not userf:
await pablo.edit(
engine.get_string("TO_DO").format("Un-Mute")
)
return
try:
user = await client.get_users(userf)
except BaseException as e:
await pablo.edit(engine.get_string("USER_MISSING").format(e))
return
userz = user.id
if userz == me_m.id:
await pablo.edit(engine.get_string("TF_DO_IT").format("un-mute"))
return
try:
await client.restrict_chat_member(
message.chat.id, user.id, ChatPermissions(can_send_messages=True)
)
except BaseException as e:
await pablo.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Un-mute", e))
return
um = f"**#Un_Muted** \n**User :** [{user.first_name}](tg://user?id={user.id}) \n**Chat :** `{message.chat.title}`"
await pablo.edit(um)
log = LogIt(message)
await log.log_msg(client, um)
@speedo_on_cmd(
["chatinfo", "grpinfo"],
group_only=True,
cmd_help={"help": "Get Info Of The Chat!", "example": "{ch}chatinfo"},
)
async def owo_chat_info(client, message):
engine = message.Engine
s = await edit_or_reply(message, engine.get_string("PROCESSING"))
ujwal = await client.get_chat(message.chat.id)
peer = await client.resolve_peer(message.chat.id)
online_ = await client.send(pyrogram.raw.functions.messages.GetOnlines(peer=peer))
msg = "**Chat Info** \n\n"
msg += f"**Chat-ID :** __{ujwal.id}__ \n"
msg += f"**Verified :** __{ujwal.is_verified}__ \n"
msg += f"**Is Scam :** __{ujwal.is_scam}__ \n"
msg += f"**Chat Title :** __{ujwal.title}__ \n"
msg += f"**Users Online :** __{online_.onlines}__ \n"
if ujwal.photo:
msg += f"**Chat DC :** __{ujwal.dc_id}__ \n"
if ujwal.username:
msg += f"**Chat Username :** __{ujwal.username}__ \n"
if ujwal.description:
msg += f"**Chat Description :** __{ujwal.description}__ \n"
msg += f"**Chat Members Count :** __{ujwal.members_count}__ \n"
if ujwal.photo:
kek = await client.download_media(ujwal.photo.big_file_id)
await client.send_photo(message.chat.id, photo=kek, caption=msg)
await s.delete()
else:
await s.edit(msg)
@speedo_on_cmd(
["purge"],
only_if_admin=True,
cmd_help={
"help": "Purge All Messages Till Replied Message!",
"example": "{ch}purge (reply to message)",
},
)
async def purge(client, message):
engine = message.Engine
start_time = time.time()
message_ids = []
purge_len = 0
event = await edit_or_reply(message, engine.get_string("PROCESSING"))
me_m = client.me
if message.chat.type in ["supergroup", "channel"]:
me_ = await message.chat.get_member(int(me_m.id))
if not me_.can_delete_messages:
await event.edit(engine.get_string("NOT_ADMIN"))
return
if not message.reply_to_message:
await event.edit(engine.get_string("NEEDS_REPLY").format("Message To Purge."))
return
async for msg in client.iter_history(
chat_id=message.chat.id,
offset_id=message.reply_to_message.message_id,
reverse=True,
):
if msg.message_id != message.message_id:
purge_len += 1
message_ids.append(msg.message_id)
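            # Telegram caps bulk deletion at 100 message IDs per call, so flush
            # the batch whenever it fills up.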
if len(message_ids) >= 100:
await client.delete_messages(
chat_id=message.chat.id, message_ids=message_ids, revoke=True
)
message_ids.clear()
if message_ids:
await client.delete_messages(
chat_id=message.chat.id, message_ids=message_ids, revoke=True
)
end_time = time.time()
u_time = round(end_time - start_time)
await event.edit(
engine.get_string("PURGE_").format(purge_len, u_time)
)
await asyncio.sleep(3)
await event.delete()
@speedo_on_cmd(
["del"],
cmd_help={
"help": "Delete Replied Message!",
"example": "{ch}del (reply to message)",
},
)
async def delmsgs(client, message):
engine = message.Engine
if not message.reply_to_message:
await message.delete()
return
await client.delete_messages(
chat_id=message.chat.id,
message_ids=[message.reply_to_message.message_id],
revoke=True,
)
await message.delete()
@speedo_on_cmd(
["setgrppic", "gpic"],
cmd_help={
"help": "Set Custom Group Pic, For Lazy Peoples!",
"example": "{ch}setgrppic (reply to image)",
},
)
async def magic_grps(client, message):
engine = message.Engine
msg_ = await edit_or_reply(message, engine.get_string("PROCESSING"))
if not message.reply_to_message:
await msg_.edit(engine.get_string("NEEDS_REPLY").format("image"))
return
me_ = await message.chat.get_member(int(client.me.id))
if not me_.can_change_info:
await msg_.edit(engine.get_string("NOT_ADMIN"))
return
cool = await convert_to_image(message, client)
if not cool:
await msg_.edit(engine.get_string("NEEDS_REPLY").format("a valid media"))
return
if not os.path.exists(cool):
await msg_.edit(engine.get_string("INVALID_MEDIA"))
return
try:
await client.set_chat_photo(message.chat.id, photo=cool)
except BaseException as e:
await msg_.edit(f"`Unable To Set Group Photo! TraceBack : {e}")
return
await msg_.edit(engine.get_string("DONE_"))
| 33.076443
| 146
| 0.621215
| 2,834
| 21,202
| 4.434368
| 0.107622
| 0.047983
| 0.079971
| 0.0635
| 0.657436
| 0.608101
| 0.580886
| 0.527731
| 0.482534
| 0.44036
| 0
| 0.002512
| 0.248844
| 21,202
| 640
| 147
| 33.128125
| 0.78645
| 0.01415
| 0
| 0.438861
| 0
| 0.0134
| 0.183641
| 0.030392
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.001675
| 0.01675
| 0
| 0.088777
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53dd16873458e07dbdbf665e77a30bc20865dfcb
| 16,809
|
py
|
Python
|
carberretta/bot/cogs/feeds.py
|
Nereg/Carberretta
|
01e25bc8ece4c310ab541304e8809dfdd3eec3b8
|
[
"BSD-3-Clause"
] | null | null | null |
carberretta/bot/cogs/feeds.py
|
Nereg/Carberretta
|
01e25bc8ece4c310ab541304e8809dfdd3eec3b8
|
[
"BSD-3-Clause"
] | null | null | null |
carberretta/bot/cogs/feeds.py
|
Nereg/Carberretta
|
01e25bc8ece4c310ab541304e8809dfdd3eec3b8
|
[
"BSD-3-Clause"
] | null | null | null |
"""
FEEDS
Handles YouTube and Twitch feed notifications.
"""
import datetime as dt
import discord
import feedparser
from apscheduler.triggers.cron import CronTrigger
from discord.ext import commands
from carberretta import Config
from carberretta.utils import DEFAULT_EMBED_COLOUR, chron
LIVE_EMBED_COLOUR = 0x9146FF
VOD_EMBED_COLOUR = 0x3498DB
class Feeds(commands.Cog):
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
    async def call_feed(self) -> list:
url = f"https://www.youtube.com/feeds/videos.xml?channel_id={Config.YOUTUBE_CHANNEL_ID}&{dt.datetime.utcnow()}"
async with self.bot.session.get(url) as response:
if not 200 <= response.status <= 299:
return []
if not (data := feedparser.parse(await response.text()).entries):
return []
return data
async def call_yt_api(self, video_id: str) -> dict:
url = f"https://www.googleapis.com/youtube/v3/videos?part=contentDetails%2CliveStreamingDetails%2Csnippet&id={video_id}&key={Config.YOUTUBE_API_KEY}"
async with self.bot.session.get(url) as response:
if not 200 <= response.status <= 299:
return []
if not (data := await response.json()):
return []
return data["items"][0]
async def call_twitch_api(self) -> dict:
url = f"https://api.twitch.tv/helix/search/channels?query=carberratutorials"
oauthurl = f"https://id.twitch.tv/oauth2/token?client_id={Config.TWITCH_CLIENT_ID}&client_secret={Config.TWITCH_CLIENT_SECRET}&grant_type=client_credentials"
async with self.bot.session.post(url=oauthurl) as response:
if not 200 <= response.status <= 299:
return []
if not (twitch_tok := (await response.json())["access_token"]):
return []
headers = {
"client-id": f"{Config.TWITCH_CLIENT_ID}",
"Authorization": f"Bearer {twitch_tok}",
}
async with self.bot.session.get(url=url, headers=headers) as response:
if not 200 <= response.status <= 299:
return []
if not (data := await response.json()):
return []
return data["data"][0]
@commands.Cog.listener()
async def on_ready(self) -> None:
if not self.bot.ready.booted:
self.videos_channel = self.bot.get_channel(Config.VIDEOS_ID)
self.videos_role = self.bot.guild.get_role(Config.VIDEOS_ROLE_ID)
self.vods_role = self.bot.guild.get_role(Config.VODS_ROLE_ID)
self.streams_role = self.bot.guild.get_role(Config.STREAMS_ROLE_ID)
self.youtube = self.bot.get_cog("YouTube")
if (await self.bot.application_info()).id == 696804435321552906:
self.bot.scheduler.add_job(self.get_new_videos, CronTrigger(minute="*/3", second=0))
self.bot.scheduler.add_job(self.get_new_vods, CronTrigger(minute="*/3", second=15))
self.bot.scheduler.add_job(self.get_new_premieres, CronTrigger(minute="*/3", second=30))
self.bot.scheduler.add_job(self.get_new_streams, CronTrigger(minute="*/3", second=45))
self.bot.ready.up(self)
async def get_new_vods(self) -> str:
current_vod = await self.bot.db.field("SELECT ContentValue FROM videos WHERE ContentType = ?", "vod")
for item in await self.call_feed():
data = await self.call_yt_api(item.yt_videoid)
thumbnails = data["snippet"]["thumbnails"]
duration = data["contentDetails"]["duration"]
if current_vod == item.yt_videoid:
# We announced this vod already
return
elif "#VOD" in item.summary:
                # This is a VOD we haven't announced yet
await self.videos_channel.send(
f"Hey {self.vods_role.mention}, a new VOD just went live! Catch up on anything you missed from the last stream!",
embed=discord.Embed.from_dict(
{
"title": item.title,
"description": desc if len(desc := item.summary) <= 500 else f"{desc[:500]}...",
"color": VOD_EMBED_COLOUR,
"url": item.link,
"author": {"name": "Carberra Tutorials"},
"image": {"url": thumbnails["maxres"]["url"]},
"footer": {"text": f"Runtime: {self.youtube.get_duration(duration, long=True)}"},
}
),
)
await self.bot.db.execute(
"UPDATE videos SET ContentValue = ? WHERE ContentType = ?", item.yt_videoid, "vod"
)
return item.yt_videoid
async def get_new_videos(self) -> str:
current_vid = await self.bot.db.field("SELECT ContentValue FROM videos WHERE ContentType = ?", "video")
for item in await self.call_feed():
data = await self.call_yt_api(item.yt_videoid)
thumbnails = data["snippet"]["thumbnails"]
duration = data["contentDetails"]["duration"]
if item.yt_videoid == current_vid:
# This is a video we already announced
return
elif "liveStreamingDetails" not in data.keys():
                # A new video is live and it was not a premiere
                if "#VOD" not in item.summary:
                    # This isn't a VOD
await self.videos_channel.send(
f"Hey {self.videos_role.mention}, a new video just went live! Come check it out!",
embed=discord.Embed.from_dict(
{
"title": item.title,
"description": desc if len(desc := item.summary) <= 500 else f"{desc[:500]}...",
"color": DEFAULT_EMBED_COLOUR,
"url": item.link,
"author": {"name": "Carberra Tutorials"},
"image": {"url": thumbnails["maxres"]["url"]},
"footer": {"text": f"Runtime: {self.youtube.get_duration(duration, long=True)}"},
}
),
)
await self.bot.db.execute(
"UPDATE videos SET ContentValue = ? WHERE ContentType = ?", item.yt_videoid, "video"
)
return item.yt_videoid
async def get_new_premieres(self) -> tuple:
known_premieres = {
_id: [_upcoming, _announced]
for _id, _upcoming, _announced in await self.bot.db.records("SELECT * FROM premieres")
}
for item in await self.call_feed():
data = await self.call_yt_api(item.yt_videoid)
thumbnails = data["snippet"]["thumbnails"]
duration = data["contentDetails"]["duration"]
live_content = data["snippet"]["liveBroadcastContent"]
upcoming = known_premieres[item.yt_videoid][0] if item.yt_videoid in known_premieres.keys() else None
announced = known_premieres[item.yt_videoid][1] if item.yt_videoid in known_premieres.keys() else None
if "liveStreamingDetails" in data.keys():
start_time = data["liveStreamingDetails"]["scheduledStartTime"].strip("Z")
scheduled_time = chron.from_iso(start_time)
if not upcoming and duration != "P0D":
# We have not seen this premiere before
if live_content == "upcoming" and not announced:
# This premiere is upcoming and not live
await self.videos_channel.send(
f"Hey {self.videos_role.mention}, a new premiere is scheduled for {chron.long_date_and_time(scheduled_time)} UTC! Hope to see you there!",
embed=discord.Embed.from_dict(
{
"title": item.title,
"description": desc if len(desc := item.summary) <= 500 else f"{desc[:500]}...",
"color": DEFAULT_EMBED_COLOUR,
"url": item.link,
"author": {"name": "Carberra Tutorials"},
"image": {"url": thumbnails["maxres"]["url"]},
"footer": {"text": f"Runtime: {self.youtube.get_duration(duration, long=True)}"},
}
),
)
await self.bot.db.execute(
"REPLACE INTO premieres (VideoID, Upcoming, Announced) VALUES (?, ?, ?)",
item.yt_videoid,
1,
0,
)
return item.yt_videoid, False
elif live_content == "live" and not upcoming and not announced:
                        # The premiere was never upcoming and is now live
await self.videos_channel.send(
f"Hey {self.videos_role.mention}, a new premiere started on {chron.long_date_and_time(scheduled_time)} UTC! Come and join us!",
embed=discord.Embed.from_dict(
{
"title": item.title,
"description": desc if len(desc := item.summary) <= 500 else f"{desc[:500]}...",
"color": DEFAULT_EMBED_COLOUR,
"url": item.link,
"author": {"name": "Carberra Tutorials"},
"image": {"url": thumbnails["maxres"]["url"]},
"footer": {"text": f"Runtime: {self.youtube.get_duration(duration, long=True)}"},
}
),
)
await self.bot.db.execute(
"REPLACE INTO premieres (VideoID, Upcoming, Announced) VALUES (?, ?, ?)",
item.yt_videoid,
1,
1,
)
return item.yt_videoid, True
elif not announced:
# A premiere was upcoming, and is now live
await self.videos_channel.send(
f"Hey {self.videos_role.mention}, a new premiere started on {chron.long_date_and_time(scheduled_time)} UTC! Come and join us!",
embed=discord.Embed.from_dict(
{
"title": item.title,
"description": desc if len(desc := item.summary) <= 500 else f"{desc[:500]}...",
"color": DEFAULT_EMBED_COLOUR,
"url": item.link,
"author": {"name": "Carberra Tutorials"},
"image": {"url": thumbnails["maxres"]["url"]},
"footer": {"text": f"Runtime: {self.youtube.get_duration(duration, long=True)}"},
}
),
)
await self.bot.db.execute(
"REPLACE INTO premieres (VideoID, Upcoming, Announced) VALUES (?, ?, ?)", item.yt_videoid, 1, 1
)
return item.yt_videoid, True
async def get_new_streams(self) -> tuple:
data = await self.call_twitch_api()
if data:
live_now = await self.bot.db.field("SELECT StreamLive FROM streams WHERE ID = 1")
if data["is_live"] and not live_now:
                # The stream is live and we haven't announced it yet
start = chron.from_iso(data["started_at"].strip("Z"))
message = await self.videos_channel.send(
f"Hey {self.streams_role.mention}, I'm live on Twitch now! Come watch!",
embed=discord.Embed.from_dict(
{
"title": data["title"],
"description": f"**Category: {data['game_name']}**",
"color": LIVE_EMBED_COLOUR,
"url": "https://www.twitch.tv/carberratutorials",
"author": {"name": "Carberra Tutorials"},
"thumbnail": {"url": data["thumbnail_url"]},
"footer": {"text": f"Started: {chron.long_date_and_time(start)} UTC"},
}
),
)
await self.bot.db.execute(
"UPDATE streams SET StreamLive = ?, StreamStart = ?, StreamMessage= ? WHERE ID = 1",
1,
start,
message.id,
)
return data["title"], False
elif not data["is_live"] and live_now:
# The stream is not live and last we checked it was (stream is over)
await self.bot.db.execute(
"UPDATE streams SET StreamLive = ?, StreamEnd = ? WHERE ID = 1", 0, dt.datetime.utcnow()
)
start, stream_message, end = await self.bot.db.record(
"SELECT StreamStart, StreamMessage, StreamEnd FROM streams WHERE ID = 1"
)
duration = chron.from_iso(end) - chron.from_iso(start)
try:
message = await self.videos_channel.fetch_message(stream_message)
except (discord.NotFound, discord.Forbidden, discord.HTTPException):
return
else:
await message.edit(
content=f"Hey {self.streams_role.mention}, I'm live on Twitch now! Come watch!",
embed=discord.Embed.from_dict(
{
"title": "The stream has ended.",
"description": "**Catch you in the next one!**",
"color": LIVE_EMBED_COLOUR,
"url": "https://www.twitch.tv/carberratutorials",
"author": {"name": "Carberra Tutorials"},
"thumbnail": {"url": data["thumbnail_url"]},
"footer": {"text": f"Runtime: {chron.long_delta(duration)}"},
}
),
)
return data["title"], True
@commands.group(name="feed", invoke_without_command=True)
@commands.is_owner()
async def group_feed(self, ctx: commands.Context) -> None:
pass
@group_feed.command(name="video")
@commands.is_owner()
async def command_feed_video(self, ctx: commands.Context) -> None:
last_video = await self.get_new_videos()
await ctx.send(f"Announced video: {last_video}." if last_video else "No new videos.")
@group_feed.command(name="vod")
@commands.is_owner()
async def command_feed_vod(self, ctx: commands.Context) -> None:
last_vod = await self.get_new_vods()
await ctx.send(f"Announced VOD: {last_vod}." if last_vod else "No new VODs.")
@group_feed.command(name="premiere")
@commands.is_owner()
async def command_feed_premiere(self, ctx: commands.Context) -> None:
if not (last_premiere := await self.get_new_premieres()):
await ctx.send("No new premieres.")
else:
await ctx.send(
f"Announced live premiere: {last_premiere[0]}."
if last_premiere[1]
else f"Announced upcoming premiere: {last_premiere[0]}."
)
@group_feed.command(name="stream")
@commands.is_owner()
async def command_feed_stream(self, ctx: commands.Context) -> None:
if not (last_stream := await self.get_new_streams()):
await ctx.send("No new streams.")
else:
await ctx.send(
f"Stream ended: {last_stream[0]}." if last_stream[1] else f"Announced stream: {last_stream[0]}."
)
def setup(bot: commands.Bot) -> None:
bot.add_cog(Feeds(bot))
| 44.586207
| 166
| 0.50467
| 1,725
| 16,809
| 4.77913
| 0.151884
| 0.033843
| 0.029961
| 0.020378
| 0.551553
| 0.498544
| 0.488234
| 0.457666
| 0.413998
| 0.402596
| 0
| 0.011623
| 0.385805
| 16,809
| 376
| 167
| 44.704787
| 0.786904
| 0.029508
| 0
| 0.431579
| 0
| 0.024561
| 0.22958
| 0.036944
| 0
| 0
| 0.000982
| 0
| 0
| 1
| 0.007018
| false
| 0.003509
| 0.024561
| 0
| 0.108772
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53dd795653b27c0823e1d06e1e8c37e9cd9ead3e
| 5,676
|
py
|
Python
|
gdb/proxy.py
|
abaire/gdb_sniffer
|
f330193c65a39ce6abb01f25737ca967a0af9629
|
[
"Unlicense"
] | 1
|
2021-12-22T04:04:22.000Z
|
2021-12-22T04:04:22.000Z
|
gdb/proxy.py
|
abaire/gdb_sniffer
|
f330193c65a39ce6abb01f25737ca967a0af9629
|
[
"Unlicense"
] | null | null | null |
gdb/proxy.py
|
abaire/gdb_sniffer
|
f330193c65a39ce6abb01f25737ca967a0af9629
|
[
"Unlicense"
] | null | null | null |
"""Provides a GDB logging proxy.
See https://sourceware.org/gdb/onlinedocs/gdb/Remote-Protocol.html
See https://www.embecosm.com/appnotes/ean4/embecosm-howto-rsp-server-ean4-issue-2.html
"""
from __future__ import annotations
import logging
import socket
from typing import Optional
from typing import Tuple
from .packet import GDBPacket
from net import ip_transport
logger = logging.getLogger(__name__)
class GDBProxy(ip_transport.IPTransport):
"""GDB Remote Serial Protocol proxy."""
def __init__(self, target_addr: Tuple[str, int], colorize: bool = False):
super().__init__(process_callback=self._on_gdb_bytes_read)
self.log_acks = False
self.target_addr = target_addr
self._target: Optional[ip_transport.IPTransport] = None
if colorize:
self.target_color = "\x1b[34m\x1b[47m"
self.gdb_color = "\x1b[30m\x1b[47m"
else:
self.target_color = ""
self.gdb_color = ""
self._gdb_read_buffer: bytearray = bytearray()
self._target_read_buffer: bytearray = bytearray()
def set_connection(self, sock, addr):
super().set_connection(sock, addr)
logger.debug(f"{self.target_color}Connecting to target at {self.target_addr}")
try:
target_sock = socket.create_connection(self.target_addr)
except ConnectionRefusedError:
logger.error(f"{self.target_color}Connection to Target@{self.target_addr} refused.")
self.close()
return
self._target = ip_transport.IPTransport(self._on_target_bytes_read, f"Target@{self.target_addr}")
self._target.set_connection(target_sock, self.target_addr)
self._add_sub_connection(self._target)
def _on_gdb_bytes_read(self, _ignored):
buffer = self._read_buffer
self.shift_read_buffer(len(buffer))
self._append_gdb_read_buffer(buffer)
self._target._write_buffer.extend(buffer)
def _on_target_bytes_read(self, _ignored):
buffer = self._target.read_buffer
self._target.shift_read_buffer(len(buffer))
self._append_target_read_buffer(buffer)
self._write_buffer.extend(buffer)
def _append_gdb_read_buffer(self, data: bytes):
self._unescape_and_append(self._gdb_read_buffer, data)
bytes_consumed = self._log_rsp_bytes(f"{self.gdb_color}GDB :", self._gdb_read_buffer)
if bytes_consumed:
self._gdb_read_buffer = bytearray(self._gdb_read_buffer[bytes_consumed:])
def _append_target_read_buffer(self, data: bytes):
self._unescape_and_append(self._target_read_buffer, data)
bytes_consumed = self._log_rsp_bytes(f"{self.target_color}TARGET :", self._target_read_buffer)
if bytes_consumed:
self._target_read_buffer = bytearray(self._target_read_buffer[bytes_consumed:])
@staticmethod
def _unescape_and_append(buffer: bytearray, data: bytes):
# RSP uses '}' as an escape character. Escapes are processed in this method
# before adding to the read buffer to simplify parsing.
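        # For example, an escaped b"}" arrives on the wire as b"}\x5d" and is
        # restored here, since 0x5d ^ 0x20 == 0x7d (the "}" byte).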
if not data:
return
# Process any left over escapes.
if buffer and buffer[-1] == GDBPacket.RSP_ESCAPE_CHAR:
buffer[-1] = data[0] ^ 0x20
data = data[1:]
        escape_char_index = data.find(GDBPacket.RSP_ESCAPE_CHAR)
        while escape_char_index >= 0:
            if escape_char_index == len(data) - 1:
                # There are no more characters after the escape char; keep it in
                # the buffer and let it be processed when more data is received.
                break
            buffer.extend(data[:escape_char_index])
            unescaped = data[escape_char_index + 1] ^ 0x20
            buffer.append(unescaped)
            data = data[escape_char_index + 2 :]
            escape_char_index = data.find(GDBPacket.RSP_ESCAPE_CHAR)
        buffer.extend(data)
def _log_rsp_bytes(self, log_prefix: str, buffer: bytearray) -> int:
total_bytes_consumed = 0
pkt = GDBPacket()
buffer_len = len(buffer)
while total_bytes_consumed < buffer_len:
if buffer[0] == ord("+"):
if self.log_acks:
logger.info(f"{log_prefix} <<ack>>")
total_bytes_consumed += 1
buffer = buffer[1:]
continue
if buffer[0] == ord("-"):
if self.log_acks:
logger.info(f"{log_prefix} <<nack>>")
total_bytes_consumed += 1
buffer = buffer[1:]
continue
if buffer[0] == 0x03:
logger.info(f"{log_prefix} <<Interrupt request>>")
total_bytes_consumed += 1
buffer = buffer[1:]
continue
leader = buffer.find(GDBPacket.PACKET_LEADER)
if leader > 0:
logger.warning(
f"{log_prefix} Skipping {leader} non-leader bytes {buffer[:total_bytes_consumed + leader]}"
)
buffer = buffer[leader:]
bytes_consumed = pkt.parse(buffer)
buffer = buffer[bytes_consumed:]
if not bytes_consumed:
break
total_bytes_consumed += bytes_consumed
if pkt.data:
logger.info(f"{log_prefix} Received packet {pkt}")
else:
logger.info(f"{log_prefix} Received empty packet")
if len(buffer):
logger.debug(
f"{log_prefix} After processing: [{len(buffer)}] {buffer}"
)
return total_bytes_consumed
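# Usage sketch (hedged: the listening socket and accept loop are assumed to be
# provided by the surrounding application; `listener` below is hypothetical):
#   proxy = GDBProxy(("127.0.0.1", 1234), colorize=True)
#   sock, addr = listener.accept()
#   proxy.set_connection(sock, addr)  # proxies GDB <-> target, logging RSP packets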
| 35.698113
| 112
| 0.617512
| 678
| 5,676
| 4.868732
| 0.234513
| 0.072705
| 0.038776
| 0.036353
| 0.277795
| 0.197213
| 0.144502
| 0.123902
| 0.111784
| 0.111784
| 0
| 0.010654
| 0.288936
| 5,676
| 158
| 113
| 35.924051
| 0.807235
| 0.090028
| 0
| 0.168142
| 0
| 0
| 0.101767
| 0.031462
| 0
| 0
| 0.002331
| 0
| 0
| 1
| 0.070796
| false
| 0
| 0.061947
| 0
| 0.168142
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53ddde78f62a83aa118f0171be55b4c481a15868
| 1,373
|
py
|
Python
|
pylayers/em/openems/test/Rect_Waveguide.py
|
usmanwardag/pylayers
|
2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a
|
[
"MIT"
] | 143
|
2015-01-09T07:50:20.000Z
|
2022-03-02T11:26:53.000Z
|
pylayers/em/openems/test/Rect_Waveguide.py
|
usmanwardag/pylayers
|
2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a
|
[
"MIT"
] | 148
|
2015-01-13T04:19:34.000Z
|
2022-03-11T23:48:25.000Z
|
pylayers/em/openems/test/Rect_Waveguide.py
|
usmanwardag/pylayers
|
2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a
|
[
"MIT"
] | 95
|
2015-05-01T13:22:42.000Z
|
2022-03-15T11:22:28.000Z
|
from openems.openems import *
# A simple simulation
#
# FDTD Simulation Setting
#
F = FDTD()
F.add(Exc(typ='Sinus',f0=100000))
F.add(BoundaryCond(['PMC','PMC','PEC','PEC','MUR','MUR']))
#
# CSX (Geometry setting)
#
C = CSX()
# The Box is added as a property
C.add(Excitation('excitation'),p=Box(P1=[-10,-10,0],P2=[10,10,0],Pr=0))
C.add(DumpBox('Et'),p=Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0))
C.add(RectilinearGrid(np.arange(-10,11,1),np.arange(-10,11,1),np.arange(-10,11,1)))
C.add(Polyhedron())
S = OpenEMS(F,C)
S.save(filename='RectWaveguide.xml')
#gnd = Matter('gnd')
#sphere = Matter('sphere')
#patch = Matter('patch')
#substrate = Matter('substrate',typ='Ma',Epsilon="3.38",Kappa="0.00046")
#cdgsht = Matter('copper',typ='Cs',conductivity="56e6",thickness="40e-6")
#b1 = Box(P1=[0,0,0],P2=[100,100,200],Pr=0)
#b2 = Box(P1=[0,0,0],P2=[10,20,30],Pr=10)
#b4 = Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0)
#s1 = Sphere(P=[0,0,0],R=100,Pr=50)
#dump = DumpBox()
#C.add(gnd)
#C.add(patch)
#C.add(substrate)
#C.add(sphere)
#C.add(cdgsht)
#C.add(exc)
#C.add(dump)
#C.set('gnd',b1)
#C.set('gnd',b2)
#C.set('sphere',s1)
#C.set('copper',b1)
#C.set('copper',b2)
#C.set('Et',b4)
#C.save(filename='structure.xml')
##C.AddBox(prop='ConductingSheet',name='copper',P1=[0,-50,200],P2=[1000,50,200],Pri=10)
##C.AddCylinder(prop='Metal',name='cyl0',P1=[0,0,0],P2=[0,0,100],Rad=50,Pri=10)
#
| 25.90566
| 87
| 0.632921
| 264
| 1,373
| 3.291667
| 0.340909
| 0.050633
| 0.013809
| 0.041427
| 0.121979
| 0.113924
| 0.090909
| 0.090909
| 0.090909
| 0.090909
| 0
| 0.122353
| 0.071377
| 1,373
| 52
| 88
| 26.403846
| 0.559216
| 0.625637
| 0
| 0
| 0
| 0
| 0.109244
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53debe5489e3f53b73538719925c989ad4ce399d
| 381
|
py
|
Python
|
DataPreprocessing/_segment_Y.py
|
vd1371/CBSA
|
f2b3f03c91ccd9ec02c2331f43573d7d6e72fd47
|
[
"MIT"
] | null | null | null |
DataPreprocessing/_segment_Y.py
|
vd1371/CBSA
|
f2b3f03c91ccd9ec02c2331f43573d7d6e72fd47
|
[
"MIT"
] | null | null | null |
DataPreprocessing/_segment_Y.py
|
vd1371/CBSA
|
f2b3f03c91ccd9ec02c2331f43573d7d6e72fd47
|
[
"MIT"
] | null | null | null |
import numpy as np
def segment_Y(Y, **params):
Y_segments = params.get("Y_segments")
Y_quantile = params.get("Y_quantile")
print("segmenting Y")
Y = Y.values.reshape(-1)
Y_quantile = np.quantile(Y, Y_quantile, axis = 0)
bigger_mask = (Y > Y_quantile).copy()
smaller_mask = (Y <= Y_quantile).copy()
Y[bigger_mask] = 1
Y[smaller_mask] = 0
Y = Y.astype(int)
return Y
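# Usage sketch (hedged: values are illustrative; Y is assumed to be a pandas
# Series/column, since .values is used above; note that "Y_segments" is read
# but not otherwise used by this snippet):
#   import pandas as pd
#   labels = segment_Y(pd.Series([1.0, 5.0, 2.0, 9.0]), Y_segments=2, Y_quantile=0.5)
#   # -> array([0, 1, 0, 1]): values above the 0.5-quantile become 1, the rest 0.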
| 19.05
| 50
| 0.677165
| 64
| 381
| 3.828125
| 0.390625
| 0.057143
| 0.122449
| 0.114286
| 0.146939
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012579
| 0.165354
| 381
| 20
| 51
| 19.05
| 0.757862
| 0
| 0
| 0
| 0
| 0
| 0.08377
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.230769
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53df3216d619040fc2551d1e35eda4fe2e177604
| 3,868
|
py
|
Python
|
WifiEnigma/BattleAI/question.py
|
Puzzlebox-IMT/Puzzlebox
|
6b80e22a4aee3228140692bd6352de18b2f6a96d
|
[
"MIT"
] | null | null | null |
WifiEnigma/BattleAI/question.py
|
Puzzlebox-IMT/Puzzlebox
|
6b80e22a4aee3228140692bd6352de18b2f6a96d
|
[
"MIT"
] | null | null | null |
WifiEnigma/BattleAI/question.py
|
Puzzlebox-IMT/Puzzlebox
|
6b80e22a4aee3228140692bd6352de18b2f6a96d
|
[
"MIT"
] | null | null | null |
import mysql.connector
import random
from voice import synthetize_voice, delete_wav
def AllQuestionAI(id_theme):
i = 0
    # CONNECT TO THE DATABASE
conn = mysql.connector.connect(host="localhost",
user="phpmyadmin", password="Vince@Mysql1997",
database="Puzzlebox")
cursor = conn.cursor()
    # RUN THE QUERY AGAINST THE DATABASE
query = ("SELECT * FROM Question INNER JOIN themes_questions ON Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE ID_THEME=%s")
cursor.execute(query, (id_theme, ))
    # FETCH THE RESULTS
rows = cursor.fetchall()
if rows:
for line in rows:
i += 1
enonce = line[1]
proposition1 = line[2]
proposition2 = line[3]
proposition3 = line[4]
proposition4 = line[5]
reponse = line[5]
print("*******************************************************************************")
print(" QUESTION ",i," ")
print("*******************************************************************************")
print("ENONCE : ", enonce)
print("PROPOSITION 1 : ", proposition1)
print("PROPOSITION 2 : ", proposition2)
print("PROPOSITION 3 : ", proposition3)
print("PROPOSITION 4 : ", proposition4)
print("REPONSE : ", reponse)
else:
print("Ce thème ne contient pas de questions")
def questionAI(id_theme):
i = 0
    # CONNECT TO THE DATABASE
conn = mysql.connector.connect(host="localhost",
user="phpmyadmin", password="Vince@Mysql1997",
database="Puzzlebox")
cursor = conn.cursor()
    # RUN THE QUERY AGAINST THE DATABASE
query = ("SELECT * FROM Question INNER JOIN themes_questions ON Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE ID_THEME=%s")
cursor.execute(query, (id_theme, ))
    # FETCH THE RESULTS
rows = cursor.fetchall()
if rows:
nb_rows = len(rows)
num_question = random.randint(1, nb_rows)
        # List indexes start at zero, so shift the number down by one
num_question = num_question - 1
question = rows[num_question]
        result = []  # List which stores the query results
        # FETCH THE TUPLES
result.append(question[1])
result.append(question[2])
result.append(question[3])
result.append(question[4])
result.append(question[5])
result.append(question[5]) #This last one is the answer
print("*******************************************************************************")
print(" QUESTION ",num_question+1," ")
print("*******************************************************************************")
print("ENONCE : ", result[0])
print("PROPOSITION 1 : ", result[1])
print("PROPOSITION 2 : ", result[2])
print("PROPOSITION 3 : ", result[3])
print("PROPOSITION 4 : ", result[4])
print("REPONSE : ", result[5])
#complete_question = ''.join(complete_question) #Convert tuple into string
return result
else:
print("Ce thème ne contient pas de questions")
def tell_question(question):
synthetize_voice(question[0])
    for i in range(1, 5):
        num_prop = "Proposition {} ".format(i)
        line = num_prop + question[i]
        synthetize_voice(line)
delete_wav()
def quiz():
    counter = 1
    while counter <= 5:
        questionAI(1)
        counter += 1
if (__name__ == '__main__'):
result = questionAI(1)
tell_question(result)
| 31.447154
| 140
| 0.520941
| 388
| 3,868
| 5.085052
| 0.311856
| 0.064876
| 0.060821
| 0.009123
| 0.386214
| 0.386214
| 0.386214
| 0.386214
| 0.386214
| 0.386214
| 0
| 0.020265
| 0.298345
| 3,868
| 122
| 141
| 31.704918
| 0.706706
| 0.099793
| 0
| 0.345679
| 0
| 0
| 0.305187
| 0.107205
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049383
| false
| 0.024691
| 0.037037
| 0
| 0.098765
| 0.246914
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53e02e91fc0737f80d21208f1511392c2bcd37d1
| 875
|
py
|
Python
|
toy-amr/flux_functions.py
|
IanHawke/toy-amr
|
1f616791993ccd83cc6034616c08e09fa4ba310d
|
[
"MIT"
] | 5
|
2019-05-27T18:13:45.000Z
|
2021-01-06T09:42:28.000Z
|
toy-amr/flux_functions.py
|
IanHawke/toy-amr
|
1f616791993ccd83cc6034616c08e09fa4ba310d
|
[
"MIT"
] | 1
|
2019-10-21T13:34:48.000Z
|
2019-12-11T22:11:17.000Z
|
toy-amr/flux_functions.py
|
IanHawke/toy-amr
|
1f616791993ccd83cc6034616c08e09fa4ba310d
|
[
"MIT"
] | 2
|
2019-05-08T18:00:36.000Z
|
2021-05-27T16:57:57.000Z
|
import numpy
def lax_friedrichs(cons_minus, cons_plus, simulation, tl):
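    # Local Lax-Friedrichs flux: F_{i+1/2} = 0.5 * (f(U+_i) + f(U-_{i+1})
    # + alpha * (U+_i - U-_{i+1})), with alpha = dx/dt acting as the
    # numerical dissipation speed.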
alpha = tl.grid.dx / tl.dt
flux = numpy.zeros_like(cons_minus)
prim_minus, aux_minus = simulation.model.cons2all(cons_minus, tl.prim)
prim_plus, aux_plus = simulation.model.cons2all(cons_plus , tl.prim)
f_minus = simulation.model.flux(cons_minus, prim_minus, aux_minus)
f_plus = simulation.model.flux(cons_plus, prim_plus, aux_plus )
flux[:, 1:-1] = 0.5 * ( (f_plus[:,0:-2] + f_minus[:,1:-1]) + \
alpha * (cons_plus[:,0:-2] - cons_minus[:,1:-1]) )
return flux
def upwind(cons_minus, cons_plus, simulation, patch):
flux = numpy.zeros_like(cons_minus)
flux[:, 1:-1] = simulation.model.riemann_problem_flux(cons_plus [:, 0:-2],
cons_minus[:, 1:-1])
return flux
| 39.772727
| 79
| 0.609143
| 123
| 875
| 4.081301
| 0.252033
| 0.143426
| 0.035857
| 0.067729
| 0.424303
| 0.316733
| 0.123506
| 0.123506
| 0.123506
| 0.123506
| 0
| 0.030488
| 0.250286
| 875
| 21
| 80
| 41.666667
| 0.734756
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53e0390b65014122e4de16c06f08712946e2a007
| 2,084
|
py
|
Python
|
pi/auth.py
|
vmagamedov/pi
|
6ee98af69b757d96aa4eddc32513309e0fe05d1d
|
[
"BSD-3-Clause"
] | 7
|
2016-06-24T04:49:48.000Z
|
2020-06-29T17:34:12.000Z
|
pi/auth.py
|
vmagamedov/pi
|
6ee98af69b757d96aa4eddc32513309e0fe05d1d
|
[
"BSD-3-Clause"
] | 11
|
2016-06-19T13:16:59.000Z
|
2019-11-02T13:14:19.000Z
|
pi/auth.py
|
vmagamedov/pi
|
6ee98af69b757d96aa4eddc32513309e0fe05d1d
|
[
"BSD-3-Clause"
] | null | null | null |
import re
import json
import base64
import codecs
import os.path
import asyncio
import subprocess
_PREFIX = 'docker-credential-'
def read_config():
path = os.path.expanduser('~/.docker/config.json')
if not os.path.exists(path):
return {}
with codecs.open(path, encoding='utf-8') as f:
json_data = f.read()
return json.loads(json_data)
async def _read_creds(creds_store, server):
if not re.match(r'^\w+$', creds_store, re.ASCII):
raise ValueError('Invalid credsStore: {!r}'.format(creds_store))
proc = await asyncio.create_subprocess_exec(
_PREFIX + creds_store, 'get',
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = await proc.communicate(server.encode('ascii'))
if proc.returncode != 0:
return None
else:
data = json.loads(stdout)
return {
'Username': data['Username'],
'Password': data['Secret'],
'ServerAddress': server,
}
def _decode_auth(auth_data, server):
auth_data_decoded = base64.b64decode(auth_data).decode('utf-8')
username, _, password = auth_data_decoded.partition(':')
return {
'Username': username,
'Password': password,
'ServerAddress': server,
}
async def resolve_auth(config, server):
config_auths = config.get('auths')
if config_auths is None:
return None
server_auth = config_auths.get(server)
if server_auth is not None:
auth_data = server_auth.get('auth')
if auth_data is not None:
return _decode_auth(auth_data, server)
creds_store = config.get('credsStore')
if creds_store is not None:
return await _read_creds(creds_store, server)
return None
def server_name(image_name):
registry, _, name = image_name.partition('/')
if not name:
return 'docker.io'
else:
return registry
def encode_header(auth):
json_data = json.dumps(auth)
return base64.urlsafe_b64encode(json_data.encode('ascii'))
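# Usage sketch (hedged: the image name is illustrative; the encoded value is the
# payload commonly sent to the Docker daemon as the X-Registry-Auth header):
#   config = read_config()
#   auth = asyncio.run(resolve_auth(config, server_name("alpine")))  # -> docker.io creds
#   headers = {"X-Registry-Auth": encode_header(auth)} if auth else {}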
| 25.108434
| 72
| 0.644914
| 258
| 2,084
| 5.027132
| 0.29845
| 0.053971
| 0.032382
| 0.029298
| 0.075559
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008228
| 0.241843
| 2,084
| 82
| 73
| 25.414634
| 0.812658
| 0
| 0
| 0.140625
| 0
| 0
| 0.09261
| 0.010077
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0.046875
| 0.109375
| 0
| 0.359375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53e10c53f31c7e396a4573a421ae3212e9a11856
| 1,543
|
py
|
Python
|
DPSparkImplementations/paf_kernels.py
|
TEAlab/DPSpark
|
4d53ee13b03e2e12119c28fe2b2241ad20231eac
|
[
"MIT"
] | null | null | null |
DPSparkImplementations/paf_kernels.py
|
TEAlab/DPSpark
|
4d53ee13b03e2e12119c28fe2b2241ad20231eac
|
[
"MIT"
] | null | null | null |
DPSparkImplementations/paf_kernels.py
|
TEAlab/DPSpark
|
4d53ee13b03e2e12119c28fe2b2241ad20231eac
|
[
"MIT"
] | 1
|
2020-12-30T22:12:55.000Z
|
2020-12-30T22:12:55.000Z
|
__author__ = "Zafar Ahmad, Mohammad Mahdi Javanmard"
__copyright__ = "Copyright (c) 2019 Tealab@SBU"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Zafar Ahmad"
__email__ = "zafahmad@cs.stonybrook.edu"
__status__ = "Development"
import numpy as np
import numba as nb
'''
Iterative kernels
'''
def update_iter(u_block, x_block, n, I_, J_, K_):
return _update_iter(np.ascontiguousarray(u_block), np.ascontiguousarray(x_block), n, I_, J_, K_)
@nb.jit(nopython=True)
def _update_iter(u_block, x_block, n, I_, J_, K_):
# For testing purposes, rather than passing f_matrix_broadcast, we call this function
def f_matrix(i, j):
return float(i+j)
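    # (I_, J_, K_) are block coordinates in the tiled DP table; each local index
    # is lifted to a global one (e.g. K = K_ * block_size + k) before the
    # boundary checks below are applied.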
for k in range(x_block.shape[0]-1, -1, -1):
K = K_*x_block.shape[0]+k
for j in range(x_block.shape[0]-1, -1, -1):
J = J_*x_block.shape[0]+j
for i in range(x_block.shape[0]-1, -1, -1):
I = I_*x_block.shape[0]+i
min1 = min(K-2, n-3)
min2 = min(J-1, n-4)
if ((K < n) and (K >= 3) and (J <= min1) and (J >= I+1) and (I <= min2)):
x_block[i, j] = max(x_block[i, j], u_block[j+1, k] + f_matrix(J+1, min(K, 2*J-I+1)))
return x_block
def funcA_iter(block_info, n):
((I_, J_), x_block) = block_info
return update_iter(x_block, x_block, n, I_, J_, I_)
def funcX_iter(block_info, u_block_info, n):
((I_, J_), x_block) = block_info
((UI_, UJ_), u_block) = u_block_info
return update_iter(u_block, x_block, n, I_, J_, UJ_)
| 35.068182
| 104
| 0.610499
| 266
| 1,543
| 3.180451
| 0.293233
| 0.120567
| 0.024823
| 0.085106
| 0.304965
| 0.268322
| 0.239953
| 0.239953
| 0.239953
| 0.068558
| 0
| 0.030586
| 0.2372
| 1,543
| 43
| 105
| 35.883721
| 0.68819
| 0.053791
| 0
| 0.060606
| 0
| 0
| 0.085374
| 0.018195
| 0
| 0
| 0
| 0
| 0
| 1
| 0.151515
| false
| 0
| 0.060606
| 0.060606
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53e339cc8fb766eb00e75883c4d6064e436e942f
| 1,343
|
py
|
Python
|
terrakg/rates.py
|
terrapain/terrakg
|
90c52ca3b227d2daabd604255e793ac5f536c246
|
[
"Apache-2.0"
] | null | null | null |
terrakg/rates.py
|
terrapain/terrakg
|
90c52ca3b227d2daabd604255e793ac5f536c246
|
[
"Apache-2.0"
] | null | null | null |
terrakg/rates.py
|
terrapain/terrakg
|
90c52ca3b227d2daabd604255e793ac5f536c246
|
[
"Apache-2.0"
] | null | null | null |
from terra_sdk.exceptions import LCDResponseError
from terrakg import logger
# Logging
from terrakg.client import ClientContainer
logger = logger.get_logger(__name__)
class Rates:
"""
Access the most recent rates.
"""
def __init__(self, client: ClientContainer):
self.client = client
def get_token_quote_and_fees(self, token_contract: str, pair: str, amount: int = 1000000, reverse: bool = False):
"""
Returns the price for `amount` of the token `pair` (exchange is included in pair).
Set `reverse` to true to get the inverse price.
"""
desc, action, result_key = ("reverse_simulation", "ask_asset", "offer_amount") if reverse else (
"simulation", "offer_asset", "return_amount")
query_msg = {
desc: {
action: {
"amount": str(amount),
"info": {"token": {
"contract_addr": token_contract
}
}
}
}
}
try:
result = self.client.lcd_client.wasm.contract_query(pair, query_msg)
return result[result_key], result['commission_amount']
except LCDResponseError as e:
logger.warning(f"Issue with price query: {e}")
return None
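# Usage sketch (hedged: the client construction and addresses are placeholders,
# not real contracts):
#   client = ClientContainer(...)  # built elsewhere in the application
#   rates = Rates(client)
#   quote, commission = rates.get_token_quote_and_fees("terra1<token>", "terra1<pair>")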
| 30.522727
| 117
| 0.568876
| 141
| 1,343
| 5.212766
| 0.524823
| 0.040816
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007856
| 0.33656
| 1,343
| 43
| 118
| 31.232558
| 0.817059
| 0.125838
| 0
| 0
| 0
| 0
| 0.12866
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.111111
| 0
| 0.296296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53e44f41ef2d0962b6580e25176980ba9b2fe713
| 2,868
|
py
|
Python
|
src/tracking_module.py
|
HonzaKlicpera/Effective-footage-processing-Blender-add-on
|
f3faae3fc56a3ef8f2eabba9af8be718e57f4d35
|
[
"MIT"
] | 1
|
2020-06-09T11:23:44.000Z
|
2020-06-09T11:23:44.000Z
|
src/tracking_module.py
|
HonzaKlicpera/Effective-footage-processing-Blender
|
f3faae3fc56a3ef8f2eabba9af8be718e57f4d35
|
[
"MIT"
] | null | null | null |
src/tracking_module.py
|
HonzaKlicpera/Effective-footage-processing-Blender
|
f3faae3fc56a3ef8f2eabba9af8be718e57f4d35
|
[
"MIT"
] | null | null | null |
import bpy
import os, glob
from pathlib import Path
from enum import Enum
from abc import ABC, abstractmethod
import csv
from . import keying_module
def export_tracking_data(self, context):
clip = context.space_data.clip
clip_name = os.path.splitext(clip.name)[0]
tracker_name = context.scene.tracking_local.tracker_name
output_path = os.path.join(keying_module.get_abs_output_path(context),clip_name)
keying_module.create_directory(output_path)
file = open(os.path.join(output_path,clip_name+".csv"), "w", newline='')
writer = csv.writer(file, delimiter=',')
multiplier = context.scene.tracking_local.tracking_multiplier
tracker = clip.tracking.tracks.get(tracker_name)
if tracker is not None:
prev = tracker.markers[0].co[0]
for m in tracker.markers:
writer.writerow([(m.co[0] - prev) * multiplier])
prev = m.co[0]
self.report({"INFO"},"TRACKER SUCESSFULLY EXPORTED")
else:
self.report({"ERROR"},"TRACKER NOT FOUND")
file.close()
#----------------------------------------
# PROPERTIES
#----------------------------------------
class TrackingSceneProps(bpy.types.PropertyGroup):
tracker_name: bpy.props.StringProperty \
(
name = "Track name",
description = "Name of the tracker for data export",
)
tracking_multiplier: bpy.props.FloatProperty \
(
name = "Distance multiplier",
description = "The exported tracking distance gets multiplied by this value",
default = 1,
min = 0.0001
)
class TrackingPanel(bpy.types.Panel):
bl_label = "Tracking Panel"
bl_idname = "SCENE_PT_tracking_rendering"
bl_space_type = "CLIP_EDITOR"
bl_region_type = "UI"
bl_context = "render"
def draw(self, context):
layout = self.layout
scene = context.scene
box = layout.box()
box.row().label(text = "Tracking export")
box.row().prop(scene.tracking_local, "tracker_name")
box.row().prop(scene.tracking_local, "tracking_multiplier")
box.row().operator("tracking.export_data")
class TrackingExportDataOp(bpy.types.Operator):
bl_idname = "tracking.export_data"
bl_label = "Export Data"
bl_description = "Export the tracking data of the chosen tracker"
def execute(self, context):
export_tracking_data(self, context)
return {"FINISHED"}
classes = (
TrackingExportDataOp,
TrackingPanel,
TrackingSceneProps
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.Scene.tracking_local = bpy.props.PointerProperty(type=TrackingSceneProps)
def unregister():
for cls in reversed(classes):
bpy.utils.unregister_class(cls)
del bpy.types.Scene.tracking_local
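# Usage note: once register() runs as part of a Blender add-on, a "Tracking
# export" box appears in the Clip Editor sidebar (render context); the
# "Export Data" button writes <clip_name>.csv containing the per-frame
# horizontal deltas of the chosen tracker, scaled by the distance multiplier.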
| 30.189474
| 87
| 0.644003
| 335
| 2,868
| 5.364179
| 0.340299
| 0.043406
| 0.0601
| 0.024485
| 0.144686
| 0.031163
| 0
| 0
| 0
| 0
| 0
| 0.004955
| 0.225941
| 2,868
| 95
| 88
| 30.189474
| 0.804505
| 0.032427
| 0
| 0
| 0
| 0
| 0.142702
| 0.009754
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068493
| false
| 0
| 0.09589
| 0
| 0.356164
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53e4b90b1159d838a8edfa7ab52a953ffb4eca72
| 437
|
py
|
Python
|
nodes/2.x/python/View.ViewTemplate.py
|
andydandy74/ClockworkForDynamo
|
bd4ac2c13956a02352a458d01096a35b7258d9f2
|
[
"MIT"
] | 147
|
2016-02-24T16:37:03.000Z
|
2022-02-18T12:10:34.000Z
|
nodes/2.x/python/View.ViewTemplate.py
|
johnpierson/ClockworkForDynamo
|
953d3f56b75e99561978925756e527357f9978dd
|
[
"MIT"
] | 269
|
2016-02-25T14:04:14.000Z
|
2022-03-26T07:30:53.000Z
|
nodes/2.x/python/View.ViewTemplate.py
|
johnpierson/ClockworkForDynamo
|
953d3f56b75e99561978925756e527357f9978dd
|
[
"MIT"
] | 89
|
2016-03-16T18:21:56.000Z
|
2022-02-03T14:34:30.000Z
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
def GetViewTemplate(view):
if not view: return None
elif hasattr(view, "ViewTemplateId"):
if view.ViewTemplateId.IntegerValue == -1: return None
else: return view.Document.GetElement(view.ViewTemplateId)
else: return None
views = UnwrapElement(IN[0])
if isinstance(IN[0], list): OUT = [GetViewTemplate(x) for x in views]
else: OUT = GetViewTemplate(views)
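# Dynamo usage sketch: wire a view (or a list of views) into IN[0]; OUT is the
# view template element applied to each view, or None where no template is set.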
| 29.133333
| 69
| 0.757437
| 59
| 437
| 5.610169
| 0.542373
| 0.090634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007853
| 0.125858
| 437
| 15
| 70
| 29.133333
| 0.858639
| 0
| 0
| 0
| 0
| 0
| 0.050228
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53e73c9f153e27f98b4ee8cc325ad02d4ef90185
| 8,267
|
py
|
Python
|
infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py
|
bohdana-kuzmenko/incubator-dlab
|
d052709450e7916860c7dd191708d5524cf44c1e
|
[
"Apache-2.0"
] | null | null | null |
infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py
|
bohdana-kuzmenko/incubator-dlab
|
d052709450e7916860c7dd191708d5524cf44c1e
|
[
"Apache-2.0"
] | null | null | null |
infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py
|
bohdana-kuzmenko/incubator-dlab
|
d052709450e7916860c7dd191708d5524cf44c1e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import json
import time
from fabric.api import *
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import sys
import os
import uuid
import logging
from Crypto.PublicKey import RSA
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.INFO,
filename=local_log_filepath)
try:
os.environ['exploratory_name']
except:
os.environ['exploratory_name'] = ''
if os.path.exists('/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])):
time.sleep(30)
print('Generating infrastructure names and tags')
dataproc_conf = dict()
try:
dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
except:
dataproc_conf['exploratory_name'] = ''
try:
dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-')
except:
dataproc_conf['computational_name'] = ''
dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
dataproc_conf['key_name'] = os.environ['conf_key_name']
dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
dataproc_conf['region'] = os.environ['gcp_region']
dataproc_conf['zone'] = os.environ['gcp_zone']
dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'])
dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'],
dataproc_conf['exploratory_name'], dataproc_conf['computational_name'])
dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'])
dataproc_conf['bucket_name'] = '{}-{}-bucket'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'])
dataproc_conf['release_label'] = os.environ['dataproc_version']
dataproc_conf['cluster_labels'] = {
os.environ['notebook_instance_name']: "not-configured",
"name": dataproc_conf['cluster_name'],
"sbn": dataproc_conf['service_base_name'],
"user": dataproc_conf['edge_user_name'],
"notebook_name": os.environ['notebook_instance_name'],
"product": "dlab",
"computational_name": dataproc_conf['computational_name']
}
dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],
dataproc_conf['edge_user_name'])
service_account_email = "{}@{}.iam.gserviceaccount.com".format(dataproc_conf['dataproc_service_account_name'],
os.environ['gcp_project_id'])
dataproc_conf['edge_instance_hostname'] = '{0}-{1}-edge'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'])
dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']
edge_status = GCPMeta().get_instance_status(dataproc_conf['edge_instance_hostname'])
if edge_status != 'RUNNING':
logging.info('ERROR: Edge node is unavailable! Aborting...')
print('ERROR: Edge node is unavailable! Aborting...')
ssn_hostname = GCPMeta().get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn')
put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'], ssn_hostname)
append_result("Edge node is unavailable")
sys.exit(1)
print("Will create exploratory environment with edge node as access point as following: ".format(json.dumps(dataproc_conf, sort_keys=True, indent=4, separators=(',', ': '))))
logging.info(json.dumps(dataproc_conf))
local('touch /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
local("echo Waiting for changes to propagate; sleep 10")
dataproc_cluster = json.loads(open('/root/templates/dataengine-service_cluster.json').read().decode('utf-8-sig'))
dataproc_cluster['projectId'] = os.environ['gcp_project_id']
dataproc_cluster['clusterName'] = dataproc_conf['cluster_name']
dataproc_cluster['labels'] = dataproc_conf['cluster_labels']
dataproc_cluster['config']['configBucket'] = dataproc_conf['bucket_name']
dataproc_cluster['config']['gceClusterConfig']['serviceAccount'] = service_account_email
dataproc_cluster['config']['gceClusterConfig']['zoneUri'] = dataproc_conf['zone']
dataproc_cluster['config']['gceClusterConfig']['subnetworkUri'] = dataproc_conf['subnet']
dataproc_cluster['config']['masterConfig']['machineTypeUri'] = os.environ['dataproc_master_instance_type']
dataproc_cluster['config']['workerConfig']['machineTypeUri'] = os.environ['dataproc_slave_instance_type']
dataproc_cluster['config']['masterConfig']['numInstances'] = int(os.environ['dataproc_master_count'])
dataproc_cluster['config']['workerConfig']['numInstances'] = int(os.environ['dataproc_slave_count'])
if int(os.environ['dataproc_preemptible_count']) != 0:
dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(os.environ['dataproc_preemptible_count'])
else:
del dataproc_cluster['config']['secondaryWorkerConfig']
dataproc_cluster['config']['softwareConfig']['imageVersion'] = dataproc_conf['release_label']
ssh_user_pubkey = open(os.environ['conf_key_dir'] + os.environ['edge_user_name'] + '.pub').read()
key = RSA.importKey(open(dataproc_conf['key_path'], 'rb').read())
ssh_admin_pubkey = key.publickey().exportKey("OpenSSH")
dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\n{0}:{2}'.format(dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey)
dataproc_cluster['config']['gceClusterConfig']['tags'][0] = dataproc_conf['cluster_tag']
try:
logging.info('[Creating Dataproc Cluster]')
print('[Creating Dataproc Cluster]')
params = "--region {0} --bucket {1} --params '{2}'".format(dataproc_conf['region'], dataproc_conf['bucket_name'], json.dumps(dataproc_cluster))
try:
local("~/scripts/{}.py {}".format('dataengine-service_create', params))
except:
traceback.print_exc()
raise Exception
keyfile_name = "/root/keys/{}.pem".format(dataproc_conf['key_name'])
local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
except Exception as err:
print('Error: {0}'.format(err))
append_result("Failed to create Dataproc Cluster.", str(err))
local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
sys.exit(1)
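# Usage note: this script is driven entirely by environment variables
# (conf_*, gcp_*, dataproc_*, edge_user_name, ...) and is normally invoked by
# the DLab provisioning framework, which also supplies the /root/templates and
# ~/scripts helpers referenced above; it is not meant to be run by hand.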
| 57.013793
| 178
| 0.670134
| 963
| 8,267
| 5.475597
| 0.265836
| 0.125166
| 0.057652
| 0.036033
| 0.345534
| 0.227574
| 0.159492
| 0.121373
| 0.100891
| 0.100891
| 0
| 0.005332
| 0.160639
| 8,267
| 144
| 179
| 57.409722
| 0.754576
| 0.112254
| 0
| 0.117117
| 0
| 0
| 0.37073
| 0.077207
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.108108
| 0
| 0.108108
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53e7f5b9bbd28821250ea584ab34945cec2c0582
| 931
|
py
|
Python
|
02.py
|
mattias-lundell/aoc2021
|
32bd41446d963c5788d4614106405be65de81bcd
|
[
"MIT"
] | null | null | null |
02.py
|
mattias-lundell/aoc2021
|
32bd41446d963c5788d4614106405be65de81bcd
|
[
"MIT"
] | null | null | null |
02.py
|
mattias-lundell/aoc2021
|
32bd41446d963c5788d4614106405be65de81bcd
|
[
"MIT"
] | null | null | null |
test = """forward 5
down 5
forward 8
up 3
down 8
forward 2
"""
def part1(lines):
h = 0
d = 0
for line in lines:
direction, delta = line.split()
delta = int(delta)
if direction == 'forward':
h += delta
elif direction == 'down':
d += delta
elif direction == 'up':
d -= delta
print(h*d)
def part2(lines):
h = 0
d = 0
a = 0
for line in lines:
direction, delta = line.split()
delta = int(delta)
print(direction, delta)
if direction == 'forward':
h += delta
d += (delta * a)
elif direction == 'down':
a += delta
elif direction == 'up':
a -= delta
print(h*d)
if __name__ == '__main__':
part1(test.splitlines())
part1(open('in02.txt').readlines())
part2(test.splitlines())
part2(open('in02.txt').readlines())
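# On the sample `test` input above, part1 prints 150 (h=15, d=10) and part2
# prints 900 (h=15, d=60), in addition to part2's per-line debug prints.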
| 19.395833
| 39
| 0.493018
| 112
| 931
| 4.026786
| 0.294643
| 0.115299
| 0.119734
| 0.035477
| 0.381375
| 0.343681
| 0.226164
| 0.226164
| 0.226164
| 0.226164
| 0
| 0.035836
| 0.370569
| 931
| 47
| 40
| 19.808511
| 0.733788
| 0
| 0
| 0.487805
| 0
| 0
| 0.106452
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0
| 0
| 0.04878
| 0.073171
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53e9f02f64051ff304c3ebef251b469302530c2e
| 626
|
py
|
Python
|
e/mail-relay/web/apps/mail/migrations/0109_auto_20171130_1047.py
|
zhouli121018/nodejsgm
|
0ccbc8acf61badc812f684dd39253d55c99f08eb
|
[
"MIT"
] | null | null | null |
e/mail-relay/web/apps/mail/migrations/0109_auto_20171130_1047.py
|
zhouli121018/nodejsgm
|
0ccbc8acf61badc812f684dd39253d55c99f08eb
|
[
"MIT"
] | 18
|
2020-06-05T18:17:40.000Z
|
2022-03-11T23:25:21.000Z
|
e/mail-relay/web/apps/mail/migrations/0109_auto_20171130_1047.py
|
zhouli121018/nodejsgm
|
0ccbc8acf61badc812f684dd39253d55c99f08eb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mail', '0108_auto_20171130_1004'),
]
operations = [
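        # The escaped verbose_name values below are Chinese:
        # "relay sender whitelist" and
        # "gateway quarantine report recipient blacklist", respectively.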
migrations.AlterModelOptions(
name='relaysenderwhitelist',
options={'verbose_name': '\u4e2d\u7ee7\u53d1\u4ef6\u4eba\u767d\u540d\u5355'},
),
migrations.AlterModelOptions(
name='spamrptblacklist',
options={'verbose_name': '\u7f51\u5173\u9694\u79bb\u62a5\u544a\u6536\u4ef6\u4eba\u9ed1\u540d\u5355'},
),
]
| 27.217391
| 113
| 0.635783
| 59
| 626
| 6.576271
| 0.728814
| 0.139175
| 0.159794
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149688
| 0.231629
| 626
| 22
| 114
| 28.454545
| 0.656965
| 0.033546
| 0
| 0.25
| 0
| 0.0625
| 0.343284
| 0.237148
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53ea00fc5aec5aef16f52f772300f59c029df625
| 11,168
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_test/_data/sanity/code-smell/runtime-metadata.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_test/_data/sanity/code-smell/runtime-metadata.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_test/_data/sanity/code-smell/runtime-metadata.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Schema validation of ansible-core's ansible_builtin_runtime.yml and collection's meta/runtime.yml"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import re
import sys
from distutils.version import StrictVersion, LooseVersion
from functools import partial
import yaml
from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA
from voluptuous import Required, Schema, Invalid
from voluptuous.humanize import humanize_error
from ansible.module_utils.six import string_types
from ansible.utils.version import SemanticVersion
def isodate(value, check_deprecation_date=False, is_tombstone=False):
"""Validate a datetime.date or ISO 8601 date string."""
# datetime.date objects come from YAML dates, these are ok
if isinstance(value, datetime.date):
removal_date = value
else:
# make sure we have a string
msg = 'Expected ISO 8601 date string (YYYY-MM-DD), or YAML date'
if not isinstance(value, string_types):
raise Invalid(msg)
        # From Python 3.7 on, there is datetime.date.fromisoformat(). For older versions,
        # we have to do things manually.
if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value):
raise Invalid(msg)
try:
removal_date = datetime.datetime.strptime(value, '%Y-%m-%d').date()
except ValueError:
raise Invalid(msg)
# Make sure date is correct
today = datetime.date.today()
if is_tombstone:
# For a tombstone, the removal date must be in the past
if today < removal_date:
raise Invalid(
'The tombstone removal_date (%s) must not be after today (%s)' % (removal_date, today))
else:
        # For a deprecation, the removal date must be in the future. Only test this if
        # check_deprecation_date is truthy, to avoid checks suddenly starting to fail.
if check_deprecation_date and today > removal_date:
raise Invalid(
'The deprecation removal_date (%s) must be after today (%s)' % (removal_date, today))
return value
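# Illustrative calls: isodate('2099-01-01') accepts a future deprecation date;
# isodate('2000-01-01', is_tombstone=True) accepts a past tombstone date;
# isodate('2099-01-01', is_tombstone=True) raises Invalid (future tombstone).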
def removal_version(value, is_ansible, current_version=None, is_tombstone=False):
"""Validate a removal version string."""
msg = (
'Removal version must be a string' if is_ansible else
'Removal version must be a semantic version (https://semver.org/)'
)
if not isinstance(value, string_types):
raise Invalid(msg)
try:
if is_ansible:
version = StrictVersion()
version.parse(value)
version = LooseVersion(value) # We're storing Ansible's version as a LooseVersion
else:
version = SemanticVersion()
version.parse(value)
if version.major != 0 and (version.minor != 0 or version.patch != 0):
raise Invalid('removal_version (%r) must be a major release, not a minor or patch release '
'(see specification at https://semver.org/)' % (value, ))
if current_version is not None:
if is_tombstone:
# For a tombstone, the removal version must not be in the future
if version > current_version:
raise Invalid('The tombstone removal_version (%r) must not be after the '
'current version (%s)' % (value, current_version))
else:
# For a deprecation, the removal version must be in the future
if version <= current_version:
raise Invalid('The deprecation removal_version (%r) must be after the '
'current version (%s)' % (value, current_version))
except ValueError:
raise Invalid(msg)
return value
def any_value(value):
"""Accepts anything."""
return value
def get_ansible_version():
"""Return current ansible-core version"""
from ansible.release import __version__
return LooseVersion('.'.join(__version__.split('.')[:3]))
def get_collection_version():
"""Return current collection version, or None if it is not available"""
import importlib.util
collection_detail_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
'collection_detail.py')
collection_detail_spec = importlib.util.spec_from_file_location('collection_detail', collection_detail_path)
collection_detail = importlib.util.module_from_spec(collection_detail_spec)
sys.modules['collection_detail'] = collection_detail
collection_detail_spec.loader.exec_module(collection_detail)
# noinspection PyBroadException
try:
result = collection_detail.read_manifest_json('.') or collection_detail.read_galaxy_yml('.')
return SemanticVersion(result['version'])
except Exception: # pylint: disable=broad-except
        # We do not care why it fails; if we cannot get the version,
        # just return None to indicate "we don't know".
return None
def validate_metadata_file(path, is_ansible, check_deprecation_dates=False):
"""Validate explicit runtime metadata file"""
try:
with open(path, 'r') as f_path:
routing = yaml.safe_load(f_path)
except yaml.error.MarkedYAMLError as ex:
print('%s:%d:%d: YAML load failed: %s' % (path, ex.context_mark.line +
1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
return
except Exception as ex: # pylint: disable=broad-except
print('%s:%d:%d: YAML load failed: %s' %
(path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
return
if is_ansible:
current_version = get_ansible_version()
else:
current_version = get_collection_version()
# Updates to schema MUST also be reflected in the documentation
# ~https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html
# plugin_routing schema
avoid_additional_data = Schema(
Any(
{
Required('removal_version'): any_value,
'warning_text': any_value,
},
{
Required('removal_date'): any_value,
'warning_text': any_value,
}
),
extra=PREVENT_EXTRA
)
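    # For example, {'removal_version': '2.0.0', 'warning_text': '...'} matches the
    # first alternative, while a dict carrying both removal_version and
    # removal_date is rejected by PREVENT_EXTRA under either alternative.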
deprecation_schema = All(
# The first schema validates the input, and the second makes sure no extra keys are specified
Schema(
{
'removal_version': partial(removal_version, is_ansible=is_ansible,
current_version=current_version),
'removal_date': partial(isodate, check_deprecation_date=check_deprecation_dates),
'warning_text': Any(*string_types),
}
),
avoid_additional_data
)
tombstoning_schema = All(
# The first schema validates the input, and the second makes sure no extra keys are specified
Schema(
{
'removal_version': partial(removal_version, is_ansible=is_ansible,
current_version=current_version, is_tombstone=True),
'removal_date': partial(isodate, is_tombstone=True),
'warning_text': Any(*string_types),
}
),
avoid_additional_data
)
plugin_routing_schema = Any(
Schema({
('deprecation'): Any(deprecation_schema),
('tombstone'): Any(tombstoning_schema),
('redirect'): Any(*string_types),
}, extra=PREVENT_EXTRA),
)
list_dict_plugin_routing_schema = [{str_type: plugin_routing_schema}
for str_type in string_types]
plugin_schema = Schema({
('action'): Any(None, *list_dict_plugin_routing_schema),
('become'): Any(None, *list_dict_plugin_routing_schema),
('cache'): Any(None, *list_dict_plugin_routing_schema),
('callback'): Any(None, *list_dict_plugin_routing_schema),
('cliconf'): Any(None, *list_dict_plugin_routing_schema),
('connection'): Any(None, *list_dict_plugin_routing_schema),
('doc_fragments'): Any(None, *list_dict_plugin_routing_schema),
('filter'): Any(None, *list_dict_plugin_routing_schema),
('httpapi'): Any(None, *list_dict_plugin_routing_schema),
('inventory'): Any(None, *list_dict_plugin_routing_schema),
('lookup'): Any(None, *list_dict_plugin_routing_schema),
('module_utils'): Any(None, *list_dict_plugin_routing_schema),
('modules'): Any(None, *list_dict_plugin_routing_schema),
('netconf'): Any(None, *list_dict_plugin_routing_schema),
('shell'): Any(None, *list_dict_plugin_routing_schema),
('strategy'): Any(None, *list_dict_plugin_routing_schema),
('terminal'): Any(None, *list_dict_plugin_routing_schema),
('test'): Any(None, *list_dict_plugin_routing_schema),
('vars'): Any(None, *list_dict_plugin_routing_schema),
}, extra=PREVENT_EXTRA)
# import_redirection schema
import_redirection_schema = Any(
Schema({
('redirect'): Any(*string_types),
# import_redirect doesn't currently support deprecation
}, extra=PREVENT_EXTRA)
)
list_dict_import_redirection_schema = [{str_type: import_redirection_schema}
for str_type in string_types]
# top level schema
schema = Schema({
# All of these are optional
('plugin_routing'): Any(plugin_schema),
('import_redirection'): Any(None, *list_dict_import_redirection_schema),
# requires_ansible: In the future we should validate this with SpecifierSet
('requires_ansible'): Any(*string_types),
('action_groups'): dict,
}, extra=PREVENT_EXTRA)
# Ensure schema is valid
try:
schema(routing)
except MultipleInvalid as ex:
for error in ex.errors:
# No way to get line/column numbers
print('%s:%d:%d: %s' % (path, 0, 0, humanize_error(routing, error)))
def main():
"""Validate runtime metadata"""
paths = sys.argv[1:] or sys.stdin.read().splitlines()
collection_legacy_file = 'meta/routing.yml'
collection_runtime_file = 'meta/runtime.yml'
# This is currently disabled, because if it is enabled this test can start failing
# at a random date. For this to be properly activated, we (a) need to be able to return
# codes for this test, and (b) make this error optional.
check_deprecation_dates = False
for path in paths:
if path == collection_legacy_file:
print('%s:%d:%d: %s' % (path, 0, 0, ("Should be called '%s'" % collection_runtime_file)))
continue
validate_metadata_file(
path,
is_ansible=path not in (collection_legacy_file, collection_runtime_file),
check_deprecation_dates=check_deprecation_dates)
if __name__ == '__main__':
main()
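# Usage sketch: pass one or more metadata paths as arguments, e.g.
#   runtime-metadata.py meta/runtime.yml
# or pipe a newline-separated list of paths on stdin (see main() above).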
| 39.885714
| 112
| 0.632969
| 1,334
| 11,168
| 5.068966
| 0.215142
| 0.04614
| 0.064626
| 0.062112
| 0.357734
| 0.292073
| 0.252292
| 0.128364
| 0.100266
| 0.065661
| 0
| 0.00392
| 0.268983
| 11,168
| 279
| 113
| 40.028674
| 0.824351
| 0.180068
| 0
| 0.281407
| 0
| 0
| 0.127463
| 0.003082
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035176
| false
| 0
| 0.100503
| 0
| 0.175879
| 0.025126
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53eb2f5275fa111e5a11e8a6b19fe5db87a5dc8d
| 2,160
|
py
|
Python
|
catkin_ws/src/o2ac_flexbe/o2ac_flexbe_states/src/o2ac_flexbe_states/align_bearing_holes.py
|
mitdo/o2ac-ur
|
74c82a54a693bf6a3fc995ff63e7c91ac1fda6fd
|
[
"MIT"
] | 32
|
2021-09-02T12:29:47.000Z
|
2022-03-30T21:44:10.000Z
|
catkin_ws/src/o2ac_flexbe/o2ac_flexbe_states/src/o2ac_flexbe_states/align_bearing_holes.py
|
kroglice/o2ac-ur
|
f684f21fd280a22ec061dc5d503801f6fefb2422
|
[
"MIT"
] | 4
|
2021-09-22T00:51:14.000Z
|
2022-01-30T11:54:19.000Z
|
catkin_ws/src/o2ac_flexbe/o2ac_flexbe_states/src/o2ac_flexbe_states/align_bearing_holes.py
|
kroglice/o2ac-ur
|
f684f21fd280a22ec061dc5d503801f6fefb2422
|
[
"MIT"
] | 7
|
2021-11-02T12:26:09.000Z
|
2022-02-01T01:45:22.000Z
|
#!/usr/bin/env python
from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxyActionClient
# example import of required action
from o2ac_msgs.msg import AlignBearingHolesAction, AlignBearingHolesGoal
class AlignBearingHolesActionState(EventState):
'''
Actionlib for aligning the bearing holes
-- task_name string Name of the task
<= success AlignBearingHoles completed successfully.
<= error AlignBearingHoles failed to execute.
'''
def __init__(self, task_name):
super(
AlignBearingHolesActionState,
self).__init__(
outcomes=[
'success',
'error'])
self._topic = 'o2ac_flexbe/align_bearing_holes'
# pass required clients as dict (topic: type)
self._client = ProxyActionClient(
{self._topic: AlignBearingHolesAction})
self._task_name = task_name
self._success = False
def execute(self, userdata):
if not self._success:
return 'error'
if self._client.has_result(self._topic):
result = self._client.get_result(self._topic)
Logger.logwarn('result %s' % str(result))
if not result:
                Logger.logwarn('Failed to complete AlignBearingHoles')
self._success = False
return 'error'
else:
                Logger.logwarn('Succeeded! Completed AlignBearingHoles')
self._success = True
return 'success'
def on_enter(self, userdata):
goal = AlignBearingHolesGoal()
goal.task_name = self._task_name
self._success = True
try:
self._client.send_goal(self._topic, goal)
except Exception as e:
Logger.logwarn(
'Failed to send the AlignBearingHoles command:\n%s' %
str(e))
self._success = False
def on_exit(self, userdata):
if not self._client.has_result(self._topic):
self._client.cancel(self._topic)
Logger.loginfo('Cancelled active action goal.')
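# Hedged usage note: this state is intended to be added to a FlexBE behavior,
# e.g. AlignBearingHolesActionState(task_name='taskboard'), with its 'success'
# and 'error' outcomes wired to downstream states ('taskboard' is a made-up
# task name, not one defined by this module).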
| 30.422535
| 72
| 0.600463
| 215
| 2,160
| 5.813953
| 0.390698
| 0.0504
| 0.0288
| 0.0304
| 0.0752
| 0.0448
| 0
| 0
| 0
| 0
| 0
| 0.001363
| 0.320833
| 2,160
| 70
| 73
| 30.857143
| 0.850716
| 0.144907
| 0
| 0.155556
| 0
| 0
| 0.119493
| 0.01707
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.066667
| 0
| 0.244444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53eb9134fe73eaf59759bdec6bb46f044d4317f1
| 6,710
|
py
|
Python
|
find_unicode_control.py
|
sebastian-philipp/find-unicode-control
|
170730aff64d17a4d9c57b0284d862c932e1565c
|
[
"BSD-3-Clause"
] | null | null | null |
find_unicode_control.py
|
sebastian-philipp/find-unicode-control
|
170730aff64d17a4d9c57b0284d862c932e1565c
|
[
"BSD-3-Clause"
] | null | null | null |
find_unicode_control.py
|
sebastian-philipp/find-unicode-control
|
170730aff64d17a4d9c57b0284d862c932e1565c
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""Find unicode control characters in source files
By default the script takes one or more files or directories and looks for
unicode control characters in all text files. To narrow down the files, provide
a config file with the -c command-line option, defining a scan_exclude list of
regular expressions matching paths to exclude from the scan.
There is a second mode, enabled with -p: when set to 'all' it prints all
control characters, and when set to 'bidi' it prints only the 9 bidirectional
control characters.
"""
import sys, os, argparse, re, unicodedata, magic
import importlib
from stat import *
scan_exclude = [r'\.git/', r'\.hg/', r'\.desktop$', r'ChangeLog$', r'NEWS$',
r'\.ppd$', r'\.txt$', r'\.directory$']
scan_exclude_mime = [r'text/x-po$', r'text/x-tex$', r'text/x-troff$',
r'text/html$']
verbose_mode = False
# Print to stderr in verbose mode.
def eprint(*args, **kwargs):
if verbose_mode:
print(*args, file=sys.stderr, **kwargs)
# Decode a single latin1 line.
def decodeline(inf):
if isinstance(inf, str):
return inf
return inf.decode('latin-1')
# Make a text string from a file, attempting to decode from latin1 if necessary.
# Other non-utf-8 locales are not supported at the moment.
def getfiletext(filename):
text = None
with open(filename) as infile:
try:
if detailed_mode:
return [decodeline(inf) for inf in infile]
except Exception as e:
eprint('%s: %s' % (filename, e))
return None
try:
text = ''.join(infile)
except UnicodeDecodeError:
eprint('%s: Retrying with latin1' % filename)
try:
text = ''.join([decodeline(inf) for inf in infile])
except Exception as e:
eprint('%s: %s' % (filename, e))
if text:
return set(text)
else:
return None
def analyze_text_detailed(filename, text, disallowed, msg):
line = 0
warned = False
for t in text:
line = line + 1
subset = [c for c in t if c in disallowed]
if subset:
print('%s:%d %s: %s' % (filename, line, msg, subset))
warned = True
if not warned:
eprint('%s: OK' % filename)
# Look for disallowed characters in the text. We reduce all characters into a
# set to speed up analysis. FIXME: Add a slow mode to get line numbers in files
# that have these disallowed chars.
def analyze_text(filename, text, disallowed, msg):
if detailed_mode:
analyze_text_detailed(filename, text, disallowed, msg)
return
if not text.isdisjoint(disallowed):
print('%s: %s: %s' % (filename, msg, text & disallowed))
else:
eprint('%s: OK' % filename)
def should_read(f):
m = magic.detect_from_filename(f)
# Fast check, just the file name.
if [e for e in scan_exclude if re.search(e, f)]:
return False
# Slower check, mime type.
if not 'text/' in m.mime_type \
or [e for e in scan_exclude_mime if re.search(e, m.mime_type)]:
return False
return True
# Get file text and feed into analyze_text.
def analyze_file(f, disallowed, msg):
eprint('%s: Reading file' % f)
if should_read(f):
text = getfiletext(f)
if text:
analyze_text(f, text, disallowed, msg)
else:
eprint('%s: SKIPPED' % f)
# Actual implementation of the recursive descent into directories.
def analyze_any(p, disallowed, msg):
mode = os.stat(p).st_mode
if S_ISDIR(mode):
analyze_dir(p, disallowed, msg)
elif S_ISREG(mode):
analyze_file(p, disallowed, msg)
else:
eprint('%s: UNREADABLE' % p)
# Recursively analyze files in the directory.
def analyze_dir(d, disallowed, msg):
for f in os.listdir(d):
analyze_any(os.path.join(d, f), disallowed, msg)
def analyze_paths(paths, disallowed, msg):
for p in paths:
analyze_any(p, disallowed, msg)
# All control characters. We omit the ascii control characters.
def nonprint_unicode(c):
cat = unicodedata.category(c)
if cat.startswith('C') and cat != 'Cc':
return True
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Look for Unicode control characters")
parser.add_argument('path', metavar='path', nargs='+',
help='Sources to analyze')
parser.add_argument('-p', '--nonprint', required=False,
type=str, choices=['all', 'bidi'],
help='Look for either all non-printable unicode characters or bidirectional control characters.')
parser.add_argument('-v', '--verbose', required=False, action='store_true',
help='Verbose mode.')
parser.add_argument('-d', '--detailed', required=False, action='store_true',
help='Print line numbers where characters occur.')
parser.add_argument('-t', '--notests', required=False,
action='store_true', help='Exclude tests (basically test.* as a component of path).')
parser.add_argument('-c', '--config', required=False, type=str,
help='Configuration file to read settings from.')
args = parser.parse_args()
verbose_mode = args.verbose
detailed_mode = args.detailed
if not args.nonprint:
# Formatting control characters in the unicode space. This includes the
# bidi control characters.
disallowed = set(chr(c) for c in range(sys.maxunicode) if \
unicodedata.category(chr(c)) == 'Cf')
msg = 'unicode control characters'
elif args.nonprint == 'all':
# All control characters.
disallowed = set(chr(c) for c in range(sys.maxunicode) if \
nonprint_unicode(chr(c)))
msg = 'disallowed characters'
else:
# Only bidi control characters.
disallowed = set([
chr(0x202a), chr(0x202b), chr(0x202c), chr(0x202d), chr(0x202e),
chr(0x2066), chr(0x2067), chr(0x2068), chr(0x2069)])
msg = 'bidirectional control characters'
if args.config:
spec = importlib.util.spec_from_file_location("settings", args.config)
settings = importlib.util.module_from_spec(spec)
spec.loader.exec_module(settings)
if hasattr(settings, 'scan_exclude'):
scan_exclude = scan_exclude + settings.scan_exclude
if hasattr(settings, 'scan_exclude_mime'):
scan_exclude_mime = scan_exclude_mime + settings.scan_exclude_mime
if args.notests:
scan_exclude = scan_exclude + [r'/test[^/]+/']
analyze_paths(args.path, disallowed, msg)
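    # CLI sketch (the path is a placeholder):
    #   find_unicode_control.py -p bidi -v src/
    # recursively scans src/ and reports any of the 9 bidirectional control
    # characters it finds.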
| 35.882353
| 109
| 0.634426
| 895
| 6,710
| 4.669274
| 0.275978
| 0.039483
| 0.021536
| 0.017947
| 0.181622
| 0.13161
| 0.079445
| 0.058387
| 0.058387
| 0.058387
| 0
| 0.009782
| 0.253502
| 6,710
| 186
| 110
| 36.075269
| 0.824516
| 0.20313
| 0
| 0.204545
| 0
| 0
| 0.143904
| 0
| 0
| 0
| 0.010145
| 0.005376
| 0
| 1
| 0.083333
| false
| 0
| 0.037879
| 0
| 0.212121
| 0.136364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53ed119c9b07bf3b0dd5b8ddf0cc3d573400eed1
| 34,187
|
py
|
Python
|
vsphere/tests/test_vsphere.py
|
fujigon/integrations-core
|
256b1c138fd1bf1c71db63698737e813cfda00f8
|
[
"BSD-3-Clause"
] | null | null | null |
vsphere/tests/test_vsphere.py
|
fujigon/integrations-core
|
256b1c138fd1bf1c71db63698737e813cfda00f8
|
[
"BSD-3-Clause"
] | null | null | null |
vsphere/tests/test_vsphere.py
|
fujigon/integrations-core
|
256b1c138fd1bf1c71db63698737e813cfda00f8
|
[
"BSD-3-Clause"
] | 1
|
2019-12-23T13:35:17.000Z
|
2019-12-23T13:35:17.000Z
|
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from __future__ import unicode_literals
import time
from datetime import datetime
import mock
import pytest
from mock import MagicMock
from pyVmomi import vim
from datadog_checks.vsphere import VSphereCheck
from datadog_checks.vsphere.cache_config import CacheConfig
from datadog_checks.vsphere.common import SOURCE_TYPE
from datadog_checks.vsphere.errors import BadConfigError, ConnectionError
from datadog_checks.vsphere.vsphere import (
REFRESH_METRICS_METADATA_INTERVAL,
REFRESH_MORLIST_INTERVAL,
RESOURCE_TYPE_METRICS,
SHORT_ROLLUP,
)
from .utils import MockedMOR, assertMOR, disable_thread_pool, get_mocked_server
SERVICE_CHECK_TAGS = ["vcenter_server:vsphere_mock", "vcenter_host:None", "foo:bar"]
def test__init__(instance):
with pytest.raises(BadConfigError):
# Must define a unique 'name' per vCenter instance
VSphereCheck('vsphere', {}, {}, [{'': ''}])
init_config = {
'clean_morlist_interval': 50,
'refresh_morlist_interval': 42,
'refresh_metrics_metadata_interval': -42,
'batch_property_collector_size': -1,
}
check = VSphereCheck('vsphere', init_config, {}, [instance])
i_key = check._instance_key(instance)
assert check.time_started > 0
assert not check.server_instances
assert check.cache_config.get_interval(CacheConfig.Morlist, i_key) == 42
assert check.cache_config.get_interval(CacheConfig.Metadata, i_key) == -42
assert check.clean_morlist_interval == 50
assert len(check.event_config) == 1
assert 'vsphere_mock' in check.event_config
assert not check.registry
assert not check.latest_event_query
assert check.batch_collector_size == 0
assert check.batch_morlist_size == 50
assert check.excluded_host_tags == []
def test_excluded_host_tags(vsphere, instance, aggregator):
# Check default value and precedence of instance config over init config
check = VSphereCheck('vsphere', {}, {}, [instance])
assert check.excluded_host_tags == []
check = VSphereCheck('vsphere', {"excluded_host_tags": ["vsphere_host"]}, {}, [instance])
assert check.excluded_host_tags == ["vsphere_host"]
instance["excluded_host_tags"] = []
check = VSphereCheck('vsphere', {"excluded_host_tags": ["vsphere_host"]}, {}, [instance])
assert check.excluded_host_tags == []
# Test host tags are excluded from external host metadata, but still stored in the cache for metrics
vsphere.excluded_host_tags = ["vsphere_host"]
mocked_vm = MockedMOR(spec="VirtualMachine")
mocked_host = MockedMOR(spec="HostSystem")
mocked_mors_attrs = {
mocked_vm: {
"name": "mocked_vm",
"parent": mocked_host,
"runtime.powerState": vim.VirtualMachinePowerState.poweredOn,
},
mocked_host: {"name": "mocked_host", "parent": None},
}
with mock.patch("datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes", return_value=mocked_mors_attrs):
server_instance = vsphere._get_server_instance(instance)
result = MagicMock()
result.value = [23.4]
server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result], entity=mocked_vm)]
vsphere.metadata_cache = MagicMock()
vsphere.metadata_cache.get_metadata.return_value = {"name": "mymetric", "unit": "kb"}
vsphere.in_compatibility_mode = MagicMock()
vsphere.in_compatibility_mode.return_value = False
vsphere.check(instance)
ext_host_tags = vsphere.get_external_host_tags()
# vsphere_host tag not in external metadata
for host, source_tags in ext_host_tags:
if host == u"mocked_vm":
tags = source_tags["vsphere"]
for tag in tags:
assert "vsphere_host:" not in tag
break
# vsphere_host tag still in cache for sending with metrics
aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname="mocked_vm", count=1)
aggregator.assert_metric_has_tag('vsphere.mymetric', tag="vsphere_host:mocked_host", count=1)
def test__is_excluded():
"""
* Exclude hosts/vms not compliant with the user's `*_include` configuration.
* Exclude "non-labeled" virtual machines when the user configuration instructs to.
"""
# Sample(s)
include_regexes = {'host_include': "f[o]+", 'vm_include': "f[o]+"}
# OK
included_host = MockedMOR(spec="HostSystem", name="foo")
included_vm = MockedMOR(spec="VirtualMachine", name="foo")
assert not VSphereCheck._is_excluded(included_host, {"name": included_host.name}, include_regexes, None)
assert not VSphereCheck._is_excluded(included_vm, {"name": included_vm.name}, include_regexes, None)
# Not OK!
excluded_host = MockedMOR(spec="HostSystem", name="bar")
excluded_vm = MockedMOR(spec="VirtualMachine", name="bar")
assert VSphereCheck._is_excluded(excluded_host, {"name": excluded_host.name}, include_regexes, None)
assert VSphereCheck._is_excluded(excluded_vm, {"name": excluded_vm.name}, include_regexes, None)
# Sample(s)
include_regexes = None
include_only_marked = True
# OK
included_vm = MockedMOR(spec="VirtualMachine", name="foo", label=True)
assert not VSphereCheck._is_excluded(
included_vm, {"customValue": included_vm.customValue}, include_regexes, include_only_marked
)
# Not OK
included_vm = MockedMOR(spec="VirtualMachine", name="foo")
assert VSphereCheck._is_excluded(included_vm, {"customValue": []}, include_regexes, include_only_marked)
def test_vms_in_filtered_host_are_filtered(vsphere, instance):
"""Test that all vms belonging to a filtered host are also filtered"""
server_instance = vsphere._get_server_instance(instance)
filtered_host = MockedMOR(spec="HostSystem")
filtered_vm = MockedMOR(spec="VirtualMachine")
non_filtered_host = MockedMOR(spec="HostSystem")
non_filtered_vm = MockedMOR(spec="VirtualMachine")
mocked_mors_attrs = {
filtered_host: {"name": "filtered_host_number_1", "parent": None},
filtered_vm: {
"name": "this_vm_is_filtered",
"runtime.powerState": vim.VirtualMachinePowerState.poweredOn,
"runtime.host": filtered_host,
},
non_filtered_host: {"name": "non_filtered_host_number_1", "parent": None},
non_filtered_vm: {
"name": "this_vm_is_not_filtered",
"runtime.powerState": vim.VirtualMachinePowerState.poweredOn,
"runtime.host": non_filtered_host,
},
}
regex = {'host_include': '^(?!filtered_.+)'}
with mock.patch("datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes", return_value=mocked_mors_attrs):
obj_list = vsphere._get_all_objs(server_instance, regex, False, [])
assert len(obj_list[vim.VirtualMachine]) == 1
assert len(obj_list[vim.HostSystem]) == 1
assert {
"mor_type": "vm",
"mor": non_filtered_vm,
"hostname": "this_vm_is_not_filtered",
"tags": ["vsphere_host:non_filtered_host_number_1", "vsphere_type:vm"],
} == obj_list[vim.VirtualMachine][0]
assert {
"mor_type": "host",
"mor": non_filtered_host,
"hostname": "non_filtered_host_number_1",
"tags": ["vsphere_type:host"],
} == obj_list[vim.HostSystem][0]
def test__get_all_objs(vsphere, instance):
"""
Test that we don't raise KeyError if the property collector failed to collect some attributes
    and that we handle the case where there are missing attributes
"""
server_instance = vsphere._get_server_instance(instance)
vm_no_parent = MockedMOR(spec="VirtualMachine")
vm_no_powerstate = MockedMOR(spec="VirtualMachine")
vm_host_parent = MockedMOR(spec="VirtualMachine")
mocked_host = MockedMOR(spec="HostSystem")
mocked_datastore = MockedMOR(spec="Datastore")
mocked_datacenter = MockedMOR(spec="Datacenter")
mocked_cluster = MockedMOR(spec="ClusterComputeResource")
mocked_mors_attrs = {
vm_no_parent: {"name": "vm_no_parent", "runtime.powerState": vim.VirtualMachinePowerState.poweredOn},
vm_no_powerstate: {"name": "vm_no_powerstate"},
vm_host_parent: {"parent": mocked_host, "runtime.powerState": vim.VirtualMachinePowerState.poweredOn},
mocked_host: {"name": "mocked_host", "parent": None},
mocked_datastore: {},
mocked_cluster: {"name": "cluster"},
mocked_datacenter: {"parent": MockedMOR(spec="Folder", name="unknown folder"), "name": "datacenter"},
}
with mock.patch("datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes", return_value=mocked_mors_attrs):
obj_list = vsphere._get_all_objs(server_instance, None, False, [])
assert len(obj_list[vim.VirtualMachine]) == 2
assert {
"mor_type": "vm",
"mor": vm_no_parent,
"hostname": "vm_no_parent",
"tags": ["vsphere_host:unknown", "vsphere_type:vm"],
} in obj_list[vim.VirtualMachine]
assert {
"mor_type": "vm",
"mor": vm_host_parent,
"hostname": "unknown",
"tags": ["vsphere_host:mocked_host", "vsphere_host:unknown", "vsphere_type:vm"],
} in obj_list[vim.VirtualMachine]
assert len(obj_list[vim.HostSystem]) == 1
assert {
"mor_type": "host",
"mor": mocked_host,
"hostname": "mocked_host",
"tags": ["vsphere_type:host"],
} in obj_list[vim.HostSystem]
assert len(obj_list[vim.Datastore]) == 1
assert {
"mor_type": "datastore",
"mor": mocked_datastore,
"hostname": None,
"tags": ["vsphere_datastore:unknown", "vsphere_type:datastore"],
} in obj_list[vim.Datastore]
assert len(obj_list[vim.Datacenter]) == 1
assert {
"mor_type": "datacenter",
"mor": mocked_datacenter,
"hostname": None,
"tags": ["vsphere_folder:unknown", "vsphere_datacenter:datacenter", "vsphere_type:datacenter"],
} in obj_list[vim.Datacenter]
assert len(obj_list[vim.ClusterComputeResource]) == 1
assert {
"mor_type": "cluster",
"mor": mocked_cluster,
"hostname": None,
"tags": ["vsphere_cluster:cluster", "vsphere_type:cluster"],
} in obj_list[vim.ClusterComputeResource]
def test__collect_mors_and_attributes(vsphere, instance):
"""
Test that we check for errors when collecting properties with property collector
"""
server_instance = vsphere._get_server_instance(instance)
with mock.patch("datadog_checks.vsphere.vsphere.vmodl"):
obj = MagicMock(missingSet=None, obj="obj")
result = MagicMock(token=None, objects=[obj])
server_instance.content.propertyCollector.RetrievePropertiesEx.return_value = result
log = MagicMock()
vsphere.log = log
mor_attrs = vsphere._collect_mors_and_attributes(server_instance)
log.error.assert_not_called()
assert len(mor_attrs) == 1
obj.missingSet = [MagicMock(path="prop", fault="fault")]
mor_attrs = vsphere._collect_mors_and_attributes(server_instance)
log.error.assert_called_once_with('Unable to retrieve property %s for object %s: %s', 'prop', 'obj', 'fault')
assert len(mor_attrs) == 1
def test__cache_morlist_raw(vsphere, instance):
"""
    Explore the vCenter infrastructure to discover hosts and virtual machines.
Input topology:
```
rootFolder
- datacenter1
- compute_resource1
- host1 # Filtered out
- host2
- folder1
- datacenter2
- compute_resource2
- host3
- vm1 # Not labeled
- vm2 # Filtered out
- vm3 # Powered off
- vm4
```
"""
# Samples
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
instance["host_include_only_regex"] = "host[2-9]"
instance["vm_include_only_regex"] = "vm[^2]"
instance["include_only_marked"] = True
# Discover hosts and virtual machines
vsphere._cache_morlist_raw(instance)
# Assertions: 1 labeled+monitored VM + 2 hosts + 2 datacenters + 2 clusters + 1 datastore.
assertMOR(vsphere, instance, count=8)
# ...on hosts
assertMOR(vsphere, instance, spec="host", count=2)
tags = [
"vcenter_server:vsphere_mock",
"vsphere_folder:rootFolder",
"vsphere_datacenter:datacenter1",
"vsphere_compute:compute_resource1",
"vsphere_cluster:compute_resource1",
"vsphere_type:host",
]
assertMOR(vsphere, instance, name="host2", spec="host", tags=tags)
tags = [
"vcenter_server:vsphere_mock",
"vsphere_folder:rootFolder",
"vsphere_folder:folder1",
"vsphere_datacenter:datacenter2",
"vsphere_compute:compute_resource2",
"vsphere_cluster:compute_resource2",
"vsphere_type:host",
]
assertMOR(vsphere, instance, name="host3", spec="host", tags=tags)
# ...on VMs
assertMOR(vsphere, instance, spec="vm", count=1)
tags = [
"vcenter_server:vsphere_mock",
"vsphere_folder:folder1",
"vsphere_datacenter:datacenter2",
"vsphere_compute:compute_resource2",
"vsphere_cluster:compute_resource2",
"vsphere_host:host3",
"vsphere_type:vm",
]
assertMOR(vsphere, instance, name="vm4", spec="vm", subset=True, tags=tags)
def test_use_guest_hostname(vsphere, instance):
# Default value
with mock.patch("datadog_checks.vsphere.VSphereCheck._get_all_objs") as mock_get_all_objs, mock.patch(
"datadog_checks.vsphere.vsphere.vmodl"
):
vsphere._cache_morlist_raw(instance)
# Default value
assert not mock_get_all_objs.call_args[1]["use_guest_hostname"]
# use guest hostname
instance["use_guest_hostname"] = True
vsphere._cache_morlist_raw(instance)
assert mock_get_all_objs.call_args[1]["use_guest_hostname"]
with mock.patch("datadog_checks.vsphere.vsphere.vmodl"):
# Discover hosts and virtual machines
instance["use_guest_hostname"] = True
vsphere._cache_morlist_raw(instance)
assertMOR(vsphere, instance, spec="vm", count=3)
# Fallback on VM name when guest hostname not available
assertMOR(vsphere, instance, name="vm1", spec="vm", subset=True)
assertMOR(vsphere, instance, name="vm2_guest", spec="vm", subset=True)
assertMOR(vsphere, instance, name="vm4_guest", spec="vm", subset=True)
def test__process_mor_objects_queue(vsphere, instance):
vsphere.log = MagicMock()
vsphere._process_mor_objects_queue_async = MagicMock()
vsphere._process_mor_objects_queue(instance)
# Queue hasn't been initialized
vsphere.log.debug.assert_called_once_with(
"Objects queue is not initialized yet for instance %s, skipping processing", vsphere._instance_key(instance)
)
vsphere.batch_morlist_size = 1
i_key = vsphere._instance_key(instance)
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
vsphere._cache_morlist_raw(instance)
assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 11
vsphere._process_mor_objects_queue(instance)
# Object queue should be empty after processing
assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 0
assert vsphere._process_mor_objects_queue_async.call_count == 0 # realtime only
for call_args in vsphere._process_mor_objects_queue_async.call_args_list:
# query_specs parameter should be a list of size 1 since the batch size is 1
assert len(call_args[0][1]) == 1
instance["collect_realtime_only"] = False
vsphere._cache_morlist_raw(instance)
assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 11
vsphere._process_mor_objects_queue(instance)
# Object queue should be empty after processing
assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 0
assert vsphere._process_mor_objects_queue_async.call_count == 5 # 2 datacenters, 2 clusters, 1 datastore
def test_collect_realtime_only(vsphere, instance):
"""
Test the collect_realtime_only parameter acts as expected
"""
vsphere._process_mor_objects_queue_async = MagicMock()
instance["collect_realtime_only"] = False
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
vsphere._cache_morlist_raw(instance)
vsphere._process_mor_objects_queue(instance)
# Called once to process the 2 datacenters, then 2 clusters, then the datastore
assert vsphere._process_mor_objects_queue_async.call_count == 3
instance["collect_realtime_only"] = True
vsphere._process_mor_objects_queue_async.reset_mock()
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
vsphere._cache_morlist_raw(instance)
vsphere._process_mor_objects_queue(instance)
assert vsphere._process_mor_objects_queue_async.call_count == 0
def test__cache_metrics_metadata(vsphere, instance):
vsphere.metadata_cache = MagicMock()
vsphere._cache_metrics_metadata(instance)
vsphere.metadata_cache.init_instance.assert_called_once_with(vsphere._instance_key(instance))
vsphere.metadata_cache.set_metadata.assert_called_once()
vsphere.metadata_cache.set_metric_ids.assert_called_once()
def test__cache_metrics_metadata_compatibility(vsphere, instance):
server_instance = vsphere._get_server_instance(instance)
i_key = vsphere._instance_key(instance)
counter = MagicMock()
counter.rollupType = "average"
counter.key = 1
vsphere.format_metric_name = MagicMock()
# New way
instance["collection_level"] = 3
server_instance.content.perfManager.QueryPerfCounterByLevel.return_value = [counter]
vsphere._cache_metrics_metadata(instance)
server_instance.content.perfManager.QueryPerfCounterByLevel.assert_called_once_with(3)
assert len(vsphere.metadata_cache._metric_ids[i_key]) == 1
assert len(vsphere.metadata_cache._metadata[i_key]) == 1
vsphere.format_metric_name.assert_called_once_with(counter)
# Compatibility mode
instance["all_metrics"] = False
del instance["collection_level"]
vsphere.format_metric_name.reset_mock()
server_instance.content.perfManager.perfCounter = [counter]
vsphere._cache_metrics_metadata(instance)
assert not vsphere.metadata_cache._metric_ids[i_key]
assert len(vsphere.metadata_cache._metadata[i_key]) == 1
vsphere.format_metric_name.assert_called_once_with(counter, compatibility=True)
def test_in_compatibility_mode(vsphere, instance):
vsphere.log = MagicMock()
instance["collection_level"] = 2
assert not vsphere.in_compatibility_mode(instance)
instance["all_metrics"] = True
assert not vsphere.in_compatibility_mode(instance)
vsphere.log.warning.assert_not_called()
assert not vsphere.in_compatibility_mode(instance, log_warning=True)
vsphere.log.warning.assert_called_once()
del instance["collection_level"]
vsphere.log.reset_mock()
assert vsphere.in_compatibility_mode(instance)
vsphere.log.warning.assert_not_called()
assert vsphere.in_compatibility_mode(instance, log_warning=True)
vsphere.log.warning.assert_called_once()
def test_format_metric_name(vsphere):
counter = MagicMock()
counter.groupInfo.key = "group"
counter.nameInfo.key = "name"
counter.rollupType = "rollup"
assert vsphere.format_metric_name(counter, compatibility=True) == "group.name"
for rollup, short_rollup in SHORT_ROLLUP.items():
counter.rollupType = rollup
assert vsphere.format_metric_name(counter) == "group.name.{}".format(short_rollup)
def test_collect_metrics(vsphere, instance):
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
vsphere.batch_morlist_size = 1
vsphere._collect_metrics_async = MagicMock()
vsphere._cache_metrics_metadata(instance)
vsphere._cache_morlist_raw(instance)
vsphere._process_mor_objects_queue(instance)
vsphere.collect_metrics(instance)
assert vsphere._collect_metrics_async.call_count == 6 # One for each VM/host, datacenters are not collected
for call_args in vsphere._collect_metrics_async.call_args_list:
# query_specs parameter should be a list of size 1 since the batch size is 1
assert len(call_args[0][1]) == 1
def test__collect_metrics_async_compatibility(vsphere, instance):
server_instance = vsphere._get_server_instance(instance)
server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[MagicMock()])]
vsphere.mor_cache = MagicMock()
vsphere.metadata_cache = MagicMock()
vsphere.metadata_cache.get_metadata.return_value = {"name": "unknown"}
vsphere.in_compatibility_mode = MagicMock()
vsphere.log = MagicMock()
vsphere.in_compatibility_mode.return_value = True
vsphere._collect_metrics_async(instance, [])
vsphere.log.debug.assert_called_with('Skipping unknown `%s` metric.', 'unknown')
vsphere.log.reset_mock()
vsphere.in_compatibility_mode.return_value = False
vsphere._collect_metrics_async(instance, [])
vsphere.log.debug.assert_not_called()
def test__collect_metrics_async_hostname(vsphere, instance, aggregator):
server_instance = vsphere._get_server_instance(instance)
result = MagicMock()
result.value = [23.4]
server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result])]
mor = {"hostname": "foo"}
vsphere.mor_cache = MagicMock()
vsphere.mor_cache.get_mor.return_value = mor
vsphere.metadata_cache = MagicMock()
vsphere.metadata_cache.get_metadata.return_value = {"name": "mymetric", "unit": "kb"}
vsphere.in_compatibility_mode = MagicMock()
vsphere.in_compatibility_mode.return_value = False
vsphere._collect_metrics_async(instance, [])
aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname="foo")
def test_check(vsphere, instance):
"""
Test the check() method
"""
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
with mock.patch.object(vsphere, 'set_external_tags') as set_external_tags:
vsphere.check(instance)
set_external_tags.assert_called_once()
all_the_tags = dict(set_external_tags.call_args[0][0])
assert all_the_tags['vm4'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_folder:folder1',
'vsphere_datacenter:datacenter2',
'vsphere_cluster:compute_resource2',
'vsphere_compute:compute_resource2',
'vsphere_host:host3',
'vsphere_host:host3',
'vsphere_type:vm',
]
assert all_the_tags['host1'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_datacenter:datacenter1',
'vsphere_cluster:compute_resource1',
'vsphere_compute:compute_resource1',
'vsphere_type:host',
]
assert all_the_tags['host3'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_folder:folder1',
'vsphere_datacenter:datacenter2',
'vsphere_cluster:compute_resource2',
'vsphere_compute:compute_resource2',
'vsphere_type:host',
]
assert all_the_tags['vm2'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_folder:folder1',
'vsphere_datacenter:datacenter2',
'vsphere_cluster:compute_resource2',
'vsphere_compute:compute_resource2',
'vsphere_host:host3',
'vsphere_host:host3',
'vsphere_type:vm',
]
assert all_the_tags['vm1'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_folder:folder1',
'vsphere_datacenter:datacenter2',
'vsphere_cluster:compute_resource2',
'vsphere_compute:compute_resource2',
'vsphere_host:host3',
'vsphere_host:host3',
'vsphere_type:vm',
]
assert all_the_tags['host2'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_datacenter:datacenter1',
'vsphere_cluster:compute_resource1',
'vsphere_compute:compute_resource1',
'vsphere_type:host',
]
def test_service_check_ko(aggregator, instance):
check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance]))
with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect:
# SmartConnect fails
SmartConnect.side_effect = Exception()
with pytest.raises(ConnectionError):
check.check(instance)
aggregator.assert_service_check(
VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS
)
aggregator.reset()
# SmartConnect succeeds, CurrentTime fails
server = MagicMock()
server.CurrentTime.side_effect = Exception()
SmartConnect.side_effect = None
SmartConnect.return_value = server
with pytest.raises(ConnectionError):
check.check(instance)
aggregator.assert_service_check(
VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS
)
def test_service_check_ok(aggregator, instance):
check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance]))
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect:
SmartConnect.return_value = get_mocked_server()
check.check(instance)
aggregator.assert_service_check(
VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.OK, tags=SERVICE_CHECK_TAGS
)
def test__instance_key(vsphere, instance):
assert vsphere._instance_key(instance) == "vsphere_mock"
del instance['name']
with pytest.raises(BadConfigError):
vsphere._instance_key(instance)
def test__should_cache(instance):
now = time.time()
    # do not use fixtures for the check instance: some params are set at
    # __init__ time and we need to instantiate the check multiple times
check = VSphereCheck('vsphere', {}, {}, [instance])
i_key = check._instance_key(instance)
# first run should always cache
assert check._should_cache(instance, CacheConfig.Morlist)
assert check._should_cache(instance, CacheConfig.Metadata)
# explicitly set cache expiration times, don't use defaults so we also test
# configuration is properly propagated
init_config = {
'refresh_morlist_interval': 2 * REFRESH_MORLIST_INTERVAL,
'refresh_metrics_metadata_interval': 2 * REFRESH_METRICS_METADATA_INTERVAL,
}
check = VSphereCheck('vsphere', init_config, {}, [instance])
# simulate previous runs, set the last execution time in the past
check.cache_config.set_last(CacheConfig.Morlist, i_key, now - (2 * REFRESH_MORLIST_INTERVAL))
check.cache_config.set_last(CacheConfig.Metadata, i_key, now - (2 * REFRESH_METRICS_METADATA_INTERVAL))
with mock.patch("time.time", return_value=now):
assert not check._should_cache(instance, CacheConfig.Morlist)
assert not check._should_cache(instance, CacheConfig.Metadata)
def alarm_event(from_status='green', to_status='red', message='Some error'):
now = datetime.utcnow()
vm = MockedMOR(spec='VirtualMachine')
dc = MockedMOR(spec="Datacenter")
dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1')
alarm = MockedMOR(spec="Alarm")
alarm_arg = vim.event.AlarmEventArgument(alarm=alarm, name='alarm1')
entity = vim.event.ManagedEntityEventArgument(entity=vm, name='vm1')
event = vim.event.AlarmStatusChangedEvent(
entity=entity, fullFormattedMessage=message, createdTime=now, to=to_status, datacenter=dc_arg, alarm=alarm_arg
)
setattr(event, 'from', from_status) # noqa: B009
return event
def migrated_event():
now = datetime.utcnow()
vm = MockedMOR(spec='VirtualMachine', name='vm1')
vm_arg = vim.event.VmEventArgument(vm=vm)
host = MockedMOR(spec='HostSystem')
host_arg = vim.event.HostEventArgument(host=host, name='host1')
host_dest = MockedMOR(spec='HostSystem')
host_dest_arg = vim.event.HostEventArgument(host=host_dest, name='host2')
dc = MockedMOR(spec='Datacenter')
dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1')
dc_dest = MockedMOR(spec='Datacenter')
dc_dest_arg = vim.event.DatacenterEventArgument(datacenter=dc_dest, name='dc2')
ds = MockedMOR(spec='Datastore')
ds_arg = vim.event.DatastoreEventArgument(datastore=ds, name='ds1')
ds_dest = MockedMOR(spec='Datastore')
ds_dest_arg = vim.event.DatastoreEventArgument(datastore=ds_dest, name='ds2')
event = vim.event.VmBeingHotMigratedEvent(
vm=vm_arg,
userName='John',
fullFormattedMessage='Some error',
createdTime=now,
host=host_arg,
destHost=host_dest_arg,
datacenter=dc_arg,
destDatacenter=dc_dest_arg,
ds=ds_arg,
destDatastore=ds_dest_arg,
)
return event
def test_events(aggregator, vsphere, instance):
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
server_instance = vsphere._get_server_instance(instance)
server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()]
vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True}
vsphere.check(instance)
aggregator.assert_event(
"vCenter monitor status changed on this alarm, it was green and it's now red.", tags=['foo:bar']
)
def test_events_tags(aggregator, vsphere, instance):
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
server_instance = vsphere._get_server_instance(instance)
server_instance.content.eventManager.QueryEvents.return_value = [migrated_event()]
vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True}
vsphere.check(instance)
aggregator.assert_event(
"John has launched a hot migration of this virtual machine",
exact_match=False,
tags=[
'foo:bar',
'vsphere_host:host1',
'vsphere_host:host2',
'vsphere_datacenter:dc1',
'vsphere_datacenter:dc2',
],
)
server_instance = vsphere._get_server_instance(instance)
server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()]
vsphere.check(instance)
aggregator.assert_event(
"vCenter monitor status changed on this alarm, it was green and it's now red.", tags=['foo:bar']
)
def test_events_gray_handled(aggregator, vsphere, instance):
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
server_instance = vsphere._get_server_instance(instance)
event = alarm_event(from_status='gray', message='Went from Gray to Red')
server_instance.content.eventManager.QueryEvents.return_value = [event]
vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True}
vsphere.check(instance)
aggregator.assert_event(
"vCenter monitor status changed on this alarm, it was gray and it's now red.", tags=['foo:bar']
)
event = alarm_event(from_status='yellow', to_status='gray', message='Went from Yellow to Gray')
server_instance.content.eventManager.QueryEvents.return_value = [event]
vsphere.check(instance)
aggregator.assert_event(
"vCenter monitor status changed on this alarm, it was yellow and it's now gray.",
tags=['foo:bar'],
alert_type='info',
)
def test_events_gray_ignored(aggregator, vsphere, instance):
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
server_instance = vsphere._get_server_instance(instance)
event = alarm_event(from_status='gray', to_status='green', message='Went from Gray to Green')
server_instance.content.eventManager.QueryEvents.return_value = [event]
vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True}
vsphere.check(instance)
assert not aggregator.events
event = alarm_event(from_status='green', to_status='gray', message='Went from Green to Gray')
server_instance.content.eventManager.QueryEvents.return_value = [event]
vsphere.check(instance)
assert not aggregator.events
| 42.363073
| 120
| 0.678796
| 3,889
| 34,187
| 5.673695
| 0.10414
| 0.026649
| 0.02266
| 0.019941
| 0.63671
| 0.552595
| 0.495037
| 0.433039
| 0.406028
| 0.379424
| 0
| 0.007219
| 0.218007
| 34,187
| 806
| 121
| 42.415633
| 0.818134
| 0.080089
| 0
| 0.440984
| 0
| 0
| 0.194327
| 0.098924
| 0
| 0
| 0
| 0
| 0.186885
| 1
| 0.045902
| false
| 0
| 0.021311
| 0
| 0.070492
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53f15f1ad7b41be043cf58489197157314abeded
| 2,110
|
py
|
Python
|
clip/clip.py
|
keshav11/clip
|
f426dee5c3a6885ddeba20d450d85fc71951c5ca
|
[
"MIT"
] | 1
|
2018-03-27T05:13:43.000Z
|
2018-03-27T05:13:43.000Z
|
clip/clip.py
|
keshav11/clip
|
f426dee5c3a6885ddeba20d450d85fc71951c5ca
|
[
"MIT"
] | 1
|
2018-03-27T14:57:05.000Z
|
2018-03-27T14:57:05.000Z
|
clip/clip.py
|
keshav11/clip
|
f426dee5c3a6885ddeba20d450d85fc71951c5ca
|
[
"MIT"
] | null | null | null |
import os
import argparse
from pathlib import Path
CLIP_FILE = os.path.join(Path.home(), '.clip')
TEMP_FILE = '.TEMP_FILE'
def add_text(key, text):
    # mode 'a' appends and also creates the file if it does not exist yet,
    # so no existence check is needed
    with open(CLIP_FILE, 'a') as clip_file:
        clip_file.write(key + ": " + text + "\n")
def list_texts():
with open(CLIP_FILE, 'r') as clip_file:
for text in clip_file.read().split('\n'):
print(text)
def get_text(key):
    with open(CLIP_FILE, 'r') as clip_file:
        for text in clip_file.read().split('\n'):
            key_val = text.split(':', 1)  # split once so stored values may contain ':'
            if key_val[0].strip() == key:
                print(key_val[1].strip(), end='')
def delete_text(key):
exists = False
with open(TEMP_FILE, 'w+') as temp_file:
with open(CLIP_FILE, 'r') as clip_file:
for text in clip_file.read().split('\n'):
if text.strip() == "":
continue
key_val = text.split(':')
if key_val[0].strip() != key:
temp_file.write(text+"\n")
else:
exists = True
if not exists:
print("key:", key, "was not found in the clip store")
    try:
        os.rename(TEMP_FILE, CLIP_FILE)
    except Exception as ex:
        os.remove(TEMP_FILE)
        print('delete text failed.', ex)
def main():
parser = argparse.ArgumentParser(description='clips and saves texts from the command line')
parser.add_argument('-a', '--add', nargs=2)
parser.add_argument('-g', '--get', nargs=1)
parser.add_argument('-d', '--delete', nargs=1)
parser.add_argument('-l', '--list', action='store_true')
args = parser.parse_args()
if args.add:
key, value = args.add[0], args.add[1]
add_text(key, value)
elif args.list:
list_texts()
elif args.get:
key = args.get[0]
get_text(key)
elif args.delete:
key = args.delete[0]
delete_text(key)
else:
parser.print_usage()
if __name__ == '__main__':
main()
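# Illustrative usage (assumes the script is installed on PATH as `clip`, which is
# an assumption, not something this file sets up):
#   clip -a server "192.168.0.10"   # store text under the key `server`
#   clip -g server                  # prints: 192.168.0.10
#   clip -l                         # list every stored entry
#   clip -d server                  # delete the entry for `server`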
| 26.708861
| 95
| 0.555924
| 290
| 2,110
| 3.862069
| 0.268966
| 0.107143
| 0.042857
| 0.057143
| 0.242857
| 0.201786
| 0.201786
| 0.201786
| 0.201786
| 0.201786
| 0
| 0.006734
| 0.296209
| 2,110
| 78
| 96
| 27.051282
| 0.747475
| 0
| 0
| 0.174603
| 0
| 0
| 0.087204
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079365
| false
| 0
| 0.047619
| 0
| 0.126984
| 0.079365
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53f16f379316b618805c2343722f2905bbfec891
| 2,383
|
py
|
Python
|
tests/unit/test_nsga2.py
|
learsi1911/GAMA_pygmo_v4
|
459807db352dd1c9f9c1e0e322f8c1e9b5abbca0
|
[
"Apache-2.0"
] | 49
|
2018-10-22T06:05:29.000Z
|
2021-09-07T20:12:36.000Z
|
tests/unit/test_nsga2.py
|
learsi1911/GAMA_pygmo_v4
|
459807db352dd1c9f9c1e0e322f8c1e9b5abbca0
|
[
"Apache-2.0"
] | 102
|
2018-10-02T12:00:47.000Z
|
2021-02-24T14:35:30.000Z
|
tests/unit/test_nsga2.py
|
learsi1911/GAMA_pygmo_v4
|
459807db352dd1c9f9c1e0e322f8c1e9b5abbca0
|
[
"Apache-2.0"
] | 11
|
2021-06-04T11:56:19.000Z
|
2022-03-21T20:21:15.000Z
|
from typing import List, Tuple
from gama.genetic_programming.nsga2 import (
NSGAMeta,
fast_non_dominated_sort,
crowding_distance_assignment,
)
def _tuples_to_NSGAMeta(tuples: List[Tuple]) -> List[NSGAMeta]:
""" Converts a list of tuples to NSGAMeta objects. """
    # A helper function is needed: a lambda defined directly in the loop would
    # close over the loop variable by reference, so every metric would read the
    # same (last) tuple element.
def fetch_value(i):
return lambda x: x[i]
metrics = [fetch_value(i) for i in range(len(tuples[0]))]
return [NSGAMeta(t, metrics) for t in tuples]
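# For example (hypothetical values), _tuples_to_NSGAMeta([(3, 5), (5, 3)]) wraps
# each tuple in an NSGAMeta whose metric accessors read the tuple components, so
# the first element ends up with .values == (3, 5) (see the assertions below).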
def test_nsgameta_value_assignment():
pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4)])
three_five, five_three, four_four = pareto
assert three_five.values == (3, 5)
assert five_three.values == (5, 3)
assert four_four.values == (4, 4)
def test_dominates():
pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (2, 4)])
three_five, five_three, two_four = pareto
assert not three_five.dominates(five_three)
assert not five_three.dominates(three_five)
assert three_five.dominates(two_four)
assert not two_four.dominates(three_five)
assert not five_three.dominates(two_four)
assert not two_four.dominates(five_three)
def test_crowding_distance_assignment():
pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4)])
three_five, five_three, four_four = pareto
crowding_distance_assignment(pareto)
assert three_five.distance == float("inf")
assert five_three.distance == float("inf")
assert four_four.distance == 2
def test_crowding_distance_assignment_inf():
pareto = _tuples_to_NSGAMeta([(3, float("inf")), (5, 3), (4, 4)])
three_inf, five_three, four_four = pareto
crowding_distance_assignment(pareto)
assert three_inf.distance == float("inf")
assert five_three.distance == float("inf")
# In our implementation, we ignore 'axis' that contain inf values.
assert four_four.distance == 1
def test_crowd_compare():
pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4), (4.01, 3.99), (4.5, 3.5)])
three_five, five_three, four_four, approx_four_four, half_half = pareto
fast_non_dominated_sort(pareto) # assigns rank
crowding_distance_assignment(pareto) # assigns distance
assert all([three_five.crowd_compare(other) == -1 for other in pareto[2:]])
assert all([five_three.crowd_compare(other) == -1 for other in pareto[2:]])
| 33.56338
| 84
| 0.698699
| 352
| 2,383
| 4.471591
| 0.230114
| 0.074333
| 0.071156
| 0.069886
| 0.461881
| 0.365311
| 0.348793
| 0.348793
| 0.280178
| 0.175985
| 0
| 0.027124
| 0.180025
| 2,383
| 70
| 85
| 34.042857
| 0.778403
| 0.090222
| 0
| 0.191489
| 0
| 0
| 0.006951
| 0
| 0
| 0
| 0
| 0
| 0.361702
| 1
| 0.148936
| false
| 0
| 0.042553
| 0.021277
| 0.234043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53f1e3a9ae5af85a04a5bf0c18896233f3416fe3
| 2,738
|
py
|
Python
|
stac_ingest/utils/tds.py
|
crim-ca/stac-ingest
|
e4cc2a66fee4b86ec238f139135d78215ec91ea4
|
[
"Apache-2.0"
] | null | null | null |
stac_ingest/utils/tds.py
|
crim-ca/stac-ingest
|
e4cc2a66fee4b86ec238f139135d78215ec91ea4
|
[
"Apache-2.0"
] | null | null | null |
stac_ingest/utils/tds.py
|
crim-ca/stac-ingest
|
e4cc2a66fee4b86ec238f139135d78215ec91ea4
|
[
"Apache-2.0"
] | null | null | null |
# File taken from https://github.com/Ouranosinc/pavics-vdb/blob/master/catalog/tds.py
"""Utility function to parse metadata from a THREDDS Data Server catalog."""
def walk(cat, depth=1):
"""Return a generator walking a THREDDS data catalog for datasets.
Parameters
----------
cat : TDSCatalog
THREDDS catalog.
depth : int
Maximum recursive depth. Setting 0 will return only datasets within the top-level catalog. If None,
depth is set to 1000.
"""
yield from cat.datasets.items()
if depth is None:
depth = 1000
if depth > 0:
for name, ref in cat.catalog_refs.items():
child = ref.follow()
yield from walk(child, depth=depth-1)
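# A minimal usage sketch (hypothetical catalog URL; assumes siphon's TDSCatalog,
# which this module is written against but does not import itself):
#
#     from siphon.catalog import TDSCatalog
#     cat = TDSCatalog("https://example.org/thredds/catalog.xml")
#     for name, ds in walk(cat, depth=2):
#         print(name, ds.access_urls.get("NCML"))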
def attrs_from_ds(ds):
"""Extract attributes from TDS Dataset."""
url = ds.access_urls["NCML"]
attrs = attrs_from_ncml(url)
attrs["__services__"] = ds.access_urls
return attrs
def attrs_from_ncml(url):
"""Extract attributes from NcML file.
Parameters
----------
url : str
Link to NcML service of THREDDS server for a dataset.
Returns
-------
dict
Global attribute values keyed by facet names, with variable attributes in `__variable__` nested dict, and
additional specialized attributes in `__group__` nested dict.
"""
import lxml.etree
import requests
parser = lxml.etree.XMLParser(encoding='UTF-8')
ns = {"ncml": "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2"}
# Parse XML content - UTF-8 encoded documents need to be read as bytes
xml = requests.get(url).content
doc = lxml.etree.fromstring(xml, parser=parser)
nc = doc.xpath("/ncml:netcdf", namespaces=ns)[0]
# Extract global attributes
out = _attrib_to_dict(nc.xpath("ncml:attribute", namespaces=ns))
# Extract group attributes
gr = {}
for group in nc.xpath("ncml:group", namespaces=ns):
gr[group.attrib["name"]] = _attrib_to_dict(group.xpath("ncml:attribute", namespaces=ns))
# Extract variable attributes
va = {}
for variable in nc.xpath("ncml:variable", namespaces=ns):
if '_CoordinateAxisType' in variable.xpath("ncml:attribute/@name", namespaces=ns):
continue
va[variable.attrib["name"]] = _attrib_to_dict(variable.xpath("ncml:attribute", namespaces=ns))
out["__group__"] = gr
out["__variable__"] = va
return out
def _attrib_to_dict(elems):
"""Convert element attributes to dictionary.
Ignore attributes with names starting with _
"""
hidden_prefix = "_"
out = {}
for e in elems:
a = e.attrib
if a["name"].startswith(hidden_prefix):
continue
out[a["name"]] = a["value"]
return out
| 29.44086
| 111
| 0.648283
| 355
| 2,738
| 4.870423
| 0.388732
| 0.036437
| 0.027762
| 0.048583
| 0.085599
| 0.042799
| 0
| 0
| 0
| 0
| 0
| 0.008095
| 0.233017
| 2,738
| 93
| 112
| 29.44086
| 0.815238
| 0.367787
| 0
| 0.095238
| 0
| 0
| 0.147004
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.047619
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53f27d7f999c3ddce62ec7074bca13f18a96eb7b
| 4,484
|
py
|
Python
|
tact/util.py
|
brunel-physics/mva_scikit
|
b0182da89efa466461aaf2cff4387c821df1758b
|
[
"BSD-3-Clause"
] | null | null | null |
tact/util.py
|
brunel-physics/mva_scikit
|
b0182da89efa466461aaf2cff4387c821df1758b
|
[
"BSD-3-Clause"
] | null | null | null |
tact/util.py
|
brunel-physics/mva_scikit
|
b0182da89efa466461aaf2cff4387c821df1758b
|
[
"BSD-3-Clause"
] | 2
|
2020-05-18T19:52:32.000Z
|
2022-01-24T10:07:35.000Z
|
# -*- coding: utf-8 -*-
"""
Module containing miscellaneous utility functions.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
import itertools
import numpy as np
class BinaryTree(object):
def __init__(self):
self.left = None
self.right = None
self.val = None
def deep_update(d1, d2):
"""
Adds key-value pairs in d2 to d1. Conflicts are resolved in favour of d2.
Recurses into all values in d2 which belong to the collections.Mapping
abstract base class.
Parameters
----------
d1 : collections.Mapping
Base dictionary
d2 : collections.Mapping
Dictionary with updated values
Returns
-------
d1 : collections.Mapping
Updated dictionary
"""
    for k, v in d2.items():  # items() works on both Python 2 and 3; iteritems() is Python-2-only
if isinstance(v, collections.Mapping):
d1[k] = deep_update(d1.get(k, {}), v)
else:
d1[k] = v
return d1
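# Hypothetical example of the merge semantics (not part of the original module):
#     d1 = {"a": {"x": 1, "y": 2}, "b": 3}
#     d2 = {"a": {"y": 20, "z": 30}}
#     deep_update(d1, d2)  # -> {"a": {"x": 1, "y": 20, "z": 30}, "b": 3}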
def nodes(tree):
"""
Return a list of values at every node of a tree.
Parameters
----------
tree : BinaryTree
BinaryTree to extract nodes from.
Returns
-------
nodelist : list
List of values at tree nodes.
"""
nodelist = []
def _get_nodes(tree):
"""
Build up a list of nodes.
Parameters
----------
tree : BinaryTree
BinaryTree to extract nodes from.
Returns
-------
None
"""
nodelist.append(tree.val)
try:
_get_nodes(tree.left)
except AttributeError:
nodelist.append(tree.left)
try:
_get_nodes(tree.right)
except AttributeError:
nodelist.append(tree.right)
_get_nodes(tree)
return nodelist
def maenumerate(marr):
"""
Multidimensional index iterator for masked arrays.
Return an iterator yielding pairs of array coordinates and values, with
masked values skipped.
Parameters
----------
marr : MaskedArray
Input array.
"""
    # zip replaces the Python-2-only itertools.izip
    for i, m in zip(np.ndenumerate(marr), ~marr.mask.ravel()):
if m:
yield i
def corrcoef(x, y=None, rowvar=True, fweights=None, aweights=None):
"""
Return Pearson product-moment correlation coefficients.
This is a copy of the implementation found in numpy, with the removal of
    the deprecated bias and ddof keyword arguments, and the addition of
    the fweights and aweights arguments, which are passed to np.cov.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
fweights : array_like, int, optional
        1-D array of integer frequency weights; the number of times each
observation vector should be repeated.
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
Returns
-------
R : ndarray
The correlation coefficient matrix of the variables.
"""
c = np.cov(x, y, rowvar, fweights=fweights, aweights=aweights)
try:
d = np.diag(c)
except ValueError:
# scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
stddev = np.sqrt(d.real)
c /= stddev[:, None]
c /= stddev[None, :]
# Clip real and imaginary parts to [-1, 1]. This does not guarantee
# abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without
# excessive work.
np.clip(c.real, -1, 1, out=c.real)
if np.iscomplexobj(c):
np.clip(c.imag, -1, 1, out=c.imag)
return c
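# Sketch of a call with frequency weights (illustrative values only):
#     x = np.array([[1.0, 2.0, 3.0, 4.0],
#                   [4.0, 3.0, 2.0, 1.0]])
#     corrcoef(x, fweights=[1, 1, 2, 2])
#     # -> 2x2 matrix with off-diagonal entries of -1 (rows are exactly anticorrelated)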
| 26.222222
| 79
| 0.61686
| 561
| 4,484
| 4.885918
| 0.404635
| 0.032835
| 0.017512
| 0.010215
| 0.083181
| 0.04305
| 0.04305
| 0.04305
| 0.04305
| 0
| 0
| 0.009157
| 0.293711
| 4,484
| 170
| 80
| 26.376471
| 0.856331
| 0.583408
| 0
| 0.104167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.083333
| 0
| 0.3125
| 0.020833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53f4891624f4d3bc5f0cf1971fce25d204c1cf18
| 1,325
|
py
|
Python
|
orbit/actions/conditional_action_test.py
|
mcasanova1445/models
|
37be0fdb4abccca633bb3199a4e6f3f71cd174d9
|
[
"Apache-2.0"
] | 1
|
2020-09-14T10:46:07.000Z
|
2020-09-14T10:46:07.000Z
|
orbit/actions/conditional_action_test.py
|
mdsaifhaider/models
|
7214e17eb425963ec3d0295be215d5d26deaeb32
|
[
"Apache-2.0"
] | 8
|
2020-05-19T00:52:30.000Z
|
2020-06-04T23:57:20.000Z
|
orbit/actions/conditional_action_test.py
|
mdsaifhaider/models
|
7214e17eb425963ec3d0295be215d5d26deaeb32
|
[
"Apache-2.0"
] | 2
|
2021-10-07T04:47:04.000Z
|
2021-12-18T04:18:19.000Z
|
# Copyright 2022 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for orbit.actions.conditional_action."""
from orbit import actions
import tensorflow as tf
class ConditionalActionTest(tf.test.TestCase):
def test_conditional_action(self):
    # Define a function to raise an AssertionError, since we can't raise in a lambda.
def raise_assertion(arg):
raise AssertionError(str(arg))
conditional_action = actions.ConditionalAction(
condition=lambda x: x['value'], action=raise_assertion)
conditional_action({'value': False}) # Nothing is raised.
with self.assertRaises(AssertionError) as ctx:
conditional_action({'value': True})
    self.assertEqual(str(ctx.exception), "{'value': True}")  # exceptions have no .message attribute in Python 3
if __name__ == '__main__':
tf.test.main()
| 33.125
| 79
| 0.739623
| 182
| 1,325
| 5.296703
| 0.593407
| 0.062241
| 0.026971
| 0.033195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007266
| 0.169057
| 1,325
| 39
| 80
| 33.974359
| 0.868302
| 0.538868
| 0
| 0
| 0
| 0
| 0.064298
| 0
| 0
| 0
| 0
| 0
| 0.357143
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53f4cffa9d98d6fc50ab66c96fe1f4f487091562
| 880
|
py
|
Python
|
Customizations/Tagging/show_tags.task.py
|
phnomcobra/valarie-content
|
b1f6242605badd2b0b2e53c4320f5d963b5e0b21
|
[
"MIT"
] | null | null | null |
Customizations/Tagging/show_tags.task.py
|
phnomcobra/valarie-content
|
b1f6242605badd2b0b2e53c4320f5d963b5e0b21
|
[
"MIT"
] | null | null | null |
Customizations/Tagging/show_tags.task.py
|
phnomcobra/valarie-content
|
b1f6242605badd2b0b2e53c4320f5d963b5e0b21
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
################################################################################
# DOCUMENTS
#
# Justin Dierking
# justin.l.dierking.civ@mail.mil
# 614 692 2050
#
# 04/22/2018 Original Construction
################################################################################
import traceback
import json
class Task:
def __init__(self):
self.output = []
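        # STATUS_NOT_EXECUTED, STATUS_SUCCESS and STATUS_EXCEPTION are not defined
        # in this file; the valarie runtime appears to inject these constants into
        # the task's scope before execute() is called (an assumption, not verified).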
self.status = STATUS_NOT_EXECUTED
def execute(self, cli):
try:
keys = cli.AGTCollections("tags")
self.status = STATUS_SUCCESS
for key in keys.find():
#key.set()
self.output.append(json.dumps(key.object, indent = 4))
except Exception:
self.status = STATUS_EXCEPTION
self.output.append(traceback.format_exc())
return self.status
| 25.882353
| 80
| 0.465909
| 80
| 880
| 5.0125
| 0.65
| 0.099751
| 0.119701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029641
| 0.271591
| 880
| 34
| 81
| 25.882353
| 0.595944
| 0.145455
| 0
| 0
| 0
| 0
| 0.006849
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53fa17d1fb343f99d7928294d83a0d41844594ce
| 748
|
py
|
Python
|
backup/models.py
|
helwete/simple-backup
|
c7dd1a08d398f5b4005c187e274e192b2e024f30
|
[
"MIT"
] | null | null | null |
backup/models.py
|
helwete/simple-backup
|
c7dd1a08d398f5b4005c187e274e192b2e024f30
|
[
"MIT"
] | null | null | null |
backup/models.py
|
helwete/simple-backup
|
c7dd1a08d398f5b4005c187e274e192b2e024f30
|
[
"MIT"
] | null | null | null |
from datetime import date
from django.conf import settings
from django.db import models
# Create your models here.
def user_directory_path(instance, filename):
    # file will be uploaded to MEDIA_ROOT/<username>/<YYYY/MM/DD>/<filename>
    today = date.today()
    # the strftime format must not end with '/', or the path gains a double slash
    return '{0}/{2}/{1}'.format(instance.user.username, filename, today.strftime("%Y/%m/%d"))
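# Example (hypothetical): for user "alice" uploading "notes.txt" on 2018-04-22,
# the upload is stored under MEDIA_ROOT at "alice/2018/04/22/notes.txt".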
class Upload(models.Model):
uploaded_file = models.FileField(null=True, blank=True, upload_to=user_directory_path)
file_name = models.CharField(max_length=255, null=True)
date_uploaded = models.DateField(auto_now_add=True, null=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True)
def __str__(self):
return self.uploaded_file.name
| 35.619048
| 94
| 0.743316
| 108
| 748
| 4.953704
| 0.555556
| 0.059813
| 0.063551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009302
| 0.137701
| 748
| 20
| 95
| 37.4
| 0.820155
| 0.108289
| 0
| 0
| 0
| 0
| 0.03012
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.230769
| 0.076923
| 0.923077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53fa743e6670e6a8830a736afc87f494f4f511b4
| 2,713
|
py
|
Python
|
Kmeans Cluster/Kmeans_Compare.py
|
Jojoxiao/Machine-Learning-for-Beginner-by-Python3
|
71b91c9cba5803bd78d4d31be6dabb1d3989e968
|
[
"MIT"
] | 397
|
2018-05-28T02:07:32.000Z
|
2022-03-30T09:53:37.000Z
|
Kmeans Cluster/Kmeans_Compare.py
|
976634681/Machine-Learning-for-Beginner-by-Python3
|
d9effcbb1b390dc608a0f4c0a28f0ad03892047a
|
[
"MIT"
] | 4
|
2019-01-14T16:41:02.000Z
|
2021-03-11T13:23:06.000Z
|
Kmeans Cluster/Kmeans_Compare.py
|
976634681/Machine-Learning-for-Beginner-by-Python3
|
d9effcbb1b390dc608a0f4c0a28f0ad03892047a
|
[
"MIT"
] | 235
|
2018-06-28T05:31:40.000Z
|
2022-03-11T03:20:07.000Z
|
#-*- coding:utf-8 -*-
# &Author  AnFany
# Import the two K-means implementations being compared
import Kmeans_AnFany as K_Af  # AnFany
import Kmeans_Sklearn as K_Sk  # Sklearn
import matplotlib.pyplot as plt
from pylab import mpl  # needed to render Chinese text in the plots
mpl.rcParams['font.sans-serif'] = ['FangSong']  # use the FangSong font for Chinese labels
mpl.rcParams['axes.unicode_minus'] = False
import numpy as np
# Generate the dataset with sklearn
from sklearn.datasets import make_blobs
X, Y = make_blobs(n_samples=600, centers=6, n_features=2)
# Scatter plot of the training data
def fig_scatter(exdata, eydata, titl='训练数据散点图', co=['r', 'g', 'k', 'b', 'y', 'm'], marker=['o','^','H','v','d','>']):
typeclass = sorted(list(set(eydata)))
for ii in range(len(typeclass)):
datax = exdata[eydata == typeclass[ii]]
plt.scatter(datax[:, 0], datax[:, -1], c=co[ii], s=50, marker=marker[ii])
plt.title(titl)
#plt.legend(['%d类'%i for i in typeclass], bbox_to_anchor=(1.2, 0.9))
plt.xlabel('特征1')
plt.ylabel('特征2')
# Run the two implementations
# AnFany
kresult = K_Af.op_kmeans(X, countcen=6)
# Sklearn
sk = K_Sk.KMeans(init='k-means++', n_clusters=6, n_init=10)
train = sk.fit(X)
result = sk.predict(X)
skru = K_Sk.trans(result)
# Scatter plot of the clusters found by an algorithm
def sca(Xdata, Center, signdict, co=['r', 'g', 'y', 'b', 'c', 'm'], marker=['o','^','H','s','d','*'], titl = 'AnFany 结果'):
du = 1
for jj in signdict:
xdata = Xdata[signdict[jj]]
        plt.scatter(xdata[:, 0], xdata[:, -1], c=co[jj], s=50, marker=marker[jj], label='%d类' % jj)  # plot the samples of each cluster
for ss in Center:
if du:
                plt.scatter(ss[0], ss[1], c='k', s=100, marker='8', label='类别中心')  # plot the cluster centers
du = 0
else:
                plt.scatter(ss[0], ss[1], c='k', s=100, marker='8')  # plot the cluster centers
plt.legend(bbox_to_anchor=(1.2, 1))
plt.title(titl)
plt.xlabel('特征1')
plt.ylabel('特征2')
# Euclidean distance
def dis(sample, center):
cen = np.array([center])
sample = np.array(sample)
if len(sample) != 0:
usb = np.sum((sample - cen) ** 2, axis=1) ** 0.5
return usb
else:
return 0
# Cost of the final clustering: sum of distances of samples to their cluster centers
def Cost(Xdata, typedict):
center = {}
for kk in typedict:
        center[kk] = np.mean(Xdata[typedict[kk]], axis=0)  # cluster mean
cio = 0
for cc in typedict:
cio += np.sum(dis(Xdata[typedict[cc]], center[cc]))
return cio
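# Quick illustrative check of the helpers (hypothetical points, one cluster):
#     pts = np.array([[0.0, 0.0], [2.0, 0.0]])
#     Cost(pts, {0: [0, 1]})  # the center is (1, 0), so the cost is 1 + 1 = 2.0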
# Display the final results
plt.subplot(2, 2, 1)
fig_scatter(X, Y)
plt.subplot(2, 2, 2)
sca(X, kresult[0], kresult[2])
plt.subplot(2, 2, 3)
sca(X, train.cluster_centers_, skru, titl='Sklearn 结果')
plt.subplot(2, 2, 4)
plt.axis('off')
plt.text(0.3, 0.6, 'AnFany 最终的分类成本值为:%.5f'%Cost(X, kresult[2]))
plt.text(0.3, 0.3, 'Sklearn 最终的分类成本值为:%.5f'%Cost(X, skru))
plt.show()
| 25.59434
| 123
| 0.573535
| 419
| 2,713
| 3.658711
| 0.353222
| 0.006523
| 0.028702
| 0.031311
| 0.100457
| 0.069145
| 0.037834
| 0.037834
| 0.037834
| 0.037834
| 0
| 0.036591
| 0.234427
| 2,713
| 105
| 124
| 25.838095
| 0.701493
| 0.093255
| 0
| 0.125
| 0
| 0
| 0.072719
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.09375
| 0
| 0.203125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53faaa8c310593f3046382b5d7e3fa8922d7e1b7
| 5,544
|
py
|
Python
|
control_panel.py
|
Stayermax/5dof-bartender-robot
|
dd04303afd2c252e6f7105e33ba35b01f3915194
|
[
"MIT"
] | null | null | null |
control_panel.py
|
Stayermax/5dof-bartender-robot
|
dd04303afd2c252e6f7105e33ba35b01f3915194
|
[
"MIT"
] | null | null | null |
control_panel.py
|
Stayermax/5dof-bartender-robot
|
dd04303afd2c252e6f7105e33ba35b01f3915194
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Control panel file
"""
import pddl_solver as pddl
import ik
import rospy
from get_object_position import get_object_position
import time
from constants import *
from spawn_models import reset_model_position, reset_all, spawn_model, spawn_all_models
from delete_models import delete_all, delete_model
def control_panel():
robot = ik.MoveGroupPythonIntefaceTutorial()
# robot.go_to_init_state()
# robot.open_gripper()
bottle = 'bottle_1'
    # simulation
current_bottle_orig_pos = get_object_position(bottle)
# real_world
# current_bottle_orig_pos = Real_poses(bottle)
# current_bottle_orig_pos[-1] += BZS
while(True):
print()
cmd = raw_input("Enter command:\n open, close, init,\n gtb, hover, gtc, move,\n pour, cb, rb, ra,\n pgr, parm, pj,\n setj, att, box,\n del, dela, spawn, exit:\n")
if(cmd == 'open'): # open the gripper
robot.open_gripper()
elif(cmd == 'close'): # close the gripper
            goal = raw_input("Enter closing goal in range [-0.12; 0]:\n")
            # read the raw string first; float("") would raise before the default could apply
            goal = -0.075 if goal == "" else float(goal)
while(goal > 0 or goal < -0.12):
goal = float(raw_input("Enter closing goal in range [-0.12; 0]:\n"))
robot.close_gripper(goal)
elif(cmd == 'init'): # go to initial pose
robot.go_to_init_state()
elif(cmd == 'gtb'): # go to bottle
x,y,z = current_bottle_orig_pos
h = raw_input("Set z level: ")
if(h == ""):
h = BZS
else:
h = float(h)
robot.go_to_xyz(x, y, z + h)
elif(cmd == 'hover'): # hover over the bottle
x,y,z = current_bottle_orig_pos
robot.go_to_xyz(x, y, BUO)
elif(cmd == 'gtc'): # go to cup
# simulation
x,y,z = get_object_position('cup_1')
# real_world
# pos, angle = Real_world_PourPos[cup]
# x,y,z = pos
robot.go_to_xyz(x, y, CUO)
elif(cmd == 'move'): # go to cup
x,y,z = robot.get_arm_pose()
dir = raw_input("Enter coord: x,y or z:\n")
while(dir not in ['x','y','z']):
dir = raw_input("Enter coord: x,y or z:\n")
step = float(raw_input("Enter step size:\n"))
if(dir == 'x'):
x += step
elif(dir == 'y'):
y += step
elif(dir == 'z'):
z += step
robot.go_to_xyz(x, y, z)
elif(cmd == 'pour'): # turn gripper on pouring angle
robot.rotate_gripper(angle = 1)
rospy.sleep(1.5)
robot.rotate_gripper(angle = 0)
elif(cmd == 'cb'): # change bottle
b_n = int(raw_input("Enter bottle number from 1 to 6\n"))
while(b_n not in [1,2,3,4,5,6]):
b_n = int(raw_input("Enter bottle number from 1 to 6\n"))
bottle = 'bottle_' + str(b_n)
            # simulation
current_bottle_orig_pos = get_object_position(bottle)
# real_world
# current_bottle_orig_pos = Real_poses(bottle)
elif(cmd == 'rb'): # reset bottle position
reset_model_position(bottle)
elif(cmd == 'ra'): # reset all models positions
reset_all()
        elif(cmd == 'pgr'): # print gripper position
pos = robot.get_gripper_pose()
print("Current gripper coordinates: " + str(pos))
        elif(cmd == 'parm'): # print arm position
pos = robot.get_arm_pose()
print("Current arm coordinates: " + str(pos))
elif(cmd == 'pj'): # print arm joints
current_joints = robot.get_arm_joints()
print("Current joints poistion: " + str(current_joints))
elif(cmd == 'setj'): # set robot joint angles
joints = robot.get_arm_joints()
# joints[0] = float(raw_input("Enter theta_0")) # We don't want to change the arm direction
t1 = raw_input("Enter theta_1: ")
t2 = raw_input("Enter theta_2: ")
t3 = raw_input("Enter theta_3: ")
if(t1 != ''):
joints[1] = float(t1)
if(t2 != ''):
joints[2] = float(t2)
if(t3 != ''):
joints[3] = float(t3)
joints[4] = 0
robot.set_joints(joints)
elif(cmd == 'att'): # attaches object to the gripper
robot.attach_object(bottle)
attached_objects = robot.scene.get_attached_objects([bottle])
print("Attached objects: " + str(attached_objects))
elif(cmd == 'box'):
robot.add_box()
robot.attach_object('box')
attached_objects = robot.scene.get_attached_objects([bottle])
print("Attached objects: " + str(attached_objects))
elif(cmd == 'del'):
delete_model(bottle)
print("Bottle " + str(bottle.split('_')[1]) + " was deleted")
elif(cmd == 'dela'):
delete_all()
print("All models were deleted")
elif(cmd == 'spawn'):
spawn_model(bottle)
print("Bottle " + str(bottle.split('_')[1]) + " was spawned")
elif(cmd == 'exit'): # exit control panel script
print('Finish performance')
return
else:
print('Wrong command')
if __name__ == '__main__':
control_panel()
| 40.173913
| 170
| 0.530483
| 696
| 5,544
| 4.033046
| 0.222701
| 0.049875
| 0.055575
| 0.049875
| 0.333096
| 0.286783
| 0.286783
| 0.265052
| 0.244389
| 0.215889
| 0
| 0.014782
| 0.341089
| 5,544
| 138
| 171
| 40.173913
| 0.753627
| 0.136724
| 0
| 0.140351
| 0
| 0.008772
| 0.154754
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008772
| false
| 0
| 0.070175
| 0
| 0.087719
| 0.096491
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53fac3e7275b1080c646a6ed12952be14a9e25f1
| 1,427
|
py
|
Python
|
Enigma/Enigma.py
|
archanpatkar/Enigma
|
dbbc1fda99bf451a0284f051c724ed43915dfe2a
|
[
"MIT"
] | 3
|
2019-06-25T06:46:50.000Z
|
2021-07-27T14:14:32.000Z
|
Enigma/Enigma.py
|
archanpatkar/Enigma
|
dbbc1fda99bf451a0284f051c724ed43915dfe2a
|
[
"MIT"
] | null | null | null |
Enigma/Enigma.py
|
archanpatkar/Enigma
|
dbbc1fda99bf451a0284f051c724ed43915dfe2a
|
[
"MIT"
] | 1
|
2021-07-27T14:20:30.000Z
|
2021-07-27T14:20:30.000Z
|
from Enigma.Rotor import Rotor
from Enigma.Reflector import Reflector
from Enigma.Plugboard import Plugboard
class Enigma:
    def __init__(self, rotors=None, plugboard=None, reflector=None):
        # avoid mutable default arguments: a shared default rotor list would be
        # reused (and stepped) across every Enigma instance
        if rotors is None:
            rotors = [Rotor(0, "IC"), Rotor(0, "IIC"), Rotor(0, "IIIC")]
        self.rotors = rotors
        for i in range(len(rotors)):
            if i + 1 < len(rotors):
                # bind i at definition time; a bare closure would see the final
                # value of i by the time the "Sidereal" event fires
                rotors[i].on("Sidereal", lambda *args, i=i: rotors[i + 1].step())
        self.Plugboard = plugboard if plugboard is not None else Plugboard()
        self.Reflector = reflector if reflector is not None else Reflector("A")
    def encrypt(self, data):
        data = data.upper().replace(" ", "")
        string = ""
        for char in data:
            string += self.each(char, True)
        return string
    def decrypt(self, data):
        data = data.upper()
        string = ""
        for char in data:
            string += self.each(char, False)
        return string
def each(self,char,flag):
self.rotors[0].step()
output = self.Plugboard.get(char)
for rotor in self.rotors:
if flag:
output = rotor.scramble(output)
else:
output = rotor.unscramble(output)
output = self.Reflector.get(output)
for rotor in self.rotors[::-1]:
if flag:
output = rotor.scramble(output)
else:
output = rotor.unscramble(output)
        return self.Plugboard.get(output)
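# Usage sketch (hypothetical; whether decrypt() inverts encrypt() depends on the
# Rotor/Reflector wiring defined in the sibling modules):
#     machine = Enigma()
#     ciphertext = machine.encrypt("hello world")
#     plaintext = Enigma().decrypt(ciphertext)  # fresh machine = same initial rotor state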
| 32.431818
| 143
| 0.5459
| 161
| 1,427
| 4.813665
| 0.279503
| 0.064516
| 0.030968
| 0.04129
| 0.36129
| 0.255484
| 0.255484
| 0.255484
| 0.255484
| 0.16
| 0
| 0.007307
| 0.328662
| 1,427
| 43
| 144
| 33.186047
| 0.80167
| 0
| 0
| 0.368421
| 0
| 0
| 0.013315
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.078947
| 0
| 0.289474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53fb4aef0b525310a37b5aa5c278d91c9afe8fd1
| 2,711
|
py
|
Python
|
magicauth/send_token.py
|
JMIdeaMaker/django-magicauth
|
ffca3423c46f8f3d7e49eaf374b33265d4730587
|
[
"MIT"
] | null | null | null |
magicauth/send_token.py
|
JMIdeaMaker/django-magicauth
|
ffca3423c46f8f3d7e49eaf374b33265d4730587
|
[
"MIT"
] | null | null | null |
magicauth/send_token.py
|
JMIdeaMaker/django-magicauth
|
ffca3423c46f8f3d7e49eaf374b33265d4730587
|
[
"MIT"
] | null | null | null |
import math
from django.contrib.auth import get_user_model
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.template import loader
from magicauth import settings as magicauth_settings
from django.conf import settings as django_settings
from magicauth.models import MagicToken
import sendgrid
from sendgrid.helpers.mail import Mail
sg = sendgrid.SendGridAPIClient(django_settings.SENDGRID_API_KEY)
class SendTokenMixin(object):
"""
Helper for sending an email containing a link containing the MagicToken.
"""
def create_token(self, user):
token = MagicToken.objects.create(user=user)
return token
def get_user_from_email(self, user_email):
"""
Query the DB for the user corresponding to the email.
- We use get_user_model() instead of User (in case the Django app has customised the User
class)
- We use magicauth_settings.EMAIL_FIELD, which is the name of the field in the user
model. By default "username" but not always.
"""
user_class = get_user_model()
email_field = magicauth_settings.EMAIL_FIELD
field_lookup = {f"{email_field}__iexact": user_email}
user = user_class.objects.get(**field_lookup)
return user
def send_email(self, user, user_email, token, extra_context=None):
email_subject = magicauth_settings.EMAIL_SUBJECT
html_template = magicauth_settings.EMAIL_HTML_TEMPLATE
text_template = magicauth_settings.EMAIL_TEXT_TEMPLATE
from_email = magicauth_settings.FROM_EMAIL
context = {
"token": token,
"user": user,
"site": get_current_site(self.request),
"TOKEN_DURATION_MINUTES": math.floor(magicauth_settings.TOKEN_DURATION_SECONDS / 60),
"TOKEN_DURATION_SECONDS": magicauth_settings.TOKEN_DURATION_SECONDS,
}
if extra_context:
context.update(extra_context)
text_message = loader.render_to_string(text_template, context)
html_message = loader.render_to_string(html_template, context)
mail = Mail(
from_email=(
django_settings.MAGICAUTH_FROM_EMAIL,
django_settings.MAGICAUTH_SENDER
),
to_emails=[user_email],
subject=email_subject,
html_content=html_message
)
sg.send(mail)
def send_token(self, user_email, extra_context=None):
user = self.get_user_from_email(user_email)
token = self.create_token(user)
self.send_email(user, user_email, token, extra_context)
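# Hypothetical sketch of wiring the mixin into a Django view (names are
# illustrative, not part of this module):
#
#     from django.views.generic import FormView
#
#     class LoginEmailView(SendTokenMixin, FormView):
#         def form_valid(self, form):
#             self.send_token(form.cleaned_data["email"])
#             return super().form_valid(form)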
| 36.146667
| 98
| 0.69384
| 335
| 2,711
| 5.337313
| 0.280597
| 0.08557
| 0.061521
| 0.017897
| 0.14094
| 0.033557
| 0
| 0
| 0
| 0
| 0
| 0.000969
| 0.239026
| 2,711
| 74
| 99
| 36.635135
| 0.86573
| 0.130579
| 0
| 0
| 0
| 0
| 0.034121
| 0.028434
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.211538
| 0
| 0.346154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53fbcfdc398532d49a5138646d1108fbc979d12a
| 2,148
|
py
|
Python
|
qcdb/util/paths.py
|
loriab/qccddb
|
d9e156ef8b313ac0633211fc6b841f84a3ddde24
|
[
"BSD-3-Clause"
] | 8
|
2019-03-28T11:54:59.000Z
|
2022-03-19T03:31:37.000Z
|
qcdb/util/paths.py
|
loriab/qccddb
|
d9e156ef8b313ac0633211fc6b841f84a3ddde24
|
[
"BSD-3-Clause"
] | 39
|
2018-10-31T23:02:18.000Z
|
2021-12-12T22:11:37.000Z
|
qcdb/util/paths.py
|
loriab/qccddb
|
d9e156ef8b313ac0633211fc6b841f84a3ddde24
|
[
"BSD-3-Clause"
] | 9
|
2018-03-12T20:51:50.000Z
|
2022-02-28T15:18:34.000Z
|
import os
import sys
## {{{ http://code.activestate.com/recipes/52224/ (r1)
def search_file(filename, search_path):
"""Given an os.pathsep divided `search_path`, find first occurrence of
`filename`. Returns full path to file if found or None if unfound.
"""
file_found = False
paths = search_path.split(os.pathsep)
# paths = string.split(search_path, os.pathsep)
for path in paths:
if os.path.exists(os.path.join(path, filename)):
file_found = True
break
if file_found:
return os.path.abspath(os.path.join(path, filename))
else:
return None
## end of http://code.activestate.com/recipes/52224/ }}}
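# Illustrative call (hypothetical PATH-style lookup):
#     search_file("python", os.environ.get("PATH", ""))
#     # -> e.g. "/usr/bin/python" if present, otherwise None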
def all_casings(input_string):
"""Function to return a generator of all lettercase permutations
of *input_string*.
"""
if not input_string:
yield ""
else:
first = input_string[:1]
if first.lower() == first.upper():
for sub_casing in all_casings(input_string[1:]):
yield first + sub_casing
else:
for sub_casing in all_casings(input_string[1:]):
yield first.lower() + sub_casing
yield first.upper() + sub_casing
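# Example: list(all_casings("ab")) yields ["ab", "Ab", "aB", "AB"]; characters
# without distinct cases pass through, e.g. list(all_casings("a1")) -> ["a1", "A1"].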
def import_ignorecase(module, lenv=None):
"""Function to import *module* in any possible lettercase
permutation. Returns module object if available, None if not.
    `lenv` is a list (not str) of additional sys.path members to try.
"""
lenv = [] if lenv is None else lenv
with add_path(lenv):
modobj = None
for per in list(all_casings(module)):
try:
modobj = __import__(per)
except ImportError:
pass
else:
break
return modobj
class add_path:
"""https://stackoverflow.com/a/39855753"""
def __init__(self, paths):
# paths must be list
self.paths = paths
def __enter__(self):
for pth in reversed(self.paths):
sys.path.insert(0, pth)
def __exit__(self, exc_type, exc_value, traceback):
for pth in self.paths:
sys.path.remove(pth)
| 26.85
| 74
| 0.603352
| 277
| 2,148
| 4.519856
| 0.361011
| 0.052716
| 0.035942
| 0.050319
| 0.162939
| 0.127796
| 0.073482
| 0.073482
| 0.073482
| 0.073482
| 0
| 0.015202
| 0.295624
| 2,148
| 79
| 75
| 27.189873
| 0.812293
| 0.278864
| 0
| 0.173913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0.021739
| 0.108696
| 0
| 0.326087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53fbd095d48c73b6a23ec7ef2c3b6688ff51dfc5
| 2,380
|
py
|
Python
|
tests/models/DCN_test.py
|
JiangBowen-master/DeepCTR
|
291ffb0ff3b8322f64bd839f963d5c7a70e6b358
|
[
"Apache-2.0"
] | 1
|
2021-09-20T14:12:35.000Z
|
2021-09-20T14:12:35.000Z
|
tests/models/DCN_test.py
|
JiangBowen-master/DeepCTR
|
291ffb0ff3b8322f64bd839f963d5c7a70e6b358
|
[
"Apache-2.0"
] | 1
|
2022-02-10T06:29:19.000Z
|
2022-02-10T06:29:19.000Z
|
tests/models/DCN_test.py
|
JiangBowen-master/DeepCTR
|
291ffb0ff3b8322f64bd839f963d5c7a70e6b358
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import tensorflow as tf
from deepctr.estimator import DCNEstimator
from deepctr.models import DCN
from ..utils import check_model, get_test_data, SAMPLE_SIZE, get_test_data_estimator, check_estimator, \
Estimator_TEST_TF1
@pytest.mark.parametrize(
'cross_num,hidden_size,sparse_feature_num,cross_parameterization',
[(0, (8,), 2, 'vector'), (1, (), 1, 'vector'), (1, (8,), 3, 'vector'),
(0, (8,), 2, 'matrix'), (1, (), 1, 'matrix'), (1, (8,), 3, 'matrix'),
]
)
def test_DCN(cross_num, hidden_size, sparse_feature_num, cross_parameterization):
model_name = "DCN"
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(sample_size, sparse_feature_num=sparse_feature_num,
dense_feature_num=sparse_feature_num)
model = DCN(feature_columns, feature_columns, cross_num=cross_num, cross_parameterization=cross_parameterization,
dnn_hidden_units=hidden_size, dnn_dropout=0.5)
check_model(model, model_name, x, y)
@pytest.mark.parametrize(
'cross_num,hidden_size,sparse_feature_num',
[(1, (8,), 3)
]
)
def test_DCNEstimator(cross_num, hidden_size, sparse_feature_num):
if not Estimator_TEST_TF1 and tf.__version__ < "2.2.0":
return
model_name = "DCN"
sample_size = SAMPLE_SIZE
linear_feature_columns, dnn_feature_columns, input_fn = get_test_data_estimator(sample_size,
sparse_feature_num=sparse_feature_num,
dense_feature_num=sparse_feature_num)
model = DCNEstimator(linear_feature_columns, dnn_feature_columns, cross_num=cross_num, dnn_hidden_units=hidden_size,
dnn_dropout=0.5)
check_estimator(model, input_fn)
# def test_DCN_invalid(embedding_size=8, cross_num=0, hidden_size=()):
# feature_dim_dict = {'sparse': [SparseFeat('sparse_1', 2), SparseFeat('sparse_2', 5), SparseFeat('sparse_3', 10)],
# 'dense': [SparseFeat('dense_1', 1), SparseFeat('dense_1', 1), SparseFeat('dense_1', 1)]}
# with pytest.raises(ValueError):
# _ = DCN(None, embedding_size=embedding_size, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5)
if __name__ == "__main__":
pass
| 42.5
| 122
| 0.654622
| 301
| 2,380
| 4.760797
| 0.215947
| 0.08374
| 0.111654
| 0.08374
| 0.542219
| 0.519888
| 0.447313
| 0.378925
| 0.343336
| 0.2903
| 0
| 0.024671
| 0.233613
| 2,380
| 55
| 123
| 43.272727
| 0.760965
| 0.191597
| 0
| 0.210526
| 0
| 0
| 0.082377
| 0.053702
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0.026316
| 0.131579
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53fc42709c54959b0375cdc103e3419eb44ee072
| 3,012
|
py
|
Python
|
deploy_tix/__main__.py
|
rpappalax/deploy-tix
|
a53c7fa7898b9f0c2f530c8abd8bab322a2eb7bc
|
[
"MIT"
] | null | null | null |
deploy_tix/__main__.py
|
rpappalax/deploy-tix
|
a53c7fa7898b9f0c2f530c8abd8bab322a2eb7bc
|
[
"MIT"
] | 20
|
2015-02-24T08:56:47.000Z
|
2018-07-25T16:35:30.000Z
|
deploy_tix/__main__.py
|
rpappalax/deploy-tix
|
a53c7fa7898b9f0c2f530c8abd8bab322a2eb7bc
|
[
"MIT"
] | 3
|
2015-04-01T21:39:50.000Z
|
2020-09-10T19:40:43.000Z
|
import argparse
from deploy_tix.bugzilla_rest_client import BugzillaRESTClient
from deploy_tix.release_notes import ReleaseNotes
from output_helper import OutputHelper
def main(args=None):
parser = argparse.ArgumentParser(
description='Scripts for creating / updating deployment tickets in \
Bugzilla',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-a', '--application',
help='Example: loop-server',
required=True)
parser.add_argument(
'-B', '--bugzilla-mozilla',
help='Set this switch to post directly to bugzilla.mozilla.org \
(without switch posts to: bugzilla-dev.allizom.org)',
action='store_true',
default=False,
required=False)
subparsers = parser.add_subparsers(help='Ticket action')
# parser for ticket - {create} option
parser_create = \
subparsers.add_parser('NEW', help='Create a NEW deployment ticket.')
parser_create.add_argument(
'-o', '--repo-owner',
help='Example: mozilla-services',
default='mozilla-services',
required=False)
parser_create.add_argument(
'-e', '--environment',
help='Enter: STAGE, PROD',
default='STAGE',
required=False)
parser_create.add_argument(
'-m', '--cc-mail',
help='Example: xyz-services-dev@mozilla.com \
NOTE: must be a registered username!',
default='',
required=False)
    # parser for ticket - {update} option
parser_update = subparsers.add_parser(
'UPDATE',
help='UPDATE an existing deployment ticket'
)
parser_update.add_argument(
'-i', '--bug-id',
help='Example: 1234567',
required=False)
parser_update.add_argument(
'-c', '--comment',
help='Enter: <your bug comment>',
required=True)
args = vars(parser.parse_args())
application = args['application']
bugzilla_mozilla = args['bugzilla_mozilla']
ticket = BugzillaRESTClient(bugzilla_mozilla)
if all(key in args for key in ['bug_id', 'comment']):
bug_id = args['bug_id']
comment = args['comment']
ticket.bug_update(application, comment, bug_id)
if all(key in args for key in ['repo_owner', 'application', 'environment']): # noqa
repo_owner = args['repo_owner']
environment = args['environment'].lower()
if args['cc_mail']:
cc_mail = args['cc_mail']
else:
cc_mail = ''
status = 'NEW'
output = OutputHelper()
output.log('Create deployment ticket', True, True)
notes = ReleaseNotes(repo_owner, application, environment)
description = notes.get_release_notes()
release_num = notes.last_tag
output.log('Release Notes', True)
output.log(description)
ticket.bug_create(
release_num, application, environment, status, description, cc_mail
)
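# Illustrative invocations (flag names taken from the parsers above; the values
# are hypothetical):
#   python -m deploy_tix -a loop-server NEW -o mozilla-services -e STAGE
#   python -m deploy_tix -a loop-server UPDATE -i 1234567 -c "Deployed to stage"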
| 30.12
| 87
| 0.625166
| 327
| 3,012
| 5.608563
| 0.330275
| 0.041985
| 0.041439
| 0.037623
| 0.06325
| 0.06325
| 0.023991
| 0.023991
| 0
| 0
| 0
| 0.003135
| 0.258632
| 3,012
| 99
| 88
| 30.424242
| 0.818182
| 0.0249
| 0
| 0.181818
| 0
| 0
| 0.163655
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012987
| false
| 0
| 0.051948
| 0
| 0.064935
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53fce9990550dc9cdc1a65b09b6de93156132380
| 2,583
|
py
|
Python
|
site-packages/visual/examples/drape.py
|
lebarsfa/vpython-wx
|
38df062e5532b79f632f4f2a1abae86754c264a9
|
[
"BSL-1.0"
] | 68
|
2015-01-17T05:41:58.000Z
|
2021-04-24T08:35:24.000Z
|
site-packages/visual/examples/drape.py
|
lebarsfa/vpython-wx
|
38df062e5532b79f632f4f2a1abae86754c264a9
|
[
"BSL-1.0"
] | 16
|
2015-01-02T19:36:06.000Z
|
2018-09-09T21:01:25.000Z
|
site-packages/visual/examples/drape.py
|
lebarsfa/vpython-wx
|
38df062e5532b79f632f4f2a1abae86754c264a9
|
[
"BSL-1.0"
] | 37
|
2015-02-04T04:23:00.000Z
|
2020-06-07T03:24:41.000Z
|
from visual import *
print("""
Click to place spheres under falling string.
Right button drag or Ctrl-drag to rotate view.
Middle button drag or Alt-drag to zoom in or out.
On a two-button mouse, middle is left + right.
""")
# David Scherer
scene.title = "Drape"
restlength = 0.02
m = 0.010 * restlength
g = 9.8
dt = 0.002
k = 3
damp = (1-0)**dt
nspheres = 3
floor = 0
# Create the stringy thing:
band = curve( x = arange(-1,1,restlength),
y = 1,
radius = 0.02
)
band.p = band.pos * 0
scene.range = 1.5
scene.autoscale = 0
# Let the user position obstacles:
spheres = []
for i in range(nspheres):
s = sphere( pos = scene.mouse.getclick().pos, #(i*0.6 - 0.7,0.5 + i*0.1,0),
radius = 0.25,
color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) )
spheres.append( s )
while True:
rate(1.0 / dt)
if scene.mouse.clicked:
i = len(spheres)
s = sphere( pos = scene.mouse.getclick().pos,
radius = 0.25,
color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) )
spheres.append( s )
if floor:
below = less(band.pos[:,1],-1)
band.p[:,1] = where( below, 0, band.p[:,1] )
band.pos[:,1] = where( below, -1, band.pos[:,1] )
# need a more physical way to make 'damped springs' than this!
band.p = band.p * damp
#band.p[0] = 0 # nail down left endpoint
#band.p[-1] = 0 # nail down right endpoint
band.pos = band.pos + band.p/m*dt
#gravity
band.p[:,1] = band.p[:,1] - m * g * dt
# force[n] is the force on point n from point n+1 (to the right):
length = (band.pos[1:] - band.pos[:-1])
dist = sqrt(sum(length*length,-1))
force = k * ( dist - restlength )
force = length/dist[:,newaxis] * force[:,newaxis]
band.p[:-1] = band.p[:-1] + force*dt
band.p[1:] = band.p[1:] - force*dt
# color based on "stretch": blue -> white -> red
c = clip( dist/restlength * 0.5, 0, 2 )
# blue (compressed) -> white (relaxed) -> red (tension)
band.red[1:] = where( less(c,1), c, 1 )
band.green[1:] = where( less(c,1), c, 2-c )
band.blue[1:] = where( less(c,1), 1, 2-c )
for s in spheres:
dist = mag( band.pos - s.pos )[:,newaxis]
inside = less( dist, s.radius )
if sometrue(inside):
R = ( band.pos - s.pos ) / dist
surface = s.pos + (s.radius)*R
band.pos = surface*inside + band.pos*(1-inside)
pdotR = sum(asarray(band.p)*asarray(R),-1)
band.p = band.p - R*pdotR[:,newaxis]*inside
| 27.189474
| 81
| 0.542005
| 414
| 2,583
| 3.381643
| 0.318841
| 0.060714
| 0.038571
| 0.02
| 0.172857
| 0.164286
| 0.137143
| 0.092857
| 0.065714
| 0.065714
| 0
| 0.051407
| 0.284553
| 2,583
| 94
| 82
| 27.478723
| 0.706169
| 0.16144
| 0
| 0.126984
| 0
| 0
| 0.095724
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.015873
| 0
| 0.015873
| 0.015873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53fd39f8be55af2124122647f83ca83013ed5b72
| 8,921
|
py
|
Python
|
sdc/utilities/sdc_typing_utils.py
|
dlee992/sdc
|
1ebf55c00ef38dfbd401a70b3945e352a5a38b87
|
[
"BSD-2-Clause"
] | 540
|
2017-06-19T16:29:24.000Z
|
2019-05-21T09:30:07.000Z
|
sdc/utilities/sdc_typing_utils.py
|
dlee992/sdc
|
1ebf55c00ef38dfbd401a70b3945e352a5a38b87
|
[
"BSD-2-Clause"
] | 389
|
2019-10-30T18:56:46.000Z
|
2022-03-09T08:21:36.000Z
|
sdc/utilities/sdc_typing_utils.py
|
dlee992/sdc
|
1ebf55c00ef38dfbd401a70b3945e352a5a38b87
|
[
"BSD-2-Clause"
] | 36
|
2017-06-19T16:29:15.000Z
|
2019-04-26T09:22:39.000Z
|
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| This file contains SDC utility functions related to typing compilation phase
"""
import numpy
import numba
import sdc
from numba import types
from numba.core.errors import TypingError
from numba.np import numpy_support
from sdc.datatypes.indexes import *
from sdc.str_arr_type import string_array_type, StringArrayType
from sdc.datatypes.categorical.types import Categorical
sdc_old_index_types = (types.Array, StringArrayType, )
sdc_pandas_index_types = (
EmptyIndexType,
PositionalIndexType,
RangeIndexType,
Int64IndexType,
MultiIndexType,
) + sdc_old_index_types
sdc_indexes_range_like = (
PositionalIndexType,
RangeIndexType,
)
# TO-DO: support caching of the data allocated for range indexes when .values is requested
sdc_indexes_wo_values_cache = (
EmptyIndexType,
PositionalIndexType,
RangeIndexType,
)
sdc_pandas_df_column_types = (
types.Array,
StringArrayType,
Categorical,
)
class TypeChecker:
"""
Validate object type and raise TypingError if the type is invalid, e.g.:
Method nsmallest(). The object n
given: bool
expected: int
"""
msg_template = '{} The object {}\n given: {}\n expected: {}'
def __init__(self, func_name):
"""
Parameters
----------
func_name: :obj:`str`
name of the function where type checking is performed
"""
self.func_name = func_name
def raise_exc(self, data, expected_types, name=''):
"""
Raise exception with unified message
Parameters
----------
data: :obj:`any`
real type of the data
expected_types: :obj:`str`
expected types, inserted directly into the exception message
name: :obj:`str`
name of the parameter
"""
msg = self.msg_template.format(self.func_name, name, data, expected_types)
raise TypingError(msg)
def check(self, data, accepted_type, name=''):
"""
Check that the data type belongs to the specified accepted type
Parameters
----------
data: :obj:`any`
real type of the data
accepted_type: :obj:`type`
accepted type
name: :obj:`str`
name of the parameter
"""
if not isinstance(data, accepted_type):
self.raise_exc(data, accepted_type.__name__, name=name)
class SDCLimitation(Exception):
"""Exception to be raised in case of SDC limitation"""
pass
def kwsparams2list(params):
"""Convert parameters dict to a list of string of a format 'key=value'"""
return ['{}={}'.format(k, v) for k, v in params.items()]
def sigparams2list(param_names, defaults):
"""Creates a list of strings of a format 'key=value' from parameter names and default values"""
return [(f'{param}' if param not in defaults else f'{param}={defaults[param]}') for param in param_names]
def has_literal_value(var, value):
"""Used during typing to check that variable var is a Numba literal value equal to value"""
if not isinstance(var, types.Literal):
return False
if value is None:
return isinstance(var, types.NoneType) or var.literal_value is value
elif isinstance(value, type(bool)):
return var.literal_value is value
else:
return var.literal_value == value
def has_python_value(var, value):
"""Used during typing to check that variable var was resolved as Python type and has specific value"""
if not isinstance(var, type(value)):
return False
if value is None or isinstance(value, type(bool)):
return var is value
else:
return var == value
def is_default(var, value):
return has_literal_value(var, value) or has_python_value(var, value) or isinstance(var, types.Omitted)
def check_is_numeric_array(type_var):
"""Used during typing to check that type_var is a numeric numpy arrays"""
return check_is_array_of_dtype(type_var, types.Number)
def check_index_is_numeric(ty_series):
"""Used during typing to check that series has numeric index"""
return isinstance(ty_series.index.dtype, types.Number)
def check_types_comparable(ty_left, ty_right):
"""Used during typing to check that specified types can be compared"""
if hasattr(ty_left, 'dtype'):
ty_left = ty_left.dtype
if hasattr(ty_right, 'dtype'):
ty_right = ty_right.dtype
# add the rest of supported types here
if isinstance(ty_left, types.Number):
return isinstance(ty_right, types.Number)
if isinstance(ty_left, types.UnicodeType):
return isinstance(ty_right, types.UnicodeType)
if isinstance(ty_left, types.Boolean):
return isinstance(ty_right, types.Boolean)
if isinstance(ty_left, (types.Tuple, types.UniTuple)):
# FIXME: just for now to unblock compilation
return ty_left == ty_right
return False
def check_arrays_comparable(ty_left, ty_right):
"""Used during typing to check that underlying arrays of specified types can be compared"""
return ((ty_left == string_array_type and ty_right == string_array_type)
or (check_is_numeric_array(ty_left) and check_is_numeric_array(ty_right)))
def check_is_array_of_dtype(type_var, dtype):
"""Used during typing to check that type_var is a numeric numpy array of specific dtype"""
return isinstance(type_var, types.Array) and isinstance(type_var.dtype, dtype)
def find_common_dtype_from_numpy_dtypes(array_types, scalar_types):
"""Used to find common numba dtype for a sequences of numba dtypes each representing some numpy dtype"""
np_array_dtypes = [numpy_support.as_dtype(dtype) for dtype in array_types]
np_scalar_dtypes = [numpy_support.as_dtype(dtype) for dtype in scalar_types]
np_common_dtype = numpy.find_common_type(np_array_dtypes, np_scalar_dtypes)
numba_common_dtype = numpy_support.from_dtype(np_common_dtype)
return numba_common_dtype
def find_index_common_dtype(left, right):
"""Used to find common dtype for indexes of two series and verify if index dtypes are equal"""
left_index_dtype = left.dtype
right_index_dtype = right.dtype
index_dtypes_match = left_index_dtype == right_index_dtype
if not index_dtypes_match:
numba_index_common_dtype = find_common_dtype_from_numpy_dtypes(
[left_index_dtype, right_index_dtype], [])
else:
numba_index_common_dtype = left_index_dtype
return index_dtypes_match, numba_index_common_dtype
def gen_impl_generator(codegen, impl_name):
"""Generate generator of an implementation"""
def _df_impl_generator(*args, **kwargs):
func_text, global_vars = codegen(*args, **kwargs)
loc_vars = {}
exec(func_text, global_vars, loc_vars)
_impl = loc_vars[impl_name]
return _impl
return _df_impl_generator
def check_signed_integer(ty):
return isinstance(ty, types.Integer) and ty.signed
def _check_dtype_param_type(dtype):
""" Returns True is dtype is a valid type for dtype parameter and False otherwise.
Used in RangeIndex ctor and other methods that take dtype parameter. """
valid_dtype_types = (types.NoneType, types.Omitted, types.UnicodeType, types.NumberClass)
return isinstance(dtype, valid_dtype_types) or dtype is None
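# --- Illustrative usage sketch (not part of the original file) ---
# A minimal example, assuming numba is importable, of how TypeChecker is
# meant to be used during typing: validate an argument's Numba type and
# raise TypingError with the unified message when it does not match.
if __name__ == '__main__':
    _checker = TypeChecker('Method nsmallest().')
    try:
        # types.unicode_type is a UnicodeType instance, not an Integer,
        # so this check raises TypingError with the formatted message.
        _checker.check(types.unicode_type, types.Integer, name='n')
    except TypingError as exc:
        print(exc)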
| 34.311538
| 109
| 0.690954
| 1,189
| 8,921
| 4.997477
| 0.248949
| 0.012117
| 0.018849
| 0.021205
| 0.266914
| 0.176372
| 0.129923
| 0.099293
| 0.099293
| 0.073376
| 0
| 0.001143
| 0.215335
| 8,921
| 259
| 110
| 34.444015
| 0.847714
| 0.391212
| 0
| 0.119658
| 0
| 0
| 0.017818
| 0.00495
| 0
| 0
| 0
| 0.003861
| 0
| 1
| 0.162393
| false
| 0.008547
| 0.076923
| 0.017094
| 0.478632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53fde8ce197812a38b7631459a915158d4d2d39f
| 1,074
|
py
|
Python
|
Hackerrank/Contests/Project Euler/euler010.py
|
PROxZIMA/Competitive-Coding
|
ba6b365ea130b6fcaa15c5537b530ed363bab793
|
[
"MIT"
] | 1
|
2021-01-10T13:29:21.000Z
|
2021-01-10T13:29:21.000Z
|
Hackerrank/Contests/Project Euler/euler010.py
|
PROxZIMA/Competitive-Coding
|
ba6b365ea130b6fcaa15c5537b530ed363bab793
|
[
"MIT"
] | null | null | null |
Hackerrank/Contests/Project Euler/euler010.py
|
PROxZIMA/Competitive-Coding
|
ba6b365ea130b6fcaa15c5537b530ed363bab793
|
[
"MIT"
] | null | null | null |
from math import sqrt
# Naive method: loop through N and check whether each number is prime; if prime, add it to the sum. Each primality check costs O(√n), so the loop is roughly O(n√n) overall. Time of execution ~ 8sec for n = 1000000
def prime(n):
yield 2
yield 3
for p in range(5, n+1, 2):
if p % 3 == 0:
continue
else:
for i in range (5, int(sqrt(p)) + 1, 6):
if p % i == 0 or p % (i + 2) == 0:
break
else:
yield p
s = set(prime(1000000))
for _ in range(int(input())):
n = int(input())
print(sum(i for i in s if i <= n))
# Sieve implementation: sieve[n] accumulates the sum of all primes <= n. Time complexity of O(n*log(log(n))). Time of execution ~ 2sec for n = 1000000
limit = 1000000
sieve = [0] + [1, 0] * 500000
sieve[0], sieve[1], sieve[2] = 0, 0, 2
p = 3
while p <= limit:
if sieve[p]:
sieve[p] = sieve[p-1] + p
for i in range(p*p, limit+1, p):
sieve[i] = 0
else:
sieve[p] = sieve[p-1]
sieve[p+1] = sieve[p]
p += 2
for _ in range(int(input())):
print(sieve[int(input())])
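# --- Illustrative check (not part of the original file) ---
# Once the sieve loop finishes, sieve[n] holds the sum of all primes <= n,
# so sieve[10] must equal 2 + 3 + 5 + 7 = 17; a quick sanity assertion:
assert sieve[10] == 2 + 3 + 5 + 7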
| 23.347826
| 161
| 0.515829
| 183
| 1,074
| 3.021858
| 0.311475
| 0.075949
| 0.03255
| 0.065099
| 0.135624
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091808
| 0.340782
| 1,074
| 45
| 162
| 23.866667
| 0.687853
| 0.241155
| 0
| 0.15625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.03125
| 0
| 0.0625
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53fe751d15505be94879d0853534a2ee2c6e3129
| 3,891
|
py
|
Python
|
DQM/L1TMonitorClient/python/L1EmulatorErrorFlagClient_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
DQM/L1TMonitorClient/python/L1EmulatorErrorFlagClient_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
DQM/L1TMonitorClient/python/L1EmulatorErrorFlagClient_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
l1EmulatorErrorFlagClient = DQMEDHarvester("L1EmulatorErrorFlagClient",
#
# for each L1 system, give:
# - SystemLabel: system label
# - HwValLabel: system label as used in hardware validation package
# (the package producing the ErrorFlag histogram)
# - SystemMask: if set to 1, the system is masked in the summary plot
# - SystemFolder: the folder where the ErrorFlag histogram is looked for
#
# the position in the parameter set gives, in reverse order, the position in the reportSummaryMap
# in the emulator column (left column)
L1Systems = cms.VPSet(
cms.PSet(
SystemLabel = cms.string("ECAL"),
HwValLabel = cms.string("ETP"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("HCAL"),
HwValLabel = cms.string("HTP"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("RCT"),
HwValLabel = cms.string("RCT"),
SystemMask = cms.uint32(0),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("Stage1Layer2"),
HwValLabel = cms.string("Stage1Layer2"),
SystemMask = cms.uint32(0),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("DTTF"),
HwValLabel = cms.string("DTF"),
SystemMask = cms.uint32(0),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("DTTPG"),
HwValLabel = cms.string("DTP"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("CSCTF"),
HwValLabel = cms.string("CTF"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("CSCTPG"),
HwValLabel = cms.string("CTP"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("RPC"),
HwValLabel = cms.string("RPC"),
SystemMask = cms.uint32(0),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("GMT"),
HwValLabel = cms.string("GMT"),
SystemMask = cms.uint32(0),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("GT"),
HwValLabel = cms.string("GT"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("L1TEMU/Stage1GTexpert")
)
)
)
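# --- Illustrative usage sketch (not part of the original file) ---
# The VPSet order determines the reportSummaryMap row order in reverse, so
# printing the labels reversed shows them as they appear in the map; this
# assumes the standard FWCore parameter-set API.
if __name__ == '__main__':
    for pset in reversed(list(l1EmulatorErrorFlagClient.L1Systems)):
        print(pset.SystemLabel.value())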
| 45.776471
| 101
| 0.40992
| 274
| 3,891
| 5.821168
| 0.273723
| 0.186207
| 0.124138
| 0.144828
| 0.468966
| 0.452038
| 0.452038
| 0.426332
| 0.426332
| 0.426332
| 0
| 0.022495
| 0.497301
| 3,891
| 84
| 102
| 46.321429
| 0.792945
| 0.125161
| 0
| 0.583333
| 0
| 0
| 0.040672
| 0.013557
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027778
| 0
| 0.027778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53ff445026af64cf9c890da3e25303bb69266c4d
| 17,382
|
py
|
Python
|
codalab/model/tables.py
|
jzwang43/codalab-worksheets
|
b1d4c6cc4b72f4dfa35a15f876e2d0ce9a03d28d
|
[
"Apache-2.0"
] | null | null | null |
codalab/model/tables.py
|
jzwang43/codalab-worksheets
|
b1d4c6cc4b72f4dfa35a15f876e2d0ce9a03d28d
|
[
"Apache-2.0"
] | null | null | null |
codalab/model/tables.py
|
jzwang43/codalab-worksheets
|
b1d4c6cc4b72f4dfa35a15f876e2d0ce9a03d28d
|
[
"Apache-2.0"
] | null | null | null |
"""
The SQLAlchemy table objects for the CodaLab bundle system tables.
"""
# TODO: Replace String and Text columns with Unicode and UnicodeText as appropriate
# This way, SQLAlchemy will automatically perform conversions to and from UTF-8
# encoding, or use appropriate database engine-specific data types for Unicode
# data. Currently, only worksheet.title uses the Unicode column type.
from sqlalchemy import Column, ForeignKey, Index, MetaData, Table, UniqueConstraint
from sqlalchemy.types import (
BigInteger,
Boolean,
DateTime,
Enum,
Float,
Integer,
LargeBinary,
String,
Text,
Unicode,
)
from sqlalchemy.sql.schema import ForeignKeyConstraint
db_metadata = MetaData()
bundle = Table(
'bundle',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('uuid', String(63), nullable=False),
Column('bundle_type', String(63), nullable=False),
# The command will be NULL except for run bundles.
Column('command', Text, nullable=True),
# The data_hash will be NULL if the bundle's value is still being computed.
Column('data_hash', String(63), nullable=True),
Column('state', String(63), nullable=False),
Column('owner_id', String(255), nullable=True),
Column('is_anonymous', Boolean, nullable=False, default=False),
UniqueConstraint('uuid', name='uix_1'),
Index('bundle_data_hash_index', 'data_hash'),
Index('state_index', 'state'), # Needed for the bundle manager.
)
# Includes things like name, description, etc.
bundle_metadata = Table(
'bundle_metadata',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('bundle_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),
Column('metadata_key', String(63), nullable=False),
Column('metadata_value', Text, nullable=False),
Index('metadata_kv_index', 'metadata_key', 'metadata_value', mysql_length=63),
)
# For each child_uuid, we have: key = child_path, target = (parent_uuid, parent_path)
bundle_dependency = Table(
'bundle_dependency',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('child_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),
Column('child_path', Text, nullable=False),
# Deliberately omit ForeignKey(bundle.c.uuid), because bundles can have
# dependencies to bundles not (yet) in the system.
Column('parent_uuid', String(63), nullable=False),
Column('parent_path', Text, nullable=False),
)
# The worksheet table does not have many columns now, but it will eventually
# include columns for owner, group, permissions, etc.
worksheet = Table(
'worksheet',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('uuid', String(63), nullable=False),
Column('name', String(255), nullable=False),
Column('owner_id', String(255), nullable=True),
Column(
'title', Unicode(255), nullable=True
), # Short human-readable description of the worksheet
Column(
'frozen', DateTime, nullable=True
), # When the worksheet was frozen (forever immutable) if it is.
Column('is_anonymous', Boolean, nullable=False, default=False),
Column(
'date_created', DateTime
), # When the worksheet was created; Set to null if the worksheet created before v0.5.31; Set to current timestamp by default
Column(
'date_last_modified', DateTime
), # When the worksheet was last modified; Set to null if the worksheet created before v0.5.31; Set to current_timestamp by default
UniqueConstraint('uuid', name='uix_1'),
Index('worksheet_name_index', 'name'),
Index('worksheet_owner_index', 'owner_id'),
)
worksheet_item = Table(
'worksheet_item',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False),
# A worksheet item is either:
# - type = bundle (bundle_uuid != null)
# - type = worksheet (subworksheet_uuid != null)
# - type = markup (value != null)
# - type = directive (value != null)
# Deliberately omit ForeignKey(bundle.c.uuid), because worksheets can contain
# bundles and worksheets not (yet) in the system.
Column('bundle_uuid', String(63), nullable=True),
Column('subworksheet_uuid', String(63), nullable=True),
Column('value', Text, nullable=False), # TODO: make this nullable
Column('type', String(20), nullable=False),
Column('sort_key', Integer, nullable=True),
Index('worksheet_item_worksheet_uuid_index', 'worksheet_uuid'),
Index('worksheet_item_bundle_uuid_index', 'bundle_uuid'),
Index('worksheet_item_subworksheet_uuid_index', 'subworksheet_uuid'),
)
# Worksheet tags
worksheet_tag = Table(
'worksheet_tag',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False),
Column('tag', String(63), nullable=False),
Index('worksheet_tag_worksheet_uuid_index', 'worksheet_uuid'),
Index('worksheet_tag_tag_index', 'tag'),
)
group = Table(
'group',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('uuid', String(63), nullable=False),
Column('name', String(255), nullable=False),
Column('user_defined', Boolean),
Column('owner_id', String(255), nullable=True),
UniqueConstraint('uuid', name='uix_1'),
Index('group_name_index', 'name'),
Index('group_owner_id_index', 'owner_id'),
)
user_group = Table(
'user_group',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False),
Column('user_id', String(63), ForeignKey("user.user_id"), nullable=False),
# Whether a user is able to modify this group.
Column('is_admin', Boolean),
Index('group_uuid_index', 'group_uuid'),
Index('user_id_index', 'user_id'),
)
# Permissions for bundles
group_bundle_permission = Table(
'group_bundle_permission',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False),
# Reference to a bundle
Column('object_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),
# Permissions encoded as integer (see below)
Column('permission', Integer, nullable=False),
)
# Permissions for worksheets
group_object_permission = Table(
'group_object_permission',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False),
# Reference to a worksheet object
Column('object_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False),
# Permissions encoded as integer (see below)
Column('permission', Integer, nullable=False),
)
# A permission value is one of the following: none (0), read (1), or all (2).
GROUP_OBJECT_PERMISSION_NONE = 0x00
GROUP_OBJECT_PERMISSION_READ = 0x01
GROUP_OBJECT_PERMISSION_ALL = 0x02
# A notifications value is one of the following:
NOTIFICATIONS_NONE = 0x00 # Receive no notifications
NOTIFICATIONS_IMPORTANT = 0x01 # Receive only important notifications
NOTIFICATIONS_GENERAL = 0x02 # Receive general notifications (new features)
# Store information about users.
user = Table(
'user',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
# Basic information
Column('user_id', String(63), nullable=False),
Column('user_name', String(63), nullable=False, unique=True),
Column(
'email', String(254), nullable=False, unique=True
), # Length of 254 to be compliant with RFC3696/5321
Column(
'notifications', Integer, nullable=False, default=NOTIFICATIONS_GENERAL
), # Which emails user wants to receive
Column('last_login', DateTime), # Null if user has never logged in
Column(
'is_active', Boolean, nullable=False, default=True
), # Set to False instead of deleting users to maintain foreign key integrity
Column('first_name', String(30, convert_unicode=True)),
Column('last_name', String(30, convert_unicode=True)),
Column('date_joined', DateTime, nullable=False),
Column('has_access', Boolean, default=False, nullable=True),
Column('is_verified', Boolean, nullable=False, default=False),
Column('is_superuser', Boolean, nullable=False, default=False),
Column('password', String(128), nullable=False),
# Additional information
Column('affiliation', String(255, convert_unicode=True), nullable=True),
Column('url', String(255, convert_unicode=True), nullable=True),
# Quotas
Column('time_quota', Float, nullable=False), # Number of seconds allowed
Column('parallel_run_quota', Integer, nullable=False), # Number of parallel jobs allowed
Column('time_used', Float, nullable=False), # Number of seconds already used
Column('disk_quota', Float, nullable=False), # Number of bytes allowed
Column('disk_used', Float, nullable=False), # Number of bytes already used
Index('user_user_id_index', 'user_id'),
Index('user_user_name_index', 'user_name'),
UniqueConstraint('user_id', name='uix_1'),
)
# Stores (email) verification keys
user_verification = Table(
'user_verification',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),
Column('date_created', DateTime, nullable=False),
Column('date_sent', DateTime, nullable=True),
Column('key', String(64), nullable=False),
)
# Stores password reset codes
user_reset_code = Table(
'user_reset_code',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),
Column('date_created', DateTime, nullable=False),
Column('code', String(64), nullable=False),
)
# OAuth2 Tables
oauth2_client = Table(
'oauth2_client',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('client_id', String(63), nullable=False),
Column('name', String(63), nullable=True),
Column('secret', String(255), nullable=True),
Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=True),
Column(
'grant_type',
Enum("authorization_code", "password", "client_credentials", "refresh_token"),
nullable=False,
),
Column('response_type', Enum("code", "token"), nullable=False),
Column('scopes', Text, nullable=False), # comma-separated list of allowed scopes
Column('redirect_uris', Text, nullable=False), # comma-separated list of allowed redirect URIs
UniqueConstraint('client_id', name='uix_1'),
)
oauth2_token = Table(
'oauth2_token',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('client_id', String(63), ForeignKey(oauth2_client.c.client_id), nullable=False),
Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),
Column('scopes', Text, nullable=False),
Column('access_token', String(255), unique=True),
Column('refresh_token', String(255), unique=True),
Column('expires', DateTime, nullable=False),
)
oauth2_auth_code = Table(
'oauth2_auth_code',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('client_id', String(63), ForeignKey(oauth2_client.c.client_id), nullable=False),
Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),
Column('scopes', Text, nullable=False),
Column('code', String(100), nullable=False),
Column('expires', DateTime, nullable=False),
Column('redirect_uri', String(255), nullable=False),
)
# Store information about users' questions or feedback.
chat = Table(
'chat',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
), # Primary key
Column('time', DateTime, nullable=False), # When did the user send this query?
Column('sender_user_id', String(63), nullable=True), # Who sent it?
Column('recipient_user_id', String(63), nullable=True), # Who received it?
Column('message', Text, nullable=False), # What's the content of the chat?
Column(
'worksheet_uuid', String(63), nullable=True
), # What is the id of the worksheet that the sender is on?
Column(
'bundle_uuid', String(63), nullable=True
), # What is the id of the bundle that the sender is on?
)
# Store information about workers.
worker = Table(
'worker',
db_metadata,
Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False),
Column('worker_id', String(127), primary_key=True, nullable=False),
Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=True),
Column('tag', Text, nullable=True), # Tag that allows for scheduling runs on specific workers.
Column('cpus', Integer, nullable=False), # Number of CPUs on worker.
Column('gpus', Integer, nullable=False), # Number of GPUs on worker.
Column('memory_bytes', BigInteger, nullable=False), # Total memory of worker.
Column('free_disk_bytes', BigInteger, nullable=True), # Available disk space on worker.
Column(
'checkin_time', DateTime, nullable=False
), # When the worker last checked in with the bundle service.
Column('socket_id', Integer, nullable=False), # Socket ID worker listens for messages on.
Column(
'shared_file_system', Boolean, nullable=False
), # Whether the worker and the server have a shared filesystem.
Column(
'tag_exclusive', Boolean, nullable=False
), # Whether worker runs bundles if and only if they match tags.
Column(
'exit_after_num_runs', Integer, nullable=False
), # Number of jobs allowed to run on worker.
Column('is_terminating', Boolean, nullable=False),
)
# Store information about all sockets currently allocated to each worker.
worker_socket = Table(
'worker_socket',
db_metadata,
Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),
Column('worker_id', String(127), nullable=False),
# No foreign key constraint on the worker table so that we can create a socket
# for the worker before adding the worker to the worker table.
Column('socket_id', Integer, primary_key=True, nullable=False),
)
# Store information about the bundles currently running on each worker.
worker_run = Table(
'worker_run',
db_metadata,
Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),
Column('worker_id', String(127), nullable=False),
ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']),
Column('run_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),
Index('uuid_index', 'run_uuid'),
)
# Store information about the dependencies available on each worker.
worker_dependency = Table(
'worker_dependency',
db_metadata,
Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False),
Column('worker_id', String(127), primary_key=True, nullable=False),
ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']),
# Serialized list of dependencies for the user/worker combination.
# See WorkerModel for the serialization method.
Column('dependencies', LargeBinary, nullable=False),
)
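# --- Illustrative usage sketch (not part of the original file) ---
# All tables above are registered on db_metadata, so the full schema can be
# created in one call. The in-memory SQLite engine is only an example target,
# and this assumes a SQLAlchemy version that still accepts convert_unicode.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    engine = create_engine('sqlite://')
    db_metadata.create_all(engine)
    print(sorted(db_metadata.tables))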
| 36.904459
| 136
| 0.676159
| 2,111
| 17,382
| 5.421601
| 0.153955
| 0.116994
| 0.058104
| 0.042289
| 0.529664
| 0.47287
| 0.420882
| 0.375273
| 0.359546
| 0.347401
| 0
| 0.014881
| 0.195835
| 17,382
| 470
| 137
| 36.982979
| 0.803906
| 0.220055
| 0
| 0.534005
| 0
| 0
| 0.16736
| 0.018637
| 0
| 0
| 0.001782
| 0.002128
| 0
| 1
| 0
| false
| 0.005038
| 0.010076
| 0
| 0.010076
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53ff8a47a271e5535277c6325b7ff8df26908ae6
| 31,403
|
py
|
Python
|
grpc/plugins/connection/gnmi.py
|
hansthienpondt/ansible-networking-collections
|
278c88fceac297693a31df3cb54c942284823fbd
|
[
"BSD-3-Clause"
] | null | null | null |
grpc/plugins/connection/gnmi.py
|
hansthienpondt/ansible-networking-collections
|
278c88fceac297693a31df3cb54c942284823fbd
|
[
"BSD-3-Clause"
] | null | null | null |
grpc/plugins/connection/gnmi.py
|
hansthienpondt/ansible-networking-collections
|
278c88fceac297693a31df3cb54c942284823fbd
|
[
"BSD-3-Clause"
] | null | null | null |
# (c) 2020 Nokia
#
# Licensed under the BSD 3 Clause license
# SPDX-License-Identifier: BSD-3-Clause
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
author:
- "Hans Thienpondt (@HansThienpondt)"
- "Sven Wisotzky (@wisotzky)"
connection: gnmi
short_description: Provides a persistent gRPC connection for gNMI API service
description:
- This gRPC plugin provides methods to interact with the gNMI service.
- OpenConfig gNMI specification
https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md
- gNMI API
https://raw.githubusercontent.com/openconfig/gnmi/master/proto/gnmi/gnmi.proto
- This connection plugin provides a persistent communication channel to
remote devices using gRPC including the underlying transport (TLS).
- The plugin binds to the gNMI gRPC service. It provides wrappers for gNMI
requests (Capabilities, Get, Set, Subscribe)
requirements:
- grpcio
- protobuf
options:
host:
description:
- Target host FQDN or IP address to establish gRPC connection.
default: inventory_hostname
vars:
- name: ansible_host
port:
type: int
description:
- Specifies the port on the remote device that listens for connections
when establishing the gRPC connection. If None only the C(host) part
will be used.
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
remote_user:
description:
- The username used to authenticate to the remote device when the gRPC
connection is first established. If the remote_user is not specified,
the connection will use the username of the logged in user.
- Can be configured from the CLI via the C(--user) or C(-u) options.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
password:
description:
- Configures the user password used to authenticate to the remote device
when first establishing the gRPC connection.
vars:
- name: ansible_password
- name: ansible_ssh_pass
private_key_file:
description:
- The PEM encoded private key file used to authenticate to the
remote device when first establishing the gRPC connection.
ini:
- section: grpc_connection
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
root_certificates_file:
description:
- The PEM encoded root certificate file used to create a SSL-enabled
channel, if the value is None it reads the root certificates from
a default location chosen by gRPC at runtime.
ini:
- section: grpc_connection
key: root_certificates_file
env:
- name: ANSIBLE_ROOT_CERTIFICATES_FILE
vars:
- name: ansible_root_certificates_file
certificate_chain_file:
description:
- The PEM encoded certificate chain file used to create a SSL-enabled
channel. If the value is None, no certificate chain is used.
ini:
- section: grpc_connection
key: certificate_chain_file
env:
- name: ANSIBLE_CERTIFICATE_CHAIN_FILE
vars:
- name: ansible_certificate_chain_file
certificate_path:
description:
- Folder to search for certificate and key files
ini:
- section: grpc_connection
key: certificate_path
env:
- name: ANSIBLE_CERTIFICATE_PATH
vars:
- name: ansible_certificate_path
gnmi_encoding:
description:
- Encoding used for gNMI communication
- Must be either JSON or JSON_IETF
- If not provided, will run CapabilityRequest for auto-detection
ini:
- section: grpc_connection
key: gnmi_encoding
env:
- name: ANSIBLE_GNMI_ENCODING
vars:
- name: ansible_gnmi_encoding
grpc_channel_options:
description:
- Key/Value pairs (dict) to define gRPC channel options to be used
- gRPC reference
U(https://grpc.github.io/grpc/core/group__grpc__arg__keys.html)
- Provide the I(ssl_target_name_override) option to override the TLS
subject or subjectAltName (only in the case secure connections are
used). The option must be provided in cases when the FQDN or IPv4
address that is used to connect to the device is different from the
subject name that is provided in the host certificate. This is
needed because TLS validates the hostname or IP address to avoid
man-in-the-middle attacks.
vars:
- name: ansible_grpc_channel_options
grpc_environment:
description:
- Key/Value pairs (dict) to define environment settings specific to gRPC
- The standard mechanism to provide/set the environment in Ansible
cannot be used, because those environment settings are not passed to
the client process that establishes the gRPC connection.
- Set C(GRPC_VERBOSITY) and C(GRPC_TRACE) to setup gRPC logging. Need to
add code for log forwarding of gRPC related log messages to the
persistent messages log (see below).
- Set C(HTTPS_PROXY) to specify your proxy settings (if needed).
- Set C(GRPC_SSL_CIPHER_SUITES) in case the default TLS ciphers do not match
what is offered by the gRPC server.
vars:
- name: ansible_grpc_environment
persistent_connect_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait when trying to
initially establish a persistent connection. If this value expires
before the connection to the remote device is completed, the connection
will fail.
default: 5
ini:
- section: persistent_connection
key: connect_timeout
env:
- name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT
vars:
- name: ansible_connect_timeout
persistent_command_timeout:
type: int
description:
- Configures the default timeout value (in seconds) when awaiting a
response after issuing a call to a RPC. If the RPC does not return
before the timeout exceed, an error is generated and the connection
is closed.
default: 300
ini:
- section: persistent_connection
key: command_timeout
env:
- name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
vars:
- name: ansible_command_timeout
persistent_log_messages:
type: boolean
description:
- This flag will enable logging the command executed and response received
from target device in the ansible log file. For this option to work the
'log_path' ansible configuration option is required to be set to a file
path with write access.
- Be sure to fully understand the security implications of enabling this
option as it could create a security vulnerability by logging sensitive
information in log file.
default: False
ini:
- section: persistent_connection
key: log_messages
env:
- name: ANSIBLE_PERSISTENT_LOG_MESSAGES
vars:
- name: ansible_persistent_log_messages
"""
import os
import re
import json
import base64
import datetime
try:
import grpc
HAS_GRPC = True
except ImportError:
HAS_GRPC = False
try:
from google import protobuf
HAS_PROTOBUF = True
except ImportError:
HAS_PROTOBUF = False
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.plugins.connection import NetworkConnectionBase
from ansible.plugins.connection import ensure_connect
from google.protobuf import json_format
from ansible_collections.nokia.grpc.plugins.connection.pb import gnmi_pb2
from ansible.module_utils._text import to_text
class Connection(NetworkConnectionBase):
"""
Connection plugin for gRPC
To use gRPC connections in Ansible one (or more) sub-plugin(s) for the
required gRPC service(s) must be loaded. To load gRPC sub-plugins use the
method `register_service()` with the name of the sub-plugin to be
registered.
After loading the sub-plugin, Ansible modules can call methods provided by
that sub-plugin. There is a wrapper available that consumes the attribute
name {sub-plugin name}__{method name} to call a specific method of that
sub-plugin.
"""
transport = "nokia.grpc.gnmi"
has_pipelining = True
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(
play_context, new_stdin, *args, **kwargs
)
self._task_uuid = to_text(kwargs.get("task_uuid", ""))
if not HAS_PROTOBUF:
raise AnsibleError(
"protobuf is required to use gRPC connection type. " +
"Please run 'pip install protobuf'"
)
if not HAS_GRPC:
raise AnsibleError(
"grpcio is required to use gRPC connection type. " +
"Please run 'pip install grpcio'"
)
self._connected = False
def readFile(self, optionName):
"""
Reads a binary certificate/key file
Parameters:
optionName(str): used to read filename from options
Returns:
File content
Raises:
AnsibleConnectionFailure: file does not exist or read exceptions
"""
path = self.get_option('certificate_path')
if not path:
path = '/etc/ssl:/etc/ssl/certs:/etc/ca-certificates'
filename = self.get_option(optionName)
if filename:
if filename.startswith('~'):
filename = os.path.expanduser(filename)
if not filename.startswith('/'):
for entry in path.split(':'):
if os.path.isfile(os.path.join(entry, filename)):
filename = os.path.join(entry, filename)
break
if os.path.isfile(filename):
try:
with open(filename, 'rb') as f:
return f.read()
except Exception as exc:
raise AnsibleConnectionFailure(
'Failed to read cert/keys file %s: %s' % (filename, exc)
)
else:
raise AnsibleConnectionFailure(
'Cert/keys file %s does not exist' % filename
)
return None
def _connect(self):
"""
Establish gRPC connection to remote node and create gNMI stub.
This method will establish the persistent gRPC connection, if not
already done. After this, the gNMI stub will be created. To get
visibility about gNMI capabilities of the remote device, a gNMI
CapabilityRequest will be sent and the result will be persisted.
Parameters:
None
Returns:
None
"""
if self.connected:
self.queue_message('v', 'gRPC connection to host %s already exists' % self._target)
return
grpcEnv = self.get_option('grpc_environment') or {}
if not isinstance(grpcEnv, dict):
raise AnsibleConnectionFailure("grpc_environment must be a dict")
for key in grpcEnv:
if grpcEnv[key]:
os.environ[key] = str(grpcEnv[key])
else:
try:
del os.environ[key]
except KeyError:
# no such setting in the current environment, but that's OK
pass
self._login_credentials = [
('username', self.get_option('remote_user')),
('password', self.get_option('password'))
]
host = self.get_option('host')
port = self.get_option('port')
self._target = host if port is None else '%s:%d' % (host, port)
self._timeout = self.get_option('persistent_command_timeout')
certs = {}
certs['root_certificates'] = self.readFile('root_certificates_file')
certs['certificate_chain'] = self.readFile('certificate_chain_file')
certs['private_key'] = self.readFile('private_key_file')
options = self.get_option('grpc_channel_options')
if options:
if not isinstance(options, dict):
raise AnsibleConnectionFailure("grpc_channel_options must be a dict")
options = options.items()
if certs['root_certificates'] or certs['private_key'] or certs['certificate_chain']:
self.queue_message('v', 'Starting secure gRPC connection')
creds = grpc.ssl_channel_credentials(**certs)
self._channel = grpc.secure_channel(self._target, creds, options=options)
else:
self.queue_message('v', 'Starting insecure gRPC connection')
self._channel = grpc.insecure_channel(self._target, options=options)
self.queue_message('v', "gRPC connection established for user %s to %s" %
(self.get_option('remote_user'), self._target))
self.queue_message('v', 'Creating gNMI stub')
self._stub = gnmi_pb2.gNMIStub(self._channel)
self._encoding = self.get_option('gnmi_encoding')
if not self._encoding:
self.queue_message('v', 'Run CapabilityRequest()')
request = gnmi_pb2.CapabilityRequest()
response = self._stub.Capabilities(request, metadata=self._login_credentials)
self.queue_message('v', 'CapabilityRequest() succeeded')
self._gnmiVersion = response.gNMI_version
self._yangModels = response.supported_models
if gnmi_pb2.Encoding.Value('JSON_IETF') in response.supported_encodings:
self._encoding = 'JSON_IETF'
elif gnmi_pb2.Encoding.Value('JSON') in response.supported_encodings:
self._encoding = 'JSON'
else:
raise AnsibleConnectionFailure("No compatible supported encoding found (JSON or JSON_IETF)")
else:
if self._encoding not in ['JSON_IETF', 'JSON']:
raise AnsibleConnectionFailure("Incompatible encoding '%s' requested (JSON or JSON_IETF)" % self._encoding)
self._encoding_value = gnmi_pb2.Encoding.Value(self._encoding)
self._connected = True
self.queue_message('v', 'gRPC/gNMI connection has been established successfully')
def close(self):
"""
Closes the active gRPC connection to the target host
Parameters:
None
Returns:
None
"""
if self._connected:
self.queue_message('v', "Closing gRPC connection to target host")
self._channel.close()
super(Connection, self).close()
# -----------------------------------------------------------------------
def _encodeXpath(self, xpath='/'):
"""
Encodes XPATH to dict representation that allows conversion to gnmi_pb.Path object
Parameters:
xpath (str): path string using XPATH syntax
Returns:
(dict): path dict using gnmi_pb2.Path structure for easy conversion
"""
mypath = []
xpath = xpath.strip('\t\n\r /')
if xpath:
path_elements = re.split('''/(?=(?:[^\[\]]|\[[^\[\]]+\])*$)''', xpath)
for e in path_elements:
entry = {'name': e.split("[", 1)[0]}
eKeys = re.findall('\[(.*?)\]', e)
dKeys = dict(x.split('=', 1) for x in eKeys)
if dKeys:
entry['key'] = dKeys
mypath.append(entry)
return {'elem': mypath}
return {}
def _decodeXpath(self, path):
"""
Decodes XPATH from dict representation converted from gnmi_pb.Path object
Parameters:
path (dict): decoded gnmi_pb2.Path object
Returns:
(str): path string using XPATH syntax
"""
result = []
if 'elem' not in path:
return ""
for elem in path['elem']:
tmp = elem['name']
if 'key' in elem:
for k, v in elem['key'].items():
tmp += "[%s=%s]" % (k, v)
result.append(tmp)
return '/'.join(result)
def _encodeVal(self, data):
"""
Encodes value to dict representation that allows conversion to gnmi_pb.TypedValue object
Parameters:
data (ANY): data to be encoded as gnmi_pb.TypedValue object
Returns:
(dict): dict using gnmi_pb.TypedValue structure for easy conversion
"""
value = base64.b64encode(json.dumps(data).encode())
if self._encoding == 'JSON_IETF':
return {'jsonIetfVal': value}
else:
return {'jsonVal': value}
def _decodeVal(self, val):
"""
Decodes value from dict representation converted from gnmi_pb.TypedValue object
Parameters:
val (dict): decoded gnmi_pb.TypedValue object
Returns:
(ANY): extracted data
"""
if 'jsonIetfVal' in val:
return json.loads(base64.b64decode(val['jsonIetfVal']))
elif 'jsonVal' in val:
return json.loads(base64.b64decode(val['jsonVal']))
else:
raise AnsibleConnectionFailure("Ansible gNMI plugin does not support encoding for value: %s" % json.dumps(val))
def _dictToList(self, aDict):
for key in aDict.keys():
if key.startswith('___'):
aDict[key[3:]] = [self._dictToList(val) if isinstance(val, dict) else val for val in aDict[key].values()]
del aDict[key]
else:
if isinstance(aDict[key], dict):
aDict[key] = self._dictToList(aDict[key])
return aDict
def _mergeToSingleDict(self, rawData):
result = {}
for entry in rawData:
if 'syncResponse' in entry and entry['syncResponse']:
# Ignore: SyncResponse is sent after initial update
break
elif 'update' not in entry:
# Ignore: entry without updates
break
elif 'timestamp' not in entry:
# Subscribe response, enter update context
entry = entry['update']
else:
# Get response, keep context
pass
prfx = result
if ('prefix' in entry) and ('elem' in entry['prefix']):
prfx_elements = entry['prefix']['elem']
else:
prfx_elements = []
for elem in prfx_elements:
eleName = elem['name']
if 'key' in elem:
eleKey = json.dumps(elem['key'])
eleName = '___'+eleName
# Path Element has key => must be list()
if eleName in prfx:
# Path Element exists => Change Context
prfx = prfx[eleName]
if eleKey not in prfx:
# List entry does not exist => Create
prfx[eleKey] = elem['key']
prfx = prfx[eleKey]
else:
# Path Element does not exist => Create
prfx[eleName] = {}
prfx = prfx[eleName]
prfx[eleKey] = elem['key']
prfx = prfx[eleKey]
else:
# Path element has no key => must be dict()
if eleName in prfx:
# Path Element exists => Change Context
prfx = prfx[eleName]
else:
# Path Element does not exist => Create
prfx[eleName] = {}
prfx = prfx[eleName]
for _upd in entry['update']:
if 'val' not in _upd:
# requested path without content (no value) => skip
continue
elif ('path' in _upd) and ('elem' in _upd['path']):
path_elements = _upd['path']['elem']
cPath = prfx
elif prfx_elements:
path_elements = prfx_elements
cPath = result
else:
# No path at all, replace the object tree with the value
result = self._decodeVal(_upd['val'])
prfx = result
continue
# If path_elements has more than just a single entry,
# we need to create/navigate to the specified subcontext
for elem in path_elements[:-1]:
eleName = elem['name']
if 'key' in elem:
eleKey = json.dumps(elem['key'])
eleName = '___'+eleName
# Path Element has key => must be list()
if eleName in cPath:
# Path Element exists => Change Context
cPath = cPath[eleName]
if eleKey not in cPath:
# List entry does not exist => Create
cPath[eleKey] = elem['key']
cPath = cPath[eleKey]
else:
# Path Element does not exist => Create
cPath[eleName] = {}
cPath = cPath[eleName]
cPath[eleKey] = elem['key']
cPath = cPath[eleKey]
else:
# Path element has no key => must be dict()
if eleName in cPath:
# Path Element exists => Change Context
cPath = cPath[eleName]
else:
# Path Element does not exist => Create
cPath[eleName] = {}
cPath = cPath[eleName]
# The last entry of path_elements is the leaf element
# that needs to be created/updated
leaf_elem = path_elements[-1]
if 'key' in leaf_elem:
eleKey = json.dumps(leaf_elem['key'])
eleName = '___'+leaf_elem['name']
if eleName not in cPath:
cPath[eleName] = {}
cPath = cPath[eleName]
cPath[eleKey] = self._decodeVal(_upd['val'])
else:
cPath[leaf_elem['name']] = self._decodeVal(_upd['val'])
return self._dictToList(result)
def _simplifyUpdates(self, rawData):
for msg in rawData:
entry = json_format.MessageToDict(msg)
if 'syncResponse' in entry:
# Ignore: SyncResponse is sent after initial update
pass
elif 'update' in entry:
result = {}
update = entry['update']
if 'prefix' in update:
result['prefix'] = '/'+self._decodeXpath(update['prefix'])
if 'timestamp' in update:
result['timestamp'] = datetime.datetime.fromtimestamp(float(update['timestamp'])/1000000000).isoformat()
if 'update' in update:
result['values'] = {self._decodeXpath(u['path']): self._decodeVal(u['val']) for u in update['update']}
yield result
else:
# Ignore: Invalid message format
pass
# -----------------------------------------------------------------------
@ensure_connect
def gnmiCapabilities(self):
"""
Executes a gNMI Capabilities request
Parameters:
None
Returns:
str: gNMI capabilities converted into JSON format
"""
request = gnmi_pb2.CapabilityRequest()
auth = self._login_credentials
try:
response = self._stub.Capabilities(request, metadata=auth)
except grpc.RpcError as e:
raise AnsibleConnectionFailure("%s" % e)
return json_format.MessageToJson(response)
@ensure_connect
def gnmiGet(self, *args, **kwargs):
"""
Executes a gNMI Get request
Encoding that is used for data serialization is automatically determined
based on the remote device capabilities. This gNMI plugin has implemented
support for JSON_IETF (preferred) and JSON (fallback).
Parameters:
type (str): Type of data that is requested: ALL, CONFIG, STATE
prefix (str): Path prefix that is added to all paths (XPATH syntax)
paths (list): List of paths (str) to be captured
Returns:
str: GetResponse message converted into JSON format
"""
# Remove all input parameters from kwargs that are not set
input = dict(filter(lambda x: x[1], kwargs.items()))
# Adjust input parameters to match specification for gNMI SetRequest
if 'prefix' in input:
input['prefix'] = self._encodeXpath(input['prefix'])
if 'path' in input:
input['path'] = [self._encodeXpath(path) for path in input['path']]
if 'type' in input:
input['type'] = input['type'].upper()
input['encoding'] = self._encoding_value
request = json_format.ParseDict(input, gnmi_pb2.GetRequest())
auth = self._login_credentials
try:
response = self._stub.Get(request, metadata=auth)
except grpc.RpcError as e:
raise AnsibleConnectionFailure("%s" % e)
output = self._mergeToSingleDict(json_format.MessageToDict(response)['notification'])
return json.dumps(output, indent=4).encode()
@ensure_connect
def gnmiSet(self, *args, **kwargs):
"""
Executes a gNMI Set request
Encoding that is used for data serialization is automatically determined
based on the remote device capabilities. This gNMI plugin has implemented
support for JSON_IETF (preferred) and JSON (fallback).
Parameters:
prefix (str): Path prefix that is added to all paths (XPATH syntax)
update (list): Path/Value pairs to be updated
replace (list): Path/Value pairs to be replaced
delete (list): Paths (str) to be deleted
Returns:
str: SetResponse message converted into JSON format
"""
# Remove all input parameters from kwargs that are not set
input = dict(filter(lambda x: x[1], kwargs.items()))
# Backup options are not to be used in gNMI SetRequest
if 'backup' in input:
del input['backup']
if 'backup_options' in input:
del input['backup_options']
# Adjust input parameters to match specification for gNMI SetRequest
if 'prefix' in input:
input['prefix'] = self._encodeXpath(input['prefix'])
if 'delete' in input:
input['delete'] = [self._encodeXpath(entry) for entry in input['delete']]
if 'update' in input:
for entry in input['update']:
entry['path'] = self._encodeXpath(entry['path'])
entry['val'] = self._encodeVal(entry['val'])
if 'replace' in input:
for entry in input['replace']:
entry['path'] = self._encodeXpath(entry['path'])
entry['val'] = self._encodeVal(entry['val'])
request = json_format.ParseDict(input, gnmi_pb2.SetRequest())
auth = self._login_credentials
try:
response = self._stub.Set(request, metadata=auth)
except grpc.RpcError as e:
raise AnsibleConnectionFailure("%s" % e)
output = json_format.MessageToDict(response)
output['timestamp'] = datetime.datetime.fromtimestamp(float(output['timestamp'])/1000000000).isoformat()
if 'prefix' in output:
output['prefix'] = self._decodeXpath(output['prefix'])
for item in output['response']:
item['path'] = self._decodeXpath(item['path'])
return json.dumps(output, indent=4).encode()
@ensure_connect
def gnmiSubscribe(self, *args, **kwargs):
"""
Executes a gNMI Subscribe request
Encoding that is used for data serialization is automatically determined
based on the remote device capabilities. This gNMI plugin has implemented
support for JSON_IETF (preferred) and JSON (fallback).
Parameters:
prefix (str): Path prefix that is added to all paths (XPATH syntax)
mode (str): Mode of subscription (STREAM, ONCE)
subscription (list of dict): Subscription specification (path, interval, submode)
duration (int): timeout in seconds, after which receiving stops
qos (int): DSCP marking that is used
updates_only (bool): Send only updates to initial state
allow_aggregation (bool): Aggregate elements marked as eligible for aggregation
Returns:
str: Updates received converted into JSON format
"""
# Remove all input parameters from kwargs that are not set
input = dict(filter(lambda x: x[1], kwargs.items()))
# Adjust input parameters to match specification for gNMI SubscribeRequest
if 'mode' in input:
input['mode'] = input['mode'].upper()
input['encoding'] = self._encoding_value
if 'prefix' in input:
input['prefix'] = self._encodeXpath(input['prefix'])
if 'subscription' in input:
for item in input['subscription']:
item['path'] = self._encodeXpath(item['path'])
# Extract duration from input attributes
if 'duration' in input:
duration = input['duration']
del input['duration']
else:
duration = 20
request = json_format.ParseDict({'subscribe': input}, gnmi_pb2.SubscribeRequest())
auth = self._login_credentials
try:
output = []
responses = self._stub.Subscribe(iter([request]), duration, metadata=auth)
if input['mode'] == 'ONCE':
responses = [json_format.MessageToDict(response) for response in responses]
output = self._mergeToSingleDict(responses)
else:
for update in self._simplifyUpdates(responses):
output.append(update)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED:
if input['mode'] == 'ONCE':
raise AnsibleConnectionFailure("gNMI ONCE Subscription timed out")
else:
# RPC timed out, which is okay
pass
else:
raise AnsibleConnectionFailure("%s" % e)
return json.dumps(output, indent=4).encode()
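# --- Illustrative note (not part of the original file) ---
# _encodeXpath() maps an XPATH string onto the dict form of gnmi_pb2.Path,
# e.g. '/interfaces/interface[name=eth0]/state' becomes
#     {'elem': [{'name': 'interfaces'},
#               {'name': 'interface', 'key': {'name': 'eth0'}},
#               {'name': 'state'}]}
# and _decodeXpath() performs the inverse mapping back to the XPATH string.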
| 37.74399
| 124
| 0.583384
| 3,438
| 31,403
| 5.220477
| 0.172193
| 0.015322
| 0.0117
| 0.008525
| 0.32973
| 0.25624
| 0.220303
| 0.193782
| 0.180187
| 0.174838
| 0
| 0.003383
| 0.331656
| 31,403
| 831
| 125
| 37.78941
| 0.851772
| 0.183008
| 0
| 0.326316
| 0
| 0.003509
| 0.365895
| 0.037773
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0.019298
| 0.02807
| 0
| 0.089474
| 0.001754
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
99050763178e67f3f1f7faee3c71dfb0a78b6af1
| 4,521
|
py
|
Python
|
experiments/delaney/plot.py
|
pfnet-research/bayesgrad
|
5db613391777b20b7a367c274804f0b736991b0a
|
[
"MIT"
] | 57
|
2018-06-30T01:47:19.000Z
|
2022-03-03T17:21:42.000Z
|
experiments/delaney/plot.py
|
pfnet-research/bayesgrad
|
5db613391777b20b7a367c274804f0b736991b0a
|
[
"MIT"
] | null | null | null |
experiments/delaney/plot.py
|
pfnet-research/bayesgrad
|
5db613391777b20b7a367c274804f0b736991b0a
|
[
"MIT"
] | 8
|
2018-07-07T06:18:40.000Z
|
2021-02-23T21:58:45.000Z
|
import argparse
import numpy as np
import os
import sys
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from saliency.visualizer.smiles_visualizer import SmilesVisualizer
def visualize(dir_path):
parent_dir = os.path.dirname(dir_path)
saliency_vanilla = np.load(os.path.join(dir_path, "saliency_vanilla.npy"))
saliency_smooth = np.load(os.path.join(dir_path, "saliency_smooth.npy"))
saliency_bayes = np.load(os.path.join(dir_path, "saliency_bayes.npy"))
visualizer = SmilesVisualizer()
os.makedirs(os.path.join(parent_dir, "result_vanilla"), exist_ok=True)
os.makedirs(os.path.join(parent_dir, "result_smooth"), exist_ok=True)
os.makedirs(os.path.join(parent_dir, "result_bayes"), exist_ok=True)
test_idx = np.load(os.path.join(dir_path, "test_idx.npy"))
answer = np.load(os.path.join(dir_path, "answer.npy"))
output = np.load(os.path.join(dir_path, "output.npy"))
smiles_all = np.load(os.path.join(parent_dir, "smiles.npy"))
def calc_range(saliency):
vmax = float('-inf')
vmin = float('inf')
for v in saliency:
vmax = max(vmax, np.max(v))
vmin = min(vmin, np.min(v))
return vmin, vmax
v_range_vanilla = calc_range(saliency_vanilla)
v_range_smooth = calc_range(saliency_smooth)
v_range_bayes = calc_range(saliency_bayes)
def get_scaler(v_range):
def scaler(saliency_):
saliency = np.copy(saliency_)
minv, maxv = v_range
if maxv == minv:
saliency = np.zeros_like(saliency)
else:
pos = saliency >= 0.0
saliency[pos] = saliency[pos]/maxv
nega = saliency < 0.0
saliency[nega] = saliency[nega]/(np.abs(minv))
return saliency
return scaler
scaler_vanilla = get_scaler(v_range_vanilla)
scaler_smooth = get_scaler(v_range_smooth)
scaler_bayes = get_scaler(v_range_bayes)
def color(x):
if x > 0:
# Red for positive value
return 1., 1. - x, 1. - x
else:
# Blue for negative value
x *= -1
return 1. - x, 1. - x, 1.
for i, id in enumerate(test_idx):
smiles = smiles_all[id]
out = output[i]
ans = answer[i]
# legend = "t:{}, p:{}".format(ans, out)
legend = ''
ext = '.png' # '.svg'
# visualizer.visualize(
# saliency_vanilla[id], smiles, save_filepath=os.path.join(parent_dir, "result_vanilla", str(id) + ext),
# visualize_ratio=1.0, legend=legend, scaler=scaler_vanilla, color_fn=color)
# visualizer.visualize(
# saliency_smooth[id], smiles, save_filepath=os.path.join(parent_dir, "result_smooth", str(id) + ext),
# visualize_ratio=1.0, legend=legend, scaler=scaler_smooth, color_fn=color)
visualizer.visualize(
saliency_bayes[id], smiles, save_filepath=os.path.join(parent_dir, "result_bayes", str(id) + ext),
visualize_ratio=1.0, legend=legend, scaler=scaler_bayes, color_fn=color)
def plot_result(prediction, answer, save_filepath='result.png'):
plt.scatter(prediction, answer, marker='.')
plt.plot([-100, 100], [-100, 100], c='r')
max_v = max(np.max(prediction), np.max(answer))
min_v = min(np.min(prediction), np.min(answer))
plt.xlim([min_v-0.1, max_v+0.1])
plt.xlabel("prediction")
plt.ylim([min_v-0.1, max_v+0.1])
plt.ylabel("ground truth")
plt.savefig(save_filepath)
plt.close()
def main():
parser = argparse.ArgumentParser(
description='Regression with own dataset.')
parser.add_argument('--dirpath', '-d', type=str, default='./results/M_30_3_32_32')
args = parser.parse_args()
path = args.dirpath
n_split = 5
output = []
answer = []
for i in range(n_split):
suffix = str(i) + "-" + str(n_split)
output.append(np.load(os.path.join(path, suffix, "output.npy")))
answer.append(np.load(os.path.join(path, suffix, "answer.npy")))
output = np.concatenate(output)
answer = np.concatenate(answer)
plot_result(output, answer, save_filepath=os.path.join(path, "result.png"))
for i in range(n_split):
suffix = str(i) + "-" + str(n_split)
print(suffix)
visualize(os.path.join(path, suffix))
if __name__ == '__main__':
main()
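The get_scaler/color pair above normalizes saliency into [-1, 1] (positive values by the global max, negative values by |min|) and maps sign to red/blue intensity. A self-contained check of that logic, with illustrative values only:

import numpy as np

saliency = np.array([0.8, -0.2, 0.0, -0.5])
vmin, vmax = saliency.min(), saliency.max()
scaled = np.where(saliency >= 0, saliency / vmax, saliency / abs(vmin))
print(scaled)  # [ 1.  -0.4  0.  -1. ] -- extremes map to +/-1

def rgb(x):
    # Same mapping as color() above: red keeps full intensity for positives,
    # blue for negatives; note 1 + x equals 1 - |x| when x is negative.
    return (1., 1. - x, 1. - x) if x > 0 else (1. + x, 1. + x, 1.)

print([rgb(v) for v in scaled])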
| 35.320313 | 116 | 0.628622 | 624 | 4,521 | 4.366987 | 0.216346 | 0.04844 | 0.062385 | 0.039633 | 0.352294 | 0.327706 | 0.299083 | 0.26055 | 0.173578 | 0.162569 | 0 | 0.013241 | 0.231586 | 4,521 | 127 | 117 | 35.598425 | 0.771157 | 0.111701 | 0 | 0.0625 | 0 | 0 | 0.074675 | 0.005495 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072917 | false | 0 | 0.072917 | 0 | 0.197917 | 0.010417 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
990961ddde648d8a6e8bdae1002af6b0a3fe992c | 1,639 | py | Python | gpytorch/lazy/chol_lazy_tensor.py | harvineet/gpytorch | 8aa8f1a4298ef61cfea9c4d11c75576a84ffcc3e | ["MIT"] | null | null | null | gpytorch/lazy/chol_lazy_tensor.py | harvineet/gpytorch | 8aa8f1a4298ef61cfea9c4d11c75576a84ffcc3e | ["MIT"] | null | null | null | gpytorch/lazy/chol_lazy_tensor.py | harvineet/gpytorch | 8aa8f1a4298ef61cfea9c4d11c75576a84ffcc3e | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import torch
from .lazy_tensor import LazyTensor
from .root_lazy_tensor import RootLazyTensor
from .. import settings
class CholLazyTensor(RootLazyTensor):
def __init__(self, chol):
if isinstance(chol, LazyTensor): # Probably is an instance of NonLazyTensor
chol = chol.evaluate()
# Check that we have a lower triangular matrix
if settings.debug.on():
mask = torch.ones(chol.shape[-2:], dtype=chol.dtype, device=chol.device).triu_(1)
            # torch.equal(chol, chol) is False when NaNs are present, so the
            # strict-triangularity check is skipped for non-finite inputs
            if torch.max(chol.mul(mask)).item() > 1e-3 and torch.equal(chol, chol):
                raise RuntimeError("CholLazyTensor should take a lower-triangular matrix in the constructor.")
# Run super constructor
super(CholLazyTensor, self).__init__(chol)
@property
def _chol(self):
if not hasattr(self, "_chol_memo"):
self._chol_memo = self.root.evaluate()
return self._chol_memo
@property
def _chol_diag(self):
if not hasattr(self, "_chol_diag_memo"):
self._chol_diag_memo = self._chol.diagonal(dim1=-2, dim2=-1).clone()
return self._chol_diag_memo
def inv_quad_logdet(self, inv_quad_rhs=None, logdet=False, reduce_inv_quad=True):
inv_quad_term = None
logdet_term = None
if inv_quad_rhs is not None:
inv_quad_term, _ = super(CholLazyTensor, self).inv_quad_logdet(
inv_quad_rhs, logdet=False, reduce_inv_quad=reduce_inv_quad
)
if logdet:
logdet_term = self._chol_diag.pow(2).log().sum(-1)
return inv_quad_term, logdet_term
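The logdet branch above relies on the identity log det K = 2 * sum_i log L_ii for K = L L^T. A quick numerical sanity check (illustrative, not part of gpytorch; assumes a PyTorch recent enough to ship torch.linalg.cholesky):

import torch

A = torch.randn(5, 5)
K = A @ A.t() + 1e-3 * torch.eye(5)   # random symmetric positive-definite matrix
L = torch.linalg.cholesky(K)          # lower-triangular factor, K = L @ L.T
direct = torch.logdet(K)
via_chol = L.diagonal().pow(2).log().sum()  # same expression as _chol_diag above
assert torch.allclose(direct, via_chol, atol=1e-4)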
| 33.44898 | 113 | 0.654667 | 216 | 1,639 | 4.694444 | 0.393519 | 0.075937 | 0.047337 | 0.047337 | 0.126233 | 0.078895 | 0 | 0 | 0 | 0 | 0 | 0.008936 | 0.248932 | 1,639 | 48 | 114 | 34.145833 | 0.814785 | 0.078707 | 0 | 0.060606 | 0 | 0 | 0.066401 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121212 | false | 0 | 0.121212 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9909642cf635ba7b413ffb8f974cd5801c613d72 | 5,765 | py | Python | pirates/audio/AmbientManagerBase.py | ksmit799/POTCO-PS | 520d38935ae8df4b452c733a82c94dddac01e275 | ["Apache-2.0"] | 8 | 2017-01-24T04:33:29.000Z | 2020-11-01T08:36:24.000Z | pirates/audio/AmbientManagerBase.py | ksmit799/Pirates-Online-Remake | 520d38935ae8df4b452c733a82c94dddac01e275 | ["Apache-2.0"] | 1 | 2017-03-02T18:05:17.000Z | 2017-03-14T06:47:10.000Z | pirates/audio/AmbientManagerBase.py | ksmit799/Pirates-Online-Remake | 520d38935ae8df4b452c733a82c94dddac01e275 | ["Apache-2.0"] | 11 | 2017-03-02T18:46:07.000Z | 2020-11-01T08:36:26.000Z |
# File: A (Python 2.4)
from pandac.PandaModules import AudioSound
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import LerpFunc, Sequence
from direct.showbase.DirectObject import DirectObject
class AmbientSound:
notify = DirectNotifyGlobal.directNotify.newCategory('AmbientSound')
def __init__(self, path, masterAmbientVolume, loop = True, isMusic = False):
self.isMusic = isMusic
if self.isMusic:
self.sfx = loader.loadMusic(path)
else:
self.sfx = loader.loadSfx(path)
self.path = path
self.loop = loop
self.setLoop(loop)
self.setVolume(0)
self.masterAmbientVolume = masterAmbientVolume
self.reloadAttempt = 0
self.curPriority = 0
self.duration = 0
self.finalVolume = 0
self.startVolume = 0
self.activeInterval = None
def unload(self):
if self.activeInterval:
self.activeInterval.finish()
del self.activeInterval
self.sfx.stop()
del self.sfx
def play(self):
self.sfx.play()
def getVolume(self):
return self.sfx.getVolume()
def setVolume(self, vol):
self.sfx.setVolume(vol)
def getLoop(self):
return self.sfx.getLoop()
def setLoop(self, loop):
self.sfx.setLoop(loop)
def set3dAttributes(self, *args):
self.sfx.set3dAttributes(*args)
def requestChangeVolume(self, duration, finalVolume, priority):
if priority < self.curPriority:
return None
self.curPriority = priority
if not self.sfx.getActive():
if self.reloadAttempt < 1:
self.reloadAttempt += 1
if self.isMusic:
self.sfx = loader.loadMusic(self.path)
else:
self.sfx = loader.loadSfx(self.path)
if self.sfx:
self.sfx.setLoop(self.loop)
self.duration = duration
self.startVolume = self.getVolume()
self.finalVolume = finalVolume
if self.activeInterval:
self.activeInterval.pause()
del self.activeInterval
self.activeInterval = Sequence(LerpFunc(self.changeVolumeTask, fromData = self.startVolume, toData = self.finalVolume, duration = self.duration))
self.activeInterval.start()
def changeMasterAmbientVolume(self, newMasterAmbientVolume):
if not self.masterAmbientVolume == newMasterAmbientVolume:
self.masterAmbientVolume = newMasterAmbientVolume
if self.activeInterval and self.activeInterval.isPlaying():
pass
elif self.sfx.status() == 2:
newVol = float(self.finalVolume) * self.masterAmbientVolume
self.sfx.setVolume(newVol)
def changeVolumeTask(self, t):
curVolume = t * self.masterAmbientVolume
self.sfx.setVolume(curVolume)
        if not hasattr(self, 'reportCounter'):
            self.reportCounter = 0
        self.reportCounter += 1
        if self.reportCounter % 10 == 0:
            pass  # empty debug stub left by the decompiler; stray constant removed
if curVolume > 0 and self.sfx.status() == 1:
self.sfx.play()
if curVolume <= 0 and self.sfx.status() == 2:
self.sfx.stop()
self.curPriority = 0
class AmbientManagerBase(DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('AmbientManagerBase')
def __init__(self):
self.ambientDict = { }
self.masterAmbientVolume = 1.0
def load(self, name, path, looping = True, isMusic = False):
retval = False
if self.ambientDict.has_key(name):
if self.ambientDict[name].path == path:
self.notify.warning('ambient name=%s path=%s already loaded' % (name, path))
else:
                self.notify.warning('ambient name %s is already bound to %s' % (name, self.ambientDict[name].path))
else:
newAmbient = AmbientSound(path, self.masterAmbientVolume, looping, isMusic)
self.ambientDict[name] = newAmbient
def unload(self, name):
if self.ambientDict.has_key(name):
self.ambientDict[name].unload()
del self.ambientDict[name]
else:
self.notify.warning('music: %s not in ambientDict' % name)
def requestFadeIn(self, name, duration = 5, finalVolume = 1.0, priority = 0):
self.requestChangeVolume(name, duration, finalVolume, priority)
def requestFadeOut(self, name, duration = 5, finalVolume = 0.0, priority = 0):
self.requestChangeVolume(name, duration, finalVolume, priority)
def requestChangeVolume(self, name, duration, finalVolume, priority = 0):
if self.ambientDict.has_key(name):
self.ambientDict[name].requestChangeVolume(duration, finalVolume, priority)
def delete(self):
for name in self.ambientDict.keys():
self.ambientDict[name].unload()
self.ambientDict = { }
def silence(self):
for name in self.ambientDict.keys():
self.ambientDict[name].requestChangeVolume(0.0, 0.0, priority = 1)
def changeMasterAmbientVolume(self, newMasterAmbientVolume):
if not newMasterAmbientVolume == self.masterAmbientVolume:
self.masterAmbientVolume = newMasterAmbientVolume
for name in self.ambientDict.keys():
self.ambientDict[name].changeMasterAmbientVolume(self.masterAmbientVolume)
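A hedged usage sketch for the manager above; the sound names and asset paths are invented, and a running Panda3D application (providing the global `loader`) is required before AmbientSound can load anything.

def crossfade_to_storm(ambient_mgr):
    # `ambient_mgr` is assumed to be an AmbientManagerBase created inside a
    # running Panda3D app.
    ambient_mgr.load('tavern', 'audio/ambient/tavern.ogg')
    ambient_mgr.load('storm', 'audio/ambient/storm.ogg')
    ambient_mgr.requestFadeIn('tavern', duration=3, finalVolume=0.8)
    # later, e.g. when the weather changes:
    ambient_mgr.requestFadeOut('tavern', duration=5)
    ambient_mgr.requestFadeIn('storm', duration=5, finalVolume=1.0)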
| 30.828877 | 153 | 0.601214 | 555 | 5,765 | 6.225225 | 0.196396 | 0.044573 | 0.049493 | 0.031259 | 0.280753 | 0.219971 | 0.145007 | 0.108538 | 0.108538 | 0.068307 | 0 | 0.010033 | 0.308413 | 5,765 | 186 | 154 | 30.994624 | 0.856534 | 0.003469 | 0 | 0.282258 | 0 | 0 | 0.025605 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16129 | false | 0.016129 | 0.032258 | 0.016129 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
99098c029853719101bfb8070fc7fe3e4ddbd2c3 | 6,801 | py | Python | hexrd/ui/matrix_editor.py | HEXRD/hexrdgui | d92915463f237e0521b5830655ae73bc5bcd9f80 | ["BSD-3-Clause"] | 13 | 2020-02-18T00:23:02.000Z | 2022-02-24T20:04:36.000Z | hexrd/ui/matrix_editor.py | HEXRD/hexrdgui | d92915463f237e0521b5830655ae73bc5bcd9f80 | ["BSD-3-Clause"] | 656 | 2020-01-14T02:33:40.000Z | 2022-03-26T15:31:17.000Z | hexrd/ui/matrix_editor.py | HEXRD/hexrdgui | d92915463f237e0521b5830655ae73bc5bcd9f80 | ["BSD-3-Clause"] | 6 | 2020-01-17T15:02:53.000Z | 2020-11-01T22:02:48.000Z |
import numpy as np
from PySide2.QtCore import QSignalBlocker, Signal
from PySide2.QtWidgets import QGridLayout, QWidget
from hexrd.ui.scientificspinbox import ScientificDoubleSpinBox
DEFAULT_ENABLED_STYLE_SHEET = 'background-color: white'
DEFAULT_DISABLED_STYLE_SHEET = 'background-color: #F0F0F0'
INVALID_MATRIX_STYLE_SHEET = 'background-color: red'
class MatrixEditor(QWidget):
data_modified = Signal()
def __init__(self, data, parent=None):
super().__init__(parent)
self._data = data
# If this is not None, then only the elements present in the
# list (as (i, j) items) will be enabled.
self._enabled_elements = None
# If this is set, it will be called every time the data updates
# to apply equality constraints.
self._apply_constraints_func = None
# Whether or not the matrix is currently invalid
self.matrix_invalid = False
# Reason the matrix is currently invalid
self.matrix_invalid_reason = ''
self.setLayout(QGridLayout())
self.add_spin_boxes()
self.update_gui()
def add_spin_boxes(self):
layout = self.layout()
for i in range(self.rows):
for j in range(self.cols):
sb = self.create_spin_box()
layout.addWidget(sb, i, j)
def create_spin_box(self):
sb = ScientificDoubleSpinBox()
sb.setKeyboardTracking(False)
sb.valueChanged.connect(self.element_modified)
return sb
def element_modified(self):
self.update_data()
@property
def data(self):
return self._data
@data.setter
def data(self, v):
if not np.array_equal(self._data, v):
if self._data.shape != v.shape:
msg = (f'Shape {v.shape} does not match original shape '
f'{self._data.shape}')
raise AttributeError(msg)
self._data = v
self.reset_disabled_values()
self.update_gui()
@property
def rows(self):
return self.data.shape[0]
@property
def cols(self):
return self.data.shape[1]
def update_data(self):
self.data[:] = self.gui_data
self.apply_constraints()
self.data_modified.emit()
def update_gui(self):
self.gui_data = self.data
@property
def gui_data(self):
row_range = range(self.rows)
col_range = range(self.cols)
return [[self.gui_value(i, j) for j in col_range] for i in row_range]
@gui_data.setter
def gui_data(self, v):
blockers = [QSignalBlocker(w) for w in self.all_widgets] # noqa: F841
for i in range(self.rows):
for j in range(self.cols):
self.set_gui_value(i, j, v[i][j])
@property
def all_widgets(self):
row_range = range(self.rows)
col_range = range(self.cols)
return [self.widget(i, j) for j in col_range for i in row_range]
@property
def enabled_widgets(self):
widgets = []
for i in range(self.rows):
for j in range(self.cols):
if (i, j) in self.enabled_elements:
widgets.append(self.widget(i, j))
return widgets
def widget(self, row, col):
return self.layout().itemAtPosition(row, col).widget()
def gui_value(self, row, col):
return self.widget(row, col).value()
def set_gui_value(self, row, col, val):
self.widget(row, col).setValue(val)
def set_matrix_invalid(self, s):
self.matrix_invalid = True
self.matrix_invalid_reason = s
self.update_tooltips()
self.update_enable_states()
def set_matrix_valid(self):
self.matrix_invalid = False
self.matrix_invalid_reason = ''
self.update_tooltips()
self.update_enable_states()
def update_tooltips(self):
if self.matrix_invalid:
tooltip = self.matrix_invalid_reason
else:
tooltip = ''
for w in self.enabled_widgets:
w.setToolTip(tooltip)
def update_enable_states(self):
enable_all = self.enabled_elements is None
for i in range(self.rows):
for j in range(self.cols):
w = self.widget(i, j)
enable = enable_all or (i, j) in self.enabled_elements
w.setEnabled(enable)
enabled_str = 'enabled' if enable else 'disabled'
style_sheet = getattr(self, f'{enabled_str}_style_sheet')
w.setStyleSheet(style_sheet)
def reset_disabled_values(self):
# Resets all disabled values to zero, then applies constraints
for i in range(self.rows):
for j in range(self.cols):
if not self.widget(i, j).isEnabled():
self.data[i, j] = 0.0
self.apply_constraints()
self.update_gui()
@property
def enabled_style_sheet(self):
if self.matrix_invalid:
return INVALID_MATRIX_STYLE_SHEET
return DEFAULT_ENABLED_STYLE_SHEET
@property
def disabled_style_sheet(self):
return DEFAULT_DISABLED_STYLE_SHEET
@property
def enabled_elements(self):
return self._enabled_elements
@enabled_elements.setter
def enabled_elements(self, v):
if self._enabled_elements != v:
self._enabled_elements = v
self.update_enable_states()
self.reset_disabled_values()
@property
def apply_constraints_func(self):
return self._apply_constraints_func
@apply_constraints_func.setter
def apply_constraints_func(self, v):
if self._apply_constraints_func != v:
self._apply_constraints_func = v
self.apply_constraints()
def apply_constraints(self):
if (func := self.apply_constraints_func) is None:
return
func(self.data)
self.update_gui()
if __name__ == '__main__':
import sys
from PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout
if len(sys.argv) < 2:
sys.exit('Usage: <script> <matrix_size>')
rows, cols = [int(x) for x in sys.argv[1].split('x')]
data = np.ones((rows, cols))
app = QApplication(sys.argv)
dialog = QDialog()
layout = QVBoxLayout()
dialog.setLayout(layout)
editor = MatrixEditor(data)
layout.addWidget(editor)
# def constraints(x):
# x[2][2] = x[1][1]
# editor.enabled_elements = [(1, 1), (3, 4)]
# editor.apply_constraints_func = constraints
def on_data_modified():
print(f'Data modified: {editor.data}')
editor.data_modified.connect(on_data_modified)
dialog.finished.connect(app.quit)
dialog.show()
app.exec_()
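The commented-out lines in the __main__ block hint at how enabled_elements and apply_constraints_func are meant to be used together. One concrete (hypothetical) constraint function, which keeps the edited matrix symmetric, could look like this:

import numpy as np

def symmetric_constraint(x):
    # Called by MatrixEditor.apply_constraints with the live data array;
    # constraint functions mutate it in place. Mirror the upper triangle down.
    x[:] = np.triu(x) + np.triu(x, 1).T

# editor.apply_constraints_func = symmetric_constraint  # attach to an editor instance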
| 27.987654 | 78 | 0.617115 | 859 | 6,801 | 4.67986 | 0.194412 | 0.027861 | 0.027363 | 0.029851 | 0.247015 | 0.163184 | 0.151741 | 0.151741 | 0.091045 | 0.091045 | 0 | 0.004747 | 0.287605 | 6,801 | 242 | 79 | 28.103306 | 0.824974 | 0.070284 | 0 | 0.258824 | 0 | 0 | 0.037876 | 0.003962 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0.035294 | 0.047059 | 0.311765 | 0.005882 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
990b3873866758deed49ecf19b9f6e265d5bd2a4 | 3,616 | py | Python | checkerpy/types/all/typedtuple.py | yedivanseven/CheckerPy | 04612086d25fecdd0b20ca0a050db8620c437b0e | ["MIT"] | 1 | 2018-01-12T19:20:51.000Z | 2018-01-12T19:20:51.000Z | checkerpy/types/all/typedtuple.py | yedivanseven/CheckerPy | 04612086d25fecdd0b20ca0a050db8620c437b0e | ["MIT"] | null | null | null | checkerpy/types/all/typedtuple.py | yedivanseven/CheckerPy | 04612086d25fecdd0b20ca0a050db8620c437b0e | ["MIT"] | null | null | null |
from typing import Tuple, Union, Any, Sequence
from collections import deque, defaultdict, OrderedDict
from ...validators.one import JustLen
from ...functional.mixins import CompositionClassMixin
from ..one import Just
dict_keys = type({}.keys())
odict_keys = type(OrderedDict({}).keys())
dict_values = type({}.values())
odict_values = type(OrderedDict({}).values())
dict_items = type({}.items())
odict_items = type(OrderedDict({}).items())
NAMED_TYPES = (frozenset, slice, range,
deque, defaultdict, OrderedDict,
dict_keys, dict_values, dict_items,
odict_keys, odict_values, odict_items)
TypesT = Union[type, Sequence[type]]
class TypedTuple(CompositionClassMixin):
"""Checks for different type(s) of each element in a defined-length tuple.
Parameters
----------
value : tuple
The tuple to check the length and element types of.
name : str, optional
The name of the tuple to check the length and the element type(s) of.
Defaults to None.
types : tuple(type), tuple(tuple(type))
        Tuple of the length to check for, containing either one type or a
        tuple of admissible types per element of `value`. Use the ellipsis
        literal ... to skip type checking of the tuple element at that
        position.
Returns
-------
tuple
The tuple passed in.
Methods
-------
o(callable) : CompositionOf
Daisy-chains the tuple length and type checker to another `callable`,
returning the functional composition of both. The argument `types` is
        passed through to the `TypedTuple` checker when calling the
        composition.
Raises
------
WrongTypeError
If `value` is not a tuple or if any of its elements do not have (one
of) the permitted type(s).
LenError
If the tuple passed in does not have the same length as `types` or
if the type specification does not have a meaningful length.
TypeError
If `types` is not a tuple or any of its elements are not of type type.
See Also
--------
All, JustLen, CompositionOf
"""
def __new__(cls, value: tuple, name=None, *, types=(), **kwargs) -> tuple:
cls.__name = str(name) if name is not None else ''
cls.__string = cls.__name or str(value)
types, length = cls.__valid(types)
value = JustLen.JustTuple(value, name=name, length=length)
for index, element in enumerate(value):
if not cls.__is_or_contains_ellipsis(types[index]):
element_name = f'element {index} in tuple {cls.__string}'
_ = Just(types[index])(element, name=element_name)
return value
@classmethod
def __valid(cls, types: Sequence[TypesT]) -> Tuple[TypesT, int]:
if type(types) not in (tuple, list, deque):
message = cls.__wrong_type_message_for(types)
raise TypeError(message)
return types, len(types)
@staticmethod
def __wrong_type_message_for(types: Any) -> str:
type_name = type(types).__name__
if isinstance(types, NAMED_TYPES):
of_type = type_name
else:
of_type = f'{type_name} like {types}'
return f'Type of types argument must be tuple, not {of_type}!'
@staticmethod
def __is_or_contains_ellipsis(types: TypesT) -> bool:
is_ellipsis = types is ...
try:
contains_ellipsis = ... in types
except TypeError:
contains_ellipsis = False
return is_ellipsis or contains_ellipsis
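A short usage sketch derived only from the docstring above; the checked tuple and its name are invented:

# One type (or tuple of types) per position; the ellipsis skips a position.
value = TypedTuple((3, 'label', 2.5), name='point',
                   types=(int, (str, bytes), ...))
assert value == (3, 'label', 2.5)  # returned unchanged on success
# A mismatched element type raises WrongTypeError, a length mismatch LenError.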
| 35.45098 | 78 | 0.641316 | 465 | 3,616 | 4.84086 | 0.283871 | 0.021324 | 0.023989 | 0.013327 | 0.097734 | 0.023989 | 0.023989 | 0 | 0 | 0 | 0 | 0 | 0.264657 | 3,616 | 101 | 79 | 35.80198 | 0.846559 | 0.358407 | 0 | 0.040816 | 0 | 0 | 0.053142 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081633 | false | 0 | 0.102041 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
54d0c963fcd5c7b6f9c7de58ed61e6d2623f1f5a | 3,501 | py | Python | cloudshell/cli/configurator.py | QualiSystems/cloudshell-cli | 9a38ff37e91e7798511e860603f5a8a79b782472 | ["Apache-2.0"] | 4 | 2017-01-31T14:05:19.000Z | 2019-04-10T16:35:44.000Z | cloudshell/cli/configurator.py | QualiSystems/cloudshell-cli | 9a38ff37e91e7798511e860603f5a8a79b782472 | ["Apache-2.0"] | 89 | 2016-05-25T14:17:38.000Z | 2022-03-17T13:09:59.000Z | cloudshell/cli/configurator.py | QualiSystems/cloudshell-cli | 9a38ff37e91e7798511e860603f5a8a79b782472 | ["Apache-2.0"] | 6 | 2016-07-21T12:24:10.000Z | 2022-02-21T06:33:18.000Z |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from cloudshell.cli.factory.session_factory import (
CloudInfoAccessKeySessionFactory,
GenericSessionFactory,
SessionFactory,
)
from cloudshell.cli.service.cli import CLI
from cloudshell.cli.session.ssh_session import SSHSession
from cloudshell.cli.session.telnet_session import TelnetSession
ABC = ABCMeta("ABC", (object,), {"__slots__": ()})
if sys.version_info >= (3, 0):
from functools import lru_cache
else:
from functools32 import lru_cache
class CLIServiceConfigurator(object):
REGISTERED_SESSIONS = (CloudInfoAccessKeySessionFactory(SSHSession), TelnetSession)
"""Using factories instead of """
def __init__(
self,
resource_config,
logger,
cli=None,
registered_sessions=None,
reservation_context=None,
):
"""Initialize CLI service configurator.
:param cloudshell.shell.standards.resource_config_generic_models.GenericCLIConfig resource_config: # noqa: E501
:param logging.Logger logger:
:param cloudshell.cli.service.cli.CLI cli:
:param registered_sessions: Session types and order
:param cloudshell.shell.core.driver_context.ReservationContextDetails reservation_context:
"""
self._cli = cli or CLI()
self._resource_config = resource_config
self._logger = logger
self._registered_sessions = registered_sessions or self.REGISTERED_SESSIONS
self._reservation_context = reservation_context
@property
def _cli_type(self):
"""Connection type property [ssh|telnet|console|auto]."""
return self._resource_config.cli_connection_type
@property
@lru_cache()
def _session_dict(self):
session_dict = defaultdict(list)
for sess in self._registered_sessions:
session_dict[sess.SESSION_TYPE.lower()].append(sess)
return session_dict
def initialize_session(self, session):
if not isinstance(session, SessionFactory):
session = GenericSessionFactory(session)
return session.init_session(
self._resource_config, self._logger, self._reservation_context
)
def _defined_sessions(self):
return [
self.initialize_session(sess)
for sess in self._session_dict.get(
self._cli_type.lower(), self._registered_sessions
)
]
def get_cli_service(self, command_mode):
"""Use cli.get_session to open CLI connection and switch into required mode.
:param CommandMode command_mode: operation mode, can be
default_mode/enable_mode/config_mode/etc.
:return: created session in provided mode
:rtype: cloudshell.cli.service.session_pool_context_manager.SessionPoolContextManager # noqa: E501
"""
return self._cli.get_session(
self._defined_sessions(), command_mode, self._logger
)
class AbstractModeConfigurator(ABC, CLIServiceConfigurator):
"""Used by shells to run enable/config command."""
@property
@abstractmethod
def enable_mode(self):
pass
@property
@abstractmethod
def config_mode(self):
pass
def enable_mode_service(self):
return self.get_cli_service(self.enable_mode)
def config_mode_service(self):
return self.get_cli_service(self.config_mode)
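A hedged sketch of how a shell driver might consume the configurator; constructing resource_config, logger, and the CommandMode is framework-specific and elided here, and the send_command call is assumed from the cloudshell-cli session API.

def run_show_version(resource_config, logger, enable_mode):
    # `enable_mode` is assumed to be a cloudshell.cli.service.command_mode.CommandMode.
    configurator = CLIServiceConfigurator(resource_config, logger)
    with configurator.get_cli_service(enable_mode) as cli_service:
        return cli_service.send_command('show version')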
| 32.119266 | 120 | 0.694087 | 380 | 3,501 | 6.139474 | 0.307895 | 0.061723 | 0.029147 | 0.02186 | 0.036005 | 0.036005 | 0.036005 | 0.036005 | 0.036005 | 0 | 0 | 0.004059 | 0.225935 | 3,501 | 108 | 121 | 32.416667 | 0.856827 | 0.234504 | 0 | 0.112676 | 0 | 0 | 0.004734 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.140845 | false | 0.028169 | 0.126761 | 0.042254 | 0.408451 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |