Columns and dtypes:

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
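The records below follow this schema, one block per file. As a minimal sketch of how the columns might be consumed (the dataset identifier is a placeholder; this assumes the rows are published as a Hugging Face `datasets`-compatible dataset):

```python
from datasets import load_dataset

# Placeholder id: substitute the real dataset repository or local data files.
ds = load_dataset("org/python-code-with-quality-signals", split="train", streaming=True)

for row in ds.take(3):
    # Each row carries the file identity, repo metadata, the raw file content,
    # and the precomputed qsc_* quality signals listed in the schema above.
    print(row["hexsha"], row["size"], row["lang"])
    print(row["qsc_code_frac_chars_comments_quality_signal"])
```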
hexsha: 9e71ac9c81a289cfab5784c2ca72d59fdcd7d4d0 | size: 3,300 | ext: py | lang: Python
max_stars: tests/test_css_parsing_tests.py @ cmulders/styler (head cffc6b99cc97e6299b75e84fe74e39216bd0109e), licenses ["Apache-2.0"], count null, events null / null
max_issues: tests/test_css_parsing_tests.py @ cmulders/styler (head cffc6b99cc97e6299b75e84fe74e39216bd0109e), licenses ["Apache-2.0"], count null, events null / null
max_forks: tests/test_css_parsing_tests.py @ cmulders/styler (head cffc6b99cc97e6299b75e84fe74e39216bd0109e), licenses ["Apache-2.0"], count null, events null / null
content:
```python
import codecs
import re
from collections import namedtuple
import unittest
from typing import Collection, Iterable, Sequence, Tuple, Type
import io
from pathlib import Path
from styler import decode
import json
import logging
from itertools import islice

logger = logging.getLogger(__name__)

CSS_PARSING_TESTS_DIR = Path(__file__).parent / "css-parsing-tests"
JSONCase = namedtuple("JSONCase", "case, expectation")

def pairs(iterable):
    "s -> (s0,s1), (s2,s3), (s4, s5), ..."
    return zip(
        islice(iterable, 0, None, 2),
        islice(iterable, 1, None, 2),
    )
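# For example (a hypothetical call, matching the docstring above):
#   list(pairs(["case1", "exp1", "case2", "exp2"]))
#   -> [("case1", "exp1"), ("case2", "exp2")]
# This matches the layout of the css-parsing-tests JSON files, which alternate
# a test case with its expected result in one flat list.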
class CSSParseTestCaseMeta(type):
    """Metaclass for dynamic test loading"""

    @classmethod
    def __prepare__(cls, clsname, bases, **kwargs):
        namespace = dict()
        if "cases" not in kwargs or unittest.TestCase not in bases:
            logger.warning(
                f"Class `{clsname}` should pass `cases` as a class keyword argument and must subclass unittest.TestCase; nothing loaded"
            )
            return namespace
        namespace["cases"] = list(cls.load_cases(kwargs["cases"]))
        for idx, case in enumerate(namespace["cases"]):
            name, fn = cls.create_test(idx, case)
            namespace[name] = fn
        return namespace

    def __new__(cls, name, bases, namespace, **kwargs):
        kwargs.pop("cases", None)  # Already processed in __prepare__
        return super().__new__(cls, name, bases, namespace, **kwargs)

    @classmethod
    def load_cases(cls, name) -> Iterable[JSONCase]:
        json_path = (CSS_PARSING_TESTS_DIR / name).with_suffix(".json")
        assert json_path.exists(), f"JSON cases file does not exist: {json_path}."
        with json_path.open("rb") as fd:
            raw_cases = json.load(fd)
        return map(JSONCase._make, pairs(raw_cases))

    @staticmethod
    def create_test(idx, case: JSONCase):
        def inner(self):
            self.run_case(case.case, case.expectation)
        if isinstance(case.case, dict) and "comment" in case.case:
            case_str = case.case["comment"]
        elif isinstance(case.case, dict) and "css_bytes" in case.case:
            case_str = case.case["css_bytes"]
        else:
            case_str = ""
        case_str = re.sub(r"[^\w]+", "_", case_str).strip("_").strip()
        if case_str:
            return f"test_{idx:03}_{case_str}", inner
        else:
            return f"test_{idx:03}", inner

class StylesheetBytesTestCase(
    unittest.TestCase,
    metaclass=CSSParseTestCaseMeta,
    cases="stylesheet_bytes",
):
    def run_case(self, case, expectation: Tuple[Iterable, str]):
        css_bytes = str(case["css_bytes"]).encode("latin1")
        protocol_encoding = case.get("protocol_encoding")
        environment_encoding = case.get("environment_encoding")
        expected_ast, expected_encoding = expectation
        stream = decode(
            io.BytesIO(css_bytes),
            protocol_encoding=protocol_encoding,
            environment_encoding=environment_encoding,
        )
        # Encoding matches the expectation
        self.assertEqual(
            codecs.lookup(expected_encoding).name,
            codecs.lookup(stream.encoding).name,
            f"Detected encoding {stream.encoding} instead of {expected_encoding}",
        )
```
avg_line_length: 30.841121 | max_line_length: 124 | alphanum_fraction: 0.638788
Quality signals (qsc_*_quality_signal columns, suffix omitted; unlisted values are 0): code_num_words 389; code_num_chars 3,300; code_mean_word_length 5.228792; code_frac_words_unique 0.359897; code_frac_chars_top_2grams 0.043265; code_frac_chars_top_3grams 0.023599; code_frac_chars_top_4grams 0.017699; code_frac_chars_dupe_5grams 0.094395; code_frac_chars_dupe_6grams 0.054081; code_frac_chars_dupe_7grams 0.024582; code_frac_chars_digital 0.006063; code_frac_chars_whitespace 0.250303; code_size_file_byte 3,300; code_num_lines 106; code_num_chars_line_max 125; code_num_chars_line_mean 31.132075; code_frac_chars_alphabet 0.816087; code_frac_chars_comments 0.044545; code_frac_lines_dupe_lines 0.075949; code_frac_chars_string_length 0.147974; code_frac_chars_long_word_length 0.00754; code_frac_lines_assert 0.025316; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.088608; codepython_cate_var_zero false; codepython_frac_lines_import 0.139241; codepython_score_lines_no_logic 0.341772
Raw qsc_* counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0
hexsha: 9e74579632486a6b6e9af658505be492f28cf2a0 | size: 1,886 | ext: py | lang: Python
max_stars: Callum/Day3/Day3.py @ JackDanielHarding/advent-of-code-2021 (head 5b860e36b4ac1af205c992763167ffef41a81a1b), licenses ["CC0-1.0"], count null, events null / null
max_issues: Callum/Day3/Day3.py @ JackDanielHarding/advent-of-code-2021 (head 5b860e36b4ac1af205c992763167ffef41a81a1b), licenses ["CC0-1.0"], count null, events null / null
max_forks: Callum/Day3/Day3.py @ JackDanielHarding/advent-of-code-2021 (head 5b860e36b4ac1af205c992763167ffef41a81a1b), licenses ["CC0-1.0"], count null, events null / null
content:
```python
from collections import Counter
from functools import reduce

with open("./input.txt", "r") as inputFile:
    readingsStr = inputFile.read().splitlines()

columnsRange = range(len(readingsStr[0]))
columns = map(lambda columnIndex : map(lambda row : row[columnIndex], readingsStr), columnsRange)
multiModes = map(lambda column: Counter(column).most_common(), columns)
multiModesWithoutCount = map(lambda mm: (mm[0][0], mm[1][0]), multiModes)
rates = reduce(lambda multiModeX, multiModeY: [multiModeX[0] + multiModeY[0], multiModeX[1] + multiModeY[1]], multiModesWithoutCount)
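# At this point `rates` holds two bit strings built column by column: the most
# common bit per column and the least common bit per column. With made-up
# input, rates might look like ['10110', '01001'].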
gamma = int(rates[0], 2)
epsilon = int(rates[1], 2)
print(f'Gamma: {gamma}, Epsilon: {epsilon}, Power: {gamma * epsilon}')

# Part 2
oxygenFilteredReadings = readingsStr.copy()
co2FilteredReadings = readingsStr.copy()
for columnIndex in range(len(readingsStr[0])):
    oxygenColumns = map(lambda row : row[columnIndex], oxygenFilteredReadings)
    oxygenCounter = Counter(oxygenColumns)
    oxygenMostCommon = oxygenCounter.most_common()[0]
    oxygenMostCommonVal = oxygenMostCommon[0]
    # Counter.total() requires Python 3.10+; a tie means exactly half the rows match
    if oxygenMostCommon[1] == oxygenCounter.total() / 2:
        oxygenMostCommonVal = '1'
    oxygenFilteredReadings = list(filter(lambda row : row[columnIndex] == oxygenMostCommonVal, oxygenFilteredReadings))
    co2Columns = map(lambda row : row[columnIndex], co2FilteredReadings)
    co2Counter = Counter(co2Columns)
    co2MostCommon = co2Counter.most_common()
    co2LeastCommon = co2MostCommon[len(co2MostCommon)-1]
    co2LeastCommonVal = co2LeastCommon[0]
    if co2LeastCommon[1] == co2Counter.total() / 2:
        co2LeastCommonVal = '0'
    co2FilteredReadings = list(filter(lambda row : row[columnIndex] == co2LeastCommonVal, co2FilteredReadings))

oxygen = int(oxygenFilteredReadings[0], 2)
co2 = int(co2FilteredReadings[0], 2)
print(f'Oxygen: {oxygen}, CO2: {co2}, Life Support Rating: {oxygen * co2}')
```
avg_line_length: 46 | max_line_length: 133 | alphanum_fraction: 0.73701
Quality signals (qsc_*_quality_signal columns, suffix omitted; unlisted values are 0): code_num_words 198; code_num_chars 1,886; code_mean_word_length 7.005051; code_frac_words_unique 0.333333; code_frac_chars_top_2grams 0.038933; code_frac_chars_top_3grams 0.043259; code_frac_chars_top_4grams 0.082913; code_frac_chars_dupe_5grams 0.103821; code_frac_chars_dupe_6grams 0.047585; code_frac_chars_digital 0.031882; code_frac_chars_whitespace 0.135207; code_size_file_byte 1,886; code_num_lines 41; code_num_chars_line_max 134; code_num_chars_line_mean 46; code_frac_chars_alphabet 0.818516; code_frac_chars_comments 0.003181; code_frac_chars_string_length 0.073976; codepython_cate_ast 1; codepython_cate_var_zero false; codepython_frac_lines_import 0.060606; codepython_score_lines_no_logic 0.060606; codepython_frac_lines_print 0.060606
Raw qsc_* counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0
hexsha: 9e753ccf2f01c17789c789b78559c01a411800d2 | size: 2,637 | ext: py | lang: Python
max_stars: shell/shell.py @ utep-cs-systems-courses/1-shell-EdwinTomy (head 5e15372a49712584bc6a1bf3d8a508eb5328287a), licenses ["BSD-3-Clause"], count null, events null / null
max_issues: shell/shell.py @ utep-cs-systems-courses/1-shell-EdwinTomy (head 5e15372a49712584bc6a1bf3d8a508eb5328287a), licenses ["BSD-3-Clause"], count null, events null / null
max_forks: shell/shell.py @ utep-cs-systems-courses/1-shell-EdwinTomy (head 5e15372a49712584bc6a1bf3d8a508eb5328287a), licenses ["BSD-3-Clause"], count null, events null / null
content:
```python
import os, sys, re

while True:
    path = os.getcwd() + " $"
    # User input
    os.write(1, path.encode())
    args = os.read(0, 1000).decode().split()
    # Exit
    if args[0] == "exit":
        if len(args) > 1:
            print("Program terminated with exit code", args[1])
            sys.exit(int(args[1]))
        print("Program terminated without exit code")
        sys.exit(1)
    # Change Directory
    if args[0] == "cd":
        try:
            if len(args) < 2:
                os.chdir(os.path.expanduser("~"))
            else:
                os.chdir(args[1])
        except FileNotFoundError:
            print("File not found!")
        continue
    # Forking
    rc = os.fork()
    if rc < 0:
        os.write(1, b"Fork failure :( !\n")  # os.write() takes bytes, not str
        sys.exit(1)
    # Child process for redirect & piping
    elif rc == 0:
        # Redirect output
        if '>' in args:
            i = args.index('>')
            os.close(1)
            os.open(args[i+1], os.O_CREAT | os.O_WRONLY)
            os.set_inheritable(1, True)
            child_command = args[:i]
        # Redirect input
        elif '<' in args:
            i = args.index('<')
            os.close(0)
            os.open(args[i+1], os.O_RDONLY)
            os.set_inheritable(0, True)
            child_command = args[:i]
        # Piping
        elif '|' in args:
            i = args.index('|')
            pipe1 = args[:i]
            pipe2 = args[(i + 1):]
            pr, pw = os.pipe()
            os.set_inheritable(pr, True)
            os.set_inheritable(pw, True)
            pipe_child = os.fork()
            if pipe_child < 0:
                sys.exit(1)
            if pipe_child == 0:
                # Writer side: stdout goes into the pipe
                os.close(1)
                os.dup(pw)
                os.set_inheritable(1, True)
                os.close(pr)
                os.close(pw)
                child_command = pipe1
            else:
                # Reader side: stdin comes from the pipe
                os.close(0)
                os.dup(pr)
                os.set_inheritable(0, True)
                os.close(pr)
                os.close(pw)
                child_command = pipe2
        # Plain command: run it as-is
        else:
            child_command = args
        # Try each directory in PATH
        for directory in re.split(":", os.environ['PATH']):
            program = "%s/%s" % (directory, child_command[0])
            try:
                os.execve(program, child_command, os.environ)
            except FileNotFoundError:
                pass
        print("Command not found")
        sys.exit(1)
    # Wait for the child (no background processes yet)
    else:
        childPidCode = os.wait()
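# An illustrative session with this shell (output depends on the machine):
#   /home/user $ ls | wc -l
#   12
#   /home/user $ cd /tmp
#   /tmp $ exit 0
#   Program terminated with exit code 0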
```
avg_line_length: 24.877358 | max_line_length: 63 | alphanum_fraction: 0.454683
Quality signals (qsc_*_quality_signal columns, suffix omitted; unlisted values are 0): code_num_words 304; code_num_chars 2,637; code_mean_word_length 3.884868; code_frac_words_unique 0.266447; code_frac_chars_top_2grams 0.038103; code_frac_chars_top_3grams 0.081287; code_frac_chars_top_4grams 0.027942; code_frac_chars_dupe_5grams 0.292125; code_frac_chars_dupe_6grams 0.232007; code_frac_chars_dupe_7grams 0.211685; code_frac_chars_dupe_8grams 0.211685; code_frac_chars_dupe_9grams 0.211685; code_frac_chars_dupe_10grams 0.154107; code_frac_chars_digital 0.025624; code_frac_chars_whitespace 0.422829; code_size_file_byte 2,637; code_num_lines 105; code_num_chars_line_max 64; code_num_chars_line_mean 25.114286; code_frac_chars_alphabet 0.750329; code_frac_chars_comments 0.072431; code_frac_lines_dupe_lines 0.337838; code_frac_chars_string_length 0.058751; codepython_cate_ast 1; codepython_cate_var_zero false; codepython_frac_lines_pass 0.027027; codepython_frac_lines_import 0.013514; codepython_score_lines_no_logic 0.013514; codepython_frac_lines_print 0.054054
Raw qsc_* counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0
hexsha: 9e78a464d85758a6410cf9ef2916db721432642c | size: 4,860 | ext: py | lang: Python
max_stars: radar_label_convert_kitti_format.py @ wzan0001/Astyx-radar-dataset-convert-to-kitti-format (head f0e6bf04fc9cd7b49c96f09803598a2c8561bf5a), licenses ["MIT"], count 12, events 2019-11-04T08:56:41.000Z / 2022-03-29T05:47:14.000Z
max_issues: radar_label_convert_kitti_format.py @ paland3/Astyx-radar-dataset-convert-to-kitti-format (head f0e6bf04fc9cd7b49c96f09803598a2c8561bf5a), licenses ["MIT"], count 3, events 2019-12-04T18:19:06.000Z / 2020-10-08T12:34:21.000Z
max_forks: radar_label_convert_kitti_format.py @ paland3/Astyx-radar-dataset-convert-to-kitti-format (head f0e6bf04fc9cd7b49c96f09803598a2c8561bf5a), licenses ["MIT"], count 3, events 2019-12-04T18:06:37.000Z / 2020-10-01T09:25:10.000Z
content:
```python
#####################################################
##  Convert radar data to KITTI format             ##
#####################################################
import json
import math
import os
import numpy as np
import utils

def rotMat2quatern(R):
    # transform the rotation matrix into a quaternion
    q = np.zeros(4)
    K = np.zeros([4, 4])
    K[0, 0] = 1 / 3 * (R[0, 0] - R[1, 1] - R[2, 2])
    K[0, 1] = 1 / 3 * (R[1, 0] + R[0, 1])
    K[0, 2] = 1 / 3 * (R[2, 0] + R[0, 2])
    K[0, 3] = 1 / 3 * (R[1, 2] - R[2, 1])
    K[1, 0] = 1 / 3 * (R[1, 0] + R[0, 1])
    K[1, 1] = 1 / 3 * (R[1, 1] - R[0, 0] - R[2, 2])
    K[1, 2] = 1 / 3 * (R[2, 1] + R[1, 2])
    K[1, 3] = 1 / 3 * (R[2, 0] - R[0, 2])
    K[2, 0] = 1 / 3 * (R[2, 0] + R[0, 2])
    K[2, 1] = 1 / 3 * (R[2, 1] + R[1, 2])
    K[2, 2] = 1 / 3 * (R[2, 2] - R[0, 0] - R[1, 1])
    K[2, 3] = 1 / 3 * (R[0, 1] - R[1, 0])
    K[3, 0] = 1 / 3 * (R[1, 2] - R[2, 1])
    K[3, 1] = 1 / 3 * (R[2, 0] - R[0, 2])
    K[3, 2] = 1 / 3 * (R[0, 1] - R[1, 0])
    K[3, 3] = 1 / 3 * (R[0, 0] + R[1, 1] + R[2, 2])
    D, V = np.linalg.eig(K)
    # Pick the eigenvector belonging to the largest eigenvalue
    pp = 0
    for i in range(1, 4):
        if D[i] > D[pp]:
            pp = i
    q = V[:, pp]
    q = np.array([q[3], q[0], q[1], q[2]])
    #print(q)
    return q

def quat_to_angle(quat):
    x = quat[0]
    y = quat[1]
    z = quat[2]
    w = quat[3]
    rol = math.atan2(2*(w*x+y*z), 1-2*(x*x+y*y))  # the rol is the yaw angle!
    #pitch = math.asin(2*(w*y-z*x))
    #yaw = math.atan2(2*(w*z+x*y), 1-2*(z*z+y*y))
    return rol

def quaternionToRotationMatrix(quat):
    q = quat.copy()
    q = np.array(q)
    n = np.dot(q, q)
    if n < np.finfo(q.dtype).eps:
        rot_matrix = np.identity(4)
        return rot_matrix
    q = q * np.sqrt(2.0 / n)
    q = np.outer(q, q)
    rot_matrix = np.array(
        [[1.0 - q[2, 2] - q[3, 3], q[1, 2] + q[3, 0], q[1, 3] - q[2, 0]],
         [q[1, 2] - q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] + q[1, 0]],
         [q[1, 3] + q[2, 0], q[2, 3] - q[1, 0], 1.0 - q[1, 1] - q[2, 2]]],
        dtype=q.dtype)
    return rot_matrix
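# Quick sanity check (illustrative): for the identity rotation,
# quaternionToRotationMatrix(np.array([1.0, 0.0, 0.0, 0.0])) should return
# (approximately) np.eye(3), and rotMat2quatern(np.eye(3)) should recover a
# quaternion equivalent to [1, 0, 0, 0] up to sign.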
def radarcoordToCameracoordYaw(quat, frame_calib):
    radar_quat_to_mat = quaternionToRotationMatrix(quat)
    radar_to_camera_mat = np.array(frame_calib.tr_velodyne_to_cam)
    radar_to_camera_mat = radar_to_camera_mat[:, 0:3]
    rot_mat = np.dot(radar_to_camera_mat, radar_quat_to_mat)
    rot_quat = rotMat2quatern(rot_mat)
    angles = quat_to_angle(rot_quat)
    return angles

def label_convert(save_dir, read_dir, calib_dir):
    name_list = []
    for file in os.listdir(read_dir):
        name_list.append(file)
    for name in name_list:
        read_name = read_dir + name
        save_name = save_dir + name[0:6] + '.txt'
        img_idx = int(name[0:6])
        print(save_name)
        frame_calib = utils.read_calibration(calib_dir, img_idx)
        with open(save_name, mode='w') as save_txt_file_name:
            with open(read_name, mode='r') as read_json_file_name:
                read_object = json.load(read_json_file_name)  # dict
                objts = read_object['objects']  # list
                for oo in objts:
                    obj = oo  # dict
                    annotation = []
                    if obj['classname'] == 'Other Vehicle':
                        annotation.append('Other_Vehicle')
                    else:
                        annotation.append(obj['classname'])
                    annotation.append('0')  # truncated (unused)
                    annotation.append(str(obj['occlusion']))
                    annotation.append('-10')  # alpha (unused)
                    annotation.append('0')  # 2d box (unused)
                    annotation.append('0')
                    annotation.append('0')
                    annotation.append('0')
                    dim = obj['dimension3d']
                    annotation.append(str(dim[2]))  # h
                    annotation.append(str(dim[1]))  # w
                    annotation.append(str(dim[0]))  # l
                    centerpoint = np.array(obj['center3d'])
                    centerpoint = np.reshape(centerpoint, (1, 3))
                    camera_centerpoint = utils.radar_to_cam_frame(centerpoint, frame_calib)  # transform to camera coordinates
                    annotation.append(str(camera_centerpoint[0][0]))
                    annotation.append(str(camera_centerpoint[0][1] + dim[2]*0.5))  # top center point
                    annotation.append(str(camera_centerpoint[0][2]))
                    orientation_quat = obj['orientation_quat']  # quaternion
                    yaw_ang = radarcoordToCameracoordYaw(orientation_quat, frame_calib)
                    annotation.append(str(yaw_ang))
                    annotation.append('0')
                    str_anot = ' '.join(annotation)
                    #print(str_anot)
                    save_txt_file_name.write(str_anot + '\n')
```
avg_line_length: 37.384615 | max_line_length: 122 | alphanum_fraction: 0.480864
Quality signals (qsc_*_quality_signal columns, suffix omitted; unlisted values are 0): code_num_words 746; code_num_chars 4,860; code_mean_word_length 3.016086; code_frac_words_unique 0.178284; code_frac_chars_top_2grams 0.017778; code_frac_chars_top_3grams 0.021333; code_frac_chars_top_4grams 0.012444; code_frac_chars_dupe_5grams 0.18; code_frac_chars_dupe_6grams 0.152889; code_frac_chars_dupe_7grams 0.072889; code_frac_chars_dupe_8grams 0.066667; code_frac_chars_dupe_9grams 0.059556; code_frac_chars_dupe_10grams 0.027556; code_frac_chars_digital 0.073058; code_frac_chars_whitespace 0.33251; code_size_file_byte 4,860; code_num_lines 129; code_num_chars_line_max 123; code_num_chars_line_mean 37.674419; code_frac_chars_alphabet 0.62053; code_frac_chars_comments 0.065432; code_frac_lines_dupe_lines 0.075472; code_frac_chars_string_length 0.0256; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.04717; codepython_cate_var_zero false; codepython_frac_lines_import 0.04717; codepython_score_lines_no_logic 0.141509; codepython_frac_lines_print 0.009434
Raw qsc_* counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0
hexsha: 9e7a0da2b81a2065d69c0b76472c3f6bc721ee3a | size: 2,739 | ext: py | lang: Python
max_stars: wb/main/jobs/accuracy_analysis/per_tensor/create_per_tensor_scripts_job.py @ apaniukov/workbench (head 2f2653ecfd0143d2d53e33ad84379f13443fdfaa), licenses ["Apache-2.0"], count 23, events 2022-03-17T12:24:09.000Z / 2022-03-31T09:13:30.000Z
max_issues: wb/main/jobs/accuracy_analysis/per_tensor/create_per_tensor_scripts_job.py @ apaniukov/workbench (head 2f2653ecfd0143d2d53e33ad84379f13443fdfaa), licenses ["Apache-2.0"], count 18, events 2022-03-21T08:17:44.000Z / 2022-03-30T12:42:30.000Z
max_forks: wb/main/jobs/accuracy_analysis/per_tensor/create_per_tensor_scripts_job.py @ apaniukov/workbench (head 2f2653ecfd0143d2d53e33ad84379f13443fdfaa), licenses ["Apache-2.0"], count 16, events 2022-03-17T12:24:14.000Z / 2022-03-31T12:15:12.000Z
content:
```python
"""
OpenVINO DL Workbench
Class for creating per tensor scripts job
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from contextlib import closing
from pathlib import Path
from sqlalchemy.orm import Session
from config.constants import (ACCURACY_ARTIFACTS_FOLDER, JOBS_SCRIPTS_FOLDER_NAME, JOB_SCRIPT_NAME)
from wb.extensions_factories.database import get_db_session_for_celery
from wb.main.enumerates import JobTypesEnum, StatusEnum
from wb.main.jobs.interfaces.ijob import IJob
from wb.main.models import (PerTensorReportJobsModel, CreatePerTensorScriptsJobModel)
from wb.main.scripts.job_scripts_generators.tensor_distance_job_script_generator import \
get_tensor_distance_job_script_generator
from wb.main.utils.utils import create_empty_dir
class CreatePerTensorScriptsJob(IJob):
job_type = JobTypesEnum.create_per_tensor_scripts_type
_job_model_class = CreatePerTensorScriptsJobModel
def __init__(self, job_id: int, **unused_kwargs):
super().__init__(job_id=job_id)
self._attach_default_db_and_socket_observers()
def run(self):
self._job_state_subject.update_state(status=StatusEnum.running, progress=0)
with closing(get_db_session_for_celery()) as session:
session: Session
job_model: CreatePerTensorScriptsJobModel = self.get_job_model(session)
accuracy_artifacts_path = Path(ACCURACY_ARTIFACTS_FOLDER) / str(job_model.pipeline_id)
scripts_path = accuracy_artifacts_path / JOBS_SCRIPTS_FOLDER_NAME
job_script_file_path = scripts_path / JOB_SCRIPT_NAME
create_empty_dir(scripts_path)
pipeline_id = job_model.pipeline_id
per_tensor_report_job_model: PerTensorReportJobsModel = (
session.query(PerTensorReportJobsModel).filter_by(pipeline_id=pipeline_id).first()
)
job_script_generator = get_tensor_distance_job_script_generator(per_tensor_report_job_model)
job_script_generator.create(job_script_file_path)
self.on_success()
def on_success(self):
self._job_state_subject.update_state(status=StatusEnum.ready, progress=100)
self._job_state_subject.detach_all_observers()
```
avg_line_length: 44.177419 | max_line_length: 104 | alphanum_fraction: 0.775831
Quality signals (qsc_*_quality_signal columns, suffix omitted; unlisted values are 0): code_num_words 362; code_num_chars 2,739; code_mean_word_length 5.538674; code_frac_words_unique 0.403315; code_frac_chars_top_2grams 0.040399; code_frac_chars_top_3grams 0.024938; code_frac_chars_top_4grams 0.034414; code_frac_chars_dupe_5grams 0.174564; code_frac_chars_dupe_6grams 0.114713; code_frac_chars_dupe_7grams 0.049875; code_frac_chars_dupe_8grams 0.049875; code_frac_chars_dupe_9grams 0.049875; code_frac_chars_digital 0.005256; code_frac_chars_whitespace 0.166484; code_size_file_byte 2,739; code_num_lines 61; code_num_chars_line_max 105; code_num_chars_line_mean 44.901639; code_frac_chars_alphabet 0.872974; code_frac_chars_comments 0.228185; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.083333; codepython_cate_var_zero false; codepython_frac_lines_import 0.277778; codepython_score_lines_no_logic 0.444444
Raw qsc_* counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0
hexsha: 9e82bb1c42a0dd7d3d0090469ffab04c743997a6 | size: 3,526 | ext: py | lang: Python
max_stars: basic/wordcount.py @ duyduc27/Google-s-Python-Class (head 1ea9ab6e4d4f60564f4226b9ff9aaf94b1854a7d), licenses ["Apache-2.0"], count null, events null / null
max_issues: basic/wordcount.py @ duyduc27/Google-s-Python-Class (head 1ea9ab6e4d4f60564f4226b9ff9aaf94b1854a7d), licenses ["Apache-2.0"], count null, events null / null
max_forks: basic/wordcount.py @ duyduc27/Google-s-Python-Class (head 1ea9ab6e4d4f60564f4226b9ff9aaf94b1854a7d), licenses ["Apache-2.0"], count null, events null / null
content:
```python
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/

"""Wordcount exercise
Google's Python class

The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.

1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...

Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.

2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.

Use str.split() (no arguments) to split on all whitespace.

Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.

Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""

import sys

# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
###

# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.

def text_to_words(the_text):
    my_substitutions = the_text.maketrans(
        # If you find any of these (26 letters + 10 digits + 32 punctuation chars)
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&()*+,-./:;<=>?@[]^_`{|}~'\\",
        # Replace them by these; both strings must be the same length (68 chars),
        # so the lowercase letters are padded with spaces
        "abcdefghijklmnopqrstuvwxyz" + " " * 42)
    # Translate the text now.
    cleaned_text = the_text.translate(my_substitutions)
    wds = cleaned_text.split()
    return wds

def get_words_in_file(file):
    f = open(file, 'r')
    content = f.read()
    wds = text_to_words(content)
    f.close()
    return wds

def make_dic_from_wds(file):
    dic = {}  # initial dictionary
    lis_wds = get_words_in_file(file)
    lis_wds.sort()
    for word in lis_wds:
        if word not in dic:
            dic[word] = 1
        else:
            dic[word] += 1
    return dic
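# For example, a file containing "The the THE cat." would yield
# {'cat': 1, 'the': 3}: the translation table lowercases letters and blanks
# out digits and punctuation before splitting.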
def print_words(filename):
    """Analyse a text file. Print words and their counts."""
    dic = make_dic_from_wds(filename)
    print("Word Count")
    print("=======================")
    for k, v in dic.items():
        print(k, " ", v)

def print_top(filename):
    """Print the 20 most common words, sorted so the most common word is first, and so on."""
    dic = make_dic_from_wds(filename)
    print("=======================")
    print("20 most common words")
    n = 0
    for key, value in sorted(dic.items(), key=lambda kv: kv[1], reverse=True):
        print(key, " ", value)
        n += 1
        if n >= 20:
            break

def main():
    if len(sys.argv) != 3:
        print('usage: ./wordcount.py {--count | --topcount} file')
        sys.exit(1)
    option = sys.argv[1]
    filename = sys.argv[2]
    if option == '--count':
        print_words(filename)
    elif option == '--topcount':
        print_top(filename)
    else:
        print('unknown option: ' + option)
        sys.exit(1)

if __name__ == '__main__':
    main()
```
avg_line_length: 28.208 | max_line_length: 85 | alphanum_fraction: 0.67612
Quality signals (qsc_*_quality_signal columns, suffix omitted; unlisted values are 0): code_num_words 531; code_num_chars 3,526; code_mean_word_length 4.386064; code_frac_words_unique 0.380414; code_frac_chars_top_2grams 0.042937; code_frac_chars_top_3grams 0.027909; code_frac_chars_top_4grams 0.030915; code_frac_chars_dupe_5grams 0.145556; code_frac_chars_dupe_6grams 0.100472; code_frac_chars_dupe_7grams 0.100472; code_frac_chars_dupe_8grams 0.07471; code_frac_chars_dupe_9grams 0.042078; code_frac_chars_dupe_10grams 0.042078; code_frac_chars_digital 0.015232; code_frac_chars_whitespace 0.199376; code_size_file_byte 3,526; code_num_lines 124; code_num_chars_line_max 86; code_num_chars_line_mean 28.435484; code_frac_chars_alphabet 0.809777; code_frac_chars_comments 0.547646; code_frac_lines_dupe_lines 0.181818; code_frac_lines_long_string 0.018182; code_frac_chars_string_length 0.19561; code_frac_chars_long_word_length 0.071014; code_frac_lines_prompt_comments 0.008065; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.109091; codepython_cate_var_zero false; codepython_frac_lines_import 0.018182; codepython_score_lines_no_logic 0.181818; codepython_frac_lines_print 0.218182
Raw qsc_* counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0
hexsha: 9e87cdddbb6985c539e2f3fd8f43bf67a78297aa | size: 862 | ext: py | lang: Python
max_stars: setup.py @ al45tair/pygeon (head 70e95f6ffc8988fa212e312452d4688e0e544966), licenses ["MIT"], count 1, events 2022-02-26T17:14:38.000Z / 2022-02-26T17:14:38.000Z
max_issues: setup.py @ al45tair/pygeon (head 70e95f6ffc8988fa212e312452d4688e0e544966), licenses ["MIT"], count null, events null / null
max_forks: setup.py @ al45tair/pygeon (head 70e95f6ffc8988fa212e312452d4688e0e544966), licenses ["MIT"], count null, events null / null
content:
```python
# -*- coding: utf-8 -*-
from setuptools import setup

with open('README.rst', 'rb') as f:
    long_desc = f.read().decode('utf-8')

setup(name='pygeon',
      version='0.1.0',
      description='IP Geolocation in Python',
      long_description=long_desc,
      author='Alastair Houghton',
      author_email='alastair@alastairs-place.net',
      url='http://bitbucket.org/al45tair/pygeon',
      license='MIT License',
      packages=['pygeon'],
      classifiers=[
          'Development Status :: 4 - Beta',
          'License :: OSI Approved :: MIT License',
          'Topic :: Software Development :: Libraries',
          'Topic :: System :: Networking'
      ],
      scripts=['scripts/pygeon'],
      install_requires=[
          'sqlalchemy >= 0.9.8',
          'IPy >= 0.82',
          'bintrees >= 2.0.1'
      ],
      provides=['pygeon']
)
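# Typical usage from the package root (illustrative):
#   pip install .          # install the package and its dependencies
#   python setup.py sdist  # build a source distribution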
```
avg_line_length: 28.733333 | max_line_length: 55 | alphanum_fraction: 0.558005
Quality signals (qsc_*_quality_signal columns, suffix omitted; unlisted values are 0): code_num_words 92; code_num_chars 862; code_mean_word_length 5.173913; code_frac_words_unique 0.706522; code_frac_chars_top_2grams 0.016807; code_frac_chars_digital 0.027287; code_frac_chars_whitespace 0.277262; code_size_file_byte 862; code_num_lines 29; code_num_chars_line_max 56; code_num_chars_line_mean 29.724138; code_frac_chars_alphabet 0.736758; code_frac_chars_comments 0.024362; code_frac_lines_dupe_lines 0.076923; code_frac_chars_string_length 0.424315; code_frac_chars_long_word_length 0.033373; codepython_cate_ast 1; codepython_cate_var_zero false; codepython_frac_lines_import 0.038462; codepython_score_lines_no_logic 0.038462
Raw qsc_* counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0
hexsha: 9e8906fbd78257ce287c1863743dd186ef2262c2 | size: 3,535 | ext: py | lang: Python
max_stars: Multi_Page_WebApp/services/python_worker/receive.py @ Anthogr/netcdf_editor_app (head e1d5fe9bcb5e9374dceec517c3532743dd7f2539), licenses ["MIT"], count 8, events 2020-11-04T15:55:02.000Z / 2021-09-02T11:12:50.000Z
max_issues: Multi_Page_WebApp/services/python_worker/receive.py @ Anthogr/netcdf_editor_app (head e1d5fe9bcb5e9374dceec517c3532743dd7f2539), licenses ["MIT"], count 88, events 2020-10-09T14:32:12.000Z / 2021-07-21T14:09:58.000Z
max_forks: Multi_Page_WebApp/services/python_worker/receive.py @ Anthogr/netcdf_editor_app (head e1d5fe9bcb5e9374dceec517c3532743dd7f2539), licenses ["MIT"], count 5, events 2020-11-10T17:10:24.000Z / 2021-10-05T03:11:47.000Z
content:
```python
#!/usr/bin/env python
from datetime import datetime
import pika
import os
import sys
import steps  # noqa: F401
import json

from climate_simulation_platform.db import step_parameters, save_step, step_seen
from climate_simulation_platform import create_app

def func_params(func, body):
    # If "invalidated" isn't in the keys then this is a "root" call, meaning it should be run
    if "invalidated" not in body.keys():
        return body
    # If 'invalidated': 'y(es)' is in the body then the step has been invalidated.
    # It should be rerun IF it has already been run before OR has no params.
    # We will rerun it with the same parameters
    if "invalidated" in body.keys() and body["invalidated"].lower() in ["yes", "y"]:
        if "has_params" in body.keys() and body["has_params"].lower() in ["no", "n"]:
            return body
        app = create_app()
        with app.app_context():
            if step_seen(body["id"], func):
                return step_parameters(body["id"], func)
    return None

def main():
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=os.environ["BROKER_HOSTNAME"])
    )
    app = create_app()
    channel = connection.channel()
    channel.exchange_declare(exchange="preprocessing", exchange_type="topic")
    channel.queue_declare(queue="preprocessing_python_task_queue", durable=True)
    channel.queue_bind(
        exchange="preprocessing",
        queue="preprocessing_python_task_queue",
        routing_key="preprocessing.*.python",
    )
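    # Illustrative example: a message published with routing key
    # "preprocessing.regrid.python" is delivered to this queue, and the
    # callback below extracts func = "regrid" from the middle segment.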
    def callback(ch, method, properties, body):
        routing_key = method.routing_key
        print(
            f" [x] {datetime.now()} Received message from {routing_key} with body: {body.decode()}",
            flush=True,
        )
        func = routing_key.split(".")[1]
        body = json.loads(body.decode())
        params = func_params(func, body)
        print(f"{datetime.now()} Params: {params}", flush=True)
        if params is not None:
            _id = body["id"]
            if func != "invalidate":
                with app.app_context():
                    save_step(_id, func, params, up_to_date=False)
            eval(f"steps.{func}({params})")
            if func != "invalidate":
                with app.app_context():
                    save_step(_id, func, params, up_to_date=True)
        routing_key_done = ".".join([*routing_key.split(".")[:2], "done"])
        channel.basic_publish(
            exchange="preprocessing",
            routing_key=routing_key_done,
            body=json.dumps(body),
            properties=pika.BasicProperties(
                delivery_mode=2,  # make message persistent
            ),
        )
        print(
            " [x] {} Sent message to {} {}".format(
                datetime.now(), routing_key_done, body
            ),
            flush=True,
        )
        print(f" [x] {datetime.now()} Done", flush=True)
        ch.basic_ack(delivery_tag=method.delivery_tag)

    channel.basic_qos(prefetch_count=1)
    channel.basic_consume(
        queue="preprocessing_python_task_queue", on_message_callback=callback
    )
    print(
        f" [*] {datetime.now()} Waiting for messages. To exit press CTRL+C", flush=True
    )
    channel.start_consuming()

if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("Interrupted")
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
```
avg_line_length: 33.037383 | max_line_length: 100 | alphanum_fraction: 0.595474
Quality signals (qsc_*_quality_signal columns, suffix omitted; unlisted values are 0): code_num_words 416; code_num_chars 3,535; code_mean_word_length 4.879808; code_frac_words_unique 0.353365; code_frac_chars_top_2grams 0.049261; code_frac_chars_top_3grams 0.014778; code_frac_chars_top_4grams 0.025123; code_frac_chars_dupe_5grams 0.14335; code_frac_chars_dupe_6grams 0.060099; code_frac_chars_dupe_7grams 0.060099; code_frac_chars_dupe_8grams 0.060099; code_frac_chars_dupe_9grams 0.060099; code_frac_chars_dupe_10grams 0.060099; code_frac_chars_digital 0.003589; code_frac_chars_whitespace 0.290523; code_size_file_byte 3,535; code_num_lines 106; code_num_chars_line_max 101; code_num_chars_line_mean 33.349057; code_frac_chars_alphabet 0.805821; code_frac_chars_comments 0.093918; code_frac_lines_dupe_lines 0.235294; code_frac_lines_long_string 0.011765; code_frac_chars_string_length 0.170213; code_frac_chars_long_word_length 0.042866; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.035294; codepython_cate_var_zero false; codepython_frac_lines_import 0.094118; codepython_score_lines_no_logic 0.176471; codepython_frac_lines_print 0.070588
Raw qsc_* counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0
hexsha: 9e8bb6044559a80cc3e9ba40b40090e9b9222e9d | size: 7,764 | ext: py | lang: Python
max_stars: run_cqa_inference.py @ SeonjeongHwang/coqa_cqa (head 67169b62e4d213d0e61cd31d844ad9665918049b), licenses ["Apache-2.0"], count 1, events 2022-02-22T07:05:40.000Z / 2022-02-22T07:05:40.000Z
max_issues: run_cqa_inference.py @ SeonjeongHwang/coqa_cqa (head 67169b62e4d213d0e61cd31d844ad9665918049b), licenses ["Apache-2.0"], count null, events null / null
max_forks: run_cqa_inference.py @ SeonjeongHwang/coqa_cqa (head 67169b62e4d213d0e61cd31d844ad9665918049b), licenses ["Apache-2.0"], count null, events null / null
content:
```python
import os
import sys
import random
import json
import tqdm
import pickle
import collections  # used by RawResult below (may also arrive via the star import)

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import numpy as np
from transformers import BertTokenizer, BertModel, AdamW, get_linear_schedule_with_warmup

from tool.data_process import *
from tool.inference_utils import write_predictions

MIN_FLOAT = -1e30

import argparse
parser = argparse.ArgumentParser(description="CQA")

### Arguments for Training
parser.add_argument("--batch-size", type=int)
### Directories
parser.add_argument("--output-dir", type=str)
parser.add_argument("--result-dir", type=str)
### Arguments for Dataset
parser.add_argument("--num-turn", type=int, default=3)
parser.add_argument("--max-seq-length", type=int, default=512)
parser.add_argument("--max-history-length", type=int, default=128)
parser.add_argument("--doc-stride", type=int, default=192)
parser.add_argument("--model-name", type=str, default="bert-cased-large")
### Inference Setting
parser.add_argument("--n-best-size", type=int, default=5)
parser.add_argument("--max-answer-length", type=int, default=30)
args = parser.parse_args()
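# Example invocation (paths and values are illustrative):
#   python run_cqa_inference.py --batch-size 8 --output-dir outputs --result-dir exp1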
exp_dir = os.path.join(args.output_dir, args.result_dir)
model_file = exp_dir + "/model/model.pth"
tokenizer_dir = exp_dir + "/tokenizer"
config = exp_dir + "/config.json"

with open(config, "r") as f:
    config_items = json.load(f)
model_name = config_items["model_name"]
max_seq_length = config_items["max_seq_length"]
max_history_length = config_items["max_history_length"]
doc_stride = config_items["doc_stride"]
num_turn = config_items["num_turn"]

test_data = "data/coqa/coqa-dev-v1.0.json"
test_example = f"data/coqa/dev_{args.num_turn}_examples.pkl"
test_feature = f"data/coqa/dev_{args.num_turn}_features.pkl"

def seed_everything(seed):
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

seed = 2022
seed_everything(seed)

class CQADataset(Dataset):  # renamed from `Dataset` to avoid shadowing torch.utils.data.Dataset
    def __init__(self, data_file, example_file, feature_file, tokenizer, mode):
        if os.path.exists(example_file):
            print(f"Loading {mode} examples from {example_file}...")
            with open(example_file, "rb") as f:
                self.examples = pickle.load(f)
        else:
            print(f"Generating {mode} examples...")
            self.examples = read_manmade_example(input_file=data_file, is_training=False, num_turn=num_turn)
            print(f"Save the examples to {example_file}...")
            with open(example_file, "wb") as f:
                pickle.dump(self.examples, f, pickle.HIGHEST_PROTOCOL)
        if os.path.exists(feature_file):
            print(f"Loading {mode} features from {feature_file}...")
            with open(feature_file, "rb") as f:
                self.features = pickle.load(f)
        else:
            print(f"Generating {mode} features...")
            self.features = convert_examples_to_features(examples=self.examples,
                                                         tokenizer=tokenizer,
                                                         max_seq_length=max_seq_length,
                                                         max_history_length=max_history_length,
                                                         doc_stride=doc_stride,
                                                         is_training=False)
            print(f"Save the features to {feature_file}...")
            with open(feature_file, "wb") as f:
                pickle.dump(self.features, f, pickle.HIGHEST_PROTOCOL)
        self.unique_id = self.features["unique_id"]
        self.input_ids = self.features["input_ids"]
        self.attention_mask = self.features["attention_mask"]
        self.segment_ids = self.features["segment_ids"]

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, idx):
        unique_id = self.unique_id[idx]
        input_ids = torch.tensor(self.input_ids[idx])
        attention_mask = torch.tensor(self.attention_mask[idx])
        segment_ids = torch.tensor(self.segment_ids[idx])
        return input_ids, attention_mask, segment_ids, unique_id

class CQA(nn.Module):
    def __init__(self, bert_model_name, tokenizer):
        super().__init__()
        self.BertEncoder = BertModel.from_pretrained(bert_model_name)
        self.BertEncoder.resize_token_embeddings(len(tokenizer))
        ### CODE ###

    def forward(self, input_ids, segment_ids, attention_mask, history_ids=None, p_mask=None):
        # history_ids and p_mask default to None so the skeleton runs without them
        bert_output = self.BertEncoder(input_ids=input_ids,
                                       attention_mask=attention_mask,
                                       token_type_ids=segment_ids).last_hidden_state
        ### CODE ###

def prediction(model, test_dataset, device):
    progress_bar = tqdm.tqdm
    model = model.to(device)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False)
    test_pbar = progress_bar(test_loader, total=len(test_loader))
    RawResult = collections.namedtuple("RawResult",
                                       ["unique_id", "start_logits", "end_logits"])
    all_results = []
    print("Predicting answers...")
    # Unpacking matches CQADataset.__getitem__
    for input_ids, attention_mask, segment_ids, unique_id in test_pbar:
        start_logits, end_logits = model(input_ids=input_ids.to(device),
                                         segment_ids=segment_ids.to(device),
                                         attention_mask=attention_mask.to(device))
        batch_num = start_logits.size(0)
        for idx in range(batch_num):
            start_logit = [float(x) for x in start_logits[idx].tolist()]
            end_logit = [float(x) for x in end_logits[idx].tolist()]
            all_results.append(RawResult(unique_id=int(unique_id[idx]),
                                         start_logits=start_logit,
                                         end_logits=end_logit))
    return all_results

print(f"Loading tokenizer from {tokenizer_dir}...")
tokenizer = BertTokenizer.from_pretrained(tokenizer_dir)

print(f"Loading trained model from {model_file}...")
device = torch.device("cuda")
model = CQA(model_name, tokenizer)  # CQA.__init__ takes (bert_model_name, tokenizer)
model.load_state_dict(torch.load(model_file))

test_dataset = CQADataset(data_file=test_data,
                          example_file=test_example,
                          feature_file=test_feature,
                          tokenizer=tokenizer,
                          mode="test")

all_results = prediction(model, test_dataset, device)

output_prediction_file = os.path.join(exp_dir, "predictions.json")
output_nbest_file = os.path.join(exp_dir, "nbest_predictions.json")

print("Writing predictions...")
write_predictions(all_examples=test_dataset.examples,
                  features_dict=test_dataset.features,
                  all_results=all_results,
                  n_best_size=args.n_best_size,
                  max_answer_length=args.max_answer_length,
                  do_lower_case=True,
                  tokenizer=tokenizer,
                  output_prediction_file=output_prediction_file,
                  output_nbest_file=output_nbest_file)
print("Done")
```
avg_line_length: 39.015075 | max_line_length: 109 | alphanum_fraction: 0.6212
Quality signals (qsc_*_quality_signal columns, suffix omitted; unlisted values are 0): code_num_words 931; code_num_chars 7,764; code_mean_word_length 4.906552; code_frac_words_unique 0.210526; code_frac_chars_top_2grams 0.021016; code_frac_chars_top_3grams 0.037215; code_frac_chars_top_4grams 0.013135; code_frac_chars_dupe_5grams 0.132662; code_frac_chars_dupe_6grams 0.094571; code_frac_chars_dupe_7grams 0.044877; code_frac_chars_dupe_8grams 0.029772; code_frac_chars_dupe_9grams 0.029772; code_frac_chars_dupe_10grams 0.029772; code_frac_chars_digital 0.00409; code_frac_chars_whitespace 0.275631; code_size_file_byte 7,764; code_num_lines 199; code_num_chars_line_max 110; code_num_chars_line_mean 39.015075; code_frac_chars_alphabet 0.808144; code_frac_chars_comments 0.010819; code_frac_lines_dupe_lines 0.060811; code_frac_chars_string_length 0.117608; code_frac_chars_long_word_length 0.01797; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.047297; codepython_cate_var_zero false; codepython_frac_lines_import 0.101351; codepython_frac_lines_simplefunc 0.006757; codepython_score_lines_no_logic 0.182432; codepython_frac_lines_print 0.074324
Raw qsc_* counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0
hexsha: 9e8d0d88791289330a7412e20650652419814d5a | size: 9,447 | ext: py | lang: Python
max_stars: datasets/kitti.py @ ShengyuH/PredateOverlap (head 770c3063399f08b3836935212ab4c84d355b4704), licenses ["MIT"], count 153, events 2020-11-30T09:47:11.000Z / 2021-04-28T00:58:10.000Z
max_issues: datasets/kitti.py @ ShengyuH/PredateOverlap (head 770c3063399f08b3836935212ab4c84d355b4704), licenses ["MIT"], count 31, events 2021-05-10T12:39:19.000Z / 2022-03-27T03:07:45.000Z
max_forks: datasets/kitti.py @ ShengyuH/PredateOverlap (head 770c3063399f08b3836935212ab4c84d355b4704), licenses ["MIT"], count 22, events 2020-11-30T13:50:55.000Z / 2021-04-28T09:47:40.000Z
content:
```python
# Basic libs
import os, time, glob, random, pickle, copy, torch
import numpy as np
import open3d
from scipy.spatial.transform import Rotation

# Dataset parent class
from torch.utils.data import Dataset
from lib.benchmark_utils import to_tsfm, to_o3d_pcd, get_correspondences

class KITTIDataset(Dataset):
    """
    We follow D3Feat to add the data augmentation part.
    We first voxelize the pcd and get matches.
    Then we apply data augmentation to the pcds. KPConv runs over the processed pcds, but later for loss computation we use the pcds before data augmentation.
    """
    DATA_FILES = {
        'train': './configs/kitti/train_kitti.txt',
        'val': './configs/kitti/val_kitti.txt',
        'test': './configs/kitti/test_kitti.txt'
    }

    def __init__(self, config, split, data_augmentation=True):
        super(KITTIDataset, self).__init__()
        self.config = config
        self.root = os.path.join(config.root, 'dataset')
        self.icp_path = os.path.join(config.root, 'icp')
        if not os.path.exists(self.icp_path):
            os.makedirs(self.icp_path)
        self.voxel_size = config.first_subsampling_dl
        self.matching_search_voxel_size = config.overlap_radius
        self.data_augmentation = data_augmentation
        self.augment_noise = config.augment_noise
        self.IS_ODOMETRY = True
        self.max_corr = config.max_points
        self.augment_shift_range = config.augment_shift_range
        self.augment_scale_max = config.augment_scale_max
        self.augment_scale_min = config.augment_scale_min

        # Initiate containers
        self.files = []
        self.kitti_icp_cache = {}
        self.kitti_cache = {}
        self.prepare_kitti_ply(split)
        self.split = split

    def prepare_kitti_ply(self, split):
        assert split in ['train', 'val', 'test']
        subset_names = open(self.DATA_FILES[split]).read().split()
        for dirname in subset_names:
            drive_id = int(dirname)
            fnames = glob.glob(self.root + '/sequences/%02d/velodyne/*.bin' % drive_id)
            assert len(fnames) > 0, f"Make sure that the path {self.root} has data {dirname}"
            inames = sorted([int(os.path.split(fname)[-1][:-4]) for fname in fnames])

            # get one-to-one distance by comparing the translation vector
            all_odo = self.get_video_odometry(drive_id, return_all=True)
            all_pos = np.array([self.odometry_to_positions(odo) for odo in all_odo])
            Ts = all_pos[:, :3, 3]
            pdist = (Ts.reshape(1, -1, 3) - Ts.reshape(-1, 1, 3)) ** 2
            pdist = np.sqrt(pdist.sum(-1))
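            # pdist[i, j] is now the Euclidean distance between the sensor
            # positions of frames i and j; the selection below walks the
            # sequence and pairs each frame with the first frame at least
            # 10 m away.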
            ######################################
            # D3Feat script to generate test pairs
            more_than_10 = pdist > 10
            curr_time = inames[0]
            while curr_time in inames:
                next_time = np.where(more_than_10[curr_time][curr_time:curr_time + 100])[0]
                if len(next_time) == 0:
                    curr_time += 1
                else:
                    next_time = next_time[0] + curr_time - 1
                if next_time in inames:
                    self.files.append((drive_id, curr_time, next_time))
                    curr_time = next_time + 1

        # remove bad pairs
        if split == 'test':
            self.files.remove((8, 15, 58))
        print(f'Num_{split}: {len(self.files)}')

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        drive = self.files[idx][0]
        t0, t1 = self.files[idx][1], self.files[idx][2]
        all_odometry = self.get_video_odometry(drive, [t0, t1])
        positions = [self.odometry_to_positions(odometry) for odometry in all_odometry]
        fname0 = self._get_velodyne_fn(drive, t0)
        fname1 = self._get_velodyne_fn(drive, t1)

        # XYZ and reflectance
        xyzr0 = np.fromfile(fname0, dtype=np.float32).reshape(-1, 4)
        xyzr1 = np.fromfile(fname1, dtype=np.float32).reshape(-1, 4)
        xyz0 = xyzr0[:, :3]
        xyz1 = xyzr1[:, :3]

        # use ICP to refine the ground-truth pose; for ICP we don't voxelize the point clouds
        key = '%d_%d_%d' % (drive, t0, t1)
        filename = self.icp_path + '/' + key + '.npy'
        if key not in self.kitti_icp_cache:
            if not os.path.exists(filename):
                print('missing ICP files, recompute it')
                M = (self.velo2cam @ positions[0].T @ np.linalg.inv(positions[1].T)
                     @ np.linalg.inv(self.velo2cam)).T
                xyz0_t = self.apply_transform(xyz0, M)
                pcd0 = to_o3d_pcd(xyz0_t)
                pcd1 = to_o3d_pcd(xyz1)
                reg = open3d.registration.registration_icp(pcd0, pcd1, 0.2, np.eye(4),
                                                           open3d.registration.TransformationEstimationPointToPoint(),
                                                           open3d.registration.ICPConvergenceCriteria(max_iteration=200))
                pcd0.transform(reg.transformation)
                M2 = M @ reg.transformation
                np.save(filename, M2)
            else:
                M2 = np.load(filename)
            self.kitti_icp_cache[key] = M2
        else:
            M2 = self.kitti_icp_cache[key]

        # refined pose is denoted as trans
        tsfm = M2
        rot = tsfm[:3, :3]
        trans = tsfm[:3, 3][:, None]

        # voxelize the point clouds here
        pcd0 = to_o3d_pcd(xyz0)
        pcd1 = to_o3d_pcd(xyz1)
        pcd0 = pcd0.voxel_down_sample(self.voxel_size)
        pcd1 = pcd1.voxel_down_sample(self.voxel_size)
        src_pcd = np.array(pcd0.points)
        tgt_pcd = np.array(pcd1.points)

        # Get matches
        matching_inds = get_correspondences(pcd0, pcd1, tsfm, self.matching_search_voxel_size)
        if (matching_inds.size(0) < self.max_corr and self.split == 'train'):
            return self.__getitem__(np.random.choice(len(self.files), 1)[0])

        src_feats = np.ones_like(src_pcd[:, :1]).astype(np.float32)
        tgt_feats = np.ones_like(tgt_pcd[:, :1]).astype(np.float32)
        rot = rot.astype(np.float32)
        trans = trans.astype(np.float32)

        # add data augmentation
        src_pcd_input = copy.deepcopy(src_pcd)
        tgt_pcd_input = copy.deepcopy(tgt_pcd)
        if (self.data_augmentation):
            # add gaussian noise
            src_pcd_input += (np.random.rand(src_pcd_input.shape[0], 3) - 0.5) * self.augment_noise
            tgt_pcd_input += (np.random.rand(tgt_pcd_input.shape[0], 3) - 0.5) * self.augment_noise

            # rotate the point cloud
            euler_ab = np.random.rand(3) * np.pi * 2  # anglez, angley, anglex
            rot_ab = Rotation.from_euler('zyx', euler_ab).as_matrix()
            if (np.random.rand(1)[0] > 0.5):
                src_pcd_input = np.dot(rot_ab, src_pcd_input.T).T
            else:
                tgt_pcd_input = np.dot(rot_ab, tgt_pcd_input.T).T

            # scale the pcd
            scale = self.augment_scale_min + (self.augment_scale_max - self.augment_scale_min) * random.random()
            src_pcd_input = src_pcd_input * scale
            tgt_pcd_input = tgt_pcd_input * scale

            # shift the pcd
            shift_src = np.random.uniform(-self.augment_shift_range, self.augment_shift_range, 3)
            shift_tgt = np.random.uniform(-self.augment_shift_range, self.augment_shift_range, 3)
            src_pcd_input = src_pcd_input + shift_src
            tgt_pcd_input = tgt_pcd_input + shift_tgt

        return src_pcd_input, tgt_pcd_input, src_feats, tgt_feats, rot, trans, matching_inds, src_pcd, tgt_pcd, torch.ones(1)

    def apply_transform(self, pts, trans):
        R = trans[:3, :3]
        T = trans[:3, 3]
        pts = pts @ R.T + T
        return pts

    @property
    def velo2cam(self):
        try:
            velo2cam = self._velo2cam
        except AttributeError:
            R = np.array([
                7.533745e-03, -9.999714e-01, -6.166020e-04, 1.480249e-02, 7.280733e-04,
                -9.998902e-01, 9.998621e-01, 7.523790e-03, 1.480755e-02
            ]).reshape(3, 3)
            T = np.array([-4.069766e-03, -7.631618e-02, -2.717806e-01]).reshape(3, 1)
            velo2cam = np.hstack([R, T])
            self._velo2cam = np.vstack((velo2cam, [0, 0, 0, 1])).T
        return self._velo2cam

    def get_video_odometry(self, drive, indices=None, ext='.txt', return_all=False):
        if self.IS_ODOMETRY:
            data_path = self.root + '/poses/%02d.txt' % drive
            if data_path not in self.kitti_cache:
                self.kitti_cache[data_path] = np.genfromtxt(data_path)
            if return_all:
                return self.kitti_cache[data_path]
            else:
                return self.kitti_cache[data_path][indices]

    def odometry_to_positions(self, odometry):
        if self.IS_ODOMETRY:
            T_w_cam0 = odometry.reshape(3, 4)
            T_w_cam0 = np.vstack((T_w_cam0, [0, 0, 0, 1]))
            return T_w_cam0

    def _get_velodyne_fn(self, drive, t):
        if self.IS_ODOMETRY:
            fname = self.root + '/sequences/%02d/velodyne/%06d.bin' % (drive, t)
            return fname

    def get_position_transform(self, pos0, pos1, invert=False):
        T0 = self.pos_transform(pos0)
        T1 = self.pos_transform(pos1)
        return (np.dot(T1, np.linalg.inv(T0)).T if not invert else np.dot(
            np.linalg.inv(T1), T0).T)
```
avg_line_length: 40.896104 | max_line_length: 146 | alphanum_fraction: 0.592887
Quality signals (qsc_*_quality_signal columns, suffix omitted; unlisted values are 0): code_num_words 1,271; code_num_chars 9,447; code_mean_word_length 4.192762; code_frac_words_unique 0.230527; code_frac_chars_top_2grams 0.030024; code_frac_chars_top_3grams 0.020642; code_frac_chars_top_4grams 0.019704; code_frac_chars_dupe_5grams 0.210358; code_frac_chars_dupe_6grams 0.106587; code_frac_chars_dupe_7grams 0.046913; code_frac_chars_dupe_8grams 0.034153; code_frac_chars_dupe_9grams 0.034153; code_frac_chars_dupe_10grams 0.034153; code_frac_chars_digital 0.045135; code_frac_chars_whitespace 0.291733; code_size_file_byte 9,447; code_num_lines 230; code_num_chars_line_max 147; code_num_chars_line_mean 41.073913; code_frac_chars_alphabet 0.751308; code_frac_chars_comments 0.073462; code_frac_lines_dupe_lines 0.058824; code_frac_chars_string_length 0.039885; code_frac_chars_long_word_length 0.017637; code_frac_lines_assert 0.011765; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.058824; codepython_cate_var_zero false; codepython_frac_lines_import 0.035294; codepython_frac_lines_simplefunc 0.005882; codepython_score_lines_no_logic 0.164706; codepython_frac_lines_print 0.011765
Raw qsc_* counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0
hexsha: 9e8d10545762b08a28204f212d3c73b287afb2c3 | size: 1,344 | ext: py | lang: Python
max_stars: bin/compare_versions.py @ sdss/lvmmodel (head 1ab52f51a172500f8a10e762c88b9929898e1b20), licenses ["BSD-3-Clause"], count 2, events 2017-07-18T19:22:38.000Z / 2021-12-17T16:02:01.000Z
max_issues: bin/compare_versions.py @ sdss/lvmmodel (head 1ab52f51a172500f8a10e762c88b9929898e1b20), licenses ["BSD-3-Clause"], count 134, events 2016-02-07T03:48:48.000Z / 2022-02-21T17:50:09.000Z
max_forks: bin/compare_versions.py @ sdss/lvmmodel (head 1ab52f51a172500f8a10e762c88b9929898e1b20), licenses ["BSD-3-Clause"], count 3, events 2017-07-12T21:36:19.000Z / 2022-01-11T16:15:44.000Z
content:
```python
#!/usr/bin/env python
"""
Make plots to compare two different versions of desimodel

Stephen Bailey, LBL
July 2014
"""

import os, sys
import numpy as np
import pylab as P
import matplotlib.pyplot as plt
import fitsio

camcolor = dict(b='b', r='r', z='k')

def compare_throughput(dir1, dir2):
    P.figure()
    p0 = plt.subplot2grid((3,1), (0,0), rowspan=2)
    p1 = plt.subplot2grid((3,1), (2,0))
    for x in ('b', 'r', 'z'):
        d1 = fitsio.read(dir1+'/data/throughput/thru-'+x+'.fits')
        d2 = fitsio.read(dir2+'/data/throughput/thru-'+x+'.fits')
        w1 = d1['wavelength']
        w2 = d2['wavelength']
        t1 = d1['throughput']
        t2 = d2['throughput']
        p0.plot(w1, t1, '-', color=camcolor[x])
        p0.plot(w2, t2, '--', color=camcolor[x])
        p1.plot(w1, (t1-np.interp(w1, w2, t2))/t1, '-', color=camcolor[x])
    p0.set_xlim(3500, 10000)
    p0.set_ylim(0.0, 0.5)
    p0.set_ylabel('Throughput')
    p0.grid()
    p1.set_xlim(3500, 10000)
    ### p1.set_ylim(-0.5, 0.5)
    p1.set_xlabel('Wavelength [Angstroms]')
    p1.set_ylabel('Relative difference')
    p1.grid()

def compare_fiberloss(dir1, dir2):
    pass

#-------------------------------------------------------------------------
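# Usage (paths are illustrative):
#   python compare_versions.py /path/to/desimodel-v1 /path/to/desimodel-v2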
dir1, dir2 = sys.argv[1:3]
compare_throughput(dir1, dir2)
plt.show()
```
avg_line_length: 23.578947 | max_line_length: 74 | alphanum_fraction: 0.554315
Quality signals (qsc_*_quality_signal columns, suffix omitted; unlisted values are 0): code_num_words 190; code_num_chars 1,344; code_mean_word_length 3.868421; code_frac_words_unique 0.431579; code_frac_chars_top_2grams 0.043537; code_frac_chars_top_3grams 0.057143; code_frac_chars_top_4grams 0.068027; code_frac_chars_dupe_5grams 0.111565; code_frac_chars_digital 0.082857; code_frac_chars_whitespace 0.21875; code_size_file_byte 1,344; code_num_lines 56; code_num_chars_line_max 75; code_num_chars_line_mean 24; code_frac_chars_alphabet 0.617143; code_frac_chars_comments 0.15253; code_frac_chars_string_length 0.137533; code_frac_chars_long_word_length 0.039042; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.060606; codepython_cate_var_zero false; codepython_frac_lines_pass 0.030303; codepython_frac_lines_import 0.151515; codepython_score_lines_no_logic 0.212121
Raw qsc_* counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0
hexsha: 9e8e19f97e0eb39926f29ca476d7649b8872fc92 | size: 1,923 | ext: py | lang: Python
max_stars: tests/graph/parallel_graphs.py @ marcelotrevisani/acorns (head 682749b0963ffc0a3998a7065ef505fc95123f50), licenses ["MIT"], count null, events null / null
max_issues: tests/graph/parallel_graphs.py @ marcelotrevisani/acorns (head 682749b0963ffc0a3998a7065ef505fc95123f50), licenses ["MIT"], count null, events null / null
max_forks: tests/graph/parallel_graphs.py @ marcelotrevisani/acorns (head 682749b0963ffc0a3998a7065ef505fc95123f50), licenses ["MIT"], count null, events null / null
content:
```python
import matplotlib.pyplot as plt
import numpy as np
import os
import json
import seaborn as sns
import re

sns.set(style="darkgrid")

def atoi(text):
    return int(text) if text.isdigit() else text

def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    '''
    return [atoi(c) for c in re.split(r'(\d+)', text)]

def convert_files_to_lists(file_location):
    our_times = []
    with open(file_location) as json_data:
        data = json.load(json_data)
    for i, key in enumerate(sorted(data)):
        for num_cores in sorted(data[key], key=natural_keys):
            our_times.append(data[key][num_cores]['us'])
    return our_times
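# Assumed JSON layout (illustrative): a mapping of benchmark name to per-core
# timings, e.g. {"bench": {"1_cores": {"us": 120.0}, "2_cores": {"us": 65.0}}};
# natural_keys keeps "10_cores" after "2_cores" when sorting.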
def get_speedup_list(time_list):
    speedup_list = []
    single_thread_time = time_list[0]
    for time in time_list[1:]:
        speedup_list.append(float(single_thread_time) / float(time))
    return speedup_list

def generate_two_graph(avg_us, denom, suffix="", ylabel="Time (s)"):
    plt.plot(denom, avg_us, color='#1abc9c', linestyle='dashed', markersize=7)
    # legend
    plt.xlabel('Threads', fontfamily='monospace')
    # ylabel already carries its own units, so don't append another "(s)"
    plt.ylabel(ylabel, fontfamily='monospace')
    plt.margins(0, 0)
    plt.savefig('./tests/results/hess/graphs/parallel/parallel-graph{}.pdf'.format(suffix), bbox_inches='tight',
                pad_inches=0)
    # plt.savefig('./tests/complex/graphs/graph_by_128_speedup.pdf')
    plt.clf()

our_times = convert_files_to_lists("./tests/results/grad/json/parallel/parallel_results_good.json")
print(our_times)
generate_two_graph(our_times, range(1, 48))
speedup_list = get_speedup_list(our_times)
generate_two_graph(speedup_list, range(1, 47), suffix="-speedup", ylabel="Speedup (Time Single Thread / Time X Threads)")
```
avg_line_length: 33.736842 | max_line_length: 122 | alphanum_fraction: 0.680707
Quality signals (qsc_*_quality_signal columns, suffix omitted; unlisted values are 0): code_num_words 274; code_num_chars 1,923; code_mean_word_length 4.580292; code_frac_words_unique 0.441606; code_frac_chars_top_2grams 0.044622; code_frac_chars_top_3grams 0.038247; code_frac_chars_top_4grams 0.030279; code_frac_chars_dupe_5grams 0.038247; code_frac_chars_digital 0.014734; code_frac_chars_whitespace 0.188248; code_size_file_byte 1,923; code_num_lines 56; code_num_chars_line_max 123; code_num_chars_line_mean 34.339286; code_frac_chars_alphabet 0.789238; code_frac_chars_comments 0.115445; code_frac_chars_string_length 0.15; code_frac_chars_long_word_length 0.07284; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.131579; codepython_cate_var_zero false; codepython_frac_lines_import 0.157895; codepython_frac_lines_simplefunc 0.026316; codepython_score_lines_no_logic 0.394737; codepython_frac_lines_print 0.026316
Raw qsc_* counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0
hexsha: 9e8e6d830985755cf872faa18feca1ac284fe14d | size: 11,042 | ext: py | lang: Python
max_stars: transposonmapper/transposonmapper.py @ EKingma/Transposonmapper (head 1413bda16a0bd5f5f3ccf84d86193c2dba0ab01b), licenses ["Apache-2.0"], count 2, events 2021-11-23T09:39:35.000Z / 2022-01-25T15:49:45.000Z
max_issues: transposonmapper/transposonmapper.py @ EKingma/Transposonmapper (head 1413bda16a0bd5f5f3ccf84d86193c2dba0ab01b), licenses ["Apache-2.0"], count 76, events 2021-07-07T18:31:44.000Z / 2022-03-22T10:04:40.000Z
max_forks: transposonmapper/transposonmapper.py @ EKingma/Transposonmapper (head 1413bda16a0bd5f5f3ccf84d86193c2dba0ab01b), licenses ["Apache-2.0"], count 2, events 2021-09-16T10:56:20.000Z / 2022-01-25T12:33:25.000Z
content:
```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This is a tool developed for analysing transposon insertions for experiments using SAturated Transposon Analysis in Yeast (SATAY).
This python code contains one function called transposonmapper().
For more information about this code and the project, see https://satay-ll.github.io/SATAY-jupyter-book/Introduction.html
This code is based on the Matlab code created by the Kornmann lab which is available at: sites.google.com/site/satayusers/
__Author__ = Gregory van Beek. LaanLab, department of Bionanoscience, Delft University of Technology
__version__ = 1.5
__Date last update__ = 2021-01-11
Version history:
1.1; Added code for creating two text files for storing insertion locations per gene and per essential gene [2020-07-27]
1.2; Improved searching algorithm for essential genes [2020-08-06]
1.3; Load file containing all essential genes so that a search for essential genes in multiple file is not needed anymore. This file is created using Create_EssentialGenes_list.py located in the same directory as this code [2020-08-07]
1.4; Fixed bug where the gene position and transposon insertion location did not start at zero for each chromosome, causing confusing values to be stored in the _pergene_insertions.txt and _peressential_insertions.txt files [2020-08-09]
1.5; Added functionality to handle all possible sam flags in the alignment file (bam-file) instead of only flag=0 or flag=16. This is needed for the function to handle paired-end sequencing data [2021-01-11]
"""
# Local imports
from transposonmapper.properties import (
get_chromosome_names,
get_sequence_length,
)
from transposonmapper.mapping import (
get_reads,
add_chromosome_length,
add_chromosome_length_inserts,
get_insertions_and_reads,
)
from transposonmapper.utils import chromosomename_roman_to_arabic
from transposonmapper.importing import (
load_default_files,
read_genes,
)
from transposonmapper.exporting import (
save_as_bed,
save_per_gene,
save_per_gene_insertions,
save_per_essential_insertions,
save_as_wig
)
import sys
def transposonmapper(bamfile, gff_file=None, essential_file=None, gene_name_file=None):
"""This function is created for analysis of SATAY data using the species Saccharomyces Cerevisiae.
The function assumes that the reads are already aligned to a reference genome.
The input data should be a .bam-file and the location where the .bam-file is stored should also contain an index file (.bam.bai-file, which for example can be created using sambamba).
The function uses the pysam package for handling bam files (see pysam.readthedocs.io/en/latest/index.html) and therefore this function only runs on Linux systems with SAMTools installed.
Parameters
----------
bamfile : str, required
Path to the bamfile. This location should also contain the .bam.bai index file (does not need to be input in this function).
gff_file : str, optional
Path to a .gff-file including all gene information (e.g. downloaded from SGD).
Default file is 'Saccharomyces_cerevisiae.R64-1-1.99.gff3'. By default None.
essential_file : str, optional
Path to a .txt file containing a list of all essential genes. Every line should consist of a single essential gene and the file should have one header line.
Ideally this file is created using 'Create_EssentialGenes_list.py'. Default file is 'Cerevisiae_AllEssentialGenes_List.txt'. By default None.
gene_name_file : str, optional
Path to a text file that includes aliases for all genes. Default file is 'Yeast_Protein_Names.txt'. By default None.
Returns
-------
A set of files
It outputs the following files that store information regarding the location of all insertions:
- .bed-file: Includes all individual basepair locations of the whole genome where at least one transposon has been mapped and the number of insertions for each location (the number of reads) according to the Browser Extensible Data (bed) format.
A distinction is made between reads that had a different reading orientation during sequencing. The number of reads is stored using the equation #reads*20+100 (e.g. 2 reads is stored as 140).
- .wig-file: Includes all individual basepair locations of the whole genome where at least one transposon has been mapped and the number of insertions for each location (the number of reads) according to the Wiggle (wig) format.
In this file no distinction is made between reads that had a different reading orientation during sequencing. The number of reads is stored as the absolute count.
- _pergene.txt-file: Includes all genes (currently 6600) with the total number of insertions and number of reads within the genomic region of the gene.
- _peressential.txt-file: Includes all annotated essential genes (currently 1186) with the total number of insertions and number of reads within the genomic region of the gene.
- _pergene_insertions.txt-file: Includes all genes with their genomic location (i.e. chromosome number, start and end position) and the locations of all insertions within the gene location. It also includes the number of reads per insertion.
- _peressential_insertions.txt-file: Includes all essential genes with their genomic location (i.e. chromosome number, start and end position) and the locations of all insertions within the gene location. It also includes the number of reads per insertion.
(note that in the latter two files, the genomic locations are continuous, for example chromosome II does not start at 0, but at 'length chromosome I + 1' etc.).
The output files are saved at the location of the input file using the same name as the input file, but with the corresponding extension.
"""
# If necessary, load default files
gff_file, essential_file, gene_name_file = load_default_files(
gff_file, essential_file, gene_name_file
)
# Verify presence of files
data_files = {
"bam": bamfile,
"gff3": gff_file,
"essentials": essential_file,
"gene_names": gene_name_file,
}
for filetype, file_path in data_files.items():
assert file_path, f"{filetype} not found at {file_path}"
# Read files for all genes and all essential genes
print("Getting coordinates of all genes ...")
gene_coordinates, essential_coordinates, aliases_designation = read_genes(
gff_file, essential_file, gene_name_file
)
try:
import pysam
except ImportError:
print("Failed to import pysam")
sys.exit(1)
# Read bam file
bam = pysam.AlignmentFile(bamfile, "rb")
# Get names of all chromosomes as stored in the bam file
ref_tid = get_chromosome_names(bam)
ref_names = list(ref_tid.keys())
# Convert chromosome names in data file to roman numerals
ref_romannums = chromosomename_roman_to_arabic()[1]
ref_tid_roman = {key: value for key, value in zip(ref_romannums, ref_tid)}
# Get sequence lengths of all chromosomes
chr_lengths, chr_lengths_cumsum = get_sequence_length(bam)
# Get all reads within a specified genomic region
readnumb_array, tncoordinates_array, tncoordinatescopy_array = get_reads(bam)
#%% CONCATENATE ALL CHROMOSOMES
# For each insertion location, add the length of all previous chromosomes
tncoordinatescopy_array = add_chromosome_length_inserts(
tncoordinatescopy_array, ref_names, chr_lengths
)
# For each gene location, add the length of all previous chromosomes
gene_coordinates = add_chromosome_length(
gene_coordinates, chr_lengths_cumsum, ref_tid_roman
)
# For each essential gene location, add the length of all previous chromosomes
essential_coordinates = add_chromosome_length(
essential_coordinates, chr_lengths_cumsum, ref_tid_roman
)
# GET NUMBER OF TRANSPOSONS AND READS PER GENE
print("Get number of insertions and reads per gene ...")
# All genes
tn_per_gene, reads_per_gene, tn_coordinates_per_gene = get_insertions_and_reads(
gene_coordinates, tncoordinatescopy_array, readnumb_array
)
# Only essential genes
(
tn_per_essential,
reads_per_essential,
tn_coordinates_per_essential,
) = get_insertions_and_reads(
essential_coordinates, tncoordinatescopy_array, readnumb_array
)
# CREATE BED FILE
bedfile = bamfile + ".bed"
print("Writing bed file at: ", bedfile)
print("")
save_as_bed(bedfile, tncoordinates_array, ref_tid, readnumb_array)
# CREATE TEXT FILE WITH TRANSPOSONS AND READS PER GENE
# NOTE THAT THE TRANSPOSON WITH THE HIGHEST READ COUNT IS IGNORED.
# E.G. IF THIS FILE IS COMPARED WITH THE _PERGENE_INSERTIONS.TXT FILE THE READS DON'T ADD UP (SEE https://groups.google.com/forum/#!category-topic/satayusers/bioinformatics/uaTpKsmgU6Q)
# TO REMOVE THIS HACK, CHANGE THE INITIALIZATION OF THE VARIABLE readpergene
per_gene_file = bamfile + "_pergene.txt"
print("Writing pergene.txt file at: ", per_gene_file)
print("")
save_per_gene(per_gene_file, tn_per_gene, reads_per_gene, aliases_designation)
# CREATE TEXT FILE TRANSPOSONS AND READS PER ESSENTIAL GENE
per_essential_file = bamfile + "_peressential.txt"
print("Writing peressential.txt file at: ", per_essential_file)
print("")
save_per_gene(
per_essential_file, tn_per_essential, reads_per_essential, aliases_designation
)
# CREATE TEXT FILE WITH LOCATION OF INSERTIONS AND READS PER GENE
per_gene_insertions_file = bamfile + "_pergene_insertions.txt"
print("Witing pergene_insertions.txt file at: ", per_gene_insertions_file)
print("")
save_per_gene_insertions(
per_gene_insertions_file,
tn_coordinates_per_gene,
gene_coordinates,
chr_lengths_cumsum,
ref_tid_roman,
aliases_designation,
)
# CREATE TEXT FILE WITH LOCATION OF INSERTIONS AND READS PER ESSENTIAL GENE
per_essential_insertions_file = bamfile + "_peressential_insertions.txt"
print(
"Writing peressential_insertions.txt file at: ", per_essential_insertions_file
)
print("")
save_per_essential_insertions(
per_essential_insertions_file,
tn_coordinates_per_essential,
gene_coordinates,
chr_lengths_cumsum,
ref_tid_roman,
aliases_designation,
)
# ADD INSERTIONS AT SAME LOCATION BUT WITH DIFFERENT ORIENTATIONS TOGETHER (FOR STORING IN WIG-FILE)
wigfile = bamfile + ".wig"
print("Writing wig file at: ", wigfile)
print("")
save_as_wig(wigfile, tncoordinates_array, ref_tid, readnumb_array)
#%%
if __name__ == "__main__":
bamfile = "transposonmapper/data_files/files4test/SRR062634.filt_trimmed.sorted.bam"
transposonmapper(bamfile=bamfile)
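# A minimal usage sketch (hypothetical paths): the three annotation files can be
# passed explicitly instead of relying on load_default_files(), as documented in
# the docstring above.
#
#   transposonmapper(
#       bamfile="/data/sample.sorted.bam",  # the .bam.bai index must sit next to it
#       gff_file="/data/Saccharomyces_cerevisiae.R64-1-1.99.gff3",
#       essential_file="/data/Cerevisiae_AllEssentialGenes_List.txt",
#       gene_name_file="/data/Yeast_Protein_Names.txt",
#   )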
| 46.394958
| 271
| 0.741442
| 1,542
| 11,042
| 5.136187
| 0.245785
| 0.019444
| 0.013131
| 0.008081
| 0.322222
| 0.260859
| 0.223106
| 0.206566
| 0.195707
| 0.171465
| 0
| 0.011552
| 0.200326
| 11,042
| 237
| 272
| 46.590717
| 0.885391
| 0.588209
| 0
| 0.155172
| 0
| 0
| 0.119464
| 0.039064
| 0
| 0
| 0
| 0
| 0.008621
| 1
| 0.008621
| false
| 0
| 0.077586
| 0
| 0.086207
| 0.12931
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e98a6268aa07a08ba6a43715f82f5d441844cbc
| 2,090
|
py
|
Python
|
model-optimizer/extensions/ops/bucketize.py
|
evgenytalanin-intel/openvino
|
c3aa866a3318fe9fa8c7ebd3bd333b075bb1cc36
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/extensions/ops/bucketize.py
|
evgenytalanin-intel/openvino
|
c3aa866a3318fe9fa8c7ebd3bd333b075bb1cc36
|
[
"Apache-2.0"
] | 1
|
2021-09-09T08:43:57.000Z
|
2021-09-10T12:39:16.000Z
|
model-optimizer/extensions/ops/bucketize.py
|
evgenytalanin-intel/openvino
|
c3aa866a3318fe9fa8c7ebd3bd333b075bb1cc36
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from mo.graph.graph import Node, Graph
from mo.ops.op import Op
class Bucketize(Op):
op = 'Bucketize'
def __init__(self, graph: Graph, attrs: dict):
mandatory_props = {
'kind': 'op',
'type': __class__.op,
'op': __class__.op,
'version': 'extension',
'type_infer': self.type_infer,
'infer': self.infer,
'in_ports_count': 2,
'out_ports_count': 1,
}
super().__init__(graph, mandatory_props, attrs)
def supported_attrs(self):
return ["with_right_bound"]
@staticmethod
def type_infer(node):
# the output is always integer since the layer outputs a bucket index
node.out_port(0).set_data_type(np.int32)
@staticmethod
def infer(node: Node):
assert node.with_right_bound is not None, \
"Attribute \"with_right_bound\" is not defined"
assert len(node.in_nodes()) == 2, \
"Incorrect number of inputs for {} node".format(node.id)
output_shape = node.in_port(0).data.get_shape()
node.out_port(0).data.set_shape(output_shape)
input_value = node.in_port(0).data.get_value()
buckets_value = node.in_port(1).data.get_value()
# compute the output value only if all inputs are constant
if input_value is not None and buckets_value is not None:
node.out_port(0).data.set_value(np.digitize(input_value, buckets_value, right=node.with_right_bound))
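# A minimal sketch of the constant-folding semantics above, using plain numpy
# (no Graph/Node machinery; the values below are illustrative only). With
# right=True each bucket includes its right boundary, mirroring with_right_bound.
if __name__ == "__main__":
    data = np.array([0.2, 1.0, 6.4])
    boundaries = np.array([1.0, 5.0])
    print(np.digitize(data, boundaries, right=True))   # [0 0 2]
    print(np.digitize(data, boundaries, right=False))  # [0 1 2]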
| 33.709677
| 113
| 0.661244
| 296
| 2,090
| 4.47973
| 0.435811
| 0.045249
| 0.042232
| 0.027149
| 0.084465
| 0.055807
| 0
| 0
| 0
| 0
| 0
| 0.014557
| 0.244019
| 2,090
| 61
| 114
| 34.262295
| 0.824684
| 0.320096
| 0
| 0.058824
| 0
| 0
| 0.114836
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 1
| 0.117647
| false
| 0
| 0.088235
| 0.029412
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e9904613297e7bec0c0b0302bbc80565d246970
| 5,470
|
py
|
Python
|
app/view/winners.py
|
pitust/OfMagesAndMagic
|
e5d5d4f0a930b01b047962028e5c633f6caefe40
|
[
"MIT"
] | 15
|
2016-12-11T14:30:30.000Z
|
2019-12-15T13:26:57.000Z
|
app/view/winners.py
|
pitust/OfMagesAndMagic
|
e5d5d4f0a930b01b047962028e5c633f6caefe40
|
[
"MIT"
] | 10
|
2016-12-11T14:32:30.000Z
|
2016-12-12T13:30:37.000Z
|
app/view/winners.py
|
pitust/OfMagesAndMagic
|
e5d5d4f0a930b01b047962028e5c633f6caefe40
|
[
"MIT"
] | 4
|
2016-12-11T14:30:37.000Z
|
2021-03-13T12:46:20.000Z
|
import pygame
from app.view.animations import Delay, FadeIn, FadeOut, ChooseRandom, FrameAnimate, MovePosition, DelayCallBack, MoveValue, SequenceAnimation, ParallelAnimation, Timeout
from app.resources.event_handler import SET_GAME_STATE
from app.resources import text_renderer, colours
from app.resources.music import MusicManager
from app.resources.images import ImageManager
from app.conway.game_of_life import GameOfLife
from app.resources.event_handler import SOUND_EFFECT
class StateWinnersView:
def __init__(self, parent):
self.root = parent.parent
self.parent = parent
self.winners = self.root.winners
self.firework_size = 8
self.fireworks = GameOfLife(self.parent.resolution[0]//self.firework_size + 50,self.parent.resolution[1]//self.firework_size + 50)
self.root.event_handler.register_key_listener(self.handle_event)
self.congratulations_text = text_renderer.render_title("Champions", colours.COLOUR_WHITE)
self.team1_text = text_renderer.render_huge_text(self.winners[0].get_short_name(), colours.COLOUR_WHITE)
self.see_you = text_renderer.render_huge_text("Good Luck in the Finals!", colours.COLOUR_WHITE)
self.spawn_burst(self.fireworks.get_width()-65, 10)
self.spawn_burst(self.fireworks.get_width()-65, (self.fireworks.get_height()-65)//2-5)
self.spawn_burst(self.fireworks.get_width()-65, (self.fireworks.get_height()-65)//2+15)
self.spawn_burst(self.fireworks.get_width()-65, self.fireworks.get_height()-65)
self.spawn_burst(10, 10)
self.spawn_burst(10, (self.fireworks.get_height()-65)//2-5)
self.spawn_burst(10, (self.fireworks.get_height()-65)//2+15)
self.spawn_burst(10, self.fireworks.get_height()-65)
self.spawn_burst((self.fireworks.get_width()-65)//2 - 10, 10)
self.spawn_burst((self.fireworks.get_width()-65)//2 + 20, 10)
self.spawn_burst((self.fireworks.get_width()-65)//2 - 10, self.fireworks.get_height()-65)
self.spawn_burst((self.fireworks.get_width()-65)//2 + 20, self.fireworks.get_height()-65)
self.firework_animation = Timeout(self.update_fireworks, time=150)
self.animations = SequenceAnimation()
self.animations.add_animation(FadeIn(self.set_alpha, time=3000))
self.animations.add_animation(Delay( time=5000 ))
self.animations.add_animation(FadeOut(self.set_alpha, time=3000))
self.alpha = 0
self.frame = 0
def update_fireworks(self):
self.fireworks.update()
def set_alpha(self, alpha):
self.alpha = alpha
def spawn_burst(self, x, y):
self.fireworks.set_cell( x, y,1)
self.fireworks.set_cell(x+1, y,1)
self.fireworks.set_cell(x+2, y,1)
self.fireworks.set_cell(x+1, y+2,1)
self.fireworks.set_cell( x, y+4,1)
self.fireworks.set_cell(x+1,y+4,1)
self.fireworks.set_cell(x+2,y+4,1)
def render(self):
surface = pygame.Surface(self.parent.resolution)
for y in range(self.fireworks.get_height()):
for x in range(self.fireworks.get_width()):
node = self.fireworks.get_cell(x,y)
if node > 0:
pygame.draw.rect(surface, colours.COLOUR_YELLOW, (x*self.firework_size, y*self.firework_size, self.firework_size, self.firework_size))
surface.blit(self.congratulations_text,
((surface.get_width()-self.congratulations_text.get_width())/2,
surface.get_height()/2 - 150)
)
surface.blit(
self.team1_text,
((surface.get_width()-self.team1_text.get_width())/2,
(surface.get_height()-self.team1_text.get_height())/2)
)
mask = pygame.Surface(self.parent.resolution, pygame.SRCALPHA)
mask.fill((0,0,0, 255-self.alpha))
surface.blit(mask, (0,0))
return surface
def update(self, delta_t):
if self.animations.finished():
self.parent.trigger_exit_to_main()
self.animations.animate(delta_t)
self.firework_animation.animate(delta_t)
def handle_event(self, event):
if event.type == pygame.KEYDOWN:
if event.key in [pygame.K_ESCAPE]:
self.parent.trigger_exit_to_main()
def exit_state(self):
self.parent.parent.event_handler.unregister_key_listener(self.handle_event)
class AnnounceWinners:
def __init__(self, parent, state_seed='default'):
self.parent = parent
self.event_handler = parent.event_handler
self.resolution = self.parent.resolution
self.states = {
'default' : StateWinnersView
}
self.cur_state = state_seed
self.state = self.states[self.cur_state](self)
music_manager = MusicManager()
music_manager.restore_music_volume()
music_manager.play_song("champions", loops=-1)
def set_state(self, state):
self.state.exit_state()
self.state_code = state
self.state = self.states[state](self)
def render(self):
return self.state.render()
def update(self, delta_t):
self.state.update(delta_t)
def handle_event(self, event):
self.state.handle_event(event)
def trigger_exit_to_main(self):
self.state.exit_state()
event = pygame.event.Event(SET_GAME_STATE, state="main_menu", seed='intro')
pygame.event.post(event)
| 38.251748
| 169
| 0.668556
| 733
| 5,470
| 4.78854
| 0.189632
| 0.103704
| 0.08661
| 0.053846
| 0.414815
| 0.308547
| 0.226211
| 0.193162
| 0.15755
| 0.142735
| 0
| 0.030534
| 0.209689
| 5,470
| 142
| 170
| 38.521127
| 0.781402
| 0
| 0
| 0.111111
| 0
| 0
| 0.012797
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12963
| false
| 0
| 0.074074
| 0.009259
| 0.240741
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e991333e11e3674737935eaa3ae326ec405ea15
| 7,259
|
py
|
Python
|
Source Codes/Assignment1/transformations.py
|
amir-souri/ML-Exam2020
|
8feb614ce8171c2c8e88b0fa385db8b679b68748
|
[
"MIT"
] | null | null | null |
Source Codes/Assignment1/transformations.py
|
amir-souri/ML-Exam2020
|
8feb614ce8171c2c8e88b0fa385db8b679b68748
|
[
"MIT"
] | null | null | null |
Source Codes/Assignment1/transformations.py
|
amir-souri/ML-Exam2020
|
8feb614ce8171c2c8e88b0fa385db8b679b68748
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
import functools as fu
import cv2
import random as rand
def transform_points(m, points):
""" It transforms the given point/points using the given transformation matrix.
:param m: A 3x3 matrix
The transformation matrix which will be used for the transformation.
:param points: numpy array, list
The point/points to be transformed using the given transformation matrix.
:return: The transformed point/points.
"""
ph = make_homogeneous(points).T
ph = m @ ph
return make_euclidean(ph.T)
def transform_image(image, m):
""" It transforms the given image using the given transformation matrix.
:param image: An image
The image to be transformed given the transformation matrix.
:param m: A 3x3 matrix
The transformation matrix which will be used for the transformation.
:return: The transformed image.
"""
row, col, _ = image.shape
return cv2.warpPerspective(image, m, (col, row))
def make_homogeneous(points):
""" It converts the given point/points in an euclidean coordinates into a homogeneous coordinate
:param points: numpy array, list
The point/points to be converted into a homogeneous coordinate.
:return: The converted point/points in the homogeneous coordinates.
"""
if isinstance(points, list):
points = np.asarray([points], dtype=np.float64)
return np.hstack((points, np.ones((points.shape[0], 1), dtype=points.dtype)))
def make_euclidean(points):
"""It converts the given point/points in a homogeneous coordinate into an euclidean coordinates.
:param points: numpy array, list
The point/points to be converted into an euclidean coordinates.
:return: The converted point/points in the euclidean coordinates.
"""
return points[:, :-1]
def identity():
""" It provides an identity transformation matrix.
:return: An identity matrix (3 x 3) using homogeneous coordinates.
"""
return np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]],
dtype=np.float64)
def rotating(θ=0):
""" It provides a rotation matrix given θ degrees which can then be used to rotate 2D point/points or an image
clockwise about the origin. If you want to rotate counterclockwise pass a negative degree.
:param θ: int
The rotation angle in degrees. The default value is 0, which means the resulting matrix won't rotate
the point/points or the image at all.
:returns: The rotation matrix (3 x 3) using homogeneous coordinates.
"""
θ = np.radians(θ)
cos = math.cos(θ)
sin = math.sin(θ)
return np.array([[cos, sin, 0],
[-sin, cos, 0],
[0, 0, 1]],
dtype=np.float64)
def translating(t_x=0, t_y=0):
""" It provides a translate matrix given quantity t_x and t_y for shifting x and y axes respectively.It can then
be used to translate or ”shift” a 2D point/points or an image.
as well as the y-axis by t_y.
:param t_x: int
The amount of shifting in the direction of the x-axis
:param t_y: int
The amount of shifting in the direction of the y-axis
The default values for both are 0. That is it does not translate the point/points or the image when applied.
:returns: The translation matrix (3 x 3) in homogeneous coordinates.
"""
return np.array([[1, 0, t_x],
[0, 1, t_y],
[0, 0, 1]],
dtype=np.float64)
def scaling(scale_x=1, scale_y=1):
""" It provides a scale matrix given quantity scale_x and scale_y for scaling x and y axes respectively.It can then
be used to scale a 2D point/points or an image.
scales (enlarge or shrink) the given 2D point/points in the direction of the x-axis by scale_x
as well as the y-axis by scale_x.
:param scale_x: int
The scale factor by which we wish to enlarge/shrink the point/points in the direction of the x-axis.
:param scale_y: int
The scale factor by which we wish to enlarge/shrink the point/points in the direction of the y-axis.
The default values for both are 1. That is it does not scale the point/points or the image when applied.
:return: The scaling matrix (3 x 3) in homogeneous coordinates.
"""
return np.array([[scale_x, 0, 0],
[0, scale_y, 0],
[0, 0, 1]],
dtype=np.float64)
def arbitrary():
"""
:return: An arbitrary (3 x 3) transformation matrix built from the translating, scaling and rotating functions with random parameters.
"""
θ = rand.randint(-360, 361)
r = rotating(θ)
sx, sy = rand.sample(range(-10, 11), 2)
s = scaling(sx, sy)
tx, ty = rand.sample(range(-400, 401), 2)
t = translating(tx, ty)
I = identity()
if 0 <= tx <= 200:
return s @ t @ r @ I
else:
return r @ s @ I @ t
def invert(m):
""" It provides a matrix for performing the inversion.
:param m: a (3 x 3) matrix.
:return: The inverse of the given matrix.
"""
d = np.linalg.det(m)
if d != 0:
return np.linalg.inv(m).astype(dtype=np.float64)
else:
raise Exception("It is a non-invertible matrix")
def combine(*transformations):
""" It combines the given transformation matrices.
Be aware of the order in which you pass the transformation matrices, since the combined matrix applies them in that order.
:param transformations: (3 x 3) transformation matrices. As many as you want.
The matrices to be combined.
:return: The combined matrix (3 x 3).
"""
transformations = reversed(transformations)
return fu.reduce(lambda tr1, tr2: tr1 @ tr2, transformations)
def learn_affine(srs, tar):
""" It finds the affine transformation matrix between the two given triangles (3 points).
A x = b => x = inv(A) b
:param srs: three 2D points in homogeneous coordinates representing a triangle.
The source points.
:param tar: three 2D points in homogeneous coordinates representing a triangle.
The target points.
:return: The affine transformation matrix.
"""
x1, x2, x3 = srs[0, 0], srs[1, 0], srs[2, 0]
y1, y2, y3 = srs[0, 1], srs[1, 1], srs[2, 1]
b = tar.flatten()
a = np.array([[x1, y1, 1, 0, 0, 0],
[0, 0, 0, x1, y1, 1],
[x2, y2, 1, 0, 0, 0],
[0, 0, 0, x2, y2, 1],
[x3, y3, 1, 0, 0, 0],
[0, 0, 0, x3, y3, 1]],
dtype=np.float64)
d = np.linalg.det(a)
if d != 0:
ai = np.linalg.inv(a)
x = ai @ b
x = x.flatten()
a1, a2, a3, a4 = x[0], x[1], x[3], x[4]
tx, ty = x[2], x[5]
aff_transformation = np.array([[a1, a2, tx],
[a3, a4, ty],
[0, 0, 1]],
dtype=np.float64)
return aff_transformation
else:
raise Exception("It is a non-invertible matrix")
| 38.005236
| 121
| 0.609726
| 1,076
| 7,259
| 4.086431
| 0.194238
| 0.012736
| 0.011599
| 0.008187
| 0.440073
| 0.429156
| 0.398681
| 0.335001
| 0.284512
| 0.256311
| 0
| 0.036062
| 0.293291
| 7,259
| 191
| 122
| 38.005236
| 0.821053
| 0.529963
| 0
| 0.235955
| 0
| 0
| 0.018948
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.134831
| false
| 0
| 0.05618
| 0
| 0.348315
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e996e95c8e4d35efb2b156fddfb2e6b4af528f7
| 665
|
py
|
Python
|
scripts/identify_circular-from-reassembled-flye.py
|
SeanChenHCY/metaLAS
|
854db6a966a11ab628ad33fa3264a74cfc54eef9
|
[
"MIT"
] | 1
|
2021-10-04T07:45:18.000Z
|
2021-10-04T07:45:18.000Z
|
scripts/identify_circular-from-reassembled-flye.py
|
SeanChenHCY/metaLAS
|
854db6a966a11ab628ad33fa3264a74cfc54eef9
|
[
"MIT"
] | 1
|
2021-08-30T08:01:35.000Z
|
2021-09-02T09:49:01.000Z
|
scripts/identify_circular-from-reassembled-flye.py
|
SeanChenHCY/metaLAS
|
854db6a966a11ab628ad33fa3264a74cfc54eef9
|
[
"MIT"
] | null | null | null |
#sys.argv[1] = bin_name, sys.argv[2] = bin_dir, sys.argv[3] = output_dir
import os, sys
bin_name=sys.argv[1]
bin_dir = sys.argv[2]
output_dir = sys.argv[3]
large_circular = []
flye_info = open(bin_dir + '/assembly_info.txt','r')
read_info = True
while read_info:
read_info = flye_info.readline()
entry = read_info.split('\t')
if len(entry) > 3:
if (entry[3] == "Y") and (int(entry[1]) > 2000000):
large_circular.append(entry[0])
for i in large_circular:
os.system('seqkit grep -n -p '+ i + ' ' + bin_dir + '/assembly.fasta -o ' +output_dir + '/' + bin_name + '_'+ i + '_unpolished_rf.fasta' )
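# A hypothetical invocation (illustrative paths; requires seqkit on PATH and a
# flye output directory containing assembly_info.txt and assembly.fasta):
#   python identify_circular-from-reassembled-flye.py bin42 flye_out/bin42 circular_out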
| 25.576923
| 142
| 0.628571
| 108
| 665
| 3.648148
| 0.425926
| 0.124365
| 0.101523
| 0.055838
| 0.241117
| 0.111675
| 0.111675
| 0.111675
| 0
| 0
| 0
| 0.033582
| 0.193985
| 665
| 25
| 143
| 26.6
| 0.701493
| 0.147368
| 0
| 0
| 0
| 0
| 0.152134
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e9c6e139be0c60a12be6bd43fc6f12d8b899a15
| 1,611
|
py
|
Python
|
intg/src/main/python/apache_atlas/model/lineage.py
|
alexwang789/atlas
|
b265f5e80e02d69ea7bbcfd9d0770361ca7fa185
|
[
"Apache-2.0"
] | 4
|
2020-10-30T06:15:23.000Z
|
2022-02-18T09:56:27.000Z
|
intg/src/main/python/apache_atlas/model/lineage.py
|
alexwang789/atlas
|
b265f5e80e02d69ea7bbcfd9d0770361ca7fa185
|
[
"Apache-2.0"
] | null | null | null |
intg/src/main/python/apache_atlas/model/lineage.py
|
alexwang789/atlas
|
b265f5e80e02d69ea7bbcfd9d0770361ca7fa185
|
[
"Apache-2.0"
] | 4
|
2020-10-30T07:21:57.000Z
|
2021-10-21T16:07:02.000Z
|
#!/usr/bin/env/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
class AtlasLineageInfo:
lineageDirection_enum = enum.Enum('lineageDirection_enum', 'INPUT OUTPUT BOTH', module=__name__)
def __init__(self, baseEntityGuid=None, lineageDirection=None, lineageDepth=None, guidEntityMap=None, relations=None):
self.baseEntityGuid = baseEntityGuid
self.lineageDirection = lineageDirection
self.lineageDepth = lineageDepth
self.guidEntityMap = guidEntityMap if guidEntityMap is not None else {}
self.relations = relations if relations is not None else set()
class LineageRelation:
def __init__(self, fromEntityId=None, toEntityId=None, relationshipId=None):
self.fromEntityId = fromEntityId
self.toEntityId = toEntityId
self.relationshipId = relationshipId
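# A minimal construction sketch (hypothetical GUIDs) showing how the two model
# classes fit together; `relations` is a set holding LineageRelation objects.
if __name__ == "__main__":
    rel = LineageRelation(fromEntityId="guid-1", toEntityId="guid-2", relationshipId="rel-1")
    info = AtlasLineageInfo(
        baseEntityGuid="guid-1",
        lineageDirection=AtlasLineageInfo.lineageDirection_enum.BOTH,
        lineageDepth=3,
    )
    info.relations.add(rel)
    print(info.lineageDirection, len(info.relations))  # lineageDirection_enum.BOTH 1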
| 41.307692
| 122
| 0.7455
| 200
| 1,611
| 5.935
| 0.505
| 0.050548
| 0.021904
| 0.026959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003063
| 0.189323
| 1,611
| 39
| 123
| 41.307692
| 0.905819
| 0.481068
| 0
| 0
| 0
| 0
| 0.046341
| 0.02561
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e9ef4bfbf0ebd4b0914ac2783452487117ef345
| 7,247
|
py
|
Python
|
ucdev/register.py
|
vpetrigo/python-ucdev
|
d3606fd1244dfefef039c7c38acb8b4a1f086c29
|
[
"MIT"
] | 11
|
2015-07-08T01:28:01.000Z
|
2022-01-26T14:29:47.000Z
|
ucdev/register.py
|
vpetrigo/python-ucdev
|
d3606fd1244dfefef039c7c38acb8b4a1f086c29
|
[
"MIT"
] | 5
|
2017-12-07T15:04:00.000Z
|
2021-06-02T14:47:14.000Z
|
ucdev/register.py
|
vpetrigo/python-ucdev
|
d3606fd1244dfefef039c7c38acb8b4a1f086c29
|
[
"MIT"
] | 4
|
2017-02-18T18:20:13.000Z
|
2022-03-23T16:21:20.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8-unix -*-
"""
FOO = Register("A:4 B:4", 0x12)
BAR = Register("B:4 C:4", 0x23)
# evals as int which is a register address
print(FOO == 0x12)
# each field attribute returns a mask for that field
print(FOO.B == 0b00001111)
print(BAR.B == 0b11110000)
# ERROR: Register definition is readonly
FOO.B = 0b10101010
# creates register instance with initial value
foo = FOO(0xAC)
print(foo.A == 0xA)
print(foo.B == 0xC)
print(foo == 0xAC)
foo.B = 0
print(foo == 0xA0)
"""
import sys, os
from bitstring import Bits, BitArray
"""
Convert values of various types into a Bits value.
"""
def to_bits(val, bitlen):
if isinstance(val, str) or isinstance(val, bytearray):
return Bits(bytes=val, length=bitlen)
elif isinstance(val, Bits):
return Bits(bytes=val.bytes, length=bitlen)
elif isinstance(val, RegisterValue):
return Bits(bytes=val.value.bytes, length=bitlen)
return Bits(uint=val, length=bitlen)
"""
Installs a filter function that blocks assignment to non-existing attributes.
NOTE:
This replaces belonging class of passed object to dynamically
generated subclass of the original class.
"""
def protect_object(obj):
sub = type("Protected" + type(obj).__name__, (type(obj),), {})
fset = sub.__setattr__
def fset_wrap(self, key, val):
if not hasattr(self, key):
raise AttributeError("Access denied for key: %s" % key)
return fset(self, key, val)
sub.__setattr__ = fset_wrap
obj.__class__ = sub
"""
Generic class to wrap built-in types with custom attributes.
"""
class Value(object):
def __new__(cls, arg, **kw):
return type(cls.__name__, (type(arg), cls, ), kw)(arg)
class Field(Bits):
# NOTE:
# Subclassing bitstring.* is a pain, so I'll just workaround it
# by a factory method.
@classmethod
def create(cls, value, masklen, bitlen, offset):
field = Bits.__new__(cls, uint=value, length=masklen)
field.__offset = offset
field.__bitlen = bitlen
return field
@property
def offset(self):
return self.__offset
@property
def bitlen(self):
return self.__bitlen
class Register(int):
def __new__(cls, desc, address):
r_fields = []
r_bitlen = 0
# parse register description
for f in desc.split():
# expected: f in (":", "HOGE", "HOGE:123", ":123")
pair = f.split(":")
if len(pair) == 2:
f_name, f_bitlen = pair[0], int(pair[1]) if pair[1] else 1
else:
f_name, f_bitlen = pair[0], 1
r_fields.append((f_name, f_bitlen))
r_bitlen += f_bitlen
# returns bitmask implemented as readonly property
def makeprop(r_bitlen, f_bitlen, f_offset):
value = ((1 << f_bitlen) - 1) << f_offset
field = Field.create(value, r_bitlen, f_bitlen, f_offset)
return property(lambda x:field)
# generate property from register description
r_fields.reverse()
kw = {}
f_offset = 0
for f_name, f_bitlen in r_fields:
if len(f_name) > 0:
kw[f_name] = makeprop(r_bitlen, f_bitlen, f_offset)
f_offset += f_bitlen
r_fields.reverse()
# dynamically generate class for this register configuration
sub = type(cls.__name__, (cls, ), kw)
sub.__fields = [k for k,v in r_fields if k]
sub.__length = r_bitlen
obj = int.__new__(sub, address)
protect_object(obj)
return obj
@property
def fields(self):
return list(self.__fields)
@property
def length(self):
return self.__length
"""
Returns a new register instance with given initial value.
"""
def __call__(self, *args, **kwargs):
reg = RegisterValue(self, 0)
if args:
reg.value = args[0]
for k, v in kwargs.items():
setattr(reg, k, v)
return reg
class RegisterValue(object):
def __new__(cls, reg, value):
if cls is not RegisterValue:
return object.__new__(cls)
def makeprop(field):
def fget(self):
fval = (self.__value & field) >> field.offset
return Bits(uint=fval.uint, length=field.bitlen)
def fset(self, val):
curval = self.__value
newval = to_bits(val, curval.length) << field.offset
curval ^= field & curval
self.__value = curval | newval
self.__notify()
return property(fget, fset)
kw = {}
for f_name in reg.fields:
field = getattr(reg, f_name)
kw[f_name] = makeprop(field)
obj = type(cls.__name__, (cls, ), kw)(reg, value)
obj.__reg = reg
obj.__mon = {}
obj.value = value
protect_object(obj)
return obj
@property
def length(self):
return self.__reg.length
@property
def value(self):
return BitArray(bytes=self.__value.tobytes())
@value.setter
def value(self, value):
self.__value = to_bits(value, self.__reg.length)
self.__notify()
@property
def fields(self):
return self.__reg.fields
def subscribe(self, func):
self.__mon[func] = 1
def unsubscribe(self, func):
if func in self.__mon:
del self.__mon[func]
def __notify(self, *args, **kwargs):
for func in self.__mon.keys():
func(self, *args, **kwargs)
def __repr__(self):
rep = []
for f_name in self.fields:
field = getattr(self, f_name)
rep.append("{0}={1}".format(f_name, field))
return "(" + ", ".join(rep) + ")"
"""
Returns a new register value instance with the same initial value.
"""
def __call__(self, *args, **kwargs):
reg = RegisterValue(self.__reg, args[0] if args else self.value)
for k, v in kwargs.items():
setattr(reg, k, v)
return reg
def __and__(self, v):
return self.value & to_bits(v, self.length)
def __or__(self, v):
return self.value | to_bits(v, self.length)
def __xor__(self, v):
return self.value ^ to_bits(v, self.length)
def __bool__(self):
return bool(self.value.uint)
if __name__ == "__main__":
from IPython import embed
def handle_exception(atype, value, tb):
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
# we are in interactive mode or we don't have a tty-like
# device, so we call the default hook
sys.__excepthook__(atype, value, tb)
else:
# we are NOT in interactive mode, print the exception...
import traceback
traceback.print_exception(atype, value, tb)
print()
# ...then start the debugger in post-mortem mode.
from IPython import embed
embed()
sys.excepthook = handle_exception
REG = Register("FOO:3 :1 BAR:4", 0x12)
print(REG)
print(REG.FOO)
print(REG.BAR)
reg = REG(0xAC)
print(reg)
print(reg.FOO)
print(reg.BAR)
embed()
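# A small sketch of the subscribe/notify hook, reusing the demo REG above; the
# callback and variable names below are illustrative. Any callable registered
# via subscribe() is invoked each time a field of the register value changes.
def on_change(changed, *args, **kwargs):
    print("register changed:", changed)
reg2 = REG(0x00)
reg2.subscribe(on_change)
reg2.FOO = 0b101  # setting a field triggers on_change via the internal notify hook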
| 27.660305
| 74
| 0.588519
| 939
| 7,247
| 4.330138
| 0.237487
| 0.014757
| 0.020659
| 0.011805
| 0.180275
| 0.147565
| 0.121495
| 0.089523
| 0.074766
| 0.074766
| 0
| 0.015785
| 0.300676
| 7,247
| 261
| 75
| 27.766284
| 0.786504
| 0.137712
| 0
| 0.213415
| 0
| 0
| 0.012322
| 0
| 0
| 0
| 0.001388
| 0
| 0
| 1
| 0.182927
| false
| 0
| 0.030488
| 0.073171
| 0.402439
| 0.04878
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e9f896246ab8606b9ddf1a41403635bb424c413
| 2,463
|
py
|
Python
|
src/myapp/tests/test_utils.py
|
thinkAmi/DjangoCongress_JP_2019_talk
|
0b746f62808d979c1570de80084686f709996e1d
|
[
"Unlicense"
] | 1
|
2019-05-18T04:34:59.000Z
|
2019-05-18T04:34:59.000Z
|
src/myapp/tests/test_utils.py
|
thinkAmi/DjangoCongress_JP_2019_talk
|
0b746f62808d979c1570de80084686f709996e1d
|
[
"Unlicense"
] | null | null | null |
src/myapp/tests/test_utils.py
|
thinkAmi/DjangoCongress_JP_2019_talk
|
0b746f62808d979c1570de80084686f709996e1d
|
[
"Unlicense"
] | null | null | null |
import pathlib
from django.conf import settings
from django.core import mail
from django.core.mail import EmailMessage
from django.test import TestCase
class TestSendMail(TestCase):
def _callFUT(self, encoding='utf-8', has_attachment=False):
from myapp.utils import my_send_mail
my_send_mail(encoding=encoding, has_attachment=has_attachment)
def test_send_multiple(self):
# before running, the mailbox is empty
self.assertEqual(len(mail.outbox), 0)
# after one call, one message is in the mailbox
self._callFUT()
self.assertEqual(len(mail.outbox), 1)
# after a second call, two messages are in the mailbox
self._callFUT()
self.assertEqual(len(mail.outbox), 2)
def test_types(self):
self._callFUT()
# the outbox is a list of EmailMessage objects: list(EmailMessage(), ...)
self.assertTrue(isinstance(mail.outbox, list))
self.assertTrue(isinstance(mail.outbox[0], EmailMessage))
def test_mail_fields(self):
self._callFUT()
actual = mail.outbox[0]
self.assertEqual(actual.subject, '件名')
self.assertEqual(actual.body, '本文')
self.assertEqual(actual.from_email, '差出人 <from@example.com>')
# recipient fields are set as lists
self.assertEqual(actual.to,
['送信先1 <to1@example.com>',
'送信先2 <to2@example.com>'],)
self.assertEqual(actual.cc, ['シーシー <cc@example.com>'])
self.assertEqual(actual.bcc, ['ビーシーシー <bcc@example.com>'])
self.assertEqual(actual.reply_to, ['返信先 <reply@example.com>'])
# the extra headers should be included as well
self.assertEqual(actual.extra_headers['Sender'], 'sender@example.com')
def test_encoding_of_iso2022jp(self):
self._callFUT(encoding='iso-2022-jp')
actual = mail.outbox[0]
# the EmailMessage stores the subject as utf-8
self.assertEqual(actual.subject, '件名')
def test_attachment(self):
self._callFUT(has_attachment=True)
actual = mail.outbox[0]
self.assertTrue(isinstance(actual.attachments, list))
# each attachment is a (filename, content, mimetype) tuple
self.assertTrue(isinstance(actual.attachments[0], tuple))
# the attachment content is of type bytes
self.assertTrue(isinstance(actual.attachments[0][1], bytes))
# verify the attachment file itself
img = pathlib.Path(settings.STATICFILES_DIRS[0]).joinpath(
'images', 'shinanogold.png')
with img.open('rb') as f:
expected_img = f.read()
self.assertEqual(actual.attachments[0][1], expected_img)
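# These tests can be run with Django's test runner (hypothetical project layout):
#   python manage.py test myapp.tests.test_utils
# mail.outbox is populated by the in-memory (locmem) email backend that the test
# runner installs automatically, so no real mail is sent.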
| 32.84
| 78
| 0.639464
| 276
| 2,463
| 5.597826
| 0.365942
| 0.126214
| 0.135922
| 0.042718
| 0.314563
| 0.104854
| 0.050485
| 0
| 0
| 0
| 0
| 0.016489
| 0.236703
| 2,463
| 74
| 79
| 33.283784
| 0.805319
| 0.082826
| 0
| 0.191489
| 0
| 0
| 0.090302
| 0
| 0
| 0
| 0
| 0
| 0.382979
| 1
| 0.12766
| false
| 0
| 0.12766
| 0
| 0.276596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9ea0c546a1e521014a32f12995b27d65c50cbaf0
| 34,133
|
py
|
Python
|
app.py
|
luyaozou/conjugaison
|
c8d100b38e1067af17f428cba4af925465d5fd52
|
[
"MIT"
] | null | null | null |
app.py
|
luyaozou/conjugaison
|
c8d100b38e1067af17f428cba4af925465d5fd52
|
[
"MIT"
] | null | null | null |
app.py
|
luyaozou/conjugaison
|
c8d100b38e1067af17f428cba4af925465d5fd52
|
[
"MIT"
] | null | null | null |
#! encoding = utf-8
""" Practice French conjugaison """
import sys
from os.path import isfile
from time import sleep
from sqlite3 import Error as dbError
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtGui import QTextOption, QKeySequence
from dictionary import TENSE_MOODS, PERSONS
from dictionary import conjug, conjug_all
from config import Config, from_json_, to_json
from lang import LANG_PKG
from db import AppDB
class MainWindow(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.setMinimumWidth(600)
self.setMinimumHeight(700)
self.resize(QtCore.QSize(800, 750))
self.config = Config()
if isfile('config.json'):
from_json_(self.config, 'config.json')
self.config.nft = 0
self.config.nfc = 0
self.config.nbt = 0
self.config.nbc = 0
self.setWindowTitle(LANG_PKG[self.config.lang]['main_windowtitle'])
self.setStyleSheet('font-size: {:d}pt'.format(self.config.font_size))
self.db = AppDB('app.db')
centerWidget = QtWidgets.QWidget()
self.box1 = Box1(self.db, self.config, parent=self)
self.box2 = Box2(self.db, self.config, parent=self)
self.box3 = Box3(self.db, self.config, parent=self)
thisLayout = QtWidgets.QVBoxLayout()
thisLayout.setAlignment(QtCore.Qt.AlignHCenter)
thisLayout.setSpacing(10)
thisLayout.addWidget(self.box1)
thisLayout.addWidget(self.box2)
thisLayout.addWidget(self.box3)
centerWidget.setLayout(thisLayout)
# set central widget
self.setCentralWidget(centerWidget)
self.box1.btnCheck.clicked.connect(self.box3.btnClear.click)
self.box2.btnCheck.clicked.connect(self.box3.btnClear.click)
self.box1.btnGen.clicked.connect(self.box3.btnClear.click)
self.box2.btnGen.clicked.connect(self.box3.btnClear.click)
self.box1.btnHelp.clicked.connect(self._slot_help_box1)
self.box2.btnHelp.clicked.connect(self._slot_help_box2)
self.dConfig = DialogConfig(self.config, parent=self)
self.dPref = DialogPref(self.config, parent=self)
self.dAddVoc = DialogAddVoc(self.db, self.config, parent=self)
self.dBrowse = DialogBrowse(self.db, self.config, parent=self)
self.dStats = DialogStats(self.db, self.config, parent=self)
# apply config to dialogs
self.dConfig.set_tense_mood(self.config.enabled_tm_idx)
self.dPref.accepted.connect(self.apply_pref)
menubar = MenuBar(parent=self)
self.setMenuBar(menubar)
self.statusBar = StatusBar(self.config, parent=self)
self.setStatusBar(self.statusBar)
self.statusBar.refresh(*self.db.num_expired_entries(self.config.enabled_tm_idx))
self.box1.sig_checked.connect(lambda: self.statusBar.refresh(
*self.db.num_expired_entries(self.config.enabled_tm_idx)))
self.box2.sig_checked.connect(lambda: self.statusBar.refresh(
*self.db.num_expired_entries(self.config.enabled_tm_idx)))
menubar.actionConfig.triggered.connect(self._config)
menubar.actionPref.triggered.connect(self.dPref.exec)
menubar.actionAddVoc.triggered.connect(self.dAddVoc.exec)
menubar.actionBrowse.triggered.connect(self.dBrowse.exec)
menubar.actionStats.triggered.connect(self.dStats.exec)
self.statusBar.showMessage(LANG_PKG[self.config.lang]['status_bar_msg'],
1000)
self.setDisabled(True)
self.db.check_has_conjug()
self.setDisabled(False)
def _config(self):
# retrieve current checked tense mood pairs
self.dConfig.exec()
if self.dConfig.result() == QtWidgets.QDialog.Accepted:
tm_idx = self.dConfig.get_tense_mood()
if tm_idx:
# apply the new checked tms
self.config.enabled_tm_idx = tm_idx
self.box2.set_tm(self.config.enabled_tm_idx)
self.box3.set_tm(self.config.enabled_tm_idx)
self.statusBar.refresh(*self.db.num_expired_entries(tm_idx))
else:
d = QtWidgets.QMessageBox(
QtWidgets.QMessageBox.Warning,
LANG_PKG[self.config.lang]['config_tm_warning_title'],
LANG_PKG[self.config.lang]['config_tm_warning_body'])
d.exec_()
# resume previous ones
self.dConfig.set_tense_mood(self.config.enabled_tm_idx)
else:
# resume previous ones
self.dConfig.set_tense_mood(self.config.enabled_tm_idx)
@QtCore.pyqtSlot()
def apply_pref(self):
sleep(0.02)
# apply font
self.setStyleSheet('font-size: {:d}pt'.format(self.config.font_size))
# apply language package
self.setWindowTitle(LANG_PKG[self.config.lang]['main_windowtitle'])
self.box1.apply_lang()
self.box2.apply_lang()
self.box3.apply_lang()
self.dPref.apply_lang()
self.dConfig.apply_lang()
self.dAddVoc.apply_lang()
self.dBrowse.apply_lang()
self.dStats.apply_lang()
self.menuBar().apply_lang(LANG_PKG[self.config.lang])
self.statusBar.refresh(*self.db.num_expired_entries(self.config.enabled_tm_idx))
@QtCore.pyqtSlot()
def _slot_help_box1(self):
verb, tense_mood = self.box1.ask_help()
self.box3.editVerb.setText(verb)
self.box3.comboTenseMood.setCurrentText(tense_mood)
self.box1.btnCheck.setDisabled(True)
@QtCore.pyqtSlot()
def _slot_help_box2(self):
verb, tense_mood = self.box2.ask_help()
self.box3.editVerb.setText(verb)
self.box3.comboTenseMood.setCurrentText(tense_mood)
self.box2.btnCheck.setDisabled(True)
def closeEvent(self, ev):
self.db.update_stat(self.config.nft, self.config.nfc,
self.config.nbt, self.config.nbc)
# close database
self.db.close()
# save setting to local file
self.dPref.fetch_config()
to_json(self.config, 'config.json')
class Box1(QtWidgets.QGroupBox):
sig_checked = QtCore.pyqtSignal()
def __init__(self, db, config, parent=None):
super().__init__(parent)
self.db = db
self.config = config
self.setTitle(LANG_PKG[config.lang]['box1_title'])
self._timer = QtCore.QTimer()
self._timer.setSingleShot(True)
self._timer.setInterval(1000)
self._timer.timeout.connect(self._gen)
self.lblVerb = QtWidgets.QLabel()
self.lblVerb.setStyleSheet('font-size: 14pt; color: #2c39cf; font: bold; ')
self.lblTense = QtWidgets.QLabel()
self.lblTense.setFixedWidth(150)
self.lblMood = QtWidgets.QLabel()
self.lblMood.setFixedWidth(150)
self.lblPerson = QtWidgets.QLabel()
self.editInput = QtWidgets.QLineEdit()
self.btnGen = QtWidgets.QPushButton(LANG_PKG[config.lang]['box1_btnGen'])
self.btnCheck = QtWidgets.QPushButton(LANG_PKG[config.lang]['box1_btnCheck'])
self.btnCheck.setToolTip('Shift + Enter')
self.btnCheck.setShortcut(QKeySequence(QtCore.Qt.SHIFT | QtCore.Qt.Key_Return))
self.lblCk = QtWidgets.QLabel()
self.lblCk.setFixedWidth(30)
self.lblExp = QtWidgets.QLabel()
self.lblExp.setSizePolicy(QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Minimum)
self.btnHelp = QtWidgets.QPushButton(LANG_PKG[config.lang]['box1_btnHelp'])
self.btnHelp.setToolTip('Ctrl + Shift + Enter')
self.btnHelp.setShortcut(QKeySequence(QtCore.Qt.CTRL | QtCore.Qt.SHIFT | QtCore.Qt.Key_Return))
self._answer = '*' # to avoid matching empty input, which would give a false correct
self._entry_id = -1
self._tm_idx = -1
row1 = QtWidgets.QHBoxLayout()
row1.setAlignment(QtCore.Qt.AlignLeft)
row1.addWidget(self.lblVerb)
row1.addWidget(self.lblExp)
row2 = QtWidgets.QHBoxLayout()
row2.addWidget(self.lblTense)
row2.addWidget(self.lblMood)
row2.addWidget(self.lblPerson)
row2.addWidget(self.editInput)
row2.addWidget(self.lblCk)
row3 = QtWidgets.QHBoxLayout()
row3.setAlignment(QtCore.Qt.AlignRight)
row3.addWidget(self.btnGen)
row3.addWidget(self.btnCheck)
row3.addWidget(self.btnHelp)
thisLayout = QtWidgets.QVBoxLayout()
thisLayout.setSpacing(10)
thisLayout.setAlignment(QtCore.Qt.AlignHCenter)
thisLayout.addLayout(row1)
thisLayout.addLayout(row2)
thisLayout.addLayout(row3)
self.setLayout(thisLayout)
self.btnGen.clicked.connect(self._gen)
self.btnCheck.clicked.connect(self._ck)
def _gen(self):
""" Generate a verb & a conjugaison """
# clear previous result
self.lblCk.clear()
self.editInput.clear()
# draw random verb until there is a valid conjugation
# this is to avoid those few special verbs that do not have full conjug.
try:
while True:
# every <retry_intvl> practices, retrieve the verb with
# maximum incorrect number and try again
if not (self.config.nft % self.config.retry_intvl):
entry_id, verb, explanation, tm_idx, pers_idx = self.db.choose_verb(
'practice_forward', self.config.enabled_tm_idx,
order='correct_num ASC')
else: # randomly select a verb
entry_id, verb, explanation, tm_idx, pers_idx = self.db.choose_verb(
'practice_forward', self.config.enabled_tm_idx)
tense, mood = TENSE_MOODS[tm_idx]
answer = conjug(verb, tense, mood, pers_idx)
if answer:
self.lblVerb.setText(verb)
self.lblExp.setText(explanation)
self.lblPerson.setText(PERSONS[pers_idx])
if mood == 'impératif':
pass
else:
self.editInput.setText(PERSONS[pers_idx])
self.lblTense.setText(tense)
self.lblMood.setText(mood)
self.editInput.setFocus()
self._answer = answer
self._entry_id = entry_id
self._tm_idx = tm_idx
self.config.nft += 1 # add 1 to n total forward
self.btnCheck.setDisabled(False)
break
except ValueError as err:
d = QtWidgets.QMessageBox(
QtWidgets.QMessageBox.Critical,
LANG_PKG[self.config.lang]['msg_error_title'], str(err))
d.exec_()
except TypeError:
d = QtWidgets.QMessageBox(
QtWidgets.QMessageBox.Warning,
LANG_PKG[self.config.lang]['msg_warning_title'],
LANG_PKG[self.config.lang]['msg_warning_no_entry'])
d.exec_()
except KeyError as err:
d = QtWidgets.QMessageBox(
QtWidgets.QMessageBox.Warning,
LANG_PKG[self.config.lang]['msg_warning_title'],
LANG_PKG[self.config.lang]['msg_warning_no_config'].format(str(err))
)
d.exec_()
def _ck(self):
""" Check the answer """
txt = self.editInput.text()
# remove extra spaces and only put 1
txt_striped = ' '.join(txt.split())
if txt_striped == self._answer:
self.lblCk.setText('✓')
self.lblCk.setStyleSheet('font-size: 14pt; font: bold; color: #009933')
self.config.nfc += 1
self._timer.start()
else:
self.lblCk.setText('🞪')
self.lblCk.setStyleSheet('font-size: 14pt; font: bold; color: #D63333')
try:
self.db.update_res('practice_forward', self._entry_id, txt_striped == self._answer)
self.sig_checked.emit()
except TypeError:
d = QtWidgets.QMessageBox(
QtWidgets.QMessageBox.Warning,
LANG_PKG[self.config.lang]['msg_warning_title'],
LANG_PKG[self.config.lang]['msg_warning_no_entry']
)
d.exec_()
def ask_help(self):
return self.lblVerb.text(), ', '.join(TENSE_MOODS[self._tm_idx])
def apply_lang(self):
self.setTitle(LANG_PKG[self.config.lang]['box1_title'])
self.btnGen.setText(LANG_PKG[self.config.lang]['box1_btnGen'])
self.btnCheck.setText(LANG_PKG[self.config.lang]['box1_btnCheck'])
self.btnHelp.setText(LANG_PKG[self.config.lang]['box1_btnHelp'])
class Box2(QtWidgets.QGroupBox):
sig_checked = QtCore.pyqtSignal()
def __init__(self, db, config, parent=None):
super().__init__(parent)
self.db = db
self.config = config
self.setTitle(LANG_PKG[config.lang]['box2_title'])
self._timer = QtCore.QTimer()
self._timer.setSingleShot(True)
self._timer.setInterval(1000)
self._timer.timeout.connect(self._gen)
self.btnGen = QtWidgets.QPushButton(LANG_PKG[config.lang]['box2_btnGen'])
self.btnCheck = QtWidgets.QPushButton(LANG_PKG[config.lang]['box2_btnCheck'])
self.btnCheck.setToolTip('Alt + Enter')
self.btnCheck.setShortcut(QKeySequence(QtCore.Qt.ALT | QtCore.Qt.Key_Return))
self.btnHelp = QtWidgets.QPushButton(LANG_PKG[config.lang]['box2_btnHelp'])
self.btnHelp.setToolTip('Shift + Alt + Enter')
self.btnHelp.setShortcut(QKeySequence(QtCore.Qt.SHIFT | QtCore.Qt.ALT | QtCore.Qt.Key_Return))
self.editVerb = QtWidgets.QLineEdit()
self.comboTenseMood = QtWidgets.QComboBox()
self.comboTenseMood.setFixedWidth(300)
self.set_tm(self.config.enabled_tm_idx)
self.lblCk = QtWidgets.QLabel()
self.lblCk.setFixedWidth(30)
self.lblConjug = QtWidgets.QLabel()
self.lblConjug.setStyleSheet('font-size: 14pt; color: #2c39cf; font: bold; ')
self.lblAns = QtWidgets.QLabel()
self.lblAns.setSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
self.btnGen.clicked.connect(self._gen)
self.btnCheck.clicked.connect(self._ck)
row2 = QtWidgets.QHBoxLayout()
self.lblInf = QtWidgets.QLabel(LANG_PKG[config.lang]['box2_lblInf'])
row2.addWidget(self.lblInf)
row2.addWidget(self.editVerb)
row2.addWidget(self.comboTenseMood)
row2.addWidget(self.lblCk)
row3 = QtWidgets.QHBoxLayout()
row3.setAlignment(QtCore.Qt.AlignRight)
row3.addWidget(self.lblAns)
row3.addWidget(self.btnGen)
row3.addWidget(self.btnCheck)
row3.addWidget(self.btnHelp)
thisLayout = QtWidgets.QVBoxLayout()
thisLayout.setAlignment(QtCore.Qt.AlignHCenter)
thisLayout.setSpacing(10)
thisLayout.addWidget(self.lblConjug)
thisLayout.addLayout(row2)
thisLayout.addLayout(row3)
self.setLayout(thisLayout)
self.editVerb.editingFinished.connect(self.comboTenseMood.setFocus)
self._answer = '*' # to avoid matching an empty string, which would give a false correct
self._entry_id = -1
self._tm_idx = -1
def set_tm(self, checked_tm_idx):
""" set tense mood options """
self.comboTenseMood.clear()
self.comboTenseMood.addItems([', '.join(TENSE_MOODS[i]) for i in checked_tm_idx])
self.comboTenseMood.adjustSize()
def _gen(self):
""" Generate a conjugaison """
# clear previous result
self.lblCk.clear()
self.editVerb.clear()
# draw random verb until there is a valid conjugation
# this is to avoid those few special verbs that do not have full conjug.
try:
while True:
# every <retry_intvl> practices, retrieve the verb with
# maximum incorrect number and try again
if not (self.config.nbt % self.config.retry_intvl):
entry_id, verb, explanation, tm_idx, pers_idx = self.db.choose_verb(
'practice_backward', self.config.enabled_tm_idx,
order='correct_num ASC')
else: # randomly select a verb
entry_id, verb, explanation, tm_idx, pers_idx = self.db.choose_verb(
'practice_backward', self.config.enabled_tm_idx)
tense, mood = TENSE_MOODS[tm_idx]
conjug_str = conjug(verb, tense, mood, pers_idx)
if conjug_str:
self.lblConjug.setText(conjug_str)
self.lblAns.clear()
self.editVerb.setFocus()
self._answer = verb
self._entry_id = entry_id
self._tm_idx = tm_idx
self.config.nbt += 1 # add 1 to n total backward
self.btnCheck.setDisabled(False)
break
except ValueError as err:
d = QtWidgets.QMessageBox(
QtWidgets.QMessageBox.Critical,
LANG_PKG[self.config.lang]['msg_error_title'], str(err))
d.exec_()
except TypeError:
d = QtWidgets.QMessageBox(
QtWidgets.QMessageBox.Warning,
LANG_PKG[self.config.lang]['msg_warning_title'],
LANG_PKG[self.config.lang]['msg_warning_no_entry'])
d.exec_()
except KeyError as err:
d = QtWidgets.QMessageBox(
QtWidgets.QMessageBox.Warning,
LANG_PKG[self.config.lang]['msg_warning_title'],
LANG_PKG[self.config.lang]['msg_warning_no_config'].format(str(err))
)
d.exec_()
def _ck(self):
""" Check the answer """
is_correct = self.editVerb.text().lower() == self._answer and \
self.comboTenseMood.currentText() == ', '.join(TENSE_MOODS[self._tm_idx])
if is_correct:
self.lblCk.setText('✓')
self.lblCk.setStyleSheet('font-size: 14pt; color: #009933')
self.config.nbc += 1
self._timer.start(1000)
else:
self.lblCk.setText('🞪')
self.lblCk.setStyleSheet('font-size: 14pt; color: #D63333')
self.lblAns.setText(' '.join((self._answer,) + TENSE_MOODS[self._tm_idx]))
self.btnCheck.setDisabled(True)
self._timer.start(5000)
try:
self.db.update_res('practice_backward', self._entry_id, is_correct)
self.sig_checked.emit()
except TypeError:
d = QtWidgets.QMessageBox(
QtWidgets.QMessageBox.Warning,
LANG_PKG[self.config.lang]['msg_warning_title'],
LANG_PKG[self.config.lang]['msg_warning_no_entry']
)
d.exec_()
def ask_help(self):
return self._answer, ', '.join(TENSE_MOODS[self._tm_idx])
def apply_lang(self):
self.setTitle(LANG_PKG[self.config.lang]['box2_title'])
self.btnGen.setText(LANG_PKG[self.config.lang]['box2_btnGen'])
self.btnCheck.setText(LANG_PKG[self.config.lang]['box2_btnCheck'])
self.btnHelp.setText(LANG_PKG[self.config.lang]['box2_btnHelp'])
self.lblInf.setText(LANG_PKG[self.config.lang]['box2_lblInf'])
class Box3(QtWidgets.QGroupBox):
def __init__(self, db, config, parent=None):
super().__init__(parent)
self.config = config
self.db = db
self.setTitle(LANG_PKG[config.lang]['box3_title'])
self.editVerb = QtWidgets.QLineEdit()
self.comboTenseMood = QtWidgets.QComboBox()
self.comboTenseMood.setFixedWidth(300)
self.comboTenseMood.addItems([', '.join(TENSE_MOODS[i]) for i in config.enabled_tm_idx])
self.btnClear = QtWidgets.QPushButton(LANG_PKG[config.lang]['box3_btnClear'])
self.lblExp = QtWidgets.QLabel()
self.lblExp.setWordWrap(True)
self.lblExp.setSizePolicy(QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Minimum)
self.lblResult = QtWidgets.QTextEdit()
self.lblResult.setTextInteractionFlags(QtCore.Qt.TextEditorInteraction)
self.lblResult.setReadOnly(True)
self.btnClear.clicked.connect(self._clear)
self.editVerb.editingFinished.connect(self._search)
self.comboTenseMood.currentIndexChanged.connect(self._search)
row1 = QtWidgets.QHBoxLayout()
row1.addWidget(self.editVerb)
row1.addWidget(self.comboTenseMood)
row1.addWidget(self.btnClear)
thisLayout = QtWidgets.QVBoxLayout()
thisLayout.addLayout(row1)
thisLayout.addWidget(self.lblExp)
thisLayout.addWidget(self.lblResult)
self.setLayout(thisLayout)
def set_tm(self, checked_tm_idx):
""" set tense mood options """
self.comboTenseMood.clear()
self.comboTenseMood.addItems([', '.join(TENSE_MOODS[i]) for i in checked_tm_idx])
def _search(self):
try:
verb = self.editVerb.text().strip()
tense, mood = self.comboTenseMood.currentText().split(', ')
self.lblResult.clear()
self.lblResult.setText('\n'.join(conjug_all(verb, tense, mood)))
self.lblExp.setText(self.db.get_explanation(verb))
except (KeyError, TypeError):
self.lblResult.clear()
self.lblExp.clear()
except (ValueError, IndexError) as err:
self.lblResult.setText(str(err))
self.lblExp.clear()
except dbError:
self.lblResult.setText(LANG_PKG[self.config.lang]['box3_db_error'])
self.lblExp.clear()
def _clear(self):
self.editVerb.clear()
self.lblExp.clear()
self.lblResult.clear()
def apply_lang(self):
self.setWindowTitle(LANG_PKG[self.config.lang]['box3_title'])
self.btnClear.setText(LANG_PKG[self.config.lang]['box3_btnClear'])
class DialogConfig(QtWidgets.QDialog):
def __init__(self, config, parent=None):
super().__init__(parent)
self.config = config
ckLayout = QtWidgets.QVBoxLayout()
self._cklist = []
for i, _tm in enumerate(TENSE_MOODS):
ck = QtWidgets.QCheckBox(", ".join(_tm))
self._cklist.append(ck)
ckLayout.addWidget(ck)
btnLayout = QtWidgets.QHBoxLayout()
btnLayout.setAlignment(QtCore.Qt.AlignRight)
self.btnOk = QtWidgets.QPushButton('Okay')
self.btnCancel = QtWidgets.QPushButton("Cancel")
btnLayout.addWidget(self.btnCancel)
btnLayout.addWidget(self.btnOk)
self.apply_lang()
thisLayout = QtWidgets.QVBoxLayout()
thisLayout.addLayout(ckLayout)
thisLayout.addLayout(btnLayout)
self.setLayout(thisLayout)
self.btnCancel.clicked.connect(self.reject)
self.btnOk.clicked.connect(self.accept)
def get_tense_mood(self):
checked_tense_moods = []
for i, ck in enumerate(self._cklist):
if ck.isChecked():
checked_tense_moods.append(i)
return checked_tense_moods
def set_tense_mood(self, tm_idx):
for i, ck in enumerate(self._cklist):
if ck.isEnabled():
ck.setChecked(i in tm_idx)
def apply_lang(self):
self.setWindowTitle(LANG_PKG[self.config.lang]['dialog_config_title'])
self.btnOk.setText(LANG_PKG[self.config.lang]['btnOK'])
self.btnCancel.setText(LANG_PKG[self.config.lang]['btnCancel'])
class DialogPref(QtWidgets.QDialog):
def __init__(self, config, parent=None):
super().__init__(parent)
self.config = config
self.setWindowTitle('Configure Preferences')
self.lblIntvl = QtWidgets.QLabel()
self.inpIntvl = QtWidgets.QSpinBox()
self.inpIntvl.setMinimum(1)
self.inpIntvl.setValue(config.retry_intvl)
self.lblLang = QtWidgets.QLabel()
self.lblFontSize = QtWidgets.QLabel()
self.inpFontSize = QtWidgets.QSpinBox()
self.inpFontSize.setMinimum(10)
self.inpFontSize.setSuffix(' pt')
self.inpFontSize.setValue(config.font_size)
self.comboLang = QtWidgets.QComboBox()
self.comboLang.addItems(list(LANG_PKG.keys()))
self.comboLang.setCurrentText(config.lang)
prefLayout = QtWidgets.QFormLayout()
prefLayout.addRow(self.lblIntvl, self.inpIntvl)
prefLayout.addRow(self.lblLang, self.comboLang)
prefLayout.addRow(self.lblFontSize, self.inpFontSize)
btnLayout = QtWidgets.QHBoxLayout()
btnLayout.setAlignment(QtCore.Qt.AlignRight)
self.btnOk = QtWidgets.QPushButton('Okay')
self.btnCancel = QtWidgets.QPushButton("Cancel")
self.btnOk.setDefault(True)
btnLayout.addWidget(self.btnCancel)
btnLayout.addWidget(self.btnOk)
self.apply_lang()
thisLayout = QtWidgets.QVBoxLayout()
thisLayout.addLayout(prefLayout)
thisLayout.addLayout(btnLayout)
self.setLayout(thisLayout)
self.btnCancel.clicked.connect(self.reject)
self.btnOk.clicked.connect(self.accept)
self.accepted.connect(self.fetch_config)
def fetch_config(self):
self.config.lang = list(LANG_PKG.keys())[self.comboLang.currentIndex()]
self.config.retry_intvl = self.inpIntvl.value()
self.config.font_size = self.inpFontSize.value()
def apply_lang(self):
self.setWindowTitle(LANG_PKG[self.config.lang]['dialog_pref_title'])
self.lblIntvl.setText(LANG_PKG[self.config.lang]['dialog_pref_lblIntvl'])
self.lblIntvl.setToolTip(LANG_PKG[self.config.lang]['dialog_pref_lblIntvl_tooltip'])
self.lblLang.setText(LANG_PKG[self.config.lang]['dialog_pref_lblLang'])
self.lblFontSize.setText(LANG_PKG[self.config.lang]['dialog_pref_lblFont'])
current_idx = self.comboLang.currentIndex()
self.comboLang.clear()
self.comboLang.addItems(LANG_PKG[self.config.lang]['dialog_pref_comboLang'])
self.comboLang.setCurrentIndex(current_idx)
self.btnOk.setText(LANG_PKG[self.config.lang]['btnOK'])
self.btnCancel.setText(LANG_PKG[self.config.lang]['btnCancel'])
class DialogAddVoc(QtWidgets.QDialog):
def __init__(self, db, config, parent=None):
super().__init__(parent)
self.db = db
self.config = config
self.btnAdd = QtWidgets.QPushButton('Add')
self.btnCancel = QtWidgets.QPushButton('Cancel')
self.btnUpdate = QtWidgets.QPushButton('Update')
self.editVerb = QtWidgets.QLineEdit()
self.editExp = QtWidgets.QTextEdit()
self.editExp.setWordWrapMode(QTextOption.WordWrap)
self.editExp.setTextInteractionFlags(QtCore.Qt.TextEditorInteraction)
self.btnAdd.setDefault(True)
self.btnUpdate.setAutoDefault(True)
btnLayout = QtWidgets.QHBoxLayout()
btnLayout.setAlignment(QtCore.Qt.AlignRight)
btnLayout.addWidget(self.btnCancel)
btnLayout.addWidget(self.btnUpdate)
btnLayout.addWidget(self.btnAdd)
self.btnAdd.clicked.connect(self._add)
self.btnCancel.clicked.connect(self.reject)
self.btnUpdate.clicked.connect(self._update)
self.editVerb.editingFinished.connect(self._check_exist)
self.lblVerb = QtWidgets.QLabel('Verb')
self.lblExp = QtWidgets.QLabel('Explanation')
thisLayout = QtWidgets.QVBoxLayout()
thisLayout.addWidget(self.lblVerb)
thisLayout.addWidget(self.editVerb)
thisLayout.addWidget(self.lblExp)
thisLayout.addWidget(self.editExp)
thisLayout.addLayout(btnLayout)
self.setLayout(thisLayout)
self.apply_lang()
def _add(self):
verb = self.editVerb.text().strip()
explanation = self.editExp.toPlainText().strip()
self.db.add_voc(verb, explanation)
self.editVerb.clear()
self.editExp.clear()
def _update(self):
verb = self.editVerb.text().strip()
explanation = self.editExp.toPlainText().strip()
self.db.update_voc(verb, explanation)
self.editVerb.clear()
self.editExp.clear()
def _check_exist(self):
verb = self.editVerb.text().strip()
is_exist = self.db.check_exist(verb)
if is_exist:
self.btnAdd.setDisabled(True)
self.btnUpdate.setDisabled(False)
self.editExp.setText(self.db.get_explanation(verb))
else:
self.btnAdd.setDisabled(False)
self.btnUpdate.setDisabled(True)
self.editExp.clear()
def apply_lang(self):
self.setWindowTitle(LANG_PKG[self.config.lang]['dialog_addvoc_title'])
self.btnAdd.setText(LANG_PKG[self.config.lang]['dialog_addvoc_btnAdd'])
self.btnCancel.setText(LANG_PKG[self.config.lang]['dialog_addvoc_btnCancel'])
self.btnUpdate.setText(LANG_PKG[self.config.lang]['dialog_addvoc_btnUpdate'])
self.lblVerb.setText(LANG_PKG[self.config.lang]['dialog_addvoc_lblVerb'])
self.lblExp.setText(LANG_PKG[self.config.lang]['dialog_addvoc_lblExp'])
class DialogBrowse(QtWidgets.QDialog):
def __init__(self, db, config, parent=None):
super().__init__(parent)
self.db = db
self.config = config
self.setWindowTitle(LANG_PKG[config.lang]['dialog_browse_title'])
self.setWindowFlags(QtCore.Qt.Window)
self.setMinimumWidth(900)
self.resize(QtCore.QSize(900, 600))
self.setModal(False)
self.btnRefresh = QtWidgets.QPushButton(LANG_PKG[config.lang]['dialog_browse_btnRefresh'])
self.btnRefresh.clicked.connect(self._refresh)
btnLayout = QtWidgets.QHBoxLayout()
btnLayout.setAlignment(QtCore.Qt.AlignRight)
btnLayout.addWidget(self.btnRefresh)
self.dbTable = QtWidgets.QTableWidget()
area = QtWidgets.QScrollArea()
area.setWidget(self.dbTable)
area.setWidgetResizable(True)
area.setAlignment(QtCore.Qt.AlignTop)
thisLayout = QtWidgets.QVBoxLayout()
thisLayout.addWidget(area)
thisLayout.addLayout(btnLayout)
self.setLayout(thisLayout)
self._refresh()
def _refresh(self):
self.dbTable.clearContents()
records = self.db.get_glossary()
rows = len(records)
self.dbTable.setRowCount(rows)
self.dbTable.setColumnCount(2)
for row, rec in enumerate(records):
for col, x in enumerate(rec):
item = QtWidgets.QTableWidgetItem(str(x))
self.dbTable.setItem(row, col, item)
self.dbTable.resizeRowsToContents()
self.dbTable.resizeColumnsToContents()
def apply_lang(self):
self.setWindowTitle(LANG_PKG[self.config.lang]['dialog_browse_title'])
self.btnRefresh.setText(LANG_PKG[self.config.lang]['dialog_browse_btnRefresh'])
class DialogStats(QtWidgets.QDialog):
def __init__(self, db, config, parent=None):
super().__init__(parent)
self.db = db
self.config = config
self.lbl = QtWidgets.QLabel()
self.lbl.setWordWrap(True)
self.lbl.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
thisLayout = QtWidgets.QVBoxLayout()
thisLayout.addWidget(self.lbl)
self.setLayout(thisLayout)
def showEvent(self, ev):
self.lbl.setText(self.db.get_stats())
def apply_lang(self):
self.setWindowTitle(LANG_PKG[self.config.lang]['dialog_stats_title'])
class MenuBar(QtWidgets.QMenuBar):
def __init__(self, parent=None):
super().__init__(parent)
self.actionConfig = QtWidgets.QAction("Config Tense and Mood")
self.actionPref = QtWidgets.QAction('Preference')
self.actionAddVoc = QtWidgets.QAction("Add Vocabulary")
self.actionAddVoc.setShortcut('Ctrl+N')
self.actionBrowse = QtWidgets.QAction("Browse Glossary")
self.actionStats = QtWidgets.QAction('Statistics')
self.menuConfig = self.addMenu("&Config")
self.menuConfig.addAction(self.actionConfig)
self.menuConfig.addAction(self.actionPref)
self.menuGloss = self.addMenu("&Glossary")
self.menuGloss.addAction(self.actionAddVoc)
self.menuGloss.addAction(self.actionBrowse)
self.menuStats = self.addMenu("&Statistics")
self.menuStats.addAction(self.actionStats)
def apply_lang(self, lang_pkg):
self.actionConfig.setText(lang_pkg['action_config'])
self.actionPref.setText(lang_pkg['action_pref'])
self.actionAddVoc.setText(lang_pkg['action_addvoc'])
self.actionBrowse.setText(lang_pkg['action_browse'])
self.actionStats.setText(lang_pkg['action_stats'])
self.menuConfig.setTitle(lang_pkg['menu_config'])
self.menuGloss.setTitle(lang_pkg['menu_glossary'])
self.menuStats.setTitle(lang_pkg['menu_stats'])
class StatusBar(QtWidgets.QStatusBar):
def __init__(self, config, parent=None):
super().__init__(parent)
self.config = config
self.labelN1 = QtWidgets.QLabel()
self.labelN2 = QtWidgets.QLabel()
self.addPermanentWidget(self.labelN1)
self.addPermanentWidget(self.labelN2)
def refresh(self, n1, n2):
self.labelN1.setText(LANG_PKG[self.config.lang]['status_msg1'].format(n1))
self.labelN2.setText(LANG_PKG[self.config.lang]['status_msg2'].format(n2))
def launch():
app = QtWidgets.QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
launch()
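# The apply_lang methods above all index LANG_PKG[lang][key], so every language
# entry must provide the same key set. A minimal sketch of the expected shape:
# the key names come from the code above, while the English values are
# illustrative assumptions, not the application's actual strings.
LANG_PKG_SKETCH = {
    'en': {
        'box3_title': 'Conjugation lookup',
        'box3_btnClear': 'Clear',
        'box3_db_error': 'Database error',
        'dialog_config_title': 'Configure tense and mood',
        'dialog_pref_title': 'Preferences',
        'btnOK': 'OK',
        'btnCancel': 'Cancel',
        # ... one entry per key referenced in an apply_lang method
    },
}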
# === file: python/lib/viewer/bluf/plot_func.py | repo: timtyree/bgmc | license: MIT ===
import matplotlib.pyplot as plt, numpy as np, pandas as pd
# general functions for plotting
# Tim Tyree
# 7.23.2021
def PlotTextBox(ax,text,text_width=150.,xcenter=0.5,ycenter=0.5,fontsize=20, family='serif', style='italic',horizontalalignment='center',
verticalalignment='center', color='black',use_turnoff_axis=True,**kwargs):
txt=ax.text(xcenter,ycenter,text,horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment, transform=ax.transAxes, fontsize=fontsize,
family=family, style=style, color=color, wrap=True,**kwargs)
txt._get_wrap_line_width = lambda : text_width
if use_turnoff_axis:
ax.axis('off')
def text_plotter_function(ax,data):
text=data
# ax.text(0.5, 0.5, text, family='serif', style='italic', ha='right', wrap=True)
PlotTextBox(ax,text,fontsize=10)
return True
def format_plot_general(**kwargs):
return format_plot(**kwargs)
def format_plot(ax=None,xlabel=None,ylabel=None,fontsize=20,use_loglog=False,xlim=None,ylim=None,use_bigticks=True,**kwargs):
'''format plot formats the matplotlib axis instance, ax,
performing routine formatting to the plot,
labeling the x axis by the string, xlabel and
labeling the y axis by the string, ylabel
'''
if not ax:
ax=plt.gca()
if use_loglog:
ax.set_xscale('log')
ax.set_yscale('log')
if xlabel:
ax.set_xlabel(xlabel,fontsize=fontsize,**kwargs)
if ylabel:
ax.set_ylabel(ylabel,fontsize=fontsize,**kwargs)
if use_bigticks:
ax.tick_params(axis='both', which='major', labelsize=fontsize,**kwargs)
ax.tick_params(axis='both', which='minor', labelsize=0,**kwargs)
if xlim:
ax.set_xlim(xlim)
if ylim:
ax.set_ylim(ylim)
return True
def FormatAxes(ax,x1label,x2label,title=None,x1lim=None,x2lim=None,fontsize=16,use_loglog=False,**kwargs):
if x1lim is not None:
ax.set_xlim(x1lim)
if x2lim is not None:
ax.set_ylim(x2lim)
if title is not None:
ax.set_title(title,fontsize=fontsize)
format_plot(ax, x1label, x2label, fontsize=fontsize, use_loglog=use_loglog,**kwargs)
return True
def plot_horizontal(ax,xlim,x0,Delta_thresh=1.,use_Delta_thresh=False):
#plot the solid y=0 line
x=np.linspace(xlim[0],xlim[1],10)
ax.plot(x,0*x+x0,'k-')
if use_Delta_thresh:
#plot the dotted +-Delta_thresh lines
ax.plot(x,0*x+Delta_thresh+x0,'k--',alpha=0.7)
ax.plot(x,0*x-Delta_thresh+x0,'k--',alpha=0.7)
return True
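# Minimal usage sketch of the helpers above (illustrative data; uses only the
# matplotlib and numpy imports already at the top of this file).
fig, ax = plt.subplots()
x = np.linspace(1, 10, 50)
ax.plot(x, x ** 2)
format_plot(ax=ax, xlabel='x', ylabel='x^2', use_loglog=True, xlim=(1, 10))
plot_horizontal(ax, xlim=(1, 10), x0=25.)  # reference line at y = 25
plt.show()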
# === file: src/currency_rates.py | repo: akinmetin/currency-rates | license: MIT ===
import requests
from decouple import config
import datetime
from calendar import monthrange
import psycopg2
import time
def create_db_table():
con = psycopg2.connect(database=config("DB_NAME"), user=config("DB_USER"),
password=config("DB_PASSWORD"),
host=config("DB_HOST"), port=config("DB_PORT"))
cur = con.cursor()
cur.execute('''CREATE TABLE rates
(DATE VARCHAR(10) NOT NULL,
CURRENCY VARCHAR(3) NOT NULL,
VALUE FLOAT NOT NULL);''')
con.commit()
con.close()
def first_run():
# first create a table in the database
create_db_table()
currentDT = datetime.datetime.now()
year = currentDT.year
month = currentDT.month
# find the previous month's number. If the current month is January,
# then go to December of the previous year.
if month != 1:
month -= 1
else:
month = 12
year -= 1
# get total number of days in target month.
total_days = monthrange(year, month)[1]
# create database connection
con = psycopg2.connect(database=config("DB_NAME"), user=config("DB_USER"),
password=config("DB_PASSWORD"),
host=config("DB_HOST"), port=config("DB_PORT"))
cur = con.cursor()
# get entire month's data.
# http://data.fixer.io/api/YYYY-MM-DD?access_key=.....
for x in range(1, total_days + 1):
date = "{}/{}/{}".format(x, month, year)
url = "http://data.fixer.io/api/%s-%s-%s?access_key=%s" % \
(year, str(month).zfill(2), str(x).zfill(2), str(config("API_KEY")))
print(url)
response = requests.get(url)
data = response.json()["rates"]
for attr in data.keys():
cur.execute("INSERT INTO rates (DATE,CURRENCY,VALUE) \
VALUES (%s, %s, %s)", (date, str(attr), data[attr]))
# commit the waiting insert queries and close the connection.
con.commit()
con.close()
insert_into_db()
def check_db_table_exists():
con = psycopg2.connect(database=config("DB_NAME"), user=config("DB_USER"),
password=config("DB_PASSWORD"),
host=config("DB_HOST"), port=config("DB_PORT"))
cur = con.cursor()
cur.execute("select * from information_schema.tables where table_name=%s", ('rates',))
if bool(cur.rowcount):
con.close()
else:
con.close()
first_run()
def insert_into_db():
# get current date
currentDT = datetime.datetime.now()
year = currentDT.year
month = currentDT.month
day = currentDT.day
date = "{}/{}/{}".format(day, month, year)
# create database connection
con = psycopg2.connect(database=config("DB_NAME"), user=config("DB_USER"),
password=config("DB_PASSWORD"),
host=config("DB_HOST"), port=config("DB_PORT"))
cur = con.cursor()
# get currency json data from the api server
response = requests.get(config("API_ENDPOINT"))
data = response.json()["rates"]
for item in data.keys():
cur.execute("INSERT INTO rates (DATE,CURRENCY,VALUE) \
VALUES (%s, %s, %s)", (date, item, data[item]))
# commit the waiting insert queries and close the connection.
con.commit()
con.close()
def get_remaining_time():
currentDT = datetime.datetime.now()
hours = currentDT.hour
minutes = currentDT.minute
seconds = currentDT.second
# remaining sleep time in seconds until the next midnight
remain = (23 - hours) * 3600 + (59 - minutes) * 60 + (60 - seconds)
return remain
if __name__ == "__main__":
# check db table, if doesn't exists then create tables and pull last month's data into the db.
check_db_table_exits()
# endless loop, sleep until next morning 9 am. and run again
while True:
remain = get_remaining_time()
print("Sleeping: " + str(remain))
time.sleep(remain)
# run daily api request and insert fresh data into db.
insert_into_db()
# https://fixer.io/quickstart
# https://fixer.io/documentation
# https://www.dataquest.io/blog/python-api-tutorial/
# python get time --> https://tecadmin.net/get-current-date-time-python/
# python postgresql --> https://stackabuse.com/working-with-postgresql-in-python/
# check table if exists --> https://stackoverflow.com/questions/1874113/checking-if-a-postgresql-table-exists-under-python-and-probably-psycopg2
# postgres data types (postgres float) --> https://www.postgresqltutorial.com/postgresql-data-types/
# python get number of days in month --> https://stackoverflow.com/questions/4938429/how-do-we-determine-the-number-of-days-for-a-given-month-in-python
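# The loop above wakes at midnight; if a 9 a.m. wake-up is wanted instead, a
# small standalone sketch of computing that delay (uses the datetime import at
# the top of this file; not wired into the loop above):
def seconds_until(hour=9, minute=0):
    """Seconds from now until the next occurrence of hour:minute."""
    now = datetime.datetime.now()
    target = now.replace(hour=hour, minute=minute, second=0, microsecond=0)
    if target <= now:
        target += datetime.timedelta(days=1)  # already past today; use tomorrow
    return (target - now).total_seconds()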
# === file: association_rules_viz/graph.py | repo: ScenesK/association-rules-viz | license: MIT ===
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def graph(lhs,
rhs,
support,
confidence,
lift,
data=None,
fig_scale=2,
font_size=None,
cmap=None):
if data is None:
data = pd.DataFrame(
dict(
lhs=lhs,
rhs=rhs,
support=support,
confidence=confidence,
lift=lift))
lhs = 'lhs'
rhs = 'rhs'
support = 'support'
confidence = 'confidence'
lift = 'lift'
centers = data[lhs].unique()
graphs = centers.size
rows = np.ceil(np.sqrt(graphs)).astype(int)
cols = np.ceil(graphs / rows).astype(int)
g = nx.DiGraph()
fig, axes = plt.subplots(
rows, cols, figsize=(cols * fig_scale, rows * fig_scale))
data.loc[:, support] = data[support] / data[support].max() * 500
pc = None
for i, ((row, col), ax) in enumerate(np.ndenumerate(axes)):
ax.axis('off')
if col == cols - 1:
divider = make_axes_locatable(ax)
width = 0.25
cax = divider.append_axes("right", size=width, pad=0.25)
cbar = fig.colorbar(pc, cax=cax)
cbar.set_label('lift', size=font_size)
cbar.ax.tick_params(labelsize=font_size)
if i >= graphs:
continue
center = centers[i]
g = nx.DiGraph()
g.add_node(center, label=', '.join(center), size=0, color=0)
for node in data.loc[data[lhs] == center, rhs]:
name = (center, node)
index = data.index[(data[lhs] == center) & (data[rhs] == node)]
g.add_node(
name,
label=', '.join(node),
size=data.loc[index, support].values[0],
color=data.loc[index, lift].values[0])
g.add_edge(
center, name, weight=data.loc[index, confidence].values[0])
pos = nx.spring_layout(g)
pos[center] = np.zeros(2)
nodelist = g.nodes
sizes = nx.get_node_attributes(g, 'size')
node_size = [sizes[key] for key in nodelist]
colors = nx.get_node_attributes(g, 'color')
node_color = [colors[key] for key in nodelist]
vmax = np.abs(data[lift]).max()
vmin = -vmax
pc = nx.draw_networkx_nodes(
g,
pos,
node_size=node_size,
node_color=node_color,
cmap=cmap,
vmin=vmin,
vmax=vmax,
ax=ax)
labels = nx.get_node_attributes(g, 'label')
nx.draw_networkx_labels(g, pos, labels, font_size=font_size, ax=ax)
edgelist = g.edges
weights = nx.get_edge_attributes(g, 'weight')
edge_width = np.array([weights[key] for key in edgelist
]) / data[confidence].max() * 3
nx.draw_networkx_edges(g, pos, width=edge_width, alpha=0.5, ax=ax)
xlim = ax.get_xlim()
ylim = ax.get_ylim()
ax.set(
xlim=[-np.abs(xlim).max(), np.abs(xlim).max()],
ylim=[-np.abs(ylim).max(), np.abs(ylim).max()])
pc.set(clip_on=False)
for child in ax.get_children():
if isinstance(child, mpl.text.Text):
child.set(clip_on=False)
plt.show()
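# Minimal usage sketch of graph() with a tiny hand-made rule set. Item names
# are single characters because the function both joins each lhs/rhs value
# with ', ' for node labels and compares values for equality; all metric
# values below are illustrative. Four distinct lhs values give a 2x2 panel
# grid, which the colorbar placement logic expects.
rules = dict(
    lhs=['A', 'B', 'C', 'D'],
    rhs=['E', 'F', 'G', 'H'],
    support=[0.12, 0.10, 0.08, 0.05],
    confidence=[0.6, 0.5, 0.4, 0.3],
    lift=[1.8, 1.2, 0.9, 1.1],
)
graph(**rules, fig_scale=3, font_size=8, cmap='coolwarm')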
# === file: cvpack/extras/clustering.py | repo: alkasm/cvtools | license: MIT ===
# type: ignore
import cv2
import numpy as np
TWO_PI = 2 * np.pi
def kmeans_periodic(columns, intervals, data, *args, **kwargs):
"""Runs kmeans with periodicity in a subset of dimensions.
Transforms columns with periodicity on the specified intervals into two
columns with coordinates on the unit circle for kmeans. After running
through kmeans, the centers are transformed back to the range specified
by the intervals.
Arguments
---------
columns : sequence
Sequence of indexes specifying the columns that have periodic data
intervals : sequence of length-2 sequences
Sequence of (min, max) intervals, one interval per column
See help(cv2.kmeans) for all other arguments, which are passed through.
Returns
-------
See help(cv2.kmeans) for outputs, which are passed through; except centers,
which is modified so that it returns centers corresponding to the input
data, instead of the transformed data.
Raises
------
cv2.error
If len(columns) != len(intervals)
"""
# Check each periodic column has an associated interval
if len(columns) != len(intervals):
raise cv2.error("number of intervals must be equal to number of columns")
ndims = data.shape[1]
ys = []
# transform each periodic column into two columns with the x and y coordinate
# of the angles for kmeans; x coord at original column, ys are appended
for col, interval in zip(columns, intervals):
a, b = min(interval), max(interval)
width = b - a
data[:, col] = TWO_PI * (data[:, col] - a) / width % TWO_PI
ys.append(width * np.sin(data[:, col]))
data[:, col] = width * np.cos(data[:, col])
# append the ys to the end
ys = np.array(ys).transpose()
data = np.hstack((data, ys)).astype(np.float32)
# run kmeans
retval, bestLabels, centers = cv2.kmeans(data, *args, **kwargs)
# transform the centers back to range they came from
for i, (col, interval) in enumerate(zip(columns, intervals)):
a, b = min(interval), max(interval)
angles = np.arctan2(centers[:, ndims + i], centers[:, col]) % TWO_PI
centers[:, col] = a + (b - a) * angles / TWO_PI
centers = centers[:, :ndims]
return retval, bestLabels, centers
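# Usage sketch with synthetic data: column 0 holds angles on [0, 2*pi), column 1
# is an ordinary linear feature. Arguments after `data` follow cv2.kmeans
# (K, bestLabels, criteria, attempts, flags); the values here are illustrative.
rng = np.random.default_rng(0)
angles = np.concatenate([rng.normal(0.1, 0.2, 100),
                         rng.normal(np.pi, 0.2, 100)]) % TWO_PI
data = np.column_stack([angles, rng.normal(0.0, 1.0, 200)]).astype(np.float32)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 1e-4)
retval, labels, centers = kmeans_periodic(
    [0], [(0, TWO_PI)], data, 2, None, criteria, 5, cv2.KMEANS_RANDOM_CENTERS)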
# === file: models/model.py | repo: SeungoneKim/Transformer_implementation | license: Apache-2.0 ===
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.embedding import TokenEmbedding, PositionalEncoding, TransformerEmbedding
from models.attention import ScaledDotProductAttention, MultiHeadAttention, FeedForward
from models.layers import EncoderLayer, DecoderLayer
class Encoder(nn.Module):
def __init__(self, enc_vocab_size, src_max_len,
model_dim, key_dim, value_dim, hidden_dim,
num_head, num_layer, drop_prob, device):
super(Encoder,self).__init__()
self.embedding = TransformerEmbedding(enc_vocab_size, model_dim, src_max_len, drop_prob, device)
self.layers = nn.ModuleList([EncoderLayer(model_dim, key_dim, value_dim,
hidden_dim, num_head,
drop_prob) for _ in range(num_layer)])
def forward(self, tensor, src_mask):
input_emb = self.embedding(tensor)
encoder_output = input_emb
for layer in self.layers:
encoder_output = layer(encoder_output, src_mask)
return encoder_output
class Decoder(nn.Module):
def __init__(self, dec_vocab_size, tgt_max_len,
model_dim, key_dim, value_dim, hidden_dim,
num_head, num_layer, drop_prob, device):
super(Decoder,self).__init__()
self.embedding = TransformerEmbedding(dec_vocab_size, model_dim, tgt_max_len, drop_prob, device)
self.layers = nn.ModuleList([DecoderLayer(model_dim, key_dim, value_dim,
hidden_dim, num_head,
drop_prob) for _ in range(num_layer)])
def forward(self, tensor, encoder_output, src_mask, tgt_mask):
tgt_emb = self.embedding(tensor)
decoder_output = tgt_emb
for layer in self.layers:
decoder_output = layer(decoder_output, encoder_output, src_mask, tgt_mask)
return decoder_output
class LanguageModelingHead(nn.Module):
def __init__(self, model_dim, dec_vocab_size):
super(LanguageModelingHead,self).__init__()
self.linearlayer = nn.Linear(model_dim, dec_vocab_size)
def forward(self, decoder_output):
return self.linearlayer(decoder_output)
class TransformersModel(nn.Module):
def __init__(self, src_pad_idx, tgt_pad_idx,
enc_vocab_size, dec_vocab_size,
model_dim, key_dim, value_dim, hidden_dim,
num_head, num_layer, enc_max_len, dec_max_len, drop_prob, device):
super(TransformersModel, self).__init__()
self.src_pad_idx = src_pad_idx
self.tgt_pad_idx = tgt_pad_idx
self.device = device
self.Encoder = Encoder(enc_vocab_size, enc_max_len,
model_dim, key_dim, value_dim, hidden_dim, num_head, num_layer, drop_prob, device)
self.Decoder = Decoder(dec_vocab_size, dec_max_len,
model_dim, key_dim, value_dim, hidden_dim, num_head, num_layer, drop_prob, device)
self.LMHead = LanguageModelingHead(model_dim, dec_vocab_size)
def forward(self, src_tensor, tgt_tensor):
enc_mask = self.generate_padding_mask(src_tensor, src_tensor, "src","src")
enc_dec_mask = self.generate_padding_mask(tgt_tensor, src_tensor, "src","tgt")
dec_mask = self.generate_padding_mask(tgt_tensor, tgt_tensor,"tgt","tgt") * \
self.generate_triangular_mask(tgt_tensor, tgt_tensor)
encoder_output = self.Encoder(src_tensor, enc_mask)
decoder_output = self.Decoder(tgt_tensor, encoder_output, enc_dec_mask, dec_mask)
output = self.LMHead(decoder_output)
return output
# applying mask (optional): 0s mark the positions to be masked
# pad_type = ["src", "tgt"]
def generate_padding_mask(self, query, key, query_pad_type=None, key_pad_type=None):
# query = (batch_size, query_length)
# key = (batch_size, key_length)
query_length = query.size(1)
key_length = key.size(1)
# decide query_pad_idx based on query_pad_type
if query_pad_type == "src":
query_pad_idx = self.src_pad_idx
elif query_pad_type == "tgt":
query_pad_idx = self.tgt_pad_idx
else:
raise ValueError("query_pad_type should be either src or tgt")
# decide key_pad_idx based on key_pad_type
if key_pad_type == "src":
key_pad_idx = self.src_pad_idx
elif key_pad_type == "tgt":
key_pad_idx = self.tgt_pad_idx
else:
raise ValueError("key_pad_type should be either src or tgt")
# convert query and key into 4-dimensional tensor
# query = (batch_size, 1, query_length, 1) -> (batch_size, 1, query_length, key_length)
query = query.ne(query_pad_idx).unsqueeze(1).unsqueeze(3)
query = query.repeat(1,1,1,key_length)
# key = (batch_size, 1, 1, key_length) -> (batch_size, 1, query_length, key_length)
key = key.ne(key_pad_idx).unsqueeze(1).unsqueeze(2)
key = key.repeat(1,1,query_length,1)
# create padding mask with key and query
mask = key & query
return mask
# applying mask (optional): 0s mark the positions to be masked
def generate_triangular_mask(self, query, key):
# query = (batch_size, query_length)
# key = (batch_size, key_length)
query_length = query.size(1)
key_length = key.size(1)
# create triangular mask
mask = torch.tril(torch.ones(query_length,key_length)).type(torch.BoolTensor).to(self.device)
return mask
def build_model(src_pad_idx, tgt_pad_idx,
enc_vocab_size, dec_vocab_size,
model_dim, key_dim, value_dim, hidden_dim,
num_head, num_layer, enc_max_len, dec_max_len, drop_prob):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = TransformersModel(src_pad_idx, tgt_pad_idx,
enc_vocab_size, dec_vocab_size,
model_dim, key_dim, value_dim, hidden_dim,
num_head, num_layer, enc_max_len, dec_max_len, drop_prob,device)
return model.cuda() if torch.cuda.is_available() else model
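# Smoke-test sketch with tiny, illustrative hyperparameters. It assumes the
# embedding/attention/layer modules imported above follow the signatures used
# in this file; the shapes and values below are assumptions for illustration.
if __name__ == '__main__':
    model = build_model(src_pad_idx=0, tgt_pad_idx=0,
                        enc_vocab_size=100, dec_vocab_size=100,
                        model_dim=32, key_dim=8, value_dim=8, hidden_dim=64,
                        num_head=4, num_layer=2,
                        enc_max_len=16, dec_max_len=16, drop_prob=0.1)
    device = next(model.parameters()).device
    src = torch.randint(1, 100, (2, 16), device=device)  # (batch, src_len)
    tgt = torch.randint(1, 100, (2, 16), device=device)  # (batch, tgt_len)
    out = model(src, tgt)
    print(out.shape)  # expected: torch.Size([2, 16, 100])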
# === file: market/models/sim_trades.py | repo: LuoMaimingS/django_virtual_stock_market | license: BSD-3-Clause ===
# _*_ coding:UTF-8 _*_
"""
该文件定义了模拟环境中交易动作相关的模型
并非虚拟股市原本的模型,做了一些适应性的调整,取消了全部外键。
"""
from django.db import models
import uuid
from decimal import Decimal
import time
from .clients import BaseClient
from .sim_market import SimMarket
from .sim_clients import SimHoldingElem, SimCommissionElem, SimTransactionElem
from .sim_stocks import SimStock, SimOrderBookEntry, SimOrderBookElem
from .config import *
class SimTradeMsg(models.Model):
stock_symbol = models.CharField(max_length=12)
initiator = models.IntegerField(verbose_name='ID of the trade initiator')
trade_direction = models.CharField(max_length=1)
trade_price = models.DecimalField(max_digits=MAX_DIGITS, decimal_places=DECIMAL_PLACES)
trade_vol = models.IntegerField()
trade_date = models.DateTimeField(blank=False)
trade_tick = models.IntegerField(blank=False)
# ID of the resting commission that was traded against
commission_id = models.UUIDField(blank=False, default=uuid.uuid4, verbose_name='uuid of the original commission')
acceptor = models.IntegerField(verbose_name='ID of the trade acceptor')
tax_charged = models.FloatField(default=0)
def sim_instant_trade(msg):
"""
client的委托立刻得到了交易,从而不会出现在委托记录中
:param msg: 交易的相关信息,是一个TradeMsg类
"""
initiator = msg.initiator
stock_symbol = msg.stock_symbol
initiator_object = BaseClient.objects.get(id=initiator)
stock_object = SimStock.objects.get(symbol=stock_symbol)
SimTransactionElem.objects.create(one_side=initiator, the_other_side=msg.acceptor,
stock_symbol=stock_symbol, price_traded=msg.trade_price, vol_traded=msg.trade_vol,
date_traded=msg.trade_date, operation=msg.trade_direction)
if msg.trade_direction == 'a':
# sell
hold_element = SimHoldingElem.objects.get(owner=initiator, stock_symbol=stock_symbol)
available_shares = hold_element.available_vol
assert available_shares >= msg.trade_vol
hold_element.available_vol -= msg.trade_vol
hold_element.vol -= msg.trade_vol
if hold_element.vol == 0:
# all shares have now been sold; the position is closed, so delete this record
hold_element.delete()
else:
hold_element.save(update_fields=['vol', 'available_vol'])
earning = float(msg.trade_price * msg.trade_vol - msg.tax_charged)
initiator_object.cash += earning
initiator_object.flexible_cash += earning
elif msg.trade_direction == 'b':
# buy
holding = SimHoldingElem.objects.filter(owner=initiator, stock_symbol=stock_symbol)
if holding.exists():
# the client already held this stock
assert holding.count() == 1
new_holding = holding[0]
new_holding.cost = Decimal((new_holding.cost * new_holding.vol + msg.trade_price * msg.trade_vol) /
(new_holding.vol + msg.trade_vol))
new_holding.price_guaranteed = new_holding.cost
new_holding.last_price = stock_object.last_price
new_holding.vol += msg.trade_vol
new_holding.available_vol += msg.trade_vol
new_holding.profit -= msg.tax_charged
new_holding.value = float(stock_object.last_price) * new_holding.vol
new_holding.save()
else:
# buying a stock not previously held
SimHoldingElem.objects.create(owner=initiator, stock_symbol=stock_symbol,
vol=msg.trade_vol, frozen_vol=0, available_vol=msg.trade_vol,
cost=msg.trade_price, price_guaranteed=msg.trade_price,
last_price=stock_object.last_price, profit=- msg.tax_charged,
value=stock_object.last_price * msg.trade_vol, date_bought=msg.trade_date)
spending = float(msg.trade_price * msg.trade_vol + msg.tax_charged)
initiator_object.cash -= spending
initiator_object.flexible_cash -= spending
initiator_object.save(update_fields=['cash', 'flexible_cash'])
return True
def sim_delayed_trade(msg):
"""
client的委托记录中的委托得到了交易,从而改变委托情况
:param msg: 交易的相关信息,是一个TradeMsg类
"""
assert isinstance(msg, SimTradeMsg)
acceptor = msg.acceptor
stock_symbol = msg.stock_symbol
if msg.trade_direction == 'a':
acceptor_direction = 'b'
else:
acceptor_direction = 'a'
acceptor_object = BaseClient.objects.get(id=acceptor)
stock_object = SimStock.objects.get(symbol=stock_symbol)
# handle the commission first
commission_element = SimCommissionElem.objects.get(unique_id=msg.commission_id)
assert commission_element.stock_symbol == stock_symbol
assert commission_element.operation == acceptor_direction
assert commission_element.vol_traded + msg.trade_vol <= commission_element.vol_committed
new_avg_price = (commission_element.price_traded * commission_element.vol_traded +
msg.trade_price * msg.trade_vol) / (commission_element.vol_traded + msg.trade_vol)
commission_element.price_traded = new_avg_price
commission_element.vol_traded += msg.trade_vol
# on completion the commission is simply deleted for now; there is no commission history, only the transaction history
if commission_element.vol_traded == commission_element.vol_committed:
commission_element.delete()
else:
commission_element.save(update_fields=['price_traded', 'vol_traded'])
if acceptor_direction == 'a':
# sell: update the holding
hold_element = SimHoldingElem.objects.get(owner=acceptor, stock_symbol=stock_symbol)
frozen_shares = hold_element.frozen_vol
assert frozen_shares >= msg.trade_vol
hold_element.frozen_vol -= msg.trade_vol
hold_element.vol -= msg.trade_vol
if hold_element.vol == 0:
# all shares of this holding have now been sold; the position is closed, so delete this record
hold_element.delete()
else:
hold_element.save(update_fields=['vol', 'frozen_vol'])
# settle the proceeds: the trade amount minus the tax charged
earning = float(msg.trade_price * msg.trade_vol - msg.tax_charged)
acceptor_object.cash += earning
acceptor_object.flexible_cash += earning
elif acceptor_direction == 'b':
# buy: open or add to a position
holding = SimHoldingElem.objects.filter(owner=acceptor, stock_symbol=stock_symbol)
if holding.exists():
# the client already held this stock
assert holding.count() == 1
new_holding = holding[0]
new_holding.cost = Decimal((new_holding.cost * new_holding.vol + msg.trade_price * msg.trade_vol) /
(new_holding.vol + msg.trade_vol))
new_holding.price_guaranteed = new_holding.cost
new_holding.last_price = stock_object.last_price
new_holding.vol += msg.trade_vol
new_holding.available_vol += msg.trade_vol
new_holding.profit -= msg.tax_charged
new_holding.value = float(stock_object.last_price) * new_holding.vol
new_holding.save()
else:
# buying a stock not previously held
SimHoldingElem.objects.create(owner=acceptor, stock_symbol=stock_symbol,
vol=msg.trade_vol, frozen_vol=0, available_vol=msg.trade_vol,
cost=msg.trade_price, price_guaranteed=msg.trade_price,
last_price=stock_object.last_price, profit=- msg.tax_charged,
value=stock_object.last_price * msg.trade_vol, date_bought=msg.trade_date)
# settle the trade cost, deducting it from frozen cash and the cash balance
spending = float(msg.trade_price * msg.trade_vol + msg.tax_charged)
acceptor_object.cash -= spending
acceptor_object.frozen_cash -= spending
acceptor_object.save(update_fields=['cash', 'frozen_cash', 'flexible_cash'])
return True
class SimCommissionMsg(models.Model):
stock_symbol = models.CharField(max_length=12)
commit_client = models.IntegerField(verbose_name='ID of the committing client')
commit_direction = models.CharField(max_length=1, default='b')
commit_price = models.DecimalField(max_digits=MAX_DIGITS, decimal_places=DECIMAL_PLACES, default=0)
commit_vol = models.IntegerField(default=0)
commit_date = models.DateTimeField(blank=True, default=None)
# used to cancel a commission
commission_to_cancel = models.UUIDField(verbose_name='uuid of the commission targeted for cancellation', default=uuid.uuid4)
# Confirm the commission
confirmed = models.BooleanField(default=False)
def is_valid(self):
"""
判断委托信息是否合法
:return: 合法则返回True
"""
if not SimStock.objects.filter(symbol=self.stock_symbol).exists():
# the committed stock does not exist
print('The STOCK COMMITTED DOES NOT EXIST!')
return False
else:
stock_corr = SimStock.objects.get(symbol=self.stock_symbol)
if stock_corr.limit_up != 0 and stock_corr.limit_down != 0:
if self.commit_price > stock_corr.limit_up or self.commit_price < stock_corr.limit_down:
# the commission price must lie between the limit-up and limit-down prices
print('COMMIT PRICE MUST BE BETWEEN THE LIMIT UP AND THE LIMIT DOWN!')
return False
if self.commit_direction not in ['a', 'b', 'c']:
# the commission direction must be one of buy / sell / cancel
print('COMMIT DIRECTION INVALID!')
return False
commit_client_object = BaseClient.objects.get(id=self.commit_client)
if self.commit_direction == 'a':
# sell commission: the client must hold the stock with a sufficient available balance
if not SimHoldingElem.objects.filter(owner=self.commit_client, stock_symbol=self.stock_symbol).exists():
print('DOES NOT HOLD THE STOCK!')
return False
holding_element = SimHoldingElem.objects.get(owner=self.commit_client, stock_symbol=self.stock_symbol)
if holding_element.available_vol < self.commit_vol:
print('DOES NOT HOLD ENOUGH STOCK SHARES!')
return False
elif self.commit_direction == 'b':
# buy commission: the client must have enough flexible cash to cover the frozen amount including tax
if commit_client_object.flexible_cash < self.commit_price * self.commit_vol * Decimal(1 + TAX_RATE):
print('CAN NOT AFFORD THE FROZEN CASH!')
return False
elif self.commit_direction == 'c':
# cancellation: there must be a valid target commission, which the cancellation revokes
if self.commission_to_cancel is None:
print('COMMISSION CANCELED IS NONE!')
return False
return True
def sim_add_commission(msg):
"""
client成功提交了一个委托,且部分或全部没有被交易,将更新client的委托信息和相应股票的order book
:param msg:委托的相关信息,是一个CommissionMsg类
"""
assert isinstance(msg, SimCommissionMsg)
assert msg.confirmed is True
principle = msg.commit_client
stock_symbol = msg.stock_symbol
market = SimMarket.objects.get(id=1)
order_book_entry, created = SimOrderBookEntry.objects.get_or_create(stock_symbol=stock_symbol,
entry_price=msg.commit_price,
entry_direction=msg.commit_direction)
order_book_entry.total_vol += msg.commit_vol
order_book_entry.save(update_fields=['total_vol'])
new_order_book_element = SimOrderBookElem.objects.create(entry_belonged=order_book_entry.id,
client=principle,
direction_committed=msg.commit_direction,
price_committed=msg.commit_price,
vol_committed=msg.commit_vol,
date_committed=market.datetime)
SimCommissionElem.objects.create(owner=principle, stock_symbol=stock_symbol, operation=msg.commit_direction,
price_committed=msg.commit_price, vol_committed=msg.commit_vol,
date_committed=market.datetime, unique_id=new_order_book_element.unique_id)
if msg.commit_direction == 'a':
# sell commission
holding = SimHoldingElem.objects.get(owner=principle, stock_symbol=stock_symbol)
assert msg.commit_vol <= holding.available_vol
holding.frozen_vol += msg.commit_vol
holding.available_vol -= msg.commit_vol
holding.save(update_fields=['frozen_vol', 'available_vol'])
elif msg.commit_direction == 'b':
# buy commission
principle_object = BaseClient.objects.get(id=principle)
freeze = float(msg.commit_price * msg.commit_vol)
assert freeze <= principle_object.flexible_cash
principle_object.frozen_cash += freeze
principle_object.flexible_cash -= freeze
principle_object.save(update_fields=['frozen_cash', 'flexible_cash'])
return True
def sim_order_book_matching(commission):
"""
将client给出的委托信息与order book中所有order进行撮合交易
"""
assert isinstance(commission, SimCommissionMsg)
assert commission.confirmed is False
stock_symbol = commission.stock_symbol
stock_object = SimStock.objects.get(symbol=stock_symbol)
direction = commission.commit_direction
remaining_vol = commission.commit_vol
market = SimMarket.objects.get(id=1)
if direction == 'a':
# sell commission
matching_direction = 'b'
while not stock_object.is_order_book_empty(matching_direction):
best_element = stock_object.get_best_element(matching_direction)
if best_element.price_committed < commission.commit_price:
# the price no longer meets the commission's limit; stop matching
break
if remaining_vol == 0:
# the requested volume has been filled; stop matching
break
if remaining_vol >= best_element.vol_committed:
# a trade occurs: this resting order in the order book is fully filled
trade_message = SimTradeMsg(stock_symbol=stock_symbol, initiator=commission.commit_client,
trade_direction=direction, trade_price=best_element.price_committed,
trade_vol=best_element.vol_committed, acceptor=best_element.client,
commission_id=best_element.unique_id, tax_charged=0,
trade_date=market.datetime, trade_tick=market.tick)
# these two should run in parallel
sim_instant_trade(trade_message)
sim_delayed_trade(trade_message)
# record the trade and delete the resting order from the order book
stock_object.trading_behaviour(direction, best_element.price_committed, best_element.vol_committed,
trade_message.trade_date, trade_message.trade_tick)
remaining_vol -= best_element.vol_committed
best_entry = SimOrderBookEntry.objects.get(id=best_element.entry_belonged)
best_entry.total_vol -= best_element.vol_committed
if best_entry.total_vol == 0:
best_entry.delete()
else:
best_entry.save(update_fields=['total_vol'])
best_element.delete()
else:
# a trade occurs: this resting order in the order book is partially filled
trade_message = SimTradeMsg(stock_symbol=stock_symbol, initiator=commission.commit_client,
trade_direction=direction, trade_price=best_element.price_committed,
trade_vol=remaining_vol, acceptor=best_element.client,
commission_id=best_element.unique_id, tax_charged=0,
trade_date=market.datetime, trade_tick=market.tick)
# these two should run in parallel
sim_instant_trade(trade_message)
sim_delayed_trade(trade_message)
# record the trade and adjust the resting order in the order book
stock_object.trading_behaviour(direction, best_element.price_committed, remaining_vol,
trade_message.trade_date, trade_message.trade_tick)
best_element.vol_committed -= remaining_vol
best_entry = SimOrderBookEntry.objects.get(id=best_element.entry_belonged)
best_entry.total_vol -= remaining_vol
remaining_vol = 0
best_element.save(update_fields=['vol_committed'])
best_entry.save(update_fields=['total_vol'])
elif direction == 'b':
# buy commission
matching_direction = 'a'
while not stock_object.is_order_book_empty(matching_direction):
best_element = stock_object.get_best_element(matching_direction)
if best_element.price_committed > commission.commit_price:
# the price no longer meets the commission's limit; stop matching
break
if remaining_vol == 0:
# the requested volume has been filled; stop matching
break
if remaining_vol >= best_element.vol_committed:
# a trade occurs: this resting order in the order book is fully filled
trade_message = SimTradeMsg(stock_symbol=stock_symbol, initiator=commission.commit_client,
trade_direction=direction, trade_price=best_element.price_committed,
trade_vol=best_element.vol_committed, acceptor=best_element.client,
commission_id=best_element.unique_id, tax_charged=0,
trade_date=market.datetime, trade_tick=market.tick)
# these two should run in parallel
sim_instant_trade(trade_message)
sim_delayed_trade(trade_message)
# record the trade and delete the resting order from the order book
stock_object.trading_behaviour(direction, best_element.price_committed, best_element.vol_committed,
trade_message.trade_date, trade_message.trade_tick)
remaining_vol -= best_element.vol_committed
best_entry = SimOrderBookEntry.objects.get(id=best_element.entry_belonged)
best_entry.total_vol -= best_element.vol_committed
if best_entry.total_vol == 0:
best_entry.delete()
else:
best_entry.save(update_fields=['total_vol'])
best_element.delete()
else:
# a trade occurs: this resting order in the order book is partially filled
trade_message = SimTradeMsg(stock_symbol=stock_object.symbol, initiator=commission.commit_client,
trade_direction=direction, trade_price=best_element.price_committed,
trade_vol=remaining_vol, acceptor=best_element.client,
commission_id=best_element.unique_id, tax_charged=0,
trade_date=market.datetime, trade_tick=market.tick)
# these two should run in parallel
sim_instant_trade(trade_message)
sim_delayed_trade(trade_message)
# record the trade and adjust the resting order in the order book
stock_object.trading_behaviour(direction, best_element.price_committed, remaining_vol,
trade_message.trade_date, trade_message.trade_tick)
best_element.vol_committed -= remaining_vol
best_entry = SimOrderBookEntry.objects.get(id=best_element.entry_belonged)
best_entry.total_vol -= remaining_vol
remaining_vol = 0
best_element.save(update_fields=['vol_committed'])
best_entry.save(update_fields=['total_vol'])
elif direction == 'c':
# cancellation
assert commission.commission_to_cancel is not None
order_book_element_corr = SimOrderBookElem.objects.get(unique_id=commission.commission_to_cancel)
try:
assert commission.commit_client == order_book_element_corr.client
assert commission.commit_vol <= order_book_element_corr.vol_committed  # volume requested for cancellation
order_book_entry = SimOrderBookEntry.objects.get(id=order_book_element_corr.entry_belonged)
order_book_entry.total_vol -= commission.commit_vol
order_book_element_corr.vol_committed -= commission.commit_vol
if order_book_element_corr.vol_committed == 0:
order_book_element_corr.delete()
else:
order_book_element_corr.save(update_fields=['vol_committed'])
if order_book_entry.total_vol == 0:
order_book_entry.delete()
else:
order_book_entry.save(update_fields=['total_vol'])
# cancellation confirmed: delete the commission record and release the frozen assets
origin_commission = SimCommissionElem.objects.get(unique_id=commission.commission_to_cancel)
if origin_commission.operation == 'a':
holding = SimHoldingElem.objects.get(owner=commission.commit_client, stock_symbol=stock_symbol)
holding.frozen_vol -= commission.commit_vol
holding.available_vol += commission.commit_vol
holding.save(update_fields=['frozen_vol', 'available_vol'])
else:
assert origin_commission.operation == 'b'
freeze = float(commission.commit_price * commission.commit_vol)
client_object = BaseClient.objects.get(id=commission.commit_client)
client_object.frozen_cash -= freeze
client_object.flexible_cash += freeze
client_object.save(update_fields=['frozen_cash', 'flexible_cash'])
origin_commission.vol_committed -= commission.commit_vol
if origin_commission.vol_traded == origin_commission.vol_committed:
origin_commission.delete()
else:
origin_commission.save(update_fields=['vol_committed'])
except AssertionError:
print("撤单失败!")
commission.confirmed = True
commission.save()
return True
else:
raise ValueError
if remaining_vol > 0:
# no resting order in the market can fill the remaining buy/sell volume, or none meets the trade conditions
commission.commit_vol = remaining_vol
commission.confirmed = True
ok = sim_add_commission(commission)
assert ok
else:
commission.confirmed = True
return True
def sim_commission_handler(new_commission, handle_info=False):
"""
委托的处理函数,如果接受的委托message合法,则根据处理情况,在数据库中建立委托项/加入order book/建立成交记录
:param new_commission:新收到的委托信息
:param handle_info:是否打印委托信息
"""
time0 = time.time()
assert isinstance(new_commission, SimCommissionMsg)
if not new_commission.is_valid():
return False
sim_order_book_matching(new_commission)
assert new_commission.confirmed
time1 = time.time()
if handle_info:
print('Commission Handled: symbol-{} {} price-{} vol-{}, Cost {} s.'.format(new_commission.stock_symbol,
new_commission.commit_direction,
new_commission.commit_price,
new_commission.commit_vol,
time1 - time0))
return True
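# Sketch of the intended call flow, assuming a configured Django project with
# these models migrated and the referenced client row present; the stock
# symbol and field values below are illustrative.
def example_commission_flow():
    msg = SimCommissionMsg(stock_symbol='SIM001', commit_client=1,
                           commit_direction='b', commit_price=10.5,
                           commit_vol=100)
    # validates, matches against the order book, and books any unfilled remainder
    return sim_commission_handler(msg, handle_info=True)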
# === file: symplyphysics/laws/nuclear/fast_non_leakage_probability_from_fermi_age.py | repo: blackyblack/symplyphysics | license: MIT ===
from sympy.functions import exp
from symplyphysics import (
symbols, Eq, pretty, solve, Quantity, units, S,
Probability, validate_input, expr_to_quantity, convert_to
)
# Description
## Pfnl (fast non-leakage factor) is the ratio of the number of fast neutrons that do not leak from the reactor
## core during the slowing down process to the number of fast neutrons produced by fissions at all energies.
## Law: Pfnl ≈ e^(-Bg^2 * τth)
## Where:
## e - exponent.
## Bg^2 - geometric buckling.
## See [geometric buckling](./buckling/geometric_buckling_from_neutron_flux.py) implementation.
## τth - neutron Fermi age.
## The Fermi age is related to the distance traveled during moderation, just as the diffusion length is for
## thermal neutrons. The Fermi age is the same quantity as the slowing-down length squared (Ls^2).
## Pfnl - fast non-leakage probability.
geometric_buckling = symbols('geometric_buckling')
neutron_fermi_age = symbols('neutron_fermi_age')
fast_non_leakage_probability = symbols('fast_non_leakage_probability')
law = Eq(fast_non_leakage_probability, exp(-1 * geometric_buckling * neutron_fermi_age))
def print():
return pretty(law, use_unicode=False)
@validate_input(geometric_buckling_=(1 / units.length**2), neutron_fermi_age_=units.length**2)
def calculate_probability(geometric_buckling_: Quantity, neutron_fermi_age_: Quantity) -> Probability:
result_probability_expr = solve(law, fast_non_leakage_probability, dict=True)[0][fast_non_leakage_probability]
result_expr = result_probability_expr.subs({
geometric_buckling: geometric_buckling_,
neutron_fermi_age: neutron_fermi_age_})
result_factor = expr_to_quantity(result_expr, 'fast_non_leakage_factor')
return Probability(convert_to(result_factor, S.One).n())
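# Numeric sanity check of the law itself, independent of the symplyphysics API.
# The buckling and Fermi age values below are hypothetical illustration values.
import math
geometric_buckling_value = 0.0026  # Bg^2 in cm^-2
fermi_age_value = 60.0             # tau in cm^2
p_fnl = math.exp(-geometric_buckling_value * fermi_age_value)
print(round(p_fnl, 4))  # ~0.8556: about 86% of fast neutrons avoid leakage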
# === file: sortingAlgorithm/pigeonHoleSort.py | repo: slowy07/pythonApps | license: MIT ===
def pigeonHoleSort(nums):
minNumbers = min(nums)
maxNumbers = max(nums)
size = maxNumbers - minNumbers + 1
holes = [0] * size
for x in nums:
holes[x - minNumbers] += 1
i = 0
for count in range(size):
while holes[count] > 0:
holes[count] -= 1
nums[i] = count + minNumbers
i += 1
nums = [12, 26, 77, 22, 88, 1]
pigeonHoleSort(nums)  # sorts in place; the function returns None
print(nums)
# === file: service/utils/parsers.py | repo: psorianom/csv_detective_api | license: MIT ===
# parsers.py
from werkzeug.datastructures import FileStorage
from flask_restplus import reqparse
file_upload = reqparse.RequestParser()
file_upload.add_argument('resource_csv',
type=FileStorage,
location='files',
required=True,
help='CSV file')
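# Sketch of how the parser is typically consumed in a flask_restplus resource.
# The namespace, route, and resource names here are hypothetical.
from flask_restplus import Namespace, Resource

api = Namespace('csv')

@api.route('/analyze')
class CSVAnalysis(Resource):
    @api.expect(file_upload)
    def post(self):
        args = file_upload.parse_args()
        uploaded = args['resource_csv']  # a werkzeug FileStorage
        return {'filename': uploaded.filename}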
# === file: sdk/python/kfp/components/yaml_component_test.py | repo: johnmacnamararseg/pipelines | license: Apache-2.0 ===
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kfp.components.yaml_component."""
import os
import tempfile
import textwrap
import unittest
from unittest import mock
import requests
from kfp.components import structures
from kfp.components import yaml_component
SAMPLE_YAML = textwrap.dedent("""\
    components:
      comp-component-1:
        executorLabel: exec-component-1
        inputDefinitions:
          parameters:
            input1:
              parameterType: STRING
        outputDefinitions:
          parameters:
            output1:
              parameterType: STRING
    deploymentSpec:
      executors:
        exec-component-1:
          container:
            command:
            - sh
            - -c
            - 'set -ex
              echo "$0" > "$1"'
            - '{{$.inputs.parameters[''input1'']}}'
            - '{{$.outputs.parameters[''output1''].output_file}}'
            image: alpine
    pipelineInfo:
      name: component-1
    root:
      dag:
        tasks:
          component-1:
            cachingOptions:
              enableCache: true
            componentRef:
              name: comp-component-1
            inputs:
              parameters:
                input1:
                  componentInputParameter: input1
            taskInfo:
              name: component-1
      inputDefinitions:
        parameters:
          input1:
            parameterType: STRING
    schemaVersion: 2.1.0
    sdkVersion: kfp-2.0.0-alpha.3
    """)


class YamlComponentTest(unittest.TestCase):

    def test_load_component_from_text(self):
        component = yaml_component.load_component_from_text(SAMPLE_YAML)
        self.assertEqual(component.component_spec.name, 'component-1')
        self.assertEqual(component.component_spec.outputs,
                         {'output1': structures.OutputSpec(type='String')})
        self.assertEqual(component._component_inputs, {'input1'})
        self.assertEqual(component.name, 'component-1')
        self.assertEqual(
            component.component_spec.implementation.container.image, 'alpine')

    def test_load_component_from_file(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            path = os.path.join(tmpdir, 'sample_yaml.yaml')
            with open(path, 'w') as f:
                f.write(SAMPLE_YAML)
            component = yaml_component.load_component_from_file(path)
        self.assertEqual(component.component_spec.name, 'component-1')
        self.assertEqual(component.component_spec.outputs,
                         {'output1': structures.OutputSpec(type='String')})
        self.assertEqual(component._component_inputs, {'input1'})
        self.assertEqual(component.name, 'component-1')
        self.assertEqual(
            component.component_spec.implementation.container.image, 'alpine')

    def test_load_component_from_url(self):
        component_url = 'https://raw.githubusercontent.com/some/repo/components/component_group/component.yaml'

        def mock_response_factory(url, params=None, **kwargs):
            if url == component_url:
                response = requests.Response()
                response.url = component_url
                response.status_code = 200
                response._content = SAMPLE_YAML
                return response
            raise RuntimeError('Unexpected URL "{}"'.format(url))

        with mock.patch('requests.get', mock_response_factory):
            component = yaml_component.load_component_from_url(component_url)
        self.assertEqual(component.component_spec.name, 'component-1')
        self.assertEqual(component.component_spec.outputs,
                         {'output1': structures.OutputSpec(type='String')})
        self.assertEqual(component._component_inputs, {'input1'})
        self.assertEqual(component.name, 'component-1')
        self.assertEqual(
            component.component_spec.implementation.container.image,
            'alpine')


if __name__ == '__main__':
    unittest.main()
| 34.453125
| 111
| 0.654649
| 465
| 4,410
| 6.066667
| 0.356989
| 0.079759
| 0.127614
| 0.140376
| 0.414392
| 0.405884
| 0.36441
| 0.321163
| 0.321163
| 0.321163
| 0
| 0.013864
| 0.247619
| 4,410
| 127
| 112
| 34.724409
| 0.836347
| 0.136054
| 0
| 0.34
| 0
| 0
| 0.341245
| 0.029536
| 0
| 0
| 0
| 0
| 0.15
| 1
| 0.04
| false
| 0
| 0.08
| 0
| 0.14
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9ebbdb1a7760f5a35b7e2094efb06933644ff5ca
| 10,548
|
py
|
Python
|
cpmm.py
|
wallfair-organization/amm-sim
|
0ae8584563dd44162a8e2407382d250b02474704
|
[
"MIT"
] | null | null | null |
cpmm.py
|
wallfair-organization/amm-sim
|
0ae8584563dd44162a8e2407382d250b02474704
|
[
"MIT"
] | null | null | null |
cpmm.py
|
wallfair-organization/amm-sim
|
0ae8584563dd44162a8e2407382d250b02474704
|
[
"MIT"
] | null | null | null |
#
# Constant Product Market Maker (CPMM) Simulator
#
# simulate different liquidity provision and trading strategies
#
from typing import Tuple
import csv
import numpy as np
import pandas as pd
from numpy.random import binomial, default_rng
# TODO: switch to decimal type and control quantization. numeric errors will kill us quickly
class CPMM(object):
    def __init__(self, fee_fraction=0, fee_to_liquidity_fraction=0) -> None:
        # assert(fee_fraction >= fee_to_liquidity_fraction)
        # amount of initial liquidity provided
        self.initial_liquidity = 0
        # total amount of liquidity
        self.liquidity = 0
        # total amount of collateral token
        self.lp_token = 0
        # yes tokens in the pool
        self.lp_yes = 0
        # no tokens in the pool
        self.lp_no = 0
        # outstanding tokens held by LP
        self.outstanding_yes = 0
        self.outstanding_no = 0
        self.fee_pool = 0
        self.history = []
        self.fee_fraction = fee_fraction
        self.fee_to_liquidity_fraction = fee_to_liquidity_fraction  # how much of the fee is reinvested into liquidity provision

    def create_event(self, initial_liquidity, initial_yes_to_no=1) -> Tuple[int, float]:
        assert(initial_yes_to_no > 0)
        self.initial_liquidity = initial_liquidity
        rv = self._add_liquidity(initial_liquidity, initial_yes_to_no)
        n_p = self.lp_yes / self.lp_no
        # print(f"invariant P {initial_yes_to_no} {n_p}")
        assert(abs(initial_yes_to_no - n_p) < 0.000001)
        return rv

    def add_liquidity(self, amount) -> Tuple[int, float]:
        assert(self.lp_token > 0)
        # yes to no must be invariant when liquidity is added
        p = self.lp_yes / self.lp_no
        rv = self._add_liquidity(amount, p)
        n_p = self.lp_yes / self.lp_no
        # assert invariant, we use float and disregard rounding so must be within e ~ 0
        # print(f"invariant P {p} {n_p}")
        assert(abs(p - n_p) < 0.000001)
        return rv

    def _add_liquidity(self, amount, yes_to_no) -> Tuple[int, float]:
        # print("adding liquidity:", amount)
        self.liquidity += amount
        self.lp_token += amount
        # get token type from the ratio
        type = 1 if yes_to_no >= 1 else 0
        if type:
            # more into YES bucket, NO is returned
            old_lp_no = self.lp_no
            self.lp_no = (amount + self.lp_yes) / yes_to_no
            self.lp_yes += amount
            tokens_return = amount + old_lp_no - self.lp_no
            self.outstanding_no += tokens_return
        else:
            # more into NO bucket, YES is returned
            old_lp_yes = self.lp_yes
            self.lp_yes = (amount + self.lp_no) * yes_to_no
            self.lp_no += amount
            tokens_return = amount + old_lp_yes - self.lp_yes
            self.outstanding_yes += tokens_return
        entry = ["add", "liquidity", amount, 0, yes_to_no, 0, tokens_return, self.lp_yes, self.lp_no, self.lp_token, self.liquidity, self.fee_pool, 0, 0]
        self._add_history(entry)
        # should return amount of outcome token
        return (type, amount)

    # def remove_liquidity(amount):

    def buy_token(self, type, original_amount) -> Tuple[int, float]:  # yes = 1 | no = 0
        # take fee before any operation and store in fee_pool
        fee = original_amount * self.fee_fraction
        amount = original_amount - fee
        self.fee_pool += fee
        # adding fee_to_liquidity fraction to liquidity fee pool
        # note: liquidity is provided before the buy so that the added liquidity is available for the current transaction
        if (self.fee_to_liquidity_fraction > 0):
            reinvest_fee = fee * self.fee_to_liquidity_fraction
            self.add_liquidity(reinvest_fee)
        # keep invariant
        k = (self.lp_yes * self.lp_no)
        # add liquidity
        self.lp_token += amount
        if type:
            tokens_return, x = self.calc_buy(type, amount)
            buy_price_yes = amount / tokens_return
            # calc slippage
            slippage_yes = self.calc_slippage(type, amount)
            assert (slippage_yes > 0), f"slippage_yes {slippage_yes} <= 0"
            # remove returned token from the pool, keep all no tokens
            self.lp_yes += x
            self.lp_no += amount
            entry = ["buy", "yes", original_amount, fee, buy_price_yes, slippage_yes, tokens_return, self.lp_yes, self.lp_no, self.lp_token, self.liquidity, self.fee_pool, 0, 0]
        else:
            tokens_return, x = self.calc_buy(type, amount)
            buy_price_no = amount / tokens_return
            slippage_no = self.calc_slippage(type, amount)
            assert (slippage_no > 0), f"slippage_no {slippage_no} <= 0"
            # remove returned token from the pool, keep all yes tokens
            self.lp_no += x
            self.lp_yes += amount
            entry = ["buy", "no", original_amount, fee, buy_price_no, slippage_no, tokens_return, self.lp_yes, self.lp_no, self.lp_token, self.liquidity, self.fee_pool, 0, 0]
        # assert invariant, we use float and disregard rounding so must be within e ~ 0
        inv_div = abs(k - (self.lp_yes * self.lp_no))
        # use variable epsilon - float numbers suck due to scaling
        inv_eps = min(self.lp_no, self.lp_yes) / 100000000
        if inv_div > inv_eps:
            print(f"invariant K {k} {self.lp_yes * self.lp_no} == {inv_div}, lp_yes {self.lp_yes} lp_no {self.lp_no} eps {inv_eps}")
        assert(inv_div < inv_eps)
        impermanent_loss = self.calc_impermanent_loss()
        assert(impermanent_loss >= 0)
        # outstanding yes/no token may be converted at event outcome to reward or immediately traded
        outstanding_token = self.calc_outstanding_token()
        # impermanent loss at the second-to-last position, outstanding token loss at the last position of the history entry
        entry[-2] = impermanent_loss
        entry[-1] = outstanding_token[1]
        self._add_history(entry)
        return (type, tokens_return)

    def calc_withdrawable_liquidity(self) -> float:
        # collateral taken from the pool and tokens returned when adding liquidity
        return min(self.lp_yes + self.outstanding_yes, self.lp_no + self.outstanding_no)

    def calc_payout(self) -> float:
        # how big is the reward after all liquidity is removed
        return self.lp_token - self.calc_withdrawable_liquidity()

    def calc_outstanding_token(self) -> Tuple[int, float]:
        # outcome tokens going to LP on top of removed liquidity
        withdraw_token = self.calc_withdrawable_liquidity()
        total_yes = self.lp_yes + self.outstanding_yes
        total_no = self.lp_no + self.outstanding_no
        if total_yes > total_no:
            outstanding_token = (1, total_yes - withdraw_token)
        else:
            outstanding_token = (0, total_no - withdraw_token)
        return outstanding_token

    def calc_impermanent_loss(self) -> float:
        withdraw_token = self.calc_withdrawable_liquidity()
        return self.liquidity - withdraw_token

    def calc_buy(self, type, amount) -> Tuple[float, float]:
        k = (self.lp_yes * self.lp_no)
        if type:
            x = k / (self.lp_no + amount) - self.lp_yes
        else:
            x = k / (self.lp_yes + amount) - self.lp_no
        # (tokens returned to the user, amm pool delta)
        return amount - x, x

    def calc_marginal_price(self, type) -> float:
        pool_total = (self.lp_no + self.lp_yes)
        return (self.lp_no if type else self.lp_yes) / pool_total

    def calc_slippage(self, type, amount) -> float:
        tokens_return, _ = self.calc_buy(type, amount)
        buy_price = amount / tokens_return
        marginal_price = self.calc_marginal_price(type)
        return (buy_price - marginal_price) / buy_price

    @staticmethod
    def calc_british_odds(returned_tokens, amount) -> float:
        # british odds https://www.investopedia.com/articles/investing/042115/betting-basics-fractional-decimal-american-moneyline-odds.asp
        # shows the reward on top of the stake as a decimal fraction of the stake
        # (TODO: we could use the Fraction class of python for nice odds representation)
        # may be negative due to cpmm inefficiencies
        return (returned_tokens - amount) / amount

    # def sell_token(type, amount):
    # def get_buy_price_yes():
    # def get_sell_price_yes():

    _csv_headers = [
        "activity", "type", "amount", "fee", "token_buy_sell_price",
        "slippage", "returned tokens", "lp_yes", "lp_no", "lp_token",
        "liquidity", "fee_pool", "impermanent_loss", "loss_outstanding_tokens"
    ]

    @property
    def history_as_dataframe(self) -> pd.DataFrame:
        return pd.DataFrame(data=self.history, columns=CPMM._csv_headers)

    def save_history(self, name) -> None:
        df = self.history_as_dataframe
        with open(name, "wt") as f:
            df.to_csv(f, index=False, quoting=csv.QUOTE_NONNUMERIC)

    def _add_history(self, entry) -> None:
        # check entry size
        assert(len(entry) == len(CPMM._csv_headers))
        self.history.append(entry)
def run_experiment(name, cpmm: CPMM, n, prior_dist, betting_dist):
    # TODO: must have a realistic model for betting behavior, for example
    # total bets volume cannot cross % of liquidity
    # individual bet cannot have slippage > 1% etc.
    bet_outcomes = prior_dist(n)
    bet_amounts = betting_dist(n)
    print(f"{name}: bet outcomes N/Y {np.bincount(bet_outcomes)}")
    for b, amount in zip(bet_outcomes, bet_amounts):
        cpmm.buy_token(b, amount)
    # print(cpmm.history)
    cpmm.save_history(f"{name}.csv")
def main():
    rng = default_rng()

    # experiment 1
    # 1000 rounds, initial liquidity 50:50 1000 EVNT, bettors' prior 50:50, bets integer uniform range [1, 100]
    cpmm = CPMM()
    cpmm.create_event(1000)
    run_experiment(
        "experiment1",
        cpmm,
        1000,
        lambda size: rng.binomial(1, 0.5, size),
        lambda size: rng.integers(1, 100, endpoint=True, size=size)
    )

    # experiment 2
    # 1000 rounds, initial liquidity 50:50 1000 EVNT, bettors' prior 70:30, bets integer uniform range [1, 100]
    cpmm = CPMM()
    cpmm.create_event(1000)
    run_experiment(
        "experiment2",
        cpmm,
        1000,
        lambda size: rng.binomial(1, 0.7, size),
        lambda size: rng.integers(1, 100, endpoint=True, size=size)
    )

    # experiment 3
    # 1000 rounds, initial liquidity 50:50 1000 EVNT, bettors' prior 70:30, bets integer uniform range [1, 100]
    # fee 2% taken and not added to liquidity pool
    cpmm = CPMM(fee_fraction=0.02)
    cpmm.create_event(1000)
    run_experiment(
        "experiment3",
        cpmm,
        1000,
        lambda size: rng.binomial(1, 0.7, size),
        lambda size: rng.integers(1, 100, endpoint=True, size=size)
    )

    # experiment 4
    # 1000 rounds, initial liquidity 50:50 1000 EVNT, bettors' prior 50:50, bets integer uniform range [1, 100]
    # fee 2% taken and 50% added to liquidity pool
    cpmm = CPMM(fee_fraction=0.02, fee_to_liquidity_fraction=0.5)
    cpmm.create_event(1000)
    run_experiment(
        "experiment4",
        cpmm,
        1000,
        lambda size: rng.binomial(1, 0.5, size),
        lambda size: rng.integers(1, 100, endpoint=True, size=size)
    )

    # experiment 5
    # 1000 rounds, initial liquidity 1:3 1000 EVNT, bettors' prior 50:50, bets integer uniform range [1, 100]
    # fee 2% taken and 50% added to liquidity pool
    cpmm = CPMM(fee_fraction=0.02, fee_to_liquidity_fraction=0.5)
    cpmm.create_event(1000, initial_yes_to_no=1 / 3)  # 1:3 yes-to-no, matching the comment above
    run_experiment(
        "experiment5",
        cpmm,
        1000,
        lambda size: rng.binomial(1, 0.5, size),
        lambda size: rng.integers(1, 100, endpoint=True, size=size)
    )


if __name__ == "__main__":
    main()
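
# Hedged usage sketch (added for illustration; not part of the original script).
# It exercises the API surface on a toy input: one event with 1000 units of
# 50:50 liquidity and a single 100-unit YES buy. The printed numbers are just
# whatever the model returns for this invented input.
#
# demo = CPMM(fee_fraction=0.02)
# demo.create_event(1000)
# token_type, tokens = demo.buy_token(1, 100)
# print(f"bought {tokens:.2f} YES tokens")
# print(f"marginal YES price is now {demo.calc_marginal_price(1):.4f}")
# print(f"british odds on the bet: {CPMM.calc_british_odds(tokens, 100):.4f}")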
| 32.455385
| 168
| 0.724877
| 1,651
| 10,548
| 4.42338
| 0.162932
| 0.049295
| 0.032042
| 0.024921
| 0.410105
| 0.356977
| 0.304943
| 0.243735
| 0.238532
| 0.228125
| 0
| 0.031153
| 0.17226
| 10,548
| 324
| 169
| 32.555556
| 0.805291
| 0.292757
| 0
| 0.305699
| 0
| 0.005181
| 0.062356
| 0.006763
| 0
| 0
| 0
| 0.003086
| 0.046632
| 1
| 0.093264
| false
| 0
| 0.025907
| 0.020725
| 0.196891
| 0.010363
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9ebe258aa483e065cfc04b0e3ffeede0cfc9e13c
| 862
|
py
|
Python
|
setup.py
|
Dhruv-Jauhar/pyDownload
|
4c64427037533dccf9d4dd958a46cded2422985a
|
[
"MIT"
] | null | null | null |
setup.py
|
Dhruv-Jauhar/pyDownload
|
4c64427037533dccf9d4dd958a46cded2422985a
|
[
"MIT"
] | null | null | null |
setup.py
|
Dhruv-Jauhar/pyDownload
|
4c64427037533dccf9d4dd958a46cded2422985a
|
[
"MIT"
] | null | null | null |
try:
    from setuptools import setup, find_packages
except ImportError:
    from distutils.core import setup

dependencies = ['docopt', 'termcolor', 'requests']

setup(
    name='pyDownload',
    version='1.0.2',
    description='CLI based download utility',
    url='https://github.com/Dhruv-Jauhar/pyDownload',
    author='Dhruv Jauhar',
    author_email='dhruv.jhr@gmail.com',
    license='MIT',
    install_requires=dependencies,
    packages=find_packages(),
    entry_points={
        'console_scripts': ['pyd = pyDownload.main:start'],
    },
    classifiers=(
        'Development Status :: 4 - Beta',
        'Intended Audience :: End Users/Desktop',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.4',
        #'Programming Language :: Python :: 3 :: Only',
        'Topic :: Utilities'
    )
)
| 25.352941
| 53
| 0.682135
| 97
| 862
| 6
| 0.721649
| 0.097938
| 0.128866
| 0.089347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009722
| 0.164733
| 862
| 33
| 54
| 26.121212
| 0.798611
| 0.053364
| 0
| 0
| 0
| 0
| 0.4914
| 0.025799
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.107143
| 0
| 0.107143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b356af2edb34b8b508b0178b0361112e9713c32
| 558
|
py
|
Python
|
tests/coverage/add_perf_summary.py
|
olvrou/CCF
|
3843ff0ccf3871dc49cf2102655404d17ed5dcaf
|
[
"Apache-2.0"
] | 1
|
2020-02-03T21:57:22.000Z
|
2020-02-03T21:57:22.000Z
|
tests/coverage/add_perf_summary.py
|
olvrou/CCF
|
3843ff0ccf3871dc49cf2102655404d17ed5dcaf
|
[
"Apache-2.0"
] | null | null | null |
tests/coverage/add_perf_summary.py
|
olvrou/CCF
|
3843ff0ccf3871dc49cf2102655404d17ed5dcaf
|
[
"Apache-2.0"
] | 1
|
2021-04-08T12:55:28.000Z
|
2021-04-08T12:55:28.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import time
import json
with open("coverage.json", "r") as file:
timestamp = str(int(time.time()))
data = json.load(file)["data"][0]
lines_covered = str(data["totals"]["lines"]["covered"])
lines_valid = str(data["totals"]["lines"]["count"])
with open("perf_summary.csv", "a") as f:
f.write(
timestamp
+ ","
+ lines_valid
+ ",Unit_Test_Coverage,0,0,0,"
+ lines_covered
+ ",0,0,0,0"
)
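
# Illustration (assumed numbers, not from any real run): if coverage.json
# reported 1200 of 1500 lines covered at epoch time 1600000000, the row
# appended to perf_summary.csv would read:
#   1600000000,1500,Unit_Test_Coverage,0,0,0,1200,0,0,0,0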
| 26.571429
| 59
| 0.594982
| 75
| 558
| 4.333333
| 0.546667
| 0.030769
| 0.027692
| 0.110769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0.229391
| 558
| 20
| 60
| 27.9
| 0.732558
| 0.172043
| 0
| 0
| 0
| 0
| 0.22658
| 0.056645
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b3573ace3d1c2705447c13d56f1be46ceaa32b4
| 1,063
|
py
|
Python
|
tests/test_calculator_ui.py
|
ellinaMart/test_trade_calculator
|
d7523ba3ce00500ace9d2f9493ba7d5e483cf4f3
|
[
"Apache-2.0"
] | null | null | null |
tests/test_calculator_ui.py
|
ellinaMart/test_trade_calculator
|
d7523ba3ce00500ace9d2f9493ba7d5e483cf4f3
|
[
"Apache-2.0"
] | null | null | null |
tests/test_calculator_ui.py
|
ellinaMart/test_trade_calculator
|
d7523ba3ce00500ace9d2f9493ba7d5e483cf4f3
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import allure
from data.parameters import data_parameters
@allure.feature('UI TEST: open page')
def test_open_page(app):
    with allure.step('Open the calculator page'):
        app.open_calculator_page()
        assert '/calculator' in app.get_path_current_url()


@allure.feature('UI TEST: Check and calculate parameters')
@pytest.mark.parametrize('params', data_parameters, ids=[repr(x) for x in data_parameters])
def test_calculate(app, params):
    with allure.step('Select the parameters and click calculate'):
        app.choose_account_type("Standard")
        app.choose_instrument(params[0]['symbol'])
        app.choose_lot(str(params[0]["lot"]))
        app.choose_leverage(f"1:{params[0]['leverage']}")
        app.get_calculate()
    with allure.step('Calculate the margin and compare it with the value on the page'):
        margin_ui = app.get_margin()
        conversion_factor = app.get_conversion_factor(params[0])
        margin_calc = app.calculate_margin(params[0], conversion_factor)
        assert margin_ui == margin_calc
| 39.37037
| 91
| 0.718721
| 141
| 1,063
| 5.22695
| 0.439716
| 0.04749
| 0.056988
| 0.05156
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006757
| 0.164628
| 1,063
| 26
| 92
| 40.884615
| 0.823198
| 0
| 0
| 0
| 0
| 0
| 0.228598
| 0.023518
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.090909
| false
| 0
| 0.136364
| 0
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b3a31a3ced9da114ff604217cc00699fa473e8b
| 6,383
|
py
|
Python
|
indy-diem/writeTransactionSimple.py
|
kiva/indy-diem
|
c015a44b15886a2a039c3b7768cf03a6295c134e
|
[
"Apache-2.0"
] | null | null | null |
indy-diem/writeTransactionSimple.py
|
kiva/indy-diem
|
c015a44b15886a2a039c3b7768cf03a6295c134e
|
[
"Apache-2.0"
] | 15
|
2021-08-17T15:31:07.000Z
|
2021-09-20T15:11:59.000Z
|
indy-diem/writeTransactionSimple.py
|
kiva/indy-diem
|
c015a44b15886a2a039c3b7768cf03a6295c134e
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import json
import time
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
from diem import AuthKey, testnet, utils
from indy import anoncreds, wallet
from indy import pool
from get_schema import get_schema
from diem_txn import create_diem_script, create_diem_raw_txn, sign_and_wait_diem_txn
from compress_decompress_cred_def import compress_cred_def, clean_up_cred_def_res, decompress_cred_def
from async_calls import create_master_secret, create_credential_offer, \
    create_credential_req, create_credential, store_credential
PROTOCOL_VERSION = 2
CURRENCY = "XUS"
issuer = {
    'did': 'NcYxiDXkpYi6ov5FcYDi1e',
    'wallet_config': json.dumps({'id': 'issuer_wallet'}),
    'wallet_credentials': json.dumps({'key': 'issuer_wallet_key'})
}
prover = {
    'did': 'VsKV7grR1BUE29mG2Fm2kX',
    'wallet_config': json.dumps({"id": "prover_wallet"}),
    'wallet_credentials': json.dumps({"key": "issuer_wallet_key"})
}
verifier = {}
store = {}
async def create_schema():
    # Set protocol version 2 to work with Indy Node 1.4
    await pool.set_protocol_version(PROTOCOL_VERSION)

    # 1. Create Issuer Wallet and Get Wallet Handle
    await wallet.create_wallet(issuer['wallet_config'], issuer['wallet_credentials'])
    issuer['wallet'] = await wallet.open_wallet(issuer['wallet_config'], issuer['wallet_credentials'])

    # 2. Create Prover Wallet and Get Wallet Handle
    await wallet.create_wallet(prover['wallet_config'], prover['wallet_credentials'])
    prover['wallet'] = await wallet.open_wallet(prover['wallet_config'], prover['wallet_credentials'])

    # 3. Issuer create Credential Schema
    schema = {
        'name': 'gvt',
        'version': '1.0',
        'attributes': '["age", "sex"]'
    }
    issuer['schema_id'], issuer['schema'] = await anoncreds.issuer_create_schema(issuer['did'], schema['name'],
                                                                                 schema['version'],
                                                                                 schema['attributes'])
    store[issuer['schema_id']] = issuer['schema']

    cred_def = {
        'tag': 'cred_def_tag',
        'type': 'CL',
        'config': json.dumps({"support_revocation": False})
    }
    issuer['cred_def_id'], issuer['cred_def'] = await anoncreds.issuer_create_and_store_credential_def(
        issuer['wallet'], issuer['did'], issuer['schema'], cred_def['tag'], cred_def['type'], cred_def['config'])
    store[issuer['cred_def_id']] = issuer['cred_def']
    time.sleep(1)
    return issuer['schema'], issuer['cred_def']
loop = asyncio.get_event_loop()
schema_and_cred_def = loop.run_until_complete(create_schema())
# connect to testnet
client = testnet.create_client()
# generate private key for sender account
sender_private_key = Ed25519PrivateKey.generate()
# generate auth key for sender account
sender_auth_key = AuthKey.from_public_key(sender_private_key.public_key())
print(f"Generated sender address: {utils.account_address_hex(sender_auth_key.account_address())}")
# create sender account
faucet = testnet.Faucet(client)
testnet.Faucet.mint(faucet, sender_auth_key.hex(), 100000000, "XUS")
# get sender account
sender_account = client.get_account(sender_auth_key.account_address())
# generate private key for receiver account
receiver_private_key = Ed25519PrivateKey.generate()
# generate auth key for receiver account
receiver_auth_key = AuthKey.from_public_key(receiver_private_key.public_key())
print(f"Generated receiver address: {utils.account_address_hex(receiver_auth_key.account_address())}")
# create receiver account
faucet = testnet.Faucet(client)
faucet.mint(receiver_auth_key.hex(), 10000000, CURRENCY)
METADATA = str.encode(schema_and_cred_def[0])
# create script
script = create_diem_script(CURRENCY, receiver_auth_key, METADATA)
# create transaction
raw_transaction = create_diem_raw_txn(sender_auth_key, sender_account, script, CURRENCY)
sign_and_wait_diem_txn(sender_private_key, raw_transaction, client)
print("\nRetrieving SCHEMA from Diem ledger:\n")
schema = get_schema(utils.account_address_hex(sender_auth_key.account_address()), sender_account.sequence_number,
                    "https://testnet.diem.com/v1")
cred_def_dict = compress_cred_def(schema_and_cred_def)
METADATA_CRED_DEF = str.encode(str(cred_def_dict))
# create script
script = create_diem_script(CURRENCY, receiver_auth_key, METADATA_CRED_DEF)
# create transaction
raw_transaction = create_diem_raw_txn(sender_auth_key, sender_account, script, CURRENCY, 1)
sign_and_wait_diem_txn(sender_private_key, raw_transaction, client)
print("\nRetrieving CRE_DEF from Diem ledger:\n")
cred_def_res = get_schema(utils.account_address_hex(sender_auth_key.account_address()),
                          sender_account.sequence_number + 1,
                          "https://testnet.diem.com/v1")
filtered_cred_def = clean_up_cred_def_res(cred_def_res)
decomp_comp = decompress_cred_def(filtered_cred_def)
master_secret_id = loop.run_until_complete(create_master_secret(prover))
prover['master_secret_id'] = master_secret_id
print("\nmaster sectet id:" + master_secret_id)
cred_offer = loop.run_until_complete(create_credential_offer(issuer['wallet'], decomp_comp['id']))
# set some values
issuer['cred_offer'] = cred_offer
prover['cred_offer'] = issuer['cred_offer']
cred_offer = json.loads(prover['cred_offer'])
prover['cred_def_id'] = cred_offer['cred_def_id']
prover['schema_id'] = cred_offer['schema_id']
prover['cred_def'] = store[prover['cred_def_id']]
prover['schema'] = store[prover['schema_id']]
# create the credential request
prover['cred_req'], prover['cred_req_metadata'] = loop.run_until_complete(create_credential_req(prover))
prover['cred_values'] = json.dumps({
    "sex": {"raw": "male", "encoded": "5944657099558967239210949258394887428692050081607692519917050011144233"},
    "age": {"raw": "28", "encoded": "28"}
})
issuer['cred_values'] = prover['cred_values']
issuer['cred_req'] = prover['cred_req']
print("wallet:")
print(issuer['wallet'])
print("\ncred_offer:")
print(issuer['cred_offer'])
print("\ncred_req:")
print(issuer['cred_req'])
print("\ncred_values:")
print(issuer['cred_values'])
(cred_json, _, _) = loop.run_until_complete(create_credential(issuer))
prover['cred'] = cred_json
loop.run_until_complete(store_credential(prover))
| 34.690217
| 116
| 0.734608
| 827
| 6,383
| 5.336155
| 0.165659
| 0.050759
| 0.023567
| 0.027192
| 0.496941
| 0.355087
| 0.31022
| 0.218219
| 0.194199
| 0.139134
| 0
| 0.024649
| 0.14194
| 6,383
| 184
| 117
| 34.690217
| 0.781085
| 0.083033
| 0
| 0.053571
| 0
| 0
| 0.21868
| 0.041131
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.098214
| 0
| 0.107143
| 0.116071
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b3a56677628b2dcca3ff0494700cfb7a0aa4b48
| 2,173
|
py
|
Python
|
Packs/GoogleCloudFunctions/Integrations/GoogleCloudFunctions/GoogleCloudFunctions_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799
|
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/GoogleCloudFunctions/Integrations/GoogleCloudFunctions/GoogleCloudFunctions_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317
|
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/GoogleCloudFunctions/Integrations/GoogleCloudFunctions/GoogleCloudFunctions_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297
|
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import pytest
from GoogleCloudFunctions import resolve_default_project_id, functions_list_command
@pytest.mark.parametrize('project, credentials_json, expected_output, expected_exception', [
    ("some-project-id", {"credentials_json": {"type": "service_account", "project_id": "some-project-id"}},
     "some-project-id", None),
    (None, {"credentials_json": {"type": "service_account", "project_id": "some-project-id"}}, "some-project-id", None),
    ("some-project-id", {"credentials_json": {"type": "service_account"}}, "some-project-id", None),
    (None, {"credentials_json": {"type": "service_account"}}, None, SystemExit)
])
def test_resolve_default_project_id(project, credentials_json, expected_output, expected_exception):
    credentials_json = credentials_json.get('credentials_json')
    if expected_exception is None:
        assert resolve_default_project_id(project, credentials_json) == expected_output
    else:
        with pytest.raises(SystemExit):
            assert resolve_default_project_id(project, credentials_json) == expected_output


def test_format_parameters():
    from GoogleCloudFunctions import format_parameters
    parameters_to_check = "key:value , name: lastname, onemorekey : to test "
    result = format_parameters(parameters_to_check)
    assert result == '{"key": "value", "name": "lastname", "onemorekey": "to test"}'
    bad_parameters = "oh:no,bad"
    with pytest.raises(ValueError):
        format_parameters(bad_parameters)


class GoogleClientMock:
    def __init__(self, region='region', project='project', functions=None):
        if functions is None:
            functions = []
        self.region = region
        self.project = project
        self.functions = functions

    def functions_list(self, region, project_id):
        return {'functions': self.functions}


def test_no_functions():
    """
    Given:
    - Google client without functions
    When:
    - Running functions-list command
    Then:
    - Ensure expected human readable response is returned
    """
    client = GoogleClientMock()
    hr, _, _ = functions_list_command(client, {})
    assert hr == 'No functions found.'
| 36.830508
| 120
| 0.698113
| 241
| 2,173
| 6.037344
| 0.278008
| 0.086598
| 0.062543
| 0.06323
| 0.452234
| 0.406873
| 0.406873
| 0.309278
| 0.268729
| 0.228179
| 0
| 0
| 0.180396
| 2,173
| 58
| 121
| 37.465517
| 0.816957
| 0.07041
| 0
| 0.054054
| 0
| 0
| 0.253024
| 0.017137
| 0
| 0
| 0
| 0
| 0.108108
| 1
| 0.135135
| false
| 0
| 0.081081
| 0.027027
| 0.27027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b3cc4dc345dd62020834a44def43b7e9619fb29
| 3,492
|
py
|
Python
|
cssqc/parser.py
|
matematik7/CSSQC
|
f8048435e60f688fef70d1608651d31e1288b4cf
|
[
"MIT"
] | null | null | null |
cssqc/parser.py
|
matematik7/CSSQC
|
f8048435e60f688fef70d1608651d31e1288b4cf
|
[
"MIT"
] | null | null | null |
cssqc/parser.py
|
matematik7/CSSQC
|
f8048435e60f688fef70d1608651d31e1288b4cf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------
# cssqc/__init__.py
#
# css quality control
# ----------------------------------------------------------------
# copyright (c) 2014 - Domen Ipavec
# Distributed under The MIT License, see LICENSE
# ----------------------------------------------------------------
import importlib
import csslex, cssyacc
from cssyacc.ruleset import Ruleset
from cssqc.statistics import Statistics
EVENTS = (
    'IDENT',
    'ATKEYWORD',
    'ATBRACES',
    'STRING',
    'HASH',
    'NUMBER',
    'PERCENTAGE',
    'DIMENSION',
    'URI',
    'UNICODE_RANGE',
    'CDO',
    'CDC',
    'COLON',
    'SEMICOLON',
    'BRACES_R',
    'BRACES_L',
    'PARENTHESES_R',
    'PARENTHESES_L',
    'BRACKETS_R',
    'BRACKETS_L',
    'COMMENT',
    'WS',
    'FUNCTION',
    'INCLUDES',
    'DASHMATCH',
    'DELIM',
    'Block',
    'Brackets',
    'Comment',
    'Function',
    'Parentheses',
    'Ruleset',
    'Selector',
    'Statement',
    'Whitespace'
)

instance = None


class CSSQC:
    def __init__(self, rules):
        global instance
        self.events = {}
        for e in EVENTS:
            self.events[e] = []
        self.afterParse = []
        self.addRules(rules)
        self.parser = cssyacc.getYacc()
        self.warnings = []
        self.tokens = []
        self.objects = []
        self.current_token = 0
        self.statistics = Statistics()
        self.addRuleObject(self.statistics)
        instance = self

    @staticmethod
    def getInstance():
        global instance
        return instance

    def addRules(self, rules):
        for rule in rules:
            try:
                enabled = rules.getboolean(rule)
            except:
                enabled = True
            if enabled:
                module = importlib.import_module("cssqc.rules." + rule)
                klass = getattr(module, rule)
                self.addRuleObject(klass(rules[rule]))

    def eventName(self, e):
        return "on_" + e

    def addRuleObject(self, o):
        for e in EVENTS:
            f = getattr(o, self.eventName(e), None)
            if callable(f):
                self.events[e].append(f)
        f = getattr(o, "afterParse", None)
        if callable(f):
            self.afterParse.append(f)

    def event(self, e, obj):
        for f in self.events[e]:
            self.warnings += f(obj)

    def register(self, name, obj):
        self.objects.append((name, obj))

    def token(self):
        if len(self.tokens) > self.current_token:
            t = self.tokens[self.current_token]
            self.current_token += 1
            return t
        else:
            return None

    def parse(self, data):
        # lex
        l = csslex.getLexer()
        l.input(data)
        # parse tokens
        for token in l:
            self.tokens.append(token)
            self.event(token.type, token)
        # yacc
        result = self.parser.parse(lexer=self)
        for el in result:
            if type(el) is Ruleset:
                el.setDepth(0)
        # parse objects
        for obj in self.objects:
            self.event(obj[0], obj[1])
        # after parse
        for f in self.afterParse:
            self.warnings += f(result)
        # sort warnings
        self.warnings.sort(key=lambda qw: qw.line)
        return result
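
# Hedged usage sketch (not part of the original module; the empty rule mapping
# and the CSS snippet are invented for illustration). With no rules enabled the
# parser still runs and collects statistics:
#
# qc = CSSQC({})
# result = qc.parse("a { color: red; }")
# for w in qc.warnings:
#     print(w.line, w)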
| 23.28
| 69
| 0.489977
| 344
| 3,492
| 4.912791
| 0.372093
| 0.023669
| 0.03787
| 0.014201
| 0.053254
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004822
| 0.346793
| 3,492
| 149
| 70
| 23.436242
| 0.736081
| 0.120275
| 0
| 0.054545
| 0
| 0
| 0.09555
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081818
| false
| 0
| 0.045455
| 0.009091
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b3e25ca3f02a2235c0f0ca58913370560e98207
| 3,351
|
py
|
Python
|
parser/team23/instruccion/update_st.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
parser/team23/instruccion/update_st.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
parser/team23/instruccion/update_st.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
from abstract.instruccion import *
from tools.console_text import *
from tools.tabla_tipos import *
from storage import jsonMode as funciones
from error.errores import *
from tools.tabla_simbolos import *
class update_st (instruccion):
    def __init__(self, id1, id2, dato, where, line, column, num_nodo):
        super().__init__(line, column)
        self.id1 = id1
        self.id2 = id2
        self.dato = dato
        self.where = where

        # AST node for UPDATE
        self.nodo = nodo_AST('UPDATE', num_nodo)
        self.nodo.hijos.append(nodo_AST('UPDATE', num_nodo + 1))
        self.nodo.hijos.append(nodo_AST(id1, num_nodo + 2))
        self.nodo.hijos.append(nodo_AST('SET', num_nodo + 3))
        self.nodo.hijos.append(nodo_AST(id2, num_nodo + 4))
        self.nodo.hijos.append(nodo_AST('=', num_nodo + 5))
        self.nodo.hijos.append(nodo_AST(dato, num_nodo + 6))
        if where != None:
            self.nodo.hijos.append(where.nodo)

        # Grammar
        self.grammar_ = "<TR><TD>INSTRUCCION ::= UPDATE ID SET ID = op_val where; </TD><TD>INSTRUCCION = falta poner accicon;</TD></TR>"
        if where != None:
            self.grammar_ += where.grammar_

    def ejecutar(self):
        id_db = get_actual_use()
        if self.where != None:
            list_id = [self.id1]
            val_return = self.where.ejecutar(list_id)
            dato_val = self.dato.ejecutar(list_id)
            index_col = ts.get_pos_col(id_db, self.id1, self.id2)
            index_pk = ts.get_index_pk(id_db, self.id1)
            for item in val_return.valor:
                dict_registro = {}
                count_col = 0
                for col in item:
                    if count_col == index_col:
                        dict_registro[count_col] = dato_val.valor
                    else:
                        dict_registro[count_col] = col
                    count_col += 1
                list_pk = []
                for id_pk in index_pk:
                    list_pk.append(item[id_pk])
                resultado = funciones.update(id_db, self.id1, dict_registro, list_pk)
                # Return value: 0 operation succeeded, 1 error during the operation,
                # 2 database does not exist, 3 table does not exist, 4 primary key does not exist.
                if resultado == 1:
                    errores.append(nodo_error(self.line, self.column, 'ERROR - No se pudo realizar el update', 'Semántico'))
                    add_text('ERROR - No se pudo realizar el update\n')
                elif resultado == 2:
                    errores.append(nodo_error(self.line, self.column, 'ERROR - No se encontró la base de datos', 'Semántico'))
                    add_text('ERROR - No se encontró la base de datos\n')
                elif resultado == 3:
                    errores.append(nodo_error(self.line, self.column, 'ERROR - No se encontro la tabla ' + self.id1, 'Semántico'))
                    add_text('ERROR - No se encontro la tabla ' + self.id1 + '\n')
                elif resultado == 4:
                    errores.append(nodo_error(self.line, self.column, 'ERROR - No existe la llave primaria', 'Semántico'))
                    add_text('ERROR - No existe la llave primaria\n')
            add_text('Se actualizaron los registros\n')
        else:
            pass
| 41.37037
| 156
| 0.5661
| 430
| 3,351
| 4.237209
| 0.24186
| 0.054885
| 0.049945
| 0.072997
| 0.356202
| 0.329308
| 0.19045
| 0.163557
| 0.106476
| 0.106476
| 0
| 0.014305
| 0.332438
| 3,351
| 81
| 157
| 41.37037
| 0.800179
| 0.048941
| 0
| 0.065574
| 0
| 0.016393
| 0.152904
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032787
| false
| 0.016393
| 0.098361
| 0
| 0.147541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b405010c115340a11d63243786f96de4c6d44a6
| 3,953
|
py
|
Python
|
comp_prov.py
|
RickyMexx/ML_CompilerProvenance
|
ce19276aa93b01fa6cdd275e5c0514cb0e9a9f45
|
[
"Apache-2.0"
] | null | null | null |
comp_prov.py
|
RickyMexx/ML_CompilerProvenance
|
ce19276aa93b01fa6cdd275e5c0514cb0e9a9f45
|
[
"Apache-2.0"
] | null | null | null |
comp_prov.py
|
RickyMexx/ML_CompilerProvenance
|
ce19276aa93b01fa6cdd275e5c0514cb0e9a9f45
|
[
"Apache-2.0"
] | null | null | null |
# each JSON has: {instructions}, {opt}, {compiler}
# MODEL SETTINGS: please set these before running the main #
mode = "opt" # Labels of the model: [opt] or [compiler]
samples = 3000 # Number of the blind set samples
fav_instrs_in = ["mov"] # Set of instructions of which DEST register should be extracted [IN]
fav_instrs_eq = ["lea"] # Set of instructions of which DEST register should be extracted [EQ]
# -------------- #
# import warnings filter
from warnings import simplefilter
# ignore all future warnings
simplefilter(action='ignore', category=FutureWarning)
import json
import csv
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import *
from sklearn.metrics import confusion_matrix, classification_report, log_loss
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
import scikitplot as skplt
import matplotlib.pyplot as plt
# Function that parses the input file
# Dataset can be 1 (train) or 2 (blind test)
def processFile(name, i, o, c, dataset):
    with open(name) as f:
        for jsonl in f:
            tmp = json.loads(jsonl)
            i.append(tmp['instructions'])
            if (dataset == 1):
                o.append(tmp['opt'])
                c.append(tmp['compiler'])
    for idx in range(len(i)):
        start = ""
        for word in i[idx]:
            tmp = ""
            arr = word.split()
            flag = True
            for ins1 in fav_instrs_in:
                if ins1 in arr[0]:
                    flag = False
                    tmp = arr[0] + " " + arr[1] + " "
            for ins2 in fav_instrs_eq:
                if ins2 == arr[0]:
                    flag = False
                    tmp = arr[0] + " " + arr[1] + " "
            if flag:
                tmp = arr[0] + " "
            start += tmp
        i[idx] = start


# Function that deals with the csv file
# Index can be: 1 (opt) or 0 (compiler)
def produceOutput(name, out, index):
    if index != 0 and index != 1:
        return
    lines = list()
    with open(name, "r") as fr:
        rd = csv.reader(fr)
        lines = list(rd)
    if not lines:
        lines = [None] * samples
        for i in range(samples):
            lines[i] = ["--", "--"]
    for i in range(samples):
        lines[i][index] = out[i]
    with open(name, "w") as fw:
        wr = csv.writer(fw)
        wr.writerows(lines)


if __name__ == "__main__":
    index = 1 if mode == "opt" else 0
    instrs = list()
    opt = list()
    comp = list()
    processFile("train_dataset.jsonl", instrs, opt, comp, 1)

    vectorizer = CountVectorizer(min_df=5)
    #vectorizer = TfidfVectorizer(min_df=5)
    X_all = vectorizer.fit_transform(instrs)
    y_all = opt if mode == "opt" else comp
    X_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size=0.2, random_state=15)

    #model = RandomForestClassifier(n_estimators=200).fit(X_train, y_train)
    model = GradientBoostingClassifier(n_estimators=200, max_depth=7).fit(X_train, y_train)

    print("Outcomes on test set")
    pred = model.predict(X_test)
    print(confusion_matrix(y_test, pred))
    print(classification_report(y_test, pred))
    ll = log_loss(y_test, model.predict_proba(X_test))
    print("Log Loss: {}".format(ll))
    #skplt.metrics.plot_precision_recall_curve(y_test, model.predict_proba(X_test), title="MOGB")
    #skplt.metrics.plot_confusion_matrix(y_test, pred, normalize=True, title="MOGB")
    #plt.show()

    # Calculating the overfitting
    print("Outcomes on training set")
    pred2 = model.predict(X_train)
    print(confusion_matrix(y_train, pred2))
    print(classification_report(y_train, pred2))

    # Predicting the blind dataset
    b_instrs = list()
    processFile("test_dataset_blind.jsonl", b_instrs, list(), list(), 2)
    b_X_all = vectorizer.transform(b_instrs)
    b_pred = model.predict(b_X_all)
    produceOutput("1743168.csv", b_pred, index)
| 30.407692
| 101
| 0.625348
| 533
| 3,953
| 4.487805
| 0.320826
| 0.012542
| 0.013796
| 0.015886
| 0.139632
| 0.107023
| 0.107023
| 0.064381
| 0.064381
| 0.044314
| 0
| 0.016718
| 0.258538
| 3,953
| 129
| 102
| 30.643411
| 0.799386
| 0.223122
| 0
| 0.073171
| 0
| 0
| 0.056814
| 0.007882
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.109756
| 0
| 0.146341
| 0.085366
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b42bde77e1661a4f0f1591879e395dd1510705b
| 2,266
|
py
|
Python
|
task.py
|
init-helpful/jsontasks
|
2cfc0b9b7e5f0ece9d753037f2d6dacf2c165f6e
|
[
"MIT"
] | null | null | null |
task.py
|
init-helpful/jsontasks
|
2cfc0b9b7e5f0ece9d753037f2d6dacf2c165f6e
|
[
"MIT"
] | null | null | null |
task.py
|
init-helpful/jsontasks
|
2cfc0b9b7e5f0ece9d753037f2d6dacf2c165f6e
|
[
"MIT"
] | null | null | null |
from jsonlink import JsonLink
from globals import read_json_file
class Task(JsonLink):
    def __init__(
        self,
        name="",
        task_description=None,
        step_groupings=[],
        global_values={},
        global_hooks={},
        steps=[],
        python_dependencies=[],
        keywords_file_path="",
    ):
        self.task_name = name
        self.task_description = task_description
        self.step_groupings = step_groupings
        self.global_values = global_values
        self.global_hooks = global_hooks
        self.steps = steps
        self.python_dependencies = python_dependencies
        super(Task, self).__init__(
            keywords_file_path=keywords_file_path,
            attribute_filters=["__", "parse"],
            sub_classes=[Step],
        )

    def __repr__(self):
        return f"""
        Name          : {self.task_name.upper()}
        Description   : {self.task_description}
        Global Values : {self.global_values}
        Groupings     : {self.step_groupings}
        Hooks         : {self.global_hooks}
        Steps         : {self.steps}
        """

    # def step_name(self, *args):
    #     self.__update_value_in_step("name", args)

    # def __update_value_in_step(self, property_to_update, args):
    #     self.current_task.update_step(
    #         index=get_indexes(path(args), return_last_found=True),
    #         variables={property_to_update: value(args)},
    #     )

    # def parse(self, task, task_name=""):
    #     self.current_task = Task()
    #     return self.current_task


class Step:
    def __init__(
        self,
        step_name="",
        step_description="",
        data_hooks={},
        associated_data={},
        dependent_on={},
    ):
        self.name = step_name
        self.description = step_description
        self.hooks = data_hooks
        self.data = associated_data
        self.dependent_on = dependent_on

    def __repr__(self):
        return f"""
        Name         : {self.name}
        Description  : {self.description}
        Hooks        : {self.hooks}
        Data         : {self.data}
        Dependent On : {self.dependent_on}
        """
task = Task()
task.update_from_dict(read_json_file("exampletask.json"))
print(task)
| 26.97619
| 68
| 0.581642
| 237
| 2,266
| 5.172996
| 0.236287
| 0.039152
| 0.039152
| 0.035889
| 0.042414
| 0.042414
| 0.042414
| 0
| 0
| 0
| 0
| 0
| 0.311121
| 2,266
| 83
| 69
| 27.301205
| 0.785394
| 0.170786
| 0
| 0.2
| 0
| 0
| 0.262594
| 0.036442
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.033333
| 0.033333
| 0.166667
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b4355b686014d85c2cc35d093973bc84723e068
| 5,252
|
py
|
Python
|
sight_api/__init__.py
|
siftrics/sight-python
|
dd162b54efa856cd15955791bbdb563b3ca9cd35
|
[
"Apache-2.0"
] | 1
|
2020-02-23T19:08:39.000Z
|
2020-02-23T19:08:39.000Z
|
sight_api/__init__.py
|
siftrics/sight-python
|
dd162b54efa856cd15955791bbdb563b3ca9cd35
|
[
"Apache-2.0"
] | null | null | null |
sight_api/__init__.py
|
siftrics/sight-python
|
dd162b54efa856cd15955791bbdb563b3ca9cd35
|
[
"Apache-2.0"
] | null | null | null |
# Copyright © 2020 Siftrics
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__version__ = '1.2.0'
import base64
import requests
import time
def _getOrElse(json, key):
    if key not in json:
        raise Exception('This should never happen. Got successful HTTP status code (200) but the body was not the JSON we were expecting.')
    return json[key]


class Client:
    def __init__(self, api_key):
        self.api_key = api_key

    def doPoll(self, pollingURL, files):
        fileIndex2HaveSeenPages = [list() for _ in files]
        while True:
            response = requests.get(
                pollingURL,
                headers={'Authorization': 'Basic {}'.format(self.api_key)},
            )
            response.raise_for_status()
            json = response.json()
            pages = _getOrElse(json, 'Pages')
            if not pages:
                time.sleep(0.5)
                continue
            for page in pages:
                fileIndex = _getOrElse(page, 'FileIndex')
                pageNumber = _getOrElse(page, 'PageNumber')
                numberOfPagesInFile = _getOrElse(page, 'NumberOfPagesInFile')
                if not fileIndex2HaveSeenPages[fileIndex]:
                    fileIndex2HaveSeenPages[fileIndex] = [False] * numberOfPagesInFile
                fileIndex2HaveSeenPages[fileIndex][pageNumber - 1] = True
            yield json['Pages']
            haveSeenAllPages = True
            for l in fileIndex2HaveSeenPages:
                if not l:
                    haveSeenAllPages = False
                    break
                if not all(l):
                    haveSeenAllPages = False
                    break
            if haveSeenAllPages:
                return
            time.sleep(0.5)

    def recognizeAsGenerator(self, files, words=False, autoRotate=False, exifRotate=False):
        payload = {
            'files': [],
            'makeSentences': not words,  # make love not bombs
            'doAutoRotate': autoRotate,
            'doExifRotate': exifRotate
        }
        for f in files:
            fn = f.lower()
            if fn.endswith('.pdf'):
                mimeType = 'application/pdf'
            elif fn.endswith('.bmp'):
                mimeType = 'image/bmp'
            elif fn.endswith('.gif'):
                mimeType = 'image/gif'
            elif fn.endswith('.jpeg'):
                mimeType = 'image/jpeg'
            elif fn.endswith('.jpg'):
                mimeType = 'image/jpg'
            elif fn.endswith('.png'):
                mimeType = 'image/png'
            else:
                msg = '{} does not have a valid extension; it must be one of ".pdf", ".bmp", ".gif", ".jpeg", ".jpg", or ".png".'.format(f)
                raise Exception(msg)
            with open(f, 'rb') as fileObj:
                base64File = base64.b64encode(fileObj.read())
            payload['files'].append({
                'mimeType': mimeType,
                'base64File': base64File.decode('utf-8'),
            })
        response = requests.post(
            'https://siftrics.com/api/sight/',
            headers={'Authorization': 'Basic {}'.format(self.api_key)},
            json=payload,
        )
        response.raise_for_status()
        json = response.json()
        if 'PollingURL' in json:
            for pages in self.doPoll(json['PollingURL'], files):
                yield pages
            return
        if 'RecognizedText' not in json:
            raise Exception('This should never happen. Got successful HTTP status code (200) but the body was not the JSON we were expecting.')
        page = {
            'Error': '',
            'FileIndex': 0,
            'PageNumber': 1,
            'NumberOfPagesInFile': 1
        }
        page.update(json)
        yield [page]
        return

    def recognize(self, files, words=False, autoRotate=False, exifRotate=False):
        if type(files) is not list:
            msg = 'You must pass in a list of files, not a {}'.format(type(files))
            raise TypeError(msg)
        pages = list()
        for ps in self.recognizeAsGenerator(
                files, words=words, autoRotate=autoRotate, exifRotate=exifRotate):
            pages.extend(ps)
        return pages
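
# Hedged usage sketch (illustration only; the API key and file name below are
# invented, not taken from this module):
#
# client = Client('my-api-key')
# pages = client.recognize(['invoice.pdf'])
# for page in pages:
#     print(page['FileIndex'], page['PageNumber'], page.get('RecognizedText'))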
| 40.091603
| 143
| 0.581493
| 573
| 5,252
| 5.291449
| 0.369983
| 0.029024
| 0.023087
| 0.009235
| 0.177441
| 0.158311
| 0.158311
| 0.106201
| 0.073879
| 0.073879
| 0
| 0.011029
| 0.326733
| 5,252
| 130
| 144
| 40.4
| 0.846154
| 0.20297
| 0
| 0.163462
| 0
| 0.028846
| 0.17575
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048077
| false
| 0.009615
| 0.028846
| 0
| 0.134615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b483bc7a8258807d0838c4087de8c2c75b55797
| 802
|
py
|
Python
|
10_54_sobel_filter.py
|
Larilok/image_processing
|
331e50ecd127ba61d6a59a51b7e90f0fd31c7a29
|
[
"MIT"
] | null | null | null |
10_54_sobel_filter.py
|
Larilok/image_processing
|
331e50ecd127ba61d6a59a51b7e90f0fd31c7a29
|
[
"MIT"
] | null | null | null |
10_54_sobel_filter.py
|
Larilok/image_processing
|
331e50ecd127ba61d6a59a51b7e90f0fd31c7a29
|
[
"MIT"
] | null | null | null |
from skimage import img_as_int
import cv2
import numpy as np
from pylab import *
import scipy.ndimage.filters as filters
#img = cv2.imread('images/profile.jpg', 0)
img = cv2.imread('images/moon.jpg',0)
sobel_operator_v = np.array([
    [-1, 0, 1],
    [-2, 0, 2],
    [-1, 0, 1]
])
sobelX = cv2.Sobel(img, -1, 1, 0, ksize=5)
sobelY = cv2.Sobel(img, -1, 0, 1, ksize=5)
subplot(2,2,1)
plt.imshow(sobelX, cmap='gray')
plt.title('(-1, 1, 0)')
subplot(2,2,2)
plt.imshow(sobelY, cmap='gray')
plt.title('(-1, 0, 1)')
subplot(2,2,3)
plt.imshow(filters.convolve(img_as_int(img), sobel_operator_v), cmap='gray')
plt.title('sobel vertical')
subplot(2,2,4)
plt.imshow(filters.convolve(img_as_int(img), sobel_operator_v.T), cmap='gray')
plt.title('sobel horizontal')
plt.show()
| 22.277778
| 79
| 0.649626
| 140
| 802
| 3.635714
| 0.3
| 0.023576
| 0.023576
| 0.125737
| 0.341847
| 0.192534
| 0.192534
| 0.192534
| 0.192534
| 0.192534
| 0
| 0.06213
| 0.157107
| 802
| 35
| 80
| 22.914286
| 0.690828
| 0.051122
| 0
| 0
| 0
| 0
| 0.111724
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.192308
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b4da977c274712fe30edac1bd68cf28ac41017f
| 4,964
|
py
|
Python
|
django_bootstrap_generator/management/commands/generate_bootstrap.py
|
rapilabs/django-bootstrap-generator
|
51bd0ea3eae69c04b856c1df34c5be1a4abc0385
|
[
"MIT"
] | 1
|
2015-03-10T03:41:39.000Z
|
2015-03-10T03:41:39.000Z
|
django_bootstrap_generator/management/commands/generate_bootstrap.py
|
rapilabs/django-bootstrap-generator
|
51bd0ea3eae69c04b856c1df34c5be1a4abc0385
|
[
"MIT"
] | null | null | null |
django_bootstrap_generator/management/commands/generate_bootstrap.py
|
rapilabs/django-bootstrap-generator
|
51bd0ea3eae69c04b856c1df34c5be1a4abc0385
|
[
"MIT"
] | null | null | null |
import collections
import types
from optparse import make_option
from django.db.models.loading import get_model
from django.core.management.base import BaseCommand, CommandError
from django.db.models.fields import EmailField, URLField, BooleanField, TextField
def convert(name):
    return name.replace('_', ' ').capitalize()
bs_form = """\
<form role="form" class="form-horizontal">
%s\
<div class="form-group">
<div class="col-sm-offset-2 col-sm-10">
<button class="btn btn-primary"><i class="fa fa-save"></i> Save</button>
</div>
</div>
</form>
"""
bs_field = """\
<div class="form-group">
<label for="%(id)s" class="col-sm-2 control-label">%(label)s</label>
<div class="col-sm-10">
%(field)s%(error)s
</div>
</div>
"""
bs_input = """\
<input type="%(input_type)s" %(name_attr)s="%(name)s"%(class)s id="%(id)s"%(extra)s/>"""
bs_select = """\
<select %(name_attr)s="%(name)s" class="form-control" id="%(id)s"%(extra)s>%(options)s
</select>"""
bs_option = """
<option value="%(value)s">%(label)s</option>"""
optgroup = """
<optgroup label="%(label)s">%(options)s
</optgroup>"""
bs_textarea = """\
<textarea %(name_attr)s="%(name)s" class="form-control" id="%(id)s"%(extra)s></textarea>"""
react_error = """
{errors.%(name)s}"""
def format_choice(key, val):
    if isinstance(val, collections.Iterable) and not isinstance(val, types.StringTypes):
        return optgroup % {
            'label': key,
            'options': ''.join([bs_option % {'value': value, 'label': label} for value, label in val])
        }
    else:
        return bs_option % {'value': key, 'label': val}


def format_bs_field(model_name, field, flavour):
    field_id_html = model_name + '-' + field.name
    if flavour == 'react':
        name_attr = 'ref'
        if isinstance(field, BooleanField):
            extra = ' defaultChecked={this.state.data.' + field.name + '}'
        else:
            extra = ' defaultValue={this.state.data.' + field.name + '}'
    else:
        name_attr = 'name'
        extra = ''
    if field.choices:
        field_html = bs_select % {
            'id': field_id_html,
            'options': "".join([format_choice(value, label) for value, label in field.choices]),
            'name': field.name,
            'name_attr': name_attr,
            'extra': extra,
        }
    elif isinstance(field, TextField):
        field_html = bs_textarea % {
            'id': field_id_html,
            'name': field.name,
            'name_attr': name_attr,
            'extra': extra,
        }
    else:
        if isinstance(field, EmailField):
            input_type = 'email'
            class_fullstr = ' class="form-control"'
        elif isinstance(field, URLField):
            input_type = 'url'
            class_fullstr = ' class="form-control"'
        elif isinstance(field, BooleanField):
            input_type = 'checkbox'
            class_fullstr = ''
        else:
            input_type = 'text'
            class_fullstr = ' class="form-control"'
        field_html = bs_input % {
            'id': field_id_html,
            'input_type': input_type,
            'name_attr': name_attr,
            'name': field.name,
            'class': class_fullstr,
            'extra': extra,
        }
    if flavour == 'react':
        error = react_error % {
            'name': field.name,
        }
    else:
        error = ''
    rendered_html = bs_field % {
        'id': field_id_html,
        'label': convert(field.name),
        'field': field_html,
        'error': error,
    }
    if flavour == 'react':
        rendered_html = rendered_html.replace('class="col-sm-10"', 'class={"col-sm-10 " + errorClasses.' + field.name + '}')
    return rendered_html


def format_bs_form(fields, flavour):
    rendered_html = bs_form % fields
    if flavour == 'react':
        rendered_html = rendered_html.replace('class=', 'className='). \
            replace('for=', 'htmlFor=')
    return rendered_html


class Command(BaseCommand):
    args = '<app_name> <model_name>'
    help = 'Prints a bootstrap form for the supplied app & model'
    option_list = BaseCommand.option_list + (
        make_option('--react',
                    action='store_true',
                    dest='react',
                    default=False,
                    help='Generate with React\'s ref and defaultValue attributes'),
    )

    def handle(self, *args, **options):
        if len(args) != 2:
            raise CommandError('Please supply an app name & model name')
        app_name = args[0]
        model_name = args[1]
        if options['react']:
            flavour = 'react'
        else:
            flavour = None
        model_class = get_model(app_name, model_name)
        fields = [format_bs_field(model_name, field, flavour) for field in model_class._meta.fields if field.name != 'id']
        self.stdout.write(format_bs_form("".join(fields), flavour))
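
# Hedged usage note (not from the original file; the app and model names below
# are invented). As a Django management command this would typically be run as:
#
#   python manage.py generate_bootstrap myapp Profile
#   python manage.py generate_bootstrap myapp Profile --react
#
# and it prints the generated <form> markup to stdout.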
| 29.724551
| 124
| 0.567889
| 582
| 4,964
| 4.689003
| 0.221649
| 0.032246
| 0.018322
| 0.019055
| 0.209601
| 0.180652
| 0.154635
| 0.129718
| 0.095273
| 0.030048
| 0
| 0.003616
| 0.275786
| 4,964
| 166
| 125
| 29.903614
| 0.755494
| 0
| 0
| 0.275362
| 0
| 0.043478
| 0.282434
| 0.07917
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036232
| false
| 0
| 0.043478
| 0.007246
| 0.144928
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b51df476a25eb10705b2313609ccd9bca295e46
| 1,667
|
py
|
Python
|
lab_15/server/main.py
|
MrLuckUA/python_course
|
50a87bc54550aedaac3afcce5b8b5c132fb6ec98
|
[
"MIT"
] | null | null | null |
lab_15/server/main.py
|
MrLuckUA/python_course
|
50a87bc54550aedaac3afcce5b8b5c132fb6ec98
|
[
"MIT"
] | null | null | null |
lab_15/server/main.py
|
MrLuckUA/python_course
|
50a87bc54550aedaac3afcce5b8b5c132fb6ec98
|
[
"MIT"
] | null | null | null |
import queue
import select
import socket
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setblocking(False)
server.bind(('localhost', 9999))
server.listen(5)
inputs = [server]
outputs = []
message_queues = {}
while inputs:
    readable, writable, exceptional = select.select(
        inputs, outputs, inputs)
    for s in readable:
        if s is server:
            connection, client_address = s.accept()
            connection.setblocking(0)
            inputs.append(connection)
            message_queues[connection] = queue.Queue()
        else:
            data = s.recv(1024)
            if data:
                message_queues[s].put(data)
                if s not in outputs:
                    outputs.append(s)
            else:
                if s in outputs:
                    outputs.remove(s)
                inputs.remove(s)
                s.close()
                del message_queues[s]
    for s in writable:
        try:
            next_msg = message_queues[s].get_nowait()
        except queue.Empty:
            outputs.remove(s)
        else:
            s.send(next_msg)
    for s in exceptional:
        inputs.remove(s)
        if s in outputs:
            outputs.remove(s)
        s.close()
        del message_queues[s]
# import socket
#
# server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# server.bind(('localhost', 9999))
# server.listen(1)
# while True:
# client_socket, addr = server.accept()
# print(f'New connection from {addr}')
# client_socket.send('Hello there, how are you?'.encode('utf-8'))
# answer = client_socket.recv(1024)
# print(answer)
# client_socket.close()
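
# Hedged companion sketch (not in the original file): a minimal client for the
# select-based echo server above. Host and port mirror the bind() call; the
# message text is invented.
#
# import socket
#
# client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# client.connect(('localhost', 9999))
# client.send(b'hello, selector server')
# print(client.recv(1024))  # the server echoes queued data back
# client.close()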
| 26.887097
| 69
| 0.574685
| 196
| 1,667
| 4.795918
| 0.346939
| 0.082979
| 0.059574
| 0.051064
| 0.315957
| 0.315957
| 0.247872
| 0.2
| 0.13617
| 0.13617
| 0
| 0.017575
| 0.317337
| 1,667
| 61
| 70
| 27.327869
| 0.808436
| 0.217756
| 0
| 0.318182
| 0
| 0
| 0.006971
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.068182
| 0
| 0.068182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b53b8e74efe57ed1e451add7d928562719e4e93
| 4,689
|
py
|
Python
|
BroLog.py
|
jcwoods/BroLog
|
b95c91178d4038d1e363cb8c8ef9ecc64a23193f
|
[
"MIT"
] | null | null | null |
BroLog.py
|
jcwoods/BroLog
|
b95c91178d4038d1e363cb8c8ef9ecc64a23193f
|
[
"MIT"
] | null | null | null |
BroLog.py
|
jcwoods/BroLog
|
b95c91178d4038d1e363cb8c8ef9ecc64a23193f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
import codecs
import ipaddress as ip
import pandas as pd
from datetime import datetime as dt
class BroLogFile:
def doSeparator(self, fields):
sep = fields[1]
if len(sep) == 1: # a literal?
self.separator = sep
elif sep[:2] == '\\x': # a hexadecimal (ASCII) value?
self.separator = chr(int(sep[2:], 16))
else:
raise ValueError('invalid separator format in log file')
return
def default_transform(self, fields):
ntypes = len(self.field_types)
for fno in range(ntypes):
if fields[fno] == self.unset_field:
fields[fno] = None
continue
elif fields[fno] == self.empty_field:
fields[fno] = ''
continue
elif self.field_types[fno] == 'count' or self.field_types[fno] == 'port':
try:
val = int(fields[fno])
fields[fno] = val
except ValueError:
pass
elif self.field_types[fno] == 'interval':
try:
val = float(fields[fno])
fields[fno] = val
except ValueError:
pass
#elif self.field_types[fno] == 'addr':
# try:
# ip_addr = ip.ip_address(fields[fno])
# fields[fno] = int(ip_addr)
# except ValueError:
# # IPv6 address? TBD...
# fields[fno] = 0
elif self.field_types[fno] == 'time':
ts = float(fields[fno])
t = dt.fromtimestamp(ts).isoformat()
fields[fno] = t
return
def __init__(self, fname, row_transform = None, row_filter = None):
"""
Create a new Pandas DataFrame from the given file.
fname is the name of the file to be opened.
row_transform is an (optional) function which will be applied
to each row as it is read. It may modify the individual column
values, such as by performing integer conversions on expected
numeric fields. This function does not return a value.
row_filter is an (optional) function which will be used to test each
input row. It is executed after row_transform (if one exists),
and must return a boolean value. If True, the row will be
included in the result. If False, the row will be suppressed.
May generate an exception if the file could not be opened or if an
invalid format is found in the separator value.
"""
self.row_transform = row_transform
self.row_filter = row_filter
self.field_names = []
self.field_types = []
self.empty_field = '(empty)'
self.unset_field = '-'
self.set_separator = ','
self.separator = ' '
self.rows = []
self.field_map = None
#f = file(fname, 'r')
f = codecs.open(fname, 'r', encoding = 'utf-8')
line = f.readline()
while line[0] == '#':
fields = line[1:].strip().split(self.separator)
if fields[0] == 'separator':
self.doSeparator(fields)
elif fields[0] == 'empty_field':
self.empty_field = fields[1]
elif fields[0] == 'unset_field':
self.unset_field = fields[1]
elif fields[0] == 'fields':
self.field_names = fields[1:]
elif fields[0] == 'types':
self.field_types = fields[1:]
line = f.readline()
for line in f:
if line[0] == '#': continue
fields = line.rstrip("\r\n").split(self.separator)
if self.row_transform is not None:
self.row_transform(fields)
else:
self.default_transform(fields)
if self.row_filter is not None:
if self.row_filter(fields, self.field_types, self.field_names) is False: continue
self.rows.append(fields)
return
def asDataFrame(self):
df = pd.DataFrame(self.rows, columns = self.field_names)
return df
def __len__(self):
return len(self.rows)
def conn_filter(fields, types, names):
return fields[6] == 'tcp'
def main(argv):
con = BroLogFile(argv[1])
#for n in range(10):
# print(con.rows[n])
df = con.asDataFrame()
print(df.head(10))
print(df.describe())
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
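# Hedged usage sketch: the conn_filter above keeps only TCP rows (column 6 is
# the protocol column in a Bro/Zeek conn.log; 'conn.log' is an illustrative
# path):
#
# tcp_log = BroLogFile('conn.log', row_filter=conn_filter)
# print(len(tcp_log), 'TCP records')
# print(tcp_log.asDataFrame().head())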
| 28.591463
| 97
| 0.527831
| 554
| 4,689
| 4.362816
| 0.301444
| 0.052131
| 0.052131
| 0.035168
| 0.086885
| 0.06206
| 0.043029
| 0.043029
| 0.043029
| 0.043029
| 0
| 0.009498
| 0.371295
| 4,689
| 163
| 98
| 28.766871
| 0.81038
| 0.228833
| 0
| 0.182796
| 0
| 0
| 0.038671
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075269
| false
| 0.021505
| 0.053763
| 0.021505
| 0.215054
| 0.021505
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b53fdee739e7f6188764d53517bc20b22165406
| 8,288
|
py
|
Python
|
test_pdf.py
|
ioggstream/json-forms-pdf
|
9f18cf239ae892ebb0018bfd4a8f792af35ccfac
|
[
"BSD-3-Clause"
] | 2
|
2020-06-18T13:31:32.000Z
|
2022-02-21T08:30:37.000Z
|
test_pdf.py
|
ioggstream/json-forms-pdf
|
9f18cf239ae892ebb0018bfd4a8f792af35ccfac
|
[
"BSD-3-Clause"
] | 2
|
2020-05-25T17:31:52.000Z
|
2020-06-23T17:32:03.000Z
|
test_pdf.py
|
ioggstream/json-forms-pdf
|
9f18cf239ae892ebb0018bfd4a8f792af35ccfac
|
[
"BSD-3-Clause"
] | null | null | null |
# simple_checkboxes.py
import logging
from os.path import basename
from pathlib import Path
import jsonschema
import pytest
# from reportlab.pdfbase import pdfform
import yaml
from reportlab.pdfgen import canvas
uischema = yaml.safe_load(Path("jsonforms-react-seed/src/uischema.json").read_text())
form_schema = yaml.safe_load(Path("jsonforms-react-seed/src/schema.json").read_text())
form_fields = jsonschema.RefResolver.from_schema(form_schema)
log = logging.getLogger()
logging.basicConfig(level=logging.DEBUG)
DATA = {
"name": "foo",
"description": "Confirm if you have passed the subject\nHereby ...",
"done": True,
"recurrence": "Daily",
"rating": "3",
"due_date": "2020-05-21",
"recurrence_interval": 421,
}
def localize_date(date_string):
try:
from dateutil.parser import parse as dateparse
import locale
locale.nl_langinfo(locale.D_FMT)
d = dateparse(date_string)
return d.strftime(locale.nl_langinfo(locale.D_FMT))
except:
return date_string
def csetup(name, font_size=12):
c = canvas.Canvas(f"{name}.pdf")
c.setFont("Courier", font_size)
return c
class FormRender(object):
def __init__(self, ui, schema, font_size=11, font_size_form=None, data=DATA):
"""
:param ui: object containing ui-schema
:param schema: structure containing the schema
"""
self.ui = ui
self.schema = schema
self.resolver = jsonschema.RefResolver.from_schema(schema)
self.font_size = font_size
self.font_size_form = font_size_form or font_size
self.data = data or {}
self.line_feed = 5 * self.font_size
@staticmethod
def from_file(ui_path, schema_path):
ui = yaml.safe_load(Path(ui_path).read_text())
schema = yaml.safe_load(Path(schema_path).read_text())
return FormRender(ui, schema)
def layout_to_form(self, layout, form, canvas, point):
assert "elements" in layout
x, y = point
if layout["type"] == "Group":
canvas.setFont("Courier", int(self.font_size * 1.5))
canvas.drawString(x, y, layout["label"])
canvas.setFont("Courier", self.font_size)
y -= 2 * self.line_feed
if layout["type"] == "HorizontalLayout":
y -= 10
point = (x, y)
for e in layout["elements"]:
x, y = self.element_to_form(e, form, canvas, (x, y))
if layout["type"] == "HorizontalLayout":
x += 250
y = point[1]
if layout["type"] == "HorizontalLayout":
return point[0], y - self.line_feed
return x, y - self.line_feed
def element_to_form(self, element, form, canvas, point):
x, y = point
if "elements" in element:
return self.layout_to_form(element, form, canvas, (x, y))
assert "type" in element
assert "scope" in element
supported_types = {
"string",
"number",
"integer",
"boolean",
}
schema_url, schema = self.resolver.resolve(element["scope"])
field_type = schema["type"]
if field_type not in supported_types:
raise NotImplementedError(field_type)
property_name = basename(schema_url)
field_label = element.get("label") or labelize(schema_url)
render = self.render_function(form, property_name, schema, self.data)
y -= self.line_feed
params = {
"name": schema_url,
"x": x + self.font_size * len(field_labeltest_pdf.py) // 1.4,
"y": y,
"forceBorder": True,
}
if schema.get("description"):
params.update({"tooltip": schema.get("description")})
canvas.drawString(x, y, field_label)
render(**params)
return x, y
def render_function(self, form, name, schema, data=None):
if schema["type"] in ("integer", "number"):
def _render_number(**params):
params.update(
{
"width": self.font_size_form * 5,
"height": self.font_size_form * 1.5,
}
)
value = data.get(name)
if value:
params.update(
{"value": str(value), "borderStyle": "inset",}
)
return form.textfield(**params)
return _render_number
if "enum" in schema:
def _render_enum(**params):
options = [(x,) for x in schema["enum"]]
params.update({"options": options, "value": schema["enum"][0]})
return form.choice(**params)
# return _render_enum
def _render_enum_2(**params):
x, y = params["x"], params["y"]
for v in schema["enum"]:
form.radio(
name=name,
tooltip="TODO",
value=v,
selected=False,
x=x,
y=y,
size=self.font_size_form,
buttonStyle="check",
borderStyle="solid",
shape="square",
forceBorder=True,
)
form.canv.drawString(x + self.font_size_form * 2, y, v)
x += self.font_size * len(v)
return params["x"], y
return _render_enum_2
if schema["type"] == "boolean":
def _render_bool(**params):
params.update(
{
"buttonStyle": "check",
"size": self.font_size_form,
"shape": "square",
}
)
if data.get(name):
params.update({"checked": "true"})
return form.checkbox(**params)
return _render_bool
def _render_string(**params):
value = data.get(name) or schema.get("default")
params.update(
{
"width": self.font_size_form * 10,
"height": self.font_size_form * 1.5,
"fontSize": self.font_size_form,
"borderStyle": "inset",
}
)
if schema.get("format", "").startswith("date"):
params.update(
{"width": self.font_size_form * 8,}
)
if value:
if schema.get("format", "").startswith("date"):
value = localize_date(value)
params.update({"value": value})
return form.textfield(**params)
return _render_string
def labelize(s):
return basename(s).replace("_", " ").capitalize()
def test_get_fields():
import PyPDF2
f = PyPDF2.PdfFileReader("simple.pdf")
ff = f.getFields()
assert "#/properties/given_name" in ff
@pytest.fixture(scope="module", params=["group", "simple"])
def harn_form_render(request):
label = request.param
log.warning("Run test with, %r", label)
fr = FormRender.from_file(f"data/ui-{label}.json", f"data/schema-{label}.json")
canvas = csetup(label)
return fr, canvas
def test_group(harn_form_render):
point = (0, 800)
fr, canvas = harn_form_render
layout = fr.ui
fr.layout_to_form(layout, canvas.acroForm, canvas, point)
canvas.save()
def test_text():
c = canvas.Canvas("form.pdf")
c.setFont("Courier", 12)
c.drawCentredString(300, 700, "Pets")
c.setFont("Courier", 11)
form = c.acroForm
x, y = 110, 645
for v in "inizio cessazione talpazione donazione".split():
form.radio(
name="radio1",
tooltip="Field radio1",
value=v,
selected=False,
x=x,
y=y,
buttonStyle="check",
borderStyle="solid",
shape="square",
forceBorder=True,
)
c.drawString(x + 11 * 2, y, v)
x += 11 * len(v)
c.save()
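# Running the suite (hedged): the tests above expect the fixture files
# referenced in this module (jsonforms-react-seed/src/*.json, data/ui-*.json,
# data/schema-*.json, simple.pdf) to exist relative to the working directory:
#
#   pytest test_pdf.py -k "group or text"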
| 30.358974
| 86
| 0.527389
| 910
| 8,288
| 4.653846
| 0.236264
| 0.043447
| 0.045336
| 0.03778
| 0.163164
| 0.134829
| 0.090437
| 0.055726
| 0
| 0
| 0
| 0.013035
| 0.352075
| 8,288
| 272
| 87
| 30.470588
| 0.775605
| 0.020029
| 0
| 0.167442
| 0
| 0
| 0.106975
| 0.014964
| 0
| 0
| 0
| 0
| 0.018605
| 1
| 0.07907
| false
| 0.004651
| 0.046512
| 0.004651
| 0.218605
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b549c8cf31113881352739cc51b8f9b8d3428b5
| 701
|
py
|
Python
|
pos_map.py
|
olzama/neural-supertagging
|
340a9b3eaf6427e5ec475cd03bc6f4b3d4891ba4
|
[
"MIT"
] | null | null | null |
pos_map.py
|
olzama/neural-supertagging
|
340a9b3eaf6427e5ec475cd03bc6f4b3d4891ba4
|
[
"MIT"
] | null | null | null |
pos_map.py
|
olzama/neural-supertagging
|
340a9b3eaf6427e5ec475cd03bc6f4b3d4891ba4
|
[
"MIT"
] | null | null | null |
'''
Assuming the following tab-separated format:
VBP+RB VBP
VBZ+RB VBZ
IN+DT IN
(etc.)
'''
class Pos_mapper:
def __init__(self, filepath):
with open(filepath,'r') as f:
lines = f.readlines()
self.pos_map = {}
self.unknowns = []
for ln in lines:
if ln:
tag,mapping = ln.strip().split('\t')
self.pos_map[tag] = mapping
def map_tag(self,tag):
if tag in self.pos_map:
return self.pos_map[tag]
else:
#return the first tag
self.unknowns.append(tag)
#print('Unknown POS tag: ' + tag)
tags = tag.split('+')
return tags[0]
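# Minimal usage sketch ('pos_map.tsv' is an illustrative path; the file must
# use the tab-separated format described in the module docstring):
#
# mapper = Pos_mapper('pos_map.tsv')
# print(mapper.map_tag('VBP+RB'))  # -> 'VBP' when present in the map
# print(mapper.map_tag('XX+YY'))   # unknown tag: falls back to 'XX'
# print(mapper.unknowns)           # ['XX+YY']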
| 23.366667
| 52
| 0.513552
| 90
| 701
| 3.888889
| 0.477778
| 0.08
| 0.114286
| 0.074286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002247
| 0.365193
| 701
| 30
| 53
| 23.366667
| 0.78427
| 0.198288
| 0
| 0
| 0
| 0
| 0.007194
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b580428c6c3fc7e1f2b5ec4c50a922c0d642dcf
| 4,051
|
py
|
Python
|
src/nti/externalization/integer_strings.py
|
NextThought/nti.externalization
|
5a445b85fb809a7c27bf8dbe45c29032ece187d8
|
[
"Apache-2.0"
] | null | null | null |
src/nti/externalization/integer_strings.py
|
NextThought/nti.externalization
|
5a445b85fb809a7c27bf8dbe45c29032ece187d8
|
[
"Apache-2.0"
] | 78
|
2017-09-15T14:59:58.000Z
|
2021-10-05T17:40:06.000Z
|
src/nti/externalization/integer_strings.py
|
NextThought/nti.externalization
|
5a445b85fb809a7c27bf8dbe45c29032ece187d8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Functions to represent potentially large integers as the shortest
possible human-readable and writable strings. The motivation is to be
able to take int ids as produced by an :class:`zc.intid.IIntId`
utility and produce something that can be written down and typed in by
a human. To this end, the strings produced have to be:
* One-to-one and onto the integer domain;
* As short as possible;
* While not being easily confused;
* Or accidentaly permuted
To meet those goals, we define an alphabet consisting of the ASCII
digits and upper and lowercase letters, leaving out troublesome pairs
(zero and upper and lower oh and upper queue, one and upper and lower
ell) (actually, those troublesome pairs will all map to the same
character).
We also put a version marker at the end of the string so we can evolve
this algorithm gracefully but still honor codes in the wild.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__all__ = [
'to_external_string',
'from_external_string',
]
# stdlib imports
import string
try:
maketrans = str.maketrans
except AttributeError: # Python 2
from string import maketrans # pylint:disable=no-name-in-module
translate = str.translate
# In the first version of the protocol, the version marker, which would
# come at the end, is always omitted. Subsequent versions will append
# a value that cannot be produced from the _VOCABULARY
_VERSION = '$'
# First, our vocabulary.
# Remove the letter values o and O, Q (confused with O if you're sloppy), l and L,
# and i and I, leaving the digits 1 and 0
_REMOVED = 'oOQlLiI'
_REPLACE = '0001111'
_VOCABULARY = ''.join(
reversed(sorted(list(set(string.ascii_letters + string.digits) - set(_REMOVED))))
)
# We translate the letters we removed
_TRANSTABLE = maketrans(_REMOVED, _REPLACE)
# Leaving us a base vocabulary to map integers into
_BASE = len(_VOCABULARY)
_ZERO_MARKER = '@' # Zero is special
def from_external_string(key):
"""
Turn the string in *key* into an integer.
>>> from nti.externalization.integer_strings import from_external_string
>>> from_external_string('xkr')
6773
:param str key: A native string, as produced by `to_external_string`.
(On Python 2, unicode *keys* are also valid.)
:raises ValueError: If the key is invalid or contains illegal characters.
:raises UnicodeDecodeError: If the key is a Unicode object, and contains
non-ASCII characters (which wouldn't be valid anyway)
"""
if not key:
raise ValueError("Improper key")
if not isinstance(key, str):
# Unicode keys cause problems on Python 2: The _TRANSTABLE is coerced
# to Unicode, which fails because it contains non-ASCII values.
# So instead, we encode the unicode string to ascii, which, if it is a
# valid key, will work
key = key.decode('ascii') if isinstance(key, bytes) else key.encode('ascii')
# strip the version if needed
key = key[:-1] if key[-1] == _VERSION else key
key = translate(key, _TRANSTABLE) # translate bad chars
if key == _ZERO_MARKER:
return 0
int_sum = 0
for idx, char in enumerate(reversed(key)):
int_sum += _VOCABULARY.index(char) * pow(_BASE, idx)
return int_sum
def to_external_string(integer):
"""
Turn an integer into a native string representation.
>>> from nti.externalization.integer_strings import to_external_string
>>> to_external_string(123)
'xk'
>>> to_external_string(123456789)
'kVxr5'
"""
# we won't step into the while if integer is 0
# so we just solve for that case here
if integer == 0:
return _ZERO_MARKER
result = ''
# Simple string concat benchmarks the fastest for this size data,
# among a list and an array.array( 'c' )
while integer > 0:
integer, remainder = divmod(integer, _BASE)
result = _VOCABULARY[remainder] + result
return result
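# Round-trip sanity sketch: for any non-negative integer the two functions
# above should invert each other (the sample values are illustrative):
#
# for n in (0, 1, 6773, 123456789):
#     assert from_external_string(to_external_string(n)) == n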
| 30.923664
| 85
| 0.709701
| 591
| 4,051
| 4.749577
| 0.416244
| 0.049875
| 0.0342
| 0.0114
| 0.052725
| 0.029925
| 0
| 0
| 0
| 0
| 0
| 0.011621
| 0.214021
| 4,051
| 130
| 86
| 31.161538
| 0.869975
| 0.626759
| 0
| 0
| 0
| 0
| 0.054402
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.116279
| 0
| 0.255814
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b5a89b5d003d45628ff5ec0925287bf4802eb5a
| 2,882
|
py
|
Python
|
chokitto.py
|
WKSu/chokitto
|
9eb0c7e69a62aede76cd0c8fd43dd4879bf03ff8
|
[
"MIT"
] | null | null | null |
chokitto.py
|
WKSu/chokitto
|
9eb0c7e69a62aede76cd0c8fd43dd4879bf03ff8
|
[
"MIT"
] | null | null | null |
chokitto.py
|
WKSu/chokitto
|
9eb0c7e69a62aede76cd0c8fd43dd4879bf03ff8
|
[
"MIT"
] | 1
|
2021-01-16T18:51:57.000Z
|
2021-01-16T18:51:57.000Z
|
#!/usr/bin/python3
import argparse, os
from collections import defaultdict
from lib.data import *
from lib.exporters import *
from lib.filters import *
from lib.parsers import *
def parse_arguments():
arg_parser = argparse.ArgumentParser(description='chokitto')
arg_parser.add_argument('input', help='path to clippings file')
arg_parser.add_argument('-o', '--output', help='path to output file (default: STDOUT)')
arg_parser.add_argument('-p', '--parser', default='kindle', choices=list(PARSER_MAP.keys()), help='parser for clippings file (default: kindle)')
arg_parser.add_argument('-e', '--exporter', default='markdown', help='clipping exporter (default: markdown)')
arg_parser.add_argument('-m', '--merge', action='store_true', help='merge clippings of different types if they occur at the same location (default: False)')
arg_parser.add_argument('-f', '--filters', nargs='*', help='list of filters to apply (default: None, format: "filter(\'arg\',\'arg\')")')
arg_parser.add_argument('-ls', '--list', action='store_true', help='list titles of documents in clippings file and exit (default: False)')
arg_parser.add_argument('-v', '--verbose', action='store_true', help='set verbosity (default: False)')
return arg_parser.parse_args()
def get_user_input(prompt, options=['y', 'n']):
ans = None
while ans not in options:
ans = input(f"{prompt} [{'/'.join(options)}] ")
return ans
def main():
args = parse_arguments()
# parse clippings
parser = PARSER_MAP[args.parser](verbose=args.verbose)
documents = parser.parse(args.input)
# merge and deduplicate clippings
if args.merge:
for title, author in documents:
documents[(title, author)].merge_clippings()
documents[(title, author)].deduplicate_clippings()
# set up filters
filters = parse_filters(args.filters) if args.filters else []
if filters:
# print filters
if args.verbose:
print("Filters (%d total):" % len(filters))
for filt in filters:
print(" %s" % filt)
# apply filters
documents = apply_filters(documents, filters)
# list documents (and exit if list flag was used)
if args.verbose or args.list:
print("Documents (%d total):" % len(documents))
for title, author in sorted(documents):
print(" %s" % documents[(title, author)])
if args.list: return
# set up exporter
exporter = parse_exporter(args.exporter)
if args.output:
# check if file already exists
if os.path.exists(args.output):
ans = get_user_input(f"File '{args.output}' already exists. Overwrite?")
if ans == 'n':
return
exporter.write(documents, args.output)
if args.verbose: print(f"Output:\n Output was saved to '{args.output}' using {exporter}.")
else:
if args.verbose: print("Output:\n")
print(exporter(documents))
if __name__ == '__main__':
main()
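# Example invocations (hedged; 'clippings.txt' and the filter name are
# illustrative, flags match the argparse definitions above):
#
#   python chokitto.py clippings.txt -ls              # list document titles
#   python chokitto.py clippings.txt -m -o out.md     # merge, write to file
#   python chokitto.py clippings.txt -f "name('Dune')"  # apply a filter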
| 37.428571
| 161
| 0.683553
| 385
| 2,882
| 5.005195
| 0.296104
| 0.046705
| 0.049818
| 0.083031
| 0.033212
| 0.033212
| 0
| 0
| 0
| 0
| 0
| 0.000418
| 0.170021
| 2,882
| 76
| 162
| 37.921053
| 0.805184
| 0.069743
| 0
| 0
| 0
| 0
| 0.279661
| 0.008089
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054545
| false
| 0
| 0.109091
| 0
| 0.218182
| 0.127273
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b5b098de65214be758959ec7c9a6aae3055a94c
| 3,197
|
py
|
Python
|
src/py21cmmc/_21cmfast/_utils.py
|
BradGreig/Hybrid21CMMC
|
984aa88ee4543db24095a3ba8529e1f4d0b1048d
|
[
"MIT"
] | null | null | null |
src/py21cmmc/_21cmfast/_utils.py
|
BradGreig/Hybrid21CMMC
|
984aa88ee4543db24095a3ba8529e1f4d0b1048d
|
[
"MIT"
] | null | null | null |
src/py21cmmc/_21cmfast/_utils.py
|
BradGreig/Hybrid21CMMC
|
984aa88ee4543db24095a3ba8529e1f4d0b1048d
|
[
"MIT"
] | null | null | null |
"""
Utilities that help with wrapping various C structures.
"""
class StructWithDefaults:
"""
A class which provides a convenient interface to create a C structure with defaults specified.
It is provided for the purpose of *creating* C structures in Python to be passed to C functions, where sensible
defaults are available. Structures which are created within C and passed back do not need to be wrapped.
This provides a *fully initialised* structure, and will fail if not all fields are specified with defaults.
.. note:: The actual C structure is gotten by calling an instance. This is auto-generated when called, based on the
parameters in the class.
.. warning:: This class will *not* deal well with parameters of the struct which are pointers. All parameters
should be primitive types, except for strings, which are dealt with specially.
Parameters
----------
ffi : cffi object
The ffi object from any cffi-wrapped library.
"""
_name = None
_defaults_ = {}
ffi = None
def __init__(self, **kwargs):
for k, v in self._defaults_.items():
# Prefer arguments given to the constructor.
if k in kwargs:
v = kwargs[k]
try:
setattr(self, k, v)
except AttributeError:
# The attribute has been defined as a property, save it as a hidden variable
setattr(self, "_" + k, v)
self._logic()
# Set the name of this struct in the C code
if self._name is None:
self._name = self.__class__.__name__
# A little list to hold references to strings so they don't de-reference
self._strings = []
def _logic(self):
pass
def new(self):
"""
Return a new empty C structure corresponding to this class.
"""
obj = self.ffi.new("struct " + self._name + "*")
return obj
def __call__(self):
"""
Return a filled C Structure corresponding to this instance.
"""
obj = self.new()
self._logic() # call this here to make sure any changes by the user to the arguments are re-processed.
for fld in self.ffi.typeof(obj[0]).fields:
key = fld[0]
val = getattr(self, key)
# Find the value of this key in the current class
if isinstance(val, str):
# If it is a string, need to convert it to C string ourselves.
val = self.ffi.new('char[]', getattr(self, key).encode())
try:
setattr(obj, key, val)
except TypeError:
print("For key %s, value %s:" % (key, val))
raise
self._cstruct = obj
return obj
@property
def pystruct(self):
"A Python dictionary containing every field which needs to be initialized in the C struct."
obj = self.new()
return {fld[0]:getattr(self, fld[0]) for fld in self.ffi.typeof(obj[0]).fields}
def __getstate__(self):
return {k:v for k,v in self.__dict__.items() if k not in ["_strings", "_cstruct"]}
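# Hedged sketch of a concrete subclass; the struct name, fields, and `my_ffi`
# object are hypothetical and must match a cdef'd C declaration:
#
# class MyParams(StructWithDefaults):
#     _name = "MyParams"                 # struct MyParams { double x; int n; };
#     _defaults_ = {"x": 1.0, "n": 10}
#     ffi = my_ffi                       # the cffi FFI object for the library
#
# p = MyParams(n=42)
# cstruct = p()                          # filled `struct MyParams*` ready for C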
| 32.622449
| 119
| 0.597435
| 427
| 3,197
| 4.379391
| 0.393443
| 0.005348
| 0.005348
| 0.007487
| 0.075936
| 0.033155
| 0.033155
| 0.033155
| 0.033155
| 0
| 0
| 0.002303
| 0.320926
| 3,197
| 98
| 120
| 32.622449
| 0.859051
| 0.48827
| 0
| 0.186047
| 0
| 0
| 0.087687
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.139535
| false
| 0.023256
| 0
| 0.023256
| 0.325581
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b5bf002a4841de751ef7a81520f03f1fc8e3906
| 2,144
|
py
|
Python
|
lib/bes/fs/dir_util.py
|
reconstruir/bes
|
82ff54b2dadcaef6849d7de424787f1dedace85c
|
[
"Apache-2.0"
] | null | null | null |
lib/bes/fs/dir_util.py
|
reconstruir/bes
|
82ff54b2dadcaef6849d7de424787f1dedace85c
|
[
"Apache-2.0"
] | null | null | null |
lib/bes/fs/dir_util.py
|
reconstruir/bes
|
82ff54b2dadcaef6849d7de424787f1dedace85c
|
[
"Apache-2.0"
] | null | null | null |
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import os, os.path as path, shutil
import datetime
from .file_match import file_match
from .file_util import file_util
class dir_util(object):
@classmethod
def is_empty(clazz, d):
return clazz.list(d) == []
@classmethod
def list(clazz, d, relative = False, patterns = None):
'Return a list of the contents of d. Returns absolute paths unless relative is True.'
result = sorted(os.listdir(d))
if not relative:
result = [ path.join(d, f) for f in result ]
if patterns:
result = file_match.match_fnmatch(result, patterns, file_match.ANY)
return result
@classmethod
def list_dirs(clazz, d):
'Like list() but only returns dirs.'
return [ f for f in clazz.list(d) if path.isdir(f) ]
@classmethod
def empty_dirs(clazz, d):
return [ f for f in clazz.list_dirs(d) if clazz.is_empty(f) ]
@classmethod
def all_parents(clazz, d):
result = []
while True:
parent = path.dirname(d)
result.append(parent)
if parent == '/':
break
d = parent
return sorted(result)
@classmethod
def older_dirs(clazz, dirs, days = 0, seconds = 0, microseconds = 0,
milliseconds = 0, minutes = 0, hours = 0, weeks = 0):
delta = datetime.timedelta(days = days,
seconds = seconds,
microseconds = microseconds,
milliseconds = milliseconds,
minutes = minutes,
hours = hours,
weeks = weeks)
now = datetime.datetime.now()
ago = now - delta
result = []
for d in dirs:
mtime = datetime.datetime.fromtimestamp(os.stat(d).st_mtime)
if mtime <= ago:
result.append(d)
return result
@classmethod
def remove(clazz, d):
if path.isfile(d):
raise ValueError('Not a directory: "{}"'.format(d))
if not path.exists(d):
raise ValueError('Directory does not exist: "{}"'.format(d))
os.rmdir(d)
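# Usage sketch (paths are illustrative):
#
# logs = dir_util.list('/var/log', patterns=['*.log'])
# stale = dir_util.older_dirs(dir_util.list_dirs('/var/cache'), days=30)
# for d in dir_util.empty_dirs('/tmp'):
#     dir_util.remove(d)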
| 30.197183
| 90
| 0.588619
| 274
| 2,144
| 4.543796
| 0.357664
| 0.078715
| 0.012048
| 0.016867
| 0.035341
| 0.035341
| 0.035341
| 0
| 0
| 0
| 0
| 0.006671
| 0.30084
| 2,144
| 70
| 91
| 30.628571
| 0.823883
| 0.095149
| 0
| 0.186441
| 0
| 0
| 0.080331
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118644
| false
| 0
| 0.067797
| 0.033898
| 0.305085
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b5cf1180972e7b3ffbdfafed33eb97b3d3772b4
| 8,241
|
py
|
Python
|
deep_light/random_selection_threeInputs.py
|
maqorbani/neural-daylighting
|
753c86dfea32483a7afbf213a7b7684e070d3672
|
[
"Apache-2.0"
] | 4
|
2020-08-24T03:12:22.000Z
|
2020-08-27T17:13:56.000Z
|
deep_light/random_selection_threeInputs.py
|
maqorbani/neural-daylighting
|
753c86dfea32483a7afbf213a7b7684e070d3672
|
[
"Apache-2.0"
] | 4
|
2020-08-24T07:30:51.000Z
|
2021-02-20T10:18:47.000Z
|
deep_light/random_selection_threeInputs.py
|
maqorbani/neural-daylighting
|
753c86dfea32483a7afbf213a7b7684e070d3672
|
[
"Apache-2.0"
] | 3
|
2020-04-08T17:37:40.000Z
|
2020-08-24T07:32:52.000Z
|
#
#
# Copyright (c) 2020. Yue Liu
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# If you find this code useful please cite:
# Predicting Annual Equirectangular Panoramic Luminance Maps Using Deep Neural Networks,
# Yue Liu, Alex Colburn and Mehlika Inanici. 16th IBPSA International Conference and Exhibition, Building Simulation 2019.
#
#
#
import os
import numpy as np
from matplotlib import pyplot as plt
from deep_light import time_to_sun_angles
import shutil
from deep_light.genData import get_data_path
####randomly select the test data from the dataset
#### TODO make a function with source and destination subdirectories
def select_test_samples(data_root='./ALL_DATA_FP32',
LATITUDE=47,
LONGITUDE=122,
SM=120,
NUM_SAMPLES = 500):
import os
#read seattle weather txt data
#return data format as al, az, dir, dif
def readTxt():
#read each line
#transfer the time to sun angles
#save the data
f = open("testseattle.txt", "r")
lines = f.readlines()
data = np.zeros([4709, 7])
i = 0
for line in lines:
month = int(line.splitlines()[0].split(",")[0])
date = int(line.splitlines()[0].split(",")[1])
time = float(line.splitlines()[0].split(",")[2])
dir = int(line.splitlines()[0].split(",")[3])
dif = int(line.splitlines()[0].split(",")[4])
al, az = time_to_sun_angles.timeToAltitudeAzimuth(date, month, time, LATITUDE, LONGITUDE, SM)
if(dir > 10) or (dif > 10):
data[i]=np.array([dir, dif, al, az, month, date, time])
i = i + 1
print(data.shape)
return data
# These parameters are location related. Right now we use Seattle's parameters.
AB4_DIR = data_root + get_data_path('AB4')
AB0_DIR = data_root + get_data_path('AB0')
SKY_DIR = data_root + get_data_path('SKY')
data = readTxt()
idx = np.arange(data.shape[0])
np.random.shuffle(idx)
n_im = data.shape[0]
train, test = data[idx[:NUM_SAMPLES]], data[idx[NUM_SAMPLES:]]
cwd = os.getcwd()
test_all = './data/original_test_all'
if not os.path.exists('./data'):
os.makedirs("./data")
if not os.path.exists(test_all):
os.mkdir(test_all)
os.chdir(test_all)
if (os.path.exists("./result_combo_random")):
shutil.rmtree("./result_combo_random")
os.makedirs("./result_combo_random")
fig = plt.figure(figsize=(10, 10), dpi=150)
plt.scatter(train[:, 0], train[:, 1], s=10, color='r', label="train")
plt.scatter(test[:, 0], test[:, 1], s=1, color='g', label="test")
plt.title("Sky Direct and Diffuse Irradiances Distribution", size=16)
plt.xlabel('Direct')
plt.ylabel('Diffuse')
plt.legend(fancybox=True)
plt.savefig('./result_combo_random/sky.png')
plt.close()
fig = plt.figure(figsize=(10, 10), dpi=150)
plt.scatter(train[:, 2],train[:, 3], s=10, color='r', label="train")
plt.scatter(test[:, 2], test[:, 3], s=1, color='g', label="test")
plt.title("Sun Altitude and Azimuth Distribution", size=16)
plt.xlabel('Altitude')
plt.ylabel('Azimuth')
plt.legend(fancybox=True)
plt.savefig('./result_combo_random/sun.png')
plt.close()
if (os.path.exists("./test_ab0")):
shutil.rmtree("./test_ab0")
if (os.path.exists("./test_ab4")):
shutil.rmtree("./test_ab4")
if (os.path.exists("./test_sky_ab4")):
shutil.rmtree("./test_sky_ab4")
os.makedirs("./test_ab0")
os.makedirs("./test_ab4")
os.makedirs("./test_sky_ab4")
os.chdir(cwd)
#put the data into two folders train and test
from shutil import copyfile
import os.path
i = 0
bad_samples = 0
for i in range(NUM_SAMPLES):
file_name = "pano_" + str(int(train[i][4])) + "_" + str(int(train[i][5])) + "_" + str(train[i][6]) + "_" + str(int(train[i][0])) \
+ "_" + str(int(train[i][1]))
src_ab4 = AB4_DIR + file_name + ".npy"
src_ab0 = AB0_DIR + file_name + "_ab0.npy"
src_sky = SKY_DIR + file_name + ".npy"
dst_ab0 = test_all + "/test_ab0/"+ file_name + "_ab0.npy"
dst_ab4 = test_all + "/test_ab4/" + file_name + ".npy"
dst_sky = test_all + "/test_sky_ab4/" + file_name + ".npy"
if (os.path.isfile(src_ab4)) and (os.path.isfile(src_ab0)) and (os.path.isfile(src_sky)):
copyfile(src_ab4, dst_ab4)
copyfile(src_ab0, dst_ab0)
copyfile(src_sky, dst_sky)
else:
bad_samples = bad_samples + 1
print('unable to locate:')
if not os.path.isfile(src_ab4) : print(src_ab4)
if not os.path.isfile(src_ab0) : print(src_ab0)
if not os.path.isfile(src_sky) : print(src_sky)
i = i + 1
print('Maps not found = ', bad_samples)
def sample_consistency(data_root='./ALL_DATA_FP32',
LATITUDE=47,
LONGITUDE=122,
SM=120):
import os
#read seattle weather txt data
#return data format as al, az, dir, dif
def readTxt():
#read each line
#transfer the time to sun angles
#save the data
f = open("testseattle.txt", "r")
lines = f.readlines()
data = np.zeros([4709, 7])
i = 0
for line in lines:
month = int(line.splitlines()[0].split(",")[0])
date = int(line.splitlines()[0].split(",")[1])
time = float(line.splitlines()[0].split(",")[2])
dir = int(line.splitlines()[0].split(",")[3])
dif = int(line.splitlines()[0].split(",")[4])
al, az = time_to_sun_angles.timeToAltitudeAzimuth(date, month, time, LATITUDE, LONGITUDE, SM)
if(dir > 10) or (dif > 10):
data[i]=np.array([dir, dif, al, az, month, date, time])
i = i + 1
print(data.shape)
return data
# These parameters are location related. Right now we use Seattle's parameters.
AB4_DIR = data_root + get_data_path('AB4')
AB0_DIR = data_root + get_data_path('AB0')
SKY_DIR = data_root + get_data_path('SKY')
data = readTxt()
idx = np.arange(data.shape[0])
n_im = data.shape[0]
train, test = data[idx], data[idx]
test_all = './data/original_test_all'
import os.path
i = 0
bad_samples = 0
good_samples = 0
for i in range(data.shape[0]):
file_name = "pano_" + str(int(train[i][4])) + "_" + str(int(train[i][5])) + "_" + str(train[i][6]) + "_" + str(int(train[i][0])) \
+ "_" + str(int(train[i][1]))
src_ab4 = AB4_DIR + file_name + ".npy"
src_ab0 = AB0_DIR + file_name + "_ab0.npy"
src_sky = SKY_DIR + file_name + ".npy"
dst_ab0 = test_all + "/test_ab0/"+ file_name + "_ab0.npy"
dst_ab4 = test_all + "/test_ab4/" + file_name + ".npy"
dst_sky = test_all + "/test_sky_ab4/" + file_name + ".npy"
if (os.path.isfile(src_ab4)) and (os.path.isfile(src_ab0)) and (os.path.isfile(src_sky)):
good_samples = good_samples + 1
else:
bad_samples = bad_samples + 1
print('unable to locate:')
if not os.path.isfile(src_ab4) : print(src_ab4)
if not os.path.isfile(src_ab0) : print(src_ab0)
if not os.path.isfile(src_sky) : print(src_sky)
i = i + 1
print('Maps not found = ', bad_samples)
# TODO: finish the remaining part
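# Hedged usage sketch (the data root and sample count are illustrative;
# testseattle.txt must be present in the working directory):
#
# select_test_samples(data_root='./ALL_DATA_FP32', NUM_SAMPLES=500)
# sample_consistency(data_root='./ALL_DATA_FP32')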
| 31.818533
| 138
| 0.580876
| 1,149
| 8,241
| 4.011314
| 0.219321
| 0.026036
| 0.031243
| 0.039054
| 0.651334
| 0.620525
| 0.602734
| 0.602734
| 0.581037
| 0.533304
| 0
| 0.032074
| 0.277394
| 8,241
| 258
| 139
| 31.94186
| 0.741898
| 0.16964
| 0
| 0.660131
| 0
| 0
| 0.106665
| 0.024864
| 0
| 0
| 0
| 0.003876
| 0
| 1
| 0.026144
| false
| 0
| 0.071895
| 0
| 0.111111
| 0.078431
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b5e2470f1d47ae2237ac46314f2765d06fcd634
| 1,993
|
py
|
Python
|
graphs/ops.py
|
andreipoe/sve-analysis-tools
|
696d9a82af379564b05ce0207a6f872211a819eb
|
[
"MIT"
] | 2
|
2020-12-23T02:22:20.000Z
|
2020-12-31T17:30:56.000Z
|
graphs/ops.py
|
andreipoe/sve-analysis-tools
|
696d9a82af379564b05ce0207a6f872211a819eb
|
[
"MIT"
] | null | null | null |
graphs/ops.py
|
andreipoe/sve-analysis-tools
|
696d9a82af379564b05ce0207a6f872211a819eb
|
[
"MIT"
] | 3
|
2020-06-03T17:05:45.000Z
|
2021-12-26T13:45:49.000Z
|
#!/usr/bin/env python3
import argparse
import sys
from concurrent.futures import ThreadPoolExecutor
import pandas as pd
import altair as alt
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--application', help='Plot only the given application')
parser.add_argument('data', help='The data to plot, in CSV or DataFrame pickle format')
return parser.parse_args()
# Plots application `appname`
def plot(results, appname):
appdata = results[results.application == appname]
if len(appdata) == 0:
print(f'No data to plot for {appname}.')
return
if appdata[appdata.svewidth == 0].groupby('version').sum()['count'].max() >= 1e9:
scale = 'billion'
appdata.loc[:, 'count'] /= 1e9
else:
scale = 'million'
appdata.loc[:, 'count'] /= 1e6
fname = f'opcount-{appname}-all-clustered-stacked-group.png'
alt.Chart(appdata).mark_bar().encode(x=alt.X('version', title='', axis=alt.Axis(labelAngle=-30)),
y=alt.Y('sum(count)', title=f'Dynamic execution count ({scale} instructions)'),
column='svewidth',
color=alt.Color('optype', title='Op Group', scale=alt.Scale(scheme='set2')))\
.configure(background='white')\
.configure_title(anchor='middle', fontSize=14)\
.properties(title=appname)\
.save(fname, scale_factor='2.0')
print(f'Saved plot for {appname} in {fname}.')
def main():
args = parse_args()
if args.data.endswith('csv'):
df = pd.read_csv(args.data)
else:
df = pd.read_pickle(args.data)
df['svewidth'] = pd.to_numeric(df.svewidth)
applications = [args.application] if args.application else pd.unique(df['application'])
with ThreadPoolExecutor() as executor:
for a in applications:
executor.submit(plot, df, a)
if __name__ == '__main__':
main()
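# Example invocations (file names are illustrative):
#
#   ./ops.py results.csv             # plot every application in the data
#   ./ops.py -a stream results.pkl   # plot only the 'stream' application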
| 30.661538
| 114
| 0.610637
| 241
| 1,993
| 4.970954
| 0.46473
| 0.022538
| 0.028381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01061
| 0.243352
| 1,993
| 64
| 115
| 31.140625
| 0.78382
| 0.024586
| 0
| 0.045455
| 0
| 0
| 0.198249
| 0.025232
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068182
| false
| 0
| 0.113636
| 0
| 0.227273
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b6172efe890112ba2bd4d2808ee33bca9779adb
| 1,646
|
py
|
Python
|
gen3_etl/utils/defaults.py
|
ohsu-comp-bio/gen3-etl
|
9114f75cc8c8085111152ce0ef686a8a12f67f8e
|
[
"MIT"
] | 1
|
2020-01-22T17:05:58.000Z
|
2020-01-22T17:05:58.000Z
|
gen3_etl/utils/defaults.py
|
ohsu-comp-bio/gen3-etl
|
9114f75cc8c8085111152ce0ef686a8a12f67f8e
|
[
"MIT"
] | 2
|
2019-02-08T23:24:58.000Z
|
2021-05-13T22:42:28.000Z
|
gen3_etl/utils/defaults.py
|
ohsu-comp-bio/gen3_etl
|
9114f75cc8c8085111152ce0ef686a8a12f67f8e
|
[
"MIT"
] | null | null | null |
from gen3_etl.utils.cli import default_argument_parser
from gen3_etl.utils.ioutils import JSONEmitter
import os
import re
DEFAULT_OUTPUT_DIR = 'output/default'
DEFAULT_EXPERIMENT_CODE = 'default'
DEFAULT_PROJECT_ID = 'default-default'
def emitter(type=None, output_dir=DEFAULT_OUTPUT_DIR, **kwargs):
"""Creates a default emitter for type."""
return JSONEmitter(os.path.join(output_dir, '{}.json'.format(type)), compresslevel=0, **kwargs)
def default_parser(output_dir, experiment_code, project_id):
parser = default_argument_parser(
output_dir=output_dir,
description='Reads bcc json and writes gen3 json ({}).'.format(output_dir)
)
parser.add_argument('--experiment_code', type=str,
default=experiment_code,
help='Name of gen3 experiment ({}).'.format(experiment_code))
parser.add_argument('--project_id', type=str,
default=project_id,
help='Name of gen3 program-project ({}).'.format(project_id))
parser.add_argument('--schema', type=bool,
default=True,
help='generate schemas (true).')
return parser
def path_to_type(path):
"""Get the type (snakecase) of a vertex file"""
return snake_case(os.path.basename(path).split('.')[0])
def snake_case(name):
"""Converts name to snake_case."""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def camel_case(snake_str):
others = snake_str.split('_')
return ''.join([*map(str.title, others)])
| 35.782609
| 99
| 0.643378
| 215
| 1,646
| 4.72093
| 0.344186
| 0.070936
| 0.050246
| 0.031527
| 0.009852
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011503
| 0.207776
| 1,646
| 45
| 100
| 36.577778
| 0.766871
| 0.064399
| 0
| 0
| 0
| 0
| 0.166011
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15625
| false
| 0
| 0.125
| 0
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b6408ae7e94f31c2cccd6d4bff3b3ad42baca0f
| 6,266
|
py
|
Python
|
csrank/dataset_reader/discretechoice/tag_genome_discrete_choice_dataset_reader.py
|
kiudee/cs-ranking
|
47cf648fa286c37b9214bbad1926004d4d7d9796
|
[
"Apache-2.0"
] | 65
|
2018-02-12T13:18:13.000Z
|
2021-12-18T12:01:51.000Z
|
csrank/dataset_reader/discretechoice/tag_genome_discrete_choice_dataset_reader.py
|
kiudee/cs-ranking
|
47cf648fa286c37b9214bbad1926004d4d7d9796
|
[
"Apache-2.0"
] | 189
|
2018-02-13T10:11:55.000Z
|
2022-03-12T16:36:23.000Z
|
csrank/dataset_reader/discretechoice/tag_genome_discrete_choice_dataset_reader.py
|
kiudee/cs-ranking
|
47cf648fa286c37b9214bbad1926004d4d7d9796
|
[
"Apache-2.0"
] | 19
|
2018-03-08T15:39:31.000Z
|
2020-11-18T12:46:36.000Z
|
import logging
import numpy as np
from sklearn.utils import check_random_state
from csrank.constants import DISCRETE_CHOICE
from csrank.dataset_reader.tag_genome_reader import critique_dist
from csrank.dataset_reader.util import get_key_for_indices
from ..tag_genome_reader import TagGenomeDatasetReader
from ...util import convert_to_label_encoding
logger = logging.getLogger(__name__)
class TagGenomeDiscreteChoiceDatasetReader(TagGenomeDatasetReader):
def __init__(self, dataset_type="similarity", **kwargs):
super(TagGenomeDiscreteChoiceDatasetReader, self).__init__(
learning_problem=DISCRETE_CHOICE, **kwargs
)
dataset_func_dict = {
"nearest_neighbour": self.make_nearest_neighbour_dataset,
"critique_fit_less": self.make_critique_fit_dataset(direction=-1),
"critique_fit_more": self.make_critique_fit_dataset(direction=1),
"dissimilar_nearest_neighbour": self.make_dissimilar_nearest_neighbour_dataset,
"dissimilar_critique_more": self.make_dissimilar_critique_dataset(
direction=1
),
"dissimilar_critique_less": self.make_dissimilar_critique_dataset(
direction=-1
),
}
if dataset_type not in dataset_func_dict:
raise ValueError(
f"dataset_type must be one of {set(dataset_func_dict.keys())}"
)
logger.info("Dataset type: {}".format(dataset_type))
self.dataset_function = dataset_func_dict[dataset_type]
def make_nearest_neighbour_dataset(self, n_instances, n_objects, seed, **kwargs):
X, scores = super(
TagGenomeDiscreteChoiceDatasetReader, self
).make_nearest_neighbour_dataset(
n_instances=n_instances, n_objects=n_objects, seed=seed
)
# The higher the similarity, the lower the rank of the object; take the object with the second-highest similarity
Y = np.argsort(scores, axis=1)[:, -2]
Y = convert_to_label_encoding(Y, n_objects)
return X, Y
def make_critique_fit_dataset(self, direction):
def dataset_generator(n_instances, n_objects, seed, **kwargs):
X, scores = super(
TagGenomeDiscreteChoiceDatasetReader, self
).make_critique_fit_dataset(
n_instances=n_instances,
n_objects=n_objects,
seed=seed,
direction=direction,
)
Y = scores.argmax(axis=1)
Y = convert_to_label_encoding(Y, n_objects)
return X, Y
return dataset_generator
def make_dissimilar_nearest_neighbour_dataset(
self, n_instances, n_objects, seed, **kwargs
):
logger.info(
"For instances {} objects {}, seed {}".format(n_instances, n_objects, seed)
)
X, scores = super(
TagGenomeDiscreteChoiceDatasetReader, self
).make_nearest_neighbour_dataset(
n_instances=n_instances, n_objects=n_objects, seed=seed
)
# For the dissimilar variant, take the object with the lowest similarity
Y = np.argsort(scores, axis=1)[:, 0]
Y = convert_to_label_encoding(Y, n_objects)
return X, Y
def make_dissimilar_critique_dataset(self, direction):
def dataset_generator(n_instances, n_objects, seed, **kwargs):
logger.info(
"For instances {} objects {}, seed {}, direction {}".format(
n_instances, n_objects, seed, direction
)
)
random_state = check_random_state(seed)
X = []
scores = []
length = int(n_instances / self.n_movies) + 1
popular_tags = self.get_genre_tag_id()
for i, feature in enumerate(self.movie_features):
if direction == 1:
quartile_tags = np.where(
np.logical_and(feature >= 1 / 3, feature < 2 / 3)
)[0]
else:
quartile_tags = np.where(feature > 1 / 2)[0]
if len(quartile_tags) < length:
quartile_tags = popular_tags
tag_ids = random_state.choice(quartile_tags, size=length)
distances = [
self.similarity_matrix[get_key_for_indices(i, j)]
for j in range(self.n_movies)
]
critique_d = critique_dist(
feature,
self.movie_features,
tag_ids,
direction=direction,
relu=False,
)
critique_fit = np.multiply(critique_d, distances)
orderings = np.argsort(critique_fit, axis=-1)[:, ::-1]
minimum = np.zeros(length, dtype=int)
for k, dist in enumerate(critique_fit):
quartile = np.percentile(dist, [0, 5])
last = np.where(
np.logical_and((dist >= quartile[0]), (dist <= quartile[1]))
)[0]
if i in last:
index = np.where(last == i)[0][0]
last = np.delete(last, index)
minimum[k] = random_state.choice(last, size=1)[0]
orderings = orderings[:, 0 : n_objects - 2]
orderings = np.append(orderings, minimum[:, None], axis=1)
orderings = np.append(
orderings, np.zeros(length, dtype=int)[:, None] + i, axis=1
)
for o in orderings:
random_state.shuffle(o)
scores.extend(critique_fit[np.arange(length)[:, None], orderings])
X.extend(self.movie_features[orderings])
X = np.array(X)
scores = np.array(scores)
indices = random_state.choice(X.shape[0], n_instances, replace=False)
X = X[indices, :, :]
scores = scores[indices, :]
Y = scores.argmin(axis=1)
Y = convert_to_label_encoding(Y, n_objects)
return X, Y
return dataset_generator
| 42.62585
| 111
| 0.57804
| 672
| 6,266
| 5.122024
| 0.212798
| 0.039512
| 0.03835
| 0.047066
| 0.432888
| 0.382626
| 0.366357
| 0.320453
| 0.320453
| 0.320453
| 0
| 0.008635
| 0.334663
| 6,266
| 146
| 112
| 42.917808
| 0.816982
| 0.032397
| 0
| 0.227273
| 0
| 0
| 0.049175
| 0.017657
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05303
| false
| 0
| 0.060606
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b65bb70b1e3c95b7c5e5cfddb6056ef4399ec89
| 2,968
|
py
|
Python
|
crnpy/utils.py
|
mehradans92/crnpy
|
e145d63b5cf97eb3c91276000cc8fef92c35cde9
|
[
"BSD-3-Clause"
] | null | null | null |
crnpy/utils.py
|
mehradans92/crnpy
|
e145d63b5cf97eb3c91276000cc8fef92c35cde9
|
[
"BSD-3-Clause"
] | null | null | null |
crnpy/utils.py
|
mehradans92/crnpy
|
e145d63b5cf97eb3c91276000cc8fef92c35cde9
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
def weighted_quantile(values, quantiles, sample_weight=None,
values_sorted=False, old_style=False):
''' Very close to numpy.percentile, but supports weights.
Note: quantiles should be in [0, 1]!
:param values: numpy.array with data
:param quantiles: array-like with many quantiles needed
:param sample_weight: array-like of the same length as `array`
:param values_sorted: bool, if True, then will avoid sorting of
initial array
:param old_style: if True, will correct output to be consistent
with numpy.percentile.
:return: numpy.array with computed quantiles.
'''
values = np.array(values)
quantiles = np.array(quantiles)
if sample_weight is None:
sample_weight = np.ones(len(values))
sample_weight = np.array(sample_weight)
assert np.all(quantiles >= 0) and np.all(quantiles <= 1), \
'quantiles should be in [0, 1]'
if not values_sorted:
sorter = np.argsort(values)
values = values[sorter]
sample_weight = sample_weight[sorter]
weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight
if old_style:
# To be convenient with numpy.percentile
weighted_quantiles -= weighted_quantiles[0]
weighted_quantiles /= weighted_quantiles[-1]
else:
weighted_quantiles /= np.sum(sample_weight)
return np.interp(quantiles, weighted_quantiles, values)
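# Worked example (hedged): with values [1, 2, 3] and weights [1, 1, 2] the
# weighted median shifts toward 3; the call below returns approximately 2.33:
#
# weighted_quantile([1, 2, 3], [0.5], sample_weight=[1, 1, 2])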
def plot_samples(trajs, t, ref_traj=None, lower_q_bound=1/3, upper_q_bound=2/3, alpha=0.2, restraints=None, weights=None, crn=None, sim_incr=0.001):
if weights is None:
w = np.ones(trajs.shape[0])
else:
w = weights
w /= np.sum(w)
x = range(trajs.shape[1])
qtrajs = np.apply_along_axis(lambda x: weighted_quantile(
x, [lower_q_bound, 1/2, upper_q_bound], sample_weight=w), 0, trajs)
mtrajs = np.sum(trajs * w[:, np.newaxis, np.newaxis], axis=0)
qtrajs[0, :, :] = qtrajs[0, :, :] - qtrajs[1, :, :] + mtrajs
qtrajs[2, :, :] = qtrajs[2, :, :] - qtrajs[1, :, :] + mtrajs
qtrajs[1, :, :] = mtrajs
fig, ax = plt.subplots(dpi=100)
for i in range(trajs.shape[-1]):
ax.plot(t, qtrajs[1, :, i], color=f'C{i}', label=crn.species[i])
ax.fill_between(t, qtrajs[0, :, i],
qtrajs[2, :, i], color=f'C{i}', alpha=alpha)
if ref_traj is not None:
ax.plot(t, ref_traj[:, i], '--',
color=f'C{i}', label=crn.species[i])
ax.set_xlabel('Time (s)')
ax.set_ylabel('Species concentration')
handles, labels = ax.get_legend_handles_labels()
unique = [(h, l) for i, (h, l) in enumerate(
zip(handles, labels)) if l not in labels[:i]]
ax.legend(*zip(*unique), loc='upper left', bbox_to_anchor=(1.1, 0.9))
if restraints is not None:
for r in restraints:
ax.plot(r[2]*sim_incr, r[0], marker='o', color='k')
| 41.802817
| 148
| 0.626685
| 433
| 2,968
| 4.177829
| 0.318707
| 0.079602
| 0.043118
| 0.013267
| 0.058043
| 0.053068
| 0.029851
| 0.029851
| 0.029851
| 0.029851
| 0
| 0.020318
| 0.237197
| 2,968
| 70
| 149
| 42.4
| 0.77871
| 0.171496
| 0
| 0.038462
| 0
| 0
| 0.03484
| 0
| 0
| 0
| 0
| 0
| 0.019231
| 1
| 0.038462
| false
| 0
| 0.038462
| 0
| 0.096154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b690221ede88220da58423f3771708cee9c615d
| 4,090
|
py
|
Python
|
lib_collection/queue/resizing_array_queue.py
|
caser789/libcollection
|
eb0a6fc36ce1cb57ed587865bbc1576e81c08924
|
[
"MIT"
] | null | null | null |
lib_collection/queue/resizing_array_queue.py
|
caser789/libcollection
|
eb0a6fc36ce1cb57ed587865bbc1576e81c08924
|
[
"MIT"
] | null | null | null |
lib_collection/queue/resizing_array_queue.py
|
caser789/libcollection
|
eb0a6fc36ce1cb57ed587865bbc1576e81c08924
|
[
"MIT"
] | null | null | null |
class ResizingArrayQueue(object):
def __init__(self, lst=None, capacity=2):
self.capacity = capacity
self.lst = [None] * self.capacity
self.head = 0
self.tail = 0
self.n = 0
def __len__(self):
"""
>>> queue = ResizingArrayQueue()
>>> len(queue)
0
>>> queue.enqueue('a')
>>> len(queue)
1
>>> queue.enqueue('b')
>>> len(queue)
2
"""
return self.n
def __contains__(self, i):
"""
>>> queue = ResizingArrayQueue()
>>> 'a' in queue
False
>>> queue.enqueue('a')
>>> queue.enqueue('b')
>>> 'a' in queue
True
"""
for j in self:
if j == i:
return True
return False
def __iter__(self):
"""
>>> queue = ResizingArrayQueue()
>>> queue.enqueue('a')
>>> queue.enqueue('b')
>>> for i in queue:
... print(i)
...
a
b
"""
n = self.head
for _ in range(len(self)):
if n == self.capacity:
n = 0
yield self.lst[n]
n += 1
def __repr__(self):
"""
>>> queue = ResizingArrayQueue()
>>> queue.enqueue('a')
>>> queue.enqueue('b')
>>> queue
ResizingArrayQueue(['a', 'b'])
>>> print queue
ResizingArrayQueue(['a', 'b'])
"""
return 'ResizingArrayQueue([{}])'.format(', '.join(repr(i) for i in self))
def enqueue(self, i):
"""
>>> queue = ResizingArrayQueue()
>>> queue.enqueue('a')
>>> queue.enqueue('b')
>>> queue.enqueue('c')
>>> queue
ResizingArrayQueue(['a', 'b', 'c'])
>>> queue.capacity
4
"""
if len(self) == self.capacity:
self._resize(self.capacity*2)
if self.tail == self.capacity:
self.tail = 0
self.lst[self.tail] = i
self.tail += 1
self.n += 1
def dequeue(self):
"""
>>> queue = ResizingArrayQueue()
>>> queue.dequeue()
Traceback (most recent call last):
...
IndexError: dequeue from empty queue
>>> queue.enqueue('a')
>>> queue.enqueue('b')
>>> queue.enqueue('c')
>>> queue.dequeue()
'a'
>>> queue.dequeue()
'b'
>>> queue.enqueue('d')
>>> queue.enqueue('e')
>>> queue.enqueue('f')
>>> queue.lst
['e', 'f', 'c', 'd']
>>> queue.enqueue('g')
>>> queue.capacity
8
>>> queue.dequeue()
'c'
>>> queue.dequeue()
'd'
>>> queue.dequeue()
'e'
>>> queue.dequeue()
'f'
>>> queue.capacity
4
>>> queue.dequeue()
'g'
>>> queue.capacity
2
"""
if len(self) == 0:
raise IndexError('dequeue from empty queue')
if len(self) * 4 <= self.capacity:
self._resize(self.capacity // 2)
if self.head == self.capacity:
self.head = 0
res = self.lst[self.head]
self.head += 1
self.n -= 1
return res
@property
def top(self):
"""
>>> queue = ResizingArrayQueue()
>>> queue.top
Traceback (most recent call last):
...
IndexError: top from empty queue
>>> queue.enqueue('a')
>>> queue.top
'a'
>>> queue.enqueue('b')
>>> queue.top
'a'
>>> queue.dequeue()
'a'
>>> queue.top
'b'
"""
if len(self) == 0:
raise IndexError('top from empty queue')
return self.lst[self.head]
def _resize(self, n):
q = ResizingArrayQueue(capacity=n)
for e in self:
q.enqueue(e)
self.capacity = q.capacity
self.lst = q.lst
self.head = q.head
self.tail = q.tail
self.n = q.n
| 23.505747
| 82
| 0.429829
| 404
| 4,090
| 4.292079
| 0.143564
| 0.138408
| 0.05248
| 0.062284
| 0.351211
| 0.288351
| 0.201845
| 0.175317
| 0.175317
| 0.05075
| 0
| 0.00995
| 0.410269
| 4,090
| 173
| 83
| 23.641619
| 0.708955
| 0.371394
| 0
| 0.107143
| 0
| 0
| 0.03975
| 0.013629
| 0
| 0
| 0
| 0
| 0
| 1
| 0.160714
| false
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b6a1e3587e95d7c0cf5eb9e41ba34ccfca2c19e
| 437
|
py
|
Python
|
sort/counting.py
|
haandol/dojo
|
c29dc54614bdfaf79eb4862ed9fa25974a0f5654
|
[
"MIT"
] | null | null | null |
sort/counting.py
|
haandol/dojo
|
c29dc54614bdfaf79eb4862ed9fa25974a0f5654
|
[
"MIT"
] | null | null | null |
sort/counting.py
|
haandol/dojo
|
c29dc54614bdfaf79eb4862ed9fa25974a0f5654
|
[
"MIT"
] | null | null | null |
# https://www.geeksforgeeks.org/counting-sort/
def sort(arr):
n = len(arr)
result = [-1] * n
counts = [0] * (max(arr) + 1)
for el in arr:
counts[el] += 1
for i in range(1, len(counts)):
counts[i] += counts[i-1]
for i in range(n):
result[counts[arr[i]] - 1] = arr[i]
counts[arr[i]] -= 1
return result
if __name__ == '__main__':
arr = [10, 7, 8, 9, 1, 5]
assert [1, 5, 7, 8, 9, 10] == sort(arr)
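# Counting sort runs in O(n + k) time and space, where k = max(arr); it
# assumes non-negative integers. A second illustrative check:
#
# assert sort([3, 1, 2, 1]) == [1, 1, 2, 3]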
| 18.208333
| 46
| 0.535469
| 76
| 437
| 2.973684
| 0.394737
| 0.053097
| 0.044248
| 0.061947
| 0.106195
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067901
| 0.258581
| 437
| 23
| 47
| 19
| 0.62963
| 0.100687
| 0
| 0
| 0
| 0
| 0.02046
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.066667
| false
| 0
| 0
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b6a67db77e30e9f797bb8f8a046460eef6c1f54
| 1,316
|
py
|
Python
|
uedinst/daq.py
|
trbritt/uedinst
|
e9fe1379b762be97b31ffab86a2cb149cb6291da
|
[
"BSD-3-Clause"
] | null | null | null |
uedinst/daq.py
|
trbritt/uedinst
|
e9fe1379b762be97b31ffab86a2cb149cb6291da
|
[
"BSD-3-Clause"
] | null | null | null |
uedinst/daq.py
|
trbritt/uedinst
|
e9fe1379b762be97b31ffab86a2cb149cb6291da
|
[
"BSD-3-Clause"
] | null | null | null |
import nidaqmx
from . import InstrumentException
from time import sleep
class PCI6281:
"""
Interface to NI Data Acquisition PCI-6281.
"""
def __init__(self, *args, **kwargs):
pass
def set_voltage(self, value, timeout=None):
"""
Set voltage on the output channel.
Parameters
----------
value : float
Voltage value [V]
timeout : float or None, optional
Voltage time-out [s]. If None (default), voltage is assigned indefinitely.
Raises
------
InstrumentException : if voltage `value` is outside of ±10V.
"""
value = float(value)
if abs(value) > 10:
raise InstrumentException(
f"Voltage {value} is outside of permissible bounds of +=10V"
)
if timeout is not None:
if timeout <= 0:
raise InstrumentException(
f"A time-out value of {timeout} seconds is not valid."
)
with nidaqmx.Task() as task:
task.ao_channels.add_ao_voltage_chan("Dev1/ao1")
task.write(value)
task.stop()
if timeout is not None:
sleep(timeout)
task.write(0)
task.stop()
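# Usage sketch (hedged; requires the NI-DAQmx runtime and a device visible as
# "Dev1"):
#
# daq = PCI6281()
# daq.set_voltage(2.5)               # hold 2.5 V on Dev1/ao1 indefinitely
# daq.set_voltage(5.0, timeout=1.0)  # 5 V for one second, then back to 0 V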
| 26.32
| 86
| 0.522796
| 139
| 1,316
| 4.884892
| 0.489209
| 0.053019
| 0.041237
| 0.061856
| 0.120766
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022277
| 0.386018
| 1,316
| 49
| 87
| 26.857143
| 0.818069
| 0.24772
| 0
| 0.24
| 0
| 0
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0.04
| 0.12
| 0
| 0.24
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b6ab2fe5bfb6e6c729ecffe273017b734826941
| 1,135
|
py
|
Python
|
tests/operations_model_test.py
|
chlemagne/python-oop-calculator
|
0259ce0f7a72faab60b058588a6838fe107e88eb
|
[
"MIT"
] | null | null | null |
tests/operations_model_test.py
|
chlemagne/python-oop-calculator
|
0259ce0f7a72faab60b058588a6838fe107e88eb
|
[
"MIT"
] | null | null | null |
tests/operations_model_test.py
|
chlemagne/python-oop-calculator
|
0259ce0f7a72faab60b058588a6838fe107e88eb
|
[
"MIT"
] | null | null | null |
""" Unittest.
"""
import unittest
from calculator.standard.operations_model import (
UniOperation,
BiOperation,
Square,
SquareRoot,
Reciprocal,
Add,
Subtract,
Multiply,
Divide,
Modulo
)
class OperationsModelTest(unittest.TestCase):
""" Operations model test suite.
"""
def test_operation_category(self):
# steps
square = Square
multiply = Multiply
# test
self.assertTrue(issubclass(square, UniOperation))
self.assertTrue(issubclass(multiply, BiOperation))
def test_uni_operand_operations(self):
# steps
square = Square
square_root = SquareRoot
# test
self.assertEqual(square.eval(5), 25)
self.assertEqual(square_root.eval(25), 5)
def test_bi_operand_operations(self):
# steps
add = Add
sub = Subtract
mul = Multiply
div = Divide
# test
self.assertEqual(add.eval(1, 2), 3)
self.assertEqual(sub.eval(5, -2), 7)
self.assertEqual(mul.eval(1.5, -5), -7.5)
self.assertEqual(div.eval(6, 0.5), 12)
| 21.415094
| 58
| 0.600881
| 121
| 1,135
| 5.545455
| 0.38843
| 0.134128
| 0.044709
| 0.062593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0275
| 0.295154
| 1,135
| 52
| 59
| 21.826923
| 0.81125
| 0.067841
| 0
| 0.060606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.242424
| 1
| 0.090909
| false
| 0
| 0.060606
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b6c01aa137fe5eab922023b4f7b039eaadf78f0
| 684
|
py
|
Python
|
LAB5/lab/main.py
|
ThinkingFrog/MathStat
|
cd3712f4f4a59badd7f2611de64681b0e928d3db
|
[
"MIT"
] | null | null | null |
LAB5/lab/main.py
|
ThinkingFrog/MathStat
|
cd3712f4f4a59badd7f2611de64681b0e928d3db
|
[
"MIT"
] | null | null | null |
LAB5/lab/main.py
|
ThinkingFrog/MathStat
|
cd3712f4f4a59badd7f2611de64681b0e928d3db
|
[
"MIT"
] | null | null | null |
from lab.distribution import DistrManager
def main():
sizes = [20, 60, 100]
rhos = [0, 0.5, 0.9]
times = 1000
manager = DistrManager(sizes, rhos, times)
for size in sizes:
for rho in rhos:
mean, sq_mean, disp = manager.get_coeff_stats("Normal", size, rho)
print(
f"Normal\t Size = {size}\t Rho = {rho}\t Mean = {mean}\t Squares mean = {sq_mean}\t Dispersion = {disp}"
)
mean, sq_mean, disp = manager.get_coeff_stats("Mixed", size, rho)
print(
f"Mixed\t Size = {size}\t Mean = {mean}\t Squares mean = {sq_mean}\t Dispersion = {disp}"
)
manager.draw(size)
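# The module defines main() but never calls it; presumably an external runner
# imports it. A minimal entry point for standalone execution (an assumption,
# not part of the original file):
if __name__ == "__main__":
    main()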
| 28.5
| 120
| 0.557018
| 94
| 684
| 3.968085
| 0.382979
| 0.064343
| 0.107239
| 0.075067
| 0.407507
| 0.407507
| 0.407507
| 0.407507
| 0.225201
| 0.225201
| 0
| 0.033898
| 0.309942
| 684
| 23
| 121
| 29.73913
| 0.756356
| 0
| 0
| 0.117647
| 0
| 0.117647
| 0.289474
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.117647
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b6c677fc5296afba3c3ed059b4dbdc0e009c7cf
| 3,010
|
py
|
Python
|
haplotype_plot/tests/test_plot.py
|
neobernad/haplotype_plot
|
45d9e916f474242648baa8d8b2afe9d502302485
|
[
"MIT"
] | 2
|
2021-01-09T10:43:25.000Z
|
2021-02-16T17:21:08.000Z
|
haplotype_plot/tests/test_plot.py
|
neobernad/haplotype_plot
|
45d9e916f474242648baa8d8b2afe9d502302485
|
[
"MIT"
] | 3
|
2021-02-01T11:28:17.000Z
|
2021-03-29T22:12:48.000Z
|
haplotype_plot/tests/test_plot.py
|
neobernad/haplotype_plot
|
45d9e916f474242648baa8d8b2afe9d502302485
|
[
"MIT"
] | null | null | null |
import unittest
import logging
import os
import haplotype_plot.genotyper as genotyper
import haplotype_plot.reader as reader
import haplotype_plot.haplotyper as haplotyper
import haplotype_plot.plot as hplot
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
dir_path = os.path.dirname(os.path.realpath(__file__))
class TestPlotting(unittest.TestCase):
vcf_path = os.path.join(dir_path, "data/chr01.vcf")
chrom = "chr01"
parental_sample = "SAMPLE4"
sample_list = None
phase = False
def setUp(self) -> None:
self.sample_list = reader.get_samples(self.vcf_path)
def test_plot_config(self):
plot_config = hplot.PlotConfig()
logger.debug(plot_config)
def test_generate_heterozygous_yticks(self):
heterozygous = haplotyper.Zygosity.HET
haplotype_wrapper = genotyper.process(self.vcf_path, self.chrom,
self.parental_sample, self.phase, heterozygous)
plotter = hplot.Plotter(haplotype_wrapper)
labels = plotter.get_ytickslabels()
logger.debug("Parent: {parent}".format(parent=self.parental_sample))
logger.debug(labels)
def test_generate_homozygous_yticks(self):
homozygous = haplotyper.Zygosity.HOM
haplotype_wrapper = genotyper.process(self.vcf_path, self.chrom,
self.parental_sample, self.phase, homozygous)
plotter = hplot.Plotter(haplotype_wrapper)
labels = plotter.get_ytickslabels()
logger.debug("Parent: {parent}".format(parent=self.parental_sample))
logger.debug(labels)
def test_plot_homozygous_haplotypes(self):
homozygous = haplotyper.Zygosity.HOM
haplotype_wrapper = genotyper.process(self.vcf_path, self.chrom,
self.parental_sample, self.phase, homozygous)
plotter = hplot.Plotter(haplotype_wrapper)
ytickslabels = plotter.get_ytickslabels()
custom_config = hplot.PlotConfig(
title="Parental '{parent}' in '{chrom}'".format(parent=self.parental_sample, chrom=self.chrom),
xtickslabels=plotter.get_xtickslabels(),
ytickslabels=ytickslabels,
start=0,
end=1000,
size_x=10,
size_y=len(ytickslabels) * .2,
show=True
)
plotter.plot_haplotypes(custom_config)
def test_plot_heterozygous_haplotypes(self):
heterozygous = haplotyper.Zygosity.HET
haplotype_wrapper = genotyper.process(self.vcf_path, self.chrom,
self.parental_sample, self.phase, heterozygous)
plotter = hplot.Plotter(haplotype_wrapper)
user_conf = list(["show=False", "xtickslabels=False", "size_y=5"])
plotter.plot_haplotypes(override_conf=user_conf)
if __name__ == '__main__':
unittest.main()
| 38.589744
| 108
| 0.644186
| 317
| 3,010
| 5.873817
| 0.255521
| 0.06015
| 0.067669
| 0.068743
| 0.474758
| 0.458647
| 0.458647
| 0.458647
| 0.458647
| 0.458647
| 0
| 0.006335
| 0.265781
| 3,010
| 77
| 109
| 39.090909
| 0.836199
| 0
| 0
| 0.349206
| 0
| 0
| 0.045687
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.111111
| 0
| 0.301587
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b6e114171988bb11bb357d60e9671587a0a54e0
| 1,806
|
py
|
Python
|
src/docknet/data_generator/chessboard_data_generator.py
|
Accenture/Docknet
|
e81eb0c5aefd080ebeebf369d41f8d3fa85ab917
|
[
"Apache-2.0"
] | 2
|
2020-06-29T08:58:26.000Z
|
2022-03-08T11:38:18.000Z
|
src/docknet/data_generator/chessboard_data_generator.py
|
jeekim/Docknet
|
eb3cad13701471a7aaeea1d573bc5608855bab52
|
[
"Apache-2.0"
] | 1
|
2022-03-07T17:58:59.000Z
|
2022-03-07T17:58:59.000Z
|
src/docknet/data_generator/chessboard_data_generator.py
|
jeekim/Docknet
|
eb3cad13701471a7aaeea1d573bc5608855bab52
|
[
"Apache-2.0"
] | 3
|
2020-06-29T08:58:31.000Z
|
2020-11-22T11:23:11.000Z
|
from typing import Tuple
import numpy as np
from docknet.data_generator.data_generator import DataGenerator
class ChessboardDataGenerator(DataGenerator):
"""
The chessboard data generator generates two classes (0 and 1) of 2D vectors distributed as follows:
0011
0011
1100
1100
"""
def func0(self, x: np.array):
"""
Generator function of 2D vectors of class 0 (top-left and bottom-right squares)
:param x: a randomly generated 2D vector
:return: the corresponding individual of class 0
"""
f0 = x[0] * self.x_half_scale + self.x_min
f1 = x[1] * self.y_scale + self.y_min
if x[1] < 0.5:
f0 += self.x_half_scale
return np.array([f0, f1])
def func1(self, x: np.array):
"""
Generator function of 2D vectors of class 1 (top-right and bottom-left squares)
:param x: a randomly generated 2D vector
:return: the corresponding individual of class 1
"""
f0 = x[0] * self.x_scale + self.x_min
f1 = x[1] * self.y_half_scale + self.y_min
if x[0] >= 0.5:
f1 += self.y_half_scale
return np.array([f0, f1])
def __init__(self, x0_range: Tuple[float, float], x1_range: Tuple[float, float]):
"""
Initializes the chessboard data generator
:param x0_range: tuple of minimum and maximum x values
:param x1_range: tuple of minimum and maximum y values
"""
super().__init__((self.func0, self.func1))
self.x_scale = x0_range[1] - x0_range[0]
self.x_min = x0_range[0]
self.x_half_scale = self.x_scale / 2
self.y_scale = x1_range[1] - x1_range[0]
self.y_min = x1_range[0]
self.y_half_scale = self.y_scale / 2
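# A small sketch of the coordinate transform implemented above, using only the
# generator functions defined in this file (the base DataGenerator API is not
# shown here, so we call func0/func1 directly; the ranges are illustrative):
if __name__ == "__main__":
    gen = ChessboardDataGenerator((0.0, 4.0), (0.0, 4.0))
    # x lies in [0, 1)^2; func0 maps it into the class-0 (top-left/bottom-right) squares
    print(gen.func0(np.array([0.2, 0.7])))  # -> [0.4, 2.8]
    print(gen.func1(np.array([0.2, 0.7])))  # class 1: bottom-left/top-right squares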
| 32.836364
| 103
| 0.606312
| 265
| 1,806
| 3.966038
| 0.25283
| 0.052331
| 0.022835
| 0.039962
| 0.517602
| 0.467174
| 0.359657
| 0.331113
| 0.275928
| 0.234063
| 0
| 0.054374
| 0.297342
| 1,806
| 54
| 104
| 33.444444
| 0.773838
| 0.345515
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b7181c5da71f675df29626211d629f1f9f4e5ef
| 5,485
|
py
|
Python
|
stereoVO/geometry/features.py
|
sakshamjindal/Visual-Odometry-Pipeline-in-Python
|
d4a8a8ee16f91a145b90c41744a85e8dd1c1d249
|
[
"Apache-2.0"
] | 10
|
2021-11-01T23:56:30.000Z
|
2022-03-07T08:08:25.000Z
|
stereoVO/geometry/features.py
|
sakshamjindal/StereoVO-SFM
|
d4a8a8ee16f91a145b90c41744a85e8dd1c1d249
|
[
"Apache-2.0"
] | null | null | null |
stereoVO/geometry/features.py
|
sakshamjindal/StereoVO-SFM
|
d4a8a8ee16f91a145b90c41744a85e8dd1c1d249
|
[
"Apache-2.0"
] | 1
|
2021-12-02T03:15:00.000Z
|
2021-12-02T03:15:00.000Z
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
__all__ = ['DetectionEngine']
class DetectionEngine():
"""
Main engine code for detecting features in the frames and
matching them across the frames at the current stereo state
"""
def __init__(self, left_frame, right_frame, params):
"""
:param left_frame (np.array): of size (HxWx3 or HxW) of stereo configuration
:param right_frame (np.array): of size (HxWx3 or HxW) of stereo configuration
:param params (AttriDict): contains parameters for the stereo configuration
    and for the detection and matching of computer vision features
"""
self.left_frame = left_frame
self.right_frame = right_frame
self.params = params
def get_matching_keypoints(self):
"""
Runs a feature detector on both frames, computes keypoints and descriptor
information, performs FLANN-based matching to match keypoints across the left
and right frames, and applies a ratio test to keep only "good" matches
Returns:
matchedPoints (tuple of np.array for (left, right)): each of size (N,2) of matched features
keypoints (tuple of lists for (left, right)) : each list containing metadata of keypoints from feature detector
descriptors (tuple of np.array for (left, right)) : each of size (MXd) list containing feature vector of keypoints from feature detector
"""
if self.params.geometry.detection.method == "SIFT":
detector = cv2.xfeatures2d.SIFT_create()
else:
raise NotImplementedError("Feature Detector has not been implemented. Please refer to the Contributing guide and raise a PR")
if len(self.left_frame.shape) == 3:
    # cv2.cvtColor requires a conversion code; assuming BGR input -> grayscale
    self.left_frame = cv2.cvtColor(self.left_frame, cv2.COLOR_BGR2GRAY)
if len(self.right_frame.shape) == 3:
    self.right_frame = cv2.cvtColor(self.right_frame, cv2.COLOR_BGR2GRAY)
keyPointsLeft, descriptorsLeft = detector.detectAndCompute(self.left_frame, None)
keyPointsRight, descriptorsRight = detector.detectAndCompute(self.right_frame, None)
if self.params.debug.plotting.features:
DetectionEngine.plot_feature(self.left_frame, self.right_frame, keyPointsLeft, keyPointsRight)
args_feature_matcher = self.params.geometry.featureMatcher.configs
indexParams = args_feature_matcher.indexParams
searchParams = args_feature_matcher.searchParams
if self.params.geometry.featureMatcher.method == "FlannMatcher":
matcher = cv2.FlannBasedMatcher(indexParams, searchParams)
else:
raise NotImplementedError("Feature Matcher has not been implemented. Please refer to the Contributing guide and raise a PR")
matches = matcher.knnMatch(descriptorsLeft, descriptorsRight, args_feature_matcher.K)
#Apply ratio test
goodMatches = []
ptsLeft = []
ptsRight = []
for m, n in matches:
if m.distance < args_feature_matcher.maxRatio * n.distance:
goodMatches.append([m])
ptsLeft.append(keyPointsLeft[m.queryIdx].pt)
ptsRight.append(keyPointsRight[m.trainIdx].pt)
ptsLeft = np.array(ptsLeft).astype('float64')
ptsRight = np.array(ptsRight).astype('float64')
if self.params.debug.plotting.featureMatches:
DetectionEngine.plot_feature_matches(self.left_frame, self.right_frame, keyPointsLeft, keyPointsRight, goodMatches)
matchedPoints = ptsLeft, ptsRight
keypoints = keyPointsLeft, keyPointsRight
descriptors = descriptorsLeft, descriptorsRight
return matchedPoints, keypoints, descriptors
@staticmethod
def plot_feature(left_frame, right_frame, keyPointsLeft, keyPointsRight):
"""
Helper function for plotting features on respective left and right frames
using keypoints computed by the feature detector
"""
kp_on_left_frame = cv2.drawKeypoints(left_frame,
keyPointsLeft,
None)
kp_on_right_frame = cv2.drawKeypoints(right_frame,
keyPointsRight,
None)
plt.figure(figsize=(30, 15))
plt.subplot(1, 2, 1)
plt.imshow(kp_on_left_frame)
plt.subplot(1, 2, 2)
plt.imshow(kp_on_right_frame)
plt.show()
@staticmethod
def plot_feature_matches(left_frame, right_frame, keyPointsLeft, keyPointsRight, matches):
"""
Helper function for plotting feature matches across the left and right frames
using keypoints calculated by the feature detector and matches from the feature matcher
"""
feature_matches = cv2.drawMatchesKnn(left_frame,
keyPointsLeft,
right_frame,
keyPointsRight,
matches,
outImg=None,
flags=0)
plt.figure(figsize=(20, 10))
plt.imshow(feature_matches)
plt.show()
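# A hypothetical sketch of the configuration object the engine expects, inferred
# from the attribute accesses above (the real project presumably builds this as
# an AttriDict from a config file; the values and file names below are
# illustrative assumptions):
if __name__ == "__main__":
    from types import SimpleNamespace as NS
    params = NS(
        geometry=NS(
            detection=NS(method="SIFT"),
            featureMatcher=NS(
                method="FlannMatcher",
                configs=NS(indexParams=dict(algorithm=1, trees=5),
                           searchParams=dict(checks=50),
                           K=2,
                           maxRatio=0.75),
            ),
        ),
        debug=NS(plotting=NS(features=False, featureMatches=False)),
    )
    left = cv2.imread("left.png", cv2.IMREAD_GRAYSCALE)    # hypothetical file names
    right = cv2.imread("right.png", cv2.IMREAD_GRAYSCALE)
    engine = DetectionEngine(left, right, params)
    matched_points, keypoints, descriptors = engine.get_matching_keypoints()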
| 40.036496
| 149
| 0.615497
| 568
| 5,485
| 5.822183
| 0.295775
| 0.043544
| 0.031448
| 0.044754
| 0.181736
| 0.159661
| 0.115513
| 0.115513
| 0.082855
| 0.082855
| 0
| 0.008816
| 0.317593
| 5,485
| 136
| 150
| 40.330882
| 0.874699
| 0.251048
| 0
| 0.166667
| 0
| 0
| 0.060404
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.041667
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b745432625aa3fd9106d5e8fec7445b66115435
| 8,070
|
py
|
Python
|
pgpointcloud_utils/pcformat.py
|
dustymugs/pgpointcloud_utils
|
24193438982a8070a0aada34fca4db62688d18ba
|
[
"BSD-3-Clause"
] | 1
|
2016-09-04T20:44:15.000Z
|
2016-09-04T20:44:15.000Z
|
pgpointcloud_utils/pcformat.py
|
dustymugs/pgpointcloud_utils
|
24193438982a8070a0aada34fca4db62688d18ba
|
[
"BSD-3-Clause"
] | 6
|
2015-02-19T10:27:39.000Z
|
2015-02-19T10:58:49.000Z
|
pgpointcloud_utils/pcformat.py
|
dustymugs/pgpointcloud_utils
|
24193438982a8070a0aada34fca4db62688d18ba
|
[
"BSD-3-Clause"
] | null | null | null |
from decimal import Decimal
import xml.etree.ElementTree as ET
from .pcexception import *
class PcDimension(object):
DEFAULT_SCALE = 1.
BYTE_1 = 1
BYTE_2 = 2
BYTE_4 = 4
BYTE_8 = 8
BYTES = [BYTE_1, BYTE_2, BYTE_4, BYTE_8]
INTERPRETATION_MAPPING = {
'unknown': {},
'int8_t': {
'size': BYTE_1,
'struct': 'b'
},
'uint8_t': {
'size': BYTE_1,
'struct': 'B'
},
'int16_t': {
'size': BYTE_2,
'struct': 'h'
},
'uint16_t': {
'size': BYTE_2,
'struct': 'H'
},
'int32_t': {
'size': BYTE_4,
'struct': 'i'
},
'uint32_t': {
'size': BYTE_4,
'struct': 'I'
},
'int64_t': {
'size': BYTE_8,
'struct': 'q'
},
'uint64_t': {
'size': BYTE_8,
'struct': 'Q'
},
'float': {
'size': BYTE_4,
'struct': 'f'
},
'double': {
'size': BYTE_8,
'struct': 'd'
},
}
INTERPRETATION = INTERPRETATION_MAPPING.keys()
def __init__(
self,
name=None, size=None, interpretation=None, scale=None
):
self._name = None
self._size = None
self._interpretation = None
self._scale = PcDimension.DEFAULT_SCALE
if name is not None:
self.name = name
if size is not None:
self.size = size
if interpretation is not None:
self.interpretation = interpretation
if scale is not None:
self.scale = scale
@property
def name(self):
return self._name
@name.setter
def name(self, new_value):
try:
new_value = str(new_value)
except:
raise PcInvalidArgException(
message='Value cannot be treated as a string'
)
self._name = new_value
@property
def size(self):
return self._size
@size.setter
def size(self, new_value):
try:
new_value = int(new_value)
except:
raise PcInvalidArgException(
message='Value cannot be treated as an integer'
)
if new_value not in PcDimension.BYTES:
raise PcInvalidArgException(
message='Invalid size provided'
)
self._size = new_value
@property
def interpretation(self):
return self._interpretation
@interpretation.setter
def interpretation(self, new_value):
if new_value not in PcDimension.INTERPRETATION:
raise PcInvalidArgException(
message='Invalid interpretation provided'
)
self._interpretation = new_value
@property
def scale(self):
return self._scale
@scale.setter
def scale(self, new_value):
try:
new_value = float(new_value)
except:
raise PcInvalidArgException(
message='Value cannot be treated as a float'
)
# scale cannot be zero
if Decimal(new_value) == Decimal(0.):
raise PcInvalidArgException(
message='Value cannot be zero'
)
self._scale = new_value
@property
def struct_format(self):
if self.interpretation is None:
return None
return PcDimension.INTERPRETATION_MAPPING[self.interpretation].get(
'struct', None
)
class PcFormat(object):
def __init__(self, pcid=None, srid=None, proj4text=None, dimensions=None):
self._pcid = None
self._srid = None
self._proj4text = None
self._dimensions = []
self._dimension_lookups = {'name': {}}  # plural name is what the lookup methods use
if pcid:
self.pcid = pcid
if srid:
self.srid = srid
if dimensions:
self.dimensions = dimensions
@property
def pcid(self):
return self._pcid
@pcid.setter
def pcid(self, new_value):
try:
new_value = int(new_value)
except:
raise PcInvalidArgException(
message='Value cannot be treated as an integer'
)
self._pcid = new_value
@property
def srid(self):
return self._srid
@srid.setter
def srid(self, new_value):
try:
new_value = int(new_value)
except:
raise PcInvalidArgException(
message='Value cannot be treated as an integer'
)
self._srid = new_value
@property
def proj4text(self):
return self._proj4text
@proj4text.setter
def proj4text(self, new_value):
try:
new_value = str(new_value)
except:
raise PcInvalidArgException(
message='Value cannot be treated as a string'
)
self._proj4text = new_value
@property
def dimensions(self):
return self._dimensions
@dimensions.setter
def dimensions(self, new_value):
if not isinstance(new_value, list):
raise PcInvalidArgException(
message='Value not a list'
)
for dim in new_value:
if not isinstance(dim, PcDimension):
raise PcInvalidArgException(
message='Element of list not instance of PcDimension'
)
self._dimensions = new_value
# build lookups
self._build_dimension_lookups()
def _build_dimension_lookups(self):
self._dimension_lookups = {
'name': {}
}
for dim in self._dimensions:
self._dimension_lookups['name'][dim.name] = dim
@classmethod
def import_format(cls, pcid, srid, schema):
'''
helper function to import record from pgpointcloud_formats table
'''
frmt = cls(pcid=pcid, srid=srid)
namespaces = {
'pc': 'http://pointcloud.org/schemas/PC/1.1'
}
root = ET.fromstring(schema)
# first pass, build dict of dimensions
dimensions = {}
for dim in root.findall('pc:dimension', namespaces):
index = int(dim.find('pc:position', namespaces).text) - 1
size = dim.find('pc:size', namespaces).text
name = dim.find('pc:name', namespaces).text
interpretation = dim.find('pc:interpretation', namespaces).text
scale = dim.find('pc:scale', namespaces)
if scale is not None:
scale = scale.text
dimensions[index] = PcDimension(
name=name,
size=size,
interpretation=interpretation,
scale=scale
)
# second pass, convert dict to list for guaranteed order
_dimensions = [None] * len(dimensions)
for index, dimension in dimensions.items():  # iteritems() is Python 2 only
_dimensions[index] = dimension
frmt.dimensions = _dimensions
return frmt
@property
def struct_format(self):
frmt = []
num_dimensions = len(self.dimensions)
for index in range(num_dimensions):  # xrange() is Python 2 only
frmt.append(
self.dimensions[index].struct_format
)
frmt = ' '.join(frmt)
return frmt
def get_dimension(self, name_or_pos):
'''
return the dimension by name or position (1-based)
'''
if isinstance(name_or_pos, int):
# position is 1-based
return self.dimensions[name_or_pos - 1]
else:
return self._dimension_lookups['name'][name_or_pos]
def get_dimension_index(self, name):
'''
return the index of the dimension by name
'''
if name not in self._dimension_lookups['name']:
return None
return self.dimensions.index(self._dimension_lookups['name'][name])
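# A minimal usage sketch for the classes above (values are illustrative
# assumptions, not taken from a real pointcloud_formats schema):
if __name__ == "__main__":
    dims = [PcDimension(name=n, size=4, interpretation="int32_t", scale=0.01)
            for n in ("X", "Y", "Z")]
    frmt = PcFormat(pcid=1, srid=4326, dimensions=dims)
    print(frmt.struct_format)             # "i i i"
    print(frmt.get_dimension("Y").scale)  # 0.01
    print(frmt.get_dimension_index("Z"))  # 2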
| 24.603659
| 78
| 0.53482
| 822
| 8,070
| 5.085158
| 0.16545
| 0.063158
| 0.086842
| 0.072727
| 0.250718
| 0.205742
| 0.144258
| 0.144258
| 0.144258
| 0.144258
| 0
| 0.010123
| 0.375713
| 8,070
| 327
| 79
| 24.678899
| 0.819571
| 0.037794
| 0
| 0.245968
| 0
| 0
| 0.085815
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.016129
| 0.032258
| 0.217742
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b788c4e537ccfe9b9a19c03459ac9310b0314ff
| 662
|
py
|
Python
|
setup.py
|
yuji-koseki/django-home-urls
|
ef42ad08101f83c2aff941e00abd50e60c57ac51
|
[
"MIT"
] | null | null | null |
setup.py
|
yuji-koseki/django-home-urls
|
ef42ad08101f83c2aff941e00abd50e60c57ac51
|
[
"MIT"
] | null | null | null |
setup.py
|
yuji-koseki/django-home-urls
|
ef42ad08101f83c2aff941e00abd50e60c57ac51
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="django_home_urls",
version="0.1.0",
author="Yuji Koseki",
author_email="pxquuqjm0k62new7q4@gmail.com",
description="Django home urlconf.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/yuji-koseki/django-home-urls",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
'Framework :: Django',
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 28.782609
| 58
| 0.663142
| 74
| 662
| 5.797297
| 0.689189
| 0.13986
| 0.065268
| 0.13986
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016886
| 0.194864
| 662
| 22
| 59
| 30.090909
| 0.787993
| 0
| 0
| 0
| 0
| 0
| 0.416918
| 0.042296
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b792f428ffd2ed8a9d5df151157eca526120574
| 3,553
|
py
|
Python
|
lib/DataFileIO.py
|
cttsai1985/Kaggle-Home-Credit-Default-Risk
|
a378d5fcee1895a6229c740779f64b286532de8c
|
[
"Apache-2.0"
] | null | null | null |
lib/DataFileIO.py
|
cttsai1985/Kaggle-Home-Credit-Default-Risk
|
a378d5fcee1895a6229c740779f64b286532de8c
|
[
"Apache-2.0"
] | null | null | null |
lib/DataFileIO.py
|
cttsai1985/Kaggle-Home-Credit-Default-Risk
|
a378d5fcee1895a6229c740779f64b286532de8c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script provides a class to read and save files
Created on Sat July 21 2018
@author: cttsai
"""
import pandas as pd
from Utility import CheckFileExist
from LibConfigs import logger, hdf5_compress_option, fast_hdf5_compress_option
class DataFileIO(object):
"""
"""
def __init__(self):
self.data_lastet_load = {}
def getLastestLoaded(self):
return self.data_lastet_load.copy()
@staticmethod
def checkFile(filename):
return CheckFileExist(filename, silent=False)
@staticmethod
def loadEmpty(configs):
return {k: pd.DataFrame() for k in configs.keys()}
@staticmethod
def readHDF(filename, configs={}, opt_load=True):
with pd.HDFStore(filename, 'r', **hdf5_compress_option) as store:
logger.info("{} contained {} items".format(filename, len(store.keys())))
for k in store.keys():
logger.info("{}: {}".format(k, store[k].shape))
if opt_load and configs: # load and limited by configs
ret = {k: pd.DataFrame() for k in configs.keys()}
ret.update({k.strip('/'): store[k] for k in store.keys() if k.strip('/') in configs.keys()})
return ret
if opt_load: # load all saved dataframes
return {k.strip('/'): store[k] for k in store.keys()}
return {}
def showHDF(self, filename):
self.checkFile(filename)
self.readHDF(filename, opt_load=False)
def loadCSV(self, configs={}):
"""
configs = {'name': 'file_path'}
return load_data = {'name': dataframe}
"""
logger.info("Read Data from CSV")
load_data = {}
for k, f_path in configs.items():
if not self.checkFile(f_path):
continue
load_data[k] = pd.read_csv(f_path)
logger.info("Read in {}: from {}, shape={}".format(k, f_path, load_data[k].shape))
self.data_lastet_load = load_data.copy()
return load_data
def loadHDF(self, filename, configs={}, limited_by_configs=True):
"""
"""
logger.info("Read Data from HDFS")
if not self.checkFile(filename):
return self.loadEmpty(configs)
if limited_by_configs:
logger.info("Load selected DataFrame Only")
load_data = self.readHDF(filename, configs, opt_load=True)
else: # full loaded
load_data = self.readHDF(filename, opt_load=True)
for k, v in load_data.items():
if isinstance(v, pd.DataFrame):
logger.info('memory usage on {} is {:.3f} MB'.format(k, v.memory_usage().sum() / 1024. ** 2))
self.data_lastet_load = load_data#.copy()
return load_data
def saveHDF(self, filename, data, opt_overwrite=True, opt_fast=False):
if self.checkFile(filename):
if not opt_overwrite:
logger.warning("overwrite is not allowed")
return False
compress_option = hdf5_compress_option
if opt_fast:
logger.info("use faster compression option")
compress_option = fast_hdf5_compress_option
with pd.HDFStore(filename, 'w', **compress_option) as store:
logger.info("Save to {}".format(filename))
for k, d in data.items():
store.put(k, d, format='table')
#store.put(k, d, format='fixed')
logger.info("Save {}: {}".format(k, d.shape))
| 33.205607
| 109
| 0.587672
| 439
| 3,553
| 4.621868
| 0.275626
| 0.043371
| 0.044357
| 0.035485
| 0.280434
| 0.200099
| 0.101528
| 0.101528
| 0.072942
| 0.046328
| 0
| 0.007498
| 0.2868
| 3,553
| 106
| 110
| 33.518868
| 0.793212
| 0.090909
| 0
| 0.073529
| 0
| 0
| 0.074495
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132353
| false
| 0
| 0.044118
| 0.044118
| 0.338235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b7fac5e786fffa0981a48a959c7b50a97194205
| 885
|
py
|
Python
|
tests/testSevenKing.py
|
yooyoo2004/RoomAI
|
7f4d655581a03ded801f6c6d7d18f9fff47aa6f5
|
[
"MIT"
] | null | null | null |
tests/testSevenKing.py
|
yooyoo2004/RoomAI
|
7f4d655581a03ded801f6c6d7d18f9fff47aa6f5
|
[
"MIT"
] | null | null | null |
tests/testSevenKing.py
|
yooyoo2004/RoomAI
|
7f4d655581a03ded801f6c6d7d18f9fff47aa6f5
|
[
"MIT"
] | 1
|
2021-08-15T16:19:01.000Z
|
2021-08-15T16:19:01.000Z
|
#!/bin/python
from roomai.sevenking import SevenKingEnv
from roomai.sevenking import SevenKingAction
import unittest
class testSevenKing(unittest.TestCase):
def show_hand_card(self, hand_card):
    s = ""  # avoid shadowing the built-in str
    for c in hand_card:
        s += "," + c.key
    print(s)
def testEnv(self):
env = SevenKingEnv()
env.num_players = 2
infos, public_state, person_states, private_state = env.init()
assert(len(infos) == 2)
turn = public_state.turn
self.show_hand_card(person_states[turn].hand_card)
print (turn)
print ("available_actions=",person_states[turn].available_actions.keys())
print ("available_actions_v=",person_states[turn].available_actions.values())
action = SevenKingAction("%s,%s" % (person_states[turn].hand_card[0].key, person_states[turn].hand_card[1].key))
| 30.517241
| 120
| 0.662147
| 110
| 885
| 5.109091
| 0.427273
| 0.099644
| 0.142349
| 0.106762
| 0.241993
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00578
| 0.218079
| 885
| 28
| 121
| 31.607143
| 0.806358
| 0.013559
| 0
| 0
| 0
| 0
| 0.050633
| 0
| 0
| 0
| 0
| 0
| 0.05
| 1
| 0.1
| false
| 0
| 0.15
| 0
| 0.3
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b817580d6dc21506efb8434e6050e6f651bf968
| 1,776
|
py
|
Python
|
pyamazonlandsat/product.py
|
eamanu/pyamazonlandsat
|
cf16c5acc8fa44a89a8fcd5276e4a46421e3aa3e
|
[
"MIT"
] | null | null | null |
pyamazonlandsat/product.py
|
eamanu/pyamazonlandsat
|
cf16c5acc8fa44a89a8fcd5276e4a46421e3aa3e
|
[
"MIT"
] | null | null | null |
pyamazonlandsat/product.py
|
eamanu/pyamazonlandsat
|
cf16c5acc8fa44a89a8fcd5276e4a46421e3aa3e
|
[
"MIT"
] | null | null | null |
import attr
import os
import tarfile
from pyamazonlandsat.utils import get_path_row_from_name
from pyamazonlandsat.downloader import Downloader
@attr.s
class Product:
"""Class that represent a Product
:param name: name of the Product.
type name: str.
:param output_path: path where save the downloaded prodcuct.
:type output_path: str.
"""
name = attr.ib()
output_path = attr.ib()
_path_files = attr.ib(init=False)
_link = attr.ib(init=False,
default='https://landsat-pds.s3.amazonaws.com/c1/L8/%s/%s/%s')
def _generate_link(self):
"""Method to generate the link to download from S3
Amazon Service
"""
path, row = get_path_row_from_name(self.name)
self._link = self._link % (path, row, self.name)
def _compress_product(self):
"""Method to compress product into a tar file.
"""
with tarfile.open('%s.tar.gz' %
os.path.join(self.output_path, self.name), 'w:gz') as tar:
for ff in os.listdir(self._path_files):
tar.add(
os.path.join(
self._path_files, ff),
ff)
def get_image_product(self):
"""Method to download the product.
This method creates a `Downloader`_ object and downloads
the images. The downloaded images are saved into a temporary
folder, compressed into a tar file and then moved to
`output_path`.
"""
self._generate_link()
downloader = Downloader(self._link)
self._path_files = downloader.download_images()
self._compress_product()
downloader.remove_tmp_files()
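# A hypothetical usage sketch (the scene identifier below is illustrative; a
# real Landsat-8 Collection 1 product name is required for the S3 download):
if __name__ == "__main__":
    product = Product(name="LC08_L1TP_139045_20170304_20170316_01_T1",
                      output_path="/tmp")
    product.get_image_product()  # download, pack a .tar.gz into /tmp, clean up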
| 31.714286
| 84
| 0.614865
| 231
| 1,776
| 4.554113
| 0.359307
| 0.057034
| 0.034221
| 0.026616
| 0.034221
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003182
| 0.29223
| 1,776
| 55
| 85
| 32.290909
| 0.833731
| 0.309122
| 0
| 0
| 0
| 0.034483
| 0.057194
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.172414
| 0
| 0.448276
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b83240c1ea862333830ef3e4b3423db43db8c92
| 5,352
|
py
|
Python
|
segmentation.py
|
IgnacioPardo/RoadTrip
|
6cdded860a67bb99cc1fc81e85cd8c09eaf46431
|
[
"MIT"
] | 2
|
2021-04-13T18:54:08.000Z
|
2021-09-21T23:08:08.000Z
|
segmentation.py
|
IgnacioPardo/RoadTrip
|
6cdded860a67bb99cc1fc81e85cd8c09eaf46431
|
[
"MIT"
] | null | null | null |
segmentation.py
|
IgnacioPardo/RoadTrip
|
6cdded860a67bb99cc1fc81e85cd8c09eaf46431
|
[
"MIT"
] | null | null | null |
from __future__ import division
from skimage.segmentation import slic, mark_boundaries
from skimage.util import img_as_float
from skimage import io
import numpy as np
import matplotlib.pyplot as plt
import os
from cv2 import boundingRect
from argparse import ArgumentParser  # needed below in the __main__ block
img_width = 50
img_height = 50
img_depth = 4
_selected_segments = set()
_current_segments = []
_current_image = []
_original_image = []
_plt_img = []
_shift = False
def segment(image, **kwargs):
return slic(img_as_float(image), n_segments = int(kwargs.get("n_segments", max(image.shape) * 1.5)), sigma = 5)
def on_click(event):
if _shift:
x, y = int(round(event.xdata)), int(round(event.ydata))
segment_value = _current_segments[y, x]
if segment_value not in _selected_segments:
_selected_segments.add(segment_value)
_current_image[_current_segments == segment_value] = [255, 0, 0]
else:
_selected_segments.remove(segment_value)
_current_image[_current_segments == segment_value] = _original_image[_current_segments == segment_value]
_plt_img.set_data(_current_image)
plt.draw()
print(segment_value)
def on_key_press(event):
global _shift
if event.key == 'shift':
_shift = True
def on_key_release(event):
global _shift
if event.key == 'shift':
_shift = False
def select(image, segments):
global _selected_segments
global _current_segments
global _current_image
global _original_image
global _plt_img
_selected_segments = set()
_current_segments = segments
_current_image = np.copy(image)
_original_image = image
fig = plt.figure(f"Segmentation")
ax = fig.add_subplot(1, 1, 1)
_plt_img = ax.imshow(image)
fig.canvas.mpl_connect('button_press_event', on_click)
fig.canvas.mpl_connect('key_press_event', on_key_press)
fig.canvas.mpl_connect('key_release_event', on_key_release)
plt.show()
return _selected_segments
def mask_from_segments(segments, value):
mask = np.zeros(segments.shape, dtype="uint8")
mask[segments == value] = 255
return mask
def padded_image(image, segments, value):
mask = mask_from_segments(segments, value)
positions = np.transpose(mask.nonzero())
x, y, width, height = boundingRect(positions[:,::-1])
global_height, global_width, _ = image.shape
left_padding_x, top_padding_y = (img_width - width) // 2, (img_height - height) // 2
right_padding_x, bottom_padding_y = left_padding_x, top_padding_y
right_padding_x += (img_width - width) % 2
bottom_padding_y += (img_height - height) % 2
if top_padding_y > y:
return None
if left_padding_x > x:
return None
if bottom_padding_y > global_height - (y + height):
return None
if right_padding_x > global_width - (x + width):
return None
result_image = np.zeros((img_height, img_width, 4), dtype="float32")
# i is result_image's index, ii is original image's index
for i, ii in zip(range(img_height), range(y - top_padding_y, y + height + bottom_padding_y)):
for j, jj in zip(range(img_width), range(x - left_padding_x, x + width + right_padding_x)):
# Add a channel to whether each pixel belongs to the original segment
result_image[i, j] = np.array(list(image[ii, jj]) + [mask[ii, jj]], dtype="float32")
# returns a 4-channel image with dimensions (image_utils.img_width x image_utils.img_height)
return result_image
def padded_segments(image, segments, selection, mask=None):
padded_segments = []
segment_val = []
max_val = segments.max() + 1
for i in selection:
if mask is not None:
and_mask = np.logical_and(mask_from_segments(segments, i), mask)
if not and_mask.any():
continue
img = padded_image(image, segments, i)
if img is not None:
padded_segments.append(img)
segment_val.append(i)
print(f"Padding images [{int((i / max_val) * 100)}%]\r", end="")
print('\n')
return (np.array(padded_segments), segment_val)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--name", default="new")
args = parser.parse_args()
image_paths = os.listdir("inputs")
images = [io.imread(os.path.join("inputs", image_path)) for image_path in image_paths]
print(f"Found {len(images)} inputs")
output_path = os.path.join("datasets", args.name)
existing_segments = os.listdir(output_path)
if 'c0' in existing_segments:
false_index = existing_segments.index('c0')
true_index = len(existing_segments) - false_index
else:
false_index = len(existing_segments)
true_index = 0
print("Segmenting")
segments = [segment(image) for image in images]
for i in range(len(images)):
selection = select(images[i], segments[i])
true_padded_images, _ = padded_segments(images[i], segments[i], selection)
print(f"Saving {len(true_padded_images)} car images")
for img in true_padded_images:
# Can't save it as an image: it has an extra channel
with open(os.path.join(output_path, f"c{str(true_index)}"), 'wb') as save_file:
np.save(save_file, img)
true_index += 1
not_selection = set(range(segments[i].max())) - selection
false_padded_images, _ = padded_segments(images[i], segments[i], not_selection)
print(f"Saving {len(false_padded_images)} non-car images")
for img in false_padded_images:
with open(os.path.join(output_path, str(false_index)), 'wb') as save_file:
np.save(save_file, img)
false_index += 1
os.rename(os.path.join("inputs", image_paths[i]), os.path.join("processed", image_paths[i]))
| 30.409091
| 112
| 0.734865
| 816
| 5,352
| 4.533088
| 0.216912
| 0.025953
| 0.016221
| 0.021898
| 0.203839
| 0.115707
| 0.103271
| 0.088132
| 0.018383
| 0.018383
| 0
| 0.008966
| 0.145553
| 5,352
| 176
| 113
| 30.409091
| 0.799913
| 0.056241
| 0
| 0.120301
| 0
| 0
| 0.070141
| 0.010105
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06015
| false
| 0
| 0.06015
| 0.007519
| 0.18797
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b842e0e690c82590e6a6533bd9a6cab6937e48f
| 1,797
|
py
|
Python
|
benten/code/workflowgraph.py
|
stain/benten
|
40440d36025e0b27b8dfa6752aa76b15e7abc0d1
|
[
"Apache-2.0"
] | null | null | null |
benten/code/workflowgraph.py
|
stain/benten
|
40440d36025e0b27b8dfa6752aa76b15e7abc0d1
|
[
"Apache-2.0"
] | null | null | null |
benten/code/workflowgraph.py
|
stain/benten
|
40440d36025e0b27b8dfa6752aa76b15e7abc0d1
|
[
"Apache-2.0"
] | null | null | null |
"""Parse CWL and create a JSON file describing the workflow. This dictionary
is directly suitable for display by vis.js, but can be parsed for any other
purpose."""
# Copyright (c) 2019 Seven Bridges. See LICENSE
from ..cwl.lib import ListOrMap
def cwl_graph(cwl: dict):
graph = {
"nodes": [],
"edges": [],
"lines": {}
}
inputs = ListOrMap(cwl.get("inputs", {}), key_field="id", problems=[])
_add_nodes(graph, inputs, "inputs")
steps = ListOrMap(cwl.get("steps", {}), key_field="id", problems=[])
_add_nodes(graph, steps, "steps")
outputs = ListOrMap(cwl.get("outputs", {}), key_field="id", problems=[])
_add_nodes(graph, outputs, "outputs")
_add_edges(graph, inputs, outputs, steps)
return graph
def _add_nodes(graph, grp, grp_id):
for k, v in grp.as_dict.items():
graph["nodes"] += [{
"id": k,
"label": v.get("label", k) if isinstance(v, dict) else k,
"title": v.get("label", k) if isinstance(v, dict) else k,
"group": grp_id
}]
graph["lines"][k] = grp.get_range_for_value(k).start.line
def _add_edges(graph, inputs, outputs, steps):
for k, v in steps.as_dict.items():
_to = k
for _, prt in ListOrMap(v.get("in", {}), key_field="id", problems=[]).as_dict.items():
graph["edges"] += [{"from": _f, "to": _to} for _f in _get_source_step(prt, "source")]
for k, v in outputs.as_dict.items():
_to = k
graph["edges"] += [{"from": _f, "to": _to} for _f in _get_source_step(v, "outputSource")]
def _get_source_step(v, key):
src = v.get(key) if isinstance(v, dict) else v
if not isinstance(src, list):
src = [src]
return [s.split("/")[0] for s in src if isinstance(s, str)]
| 29.95
| 97
| 0.590428
| 258
| 1,797
| 3.94186
| 0.321705
| 0.031465
| 0.039331
| 0.070796
| 0.33825
| 0.290069
| 0.229105
| 0.13766
| 0.13766
| 0.13766
| 0
| 0.003658
| 0.239288
| 1,797
| 59
| 98
| 30.457627
| 0.740307
| 0.114636
| 0
| 0.054054
| 0
| 0
| 0.087697
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0
| 0.027027
| 0
| 0.189189
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b8b21db4d1b5bb95da77aaaeac80ad479fa1496
| 477
|
py
|
Python
|
reviews/migrations/0006_review_no_login.py
|
moshthepitt/answers
|
9febf465a18c41e7a48130e987a8fd64ceae3358
|
[
"MIT"
] | 6
|
2015-07-28T09:36:39.000Z
|
2020-08-11T17:15:18.000Z
|
reviews/migrations/0006_review_no_login.py
|
Swifilaboroka/answers
|
9febf465a18c41e7a48130e987a8fd64ceae3358
|
[
"MIT"
] | 8
|
2015-12-17T22:56:16.000Z
|
2022-01-13T00:43:16.000Z
|
reviews/migrations/0006_review_no_login.py
|
Swifilaboroka/answers
|
9febf465a18c41e7a48130e987a8fd64ceae3358
|
[
"MIT"
] | 3
|
2017-07-15T12:13:03.000Z
|
2022-02-02T10:04:10.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reviews', '0005_auto_20160203_1247'),
]
operations = [
migrations.AddField(
model_name='review',
name='no_login',
field=models.BooleanField(default=False, help_text='Is this review open to the world?', verbose_name='No Login'),
),
]
| 23.85
| 125
| 0.628931
| 51
| 477
| 5.647059
| 0.784314
| 0.041667
| 0.076389
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047753
| 0.253669
| 477
| 19
| 126
| 25.105263
| 0.761236
| 0.044025
| 0
| 0
| 0
| 0
| 0.187225
| 0.050661
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b8dd1f4d57db9568b64c88454ba16b6a105aa77
| 4,129
|
py
|
Python
|
run_all_benchmark_functions.py
|
ntienvu/KnowingOptimumValue_BO
|
42225cb9d61c1225bd757fe9dd02834a0bc7a3e6
|
[
"MIT"
] | 14
|
2020-06-30T00:36:14.000Z
|
2022-01-11T13:15:53.000Z
|
run_all_benchmark_functions.py
|
ntienvu/KnowingOptimumValue_BO
|
42225cb9d61c1225bd757fe9dd02834a0bc7a3e6
|
[
"MIT"
] | null | null | null |
run_all_benchmark_functions.py
|
ntienvu/KnowingOptimumValue_BO
|
42225cb9d61c1225bd757fe9dd02834a0bc7a3e6
|
[
"MIT"
] | 2
|
2020-10-17T15:27:06.000Z
|
2021-02-27T10:34:04.000Z
|
import sys
sys.path.insert(0,'..')
sys.path.insert(0,'../..')
from bayes_opt import BayesOpt,BayesOpt_KnownOptimumValue
import numpy as np
#from bayes_opt import auxiliary_functions
from bayes_opt import functions
from bayes_opt import utilities
import warnings
#from bayes_opt import acquisition_maximization
import itertools
import matplotlib.pyplot as plt
np.random.seed(6789)
warnings.filterwarnings("ignore")
counter = 0
myfunction_list=[]
#myfunction_list.append(functions.sincos())
#myfunction_list.append(functions.branin())
#myfunction_list.append(functions.hartman_3d())
#myfunction_list.append(functions.ackley(input_dim=5))
myfunction_list.append(functions.alpine1(input_dim=5))
#myfunction_list.append(functions.hartman_6d())
#myfunction_list.append(functions.gSobol(a=np.array([1,1,1,1,1])))
#myfunction_list.append(functions.gSobol(a=np.array([1,1,1,1,1,1,1,1,1,1])))
acq_type_list=[]
temp={}
temp['name']='erm' # expected regret minimization
temp['IsTGP']=0 # recommended to use tgp for ERM
acq_type_list.append(temp)
temp={}
temp['name']='cbm' # confidence bound minimization
temp['IsTGP']=1 # recommended to use tgp for CBM
#acq_type_list.append(temp)
#temp={}
#temp['name']='kov_mes' # MES+f*
#temp['IsTGP']=0 # we can try 'tgp'
#acq_type_list.append(temp)
temp={}
temp['name']='kov_ei' # this is EI + f*
temp['IsTGP']=0 # we can try 'tgp' by setting it =1
#acq_type_list.append(temp)
temp={}
temp['name']='ucb' # vanilla UCB
temp['IsTGP']=0 # we can try 'tgp' by setting it =1
#acq_type_list.append(temp)
temp={}
temp['name']='ei' # vanilla EI
temp['IsTGP']=0 # we can try 'tgp' by setting it =1
#acq_type_list.append(temp)
temp={}
temp['name']='random' # vanilla EI
temp['IsTGP']=0 # we can try 'tgp' by setting it =1
#acq_type_list.append(temp)
fig=plt.figure()
color_list=['r','b','k','m','c','g','y']  # 'o' is not a valid matplotlib color
marker_list=['s','x','o','v','^','>','<']
for idx, (myfunction,acq_type,) in enumerate(itertools.product(myfunction_list,acq_type_list)):
print("=====================func:",myfunction.name)
print("==================acquisition type",acq_type)
IsTGP=acq_type['IsTGP']
acq_name=acq_type['name']
nRepeat=10
ybest=[0]*nRepeat
MyTime=[0]*nRepeat
MyOptTime=[0]*nRepeat
marker=[0]*nRepeat
bo=[0]*nRepeat
for ii in range(nRepeat):
if 'kov' in acq_name or acq_name == 'erm' or acq_name == 'cbm':
bo[ii]=BayesOpt_KnownOptimumValue(myfunction.func,myfunction.bounds,myfunction.fstar, \
acq_name,IsTGP,verbose=1)
else:
bo[ii]=BayesOpt(myfunction.func,myfunction.bounds,acq_name,verbose=1)
ybest[ii],MyTime[ii]=utilities.run_experiment(bo[ii],n_init=3*myfunction.input_dim,\
NN=10*myfunction.input_dim,runid=ii)
MyOptTime[ii]=bo[ii].time_opt
print("ii={} BFV={:.3f}".format(ii,myfunction.ismax*np.max(ybest[ii])))
Score={}
Score["ybest"]=ybest
Score["MyTime"]=MyTime
Score["MyOptTime"]=MyOptTime
utilities.print_result_sequential(bo,myfunction,Score,acq_type)
## plot the result
# process the result
y_best_sofar=[0]*len(bo)
for uu,mybo in enumerate(bo):
y_best_sofar[uu]=[ (myfunction.fstar - np.max(mybo.Y_ori[:ii+1]) ) for ii in range(len(mybo.Y_ori))]
y_best_sofar[uu]=y_best_sofar[uu][3*myfunction.input_dim:] # remove the random phase for plotting purpose
y_best_sofar=np.asarray(y_best_sofar)
myxaxis=range(y_best_sofar.shape[1])
plt.errorbar(myxaxis,np.mean(y_best_sofar,axis=0), np.std(y_best_sofar,axis=0)/np.sqrt(nRepeat),
label=acq_type['name'],color=color_list[idx],marker=marker_list[idx])
plt.ylabel("Simple Regret",fontsize=14)
plt.xlabel("Iterations",fontsize=14)
plt.legend(prop={'size': 14})
strTitle="{:s} D={:d}".format(myfunction.name,myfunction.input_dim)
plt.title(strTitle,fontsize=18)
| 25.80625
| 125
| 0.654154
| 598
| 4,129
| 4.369565
| 0.272575
| 0.057405
| 0.012629
| 0.013777
| 0.287026
| 0.233065
| 0.220054
| 0.190968
| 0.169537
| 0.140069
| 0
| 0.019469
| 0.178978
| 4,129
| 159
| 126
| 25.968553
| 0.751327
| 0.2589
| 0
| 0.164557
| 0
| 0
| 0.085979
| 0.018188
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.113924
| 0
| 0.113924
| 0.050633
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b8e2e97334a2cce55aad103330d605ea89ea8e4
| 2,258
|
py
|
Python
|
coursesical/ical.py
|
cdfmlr/coursesical
|
d027db60dca6bcf543a74d3a6dd635fd8d1ee5ba
|
[
"MIT"
] | 2
|
2021-03-19T02:23:24.000Z
|
2021-12-22T15:01:46.000Z
|
coursesical/ical.py
|
cdfmlr/coursesical
|
d027db60dca6bcf543a74d3a6dd635fd8d1ee5ba
|
[
"MIT"
] | null | null | null |
coursesical/ical.py
|
cdfmlr/coursesical
|
d027db60dca6bcf543a74d3a6dd635fd8d1ee5ba
|
[
"MIT"
] | null | null | null |
import icalendar
import uuid
from datetime import datetime
import pytz
cst = pytz.timezone('Asia/Shanghai')
class Calendar(icalendar.Calendar):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add('prodid', '-//CDFMLR//coursesical//CN')
self.add('VERSION', '2.0')
self.add('X-WR-CALNAME', 'coursesical')
self.add('X-APPLE-CALENDAR-COLOR', '#ff5a1d')
self.add('X-WR-TIMEZONE', 'Asia/Shanghai')
def add_event(self, event):
self.add_component(event)
class Event(icalendar.Event):
def __init__(self,
summary: str, start: datetime, end: datetime, location: str, description: str,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.add('SUMMARY', summary)
self.add('LOCATION', location)
self.add('DESCRIPTION', description)
self.add('DTSTART', datetime(start.year, start.month, start.day,
start.hour, start.minute, start.second, tzinfo=cst))
self.add('DTEND', datetime(end.year, end.month, end.day,
end.hour, end.minute, end.second, tzinfo=cst))
self.add('SEQUENCE', '0')
self.add('UID', str(uuid.uuid3(uuid.NAMESPACE_DNS, f'{summary}{str(uuid.uuid4())}')))
def alarm(self, before_minutes: int):
alarm = icalendar.Alarm()
alarm.add('UID', str(uuid.uuid3(
uuid.NAMESPACE_DNS,
str(self["summary"]) + str(uuid.uuid4()) + str(before_minutes)
)))
alarm.add('ACTION', 'DISPLAY')
alarm['TRIGGER'] = f'-PT{before_minutes}M'
alarm.add('DESCRIPTION', '提醒事项')  # "Reminder"
self.add_component(alarm)
return self
def weekly_repeat(self, until: datetime):
self.add('rrule', {'freq': 'WEEKLY',
'INTERVAL': 1,
'UNTIL': until})
return self
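# A minimal usage sketch tying the classes above together (course details and
# dates are illustrative):
if __name__ == "__main__":
    cal = Calendar()
    event = Event("Linear Algebra", datetime(2021, 3, 1, 8, 0, 0),
                  datetime(2021, 3, 1, 9, 40, 0), "Room 101", "Week 1 lecture")
    event.alarm(before_minutes=15).weekly_repeat(until=cst.localize(datetime(2021, 6, 28)))
    cal.add_event(event)
    with open("courses.ics", "wb") as f:
        f.write(cal.to_ical())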
| 31.361111
| 95
| 0.569088
| 260
| 2,258
| 4.846154
| 0.292308
| 0.083333
| 0.019048
| 0.030159
| 0.311905
| 0.196825
| 0.111111
| 0.111111
| 0
| 0
| 0
| 0.008413
| 0.263065
| 2,258
| 71
| 96
| 31.802817
| 0.748798
| 0.124446
| 0
| 0.088889
| 0
| 0
| 0.154629
| 0.038657
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.088889
| 0
| 0.288889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b8f6c6edc977e548344a0694966296691f0f034
| 816
|
py
|
Python
|
minesweeper/test/message_tests.py
|
newnone/Multiplayer-Minesweeper
|
054adc4a14a710dfdd479791b9d1d40df061211c
|
[
"MIT"
] | null | null | null |
minesweeper/test/message_tests.py
|
newnone/Multiplayer-Minesweeper
|
054adc4a14a710dfdd479791b9d1d40df061211c
|
[
"MIT"
] | null | null | null |
minesweeper/test/message_tests.py
|
newnone/Multiplayer-Minesweeper
|
054adc4a14a710dfdd479791b9d1d40df061211c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3.2
import unittest
from minesweeper.message import *
class UTSMessageTest(unittest.TestCase):
def test_parse_infer_type(self):
"""
Instantiates one object for every concrete subclass of UTSMessage using the type-inferring
factory method parse_infer_type(), checking that the instance returned is of the expected
type.
"""
factory_strings = ("look", "dig 5 2", "flag 6 2", "deflag 3 6",
"help", "bye")
message_classes = UTSMessage.message_types
for string, mclass in zip(factory_strings, message_classes):
o = UTSMessage.parse_infer_type(string)
self.assertIsInstance(
o,
mclass
)
if __name__ == "__main__":
unittest.main()
| 27.2
| 98
| 0.61152
| 91
| 816
| 5.263736
| 0.648352
| 0.06263
| 0.087683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014085
| 0.303922
| 816
| 29
| 99
| 28.137931
| 0.829225
| 0.252451
| 0
| 0
| 0
| 0
| 0.077193
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b92c51e95df7d865e1969f7a3d0f8febc341130
| 1,142
|
py
|
Python
|
recursion/0043_string_multiplication.py
|
MartinMa28/Algorithms_review
|
3f2297038c00f5a560941360ca702e6868530f34
|
[
"MIT"
] | null | null | null |
recursion/0043_string_multiplication.py
|
MartinMa28/Algorithms_review
|
3f2297038c00f5a560941360ca702e6868530f34
|
[
"MIT"
] | null | null | null |
recursion/0043_string_multiplication.py
|
MartinMa28/Algorithms_review
|
3f2297038c00f5a560941360ca702e6868530f34
|
[
"MIT"
] | null | null | null |
class Solution:
def _equalize_length(self, *args) -> tuple:
max_len = max(map(len, args))
return tuple(map(lambda x: x.zfill(max_len), args))
def _add(self, *args) -> str:
return str(sum(map(int, args)))
def _sub(self, num1: str, num2: str) -> str:
return str(int(num1) - int(num2))
def multiply(self, num1: str, num2: str) -> str:
num1, num2 = self._equalize_length(num1, num2)
n = len(num1)
if n == 1:
# multiply by single digit
return str(int(num1) * int(num2))
num1_h = num1[: n // 2]
num1_l = num1[n // 2:]
num2_h = num2[: n // 2]
num2_l = num2[n // 2:]
num1_h_num2_h = self.multiply(num1_h, num2_h)
num1_l_num2_l = self.multiply(num1_l, num2_l)
combo = self._sub(self.multiply(self._add(num1_h, num1_l), self._add(num2_h, num2_l)), self._add(num1_h_num2_h, num1_l_num2_l))
return self._add(num1_h_num2_h + '0' * 2 * (n - n // 2), combo + '0' * (n - n // 2), num1_l_num2_l)
if __name__ == "__main__":
solu = Solution()
print(solu.multiply('123', '456'))
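# A short note on the recursion above: with m = n - n // 2 low-order digits,
# it uses the Karatsuba identity
#   num1 * num2 = 10^(2m) * h1*h2 + 10^m * ((h1+l1)*(h2+l2) - h1*h2 - l1*l2) + l1*l2
# so only three recursive multiplications are needed instead of four, giving
# O(n^log2(3)) ≈ O(n^1.585) digit operations. Worked example (n=3, m=2):
#   123 * 456 = 10^4 * (1*4) + 10^2 * ((1+23)*(4+56) - 1*4 - 23*56) + 23*56 = 56088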
| 27.853659
| 135
| 0.565674
| 178
| 1,142
| 3.342697
| 0.224719
| 0.05042
| 0.060504
| 0.067227
| 0.255462
| 0.255462
| 0.067227
| 0.067227
| 0
| 0
| 0
| 0.069091
| 0.277583
| 1,142
| 41
| 136
| 27.853659
| 0.652121
| 0.021016
| 0
| 0
| 0
| 0
| 0.014324
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0.083333
| 0.416667
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b95c23e30524cab22ee7e5bbccde48a49bfd895
| 9,432
|
py
|
Python
|
fluid/node.py
|
quantmind/aio-fluid
|
e75f91646ac9a0c9ca5679bda12319c208166d64
|
[
"BSD-3-Clause"
] | null | null | null |
fluid/node.py
|
quantmind/aio-fluid
|
e75f91646ac9a0c9ca5679bda12319c208166d64
|
[
"BSD-3-Clause"
] | 21
|
2021-08-13T06:11:55.000Z
|
2022-03-18T06:13:05.000Z
|
fluid/node.py
|
quantmind/aio-fluid
|
e75f91646ac9a0c9ca5679bda12319c208166d64
|
[
"BSD-3-Clause"
] | null | null | null |
import asyncio
import inspect
import logging
import os
import random
import time
import uuid
from abc import ABC, abstractmethod
from functools import cached_property, wraps
from logging import Logger
from typing import Any, Callable, Dict, List, Optional, Tuple
from aiohttp.client import ClientConnectionError, ClientConnectorError
from aiohttp.web import Application, GracefulExit
from .log import get_logger
from .utils import close_task, dot_name, underscore
class Id:
@classmethod
def name(cls) -> str:
"""My name"""
return underscore(cls.__name__)
@cached_property
def uid(self) -> str:
"""My unique ID"""
return uuid.uuid4().hex
@classmethod
def create_logger(cls, logger: Optional[logging.Logger] = None) -> logging.Logger:
return logger or get_logger(dot_name(cls.name()))
class IdLog(Id):
@cached_property
def logger(self):
return self.create_logger()
class NodeBase(ABC, Id):
exit_lag: int = 1
app: Optional[Application] = None
async def start_app(self, app: Application) -> None:
"""Start application"""
self.app = app
await self.start()
async def close_app(self, app: Application) -> None:
await self.close()
@abstractmethod
def is_running(self) -> bool:
"""True if the Node is running"""
@abstractmethod
async def start(self) -> None:
"""called when the node worker has started"""
pass
@abstractmethod
async def close(self) -> None:
"""called when the node worker closed"""
pass
async def setup(self) -> None:
"""Called by the :meth:`.start` method when the worker starts
This can be optionally implemented by derived classes
"""
pass
async def teardown(self) -> None:
"""Called my :meth:`close` when the worker is stopping.
This can be optionally implemented by derived classes
"""
pass
async def done(self) -> None:
try:
await self.teardown()
except Exception:
self.logger.exception("unhandled exception while tear down worker")
async def system_exit(self) -> None:
"""Gracefully exiting the app if possible"""
if self.is_running():
await self.done()
self.system_exit_sync()
def system_exit_sync(self) -> None:
"""Exit the app"""
self.logger.warning("bailing out!")
asyncio.get_event_loop().call_later(self.exit_lag, self._exit)
def _exit(self) -> None: # pragma: no cover
if os.getenv("PYTHON_ENV") != "test":
raise GracefulExit
class NodeWorker(NodeBase):
def __init__(self, *, logger: Optional[Logger] = None) -> None:
self.logger: Logger = self.create_logger(logger)
self._worker = None
@property
def debug(self) -> bool:
return self.logger.isEnabledFor(logging.DEBUG)
# FOR DERIVED CLASSES
async def work(self) -> None:
"""Main work coroutine, this is where you define the asynchronous loop.
Must be implemented by derived classes
"""
raise NotImplementedError
# API
def is_running(self) -> bool:
"""True if the Node is running"""
return bool(self._worker)
async def start(self) -> None:
"""Start the node"""
assert not self.is_running(), "Node already running - cannot start"
await self.setup()
self._worker = asyncio.ensure_future(self._work())
async def close(self, close_worker: bool = True) -> None:
if self._worker:
self.logger.info("closing")
worker = self._worker
self._worker = None
if close_worker:
await close_task(worker, self.done)
else:
await self.done()
self.logger.warning("closed")
# INTERNAL
async def _work(self) -> None:
self.logger.warning("started")
try:
await self.work()
except asyncio.CancelledError:
pass
except Exception:
self.logger.exception("unhandled exception in worker")
await self.system_exit()
else:
await self.close(close_worker=False)
class WorkerApplication(Dict[str, Any]):
def __init__(self):
super().__init__()
self.on_startup = []
self.on_shutdown = []
async def startup(self):
for on_startup in self.on_startup:
await on_startup(self)
async def shutdown(self):
for on_shutdown in self.on_shutdown:
await on_shutdown(self)
class NodeWorkers(NodeBase):
def __init__(self, *workers: NodeWorker, logger: Optional[Logger] = None) -> None:
self.logger: Logger = self.create_logger(logger)
self._closing: bool = False
self._workers: List[NodeBase] = list(workers)
@property
def debug(self) -> bool:
return self.logger.isEnabledFor(logging.DEBUG)
def is_running(self) -> bool:
return isinstance(self._workers, tuple)  # workers are frozen to a tuple once started
def is_closing(self) -> bool:
return self._closing
def add_workers(self, *workers: NodeBase) -> None:
if self.is_running():
raise RuntimeError("Cannot add workers when started")
self._workers.extend(workers)
async def start(self) -> None:
await self.setup()
self.logger.warning("started")
workers = self._freeze_workers()
await asyncio.gather(*[w.start_app(self.app) for w in workers])
async def close(self) -> None:
if self.is_running():
self._closing = True
await asyncio.gather(*[w.close_app(self.app) for w in self._workers])
await self.teardown()
def _freeze_workers(self) -> Tuple[NodeBase, ...]:
if isinstance(self._workers, tuple):
raise RuntimeError("worker already started")
self._workers = tuple(self._workers)
return self._workers
class Node(NodeWorker):
"""A nodeworker with an heartbeat work loop and ability to publish
messages into a pubsub
"""
heartbeat: float = 1
ticks: int = 0
async def tick(self) -> None:
"""called at every iteration in the worker"""
pass
async def work(self) -> None:
while True:
start = time.monotonic()
self.ticks += 1
await self.tick()
dt = time.monotonic() - start
await asyncio.sleep(max(self.heartbeat - dt, 0))
class Consumer(NodeWorker):
def __init__(
self,
process_message,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.process_message = process_message
self._message_queue: Optional[asyncio.Queue] = None
def qsize(self) -> int:
return 0 if self._message_queue is None else self._message_queue.qsize()
async def setup(self) -> None:
self._message_queue = asyncio.Queue()
async def work(self):
while self.is_running():
message = await self._message_queue.get()
await self.process_message(message)
await asyncio.sleep(0)
def submit(self, message) -> None:
if self._message_queue is None:
raise RuntimeError("cannot submit to a non running consumer")
self._message_queue.put_nowait(message)
class Worker(NodeWorker):
def __init__(
self,
work: Callable[[], None],
logger: Optional[Logger] = None,
) -> None:
super().__init__(logger=logger)
self.work = work
class TickWorker(Node):
def __init__(
self,
tick: Callable[[], None],
heartbeat: float = 1,
logger: Optional[Logger] = None,
) -> None:
super().__init__(logger=logger)
self.heartbeat = heartbeat
self.tick = tick
class every:
def __init__(self, seconds: float, noise: float = 0) -> None:
self.seconds = seconds
self.noise = min(noise, seconds)
self.last = 0
self.gap = self._gap()
self.ticks = 0
def __call__(self, method):
method.every = self
@wraps(method)
async def _(node, *args) -> None:
now = time.time()
if now - self.last > self.gap:
self.last = now
self.gap = self._gap()
self.ticks += 1
try:
await method(node, *args)
except (ClientConnectionError, ClientConnectorError) as exc:
node.logger.error(str(exc))
return _
def _gap(self) -> float:
return self.seconds + self.noise * (random.random() - 0.5)
def on_error_exit(
method: Callable[[NodeBase, Any], None]
) -> Callable[[NodeBase, Any], None]:
@wraps(method)
def sync_wrap(node: NodeBase, *args) -> None:
try:
method(node, *args)
except Exception:
node.logger.exception("unhandled exception, bailing out!")
node.system_exit_sync()
@wraps(method)
async def async_wrap(node: NodeBase, *args) -> None:
try:
await method(node, *args)
except Exception:
node.logger.exception("unhandled exception, bailing out!")
await node.system_exit()
return async_wrap if inspect.iscoroutinefunction(method) else sync_wrap
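# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged illustration of how the pieces above compose: a Node
# subclass whose tick is guarded by `on_error_exit` and rate-limited with
# `every`. The class name `ExampleNode` and its body are hypothetical.
class ExampleNode(Node):
    heartbeat = 0.5  # tick() runs roughly twice per second

    @every(seconds=5, noise=1)  # inner body fires at most every ~5s, with jitter
    async def slow_tick(self) -> None:
        self.logger.info("slow tick, total ticks=%s", self.ticks)

    @on_error_exit
    async def tick(self) -> None:
        await self.slow_tick()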
| 28.155224
| 86
| 0.603584
| 1,099
| 9,432
| 5.033667
| 0.189263
| 0.031815
| 0.013919
| 0.023861
| 0.253073
| 0.199747
| 0.161244
| 0.131236
| 0.131236
| 0.131236
| 0
| 0.002246
| 0.291985
| 9,432
| 334
| 87
| 28.239521
| 0.826146
| 0.024067
| 0
| 0.344681
| 0
| 0
| 0.037085
| 0
| 0
| 0
| 0
| 0
| 0.004255
| 1
| 0.114894
| false
| 0.025532
| 0.06383
| 0.034043
| 0.302128
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b98ab75092f0df028f96b2d93df9ca2c2ab75d6
| 478
|
py
|
Python
|
lib/csvtools.py
|
mtyates/scrapers
|
1fe55314b1235a971a436a8a17f05cea22b40f49
|
[
"Apache-2.0"
] | null | null | null |
lib/csvtools.py
|
mtyates/scrapers
|
1fe55314b1235a971a436a8a17f05cea22b40f49
|
[
"Apache-2.0"
] | null | null | null |
lib/csvtools.py
|
mtyates/scrapers
|
1fe55314b1235a971a436a8a17f05cea22b40f49
|
[
"Apache-2.0"
] | 1
|
2021-12-20T16:55:50.000Z
|
2021-12-20T16:55:50.000Z
|
#!/usr/bin/env python
import os
import sys
def dict_to_csv(comps, filename):
    # Write the column headings, then all attributes for each company.
    with open(filename, 'w') as f:
        first = next(iter(comps.values()))
        columns = ['name'] + [x for x in first.keys() if x != 'name']
        f.write(','.join(columns) + '\n')
        for v in comps.values():
            f.write(','.join('"' + v[column] + '"' for column in columns) + '\n')
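# Usage sketch (editor's addition): `comps` maps an arbitrary key to a dict of
# string attributes; all inner dicts are assumed to share the same keys.
if __name__ == '__main__':
    companies = {
        'acme': {'name': 'Acme Corp', 'city': 'London', 'sector': 'tools'},
        'init': {'name': 'Initech', 'city': 'Austin', 'sector': 'software'},
    }
    dict_to_csv(companies, 'companies.csv')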
| 22.761905
| 71
| 0.546025
| 67
| 478
| 3.865672
| 0.58209
| 0.069498
| 0.100386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00289
| 0.276151
| 478
| 20
| 72
| 23.9
| 0.745665
| 0.165272
| 0
| 0
| 0
| 0
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b9c4e6a952c20a965aae8106ca3b0f977bd503c
| 4,015
|
py
|
Python
|
deidentify/tokenizer/tokenizer_ons.py
|
bbieniek/deidentify
|
7021bf0540e0a7f931e65544d12a2909c79a14eb
|
[
"MIT"
] | 64
|
2020-01-16T16:20:47.000Z
|
2022-03-31T12:59:19.000Z
|
deidentify/tokenizer/tokenizer_ons.py
|
HabibMrad/deidentify
|
d8960a74c852a71b29a6ee0fd6a3cf7f946a5f60
|
[
"MIT"
] | 14
|
2020-01-28T08:47:06.000Z
|
2022-02-12T08:32:12.000Z
|
deidentify/tokenizer/tokenizer_ons.py
|
HabibMrad/deidentify
|
d8960a74c852a71b29a6ee0fd6a3cf7f946a5f60
|
[
"MIT"
] | 12
|
2020-01-21T07:54:04.000Z
|
2022-02-19T06:42:53.000Z
|
"""
Custom tokenization routines for the 'ons' corpus. Special care is taken with metadata tokens such
as === Report: 12345 === that were inserted to distinguish between multiple documents of a client.
They are properly handled during the tokenization and sentence segmentation stages.
"""
import re
import spacy
from spacy.matcher import Matcher
from spacy.symbols import ORTH
from deidentify.tokenizer import Tokenizer
META_REGEX = re.compile(r'=== (?:Report|Answer): [0-9]+ ===\n')
TOKENIZER_SPECIAL_CASES = [
'B.Sc.',
'Co.',
'Dhr.',
'Dr.',
'M.Sc.',
'Mevr.',
'Mgr.',
'Mr.',
'Mw.',
'O.K.',
'a.u.b.',
'ca.',
'e.g.',
'etc.',
'v.d.'
]
def _metadata_complete(doc, i):
return doc[i].text[0] == '\n' \
and doc[i - 1].text == '=' \
and META_REGEX.match(doc[i - 9: i + 1].text)
def _metadata_sentence_segmentation(doc):
"""Custom sentence segmentation rule of the Ons corpus. It segments metadata text into separate
sentences.
Metadata consists of 10 tokens:
['=', '=', '=', 'Report|Answer', ':', 'DDDDDD', '=', '=', '=', '\n']
During sentence segmentation, we want the metadata to always form a sentence of its own.
Therefore, the first token (i.e., '=') is marked as a sentence start. All other tokens
are explicitly marked as non-sentence boundaries.
To ensure that anything immediately following the metadata starts a new sentence, the next
token is also marked as a sentence start.
"""
for i in range(len(doc)):
if not _metadata_complete(doc, i):
continue
# All metadata tokens excluding the leading '='.
meta_span = doc[i - 8: i + 1]
for meta_token in meta_span:
meta_token.is_sent_start = False
# The leading '=' is a sentence boundary
doc[i - 9].is_sent_start = True
# Any token following the metadata is also a new sentence.
doc[i + 1].is_sent_start = True
return doc
NLP = spacy.load('nl_core_news_sm')
try:
NLP.add_pipe(_metadata_sentence_segmentation, before="parser") # Insert before the parser
except ValueError:
# spacy>=3
from spacy.language import Language
Language.component('meta-sentence-segmentation')(_metadata_sentence_segmentation) # pylint: disable=E1101
NLP.add_pipe('meta-sentence-segmentation', before="parser") # Insert before the parser
for case in TOKENIZER_SPECIAL_CASES:
NLP.tokenizer.add_special_case(case, [{ORTH: case}])
NLP.tokenizer.add_special_case(case.lower(), [{ORTH: case.lower()}])
infixes = NLP.Defaults.infixes + [r'\(', r'\)', r'(?<=[\D])\/(?=[\D])']
infix_regex = spacy.util.compile_infix_regex(infixes)
NLP.tokenizer.infix_finditer = infix_regex.finditer
class TokenizerOns(Tokenizer):
def parse_text(self, text: str) -> spacy.tokens.doc.Doc:
"""Custom spacy tokenizer for the 'ons' corpus that takes care of special metadata tokens.
Example:
['=', '=', '=', 'Report', ':', '1234', '=', '=', '=', '\n'] is converted to
['=== Report: 1234 ===\n']
Furthermore, common Dutch abbreviations are handled.
Parameters
----------
text : str
The text to tokenize.
Returns
-------
doc : spacy.tokens.doc.Doc
Parsed spacy document.
"""
matcher = Matcher(NLP.vocab)
pattern = [
{"ORTH": "="}, {"ORTH": "="}, {"ORTH": "="},
{"ORTH": {"IN": ['Answer', 'Report']}}, {'ORTH': ':'},
{'IS_DIGIT': True, 'OP': '+'},
{"ORTH": "="}, {"ORTH": "="}, {"ORTH": "="},
{"ORTH": "\n"}
]
matcher.add("METADATA", [pattern])
doc = NLP(text, disable=self.disable)
matches = matcher(doc)
with doc.retokenize() as retokenizer:
for _, start, end in matches:
attrs = {"LEMMA": str(doc[start:end])}
retokenizer.merge(doc[start:end], attrs=attrs)
return doc
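# Usage sketch (editor's addition). This assumes the deidentify `Tokenizer`
# base class can be constructed without arguments and provides the `disable`
# attribute used by parse_text; the 'nl_core_news_sm' model must be installed
# for the module-level spacy.load above to succeed.
if __name__ == '__main__':
    tokenizer = TokenizerOns()
    doc = tokenizer.parse_text('=== Report: 1234 ===\nDhr. Jansen kwam op controle.')
    print([token.text for token in doc])  # the metadata is merged into one token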
| 31.124031
| 109
| 0.596762
| 494
| 4,015
| 4.757085
| 0.380567
| 0.068085
| 0.020426
| 0.012766
| 0.090213
| 0.070638
| 0.045106
| 0.045106
| 0
| 0
| 0
| 0.009944
| 0.248568
| 4,015
| 128
| 110
| 31.367188
| 0.768976
| 0.395268
| 0
| 0.058824
| 0
| 0
| 0.117868
| 0.021658
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044118
| false
| 0
| 0.088235
| 0.014706
| 0.191176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b9c889768e3496393e2ee54739cb4b6ccbaab96
| 1,219
|
py
|
Python
|
systemtest/quality/utils/models.py
|
IBM-Power-SystemTest/systemtest
|
a29e6d54500ca13f554073cc66a4a2d403ea5b14
|
[
"BSD-3-Clause"
] | 1
|
2022-03-09T18:07:11.000Z
|
2022-03-09T18:07:11.000Z
|
systemtest/quality/utils/models.py
|
IBM-Power-SystemTest/systemtest
|
a29e6d54500ca13f554073cc66a4a2d403ea5b14
|
[
"BSD-3-Clause"
] | null | null | null |
systemtest/quality/utils/models.py
|
IBM-Power-SystemTest/systemtest
|
a29e6d54500ca13f554073cc66a4a2d403ea5b14
|
[
"BSD-3-Clause"
] | null | null | null |
# Standard library
from typing import Iterator
# Django
from django.conf import settings
# APPs
from systemtest.quality import forms as quality_forms, models as quality_models
from systemtest.utils.db2 import Database
def get_quality_status(status_name: str) -> quality_models.QualityStatus:
"""
Gets a specific QualityStatus by exact name
Args:
status_name:
Name of status to fetch
Raises:
DoesNotExist:
QualityStatus matching query does not exist
Returns:
QualityStatus object
"""
return quality_models.QualityStatus.objects.get(name=status_name)
def fetch_database() -> Iterator[dict]:
database = Database(**settings.DATABASES.get("db2"))
sql = database.get_sql(settings.QUALITY_SQL_PATH)
required_columns = {
"SYSTEM_NUMBER",
"WORKUNIT",
"OPERATION_STATUS"
}
optional_columns = {
"WORKUNIT_QTY",
"PRODUCT_LINE",
"OPERATION_NUMBER"
}
for row in database.fetch(sql):
columns = set(row.keys())
if (required_columns - columns):
continue
data = {column.lower(): row.get(column)
for column in (required_columns | optional_columns)}
yield data
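# Usage sketch (editor's addition): fetch_database() is a generator, so rows
# can be streamed without materialising the whole DB2 result set. It requires
# configured Django settings; the function name below is hypothetical.
def print_operations() -> None:
    for row in fetch_database():
        # keys are the lower-cased column names, e.g. "system_number"
        print(row["system_number"], row["operation_status"])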
| 23.442308
| 79
| 0.646432
| 134
| 1,219
| 5.708955
| 0.485075
| 0.05098
| 0.067974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002247
| 0.269893
| 1,219
| 51
| 80
| 23.901961
| 0.857303
| 0.185398
| 0
| 0
| 0
| 0
| 0.084926
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.12
| 0
| 0.24
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b9c9c8690ed96b25a9028c69ebb2b7c65845147
| 1,849
|
py
|
Python
|
cibopath/scraper.py
|
hackebrot/cibopath
|
7b341cb92942a0ed70e21c9e5f23d281a625e30c
|
[
"BSD-3-Clause"
] | 11
|
2016-02-08T11:45:26.000Z
|
2017-05-19T16:07:31.000Z
|
cibopath/scraper.py
|
hackebrot/cibopath
|
7b341cb92942a0ed70e21c9e5f23d281a625e30c
|
[
"BSD-3-Clause"
] | 5
|
2016-02-11T22:11:54.000Z
|
2016-06-09T20:54:07.000Z
|
cibopath/scraper.py
|
hackebrot/cibopath
|
7b341cb92942a0ed70e21c9e5f23d281a625e30c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import asyncio
import logging
import aiohttp
from cibopath import readme_parser, github_api
from cibopath.templates import Template
logger = logging.getLogger('cibopath')
class CibopathError(Exception):
"""Custom error class for the app."""
class CookiecutterReadmeError(CibopathError):
"""Unable to retrieve readme from github.com/audreyr/cookiecutter."""
class UnableToFindTemplateLinks(CibopathError):
"""Cannot find links to templates in README."""
def fetch_template_data(username, token):
semaphore = asyncio.Semaphore(10)
loop = asyncio.get_event_loop()
auth = aiohttp.BasicAuth(username, token)
with aiohttp.ClientSession(loop=loop, auth=auth) as client:
logger.debug('Load Cookiecutter readme')
cookiecutter_readme = loop.run_until_complete(
github_api.get_readme(semaphore, client, 'audreyr', 'cookiecutter')
)
if not cookiecutter_readme:
raise CookiecutterReadmeError
logger.debug('Find GitHub links in Cookiecutter readme')
github_links, _ = readme_parser.read(cookiecutter_readme)
if not github_links:
raise UnableToFindTemplateLinks
tasks = [
github_api.get_template(semaphore, client, link)
for link in github_links
]
logger.debug('Fetch template data from links')
results = loop.run_until_complete(asyncio.gather(*tasks))
yield from filter(None, results) # Ignore all invalid templates
def load_templates(username, token):
templates = []
template_data = fetch_template_data(username, token)
for name, author, repo, context, readme in template_data:
_, tags = readme_parser.read(readme)
templates.append(Template(name, author, repo, context, sorted(tags)))
return templates
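# Usage sketch (editor's addition): credentials are a GitHub username and a
# personal access token; the values below are placeholders.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    for template in load_templates('octocat', '<personal-access-token>'):
        print(template)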
| 29.822581
| 79
| 0.70146
| 206
| 1,849
| 6.15534
| 0.38835
| 0.047319
| 0.040221
| 0.039432
| 0.047319
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002056
| 0.210925
| 1,849
| 61
| 80
| 30.311475
| 0.867032
| 0.102217
| 0
| 0
| 0
| 0
| 0.073646
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.131579
| 0
| 0.289474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b9ce56039cc41fcf712d566d9141353c7327dc4
| 5,400
|
py
|
Python
|
using_force_sense_selector_switch/A-B_force_sense_switching/ForceSenseSwitchSample.py
|
sjdemartini/SpikeSafePythonSamples
|
60dc9cd175577e9601c0709ac471c72c5a666f1b
|
[
"MIT"
] | 4
|
2020-06-11T00:11:17.000Z
|
2022-03-17T22:58:13.000Z
|
using_force_sense_selector_switch/A-B_force_sense_switching/ForceSenseSwitchSample.py
|
sjdemartini/SpikeSafePythonSamples
|
60dc9cd175577e9601c0709ac471c72c5a666f1b
|
[
"MIT"
] | null | null | null |
using_force_sense_selector_switch/A-B_force_sense_switching/ForceSenseSwitchSample.py
|
sjdemartini/SpikeSafePythonSamples
|
60dc9cd175577e9601c0709ac471c72c5a666f1b
|
[
"MIT"
] | 2
|
2021-12-20T20:03:05.000Z
|
2022-01-12T18:51:54.000Z
|
# Goal:
# Demonstrate the A/B switch functionality of the SpikeSafe PSMU while operating in DC mode
#
# Expectation:
# Channel 1 will run in DC mode with the switch set to Primary.
# Afterward, the switch will be set to Auxiliary mode, in which another source connected to the SpikeSafe may operate
# After the Auxiliary source has completed operation, the switch will be set back to Primary to operate the SpikeSafe in DC mode again
import sys
import time
import logging
from spikesafe_python.MemoryTableReadData import log_memory_table_read
from spikesafe_python.ReadAllEvents import log_all_events
from spikesafe_python.TcpSocket import TcpSocket
from spikesafe_python.Threading import wait
from spikesafe_python.SpikeSafeError import SpikeSafeError
from tkinter import messagebox
### set these before starting application
# SpikeSafe IP address and port number
ip_address = '10.0.0.220'
port_number = 8282
### setting up sequence log
log = logging.getLogger(__name__)
logging.basicConfig(filename='SpikeSafePythonSamples.log',format='%(asctime)s, %(levelname)s, %(message)s',datefmt='%m/%d/%Y %I:%M:%S',level=logging.INFO)
### start of main program
try:
log.info("ForceSenseSwitchSample.py started.")
# instantiate new TcpSocket to connect to SpikeSafe
tcp_socket = TcpSocket()
tcp_socket.open_socket(ip_address, port_number)
# reset to default state
tcp_socket.send_scpi_command('*RST')
log_all_events(tcp_socket)
# check that the Force Sense Selector Switch is available for this SpikeSafe. We need the switch to run this sequence
# If switch related SCPI is sent and there is no switch configured, it will result in error "386, Output Switch is not installed"
tcp_socket.send_scpi_command('OUTP1:CONN:AVAIL?')
isSwitchAvailable = tcp_socket.read_data()
if isSwitchAvailable != 'Ch:1':
raise Exception('Force Sense Selector Switch is not available, and is necessary to run this sequence.')
# set the Force Sense Selector Switch state to Primary (A) so that the SpikeSafe can output to the DUT
# the default switch state can be manually adjusted using SCPI, so it is best to send this command even after sending a *RST
tcp_socket.send_scpi_command('OUTP1:CONN PRI')
# set Channel 1 settings to operate in DC mode
tcp_socket.send_scpi_command('SOUR1:FUNC:SHAP DC')
tcp_socket.send_scpi_command('SOUR1:CURR:PROT 50')
tcp_socket.send_scpi_command('SOUR1:CURR 0.1')
tcp_socket.send_scpi_command('SOUR1:VOLT 20')
# log all SpikeSafe event after settings are adjusted
log_all_events(tcp_socket)
# turn on Channel 1
tcp_socket.send_scpi_command('OUTP1 1')
# check for all events and measure readings on Channel 1 once per second for 10 seconds
time_end = time.time() + 10
while time.time() < time_end:
log_all_events(tcp_socket)
log_memory_table_read(tcp_socket)
wait(1)
# turn off Channel 1 and check for all events
# When operating in DC mode, the channel must be turned off before adjusting the switch state
tcp_socket.send_scpi_command('OUTP1 0')
log_all_events(tcp_socket)
# set the Force Sense Selector Switch state to Auxiliary (B) so that the Auxiliary Source will be routed to the DUT and the SpikeSafe will be disconnected
tcp_socket.send_scpi_command('OUTP1:CONN AUX')
# Show a message box so any tasks using the Auxiliary source may be performed before adjusting the switch back to Primary
# The SpikeSafe is not electrically connected to the DUT at this time
messagebox.showinfo("Auxiliary Source Active", "Force Sense Selector Switch is in Auxiliary (B) mode. Perform any tests using the auxiliary source, then close this window to adjust the switch back to Primary (A) mode.")
# set the Force Sense Selector Switch state to Primary (A) so that the SpikeSafe can output to the DUT
tcp_socket.send_scpi_command('OUTP1:CONN PRI')
# turn on Channel 1
tcp_socket.send_scpi_command('OUTP1 1')
# check for all events and measure readings on Channel 1 once per second for 10 seconds
time_end = time.time() + 10
while time.time() < time_end:
log_all_events(tcp_socket)
log_memory_table_read(tcp_socket)
wait(1)
# turn off Channel 1 and check for all events
tcp_socket.send_scpi_command('OUTP1 0')
log_all_events(tcp_socket)
# disconnect from SpikeSafe
tcp_socket.close_socket()
log.info("ForceSenseSwitchSample.py completed.\n")
except SpikeSafeError as ssErr:
# print any SpikeSafe-specific error to both the terminal and the log file, then exit the application
error_message = 'SpikeSafe error: {}\n'.format(ssErr)
log.error(error_message)
print(error_message)
sys.exit(1)
except Exception as err:
# print any general exception to both the terminal and the log file, then exit the application
error_message = 'Program error: {}\n'.format(err)
log.error(error_message)
print(error_message)
sys.exit(1)
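# Helper sketch (editor's addition): the query pattern used repeatedly above
# (send a SCPI query, then read the reply) can be factored out for reuse. It
# relies only on TcpSocket methods already exercised in this sample.
def query(socket, scpi_command):
    """Send a SCPI query and return the instrument's reply."""
    socket.send_scpi_command(scpi_command)
    return socket.read_data()
# e.g. is_switch_available = query(tcp_socket, 'OUTP1:CONN:AVAIL?')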
| 46.551724
| 223
| 0.696111
| 766
| 5,400
| 4.776762
| 0.275457
| 0.061492
| 0.046187
| 0.060399
| 0.398743
| 0.353102
| 0.327685
| 0.29161
| 0.26182
| 0.26182
| 0
| 0.013936
| 0.242593
| 5,400
| 116
| 224
| 46.551724
| 0.880685
| 0.415556
| 0
| 0.433333
| 0
| 0.016667
| 0.204684
| 0.024382
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.15
| 0
| 0.15
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b9dada36fd7bad56b1a0092534a61252ce1c05e
| 2,474
|
py
|
Python
|
tripleoclient/tests/v1/overcloud_delete/test_overcloud_delete.py
|
mail2nsrajesh/python-tripleoclient
|
6646b2fc4a37b2a52c1cf7d7edb42c8007e905d8
|
[
"Apache-2.0"
] | null | null | null |
tripleoclient/tests/v1/overcloud_delete/test_overcloud_delete.py
|
mail2nsrajesh/python-tripleoclient
|
6646b2fc4a37b2a52c1cf7d7edb42c8007e905d8
|
[
"Apache-2.0"
] | null | null | null |
tripleoclient/tests/v1/overcloud_delete/test_overcloud_delete.py
|
mail2nsrajesh/python-tripleoclient
|
6646b2fc4a37b2a52c1cf7d7edb42c8007e905d8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from tripleoclient.tests.v1.overcloud_deploy import fakes
from tripleoclient.v1 import overcloud_delete
class TestDeleteOvercloud(fakes.TestDeployOvercloud):
def setUp(self):
super(TestDeleteOvercloud, self).setUp()
self.cmd = overcloud_delete.DeleteOvercloud(self.app, None)
self.app.client_manager.workflow_engine = mock.Mock()
self.workflow = self.app.client_manager.workflow_engine
@mock.patch(
'tripleoclient.workflows.stack_management.delete_stack', autospec=True)
def test_stack_delete(self, mock_delete_stack):
clients = self.app.client_manager
orchestration_client = clients.orchestration
stack = mock.Mock()
stack.id = 12345
orchestration_client.stacks.get.return_value = stack
self.cmd._stack_delete(clients, 'overcloud')
orchestration_client.stacks.get.assert_called_once_with('overcloud')
mock_delete_stack.assert_called_once_with(
clients, stack=12345)
def test_stack_delete_no_stack(self):
clients = self.app.client_manager
orchestration_client = clients.orchestration
orchestration_client.stacks.get.return_value = None
self.cmd.log.warning = mock.MagicMock()
self.cmd._stack_delete(clients, 'overcloud')
orchestration_client.stacks.get.assert_called_once_with('overcloud')
self.cmd.log.warning.assert_called_once_with(
"No stack found ('overcloud'), skipping delete")
@mock.patch(
'tripleoclient.workflows.plan_management.delete_deployment_plan',
autospec=True)
def test_plan_delete(self, delete_deployment_plan_mock):
self.cmd._plan_delete(self.workflow, 'overcloud')
delete_deployment_plan_mock.assert_called_once_with(
self.workflow,
container='overcloud')
| 36.382353
| 79
| 0.719887
| 305
| 2,474
| 5.639344
| 0.383607
| 0.034884
| 0.046512
| 0.05814
| 0.272093
| 0.272093
| 0.226744
| 0.182558
| 0.182558
| 0.105814
| 0
| 0.010076
| 0.197656
| 2,474
| 67
| 80
| 36.925373
| 0.856423
| 0.229992
| 0
| 0.263158
| 0
| 0
| 0.113347
| 0.060911
| 0
| 0
| 0
| 0
| 0.131579
| 1
| 0.105263
| false
| 0
| 0.078947
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b9f976e658245e57765789e6e80ca7112711034
| 8,621
|
py
|
Python
|
bird_view/models/agent_IAs_RL.py
|
magh24/carla_RL_IAs
|
a38fb353bd84330c6c20b9cc8e824d7bbb02cfe5
|
[
"MIT"
] | 39
|
2020-03-17T10:12:49.000Z
|
2022-03-12T14:18:45.000Z
|
bird_view/models/agent_IAs_RL.py
|
marintoro/LearningByCheating
|
a13b331ee8d69071570c97b35f1348758d658ee5
|
[
"MIT"
] | null | null | null |
bird_view/models/agent_IAs_RL.py
|
marintoro/LearningByCheating
|
a13b331ee8d69071570c97b35f1348758d658ee5
|
[
"MIT"
] | 16
|
2020-06-11T20:15:57.000Z
|
2022-03-13T01:55:16.000Z
|
import numpy as np
import torch
from collections import deque, namedtuple
import cv2
import os
import carla
from .model_supervised import Model_Segmentation_Traffic_Light_Supervised
from .model_RL import DQN, Orders
class AgentIAsRL:
def __init__(self, args=None, **kwargs):
super().__init__(**kwargs)
self.args = args
path_to_folder_with_model = args.path_folder_model
path_to_model_supervised = os.path.join(path_to_folder_with_model, "model_supervised/")
path_model_supervised = None
for file in os.listdir(path_to_model_supervised):
if ".pth" in file:
if path_model_supervised is not None:
raise ValueError(
"There is multiple model supervised in folder " +
path_to_model_supervised +
" you must keep only one!",
)
path_model_supervised = os.path.join(path_to_model_supervised, file)
if path_model_supervised is None:
raise ValueError("We didn't find any model supervised in folder " +
path_to_model_supervised)
# All these magic numbers should match the ones used when training the supervised model...
model_supervised = Model_Segmentation_Traffic_Light_Supervised(
len(args.steps_image), len(args.steps_image), 1024, 6, 4, args.crop_sky
)
model_supervised.load_state_dict(
torch.load(path_model_supervised, map_location=args.device)
)
model_supervised.to(device=args.device)
self.encoder = model_supervised.encoder
self.last_conv_downsample = model_supervised.last_conv_downsample
self.action_space = (args.nb_action_throttle + 1) * args.nb_action_steering
path_to_model_RL = os.path.join(path_to_folder_with_model, "model_RL")
os.chdir(path_to_model_RL)
tab_model = []
for file in os.listdir(path_to_model_RL):
if ".pth" in file:
tab_model.append(os.path.join(path_to_model_RL, file))
if len(tab_model) == 0:
raise ValueError("We didn't find any RL model in folder "+ path_to_model_RL)
self.tab_RL_model = []
for current_model in tab_model:
current_RL_model = DQN(args, self.action_space).to(device=args.device)
current_RL_model_dict = current_RL_model.state_dict()
print("we load RL model ", current_model)
checkpoint = torch.load(current_model)
# 1. filter out unnecessary keys
pretrained_dict = {
k: v
for k, v in checkpoint["model_state_dict"].items()
if k in current_RL_model_dict
}
# 2. overwrite entries in the existing state dict
current_RL_model_dict.update(pretrained_dict)
# 3. load the new state dict
current_RL_model.load_state_dict(current_RL_model_dict)
self.tab_RL_model.append(current_RL_model)
self.window = (
max([abs(number) for number in args.steps_image]) + 1
) # Number of frames to concatenate
self.RGB_image_buffer = deque([], maxlen=self.window)
self.device = args.device
self.state_buffer = deque([], maxlen=self.window)
self.State = namedtuple("State", ("image", "speed", "order", "steering"))
if args.crop_sky:
blank_state = self.State(
np.zeros(6144, dtype=np.float32), -1, -1, 0
) # RGB image, color channel first for torch
else:
blank_state = self.State(np.zeros(8192, dtype=np.float32), -1, -1, 0)
for _ in range(self.window):
self.state_buffer.append(blank_state)
if args.crop_sky:
self.RGB_image_buffer.append(
np.zeros((3, args.front_camera_height - 120, args.front_camera_width))
)
else:
self.RGB_image_buffer.append(
np.zeros((3, args.front_camera_height, args.front_camera_width))
)
self.last_steering = 0
self.last_order = 0
self.current_timestep = 0
def act(self, state_buffer, RL_model):
speeds = []
order = state_buffer[-1].order
steerings = []
for step_image in self.args.steps_image:
state = state_buffer[step_image + self.window - 1]
speeds.append(state.speed)
steerings.append(state.steering)
images = torch.from_numpy(state_buffer[-1].image).to(self.device, dtype=torch.float32)
speeds = torch.from_numpy(np.stack(speeds).astype(np.float32)).to(
self.device, dtype=torch.float32
)
steerings = torch.from_numpy(np.stack(steerings).astype(np.float32)).to(
self.device, dtype=torch.float32
)
with torch.no_grad():
quantile_values, _ = RL_model(
images.unsqueeze(0),
speeds.unsqueeze(0),
order,
steerings.unsqueeze(0),
self.args.num_quantile_samples,
)
return quantile_values.mean(0).argmax(0).item()
# We used a different int/order mapping in our training than in the CARLA benchmark,
# so we need to remap orders
def adapt_order(self, incoming_obs_command):
if incoming_obs_command == 1: # LEFT
return Orders.Left.value
if incoming_obs_command == 2: # RIGHT
return Orders.Right.value
if incoming_obs_command == 3: # STRAIGHT
return Orders.Straight.value
if incoming_obs_command == 4: # FOLLOW_LANE
return Orders.Follow_Lane.value
def run_step(self, observations):
self.current_timestep += 1
rgb = observations["rgb"].copy()
if self.args.crop_sky:
rgb = np.array(rgb)[120:, :, :]
else:
rgb = np.array(rgb)
if self.args.render:
bgr = rgb[:, :, ::-1]
cv2.imshow("network input", bgr)
cv2.waitKey(1)
rgb = np.rollaxis(rgb, 2, 0)
self.RGB_image_buffer.append(rgb)
speed = np.linalg.norm(observations["velocity"])
order = self.adapt_order(int(observations["command"]))
if self.last_order != order:
print("order = ", Orders(order).name)
self.last_order = order
np_array_RGB_input = np.concatenate(
[
self.RGB_image_buffer[indice_image + self.window - 1]
for indice_image in self.args.steps_image
]
)
torch_tensor_input = (
torch.from_numpy(np_array_RGB_input)
.to(dtype=torch.float32, device=self.device)
.div_(255)
.unsqueeze(0)
)
with torch.no_grad():
current_encoding = self.encoder(torch_tensor_input)
current_encoding = self.last_conv_downsample(current_encoding)
current_encoding_np = current_encoding.cpu().numpy().flatten()
current_state = self.State(current_encoding_np, speed, order, self.last_steering)
self.state_buffer.append(current_state)
tab_action = []
for RL_model in self.tab_RL_model:
current_action = self.act(self.state_buffer, RL_model)
tab_action.append(current_action)
steer = 0
throttle = 0
brake = 0
for action in tab_action:
steer += (
(action % self.args.nb_action_steering) - int(self.args.nb_action_steering / 2)
) * (self.args.max_steering / int(self.args.nb_action_steering / 2))
if action < int(self.args.nb_action_steering * self.args.nb_action_throttle):
throttle += (int(action / self.args.nb_action_steering)) * (
self.args.max_throttle / (self.args.nb_action_throttle - 1)
)
brake += 0
else:
throttle += 0
brake += 1.0
steer = steer / len(tab_action)
throttle = throttle / len(tab_action)
if brake < len(tab_action) / 2:
brake = 0
else:
brake = brake / len(tab_action)
control = carla.VehicleControl()
control.steer = np.clip(steer, -1.0, 1.0)
control.throttle = np.clip(throttle, 0.0, 1.0)
control.brake = np.clip(brake, 0.0, 1.0)
control.manual_gear_shift = False
self.last_steering = steer
return control
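# Worked example (editor's addition): how a flat action index decodes into
# (steer, throttle, brake), mirroring the arithmetic in run_step above. The
# parameter values are hypothetical, chosen only for illustration.
def decode_action(action, nb_action_steering=9, nb_action_throttle=4,
                  max_steering=0.6, max_throttle=1.0):
    half = int(nb_action_steering / 2)
    steer = ((action % nb_action_steering) - half) * (max_steering / half)
    if action < nb_action_steering * nb_action_throttle:
        throttle = int(action / nb_action_steering) * (max_throttle / (nb_action_throttle - 1))
        brake = 0.0
    else:
        throttle, brake = 0.0, 1.0
    return steer, throttle, brake
# decode_action(0)  -> (-0.6, 0.0, 0.0): full left, zero throttle
# decode_action(13) -> (0.0, ~0.333, 0.0): straight, one throttle step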
| 37.482609
| 95
| 0.594363
| 1,057
| 8,621
| 4.598865
| 0.197729
| 0.05863
| 0.022629
| 0.023041
| 0.311047
| 0.225057
| 0.134129
| 0.099568
| 0.054721
| 0.021806
| 0
| 0.01723
| 0.313305
| 8,621
| 229
| 96
| 37.646288
| 0.803885
| 0.045586
| 0
| 0.091398
| 0
| 0
| 0.034814
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021505
| false
| 0
| 0.043011
| 0
| 0.102151
| 0.010753
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b9fd98a85b6fed6891c0ba799c31065628711f4
| 10,547
|
py
|
Python
|
bin_testing/diff_fuzzing.py
|
KristianMika/PA193-Bech32m
|
6625c3883dd4ee4db40afc0b9eae1c945544a87b
|
[
"MIT"
] | null | null | null |
bin_testing/diff_fuzzing.py
|
KristianMika/PA193-Bech32m
|
6625c3883dd4ee4db40afc0b9eae1c945544a87b
|
[
"MIT"
] | null | null | null |
bin_testing/diff_fuzzing.py
|
KristianMika/PA193-Bech32m
|
6625c3883dd4ee4db40afc0b9eae1c945544a87b
|
[
"MIT"
] | null | null | null |
import base64
import binascii
import datetime
import os
import subprocess
import random
import sys
BECH_SYMBOLS = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
OUR_BINARY = None
LIBBECH32ENC_BINARY = None
LIBBECH32DEC_BINARY = None
NODE_REF = "node . "
# region Encoding
def node_encode(hrp, data_hex):
str_in = NODE_REF + f"encode {hrp} {data_hex}"
proc = subprocess.Popen(str_in.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
err = proc.stderr.read().decode(encoding='ASCII').strip()
if err != '':
print("*******")
print("Node error:\n" + err)
with open("fuzzing_results.txt", "a") as f:
f.write("*******\n")
f.write(f"HRP: {hrp}\n")
f.write(f"HEX: {data_hex}\n")
f.write("Node error:\n" + err + "\n")
f.write("*******\n")
return proc.stdout.read().decode(encoding='ASCII').strip(), err != ''
def external_encode(hrp, bech):
indexes = get_indexes(bech)
indexes_str = indexes_to_string(indexes)
str_in = f"{LIBBECH32ENC_BINARY} {hrp} {indexes_str}"
proc = subprocess.Popen(str_in.split(' '), stdout=subprocess.PIPE)
proc.wait()
return proc.stdout.read().decode(encoding='ASCII').strip()
def hex_encode(hrp, data_hex):
proc = subprocess.Popen(f"{OUR_BINARY} --input-text {data_hex} --input-format hex --hrp {hrp}".split(' '),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
err = proc.stderr.read().decode(encoding='ASCII').strip()
if err != '':
print("******* ENCODE ERROR *******")
print("HRP: " + hrp)
print("HEX: " + data_hex)
print("OUR error:\n" + err)
with open("fuzzing_results.txt", "a") as f:
f.write("*******\n")
f.write(f"HRP: {hrp}\n")
f.write(f"HEX: {data_hex}\n")
f.write("OUR error:\n" + err + "\n")
f.write("*******\n")
return proc.stdout.read().decode(encoding='ASCII').strip()
def base64_encode(hrp, data_base64, do_trim=True):
proc = subprocess.Popen(
f"{OUR_BINARY} --input-text {data_base64} --input-format base64 --hrp {hrp}{' --trim' if do_trim else ''}"
.split(' '),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
err = proc.stderr.read().decode(encoding='ASCII').strip()
if err != '':
print("******* ENCODE ERROR *******")
print("HRP: " + hrp)
print("B64: " + data_base64)
print("OUR error:\n" + err)
with open("fuzzing_results.txt", "a") as f:
f.write("*******\n")
f.write(f"HRP: {hrp}\n")
f.write(f"B64: {data_base64}\n")
f.write("OUR error:\n" + err + "\n")
f.write("*******\n")
return proc.stdout.read().decode(encoding='ASCII').strip()
def bin_encode(hrp, data_hex):
try:
with open('b.bin', 'wb') as f:
f.write(binascii.unhexlify(data_hex))
proc = subprocess.Popen(f"{OUR_BINARY} --input-file b.bin --input-format bin --hrp {hrp}".split(' '),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
err = proc.stderr.read().decode(encoding='ASCII').strip()
if err != '':
print("******* ENCODE ERROR *******")
print("HRP: " + hrp)
print("B64: " + data_hex + "(as binary)")
with open("fuzzing_results.txt", "a") as f:
f.write("*******\n")
f.write(f"HRP: {hrp}\n")
f.write(f"BIN: {data_hex} (as binary)\n")
f.write("OUR error:\n" + err + "\n")
f.write("*******\n")
return proc.stdout.read().decode(encoding='ASCII').strip()
finally:
os.remove('b.bin')
# endregion
# region Decoding
def node_decode(code):
str_in = NODE_REF + f"decode {code}"
proc = subprocess.Popen(str_in.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
err = proc.stderr.read().decode(encoding='ASCII').strip()
if err != '':
print("*******")
print("Node error:\n" + err)
with open("fuzzing_results.txt", "a") as f:
f.write("*******\n")
f.write(f"CODE: {code}\n")
f.write("Node error:\n" + err + "\n")
f.write("*******\n")
return proc.stdout.read().decode(encoding='ASCII').strip(), err != ''
def external_decode(code):
str_in = f"{LIBBECH32DEC_BINARY} {code}"
proc = subprocess.Popen(str_in.split(' '), stdout=subprocess.PIPE)
proc.wait()
return proc.stdout.read().decode(encoding='ASCII').strip()
def hex_decode(code):
proc = subprocess.Popen(
f"{OUR_BINARY} --decode --input-text {code} --output-format hex --allow-empty-hrp --trim".split(' '),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc.wait()
err = proc.stderr.read().decode(encoding='ASCII').strip()
if err != '':
print("******* DECODE ERROR *******")
print("CODE: " + code)
print("OUR error:\n" + err)
with open("fuzzing_results.txt", "a") as f:
f.write("*******\n")
f.write(f"CODE: {code}\n")
f.write("OUR error:\n" + err + "\n")
f.write("*******\n")
return proc.stdout.read().decode(encoding='ASCII').strip()
# endregion
def generate_hrp():
chars = [chr(x) for x in range(33, 127)]
length = random.randint(1, 81)
ret = "".join(random.choice(chars) for _ in range(length)).lower()
while ret[0] == '-' and len(ret) > 1:
ret = ret[1:]
if ret[0] == '-':
ret = 'a'
return ret.replace("'", "").replace('"', '')
def generate_hex(hrp):
max_len = 83 - len(hrp)
chars = "0123456789abcdef"
length = random.randint(2, max_len)
if length % 2 == 1:
length -= 1
return "".join(random.choice(chars) for _ in range(length))
def to_base64(hex_str):
return base64.b64encode(base64.b16decode(hex_str)).decode(encoding='utf-8')
# Adapted from
# https://stackoverflow.com/questions/1425493/convert-hex-to-binary
def to_bin(hex_code):
return bin(int(hex_code, 16))[2:]
def extract_bech(code):
return code[code.rfind('1') + 1:-6]
def get_indexes(s):
return [BECH_SYMBOLS.index(c) for c in s]
def indexes_to_string(indexes):
return " ".join(str(i) for i in indexes)
def process(hrp, hex_str, base64_str):
success = True
try:
our_res = hex_encode(hrp, hex_str)
our_res_64 = base64_encode(hrp, base64_str, do_trim=False)
our_res_64_trim = base64_encode(hrp, base64_str)
our_res_bin = bin_encode(hrp, hex_str)
node_res, node_enc_err = node_encode(hrp, hex_str)
extract_our = extract_bech(our_res)
external_res = external_encode(hrp, extract_our)
dec_our = hex_decode(our_res)
dec_ext = external_decode(our_res)
_node_dec = node_decode(our_res)
hrp, dec_node = _node_dec[0].split(' ')
node_dec_err = _node_dec[1]
at_least_one_equal = our_res_64 == our_res or our_res_64_trim == our_res
if our_res_bin != our_res or \
not at_least_one_equal or \
our_res != external_res or \
(our_res != node_res and not node_enc_err):
success = False
print("ERROR: Our ENCODED result does not match reference result:")
print(f"HRP: {hrp}")
print(f"HEX: {hex_str}")
print(f"B64: {base64_str}")
print(f"BIN: {to_bin(hex_str)}")
print(f" Our result:\t\t{our_res}")
print(f" Our result B64:\t{our_res_64}")
print(f" Our result B64 T:\t{our_res_64_trim}")
print(f" Our result BIN:\t{our_res_bin}")
print(f" External result:\t{external_res}")
print(f" Node result:\t\t{node_res}")
with open("fuzzing_results.txt", "a") as f:
f.write("ERROR: Our ENCODED result does not match reference result:\n")
f.write(f"HRP: {hrp}\n")
f.write(f"HEX: {hex_str}\n")
f.write(f"B64: {base64_str}\n")
f.write(f"BIN: {to_bin(hex_str)}\n")
f.write(f" Our result:\t\t{our_res}\n")
f.write(f" Our result B64:\t{our_res_64}\n")
f.write(f" Our result B64 T:\t{our_res_64}\n")
f.write(f" Our result BIN:\t{our_res_bin}\n")
f.write(f" External result:\t{external_res}\n")
f.write(f" Node result:\t\t{node_res}\n")
f.write("\n")
if dec_ext not in extract_our or (dec_our != dec_node and not node_dec_err):
success = False
print("ERROR: Our DECODED result does not match node result:")
print(f" Our result:\t\t{dec_our}")
if dec_ext not in extract_our:
print(f" External result:\t{dec_ext} not in {extract_our}")
print(f" Node result:\t\t{dec_node}")
with open("fuzzing_results.txt", "a") as f:
f.write("ERROR: Our DECODED result does not match node result:\n")
f.write(f" Our result:\t\t{dec_our}\n")
if dec_ext not in extract_our:
f.write(f" External result: {dec_ext} not in {extract_our}\n")
f.write(f" Node result:\t\t{dec_node}\n")
f.write("\n")
except Exception as e:
success = False
print(e)
with open("fuzzing_results.txt", "a") as f:
f.write(f"{hrp}\n")
f.write(f"{e}\n")
f.write("\n")
return success
if __name__ == '__main__':
OUR_BINARY = sys.argv[1]
LIBBECH32ENC_BINARY = sys.argv[2]
LIBBECH32DEC_BINARY = sys.argv[3]
FUZZ_ITERATIONS = int(sys.argv[4])
FUZZ_SECONDS = int(sys.argv[5])
_hrp = 'v)zeod9[qg.ns)+}r}'
_hex_str = '857e'
_b64_str = to_base64(_hex_str.upper())
process('a', 'ff', to_base64('FF'))
fail_count = 0
start_time = datetime.datetime.now()
for _ in range(0, FUZZ_ITERATIONS):
if not process(_hrp, _hex_str, _b64_str):
    fail_count += 1
_hrp = generate_hrp()
_hex_str = generate_hex(_hrp)
_b64_str = to_base64(_hex_str.upper())
end_time = datetime.datetime.now()
if (end_time - start_time).seconds >= FUZZ_SECONDS:
print(f'Fuzzing stopped after {FUZZ_SECONDS} seconds')
break
print("DONE")
sys.exit(fail_count)
| 35.156667
| 114
| 0.561297
| 1,446
| 10,547
| 3.928077
| 0.123098
| 0.051761
| 0.046831
| 0.032394
| 0.585739
| 0.553873
| 0.516373
| 0.485035
| 0.426585
| 0.369366
| 0
| 0.02085
| 0.267849
| 10,547
| 299
| 115
| 35.274247
| 0.714711
| 0.012326
| 0
| 0.409091
| 0
| 0.008264
| 0.231316
| 0.028434
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066116
| false
| 0
| 0.028926
| 0.020661
| 0.161157
| 0.14876
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ba085171ad82d0c573dcc7bfc7f5421e63a5a9f
| 3,166
|
py
|
Python
|
ldt/utils/usaf/bcsd_preproc/forecast_task_07.py
|
andrewsoong/LISF
|
20e3b00a72b6b348c567d0703550f290881679b4
|
[
"Apache-2.0"
] | 67
|
2018-11-13T21:40:54.000Z
|
2022-02-23T08:11:56.000Z
|
ldt/utils/usaf/bcsd_preproc/forecast_task_07.py
|
andrewsoong/LISF
|
20e3b00a72b6b348c567d0703550f290881679b4
|
[
"Apache-2.0"
] | 679
|
2018-11-13T20:10:29.000Z
|
2022-03-30T19:55:25.000Z
|
ldt/utils/usaf/bcsd_preproc/forecast_task_07.py
|
andrewsoong/LISF
|
20e3b00a72b6b348c567d0703550f290881679b4
|
[
"Apache-2.0"
] | 119
|
2018-11-08T15:53:35.000Z
|
2022-03-28T10:16:01.000Z
|
#!/usr/bin/env python3
"""
#------------------------------------------------------------------------------
#
# SCRIPT: forecast_task_07.py
#
# PURPOSE: Combine all non-precip 6-hourly files into one file and copy BCSD
# precip files into the same directory. Based on FORECAST_TASK_07.sh.
#
# REVISION HISTORY:
# 24 Oct 2021: Ryan Zamora, first version
#
#------------------------------------------------------------------------------
"""
#
# Standard modules
#
import configparser
import os
import subprocess
import sys
#
# Local methods
#
def _usage():
"""Print command line usage."""
txt = f"[INFO] Usage: {(sys.argv[0])} current_year month_abbr config_file"
print(txt)
print("[INFO] where")
print("[INFO] current_year: Current year")
print("[INFO] month_abbr: Current month")
print("[INFO] config_file: Config file that sets up environment")
def _read_cmd_args():
"""Read command line arguments."""
if len(sys.argv) != 4:
print("[ERR] Invalid number of command line arguments!")
_usage()
sys.exit(1)
# current_year
try:
current_year = int(sys.argv[1])
except ValueError:
print(f"[ERR] Invalid argument for current_year! Received {(sys.argv[1])}")
_usage()
sys.exit(1)
if current_year < 0:
print(f"[ERR] Invalid argument for current_year! Received {(sys.argv[1])}")
_usage()
sys.exit(1)
# month_abbr
month_abbr = str(sys.argv[2])
# config_file
config_file = sys.argv[3]
if not os.path.exists(config_file):
print(f"[ERR] {config_file} does not exist!")
sys.exit(1)
return current_year, month_abbr, config_file
def read_config(config_file):
"""Read from bcsd_preproc config file."""
config = configparser.ConfigParser()
config.read(config_file)
return config
def _driver():
"""Main driver."""
current_year, month_abbr, config_file = _read_cmd_args()
# Setup local directories
config = read_config(config_file)
# Path of the main project directory
projdir = config["bcsd_preproc"]["projdir"]
# Number of precip ensembles needed
range_ens_fcst = list(range(1, 13)) + list(range(1, 13)) + list(range(1, 7))
range_ens_nmme = range(1, 31)
fcst_date = f"{month_abbr}01"
# Path for where forecast files are located:
indir=f"{projdir}/data/forecast/CFSv2_25km/raw/6-Hourly/{fcst_date}/{current_year}"
# Path for where the linked precip files should be placed:
outdir=f"{projdir}/data/forecast/NMME/linked_cfsv2_precip_files/{fcst_date}/{current_year}"
if not os.path.exists(outdir):
os.makedirs(outdir)
for iens, ens_value in enumerate(range_ens_fcst):
src_file=f"{indir}/ens{ens_value}"
dst_file=f"{outdir}/ens{range_ens_nmme[iens]}"
cmd = f"ln -sfn {src_file} {dst_file}"
print(cmd)
returncode = subprocess.call(cmd, shell=True)
if returncode != 0:
print("[ERR] Problem calling creating symbolic links!")
sys.exit(1)
print("[INFO] Done creating symbolic links")
#
# Main Method
#
if __name__ == "__main__":
_driver()
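# Worked example (editor's addition): the 30 linked NMME ensembles reuse the
# 12 CFSv2 members in a 12 + 12 + 6 pattern, as built in _driver above.
def _ensemble_mapping_example():
    fcst = list(range(1, 13)) + list(range(1, 13)) + list(range(1, 7))
    nmme = list(range(1, 31))
    # NMME ens13 and ens25 both link back to CFSv2 ens1.
    assert (fcst[12], nmme[12]) == (1, 13)
    assert (fcst[24], nmme[24]) == (1, 25)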
| 26.830508
| 95
| 0.622236
| 419
| 3,166
| 4.515513
| 0.357995
| 0.06871
| 0.021142
| 0.031712
| 0.154334
| 0.136364
| 0.088795
| 0.070825
| 0.070825
| 0.070825
| 0
| 0.017537
| 0.207517
| 3,166
| 117
| 96
| 27.059829
| 0.736548
| 0.253948
| 0
| 0.169492
| 0
| 0
| 0.334053
| 0.091066
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067797
| false
| 0
| 0.067797
| 0
| 0.169492
| 0.20339
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ba27d2ca0843358d969fed10afe5cbbd1851036
| 12,178
|
py
|
Python
|
model/modules/capsules.py
|
lidq92/pytorch-dynamic-routing-between-capsules
|
4388cd36193348cbb10035008360330e67acdd41
|
[
"MIT"
] | 10
|
2018-09-17T02:14:34.000Z
|
2021-06-17T12:16:35.000Z
|
model/modules/capsules.py
|
lidq92/pytorch-dynamic-routing-between-capsules
|
4388cd36193348cbb10035008360330e67acdd41
|
[
"MIT"
] | null | null | null |
model/modules/capsules.py
|
lidq92/pytorch-dynamic-routing-between-capsules
|
4388cd36193348cbb10035008360330e67acdd41
|
[
"MIT"
] | 2
|
2019-08-06T20:40:02.000Z
|
2020-01-02T08:24:39.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributions import Normal
def squash(s, dim=-1, eps=1e-8):
"""
"Squashing" non-linearity that shrunks short vectors to almost zero
length and long vectors to a length slightly below 1
v_j = ||s_j||^2 / (1 + ||s_j||^2) * s_j / ||s_j||
Args:
s: Vector before activation
dim: Dimension along which to calculate the norm
Returns:
v: Squashed vector
"""
squared_norm = torch.sum(s**2, dim=dim, keepdim=True)
v = squared_norm / (1 + squared_norm) * \
s / (torch.sqrt(squared_norm) + eps)
return v
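# Worked example (editor's addition): squash preserves direction and maps the
# norm n to n^2 / (1 + n^2) < 1. For s = [3, 4] (norm 5) the squashed norm is
# 25/26 ~= 0.9615.
def _squash_example():
    v = squash(torch.tensor([3.0, 4.0]))
    assert torch.allclose(v.norm(), torch.tensor(25.0 / 26.0))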
class PrimaryCapsules(nn.Module):
def __init__(self, in_channels, out_channels,
dim_caps, kernel_size=9, stride=2):
"""
Primary Capsules Layer
NIPS 2017
Args:
in_channels: Number of input channels
out_channels: Number of output channels
dim_caps: length of the output capsule vector
"""
super(PrimaryCapsules, self).__init__()
self.dim_caps = dim_caps
self._caps_channel = int(out_channels / dim_caps)
assert self._caps_channel * dim_caps == out_channels #
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.conv(x)
out = out.view(out.size(0), self._caps_channel,
out.size(2), out.size(3), self.dim_caps) #
out = out.view(out.size(0), -1, self.dim_caps) #
return squash(out)
class RoutingCapsules(nn.Module):
def __init__(self, in_dim, in_caps, num_caps, dim_caps,
num_routing=3, use_cuda=True):
"""
Routing Capsules Layer
NIPS 2017
Args:
in_dim: length of input capsule vector
in_caps: Number of input capsules (e.g. for the digits layer)
num_caps: Number of capsules in the capsule layer
dim_caps: length of the output capsule vector
num_routing: Number of iterations during routing algorithm
"""
super(RoutingCapsules, self).__init__()
self.use_cuda = use_cuda
self.in_dim = in_dim
self.in_caps = in_caps
self.num_caps = num_caps
self.dim_caps = dim_caps
self.num_routing = num_routing
self.W = nn.Parameter(0.01 * torch.randn(1, num_caps, in_caps,
dim_caps, in_dim ))
def __repr__(self):
"""
"""
tab = ' '
line = '\n'
res = self.__class__.__name__ + '('
res = res + line + tab + '(' + str(0) + '): ' + 'CapsuleLinear('
res = res + str(self.in_dim) + ', ' + str(self.dim_caps) + ')'
res = res + line + tab + '(' + str(1) + '): ' + 'Routing('
res = res + 'num_routing=' + str(self.num_routing) + ')'
res = res + line + ')'
return res
def forward(self, x):
batch_size = x.size(0)
# (batch_size, in_caps, in_dim) -> (batch_size, 1, in_caps, in_dim, 1)
x = x.unsqueeze(3).unsqueeze(1)
#
# W @ x =
# (1, num_caps, in_caps, dim_caps, in_dim)
# @
# (batch_size, 1, in_caps, in_dim, 1)
# =
# (batch_size, num_caps, in_caps, dim_caps, 1)
u_hat = torch.matmul(self.W, x)
# (batch_size, num_caps, in_caps, dim_caps)
u_hat = u_hat.squeeze(-1)
'''
detach u_hat during routing iterations
to prevent gradients from flowing, i.e.,
- In forward pass, u_hat_detached = u_hat;
- In backward, no gradient can flow from u_hat_detached back to x_hat.
'''
u_hat_detached = u_hat.detach()
# Routing algorithm
b = Variable(torch.zeros(batch_size, self.num_caps, self.in_caps, 1))
if self.use_cuda:
b = b.cuda()
for route_iter in range(self.num_routing-1):
# (batch_size, num_caps, in_caps, 1) -> Softmax along num_caps
c = F.softmax(b, dim=1)
# element-wise multiplication
# (batch_size, num_caps, in_caps, 1)
# *
# (batch_size, in_caps, num_caps, dim_caps)
# -> (batch_size, num_caps, in_caps, dim_caps)
# sum across in_caps ->
# (batch_size, num_caps, dim_caps)
s = (c * u_hat_detached).sum(dim=2)
# apply "squashing" non-linearity along dim_caps
v = squash(s)
# dot product agreement
# between the current output vj and the prediction uj|i
# (batch_size, num_caps, in_caps, dim_caps)
# @
# (batch_size, num_caps, dim_caps, 1)
# -> (batch_size, num_caps, in_caps, 1)
uv = torch.matmul(u_hat_detached, v.unsqueeze(-1))
b += uv # Note: it seems more appropriate here to use b = uv
'''
last iteration is done on the original u_hat, without the routing
weights update; use u_hat to compute v in order to backpropagate
the gradient
'''
c = F.softmax(b, dim=1)
s = (c * u_hat).sum(dim=2)
v = squash(s)
return v
class PrimaryCaps(nn.Module):
def __init__(self, A=32, B=32):
"""
Primary Capsule Layer
ICLR 2018
Args:
A: input channel
B: number of types of capsules.
"""
super(PrimaryCaps, self).__init__()
self.B = B
self.capsules_pose = nn.ModuleList([nn.Conv2d(in_channels=A,
out_channels=4 * 4,
kernel_size=1, stride=1)
for _ in range(self.B)])
self.capsules_activation = nn.ModuleList([nn.Conv2d(in_channels=A,
out_channels=1,
kernel_size=1, stride=1)
for _ in range(self.B)])
def forward(self, x):
poses = [self.capsules_pose[i](x) for i in range(self.B)]
poses = torch.cat(poses, dim=1)
activations = [self.capsules_activation[i](x) for i in range(self.B)]
activations = torch.sigmoid(torch.cat(activations, dim=1))
return poses, activations
class ConvCaps(nn.Module):
def __init__(self, B=32, C=32, K=3, stride=2, iteration=3,
coordinate_add=False, transform_share=False,
routing='EM_routing', use_cuda=True):
"""
Convolutional Capsule Layer
ICLR 2018
Args:
B: input number of types of capsules.
C: output number of types of capsules.
K: kernel size of convolution. K = 0 means the capsules in layer L+1's receptive field contain all capsules in layer L, which is used in the final ClassCaps layer.
stride: stride of convolution
iteration: number of EM iterations
coordinate_add: whether to use Coordinate Addition
transform_share: whether to share transformation matrix.
routing: 'EM_routing' or 'angle_routing'
"""
super(ConvCaps, self).__init__()
self.routing = routing
self.use_cuda = use_cuda
self.B = B
self.C = C
self.K = K # K = 0 means full receptive field like class capsules
self.Bkk = None
self.Cww = None
self.b = None # batch_size, set in the forward pass
self.stride = stride
self.coordinate_add = coordinate_add
# transform_share is also set to True if K = 0
self.transform_share = transform_share or K == 0
self.beta_v = None
self.beta_a = None
if not transform_share:
self.W = nn.Parameter(torch.randn(B, K, K, C, 4, 4))
else:
self.W = nn.Parameter(torch.randn(B, C, 4, 4))
self.iteration = iteration
def coordinate_addition(self, width_in, votes):
add = [[i / width_in, j / width_in] for i in range(width_in) for j in range(width_in)] # K,K,w,w
add = Variable(torch.Tensor(add))
if self.use_cuda:
add = add.cuda()
add = add.view(1, 1, self.K, self.K, 1, 1, 1, 2)
add = add.expand(self.b, self.B, self.K, self.K, self.C, 1, 1, 2).contiguous()
votes[:, :, :, :, :, :, :, :2, -1] = votes[:, :, :, :, :, :, :, :2, -1] + add
return votes
def down_w(self, w):
return range(w * self.stride, w * self.stride + self.K)
def EM_routing(self, lambda_, a_, V):
# routing coefficient
R = Variable(torch.ones([self.b, self.Bkk, self.Cww]), requires_grad=False)
if self.use_cuda:
R = R.cuda()
R /= self.Cww
for i in range(self.iteration):
# M-step
R = (R * a_)[..., None]
sum_R = R.sum(1)
mu = ((R * V).sum(1) / sum_R)[:, None, :, :]
sigma_square = (R * (V - mu) ** 2).sum(1) / sum_R
# E-step
if i != self.iteration - 1:
mu, sigma_square, V_, a__ = mu.data, sigma_square.data, V.data, a_.data
normal = Normal(mu, sigma_square[:, None, :, :] ** (1 / 2))
p = torch.exp(normal.log_prob(V_))
ap = a__ * p.sum(-1)
R = Variable(ap / torch.sum(ap, -1)[..., None], requires_grad=False)
else:
const = (self.beta_v.expand_as(sigma_square) + torch.log(sigma_square)) * sum_R
a = torch.sigmoid(lambda_ * (self.beta_a.repeat(self.b, 1) - const.sum(2)))
return a, mu
def angle_routing(self, lambda_, a_, V):
# routing coefficient
R = Variable(torch.zeros([self.b, self.Bkk, self.Cww]), requires_grad=False)
if self.use_cuda:
R = R.cuda()
for i in range(self.iteration):
R = F.softmax(R, dim=1)
R = (R * a_)[..., None]
sum_R = R.sum(1)
mu = ((R * V).sum(1) / sum_R)[:, None, :, :]
if i != self.iteration - 1:
u_v = mu.permute(0, 2, 1, 3) @ V.permute(0, 2, 3, 1)
u_v = u_v.squeeze().permute(0, 2, 1) / V.norm(2, -1) / mu.norm(2, -1)
R = R.squeeze() + u_v
else:
sigma_square = (R * (V - mu) ** 2).sum(1) / sum_R
const = (self.beta_v.expand_as(sigma_square) + torch.log(sigma_square)) * sum_R
a = torch.sigmoid(lambda_ * (self.beta_a.repeat(self.b, 1) - const.sum(2)))
return a, mu
def forward(self, x, lambda_):
poses, activations = x
width_in = poses.size(2)
w = int((width_in - self.K) / self.stride + 1) if self.K else 1 # 5
self.Cww = w * w * self.C
self.b = poses.size(0) #
if self.beta_v is None:
if self.use_cuda:
self.beta_v = nn.Parameter(torch.randn(1, self.Cww, 1)).cuda()
self.beta_a = nn.Parameter(torch.randn(1, self.Cww)).cuda()
else:
self.beta_v = nn.Parameter(torch.randn(1, self.Cww, 1))
self.beta_a = nn.Parameter(torch.randn(1, self.Cww))
if self.transform_share:
if self.K == 0:
self.K = width_in # class Capsules' kernel = width_in
W = self.W.view(self.B, 1, 1, self.C, 4, 4).expand(self.B, self.K, self.K, self.C, 4, 4).contiguous()
else:
W = self.W # B,K,K,C,4,4
self.Bkk = self.K * self.K * self.B
# used to store every capsule i's poses in each capsule c's receptive field
pose = poses.contiguous() # b,16*32,12,12
pose = pose.view(self.b, 16, self.B, width_in, width_in).permute(0, 2, 3, 4, 1).contiguous() # b,B,12,12,16
poses = torch.stack([pose[:, :, self.stride * i:self.stride * i + self.K,
self.stride * j:self.stride * j + self.K, :] for i in range(w) for j in range(w)],
dim=-1) # b,B,K,K,w*w,16
poses = poses.view(self.b, self.B, self.K, self.K, 1, w, w, 4, 4) # b,B,K,K,1,w,w,4,4
W_hat = W[None, :, :, :, :, None, None, :, :] # 1,B,K,K,C,1,1,4,4
votes = W_hat @ poses # b,B,K,K,C,w,w,4,4
if self.coordinate_add:
votes = self.coordinate_addition(width_in, votes)
activation = activations.view(self.b, -1)[..., None].repeat(1, 1, self.Cww)
else:
activations_ = [activations[:, :, self.down_w(x), :][:, :, :, self.down_w(y)]
for x in range(w) for y in range(w)]
activation = torch.stack(
activations_, dim=4).view(self.b, self.Bkk, 1, -1) \
.repeat(1, 1, self.C, 1).view(self.b, self.Bkk, self.Cww)
votes = votes.view(self.b, self.Bkk, self.Cww, 16)
activations, poses = getattr(self, self.routing)(lambda_, activation, votes)
return poses.view(self.b, self.C, w, w, -1), activations.view(self.b, self.C, w, w)
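# Shape sketch (editor's addition): the NIPS-2017 layers above wired together
# with MNIST-like sizes from the CapsNet paper; the names here are hypothetical.
def _capsnet_shape_example():
    x = torch.randn(2, 256, 20, 20)  # e.g. output of a first 9x9 conv layer
    primary = PrimaryCapsules(in_channels=256, out_channels=256,
                              dim_caps=8, kernel_size=9, stride=2)
    u = primary(x)                   # -> (2, 32*6*6, 8) = (2, 1152, 8)
    digits = RoutingCapsules(in_dim=8, in_caps=1152, num_caps=10,
                             dim_caps=16, num_routing=3, use_cuda=False)
    v = digits(u)                    # -> (2, 10, 16), one pose vector per class
    assert v.shape == (2, 10, 16)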
| 36.029586
| 172
| 0.579323
| 1,842
| 12,178
| 3.667752
| 0.140065
| 0.020722
| 0.017318
| 0.017318
| 0.322232
| 0.276643
| 0.225281
| 0.19331
| 0.143132
| 0.124334
| 0
| 0.023848
| 0.28379
| 12,178
| 337
| 173
| 36.136499
| 0.750745
| 0.227377
| 0
| 0.255102
| 0
| 0
| 0.007387
| 0
| 0
| 0
| 0
| 0
| 0.005102
| 1
| 0.071429
| false
| 0
| 0.02551
| 0.005102
| 0.168367
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ba31e643aa2124a524e4368c26dcf7ed0147d91
| 16,807
|
py
|
Python
|
ci/test_marathon_lb_dcos_e2e.py
|
vivint-smarthome/marathon-lb
|
d8dd02a1889d3db6e3e7fefa62ff178b3ab72ce9
|
[
"Apache-2.0"
] | 511
|
2015-10-17T09:28:28.000Z
|
2022-02-20T21:58:56.000Z
|
ci/test_marathon_lb_dcos_e2e.py
|
vivint-smarthome/marathon-lb
|
d8dd02a1889d3db6e3e7fefa62ff178b3ab72ce9
|
[
"Apache-2.0"
] | 575
|
2015-10-09T11:54:09.000Z
|
2021-11-22T20:50:19.000Z
|
ci/test_marathon_lb_dcos_e2e.py
|
vivint-smarthome/marathon-lb
|
d8dd02a1889d3db6e3e7fefa62ff178b3ab72ce9
|
[
"Apache-2.0"
] | 411
|
2015-10-29T13:41:45.000Z
|
2022-02-11T09:27:50.000Z
|
#!python3
import contextlib
import json
import logging
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from dcos_e2e import cluster
from dcos_e2e import node
from dcos_test_utils import helpers as dcos_helpers
from dcos_test_utils import iam as dcos_iam
from dcos_test_utils import enterprise as dcos_ee_api
from dcos_test_utils import dcos_api
from dcos_test_utils import package
import dcos_installer_tools
import pytest
import test_marathon_lb
DCOS_E2E_BACKEND = 'DCOS_E2E_BACKEND'
DCOS_E2E_CLUSTER_ID = 'DCOS_E2E_CLUSTER_ID'
DCOS_E2E_NODE_TRANSPORT = 'DCOS_E2E_NODE_TRANSPORT'
DCOS_LOGIN_UNAME = 'DCOS_LOGIN_UNAME'
DCOS_LOGIN_PW = 'DCOS_LOGIN_PW'
BACKEND_AWS = 'aws'
BACKEND_DOCKER = 'docker'
BACKEND_VAGRANT = 'vagrant'
MARATHON_LB_IMAGE = os.environ.get('MARATHON_LB_IMAGE',
'marathon-lb:latest')
MARATHON_LB_VERSION = os.environ.get('MARATHON_LB_VERSION',
'dev')
OSS = 'oss'
ENTERPRISE = 'enterprise'
VARIANTS = {OSS: dcos_installer_tools.DCOSVariant.OSS,
ENTERPRISE: dcos_installer_tools.DCOSVariant.ENTERPRISE}
VARIANT_VALUES = dict((value.value, value) for value in VARIANTS.values())
logging.captureWarnings(True)
# NOTE(jkoelker) Define some helpers that should eventually be upstreamed
class Package(package.Cosmos):
def render(self, name, options=None, version=None):
params = {'packageName': name}
if version:
params['packageVersion'] = version
if options:
params['options'] = options
self._update_headers('render',
request_version=1,
response_version=1)
return self._post('/render', params).json().get('marathonJson')
class Secrets(dcos_helpers.ApiClientSession):
def __init__(self, default_url: dcos_helpers.Url, session=None):
super().__init__(default_url)
if session:
self.session = session
def list_stores(self):
r = self.get('/store')
r.raise_for_status()
return r.json()['array']
def list_secrets(self, store, path='/'):
params = {'list': True}
r = self.get(self.secret_uri(store, path), params=params)
r.raise_for_status()
return r.json()['array']
def create_secret(self, path, value, store='default'):
headers = None
data = None
if not isinstance(value, (str, bytes)):
value = json.dumps(value,
sort_keys=True,
indent=None,
ensure_ascii=False,
separators=(',', ':'))
json_value = {'value': value}
if isinstance(value, bytes):
headers = {'Content-Type': 'application/octet-stream'}
data = value
json_value = None
return self.put(self.secret_uri(store, path),
json=json_value,
data=data,
headers=headers)
def delete_secret(self, path, store='default'):
return self.delete(self.secret_uri(store, path))
@staticmethod
def secret_uri(store, path):
if not path.startswith('/'):
path = '/' + path
return '/secret/{}{}'.format(store, path)
# NOTE(jkoelker) Iam helpers defined at module level so they can be
#                monkey-patched onto dcos_iam.Iam below
def add_user_to_group(self, user, group):
    return self.put('/groups/{}/users/{}'.format(group, user))
def delete_user_from_group(self, user, group):
    if not self.user_in_group(user, group):
        return
    return self.delete('/groups/{}/users/{}'.format(group, user))
def list_group_users(self, group):
    r = self.get('/groups/{}/users'.format(group))
    r.raise_for_status()
    return r.json()['array']
def user_in_group(self, user, group):
    return user in [a['user']['uid']
                    for a in self.list_group_users(group)]
# NOTE(jkoelker) Monkey patch in our helpers
dcos_api.DcosApiSession.package = property(
lambda s: Package(default_url=s.default_url.copy(path='package'),
session=s.copy().session))
dcos_api.DcosApiSession.secrets = property(
lambda s: Secrets(
default_url=s.default_url.copy(path='secrets/v1'),
session=s.copy().session))
dcos_ee_api.EnterpriseApiSession.secrets = property(
lambda s: Secrets(
default_url=s.default_url.copy(path='secrets/v1'),
session=s.copy().session))
dcos_iam.Iam.add_user_to_group = add_user_to_group
dcos_iam.Iam.delete_user_from_group = delete_user_from_group
dcos_iam.Iam.list_group_users = list_group_users
dcos_iam.Iam.user_in_group = user_in_group
class Cluster(cluster.Cluster):
_USER_ZKCLI_CMD = (
'.',
'/opt/mesosphere/environment.export',
'&&',
'zkCli.sh',
'-server',
'"zk-1.zk:2181,zk-2.zk:2181,zk-3.zk:2181,zk-4.zk:2181,'
'zk-5.zk:2181"'
)
_USER_OSS_EMAIL = 'albert@bekstil.net'
_USER_OSS_ZK_PATH = '/dcos/users/{}'.format(_USER_OSS_EMAIL)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._variant = dcos_installer_tools.DCOSVariant.OSS
@property
def _any_master(self):
return next(iter(self.masters))
def _any_master_run(self, cmd, *args, **kwargs):
return self._any_master.run(list(cmd), *args, **kwargs)
@property
def _oss_user_exists(self):
cmd = self._USER_ZKCLI_CMD + ('get',
self._USER_OSS_ZK_PATH)
output = self._any_master_run(cmd, shell=True)
stdout = output.stdout.decode()
if stdout.strip().split('\n')[-1] == self._USER_OSS_EMAIL:
return True
return False
def _create_oss_user(self):
if self._oss_user_exists:
return
cmd = self._USER_ZKCLI_CMD + ('create',
self._USER_OSS_ZK_PATH,
self._USER_OSS_EMAIL)
self._any_master_run(cmd, shell=True)
def _delete_oss_user(self):
cmd = self._USER_ZKCLI_CMD + ('delete', self._USER_OSS_ZK_PATH)
self._any_master_run(cmd, shell=True)
def _enterprise_session(self):
cmd = ('cat', '/opt/mesosphere/etc/bootstrap-config.json')
config_result = self._any_master_run(cmd)
config = json.loads(config_result.stdout.decode())
ssl_enabled = config['ssl_enabled']
scheme = 'https://' if ssl_enabled else 'http://'
dcos_url = scheme + str(self._any_master.public_ip_address)
api = dcos_ee_api.EnterpriseApiSession(
dcos_url=dcos_url,
masters=[str(n.public_ip_address) for n in self.masters],
slaves=[str(n.public_ip_address) for n in self.agents],
public_slaves=[
str(n.public_ip_address) for n in self.public_agents
],
auth_user=dcos_api.DcosUser(credentials=self.credentials),
)
if api.ssl_enabled:
api.set_ca_cert()
api.login_default_user()
api.set_initial_resource_ids()
return api
def _oss_session(self):
api = dcos_api.DcosApiSession(
dcos_url='http://{}'.format(self._any_master.public_ip_address),
masters=[str(n.public_ip_address) for n in self.masters],
slaves=[str(n.public_ip_address) for n in self.agents],
public_slaves=[
str(n.public_ip_address) for n in self.public_agents
],
auth_user=dcos_api.DcosUser(credentials=self.credentials),
)
api.login_default_user()
return api
def _session(self):
if self.enterprise:
return self._enterprise_session()
return self._oss_session()
@property
def credentials(self):
if self.enterprise:
return {
'uid': os.environ.get(DCOS_LOGIN_UNAME, 'admin'),
'password': os.environ.get(DCOS_LOGIN_PW, 'admin')
}
return dcos_helpers.CI_CREDENTIALS
@property
def enterprise(self):
return self._variant == dcos_installer_tools.DCOSVariant.ENTERPRISE
@property
def oss(self):
return self._variant == dcos_installer_tools.DCOSVariant.OSS
@property
def variant(self):
return self._variant
@variant.setter
def variant(self, value):
        # NOTE(jkoelker) Hack because enums from vendored libraries
# are technically different
if hasattr(value, 'value') and value.value in VARIANT_VALUES:
value = VARIANT_VALUES[value.value]
if value in VARIANTS:
value = VARIANTS[value]
if value not in dcos_installer_tools.DCOSVariant:
msg = 'Expected one of {} or {} got {}'
raise ValueError(msg.format(tuple(VARIANTS.keys()),
dcos_installer_tools.DCOSVariant,
value))
self._variant = value
def create_user(self):
if self.enterprise:
return
self._create_oss_user()
def delete_user(self):
if self.enterprise:
return
self._delete_oss_user()
def create_service_account(self, name, secret, description=None,
superuser=False):
if not self.enterprise:
return
if description is None:
description = '{} service account'.format(name)
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend())
priv = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption())
pub = key.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
priv = priv.decode('ascii')
pub = pub.decode('ascii')
with self.session as session:
iam = session.iam
try:
iam.create_service(name, pub, description)
except AssertionError:
iam.delete_service(name)
iam.create_service(name, pub, description)
if superuser:
iam.add_user_to_group(name, 'superusers')
login_endpoint = 'https://leader.mesos/{}/auth/login'
                # NOTE(jkoelker) override the login_endpoint to force it to
                #                use `leader.mesos`; by default it is set
                #                to the dcos_url the session is created with
sa_creds = iam.make_service_account_credentials(name, priv)
sa_creds['login_endpoint'] = login_endpoint.format(
iam.default_url.path)
secret_ret = session.secrets.create_secret(secret, sa_creds)
if secret_ret.status_code != 201:
session.secrets.delete_secret(secret, store='default')
session.secrets.create_secret(secret, sa_creds)
def delete_service_account(self, name, secret):
if not self.enterprise:
return
with self.session as session:
iam = session.iam
iam.delete_user_from_group(name, 'superusers')
session.secrets.delete_secret(secret, store='default')
iam.delete_service(name)
@contextlib.contextmanager
def service_account(self, name, secret, description=None,
superuser=False):
try:
yield self.create_service_account(name,
secret,
description,
superuser)
finally:
self.delete_service_account(name, secret)
@property
@contextlib.contextmanager
def session(self):
with self.user:
yield self._session()
@property
@contextlib.contextmanager
def user(self):
try:
yield self.create_user()
finally:
self.delete_user()
def get_docker_cluster(cluster_id, transport, **kwargs):
from dcos_e2e_cli.dcos_docker.commands import _common
if cluster_id not in _common.existing_cluster_ids():
return None
cluster_containers = _common.ClusterContainers(cluster_id, transport)
cluster = Cluster.from_nodes(
masters=set(map(cluster_containers.to_node,
cluster_containers.masters)),
agents=set(map(cluster_containers.to_node,
cluster_containers.agents)),
public_agents=set(map(cluster_containers.to_node,
cluster_containers.public_agents)))
cluster.variant = cluster_containers.dcos_variant
return cluster
def get_cluster():
backend = os.environ.get(DCOS_E2E_BACKEND, BACKEND_DOCKER)
cluster_id = os.environ.get(DCOS_E2E_CLUSTER_ID, 'default')
if backend == BACKEND_AWS:
return None
if backend == BACKEND_VAGRANT:
return None
transport = os.environ.get(DCOS_E2E_NODE_TRANSPORT, 'docker-exec')
if transport == 'ssh':
transport = node.Transport.SSH
else:
transport = node.Transport.DOCKER_EXEC
return get_docker_cluster(cluster_id, transport)
@pytest.fixture(scope='session')
def dcos_marathon_lb_session():
'''Fixture to return `cluster.session` after deploying `marathon-lb`'''
cluster = get_cluster()
with cluster.session as session:
options = {
'marathon-lb': {
'sysctl-params': ' '.join(
['net.ipv4.tcp_fin_timeout=30',
'net.core.somaxconn=10000']),
}
}
if cluster.enterprise:
options['marathon-lb'].update({
'secret_name': 'mlb-secret',
'marathon-uri': 'https://master.mesos:8443',
'strict-mode': True
})
with cluster.service_account('mlb-principal',
'mlb-secret',
superuser=True):
app = session.package.render('marathon-lb', options=options)
app['container']['docker']['image'] = MARATHON_LB_IMAGE
app['labels']['DCOS_PACKAGE_VERSION'] = MARATHON_LB_VERSION
with session.marathon.deploy_and_cleanup(app):
yield session
@pytest.fixture(scope='session')
def agent_public_ip(dcos_marathon_lb_session):
    '''Fixture to return the first public agent's IP address'''
return dcos_marathon_lb_session.public_slaves[0]
@pytest.fixture(scope='session')
def dcos_version(dcos_marathon_lb_session):
    '''Fixture to return the cluster's DC/OS version'''
return dcos_marathon_lb_session.get_version()
@pytest.fixture(scope='session',
params=(['backends/' + f
for f in os.listdir('backends')] +
['backends_1.9/' + f
for f in os.listdir('backends_1.9')]))
def backend_app(request, dcos_version):
if dcos_version.startswith('1.9.'):
if not request.param.startswith('backends_1.9/'):
return pytest.skip('Not a 1.9 backend')
return test_marathon_lb.get_json(request.param)
if request.param.startswith('backends_1.9/'):
return pytest.skip('Not a 1.9 cluster')
return test_marathon_lb.get_json(request.param)
@pytest.fixture(scope='session')
def app_deployment(dcos_marathon_lb_session, backend_app):
session = dcos_marathon_lb_session
with session.marathon.deploy_and_cleanup(backend_app,
check_health=False):
app_id = backend_app['id']
backend_app['name'] = app_id[1:] if app_id[0] == '/' else app_id
yield backend_app
@pytest.fixture(scope='session')
def app_port(app_deployment, agent_public_ip):
return test_marathon_lb.get_app_port(app_deployment['name'],
agent_public_ip)
def test_port(app_deployment, app_port):
assert app_port == app_deployment["labels"]["HAPROXY_0_PORT"]
def test_response(app_deployment, app_port, agent_public_ip):
(response,
status_code) = test_marathon_lb.get_app_content(app_port,
agent_public_ip)
assert status_code == 200
assert response == app_deployment['name']
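# A typical invocation of this suite (assumed workflow, not part of the file):
# point it at an existing dcos-e2e Docker cluster via the environment variables
# defined above, then run pytest, e.g.
#   DCOS_E2E_BACKEND=docker DCOS_E2E_CLUSTER_ID=default \
#   DCOS_E2E_NODE_TRANSPORT=docker-exec pytest ci/test_marathon_lb_dcos_e2e.py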
| 32.571705
| 76
| 0.609091
| 1,939
| 16,807
| 5.017019
| 0.167612
| 0.023643
| 0.014803
| 0.020868
| 0.366571
| 0.275185
| 0.203845
| 0.153783
| 0.120888
| 0.067845
| 0
| 0.007462
| 0.290355
| 16,807
| 515
| 77
| 32.634951
| 0.808166
| 0.032903
| 0
| 0.221932
| 0
| 0.002611
| 0.078122
| 0.013924
| 0
| 0
| 0
| 0
| 0.010444
| 1
| 0.109661
| false
| 0.002611
| 0.046997
| 0.023499
| 0.276762
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ba7975d420153a385e3680b17a15d19e06af3c9
| 308
|
py
|
Python
|
day1.py
|
danmana/adventofcode2017
|
6f80cd7c2382453b6e9d577975c2f02a024095c5
|
[
"MIT"
] | null | null | null |
day1.py
|
danmana/adventofcode2017
|
6f80cd7c2382453b6e9d577975c2f02a024095c5
|
[
"MIT"
] | null | null | null |
day1.py
|
danmana/adventofcode2017
|
6f80cd7c2382453b6e9d577975c2f02a024095c5
|
[
"MIT"
] | null | null | null |
def sumOf(s, offset):
    # Sum every digit that matches the digit `offset` places ahead (circular).
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    n = len(s)
    for i in range(n):
        if s[i] == s[(i + offset) % n]:
            total += int(s[i])
    return total
with open("./input/input1.txt", "r") as file:
    for s in file:
        s = s.strip()
        print('Part 1: ', sumOf(s, 1))
        print('Part 2: ', sumOf(s, len(s) // 2))
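# Sanity checks against the worked examples in the Advent of Code 2017 Day 1
# statement (added for illustration; not part of the original script):
assert sumOf("1122", 1) == 3       # 1 and 2 each match the next digit
assert sumOf("91212129", 1) == 9   # only the final 9 matches the first (circular)
assert sumOf("1212", 2) == 6       # part 2: compare with the digit halfway around
assert sumOf("123123", 3) == 12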
| 14
| 43
| 0.519481
| 59
| 308
| 2.711864
| 0.457627
| 0.1125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029915
| 0.24026
| 308
| 22
| 44
| 14
| 0.653846
| 0
| 0
| 0
| 0
| 0
| 0.114007
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.153846
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7baf6ff631178bc7ddca808d29592a1384d2ce35
| 10,677
|
py
|
Python
|
stanCode_projects/my_drawing/my_drawing.py
|
ShihYesWei/stanCode-projects
|
69104b7be3d8c3fbd34935c1d4e15e40961e4556
|
[
"MIT"
] | null | null | null |
stanCode_projects/my_drawing/my_drawing.py
|
ShihYesWei/stanCode-projects
|
69104b7be3d8c3fbd34935c1d4e15e40961e4556
|
[
"MIT"
] | null | null | null |
stanCode_projects/my_drawing/my_drawing.py
|
ShihYesWei/stanCode-projects
|
69104b7be3d8c3fbd34935c1d4e15e40961e4556
|
[
"MIT"
] | null | null | null |
"""
File: my_drawing
Author name: Alan Chen
----------------------
This program draws a recently famous picture of Gian(技安), one of the main characters in Doraemon(哆啦A夢).
In the original picture, Gian is scared by something off-screen. Here, the thing that scares him is
reassigned to be the Illuminati symbol together with a string of PYTHON.
"""
from campy.graphics.gobjects import GOval, GRect, GLine, GLabel, GPolygon, GArc
from campy.graphics.gwindow import GWindow
w = GWindow(1000, 650)
def main():
"""
Draw a scared Gian.
"""
'''
#This is for adjusting the position
for i in range(0, 1000, 100):
li = GLine(i, 0, i, 650)
locatei = GLabel(str(i))
w.add(li)
w.add(locatei, i, 20)
for j in range(0, 700, 100):
lj = GLine(0, j, 1000, j)
locatej = GLabel(str(j))
w.add(lj)
w.add(locatej, 0, j)
'''
    # background: eleven coloured rays fanning out from the point (666, 325),
    # expressed as (second vertex, third vertex, colour) triples
    rays = [
        ((0, 0), (0, 325), 'red'),
        ((0, 325), (0, 650), 'orange'),
        ((333, 650), (0, 650), 'lightgreen'),
        ((333, 650), (666, 650), 'slategrey'),
        ((1000, 650), (666, 650), 'darkcyan'),
        ((1000, 650), (1000, 400), 'greenyellow'),
        ((1000, 400), (1000, 200), 'khaki'),
        ((1000, 0), (1000, 200), 'mistyrose'),
        ((1000, 0), (666, 0), 'plum'),
        ((350, 0), (666, 0), 'magenta'),
        ((350, 0), (0, 0), 'tomato'),
    ]
    for v2, v3, color in rays:
        bg = GPolygon()
        bg.add_vertex((666, 325))
        bg.add_vertex(v2)
        bg.add_vertex(v3)
        bg.filled = True
        bg.fill_color = color
        bg.color = color
        w.add(bg)
#body
body = GOval(900, 200)
body.filled = True
body.fill_color = 'Steelblue'
body.color = 'blue'
w.add(body, 220, 570)
#face
lower_face = GOval(530, 380)
lower_face.filled = True
lower_face.fill_color = 'Steelblue'
lower_face.color = 'navy'
w.add(lower_face, 405, 260)
upper_face = GOval(485, 575)
upper_face.filled = True
upper_face.fill_color = 'Steelblue'
upper_face.color = 'Steelblue'
w.add(upper_face, 423, 40)
shadow_on_face = GOval(420, 330)
shadow_on_face.filled = True
shadow_on_face.fill_color = 'Cadetblue'
shadow_on_face.color = 'Cadetblue'
w.add(shadow_on_face, 455, 230)
shadow_on_face2 = GOval(390, 370)
shadow_on_face2.filled = True
shadow_on_face2.fill_color = 'Cadetblue'
shadow_on_face2.color = 'Cadetblue'
w.add(shadow_on_face2, 480, 170)
# right_eye
right_eye1 = GOval(90, 90)
right_eye1.filled = True
right_eye1.fill_color = 'powderblue'
right_eye1.color = 'black'
w.add(right_eye1, 525, 225)
right_eye2 = GOval(45, 80)
right_eye2.color = 'black'
w.add(right_eye2, 546, 231)
right_eye3 = GOval(30, 45)
right_eye3.color = 'black'
w.add(right_eye3, 552, 253)
right_eye4 = GOval(5, 10)
right_eye4.filled = True
right_eye4.fill_color = 'black'
right_eye4.color = 'black'
w.add(right_eye4, 565, 271)
# left_eye
left_eye1 = GOval(90, 90)
left_eye1.filled = True
left_eye1.fill_color = 'powderblue'
left_eye1.color = 'black'
w.add(left_eye1, 710, 230)
left_eye2 = GOval(60, 80)
left_eye2.color = 'black'
w.add(left_eye2, 725, 235)
left_eye3 = GOval(25, 50)
left_eye3.color = 'black'
w.add(left_eye3, 740, 250)
left_eye4 = GOval(5, 10)
left_eye4.filled = True
left_eye4.fill_color = 'black'
left_eye4.color = 'black'
w.add(left_eye4, 750, 270)
# nose
nose = GOval(80, 52) # 610 351
nose.filled = True
nose.fill_color = 'DarkSeaGreen'
nose.color = 'black'
w.add(nose, 610, 347)
    # mouth
    for i in range(10):
        mouth = GOval(50, 80)
        mouth.filled = True
        mouth.fill_color = 'navy'
        mouth.color = 'navy'
        w.add(mouth, 560 + 4 * i, 430 - i)
    for i in range(100):
        mouth = GOval(50, 80)
        mouth.filled = True
        mouth.fill_color = 'navy'
        mouth.color = 'navy'
        w.add(mouth, 600 + i, 420)
# tongue
for i in range(15):
tongue = GOval(50, 40)
tongue.filled = True
tongue.fill_color = 'mediumblue'
tongue.color = 'mediumblue'
w.add(tongue, 570 + 2 * i, 470 - i)
for i in range(10):
tongue = GOval(50, 45)
tongue.filled = True
tongue.fill_color = 'mediumblue'
tongue.color = 'mediumblue'
w.add(tongue, 600 + i, 455)
for i in range(25):
tongue = GOval(50, 30)
tongue.filled = True
tongue.fill_color = 'mediumblue'
tongue.color = 'mediumblue'
w.add(tongue, 600 + i, 475)
for i in range(50):
tongue = GOval(50, 45)
tongue.filled = True
tongue.fill_color = 'mediumblue'
tongue.color = 'mediumblue'
w.add(tongue, 650 + i, 455)
# hair
top_hair = GOval(330, 95)
top_hair.filled = True
top_hair.fill_color = 'navy'
top_hair.color = 'navy'
w.add(top_hair, 505, 25)
bangs = GPolygon()
bangs.add_vertex((510, 82))
bangs.add_vertex((620, 82))
bangs.add_vertex((560, 147))
bangs.filled = True
bangs.fill_color = 'navy'
bangs.color = 'navy'
w.add(bangs)
bangs = GPolygon()
bangs.add_vertex((580, 98))
bangs.add_vertex((690, 98))
bangs.add_vertex((635, 155))
bangs.filled = True
bangs.fill_color = 'navy'
bangs.color = 'navy'
w.add(bangs)
bangs = GPolygon()
bangs.add_vertex((650, 96))
bangs.add_vertex((770, 96))
bangs.add_vertex((710, 150))
bangs.filled = True
bangs.fill_color = 'navy'
bangs.color = 'navy'
w.add(bangs)
bangs = GPolygon()
bangs.add_vertex((740, 85))
bangs.add_vertex((825, 85))
bangs.add_vertex((780, 148))
bangs.filled = True
bangs.fill_color = 'navy'
bangs.color = 'navy'
w.add(bangs)
for i in range(80): # rightside
side = GOval(40, 90)
side.filled = True
side.fill_color = 'navy'
side.color = 'navy'
w.add(side, 800 + i, 55 + i ** 1.2)
for i in range(100): # leftside
side = GOval(40, 40)
side.filled = True
side.fill_color = 'navy'
side.color = 'navy'
w.add(side, 500 - i, 60 + i ** 1.2)
# right_ear
right_ear = GOval(70, 130)
right_ear.filled = True
right_ear.fill_color = 'Steelblue'
right_ear.color = 'blue'
w.add(right_ear, 395, 250)
right_inear = GOval(50, 80)
right_inear.filled = True
right_inear.fill_color = 'royalblue'
right_inear.color = 'blue'
w.add(right_inear, 410, 290)
# left_ear
left_ear = GOval(70, 130)
left_ear.filled = True
left_ear.fill_color = 'Steelblue'
left_ear.color = 'blue'
w.add(left_ear, 880, 260)
left_inear = GOval(50, 80)
left_inear.filled = True
left_inear.fill_color = 'royalblue'
left_inear.color = 'blue'
w.add(left_inear, 890, 290)
# tears
t1 = GOval(50, 25)
t1.filled = True
t1.fill_color = 'aqua'
w.add(t1, 525, 300)
t1 = GOval(50, 25)
t1.filled = True
t1.fill_color = 'aqua'
w.add(t1, 750, 300)
#left tears
for i in range(0, 10, 2):
tear = GOval(15, 50)
tear.filled = True
tear.fill_color = 'aqua'
tear.color = 'aqua'
w.add(tear, 525 - 2* i, 300 + 10 * i)
for i in range(0, 10, 2):
tear = GOval(21, 40)
tear.filled = True
tear.fill_color = 'aqua'
tear.color = 'aqua'
w.add(tear, 515 + i, 400 + 10 * i)
for i in range(0, 10, 2):
tear = GOval(18, 40)
tear.filled = True
tear.fill_color = 'aqua'
tear.color = 'aqua'
w.add(tear, 525, 500 + 10 * i)
#right tears
for i in range(0, 10, 2):
tear = GOval(5, 50)
tear.filled = True
tear.fill_color = 'aqua'
tear.color = 'aqua'
w.add(tear, 790 + 2 * i, 300 + 10 * i)
for i in range(0, 10, 2):
tear = GOval(11, 40)
tear.filled = True
tear.fill_color = 'aqua'
tear.color = 'aqua'
w.add(tear, 808 - i, 410 + 10 * i)
#lines
line1 = GLine(525, 175, 575, 185)
w.add(line1)
line2 = GLine(575,185, 625, 270)
w.add(line2)
line3 = GLine(710, 255, 760, 170)
w.add(line3)
line4 = GLine(651, 400, 651, 420)
w.add(line4)
line5 = GLine(630, 520, 660, 520)
w.add(line5)
# Illuminati
tri = GPolygon()
tri.add_vertex((150, 20))
tri.add_vertex((-20, 280))
tri.add_vertex((320, 280))
tri.filled = True
tri.fill_color = 'green'
w.add(tri)
up_eye = GArc(200, 120, 0, 180)
up_eye.filled = True
up_eye.fill_color = 'darkgreen'
w.add(up_eye, 50, 150)
low_eye = GArc(200, 120, -12, -167)
low_eye.filled = True
low_eye.fill_color = 'darkgreen'
low_eye.color = 'darkgreen'
w.add(low_eye, 50, 145)
eye_ball = GOval(55, 55)
eye_ball.filled = True
eye_ball.fill_color = 'black'
w.add(eye_ball, 125, 150)
py = GLabel('PYTHON')
py.font = '-50'
w.add(py, 20, 280)
if __name__ == '__main__':
main()
| 24.156109
| 114
| 0.579189
| 1,576
| 10,677
| 3.790609
| 0.163706
| 0.042183
| 0.060763
| 0.035152
| 0.471543
| 0.40241
| 0.380984
| 0.376967
| 0.376967
| 0.376967
| 0
| 0.112341
| 0.28051
| 10,677
| 441
| 115
| 24.210884
| 0.665322
| 0.046642
| 0
| 0.433735
| 0
| 0
| 0.064249
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003012
| false
| 0
| 0.006024
| 0
| 0.009036
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7bb15b935b3d0af4caae284ba8b64031d24bf414
| 3,196
|
py
|
Python
|
ciri/modules/reddit.py
|
AmarnathCJD/Cirilla-Userbot
|
a580f2d3442ab7ebc4497aee7e381e6e220dbf93
|
[
"MIT"
] | null | null | null |
ciri/modules/reddit.py
|
AmarnathCJD/Cirilla-Userbot
|
a580f2d3442ab7ebc4497aee7e381e6e220dbf93
|
[
"MIT"
] | null | null | null |
ciri/modules/reddit.py
|
AmarnathCJD/Cirilla-Userbot
|
a580f2d3442ab7ebc4497aee7e381e6e220dbf93
|
[
"MIT"
] | 2
|
2022-01-01T06:58:10.000Z
|
2022-01-12T15:59:38.000Z
|
import json
import os
import subprocess
import requests
from bs4 import BeautifulSoup
from ciri import HelpStr
from ciri.utils import ciri_cmd, eor
@ciri_cmd(pattern="red(?:dit)? (.*)")
async def reddit(e):
url = e.pattern_match.group(1)
if not url:
return await e.edit("`No url provided?`")
if not "reddit.com" in url:
return await e.edit("`Invalid reddit url.`")
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
}
r = requests.get(url, headers=headers)
if not r.status_code == 200:
return await e.edit("`Invalid reddit url, returned 404.`")
post_id = get_post_id(url)
vid, aud, title = get_download_url(post_id, r)
    msg = await eor(e, "`Downloading...`")
file = download_files(aud, vid, title)
await msg.delete()
await e.client.send_file(e.chat_id, file, caption=f"`{title}`")
def get_post_id(url: str) -> str:
post_id = url[url.find("comments/") + 9 :]
post_id = f"t3_{post_id[:post_id.find('/')]}"
return post_id
def get_download_url(post_id: str, data: requests.Response):
soup = BeautifulSoup(data.content, "html.parser")
required_js = soup.find("script", id="data")
json_data = json.loads(required_js.text.replace("window.___r = ", "")[:-1])
title = json_data["posts"]["models"][post_id]["title"]
title = title.replace(" ", "_")
dash_url = json_data["posts"]["models"][post_id]["media"]["dashUrl"]
height = json_data["posts"]["models"][post_id]["media"]["height"]
if height == "1080":
height = "480"
dash_url = dash_url[: int(dash_url.find("DASH")) + 4]
return f"{dash_url}_{height}.mp4", f"{dash_url}_audio.mp3", title
def download_files(a, v, title="reddit"):
with requests.get(a) as r:
if r.status_code == 200:
with open(f"{title}_aud.mp3", "wb") as f:
f.write(r.content)
else:
with requests.get(a.split("DASH_audio.mp3")[0] + "audio") as r:
if r.status_code == 200:
with open(f"{title}_aud.mp3", "wb") as f:
f.write(r.content)
with requests.get(v) as r:
if r.status_code == 200:
with open(f"{title}_vid.mp4", "wb") as f:
f.write(r.content)
else:
with requests.get(v.split(".mp4")[0]) as r:
if r.status_code == 200:
with open(f"{title}_vid.mp4", "wb") as f:
f.write(r.content)
subprocess.call(
[
"ffmpeg",
"-i",
f"{title}_vid.mp4",
"-i",
f"{title}_aud.mp3",
"-map",
"0:v",
"-map",
"1:a",
"-c:v",
"copy",
f"{title}.mp4",
]
)
os.remove(f"{title}_vid.mp4")
os.remove(f"{title}_aud.mp3")
return f"{title}.mp4"
HelpStr.update(
{
"reddit": {
"red(ddit)": {
"Description": "Downloads the audio and video from a reddit post.",
"Usage": "red(ddit <url>)",
},
}
}
)
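# Illustration (hypothetical URL) of how get_post_id() derives the key used in
# reddit's embedded JSON blob:
#   get_post_id("https://www.reddit.com/r/videos/comments/abc123/some_title/")
#   -> "t3_abc123"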
| 30.730769
| 129
| 0.54005
| 438
| 3,196
| 3.803653
| 0.303653
| 0.046819
| 0.033013
| 0.042017
| 0.309124
| 0.251501
| 0.236495
| 0.162065
| 0.162065
| 0.162065
| 0
| 0.034026
| 0.291927
| 3,196
| 103
| 130
| 31.029126
| 0.702165
| 0
| 0
| 0.2
| 0
| 0.011111
| 0.231227
| 0.017209
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.077778
| 0
| 0.177778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7bb571ec75fa6c41fe74464726a90fe46a7374f0
| 4,373
|
py
|
Python
|
components/roode/__init__.py
|
mgernhard/Roode
|
50727e0f46d2bfc73559eb5fc73984ca87acb174
|
[
"Unlicense"
] | null | null | null |
components/roode/__init__.py
|
mgernhard/Roode
|
50727e0f46d2bfc73559eb5fc73984ca87acb174
|
[
"Unlicense"
] | null | null | null |
components/roode/__init__.py
|
mgernhard/Roode
|
50727e0f46d2bfc73559eb5fc73984ca87acb174
|
[
"Unlicense"
] | null | null | null |
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import sensor
from esphome.const import CONF_ID, STATE_CLASS_MEASUREMENT, UNIT_EMPTY, UNIT_METER
# DEPENDENCIES = ["i2c"]
AUTO_LOAD = ["sensor", "binary_sensor", "text_sensor"]
MULTI_CONF = True
CONF_ROODE_ID = "roode_id"
roode_ns = cg.esphome_ns.namespace("roode")
Roode = roode_ns.class_("Roode", cg.PollingComponent)
CONF_ROI_HEIGHT = 'roi_height'
CONF_ROI_WIDTH = 'roi_width'
CONF_ADVISED_SENSOR_ORIENTATION = 'advised_sensor_orientation'
CONF_CALIBRATION = "calibration"
CONF_ROI_CALIBRATION = "roi_calibration"
CONF_INVERT_DIRECTION = "invert_direction"
CONF_MAX_THRESHOLD_PERCENTAGE = "max_threshold_percentage"
CONF_MIN_THRESHOLD_PERCENTAGE = "min_threshold_percentage"
CONF_MANUAL_THRESHOLD = "manual_threshold"
CONF_THRESHOLD_PERCENTAGE = "threshold_percentage"
CONF_RESTORE_VALUES = "restore_values"
CONF_I2C_ADDRESS = "i2c_address"
CONF_SENSOR_MODE = "sensor_mode"
CONF_MANUAL = "manual"
CONF_MANUAL_ACTIVE = "manual_active"
CONF_CALIBRATION_ACTIVE = "calibration_active"
CONF_TIMING_BUDGET = "timing_budget"
TYPES = [
CONF_RESTORE_VALUES, CONF_INVERT_DIRECTION,
CONF_ADVISED_SENSOR_ORIENTATION, CONF_I2C_ADDRESS
]
CONFIG_SCHEMA = (cv.Schema({
cv.GenerateID():
cv.declare_id(Roode),
cv.Optional(CONF_INVERT_DIRECTION, default='false'):
cv.boolean,
cv.Optional(CONF_RESTORE_VALUES, default='false'):
cv.boolean,
cv.Optional(CONF_ADVISED_SENSOR_ORIENTATION, default='true'):
cv.boolean,
cv.Optional(CONF_I2C_ADDRESS, default=0x29):
cv.uint8_t,
cv.Exclusive(
CONF_CALIBRATION, "mode", f"Only one mode, {CONF_MANUAL} or {CONF_CALIBRATION} is usable"):
cv.Schema({
cv.Optional(CONF_CALIBRATION_ACTIVE, default='true'):
cv.boolean,
cv.Optional(CONF_MAX_THRESHOLD_PERCENTAGE, default=85):
cv.int_range(min=50, max=100),
cv.Optional(CONF_MIN_THRESHOLD_PERCENTAGE, default=0):
cv.int_range(min=0, max=100),
cv.Optional(CONF_ROI_CALIBRATION, default='false'):
cv.boolean,
}),
cv.Exclusive(
CONF_MANUAL, "mode", f"Only one mode, {CONF_MANUAL} or {CONF_CALIBRATION} is usable"):
cv.Schema({
cv.Optional(CONF_MANUAL_ACTIVE, default='true'):
cv.boolean,
cv.Optional(CONF_TIMING_BUDGET, default=10):
cv.int_range(min=10, max=1000),
cv.Inclusive(
CONF_SENSOR_MODE,
"manual_mode",
f"{CONF_SENSOR_MODE}, {CONF_ROI_HEIGHT}, {CONF_ROI_WIDTH} and {CONF_MANUAL_THRESHOLD} must be used together",
):
cv.int_range(min=-1, max=2),
cv.Inclusive(
CONF_ROI_HEIGHT,
"manual_mode",
f"{CONF_SENSOR_MODE}, {CONF_ROI_HEIGHT}, {CONF_ROI_WIDTH} and {CONF_MANUAL_THRESHOLD} must be used together",
):
cv.int_range(min=4, max=16),
cv.Inclusive(
CONF_ROI_WIDTH,
"manual_mode",
f"{CONF_SENSOR_MODE}, {CONF_ROI_HEIGHT}, {CONF_ROI_WIDTH} and {CONF_MANUAL_THRESHOLD} must be used together",
):
cv.int_range(min=4, max=16),
cv.Inclusive(
CONF_MANUAL_THRESHOLD,
"manual_mode",
f"{CONF_SENSOR_MODE}, {CONF_ROI_HEIGHT}, {CONF_ROI_WIDTH} and {CONF_MANUAL_THRESHOLD} must be used together",
):
cv.int_range(min=40, max=4000),
}),
}).extend(cv.polling_component_schema("100ms")))
async def setup_conf(config, key, hub):
if key in config:
cg.add(getattr(hub, f"set_{key}")(config[key]))
def setup_manual_mode(config, hub):
manual = config[CONF_MANUAL]
for key in manual:
cg.add(getattr(hub, f"set_{key}")(manual[key]))
def setup_calibration_mode(config, hub):
calibration = config[CONF_CALIBRATION]
for key in calibration:
cg.add(getattr(hub, f"set_{key}")(calibration[key]))
async def to_code(config):
hub = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(hub, config)
cg.add_library("EEPROM", None)
cg.add_library("Wire", None)
cg.add_library("pololu", "1.3.0", "VL53L1X")
for key in TYPES:
await setup_conf(config, key, hub)
if CONF_MANUAL in config:
setup_manual_mode(config, hub)
if CONF_CALIBRATION in config:
setup_calibration_mode(config, hub)
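# A minimal sketch of the ESPHome YAML this schema accepts (values illustrative,
# top-level key assumed from the component name):
#
#   roode:
#     i2c_address: 0x29
#     invert_direction: false
#     calibration:
#       calibration_active: true
#       max_threshold_percentage: 85
#       roi_calibration: false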
| 35.266129
| 121
| 0.69426
| 586
| 4,373
| 4.860068
| 0.204778
| 0.03441
| 0.049157
| 0.031952
| 0.38132
| 0.314607
| 0.298455
| 0.238764
| 0.210674
| 0.210674
| 0
| 0.014702
| 0.191173
| 4,373
| 123
| 122
| 35.552846
| 0.7905
| 0.005031
| 0
| 0.272727
| 0
| 0
| 0.226259
| 0.03817
| 0
| 0
| 0.00092
| 0
| 0
| 1
| 0.018182
| false
| 0
| 0.045455
| 0
| 0.063636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7bb80c3ecc1f81bebc7a34d9d8f2cc068b53480f
| 1,632
|
py
|
Python
|
LoanPandas/code.py
|
yogprabhu/ga-learner-dsmp-repo
|
eaf27f7598f767481b08be3999024fb56612a666
|
[
"MIT"
] | 1
|
2019-05-01T18:24:49.000Z
|
2019-05-01T18:24:49.000Z
|
LoanPandas/code.py
|
yogprabhu/ga-learner-dsmp-repo
|
eaf27f7598f767481b08be3999024fb56612a666
|
[
"MIT"
] | null | null | null |
LoanPandas/code.py
|
yogprabhu/ga-learner-dsmp-repo
|
eaf27f7598f767481b08be3999024fb56612a666
|
[
"MIT"
] | null | null | null |
# --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode
# code starts here
# `path` to the dataset CSV is predefined by the exercise environment
# (assumption: this scaffold runs on a platform that injects `path`)
bank = pd.read_csv(path)
categorical_var = bank.select_dtypes(include = 'object')
print(categorical_var)
numerical_var = bank.select_dtypes(include = 'number')
print(numerical_var)
# code ends here
# --------------
# code starts here
banks = bank.drop(columns='Loan_ID')
print(banks.isnull().sum())
bank_mode = banks.mode()
#print(bank_mode)
banks = banks.fillna(0)
print(banks.isna().sum())
#code ends here
# --------------
# Code starts here
avg_loan_amount = pd.pivot_table(data=banks, index=['Gender', 'Married', 'Self_Employed'],values='LoanAmount', aggfunc=np.mean)
# code ends here
# --------------
# code starts here
loan_approved_se = banks[(banks['Self_Employed']=='Yes')&(banks['Loan_Status']=='Y')].shape[0]
loan_approved_nse=banks[(banks['Self_Employed']=='No')&(banks['Loan_Status']=='Y')].shape[0]
Loan_Status = 614
percentage_se = (loan_approved_se/Loan_Status)*100
percentage_nse = (loan_approved_nse/Loan_Status)*100
# code ends here
# --------------
# code starts here
loan_term = banks['Loan_Amount_Term'].apply(lambda x: int(x) / 12)  # months -> years
banks['Loan_Amount_Term'] = loan_term
big_loan_term= banks[banks['Loan_Amount_Term']>=25].shape[0]
# code ends here
# --------------
# code starts here
columns_to_show = ['ApplicantIncome', 'Credit_History']
loan_groupby = banks.groupby(by='Loan_Status')
loan_groupby = loan_groupby[columns_to_show]
mean_values = loan_groupby.agg([np.mean])
# code ends here
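# Worked example of the term conversion above (illustrative value): a 360-month
# Loan_Amount_Term becomes 360 / 12 = 30.0 years, so it counts toward
# big_loan_term via the `>= 25` years filter.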
| 22.666667
| 127
| 0.700368
| 236
| 1,632
| 4.610169
| 0.338983
| 0.057904
| 0.077206
| 0.073529
| 0.321691
| 0.251838
| 0.180147
| 0.075368
| 0.075368
| 0.075368
| 0
| 0.013689
| 0.104779
| 1,632
| 71
| 128
| 22.985915
| 0.731006
| 0.191176
| 0
| 0
| 0
| 0
| 0.164996
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.148148
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7bba3197cf6ebc84a1f3034725dd0f1b29fd1b82
| 4,699
|
py
|
Python
|
squad/merge.py
|
uwnlp/piqa
|
e18f2189c93965c94655d5cc943dcecdc2c1ea57
|
[
"Apache-2.0"
] | 89
|
2018-08-25T07:59:07.000Z
|
2021-05-04T06:37:27.000Z
|
squad/merge.py
|
seominjoon/piqa
|
e18f2189c93965c94655d5cc943dcecdc2c1ea57
|
[
"Apache-2.0"
] | 11
|
2018-09-28T17:33:27.000Z
|
2019-11-27T23:34:45.000Z
|
squad/merge.py
|
uwnlp/piqa
|
e18f2189c93965c94655d5cc943dcecdc2c1ea57
|
[
"Apache-2.0"
] | 10
|
2018-09-19T06:48:06.000Z
|
2020-04-14T20:42:06.000Z
|
"""Official merge script for PI-SQuAD v0.1"""
from __future__ import print_function
import os
import argparse
import json
import sys
import shutil
import scipy.sparse
import scipy.sparse.linalg
import numpy as np
import numpy.linalg
def get_q2c(dataset):
q2c = {}
for article in dataset:
for para_idx, paragraph in enumerate(article['paragraphs']):
cid = '%s_%d' % (article['title'], para_idx)
for qa in paragraph['qas']:
q2c[qa['id']] = cid
return q2c
def get_predictions(context_emb_path, question_emb_path, q2c, sparse=False, metric='ip', progress=False):
context_emb_dir, context_emb_ext = os.path.splitext(context_emb_path)
question_emb_dir, question_emb_ext = os.path.splitext(question_emb_path)
if context_emb_ext == '.zip':
print('Extracting %s to %s' % (context_emb_path, context_emb_dir))
shutil.unpack_archive(context_emb_path, context_emb_dir)
if question_emb_ext == '.zip':
print('Extracting %s to %s' % (question_emb_path, question_emb_dir))
shutil.unpack_archive(question_emb_path, question_emb_dir)
if progress:
from tqdm import tqdm
else:
tqdm = lambda x: x
predictions = {}
for id_, cid in tqdm(q2c.items()):
q_emb_path = os.path.join(question_emb_dir, '%s.npz' % id_)
c_emb_path = os.path.join(context_emb_dir, '%s.npz' % cid)
c_json_path = os.path.join(context_emb_dir, '%s.json' % cid)
if not os.path.exists(q_emb_path):
print('Missing %s' % q_emb_path)
continue
if not os.path.exists(c_emb_path):
print('Missing %s' % c_emb_path)
continue
if not os.path.exists(c_json_path):
print('Missing %s' % c_json_path)
continue
load = scipy.sparse.load_npz if sparse else np.load
q_emb = load(q_emb_path) # shape = [M, d], d is the embedding size.
c_emb = load(c_emb_path) # shape = [N, d], d is the embedding size.
with open(c_json_path, 'r') as fp:
phrases = json.load(fp)
if sparse:
if metric == 'ip':
sim = c_emb * q_emb.T
m = sim.max(1)
m = np.squeeze(np.array(m.todense()), 1)
elif metric == 'l1':
m = scipy.sparse.linalg.norm(c_emb - q_emb, ord=1, axis=1)
elif metric == 'l2':
m = scipy.sparse.linalg.norm(c_emb - q_emb, ord=2, axis=1)
else:
q_emb = q_emb['arr_0']
c_emb = c_emb['arr_0']
if metric == 'ip':
sim = np.matmul(c_emb, q_emb.T)
m = sim.max(1)
elif metric == 'l1':
m = numpy.linalg.norm(c_emb - q_emb, ord=1, axis=1)
elif metric == 'l2':
m = numpy.linalg.norm(c_emb - q_emb, ord=2, axis=1)
argmax = m.argmax(0)
predictions[id_] = phrases[argmax]
if context_emb_ext == '.zip':
shutil.rmtree(context_emb_dir)
if question_emb_ext == '.zip':
shutil.rmtree(question_emb_dir)
return predictions
if __name__ == '__main__':
squad_expected_version = '1.1'
parser = argparse.ArgumentParser(description='Official merge script for PI-SQuAD v0.1')
parser.add_argument('data_path', help='Dataset file path')
parser.add_argument('context_emb_dir', help='Context embedding directory')
parser.add_argument('question_emb_dir', help='Question embedding directory')
parser.add_argument('pred_path', help='Prediction json file path')
parser.add_argument('--sparse', default=False, action='store_true',
help='Whether the embeddings are scipy.sparse or pure numpy.')
parser.add_argument('--metric', type=str, default='ip',
help='ip|l1|l2 (inner product or L1 or L2 distance)')
parser.add_argument('--progress', default=False, action='store_true', help='Show progress bar. Requires `tqdm`.')
args = parser.parse_args()
with open(args.data_path) as dataset_file:
dataset_json = json.load(dataset_file)
if dataset_json['version'] != squad_expected_version:
print('Evaluation expects v-' + squad_expected_version +
', but got dataset with v-' + dataset_json['version'],
file=sys.stderr)
dataset = dataset_json['data']
q2c = get_q2c(dataset)
predictions = get_predictions(args.context_emb_dir, args.question_emb_dir, q2c, sparse=args.sparse,
metric=args.metric, progress=args.progress)
with open(args.pred_path, 'w') as fp:
json.dump(predictions, fp)
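# A minimal sketch (synthetic data) of the dense inner-product retrieval used in
# get_predictions: each phrase embedding is scored against the question
# embedding(s) and the best-scoring phrase becomes the prediction.
#   c_emb = np.array([[1.0, 0.0], [0.0, 1.0], [0.7, 0.7]])  # [N, d] phrases
#   q_emb = np.array([[0.9, 0.1]])                          # [M, d] question
#   sim = np.matmul(c_emb, q_emb.T)                         # [N, M] inner products
#   m = sim.max(1)                                          # [0.9, 0.1, 0.7]
#   phrases[m.argmax(0)]                                    # -> phrase 0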
| 38.516393
| 117
| 0.613109
| 651
| 4,699
| 4.18894
| 0.225806
| 0.041071
| 0.038137
| 0.017602
| 0.411808
| 0.256692
| 0.182985
| 0.182985
| 0.098276
| 0.057572
| 0
| 0.010759
| 0.268142
| 4,699
| 121
| 118
| 38.834711
| 0.782204
| 0.025963
| 0
| 0.168317
| 0
| 0
| 0.130444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019802
| false
| 0
| 0.108911
| 0
| 0.148515
| 0.069307
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7bba6288445870de13beac5ccea088e511b9306b
| 3,918
|
py
|
Python
|
src/passpredict/locations.py
|
samtx/pass-predictor
|
6577f75cd7d64bd3c12a9512880d4b29c2682b4c
|
[
"MIT"
] | null | null | null |
src/passpredict/locations.py
|
samtx/pass-predictor
|
6577f75cd7d64bd3c12a9512880d4b29c2682b4c
|
[
"MIT"
] | null | null | null |
src/passpredict/locations.py
|
samtx/pass-predictor
|
6577f75cd7d64bd3c12a9512880d4b29c2682b4c
|
[
"MIT"
] | null | null | null |
from functools import cached_property
from datetime import datetime
from math import degrees, radians, sin, cos
import numpy as np
from orbit_predictor import coordinate_systems
from .utils import get_timezone_from_latlon
from .time import make_utc
from ._time import datetime2mjd
from .solar import sun_pos_mjd
from ._rotations import elevation_at_rad
try:
from zoneinfo import ZoneInfo
except ImportError:
from backports.zoneinfo import ZoneInfo
class Location:
def __init__(self, name, latitude_deg, longitude_deg, elevation_m):
"""Location.
Parameters
----------
latitude_deg : float
Latitude in degrees.
longitude_deg : float
Longitude in degrees.
elevation_m : float
Elevation in meters.
"""
self.name = name
self.latitude_deg = latitude_deg
self.longitude_deg = longitude_deg
self.latitude_rad = radians(latitude_deg)
self.longitude_rad = radians(longitude_deg)
self.elevation_m = elevation_m
position_ecef = coordinate_systems.geodetic_to_ecef(
self.latitude_rad,
self.longitude_rad,
elevation_m / 1000.)
self.recef = np.array(position_ecef)
def dict(self) -> dict:
d = {
'name': self.name,
'lat': self.lat,
'lon': self.lon,
'h': self.h
}
return d
@property
def lat(self) -> float:
return self.latitude_deg
@property
def lon(self) -> float:
return self.longitude_deg
@property
def h(self) -> float:
return self.elevation_m
@cached_property
def timezone(self) -> ZoneInfo:
""" Find timezone """
return get_timezone_from_latlon(self.latitude_deg, self.longitude_deg)
@property
def tz(self) -> ZoneInfo:
return self.timezone
@cached_property
def offset(self) -> float:
""" Compute timezone offset in hours from UTC """
now = datetime.now(self.timezone)
delta = now.utcoffset().total_seconds() / 3600
return delta
@cached_property
def _cached_elevation_calculation_data(self):
"""
Cache trig values used for rotating ECEF to SEZ topocentric coordinates
"""
sin_lat, sin_long = sin(self.latitude_rad), sin(self.longitude_rad)
cos_lat, cos_long = cos(self.latitude_rad), cos(self.longitude_rad)
return (cos_lat * cos_long, cos_lat * sin_long, sin_lat)
def _sun_elevation_mjd(self, mjd: float) -> float:
"""
Computes elevation angle of sun relative to location. Returns degrees.
"""
sun_recef = sun_pos_mjd(mjd)
coslatcoslon, coslatsinlon, sinlat = self._cached_elevation_calculation_data
el = elevation_at_rad(coslatcoslon, coslatsinlon, sinlat, self.recef, sun_recef)
return degrees(el)
def sun_elevation(self, d: datetime) -> float:
"""
Computes elevation angle of sun relative to location. Returns degrees.
"""
d2 = make_utc(d)
mjd = datetime2mjd(d2)
return self._sun_elevation_mjd(mjd)
def is_sunlit(self, dt: datetime) -> bool:
"""
Computes elevation angle of sun relative to location
Returns True if elevation > -6 degrees
"""
el = self.sun_elevation(dt)
return el > -6
def _is_sunlit_mjd(self, mjd: float) -> bool:
"""
Computes elevation angle of sun relative to location
Returns True if elevation > -6 degrees
"""
el = self._sun_elevation_mjd(mjd)
return el > -6
def __repr__(self):
deg = u'\N{DEGREE SIGN}'
s = '<Location '
if self.name:
s += self.name + ' '
s += f'({self.latitude_deg}{deg} , {self.longitude_deg}{deg})'
s += '>'
return s
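# A minimal usage sketch (coordinates illustrative, not from the package):
#   from datetime import datetime, timezone
#   loc = Location('Austin', latitude_deg=30.27, longitude_deg=-97.74, elevation_m=150)
#   loc.sun_elevation(datetime(2021, 6, 21, 18, 0, tzinfo=timezone.utc))  # degrees
#   loc.is_sunlit(datetime.now(timezone.utc))  # True while the sun is above -6 deg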
| 29.238806
| 88
| 0.619704
| 468
| 3,918
| 4.970085
| 0.241453
| 0.037833
| 0.025795
| 0.041273
| 0.203783
| 0.138435
| 0.138435
| 0.138435
| 0.138435
| 0.138435
| 0
| 0.00577
| 0.292241
| 3,918
| 133
| 89
| 29.458647
| 0.833033
| 0.159775
| 0
| 0.108434
| 0
| 0
| 0.030007
| 0.016634
| 0
| 0
| 0
| 0
| 0
| 1
| 0.168675
| false
| 0
| 0.156627
| 0.048193
| 0.493976
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7bbaeab63e6d9b82f2fcd904c0c52ba80c699e2f
| 4,559
|
py
|
Python
|
rl_baselines/evaluation/eval_post.py
|
anonymous-authors-2018/robotics-repo
|
385d1f3b49f8d414ab90f53c6f06b56614ae83ba
|
[
"MIT"
] | 5
|
2019-08-21T22:57:21.000Z
|
2021-01-01T21:15:26.000Z
|
rl_baselines/evaluation/eval_post.py
|
BillChan226/POAR-SRL-4-Robot
|
a6a8052e105369656d34fffc4f7ca4475dcc38df
|
[
"MIT"
] | null | null | null |
rl_baselines/evaluation/eval_post.py
|
BillChan226/POAR-SRL-4-Robot
|
a6a8052e105369656d34fffc4f7ca4475dcc38df
|
[
"MIT"
] | 2
|
2019-11-26T11:41:12.000Z
|
2021-08-30T16:00:27.000Z
|
import subprocess
import numpy as np
import pickle
import argparse
import os
from rl_baselines.student_eval import allPolicy
from srl_zoo.utils import printRed, printGreen
from rl_baselines.evaluation.cross_eval_utils import EnvsKwargs, loadConfigAndSetup, policyEval,createEnv
def dict2array(tasks,data):
res=[]
for t in tasks:
if(t=='sc'):
max_reward=250
else:
max_reward=1850
data[t][:,1:]=data[t][:,1:]/max_reward
res.append(data[t])
res=np.array(res)
return res
def episodeEval(log_dir, tasks,num_timesteps=1000):
for t in tasks:
eval_args=['--log-dir', log_dir, '--num-timesteps', str(num_timesteps), '--num-cpu',str(5)]
task_args=['--task',t]
subprocess.call(['python', '-m', 'rl_baselines.cross_eval_utils']+eval_args+task_args)
file_name=log_dir+'episode_eval.pkl'
with open(file_name, 'rb') as f:
eval_reward = pickle.load(f)
    # Transfer the data from dict into a numpy array and save
eval_reward=dict2array(tasks,eval_reward)
file_name=log_dir+'episode_eval.npy'
np.save(file_name, eval_reward)
def policyCrossEval(log_dir,task,episode,model_path, num_timesteps=2000,num_cpu=1,seed=0):
train_args, algo_name, algo_class, srl_model_path, env_kwargs = loadConfigAndSetup(log_dir)
env_kwargs = EnvsKwargs(task, env_kwargs)
    # Placeholder guard kept from the original flow: set OK to False here when
    # no latest model checkpoint is available yet.
    OK = True
    if not OK:
        # no latest model saved yet
        return None, False
printGreen(
"Evaluation from the model saved at: {}, with evaluation time steps: {}".format(model_path, num_timesteps))
log_dir, environment, algo_args = createEnv(log_dir, train_args, algo_name, algo_class, env_kwargs, num_cpu=num_cpu,seed=seed)
reward = policyEval(environment, model_path, log_dir, algo_class, algo_args, num_timesteps, num_cpu)
# Just a trick to save the episode number of the reward,but need a little bit more space to store
reward = np.append(episode, reward)
return reward, True
def saveReward(log_dir,reward, task,save_name='episode_eval.pkl'):
reward = reward.astype(float)
file_name=log_dir+save_name
#can be changed accordingly
if(os.path.isfile(file_name)):
with open(file_name, 'rb') as f:
eval_reward= pickle.load(f)
if (task in eval_reward.keys()):
episodes = eval_reward[task][0]
            # The first entry of reward is the episode number
            current_episode = reward[0]
            # Check whether the latest episode's policy is already saved
if (current_episode not in episodes):
# # eval_reward[task]=np.append(eval_reward[task],[reward],axis=0)
eval_reward[task][0].append(reward[0])
eval_reward[task][1].append(reward.tolist())
else:
index = episodes.index(current_episode)
eval_reward[task][1][index].extend(reward[1:])
with open(file_name, 'wb') as f:
pickle.dump(eval_reward, f, pickle.HIGHEST_PROTOCOL)
        else:  # The task is not in the file yet
eval_reward[task]=([reward[0]],[reward.tolist()])
with open(file_name, 'wb') as f:
pickle.dump(eval_reward, f, pickle.HIGHEST_PROTOCOL)
    else:  # No episode rewards have been registered yet
eval_reward = {}
eval_reward[task]=([reward[0]],[reward.tolist()])
with open(file_name, 'wb') as f:
pickle.dump(eval_reward, f, pickle.HIGHEST_PROTOCOL)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Evaluation after training")
parser.add_argument('--log-dir',type=str, default=''
,help='RL algo to use')
parser.add_argument('--task-label', type=str, default='',
help='task to evaluate')
parser.add_argument('--episode', type=str, default='',
help='evaluation for the policy saved at this episode')
parser.add_argument('--policy-path', type=str, default='',
help='policy path')
parser.add_argument('--seed', type=int, default=0,
help='policy path')
args, unknown = parser.parse_known_args()
reward, _ = policyCrossEval(args.log_dir, args.task_label, episode=args.episode, model_path=args.policy_path,
num_timesteps=251,seed=args.seed)
saveReward(args.log_dir, reward, args.task_label, save_name='episode_eval.pkl')
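# Layout of the pickle written by saveReward (illustrative values): a dict keyed
# by task, each value a (episodes, rewards) tuple where every reward row has the
# episode number prepended, e.g.
#   {'sc': ([100.0, 200.0],
#           [[100.0, 0.84, 0.79], [200.0, 0.91, 0.88]])}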
| 33.77037
| 130
| 0.645317
| 615
| 4,559
| 4.59187
| 0.263415
| 0.063739
| 0.03966
| 0.028329
| 0.174575
| 0.158994
| 0.122875
| 0.122875
| 0.122875
| 0.122875
| 0
| 0.01039
| 0.239965
| 4,559
| 134
| 131
| 34.022388
| 0.804618
| 0.100241
| 0
| 0.241379
| 0
| 0
| 0.098629
| 0.007097
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045977
| false
| 0.011494
| 0.091954
| 0
| 0.183908
| 0.022989
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7bbb8601ea2e62414cb9ab4019393f8898c93e86
| 6,304
|
py
|
Python
|
HLTriggerOffline/SUSYBSM/test/BSMTriggerCheck/runComparison.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
HLTriggerOffline/SUSYBSM/test/BSMTriggerCheck/runComparison.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
HLTriggerOffline/SUSYBSM/test/BSMTriggerCheck/runComparison.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
#! /usr/bin/env python
import os
os.system("make clean; make; \\rm *.log log.list")
############################################
#dir1='TriggerValidation_223_HLT'
#dir2='TriggerValidation_224_HLT'
#out='223_vs_224'
#samples=['LM1']
#prefix1 = "histo_"
#prefix2 = "histo_"
#sufix1 = "_IDEALV11"
#sufix2 = "_IDEALV11_v1"
#label1 = "LM1_223"
#label2 = "LM1_224"
############################################
#dir1='TriggerValidation_224_HLT'
#dir2='TriggerValidation_300pre2_HLT'
#out='224_vs_300pre2'
#samples=['LM1']
#prefix1 = "histo_"
#prefix2 = "histo_"
#sufix1 = "_IDEALV11_v1"
#sufix2 = "_IDEALV9"
#label1 = "LM1_223"
#label2 = "LM1_300pre2"
############################################
#dir1='TriggerValidation_224_HLT'
#dir2='TriggerValidation_300pre6_HLT'
#out='224_vs_300pre6'
#samples=['LM1']
#prefix1 = "histo_"
#prefix2 = "histo_"
#sufix1 = "_IDEALV11_v1"
#sufix2 = "_IDEAL_30x_v1"
#label1 = "LM1_223"
#label2 = "LM1_300pre6"
############################################
dir1='/afs/cern.ch/user/c/chiorbo/scratch0/SUSY_2007/TriggerValidation/TriggerValidation_DQM_312_commit_V00-06-00/src/HLTriggerOffline/SUSYBSM/test'
dir2='/afs/cern.ch/user/c/chiorbo/scratch0/SUSY_2007/TriggerValidation/TriggerValidation_DQM_312_commit_V00-06-00/src/HLTriggerOffline/SUSYBSM/test'
out='mc1_vs_mc2'
samples=['_HLT']
prefix1 = "DQM_V0001"
prefix2 = "DQM_V0001"
sufix1 = "_R000000001"
sufix2 = "_R000000001_2"
label1 = "HLT"
label2 = "HLT"
############################################
os.system('mkdir html/'+out)
#create html index page
os.system('cp html/template/index.html html/'+out+'/index.html')
#create the cover page
inputhtml = open('html/template/beginning.html')
outputhtml = open('html/'+out+'/cover.html','w')
for line in inputhtml:
# remove .root
if line.find('<!-- Here python will write the name of first release -->') != -1: outputhtml.write(dir1)
# remove .root
elif line.find('<!-- Here python will write the name of second release -->') != -1: outputhtml.write(dir2)
else: outputhtml.write(line)
continue
inputhtml.close()
outputhtml.close()
#create the menu page
os.system('cp html/template/menu_beginning.html html/'+out+'/menu.html')
for sample in samples:
tmp1 = open('tmp.html','w')
tmp2 = open('html/template/menu_body.html')
for line in tmp2:
if line.find('thissample') != -1:
newline = line.replace('thissample',sample)
tmp1.write(newline)
else: tmp1.write(line)
continue
tmp1.close()
tmp2.close()
os.system('more tmp.html >> html/'+out+'/menu.html')
os.system('rm tmp.html')
continue
os.system('more html/template/menu_end.html >> html/'+out+'/menu.html')
#run the code for each sample
for sample in samples:
file1 = dir1+'/'+prefix1+sample+sufix1+'.root'
file2 = dir2+'/'+prefix2+sample+sufix2+'.root'
outputfile = 'outputfile.root'
#create html page for this sample
inputhtml = open('html/template/comp_beginning.html')
os.system('mkdir html/'+out+'/'+sample)
outputhtml = open('html/'+out+'/'+sample+'/comparison.html','w')
# add right version names in the html
for line in inputhtml:
if line.find('<!-- Here python will write the name of first release -->') != -1: outputhtml.write(dir1)
elif line.find('<!-- Here python will write the name of second release -->') != -1: outputhtml.write(dir2)
elif line.find('<!-- Here python will write the name of the model -->') != -1: outputhtml.write(sample)
elif line.find('thissample') != -1:
newline = line.replace('thissample',sample)
outputhtml.write(newline)
else: outputhtml.write(line)
continue
inputhtml.close()
outputhtml.close()
# run the comparison
os.system('./triggerComparison.x -File1='+file1+' -File2='+file2+' -OutputFile='+outputfile+' -label1='+label1+' -label2='+label2)
# for old names
# os.system('./triggerComparison.x --oldL1names -File1='+file1+' -File2='+file2+' -OutputFile='+outputfile+' -label1='+label1+' -label2='+label2)
os.system('mv HLTcomparison.log html/'+out+'/'+sample)
os.system('mv L1comparison.log html/'+out+'/'+sample)
# mv root file to the html directory
os.system('mv '+outputfile+' html/'+out+'/'+sample)
# add eff and residual pulls to the html
os.system('more html/template/comp.html >> html/'+out+'/'+sample+'/comparison.html')
# link the compatibility maps
os.system('more compatibility.html >> html/'+out+'/'+sample+'/comparison.html')
# create jpg files
os.system("ls *eps > listeps.log")
listeps = open("listeps.log")
for epsfile in listeps: os.system("convert \""+epsfile[:-1]+"\" \""+epsfile[:-4]+"jpg\"")
thefile = open('html/'+out+'/'+sample+'/comparison.html',"r+")
# link HLT files
#thefile.seek(0,2)
#thefile.write('<tr><td><center><table>\n')
#listeps.seek(0)
#for epsfile in listeps:
# if(epsfile.find('HLT') != -1): #this is a plot of a trigger path
# tmp1 = open('html/template/addplot.html')
# for line in tmp1:
# newline = line.replace('triggerpath',epsfile[:-5])
# thefile.write(newline+'\n')
# continue
# continue
# continue
#thefile.write('</table></center></td>\n')
# link L1 files
#thefile.write('<td><center><table>\n')
#listeps.seek(0)
#for epsfile in listeps:
# if(epsfile.find('L1') != -1): #this is a plot of a trigger path
# if(epsfile.find('A_') != -1): #this is a plot of a trigger path
# tmp1 = open('html/template/addplot.html')
# for line in tmp1:
# newline = line.replace('triggerpath',epsfile[:-5])
# thefile.write(newline+'\n')
# continue
# continue
# continue
#thefile.write('</table></center></td></tr>\n')
#thefile.close()
# write end of the comparison web page
os.system('more html/template/end.html >> html/'+out+'/'+sample+'/comparison.html')
# move all eps and jpg files in the proper directory
os.system('mv *jpg html/'+out+'/'+sample+'/')
os.system('mv *eps html/'+out+'/'+sample+'/')
continue
os.system('\\rm listeps.log')
7bbbb84b2ea6ce8e2867ca8c352a6bb6c21ce89f | 1,602 | py | Python | mecc/views.py | unistra/eva | 9f7bd8c44edbca05eb45b36cb5b8e658e53bc3c0 | ["Apache-2.0"] | 3 | 2021-03-19T10:36:10.000Z | 2021-09-08T01:37:47.000Z
# -*- coding: utf-8 -*-
from django_cas.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render, redirect

from mecc.apps.years.models import UniversityYear


@login_required
def home(request):
    """
    Root of all evil: dispatch according to user profile.
    """
    # remember the target university year in the session, if one is defined
    try:
        target_year = UniversityYear.objects.get(is_target_year=True)
        request.session['current_year'] = target_year.label_year
        request.session['current_code_year'] = target_year.code_year
    except ObjectDoesNotExist:
        pass
    # group-based dispatch takes precedence over profile-based dispatch
    for e in request.user.groups.all():
        if e.name == "VP":
            return redirect('dashboards:general')
        if e.name == "DES1":
            return redirect('training:list_all')
    # profile-based dispatch; year-bound profiles only apply to the current year
    for e in request.user.meccuser.profile.all():
        if e.code == "RESPFORM" and e.year == request.session['current_code_year']:
            return redirect('training:list_resp')
        if e.code in ('REFAPP', 'DIRETU', 'GESCOL'):
            return redirect('training:list', cmp=e.cmp)
        if e.code in ('DIRCOMP', 'RAC'):
            return redirect('dashboards:institute', code=e.cmp)
        if e.code == "ECI":
            return redirect('training:list_all_meccs')
        if e.code == "RESPENS" and e.year == request.session['current_code_year']:
            return redirect('training:my_teachings')
    return render(request, 'base.html')
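# Editor's sketch (hypothetical refactor, not in the original file): the chain
# of profile-code checks in home() is essentially a lookup table. Expressing
# the year-independent routes as data keeps them unit-testable without Django;
# PROFILE_ROUTES and route_for_profile are names invented for this sketch.
# RESPFORM and RESPENS are left out because they also need the year check.
PROFILE_ROUTES = {
    'REFAPP': 'training:list',
    'DIRETU': 'training:list',
    'GESCOL': 'training:list',
    'DIRCOMP': 'dashboards:institute',
    'RAC': 'dashboards:institute',
    'ECI': 'training:list_all_meccs',
}


def route_for_profile(code):
    """Return the URL name for a profile code, or None to fall through."""
    return PROFILE_ROUTES.get(code)


assert route_for_profile('REFAPP') == 'training:list'
assert route_for_profile('UNKNOWN') is None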
7bbde3e95bb2349d1613a331043db076b94f2cfe | 1,617 | py | Python | src/utgardtests/filewriter/statusprocessor.py | ess-dmsc/utgard-test-utils | 27e244d06a681e09a10584dc6b93e5eaf767a8be | ["BSD-2-Clause"]
import logging
import threading
import time


class StatusProcessor:
    """Polls a status consumer and tracks whether the file writer is alive."""

    MAX_NUM_MESSAGES_PER_UPDATE = 10
    GET_MESSAGES_TIMEOUT_S = 0.5
    LIVENESS_TIMEOUT_S = 5

    def __init__(
        self,
        status_consumer,
        msg_processor,
        logger=logging.getLogger(__name__),
        time_function=time.time,
    ):
        self._consumer = status_consumer
        self._msg_processor = msg_processor
        self._logger = logger
        self._time_function = time_function
        self._is_writing = False
        self._is_writing_lock = threading.Lock()

    def start(self):
        self._consumer.start()

    def update_status(self):
        self._get_and_process_messages()
        self._update_running_status()

    def _get_and_process_messages(self):
        msgs = self._consumer.get_messages(
            self.MAX_NUM_MESSAGES_PER_UPDATE, self.GET_MESSAGES_TIMEOUT_S
        )
        for msg in msgs:
            self._msg_processor.process_msg(msg)

    def _update_running_status(self):
        # the writer counts as running if a status message arrived recently
        ts = self._msg_processor.get_latest_timestamp()
        if ts is None:
            return
        ct = self._time_function()
        with self._is_writing_lock:
            self._is_writing = (ct - ts) <= self.LIVENESS_TIMEOUT_S

    def is_writing(self):
        with self._is_writing_lock:
            status = self._is_writing
        return status

    def stop(self):
        self._consumer.stop()

    def get_metrics(self):
        if self.is_writing():
            raise RuntimeError("File writer is still running")
        return self._msg_processor.get_metrics()
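# Editor's sketch (not part of the original file): minimal usage example for
# the StatusProcessor above, with hand-rolled stubs standing in for the real
# status consumer and message processor; the stub names and the JSON payload
# are hypothetical.
import time


class FakeConsumer:
    def start(self):
        pass

    def stop(self):
        pass

    def get_messages(self, max_num_messages, timeout_s):
        # pretend one status message arrived within the timeout
        return ['{"state": "writing"}']


class FakeProcessor:
    def __init__(self):
        self._ts = None

    def process_msg(self, msg):
        self._ts = time.time()  # record arrival time of the latest message

    def get_latest_timestamp(self):
        return self._ts

    def get_metrics(self):
        return {'messages_processed': 1}


processor = StatusProcessor(FakeConsumer(), FakeProcessor())
processor.start()
processor.update_status()
print(processor.is_writing())  # True: last message is within LIVENESS_TIMEOUT_S
processor.stop()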