Schema (column name and dtype, reconstructed from the flattened header):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
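For working with rows like the ones below programmatically rather than reading them off the page, here is a minimal sketch using pandas. The Parquet path is hypothetical (the dump does not show the storage layout); the column names come from the schema above.

```python
import pandas as pd

# Hypothetical shard path; adjust to wherever the data actually lives.
df = pd.read_parquet("data/train-00000.parquet")

# Keep Python files that parse (cate_ast == 1) and are not dominated by comments.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1.0)
    & (df["qsc_code_frac_chars_comments_quality_signal"] < 0.3)
)
print(df.loc[mask, ["hexsha", "size", "max_stars_repo_name"]])
```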
---

hexsha: a35602a1c5d4bcf343e77bdb5e4000c799357ee5 | size: 347 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | homeworks/kirill_shevchuk/hw05/level04.py | tgrx/Z22 | b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff | ["Apache-2.0"] | null | null | null |
| max_issues | homeworks/kirill_shevchuk/hw05/level04.py | tgrx/Z22 | b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff | ["Apache-2.0"] | 8 | 2019-11-15T18:15:56.000Z | 2020-02-03T18:05:05.000Z |
| max_forks | homeworks/kirill_shevchuk/hw05/level04.py | tgrx/Z22 | b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff | ["Apache-2.0"] | null | null | null |

content (indentation restored):

```python
from urllib.parse import urlparse


def host(url):
    if not url:
        return ""
    data = urlparse(url)
    if data.netloc:
        return data.netloc
    value = data.path.split("/")[0]
    if "@" not in value or ":" not in value:
        return value
    from_ = value.find("@") + 1
    for_ = value.find(":")
    return value[from_:for_]
```

avg_line_length: 21.6875 | max_line_length: 44 | alphanum_fraction: 0.570605

qsc_*_quality_signal values (schema order): 47 | 347 | 4.12766 | 0.468085 | 0.051546 | 0.103093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00813 | 0.291066 | 347 | 15 | 45 | 23.133333 | 0.780488 | 0 | 0 | 0 | 0 | 0 | 0.014409 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.461538 | 0

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
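The `host()` helper in the record above is easy to sanity-check; a few illustrative calls (inputs are made up, outputs follow from tracing the code):

```python
print(host("https://example.com/some/path"))  # example.com  (netloc branch)
print(host("example.com/some/path"))          # example.com  (no scheme: first path segment)
print(host("git@github.com:user/repo.git"))   # github.com   (slice between '@' and ':')
print(host(""))                               # ""           (empty-input guard)
```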
---

hexsha: a35af943a1738408edb737fd87daf987635bfda0 | size: 1,554 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | pertemuan_13/draw_utils.py | Muhammad-Yunus/Jetson-Nano-OpenCV-Learn | 933cb2594539a877030fb82dc3e6867409c1a557 | ["Apache-2.0"] | null | null | null |
| max_issues | pertemuan_13/draw_utils.py | Muhammad-Yunus/Jetson-Nano-OpenCV-Learn | 933cb2594539a877030fb82dc3e6867409c1a557 | ["Apache-2.0"] | null | null | null |
| max_forks | pertemuan_13/draw_utils.py | Muhammad-Yunus/Jetson-Nano-OpenCV-Learn | 933cb2594539a877030fb82dc3e6867409c1a557 | ["Apache-2.0"] | 2 | 2021-09-28T00:24:21.000Z | 2022-03-09T13:38:29.000Z |

content (indentation restored; identifiers and comments kept verbatim):

```python
import cv2
import numpy as np


# draw_ped() function to draw bounding box with top labeled text
def draw_ped(img, label, x0, y0, xt, yt, font_size=0.4, alpha=0.5, bg_color=(255,0,0), ouline_color=(255,255,255), text_color=(0,0,0)):
    overlay = np.zeros_like(img)
    y0, yt = max(y0 - 15, 0) , min(yt + 15, img.shape[0])
    x0, xt = max(x0 - 15, 0) , min(xt + 15, img.shape[1])
    (w, h), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, font_size, 1)
    cv2.rectangle(overlay,
                  (x0, y0 + baseline),
                  (max(xt, x0 + w), yt),
                  bg_color,
                  -1)
    cv2.rectangle(img,
                  (x0, y0 + baseline),
                  (max(xt, x0 + w), yt),
                  ouline_color,
                  2)
    pts = np.array([[x0, y0 - h - baseline],       # top left
                    [x0 + w, y0 - h - baseline],   # top right
                    [x0 + w + 10, y0 + baseline],  # bolom right
                    [x0, y0 + baseline]])          # bottom left
    cv2.fillPoly(img, [pts], ouline_color)            # add label white fill
    cv2.polylines(img, [pts], True, ouline_color, 2)  # add label white border
    cv2.putText(img,
                label,
                (x0, y0),
                cv2.FONT_HERSHEY_SIMPLEX,
                font_size,
                text_color,
                1,
                cv2.LINE_AA)
    img_blend = cv2.addWeighted(img, 1, overlay, alpha, 0.0)
    return img_blend
```

avg_line_length: 39.846154 | max_line_length: 135 | alphanum_fraction: 0.47426

qsc_*_quality_signal values (schema order): 199 | 1,554 | 3.59799 | 0.341709 | 0.03352 | 0.050279 | 0.03352 | 0.142458 | 0.142458 | 0.061453 | 0.061453 | 0 | 0 | 0 | 0.081808 | 0.402188 | 1,554 | 39 | 136 | 39.846154 | 0.688913 | 0.095882 | 0 | 0.121212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.060606 | 0 | 0.121212 | 0

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
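A minimal way to exercise `draw_ped()` from the record above, on a synthetic frame; the image, label, and box coordinates are made up:

```python
import numpy as np

canvas = np.zeros((480, 640, 3), dtype=np.uint8)  # blank BGR frame
out = draw_ped(canvas, "person 0.97", 200, 150, 320, 400)
# `out` is the frame with the labeled box, blended with the translucent
# background rectangle; cv2.imwrite("preview.png", out) would save it.
```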
---

hexsha: a35b39c11aff2330ec7aa6556e235a658417a015 | size: 2,204 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | Sensors/PortStart.py | cybertraining-dsc/boat | 32e4942b69059d1dd48d79c8e0f55bac438eb5e7 | ["Apache-2.0"] | null | null | null |
| max_issues | Sensors/PortStart.py | cybertraining-dsc/boat | 32e4942b69059d1dd48d79c8e0f55bac438eb5e7 | ["Apache-2.0"] | null | null | null |
| max_forks | Sensors/PortStart.py | cybertraining-dsc/boat | 32e4942b69059d1dd48d79c8e0f55bac438eb5e7 | ["Apache-2.0"] | null | null | null |

content (indentation restored; docstrings kept verbatim):

```python
"""
Code modified from:
apps.fishandwhistle.net/archives/1155
"""
from __future__ import print_function
import serial
import sys
import glob

port_list = {}


def identifyPort(port):
    """
    tests the port and identifies what device is attached to it from probing it
    :param port:
    :return: a port list dict with the tho porst for 'GPS' and 'Sonar'
    """
    global port_list
    try:
        with serial.Serial(port, baudrate=4800, timeout=1) as ser:
            # read 10 lines from the serial output
            for i in range(10):
                line = ser.readline().decode('ascii', errors='replace')
                msg = line.split(',')
                if msg[0] == '$GPRMC':
                    port_list['GPS'] = port
                    return
                elif msg[0] == '$SDDBT':
                    port_list['Sonar'] = port
                    return
    except Exception as e:
        print(e)


def _scan_ports():
    """
    scan the ports on various devices including Windows, linux, and OSX
    :return:
    """
    if sys.platform.startswith('win'):
        print("scan Windows")
        ports = ['COM%s' % (i + 1) for i in range(256)]
    elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
        print("scan Linux")
        # this excludes your current terminal "/dev/tty"
        patterns = ('/dev/tty[A-Za-z]*', '/dev/ttyUSB*')
        ports = [glob.glob(pattern) for pattern in patterns]
        ports = [item for sublist in ports for item in sublist]  # flatten
    elif sys.platform.startswith('darwin'):
        print("scan Darwin")
        patterns = ('/dev/*serial*', '/dev/ttyUSB*', '/dev/ttyS*')
        ports = [glob.glob(pattern) for pattern in patterns]
        ports = [item for sublist in ports for item in sublist]  # flatten
    else:
        raise EnvironmentError('Unsupported platform')
    return ports


def getPorts():
    """
    get the ports
    :return: return the ports dict
    """
    ports = _scan_ports()
    print(ports)
    for port in ports:
        identifyPort(port)
    global port_list
    return port_list


def test():
    list = getPorts()
    print(list)


if __name__ == "__main__":
    test()
```

avg_line_length: 26.878049 | max_line_length: 79 | alphanum_fraction: 0.583031

qsc_*_quality_signal values (schema order): 271 | 2,204 | 4.656827 | 0.420664 | 0.044374 | 0.066561 | 0.017433 | 0.141046 | 0.141046 | 0.141046 | 0.141046 | 0.141046 | 0.141046 | 0 | 0.012298 | 0.299002 | 2,204 | 81 | 80 | 27.209877 | 0.804531 | 0.197822 | 0 | 0.163265 | 0 | 0 | 0.107965 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081633 | false | 0 | 0.081633 | 0 | 0.244898 | 0.142857

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
---

hexsha: a35f847cfae16fa50a6998fa4b3afcf7165085cb | size: 883 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | tests/core/test_registered_plugins.py | MajesticFalcon/nornir | 75f82dbb7f492d0f283abcc5eb6b5fee08db9487 | ["Apache-2.0"] | 955 | 2018-05-16T17:10:12.000Z | 2022-03-30T20:14:26.000Z |
| max_issues | tests/core/test_registered_plugins.py | MajesticFalcon/nornir | 75f82dbb7f492d0f283abcc5eb6b5fee08db9487 | ["Apache-2.0"] | 490 | 2018-05-16T08:00:22.000Z | 2022-03-28T21:14:39.000Z |
| max_forks | tests/core/test_registered_plugins.py | MajesticFalcon/nornir | 75f82dbb7f492d0f283abcc5eb6b5fee08db9487 | ["Apache-2.0"] | 243 | 2018-05-17T11:07:24.000Z | 2022-03-27T18:01:07.000Z |

content (indentation restored):

```python
from nornir.core.plugins.inventory import InventoryPluginRegister
from nornir.core.plugins.runners import RunnersPluginRegister
from nornir.plugins.inventory import SimpleInventory
from nornir.plugins.runners import SerialRunner, ThreadedRunner
from nornir_utils.plugins.inventory import YAMLInventory


class Test:
    def test_registered_runners(self):
        RunnersPluginRegister.deregister_all()
        RunnersPluginRegister.auto_register()
        assert RunnersPluginRegister.available == {
            "threaded": ThreadedRunner,
            "serial": SerialRunner,
        }

    def test_registered_inventory(self):
        InventoryPluginRegister.deregister_all()
        InventoryPluginRegister.auto_register()
        assert InventoryPluginRegister.available == {
            "SimpleInventory": SimpleInventory,
            "YAMLInventory": YAMLInventory,
        }
```

avg_line_length: 33.961538 | max_line_length: 65 | alphanum_fraction: 0.737259

qsc_*_quality_signal values (schema order): 72 | 883 | 8.916667 | 0.375 | 0.077882 | 0.102804 | 0.065421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.197055 | 883 | 25 | 66 | 35.32 | 0.905501 | 0 | 0 | 0 | 0 | 0 | 0.047565 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.1 | false | 0 | 0.25 | 0 | 0.4 | 0

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
---

hexsha: a3670442cb8f6ed8744f92e8d59bbfa74b3455a4 | size: 481 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | app/collect/patch.py | luiscape/hdxscraper-unhcr-mediterranean-refugees | 372bd7f565569e1d3a8428e6f09e86a01842bb9c | ["MIT"] | null | null | null |
| max_issues | app/collect/patch.py | luiscape/hdxscraper-unhcr-mediterranean-refugees | 372bd7f565569e1d3a8428e6f09e86a01842bb9c | ["MIT"] | 2 | 2015-10-08T15:41:56.000Z | 2015-10-08T15:50:48.000Z |
| max_forks | app/collect/patch.py | luiscape/hdxscraper-unhcr-mediterranean-refugees | 372bd7f565569e1d3a8428e6f09e86a01842bb9c | ["MIT"] | null | null | null |

content (indentation restored):

```python
#!/usr/bin/python
# -*- coding: utf-8 -*-

import time


def Epoch(data):
    '''Patching Epoch timestamps.'''
    for record in data:
        record['last_updated'] = time.strftime('%Y-%m-%d', time.localtime(record['last_updated']))
    return data


def Date(data):
    '''Patching date stamps.'''
    for record in data:
        m = time.strptime(record['month_en'], '%B')
        m = time.strftime('%m', m)
        record['date'] = '{year}-{month}'.format(year=record['year'], month=m)
    return data
```

avg_line_length: 20.041667 | max_line_length: 94 | alphanum_fraction: 0.619543

qsc_*_quality_signal values (schema order): 67 | 481 | 4.402985 | 0.477612 | 0.081356 | 0.074576 | 0.101695 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002506 | 0.170478 | 481 | 23 | 95 | 20.913043 | 0.736842 | 0.180873 | 0 | 0.363636 | 0 | 0 | 0.173228 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.454545 | 0

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
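What the two patchers above do, traced on a tiny made-up record (the `Epoch` output depends on the machine's local timezone):

```python
rows = [{'last_updated': 1444316516, 'month_en': 'October', 'year': 2015}]
print(Epoch(rows))  # 'last_updated' becomes e.g. '2015-10-08' (local time)
print(Date(rows))   # adds 'date': '2015-10'  ('%B' month name -> '%m' number)
```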
---

hexsha: a3686d9e544eb4ac435a125dc81bd7efb5af661e | size: 1,875 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | datasets/nlmap/preprocess_nlmap.py | zhuang-li/pyaudio_with_tranx | 934d0431539564bb815c4c2c6399fe9d2fe7db55 | ["Apache-2.0"] | null | null | null |
| max_issues | datasets/nlmap/preprocess_nlmap.py | zhuang-li/pyaudio_with_tranx | 934d0431539564bb815c4c2c6399fe9d2fe7db55 | ["Apache-2.0"] | null | null | null |
| max_forks | datasets/nlmap/preprocess_nlmap.py | zhuang-li/pyaudio_with_tranx | 934d0431539564bb815c4c2c6399fe9d2fe7db55 | ["Apache-2.0"] | null | null | null |

content (indentation restored):

```python
from nltk.tokenize import TweetTokenizer
import io


def read_en_lines(lines):
    tknzr = TweetTokenizer()
    result = []
    for line in lines:
        result.append(tknzr.tokenize(line))
    return result


def read_mrl_lines(lines):
    result = []
    for line in lines:
        tgt = ''
        for i, ch in enumerate(line.strip()):
            if ch == '(' or ch == ')' or ch == ',':
                if tgt[-1] == ' ':
                    tgt = tgt + ch + ' '
                else:
                    tgt = tgt + ' ' + ch + ' '
            elif ch == ' ':
                tgt = tgt + "_"
            else:
                tgt = tgt + ch
        tgt_list = tgt.strip().split(' ')
        result.append(tgt_list)
    return result


def read_nlmap_data(en_path, mrl_path):
    with open(en_path, "r") as lines:
        en_result = read_en_lines(lines)
    with open(mrl_path, "r") as lines:
        mrl_result = read_mrl_lines(lines)
    return en_result, mrl_result


def write_to_txt_file(src_list, tgt_list, fp):
    fp.write(' '.join(src_list) + '\t' + ' '.join(tgt_list) + '\n')


def process_results(src_result, tgt_result, path):
    txt_fp = io.open(path, "w")
    for i, src_list in enumerate(src_result):
        tgt_list = tgt_result[i]
        write_to_txt_file(src_list, tgt_list, txt_fp)


dir_path = "../../data/nlmap/"
train_en_path = dir_path + "nlmaps.train.en"
train_mrl_path = dir_path + "nlmaps.train.mrl"
test_en_path = dir_path + "nlmaps.test.en"
test_mrl_path = dir_path + "nlmaps.test.mrl"
train_txt = dir_path + "train.txt"
test_txt = dir_path + "test.txt"

train_en_result, train_mrl_result = read_nlmap_data(train_en_path, train_mrl_path)
test_en_result, test_mrl_result = read_nlmap_data(test_en_path, test_mrl_path)
process_results(train_en_result, train_mrl_result, train_txt)
process_results(test_en_result, test_mrl_result, test_txt)
```

avg_line_length: 29.296875 | max_line_length: 82 | alphanum_fraction: 0.6208

qsc_*_quality_signal values (schema order): 273 | 1,875 | 3.930403 | 0.194139 | 0.045666 | 0.041007 | 0.063374 | 0.300093 | 0.149115 | 0.05219 | 0.05219 | 0 | 0 | 0 | 0.000716 | 0.255467 | 1,875 | 64 | 83 | 29.296875 | 0.767908 | 0 | 0 | 0.163265 | 0 | 0 | 0.060235 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102041 | false | 0 | 0.040816 | 0 | 0.204082 | 0

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
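The MRL tokenizer in the record above isolates parentheses and commas as standalone tokens and replaces spaces inside a token with underscores. One made-up query traced through it:

```python
# 'new york' -> 'new_york'; '(' ')' ',' become standalone tokens.
# Caveat: a line starting with '(' would hit tgt[-1] on an empty string.
print(read_mrl_lines(["query(new york,river)"]))
# [['query', '(', 'new_york', ',', 'river', ')']]
```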
---

hexsha: a3697ddf813bc6d7c74b1660f1c7cbb233952678 | size: 2,228 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | ocr.py | RonLek/ALPR-and-Identification-for-Indian-Vehicles | 2c6cd5d6d883e67ed17a8dbb96830f813c6ab55e | ["FTL", "Xnet", "X11"] | 13 | 2020-09-25T16:48:06.000Z | 2022-01-31T01:36:33.000Z |
| max_issues | ocr.py | RonLek/ALPR-and-Identification-for-Indian-Vehicles | 2c6cd5d6d883e67ed17a8dbb96830f813c6ab55e | ["FTL", "Xnet", "X11"] | 5 | 2021-01-19T09:36:59.000Z | 2022-03-25T06:56:08.000Z |
| max_forks | ocr.py | RonLek/ALPR-and-Identification-for-Indian-Vehicles | 2c6cd5d6d883e67ed17a8dbb96830f813c6ab55e | ["FTL", "Xnet", "X11"] | 7 | 2020-09-24T01:15:52.000Z | 2022-03-23T06:50:55.000Z |

content (indentation restored; commented-out lines kept verbatim):

```python
states = ['AP', 'AR', 'AS', 'BR', 'CG', 'GA',
          'GJ', 'HR', 'HP', 'JH', 'KA', 'KL',
          'MP', 'MH', 'MN', 'ML', 'MZ', 'NL',
          'OD', 'PB', 'RJ', 'SK', 'TN', 'TS',
          'TR', 'UP', 'UK', 'WB', 'AN', 'CH',
          'DD', 'DL', 'JK', 'LA', 'LD', 'PY']


def resultplate(plate):
    result = ""
    j = 0
    for character in plate:
        if character.isalnum():
            result += character
            if character.isdigit():
                j += 1
            else:
                j = 0
            if j == 4:
                break
    if j != 4:
        print('Couldn\'t extract number')
    else:
        while result[0:2] not in states and result != "":
            result = result[2:]
        if result == "":
            print('Couldn\'t Recognize Plate. Try with a different plate')
        else:
            return result


def preprocess(plate):
    plate = plate.replace('\n', '')
    plate = plate.replace('INDIA', '')
    plate = plate.replace('IND', '')
    plate = plate.replace('IN', '')
    return plate


def detect_text(path):
    """Detects text in the file."""
    from google.cloud import vision
    import io
    client = vision.ImageAnnotatorClient()
    with io.open(path, 'rb') as image_file:
        content = image_file.read()
    image = vision.types.Image(content=content)
    response = client.text_detection(image=image)
    texts = response.text_annotations
    #with open('results.txt', 'w', encoding='utf8') as f:
    #result=""
    #for text in texts:
    #    result+=text.description
    #    result+='\n"{}"'.format(text.description)
    #vertices = (['({},{})'.format(vertex.x, vertex.y)
    #             for vertex in text.bounding_poly.vertices])
    #result+='bounds: {}'.format(','.join(vertices))
    #f.write(result)
    plate = preprocess(texts[0].description)
    plate = resultplate(plate)
    print(plate)
    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))


detect_text('numberplate.jpg')
```

avg_line_length: 30.520548 | max_line_length: 75 | alphanum_fraction: 0.508528

qsc_*_quality_signal values (schema order): 249 | 2,228 | 4.522088 | 0.526104 | 0.044405 | 0.060391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006631 | 0.32316 | 2,228 | 72 | 76 | 30.944444 | 0.740053 | 0.153501 | 0 | 0.098039 | 0 | 0 | 0.112222 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.039216 | 0 | 0.137255 | 0.058824

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
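The plate post-processing in the record above can be traced by hand; two illustrative (made-up) inputs:

```python
print(resultplate(preprocess("IND MH12AB1234\n")))  # MH12AB1234  ('MH' is a valid state code)
print(resultplate("XX12AB1234"))  # prints "Couldn't Recognize Plate..." and returns None,
                                  # since no 2-char prefix matches the states list
```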
---

hexsha: a36a1929d767b48efa4751ceab577496580f2e66 | size: 667 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | setup.py | LehmRob/photorename | b499b08f225264e5c7be3b51988d8e8fcbeb088f | ["MIT"] | null | null | null |
| max_issues | setup.py | LehmRob/photorename | b499b08f225264e5c7be3b51988d8e8fcbeb088f | ["MIT"] | null | null | null |
| max_forks | setup.py | LehmRob/photorename | b499b08f225264e5c7be3b51988d8e8fcbeb088f | ["MIT"] | null | null | null |

content (indentation restored):

```python
#!/usr/bin/env python3

from setuptools import setup
from distutils.util import convert_path

main_ns = {}
vpath = convert_path('photorename/version.py')
with open(vpath) as vfile:
    exec(vfile.read(), main_ns)

setup(
    name='photorename',
    version=main_ns['__version__'],
    description='bulk rename photos in a dictionary',
    author='Robert Lehmann',
    author_email='lehmrob@posteo.net',
    url='https://github.com/lehmrob',
    packages=['photorename'],
    entry_points = {
        'console_scripts': ['phore=photorename.cli:main'],
    },
    install_requires=[
        'exif',
    ],
    test_suite='nose.collector',
    tests_require=['nose'],
)
```

avg_line_length: 23.821429 | max_line_length: 58 | alphanum_fraction: 0.667166

qsc_*_quality_signal values (schema order): 79 | 667 | 5.443038 | 0.746835 | 0.04186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001838 | 0.184408 | 667 | 27 | 59 | 24.703704 | 0.788603 | 0.031484 | 0 | 0 | 0 | 0 | 0.325581 | 0.074419 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.086957 | 0 | 0.086957 | 0

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
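One detail of this setup.py worth noting: the version is read without importing the package, by exec'ing version.py into a scratch namespace. A minimal sketch of the same trick in isolation (the version string shown is hypothetical):

```python
# photorename/version.py only needs a line like:  __version__ = "0.1.0"
main_ns = {}
with open('photorename/version.py') as vfile:
    exec(vfile.read(), main_ns)
print(main_ns['__version__'])  # the value setup() receives
```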
---

hexsha: a36e85cc522d69fee1eb9747d2afca83c85e094a | size: 1,643 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | src/ctc/protocols/curve_utils/cli/curve_pools_command.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | ["MIT"] | 94 | 2022-02-15T19:34:49.000Z | 2022-03-26T19:26:22.000Z |
| max_issues | src/ctc/protocols/curve_utils/cli/curve_pools_command.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | ["MIT"] | 7 | 2022-03-03T02:58:47.000Z | 2022-03-11T18:41:05.000Z |
| max_forks | src/ctc/protocols/curve_utils/cli/curve_pools_command.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | ["MIT"] | 7 | 2022-02-15T17:53:07.000Z | 2022-03-17T19:14:17.000Z |

content (indentation restored):

```python
from __future__ import annotations

import toolcli

from ctc.protocols import curve_utils


def get_command_spec() -> toolcli.CommandSpec:
    return {
        'f': async_curve_pools_command,
        'help': 'list curve pools',
        'args': [
            {
                'name': '--verbose',
                'help': 'include extra data',
                'action': 'store_true',
            },
        ],
    }


async def async_curve_pools_command(verbose: bool) -> None:
    import asyncio

    factories = [
        '0xB9fC157394Af804a3578134A6585C0dc9cc990d4',
        '0x90E00ACe148ca3b23Ac1bC8C240C2a7Dd9c2d7f5',
        '0x0959158b6040d32d04c301a72cbfd6b39e21c9ae',
        '0x8F942C20D02bEfc377D41445793068908E2250D0',
        '0xF18056Bbd320E96A48e3Fbf8bC061322531aac99',
    ]

    # get data from each factory
    coroutines = [
        curve_utils.async_get_factory_pool_data(factory, include_balances=False)
        for factory in factories
    ]
    factories_data = await asyncio.gather(*coroutines)
    items = [item for factory_data in factories_data for item in factory_data]

    # print as table
    completed = set()
    items = sorted(items, key=lambda item: ', '.join(item['symbols']))
    for item in items:
        if item['address'] in completed:
            continue
        else:
            completed.add(item['address'])
        if not verbose:
            skip = False
            for symbol in item['symbols']:
                if symbol.startswith('RC_'):
                    skip = True
            if skip:
                continue
        print(item['address'] + ' ' + ', '.join(item['symbols']))
```

avg_line_length: 26.934426 | max_line_length: 80 | alphanum_fraction: 0.593427

qsc_*_quality_signal values (schema order): 149 | 1,643 | 6.375839 | 0.469799 | 0.031579 | 0.031579 | 0.046316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.11827 | 0.310408 | 1,643 | 60 | 81 | 27.383333 | 0.720212 | 0.024954 | 0 | 0.044444 | 0 | 0 | 0.212008 | 0.131332 | 0 | 0 | 0.131332 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.088889 | 0.022222 | 0.133333 | 0.022222

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
---

hexsha: a3724c66e413effcdf21b1d39aedb643be084706 | size: 218 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | constants/db.py | sshikshu/app.cavill.in | 4e9472ea9640dad920f17d29b9625c8485022a5e | ["MIT"] | null | null | null |
| max_issues | constants/db.py | sshikshu/app.cavill.in | 4e9472ea9640dad920f17d29b9625c8485022a5e | ["MIT"] | null | null | null |
| max_forks | constants/db.py | sshikshu/app.cavill.in | 4e9472ea9640dad920f17d29b9625c8485022a5e | ["MIT"] | null | null | null |

content:

```python
"""
db constants
"""
DB_HOST = 'localhost'
DB_PORT = 28015

# Database is cavilling
DB_NAME = 'cavilling'
DB_TABLE_CAVILLS = 'cavills'
DB_TABLE_HAIRDOS = 'hairdos'
DB_TABLE_POLRUS = 'polrus'
DB_TABLE_USERS = 'users'
```

avg_line_length: 14.533333 | max_line_length: 28 | alphanum_fraction: 0.733945

qsc_*_quality_signal values (schema order): 30 | 218 | 4.966667 | 0.5 | 0.187919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026882 | 0.146789 | 218 | 14 | 29 | 15.571429 | 0.774194 | 0.16055 | 0 | 0 | 0 | 0 | 0.245714 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
---

hexsha: a37355a19aa8f440bb3300c6b512a843d8e672aa | size: 3,494 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | jdit/trainer/instances/fashionClassParallelTrainer.py | dingguanglei/jdit | ef878e696c9e2fad5069f106496289d4e4cc6154 | ["Apache-2.0"] | 28 | 2019-06-18T15:56:53.000Z | 2021-11-09T13:11:13.000Z |
| max_issues | jdit/trainer/instances/fashionClassParallelTrainer.py | dingguanglei/jdit | ef878e696c9e2fad5069f106496289d4e4cc6154 | ["Apache-2.0"] | 2 | 2018-10-24T01:09:56.000Z | 2018-11-08T07:13:48.000Z |
| max_forks | jdit/trainer/instances/fashionClassParallelTrainer.py | dingguanglei/jdit | ef878e696c9e2fad5069f106496289d4e4cc6154 | ["Apache-2.0"] | 8 | 2019-01-11T01:12:15.000Z | 2021-03-12T10:15:43.000Z |

content (indentation restored):

```python
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
from jdit.trainer.single.classification import ClassificationTrainer
from jdit.model import Model
from jdit.optimizer import Optimizer
from jdit.dataset import FashionMNIST
from jdit.parallel import SupParallelTrainer


class SimpleModel(nn.Module):
    def __init__(self, depth=64, num_class=10):
        super(SimpleModel, self).__init__()
        self.num_class = num_class
        self.layer1 = nn.Conv2d(1, depth, 3, 1, 1)
        self.layer2 = nn.Conv2d(depth, depth * 2, 4, 2, 1)
        self.layer3 = nn.Conv2d(depth * 2, depth * 4, 4, 2, 1)
        self.layer4 = nn.Conv2d(depth * 4, depth * 8, 4, 2, 1)
        self.layer5 = nn.Conv2d(depth * 8, num_class, 4, 1, 0)

    def forward(self, x):
        out = F.relu(self.layer1(x))
        out = F.relu(self.layer2(out))
        out = F.relu(self.layer3(out))
        out = F.relu(self.layer4(out))
        out = self.layer5(out)
        out = out.view(-1, self.num_class)
        return out


class FashionClassTrainer(ClassificationTrainer):
    def __init__(self, logdir, nepochs, gpu_ids, net, opt, dataset, num_class):
        super(FashionClassTrainer, self).__init__(logdir, nepochs, gpu_ids, net, opt, dataset, num_class)

    def compute_loss(self):
        var_dic = {}
        var_dic["CEP"] = loss = nn.CrossEntropyLoss()(self.output, self.ground_truth.squeeze().long())
        _, predict = torch.max(self.output.detach(), 1)  # 0100=>1 0010=>2
        total = predict.size(0) * 1.0
        labels = self.ground_truth.squeeze().long()
        correct = predict.eq(labels).cpu().sum().float()
        acc = correct / total
        var_dic["ACC"] = acc
        return loss, var_dic

    def compute_valid(self):
        _, var_dic = self.compute_loss()
        return var_dic


def build_task_trainer(unfixed_params):
    """build a task just like FashionClassTrainer.

    :param unfixed_params:
    :return:
    """
    logdir = unfixed_params['logdir']
    gpu_ids_abs = unfixed_params["gpu_ids_abs"]
    depth = unfixed_params["depth"]
    lr = unfixed_params["lr"]
    batch_size = 32
    opt_name = "RMSprop"
    lr_decay = 0.94
    decay_position = 1
    position_type = "epoch"
    weight_decay = 2e-5
    momentum = 0
    nepochs = 100
    num_class = 10
    torch.backends.cudnn.benchmark = True
    mnist = FashionMNIST(root="datasets/fashion_data", batch_size=batch_size, num_workers=2)
    net = Model(SimpleModel(depth), gpu_ids_abs=gpu_ids_abs, init_method="kaiming", verbose=False)
    opt = Optimizer(net.parameters(), opt_name, lr_decay, decay_position, position_type=position_type,
                    lr=lr, weight_decay=weight_decay, momentum=momentum)
    Trainer = FashionClassTrainer(logdir, nepochs, gpu_ids_abs, net, opt, mnist, num_class)
    return Trainer


def trainerParallel():
    unfixed_params = [
        {'task_id': 1, 'gpu_ids_abs': [],
         'depth': 4, 'lr': 1e-3,
         },
        {'task_id': 1, 'gpu_ids_abs': [],
         'depth': 8, 'lr': 1e-2,
         },
        {'task_id': 2, 'gpu_ids_abs': [],
         'depth': 4, 'lr': 1e-2,
         },
        {'task_id': 2, 'gpu_ids_abs': [],
         'depth': 8, 'lr': 1e-3,
         },
    ]
    tp = SupParallelTrainer(unfixed_params, build_task_trainer)
    return tp


def start_fashionClassPrarallelTrainer(run_type="debug"):
    tp = trainerParallel()
    tp.train()


if __name__ == '__main__':
    start_fashionClassPrarallelTrainer()
```

avg_line_length: 32.351852 | max_line_length: 105 | alphanum_fraction: 0.634516

qsc_*_quality_signal values (schema order): 461 | 3,494 | 4.585683 | 0.286334 | 0.03122 | 0.038316 | 0.033113 | 0.139073 | 0.087985 | 0.087985 | 0.062441 | 0.062441 | 0.024598 | 0 | 0.032934 | 0.23526 | 3,494 | 107 | 106 | 32.654206 | 0.758234 | 0.030624 | 0 | 0.047619 | 0 | 0 | 0.054351 | 0.006237 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.095238 | 0 | 0.27381 | 0

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
---

hexsha: a37644a1e11006bb540b7235f3216f75efbca584 | size: 5,711 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | movies_modeling.py | amotter443/movies | ae375d19befb8133c014199dc1bf1ae728fd0147 | ["MIT"] | 1 | 2022-01-13T21:46:40.000Z | 2022-01-13T21:46:40.000Z |
| max_issues | movies_modeling.py | amotter443/movies | ae375d19befb8133c014199dc1bf1ae728fd0147 | ["MIT"] | null | null | null |
| max_forks | movies_modeling.py | amotter443/movies | ae375d19befb8133c014199dc1bf1ae728fd0147 | ["MIT"] | null | null | null |

content (indentation restored):

```python
#Initialize packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import sklearn.model_selection as model_selection
from sklearn import linear_model
import sklearn.metrics as metrics
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.inspection import permutation_importance
from sklearn.feature_selection import RFE
from sklearn.impute import KNNImputer
import warnings

#Read in data
df = pd.read_csv(r'\movie_data_final.csv')
#If revenue is less than $5000 set to NA
df.loc[df['revenue'] <= 5000,'revenue'] = np.nan
#Impute missing reveneue using KNN (ignoring date and name columns)
imputer = KNNImputer(n_neighbors=2)
df.iloc[: , 2:] = imputer.fit_transform(df.iloc[: , 2:])
#Drop columns that cause problems with the modeling aspect
df=df.drop(['Logged_Date','Name','Logged_Year'], axis=1)

######################## Transformations ########################
#Plot correlation matrix
corrMatrix = df.corr()
plt.subplots(figsize=(20,15))
sns_plot = sns.heatmap(corrMatrix,cmap="RdBu",annot=True)
fig = sns_plot.get_figure()
fig.savefig("jupyter_heatmap.png")
#Scale non-boolean features
df[['Year','popularity','vote_average','vote_count','revenue','runtime','Rating','Logged_DOW','Logged_Month','Logged_Week','Daily_Movie_Count','Weekly_Movie_Count']] = StandardScaler().fit_transform(df[['Year','popularity','vote_average','vote_count','revenue','runtime','Rating','Logged_DOW','Logged_Month','Logged_Week','Daily_Movie_Count','Weekly_Movie_Count']])
#Plot potenitally problematic features
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, sharey=True,figsize=(14,5))
sns.scatterplot(data=df,x="movie_sentiment",y="revenue",ax=ax1)
sns.scatterplot(data=df,x="runtime",y="revenue",ax=ax2)
sns.scatterplot(data=df,x="popularity",y="revenue",ax=ax3);
#Remove outliers and replace with mean
replace = df['runtime'].mean()
df.loc[df['runtime'] >= 2,'runtime'] = np.nan
df['runtime'] = np.where(df['runtime'].isna(),replace,df['runtime'])
#Same process but with popularity
replace = df['popularity'].mean()
df.loc[df['popularity'] >= 2,'popularity'] = np.nan
df['popularity'] = np.where(df['popularity'].isna(),replace,df['popularity'])
#Transform problematic columns
df['movie_sentiment'] = df['movie_sentiment']**(1./3.)
#Recode bad values to mean
df.replace([np.inf, -np.inf], np.nan, inplace=True)
replace = df['movie_sentiment'].mean()
df['movie_sentiment'] = np.where(df['movie_sentiment'].isna(),replace,df['movie_sentiment'])
#Plot again to see change in features after transformation
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, sharey=True,figsize=(14,5))
sns.scatterplot(data=df,x="movie_sentiment",y="revenue",ax=ax1)
sns.scatterplot(data=df,x="runtime",y="revenue",ax=ax2)
sns.scatterplot(data=df,x="popularity",y="revenue",ax=ax3);

############ Research Question: Which factors impact revenue the most? ############
#Train Test Split
X=df.drop('revenue', axis=1)
y=df[['revenue']]
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y,test_size=0.3, random_state=24)

###### 1.1 OLS ######
lm = linear_model.LinearRegression()
lm.fit(X_train, y_train)
ols_fitted = lm.predict(X_test)
#Calculate R Squared
print("OLS R Squared: %s" % round(metrics.r2_score(y_test, ols_fitted),2))

###### 1.2 Elastic Net ######
search=model_selection.GridSearchCV(estimator=linear_model.ElasticNet(),param_grid={'alpha':np.logspace(-5,2,8),'l1_ratio':[.2,.4,.6,.8]},scoring='neg_mean_squared_error',n_jobs=1,refit=True,cv=10)
search.fit(X_train,y_train)
print(search.best_params_)
enet=linear_model.ElasticNet(normalize=True,alpha=0.001,l1_ratio=0.8)
enet.fit(X_train, y_train)
enet_fitted = enet.predict(X_test)
#Calculate R Squared
print("Elastic Net R Squared: %s" % round(metrics.r2_score(y_test, enet_fitted),2))

###### 1.3 RF ######
warnings.simplefilter("ignore")
nof_list=np.arange(1,37)
high_score=0
nof=0
score_list =[]
#Variable to store the optimum features
for n in range(len(nof_list)):
    X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y,test_size=0.3, random_state=24)
    model = linear_model.LinearRegression()
    rfe = RFE(model,nof_list[n])
    X_train_rfe = rfe.fit_transform(X_train,y_train)
    X_test_rfe = rfe.transform(X_test)
    model.fit(X_train_rfe,y_train)
    score = model.score(X_test_rfe,y_test)
    score_list.append(score)
    if(score>high_score):
        high_score = score
        nof = nof_list[n]
print("Optimum number of features: %d" %nof)
print("Score with %d features: %f" % (nof, high_score))
#Optimum number of features: 35
#Score with 35 features: 0.645497
rf = RandomForestRegressor(max_features = 35, n_estimators=100)
rf.fit(X_train, y_train)
rf_fitted = rf.predict(X_test)
#Generate Feature Importance
rev_importance = {}  # a dict to hold feature_name: feature_importance
for feature, importance in zip(X_train.columns, rf.feature_importances_):
    rev_importance[feature] = importance  #add the name/value pair
rev_importance = pd.DataFrame.from_dict(rev_importance, orient='index').rename(columns={0: 'Revenue_Importance'})
#Calculate R Squared
print("RF R Squared: %s" % round(metrics.r2_score(y_test, rf_fitted),2))

################### Feature Importance ###################
#Plot Feature Importance table
print(rev_importance.sort_values(by='Revenue_Importance', ascending=False))
#Plot as bar chart
rev_importance.sort_values(by='Revenue_Importance', ascending=False).plot(kind='bar', rot=45)
```

avg_line_length: 38.073333 | max_line_length: 366 | alphanum_fraction: 0.711959

qsc_*_quality_signal values (schema order): 845 | 5,711 | 4.64497 | 0.294675 | 0.015287 | 0.027516 | 0.030573 | 0.278981 | 0.263694 | 0.263694 | 0.246369 | 0.246369 | 0.221147 | 0 | 0.020563 | 0.122921 | 5,711 | 149 | 367 | 38.328859 | 0.763027 | 0.160392 | 0 | 0.117647 | 0 | 0 | 0.192402 | 0.009609 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.223529 | 0 | 0.223529 | 0.082353

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
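One portability note on the RFE loop in the record above: `RFE(model, nof_list[n])` passes the number of retained features positionally, which newer scikit-learn releases reject; the keyword form is equivalent. A hedged sketch:

```python
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression

# Equivalent to RFE(model, nof_list[n]) above, but explicit: in recent
# scikit-learn versions n_features_to_select must be passed by keyword.
rfe = RFE(estimator=LinearRegression(), n_features_to_select=10)
```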
---

hexsha: a3767371ed8f0cd8ffdd0f52e641dd47e92c68df | size: 1,287 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | Python/142.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | ["MIT"] | null | null | null |
| max_issues | Python/142.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | ["MIT"] | 1 | 2018-04-16T21:01:50.000Z | 2018-04-16T21:01:50.000Z |
| max_forks | Python/142.py | jaimeliew1/Project_Euler_Solutions | 963c9c6d6571cade8f87341f97a6a2cd1af202bb | ["MIT"] | null | null | null |

content (indentation restored):

```python
# -*- coding: utf-8 -*-
"""
Solution to Project Euler problem 142 - Perfect Square Collection
Author: Jaime Liew
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
from itertools import combinations
import numpy as np


def run():
    N = 1000000
    candids = {}
    # generate all pairs of squares which differ by an even number. record the
    # midpoint and the distance from the midpoint. These are the candidates for
    # squares which satisfy both (x+y) and (x-y).
    for i in range(1, int(np.sqrt(N))):
        for j in range(i + 1, int(np.sqrt(N))):
            diff_squares = j ** 2 - i ** 2
            if diff_squares % 2 == 0:
                midpoint = (j ** 2 + i ** 2) // 2
                d = diff_squares // 2
                if midpoint not in candids.keys():
                    candids[midpoint] = [d]
                else:
                    candids[midpoint].append(d)

    best_xyz = 1e20
    for x, v in candids.items():
        if len(v) == 1:
            continue
        for y, z in combinations(v, 2):
            if z > y:
                z, y = y, z
            if y in candids.keys():
                if z in candids[y]:
                    best_xyz = min(best_xyz, x + y + z)

    return best_xyz


if __name__ == "__main__":
    print(run())
```

avg_line_length: 28.6 | max_line_length: 79 | alphanum_fraction: 0.529915

qsc_*_quality_signal values (schema order): 177 | 1,287 | 3.757062 | 0.480226 | 0.054135 | 0.018045 | 0.030075 | 0.033083 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032767 | 0.359751 | 1,287 | 44 | 80 | 29.25 | 0.774272 | 0.274281 | 0 | 0 | 0 | 0 | 0.008667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.071429 | 0 | 0.142857 | 0.035714

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
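The identity behind the `candids` construction in the solution above: if $a > b$ have the same parity, the midpoint and half-difference of their squares are integers and recover both squares,

$$m = \frac{a^2 + b^2}{2}, \qquad d = \frac{a^2 - b^2}{2}, \qquad m + d = a^2, \quad m - d = b^2.$$

So an entry $d$ in `candids[m]` certifies that $m \pm d$ are both perfect squares; picking $y, z \in$ `candids[x]` with $y$ a key of `candids` and $z \in$ `candids[y]` certifies all six of $x \pm y$, $x \pm z$, $y \pm z$, which is exactly the problem's condition.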
---

hexsha: a37b1669512f165099c1e03b767ae6863a2fb2c7 | size: 7,754 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | csvdiff.py | bugph0bia/PyCsvDiff | 57d438d50e758e13834affd8d0e46f8a7bfe0ebe | ["MIT"] | null | null | null |
| max_issues | csvdiff.py | bugph0bia/PyCsvDiff | 57d438d50e758e13834affd8d0e46f8a7bfe0ebe | ["MIT"] | null | null | null |
| max_forks | csvdiff.py | bugph0bia/PyCsvDiff | 57d438d50e758e13834affd8d0e46f8a7bfe0ebe | ["MIT"] | null | null | null |

content (indentation restored; note the `list[list[str]]` annotations require Python 3.9+):

```python
import sys
import csv
import json
import argparse
from collections import namedtuple


# diff info
DiffInfo = namedtuple('DiffInfo', [
    'mark',     # diff kind (!, -, +)
    'address',  # row/column addresses of diff
    'keyname',  # row/column key names of diff
    'value',    # values of diff
])


def main():
    """main"""
    parser = argparse.ArgumentParser(description='Output the difference between two CSV files.')
    parser.add_argument('csv1', help='1st CSV file.')
    parser.add_argument('csv2', help='2nd CSV file.')
    parser.add_argument('-e', '--encoding', default='utf-8', help='Encoding for CSV files. (default: utf-8)')
    parser.add_argument('-p', '--primary-key', type=int, default=1, help='Column number as primary key. (range: 1-N, default: 1)')
    parser.add_argument('-t', '--has-title', action='store_true', help='Treat the first line as a header.')
    parser.add_argument('-f', '--format', default='normal', help='Set format. (normal, json)')
    parser.add_argument('--excel-style', action='store_true', help='Print addresses excel A1 style.')
    parser.add_argument('--hide-address', action='store_true', help='Do not print row/column addresses.')
    parser.add_argument('--hide-keyname', action='store_true', help='Do not print row/column key names.')
    parser.add_argument('--hide-value', action='store_true', help='Do not print difference values.')
    args = parser.parse_args()

    # read csv
    csv1, header1 = read_csv(args.csv1, args.encoding, args.has_title)
    csv2, header2 = read_csv(args.csv2, args.encoding, args.has_title)

    # check column count
    if len(header1) != len(header2):
        print(f'error: different column count in CSV files. (csv1:{len(header1)}, csv2:{len(header2)})', file=sys.stderr)
        return

    # check primary key value
    if not (0 < args.primary_key <= len(header1)):
        print(f'error: primary key invalid. (primary key:{args.primary_key}, column count:{len(header1)})', file=sys.stderr)
        return

    # correct column number to start with 0
    primary_key = args.primary_key - 1

    # sort by primary key
    csv1.sort(key=lambda x: x[primary_key])
    csv2.sort(key=lambda x: x[primary_key])

    # get diff info
    diffs = diff_csv(csv1, header1, csv2, header2, primary_key, args.excel_style)

    # print result
    if args.format.lower() == 'json':
        print(json.dumps([d._asdict() for d in diffs]))
    else:
        print_diffs(diffs, args.hide_address, args.hide_keyname, args.hide_value)


def read_csv(fname: str, encoding: str, has_header: bool):
    """Read CSV file

    Args:
        fname (str): CSV file.
        encoding (str): encoding for CSV File.
        has_header (bool): if first row is header then True, else False.

    Returns:
        tuple[list[list[str]], list[str]]: Tuple of CSV data and CSV header.
    """
    with open(fname, 'r', encoding=encoding) as f:
        csvdata = list(csv.reader(f))

    # Match the column count to their max
    max_colmuns = max(map(lambda x: len(x), csvdata))
    for row in csvdata:
        row.extend([''] * (max_colmuns - len(row)))

    # get header row
    if has_header:
        header = csvdata[0]
        csvdata = csvdata[1:]
    else:
        header = [''] * len(csvdata[0])

    return csvdata, header


def diff_csv(csv1: list[list[str]], header1: list[str],
             csv2: list[list[str]], header2: list[str],
             primary_key: int, excel_style: bool):
    """Diff CSV files.

    Args:
        csv1 (list[list[str]]): 1st CSV data.
        header1 (list[str]): 1st CSV header.
        csv2 (list[list[str]]): 2nd CSV data.
        header2 (list[str]): 2nd CSV header.
        primary_key (int): column number of primary key.
        excel_style (bool): excel A1 style.

    Returns:
        list[DiffInfo]: list of diff infos.
    """
    diffs = []
    ri1 = ri2 = 0
    while True:
        # get target row
        row1 = csv1[ri1] if len(csv1) > ri1 else None
        row2 = csv2[ri2] if len(csv2) > ri2 else None

        # get primary key of target row
        pkey1 = row1[primary_key] if row1 else None
        pkey2 = row2[primary_key] if row2 else None

        # exit when both CSV data is terminated
        if row1 is None and pkey2 is None:
            break

        # remaining lines of csv2, if csv1 is terminated
        # (== the row in csv2 only)
        elif pkey1 is None:
            diffs.append(DiffInfo(
                mark='+',
                address=make_row_address(ri2, excel_style),
                keyname='',
                value=','.join(row2),
            ))
            ri2 += 1

        # remaining lines of csv1, if csv2 is terminated
        # (== the row in csv1 only)
        elif pkey2 is None:
            diffs.append(DiffInfo(
                mark='-',
                address=make_row_address(ri1, excel_style),
                keyname='',
                value=','.join(row1),
            ))
            ri1 += 1

        # the row in csv2 only
        elif pkey1 > pkey2:
            diffs.append(DiffInfo(
                mark='+',
                address=make_row_address(ri2, excel_style),
                keyname='',
                value=','.join(row2),
            ))
            ri2 += 1

        # the row in csv1 only
        elif pkey1 < pkey2:
            diffs.append(DiffInfo(
                mark='-',
                address=make_row_address(ri1, excel_style),
                keyname='',
                value=','.join(row1),
            ))
            ri1 += 1

        # the row in both files
        else:  # pkey1 == pkey2
            for ci, (v1, v2) in enumerate(zip(row1, row2)):
                if v1 != v2:
                    diffs.append(DiffInfo(
                        mark='!',
                        address=make_cell_address(ri1, ri2, ci, excel_style),
                        keyname=f'{pkey1},{header1[ci]}',
                        value=f'{v1} | {v2}',
                    ))
            ri1 += 1
            ri2 += 1

    return diffs


def a1_address(ri, ci):
    """Make Excel A1 style address from row/column address."""
    CHR_A = 65  # ascii code of 'A'
    ALNUM = 26  # number of alphabet
    if ci >= ALNUM:
        return chr(CHR_A + (ci // ALNUM)) + chr(CHR_A + (ci % ALNUM)) + str(ri+1)
    else:
        return chr(CHR_A + (ci % ALNUM)) + str(ri+1)


def make_row_address(ri, excel_style):
    """Make row address for print."""
    if excel_style:
        return f'{ri+1}:{ri+1}'
    else:
        return f'R{ri+1}'


def make_cell_address(ri1, ri2, ci, excel_style):
    """Make cell addresses for print."""
    if excel_style:
        return f'{a1_address(ri1, ci)} | {a1_address(ri2, ci)}'
    else:
        return f'R{ri1+1},C{ci+1} | R{ri2+1},C{ci+1}'


def print_diffs(diffs, hide_address, hide_keyname, hide_value):
    """Print diffs.

    Args:
        diffs (list[DiffInfo]): list of diff infos.
        hide_address (bool): if true then do not print addresses.
        hide_keyname (bool): if true then do not print key names.
        hide_value (bool): if true then do not print values.
    """
    for diff in diffs:
        pstr = f'{diff.mark} '
        if not hide_address and diff.address:
            pstr += f'[{diff.address}] '
        if not hide_keyname and diff.keyname:
            pstr += f'[{diff.keyname}] '
        if not hide_value and diff.value:
            pstr += f'> {diff.value}'
        print(pstr)
    print(f'(diff count: {len(diffs)})')


if __name__ == '__main__':
    main()
```

avg_line_length: 33.5671 | max_line_length: 131 | alphanum_fraction: 0.554037

qsc_*_quality_signal values (schema order): 995 | 7,754 | 4.222111 | 0.171859 | 0.045227 | 0.040467 | 0.022614 | 0.265175 | 0.225184 | 0.191383 | 0.135682 | 0.11045 | 0.092359 | 0 | 0.026669 | 0.318158 | 7,754 | 230 | 132 | 33.713043 | 0.767921 | 0.203379 | 0 | 0.304348 | 0 | 0.021739 | 0.167703 | 0.014853 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050725 | false | 0 | 0.036232 | 0 | 0.15942 | 0.072464

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
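The diff core above can be exercised without the CLI; a small made-up pair of tables, keyed on column 0:

```python
header = ['id', 'name']
old = [['1', 'alice'], ['2', 'bob']]
new = [['1', 'alice'], ['2', 'bobby'], ['3', 'carol']]
for d in diff_csv(old, header, new, header, 0, excel_style=False):
    print(d.mark, d.address, d.keyname, d.value)
# ! R2,C2 | R2,C2 2,name bob | bobby    (changed cell)
# + R3  3,carol                         (row only in the 2nd file)
```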
---

hexsha: a37e9163f756c5b933aa7522cfc07f57edae5c1e | size: 3,431 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | setup.py | michael-borisov/django-omnibus | 3275ae41dcad5a140433f0bfcea5961dc837e913 | ["BSD-3-Clause"] | null | null | null |
| max_issues | setup.py | michael-borisov/django-omnibus | 3275ae41dcad5a140433f0bfcea5961dc837e913 | ["BSD-3-Clause"] | 4 | 2020-08-19T08:39:55.000Z | 2021-03-31T08:23:26.000Z |
| max_forks | setup.py | radiosilence/django-omnibus | c31337306c601e75fbdac9d6b9b62dcc980e04f5 | ["BSD-3-Clause"] | null | null | null |

content (indentation restored):

```python
import codecs
import os
import sys

from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand


def read(*parts):
    filename = os.path.join(os.path.dirname(__file__), *parts)
    with codecs.open(filename, encoding='utf-8') as fp:
        return fp.read()


test_requires = [
    'pytest>=2.5.2',
    'pytest-cov>=1.6',
    'pytest-flakes>=0.2',
    'pytest-pep8>=1.0.5',
    'pytest-django>=2.6',
    'mock==1.0.1',
    'pep8==1.4.6'
]

install_requires = [
    'Django>=1.4',
    'pyzmq==14.1.1',
    'tornado==3.1.1',
    'sockjs-tornado>=1.0.0',
]

dev_requires = [
    'tox',
]

docs_requires = [
    'sphinx',
    'sphinx_rtd_theme'
]


class PyTest(TestCommand):
    user_options = [('cov=', None, 'Run coverage'),
                    ('cov-xml=', None, 'Generate junit xml report'),
                    ('cov-html=', None, 'Generate junit html report'),
                    ('junitxml=', None, 'Generate xml of test results'),
                    ('clearcache', None, 'Clear cache first')]

    boolean_options = ['clearcache']

    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.cov = None
        self.cov_xml = False
        self.cov_html = False
        self.junitxml = None
        self.clearcache = False

    def run_tests(self):
        import pytest
        params = {'args': self.test_args}
        if self.cov is not None:
            params['plugins'] = ['cov']
            params['args'].extend(['--cov', self.cov, '--cov-report', 'term-missing'])
            if self.cov_xml:
                params['args'].extend(['--cov-report', 'xml'])
            if self.cov_html:
                params['args'].extend(['--cov-report', 'html'])
        if self.junitxml is not None:
            params['args'].extend(['--junitxml', self.junitxml])
        if self.clearcache:
            params['args'].extend(['--clearcache'])
        self.test_suite = True
        errno = pytest.main(**params)
        sys.exit(errno)


setup(
    name='django-omnibus',
    version='0.1.0',
    description='Django/JavaScript WebSocket Connections',
    long_description=read('README.md'),
    author='Stephan Jaekel, Norman Rusch',
    author_email='info@moccu.com',
    url='https://github.com/moccu/django-omnibus/',
    packages=find_packages(exclude=[
        'testing',
        'testing.pytests',
        'examples',
    ]),
    include_package_data=True,
    extras_require={
        'docs': docs_requires,
        'tests': test_requires,
        'dev': dev_requires,
    },
    test_suite='.',
    install_requires=install_requires,
    cmdclass={'test': PyTest},
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Programming Language :: Python :: Implementation :: CPython',
        'Framework :: Django',
    ],
    zip_safe=False,
)
```

avg_line_length: 27.669355 | max_line_length: 86 | alphanum_fraction: 0.575051

qsc_*_quality_signal values (schema order): 378 | 3,431 | 5.126984 | 0.391534 | 0.088235 | 0.116099 | 0.029412 | 0.053664 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018733 | 0.268726 | 3,431 | 123 | 87 | 27.894309 | 0.753687 | 0 | 0 | 0 | 0 | 0 | 0.352084 | 0.006121 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029126 | false | 0 | 0.058252 | 0 | 0.126214 | 0

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
---

hexsha: a37f43b419e3def4e72bb772a8952c0f709cee66 | size: 1,823 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | config.py | greatwallet/cosypose | e72ce7d521ef61870daef267cbbe65aaebe9d24d | ["MIT"] | null | null | null |
| max_issues | config.py | greatwallet/cosypose | e72ce7d521ef61870daef267cbbe65aaebe9d24d | ["MIT"] | null | null | null |
| max_forks | config.py | greatwallet/cosypose | e72ce7d521ef61870daef267cbbe65aaebe9d24d | ["MIT"] | null | null | null |

content (indentation restored):

```python
import cosypose
import os
import yaml
from joblib import Memory
from pathlib import Path
import getpass
import socket
import torch.multiprocessing

torch.multiprocessing.set_sharing_strategy('file_system')

hostname = socket.gethostname()
username = getpass.getuser()

PROJECT_ROOT = Path(cosypose.__file__).parent.parent
PROJECT_DIR = PROJECT_ROOT
DATA_DIR = PROJECT_DIR / 'data'
LOCAL_DATA_DIR = PROJECT_DIR / 'local_data'
TEST_DATA_DIR = LOCAL_DATA_DIR
DASK_LOGS_DIR = LOCAL_DATA_DIR / 'dasklogs'
SYNT_DS_DIR = LOCAL_DATA_DIR / 'synt_datasets'
BOP_DS_DIR = LOCAL_DATA_DIR / 'bop_datasets'
BOP_TOOLKIT_DIR = PROJECT_DIR / 'deps' / 'bop_toolkit_cosypose'
BOP_CHALLENGE_TOOLKIT_DIR = PROJECT_DIR / 'deps' / 'bop_toolkit_challenge'
EXP_DIR = LOCAL_DATA_DIR / 'experiments'
RESULTS_DIR = LOCAL_DATA_DIR / 'results'
DEBUG_DATA_DIR = LOCAL_DATA_DIR / 'debug_data'
DEPS_DIR = PROJECT_DIR / 'deps'
CACHE_DIR = LOCAL_DATA_DIR / 'joblib_cache'

assert LOCAL_DATA_DIR.exists()
CACHE_DIR.mkdir(exist_ok=True)
TEST_DATA_DIR.mkdir(exist_ok=True)
DASK_LOGS_DIR.mkdir(exist_ok=True)
SYNT_DS_DIR.mkdir(exist_ok=True)
RESULTS_DIR.mkdir(exist_ok=True)
DEBUG_DATA_DIR.mkdir(exist_ok=True)

ASSET_DIR = DATA_DIR / 'assets'
MEMORY = Memory(CACHE_DIR, verbose=2)

CONDA_PREFIX = os.environ['CONDA_PREFIX']
if 'CONDA_PREFIX_1' in os.environ:
    CONDA_BASE_DIR = os.environ['CONDA_PREFIX_1']
    CONDA_ENV = os.environ['CONDA_DEFAULT_ENV']
else:
    CONDA_BASE_DIR = os.environ['CONDA_PREFIX']
    CONDA_ENV = 'base'

cfg = yaml.load((PROJECT_DIR / 'config_yann.yaml').read_text(), Loader=yaml.FullLoader)
SLURM_GPU_QUEUE = cfg['slurm_gpu_queue']
SLURM_QOS = cfg['slurm_qos']
DASK_NETWORK_INTERFACE = cfg['dask_network_interface']

# Kwai path
KWAI_PATH = "/data2/cxt/kwai/IMG_3486"
```

avg_line_length: 30.383333 | max_line_length: 88 | alphanum_fraction: 0.765222

qsc_*_quality_signal values (schema order): 273 | 1,823 | 4.695971 | 0.304029 | 0.087363 | 0.093604 | 0.093604 | 0.25429 | 0.138846 | 0.102964 | 0 | 0 | 0 | 0 | 0.005067 | 0.133845 | 1,823 | 60 | 89 | 30.383333 | 0.80684 | 0.004937 | 0 | 0 | 0 | 0 | 0.18016 | 0.038198 | 0 | 0 | 0 | 0 | 0.021277 | 1 | 0 | false | 0.042553 | 0.170213 | 0 | 0.170213 | 0

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
---

hexsha: a38463fb4d443f7e3aa2457876c06216a04ae227 | size: 1,010 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | tests/test_nodes.py | simonzabrocki/GraphModels | b43e44a189d663364ae08de9a1d1305320854d63 | ["MIT"] | null | null | null |
| max_issues | tests/test_nodes.py | simonzabrocki/GraphModels | b43e44a189d663364ae08de9a1d1305320854d63 | ["MIT"] | null | null | null |
| max_forks | tests/test_nodes.py | simonzabrocki/GraphModels | b43e44a189d663364ae08de9a1d1305320854d63 | ["MIT"] | null | null | null |

content (indentation restored):

```python
import pytest
from GraphModels.models.Sarah.model_agricultural_water import AgriculturalWaterNodes
from GraphModels.models.Sarah.model_freshwater_available import FreshwaterAvailableNodes
from GraphModels.models.Sarah.model_municipal_water import MunicipalWaterNodes

nodes_list = AgriculturalWaterNodes + FreshwaterAvailableNodes + MunicipalWaterNodes
computationnal_nodes = [node for node in nodes_list if 'computation' in node.keys()]


@pytest.mark.parametrize(('node'), nodes_list)
def test_node_minimal_keys(node):
    assert set(['type', 'unit', 'id', 'name']) <= set(node.keys())


@pytest.mark.parametrize(('node'), computationnal_nodes)
def test_node_computationnal(node):
    assert set(['formula', 'name']) == set(node['computation'].keys())


def test_inputs_computation():
    inputs_computation = [val for sublist in [node['in'] for node in nodes_list if 'in' in node] for val in sublist]
    node_ids = [node['id'] for node in nodes_list]
    assert set(inputs_computation) <= set(node_ids)
```

avg_line_length: 37.407407 | max_line_length: 116 | alphanum_fraction: 0.773267

qsc_*_quality_signal values (schema order): 128 | 1,010 | 5.914063 | 0.3125 | 0.059445 | 0.083223 | 0.103038 | 0.286658 | 0.140026 | 0 | 0 | 0 | 0 | 0 | 0 | 0.112871 | 1,010 | 26 | 117 | 38.846154 | 0.844866 | 0 | 0 | 0 | 0 | 0 | 0.060456 | 0 | 0 | 0 | 0 | 0 | 0.1875 | 1 | 0.1875 | false | 0 | 0.25 | 0 | 0.4375 | 0

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
---

hexsha: a3859a2bc6f5180117d2aa59a1b851252ca8c8a5 | size: 1,350 | ext: py | lang: Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | backend/telegram/methods/messages_and_media/get_updated_message_entity_types.py | appheap/social-media-analyzer | 0f9da098bfb0b4f9eb38e0244aa3a168cf97d51c | ["Apache-2.0"] | 5 | 2021-09-11T22:01:15.000Z | 2022-03-16T21:33:42.000Z |
| max_issues | backend/telegram/methods/messages_and_media/get_updated_message_entity_types.py | iamatlasss/social-media-analyzer | 429d1d2bbd8bfce80c50c5f8edda58f87ace668d | ["Apache-2.0"] | null | null | null |
| max_forks | backend/telegram/methods/messages_and_media/get_updated_message_entity_types.py | iamatlasss/social-media-analyzer | 429d1d2bbd8bfce80c50c5f8edda58f87ace668d | ["Apache-2.0"] | 3 | 2022-01-18T11:06:22.000Z | 2022-02-26T13:39:28.000Z |

content (indentation restored):

```python
from django.db import transaction

from db.scaffold import Scaffold
from typing import List

from telegram import models as tg_models
from pyrogram import types


class GetUpdatedMessageEntityTypes(Scaffold):
    def get_updated_message_entity_types(
            self,
            *,
            db_message: 'tg_models.Message',
            raw_message: 'types.Message'
    ) -> List['tg_models.EntityType']:
        if db_message is None or raw_message is None:
            return None

        if raw_message.type == 'message' and raw_message.content.entities:
            entity_types = set()
            entities = raw_message.content.entities
            for entity in entities:
                entity_types.add(entity.type)

            if len(entity_types):
                db_entity_types = []
                with transaction.atomic():
                    for raw_entity in entities:
                        db_entity_types.append(
                            self.tg_models.EntityType.objects.update_or_create_from_raw(
                                raw_entity=raw_entity,
                                db_message=db_message,
                            )
                        )
                db_entity_types = list(filter(lambda obj: obj is not None, db_entity_types))
                return db_entity_types
        return None
```

avg_line_length: 32.926829 | max_line_length: 92 | alphanum_fraction: 0.565926

qsc_*_quality_signal values (schema order): 144 | 1,350 | 5.048611 | 0.333333 | 0.136176 | 0.089409 | 0.068776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.376296 | 1,350 | 40 | 93 | 33.75 | 0.86342 | 0 | 0 | 0.0625 | 0 | 0 | 0.042222 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.15625 | 0 | 0.3125 | 0

unsuffixed qsc_* columns: all 0 (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
a389bd7328bfeb9809c940787f3815d94a0c7bd6
| 2,783
|
py
|
Python
|
commands.py
|
abcxyz618/MovieGeek
|
06029ed4202c63d3da4e306eb5d500ab81f2e1cb
|
[
"MIT"
] | null | null | null |
commands.py
|
abcxyz618/MovieGeek
|
06029ed4202c63d3da4e306eb5d500ab81f2e1cb
|
[
"MIT"
] | null | null | null |
commands.py
|
abcxyz618/MovieGeek
|
06029ed4202c63d3da4e306eb5d500ab81f2e1cb
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
from omdb_api import *
from tmdb_api import *
class Commands(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def movie(self, ctx, *, in_str=""): # input format: *movie {movie name} / {year}
if in_str == "":
return await self.handle_empty(ctx)
title, index = self.process_input(in_str)
if index.isdigit() is False:
ctx.send(":warning: Invalid input")
return
try:
released, title, runtime, genres, t_bo, overview, poster_url, \
comp_url, trailer_url, color, year = tmdb_search(title, int(index) - 1)
release, rated, director_str, actor_str, d_bo, awards, ratings = omdb_search(title, year)
except TypeError:
await ctx.send(":x: No movie found!")
return
except IndexError:
await ctx.send(":x: Invalid index!")
return
except:
await ctx.send(":x: Unidentified Error. Please relay error to I'm Peter #1327")
return
e = discord.Embed(color=discord.Color.from_rgb(color[0], color[1], color[2]), title=title)
e.add_field(name="Released", value=release, inline=True)
e.add_field(name="Duration", value=runtime if released else 'N/A', inline=True)
e.add_field(name="Rated", value=rated if released else 'N/A', inline=True)
e.add_field(name="Genres", value=genres, inline=True)
e.add_field(name="Director", value=director_str, inline=True)
e.add_field(name="Actors", value=actor_str, inline=True)
e.add_field(name="Box Office", value=t_bo + '\n' + d_bo if released else 'N/A', inline=True)
e.add_field(name="Awards", value=awards if released else 'N/A', inline=True)
e.add_field(name="Rating", value=ratings if released else 'N/A', inline=True)
e.add_field(name="Overview:", value=overview, inline=False)
e.set_image(url=poster_url)
e.set_thumbnail(url=comp_url)
await ctx.send(embed=e)
await ctx.send(f":movie_camera::clapper: Watch Movie Trailer here:\n{trailer_url}")
    @commands.command()
    async def actor(self, ctx, *, in_str):
        await ctx.send("Searching...")  # send() is a coroutine and must be awaited
@staticmethod
def process_input(in_str):
if '[' not in in_str:
return in_str, '1'
li = [x.strip() for x in in_str.split('[')]
if len(li) != 2:
return False, False
return li[0], li[1][:-1]
@staticmethod
async def handle_empty(ctx):
await ctx.send(":warning: No input was given! ")
def setup(bot):
bot.add_cog(Commands(bot))
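# --- Hedged sketch (not part of the original cog) ---
# Expected parses of process_input, given the splitting rule above:
def _process_input_examples():
    assert Commands.process_input("Dune [2]") == ("Dune", "2")
    assert Commands.process_input("Dune") == ("Dune", "1")
    assert Commands.process_input("a [b] [c]") == (False, False)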
| 36.618421
| 102
| 0.594682
| 379
| 2,783
| 4.237467
| 0.295515
| 0.024907
| 0.05604
| 0.080946
| 0.206102
| 0.182441
| 0.153798
| 0.12142
| 0.12142
| 0.12142
| 0
| 0.006455
| 0.276321
| 2,783
| 75
| 103
| 37.106667
| 0.790963
| 0.015092
| 0
| 0.135593
| 0
| 0
| 0.119745
| 0.008634
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050847
| false
| 0
| 0.067797
| 0
| 0.271186
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a38b317b32dcbc6c9dff08940ace5dc60a5e39cd
| 1,853
|
py
|
Python
|
examples/run_ranch_baseline.py
|
pinjutien/DeepExplain
|
a80d85dcd5adc90968b6924a7ef39528170830f0
|
[
"MIT"
] | null | null | null |
examples/run_ranch_baseline.py
|
pinjutien/DeepExplain
|
a80d85dcd5adc90968b6924a7ef39528170830f0
|
[
"MIT"
] | null | null | null |
examples/run_ranch_baseline.py
|
pinjutien/DeepExplain
|
a80d85dcd5adc90968b6924a7ef39528170830f0
|
[
"MIT"
] | null | null | null |
"""
RANdom CHoice baseline (RANCH): random image from the target class
"""
import random
import numpy as np
import tensorflow_datasets as tfds
from tqdm import tqdm
# output_pattern = '/home/ec2-user/gan_submission_1/mnist/mnist_v2/ranch_baselines_%d'
# tfds_name = 'mnist'
# target_size = [28, 28, 1]
# num_class = 10
# n_samples = 10000
# output_pattern = '/home/ec2-user/gan_submission_1/svhn/svhn_v2/ranch_baselines_%d'
# tfds_name = 'svhn_cropped'
# target_size = [32, 32, 3]
# num_class = 10
# n_samples = 26032
output_pattern = '/home/ec2-user/gan_submission_1/cifar10/cifar10_v2/ranch_baselines_%d'
tfds_name = 'cifar10'
target_size = [32, 32, 3]
num_class = 10
n_samples = 10000
if __name__ == '__main__':
# obtain train images
data_train = list(tfds.as_numpy(tfds.load(tfds_name, split='train')))
# obtain test images with target labels
ds_test = tfds.load(tfds_name, split='test')
dslist = list(tfds.as_numpy(ds_test.take(n_samples)))
    # Draw a target class uniformly over the num_class - 1 classes other than
    # the true label: sample in [0, num_class - 2], then shift draws at or
    # above the true label up by one.
    ys_target = np.random.RandomState(seed=222).randint(num_class - 1, size=n_samples)
    xs, ys_label = [], []
    for ind, sample in enumerate(dslist):
        xs.append(sample['image'])
        ys_label.append(sample['label'])
        if ys_target[ind] >= sample['label']:
            ys_target[ind] += 1
    # Normalize images to [0, 1].
    for ind in range(len(data_train)):
        data_train[ind]['image'] = data_train[ind]['image'] / 255.0
    xs = np.array(xs)
    xs = xs / 255.0  # the original divided by 255.5, an apparent typo
ys_label = np.array(ys_label)
    index_map = {i: [] for i in range(num_class)}  # was hard-coded to 10
for i, train_sample in enumerate(data_train):
index_map[train_sample['label']].append(i)
outputs = []
for ind in tqdm(range(n_samples)):
i = random.choice(index_map[ys_target[ind]])
outputs.append(data_train[i]['image'])
outputs = np.array(outputs)
np.save(output_pattern % n_samples, outputs)
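# --- Hedged sanity check (not part of the original script) ---
# The shift above maps a uniform draw over {0, ..., num_class - 2} to a
# uniform draw over the num_class - 1 classes that differ from `label`.
def _check_target_shift(num_class=10, label=3):
    targets = {draw + 1 if draw >= label else draw for draw in range(num_class - 1)}
    assert label not in targets and len(targets) == num_class - 1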
| 28.953125
| 88
| 0.67674
| 281
| 1,853
| 4.209964
| 0.291815
| 0.047337
| 0.043111
| 0.050719
| 0.274725
| 0.239222
| 0.152156
| 0.152156
| 0.05579
| 0.05579
| 0
| 0.043738
| 0.185645
| 1,853
| 64
| 89
| 28.953125
| 0.740225
| 0.247167
| 0
| 0
| 0
| 0
| 0.092956
| 0.050109
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.117647
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a38b4a3c4607025ed47cb0e6994bcee905fa97f0
| 359
|
py
|
Python
|
pageOne.py
|
3bru/qt-tkinter-Test
|
41eefe7621c6a0bf3a25b4503df7a7451fc363b2
|
[
"MIT"
] | 1
|
2020-05-18T21:59:39.000Z
|
2020-05-18T21:59:39.000Z
|
pageOne.py
|
3bru/qt-tkinter-Test
|
41eefe7621c6a0bf3a25b4503df7a7451fc363b2
|
[
"MIT"
] | null | null | null |
pageOne.py
|
3bru/qt-tkinter-Test
|
41eefe7621c6a0bf3a25b4503df7a7451fc363b2
|
[
"MIT"
] | null | null | null |
import sqlite3

con = sqlite3.connect('database.sqlite')
im = con.cursor()
tablo = """CREATE TABLE IF NOT EXISTS writes(day, topic, texti)"""
deger = """INSERT INTO writes VALUES('oneDay', 'nmap', 'some notes about nmap')"""
im.execute(tablo)
im.execute(deger)
con.commit()
im.execute("""SELECT * FROM writes""")
veriler = im.fetchall()
print(veriler)
con.close()
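# --- Hedged sketch (not part of the original script) ---
# For values that come from user input, a parameterized insert avoids SQL
# injection; the table and columns below match the script above.
def _insert_row(con, day, topic, texti):
    con.execute("INSERT INTO writes VALUES(?, ?, ?)", (day, topic, texti))
    con.commit()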
| 22.4375
| 82
| 0.696379
| 49
| 359
| 5.102041
| 0.714286
| 0.108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006349
| 0.122563
| 359
| 15
| 83
| 23.933333
| 0.787302
| 0
| 0
| 0
| 0
| 0
| 0.431755
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a38b9d380fbd10ce2b7350457ab818a75b222fac
| 6,075
|
py
|
Python
|
basicsr/metrics/psnr_ssim.py
|
BCV-Uniandes/RSR
|
dad60eedd3560f2655e3d1ed444153ed2616af2e
|
[
"zlib-acknowledgement"
] | 14
|
2021-08-28T04:15:37.000Z
|
2021-12-28T17:00:33.000Z
|
basicsr/metrics/psnr_ssim.py
|
BCV-Uniandes/RSR
|
dad60eedd3560f2655e3d1ed444153ed2616af2e
|
[
"zlib-acknowledgement"
] | 2
|
2021-09-26T01:27:06.000Z
|
2021-12-24T19:06:09.000Z
|
basicsr/metrics/psnr_ssim.py
|
BCV-Uniandes/RSR
|
dad60eedd3560f2655e3d1ed444153ed2616af2e
|
[
"zlib-acknowledgement"
] | 1
|
2021-10-18T15:48:56.000Z
|
2021-10-18T15:48:56.000Z
|
import cv2
import numpy as np
from basicsr.metrics.metric_util import reorder_image, to_y_channel
def calculate_psnr(img1,
img2,
crop_border,
input_order='HWC',
test_y_channel=False):
"""Calculate PSNR (Peak Signal-to-Noise Ratio).
Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
Args:
img1 (ndarray): Images with range [0, 255].
img2 (ndarray): Images with range [0, 255].
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the PSNR calculation.
input_order (str): Whether the input order is 'HWC' or 'CHW'.
Default: 'HWC'.
test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
Returns:
float: psnr result.
"""
    assert img1.shape == img2.shape, (
        f'Image shapes are different: {img1.shape}, {img2.shape}.')
if input_order not in ['HWC', 'CHW']:
raise ValueError(
f'Wrong input_order {input_order}. Supported input_orders are '
'"HWC" and "CHW"')
img1 = reorder_image(img1, input_order=input_order)
img2 = reorder_image(img2, input_order=input_order)
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
if crop_border != 0:
img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
if test_y_channel:
img1 = to_y_channel(img1)
img2 = to_y_channel(img2)
mse = np.mean((img1 - img2)**2)
if mse == 0:
return float('inf')
return 20. * np.log10(255. / np.sqrt(mse))
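# --- Hedged worked example (not part of the original module) ---
# Two constant images differing by 10 gray levels give mse = 100, so
# PSNR = 20 * log10(255 / 10) ~= 28.13 dB.
def _psnr_example():
    a = np.full((8, 8, 3), 100, dtype=np.float64)
    b = np.full((8, 8, 3), 110, dtype=np.float64)
    return calculate_psnr(a, b, crop_border=0)  # ~28.13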
def _ssim(img1, img2):
"""Calculate SSIM (structural similarity) for one channel images.
It is called by func:`calculate_ssim`.
Args:
img1 (ndarray): Images with range [0, 255] with order 'HWC'.
img2 (ndarray): Images with range [0, 255] with order 'HWC'.
Returns:
float: ssim result.
"""
C1 = (0.01 * 255)**2
C2 = (0.03 * 255)**2
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]
mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) *
(2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
def calculate_ssim(img1,
img2,
crop_border,
input_order='HWC',
test_y_channel=False):
"""Calculate SSIM (structural similarity).
Ref:
Image quality assessment: From error visibility to structural similarity
The results are the same as that of the official released MATLAB code in
https://ece.uwaterloo.ca/~z70wang/research/ssim/.
For three-channel images, SSIM is calculated for each channel and then
averaged.
Args:
img1 (ndarray): Images with range [0, 255].
img2 (ndarray): Images with range [0, 255].
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the SSIM calculation.
input_order (str): Whether the input order is 'HWC' or 'CHW'.
Default: 'HWC'.
test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
Returns:
float: ssim result.
"""
    assert img1.shape == img2.shape, (
        f'Image shapes are different: {img1.shape}, {img2.shape}.')
if input_order not in ['HWC', 'CHW']:
raise ValueError(
f'Wrong input_order {input_order}. Supported input_orders are '
'"HWC" and "CHW"')
img1 = reorder_image(img1, input_order=input_order)
img2 = reorder_image(img2, input_order=input_order)
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
if crop_border != 0:
img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
if test_y_channel:
img1 = to_y_channel(img1)
img2 = to_y_channel(img2)
ssims = []
for i in range(img1.shape[2]):
ssims.append(_ssim(img1[..., i], img2[..., i]))
return np.array(ssims).mean()
# LPIPS support. In the original file these imports sat mid-file; the unused
# torchvision and bare numpy imports have been dropped.
import torch
import torch.nn as nn
import lpips
# from misc.kernel_loss import shave_a2b
def calculate_lpips(output, gt, device):
lpips = LPIPS(net='alex', verbose=False).to(device)
# output = 2*((output - (output.min()))/(output.max() - (output.min()))) - 1
# gt = 2*((gt - (gt.min()))/(gt.max() - (gt.min()))) - 1
return lpips(output, gt).cpu().numpy().mean()
class LPIPS(nn.Module):
def __init__(self, net='alex', verbose=True, device='cpu', vgg19=False):
super().__init__()
if vgg19:
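            # NOTE: VGGFeatureExtractor is not defined or imported anywhere in
            # this file; this branch assumes it exists elsewhere in the project.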
self.lpips = VGGFeatureExtractor(device=device).to(device)
else:
self.lpips = lpips.LPIPS(net=net, verbose=verbose).to(device)
# imagenet normalization for range [-1, 1]
self.lpips.eval()
for param in self.lpips.parameters():
param.requires_grad = False
def perceptual_rec(self, x, y):
loss_rgb = nn.L1Loss()(x, y)
loss = loss_rgb + self(x, y)
return loss
@torch.no_grad()
def forward(self, x, y):
# normalization -1,+1
# if x.size(-1) > y.size(-1):
# x = shave_a2b(x, y)
# elif x.size(-1) < y.size(-1):
# y = shave_a2b(y, x)
lpips_value = self.lpips(x, y, normalize=True) # True
return lpips_value.mean()
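# --- Hedged usage sketch (not part of the original module) ---
# Shapes and device here are illustrative; inputs are expected in [0, 1].
def _lpips_example(device='cpu'):
    x = torch.rand(1, 3, 64, 64)
    y = torch.rand(1, 3, 64, 64)
    return calculate_lpips(x, y, device)  # scalar numpy value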
| 33.379121
| 80
| 0.596708
| 836
| 6,075
| 4.202153
| 0.226077
| 0.062625
| 0.047822
| 0.068318
| 0.522061
| 0.509536
| 0.502704
| 0.493026
| 0.475377
| 0.453743
| 0
| 0.051919
| 0.270782
| 6,075
| 182
| 81
| 33.379121
| 0.741084
| 0.312428
| 0
| 0.428571
| 0
| 0
| 0.072891
| 0
| 0
| 0
| 0
| 0
| 0.020408
| 1
| 0.071429
| false
| 0
| 0.081633
| 0
| 0.234694
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a38ee10bfa692aa23805d2d2b99b5f0481e7ce48
| 14,224
|
py
|
Python
|
data/dataset.py
|
limingwu8/Pneumonia-Detection
|
8541e0f34a72f6e94773bf234cfd071732229b2b
|
[
"MIT"
] | 7
|
2019-01-27T02:30:56.000Z
|
2020-04-29T18:47:21.000Z
|
data/dataset.py
|
limingwu8/Pneumonia-Detection
|
8541e0f34a72f6e94773bf234cfd071732229b2b
|
[
"MIT"
] | 1
|
2020-01-28T04:40:15.000Z
|
2020-05-01T02:37:40.000Z
|
data/dataset.py
|
limingwu8/Pneumonia-Detection
|
8541e0f34a72f6e94773bf234cfd071732229b2b
|
[
"MIT"
] | 3
|
2019-08-09T09:16:00.000Z
|
2021-07-01T11:45:00.000Z
|
import os
import numpy as np
import torch
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from skimage import io, transform
from utils.Config import opt
from skimage import exposure
import matplotlib.pylab as plt
from utils import array_tool as at
from sklearn.model_selection import train_test_split
from data.data_utils import read_image, resize_bbox, flip_bbox, random_flip, flip_masks
from utils.vis_tool import apply_mask_bbox
import matplotlib.patches as patches
DSB_BBOX_LABEL_NAMES = ('p',)  # Pneumonia; the trailing comma makes this a one-element tuple
def inverse_normalize(img):
if opt.caffe_pretrain:
img = img + (np.array([122.7717, 115.9465, 102.9801]).reshape(3, 1, 1))
return img[::-1, :, :].clip(min=0, max=255)
    # approximate un-normalization for visualization
return (img * 0.225 + 0.45).clip(min=0, max=1) * 255
"""Transforms:
Data augmentation
"""
class Transform(object):
def __init__(self, min_size=600, max_size=1000, train=True):
self.min_size = min_size
self.max_size = max_size
self.train = train
def __call__(self, in_data):
if len(in_data.keys())!=2:
img_id, img, bbox, label = in_data['img_id'], in_data['image'], in_data['bbox'], in_data['label']
_, H, W = img.shape
img = preprocess(img, self.min_size, self.max_size, self.train)
_, o_H, o_W = img.shape
scale = o_H/H
# horizontally flip
# img, params = random_flip(img, x_random=True, y_random=True, return_param=True)
bbox = resize_bbox(bbox, (H, W), (o_H, o_W))
img, params = random_flip(img, x_random=True, y_random=False, return_param=True)
bbox = flip_bbox(bbox, (o_H, o_W), x_flip=params['x_flip'], y_flip=params['y_flip'])
label = label if label is None else label.copy()
return {'img_id': img_id, 'image': img.copy(), 'bbox': bbox, 'label': label, 'scale': scale}
else:
img_id, img = in_data['img_id'], in_data['image']
_, H, W = img.shape
img = preprocess(img, self.min_size, self.max_size, self.train)
_, o_H, o_W = img.shape
scale = o_H/H
# horizontally flip
# img, params = random_flip(img, x_random=True, y_random=True, return_param=True)
return {'img_id': img_id, 'image': img.copy(), 'scale': scale}
def preprocess(img, min_size=600, max_size=1000, train=True):
"""Preprocess an image for feature extraction.
The length of the shorter edge is scaled to :obj:`self.min_size`.
After the scaling, if the length of the longer edge is longer than
:param min_size:
:obj:`self.max_size`, the image is scaled to fit the longer edge
to :obj:`self.max_size`.
After resizing the image, the image is subtracted by a mean image value
:obj:`self.mean`.
Args:
img (~numpy.ndarray): An image. This is in CHW and RGB format.
The range of its value is :math:`[0, 255]`.
Returns:
~numpy.ndarray: A preprocessed image.
"""
C, H, W = img.shape
scale1 = min_size / min(H, W)
scale2 = max_size / max(H, W)
scale = min(scale1, scale2)
if opt.caffe_pretrain:
normalize = caffe_normalize
else:
        normalize = pytorch_normalize
if opt.hist_equalize:
hist_img = exposure.equalize_hist(img)
hist_img = transform.resize(hist_img, (C, H * scale, W * scale), mode='reflect')
hist_img = normalize(hist_img)
return hist_img
img = img / 255.
img = transform.resize(img, (C, H * scale, W * scale), mode='reflect')
# both the longer and shorter should be less than
# max_size and min_size
img = normalize(img)
return img
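# --- Hedged worked example (not part of the original module) ---
# The resize rule above in numbers: an 800x1200 image with min_size=600 and
# max_size=1000 is scaled by min(600/800, 1000/1200) = 0.75, so the shorter
# edge hits min_size while the longer edge stays within max_size.
def _scale_example():
    H, W, min_size, max_size = 800, 1200, 600, 1000
    scale = min(min_size / min(H, W), max_size / max(H, W))
    assert scale == 0.75  # resized to 600 x 900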
def pytorch_normalize(img):
    """
    https://discuss.pytorch.org/t/how-to-preprocess-input-for-pre-trained-networks/683
    https://github.com/pytorch/vision/issues/223
    Return an approximately -1~1 RGB image.
    """
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
img = normalize(torch.from_numpy(img))
return img.numpy()
def caffe_normalize(img):
"""
return appr -125-125 BGR
"""
img = img[[2, 1, 0], :, :] # RGB-BGR
img = img * 255
mean = np.array([122.7717, 115.9465, 102.9801]).reshape(3, 1, 1)
img = (img - mean).astype(np.float32, copy=True)
return img
class RSNADataset(Dataset):
def __init__(self, root_dir, img_id, transform=True, train=True):
"""
Args:
:param root_dir (string): Directory with all the images
:param img_id (list): lists of image id
:param train: if equals true, then read training set, so the output is image, mask and imgId
if equals false, then read testing set, so the output is image and imgId
:param transform (callable, optional): Optional transform to be applied on a sample
"""
self.root_dir = root_dir
self.img_id = img_id
self.transform = transform
self.tsf = Transform(opt.min_size, opt.max_size, train)
def __len__(self):
return len(self.img_id)
def __getitem__(self, idx):
img_path = os.path.join(self.root_dir, self.img_id[idx], 'image.png')
bbox_path = os.path.join(self.root_dir, self.img_id[idx], 'bbox.npy')
image = read_image(img_path, np.float32, True)
if os.path.exists(bbox_path):
bbox = np.load(bbox_path)
label = np.zeros(len(bbox)).astype(np.int32)
sample = {'img_id': self.img_id[idx], 'image':image.copy(), 'bbox':bbox, 'label': label}
else:
sample = {'img_id': self.img_id[idx], 'image':image.copy()}
if self.transform:
sample = self.tsf(sample)
return sample
class RSNADatasetTest(Dataset):
def __init__(self, root_dir, transform=True, train=False):
"""
Args:
:param root_dir (string): Directory with all the images
:param img_id (list): lists of image id
:param train: if equals true, then read training set, so the output is image, mask and imgId
if equals false, then read testing set, so the output is image and imgId
:param transform (callable, optional): Optional transform to be applied on a sample
"""
self.root_dir = root_dir
self.img_id = os.listdir(root_dir)
self.transform = transform
self.tsf = Transform(opt.min_size, opt.max_size, train)
def __len__(self):
return len(self.img_id)
def __getitem__(self, idx):
img_path = os.path.join(self.root_dir, self.img_id[idx], 'image.png')
image = read_image(img_path, np.float32, True)
sample = {'img_id': self.img_id[idx], 'image': image.copy()}
if self.transform:
sample = self.tsf(sample)
return sample
def get_train_loader(root_dir, batch_size=16, shuffle=False, num_workers=4, pin_memory=False):
"""Utility function for loading and returning training and validation Dataloader
:param root_dir: the root directory of data set
:param batch_size: batch size of training and validation set
:param split: if split data set to training set and validation set
:param shuffle: if shuffle the image in training and validation set
:param num_workers: number of workers loading the data, when using CUDA, set to 1
:param val_ratio: ratio of validation set size
:param pin_memory: store data in CPU pin buffer rather than memory. when using CUDA, set to True
:return:
if split the data set then returns:
- train_loader: Dataloader for training
- valid_loader: Dataloader for validation
else returns:
- dataloader: Dataloader of all the data set
"""
img_ids = os.listdir(root_dir)
img_ids.sort()
transformed_dataset = RSNADataset(root_dir=root_dir, img_id=img_ids, transform=True, train=True)
dataloader = DataLoader(transformed_dataset, batch_size=batch_size,
shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)
return dataloader
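# --- Hedged usage sketch (not part of the original module) ---
# `root_dir` below is illustrative; batch_size=1 sidesteps collation issues
# with variable-length bbox arrays.
def _loader_example(root_dir='data/train'):
    loader = get_train_loader(root_dir, batch_size=1)
    return next(iter(loader))  # dict with img_id, image, bbox, label, scale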
def get_train_val_loader(root_dir, batch_size=16, val_ratio=0.2, shuffle=False, num_workers=4, pin_memory=False):
"""Utility function for loading and returning training and validation Dataloader
:param root_dir: the root directory of data set
:param batch_size: batch size of training and validation set
:param split: if split data set to training set and validation set
:param shuffle: if shuffle the image in training and validation set
:param num_workers: number of workers loading the data, when using CUDA, set to 1
:param val_ratio: ratio of validation set size
:param pin_memory: store data in CPU pin buffer rather than memory. when using CUDA, set to True
:return:
if split the data set then returns:
- train_loader: Dataloader for training
- valid_loader: Dataloader for validation
else returns:
- dataloader: Dataloader of all the data set
"""
img_ids = os.listdir(root_dir)
img_ids.sort()
train_id, val_id = train_test_split(img_ids, test_size=val_ratio, random_state=55, shuffle=shuffle)
train_dataset = RSNADataset(root_dir=root_dir, img_id=train_id, transform=True, train=True)
train_loader = DataLoader(train_dataset, batch_size=batch_size,
shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)
val_dataset = RSNADataset(root_dir=root_dir, img_id=val_id, transform=True, train=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size,
shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)
return train_loader, val_loader
def get_test_loader(test_dir, batch_size=16, shuffle=False, num_workers=4, pin_memory=False):
"""Utility function for loading and returning training and validation Dataloader
:param root_dir: the root directory of data set
:param batch_size: batch size of training and validation set
:param shuffle: if shuffle the image in training and validation set
:param num_workers: number of workers loading the data, when using CUDA, set to 1
:param pin_memory: store data in CPU pin buffer rather than memory. when using CUDA, set to True
:return:
- testloader: Dataloader of all the test set
"""
transformed_dataset = RSNADatasetTest(root_dir=test_dir)
testloader = DataLoader(transformed_dataset, batch_size=batch_size,
shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)
return testloader
def show_batch_train(sample_batched):
"""
Visualize one training image and its corresponding bbox
"""
if len(sample_batched.keys())==5:
# if sample_batched['img_id']=='8d978e76-14b9-4d9d-9ba6-aadd3b8177ce':
# print('stop')
img_id, image, bbox = sample_batched['img_id'], sample_batched['image'], sample_batched['bbox']
orig_img = at.tonumpy(image)
orig_img = inverse_normalize(orig_img)
bbox = bbox[0, :]
ax = plt.subplot(111)
ax.imshow(np.transpose(np.squeeze(orig_img / 255.), (1, 2, 0)))
ax.set_title(img_id[0])
for i in range(bbox.shape[0]):
y1, x1, y2, x2 = int(bbox[i][0]), int(bbox[i][1]), int(bbox[i][2]), int(bbox[i][3])
h = y2 - y1
w = x2 - x1
rect = patches.Rectangle((x1, y1), w, h, linewidth=1, edgecolor='r', facecolor='none')
ax.add_patch(rect)
plt.show()
def show_batch_test(sample_batch):
img_id, image = sample_batch['img_id'], sample_batch['image']
image = inverse_normalize(at.tonumpy(image[0]))
plt.figure()
plt.imshow(np.transpose(at.tonumpy(image/255), (1, 2, 0)))
plt.show()
if __name__ == '__main__':
# dataset = RSNADataset(root_dir=opt.root_dir, transform=True)
# sample = dataset[13]
# print(sample.keys())
# Load training set
# trainloader = get_train_loader(opt.root_dir, batch_size=opt.batch_size, shuffle=opt.shuffle,
# num_workers=opt.num_workers, pin_memory=opt.pin_memory)
#
# for i_batch, sample in tqdm(enumerate(trainloader)):
# B,C,H,W = sample['image'].shape
# if (H,W)!=(600,600):
# print(sample['img_id'])
# show_batch_train(sample)
# Load testing set
# testloader = get_test_loader(opt.test_dir, batch_size=opt.batch_size, shuffle=opt.shuffle,
# num_workers=opt.num_workers, pin_memory=opt.pin_memory)
# for i_batch, sample in enumerate(testloader):
# print('i_batch: ', i_batch, 'len(sample)', len(sample.keys()))
# show_batch_test(sample)
# Load training & validation set
train_loader, val_loader = get_train_val_loader(opt.root_dir, batch_size=opt.batch_size, val_ratio=0.1,
shuffle=True, num_workers=opt.num_workers,
pin_memory=opt.pin_memory)
for i_batch, sample in enumerate(train_loader):
show_batch_train(sample)
# Test train & validation set on densenet
# img_ids = os.listdir(opt.root_dir)
# dataset = RSNADataset_densenet(root_dir=opt.root_dir, img_id=img_ids, transform=True)
# sample = dataset[13]
# print(sample.keys())
# train_loader, val_loader = get_train_val_loader_densenet(opt.root_dir, batch_size=128, val_ratio=0.1,
# shuffle=False, num_workers=opt.num_workers,
# pin_memory=opt.pin_memory)
# non_zeros = 0 # 4916 + 743 = 5659
# zeros = 0 # 15692 + 4505 = 20197
# for i, sample in tqdm(enumerate(val_loader)):
# non_zeros += np.count_nonzero(at.tonumpy(sample['label']))
# zeros += (128-np.count_nonzero(at.tonumpy(sample['label'])))
# # print(sample['img_id'], ', ', at.tonumpy(sample['label']))
# print("non_zeros: ", non_zeros)
# print("zeros: ", zeros)
| 41.228986
| 113
| 0.646161
| 2,026
| 14,224
| 4.346496
| 0.148569
| 0.021008
| 0.01022
| 0.019078
| 0.568817
| 0.541563
| 0.532024
| 0.519305
| 0.457983
| 0.452305
| 0
| 0.024169
| 0.246625
| 14,224
| 344
| 114
| 41.348837
| 0.797592
| 0.394263
| 0
| 0.304348
| 0
| 0
| 0.025335
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.10559
| false
| 0
| 0.093168
| 0.012422
| 0.310559
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a38f9c51d087930a15e07db3d41e43fedee278f9
| 8,344
|
py
|
Python
|
make_dataset/kor_sample_dataset.py
|
park-sungmoo/odqa_baseline_code
|
45954be766e5f987bef18e5b8a2e47f1508742cd
|
[
"Apache-2.0"
] | 67
|
2021-05-12T15:54:28.000Z
|
2022-03-12T15:55:35.000Z
|
make_dataset/kor_sample_dataset.py
|
park-sungmoo/odqa_baseline_code
|
45954be766e5f987bef18e5b8a2e47f1508742cd
|
[
"Apache-2.0"
] | 71
|
2021-05-01T06:07:37.000Z
|
2022-01-28T16:54:46.000Z
|
make_dataset/kor_sample_dataset.py
|
park-sungmoo/odqa_baseline_code
|
45954be766e5f987bef18e5b8a2e47f1508742cd
|
[
"Apache-2.0"
] | 14
|
2021-05-24T10:57:27.000Z
|
2022-02-18T06:34:11.000Z
|
import json
import os.path as p
from collections import defaultdict
import pandas as pd
from datasets import load_dataset
from datasets import concatenate_datasets
from datasets import Sequence, Value, Features, Dataset, DatasetDict
from utils.tools import get_args
f = Features(
{
"answers": Sequence(
feature={"text": Value(dtype="string", id=None), "answer_start": Value(dtype="int32", id=None)},
length=-1,
id=None,
),
"id": Value(dtype="string", id=None),
"context": Value(dtype="string", id=None),
"question": Value(dtype="string", id=None),
"title": Value(dtype="string", id=None),
}
)
def remove_multiple_indexes(rlist, indexes):
assert indexes == sorted(indexes, reverse=True)
for index in indexes:
del rlist[index]
return rlist
def filtering_by_doc_len(kor_dataset, doc_len=512):
indexes = []
for idx, context in enumerate(kor_dataset["context"]):
if len(context) < doc_len:
indexes.append(idx)
indexes.sort(reverse=True)
tmp = {}
for key in kor_dataset.features.keys():
tmp[key] = remove_multiple_indexes(kor_dataset[key], indexes)
df = pd.DataFrame(tmp)
datasets = Dataset.from_pandas(df, features=f)
return datasets
def filtering_by_dup_question(kor_dataset, dup_limit=4):
indexes = []
context_cnt = defaultdict(int)
for idx, context in enumerate(kor_dataset["context"]):
context_cnt[context] += 1
if context_cnt[context] > dup_limit:
indexes.append(idx)
indexes.sort(reverse=True)
tmp = {}
for key in kor_dataset.features.keys():
tmp[key] = remove_multiple_indexes(kor_dataset[key], indexes)
df = pd.DataFrame(tmp)
datasets = Dataset.from_pandas(df, features=f)
return datasets
def sampling_by_ans_start_weights(kor_dataset, sample=8000):
kor_df = kor_dataset.to_pandas()
kor_ans_cnt = defaultdict(int)
kor_ans_weights = defaultdict(float)
bucket = 100
for i, rows in kor_df.iterrows():
kor_ans_cnt[rows["answers"]["answer_start"][0] // bucket] += 1
total_cnt = sum(kor_ans_cnt.values())
for k, v in kor_ans_cnt.items():
        kor_ans_weights[k] = (1 - (v / total_cnt)) ** 6  # an exponent of about 5 is adequate
def apply_weights(row):
key = row["answer_start"][0] // bucket
return kor_ans_weights[key]
kor_df["weight"] = kor_df["answers"].apply(apply_weights)
    kor_df = kor_df.sample(n=sample, weights="weight", random_state=42)  # on second thought, saving everything is not feasible; 2x oversampling is possible
datasets = Dataset.from_pandas(kor_df, features=f)
return datasets
def sampling_by_doc_lens(kor_dataset, sample):
kor_df = kor_dataset.to_pandas()
kor_ans_cnt = defaultdict(int)
kor_ans_weights = defaultdict(float)
bucket = 100
for i, rows in kor_df.iterrows():
kor_ans_cnt[len(rows["context"]) // bucket] += 1
total_cnt = sum(kor_ans_cnt.values())
for k, v in kor_ans_cnt.items():
        kor_ans_weights[k] = (1 - (v / total_cnt)) ** 6  # an exponent of about 5 is adequate
def apply_weights(row):
key = len(row) // bucket
return kor_ans_weights[key]
kor_df["weight"] = kor_df["context"].apply(apply_weights)
    kor_df = kor_df.sample(n=sample, weights="weight", random_state=42)  # on second thought, saving everything is not feasible; 2x oversampling is possible
datasets = Dataset.from_pandas(kor_df, features=f)
return datasets
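# --- Hedged worked example (not part of the original module) ---
# The bucket weight (1 - v / total) ** 6 strongly downweights crowded buckets,
# so rare answer positions / document lengths are sampled far more often.
def _bucket_weight_example():
    total = 1000
    crowded = (1 - 500 / total) ** 6  # ~0.0156 for a bucket with 500 rows
    rare = (1 - 10 / total) ** 6      # ~0.9415 for a bucket with 10 rows
    return crowded, rare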
def make_kor_dataset_v1(args):
    """KorQuad Dataset V1
    1. Filter out documents shorter than 512 characters
    2. At most 4 questions per context
    3. Sample 8,000 examples weighted by ans_start position
    """
    kor_dataset_path = p.join(args.path.train_data_dir, "kor_dataset")
    if p.exists(kor_dataset_path):
        raise FileExistsError(f"{kor_dataset_path} already exists!")
    kor_dataset = load_dataset("squad_kor_v1")
    kor_dataset = concatenate_datasets(
        [kor_dataset["train"].flatten_indices(), kor_dataset["validation"].flatten_indices()]
    )
    # (1) Document length: 512 is the minimum length in KLUE MRC
    kor_dataset = filtering_by_doc_len(kor_dataset, doc_len=512)
    # (2) Remove duplicate contexts: at most 4 questions per context
    kor_dataset = filtering_by_dup_question(kor_dataset, dup_limit=4)
    # (3) Weighted sampling by KOR answer_start (2x oversampling)
    kor_dataset = sampling_by_ans_start_weights(kor_dataset, sample=8000)
    # (4) Save only KOR_DATASET
    kor_datasets = DatasetDict({"train": kor_dataset})
    kor_datasets.save_to_disk(kor_dataset_path)
    print(f"Saved to {kor_dataset_path}!")
def make_kor_dataset_v2(args):
    """KorQuad Dataset V2
    1. Filter out documents shorter than 512 characters
    2. At most 4 questions per context
    3. Sample 8,000 examples weighted by ans_start position
    4. Sample down to 4,000 weighted by document length
    """
    kor_dataset_path = p.join(args.path.train_data_dir, "kor_dataset_v2")
    if p.exists(kor_dataset_path):
        raise FileExistsError(f"{kor_dataset_path} already exists!")
    kor_dataset = load_dataset("squad_kor_v1")
    kor_dataset = concatenate_datasets(
        [kor_dataset["train"].flatten_indices(), kor_dataset["validation"].flatten_indices()]
    )
    # (1) Document length: 512 is the minimum length in KLUE MRC
    kor_dataset = filtering_by_doc_len(kor_dataset, doc_len=512)
    # (2) Remove duplicate contexts: at most 4 questions per context
    kor_dataset = filtering_by_dup_question(kor_dataset, dup_limit=4)
    # (3) Weighted sampling by KOR answer_start (2x oversampling)
    kor_dataset = sampling_by_ans_start_weights(kor_dataset)
    # (4) Weighted sampling by document length, down to 4,000
    kor_dataset = sampling_by_doc_lens(kor_dataset, sample=4000)
    # (5) Save only KOR_DATASET
    kor_datasets = DatasetDict({"train": kor_dataset})
    kor_datasets.save_to_disk(kor_dataset_path)
    print(f"Saved to {kor_dataset_path}!")
def get_etr_dataset(args):
etr_path = p.join(args.path.train_data_dir, "etr_qa_dataset.json")
if not p.exists(etr_path):
        raise FileNotFoundError(f"ETRI dataset not found; please rename the data file to {etr_path} and place it there.")
with open(etr_path, "r") as f:
etr_dict = json.load(f)
# print(etr_dict["data"][0])
new_dataset = defaultdict(list)
cnt = 0
for datas in etr_dict["data"]:
title = datas["title"]
context = datas["paragraphs"][0]["context"]
for questions in datas["paragraphs"][0]["qas"]:
question = questions["question"]
answers = {
"answer_start": [questions["answers"][0]["answer_start"]],
"text": [questions["answers"][0]["text"]],
}
new_dataset["id"].append(f"etr-custom-{cnt}")
new_dataset["title"].append(title)
new_dataset["context"].append(context)
new_dataset["question"].append(question)
new_dataset["answers"].append(answers)
cnt += 1
f = Features(
{
"answers": Sequence(
feature={"text": Value(dtype="string", id=None), "answer_start": Value(dtype="int32", id=None)},
length=-1,
id=None,
),
"id": Value(dtype="string", id=None),
"context": Value(dtype="string", id=None),
"question": Value(dtype="string", id=None),
"title": Value(dtype="string", id=None),
}
)
df = pd.DataFrame(new_dataset)
etr_dataset = Dataset.from_pandas(df, features=f)
return etr_dataset
def make_etr_dataset_v1(args):
    """Build the ETRI dataset.
    1. Filter out documents shorter than 512 characters
    2. Remove duplicate contexts; at most 4 questions per context
    3. Sample 3,000 examples weighted by ans_start position
    """
    etr_dataset_path = p.join(args.path.train_data_dir, "etr_dataset_v1")
    if p.exists(etr_dataset_path):
        raise FileExistsError(f"{etr_dataset_path} already exists!")
    etr_dataset = get_etr_dataset(args)
    # (1) Document length: 512 is the minimum length in KLUE MRC
    etr_dataset = filtering_by_doc_len(etr_dataset, doc_len=512)
    # (2) Remove duplicate contexts: at most 4 questions per context
    etr_dataset = filtering_by_dup_question(etr_dataset, dup_limit=4)
    # (3) Weighted sampling by ETR answer_start, down to 3,000
    etr_dataset = sampling_by_ans_start_weights(etr_dataset, sample=3000)
    # (4) Save only ETR_DATASET
    etr_datasets = DatasetDict({"train": etr_dataset})
    etr_datasets.save_to_disk(etr_dataset_path)
    print(f"Saved to {etr_dataset_path}!")
def main(args):
make_kor_dataset_v1(args)
make_kor_dataset_v2(args)
make_etr_dataset_v1(args)
if __name__ == "__main__":
args = get_args()
main(args)
| 28.772414
| 112
| 0.65604
| 1,175
| 8,344
| 4.4
| 0.164255
| 0.10058
| 0.030948
| 0.034816
| 0.718956
| 0.673308
| 0.658801
| 0.63675
| 0.615087
| 0.575822
| 0
| 0.023662
| 0.225072
| 8,344
| 289
| 113
| 28.871972
| 0.775905
| 0.103667
| 0
| 0.5
| 0
| 0
| 0.102287
| 0
| 0
| 0
| 0
| 0
| 0.005814
| 1
| 0.069767
| false
| 0
| 0.046512
| 0
| 0.162791
| 0.017442
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a392dab4e0208bcba731af6d1b6b1dd6d3c0e78a
| 21,317
|
py
|
Python
|
train.py
|
eapache/HawkEars
|
3b979166ed09de9f9254b830bb57499e1da7a015
|
[
"MIT"
] | null | null | null |
train.py
|
eapache/HawkEars
|
3b979166ed09de9f9254b830bb57499e1da7a015
|
[
"MIT"
] | 1
|
2021-12-17T16:56:12.000Z
|
2021-12-19T15:53:55.000Z
|
train.py
|
eapache/HawkEars
|
3b979166ed09de9f9254b830bb57499e1da7a015
|
[
"MIT"
] | 1
|
2021-12-17T16:59:04.000Z
|
2021-12-17T16:59:04.000Z
|
# Train the selected neural network model on spectrograms for birds and a few other classes.
# To see command-line arguments, run the script with -h argument.
import argparse
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import shutil
import sys
import time
import zlib
from collections import namedtuple
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' # 1 = no info, 2 = no warnings, 3 = no errors
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
import tensorflow as tf
from tensorflow import keras
from core import audio
from core import constants
from core import data_generator
from core import database
from core import plot
from core import util
from model import model_checkpoint
from model import efficientnet_v2
class Trainer:
def __init__(self, parameters):
global trainer
trainer = self
self.parameters = parameters
self.db = database.Database(f'data/{parameters.training}.db')
self.classes = util.get_class_list()
self.init()
# create a plot and save it to the output directory
def plot_results(self, dir, history, key1, key2 = None):
plt.clf() # clear any existing plot data
plt.plot(history.history[key1])
        if key2 is not None:
plt.plot(history.history[key2])
plt.title(key1)
plt.ylabel(key1)
plt.xlabel('epoch')
if key2 is None:
plt.legend(['train'], loc='upper left')
else:
plt.legend(['train', 'test'], loc='upper left')
plt.savefig(f'{dir}/{key1}.png')
def run(self):
# only use MirroredStrategy in a multi-GPU environment
#strategy = tf.distribute.MirroredStrategy()
strategy = tf.distribute.get_strategy()
with strategy.scope():
# define and compile the model
if self.parameters.type == 0:
model = keras.models.load_model(constants.CKPT_PATH)
else:
if self.parameters.multilabel:
class_act = 'sigmoid'
else:
class_act = 'softmax'
model = efficientnet_v2.EfficientNetV2(
model_type=self.parameters.eff_config,
num_classes=len(self.classes),
input_shape=(self.spec_height, constants.SPEC_WIDTH, 1),
activation='swish',
classifier_activation=class_act,
dropout=0.15,
drop_connect_rate=0.25)
opt = keras.optimizers.Adam(learning_rate = cos_lr_schedule(0))
if self.parameters.multilabel:
loss = keras.losses.BinaryCrossentropy(label_smoothing = 0.13)
else:
loss = keras.losses.CategoricalCrossentropy(label_smoothing = 0.13)
model.compile(loss = loss, optimizer = opt, metrics = 'accuracy')
# create output directory
dir = 'summary'
if not os.path.exists(dir):
os.makedirs(dir)
# output text and graphical descriptions of the model
with open(f'{dir}/table.txt','w') as text_output:
model.summary(print_fn=lambda x: text_output.write(x + '\n'))
if self.parameters.verbosity == 3:
keras.utils.plot_model(model, show_shapes=True, to_file=f'{dir}/graphic.png')
# initialize callbacks
lr_scheduler = keras.callbacks.LearningRateScheduler(cos_lr_schedule)
model_checkpoint_callback = model_checkpoint.ModelCheckpoint(
constants.CKPT_PATH, self.parameters.ckpt_min_epochs, self.parameters.ckpt_min_val_accuracy,
copy_ckpt=self.parameters.copy_ckpt, save_best_only=self.parameters.save_best_only)
callbacks = [lr_scheduler, model_checkpoint_callback]
# create the training and test datasets
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA
datagen = data_generator.DataGenerator(self.x_train, self.y_train, seed=self.parameters.seed,
binary_classifier=self.parameters.binary_classifier, multilabel=self.parameters.multilabel)
train_ds = tf.data.Dataset.from_generator(
datagen,
output_types=(tf.float16, tf.float16),
output_shapes=([self.spec_height, constants.SPEC_WIDTH, 1],[len(self.classes)]))
train_ds = train_ds.with_options(options)
train_ds = train_ds.batch(self.parameters.batch_size)
test_ds = tf.data.Dataset.from_tensor_slices((self.x_test, self.y_test))
test_ds = test_ds.with_options(options)
test_ds = test_ds.batch(self.parameters.batch_size)
class_weight = self._get_class_weight()
# run training
if self.parameters.seed is None:
workers = 2
else:
workers = 0 # run data augmentation in main thread to improve repeatability
start_time = time.time()
history = model.fit(train_ds, epochs = self.parameters.epochs, verbose = self.parameters.verbosity, validation_data = test_ds,
workers = workers, shuffle = False, callbacks = callbacks, class_weight = class_weight)
elapsed = time.time() - start_time
# output loss/accuracy graphs and a summary report
training_accuracy = history.history["accuracy"][-1]
if len(self.x_test) > 0:
self.plot_results(dir, history, 'accuracy', 'val_accuracy')
self.plot_results(dir, history, 'loss', 'val_loss')
scores = model.evaluate(self.x_test, self.y_test)
test_accuracy = scores[1]
else:
self.plot_results(dir, history, 'accuracy')
self.plot_results(dir, history, 'loss')
if self.parameters.verbosity >= 2 and len(self.x_test) > 0:
# report on misidentified test spectrograms
predictions = model.predict(self.x_test)
self.analyze_predictions(predictions)
if self.parameters.verbosity > 0:
with open(f'{dir}/summary.txt','w') as text_output:
text_output.write(f'EfficientNetV2 config: {self.parameters.eff_config}\n')
text_output.write(f'Batch size: {self.parameters.batch_size}\n')
text_output.write(f'Epochs: {self.parameters.epochs}\n')
text_output.write(f'Training loss: {history.history["loss"][-1]:.3f}\n')
text_output.write(f'Training accuracy: {training_accuracy:.3f}\n')
if len(self.x_test) > 0:
text_output.write(f'Test loss: {scores[0]:.3f}\n')
text_output.write(f'Final test accuracy: {test_accuracy:.3f}\n')
text_output.write(f'Best test accuracy: {model_checkpoint_callback.best_val_accuracy:.4f}\n')
minutes = int(elapsed) // 60
seconds = int(elapsed) % 60
text_output.write(f'Elapsed time for training = {minutes}m {seconds}s\n')
print(f'Best test accuracy: {model_checkpoint_callback.best_val_accuracy:.4f}\n')
print(f'Elapsed time for training = {minutes}m {seconds}s\n')
return model_checkpoint_callback.best_val_accuracy
# find and report on incorrect predictions;
# always generate summary/stats.csv, but output misident/*.png only if verbosity >= 2;
# this is based on the last epoch, which may not be the best saved model
def analyze_predictions(self, predictions):
class ClassInfo:
def __init__(self):
self.spec_count = 0
self.true_pos = 0
self.false_pos = 0
self.false_neg = 0
misident_dir = 'misident'
if os.path.exists(misident_dir):
shutil.rmtree(misident_dir) # ensure we start with an empty folder
os.makedirs(misident_dir)
# collect data per class and output images if requested
classes = {}
for i in range(len(predictions)):
actual_index = np.argmax(self.y_test[i])
actual_name = self.classes[actual_index]
predicted_index = np.argmax(predictions[i])
predicted_name = self.classes[predicted_index]
if actual_name in classes:
actual_class_info = classes[actual_name]
else:
actual_class_info = ClassInfo()
classes[actual_name] = actual_class_info
if predicted_name in classes:
predicted_class_info = classes[predicted_name]
else:
predicted_class_info = ClassInfo()
classes[predicted_name] = predicted_class_info
actual_class_info.spec_count += 1
if predicted_index == actual_index:
actual_class_info.true_pos += 1
else:
actual_class_info.false_neg += 1
predicted_class_info.false_pos += 1
if self.parameters.verbosity >= 2:
if i in self.spec_file_name.keys():
suffix = self.spec_file_name[i]
else:
suffix = i
spec = self.x_test[i].reshape(self.x_test[i].shape[0], self.x_test[i].shape[1])
plot.plot_spec(spec, f'{misident_dir}/{actual_name}_{predicted_name}_{suffix}.png')
# output stats.csv containing data per class
stats = 'class,count,TP,FP,FN,FP+FN,precision,recall,average\n'
for class_name in sorted(classes):
count = classes[class_name].spec_count
tp = classes[class_name].true_pos
fp = classes[class_name].false_pos
fn = classes[class_name].false_neg
if tp + fp == 0:
precision = 0
else:
precision = tp / (tp + fp)
if tp + fn == 0:
recall = 0
else:
recall = tp / (tp + fn)
stats += f'{class_name},{count},{tp:.3f},{fp:.3f},{fn:.3f},{fp + fn:.3f},{precision:.3f},{recall:.3f},{(precision+recall)/2:.3f}\n'
with open(f'summary/stats.csv','w') as text_output:
text_output.write(stats)
# given the total number of spectrograms in a class, return a dict of randomly selected
# indices to use for testing (indices not in the list are used for training)
def get_test_indices(self, total):
num_test = math.ceil(self.parameters.test_portion * total)
test_indices = {}
while len(test_indices.keys()) < num_test:
index = random.randint(0, total - 1)
if index not in test_indices.keys():
test_indices[index] = 1
return test_indices
# heuristic to adjust weights of classes;
# data/weights.txt contains optional weight per class name;
# format is "class-name,weight", e.g. "Noise,1.1";
# classes not listed there default to a weight of 1.0
def _get_class_weight(self):
input_weight = {}
path = 'data/weights.txt'
try:
with open(path, 'r') as file:
for line in file.readlines():
line = line.strip()
if len(line) > 0 and line[0] != '#':
tokens = line.split(',')
if len(tokens) > 1:
try:
weight = float(tokens[1])
input_weight[tokens[0].strip()] = weight
except ValueError:
print(f'Invalid input weight = {tokens[1]} for class {tokens[0]}')
except IOError:
print(f'Unable to open weights file "{path}"')
return
class_weight = {}
for i in range(len(self.classes)):
if self.classes[i] in input_weight.keys():
print(f'Assigning weight {input_weight[self.classes[i]]} to {self.classes[i]}')
class_weight[i] = input_weight[self.classes[i]]
else:
class_weight[i] = 1.0
return class_weight
def init(self):
if self.parameters.binary_classifier:
self.spec_height = constants.BINARY_SPEC_HEIGHT
else:
self.spec_height = constants.SPEC_HEIGHT
# count spectrograms and randomly select which to use for testing vs. training
num_spectrograms = []
self.test_indices = []
for i in range(len(self.classes)):
total = self.db.get_num_spectrograms(self.classes[i])
num_spectrograms.append(total)
self.test_indices.append(self.get_test_indices(total))
# get the total training and testing counts across all classes
test_total = 0
train_total = 0
for i in range(len(self.classes)):
test_count = len(self.test_indices[i].keys())
train_count = num_spectrograms[i] - test_count
test_total += test_count
train_total += train_count
if len(self.parameters.val_db) > 0:
# when we just use a portion of the training data for testing/validation, it ends up being highly
# correlated with the training data, so the validation percentage is artificially high and it's
# difficult to detect overfitting;
# adding separate test data from a validation database helps to counteract this;
# there can be multiple, which must be comma-separated
val_names = self.parameters.val_db.split(',')
for val_name in val_names:
validation_db = database.Database(f'data/{val_name}.db')
for class_name in self.classes:
test_total += validation_db.get_num_spectrograms(class_name)
print(f'# training samples: {train_total}, # test samples: {test_total}')
# initialize arrays
self.x_train = [0 for i in range(train_total)]
self.y_train = np.zeros((train_total, len(self.classes)))
self.x_test = np.zeros((test_total, self.spec_height, constants.SPEC_WIDTH, 1))
self.y_test = np.zeros((test_total, len(self.classes)))
self.input_shape = (self.spec_height, constants.SPEC_WIDTH, 1)
# map test spectrogram indexes to file names for outputting names of misidentified ones
self.spec_file_name = {}
# populate from the database;
# they will be selected randomly per mini batch, so no need to randomize here
train_index = 0
test_index = 0
for i in range(len(self.classes)):
results = self.db.get_recordings_by_subcategory_name(self.classes[i])
spec_index = 0
for result in results:
recording_id, file_name, _ = result
specs = self.db.get_spectrograms_by_recording_id(recording_id)
for j in range(len(specs)):
spec, offset = specs[j]
if spec_index in self.test_indices[i].keys():
# test spectrograms are expanded here
self.spec_file_name[test_index] = f'{file_name}-{offset}' # will be used in names of files written to misident folder
self.x_test[test_index] = util.expand_spectrogram(spec, binary_classifier=self.parameters.binary_classifier)
self.y_test[test_index][i] = 1
test_index += 1
else:
# training spectrograms are expanded in data generator
self.x_train[train_index] = spec
self.y_train[train_index][i] = 1
train_index += 1
spec_index += 1
if len(self.parameters.val_db) > 0:
# append test data from the validation database(s)
val_names = self.parameters.val_db.split(',')
for val_name in val_names:
validation_db = database.Database(f'data/{val_name}.db')
for i in range(len(self.classes)):
specs = validation_db.get_spectrograms_by_name(self.classes[i])
for spec in specs:
self.x_test[test_index] = util.expand_spectrogram(spec[0], binary_classifier=self.parameters.binary_classifier)
self.y_test[test_index][i] = 1
test_index += 1
# learning rate schedule with cosine decay
def cos_lr_schedule(epoch):
global trainer
base_lr = trainer.parameters.base_lr * trainer.parameters.batch_size / 64
lr = base_lr * (1 + math.cos(epoch * math.pi / max(trainer.parameters.epochs, 1))) / 2
if trainer.parameters.verbosity == 0:
print(f'epoch: {epoch + 1} / {trainer.parameters.epochs}') # so there is at least some status info
return lr
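# --- Hedged worked example (not part of the original script) ---
# With the defaults (base_lr=.006, batch_size=32, epochs=10) the schedule
# starts at .006 * 32 / 64 = .003 and decays along a half cosine; epoch 9
# gives .003 * (1 + cos(9 * pi / 10)) / 2 ~= 7.3e-5.
def _cos_lr_endpoints(base_lr=.006, batch_size=32, epochs=10):
    base = base_lr * batch_size / 64
    first = base * (1 + math.cos(0)) / 2
    last = base * (1 + math.cos((epochs - 1) * math.pi / epochs)) / 2
    return first, last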
if __name__ == '__main__':
# command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-b', type=int, default=32, help='Batch size. Default = 32.')
parser.add_argument('-c', type=int, default=15, help='Minimum epochs before saving checkpoint. Default = 15.')
parser.add_argument('-d', type=float, default=0.0, help='Minimum validation accuracy before saving checkpoint. Default = 0.')
parser.add_argument('-e', type=int, default=10, help='Number of epochs. Default = 10.')
parser.add_argument('-f', type=str, default='training', help='Name of training database. Default = training.')
parser.add_argument('-g', type=int, default=1, help='If 1, make a separate copy of each saved checkpoint. Default = 1.')
parser.add_argument('-j', type=int, default=0, help='If 1, save checkpoint only when val accuracy improves. Default = 0.')
parser.add_argument('-m', type=int, default=1, help='Model type (0 = Load existing model, 1 = EfficientNetV2. Default = 1.')
parser.add_argument('-m2', type=str, default='a0', help='Name of EfficientNetV2 configuration to use. Default = "a0". ')
parser.add_argument('-r', type=float, default=.006, help='Base learning rate. Default = .006')
parser.add_argument('-t', type=float, default=.01, help='Test portion. Default = .01')
parser.add_argument('-u', type=int, default=0, help='1 = Train a multi-label classifier. Default = 0.')
parser.add_argument('-v', type=int, default=1, help='Verbosity (0-2, 0 omits output graphs, 2 plots misidentified test spectrograms, 3 adds graph of model). Default = 1.')
parser.add_argument('-x', type=str, default='', help='Name(s) of extra validation databases. "abc" means load "abc.db". "abc,def" means load both databases for validation. Default = "". ')
parser.add_argument('-y', type=int, default=0, help='If y = 1, extract spectrograms for binary classifier. Default = 0.')
parser.add_argument('-z', type=int, default=None, help='Integer seed for random number generators. Default = None (do not). If specified, other settings to increase repeatability will also be enabled, which slows down training.')
args = parser.parse_args()
Parameters = namedtuple('Parameters', ['base_lr', 'batch_size', 'binary_classifier', 'ckpt_min_epochs', 'ckpt_min_val_accuracy',
'copy_ckpt', 'eff_config', 'epochs', 'multilabel', 'save_best_only', 'seed', 'test_portion', 'training', 'type',
'val_db', 'verbosity'])
parameters = Parameters(base_lr=args.r, batch_size = args.b, binary_classifier=(args.y==1), ckpt_min_epochs=args.c, ckpt_min_val_accuracy=args.d,
copy_ckpt=(args.g == 1), eff_config = args.m2, epochs = args.e, multilabel=(args.u==1), save_best_only=(args.j == 1), seed=args.z,
test_portion = args.t, training=args.f, type = args.m, val_db = args.x, verbosity = args.v)
    if args.z is not None:
# these settings make results more reproducible, which is very useful when tuning parameters
os.environ['PYTHONHASHSEED'] = str(args.z)
#os.environ['TF_DETERMINISTIC_OPS'] = '1'
os.environ['TF_CUDNN_DETERMINISTIC'] = '1'
random.seed(args.z)
np.random.seed(args.z)
tf.random.set_seed(args.z)
tf.config.threading.set_inter_op_parallelism_threads(1)
tf.config.threading.set_intra_op_parallelism_threads(1)
keras.mixed_precision.set_global_policy("mixed_float16") # trains 25-30% faster
trainer = Trainer(parameters)
trainer.run()
| 47.476615
| 233
| 0.598208
| 2,620
| 21,317
| 4.701145
| 0.182443
| 0.037509
| 0.022083
| 0.011691
| 0.196801
| 0.142892
| 0.103515
| 0.073882
| 0.06966
| 0.055695
| 0
| 0.012996
| 0.303326
| 21,317
| 448
| 234
| 47.582589
| 0.816376
| 0.123751
| 0
| 0.122324
| 0
| 0.015291
| 0.148486
| 0.034947
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027523
| false
| 0
| 0.06422
| 0
| 0.11315
| 0.024465
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3943fc348baced6fa934c762ac87be734e9ae13
| 2,002
|
py
|
Python
|
limix/heritability/estimate.py
|
fpcasale/limix
|
a6bc2850f243fe779991bb53a24ddbebe0ab74d2
|
[
"Apache-2.0"
] | null | null | null |
limix/heritability/estimate.py
|
fpcasale/limix
|
a6bc2850f243fe779991bb53a24ddbebe0ab74d2
|
[
"Apache-2.0"
] | null | null | null |
limix/heritability/estimate.py
|
fpcasale/limix
|
a6bc2850f243fe779991bb53a24ddbebe0ab74d2
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import division
from numpy import ascontiguousarray, copy, ones, var
from numpy_sugar.linalg import economic_qs
from glimix_core.glmm import GLMMExpFam
def estimate(pheno, lik, K, covs=None, verbose=True):
r"""Estimate the so-called narrow-sense heritability.
It supports Normal, Bernoulli, Binomial, and Poisson phenotypes.
Let :math:`N` be the sample size and :math:`S` the number of covariates.
Parameters
----------
    pheno : tuple, array_like
        Phenotype. Dimensions :math:`N\times 1`.
    lik : {'normal', 'bernoulli', 'binomial', 'poisson'}
        Likelihood name.
    K : array_like
        Kinship matrix. Dimensions :math:`N\times N`.
    covs : array_like
        Covariates. Default is an offset. Dimensions :math:`N\times S`.
Returns
-------
float
Estimated heritability.
Examples
--------
.. doctest::
>>> from numpy import dot, exp, sqrt
>>> from numpy.random import RandomState
>>> from limix.heritability import estimate
>>>
>>> random = RandomState(0)
>>>
>>> G = random.randn(50, 100)
>>> K = dot(G, G.T)
>>> z = dot(G, random.randn(100)) / sqrt(100)
>>> y = random.poisson(exp(z))
>>>
>>> print('%.2f' % estimate(y, 'poisson', K, verbose=False))
0.70
"""
K = _background_standardize(K)
QS = economic_qs(K)
lik = lik.lower()
if lik == "binomial":
p = len(pheno[0])
else:
p = len(pheno)
if covs is None:
covs = ones((p, 1))
glmm = GLMMExpFam(pheno, lik, covs, QS)
glmm.feed().maximize(verbose=verbose)
g = glmm.scale * (1 - glmm.delta)
e = glmm.scale * glmm.delta
h2 = g / (var(glmm.mean()) + g + e)
return h2
def _background_standardize(K):
from ..stats.kinship import gower_norm
K = copy(K, "C")
K = ascontiguousarray(K, dtype=float)
gower_norm(K, K)
K /= K.diagonal()
return K
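# --- Hedged note (not part of the original module) ---
# The estimate above is h2 = g / (var(mean) + g + e): the genetic variance
# over the total variance of the linear predictor.
def _h2_example():
    g, e, var_mean = 0.7, 0.2, 0.1
    return g / (var_mean + g + e)  # 0.7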
| 24.414634
| 76
| 0.580919
| 251
| 2,002
| 4.565737
| 0.442231
| 0.031414
| 0.039267
| 0.052356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015183
| 0.276224
| 2,002
| 81
| 77
| 24.716049
| 0.775707
| 0.5
| 0
| 0
| 0
| 0
| 0.010441
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.178571
| 0
| 0.321429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a394632989f95d229e000f46db6a73bbdcda0cf3
| 2,739
|
py
|
Python
|
pyrat/__main__.py
|
gitmarek/pyrat
|
cbf918d5c23d5d39e62e00bb64b6d0596170c68b
|
[
"MIT"
] | null | null | null |
pyrat/__main__.py
|
gitmarek/pyrat
|
cbf918d5c23d5d39e62e00bb64b6d0596170c68b
|
[
"MIT"
] | null | null | null |
pyrat/__main__.py
|
gitmarek/pyrat
|
cbf918d5c23d5d39e62e00bb64b6d0596170c68b
|
[
"MIT"
] | null | null | null |
import argparse, importlib, sys
import pyrat
from pyrat import name, version, logger
# This returns a function to be called by a subparser below
# We assume in the tool's submodule there's a function called 'start(args)'
# That takes over the execution of the program.
def tool_(tool_name):
def f(args):
submodule = importlib.import_module('pyrat.' + tool_name)
getattr(submodule, 'start')(args)
return f
if __name__ == '__main__':
# create the top-level parser
parser = argparse.ArgumentParser(prog=name,
description='Raw tools for raw audio.',
epilog= name+' <command> -h for more details.')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--quiet', action='store_true',
help='takes precedence over \'verbose\'')
parser.add_argument('-v', '--version', action='store_true',
help='print version number and exit')
subparsers = parser.add_subparsers(title="Commands")
# create the parser for the "conv" command
parser_conv = subparsers.add_parser('conv',
description='''Convolve input signal with kernel.
Normalize the result and write it to outfile.''',
help='Convolve input with a kernel.')
parser_conv.add_argument('infile', type=argparse.FileType('r'))
parser_conv.add_argument('kerfile', type=argparse.FileType('r'),
help="kernel to be convolved with infile")
parser_conv.add_argument('outfile', type=argparse.FileType('w'))
parser_conv.set_defaults(func=tool_('conv'))
# create the parser for the "randph" command
parser_randph = subparsers.add_parser('randph',
description='''Randomize phases of Fourier coefficients.
Calculate the FFT of the entire signal; then randomize the phases of each
frequency bin by multiplying the frequency coefficient by a random phase:
e^{2\pi i \phi}, where $\phi$ is distributed uniformly on the interval [0,b). By
default, b=0.1. The result is saved to outfile.''',
help='Randomize phases of Fourier coefficients.')
parser_randph.add_argument('infile', type=argparse.FileType('r'))
parser_randph.add_argument('outfile', type=argparse.FileType('w'))
parser_randph.add_argument('-b', type=float, default=0.1,
        help='phases distributed uniformly on [0,b)')
parser_randph.set_defaults(func=tool_('randph'))
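    # --- Hedged sketch (not part of the original CLI) ---
    # The core of the randph transform described above, for a 1-D numpy
    # signal; the name is illustrative and nothing below calls it.
    def _randomize_phases_sketch(x, b=0.1):
        import numpy as np
        X = np.fft.rfft(x)
        phi = np.random.uniform(0.0, b, size=X.shape)
        return np.fft.irfft(X * np.exp(2j * np.pi * phi), n=len(x))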
if len(sys.argv) < 2:
parser.print_usage()
sys.exit(1)
args = parser.parse_args()
if args.version:
print(name + '-' + version)
sys.exit(0)
if args.verbose:
logger.setLevel('INFO')
else:
logger.setLevel('WARNING')
if args.quiet:
logger.setLevel(60) # above 'CRITICAL'
args.func(args)
sys.exit(0)
| 36.039474
| 78
| 0.683826
| 367
| 2,739
| 4.986376
| 0.381471
| 0.054098
| 0.054645
| 0.034426
| 0.159563
| 0.097268
| 0.097268
| 0.097268
| 0
| 0
| 0
| 0.005888
| 0.193866
| 2,739
| 75
| 79
| 36.52
| 0.822917
| 0.11172
| 0
| 0.037037
| 0
| 0.018519
| 0.329897
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.074074
| 0
| 0.12963
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3955ee346d7a3a5338cd528fa6afbec24d5527c
| 2,007
|
py
|
Python
|
python/projecteuler/src/longest_collatz_sequence.py
|
christopher-burke/warmups
|
140c96ada87ec5e9faa4622504ddee18840dce4a
|
[
"MIT"
] | null | null | null |
python/projecteuler/src/longest_collatz_sequence.py
|
christopher-burke/warmups
|
140c96ada87ec5e9faa4622504ddee18840dce4a
|
[
"MIT"
] | 2
|
2022-03-10T03:49:14.000Z
|
2022-03-14T00:49:54.000Z
|
python/projecteuler/src/longest_collatz_sequence.py
|
christopher-burke/warmups
|
140c96ada87ec5e9faa4622504ddee18840dce4a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Longest Collatz sequence.
The following iterative sequence is defined
for the set of positive integers:
n → n/2 (n is even)
n → 3n + 1 (n is odd)
Using the rule above and starting with 13,
we generate the following sequence:
13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
It can be seen that this sequence (starting at 13 and finishing at 1)
contains 10 terms. Although it has not been proved yet
(Collatz Problem), it is thought that all starting numbers finish at 1.
Which starting number, under one million, produces the longest chain?
NOTE: Once the chain starts the terms are
allowed to go above one million.
source: https://projecteuler.net/problem=14
"""
CACHE = {1: [1]}
CACHE_LENGTH = {1: 1}
def collatz_sequence(n: int) -> list:
"""Get the Collatz Sequence list.
Add each found Collatz Sequence to CACHE.
:return:
"""
if n in CACHE:
return CACHE[n]
next_ = int(n // 2) if n % 2 == 0 else int(3 * n + 1)
CACHE[n] = [n] + collatz_sequence(next_)
return CACHE[n]
def longest_collatz_sequence(limit: int) -> int:
"""Find the longest Collatz Sequence length.
    :return: number that generates the longest Collatz sequence.
"""
for i in range(2, limit+1):
collatz_sequence_length(i)
    longest = max(CACHE_LENGTH, key=CACHE_LENGTH.get)
return longest
def collatz_sequence_length(n) -> int:
    """Get the length of the Collatz sequence of n.
    :return: Length of the Collatz sequence starting at n.
    """
if n not in CACHE_LENGTH:
next_ = int(n // 2) if n % 2 == 0 else int(3 * n + 1)
CACHE_LENGTH[n] = 1 + collatz_sequence_length(next_)
return CACHE_LENGTH[n]
def main() -> int:
"""Find the Longest Collatz sequence under 1,000,000.
:return: Longest Collatz sequence under 1,000,000
"""
return longest_collatz_sequence(1000000)
if __name__ == "__main__":
lcs = main()
print(lcs, CACHE_LENGTH[lcs])
print(" → ".join(map(str, collatz_sequence(lcs))))
| 23.611765
| 71
| 0.659691
| 320
| 2,007
| 4.075
| 0.3625
| 0.184049
| 0.101227
| 0.032209
| 0.154908
| 0.154908
| 0.122699
| 0.122699
| 0.122699
| 0.122699
| 0
| 0.043535
| 0.233184
| 2,007
| 84
| 72
| 23.892857
| 0.795971
| 0.523169
| 0
| 0.166667
| 0
| 0
| 0.01236
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.375
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a396aa841a074ff27cad63b9fc597eb1d7fa8b7c
| 1,823
|
py
|
Python
|
examples/classify_pose.py
|
scottamain/aiy-maker-kit
|
4cdb973067b83d27cf0601c811d887877d1bc253
|
[
"Apache-2.0"
] | null | null | null |
examples/classify_pose.py
|
scottamain/aiy-maker-kit
|
4cdb973067b83d27cf0601c811d887877d1bc253
|
[
"Apache-2.0"
] | null | null | null |
examples/classify_pose.py
|
scottamain/aiy-maker-kit
|
4cdb973067b83d27cf0601c811d887877d1bc253
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Performs pose classification using the MoveNet model.
The MoveNet model identifies the body keypoints on a person, and then this
code passes those keypoints to a custom-trained pose classifier model that
classifies the pose with a label, such as the name of a yoga pose.
You must first complete the Google Colab to train the pose classification model:
https://g.co/coral/train-poses
And save the output .tflite and .txt files into the examples/models/ directory.
Then just run this script:
python3 classify_pose.py
For more instructions, see g.co/aiy/maker
"""
from aiymakerkit import vision
from pycoral.utils.dataset import read_label_file
import models
MOVENET_CLASSIFY_MODEL = 'models/pose_classifier.tflite'
MOVENET_CLASSIFY_LABELS = 'models/pose_labels.txt'
pose_detector = vision.PoseDetector(models.MOVENET_MODEL)
pose_classifier = vision.PoseClassifier(MOVENET_CLASSIFY_MODEL)
labels = read_label_file(MOVENET_CLASSIFY_LABELS)
for frame in vision.get_frames():
# Detect the body points and draw the skeleton
pose = pose_detector.get_pose(frame)
vision.draw_pose(frame, pose)
# Classify different body poses
label_id = pose_classifier.get_class(pose)
vision.draw_label(frame, labels.get(label_id))
| 35.745098
| 80
| 0.785518
| 278
| 1,823
| 5.053957
| 0.489209
| 0.042705
| 0.018505
| 0.022776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005814
| 0.15085
| 1,823
| 50
| 81
| 36.46
| 0.901809
| 0.65277
| 0
| 0
| 0
| 0
| 0.08347
| 0.08347
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.230769
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a39afee8e197b6834391bc0d4c2a7ba0f29e4cdf
| 622
|
py
|
Python
|
tests/test_versions_in_sync.py
|
simon-graham/pure_interface
|
da7bf05151c1c906c753987fbf7e3251905b4ba0
|
[
"MIT"
] | 10
|
2018-08-27T04:15:53.000Z
|
2021-08-18T09:45:35.000Z
|
tests/test_versions_in_sync.py
|
simon-graham/pure_interface
|
da7bf05151c1c906c753987fbf7e3251905b4ba0
|
[
"MIT"
] | 35
|
2018-08-27T04:17:44.000Z
|
2021-09-22T05:39:57.000Z
|
tests/test_versions_in_sync.py
|
tim-mitchell/pure_interface
|
46a2de2574f4543980303cafd89cfcbdb643fbbb
|
[
"MIT"
] | 3
|
2018-09-19T21:32:01.000Z
|
2020-11-17T00:58:55.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
import pure_interface
class TestVersionsMatch(unittest.TestCase):
def test_versions(self):
        setup_cfg = os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')
        with open(setup_cfg, 'r') as f:
            setup_contents = f.readlines()
        for line in setup_contents:
            if 'version =' in line:
                self.assertIn(pure_interface.__version__, line)
                break
        else:
            self.fail('did not find version in setup.cfg')
| 29.619048
| 82
| 0.636656
| 76
| 622
| 4.921053
| 0.644737
| 0.05615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002174
| 0.26045
| 622
| 20
| 83
| 31.1
| 0.81087
| 0.033762
| 0
| 0
| 0
| 0
| 0.088481
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.066667
| false
| 0
| 0.266667
| 0
| 0.4
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a39ece0f6a490b1cd3625b5fef325786496075c3
| 2,973
|
py
|
Python
|
train.py
|
Saaaber/urban-segmentation
|
fc893feb9208d3206d7c5329b1ccf4cfab97ed31
|
[
"MIT"
] | 3
|
2020-11-16T20:21:25.000Z
|
2021-06-11T13:09:30.000Z
|
train.py
|
Saaaber/urban-segmentation
|
fc893feb9208d3206d7c5329b1ccf4cfab97ed31
|
[
"MIT"
] | null | null | null |
train.py
|
Saaaber/urban-segmentation
|
fc893feb9208d3206d7c5329b1ccf4cfab97ed31
|
[
"MIT"
] | 3
|
2020-11-11T23:43:15.000Z
|
2022-03-17T09:03:42.000Z
|
# Copyright (c) Ville de Montreal. All rights reserved.
# Licensed under the MIT license.
# See LICENSE file in the project root for full license information.
import os
import json
import torch
import argparse
import datetime
from utils.factories import ModelFactory, OptimizerFactory, TrainerFactory
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Semantic Segmentation Training")
parser.add_argument('-c', '--config', default=None, type=str,
help="config file path (default: None)")
parser.add_argument('-r', '--resume', default=None, type=str,
help="path to latest checkpoint (default: None)")
parser.add_argument('-d', '--dir', default=None, type=str,
help="experiment dir path (default: None)")
args = parser.parse_args()
# Check for GPU
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
torch.backends.cudnn.deterministic = True
# Check if Colab run
COLAB = os.path.exists("/content/gdrive")
    if args.config:
        # Load config file
        config = json.load(open(args.config))
    elif args.resume:
        # Load config file from checkpoint
        config = torch.load(args.resume, map_location=device)['config']
    else:
        # Without either flag, 'config' would be undefined below
        parser.error("either --config or --resume must be provided")
# Change log dir if colab run
if COLAB is True:
config['trainer']['log_dir'] = "/content/gdrive/My Drive/colab_saves/logs/"
# Set experiment dir to current time if none provided
if args.dir:
experiment_dir = args.dir
else:
experiment_dir = datetime.datetime.now().strftime("%m%d_%H%M%S")
# Init model and optimizer from config with factories
model = ModelFactory.get(config['model'])
params = filter(lambda p: p.requires_grad, model.parameters())
optimizer = OptimizerFactory.get(config['optimizer'], params)
# Check if semi-supervised run
if config['semi'] is True:
# Init model_d and optimizer_d from config with factories
model_d = ModelFactory.get(config['model_d'])
params_d = filter(lambda p: p.requires_grad, model_d.parameters())
optimizer_d = OptimizerFactory.get(config['optimizer_d'], params_d)
# Init semi-supervised trainer object from config with factory
trainer = TrainerFactory.get(config)(
model,
model_d,
optimizer,
optimizer_d,
config=config,
resume=args.resume,
experiment_dir=experiment_dir,
**config['trainer']['options'])
else:
# Init supervised trainer object from config with factory
trainer = TrainerFactory.get(config)(
model,
optimizer,
config=config,
resume=args.resume,
experiment_dir=experiment_dir,
**config['trainer']['options'])
# Run a training experiment
trainer.train()
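For reference, a config exercising the keys read above might look like the sketch below; the exact schemas expected by ModelFactory, OptimizerFactory and TrainerFactory live elsewhere in the repository, so every value here is an illustrative placeholder:
# Illustrative shape only; real keys and values depend on the factories.
example_config = {
    "model": {"type": "unet"},                  # consumed by ModelFactory.get
    "optimizer": {"type": "adam", "lr": 1e-3},  # consumed by OptimizerFactory.get
    "semi": False,                              # True also requires model_d / optimizer_d
    "trainer": {
        "log_dir": "logs/",                     # overridden on Colab runs
        "options": {"epochs": 50},              # passed as **kwargs to the trainer
    },
}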
| 33.784091
| 83
| 0.636731
| 350
| 2,973
| 5.305714
| 0.34
| 0.056004
| 0.030156
| 0.029079
| 0.294023
| 0.198169
| 0.198169
| 0.164782
| 0.164782
| 0.164782
| 0
| 0
| 0.258325
| 2,973
| 87
| 84
| 34.172414
| 0.842177
| 0.200135
| 0
| 0.298246
| 0
| 0
| 0.140559
| 0.009738
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.105263
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3a1c89d1bcdd899b6c1712a17770e89aa6ef0b0
| 5,062
|
py
|
Python
|
vivarium/lidar.py
|
Pyrofoux/vivarium
|
90c07384929f6c34915f053fd8e95e91358c4e58
|
[
"MIT"
] | 2
|
2020-10-30T15:28:06.000Z
|
2022-01-31T17:13:25.000Z
|
vivarium/lidar.py
|
Pyrofoux/vivarium
|
90c07384929f6c34915f053fd8e95e91358c4e58
|
[
"MIT"
] | null | null | null |
vivarium/lidar.py
|
Pyrofoux/vivarium
|
90c07384929f6c34915f053fd8e95e91358c4e58
|
[
"MIT"
] | null | null | null |
from simple_playgrounds.entities.agents.sensors.sensor import *
from simple_playgrounds.entities.agents.sensors.semantic_sensors import *
from collections import defaultdict
from pymunk.vec2d import Vec2d
import math
#@SensorGenerator.register('lidar')
class LidarSensor(SemanticSensor):
def __init__(self, anchor, invisible_elements=None,
remove_occluded=True, allow_duplicates=False, **sensor_params):
self.sensor_type = 'lidar'
#Todo later: add default config, as in visual_sensors
sensor_param = {**sensor_params}
super(LidarSensor, self).__init__(anchor, invisible_elements, sensor_param, fov=0)
        # Sensor parameters. TODO: make them configurable
        self.FoV = sensor_param.get('FoV', 100)  # in pixels
        # Upper bound set to 360 so bearings in [270, 360) fall in the last cone
        self.angle_ranges = sensor_param.get('angle_ranges', [(0, 90), (90, 180), (180, 270), (270, 360)])
self.cones_number = len(self.angle_ranges)
self.observation = None
self.anchor = anchor
def update_sensor(self, pg):
#current_agent, entities, agents):
entities = pg.scene_elements
agents = pg.agents
current_agent = self.anchor
        # Initialising output
output = [dict() for i in range(self.cones_number)]
#Current's agent Shape
agent_position = current_agent.position
agent_coord = Vec2d(agent_position[0], agent_position[1])
agent_angle = agent_position[2]
#Gathering positions of entities and agents, in sorted dict by entity/agent type
sorted_positions = dict()
#Gathering key and shapes from entities
for entity in entities:
key = type(entity).__name__ #Key in matrix
            if key not in sorted_positions:
sorted_positions[key] = []
#Looks like the relevant Pymunk position is the last one
#To check in entity.py
sorted_positions[key].append(entity.position)
#Gathering key and shapes from agents
for agent in agents:
key = type(agent).__name__ #Key in matrix
            if key not in sorted_positions:
sorted_positions[key] = []
#Agent shouldn't detect itself
            if agent is not current_agent:
sorted_positions[key].append(agent.position)
#For each entity type
for entity_type, positions in sorted_positions.items():
#add here: Tests on entity_type : can the entity be detected ?
#Value initialisation: initial activation = 0
for i in range(self.cones_number):
output[i][entity_type] = 0
#For each entity
for position in positions:
#Calculating the nearest point on the entity's surface
#query = shape.segment_query(agent_coord, shape_position)
#near_point = query.point
#For debugging purpose
#Approximation : center ph position instead of projection
near_point = position
#if entity_type == 'Candy':
#self.logger.add((position[0], position[1]),"near_point")
#self.logger.add((agent_position[0], agent_position[1]), "agent_position")
#Distance check - is the object too far ?
distance = agent_coord.get_distance(near_point)
if distance > self.FoV:
continue
#Angle check - In which cone does it fall ?
dy = (near_point[1] - agent_coord[1])
dx = (near_point[0] - agent_coord[0])
target_angle = math.atan2(dy, dx)
                relative_angle = target_angle - agent_angle  # Subtract agent angle to account for rotation
#if entity_type == 'Candy':
#self.logger.add(relative_angle,"relative_angle")
#self.logger.add(target_angle,"target_angle")
#self.logger.add(agent_angle, "agent_angle")
                relative_angle_degrees = math.degrees(relative_angle) % 360  # Avoid negative angles and angles above 360
cone = None
#Calculating in which cone the position is detected
for i in range(len(self.angle_ranges)):
angle_range = self.angle_ranges[i]
if relative_angle_degrees >= angle_range[0] and relative_angle_degrees < angle_range[1]:
cone = i
if cone is None:
continue
                if entity_type not in output[cone]:
output[cone][entity_type] = 0
normalised_distance = distance/self.FoV
activation = 1 - normalised_distance
#Keeping only the nearest distance = highest activation
if output[cone][entity_type] < activation:
output[cone][entity_type] = activation
self.observation = output
return output
def get_shape_observation(self):
pass
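The angle test in update_sensor reduces to one reusable computation: convert the target's bearing into an agent-relative angle in [0, 360) and find which configured range contains it. A standalone sketch of that logic:
# Standalone version of the cone-binning math used in update_sensor above.
import math

def cone_index(agent_xy, agent_angle, target_xy, angle_ranges):
    dx = target_xy[0] - agent_xy[0]
    dy = target_xy[1] - agent_xy[1]
    relative = math.degrees(math.atan2(dy, dx) - agent_angle) % 360
    for i, (low, high) in enumerate(angle_ranges):
        if low <= relative < high:
            return i
    return None  # bearing falls outside every configured range

# cone_index((0, 0), 0.0, (1, 1), [(0, 90), (90, 180), (180, 270), (270, 360)]) -> 0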
| 34.435374
| 111
| 0.600356
| 581
| 5,062
| 5.04475
| 0.283993
| 0.034118
| 0.022177
| 0.011259
| 0.186967
| 0.128966
| 0.100307
| 0.039577
| 0.039577
| 0.039577
| 0
| 0.014864
| 0.322205
| 5,062
| 146
| 112
| 34.671233
| 0.839405
| 0.280522
| 0
| 0.090909
| 0
| 0
| 0.005548
| 0
| 0
| 0
| 0
| 0.006849
| 0
| 1
| 0.045455
| false
| 0.015152
| 0.075758
| 0
| 0.151515
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a3a3cd19889c828efa32a912a6cda2aa73fb4ca6
| 4,310
|
py
|
Python
|
bin/allplots.py
|
Gabaldonlab/karyon
|
ba81828921b83b553f126892795253be1fd941ba
|
[
"MIT"
] | null | null | null |
bin/allplots.py
|
Gabaldonlab/karyon
|
ba81828921b83b553f126892795253be1fd941ba
|
[
"MIT"
] | 2
|
2021-07-07T08:40:56.000Z
|
2022-01-06T16:10:27.000Z
|
bin/allplots.py
|
Gabaldonlab/karyon
|
ba81828921b83b553f126892795253be1fd941ba
|
[
"MIT"
] | null | null | null |
#!/bin/python
import sys, os, re, subprocess, math
import argparse
import psutil
import pysam
from Bio import SeqIO
import numpy as np
import numpy.random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#import seaborn as sns
import pandas as pd
import scipy.stats
from scipy.stats import gaussian_kde
from scipy import stats
from decimal import Decimal
import string, random
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--fasta', required=True, help="fasta file used as input")
parser.add_argument('-d', '--output_directory', default="./", help='Directory where all the output files will be generated.')
parser.add_argument('-o', '--output_name', required=True, help="Output prefix")
parser.add_argument('-v', '--vcf', required=True, help="VCF file used as input")
parser.add_argument('-p', '--pileup', required=True, help="Mpileup file used as input")
parser.add_argument('-b', '--bam', required=True, help="Bam file used as input")
parser.add_argument('-l', '--library', required=True, nargs='+', help="Illumina libraries used for the KAT plot")
parser.add_argument('--configuration', default=False, help="Configuration file. By default will use ./configuration.txt as the configuration file.")
parser.add_argument('-w', '--window_size', default=1000, help="Window size for plotting")
parser.add_argument('-x', '--max_scaf2plot', default=20, help="Number of scaffolds to analyze")
parser.add_argument('-s', '--scafminsize', default=False, help="Will ignore scaffolds with length below the given threshold")
parser.add_argument('-S', '--scafmaxsize', default=False, help="Will ignore scaffolds with length above the given threshold")
parser.add_argument('-i', '--job_id', default=False, help='Identifier of the intermediate files generated by the different programs. If false, the program will assign a name consisting of a string of 6 random alphanumeric characters.')
args = parser.parse_args()
true_output = os.path.abspath(args.output_directory)
    if true_output[-1] != "/":
        true_output = true_output + "/"
def parse_config(config):
config_dict = {}
prev = 0
for line in open(config):
if line[0] == "#": continue
elif line[0] == "+":
prev = line[1:-1]
config_dict[prev] = ["","",""]
elif line[0] == "@":
if config_dict[prev][0] != "": continue
config_dict[prev][0] = line[1:-1]
elif line[0] == ">":
config_dict[prev][1] = config_dict[prev][1] + line[1:-1] + " "
elif line[0] == "?":
if config_dict[prev][2] != "": continue
config_dict[prev][2] = line[1:-1] + " "
return config_dict
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
config_path = args.configuration
if not args.configuration:
selfpath = os.path.dirname(os.path.realpath(sys.argv[0]))
        config_path = selfpath[:selfpath.rfind('/')] + "/configuration.txt"
config_dict = parse_config(config_path)
counter = int(args.max_scaf2plot)
window_size=int(args.window_size)
step=window_size/2
cwd = os.path.abspath(os.getcwd())
os.chdir(true_output)
os.system("bgzip -c "+ args.vcf + " > " + args.vcf + ".gz")
os.system("tabix -p vcf "+ args.vcf+".gz")
#vcf_file = pysam.VariantFile(args.vcf+".gz", 'r')
bam_file = pysam.AlignmentFile(args.bam, 'rb')
home = config_dict["karyon"][0]
job_ID = args.job_id if args.job_id else id_generator()
name = args.output_name if args.output_name else job_ID
kitchen = home + "tmp/"+job_ID
lendict = {}
fastainput = SeqIO.index(args.fasta, "fasta")
for i in fastainput:
lendict[i] = len(fastainput[i].seq)
from karyonplots import katplot, allplots
from report import report, ploidy_veredict
df = allplots(window_size,
args.vcf,
args.fasta,
args.bam,
args.pileup,
args.library[0],
config_dict['nQuire'][0],
config_dict["KAT"][0],
kitchen,
true_output,
counter,
job_ID, name,
args.scafminsize,
args.scafmaxsize, False)
df2 = ploidy_veredict(df, true_output, name, window_size)
report(true_output, name, df2, True, False, window_size, False, False)
df2.to_csv(true_output+"/Report/"+name+".csv", index=False)
os.chdir(cwd)
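The parse_config format above is line-oriented: '#' starts a comment, '+' opens a named entry, '@' sets its path (first occurrence wins), '>' appends arguments, and '?' sets an option once. A hypothetical configuration.txt, written and parsed inline for illustration (section names and paths are placeholders):
# Hypothetical config contents for parse_config above.
sample = """\
# karyon configuration
+karyon
@/opt/karyon/
+nQuire
@/opt/nQuire/bin/
>--extra-arg
"""
with open("configuration.txt", "w") as fh:
    fh.write(sample)
print(parse_config("configuration.txt"))
# {'karyon': ['/opt/karyon/', '', ''], 'nQuire': ['/opt/nQuire/bin/', '--extra-arg ', '']}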
| 35.916667
| 236
| 0.710905
| 629
| 4,310
| 4.739269
| 0.292528
| 0.039249
| 0.074136
| 0.020127
| 0.168735
| 0.147937
| 0.118081
| 0.05837
| 0
| 0
| 0
| 0.010721
| 0.134339
| 4,310
| 119
| 237
| 36.218487
| 0.788261
| 0.019026
| 0
| 0.020619
| 0
| 0.010309
| 0.215858
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020619
| false
| 0
| 0.175258
| 0.010309
| 0.216495
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e55e971b17323a0b8342354a7a6ad601469f01e
| 18,524
|
py
|
Python
|
syntropynac/resolve.py
|
SyntropyNet/syntropy-nac
|
8beddcd606d46fd909f51d0c53044be496cec995
|
[
"MIT"
] | 3
|
2021-01-06T08:24:47.000Z
|
2021-02-27T08:08:07.000Z
|
syntropynac/resolve.py
|
SyntropyNet/syntropy-nac
|
8beddcd606d46fd909f51d0c53044be496cec995
|
[
"MIT"
] | null | null | null |
syntropynac/resolve.py
|
SyntropyNet/syntropy-nac
|
8beddcd606d46fd909f51d0c53044be496cec995
|
[
"MIT"
] | null | null | null |
import functools
from dataclasses import dataclass
from itertools import combinations
import click
import syntropy_sdk as sdk
from syntropy_sdk import utils
from syntropynac.exceptions import ConfigureNetworkError
from syntropynac.fields import ALLOWED_PEER_TYPES, ConfigFields, PeerState, PeerType
@dataclass
class ConnectionServices:
agent_1: int
agent_2: int
agent_1_service_names: list
agent_2_service_names: list
@classmethod
def create(cls, link, endpoints):
endpoint_1, endpoint_2 = endpoints
return cls(
link[0],
link[1],
cls._get_services(endpoint_1),
cls._get_services(endpoint_2),
)
@staticmethod
def _get_services(endpoint):
service_names = endpoint[1].get(ConfigFields.SERVICES)
if service_names is None:
return []
if isinstance(service_names, str):
return [service_names]
if not isinstance(service_names, list) or any(
not isinstance(name, str) for name in service_names
):
raise ConfigureNetworkError(
f"Services parameter must be a list of service names for endpoint {endpoint[0]}"
)
return service_names
def get_subnets(self, endpoint_id, agents):
agent_id = getattr(self, f"agent_{endpoint_id}")
service_names = getattr(self, f"agent_{endpoint_id}_service_names")
agent = agents[agent_id]
return [
subnet["agent_service_subnet_id"]
for service in agent["agent_services"]
for subnet in service["agent_service_subnets"]
if service["agent_service_name"] in service_names
]
@functools.lru_cache(maxsize=None)
def resolve_agent_by_name(api, name, silent=False):
return [
agent["agent_id"]
for agent in utils.WithPagination(sdk.AgentsApi(api).platform_agent_index)(
filter=f"name:'{name}'", _preload_content=False
)["data"]
]
@functools.lru_cache(maxsize=None)
def get_all_agents(api, silent=False):
all_agents = utils.WithPagination(sdk.AgentsApi(api).platform_agent_index)(
_preload_content=False
)["data"]
return {agent["agent_id"]: agent for agent in all_agents}
def resolve_agents(api, agents, silent=False):
"""Resolves endpoint names to ids inplace.
Args:
api (PlatformApi): API object to communicate with the platform.
agents (dict): A dictionary containing endpoints.
silent (bool, optional): Indicates whether to suppress messages - used with Ansible. Defaults to False.
"""
for name, id in agents.items():
if id is not None:
continue
result = resolve_agent_by_name(api, name, silent=silent)
if len(result) != 1:
error = f"Could not resolve endpoint name {name}, found: {result}."
if not silent:
click.secho(
error,
err=True,
fg="red",
)
continue
else:
raise ConfigureNetworkError(error)
agents[name] = result[0]
def get_peer_id(peer_name, peer_config):
peer_type = peer_config.get(ConfigFields.PEER_TYPE, PeerType.ENDPOINT)
if peer_type == PeerType.ENDPOINT:
return peer_config.get(ConfigFields.ID)
elif peer_type == PeerType.ID:
try:
return int(peer_name)
except ValueError:
return None
else:
return None
def resolve_present_absent(agents, present, absent):
"""Resolves agent connections by objects into agent connections by ids.
Additionally removes any present connections if they were already added to absent.
Present connections are the connections that appear as "present" in the config
and will be added to the network.
Absent connections are the connections that appear as "absent" in the config and
will be removed from the existing network.
Services is a list of service names assigned to the connection's corresponding endpoints.
Args:
agents (dict[str, int]): Agent map from name to id.
present (list): A list of connections that are marked as present in the config.
absent (list): A list of connections that are marked as absent in the config.
Returns:
tuple: Three items that correspond to present/absent connections and a list
of ConnectionServices objects that correspond to present connections.
Present/absent connections is a list of lists of two elements, where
elements are agent ids.
"""
present_ids = [[agents[src[0]], agents[dst[0]]] for src, dst in present]
absent_ids = [[agents[src[0]], agents[dst[0]]] for src, dst in absent]
services = [
ConnectionServices.create(link, conn)
for link, conn in zip(present_ids, present)
if link not in absent_ids
and link[::-1] not in absent_ids
and link[0] != link[1]
]
return (
[
link
for link in present_ids
if link not in absent_ids
and link[::-1] not in absent_ids
and link[0] != link[1]
],
[i for i in absent_ids if i[0] != i[1]],
services,
)
def validate_connections(connections, silent=False, level=0):
"""Check if the connections structure makes any sense.
Recursively goes inside 'connect_to' dictionary up to 1 level.
Args:
connections (dict): A dictionary describing connections.
silent (bool, optional): Indicates whether to suppress output to stderr.
Raises ConfigureNetworkError instead. Defaults to False.
level (int, optional): Recursion level depth. Defaults to 0.
Raises:
ConfigureNetworkError: If silent==True, then raise an exception in case of irrecoverable error.
Returns:
bool: Returns False in case of invalid connections structure.
"""
if level > 1:
silent or click.secho(
(
f"Field {ConfigFields.CONNECT_TO} found at level {level + 1}. This will be ignored, "
"however, please double check your configuration file."
)
)
return True
for name, con in connections.items():
if not name or not isinstance(name, (str, int)):
error = f"Invalid endpoint name found."
if not silent:
click.secho(error, err=True, fg="red")
return False
else:
raise ConfigureNetworkError(error)
if not isinstance(con, dict):
error = f"Entry '{name}' in {ConfigFields.CONNECT_TO} must be a dictionary, but found {con.__class__.__name__}."
if not silent:
click.secho(error, err=True, fg="red")
return False
else:
raise ConfigureNetworkError(error)
if ConfigFields.PEER_TYPE not in con:
error = f"Endpoint '{name}' {ConfigFields.PEER_TYPE} must be present."
if not silent:
click.secho(error, err=True, fg="red")
return False
else:
raise ConfigureNetworkError(error)
if con[ConfigFields.PEER_TYPE] not in ALLOWED_PEER_TYPES:
error = f"Endpoint '{name}' {ConfigFields.PEER_TYPE} '{con[ConfigFields.PEER_TYPE]}' is not allowed."
if not silent:
click.secho(error, err=True, fg="red")
return False
else:
raise ConfigureNetworkError(error)
probably_an_id = False
try:
name_as_id = int(name)
probably_an_id = True
except ValueError:
name_as_id = name
if probably_an_id and con[ConfigFields.PEER_TYPE] == PeerType.ENDPOINT:
click.secho(
(
f"Endpoint '{name}' {ConfigFields.PEER_TYPE} is {PeerType.ENDPOINT}, however, "
f"it appears to be an {PeerType.ID}."
),
err=True,
fg="yellow",
)
if not probably_an_id and con[ConfigFields.PEER_TYPE] == PeerType.ID:
error = (
f"Endpoint '{name}' {ConfigFields.PEER_TYPE} is {PeerType.ID}, however, "
f"it appears to be an {PeerType.ENDPOINT}."
)
if not silent:
click.secho(error, err=True, fg="red")
return False
else:
raise ConfigureNetworkError(error)
if ConfigFields.ID in con and con[ConfigFields.ID] is not None:
try:
_ = int(con[ConfigFields.ID])
id_valid = True
except ValueError:
id_valid = False
if (
not isinstance(con[ConfigFields.ID], (str, int))
or not con[ConfigFields.ID]
or not id_valid
):
error = f"Endpoint '{name}' {ConfigFields.ID} is invalid."
if not silent:
click.secho(error, err=True, fg="red")
return False
else:
raise ConfigureNetworkError(error)
if (
con[ConfigFields.PEER_TYPE] == PeerType.ID
and int(con[ConfigFields.ID]) != name_as_id
):
error = f"Endpoint '{name}' {ConfigFields.ID} field does not match endpoint id."
if not silent:
click.secho(error, err=True, fg="red")
return False
else:
raise ConfigureNetworkError(error)
if ConfigFields.SERVICES in con:
if not isinstance(con[ConfigFields.SERVICES], (list, tuple)):
error = (
f"Endpoint '{name}' {ConfigFields.SERVICES} must be a "
f"list, but found {con[ConfigFields.SERVICES].__class__.__name__}."
)
if not silent:
click.secho(error, err=True, fg="red")
return False
else:
raise ConfigureNetworkError(error)
for service in con[ConfigFields.SERVICES]:
if not isinstance(service, (str, int)):
error = (
f"Endpoint '{name}' service must be a string"
f", but found {service.__class__.__name__}."
)
if not silent:
click.secho(error, err=True, fg="red")
return False
else:
raise ConfigureNetworkError(error)
if ConfigFields.CONNECT_TO in con:
if not validate_connections(
con[ConfigFields.CONNECT_TO], silent, level + 1
):
return False
return True
def resolve_p2p_connections(api, connections, silent=False):
"""Resolves configuration connections for Point to Point topology.
Args:
api (PlatformApi): API object to communicate with the platform.
connections (dict): A dictionary containing connections as described in the config file.
silent (bool, optional): Indicates whether to suppress messages - used with Ansible. Defaults to False.
Returns:
list: A list of two item lists describing endpoint to endpoint connections.
"""
present = []
absent = []
agents = {}
for src in connections.items():
dst = src[1].get(ConfigFields.CONNECT_TO)
if dst is None or len(dst.keys()) == 0:
continue
dst = list(dst.items())[0]
agents[src[0]] = get_peer_id(*src)
agents[dst[0]] = get_peer_id(*dst)
if (
src[1].get(ConfigFields.STATE) == PeerState.ABSENT
or dst[1].get(ConfigFields.STATE) == PeerState.ABSENT
):
absent.append((src, dst))
elif (
src[1].get(ConfigFields.STATE, PeerState.PRESENT) == PeerState.PRESENT
or dst[1].get(ConfigFields.STATE, PeerState.PRESENT) == PeerState.PRESENT
):
present.append((src, dst))
else:
error = f"Invalid state for agents {src[0]} or {dst[0]}"
if not silent:
click.secho(error, fg="red", err=True)
else:
raise ConfigureNetworkError(error)
resolve_agents(api, agents, silent=silent)
    if any(agent_id is None for agent_id in agents.values()):
return resolve_present_absent({}, [], [])
return resolve_present_absent(agents, present, absent)
def expand_agents_tags(api, dst_dict, silent=False):
"""Expand tag endpoints into individual endpoints.
Args:
api (PlatformApi): API object to communicate with the platform.
dst_dict (dict): Connections dictionary that contain tags as endpoints.
silent (bool, optional): Indicates whether to suppress messages - used with Ansible. Defaults to False.
Raises:
ConfigureNetworkError: In case of any errors
Returns:
Union[dict, None]: Dictionary with expanded endpoints where key is the name and value is the config(id, state, type).
"""
items = {}
# First expand tags
for name, dst in dst_dict.items():
if dst.get(ConfigFields.PEER_TYPE) != PeerType.TAG:
continue
agents = utils.WithPagination(sdk.AgentsApi(api).platform_agent_index)(
filter=f"tags_names[]:{name}",
_preload_content=False,
)["data"]
if not agents:
error = f"Could not find endpoints by the tag {name}"
if not silent:
click.secho(error, err=True, fg="red")
return
else:
raise ConfigureNetworkError(error)
tag_state = dst.get(ConfigFields.STATE, PeerState.PRESENT)
for agent in agents:
agent_name = agent["agent_name"]
if agent_name not in items or (
tag_state == PeerState.ABSENT
and items[agent_name][ConfigFields.STATE] == PeerState.PRESENT
):
items[agent_name] = {
ConfigFields.ID: agent["agent_id"],
ConfigFields.STATE: tag_state,
ConfigFields.PEER_TYPE: PeerType.ENDPOINT,
ConfigFields.SERVICES: dst.get(ConfigFields.SERVICES),
}
# Then override with explicit configs
for name, dst in dst_dict.items():
if dst.get(ConfigFields.PEER_TYPE) != PeerType.TAG:
items[name] = dst
continue
return items
def resolve_p2m_connections(api, connections, silent=False):
"""Resolves configuration connections for Point to Multipoint topology. Also, expands tags.
Args:
api (PlatformApi): API object to communicate with the platform.
connections (dict): A dictionary containing connections as described in the config file.
silent (bool, optional): Indicates whether to suppress messages - used with Ansible. Defaults to False.
Returns:
list: A list of two item lists describing endpoint to endpoint connections.
"""
present = []
absent = []
agents = {}
for src in connections.items():
dst_dict = src[1].get(ConfigFields.CONNECT_TO)
if dst_dict is None or len(dst_dict.keys()) == 0:
continue
dst_dict = expand_agents_tags(api, dst_dict)
if dst_dict is None:
return resolve_present_absent({}, [], [])
agents[src[0]] = get_peer_id(*src)
for dst in dst_dict.items():
agents[dst[0]] = get_peer_id(*dst)
if (
src[1].get(ConfigFields.STATE) == PeerState.ABSENT
or dst[1].get(ConfigFields.STATE) == PeerState.ABSENT
):
absent.append((src, dst))
elif (
src[1].get(ConfigFields.STATE, PeerState.PRESENT) == PeerState.PRESENT
or dst[1].get(ConfigFields.STATE, PeerState.PRESENT)
== PeerState.PRESENT
):
present.append((src, dst))
else:
error = f"Invalid state for agents {src[0]} or {dst[0]}"
if not silent:
click.secho(error, fg="red", err=True)
else:
raise ConfigureNetworkError(error)
resolve_agents(api, agents, silent=silent)
    if any(agent_id is None for agent_id in agents.values()):
return resolve_present_absent({}, [], [])
return resolve_present_absent(agents, present, absent)
def resolve_mesh_connections(api, connections, silent=False):
"""Resolves configuration connections for mesh topology. Also, expands tags.
Args:
api (PlatformApi): API object to communicate with the platform.
connections (dict): A dictionary containing connections.
silent (bool, optional): Indicates whether to suppress messages - used with Ansible. Defaults to False.
Returns:
list: A list of two item lists describing endpoint to endpoint connections.
"""
present = []
absent = []
connections = expand_agents_tags(api, connections)
if connections is None:
return resolve_present_absent({}, [], [])
agents = {
name: get_peer_id(name, connection) for name, connection in connections.items()
}
# NOTE: Assuming connections are bidirectional
for src, dst in combinations(connections.items(), 2):
if (
src[1].get(ConfigFields.STATE) == PeerState.ABSENT
or dst[1].get(ConfigFields.STATE) == PeerState.ABSENT
):
absent.append((src, dst))
elif (
src[1].get(ConfigFields.STATE, PeerState.PRESENT) == PeerState.PRESENT
or dst[1].get(ConfigFields.STATE, PeerState.PRESENT) == PeerState.PRESENT
):
present.append((src, dst))
else:
error = f"Invalid state for agents {src[0]} or {dst[0]}"
if not silent:
click.secho(error, fg="red", err=True)
else:
raise ConfigureNetworkError(error)
resolve_agents(api, agents, silent=silent)
    if any(agent_id is None for agent_id in agents.values()):
return resolve_present_absent({}, [], [])
return resolve_present_absent(agents, present, absent)
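A toy illustration of resolve_present_absent's contract, with names already resolved to ids and empty per-endpoint configs (so no services attach):
# Toy data; in real use the (name, config) tuples come from the parsed config.
agents = {"alpha": 1, "beta": 2, "gamma": 3}
present = [(("alpha", {}), ("beta", {})), (("alpha", {}), ("gamma", {}))]
absent = [(("alpha", {}), ("gamma", {}))]
present_ids, absent_ids, services = resolve_present_absent(agents, present, absent)
# present_ids == [[1, 2]]   (alpha-gamma is dropped because it is also absent)
# absent_ids == [[1, 3]]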
| 36.608696
| 125
| 0.589721
| 2,114
| 18,524
| 5.060549
| 0.113056
| 0.01075
| 0.022434
| 0.020938
| 0.567583
| 0.520938
| 0.495607
| 0.464105
| 0.431669
| 0.403533
| 0
| 0.004936
| 0.321961
| 18,524
| 505
| 126
| 36.681188
| 0.846815
| 0.201252
| 0
| 0.49162
| 0
| 0.002793
| 0.104254
| 0.026133
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036313
| false
| 0
| 0.022346
| 0.002793
| 0.167598
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e56c45295d74ab6452768ca7c9600d73e511225
| 10,298
|
py
|
Python
|
idact/detail/nodes/node_impl.py
|
garstka/idact
|
b9c8405c94db362c4a51d6bfdf418b14f06f0da1
|
[
"MIT"
] | 5
|
2018-12-06T15:40:34.000Z
|
2019-06-19T11:22:58.000Z
|
idact/detail/nodes/node_impl.py
|
garstka/idact
|
b9c8405c94db362c4a51d6bfdf418b14f06f0da1
|
[
"MIT"
] | 9
|
2018-12-06T16:35:26.000Z
|
2019-04-28T19:01:40.000Z
|
idact/detail/nodes/node_impl.py
|
garstka/idact
|
b9c8405c94db362c4a51d6bfdf418b14f06f0da1
|
[
"MIT"
] | 2
|
2019-04-28T19:18:58.000Z
|
2019-06-17T06:56:28.000Z
|
"""This module contains the implementation of the cluster node interface."""
import datetime
from typing import Optional, Any, Callable
import bitmath
import fabric.operations
import fabric.tasks
import fabric.decorators
from fabric.exceptions import CommandTimeout
from fabric.state import env
from idact.core.retry import Retry
from idact.core.config import ClusterConfig
from idact.core.jupyter_deployment import JupyterDeployment
from idact.core.node_resource_status import NodeResourceStatus
from idact.detail.auth.authenticate import authenticate
from idact.detail.helper.raise_on_remote_fail import raise_on_remote_fail
from idact.detail.helper.retry import retry_with_config
from idact.detail.helper.stage_info import stage_debug
from idact.detail.helper.utc_from_str import utc_from_str
from idact.detail.helper.utc_now import utc_now
from idact.detail.jupyter.deploy_jupyter import deploy_jupyter
from idact.detail.log.capture_fabric_output_to_log import \
capture_fabric_output_to_log
from idact.detail.log.get_logger import get_logger
from idact.detail.nodes.node_internal import NodeInternal
from idact.detail.nodes.node_resource_status_impl import NodeResourceStatusImpl
from idact.detail.serialization.serializable_types import SerializableTypes
from idact.detail.tunnel.build_tunnel import build_tunnel
from idact.detail.tunnel.get_bindings_with_single_gateway import \
get_bindings_with_single_gateway
from idact.detail.tunnel.ssh_tunnel import SshTunnel
from idact.detail.tunnel.tunnel_internal import TunnelInternal
from idact.detail.tunnel.validate_tunnel_ports import validate_tunnel_ports
ANY_TUNNEL_PORT = 0
class NodeImpl(NodeInternal):
"""Implementation of cluster node interface.
:param config: Client cluster config.
"""
def connect(self, timeout: Optional[int] = None):
result = self.run("echo 'Testing connection...'", timeout=timeout)
if result != 'Testing connection...':
raise RuntimeError("Unexpected test command output.")
def __init__(self,
config: ClusterConfig):
self._config = config
self._host = None # type: Optional[str]
self._port = None # type: Optional[int]
self._cores = None # type: Optional[int]
self._memory = None # type: Optional[bitmath.Byte]
self._allocated_until = None # type: Optional[datetime.datetime]
def _ensure_allocated(self):
"""Raises an exception if the node is not allocated."""
if self._host is None:
raise RuntimeError("Node is not allocated.")
if self._allocated_until and self._allocated_until < utc_now():
message = "'{node}' was terminated at '{timestamp}'."
raise RuntimeError(message.format(
node=self._host,
timestamp=self._allocated_until.isoformat()))
def run(self,
command: str,
timeout: Optional[int] = None) -> str:
return self.run_impl(command=command,
timeout=timeout,
install_keys=False)
def run_impl(self,
command: str,
timeout: Optional[int] = None,
install_keys: bool = False) -> str:
try:
@fabric.decorators.task
def task():
"""Runs the command with a timeout."""
with capture_fabric_output_to_log():
return fabric.operations.run(command,
pty=False,
timeout=timeout)
return self.run_task(task=task,
install_keys=install_keys)
except CommandTimeout as e:
raise TimeoutError("Command timed out: '{command}'".format(
command=command)) from e
except RuntimeError as e:
raise RuntimeError("Cannot run '{command}'".format(
command=command)) from e
def run_task(self,
task: Callable,
install_keys: bool = False) -> Any:
try:
self._ensure_allocated()
with raise_on_remote_fail(exception=RuntimeError):
with authenticate(host=self._host,
port=self._port,
config=self._config,
install_shared_keys=install_keys):
result = fabric.tasks.execute(task)
output = next(iter(result.values()))
return output
except RuntimeError as e:
raise RuntimeError("Cannot run task.") from e
def make_allocated(self,
host: str,
port: int,
cores: Optional[int],
memory: Optional[bitmath.Byte],
allocated_until: Optional[datetime.datetime]):
"""Updates the allocation info.
:param host: Hostname of the cluster node.
:param port: SSH port of the cluster node.
:param cores: Allocated core count.
:param memory: Allocated memory.
:param allocated_until: Timestamp for job termination. Must be UTC
or contain timezone info.
None is treated as unlimited allocation.
"""
self._host = host
self._port = port
self._cores = cores
self._memory = memory
self._allocated_until = allocated_until
def make_cancelled(self):
"""Updates the allocation info after the allocation was cancelled."""
self._host = None
self._port = None
self._cores = None
self._memory = None
self._allocated_until = None
def __str__(self):
if not self._host:
return "Node(NotAllocated)"
return "Node({host}:{port}, {allocated_until})".format(
host=self._host,
port=self._port,
allocated_until=self._allocated_until)
def __repr__(self):
return str(self)
def tunnel(self,
there: int,
here: Optional[int] = None) -> TunnelInternal:
try:
log = get_logger(__name__)
with stage_debug(log, "Opening tunnel %s -> %d to %s",
here, there, self):
self._ensure_allocated()
here, there = validate_tunnel_ports(here=here,
there=there)
first_try = [True]
def get_bindings_and_build_tunnel() -> TunnelInternal:
bindings = get_bindings_with_single_gateway(
here=here if first_try[0] else ANY_TUNNEL_PORT,
node_host=self._host,
node_port=self._port,
there=there)
first_try[0] = False
return build_tunnel(config=self._config,
bindings=bindings,
ssh_password=env.password,
ssh_pkey=env.key_filename)
with authenticate(host=self._host,
port=self._port,
config=self._config):
if here == ANY_TUNNEL_PORT:
return get_bindings_and_build_tunnel()
return retry_with_config(
get_bindings_and_build_tunnel,
name=Retry.TUNNEL_TRY_AGAIN_WITH_ANY_PORT,
config=self._config)
except RuntimeError as e:
raise RuntimeError(
"Unable to tunnel {there} on node '{host}'.".format(
there=there,
host=self._host)) from e
def tunnel_ssh(self,
here: Optional[int] = None) -> TunnelInternal:
return SshTunnel(tunnel=self.tunnel(here=self.port, there=self.port))
def deploy_notebook(self, local_port: int = 8080) -> JupyterDeployment:
return deploy_jupyter(node=self,
local_port=local_port)
@property
def config(self) -> ClusterConfig:
return self._config
@property
def host(self) -> Optional[str]:
return self._host
@property
def port(self) -> Optional[int]:
return self._port
@property
def cores(self) -> Optional[int]:
return self._cores
@property
def memory(self) -> Optional[bitmath.Byte]:
return self._memory
@property
def resources(self) -> NodeResourceStatus:
return NodeResourceStatusImpl(node=self)
def serialize(self) -> dict:
return {'type': str(SerializableTypes.NODE_IMPL),
'host': self._host,
'port': self._port,
'cores': self._cores,
'memory': (None if self._memory is None
else str(self._memory)),
'allocated_until': (None if self._allocated_until is None
else self._allocated_until.isoformat())}
@staticmethod
def deserialize(config: ClusterConfig, serialized: dict) -> 'NodeImpl':
try:
assert serialized['type'] == str(SerializableTypes.NODE_IMPL)
node = NodeImpl(config=config)
node.make_allocated(
host=serialized['host'],
port=serialized['port'],
cores=serialized['cores'],
memory=(None if serialized['memory'] is None
else bitmath.parse_string(serialized['memory'])),
allocated_until=(
None if serialized['allocated_until'] is None
else utc_from_str(serialized['allocated_until'])))
return node
except KeyError as e:
raise RuntimeError("Unable to deserialize.") from e
@property
def allocated_until(self) -> Optional[datetime.datetime]:
return self._allocated_until
def __eq__(self, other):
return self.__dict__ == other.__dict__
| 37.721612
| 79
| 0.583026
| 1,071
| 10,298
| 5.390289
| 0.180205
| 0.032739
| 0.044171
| 0.018188
| 0.209077
| 0.088689
| 0.048155
| 0.035683
| 0.019401
| 0.019401
| 0
| 0.001026
| 0.337444
| 10,298
| 272
| 80
| 37.860294
| 0.845083
| 0.077005
| 0
| 0.185714
| 0
| 0
| 0.049669
| 0
| 0
| 0
| 0
| 0
| 0.004762
| 1
| 0.119048
| false
| 0.004762
| 0.138095
| 0.061905
| 0.366667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e5770f83af2ce49e0548c12ebb2126470694c34
| 2,012
|
py
|
Python
|
geoportal/LUX_alembic/versions/17fb1559a5cd_create_table_for_hierarchy_of_accounts.py
|
arnaud-morvan/geoportailv3
|
b9d676cf78e45e12894f7d1ceea99b915562d64f
|
[
"MIT"
] | 17
|
2015-01-14T08:40:22.000Z
|
2021-05-08T04:39:50.000Z
|
geoportal/LUX_alembic/versions/17fb1559a5cd_create_table_for_hierarchy_of_accounts.py
|
arnaud-morvan/geoportailv3
|
b9d676cf78e45e12894f7d1ceea99b915562d64f
|
[
"MIT"
] | 1,477
|
2015-01-05T09:58:41.000Z
|
2022-03-18T11:07:09.000Z
|
geoportal/LUX_alembic/versions/17fb1559a5cd_create_table_for_hierarchy_of_accounts.py
|
arnaud-morvan/geoportailv3
|
b9d676cf78e45e12894f7d1ceea99b915562d64f
|
[
"MIT"
] | 14
|
2015-07-24T07:33:13.000Z
|
2021-03-02T13:51:48.000Z
|
"""create table for hierarchy of accounts
Revision ID: 17fb1559a5cd
Revises: 3b7de32aebed
Create Date: 2015-09-16 14:20:30.972593
"""
# revision identifiers, used by Alembic.
revision = '17fb1559a5cd'
down_revision = '3b7de32aebed'
branch_labels = None
depends_on = None
from alembic import op, context
import sqlalchemy as sa
def downgrade():
schema = context.get_context().config.get_main_option('schema')
op.drop_table('lux_user_inheritance', schema=schema)
op.execute("DROP FUNCTION IF EXISTS "
"%(schema)s.getMainAccount(VARCHAR)"
% {"schema": schema})
def upgrade():
schema = context.get_context().config.get_main_option('schema')
op.create_table(
'lux_user_inheritance',
sa.Column(
'login', sa.VARCHAR(), autoincrement=False,
nullable=False),
sa.Column(
'login_father', sa.VARCHAR(), autoincrement=False,
nullable=False),
schema=schema
)
op.create_primary_key(
"lux_user_inheritance_pkey", "lux_user_inheritance",
['login', 'login_father'],
schema=schema
)
op.execute(
"CREATE OR REPLACE FUNCTION %(schema)s.getMainAccount "
"(child_login VARCHAR)"
"RETURNS VARCHAR AS "
"$$ "
"DECLARE "
"cur_login_father VARCHAR;"
"res_login_father VARCHAR;"
"c_father Cursor (p_login VARCHAR) FOR "
"Select login_father From %(schema)s.lux_user_inheritance Where "
"login = p_login;"
"BEGIN "
"cur_login_father := child_login;"
"LOOP "
"OPEN c_father(cur_login_father);"
"FETCH FIRST FROM c_father into res_login_father;"
"IF FOUND THEN "
"cur_login_father := res_login_father;"
"END IF;"
"CLOSE c_father;"
"IF NOT FOUND THEN "
"RETURN cur_login_father;"
"END IF;"
"END LOOP;"
"END;"
"$$"
"LANGUAGE plpgsql;" % {"schema": schema})
| 27.944444
| 73
| 0.611332
| 227
| 2,012
| 5.193833
| 0.396476
| 0.102629
| 0.076336
| 0.039016
| 0.152672
| 0.152672
| 0.084818
| 0.084818
| 0.084818
| 0.084818
| 0
| 0.028787
| 0.274851
| 2,012
| 71
| 74
| 28.338028
| 0.779301
| 0.083002
| 0
| 0.214286
| 0
| 0
| 0.421339
| 0.0773
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.035714
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e5a5481a3630f1bb09ba60f327038cb691a80cf
| 2,422
|
py
|
Python
|
src/challenges/CtCI/dynamic/P1_triple_step.py
|
Ursidours/pythonic_interviews
|
a88e10b82ed2a163dfcc0bfd1d01a9e9e606c045
|
[
"MIT"
] | 2
|
2021-11-13T01:30:25.000Z
|
2022-02-11T18:17:22.000Z
|
src/challenges/CtCI/dynamic/P1_triple_step.py
|
arnaudblois/pythonic_interviews
|
a88e10b82ed2a163dfcc0bfd1d01a9e9e606c045
|
[
"MIT"
] | null | null | null |
src/challenges/CtCI/dynamic/P1_triple_step.py
|
arnaudblois/pythonic_interviews
|
a88e10b82ed2a163dfcc0bfd1d01a9e9e606c045
|
[
"MIT"
] | null | null | null |
"""
Problem 1 of Chapter 8 in CtCi
Triple Step: A child is running up a staircase with N steps and can hop either
1 step, 2 steps, or 3 steps at a time. Return the number of possible ways
this can be done.
General idea of the solution: At any step N, the child must necessarily come
from the steps N-3, N-2 or N-1. The possible ways to go to N are therefore the
sums of the possible ways to come to N-3, N-2 and N-1. This is the definition
of the tribonacci numbers, a generalization of the Fibonacci sequence.
"""
from src.utils.decorators import Memoize
def tribonacci_number(N):
"""
Closed-form formula to calculate the Nth Tribonacci number. Of course, no
one would expect this in an interview :)
"""
a1 = (19 + 3 * 33**0.5)**(1 / 3)
a2 = (19 - 3 * 33**0.5)**(1 / 3)
b = (586 + 102 * 33**0.5)**(1 / 3)
numerator = 3 * b * (1 / 3 * (a1 + a2 + 1))**(N + 1)
denominator = b**2 - 2 * b + 4
result = round(numerator / denominator)
return result
def triple_step_iterative(nb_of_steps):
"""
The most naive implementation, using 3 variables corresponding
to the 3 previous states, we calculate the next and update them
continuously until we've looped up to nb_of_steps.
"""
a, b, c = 0, 0, 1
for step in range(nb_of_steps):
temp_var = a + b + c
a = b
b = c
c = temp_var
return c
def triple_step_bottom_up(nb_of_steps):
"""
As with all bottom-up approaches, we initiate a list which we
update as we calculate the next step.
"""
nb_possible_ways = [1, 1, 2] + [None for _ in range(3, nb_of_steps + 1)]
for step in range(3, nb_of_steps + 1):
nb_possible_ways[step] = (
nb_possible_ways[step - 1]
+ nb_possible_ways[step - 2]
+ nb_possible_ways[step - 3]
)
return nb_possible_ways[nb_of_steps]
@Memoize
def triple_step_top_down(nb_of_steps):
"""
In the top-down approach, the problem is broken down into easier
problems: solving for N corresponds to solving for N-1, N-2 and
N-3 and adding them. The use of memoization avoids recomputation.
"""
if nb_of_steps == 0:
return 1
if nb_of_steps in [1, 2]:
return nb_of_steps
return (
triple_step_top_down(nb_of_steps - 1)
+ triple_step_top_down(nb_of_steps - 2)
+ triple_step_top_down(nb_of_steps - 3)
)
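All four routines use the same indexing, so a quick cross-check is possible (assuming the Memoize decorator imported above behaves as a standard cache):
# Cross-check the closed form against the three dynamic-programming variants.
for n in range(20):
    assert (tribonacci_number(n)
            == triple_step_iterative(n)
            == triple_step_bottom_up(n)
            == triple_step_top_down(n))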
| 31.454545
| 79
| 0.641618
| 411
| 2,422
| 3.632603
| 0.323601
| 0.037508
| 0.084394
| 0.048225
| 0.149364
| 0.105827
| 0.105827
| 0
| 0
| 0
| 0
| 0.043993
| 0.26796
| 2,422
| 76
| 80
| 31.868421
| 0.798082
| 0.454583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0
| 0.027027
| 0
| 0.297297
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e5d48ba91cb1100ebbf354d7f7d6405aa099be0
| 20,335
|
py
|
Python
|
bots/invaders/agent.py
|
alv67/Lux-AI-challenge
|
4fdd623a8ff578f769a6925ec0200170f84d4737
|
[
"MIT"
] | null | null | null |
bots/invaders/agent.py
|
alv67/Lux-AI-challenge
|
4fdd623a8ff578f769a6925ec0200170f84d4737
|
[
"MIT"
] | 27
|
2021-10-17T22:46:41.000Z
|
2021-12-05T23:41:19.000Z
|
bots/invaders/agent.py
|
alv67/Lux-AI-challenge
|
4fdd623a8ff578f769a6925ec0200170f84d4737
|
[
"MIT"
] | 3
|
2021-11-14T19:22:16.000Z
|
2021-12-04T06:46:33.000Z
|
import os
import math
import sys
from typing import List, Tuple
# for kaggle-environments
from abn.game_ext import GameExtended
from abn.jobs import Task, Job, JobBoard
from abn.actions import Actions
from lux.game_map import Position, Cell, RESOURCE_TYPES
from lux.game_objects import City
from lux.game_constants import GAME_CONSTANTS
from lux import annotate
## DEBUG ENABLE
DEBUG_SHOW_TIME = False
DEBUG_SHOW_CITY_JOBS = False
DEBUG_SHOW_CITY_FULLED = False
DEBUG_SHOW_EXPAND_MAP = True
DEBUG_SHOW_EXPAND_LIST = False
DEBUG_SHOW_INPROGRESS = True
DEBUG_SHOW_TODO = True
DEBUG_SHOW_ENERGY_MAP = False
DEBUG_SHOW_ENEMY_CITIES = False
DEBUG_SHOW_INVASION_MAP = False
DEBUG_SHOW_EXPLORE_MAP = False
MAX_CITY_SIZE = 10
DISTANCE_BETWEEN_CITIES = 5
def find_closest_city_tile(pos, player):
closest_city_tile = None
if len(player.cities) > 0:
closest_dist = math.inf
# the cities are stored as a dictionary mapping city id to the city object, which has a citytiles field that
# contains the information of all citytiles in that city
for k, city in player.cities.items():
for city_tile in city.citytiles:
dist = city_tile.pos.distance_to(pos)
if dist < closest_dist:
closest_dist = dist
closest_city_tile = city_tile
return closest_city_tile
def can_build_worker(player) -> int:
    # get the number of citytiles
nr_cts = 0
for k, c in player.cities.items():
nr_cts += len(c.citytiles)
return max(0, nr_cts - len(player.units))
def city_can_expand(city: City, jobs: JobBoard) -> bool:
# City can expand if has fuel to pass the night
has_energy = city.isFulled()
# City can expand to MAX_CITY_SIZE tiles
can_expand = len(city.citytiles) + jobs.count(Task.BUILD, city_id=city.cityid) < MAX_CITY_SIZE
return has_energy & can_expand
# Define global variables
game_state = GameExtended()
actions = Actions(game_state)
lets_build_city = False
build_pos = None
jobs = game_state.job_board
completed_cities = []
def agent(observation, configuration, DEBUG=False):
global game_state
global actions
global lets_build_city
global build_pos
global completed_cities
### Do not edit ###
game_state._update(observation)
actions.update()
path: List[Tuple] = []
### AI Code goes down here! ###
player = game_state.player
opponent = game_state.opponent
# width, height = game_state.map.width, game_state.map.height
if DEBUG_SHOW_TIME:
actions.append(annotate.sidetext(f"Time : {game_state.time}"))
actions.append(annotate.sidetext(f" {game_state.lux_time}h till night"))
        if game_state.isMorning(): dbg = "Morning"
        elif game_state.isEvening(): dbg = "Evening"
        elif game_state.isNight(): dbg = "Night"
        else: dbg = "Daytime"
actions.append(annotate.sidetext(f"it is {dbg}"))
#---------------------------------------------------------------------------------------------------------
# Cities Management
#---------------------------------------------------------------------------------------------------------
for _, city in player.cities.items():
city_size = len(city.citytiles)
#--- EXPAND THE CITY ---
if DEBUG_SHOW_EXPAND_LIST:
exp_pos = game_state.expand_map.get(city.cityid)
actions.append(annotate.sidetext(f"{city.cityid} expand in "))
for x, y, v in exp_pos:
actions.append(annotate.sidetext(f" ({x}; {y}) {v}"))
if city_can_expand(city, jobs) and city.isFulled():
exp_pos = game_state.expand_map.get(city.cityid)
if exp_pos:
x, y, v = exp_pos[0]
#if v: # expand only if there is a resource nearby
jobs.addJob(Task.BUILD, Position(x, y), city_id=city.cityid)
#else:
# jobs.addJob(Task.INVASION, None, city_id=city.cityid)
#--- SPAWN WORKERS OR RESEARCH ---
for ct in city.citytiles:
pxy = ct.pos
if DEBUG_SHOW_CITY_FULLED:
actions.append(annotate.text(pxy.x, pxy.y, f"{city.isFulled()}"))
if ct.can_act():
if can_build_worker(player) - actions.new_workers > 0:
actions.build_worker(ct)
# actions.append(ct.build_worker())
elif not player.researched_uranium():
actions.append(ct.research())
if not city.isFulled(): # and not game_state.isNight():
if jobs.count(Task.ENERGIZE, city_id=city.cityid) < (city_size + 1) // 2:
dbg = jobs.count(Task.ENERGIZE, city_id=city.cityid)
dbg2 = (city_size + 1) // 2
if DEBUG_SHOW_CITY_JOBS:
actions.append(annotate.sidetext(f"{city.cityid}: NRG {dbg} < {dbg2}"))
jobs.addJob(Task.ENERGIZE, ct.pos, city_id = city.cityid)
# Debug jobs.count
if DEBUG_SHOW_CITY_JOBS:
dbg = jobs.count(Task.BUILD, city_id=city.cityid)
actions.append(annotate.sidetext(f"{city.cityid}: {dbg} BLD"))
dbg = jobs.count(Task.ENERGIZE, city_id=city.cityid)
actions.append(annotate.sidetext(f"{city.cityid}: {dbg} NRG"))
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
# Units Management
#---------------------------------------------------------------------------------------------------------
if DEBUG_SHOW_INPROGRESS:
actions.append(annotate.sidetext(f"[INPROGRESS]"))
sorted_units = sorted(player.units, key=lambda u: u.cooldown, reverse=True)
for unit in sorted_units:
# if the unit is a worker (can mine resources) and can perform an action this turn
if unit.is_worker():
my_job = jobs.jobRequest(unit)
if not unit.can_act():
actions.stay(unit)
if DEBUG_SHOW_INPROGRESS:
actions.append(annotate.sidetext(f"!{my_job}"))
continue
else:
if DEBUG_SHOW_INPROGRESS:
actions.append(annotate.sidetext(f">{my_job}"))
            # Check if it is evening; if so, to survive, any job that risks
            # running out of energy is dropped and a new HARVEST job is
            # taken instead.
# if game_state.isNight():
# if (my_job.task == Task.BUILD and my_job.subtask > 0) or \
# (my_job.task == Task.EXPLORE and my_job.subtask > 0):
# actions.stay(unit)
# jobs.jobDrop(unit.id)
# continue
if my_job.task == Task.HARVEST:
# if not in a city and in a cell with available energy, stay here to harvest
if game_state.getEnergy(unit.pos.x, unit.pos.y) != 0 and \
not game_state.map.get_cell_by_pos(unit.pos).citytile:
actions.stay(unit) # stay in the same position
else: # find a new resource position
if unit.pos == my_job.pos:
tile = game_state.find_closest_resources(unit.pos)
if not tile: # no more resources to harvest
actions.stay(unit) # stay in the same position
jobs.jobDrop(unit.id)
else: # move to resource
my_job.pos = tile.pos
if unit.pos != my_job.pos:
move = unit.pos.path_to(my_job.pos, game_state.map, playerid=game_state.id)
if not actions.move(unit, move.direction): # cannot move to a resource tile
jobs.jobReject(unit.id)
if unit.get_cargo_space_left() == 0:
actions.stay(unit)
jobs.jobDone(unit.id)
elif my_job.task == Task.ENERGIZE:
if my_job.subtask == 0: # search for resource
if game_state.getEnergy(my_job.pos.x, my_job.pos.y) != 0:
# citytile is adjacent to a resource, so go directly there
my_job.subtask = 1
# If the unit is in the citytile and can grab energy, then the job is done (unit stays there)
elif unit.energy >= 10 * unit.light_upkeep:
# unit already carries enough energy, so deliver it directly
my_job.subtask = 1
elif unit.get_cargo_space_left() == 0:
my_job.subtask = 1
elif (game_state.map.get_cell_by_pos(unit.pos).citytile or
game_state.getEnergy(unit.pos.x, unit.pos.y) == 0 ):
tile = game_state.find_closest_resources(unit.pos)
if not tile:
actions.stay(unit) # stay in the same position
jobs.jobReject(unit.id)
else:
move = unit.pos.path_to(tile.pos, game_state.map, playerid=game_state.id)
if not actions.move(unit, move.direction): # cannot move to a resource tile
jobs.jobReject(unit.id)
if my_job.subtask == 1: # go to citytile
if unit.pos == my_job.pos:
actions.stay(unit) # stay in the same position
jobs.jobDone(unit.id)
else:
move = unit.pos.path_to(my_job.pos, game_state.map, playerid=game_state.id)
if not actions.move(unit, move.direction):
jobs.jobReject(unit.id)
elif my_job.task == Task.BUILD:
if my_job.subtask == 0: # First need to full up unit
if unit.get_cargo_space_left() == 0:
my_job.subtask = 1
elif (game_state.map.get_cell_by_pos(unit.pos).citytile or
game_state.getEnergy(unit.pos.x, unit.pos.y) == 0 ):
tile = game_state.find_closest_resources(unit.pos)
if not tile: # no reachable resource
actions.stay(unit) # stay in the same position
jobs.jobDrop(unit.id)
else:
move = unit.pos.path_to(tile.pos, game_state.map, playerid=game_state.id)
if not actions.move(unit, move.direction):
jobs.jobDrop(unit.id)
if my_job.subtask == 1: # Go to Build position
if unit.pos == my_job.pos:
if unit.get_cargo_space_left() > 0:
actions.stay(unit) # stay in the same position
jobs.jobDrop(unit.id)
else:
actions.build_city(unit)
my_job.subtask = 2
else:
move = unit.pos.path_to(my_job.pos, game_state.map, noCities=True)
if move.path:
if not actions.move(unit, move.direction):
jobs.jobDrop(unit.id)
# actions.append(unit.move(move_dir))
# Draw the path
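# move.path entries appear to carry coordinates in slots 1 and 2; the loop below draws the planned route segment by segment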
actions.append(annotate.x(my_job.pos.x, my_job.pos.y))
for i in range(len(move.path)-1):
actions.append(annotate.line(
move.path[i][1], move.path[i][2],
move.path[i+1][1], move.path[i+1][2]))
else: # no path found
jobs.jobDone(unit.id)
elif my_job.subtask == 2:
# if the city has adjacent energy, the unit stays until the new day
if game_state.getEnergy(unit.pos.x, unit.pos.y) > 0:
if game_state.time >= 39:
jobs.jobDone(unit.id)
else:
jobs.jobDone(unit.id)
elif my_job.task == Task.SLEEP:
if unit.pos == my_job.pos:
if game_state.time >= 39:
jobs.jobDone(unit.id)
else:
move_dir = unit.pos.direction_to(my_job.pos)
if not actions.move(unit, move_dir):
jobs.jobReject(unit.id)
elif my_job.task == Task.EXPLORE:
# this is a multistate task so my_job.subtask is the state
if my_job.subtask == 0: # find the position of a resource (min 4 steps from the city)
# get position of city that emitted the job
if my_job.city_id in player.cities:
pos = player.cities[my_job.city_id].citytiles[0].pos
else:
pos = my_job.pos
explore_pos = game_state.getClosestExploreTarget(pos, min_distance=DISTANCE_BETWEEN_CITIES)
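# the min_distance keeps new settlements spread out so cities do not compete for the same resource cluster (assumed intent)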
if explore_pos:
my_job.subtask = 1 # HARVEST resource from position
my_job.pos = explore_pos
else:
jobs.jobDone(unit.id)
if my_job.subtask == 1: # HARVEST resource from position
if unit.pos == my_job.pos:
if unit.get_cargo_space_left() > 0:
if not game_state.map.get_cell_by_pos(unit.pos).has_resource:
#jobs.jobReject(unit.id)
jobs.jobDrop(unit.id)
else: # next subtask
my_job.pos = game_state.find_closest_freespace(unit.pos)
my_job.subtask = 2 # BUILD A NEW CITY
else:
# move_dir = unit.pos.direction_to(my_job.pos)
move = unit.pos.path_to(my_job.pos, game_state.map, playerid=game_state.id)
if not actions.move(unit, move.direction):
# jobs.jobReject(unit.id)
jobs.jobDrop(unit.id)
if my_job.subtask == 2: # BUILD A NEW CITY
if unit.pos == my_job.pos:
# TODO: need to wait until next day
actions.build_city(unit)
my_job.subtask = 3 # WAIT UNTIL NEXT DAY
else:
#move_dir = unit.pos.direction_to(my_job.pos)
move = unit.pos.path_to(my_job.pos, game_state.map, noCities=True, playerid=game_state.id)
if not actions.move(unit, move.direction):
actions.build_city(unit)
# jobs.jobReject(unit.id)
jobs.jobDrop(unit.id)
if my_job.subtask == 3: # Now feed that city
my_job.task = Task.ENERGIZE
my_job.subtask = 0
actions.stay(unit)
elif my_job.task == Task.INVASION:
if my_job.subtask == 0:
# get an invasion target position
target_pos = game_state.getClosestInvasionTarget(unit.pos)
if not target_pos:
actions.stay(unit)
jobs.jobDone(unit.id)
continue
my_job.data["target"] = target_pos
if unit.get_cargo_space_left() == 0: # if unit is full
my_job.pos = target_pos
my_job.subtask = 2
else:
# find a resource roughly halfway to the target
res_cell = game_state.find_closest_resources(unit.pos.halfway(target_pos))
if res_cell:
my_job.subtask = 1 # HARVEST resource from position
my_job.pos = res_cell.pos
else:
actions.stay(unit)
jobs.jobDone(unit.id)
continue
if my_job.subtask == 1: # HARVEST resource from position
if unit.pos == my_job.pos:
if unit.get_cargo_space_left() == 0:
my_job.pos = my_job.data["target"]
my_job.subtask = 2 # BUILD A NEW CITY
elif game_state.getEnergy(unit.pos.x, unit.pos.y) <= 0:
res_cell = game_state.find_closest_resources(unit.pos)
if res_cell:
my_job.pos = res_cell.pos
else:
actions.stay(unit)
jobs.jobDone(unit.id)
continue
else: # next subtask
actions.stay(unit) # stay until cargo is full
else:
# move_dir = unit.pos.direction_to(my_job.pos)
move = unit.pos.path_to(my_job.pos, game_state.map, playerid=game_state.id)
if not actions.move(unit, move.direction): # no way to move
jobs.jobDrop(unit.id)
if my_job.subtask == 2: # BUILD A NEW CITY
if unit.pos == my_job.pos:
actions.build_city(unit)
jobs.jobDone(unit.id)
else:
move = unit.pos.path_to(my_job.pos, game_state.map, noCities=True, playerid=game_state.id)
if not actions.move(unit, move.direction):
if unit.get_cargo_space_left() == 0 and not game_state.map.get_cell_by_pos(unit.pos).has_resource:
actions.build_city(unit)
jobs.jobDone(unit.id)
else:
actions.stay(unit)
## Debug Text
if DEBUG_SHOW_TODO:
actions.append(annotate.sidetext(f"[TODO] {len(jobs.todo)}"))
for task in jobs.todo:
actions.append(annotate.sidetext(task))
#--------------------------------------------------------------------------------------------------------
# Debug "show expand map"
if DEBUG_SHOW_EXPAND_MAP:
for x, y, e in [ p for a in game_state.expand_map.values() for p in a]:
actions.append(annotate.circle(x, y))
actions.append(annotate.text(x, y, e))
## Debug "show energy map"
if DEBUG_SHOW_ENERGY_MAP:
for (x, y),v in game_state.energy_map.items():
actions.append(annotate.text(x, y, v))
## Debug "show enemy map"
if DEBUG_SHOW_ENEMY_CITIES:
for x, y in game_state.enemy_map:
actions.append(annotate.circle(x, y))
## Debug "show invasion map"
if DEBUG_SHOW_INVASION_MAP:
for x, y in game_state.invasion_map:
actions.append(annotate.x(x, y))
## Debug "show explore map"
if DEBUG_SHOW_EXPLORE_MAP:
for x, y in game_state.explore_map:
actions.append(annotate.x(x, y))
# actions.append(annotate.sidetext(f"[INPROGRESS] {len(jobs.inprogress)}"))
# for task in jobs.inprogress:
# actions.append(annotate.sidetext(jobs.inprogress[task]))
# actions.append(annotate.sidetext("-[CEMETERY]-"))
# for uid in jobs.rip:
# actions.append(annotate.sidetext(uid))
return actions.actions
| 47.847059
| 126
| 0.503369
| 2,352
| 20,335
| 4.18665
| 0.113946
| 0.038083
| 0.025998
| 0.050066
| 0.530517
| 0.474967
| 0.442571
| 0.405403
| 0.375851
| 0.310145
| 0
| 0.0055
| 0.383034
| 20,335
| 424
| 127
| 47.959906
| 0.779372
| 0.190214
| 0
| 0.474843
| 0
| 0
| 0.018227
| 0.001346
| 0
| 0
| 0
| 0.002358
| 0
| 1
| 0.012579
| false
| 0
| 0.034591
| 0
| 0.059748
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e5de644fd911fb842013165cff69e62361a9159
| 12,503
|
py
|
Python
|
PySRCG/src/Tabs/cyberware_tab.py
|
apampuch/PySRCG
|
bb3777aed3517b473e5860336c015e2e8d0905e9
|
[
"MIT"
] | null | null | null |
PySRCG/src/Tabs/cyberware_tab.py
|
apampuch/PySRCG
|
bb3777aed3517b473e5860336c015e2e8d0905e9
|
[
"MIT"
] | null | null | null |
PySRCG/src/Tabs/cyberware_tab.py
|
apampuch/PySRCG
|
bb3777aed3517b473e5860336c015e2e8d0905e9
|
[
"MIT"
] | null | null | null |
from copy import copy
from tkinter import *
from tkinter import ttk
from src import app_data
from src.CharData.augment import Cyberware
from src.Tabs.notebook_tab import NotebookTab
from src.statblock_modifier import StatMod
from src.utils import treeview_get, recursive_treeview_fill, calculate_attributes, get_variables
# list of attributes that we need to look for variables in, eg "Cost: rating * 500"
ATTRIBUTES_TO_CALCULATE = ["essence", "cost", "availability_rating", "availability_time", "mods"]
STRINGS_TO_IGNORE = [] # nyi
class CyberwareTab(NotebookTab):
@property
def library_selected(self):
return treeview_get(self.cyberware_library, self.tree_library_dict)
@property
def list_selected_index(self) -> int:
"""index of the index of the selected item"""
selection = self.cyberware_list.curselection()
if len(selection) == 0:
return None
return selection[-1]
def __init__(self, parent):
super().__init__(parent)
# used to validate input
self.vcmd = (self.register(self.int_validate), '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
self.tree_library_dict = {} # maps library terminal children iids to Cyberware objects
# cyberware library
self.cyberware_library = ttk.Treeview(self, height=20, show="tree")
self.cyberware_library_scroll = ttk.Scrollbar(self, orient=VERTICAL, command=self.cyberware_library.yview)
# cyberware list
self.cyberware_list = Listbox(self, width=40)
self.cyberware_list_scroll = ttk.Scrollbar(self, orient=VERTICAL, command=self.cyberware_list.yview)
# description box
self.desc_box = Text(self, width=40, state=DISABLED, bg='#d1d1d1')
self.desc_box_scroll = ttk.Scrollbar(self, orient=VERTICAL, command=self.desc_box.yview)
# radio boxes
self.grade_var = StringVar()
self.grade_frame = LabelFrame(self, text="Grade")
self.standard_radio = Radiobutton(self.grade_frame,
text="Standard",
variable=self.grade_var,
value="standard")
self.alpha_radio = Radiobutton(self.grade_frame,
text="Alphaware",
variable=self.grade_var,
value="alpha")
self.beta_radio = Radiobutton(self.grade_frame,
text="Betaware",
variable=self.grade_var,
value="beta")
self.delta_radio = Radiobutton(self.grade_frame,
text="Deltaware",
variable=self.grade_var,
value="delta")
# buttons
self.buy_sell_frame = Frame(self)
self.buy_button = Button(self.buy_sell_frame, text="Buy", command=self.on_buy_click)
self.sell_button = Button(self.buy_sell_frame, text="Sell", command=self.on_sell_click)
# variable objects frame and list
self.variables_frame = Frame(self)
self.variables_dict = {}
# bind events
self.cyberware_library["yscrollcommand"] = self.cyberware_library_scroll.set
self.cyberware_library.bind("<<TreeviewSelect>>", self.on_tree_item_click)
self.cyberware_list["yscrollcommand"] = self.cyberware_list_scroll.set
self.cyberware_list.bind("<<ListboxSelect>>", self.on_inv_item_click)
self.desc_box["yscrollcommand"] = self.desc_box_scroll.set
# grids
self.cyberware_library.grid(column=0, row=0, sticky=(N, S))
self.cyberware_library_scroll.grid(column=1, row=0, sticky=(N, S))
self.desc_box.grid(column=3, row=0, sticky=(N, S))
self.desc_box_scroll.grid(column=4, row=0, sticky=(N, S))
self.cyberware_list.grid(column=5, row=0, sticky=(N, S))
self.cyberware_list_scroll.grid(column=6, row=0, sticky=(N, S))
self.buy_sell_frame.grid(column=5, row=1, sticky=E)
self.buy_button.grid(column=0, row=0, sticky=W)
self.sell_button.grid(column=1, row=0, sticky=W)
self.grade_frame.grid(column=0, row=1, sticky=W, columnspan=4)
self.standard_radio.grid(column=0, row=0)
self.alpha_radio.grid(column=1, row=0)
self.beta_radio.grid(column=2, row=0)
self.delta_radio.grid(column=3, row=0)
self.standard_radio.select()
self.standard_radio.invoke()
self.variables_frame.grid(column=0, row=3)
def augment_tab_recurse_check(val):
return "essence" not in val.keys()
def augment_tab_recurse_end_callback(key, val, iid):
# key is a string
# val is a dict from a json
try:
self.tree_library_dict[iid] = Cyberware(name=key, **val)
except TypeError as e:
print("Error with cyberware {}:".format(key))
print(e)
print()
recursive_treeview_fill(self.parent.game_data["Augments"]["Cyberware"], "", self.cyberware_library,
augment_tab_recurse_check, augment_tab_recurse_end_callback)
def on_buy_click(self):
# TODO make this set rating value
if self.library_selected is not None:
current_essence = self.statblock.essence
# make copies of info we need to copy from the dict
cyber = copy(self.library_selected)
cyber.grade = str(self.grade_var.get())
# make a new dict from the variables dict that we can pass into parse_arithmetic()
# because parse_arithmetic() can't take IntVars
var_dict = {}
for key in self.variables_dict.keys():
var_dict[key] = self.variables_dict[key].get()
# calculate any arithmetic expressions we have
calculate_attributes(cyber, var_dict, ATTRIBUTES_TO_CALCULATE)
cyber.essence = self.calc_essence_cost(cyber, cyber.grade)
cyber.cost = int(self.calc_yen_cost(cyber, cyber.grade))
# if we have enough essence
if cyber.essence < current_essence:
# if we have enough money
if app_data.pay_cash(cyber.cost):
self.add_cyberware_item(cyber)
self.calculate_total()
else:
print("Not enough essence left!")
else:
print("Can't get that!")
def on_sell_click(self):
# don't do anything if nothing is selected
if len(self.cyberware_list.curselection()) == 0:
return
# return cash value
self.statblock.cash += self.statblock.cyberware[self.list_selected_index].cost
self.remove_cyberware_item(self.list_selected_index)
self.calculate_total()
def add_cyberware_item(self, cyber):
"""
:type cyber: Cyberware
"""
for key in cyber.mods.keys():
value = cyber.mods[key]
StatMod.add_mod(key, value)
self.statblock.cyberware.append(cyber)
self.cyberware_list.insert(END, cyber.name)
def remove_cyberware_item(self, index):
cyber = self.statblock.cyberware[index]
for key in cyber.mods.keys():
value = cyber.mods[key]
StatMod.remove_mod(key, value)
del self.statblock.cyberware[index]
self.cyberware_list.delete(index)
def calc_essence_cost(self, cyber, grade):
essence = cyber.essence
if grade == "standard":
pass
elif grade == "alpha":
essence *= 0.8
elif grade == "beta":
essence *= 0.6
elif grade == "delta":
essence *= 0.5
else:
raise ValueError("Invalid grade {}.".format(grade))
if cyber.fits is None:
return essence
fit_dict = self.statblock.make_fit_dict()
if cyber.fits in fit_dict.keys():
hold_amount = fit_dict[cyber.fits][0]
fit_amount = fit_dict[cyber.fits][1]
# subtract fit amount from held amount to get the uncovered subtotal
subtotal = max(hold_amount - fit_amount, 0)
total = max(essence - subtotal, 0)
return total
else:
return essence
def calc_yen_cost(self, cyber, grade):
cost = cyber.cost
if grade == "standard":
pass
elif grade == "alpha":
cost *= 2
elif grade == "beta":
cost *= 4
elif grade == "delta":
cost *= 8
else:
raise ValueError("Invalid grade {}.".format(grade))
return cost
def fill_description_box(self, contents):
"""Clears the item description box and fills it with contents."""
# temporarily unlock box, clear it, set the text, then re-lock it
self.desc_box.config(state=NORMAL)
self.desc_box.delete(1.0, END)
self.desc_box.insert(END, contents)
self.desc_box.config(state=DISABLED)
def on_tree_item_click(self, event):
# only select the last one selected if we've selected multiple things
selected = self.cyberware_library.selection()[-1]
if selected in self.tree_library_dict.keys():
selected_cyberware = self.tree_library_dict[selected]
self.fill_description_box(selected_cyberware.report())
# destroy all variable objects
self.variables_dict = {}
for child in self.variables_frame.winfo_children():
child.destroy()
# get any variables in the item
self.variables_dict = get_variables(selected_cyberware, ATTRIBUTES_TO_CALCULATE)
# make variable objects if any
i = 0
for var in self.variables_dict.keys():
var_frame = Frame(self.variables_frame)
Label(var_frame, text="{}:".format(var)).grid(column=0, row=0) # label
Entry(var_frame, textvariable=self.variables_dict[var], validate="key", validatecommand=self.vcmd) \
.grid(column=1, row=0)
var_frame.grid(column=0, row=i)
i += 1
def int_validate(self, action, index, value_if_allowed,
prior_value, text, validation_type, trigger_type, widget_name):
"""
Validates if entered text can be an int and over 0.
:param action:
:param index:
:param value_if_allowed:
:param prior_value:
:param text:
:param validation_type:
:param trigger_type:
:param widget_name:
:return: True if text is valid
"""
if value_if_allowed == "":
return True
if value_if_allowed:
try:
i = int(value_if_allowed)
if i > 0:
return True
else:
self.bell()
return False
except ValueError:
self.bell()
return False
else:
self.bell()
return False
def on_inv_item_click(self, event):
curselection_ = self.cyberware_list.curselection()[-1]
item_report = self.statblock.cyberware[curselection_].report()
self.fill_description_box(item_report)
def calculate_total(self):
# unlike the other tabs, we directly manipulate the top bar here
# since this has nothing to do with the generation mode
app_data.top_bar.update_karma_bar("{:.2f}".format(self.statblock.essence),
self.statblock.base_attributes["essence"],
"Augments Tab")
# app_data.top_bar.karma_fraction.set(("{}/{}".format("{:.2f}".format(self.statblock.essence),
# self.statblock.base_attributes["essence"])))
def on_switch(self):
self.calculate_total()
def load_character(self):
# clear everything
# self.tree_library_dict = {}
self.cyberware_list.delete(0, END)
# add stuff to the list
for cyber in self.statblock.cyberware:
self.cyberware_list.insert(END, cyber.name)
# self.on_switch()
| 38.589506
| 116
| 0.589698
| 1,485
| 12,503
| 4.783838
| 0.201347
| 0.047579
| 0.035895
| 0.013795
| 0.221143
| 0.148367
| 0.110642
| 0.067145
| 0.04589
| 0.030124
| 0
| 0.008727
| 0.312645
| 12,503
| 324
| 117
| 38.589506
| 0.817896
| 0.136927
| 0
| 0.224299
| 0
| 0
| 0.039435
| 0
| 0
| 0
| 0
| 0.003086
| 0
| 1
| 0.084112
| false
| 0.009346
| 0.037383
| 0.009346
| 0.191589
| 0.023364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e5f43493f76b33f089dfbae79e524b7b68ad4b5
| 337
|
py
|
Python
|
myapp/mymetric/my-metric.py
|
affoliveira/hiring-engineers
|
4064d8c7b6cead9a88197e95fcd6a0f2395e4d44
|
[
"Apache-2.0"
] | null | null | null |
myapp/mymetric/my-metric.py
|
affoliveira/hiring-engineers
|
4064d8c7b6cead9a88197e95fcd6a0f2395e4d44
|
[
"Apache-2.0"
] | null | null | null |
myapp/mymetric/my-metric.py
|
affoliveira/hiring-engineers
|
4064d8c7b6cead9a88197e95fcd6a0f2395e4d44
|
[
"Apache-2.0"
] | null | null | null |
from datadog import initialize, statsd
import time
import random
import os
options = {
'statsd_host':os.environ['DD_AGENT_HOST'],
'statsd_port':8125
}
initialize(**options)
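# initialize() points the module-level statsd client at the DogStatsD agent (host from DD_AGENT_HOST, default UDP port 8125)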
i = 0
while True:
i += 1
r = random.randint(0, 1000)
statsd.gauge('mymetric', r, tags=["environment:dev"])
time.sleep(int(os.environ['interval']))
| 17.736842
| 55
| 0.68546
| 48
| 337
| 4.729167
| 0.625
| 0.079295
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042105
| 0.154303
| 337
| 19
| 56
| 17.736842
| 0.754386
| 0
| 0
| 0
| 0
| 0
| 0.195266
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.266667
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e63b1a8022fa7d3c4dd2cc0d17b00043e002831
| 1,024
|
py
|
Python
|
youtube_sync/tasks.py
|
abhayagiri/youtube-sync
|
ce3861f1b0c1448b1d48e5ba17925f5c082f04a2
|
[
"MIT"
] | null | null | null |
youtube_sync/tasks.py
|
abhayagiri/youtube-sync
|
ce3861f1b0c1448b1d48e5ba17925f5c082f04a2
|
[
"MIT"
] | null | null | null |
youtube_sync/tasks.py
|
abhayagiri/youtube-sync
|
ce3861f1b0c1448b1d48e5ba17925f5c082f04a2
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import os
import re
import subprocess
from . import app, celery, db
from .database import Job
@celery.task()
def make_audio(youtube_id):
worker_path = os.path.join(app.root_path, 'worker.sh')
env = {
'DYNAMIC_AUDIO_NORMALIZER_BIN': app.config['DYNAMIC_AUDIO_NORMALIZER_BIN'],
'DESTINATION_SERVER_PATH': app.config['DESTINATION_SERVER_PATH'],
}
job([worker_path, youtube_id], env=env)
def job(cmd, env=None):
job_env = os.environ.copy()
job_env.update(env or {})
job = Job(command=repr(cmd))
db.session.add(job)
db.session.commit()
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=job_env)
return_code = 0
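# check_output raises CalledProcessError on a nonzero exit; catching it below lets the Job row still record output and return code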
except subprocess.CalledProcessError as e:
output = e.output
return_code = e.returncode
job.complete = True
job.return_code = return_code
job.output = output.decode('utf-8')
job.completed_at = datetime.now()
db.session.commit()
return return_code == 0
| 26.947368
| 84
| 0.682617
| 140
| 1,024
| 4.807143
| 0.421429
| 0.074294
| 0.065379
| 0.074294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003672
| 0.202148
| 1,024
| 37
| 85
| 27.675676
| 0.820073
| 0
| 0
| 0.0625
| 0
| 0
| 0.113281
| 0.099609
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.1875
| 0
| 0.28125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e6bf3bcb9f6b04ecf66cf6829603687c806b677
| 4,140
|
py
|
Python
|
markov.py
|
themichaelusa/zuckerkov
|
d68780f987b3f032d6382ea75118c84e7f205a39
|
[
"MIT"
] | 1
|
2020-03-17T23:34:17.000Z
|
2020-03-17T23:34:17.000Z
|
markov.py
|
themichaelusa/zuckerkov
|
d68780f987b3f032d6382ea75118c84e7f205a39
|
[
"MIT"
] | null | null | null |
markov.py
|
themichaelusa/zuckerkov
|
d68780f987b3f032d6382ea75118c84e7f205a39
|
[
"MIT"
] | null | null | null |
### IMPORTS
import json
import glob
import string
import random
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
import markovify
### CONSTANTS/GLOBALS/LAMBDAS
SYMBOLS_TO_RM = tuple(list(string.punctuation) + ['\xad'])
NUMBERS_TO_RM = tuple(string.digits)
spacy.prefer_gpu()
NLP_ENGINE = spacy.load("en_core_web_sm")
def clean_word(word):
word_chars = list(word)
ignore_flag = False
for s in SYMBOLS_TO_RM:
if s in word_chars:
ignore_flag = True
break
for n in NUMBERS_TO_RM:
if n in word_chars:
ignore_flag = True
break
if not ignore_flag and len(word) >= 1:
return word.lower()
else:
return None
def clean_set(raw_set, by_letters=False):
cleaned = []
for l in raw_set:
words = l.split(' ')[:-1]
clean_sentence = []
for w in words:
cleaned_word = None
if by_letters:
cw_temp = clean_word(w)
if cw_temp is None:
continue
cleaned_word = cw_temp
else:
cleaned_word = clean_word(w)
if cleaned_word is not None:
clean_sentence.append(cleaned_word)
clean_sentence = ' '.join(clean_sentence)
if clean_sentence != '':
cleaned.append(clean_sentence)
return cleaned
def gen_user_corpus(sender, wpath):
parsed_mesgs = []
for mesg_corpus_path in glob.glob('message_*.json'):
with open(mesg_corpus_path) as rjson:
raw_data = json.load(rjson)
# parse only textual mesgs from given sender
for mesg in raw_data['messages']:
sname = mesg['sender_name']
if sname == sender:
text_mesg = mesg.get('content')
if text_mesg is not None:
#text_mesg = text_mesg.decode('utf-8')
parsed_mesgs.append(text_mesg)
cset = clean_set(parsed_mesgs)
# derive corpus of only words
word_set = set()
for sent in cset:
words = sent.split(' ')
for word in words:
word_set.add(word)
cset.extend(word_set)
# generate final corpus
with open(wpath, 'w+') as corpus:
for mesg in cset:
corpus.write(mesg + '\n')
def build_mm_for_user(sender, corpus_path):
with open(corpus_path, 'r') as corpus:
cread = corpus.read()
model = markovify.NewlineText(cread)
return model.compile()
def gen_valid_sent(model, init_state=None):
if init_state is not None:
init_state = ('___BEGIN__', init_state)
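# '___BEGIN__' is markovify's internal sentence-start token; pairing it with a seed word starts sentences from that word (relies on markovify internals)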
sent = model.make_sentence(init_state=init_state)
while sent is None:
sent = model.make_sentence(init_state=init_state)
return sent
def get_next_sent_subj(sent):
doc = NLP_ENGINE(sent)
subj_toks = [tok.text.lower() for tok in doc]
subj_toks = [NLP_ENGINE.vocab[tok] for tok in subj_toks]
subj_toks = [tok.text for tok in subj_toks if not tok.is_stop]
no_stop_str = ' '.join(subj_toks)
no_stop_doc = NLP_ENGINE(no_stop_str)
subjs = [tok.text for tok in no_stop_doc if tok.pos_ == 'NOUN']
if len(subjs) == 0:
return None
else:
return random.choice(subjs)
if __name__ == '__main__':
gen_user_corpus('Michael Usachenko', 'mu_corpus.txt')
mu_model = build_mm_for_user('Michael Usachenko', 'mu_corpus.txt')
gen_user_corpus('Jonathan Shobrook', 'js_corpus.txt')
js_model = build_mm_for_user('Jonathan Shobrook', 'js_corpus.txt')
# generate starting sentence
init_sent = gen_valid_sent(mu_model)
init_subj = get_next_sent_subj(init_sent)
# WIP: back and forth conversation. need to modify markovify libs
# works for a few cycles, then errors
past_init = False
prior_resp = None
"""
for i in range(100):
if not past_init:
past_init = True
js_resp = gen_valid_sent(js_model, init_state=init_subj)
print('JONATHAN:', js_resp)
prior_resp = js_resp
else:
next_subj = get_next_sent_subj(prior_resp)
mu_resp = gen_valid_sent(mu_model, init_state=next_subj)
print('MICHAEL:', mu_resp)
next_subj = get_next_sent_subj(mu_resp)
js_resp = gen_valid_sent(js_model, init_state=next_subj)
print('JONATHAN:', js_resp)
prior_resp = js_resp
"""
for i in range(100):
#next_subj = get_next_sent_subj(prior_resp)
mu_resp = gen_valid_sent(mu_model)
print('MICHAEL:', mu_resp)
#next_subj = get_next_sent_subj(mu_resp)
js_resp = gen_valid_sent(js_model)
print('JONATHAN:', js_resp)
#prior_resp = js_resp
| 23
| 67
| 0.717874
| 670
| 4,140
| 4.126866
| 0.244776
| 0.035805
| 0.03038
| 0.03255
| 0.303797
| 0.214467
| 0.198553
| 0.176854
| 0.136347
| 0.093309
| 0
| 0.002933
| 0.17657
| 4,140
| 179
| 68
| 23.128492
| 0.808155
| 0.094444
| 0
| 0.104762
| 0
| 0
| 0.069818
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.066667
| 0
| 0.190476
| 0.019048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e6ceb4b1bd05af797219ac67e3f71b01f520394
| 6,211
|
py
|
Python
|
src/cnf_shuffler.py
|
jreeves3/BiPartGen-Artifact
|
d7c6db628cad25701a398da67ab87bb725513a61
|
[
"MIT"
] | null | null | null |
src/cnf_shuffler.py
|
jreeves3/BiPartGen-Artifact
|
d7c6db628cad25701a398da67ab87bb725513a61
|
[
"MIT"
] | null | null | null |
src/cnf_shuffler.py
|
jreeves3/BiPartGen-Artifact
|
d7c6db628cad25701a398da67ab87bb725513a61
|
[
"MIT"
] | null | null | null |
#/**********************************************************************************
# Copyright (c) 2021 Joseph Reeves and Cayden Codel, Carnegie Mellon University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# **************************************************************************************************/
# @file cnf_shuffler.py
#
# @usage python cnf_shuffler.py [-cnsv] <input.cnf>
#
# @author Cayden Codel (ccodel@andrew.cmu.edu)
#
# @bug No known bugs.
import random
import sys
import os
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-c", "--clauses", dest="clauses", action="store_true",
help="Shuffle the order of the clause lines in the CNF")
parser.add_option("-n", "--names", dest="names", action="store_true",
help="Shuffle the names of the literals in the clauses")
parser.add_option("-r", "--random", dest="seed",
help="Provide a randomization seed")
parser.add_option("-s", "--signs", dest="signs",
help="Switch the sign of literals with the provided prob")
parser.add_option("-v", "--variables", dest="variables",
help="Shuffle the order of the variables with prob")
(options, args) = parser.parse_args()
f_name = sys.argv[-1]
if len(sys.argv) == 1:
print("Must supply a CNF file")
exit()
# Parse the provided CNF file
if not os.path.exists(f_name) or os.path.isdir(f_name):
print("Supplied CNF file does not exist or is directory", file=sys.stderr)
exit()
cnf_file = open(f_name, "r")
cnf_lines = cnf_file.readlines()
cnf_file.close()
# Verify that the file has at least one line
if len(cnf_lines) == 0:
print("Supplied CNF file is empty", file=sys.stderr)
exit()
# Do treatment on the lines
cnf_lines = list(map(lambda x: x.strip(), cnf_lines))
# Verify that the file is a CNF file
header_line = cnf_lines[0].split(" ")
if header_line[0] != "p" or header_line[1] != "cnf":
print("Supplied file doesn't follow DIMACS CNF convention")
exit()
num_vars = int(header_line[2])
num_clauses = int(header_line[3])
print(" ".join(header_line))
cnf_lines = cnf_lines[1:]
# If the -r option is specified, initialize the random library
if options.seed is not None:
random.seed(a=int(options.seed))
else:
random.seed()
# If the -c option is specified, permute all other lines
if options.clauses:
random.shuffle(cnf_lines) # shuffles in place; random.shuffle returns None
# If the -v option is specified, permute the order of variables
if options.variables is not None:
var_prob = float(options.variables)
if var_prob <= 0 or var_prob > 1:
print("Prob for var shuffling not between 0 and 1", file=sys.stderr)
exit()
# TODO this doesn't work if each line is a single variable, etc.
for i in range(0, len(cnf_lines)):
line = cnf_lines[i]
atoms = line.split(" ")
if atoms[0][0] == "c" or random.random() > var_prob:
continue
if atoms[-1] == "0":
atoms = atoms[:-1]
random.shuffle(atoms)
atoms.append("0")
else:
random.shuffle(atoms)
cnf_lines[i] = " ".join(atoms)
# Now do one pass through all other lines to get the variable names
if options.names:
literals = {}
for line in cnf_lines:
if line[0] == "c":
continue
atoms = line.split(" ")
for atom in atoms:
lit = abs(int(atom))
if lit != 0:
literals[lit] = True
# After storing all the literals, permute
literal_keys = list(literals.keys())
p_keys = list(literals.keys())
random.shuffle(p_keys)
zipped = list(zip(literal_keys, p_keys))
for k, p in zipped:
literals[k] = p
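# literals now maps each original variable number to its randomly permuted replacement, applied line by line below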
for i in range(0, len(cnf_lines)):
line = cnf_lines[i]
if line[0] == "c":
continue
atoms = line.split(" ")
for j in range(0, len(atoms)):
if atoms[j] != "0":
if int(atoms[j]) < 0:
atoms[j] = "-" + str(literals[abs(int(atoms[j]))])
else:
atoms[j] = str(literals[int(atoms[j])])
cnf_lines[i] = " ".join(atoms)
if options.signs is not None:
signs_prob = float(options.signs)
if signs_prob < 0 or signs_prob > 1:
print("Sign prob must be between 0 and 1", file=sys.stderr)
exit()
flipped_literals = {}
for i in range(0, len(cnf_lines)):
line = cnf_lines[i]
if line[0] == "c":
continue
# For each symbol inside, flip weighted coin and see if flip
atoms = line.split(" ")
for j in range(0, len(atoms)):
atom = atoms[j]
if atom != "0":
if flipped_literals.get(atom) is None:
if random.random() <= signs_prob:
flipped_literals[atom] = True
else:
flipped_literals[atom] = False
if flipped_literals[atom]:
atoms[j] = str(-int(atom))
cnf_lines[i] = " ".join(atoms)
# Finally, output the transformed lines
for line in cnf_lines:
print(line)
| 34.893258
| 101
| 0.605378
| 867
| 6,211
| 4.264129
| 0.288351
| 0.043278
| 0.014606
| 0.014877
| 0.129565
| 0.105761
| 0.080876
| 0.080876
| 0.065188
| 0.056262
| 0
| 0.008465
| 0.258251
| 6,211
| 177
| 102
| 35.090395
| 0.794009
| 0.321526
| 0
| 0.318584
| 0
| 0
| 0.134961
| 0
| 0
| 0
| 0
| 0.00565
| 0
| 1
| 0
| false
| 0
| 0.035398
| 0
| 0.035398
| 0.070796
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e6dbb5cefe12073382965816c2a9d3f10ed725c
| 4,171
|
py
|
Python
|
test/app_page_scraper_test.py
|
googleinterns/betel
|
2daa56081ccc753f5b7eafbd1e9a48e3aca4b657
|
[
"Apache-2.0"
] | 1
|
2020-09-21T12:52:33.000Z
|
2020-09-21T12:52:33.000Z
|
test/app_page_scraper_test.py
|
googleinterns/betel
|
2daa56081ccc753f5b7eafbd1e9a48e3aca4b657
|
[
"Apache-2.0"
] | null | null | null |
test/app_page_scraper_test.py
|
googleinterns/betel
|
2daa56081ccc753f5b7eafbd1e9a48e3aca4b657
|
[
"Apache-2.0"
] | 1
|
2020-07-31T09:55:33.000Z
|
2020-07-31T09:55:33.000Z
|
import pathlib
import pytest
from betel import app_page_scraper
from betel import betel_errors
from betel import utils
ICON_HTML = """
<img src="%s" class="T75of sHb2Xb">
"""
CATEGORY_HTML = """
<a itemprop="genre">Example</a>
"""
FILTERED_CATEGORY_HTML = """
<a itemprop="genre">Filtered</a>
"""
SIMPLE_HTML = """
<p>Simple paragraph.</p>
"""
ICON_SUBDIR = pathlib.Path("icon_subdir")
APP_ID = "com.example"
ICON_NAME = "icon_com.example"
EXPECTED_CATEGORY = "example"
FILE = "file:"
@pytest.fixture
def icon_dir(tmp_path_factory):
return tmp_path_factory.mktemp("icon_dir")
@pytest.fixture
def test_dir(tmp_path_factory):
return tmp_path_factory.mktemp("test_dir")
@pytest.fixture
def play_scraper(icon_dir, test_dir):
base_url = FILE + str(test_dir) + "/"
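# a file: base URL makes the scraper read local HTML fixtures from test_dir instead of contacting the real store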
return app_page_scraper.PlayAppPageScraper(base_url, icon_dir, ["example"])
@pytest.fixture
def input_dir(tmp_path_factory):
return tmp_path_factory.mktemp("input_dir")
class TestAppPageScraper:
def test_get_icon(self, play_scraper, test_dir, icon_dir):
rand_icon = _create_icon(test_dir)
_create_html_file(test_dir, ICON_HTML, icon_src=True)
play_scraper.get_app_icon(APP_ID, ICON_SUBDIR)
read_icon = icon_dir / ICON_SUBDIR / ICON_NAME
assert read_icon.exists()
assert read_icon.read_text() == rand_icon.read_text()
def test_get_category(self, play_scraper, test_dir):
_create_html_file(test_dir, CATEGORY_HTML)
genre = play_scraper.get_app_category(APP_ID)
assert genre == EXPECTED_CATEGORY
def test_missing_icon_class(self, play_scraper, test_dir):
_create_html_file(test_dir, SIMPLE_HTML)
with pytest.raises(betel_errors.PlayScrapingError) as exc:
play_scraper.get_app_icon(APP_ID, ICON_SUBDIR)
assert str(exc.value) == "Icon class not found in html."
def test_missing_category_itemprop(self, play_scraper, test_dir):
_create_html_file(test_dir, SIMPLE_HTML)
with pytest.raises(betel_errors.PlayScrapingError) as exc:
play_scraper.get_app_category(APP_ID)
assert str(exc.value) == "Category itemprop not found in html."
def test_invalid_base_url(self, icon_dir):
random_url = "https://127.0.0.1/betel-test-invalid-base-url-835AHD/"
play_scraper = app_page_scraper.PlayAppPageScraper(random_url, icon_dir)
with pytest.raises(betel_errors.AccessError) as exc:
play_scraper.get_app_category(APP_ID)
assert "Can not open URL." in str(exc.value)
def test_invalid_icon_url(self, play_scraper, test_dir):
_create_html_file(test_dir, ICON_HTML, icon_src=True)
with pytest.raises(betel_errors.AccessError) as exc:
play_scraper.get_app_icon(APP_ID)
assert "Can not retrieve icon." in str(exc.value)
def test_store_app_info(self, play_scraper, test_dir, icon_dir):
expected_info = f"{APP_ID},{EXPECTED_CATEGORY}"
_create_html_file(test_dir, ICON_HTML + CATEGORY_HTML, icon_src=True)
rand_icon = _create_icon(test_dir)
play_scraper.store_app_info(APP_ID)
retrieved_icon = icon_dir / ICON_NAME
info_file = icon_dir / utils.SCRAPER_INFO_FILE_NAME
assert retrieved_icon.exists()
assert rand_icon.read_text() == retrieved_icon.read_text()
assert expected_info in info_file.read_text()
def test_store_app_info_filter(self, play_scraper, test_dir, icon_dir):
_create_html_file(test_dir, ICON_HTML + FILTERED_CATEGORY_HTML, icon_src=True)
_create_icon(test_dir)
play_scraper.store_app_info(APP_ID)
retrieved_icon = icon_dir / ICON_NAME
assert not retrieved_icon.exists()
def _create_html_file(test_dir, text, icon_src=False):
html_file = test_dir / "details?id=com.example"
if icon_src:
html_img_src = FILE + str(test_dir / ICON_NAME)
text = text % html_img_src
html_file.write_text(text)
def _create_icon(test_dir):
rand_array = str([15, 934, 8953, 409, 32])
rand_icon = test_dir / ICON_NAME
rand_icon.write_text(rand_array)
return rand_icon
| 28.182432
| 86
| 0.714217
| 606
| 4,171
| 4.521452
| 0.155116
| 0.066423
| 0.036131
| 0.04927
| 0.490511
| 0.429197
| 0.389781
| 0.34854
| 0.323723
| 0.253285
| 0
| 0.007695
| 0.189883
| 4,171
| 147
| 87
| 28.37415
| 0.803196
| 0
| 0
| 0.273684
| 0
| 0.010526
| 0.100695
| 0.025653
| 0
| 0
| 0
| 0
| 0.115789
| 1
| 0.147368
| false
| 0
| 0.052632
| 0.031579
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e734b51dd3ec79fecc1a0e0800072ebad29c909
| 556
|
py
|
Python
|
lang/string/reverse-words.py
|
joez/letspy
|
9f653bc0071821fdb49da8c19787dc7e12921457
|
[
"Apache-2.0"
] | null | null | null |
lang/string/reverse-words.py
|
joez/letspy
|
9f653bc0071821fdb49da8c19787dc7e12921457
|
[
"Apache-2.0"
] | null | null | null |
lang/string/reverse-words.py
|
joez/letspy
|
9f653bc0071821fdb49da8c19787dc7e12921457
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
def reverse_words(s):
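# w[::-1] reverses each word in place; split(' ') / join keep space boundaries but not tabs or newlines, hence the _ext variant below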
return ' '.join(w[::-1] for w in s.split(' '))
def reverse_words_ext(s):
# support other whitespaces
strs, word = [], ''
for c in s:
if c.isspace():
if word:
strs.append(word[::-1])
word = ''
strs.append(c)
else:
word += c
if word:
strs.append(word[::-1])
return ''.join(strs)
if __name__ == '__main__':
s = input()
for f in (reverse_words, reverse_words_ext):
print(f(s))
| 19.857143
| 50
| 0.491007
| 72
| 556
| 3.597222
| 0.430556
| 0.185328
| 0.162162
| 0.123552
| 0.162162
| 0.162162
| 0
| 0
| 0
| 0
| 0
| 0.011111
| 0.352518
| 556
| 27
| 51
| 20.592593
| 0.708333
| 0.084532
| 0
| 0.210526
| 0
| 0
| 0.019724
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0
| 0.052632
| 0.210526
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e74bf0ffc1a010178cf010d5be1824b1235b7ba
| 11,166
|
py
|
Python
|
python-scripts/gt_generate_python_curve.py
|
TrevisanGMW/maya
|
4e3b45210d09a1cd2a1c0419defe6a5ffa97cf92
|
[
"MIT"
] | 26
|
2020-11-16T12:49:05.000Z
|
2022-03-09T20:39:22.000Z
|
python-scripts/gt_generate_python_curve.py
|
TrevisanGMW/maya
|
4e3b45210d09a1cd2a1c0419defe6a5ffa97cf92
|
[
"MIT"
] | 47
|
2020-11-08T23:35:49.000Z
|
2022-03-10T03:43:00.000Z
|
python-scripts/gt_generate_python_curve.py
|
TrevisanGMW/maya
|
4e3b45210d09a1cd2a1c0419defe6a5ffa97cf92
|
[
"MIT"
] | 5
|
2021-01-27T06:10:34.000Z
|
2021-10-30T23:29:44.000Z
|
"""
Python Curve Generator
@Guilherme Trevisan - github.com/TrevisanGMW/gt-tools - 2020-01-02
1.1 - 2020-01-03
Minor patch adjustments to the script
1.2 - 2020-06-07
Fixed random window widthHeight issue.
Updated naming convention to make it clearer. (PEP8)
Added length checker for selection before running.
1.3 - 2020-06-17
Changed UI
Added help menu
Added icon
1.4 - 2020-06-27
No longer failing to generate curves with non-unique names
Tweaked the color and text for the title and help menu
1.5 - 2021-01-26
Fixed way the curve is generated to account for closed and opened curves
1.6 - 2021-05-12
Made script compatible with Python 3 (Maya 2022+)
"""
import maya.cmds as cmds
import sys
from decimal import *
from maya import OpenMayaUI as omui
try:
from shiboken2 import wrapInstance
except ImportError:
from shiboken import wrapInstance
try:
from PySide2.QtGui import QIcon
from PySide2.QtWidgets import QWidget
except ImportError:
from PySide.QtGui import QIcon, QWidget
# Script Name
script_name = "GT - Generate Python Curve"
# Version:
script_version = "1.6"
#Python Version
python_version = sys.version_info.major
# Default Settings
close_curve = False
add_import = False
# Function for the "Run Code" button
def run_output_code(out):
try:
exec(out)
except Exception as e:
cmds.warning("Something is wrong with your code!")
cmds.warning(e)
# Main Form ============================================================================
def build_gui_py_curve():
window_name = "build_gui_py_curve"
if cmds.window(window_name, exists =True):
cmds.deleteUI(window_name)
# Main GUI Start Here =================================================================================
build_gui_py_curve = cmds.window(window_name, title=script_name + ' (v' + script_version + ')',\
titleBar=True, mnb=False, mxb=False, sizeable =True)
cmds.window(window_name, e=True, s=True, wh=[1,1])
content_main = cmds.columnLayout(adj = True)
# Title
title_bgc_color = (.4, .4, .4)
cmds.separator(h=10, style='none') # Empty Space
cmds.rowColumnLayout(nc=1, cw=[(1, 270)], cs=[(1, 10)], p=content_main) # Window Size Adjustment
cmds.rowColumnLayout(nc=3, cw=[(1, 10), (2, 200), (3, 50)], cs=[(1, 10), (2, 0), (3, 0)], p=content_main) # Title Column
cmds.text(" ", bgc=title_bgc_color) # Tiny Empty Green Space
cmds.text(script_name, bgc=title_bgc_color, fn="boldLabelFont", align="left")
cmds.button( l ="Help", bgc=title_bgc_color, c=lambda x:build_gui_help_py_curve())
cmds.separator(h=10, style='none', p=content_main) # Empty Space
# Body ====================
body_column = cmds.rowColumnLayout(nc=1, cw=[(1, 260)], cs=[(1,10)], p=content_main)
cmds.rowColumnLayout(nc=1, cw=[(1, 260)], cs=[(1,10)])
settings = cmds.checkBoxGrp(columnWidth2=[150, 1], numberOfCheckBoxes=2, \
label1 = 'Add import \"maya.cmds\" ', label2 = "Force Open", v1 = add_import, v2 = close_curve)
cmds.rowColumnLayout(nc=1, cw=[(1, 230)], cs=[(1,0)])
cmds.separator(h=10, style='none') # Empty Space
cmds.button(l ="Generate", bgc=(.6, .6, .6), c=lambda x:generate_python_curve())
cmds.separator(h=10, style='none', p=content_main) # Empty Space
cmds.separator(h=10, p=content_main)
# Bottom ====================
cmds.rowColumnLayout(nc=1, cw=[(1, 260)], cs=[(1,10)], p=content_main)
cmds.text(label='Output Python Curve' )
output_python = cmds.scrollField(editable=True, wordWrap=True)
cmds.separator(h=10, style='none') # Empty Space
cmds.button(l ="Run Code", c=lambda x:run_output_code(cmds.scrollField(output_python, query=True, text=True)))
cmds.separator(h=10, style='none') # Empty Space
def generate_python_curve():
not_curve_error = "Please make sure you selected a Nurbs Curve or a Bezier Curve object before generating it"
if len(cmds.ls(selection=True)) != 0:
getcontext().prec = 5
sel_one = cmds.ls(sl=1)[0]
shape = cmds.listRelatives(sel_one, s=1 , fullPath=True)[0]
type_checker = str(cmds.objectType(shape))
if "nurbsCurve" in type_checker or "bezierCurve" in type_checker:
opened_curve = cmds.checkBoxGrp (settings, q=True, value2=True)
per_state = cmds.getAttr(shape + '.form')
knots_string = ''
extra_cvs_per = ''
is_periodic = False
if not opened_curve and per_state == 2:
is_periodic=True
curve_info = cmds.arclen(sel_one, ch=True)
curve_knots = cmds.getAttr( curve_info + '.knots[*]' )
knots_string = ', per=True, k=' + str(curve_knots)
cmds.delete(curve_info)
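# a closed curve must be rebuilt with per=True plus its original knot vector, and its first CVs repeated at the end (extra_cvs_per below)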
cvs = cmds.getAttr(shape+'.cv[*]')
cvs_list = []
for c in cvs:
cvs_list.append([float(Decimal("%.3f" % c[0])),float(Decimal("%.3f" % c[1])),float(Decimal("%.3f" % c[2]))])
if is_periodic and len(cvs) > 2:
extra_cvs_per = ', '
for i in range(3):
if i != 2:
extra_cvs_per += str(cvs_list[i]) + ', '
else:
extra_cvs_per += str(cvs_list[i])
if cmds.checkBoxGrp(settings, q=True, value1=True):
out = 'import maya.cmds as cmds\n\ncmds.curve(p='
else:
out = 'cmds.curve(p='
out += '[%s' % ', '.join(map(str, cvs_list))
out += extra_cvs_per + '], d='+str(cmds.getAttr(shape+'.degree'))+ knots_string + ')'
print ("#" * 100)
print (out)
print ("#" * 100)
cmds.scrollField(output_python, edit=True, wordWrap=True, text=out ,sl=True)
cmds.setFocus(output_python)
else:
cmds.warning(not_curve_error)
cmds.scrollField(output_python, edit=True, wordWrap=True, text=not_curve_error ,sl=True)
cmds.setFocus(output_python)
else:
cmds.warning(not_curve_error)
# Show and Lock Window
cmds.showWindow(build_gui_py_curve)
cmds.window(window_name, e=True, s=False)
# Set Window Icon
qw = omui.MQtUtil.findWindow(window_name)
if python_version == 3:
widget = wrapInstance(int(qw), QWidget)
else:
widget = wrapInstance(long(qw), QWidget)
icon = QIcon(':/pythonFamily.png')
widget.setWindowIcon(icon)
# Main GUI Ends Here =================================================================================
# Creates Help GUI
def build_gui_help_py_curve():
window_name = "build_gui_help_py_curve"
if cmds.window(window_name, exists=True):
cmds.deleteUI(window_name, window=True)
cmds.window(window_name, title= script_name + " Help", mnb=False, mxb=False, s=True)
cmds.window(window_name, e=True, s=True, wh=[1,1])
cmds.columnLayout("main_column", p= window_name)
# Title Text
cmds.separator(h=12, style='none') # Empty Space
cmds.rowColumnLayout(nc=1, cw=[(1, 310)], cs=[(1, 10)], p="main_column") # Window Size Adjustment
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1, 10)], p="main_column") # Title Column
cmds.text(script_name + " Help", bgc=[.4,.4,.4], fn="boldLabelFont", align="center")
cmds.separator(h=10, style='none', p="main_column") # Empty Space
# Body ====================
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p="main_column")
cmds.text(l='This script generates the Python code necessary to create', align="left")
cmds.text(l='a selected curve.', align="left")
cmds.separator(h=10, style='none') # Empty Space
cmds.text(l='Make sure you delete the curve\'s history before ', align="left")
cmds.text(l='generating the code.', align="left")
cmds.separator(h=15, style='none') # Empty Space
cmds.text(l='Add import "maya.cmds":', align="left", fn="boldLabelFont")
cmds.text(l='Adds a line that imports Maya\'s API. This is necessary', align="left")
cmds.text(l='when running python scripts.', align="left")
cmds.separator(h=15, style='none') # Empty Space
cmds.text(l='Force Open: ', align="left", fn="boldLabelFont")
cmds.text(l='Doesn\'t check whether the curve is periodic, leaving it open.', align="left")
cmds.separator(h=15, style='none') # Empty Space
cmds.text(l='"Generate" button:', align="left", fn="boldLabelFont")
cmds.text(l='Outputs the python code necessary to create the curve', align="left")
cmds.text(l='inside the "Output Python Curve" box.', align="left")
cmds.separator(h=15, style='none') # Empty Space
cmds.text(l='Run Code:', align="left", fn="boldLabelFont")
cmds.text(l='Attempts to run the code (or anything written) inside ', align="left")
cmds.text(l='"Output Python Curve" box', align="left")
cmds.separator(h=15, style='none') # Empty Space
cmds.rowColumnLayout(nc=2, cw=[(1, 140),(2, 140)], cs=[(1,10),(2, 0)], p="main_column")
cmds.text('Guilherme Trevisan ')
cmds.text(l='<a href="mailto:trevisangmw@gmail.com">TrevisanGMW@gmail.com</a>', hl=True, highlightColor=[1,1,1])
cmds.rowColumnLayout(nc=2, cw=[(1, 140),(2, 140)], cs=[(1,10),(2, 0)], p="main_column")
cmds.separator(h=15, style='none') # Empty Space
cmds.text(l='<a href="https://github.com/TrevisanGMW">Github</a>', hl=True, highlightColor=[1,1,1])
cmds.separator(h=7, style='none') # Empty Space
# Close Button
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p="main_column")
cmds.separator(h=10, style='none')
cmds.button(l='OK', h=30, c=lambda args: close_help_gui())
cmds.separator(h=8, style='none')
# Show and Lock Window
cmds.showWindow(window_name)
cmds.window(window_name, e=True, s=False)
# Set Window Icon
qw = omui.MQtUtil.findWindow(window_name)
if python_version == 3:
widget = wrapInstance(int(qw), QWidget)
else:
widget = wrapInstance(long(qw), QWidget)
icon = QIcon(':/question.png')
widget.setWindowIcon(icon)
def close_help_gui():
if cmds.window(window_name, exists=True):
cmds.deleteUI(window_name, window=True)
#Build UI
if __name__ == '__main__':
build_gui_py_curve()
| 40.901099
| 129
| 0.5729
| 1,450
| 11,166
| 4.304828
| 0.213793
| 0.028196
| 0.042615
| 0.039571
| 0.456104
| 0.414611
| 0.364627
| 0.323454
| 0.307113
| 0.275072
| 0
| 0.0353
| 0.266792
| 11,166
| 273
| 130
| 40.901099
| 0.727128
| 0.136934
| 0
| 0.323529
| 0
| 0.017647
| 0.143503
| 0.011304
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.094118
| 0
| 0.123529
| 0.017647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e75ab3bf35f32714181bf627668b80eaa462378
| 1,766
|
py
|
Python
|
client/core/scene/summary.py
|
krerkkiat/space-invader
|
428b1041c9246b55cb63bc6c0b2ec20beb7a32ed
|
[
"MIT"
] | null | null | null |
client/core/scene/summary.py
|
krerkkiat/space-invader
|
428b1041c9246b55cb63bc6c0b2ec20beb7a32ed
|
[
"MIT"
] | null | null | null |
client/core/scene/summary.py
|
krerkkiat/space-invader
|
428b1041c9246b55cb63bc6c0b2ec20beb7a32ed
|
[
"MIT"
] | null | null | null |
import pygame
from config import Config
from core.ui import Table, Button
from core.scene import Scene
from core.manager import SceneManager
from core.scene.preload import Preload
class SummaryScene(Scene):
def __init__(self, game):
super().__init__(game)
self._background = pygame.display.get_surface()
self._background.set_alpha(180)
self._background.fill(Config.colors['black'])
self._elements.clear(self._canvas, self._background)
w, h = (200, 200)
rowData = [('Score', 'Wave'), (str(self._parent._pilot.score), str(self._parent._pilot.wave))]
columnWidth = [100, 100]
self._scoreBoard = Table(self, w, h, rowData, columnWidth, title='Summary', line=False, button=False)
self._scoreBoard.rect.centerx = Config.windowWidth//2
self._scoreBoard.rect.centery = Config.windowHeight//2
self.addElement(self._scoreBoard)
def callBack():
# SceneManager.call(MainScene(self._parent), Preload(self._parent))
self._parent._pilot.update()
SceneManager.ret(Preload(self._parent))
self._btn = Button(self, 'Continue', callBack)
self._btn.rect.right = self._scoreBoard.rect.right
self._btn.rect.top = self._scoreBoard.rect.bottom
self.addElement(self._btn)
self.addEventListener(self._btn.handleEvent)
def loadData(self):
pass
def run(self):
for event in pygame.event.get():
self._handleEvent(event)
self.update()
self.draw()
self._clock.tick(Config.ticks)
def update(self):
super().update()
def draw(self):
updatedRects = self._elements.draw(self._canvas)
pygame.display.update(updatedRects)
| 34.627451
| 109
| 0.656285
| 207
| 1,766
| 5.405797
| 0.376812
| 0.053619
| 0.064343
| 0.032172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012391
| 0.223103
| 1,766
| 51
| 110
| 34.627451
| 0.803207
| 0.036806
| 0
| 0
| 0
| 0
| 0.017059
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146341
| false
| 0.02439
| 0.146341
| 0
| 0.317073
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e78083845e016661893639e08ffab0d50cff621
| 546
|
py
|
Python
|
src/python/intensity/components/shutdown_if_empty.py
|
kripken/intensityengine
|
9ae352b4f526ecb180004ae4968db7f64f140762
|
[
"MIT"
] | 31
|
2015-01-18T20:27:31.000Z
|
2021-07-03T03:58:47.000Z
|
src/python/intensity/components/shutdown_if_empty.py
|
JamesLinus/intensityengine
|
9ae352b4f526ecb180004ae4968db7f64f140762
|
[
"MIT"
] | 4
|
2015-07-05T21:09:37.000Z
|
2019-09-06T14:34:59.000Z
|
src/python/intensity/components/shutdown_if_empty.py
|
JamesLinus/intensityengine
|
9ae352b4f526ecb180004ae4968db7f64f140762
|
[
"MIT"
] | 11
|
2015-02-03T19:24:10.000Z
|
2019-09-20T10:59:50.000Z
|
# Copyright 2010 Alon Zakai ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.
from intensity.signals import client_connect, client_disconnect
from intensity.base import quit
class Data:
counter = 0
def add(sender, **kwargs):
Data.counter += 1
client_connect.connect(add, weak=False)
def subtract(sender, **kwargs):
Data.counter -= 1
if Data.counter <= 0:
quit()
client_disconnect.connect(subtract, weak=False)
| 22.75
| 110
| 0.717949
| 74
| 546
| 5.243243
| 0.635135
| 0.113402
| 0.061856
| 0.118557
| 0.123711
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018018
| 0.186813
| 546
| 23
| 111
| 23.73913
| 0.855856
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e780b142bddebcec890df30277381a71e204488
| 694
|
py
|
Python
|
pyforms/utils/timeit.py
|
dominic-dev/pyformsd
|
23e31ceff2943bc0f7286d25dd14450a14b986af
|
[
"MIT"
] | null | null | null |
pyforms/utils/timeit.py
|
dominic-dev/pyformsd
|
23e31ceff2943bc0f7286d25dd14450a14b986af
|
[
"MIT"
] | null | null | null |
pyforms/utils/timeit.py
|
dominic-dev/pyformsd
|
23e31ceff2943bc0f7286d25dd14450a14b986af
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Ricardo Ribeiro"
__credits__ = ["Ricardo Ribeiro"]
__license__ = "MIT"
__version__ = "0.0"
__maintainer__ = "Ricardo Ribeiro"
__email__ = "ricardojvr@gmail.com"
__status__ = "Development"
import time
from datetime import datetime, timedelta
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
time_elapsed = datetime(1, 1, 1) + timedelta(seconds=(te - ts))
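# adding the elapsed timedelta to a reference datetime splits it into day/hour/minute/second fields; day-1 in the print removes the reference day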
print("%s: %d:%d:%d:%d;%d" % (method.__name__, time_elapsed.day-1, time_elapsed.hour, time_elapsed.minute, time_elapsed.second, time_elapsed.microsecond))
return result
return timed
| 27.76
| 156
| 0.674352
| 89
| 694
| 4.831461
| 0.539326
| 0.153488
| 0.02093
| 0.018605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012153
| 0.170029
| 694
| 25
| 157
| 27.76
| 0.734375
| 0.054755
| 0
| 0
| 0
| 0
| 0.152672
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.333333
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e7a4b454a8651618254290e5f7ef6b4e1cd99a9
| 1,388
|
py
|
Python
|
cogs/botinfo.py
|
MM-coder/salbot-rewrite
|
322c34ba85a2c852e02cd3c183d5a7a4a077ff6f
|
[
"Apache-2.0"
] | 1
|
2020-08-17T05:14:58.000Z
|
2020-08-17T05:14:58.000Z
|
cogs/botinfo.py
|
MM-coder/salbot-rewrite
|
322c34ba85a2c852e02cd3c183d5a7a4a077ff6f
|
[
"Apache-2.0"
] | null | null | null |
cogs/botinfo.py
|
MM-coder/salbot-rewrite
|
322c34ba85a2c852e02cd3c183d5a7a4a077ff6f
|
[
"Apache-2.0"
] | 1
|
2020-08-17T16:57:30.000Z
|
2020-08-17T16:57:30.000Z
|
"""
Created by vcokltfre at 2020-07-08
"""
import json
import logging
import time
from datetime import datetime
import discord
from discord.ext import commands
from discord.ext.commands import has_any_role
class BotInfo(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.logger = logging.getLogger("salbot.cogs.botinfo")
        self.uptime_start = round(time.time())
        self.socket_stats = {}
        self.opcodes = {
            10: "HELLO",
            11: "HEARTBEAT",
            9: "HI",
            7: "RECONNECT"
        }

    @commands.Cog.listener()
    async def on_socket_response(self, data):
        t = data["t"]
        if not t:
            try:
                t = self.opcodes[data["op"]]
            except KeyError:
                # Unknown opcode: t stays None and the event is tallied under the None key.
                self.logger.warning(f"Unknown opcode. Received: {data['op']}")
        self.socket_stats[t] = self.socket_stats.get(t, 0) + 1

    @commands.command(name="stats")
    @has_any_role("Administrator", "Moderator")
    async def stats_bot(self, ctx, typ="raw"):
        if typ == "raw":
            jsd = json.dumps(self.socket_stats, indent=4)
            desc = f"```json\n{jsd}```"
            embed = discord.Embed(title="Raw Socket Stats", color=0xFF0000,
                                  description=desc, timestamp=datetime.now())
            await ctx.channel.send(embed=embed)

def setup(bot):
    bot.add_cog(BotInfo(bot))
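# Hypothetical entry point showing how discord.py 1.x would load this cog;
# load_extension() imports the module and calls its setup(bot) hook.
# (In discord.py 2.x, setup() and load_extension() are coroutines instead.)
from discord.ext import commands

bot = commands.Bot(command_prefix="!")
bot.load_extension("cogs.botinfo")  # registers BotInfo via setup(bot)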
| 28.326531
| 119
| 0.591499
| 174
| 1,388
| 4.62069
| 0.511494
| 0.068408
| 0.074627
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021869
| 0.275216
| 1,388
| 48
| 120
| 28.916667
| 0.777336
| 0.024496
| 0
| 0
| 0
| 0
| 0.112184
| 0
| 0
| 0
| 0.005944
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.184211
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e7b6d33ac9f184e61e6b426b75d7acfe7a99f1e
| 6,486
|
py
|
Python
|
ninja_extra/pagination.py
|
eadwinCode/django-ninja-extra
|
16246c466ab8895ba1bf29d69f3d3e9337031edd
|
[
"MIT"
] | 43
|
2021-09-09T14:20:59.000Z
|
2022-03-28T00:38:52.000Z
|
ninja_extra/pagination.py
|
eadwinCode/django-ninja-extra
|
16246c466ab8895ba1bf29d69f3d3e9337031edd
|
[
"MIT"
] | 6
|
2022-01-04T10:53:11.000Z
|
2022-03-28T19:53:46.000Z
|
ninja_extra/pagination.py
|
eadwinCode/django-ninja-extra
|
16246c466ab8895ba1bf29d69f3d3e9337031edd
|
[
"MIT"
] | null | null | null |
import inspect
import logging
from collections import OrderedDict
from functools import wraps
from typing import TYPE_CHECKING, Any, Callable, Optional, Type, Union, cast, overload
from django.core.paginator import InvalidPage, Page, Paginator
from django.db.models import QuerySet
from django.http import HttpRequest
from ninja import Schema
from ninja.constants import NOT_SET
from ninja.pagination import LimitOffsetPagination, PageNumberPagination, PaginationBase
from ninja.signature import has_kwargs
from ninja.types import DictStrAny
from pydantic import Field
from ninja_extra.conf import settings
from ninja_extra.exceptions import NotFound
from ninja_extra.schemas import PaginatedResponseSchema
from ninja_extra.urls import remove_query_param, replace_query_param
logger = logging.getLogger()
if TYPE_CHECKING:
    from .controllers import ControllerBase  # pragma: no cover

__all__ = [
    "PageNumberPagination",
    "PageNumberPaginationExtra",
    "PaginationBase",
    "LimitOffsetPagination",
    "paginate",
    "PaginatedResponseSchema",
]

def _positive_int(
    integer_string: Union[str, int], strict: bool = False, cutoff: Optional[int] = None
) -> int:
    """
    Cast a string to a strictly positive integer.
    """
    ret = int(integer_string)
    if ret < 0 or (ret == 0 and strict):
        raise ValueError()
    if cutoff:
        return min(ret, cutoff)
    return ret

class PageNumberPaginationExtra(PaginationBase):
    class Input(Schema):
        page: int = Field(1, gt=0)
        page_size: int = Field(100, lt=200)

    page_query_param = "page"
    page_size_query_param = "page_size"
    max_page_size = 200
    paginator_class = Paginator

    def __init__(
        self,
        page_size: int = settings.PAGINATION_PER_PAGE,
        max_page_size: Optional[int] = None,
    ) -> None:
        super().__init__()
        self.page_size = page_size
        self.max_page_size = max_page_size or 200
        self.Input = self.create_input()  # type:ignore

    def create_input(self) -> Type[Input]:
        class DynamicInput(PageNumberPaginationExtra.Input):
            page: int = Field(1, gt=0)
            page_size: int = Field(self.page_size, lt=self.max_page_size)
        return DynamicInput

    def paginate_queryset(
        self, items: QuerySet, request: HttpRequest, **params: Any
    ) -> Any:
        pagination_input = cast(PageNumberPaginationExtra.Input, params["pagination"])
        page_size = self.get_page_size(pagination_input.page_size)
        current_page_number = pagination_input.page
        paginator = self.paginator_class(items, page_size)
        try:
            url = request.build_absolute_uri()
            page: Page = paginator.page(current_page_number)
            return self.get_paginated_response(base_url=url, page=page)
        except InvalidPage as exc:
            msg = "Invalid page. {page_number} {message}".format(
                page_number=current_page_number, message=str(exc)
            )
            raise NotFound(msg)

    def get_paginated_response(self, *, base_url: str, page: Page) -> DictStrAny:
        return OrderedDict(
            [
                ("count", page.paginator.count),
                ("next", self.get_next_link(base_url, page=page)),
                ("previous", self.get_previous_link(base_url, page=page)),
                ("results", list(page)),
            ]
        )

    @classmethod
    def get_response_schema(
        cls, response_schema: Union[Schema, Type[Schema], Any]
    ) -> Any:
        return PaginatedResponseSchema[response_schema]

    def get_next_link(self, url: str, page: Page) -> Optional[str]:
        if not page.has_next():
            return None
        page_number = page.next_page_number()
        return replace_query_param(url, self.page_query_param, page_number)

    def get_previous_link(self, url: str, page: Page) -> Optional[str]:
        if not page.has_previous():
            return None
        page_number = page.previous_page_number()
        if page_number == 1:
            return remove_query_param(url, self.page_query_param)
        return replace_query_param(url, self.page_query_param, page_number)

    def get_page_size(self, page_size: int) -> int:
        if page_size:
            try:
                return _positive_int(page_size, strict=True, cutoff=self.max_page_size)
            except (KeyError, ValueError):
                pass
        return self.page_size

@overload
def paginate() -> Callable[..., Any]:
    ...

@overload
def paginate(
    func_or_pgn_class: Any = NOT_SET, **paginator_params: Any
) -> Callable[..., Any]:
    ...

def paginate(
    func_or_pgn_class: Any = NOT_SET, **paginator_params: Any
) -> Callable[..., Any]:
    isfunction = inspect.isfunction(func_or_pgn_class)
    isnotset = func_or_pgn_class == NOT_SET
    pagination_class: Type[PaginationBase] = settings.PAGINATION_CLASS
    if isfunction:
        return _inject_pagination(func_or_pgn_class, pagination_class)
    if not isnotset:
        pagination_class = func_or_pgn_class

    def wrapper(func: Callable[..., Any]) -> Any:
        return _inject_pagination(func, pagination_class, **paginator_params)
    return wrapper

def _inject_pagination(
    func: Callable[..., Any],
    paginator_class: Type[PaginationBase],
    **paginator_params: Any,
) -> Callable[..., Any]:
    func.has_kwargs = True  # type: ignore
    if not has_kwargs(func):
        func.has_kwargs = False  # type: ignore
        logger.debug(
            f"function {func.__name__} should have **kwargs if you want to use pagination parameters"
        )
    paginator: PaginationBase = paginator_class(**paginator_params)
    paginator_kwargs_name = "pagination"

    @wraps(func)
    def view_with_pagination(
        controller: "ControllerBase", *args: Any, **kw: Any
    ) -> Any:
        func_kwargs = dict(kw)
        if not func.has_kwargs:  # type: ignore
            func_kwargs.pop(paginator_kwargs_name)
        items = func(controller, *args, **func_kwargs)
        assert (
            controller.context and controller.context.request
        ), "Request object is None"
        return paginator.paginate_queryset(items, controller.context.request, **kw)

    view_with_pagination._ninja_contribute_args = [  # type: ignore
        (
            paginator_kwargs_name,
            paginator.Input,
            paginator.InputSource,
        ),
    ]
    return view_with_pagination
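# Hedged usage sketch: a django-ninja-extra controller route wrapped with the
# paginate() decorator defined above. `UserSchema` and the `User` model are
# placeholder names; api_controller/route are ninja_extra's public decorators.
from ninja_extra import api_controller, route

@api_controller("/users")
class UserController:
    @route.get("/", response=PageNumberPaginationExtra.get_response_schema(UserSchema))
    @paginate(PageNumberPaginationExtra, page_size=50)
    def list_users(self, **kwargs):
        # paginate() slices the returned queryset per request
        return User.objects.all()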
| 31.333333
| 101
| 0.665433
| 758
| 6,486
| 5.437995
| 0.217678
| 0.044639
| 0.016012
| 0.020378
| 0.141679
| 0.104561
| 0.104561
| 0.09704
| 0.09704
| 0.09704
| 0
| 0.003863
| 0.241751
| 6,486
| 206
| 102
| 31.485437
| 0.834282
| 0.019581
| 0
| 0.134969
| 0
| 0
| 0.051594
| 0.010887
| 0
| 0
| 0
| 0
| 0.006135
| 1
| 0.092025
| false
| 0.006135
| 0.116564
| 0.018405
| 0.361963
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e7d261c65a6ddf389725d10b7241f84b3620572
| 501
|
py
|
Python
|
toys/urls.py
|
julesc00/restful
|
11b5312caf4affeaa06e3ceb5b86a7c73357eed1
|
[
"MIT"
] | null | null | null |
toys/urls.py
|
julesc00/restful
|
11b5312caf4affeaa06e3ceb5b86a7c73357eed1
|
[
"MIT"
] | null | null | null |
toys/urls.py
|
julesc00/restful
|
11b5312caf4affeaa06e3ceb5b86a7c73357eed1
|
[
"MIT"
] | null | null | null |
from django.urls import path
from toys.views import (toy_list_view, toy_detail_view, toy_sql_view, toy_raw_sql_view,
                        toy_aggregate_view)

app_name = "toys"

urlpatterns = [
    path("toys/", toy_list_view, name="toys_list"),
    path("toys_sql/", toy_sql_view, name="toys_sql_list"),
    path("toys/count/", toy_aggregate_view, name="toys_count"),
    path("toys_raw/", toy_raw_sql_view, name="toys_raw_list"),
    path("toys/<int:pk>/", toy_detail_view, name="toy_detail"),
]
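# With app_name = "toys", the named routes resolve through the namespace via
# Django's reverse() (run from a view or shell once URLConfs are loaded; the
# final prefix depends on how the root urlconf includes this module):
from django.urls import reverse

reverse("toys:toy_detail", kwargs={"pk": 1})  # e.g. "/toys/1/"
reverse("toys:toys_count")                    # e.g. "/toys/count/"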
| 35.785714
| 87
| 0.692615
| 77
| 501
| 4.090909
| 0.25974
| 0.126984
| 0.152381
| 0.08254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159681
| 501
| 13
| 88
| 38.538462
| 0.748219
| 0
| 0
| 0
| 0
| 0
| 0.213573
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e7ff5caf482e80185273f9434f18cc9786fbe99
| 692
|
py
|
Python
|
setup.py
|
ellwise/kedro-light
|
8f5a05d880f3ded23b024d5db72b5fc615e75230
|
[
"MIT"
] | 2
|
2021-10-16T12:19:50.000Z
|
2022-01-20T16:50:14.000Z
|
setup.py
|
ellwise/kedro-light
|
8f5a05d880f3ded23b024d5db72b5fc615e75230
|
[
"MIT"
] | null | null | null |
setup.py
|
ellwise/kedro-light
|
8f5a05d880f3ded23b024d5db72b5fc615e75230
|
[
"MIT"
] | null | null | null |
from setuptools import setup
from os import path
# read the contents of your README file
curr_dir = path.abspath(path.dirname(__file__))
with open(path.join(curr_dir, "README.md"), encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="kedro-light",
    version="0.1",
    description="A lightweight interface to Kedro",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/ellwise/naive-bayes-explainer",
    author="Elliott Wise",
    author_email="ell.wise@gmail.com",
    license="MIT",
    packages=["kedro_light"],
    install_requires=["kedro"],
    include_package_data=True,
    zip_safe=False,
)
| 27.68
| 67
| 0.710983
| 94
| 692
| 5.042553
| 0.723404
| 0.126582
| 0.080169
| 0.126582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005128
| 0.154624
| 692
| 24
| 68
| 28.833333
| 0.805128
| 0.053468
| 0
| 0
| 0
| 0
| 0.260337
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e812cd9d9f3ad6325c8b7be7fb0c2f7d95ff84f
| 1,217
|
py
|
Python
|
app.py
|
mh-github/mh-wtgw
|
0e8d9b622954e14d1e24fda6fc6a4e63af2cd822
|
[
"CC0-1.0"
] | null | null | null |
app.py
|
mh-github/mh-wtgw
|
0e8d9b622954e14d1e24fda6fc6a4e63af2cd822
|
[
"CC0-1.0"
] | null | null | null |
app.py
|
mh-github/mh-wtgw
|
0e8d9b622954e14d1e24fda6fc6a4e63af2cd822
|
[
"CC0-1.0"
] | null | null | null |
import random
from flask import Flask, request, render_template, jsonify
app = Flask(__name__)
data_list = []
with open('data.txt', 'r') as data_file:
    data_list = data_file.readlines()

@app.route("/", methods=['GET'])
def index():
    index = random.randint(1, len(data_list) - 1)
    clue = data_list[index].split('|')[0]
    return render_template('game.html',
                           clue=clue.strip(),
                           index=index)

@app.route("/check")
def checkAnswer():
    ind = int(request.args.get("index"))
    ans = request.args.get("answer").strip().upper()
    correct_answer = data_list[ind].split('|')[1].strip()
    return "You got it right!" if (ans == correct_answer) else "Wrong Answer! Please try again!!"

@app.route("/show")
def showAnswer():
    ind = int(request.args.get("index"))
    return data_list[ind].split('|')[1].strip()

@app.route("/new")
def newClue():
    index = random.randint(1, len(data_list) - 1)
    clue = data_list[index].split('|')[0].strip()
    response = {
        'index': index,
        'clue': clue
    }
    return jsonify(response)

if __name__ == "__main__":
    app.run(host='0.0.0.0')
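# Quick smoke test with Flask's built-in test client (requires data.txt to
# exist; "GUESS" is an arbitrary placeholder answer):
client = app.test_client()
resp = client.get("/new")
payload = resp.get_json()  # {'index': ..., 'clue': ...}
check = client.get("/check", query_string={"index": payload["index"], "answer": "GUESS"})
print(check.data.decode())  # "Wrong Answer! ..." unless the guess matches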
| 27.659091
| 98
| 0.57765
| 155
| 1,217
| 4.367742
| 0.393548
| 0.094535
| 0.062038
| 0.05613
| 0.298375
| 0.298375
| 0.159527
| 0.159527
| 0.159527
| 0.159527
| 0
| 0.013043
| 0.244043
| 1,217
| 44
| 99
| 27.659091
| 0.722826
| 0
| 0
| 0.117647
| 0
| 0
| 0.110638
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.058824
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e81c177879d88e6b010319496c61e52cdb196f1
| 13,606
|
py
|
Python
|
imported_files/plotting_cswl05.py
|
SoumyaShreeram/Locating_AGN_in_DM_halos
|
1cfbee69b2c000faee4ecb199d65c3235afbed42
|
[
"MIT"
] | null | null | null |
imported_files/plotting_cswl05.py
|
SoumyaShreeram/Locating_AGN_in_DM_halos
|
1cfbee69b2c000faee4ecb199d65c3235afbed42
|
[
"MIT"
] | null | null | null |
imported_files/plotting_cswl05.py
|
SoumyaShreeram/Locating_AGN_in_DM_halos
|
1cfbee69b2c000faee4ecb199d65c3235afbed42
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Plotting.py for notebook 05_Preliminary_comparison_of_simulations_AGN_fraction_with_data
This Python file contains all the functions used for plotting graphs and maps in the 5th notebook (.ipynb) of the repository: 05. Preliminary comparison of the 𝑓MM between simulation and data
Script written by: Soumya Shreeram
Project supervised by Johan Comparat
Date created: 27th April 2021
"""
# astropy modules
import astropy.units as u
import astropy.io.fits as fits
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
from astropy.cosmology import FlatLambdaCDM, z_at_value
import numpy as np
# scipy modules
from scipy.spatial import KDTree
from scipy.interpolate import interp1d
import os
import importlib
# plotting imports
import matplotlib
from mpl_toolkits import axes_grid1
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import cm
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
import seaborn as sns
import Agn_incidence_from_Major_Mergers as aimm
import Comparison_simulation_with_literature_data as cswl
from scipy.stats import norm
def setLabel(ax, xlabel, ylabel, title='', xlim='default', ylim='default', legend=True):
    """
    Function defining plot properties
    @param ax :: axes to be held
    @param xlabel, ylabel :: labels of the x-y axis
    @param title :: title of the plot
    @param xlim, ylim :: x-y limits for the axis
    """
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    if xlim != 'default':
        ax.set_xlim(xlim)
    if ylim != 'default':
        ax.set_ylim(ylim)
    if legend:
        l = ax.legend(loc='best', fontsize=14, frameon=False)
        for legend_handle in l.legendHandles:
            legend_handle._legmarker.set_markersize(12)
    ax.grid(False)
    ax.set_title(title, fontsize=18)
    return

def plotFpairs(ax, r_p, f_pairs, f_pairs_err, label, color='r', errorbar=True):
    # changing all units to kpc
    r_p_kpc, f_pairs = 1e3 * r_p[1:], f_pairs
    # plotting the results
    ax.plot(r_p_kpc, f_pairs, 's', ls='--', color=color, label=label)
    if errorbar:
        ax.errorbar(r_p_kpc, f_pairs.value, yerr=np.array(f_pairs_err), ecolor='k', fmt='none', capsize=4.5)
    return ax

def plotScaleMMdistribution(halo_m_scale_arr_all_r, cosmo, dt_m_arr):
    """
    Function plots the number of objects in pairs as a function of the scale of last MM
    --> the cuts on delta t_mm are overplotted to see the selection criterion
    """
    fig, ax = plt.subplots(1, 1, figsize=(7, 6))
    bins = 20
    hist_all_r = np.zeros((0, bins))
    for i in range(len(halo_m_scale_arr_all_r)):
        hist_counts, a = np.histogram(halo_m_scale_arr_all_r[i], bins=bins)
        hist_all_r = np.append(hist_all_r, [hist_counts], axis=0)
        ax.plot(a[1:], hist_counts, '--', marker='d', color='k')
    scale_mm = cswl.tmmToScale(cosmo, dt_m_arr)
    pal1 = sns.color_palette("Spectral", len(scale_mm) + 1).as_hex()
    for j, l in enumerate(scale_mm):
        ax.vlines(l, np.min(hist_all_r), np.max(hist_all_r), colors=pal1[j], label=r'$t_{\rm MM}$ = %.1f Gyr' % dt_m_arr[j])
    setLabel(ax, r'Scale factor, $a$', r'Counts', '', 'default', [np.min(hist_all_r), np.max(hist_all_r)], legend=False)
    ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', frameon=False)
    ax.set_yscale('log')
    return

def plotNpSep(ax, hd_z_halo, pairs_all, color, label, mec, errorbars=True):
    """
    Function plots the n_p as a function of separation
    """
    pairs_all = np.array(pairs_all)
    # get shell volume and projected radius bins [Mpc]
    r_p, shell_volume = aimm.shellVolume()
    # get number density of pairs with and without selection cuts
    n_pairs, n_pairs_err = cswl.nPairsToFracPairs(hd_z_halo, pairs_all)
    # changing all units to kpc
    r_p_kpc, n_pairs = 1e3 * r_p[1:len(n_pairs) + 1], n_pairs
    # plotting the results
    ax.plot(r_p_kpc, n_pairs, 'd', mec=mec, ms=10, color=color, label=label)
    # errorbars
    if errorbars:
        n_pairs_err = np.array(n_pairs_err)
        ax.errorbar(r_p_kpc, np.array(n_pairs), yerr=n_pairs_err, ecolor=mec, fmt='none', capsize=4.5)
    return ax, n_pairs, n_pairs_err

def plotFracNdensityPairs(hd_z_halo, pairs_all, pairs_mm_dv_all, pairs_selected_all, plot_selected_pairs=True):
    """
    Function to plot the fractional number density of pairs for different selection criteria
    """
    flare = sns.color_palette("pastel", 5).as_hex()
    mec = ['k', '#05ad2c', '#db5807', '#a30a26', 'b']
    fig, ax = plt.subplots(1, 1, figsize=(5, 4))
    # keep the selected-pair arrays defined even when they are not plotted
    n_selected_pairs, n_selected_err = [], []
    # plotting the cases with the different cuts
    ax, n_pairs, n_pairs_err = plotNpSep(ax, hd_z_halo, pairs_all[1], 'k', r' $\mathbf{\Gamma}_{m;\ \Delta v;\ t_{\rm MM};\ \tilde{X}_{\rm off}}(r)\ $', mec[0])
    ax, n_mm_dv_pairs, n_pairs_mm_dv_err = plotNpSep(ax, hd_z_halo, pairs_mm_dv_all[1], flare[3], r'$\mathbf{\Gamma}_{t_{\rm MM};\ \tilde{X}_{\rm off}}(r|\ m;\ \Delta v)$', mec[3])
    if plot_selected_pairs:
        ax, n_selected_pairs, n_selected_err = plotNpSep(ax, hd_z_halo, pairs_selected_all[1], flare[2], r'$\mathbf{\Gamma}(r|\ m;\ \Delta v;\ t_{\rm MM};\ \tilde{X}_{\rm off} )$' + '\n' + r'$t_{\rm MM} \in [0.6-1.2]$ Gyr, $\tilde{X}_{\rm off} \in [0.17, 0.54]$', mec[1])
    ax.set_yscale("log")
    setLabel(ax, r'Separation, $r$ [kpc]', r'$\mathbf{\Gamma}(r)$ [Mpc$^{-3}$]', '', 'default', 'default', legend=False)
    ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=15, frameon=False)
    pairs_arr = np.array([n_pairs, n_mm_dv_pairs, n_selected_pairs], dtype=object)
    pairs_arr_err = np.array([n_pairs_err, n_pairs_mm_dv_err, n_selected_err], dtype=object)
    return pairs_arr, pairs_arr_err, ax

def plotCumulativeDist(vol, dt_m_arr, pairs_mm_all, pairs_mm_dv_all, n_pairs_mm_dt_all, n_pairs_mm_dv_dt_all, param='t_mm'):
    """
    Function to plot the cumulative number of pairs for the total vol (<z=2) for pairs with dz and mass ratio criteria
    """
    # get shell volume and projected radius bins [Mpc]
    r_p, _ = aimm.shellVolume()
    fig, ax = plt.subplots(1, 2, figsize=(17, 6))
    pal = sns.color_palette("coolwarm", len(dt_m_arr) + 1).as_hex()
    ax[0].plot((1e3 * r_p[1:]), (pairs_mm_all[1][1:] / (2 * vol)), 'X', color='k', label='No criterion')
    ax[1].plot((1e3 * r_p[1:]), (pairs_mm_dv_all[1][1:] / (2 * vol)), 'X', color='k', label='No criterion')
    for t_idx in range(len(dt_m_arr)):
        np_mm_dt, np_mm_dv_dt = n_pairs_mm_dt_all[t_idx], n_pairs_mm_dv_dt_all[t_idx]
        if param == 't_mm':
            label = r'$t_{\rm MM} \in$ %.1f-%.1f Gyr' % (dt_m_arr[t_idx][0], dt_m_arr[t_idx][1])
        else:
            label = r'$\tilde{X}_{\rm off} \in$ %.1f-%.1f Gyr' % (dt_m_arr[t_idx][0], dt_m_arr[t_idx][1])
        ax[0].plot((1e3 * r_p[1:]), (np_mm_dt[1:] / (2 * vol)), 'kX', label=label, color=pal[t_idx])
        ax[1].plot((1e3 * r_p[1:]), (np_mm_dv_dt[1:] / (2 * vol)), 'kX', color=pal[t_idx])
    ax[0].set_yscale('log')
    ax[1].set_yscale('log')
    setLabel(ax[0], r'Separation, $r$ [kpc]', 'Cumulative number of halo pairs\n' + r'[Mpc$^{-3}$]', r'Mass ratio 3:1, $\Delta z_{\rm R, S} < 10^{-3}$', 'default', 'default', legend=False)
    setLabel(ax[1], r'Separation, $r$ [kpc]', r'', 'Mass ratio 3:1', 'default', 'default', legend=False)
    ax[0].legend(bbox_to_anchor=(-0.5, -0.7), loc='lower left', ncol=4, frameon=False)
    return pal

def plotParameterDistributions(xoff_all, string=r'$\tilde{X}_{\rm off}$', xmax=5, filestring='xoff'):
    """
    Function to plot the parameter distribution i.e. SF and PDF
    """
    fig, ax = plt.subplots(1, 1, figsize=(7, 6))
    sf_xoff = norm.sf(np.sort(xoff_all))
    if string == r'$\tilde{X}_{\rm off}$':
        ax.plot(np.sort(xoff_all), sf_xoff, 'r-', label=r'Survival Function of ' + string)
        xmax = np.max(xoff_all)
    else:
        ax.plot(np.sort(xoff_all), 1 - sf_xoff, 'r-', label=r'CDF of ' + string)
    pdf_xoff = norm.pdf(np.sort(xoff_all))
    ax.plot(np.sort(xoff_all), pdf_xoff, 'k-', label=r'PDF of ' + string)
    setLabel(ax, string, 'Distribution of ' + string, '', [np.min(xoff_all), xmax], 'default', legend=True)
    plt.savefig('../figures/' + filestring + '_function.png', facecolor='w', edgecolor='w', bbox_inches='tight')
    return ax

def axId(i):
    if i == 0: m, n = 0, 0
    if i == 1: m, n = 0, 1
    if i == 2: m, n = 1, 0
    if i == 3: m, n = 1, 1
    return int(m), int(n)

def plotPdf(ax, arr, string, color):
    pdf_arr = norm.pdf(np.sort(arr))
    ax.plot(np.sort(arr), pdf_arr, '-', color=color, label=r'PDF of ' + string, lw=4)
    return

def saveFig(filename):
    plt.savefig('../figures/' + filename, facecolor='w', edgecolor='w', bbox_inches='tight')
    return

def plotContour(u_pix, matrix_2D, xmin=10, xmax=150, ymin=0, ymax=2, ax=None, cmap='YlGnBu'):
    """
    Function plots a contour map
    @u_pix :: number of pixels in the FOV
    @Returns :: 2D matrix
    """
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=(7, 6))
    if isinstance(u_pix, (int, float)):
        X, Y = np.meshgrid(np.linspace(0, u_pix, u_pix), np.linspace(0, u_pix, u_pix))
    if isinstance(u_pix, (list, tuple, np.ndarray)):  # if FOV is a rectangle
        X, Y = np.meshgrid(np.linspace(xmin, xmax, u_pix[0]), np.linspace(ymin, ymax, u_pix[1]))
    plot = ax.contourf(X, Y, matrix_2D, cmap=cmap, origin='image')
    return ax, plot

def labelMZTmmXoff(ax, ylabel, redshift_limit=2):
    setLabel(ax[0, 0], r'Stellar mass, $\log{M^*}$', ylabel, '', 'default', 'default', legend=False)
    setLabel(ax[0, 1], 'Redshift, $z$', '', '', [0, redshift_limit], 'default', legend=False)
    setLabel(ax[1, 0], r'$t_{\rm MM}$', ylabel, '', 'default', 'default', legend=False)
    ax[1, 0].set_xscale('log')
    setLabel(ax[1, 1], r'$\tilde{X}_{\rm off}$', '', '', 'default', 'default', legend=False)
    return

def plotBinsMZdistribution(mz_mat_tmm0, mz_mat_tmm1, tmm_bins, param=r'$t_{\rm MM} = $'):
    fig, ax = plt.subplots(2, 2, figsize=(15, 15))
    ax0, pt0 = plotContour((mz_mat_tmm0[0].shape[1], mz_mat_tmm0[0].shape[0]), mz_mat_tmm0[0], ymin=0.8, ymax=1.3, cmap='terrain', ax=ax[0, 0])
    ax1, pt1 = plotContour((mz_mat_tmm0[1].shape[1], mz_mat_tmm0[1].shape[0]), mz_mat_tmm0[1], ymin=0., ymax=2, cmap='terrain', ax=ax[1, 0])
    setLabel(ax[0, 0], '', 'Mass ratio', param + ' %.2f - %.2f' % (tmm_bins[0][0], tmm_bins[0][1]), 'default', 'default', legend=False)
    setLabel(ax[1, 0], r'Separation, $r_p$ [kpc]', 'Mean redshift', '', 'default', 'default', legend=False)
    ax2, pt2 = plotContour((mz_mat_tmm1[0].shape[1], mz_mat_tmm1[0].shape[0]), mz_mat_tmm1[0], ymin=0.8, ymax=1.3, cmap='terrain', ax=ax[0, 1])
    ax3, pt3 = plotContour((mz_mat_tmm1[1].shape[1], mz_mat_tmm1[1].shape[0]), mz_mat_tmm1[1], ymin=0., ymax=2, cmap='terrain', ax=ax[1, 1])
    setLabel(ax[0, 1], '', '', param + ' %.2f - %.2f' % (tmm_bins[1][0], tmm_bins[1][1]), 'default', 'default', legend=False)
    setLabel(ax[1, 1], r'Separation, $r_p$ [kpc]', '', '', 'default', 'default', legend=False)
    return

def snsPlotLabels():
    plt.xlabel(r'$t_{\rm MM}$ [Gyr]', fontsize=20)
    plt.ylabel(r'$\tilde{X}_{\rm off}$', fontsize=20)
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    return

def plotGaussianKde(param_arr, Z, string, i, j, set_xy_lim=True):
    xmin, xmax = np.min(param_arr[i]), np.max(param_arr[i])
    ymin, ymax = np.min(param_arr[j]), np.max(param_arr[j])
    fig, ax = plt.subplots(1, 1, figsize=(5, 5))
    ax.plot(param_arr[i], param_arr[j], 'k.', markersize=.02)
    ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax])
    if set_xy_lim:
        ax.set_xlim([xmin, xmax])
        ax.set_ylim([ymin, ymax])
    setLabel(ax, string[i], string[j], '', 'default', 'default', legend=False)
    return ax

def plotModelResults(ax, hd_halo, pairs_all, pairs_selected, vol):
    """
    Plots the models generated for bins of Tmm and Xoff
    """
    # get shell volume and projected radius bins [Mpc]
    r_p, shell_volume = aimm.shellVolume()
    # plotting the cumulative pairs
    norm = vol * len(hd_halo)
    np_all, np_selected = pairs_all / norm, pairs_selected[1] / norm
    ax[0].plot((1e3 * r_p), (np_selected), 'rX', ls='--', ms=9, label='Selected pairs')
    ax[0].plot((1e3 * r_p), (np_all), 'kX', ls='--', label='All pairs', ms=9)
    setLabel(ax[0], r'', r'Cumulative $n_{\rm halo\ pairs}$ [Mpc$^{-3}$]', '', 'default', 'default', legend=True)
    # plotting the pairs in bins of radius
    np_all_bins, np_all_bins_err = cswl.nPairsToFracPairs(hd_halo, pairs_all)
    np_selected_bins, np_selected_bins_err = cswl.nPairsToFracPairs(hd_halo, pairs_selected[1])
    _ = plotFpairs(ax[1], r_p, np_all_bins, np_all_bins_err, label='All pairs', color='k')
    _ = plotFpairs(ax[1], r_p, np_selected_bins, np_selected_bins_err, label='Selected pairs')
    ax[1].set_yscale('log')
    setLabel(ax[1], r'', r'$n_{\rm halo\ pairs}$ [Mpc$^{-3}$]', '', 'default', 'default', legend=True)
    # plotting the pairs in bins with respect to the control
    _ = plotFpairs(ax[2], r_p, np_selected_bins / np_all_bins, np_selected_bins_err, label='wrt all pairs', color='orange')
    setLabel(ax[2], r'Separation, $r$ [kpc]', r'Fraction of pairs, $f_{\rm halo\ pairs}$ ', '', 'default', 'default', legend=False)
    return np_selected_bins / np_all_bins
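# Minimal illustration of the setLabel() helper on toy data. Note that the
# legendHandles/_legmarker attributes it touches are matplotlib internals and
# may differ across versions:
fig, ax = plt.subplots(1, 1, figsize=(7, 6))
ax.plot(np.arange(10), np.arange(10) ** 2, 'k--', label='toy curve')
setLabel(ax, r'Separation, $r$ [kpc]', r'$\Gamma(r)$', title='demo', legend=True)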
| 43.193651
| 269
| 0.648317
| 2,259
| 13,606
| 3.714475
| 0.170429
| 0.005482
| 0.033369
| 0.035753
| 0.373495
| 0.26445
| 0.206888
| 0.141461
| 0.104636
| 0.086879
| 0
| 0.029322
| 0.180362
| 13,606
| 315
| 270
| 43.193651
| 0.723099
| 0.129575
| 0
| 0.101064
| 0
| 0.021277
| 0.141278
| 0.003855
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.117021
| 0
| 0.287234
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e83557731c2fd4923e8fa481bc7d1048e5e106e
| 985
|
py
|
Python
|
codetree/cli.py
|
slank/codetree
|
c1aad059ad31aa1b3cca80a89861c659fce217ac
|
[
"MIT"
] | 2
|
2015-03-16T11:46:28.000Z
|
2017-04-01T13:58:47.000Z
|
codetree/cli.py
|
slank/codetree
|
c1aad059ad31aa1b3cca80a89861c659fce217ac
|
[
"MIT"
] | null | null | null |
codetree/cli.py
|
slank/codetree
|
c1aad059ad31aa1b3cca80a89861c659fce217ac
|
[
"MIT"
] | null | null | null |
from argparse import ArgumentParser
import logging
from .config import Config
import sys
def main():
    ap = ArgumentParser()
    ap.add_argument("cfgfile", nargs="+", help="Codetree configuration file")
    verbosity = ap.add_mutually_exclusive_group(required=False)
    verbosity.add_argument("-v", "--verbose", action="store_true", default=False)
    verbosity.add_argument("-q", "--quiet", action="store_true", default=False)
    verbosity.add_argument("-f", "--fatality", action="store_true", default=False,
                           help="Any error is fatal")
    args = ap.parse_args()
    logfmt = "%(message)s"
    loglevel = logging.INFO
    if args.verbose:
        logfmt = "%(levelname)s: %(message)s"
        loglevel = logging.DEBUG
    if args.quiet:
        loglevel = logging.CRITICAL
    logging.basicConfig(format=logfmt, level=loglevel)
    config = Config(args.cfgfile)
    if config.build(args.fatality):
        sys.exit(0)
    else:
        sys.exit(1)
| 30.78125
| 82
| 0.655838
| 117
| 985
| 5.42735
| 0.487179
| 0.069291
| 0.080315
| 0.11811
| 0.190551
| 0.148032
| 0.148032
| 0.148032
| 0
| 0
| 0
| 0.002567
| 0.209137
| 985
| 31
| 83
| 31.774194
| 0.81258
| 0
| 0
| 0
| 0
| 0
| 0.154315
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.153846
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e85d5b6b7bc4a9b52702783da32bcd642bd2255
| 5,862
|
py
|
Python
|
notebooks/utils.py
|
cognoma/ml-workers
|
781763c8361d49023222c7349350c3c4774ce4fa
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/utils.py
|
cognoma/ml-workers
|
781763c8361d49023222c7349350c3c4774ce4fa
|
[
"BSD-3-Clause"
] | 13
|
2017-01-31T22:54:03.000Z
|
2021-02-02T21:42:33.000Z
|
notebooks/utils.py
|
cognoma/ml-workers
|
781763c8361d49023222c7349350c3c4774ce4fa
|
[
"BSD-3-Clause"
] | 7
|
2017-06-29T14:19:11.000Z
|
2018-04-08T12:06:21.000Z
|
"""
Methods for building Cognoma mutation classifiers
Usage - Import only
"""
import pandas as pd
from sklearn.metrics import roc_curve, roc_auc_score
import plotnine as gg
def theme_cognoma(fontsize_mult=1):
    return (gg.theme_bw(base_size=14 * fontsize_mult) +
            gg.theme(line=gg.element_line(color="#4d4d4d"),
                     rect=gg.element_rect(fill="white", color=None),
                     text=gg.element_text(color="black"),
                     axis_ticks=gg.element_line(color="#4d4d4d"),
                     legend_key=gg.element_rect(color=None),
                     panel_border=gg.element_rect(color="#4d4d4d"),
                     panel_grid=gg.element_line(color="#b3b3b3"),
                     panel_grid_major_x=gg.element_blank(),
                     panel_grid_minor=gg.element_blank(),
                     strip_background=gg.element_rect(fill="#FEF2E2",
                                                      color="#4d4d4d"),
                     axis_text=gg.element_text(size=12 * fontsize_mult,
                                               color="#4d4d4d"),
                     axis_title_x=gg.element_text(size=13 * fontsize_mult,
                                                  color="#4d4d4d"),
                     axis_title_y=gg.element_text(size=13 * fontsize_mult,
                                                  color="#4d4d4d")))

def get_model_coefficients(classifier, feature_set, covariate_names):
    """
    Extract the feature names and associate them with the coefficient values
    in the final classifier object.
    * Only works for expressions-only model with PCA, covariates-only model,
      and a combined model
    * Assumes the PCA features come before any covariates that are included
    * Sorts the final dataframe by the absolute value of the coefficients
    Args:
        classifier: the final sklearn classifier object
        feature_set: string of the model's name {expressions, covariates, full}
        covariate_names: list of the names of the covariate features matrix
    Returns:
        pandas.DataFrame: mapping of feature name to coefficient value
    """
    import pandas as pd
    import numpy as np
    coefs = classifier.coef_[0]
    if feature_set == 'expressions':
        features = ['PCA_%d' % cf for cf in range(len(coefs))]
    elif feature_set == 'covariates':
        features = covariate_names
    else:
        features = ['PCA_%d' % cf for cf in range(len(coefs) - len(covariate_names))]
        features.extend(covariate_names)
    coef_df = pd.DataFrame({'feature': features, 'weight': coefs})
    coef_df['abs'] = coef_df['weight'].abs()
    coef_df = coef_df.sort_values('abs', ascending=False)
    coef_df['feature_set'] = feature_set
    return coef_df

def get_genes_coefficients(pca_object, classifier_object,
                           expression_df, expression_genes_df,
                           num_covariates=None):
    """Identify gene coefficients from classifier after pca.
    Args:
        pca_object: The pca object from running pca on the expression_df.
        classifier_object: The logistic regression classifier object.
        expression_df: The original (pre-pca) expression data frame.
        expression_genes_df: The "expression_genes" dataframe used for gene
            names.
        num_covariates: Optional, only needed if PCA was only performed on a
            subset of the features. This should be the number of features
            that PCA was not performed on. This function assumes that the
            covariates features were at the end.
    Returns:
        gene_coefficients_df: A dataframe with Entrez gene ID, gene name,
            coefficient, absolute value of coefficient, and gene description.
            The dataframe is sorted by absolute value of coefficient.
    """
    # Get the classifier coefficients.
    if num_covariates:
        coefficients = classifier_object.coef_[0][0:-num_covariates]
    else:
        coefficients = classifier_object.coef_[0]
    # Get the pca weights
    weights = pca_object.components_
    # Combine the coefficients and weights
    gene_coefficients = weights.T @ coefficients.T
    # Create the dataframe with correct index
    gene_coefficients_df = pd.DataFrame(gene_coefficients, columns=['weight'])
    gene_coefficients_df.index = expression_df.columns
    gene_coefficients_df.index.name = 'entrez_id'
    expression_genes_df.index = expression_genes_df.index.map(str)
    # Add gene symbol and description
    gene_coefficients_df['symbol'] = expression_genes_df['symbol']
    gene_coefficients_df['description'] = expression_genes_df['description']
    # Add absolute value and sort by highest absolute value.
    gene_coefficients_df['abs'] = gene_coefficients_df['weight'].abs()
    gene_coefficients_df.sort_values(by='abs', ascending=False, inplace=True)
    # Reorder columns
    gene_coefficients_df = gene_coefficients_df[['symbol', 'weight', 'abs',
                                                 'description']]
    return gene_coefficients_df

def select_feature_set_columns(X, feature_set, n_covariates):
    """
    Select the feature set for the different models within the pipeline
    """
    if feature_set == 'covariates':
        return X[:, :n_covariates]
    if feature_set == 'expressions':
        return X[:, n_covariates:]
    raise ValueError('feature_set not supported: {}'.format(feature_set))

def get_threshold_metrics(y_true, y_pred):
    roc_columns = ['fpr', 'tpr', 'threshold']
    roc_items = zip(roc_columns, roc_curve(y_true, y_pred))
    roc_df = pd.DataFrame.from_items(roc_items)
    auroc = roc_auc_score(y_true, y_pred)
    return {'auroc': auroc, 'roc_df': roc_df}
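# Toy example for get_threshold_metrics(), using the classic sklearn inputs.
# Note: pd.DataFrame.from_items was removed in pandas >= 1.0; on newer pandas,
# pd.DataFrame(dict(roc_items)) is the equivalent construction.
y_true = [0, 0, 1, 1]
y_pred = [0.1, 0.4, 0.35, 0.8]
metrics = get_threshold_metrics(y_true, y_pred)
print(metrics['auroc'])   # 0.75 for this input
print(metrics['roc_df'])  # fpr / tpr / threshold table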
| 42.788321
| 85
| 0.638519
| 703
| 5,862
| 5.105263
| 0.280228
| 0.066871
| 0.060184
| 0.015046
| 0.092784
| 0.053775
| 0.042352
| 0.042352
| 0.042352
| 0.018947
| 0
| 0.009218
| 0.278233
| 5,862
| 136
| 86
| 43.102941
| 0.839045
| 0.330604
| 0
| 0.128571
| 0
| 0
| 0.080988
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0.014286
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e85eafe88b2abc4b10f2eb6623ed07ecab6567b
| 1,740
|
py
|
Python
|
docs/fossil-help-cmd.py
|
smitty1eGH/pyphlogiston
|
5134be190cdb31ace04ac5ce2e699a48e54e036e
|
[
"MIT"
] | null | null | null |
docs/fossil-help-cmd.py
|
smitty1eGH/pyphlogiston
|
5134be190cdb31ace04ac5ce2e699a48e54e036e
|
[
"MIT"
] | null | null | null |
docs/fossil-help-cmd.py
|
smitty1eGH/pyphlogiston
|
5134be190cdb31ace04ac5ce2e699a48e54e036e
|
[
"MIT"
] | null | null | null |
from subprocess import run
cmds = [
"3-way-merge",
"ci",
"help",
"push",
"stash",
"add",
"clean",
"hook",
"rebuild",
"status",
"addremove",
"clone",
"http",
"reconstruct",
"sync",
"alerts",
"close",
"import",
"redo",
"tag",
"all",
"co",
"info",
"remote",
"tarball",
"amend",
"commit",
"init",
"remote-url",
"ticket",
"annotate",
"configuration",
"interwiki",
"rename",
"timeline",
"artifact",
"dbstat",
"json",
"reparent",
"tls-config",
"attachment",
"deconstruct",
"leaves",
"revert",
"touch",
"backoffice",
"delete",
"login-group",
"rm",
"ui",
"backup",
"descendants",
"ls",
"rss",
"undo",
"bisect",
"diff",
"md5sum",
"scrub",
"unpublished",
"blame",
"export",
"merge",
"search",
"unset",
"branch",
"extras",
"mv",
"server",
"unversioned",
"bundle",
"finfo",
"new",
"settings",
"update",
"cache",
"forget",
"open",
"sha1sum",
"user",
"cat",
"fts-config",
"pikchr",
"sha3sum",
"uv",
"cgi",
"gdiff",
"praise",
"shell",
"version",
"changes",
"git",
"publish",
"sql",
"whatis",
"chat",
"grep",
"pull",
"sqlar",
"wiki",
"checkout",
"hash-policy",
"purge",
"sqlite3",
"zip",
]
with open("fossile-cmds-help.org", "w") as f:
for c in cmds:
d = run(
["/home/osboxes/src/fossil-snapshot-20210429/fossil", "help", c],
capture_output=True,
)
f.write(d.stdout.decode("utf-8"))
| 14.745763
| 77
| 0.440805
| 151
| 1,740
| 5.072848
| 0.907285
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012335
| 0.347701
| 1,740
| 117
| 78
| 14.871795
| 0.662555
| 0
| 0
| 0
| 0
| 0
| 0.399425
| 0.04023
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.017391
| 0
| 0.017391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e8aa5fdaccdc2cf8e079b7b4e650e213a55472a
| 1,154
|
py
|
Python
|
monitor.py
|
projectsbyif/trillian-demo-audit
|
5bb08ae3c359698d8beb47ced39d21e793539396
|
[
"Apache-2.0"
] | null | null | null |
monitor.py
|
projectsbyif/trillian-demo-audit
|
5bb08ae3c359698d8beb47ced39d21e793539396
|
[
"Apache-2.0"
] | 1
|
2021-06-02T02:13:46.000Z
|
2021-06-02T02:13:46.000Z
|
monitor.py
|
projectsbyif/trillian-demo-audit
|
5bb08ae3c359698d8beb47ced39d21e793539396
|
[
"Apache-2.0"
] | null | null | null |
import logging
import sys
from trillian import TrillianLog
from print_helper import Print
from pprint import pprint
def main(argv):
    logging.basicConfig(level=logging.INFO)
    trillian_log = TrillianLog.load_from_environment()
    Print.status('Checking signature on signed log root')
    validated_log_root = trillian_log.get_log_root()
    Print.tick('Log root is signed correctly by public key')
    # * do full audit between hash[previous] and hash[current]
    # * do consistency check between hash[previous] and hash[current]
    Print.status('Rebuilding Merkle tree from {} entries to get root '
                 'hash'.format(validated_log_root.tree_size))
    Print.bullet('Looking for root hash: {}'.format(
        validated_log_root.root_hash))
    if trillian_log.full_audit(validated_log_root):
        Print.bullet('Calculated root hash: {}'.format(
            validated_log_root.root_hash))
        Print.tick('Root hashes match, Merkle tree appears correct')
    Print.status('Showing latest log entry')
    Print.normal(str(trillian_log.latest().json()))
    print()

if __name__ == '__main__':
    main(sys.argv)
| 26.227273
| 70
| 0.707972
| 152
| 1,154
| 5.171053
| 0.427632
| 0.071247
| 0.101781
| 0.087786
| 0.21883
| 0.21883
| 0.096692
| 0.096692
| 0
| 0
| 0
| 0
| 0.195841
| 1,154
| 43
| 71
| 26.837209
| 0.846983
| 0.103986
| 0
| 0.083333
| 0
| 0
| 0.254122
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.208333
| 0
| 0.25
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e8b21d90213008722c8b31b5d6059ea9e59aa07
| 875
|
py
|
Python
|
src/geocurrency/units/urls.py
|
OpenPrunus/geocurrency
|
23cc075377d47ac631634cd71fd0e7d6b0a57bad
|
[
"MIT"
] | 5
|
2021-01-28T16:45:49.000Z
|
2021-08-15T06:47:17.000Z
|
src/geocurrency/units/urls.py
|
OpenPrunus/geocurrency
|
23cc075377d47ac631634cd71fd0e7d6b0a57bad
|
[
"MIT"
] | 8
|
2020-10-01T15:12:45.000Z
|
2021-10-05T14:45:33.000Z
|
src/geocurrency/units/urls.py
|
OpenPrunus/geocurrency
|
23cc075377d47ac631634cd71fd0e7d6b0a57bad
|
[
"MIT"
] | 2
|
2021-01-28T16:43:16.000Z
|
2021-10-05T14:25:25.000Z
|
"""
Units module URLs
"""
from django.conf.urls import url, include
from django.urls import path
from rest_framework import routers
from .viewsets import UnitSystemViewset, UnitViewset, \
    ConvertView, CustomUnitViewSet
from geocurrency.calculations.viewsets import ValidateViewSet, CalculationView

app_name = 'units'

router = routers.DefaultRouter()
router.register(r'', UnitSystemViewset, basename='unit_systems')
router.register(r'(?P<system_name>\w+)/units',
                UnitViewset, basename='units')
router.register(r'(?P<system_name>\w+)/custom',
                CustomUnitViewSet, basename='custom')

urlpatterns = [
    path('convert/', ConvertView.as_view()),
    path('<str:unit_system>/formulas/validate/', ValidateViewSet.as_view()),
    path('<str:unit_system>/formulas/calculate/', CalculationView.as_view()),
    url(r'^', include(router.urls)),
]
| 31.25
| 78
| 0.726857
| 98
| 875
| 6.387755
| 0.438776
| 0.067093
| 0.071885
| 0.051118
| 0.185304
| 0.185304
| 0.185304
| 0
| 0
| 0
| 0
| 0
| 0.131429
| 875
| 27
| 79
| 32.407407
| 0.823684
| 0.019429
| 0
| 0
| 0
| 0
| 0.191765
| 0.148235
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.263158
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e8f20f780d781f8cdc23f8a2e62a4a9d0aaaf14
| 6,451
|
py
|
Python
|
randominette.py
|
Dutesier/randominette
|
2260c0f521d9fcc97f30a8cceb36c94dbee3d474
|
[
"MIT"
] | null | null | null |
randominette.py
|
Dutesier/randominette
|
2260c0f521d9fcc97f30a8cceb36c94dbee3d474
|
[
"MIT"
] | null | null | null |
randominette.py
|
Dutesier/randominette
|
2260c0f521d9fcc97f30a8cceb36c94dbee3d474
|
[
"MIT"
] | 2
|
2022-01-19T00:27:59.000Z
|
2022-01-19T03:46:21.000Z
|
# **************************************************************************** #
# #
# ::: :::::::: #
# randominette.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: ayalla, sotto & dutesier +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2022/01/13 18:14:29 by dareias- #+# #+# #
# Updated: 2022/01/20 13:10:47 by dareias- ### ########.fr #
# #
# **************************************************************************** #
import requests
import json
import random
import sys
import pprint
from decouple import config
import time
def main():
    my_time = 1
    argc = len(sys.argv)
    pmode = 0
    if argc > 1 and sys.argv[1].find("c") > 0:
        pmode = 1
    if len(sys.argv) > 1 and sys.argv[1].find("s") > 0:
        # Get Campus ID and Cluster from user
        campus = int(input("Campus ID (38 for Lisbon): "))
        cluster = int(input("Cluster: "))
        my_time = int(input("Time between requests (change at your own risk): "))
    else:
        campus = 38
        cluster = 1
    if (my_time < 0):
        my_time = 1
        print("We're not time travelers - time set to 1 second")
    client_id = config('42-UID')
    client_secret = config('42-SECRET')
    # Get authorization token
    token_url = "https://api.intra.42.fr/oauth/token"
    data = {
        "grant_type": "client_credentials",
        "client_id": client_id,
        "client_secret": client_secret
    }
    access_token = requests.post(
        token_url,
        data,
    )
    ret = access_token
    if ret.status_code != 200:
        return(print(f"Error: Failed to get OAUTH2 token: {ret.status_code}"))
    ret = ret.json()
    # Set pagination
    page = {
        "number": 1,
        "size": 100
    }
    # Pass our authorization token as a header
    headers = {
        "Authorization": f"{ret['token_type']} {ret['access_token']}",
    }
    # Pass our pagination definitions as a dict
    params = {
        "page": page
    }
    time.sleep(my_time)
    # Get info from the API
    url = f'https://api.intra.42.fr/v2/campus/{campus}/locations?sort=-end_at,host&filter[active]=true&range[host]=c{cluster}, c{cluster + 1}r00s00'
    ret = requests.get(url, headers=headers, json=params)
    if ret.status_code != 200:
        return(print(f"Error: Failed to GET from {url}: Got status code {ret.status_code}"))
    users_in_campus = ret.json()
    i = 0
    if len(sys.argv) > 1 and sys.argv[1].find("l") > 0:
        # pprint.pprint(users_in_campus)
        print_user_info(users_in_campus)
    if len(users_in_campus) == 0:
        return(print(f"There are currently {i} active users in cluster {cluster} at campus {campus}"))
    # Check if we have all elements or if there are more pages
    if 'Link' in ret.headers and len(users_in_campus) == page['size']:
        while True:
            time.sleep(my_time)
            page['number'] = page['number'] + 1
            ret = requests.get(url, headers=headers, json=params)
            second_page = ret.json()
            users_in_campus = users_in_campus + second_page
            if len(second_page) != page['size']:
                break
    # Get amount of active users
    for student in users_in_campus:
        i = i + 1
    print(f"There are currently {i} active users in cluster {cluster} at campus {campus}")
    if i == 0:
        return
    chosen_one = random_user(users_in_campus)
    print("The Chosen One is: ")
    if pmode:
        print(users_in_campus[chosen_one]['user']['location'])
    else:
        print(users_in_campus[chosen_one]['user']['login'])
        print(users_in_campus[chosen_one]['user']['location'])
    # Pick all users from the random user's row
    if len(sys.argv) > 1 and sys.argv[1].find("r") > 0:
        row = get_user_row(users_in_campus[chosen_one]['user']['location'])
        if row:
            print(f"The Chosen Row is {row}, and the unlucky ones are: ")
            for student in users_in_campus:
                if (get_user_row(student['user']['location']) == row):
                    if pmode:
                        print(student['user']['location'], end=" ")
                    else:
                        print(student['user']['login'])
                        print(student['user']['location'])
            if pmode:
                print("")
    # Pick a random percentage of users to be randomly selected
    if len(sys.argv) > 1 and sys.argv[1].find("p") > 0:
        while (True):
            percentage = int(input("Percentage of victims (%): "))
            if (percentage <= 100 and percentage > 0):
                break
            else:
                print("Percentage must be between 0 and 100")
        number_users = int(len(users_in_campus) * (percentage / 100))
        if number_users <= 0:
            return (print(f"The percentage {percentage}% translates to a total of 0 users"))
        sample = random_users(users_in_campus, number_users)
        # Print chosen users
        for n in sample:
            if pmode:
                print(users_in_campus[n]['user']['location'], end=" ")
            else:
                print(users_in_campus[n]['user']['login'])
                print(users_in_campus[n]['user']['location'])
        if pmode:
            print("")

def random_users(users_in_campus, nu):
    i = len(users_in_campus)
    if (i == 1):
        sample = [0]
    else:
        sample = random.sample(range(i), nu)
    return (sample)

def random_user(users_in_campus):
    # Pick a random user
    i = len(users_in_campus)
    if i > 1:
        chosen_one = random.randrange(0, i - 1)
    if i == 1:
        chosen_one = 0
    return (chosen_one)

def print_user_info(users_in_campus):
    for student in users_in_campus:
        print(f"user: {student['user']['login']}\tloc: {student['user']['location']}")

def get_user_row(location):
    return (location[location.find("r"):location.find("s")])

if __name__ == '__main__':
    main()
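# Sanity check for random_users() with a fabricated user list (the structure
# mirrors the API payload fields used above):
fake_users = [{"user": {"login": f"user{n}", "location": f"c1r{n}s1"}} for n in range(10)]
print(random_users(fake_users, 3))  # e.g. [7, 2, 5]: three distinct indices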
| 38.39881
| 148
| 0.509068
| 769
| 6,451
| 4.126138
| 0.217165
| 0.059565
| 0.102427
| 0.034037
| 0.338481
| 0.268831
| 0.208951
| 0.168925
| 0.105263
| 0.105263
| 0
| 0.024436
| 0.340257
| 6,451
| 167
| 149
| 38.628743
| 0.7211
| 0.196714
| 0
| 0.223881
| 0
| 0.007463
| 0.220769
| 0.016006
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037313
| false
| 0
| 0.052239
| 0.007463
| 0.126866
| 0.179104
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e918c5815dd4774b7932aa1ec3b9fffa1176641
| 750
|
py
|
Python
|
newsman/factories.py
|
acapitanelli/newsman
|
3f109f42afe6131383fba1e118b7b9457d76096b
|
[
"MIT"
] | null | null | null |
newsman/factories.py
|
acapitanelli/newsman
|
3f109f42afe6131383fba1e118b7b9457d76096b
|
[
"MIT"
] | null | null | null |
newsman/factories.py
|
acapitanelli/newsman
|
3f109f42afe6131383fba1e118b7b9457d76096b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""This module provides a way to initialize components for processing
pipeline.
Init functions are stored into a dictionary which can be used by `Pipeline` to
load components on demand.
"""
from .pipeline import Byte2html, Html2text, Html2image, Html2meta, Text2title
def build_factories():
    """Creates default factories for Processor."""
    factories = {
        'byte2html': lambda config: Byte2html(config),
        'html2text': lambda config: Html2text(config),
        'html2image': lambda config: Html2image(config),
        'html2meta': lambda config: Html2meta(config),
        'text2title': lambda config: Text2title(config),
    }
    return factories
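# Usage sketch: instantiating a component on demand from the factory map.
# The `config` dict here is a hypothetical placeholder; the real pipeline
# components define their own configuration schema.
factories = build_factories()
config = {}  # placeholder configuration
html2text_component = factories['html2text'](config)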
| 32.608696
| 78
| 0.698667
| 81
| 750
| 6.45679
| 0.54321
| 0.137667
| 0.08413
| 0.107075
| 0.156788
| 0.156788
| 0
| 0
| 0
| 0
| 0
| 0.0299
| 0.197333
| 750
| 22
| 79
| 34.090909
| 0.83887
| 0.329333
| 0
| 0
| 0
| 0
| 0.11609
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e9740ebd2a997095586f788ec3e7c7b37619818
| 9,622
|
py
|
Python
|
hbgd_data_store_server/studies/management/commands/load_idx.py
|
pcstout/study-explorer
|
b49a6853d8155f1586360138ed7f87d165793184
|
[
"Apache-2.0"
] | 2
|
2019-04-02T14:31:27.000Z
|
2020-04-13T20:41:46.000Z
|
hbgd_data_store_server/studies/management/commands/load_idx.py
|
pcstout/study-explorer
|
b49a6853d8155f1586360138ed7f87d165793184
|
[
"Apache-2.0"
] | 7
|
2019-08-07T14:44:54.000Z
|
2020-06-05T17:30:51.000Z
|
hbgd_data_store_server/studies/management/commands/load_idx.py
|
pcstout/study-explorer
|
b49a6853d8155f1586360138ed7f87d165793184
|
[
"Apache-2.0"
] | 1
|
2019-03-27T01:32:30.000Z
|
2019-03-27T01:32:30.000Z
|
# Copyright 2017-present, Bill & Melinda Gates Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import zipfile
import fnmatch
from pandas import read_csv
from django.core.management.base import BaseCommand, CommandError
from ...models import Study, Count, Variable, Domain, EMPTY_IDENTIFIERS
# Regex file pattern defining the naming convention of IDX files
FILE_PATTERN = r'^IDX_(\w*)\.csv'
# Suffixes of domain name, code and category columns
# e.g. LB domain columns are LBTEST, LBTESTCD and LBCAT
DOMAIN_FORMAT = '{domain}TEST'
DOMAIN_CODE_FORMAT = '{domain}TESTCD'
DOMAIN_CAT_FORMAT = '{domain}CAT'
def get_study(row, study_cache=None, **kwargs):
    """
    Finds the study for an entry.
    """
    study_id_field = kwargs['study_id_field']
    if not study_cache:
        study_cache = {}
    study_id = row[study_id_field]
    if study_id in EMPTY_IDENTIFIERS:
        return None
    elif study_id in study_cache:
        return study_cache[study_id]
    study, _ = Study.objects.get_or_create(study_id=study_id)
    study_cache[study_id] = study
    return study

def get_domain_variable(row, domain, variable_cache=None):
    """
    Get a Variable model specifying the rows domain, category and
    code.
    """
    if not variable_cache:
        variable_cache = {}
    decode_idx = DOMAIN_FORMAT.format(domain=domain.code)
    code_idx = DOMAIN_CODE_FORMAT.format(domain=domain.code)
    cat_idx = DOMAIN_CAT_FORMAT.format(domain=domain.code)
    code = row[code_idx]
    if code in EMPTY_IDENTIFIERS:
        return None
    attrs = dict(domain=domain, code=code)
    cache_key = (domain.id, code)
    if cache_key in variable_cache:
        return variable_cache[cache_key]
    try:
        var = Variable.objects.get(**attrs)
    except Variable.DoesNotExist:
        category = row.get(cat_idx)
        if category not in EMPTY_IDENTIFIERS:
            attrs['category'] = category
        var = Variable.objects.create(label=row[decode_idx], **attrs)
    variable_cache[cache_key] = var
    return var

def get_qualifiers(row, valid_qualifiers, qualifier_cache=None):
    """
    Extract qualifier variables from row
    """
    if not qualifier_cache:
        qualifier_cache = {}
    qualifiers = []
    for qualifier, qual_code, suffix in valid_qualifiers:
        code = row.get(qual_code + suffix)
        if code in EMPTY_IDENTIFIERS:
            raise ValueError('Qualifiers cannot be empty')
        elif isinstance(code, float) and code.is_integer():
            code = int(code)
        attrs = dict(domain=qualifier, code=str(code))
        cache_key = (qualifier.id, str(code))
        if cache_key in qualifier_cache:
            qualifiers.append(qualifier_cache[cache_key])
            continue
        try:
            var = Variable.objects.get(**attrs)
        except Variable.DoesNotExist:
            var = Variable.objects.create(label=row[qual_code],
                                          **attrs)
        qualifier_cache[cache_key] = var
        qualifiers.append(var)
    return qualifiers

def get_valid_qualifiers(columns):
    """
    Returns a list of the valid qualifier columns.
    """
    valid_qualifiers = []
    qualifiers = Domain.objects.filter(is_qualifier=True)
    for qual in qualifiers:
        wildcard_re = fnmatch.translate(qual.code)
        cols = [col for col in columns if re.match(wildcard_re, col)]
        if not cols:
            continue
        elif len(cols) > 1:
            raise Exception('Qualifier code must match only one column per file.')
        qual_code = cols[0]
        suffix_re = qual_code + r'(\w{1,})'
        potential_suffixes = [re.match(suffix_re, col).group(1) for col in columns
                              if re.match(suffix_re, col)]
        suffix = ''
        if len(potential_suffixes) > 0:
            suffix = potential_suffixes[0]
        valid_qualifiers.append((qual, qual_code, suffix))
    return valid_qualifiers

def process_idx_df(df, domain, **kwargs):
    """
    Process an IDX csv file, creating Code, Count and Study
    objects.
    """
    count_subj_field = kwargs['count_subj_field']
    count_obs_field = kwargs['count_obs_field']
    study_id_field = kwargs['study_id_field']
    for required in [study_id_field, count_subj_field, count_obs_field]:
        if required not in df.columns:
            raise ValueError('IDX file does not contain %s column, '
                             'skipping.' % required)
    valid_qualifiers = get_valid_qualifiers(df.columns)
    study_cache, variable_cache, qualifier_cache = {}, {}, {}
    df = df.fillna('NaN')
    for _, row in df.iterrows():
        count = row[count_obs_field]
        subjects = row[count_subj_field]
        if any(c in EMPTY_IDENTIFIERS for c in (count, subjects)):
            continue
        try:
            qualifiers = get_qualifiers(row, valid_qualifiers, qualifier_cache)
        except ValueError:
            continue
        study = get_study(row, study_cache, **kwargs)
        if not study:
            continue
        variable = get_domain_variable(row, domain, variable_cache)
        if variable:
            qualifiers = [variable] + qualifiers
        query = Count.objects.create(count=count, subjects=subjects, study=study)
        query.codes = qualifiers
        query.save()

class Command(BaseCommand):
    help = """
    Loads queries into database given one or more IDX csv files or zip
    files containing IDX csv files (disregarding all zipfile structure).
    """

    def add_arguments(self, parser):
        parser.add_argument('files', nargs='+', type=str,
                            help='One or more csv or zip files')
        parser.add_argument('-study_id_field', type=str, default='STUDYID',
                            help='Name of column to use as study_id.')
        parser.add_argument('-count_subj_field', type=str, default='COUNT_SUBJ',
                            help='Name of column to use as subject count.')
        parser.add_argument('-count_obs_field', type=str, default='COUNT_OBS',
                            help='Name of column to use as observation count.')
        parser.add_argument('--clear', action='store_true',
                            default=True, dest='clear',
                            help='Clear database before processing data.')

    def process_file(self, filepath, zip_file=None, **kwargs):
        # Ensure the file matches the FILE_PATTERN
        basename = os.path.basename(filepath)
        match = re.search(FILE_PATTERN, basename)
        if not match:
            return False
        # Ensure that Domain exists
        domain = match.group(1)
        try:
            domain = Domain.objects.get(code=domain)
        except Domain.DoesNotExist:
            return False
        # Load file
        try:
            if zip_file:
                with zip_file.open(filepath) as f:
                    df = read_csv(f)
            else:
                with open(filepath) as f:
                    df = read_csv(f)
        except:
            self.stderr.write('%s could not be read ensure '
                              'it is a valid csv file.' % basename)
            return False
        # Process dataframe
        self.stdout.write('Processing %s' % basename)
        try:
            process_idx_df(df, domain, **kwargs)
        except ValueError as e:
            self.stderr.write(str(e))
        return True

    def handle(self, *args, **options):
        if options['clear']:
            queries = Count.objects.all()
            self.stdout.write('Deleting %s counts' % len(queries))
            queries.delete()
            codes = Variable.objects.all()
            self.stdout.write('Deleting %s variables' % len(codes))
            codes.delete()
        n_queries = Count.objects.count()
        n_studies = Study.objects.count()
        n_codes = Variable.objects.count()
        processed = False
        for f in options['files']:
            if f.endswith('.csv'):
                if not re.search(FILE_PATTERN, os.path.basename(f)):
                    self.stderr.write('Processing %s skipped, does '
                                      'not match %s naming convention.'
                                      % (f, FILE_PATTERN))
                    continue
                processed = self.process_file(f, **options)
            elif f.endswith('.zip') or f.endswith('.upload'):
                zip_file = zipfile.ZipFile(f)
                for zf in zip_file.filelist:
                    processed |= self.process_file(zf.filename, zip_file, **options)
        if not processed:
            raise CommandError('None of the supplied files could '
                               'be processed.')
        self.stdout.write('Wrote %s Study entries' %
                          (Study.objects.count() - n_studies))
        self.stdout.write('Wrote %s Variable entries' %
                          (Variable.objects.count() - n_codes))
        self.stdout.write('Wrote %s Count entries' %
                          (Count.objects.count() - n_queries))
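# The command can also be invoked programmatically through Django's documented
# call_command() API (the file name IDX_LB.csv is illustrative):
from django.core.management import call_command

call_command('load_idx', 'IDX_LB.csv', clear=True)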
| 35.902985
| 84
| 0.610788
| 1,168
| 9,622
| 4.884418
| 0.224315
| 0.018405
| 0.014724
| 0.00894
| 0.206661
| 0.141455
| 0.100438
| 0.028046
| 0.019281
| 0
| 0
| 0.002214
| 0.295884
| 9,622
| 267
| 85
| 36.037453
| 0.839852
| 0.113074
| 0
| 0.139896
| 0
| 0
| 0.117187
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041451
| false
| 0
| 0.036269
| 0
| 0.150259
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e98642f2b6b958a07ac0e545cf862d4394aa56c
| 786
|
py
|
Python
|
Thread.PY/thread-rlock.py
|
Phoebus-Ma/Python-Helper
|
d880729f0bbfbc2b1503602fd74c9177ecd4e970
|
[
"MIT"
] | null | null | null |
Thread.PY/thread-rlock.py
|
Phoebus-Ma/Python-Helper
|
d880729f0bbfbc2b1503602fd74c9177ecd4e970
|
[
"MIT"
] | null | null | null |
Thread.PY/thread-rlock.py
|
Phoebus-Ma/Python-Helper
|
d880729f0bbfbc2b1503602fd74c9177ecd4e970
|
[
"MIT"
] | null | null | null |
###
# Thread rlock test.
#
# License - MIT.
###
import time
from threading import Thread, RLock
# thread_test2 - Thread test2 function.
def thread_test2(rlock):
# {
time.sleep(0.5)
rlock.acquire()
print('Third acquire.')
rlock.release()
# }
# thread_test1 - Thread test1 function.
def thread_test1(rlock):
# {
rlock.acquire()
print('First acquire.')
rlock.acquire()
print('Second acquire.')
rlock.release()
rlock.release()
# }
# Main function.
def main():
# {
# Create RLock
thrd_rlock = RLock()
thrd1 = Thread(target=thread_test1, args=(thrd_rlock,))
thrd2 = Thread(target=thread_test2, args=(thrd_rlock,))
thrd1.start()
thrd2.start()
# }
# Program entry.
if __name__ == '__main__':
main()
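# Contrast sketch: a plain Lock is not reentrant, so the nested acquire()
# pattern in thread_test1 above would block forever with Lock. RLock lets
# the owning thread re-acquire, provided every acquire is paired with a
# release. (Illustrative; the deadlocking call is left commented out.)
from threading import Lock

def lock_is_not_reentrant():
    lock = Lock()
    lock.acquire()
    # lock.acquire()  # with a plain Lock, this second acquire never returns
    lock.release()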
| 14.290909
| 64
| 0.619593
| 90
| 786
| 5.222222
| 0.377778
| 0.093617
| 0.108511
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023217
| 0.232824
| 786
| 54
| 65
| 14.555556
| 0.756219
| 0.209924
| 0
| 0.272727
| 0
| 0
| 0.085142
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.090909
| 0
| 0.227273
| 0.136364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e98aa2320fefc8b613e9eb26ab879e97d03ea24
| 1,319
|
py
|
Python
|
api/python/tests/test_bingo_nosql.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 204
|
2015-11-06T21:34:34.000Z
|
2022-03-30T16:17:01.000Z
|
api/python/tests/test_bingo_nosql.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 509
|
2015-11-05T13:54:43.000Z
|
2022-03-30T22:15:30.000Z
|
api/python/tests/test_bingo_nosql.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 89
|
2015-11-17T08:22:54.000Z
|
2022-03-17T04:26:28.000Z
|
import shutil
import tempfile
from indigo.bingo import Bingo
from tests import TestIndigoBase
class TestBingo(TestIndigoBase):
def setUp(self) -> None:
super().setUp()
self.test_folder = tempfile.mkdtemp()
def tearDown(self) -> None:
shutil.rmtree(self.test_folder)
def test_molecule_search_sub(self) -> None:
bingo = Bingo.createDatabaseFile(self.indigo, self.test_folder, 'molecule', '')
self.assertTrue(bingo)
m1 = self.indigo.loadMolecule('C1CCCCC1')
m2 = self.indigo.loadMolecule('C1CCCCC1')
m3 = self.indigo.loadMolecule('C1CCNCC1')
m4 = self.indigo.loadMolecule('N')
m1_id = bingo.insert(m1)
m2_id = bingo.insert(m2)
m3_id = bingo.insert(m3)
bingo.insert(m4)
bingo.optimize()
q = self.indigo.loadQueryMolecule('C')
result = bingo.searchSub(q)
ids = []
while result.next():
ids.append(result.getCurrentId())
self.assertEqual(3, len(ids))
self.assertEqual([m1_id, m2_id, m3_id], ids)
self.assertTrue(self.indigo.exactMatch(m1, bingo.getRecordById(m1_id)))
self.assertTrue(self.indigo.exactMatch(m2, bingo.getRecordById(m2_id)))
self.assertTrue(self.indigo.exactMatch(m3, bingo.getRecordById(m3_id)))
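# A hedged companion sketch using only the calls exercised above: a
# nitrogen-atom substructure query should match the piperidine and ammonia
# records. The id ordering assumes ids come back in insertion order, as the
# test above also assumes.
def sketch_nitrogen_query(indigo, folder):
    bingo = Bingo.createDatabaseFile(indigo, folder, 'molecule', '')
    pip_id = bingo.insert(indigo.loadMolecule('C1CCNCC1'))  # piperidine
    amm_id = bingo.insert(indigo.loadMolecule('N'))         # ammonia
    result = bingo.searchSub(indigo.loadQueryMolecule('N'))
    ids = []
    while result.next():
        ids.append(result.getCurrentId())
    assert ids == [pip_id, amm_id]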
| 33.820513
| 87
| 0.64746
| 156
| 1,319
| 5.378205
| 0.326923
| 0.107271
| 0.104887
| 0.085816
| 0.126341
| 0.085816
| 0
| 0
| 0
| 0
| 0
| 0.026471
| 0.226687
| 1,319
| 38
| 88
| 34.710526
| 0.796078
| 0
| 0
| 0
| 0
| 0
| 0.025777
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.09375
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e9910237b294e11a1a1bbded611300e71f69a4f
| 3,932
|
py
|
Python
|
src/core/src/tortuga/scripts/get_kit.py
|
sutasu/tortuga
|
48d7cde4fa652346600b217043b4a734fa2ba455
|
[
"Apache-2.0"
] | 33
|
2018-03-02T17:07:39.000Z
|
2021-05-21T18:02:51.000Z
|
src/core/src/tortuga/scripts/get_kit.py
|
sutasu/tortuga
|
48d7cde4fa652346600b217043b4a734fa2ba455
|
[
"Apache-2.0"
] | 201
|
2018-03-05T14:28:24.000Z
|
2020-11-23T19:58:27.000Z
|
src/core/src/tortuga/scripts/get_kit.py
|
sutasu/tortuga
|
48d7cde4fa652346600b217043b4a734fa2ba455
|
[
"Apache-2.0"
] | 23
|
2018-03-02T17:21:59.000Z
|
2020-11-18T14:52:38.000Z
|
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member
import json
import sys
from tortuga.exceptions.kitNotFound import KitNotFound
from tortuga.kit.kitCli import KitCli
from tortuga.wsapi.kitWsApi import KitWsApi
class GetKitCli(KitCli):
"""
Get kit command line interface.
"""
def parseArgs(self, usage=None):
cmd_options_group = _('Command Options')
self.addOptionGroup(cmd_options_group, '')
self.addOptionToGroup(cmd_options_group, '--quiet',
action='store_true', dest='bQuiet',
help=_('Return success (0) if kit exists,'
' otherwise 1.'))
output_attr_group = _('Output formatting options')
self.addOptionGroup(output_attr_group, None)
self.addOptionToGroup(
output_attr_group, '--json',
action='store_true', default=False,
help=_('JSON formatted output')
)
self.addOptionToGroup(
output_attr_group, '--xml',
action='store_true', default=False,
help=_('XML formatted output')
)
super(GetKitCli, self).parseArgs(usage=usage)
def runCommand(self):
self.parseArgs(_("""
Returns details of the specified kit
"""))
name, version, iteration = \
self.getKitNameVersionIteration(self.getArgs().kitspec)
api = self.configureClient(KitWsApi)
try:
kit = api.getKit(name, version=version, iteration=iteration)
if not self.getArgs().bQuiet:
if self.getArgs().xml:
print(kit.getXmlRep())
elif self.getArgs().json:
print(json.dumps({
'kit': kit.getCleanDict(),
}, sort_keys=True, indent=4, separators=(',', ': ')))
else:
self._console_output(kit)
sys.exit(0)
except KitNotFound:
if self.getArgs().bQuiet:
sys.exit(1)
# Push the "kit not found" exception up the stack
raise
def _console_output(self, kit):
print('{0}-{1}-{2}'.format(kit.getName(),
kit.getVersion(),
kit.getIteration()))
print(' ' * 2 + '- Description: {0}'.format(kit.getDescription()))
print(' ' * 2 + '- Type: {0}'.format(
'OS' if kit.getIsOs() else 'Application'
if kit.getName() != 'base' else 'System'))
print(' ' * 2 + '- Removable: {0}'.format(kit.getIsRemovable()))
print(' ' * 2 + '- Components:')
for component in kit.getComponentList():
print(' ' * 4 + '- Name: {0}, Version: {1}'.format(
component.getName(), component.getVersion()))
print(' ' * 6 + '- Description: {0}'.format(
component.getDescription()))
if not kit.getIsOs():
compatible_os = component.getOsInfoList() +\
component.getOsFamilyInfoList()
else:
compatible_os = []
if compatible_os:
print(' ' * 6 + '- Operating system(s): {0}'.format(
', '.join([str(item) for item in compatible_os])))
def main():
GetKitCli().run()
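# A hedged usage sketch: the installed console-script name "get-kit" and the
# kitspec value are illustrative assumptions; only the class entry point is
# visible in this file.
#
#     get-kit base-7.0.3-0 --json
#
# Programmatic equivalent, reusing main() defined above:
def sketch_invoke():
    import sys
    sys.argv = ['get-kit', 'base-7.0.3-0', '--json']  # hypothetical kitspec
    main()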
| 31.96748
| 74
| 0.559003
| 403
| 3,932
| 5.37469
| 0.436725
| 0.027701
| 0.027701
| 0.014774
| 0.060942
| 0.028624
| 0
| 0
| 0
| 0
| 0
| 0.012734
| 0.320956
| 3,932
| 122
| 75
| 32.229508
| 0.798502
| 0.169125
| 0
| 0.081081
| 0
| 0
| 0.122064
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.067568
| 0
| 0.135135
| 0.135135
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e9f10181a7ecfeffe5b3e63362769aa8677cc14
| 12,338
|
py
|
Python
|
eventide/message.py
|
blakev/python-eventide
|
ef547a622c52eec8acb9d7ca4cc01fae0ab7bad0
|
[
"MIT"
] | 1
|
2021-01-14T18:35:44.000Z
|
2021-01-14T18:35:44.000Z
|
eventide/message.py
|
blakev/python-eventide
|
ef547a622c52eec8acb9d7ca4cc01fae0ab7bad0
|
[
"MIT"
] | null | null | null |
eventide/message.py
|
blakev/python-eventide
|
ef547a622c52eec8acb9d7ca4cc01fae0ab7bad0
|
[
"MIT"
] | 2
|
2021-04-20T22:09:02.000Z
|
2021-07-29T21:52:30.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# >>
# python-eventide, 2020
# LiveViewTech
# <<
from uuid import UUID, uuid4
from datetime import datetime
from operator import attrgetter
from functools import total_ordering
from dataclasses import (
field,
asdict,
fields,
dataclass,
_process_class,
make_dataclass,
)
from typing import (
Dict,
List,
Type,
Mapping,
Callable,
Optional,
NamedTuple,
)
from pydantic import BaseModel, Field
from eventide.utils import jdumps, jloads, dense_dict
from eventide._types import JSON
f_blank = Field(default=None)
class Metadata(BaseModel):
"""A message's metadata object contains information about the stream where the
message resides, the previous message in a series of messages that make up a
messaging workflow, the originating process to which the message belongs, as well
as other data pertinent to understanding the provenance and disposition of the message.
Message metadata is data about messaging machinery, like message schema version,
source stream, positions, provenance, reply address, and the like.
"""
class Config:
extra = 'allow'
orm_mode = True
# yapf: disable
stream_name: Optional[str] = f_blank
position: Optional[int] = f_blank
global_position: Optional[int] = f_blank
causation_message_stream_name: Optional[str] = f_blank
causation_message_position: Optional[int] = f_blank
causation_message_global_position: Optional[int] = f_blank
correlation_stream_name: Optional[str] = f_blank
reply_stream_name: Optional[str] = f_blank
schema_version: Optional[str] = f_blank
time: Optional[float] = f_blank
# yapf: enable
def __repr__(self) -> str:
# dynamically scan the available fields the first time this
# object instance is printed out, looking for fields where
# repr=True -- we then save those fields so we can dynamically
# extract their current value each time.
attr = '__repr_fields__'
if not hasattr(self, attr):
repr_fields = filter(lambda f: f.repr, fields(self))
repr_fields = set(map(attrgetter('name'), repr_fields))
setattr(self, attr, repr_fields)
o = ', '.join('%s=%s' % (k, getattr(self, k)) for k in getattr(self, attr))
return '%s(%s)' % (self.__class__.__name__, o)
def to_dict(self) -> Dict:
return self.dict(skip_defaults=True, exclude_unset=True)
def to_json(self) -> str:
return jdumps(self.to_dict())
@property
def identifier(self) -> str:
return '%s/%d' % (self.stream_name, self.position)
@property
def causation_identifier(self) -> str:
return '%s/%d' % (
self.causation_message_stream_name, self.causation_message_position
)
@property
def replies(self) -> bool:
return bool(self.reply_stream_name)
def do_not_reply(self) -> 'Metadata':
self.reply_stream_name = None
return self
def follow(self, other: 'Metadata') -> 'Metadata':
self.causation_message_stream_name = other.stream_name
self.causation_message_position = other.position
self.causation_message_global_position = other.global_position
self.correlation_stream_name = other.correlation_stream_name
self.reply_stream_name = other.reply_stream_name
return self
def follows(self, other: 'Metadata') -> bool:
return self.causation_message_stream_name == other.stream_name \
and self.causation_message_position == other.position \
and self.causation_message_global_position == other.global_position \
and self.correlation_stream_name == other.correlation_stream_name \
and self.reply_stream_name == other.reply_stream_name
def correlates(self, stream_name: str) -> bool:
return self.correlation_stream_name == stream_name
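# A small illustrative sketch of the causation helpers above; the stream
# names and positions are made-up values:
def sketch_metadata_follow():
    parent = Metadata(stream_name='account-123', position=7,
                      global_position=42,
                      correlation_stream_name='someWorkflow-123')
    child = Metadata().follow(parent)
    assert child.follows(parent)
    assert child.correlates('someWorkflow-123')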
@dataclass(frozen=True, repr=True)
@total_ordering
class MessageData:
"""MessageData is the raw, low-level storage representation of a message.
These instances are READ from the database and should not be created directly.
"""
type: str
stream_name: str
data: JSON
metadata: JSON
id: UUID
position: int
global_position: int
time: float
@classmethod
def from_record(cls, record: Mapping) -> 'MessageData':
"""Build a new instance from a row in the message store."""
rec = dict(record)
rec['data'] = jloads(rec.get('data', '{}'))
rec['metadata'] = jloads(rec.get('metadata', '{}'))
rec['time'] = rec.get('time', datetime.utcnow()).timestamp()
return cls(**rec)
def __gt__(self, other: 'MessageData') -> bool:
return self.global_position > other.global_position
def __ge__(self, other: 'MessageData') -> bool:
return self.global_position >= other.global_position
def __eq__(self, other: 'MessageData') -> bool:
return self.stream_name == other.stream_name \
and self.type == other.type \
and self.data == other.data \
and self.metadata == other.metadata
@property
def category(self) -> str:
return self.stream_name.split('-')[0]
@property
def is_category(self) -> bool:
return '-' not in self.stream_name
@property
def stream_id(self) -> Optional[str]:
if '-' not in self.stream_name:
return None
return self.stream_name.split('-', 1)[1]
@property
def cardinal_id(self) -> Optional[str]:
if '-' not in self.stream_name:
return None
return self.stream_name.split('-', 1)[1].split('+')[0]
@property
def command(self) -> Optional[str]:
if ':' not in self.category:
return None
return self.category.split(':', 1)[1].split('-')[0]
class SerializedMessage(NamedTuple):
"""A light representation of a Message instance before writing to message store."""
id: str
stream_name: str
type: str
data: str
metadata: str
expected_version: Optional[int]
@dataclass(frozen=False, repr=False, init=True, eq=False)
class Message:
"""Base class for defining custom Message records for the message store.
Messages are converted into SerializedMessage right before being written, and
are created from MessageData instances when being deserialized.
This class should not be instantiated directly but instead should be the parent
class on other structures that are persisted to the database.
"""
id: UUID = Field(default_factory=uuid4, alias='_id_')
metadata: Metadata = Field(default_factory=Metadata, alias='_metadata_')
@classmethod
def from_messagedata(cls, data: 'MessageData', strict: bool = False) -> 'Message':
if strict:
if data.type != cls.__name__:
raise ValueError('invalid class name, does not match type `%s`' % data.type)
# coerce the metadata object
# .. attempt to assign all the metadata fields and values from the
# incoming MessageData instance onto this custom Message instance.
# These additional attributes can be specified before the underlying Message
# instance is created by decorating the class with @messagecls.
meta_obj = {}
meta_fields = cls.__dataclass_fields__['metadata'].metadata or {}
for k, v in data.metadata.items():
if k not in meta_fields:
if strict:
raise ValueError('undefined metadata field name `%s`' % k)
# else:
# skipping field: value
if k in meta_fields:
meta_obj[k] = v
# create instance
msg = cls(**data.data)
msg.id = data.id
msg.metadata = msg.metadata.__class__(**meta_obj)
# return instance of custom class
return msg
def __eq__(self, other: 'Message') -> bool:
if not isinstance(other, self.__class__):
return False
attrs = self.attributes()
for k, v in other.attributes().items():
if k in ('id', 'metadata'):
continue
if attrs.get(k, not v) != v:
return False
return True
@property
def type(self) -> str:
return self.__class__.__name__
def attributes(self) -> Dict:
return asdict(self)
def attribute_names(self) -> List[str]:
return list(self.attributes().keys())
def follow(self, other: 'Message') -> 'Message':
self.metadata.follow(other.metadata)
return self
def follows(self, other: 'Message') -> bool:
return self.metadata.follows(other.metadata)
def serialize(
self,
stream_name: str,
expected_version: Optional[int] = None,
json_default_fn: Optional[Callable] = None,
) -> SerializedMessage:
"""Prepare this instance to be written to the message store.
Returns a serialized version of this object's data.
"""
data = self.attributes()
# separate the metadata from the data
meta = dense_dict(data.pop('metadata'))
# remove the UUID, since it has its own column
del data['id']
# build the response instance
return SerializedMessage(
str(self.id),
stream_name,
self.type,
jdumps(data, json_default_fn),
jdumps(meta, json_default_fn),
expected_version,
)
def messagecls(
cls_=None,
*,
msg_meta: Type[Metadata] = Metadata,
init=True,
repr=True,
eq=True,
order=False,
unsafe_hash=False,
frozen=False,
) -> Type[Message]:
"""Decorator used to build a custom Message type, with the ability to bind
a custom Metadata class with additional fields. When these instances are built,
serialized, or de-serialized from the database all the correct fields will be
filled out with no interference on in-editor linters.
The parameters for this decorator copy @dataclass with the addition of ``msg_meta``
which allows the definition to have a custom Metadata class assigned to it.
All @messagecls decorated classes behave like normal dataclasses.
"""
def wrap(cls):
# turn the wrapped class into a dataclass
kls = dataclass(
cls,
init=init,
repr=repr,
eq=eq,
order=order,
unsafe_hash=unsafe_hash,
frozen=frozen,
)
# extract all the field names and types from the new class definition
m_fields = {f.name: f.type for f in fields(msg_meta)}
# re-create the msg_meta class on the `metadata` attribute for this Message
# object. We attach the new (and old) fields into the metadata flag for
# this field so we don't have to process those values every time an instance
# is de-serialized from the database.
return make_dataclass(
cls.__name__,
fields=[
(
'metadata',
msg_meta,
field(
init=False,
default_factory=msg_meta,
metadata=m_fields,
),
),
],
bases=(
kls,
Message,
),
)
# ensure this class definition follows basic guidelines
if not hasattr(msg_meta, '__dataclass_fields__'):
raise ValueError('custom message metadata class must be a @dataclass')
if not issubclass(msg_meta, Metadata):
raise ValueError('custom message metadata class must inherit eventide.Metadata')
# "wrap" the Metadata class with @dataclass so we don't have to on its definition
msg_meta = _process_class(msg_meta, True, False, True, False, False, False)
# mimic @dataclass functionality
if cls_ is None:
return wrap
return wrap(cls_)
message_cls = messagecls # alias
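# Intended-usage sketch per the @messagecls docstring. The field names are
# illustrative, and whether a plain Metadata subclass passes the
# __dataclass_fields__ check above depends on how it is declared, so this is
# left commented out rather than presented as a verified invocation:
#
#     class AuditMetadata(Metadata):
#         actor: Optional[str] = f_blank
#
#     @messagecls(msg_meta=AuditMetadata)
#     class AccountOpened:
#         account_id: str = ''
#         deposit: int = 0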
| 33.710383
| 92
| 0.623197
| 1,481
| 12,338
| 5.035787
| 0.220122
| 0.046929
| 0.018772
| 0.011397
| 0.208903
| 0.179941
| 0.128855
| 0.0893
| 0.038884
| 0.038884
| 0
| 0.001823
| 0.288702
| 12,338
| 365
| 93
| 33.80274
| 0.847995
| 0.249635
| 0
| 0.122951
| 0
| 0
| 0.052052
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118852
| false
| 0
| 0.036885
| 0.065574
| 0.42623
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6e9f30208ea04fa7ad96c88e5f93a7fce170ab1e
| 10,926
|
py
|
Python
|
utils/minifier.py
|
MateuszDabrowski/elquent
|
9ff9c57d01a8ade7ebc7a903f228d4b7ed7324c4
|
[
"MIT"
] | 4
|
2021-05-26T19:48:31.000Z
|
2022-03-01T03:52:39.000Z
|
utils/minifier.py
|
MateuszDabrowski/ELQuent
|
9ff9c57d01a8ade7ebc7a903f228d4b7ed7324c4
|
[
"MIT"
] | null | null | null |
utils/minifier.py
|
MateuszDabrowski/ELQuent
|
9ff9c57d01a8ade7ebc7a903f228d4b7ed7324c4
|
[
"MIT"
] | 3
|
2021-03-05T23:06:38.000Z
|
2021-10-05T19:56:28.000Z
|
#!/usr/bin/env python3.6
# -*- coding: utf8 -*-
'''
ELQuent.minifier
E-mail code minifier
Mateusz Dąbrowski
github.com/MateuszDabrowski
linkedin.com/in/mateusz-dabrowski-marketing/
'''
import os
import re
import sys
import json
import pyperclip
from colorama import Fore, Style, init
# ELQuent imports
import utils.api.api as api
# Initialize colorama
init(autoreset=True)
# Globals
naming = None
source_country = None
# Predefined message elements
ERROR = f'{Fore.WHITE}[{Fore.RED}ERROR{Fore.WHITE}] {Fore.YELLOW}'
WARNING = f'{Fore.WHITE}[{Fore.YELLOW}WARNING{Fore.WHITE}] '
SUCCESS = f'{Fore.WHITE}[{Fore.GREEN}SUCCESS{Fore.WHITE}] '
YES = f'{Style.BRIGHT}{Fore.GREEN}y{Fore.WHITE}{Style.NORMAL}'
NO = f'{Style.BRIGHT}{Fore.RED}n{Fore.WHITE}{Style.NORMAL}'
def country_naming_setter(country):
'''
Sets source_country for all functions
Loads json file with naming convention
'''
global source_country
source_country = country
# Loads json file with naming convention
with open(file('naming'), 'r', encoding='utf-8') as f:
global naming
naming = json.load(f)
'''
=================================================================================
File Path Getter
=================================================================================
'''
def file(file_path, file_name=''):
'''
Returns file path to template files
'''
def find_data_file(filename, directory='outcomes'):
'''
Returns correct file path for both script and frozen app
'''
if directory == 'main': # Files in main directory
if getattr(sys, 'frozen', False):
datadir = os.path.dirname(sys.executable)
else:
datadir = os.path.dirname(os.path.dirname(__file__))
return os.path.join(datadir, filename)
elif directory == 'api': # For reading api files
if getattr(sys, 'frozen', False):
datadir = os.path.dirname(sys.executable)
else:
datadir = os.path.dirname(os.path.dirname(__file__))
return os.path.join(datadir, 'utils', directory, filename)
elif directory == 'outcomes': # For writing outcome files
if getattr(sys, 'frozen', False):
datadir = os.path.dirname(sys.executable)
else:
datadir = os.path.dirname(os.path.dirname(__file__))
return os.path.join(datadir, directory, filename)
file_paths = {
'naming': find_data_file('naming.json', directory='api'),
'mail_html': find_data_file(f'WK{source_country}_{file_name}.txt')
}
return file_paths.get(file_path)
'''
=================================================================================
Code Output Helper
=================================================================================
'''
def output_method(html_code):
'''
Lets the user choose how the program should output the results
Returns email_id if creation/update in Eloqua was selected
'''
# Asks which output
print(
f'\n{Fore.GREEN}New code should be:',
f'\n{Fore.WHITE}[{Fore.YELLOW}0{Fore.WHITE}]\t»',
f'{Fore.WHITE}[{Fore.YELLOW}FILE{Fore.WHITE}] Only saved to Outcomes folder',
f'\n{Fore.WHITE}[{Fore.YELLOW}1{Fore.WHITE}]\t»',
f'{Fore.WHITE}[{Fore.YELLOW}HTML{Fore.WHITE}] Copied to clipboard as HTML for pasting [CTRL+V]',
f'\n{Fore.WHITE}[{Fore.YELLOW}2{Fore.WHITE}]\t»',
f'{Fore.WHITE}[{Fore.YELLOW}CREATE{Fore.WHITE}] Uploaded to Eloqua as a new E-mail',
f'\n{Fore.WHITE}[{Fore.YELLOW}3{Fore.WHITE}]\t»',
f'{Fore.WHITE}[{Fore.YELLOW}UPDATE{Fore.WHITE}] Uploaded to Eloqua as update to existing E-mail')
email_id = ''
while True:
print(f'{Fore.YELLOW}Enter number associated with chosen utility:', end='')
choice = input(' ')
if choice == '0':
break
elif choice == '1' and html_code:
pyperclip.copy(html_code)
print(
f'\n{SUCCESS}You can now paste the HTML code [CTRL+V]')
break
elif choice == '2':
print(
f'\n{Fore.WHITE}[{Fore.YELLOW}NAME{Fore.WHITE}] » Write or copy-paste the name of the E-mail:')
name = api.eloqua_asset_name()
api.eloqua_create_email(name, html_code)
break
elif choice == '3':
print(
f'\n{Fore.WHITE}[{Fore.YELLOW}ID{Fore.WHITE}] » Write or copy-paste the ID of the E-mail to update:')
email_id = input(' ')
if not email_id:
email_id = pyperclip.paste()
api.eloqua_update_email(email_id, html_code)
break
else:
print(f'{ERROR}Entered value does not belong to any utility!')
choice = ''
return
'''
=================================================================================
E-mail Minifier
=================================================================================
'''
def email_minifier(code):
'''
Requires html code of an e-mail
Returns minified html code of an e-mail
'''
# HTML Minifier
html_attr = ['html', 'head', 'style', 'body',
'table', 'tbody', 'tr', 'td', 'th', 'div']
for attr in html_attr:
code = re.sub(rf'{attr}>\s*\n\s*', f'{attr}>', code)
code = re.sub(rf'\s*\n\s+<{attr}', f'<{attr}', code)
code = re.sub(r'"\n+\s*', '" ', code)
for attr in ['alt', 'title', 'data-class']:
code = re.sub(rf'{attr}=""', '', code)
code = re.sub(r'" />', '"/>', code)
code = re.sub(r'<!--[^\[\]]*?-->', '', code)
for attr in html_attr:
code = re.sub(rf'{attr}>\s*\n\s*', f'{attr}>', code)
code = re.sub(rf'\s*\n\s+<{attr}', f'<{attr}', code)
# Conditional Comment Minifier
code = re.sub(
r'\s*\n*\s*<!--\[if mso \| IE\]>\s*\n\s*', '\n<!--[if mso | IE]>', code)
code = re.sub(
r'\s*\n\s*<!\[endif\]-->\s*\n\s*', '<![endif]-->\n', code)
# CSS Minifier
code = re.sub(r'{\s*\n\s*', '{', code)
code = re.sub(r';\s*\n\s*}\n\s*', '} ', code)
code = re.sub(r';\s*\n\s*', '; ', code)
code = re.sub(r'}\n+', '} ', code)
# Whitespace Minifier
code = re.sub(r'\t', '', code)
code = re.sub(r'\n+', ' ', code)
code = re.sub(r' {2,}', ' ', code)
# Trim lines to maximum of 500 characters
count = 0
newline_indexes = []
for i, letter in enumerate(code):
if count > 450:
if letter in ['>', ' ']:
newline_indexes.append(i)
count = 0
else:
count += 1
for index in reversed(newline_indexes):
output = code[:index+1] + '\n' + code[index+1:]
code = output
# Take care of lengthy links that extend a line over 500 characters
while True:
lengthy_lines_list = re.findall(r'^.{500,}$', code, re.MULTILINE)
if not lengthy_lines_list:
break
lengthy_link_regex = re.compile(r'href=\".{40,}?\"|src=\".{40,}?\"')
for line in lengthy_lines_list:
lengthy_link_list = re.findall(lengthy_link_regex, line)
code = code.replace(
lengthy_link_list[0], f'\n{lengthy_link_list[0]}')
return code
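# A tiny worked example for email_minifier (illustrative input; the expected
# result in the comment is the intended shape, not a captured run):
def sketch_email_minifier():
    raw = ('<html>\n  <body>\n    <table>\n      <tr><td>Hi</td></tr>\n'
           '    </table>\n  </body>\n</html>\n')
    minified = email_minifier(raw)
    # The inter-tag newlines and indentation collapse onto one line, roughly:
    # '<html><body><table><tr><td>Hi</td></tr></table></body></html>'
    return minified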
def email_workflow(email_code=''):
'''
Minifies the e-mail code
'''
if email_code:
module = True
# Gets e-mail code if not delivered via argument
elif not email_code:
module = False
print(
f'\n{Fore.WHITE}[{Fore.YELLOW}Code{Fore.WHITE}] » Copy code of the E-mail to minify and click [Enter]:')
input()
email_code = pyperclip.paste()
# Gets the code from the user
while True:
email_code = pyperclip.paste()
is_html = re.compile(r'<html[\s\S\n]*?</html>', re.UNICODE)
if is_html.findall(email_code):
print(f'{Fore.WHITE}» {SUCCESS}Code copied from clipboard')
break
print(
f'{Fore.WHITE}» {ERROR}Invalid HTML. Copy valid code and click [Enter]', end='')
input(' ')
# Saves original code to outcomes folder
with open(file('mail_html', file_name='original_code'), 'w', encoding='utf-8') as f:
f.write(email_code)
# Gets file size of original file
original_size = os.path.getsize(
file('mail_html', file_name='original_code'))
# Minify the code
minified_code = email_minifier(email_code)
# Saves minified code to outcomes folder
with open(file('mail_html', file_name='minified_code'), 'w', encoding='utf-8') as f:
f.write(minified_code)
# Gets file size of minified file
minified_size = os.path.getsize(
file('mail_html', file_name='minified_code'))
print(f'\n{Fore.WHITE}» {SUCCESS}E-mail was minified from {Fore.YELLOW}{round(original_size/1024)}kB'
f'{Fore.WHITE} to {Fore.YELLOW}{round(minified_size/1024)}kB'
f' {Fore.WHITE}({Fore.GREEN}-{round((original_size-minified_size)/original_size*100)}%{Fore.WHITE})!')
if not module:
# Outputs the code
output_method(minified_code)
# Ask the user whether to repeat
print(f'\n{Fore.YELLOW}» {Fore.WHITE}Do you want to {Fore.YELLOW}minify another Email{Fore.WHITE}?',
f'{Fore.WHITE}({YES}/{NO}):', end=' ')
choice = input('')
if choice.lower() == 'y':
print(
f'\n{Fore.GREEN}-----------------------------------------------------------------------------')
email_workflow()
return
'''
=================================================================================
Minifier module menu
=================================================================================
'''
def minifier_module(country):
'''
Lets user minify the HTML code
'''
# Create global source_country and load json file with naming convention
country_naming_setter(country)
# Report type chooser
print(
f'\n{Fore.GREEN}ELQuent.minifier Utilities:'
f'\n{Fore.WHITE}[{Fore.YELLOW}1{Fore.WHITE}]\t» [{Fore.YELLOW}E-mail{Fore.WHITE}] Minifies e-mail code'
f'\n{Fore.WHITE}[{Fore.YELLOW}Q{Fore.WHITE}]\t» [{Fore.YELLOW}Quit to main menu{Fore.WHITE}]'
)
while True:
print(f'{Fore.YELLOW}Enter number associated with chosen utility:', end='')
choice = input(' ')
if choice.lower() == 'q':
break
elif choice == '1':
email_workflow()
break
else:
print(f'{Fore.RED}Entered value does not belong to any utility!')
choice = ''
return
| 33.618462
| 116
| 0.532857
| 1,365
| 10,926
| 4.190476
| 0.204396
| 0.070804
| 0.040909
| 0.049825
| 0.383741
| 0.327273
| 0.25035
| 0.225524
| 0.217483
| 0.175699
| 0
| 0.006299
| 0.258924
| 10,926
| 324
| 117
| 33.722222
| 0.69853
| 0.124016
| 0
| 0.305699
| 0
| 0.056995
| 0.330715
| 0.154396
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036269
| false
| 0
| 0.036269
| 0
| 0.11399
| 0.07772
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ea22002e9ef59fb7dc0ae80af6cf9fc57e8fc02
| 2,305
|
py
|
Python
|
doc/conf.py
|
safay/uta
|
bf3cf5a531aec4cca61f8926e79a624d01c76682
|
[
"Apache-2.0"
] | 48
|
2016-09-20T16:28:46.000Z
|
2022-02-02T10:32:02.000Z
|
doc/conf.py
|
safay/uta
|
bf3cf5a531aec4cca61f8926e79a624d01c76682
|
[
"Apache-2.0"
] | 45
|
2016-12-12T23:41:12.000Z
|
2022-02-09T11:48:04.000Z
|
doc/conf.py
|
safay/uta
|
bf3cf5a531aec4cca61f8926e79a624d01c76682
|
[
"Apache-2.0"
] | 20
|
2016-10-09T10:16:44.000Z
|
2021-06-18T02:19:58.000Z
|
############################################################################
# Theme setup
html_theme = 'invitae'
html_theme_path = ['themes']
if html_theme == 'sphinx_rtd_theme':
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
elif html_theme == 'bootstrap':
import sphinx_bootstrap_theme
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
############################################################################
# Project config
import uta
version = uta.__version__
release = str(uta.__version__)
project = u'UTA'
authors = project + ' Contributors'
copyright = u'2015, ' + authors
extlinks = {
'issue': ('https://bitbucket.org/biocommons/uta/issue/%s', 'UTA issue '),
}
man_pages = [
('index', 'uta', u'UTA Documentation', [u'UTA Contributors'], 1)
]
############################################################################
# Boilerplate
# optionally add 'inherited-members'
autodoc_default_flags = ['members', 'undoc-members', 'show-inheritance']
exclude_patterns = ['build', 'static', 'templates', 'themes']
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.intersphinx',
'sphinx.ext.pngmath',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinxcontrib.fulltoc',
]
html_favicon = 'static/favicon.ico'
html_logo = 'static/logo.png'
html_static_path = ['static']
html_title = '{project} {release}'.format(project=project, release=release)
intersphinx_mapping = {
'http://docs.python.org/': None,
}
master_doc = 'index'
pygments_style = 'sphinx'
source_suffix = '.rst'
templates_path = ['templates']
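# Quick self-check of the extlinks mapping above: in reStructuredText,
# :issue:`42` renders as "UTA issue 42" and links to the tracker entry.
_issue_url, _issue_prefix = extlinks['issue']
assert _issue_url % '42' == 'https://bitbucket.org/biocommons/uta/issue/42'
assert _issue_prefix == 'UTA issue '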
# <LICENSE>
# Copyright 2014 UTA Contributors (https://bitbucket.org/biocommons/uta)
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
##
# http://www.apache.org/licenses/LICENSE-2.0
##
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# </LICENSE>
| 29.935065
| 77
| 0.647722
| 270
| 2,305
| 5.355556
| 0.5
| 0.049793
| 0.044952
| 0.024896
| 0.103734
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006507
| 0.133189
| 2,305
| 76
| 78
| 30.328947
| 0.717217
| 0.291106
| 0
| 0
| 0
| 0
| 0.348518
| 0.046999
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.069767
| 0
| 0.069767
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ea3527b6763af10afefd4e777c572e2ac4172fc
| 997
|
py
|
Python
|
exercises_gustguan/ex113.py
|
Ewerton12F/Python-Notebook
|
85c4d38c35c6063fb475c25ebf4645688ec9dbcb
|
[
"MIT"
] | null | null | null |
exercises_gustguan/ex113.py
|
Ewerton12F/Python-Notebook
|
85c4d38c35c6063fb475c25ebf4645688ec9dbcb
|
[
"MIT"
] | null | null | null |
exercises_gustguan/ex113.py
|
Ewerton12F/Python-Notebook
|
85c4d38c35c6063fb475c25ebf4645688ec9dbcb
|
[
"MIT"
] | null | null | null |
def leiaInt(msg):
while True:
try:
i = int(input(msg))
except (ValueError, TypeError):
print('\033[1;3;31mERROR: Please enter a valid integer.\033[0;0;0m')
continue
except KeyboardInterrupt:
print('\n\033[1;3;33mUser chose not to enter this number.\033[0;0;0m\n')
return 0
else:
return i
def leiaFloat(msg):
while True:
try:
r = float(input(msg))
except (TypeError, ValueError):
print('\033[1;3;31mERROR: Please enter a valid real number.\033[0;0;0m')
continue
except KeyboardInterrupt:
print('\n\033[1;3;33mUser chose not to enter this number.\033[0;0;0m\n')
return 0
else:
return r
li = leiaInt('Enter an integer: ')
lr = leiaFloat('Enter a real number: ')
print(f'\033[1;3;34mThe integer value was {li} and the real value was {lr}.\033[0;0;0m')
| 33.233333
| 93
| 0.565697
| 137
| 997
| 4.116788
| 0.350365
| 0.035461
| 0.044326
| 0.062057
| 0.556738
| 0.556738
| 0.556738
| 0.556738
| 0.556738
| 0.556738
| 0
| 0.097242
| 0.308927
| 997
| 30
| 94
| 33.233333
| 0.721335
| 0
| 0
| 0.518519
| 0
| 0.185185
| 0.392786
| 0.042084
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0
| 0
| 0.222222
| 0.185185
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ea43d3eb6ab1823ba2e818e55cba7f4297fc931
| 10,851
|
py
|
Python
|
frameworks/kafka/tests/auth.py
|
minyk/dcos-activemq
|
57a0cf01053a7e2dc59020ed5cbb93f0d1c9cff1
|
[
"Apache-2.0"
] | null | null | null |
frameworks/kafka/tests/auth.py
|
minyk/dcos-activemq
|
57a0cf01053a7e2dc59020ed5cbb93f0d1c9cff1
|
[
"Apache-2.0"
] | null | null | null |
frameworks/kafka/tests/auth.py
|
minyk/dcos-activemq
|
57a0cf01053a7e2dc59020ed5cbb93f0d1c9cff1
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import retrying
import sdk_cmd
LOG = logging.getLogger(__name__)
def wait_for_brokers(client: str, brokers: list):
"""
Run bootstrap on the specified client to resolve the list of brokers
"""
LOG.info("Running bootstrap to wait for DNS resolution")
bootstrap_cmd = ['/opt/bootstrap',
'-print-env=false',
'-template=false',
'-install-certs=false',
'-resolve-hosts', ','.join(brokers)]
bootstrap_output = sdk_cmd.task_exec(client, ' '.join(bootstrap_cmd))
LOG.info(bootstrap_output)
assert "SDK Bootstrap successful" in ' '.join(str(bo) for bo in bootstrap_output)
def is_not_authorized(output: str) -> bool:
return "AuthorizationException: Not authorized to access" in output
def get_kerberos_client_properties(ssl_enabled: bool) -> list:
protocol = "SASL_SSL" if ssl_enabled else "SASL_PLAINTEXT"
return ['security.protocol={protocol}'.format(protocol=protocol),
'sasl.mechanism=GSSAPI',
'sasl.kerberos.service.name=kafka', ]
def get_ssl_client_properties(cn: str, has_kerberos: bool) -> list:
if has_kerberos:
client_properties = []
else:
client_properties = ["security.protocol=SSL", ]
client_properties.extend(["ssl.truststore.location = {cn}_truststore.jks".format(cn=cn),
"ssl.truststore.password = changeit",
"ssl.keystore.location = {cn}_keystore.jks".format(cn=cn),
"ssl.keystore.password = changeit", ])
return client_properties
def write_client_properties(id: str, task: str, lines: list) -> str:
"""Write a client properties file containing the specified lines"""
output_file = "{id}-client.properties".format(id=id)
LOG.info("Generating %s", output_file)
output = sdk_cmd.create_task_text_file(task, output_file, lines)
LOG.info(output)
return output_file
def write_jaas_config_file(primary: str, task: str, krb5: object) -> str:
output_file = "{primary}-client-jaas.config".format(primary=primary)
LOG.info("Generating %s", output_file)
# TODO: use kafka_client keytab path
jaas_file_contents = ['KafkaClient {',
' com.sun.security.auth.module.Krb5LoginModule required',
' doNotPrompt=true',
' useTicketCache=true',
' principal=\\"{primary}@{realm}\\"'.format(primary=primary, realm=krb5.get_realm()),
' useKeyTab=true',
' serviceName=\\"kafka\\"',
' keyTab=\\"/tmp/kafkaconfig/kafka-client.keytab\\"',
' client=true;',
'};', ]
output = sdk_cmd.create_task_text_file(task, output_file, jaas_file_contents)
LOG.info(output)
return output_file
def write_krb5_config_file(task: str, krb5: object) -> str:
output_file = "krb5.config"
LOG.info("Generating %s", output_file)
try:
# TODO: Set realm and kdc properties
krb5_file_contents = ['[libdefaults]',
'default_realm = {}'.format(krb5.get_realm()),
'',
'[realms]',
' {realm} = {{'.format(realm=krb5.get_realm()),
' kdc = {}'.format(krb5.get_kdc_address()),
' }', ]
log.info("%s", krb5_file_contents)
except Exception as e:
log.error("%s", e)
raise(e)
output = sdk_cmd.create_task_text_file(task, output_file, krb5_file_contents)
LOG.info(output)
return output_file
def setup_krb5_env(primary: str, task: str, krb5: object) -> str:
env_setup_string = "export KAFKA_OPTS=\\\"" \
"-Djava.security.auth.login.config={} " \
"-Djava.security.krb5.conf={}" \
"\\\"".format(write_jaas_config_file(primary, task, krb5), write_krb5_config_file(task, krb5))
LOG.info("Setting environment to %s", env_setup_string)
return env_setup_string
def get_bash_command(cmd: str, environment: str) -> str:
env_str = "{} && ".format(environment) if environment else ""
return "bash -c \"{}{}\"".format(env_str, cmd)
def write_to_topic(cn: str, task: str, topic: str, message: str,
client_properties: list=[], environment: str=None) -> bool:
client_properties_file = write_client_properties(cn, task, client_properties)
cmd = "echo {message} | kafka-console-producer \
--topic {topic} \
--producer.config {client_properties_file} \
--broker-list \$KAFKA_BROKER_LIST".format(message=message,
topic=topic,
client_properties_file=client_properties_file)
write_cmd = get_bash_command(cmd, environment)
def write_failed(output) -> bool:
LOG.info("Checking write output: %s", output)
rc = output[0]
stderr = output[2]
if rc:
LOG.error("Write failed with non-zero return code")
return True
if "UNKNOWN_TOPIC_OR_PARTITION" in stderr:
LOG.error("Write failed due to stderr: UNKNOWN_TOPIC_OR_PARTITION")
return True
if "LEADER_NOT_AVAILABLE" in stderr and "ERROR Error when sending message" in stderr:
LOG.error("Write failed due to stderr: LEADER_NOT_AVAILABLE")
return True
LOG.info("Output check passed")
return False
@retrying.retry(wait_exponential_multiplier=1000,
wait_exponential_max=60 * 1000,
retry_on_result=write_failed)
def write_wrapper():
LOG.info("Running: %s", write_cmd)
rc, stdout, stderr = sdk_cmd.task_exec(task, write_cmd)
LOG.info("rc=%s\nstdout=%s\nstderr=%s\n", rc, stdout, stderr)
return rc, stdout, stderr
rc, stdout, stderr = write_wrapper()
rc_success = rc == 0
stdout_success = ">>" in stdout
stderr_success = not is_not_authorized(stderr)
return rc_success and stdout_success and stderr_success
def read_from_topic(cn: str, task: str, topic: str, messages: int,
client_properties: list=[], environment: str=None) -> str:
client_properties_file = write_client_properties(cn, task, client_properties)
cmd = "kafka-console-consumer \
--topic {topic} \
--consumer.config {client_properties_file} \
--bootstrap-server \$KAFKA_BROKER_LIST \
--from-beginning --max-messages {messages} \
--timeout-ms {timeout_ms}".format(topic=topic,
client_properties_file=client_properties_file,
messages=messages,
timeout_ms=60000)
read_cmd = get_bash_command(cmd, environment)
def read_failed(output) -> bool:
LOG.info("Checking read output: %s", output)
rc = output[0]
stderr = output[2]
if rc:
LOG.error("Read failed with non-zero return code")
return True
if "kafka.consumer.ConsumerTimeoutException" in stderr:
return True
LOG.info("Output check passed")
return False
@retrying.retry(wait_exponential_multiplier=1000,
wait_exponential_max=60 * 1000,
retry_on_result=read_failed)
def read_wrapper():
LOG.info("Running: %s", read_cmd)
rc, stdout, stderr = sdk_cmd.task_exec(task, read_cmd)
LOG.info("rc=%s\nstdout=%s\nstderr=%s\n", rc, stdout, stderr)
return rc, stdout, stderr
output = read_wrapper()
assert output[0] == 0
return " ".join(str(o) for o in output)
log = LOG
def create_tls_artifacts(cn: str, task: str) -> str:
pub_path = "{}_pub.crt".format(cn)
priv_path = "{}_priv.key".format(cn)
log.info("Generating certificate. cn={}, task={}".format(cn, task))
output = sdk_cmd.task_exec(
task,
'openssl req -nodes -newkey rsa:2048 -keyout {} -out request.csr '
'-subj "/C=US/ST=CA/L=SF/O=Mesosphere/OU=Mesosphere/CN={}"'.format(priv_path, cn))
log.info(output)
assert output[0] == 0
rc, raw_csr, _ = sdk_cmd.task_exec(task, 'cat request.csr')
assert rc == 0
request = {
"certificate_request": raw_csr
}
token = sdk_cmd.run_cli("config show core.dcos_acs_token")
output = sdk_cmd.task_exec(
task,
"curl --insecure -L -X POST "
"-H 'Authorization: token={}' "
"leader.mesos/ca/api/v2/sign "
"-d '{}'".format(token, json.dumps(request)))
log.info(output)
assert output[0] == 0
# Write the public cert to the client
certificate = json.loads(output[1])["result"]["certificate"]
output = sdk_cmd.task_exec(task, "bash -c \"echo '{}' > {}\"".format(certificate, pub_path))
log.info(output)
assert output[0] == 0
create_keystore_truststore(cn, task)
return "CN={},OU=Mesosphere,O=Mesosphere,L=SF,ST=CA,C=US".format(cn)
def create_keystore_truststore(cn: str, task: str):
pub_path = "{}_pub.crt".format(cn)
priv_path = "{}_priv.key".format(cn)
keystore_path = "{}_keystore.jks".format(cn)
truststore_path = "{}_truststore.jks".format(cn)
log.info("Generating keystore and truststore, task:{}".format(task))
output = sdk_cmd.task_exec(task, "curl -L -k -v leader.mesos/ca/dcos-ca.crt -o dcos-ca.crt")
# Convert to a PKCS12 key
output = sdk_cmd.task_exec(
task,
'bash -c "export RANDFILE=/mnt/mesos/sandbox/.rnd && '
'openssl pkcs12 -export -in {} -inkey {} '
'-out keypair.p12 -name keypair -passout pass:export '
'-CAfile dcos-ca.crt -caname root"'.format(pub_path, priv_path))
log.info(output)
assert output[0] == 0
log.info("Generating certificate: importing into keystore and truststore")
# Import into the keystore and truststore
output = sdk_cmd.task_exec(
task,
"keytool -importkeystore "
"-deststorepass changeit -destkeypass changeit -destkeystore {} "
"-srckeystore keypair.p12 -srcstoretype PKCS12 -srcstorepass export "
"-alias keypair".format(keystore_path))
log.info(output)
assert output[0] == 0
output = sdk_cmd.task_exec(
task,
"keytool -import -trustcacerts -noprompt "
"-file dcos-ca.crt -storepass changeit "
"-keystore {}".format(truststore_path))
log.info(output)
assert output[0] == 0
| 35.345277
| 117
| 0.599853
| 1,285
| 10,851
| 4.881712
| 0.209339
| 0.030129
| 0.021043
| 0.02455
| 0.37765
| 0.337956
| 0.302567
| 0.243902
| 0.202774
| 0.131994
| 0
| 0.010093
| 0.278684
| 10,851
| 306
| 118
| 35.460784
| 0.791363
| 0.027739
| 0
| 0.290323
| 0
| 0.013825
| 0.2438
| 0.07696
| 0
| 0
| 0
| 0.003268
| 0.041475
| 1
| 0.078341
| false
| 0.036866
| 0.032258
| 0.004608
| 0.202765
| 0.004608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ea45f9b51639f8a0b82e891df2cc0bae0501648
| 1,242
|
py
|
Python
|
python/problem-060.py
|
mbuhot/mbuhot-euler-solutions
|
30066543cfd2d84976beb0605839750b64f4b8ef
|
[
"MIT"
] | 1
|
2015-12-18T13:25:41.000Z
|
2015-12-18T13:25:41.000Z
|
python/problem-060.py
|
mbuhot/mbuhot-euler-solutions
|
30066543cfd2d84976beb0605839750b64f4b8ef
|
[
"MIT"
] | null | null | null |
python/problem-060.py
|
mbuhot/mbuhot-euler-solutions
|
30066543cfd2d84976beb0605839750b64f4b8ef
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
import prime
description = '''
Prime pair sets
Problem 60
The primes 3, 7, 109, and 673, are quite remarkable. By taking any two primes and concatenating them in any order the result will always be prime. For example, taking 7 and 109, both 7109 and 1097 are prime. The sum of these four primes, 792, represents the lowest sum for a set of four primes with this property.
Find the lowest sum for a set of five primes for which any two primes concatenate to produce another prime.
'''
prime.loadPrimes('primes.bin')
def digitconcat(a, b):
return int(str(a) + str(b))
def isconnected(a, b):
return prime.isPrime(digitconcat(a,b)) and prime.isPrime(digitconcat(b,a))
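# A worked check against the known four-prime set from the problem statement
# (assumes primes.bin was loaded above): for {3, 7, 109, 673} every pairwise
# concatenation is prime, e.g. digitconcat(7, 109) == 7109 and
# digitconcat(109, 7) == 1097, and the set sums to 792.
def sketch_known_set():
    known = [3, 7, 109, 673]
    assert all(isconnected(a, b) for a in known for b in known if a < b)
    assert sum(known) == 792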
def search(space, path, n):
if len(path) == n: return path
p = path[0]
sspace = [x for x in sorted(space) if x not in path and isconnected(p, x)]
for c in sspace:
r = search(sspace, [c]+path, n)
if r is not None:
return r
def findPairSets(n):
for p in prime.primes():
space = [p]
for p2 in prime.primes(p):
if isconnected(p, p2):
space.append(p2)
if len(space) >= n:
r = search(space, [p], n)
if r is not None: yield r
result = next(findPairSets(5))
print(result, sum(result))
| 29.571429
| 313
| 0.681159
| 213
| 1,242
| 3.971831
| 0.42723
| 0.007092
| 0.028369
| 0.035461
| 0.080378
| 0.080378
| 0.049645
| 0
| 0
| 0
| 0
| 0.0316
| 0.210145
| 1,242
| 41
| 314
| 30.292683
| 0.830785
| 0.017713
| 0
| 0
| 0
| 0.064516
| 0.378179
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0
| 0.032258
| 0.064516
| 0.258065
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ea54be459981a2401f315126f120b27aa749589
| 5,298
|
py
|
Python
|
multilanguage_frappe_website/hooks.py
|
developmentforpeople/frappe-multilingual-website
|
c0bf74453f3d1de6127ad174aab6c05360cc1ec1
|
[
"MIT"
] | null | null | null |
multilanguage_frappe_website/hooks.py
|
developmentforpeople/frappe-multilingual-website
|
c0bf74453f3d1de6127ad174aab6c05360cc1ec1
|
[
"MIT"
] | null | null | null |
multilanguage_frappe_website/hooks.py
|
developmentforpeople/frappe-multilingual-website
|
c0bf74453f3d1de6127ad174aab6c05360cc1ec1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "multilanguage_frappe_website"
app_title = "Multilanguage Frappe Website"
app_publisher = "DFP developmentforpeople"
app_description = "Multilanguage Frappe Framework website example"
app_icon = "octicon octicon-file-directory"
app_color = "green"
app_email = "developmentforpeople@gmail.com"
app_license = "MIT"
# App name (used to override only sites with this app installed)
multilanguage_app_site_name = app_name
# Hosts/sites where this app will be enabled
multilanguage_app_site_hosts = ["mf.local", "frappe-multilingual-website.developmentforpeople.com"]
# Languages available for site
translated_languages_for_website = ["en", "es"]
# The first language in the list is the default
language_default = translated_languages_for_website[0]
# Home page
home_page = "index"
# Url 301 redirects
website_redirects = [
# Remove duplicated pages for home:
{ "source": "/index", "target": "/" },
{ "source": "/index.html", "target": "/" },
# Languages: Remove main language segment. For example,
# if "en" is first one in "translated_languages_for_website"
# then route "/en/example" will be redirected 301 to "/example"
{ "source": r"/{0}".format(language_default), "target": "/" },
{ "source": r"/{0}/(.*)".format(language_default), "target": r"/\1" },
# Force URL language for some Frappe framework dynamic pages:
{ "source": "/en/login", "target": "/login?_lang=en" },
{ "source": "/es/login", "target": "/login?_lang=es" },
{ "source": "/en/contact", "target": "/contact?_lang=en" },
{ "source": "/es/contact", "target": "/contact?_lang=es" },
# Force URL language for non-language-specific pages:
{ "source": "/en/translations", "target": "/translations?_lang=en" },
{ "source": "/es/translations", "target": "/translations?_lang=es" },
]
# Setup some global context variables related to languages
website_context = {
"languages": translated_languages_for_website,
"language_default": language_default,
"app_site_name": app_name,
}
# Calculate active language from url first segment
update_website_context = [
"{0}.context_extend".format(app_name),
]
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/multilanguage_frappe_website/css/multilanguage_frappe_website.css"
# app_include_js = "/assets/multilanguage_frappe_website/js/multilanguage_frappe_website.js"
# include js, css files in header of web template
web_include_css = "/assets/multilanguage_frappe_website/css/multilanguage_frappe_website.css"
# web_include_js = "/assets/multilanguage_frappe_website/js/multilanguage_frappe_website.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "multilanguage_frappe_website.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "multilanguage_frappe_website.install.before_install"
# after_install = "multilanguage_frappe_website.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "multilanguage_frappe_website.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "multilanguage_frappe_website.tasks.all"
# ],
# "daily": [
# "multilanguage_frappe_website.tasks.daily"
# ],
# "hourly": [
# "multilanguage_frappe_website.tasks.hourly"
# ],
# "weekly": [
# "multilanguage_frappe_website.tasks.weekly"
# ]
# "monthly": [
# "multilanguage_frappe_website.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "multilanguage_frappe_website.install.before_tests"
# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "multilanguage_frappe_website.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "multilanguage_frappe_website.task.get_dashboard_data"
# }
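# A hedged sketch of the "context_extend" hook referenced in
# update_website_context above. The real implementation lives elsewhere in
# the app package and is not shown here; the frappe import and request
# inspection are assumptions, so the sketch is left commented out.
#
# def context_extend(context):
#     import frappe
#     # The first URL segment selects the language, e.g. /es/contact -> "es".
#     first_segment = frappe.local.request.path.strip('/').split('/')[0]
#     if first_segment in translated_languages_for_website:
#         context['lang'] = first_segment
#     else:
#         context['lang'] = language_default
#     return context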
| 29.597765
| 99
| 0.714232
| 633
| 5,298
| 5.723539
| 0.300158
| 0.120618
| 0.15788
| 0.042782
| 0.237924
| 0.143527
| 0.116754
| 0.085012
| 0.085012
| 0.085012
| 0
| 0.002596
| 0.127595
| 5,298
| 178
| 100
| 29.764045
| 0.781264
| 0.651755
| 0
| 0
| 0
| 0
| 0.419113
| 0.143351
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ea618363d6a6f275346b95643dd61b27b8e3d12
| 12,045
|
py
|
Python
|
RsNet/train_models.py
|
gehuangyi20/random_spiking
|
c98b550420ae4061b9d47ca475e86c981caf5514
|
[
"MIT"
] | 1
|
2020-08-03T17:47:40.000Z
|
2020-08-03T17:47:40.000Z
|
RsNet/train_models.py
|
gehuangyi20/random_spiking
|
c98b550420ae4061b9d47ca475e86c981caf5514
|
[
"MIT"
] | null | null | null |
RsNet/train_models.py
|
gehuangyi20/random_spiking
|
c98b550420ae4061b9d47ca475e86c981caf5514
|
[
"MIT"
] | null | null | null |
## train_models.py -- train the neural network models for attacking
##
## Copyright (C) 2016, Nicholas Carlini <nicholas@carlini.com>.
##
## This program is licenced under the BSD 2-Clause licence,
## contained in the LICENCE file in this directory.
## Modified for the needs of MagNet.
import os
import argparse
import utils
import numpy as np
import tensorflow as tf
from keras import backend as k
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
from keras.models import Model
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from RsNet.setup_mnist import MNIST, MNISTModel
from RsNet.tf_config import gpu_config, setup_visibile_gpus, CHANNELS_LAST, CHANNELS_FIRST
from RsNet.dataset_nn import model_mnist_meta
from RsNet.random_spiking.nn_ops import random_spike_sample_scaling, random_spike_sample_scaling_per_sample
def random_spike(x, sample_rate, scaling, is_batch=True):
if is_batch:
return random_spike_sample_scaling(x, sample_rate=sample_rate, scaling=scaling)
else:
return random_spike_sample_scaling_per_sample(x, sample_rate=sample_rate, scaling=scaling)
def train(data, file_name, params, rand_params, num_epochs=50, batch_size=128, is_batch=True,
dropout=0.0, data_format=None, init_model=None, train_temp=1, data_gen=None):
"""
Standard neural network training procedure.
"""
_input = Input(shape=data.train_data.shape[1:])
x = _input
x = Conv2D(params[0], (3, 3), padding="same", data_format=data_format)(x)
x = Activation('relu')(x)
x = Lambda(function=random_spike, arguments={
"sample_rate": rand_params[0], "scaling": rand_params[1], "is_batch": is_batch})(x)
x = Conv2D(params[1], (3, 3), padding="same", data_format=data_format)(x)
x = Activation('relu')(x)
x = Lambda(function=random_spike, arguments={
"sample_rate": rand_params[2], "scaling": rand_params[3], "is_batch": is_batch})(x)
x = MaxPooling2D(pool_size=(2, 2), data_format=data_format)(x)
x = Lambda(function=random_spike, arguments={
"sample_rate": rand_params[4], "scaling": rand_params[5], "is_batch": is_batch})(x)
x = Conv2D(params[2], (3, 3), padding="same", data_format=data_format)(x)
x = Activation('relu')(x)
x = Lambda(function=random_spike, arguments={
"sample_rate": rand_params[6], "scaling": rand_params[7], "is_batch": is_batch})(x)
x = Conv2D(params[3], (3, 3), padding="same", data_format=data_format)(x)
x = Activation('relu')(x)
x = Lambda(function=random_spike, arguments={
"sample_rate": rand_params[8], "scaling": rand_params[9], "is_batch": is_batch})(x)
x = MaxPooling2D(pool_size=(2, 2), data_format=data_format)(x)
x = Lambda(function=random_spike, arguments={
"sample_rate": rand_params[10], "scaling": rand_params[11], "is_batch": is_batch})(x)
x = Flatten()(x)
x = Dense(params[4])(x)
x = Activation('relu')(x)
x = Lambda(function=random_spike, arguments={
"sample_rate": rand_params[12], "scaling": rand_params[13], "is_batch": is_batch})(x)
if dropout > 0:
x = Dropout(dropout)(x, training=True)
x = Dense(params[5])(x)
x = Activation('relu')(x)
x = Lambda(function=random_spike, arguments={
"sample_rate": rand_params[14], "scaling": rand_params[15], "is_batch": is_batch})(x)
x = Dense(10)(x)
model = Model(_input, x)
model.summary()
def fn(correct, predicted):
return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
logits=predicted/train_temp)
if init_model is not None:
model.load_weights(init_model)
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss=fn,
optimizer=sgd,
metrics=['accuracy'])
if data_gen is None:
model.fit(data.train_data, data.train_labels,
batch_size=batch_size,
validation_data=(data.test_data, data.test_labels),
epochs=num_epochs,
shuffle=True)
else:
data_flow = data_gen.flow(data.train_data, data.train_labels, batch_size=batch_size, shuffle=True)
model.fit_generator(data_flow,
steps_per_epoch=len(data_flow),
validation_data=(data.validation_data, data.validation_labels),
epochs=num_epochs,
shuffle=True)
if file_name is not None:
model.save(file_name)
# save idx
utils.save_model_idx(file_name, data)
return model
def parse_rand_spike(_str):
_str = _str.split(',')
return [float(x) for x in _str]
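# A quick shape check for the CLI parameter parsed above: the string is a
# comma-separated list of floats, consumed pairwise as (sample_rate, scaling)
# by the Lambda layers in train().
def sketch_parse_rand_spike():
    assert parse_rand_spike('0.05,1.0') == [0.05, 1.0]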
parser = argparse.ArgumentParser(description='Train mnist model')
parser.add_argument('--data_dir', help='data dir, required', type=str, default=None)
parser.add_argument('--data_name', help='data name, required', type=str, default=None)
parser.add_argument('--model_dir', help='save model directory, required', type=str, default=None)
parser.add_argument('--model_name', help='save model name, required', type=str, default=None)
parser.add_argument('--validation_size', help='size of validation dataset', type=int, default=5000)
parser.add_argument('--random_spike', help='parameter used for random spiking', type=str, default=None)
parser.add_argument('--random_spike_batch', help='whether to use batch-wise random noise', type=str, default='yes')
parser.add_argument('--dropout', help='dropout rate', type=float, default=0.5)
parser.add_argument('--rotation', help='rotation angle', type=float, default=10)
parser.add_argument('--gpu_idx', help='gpu index', type=int, default=0)
parser.add_argument('--data_format', help='channels_last or channels_first', type=str, default=CHANNELS_FIRST)
parser.add_argument('--is_dis', help='whether to use distillation training', type=str, default='no')
parser.add_argument('--is_trans', help='whether to do transfer training using soft labels', type=str, default='no')
parser.add_argument('--is_data_gen', help='whether to train on a data generator (zoom, rotation)', type=str, default='no')
parser.add_argument('--trans_model', help='transfer model name', type=str, default='no')
parser.add_argument('--trans_drop', help='dropout trans model name', type=float, default=0.5)
parser.add_argument('--trans_random_spike', help='random spiking parameter used for trans model',
type=str, default=None)
parser.add_argument('--train_sel_rand', help='whether to randomly select the training data', type=str, default='no')
parser.add_argument('--train_size', help='number of training examples', type=int, default=0)
parser.add_argument('--pre_idx', help='predefined idx, duplicated training dataset', type=str, default=None)
parser.add_argument('--ex_data_dir', help='extra data dir, required', type=str, default=None)
parser.add_argument('--ex_data_name', help='extra data name, required', type=str, default=None)
parser.add_argument('--ex_data_size', help='number of extra training examples', type=int, default=0)
parser.add_argument('--ex_data_sel_rand', help='whether to randomly select the extra training data',
                    type=str, default='no')
args = parser.parse_args()
data_dir = args.data_dir
data_name = args.data_name
save_model_dir = args.model_dir
save_model_name = args.model_name
validation_size = args.validation_size
train_size = args.train_size
train_sel_rand = args.train_sel_rand == 'yes'
para_random_spike = None if args.random_spike is None else parse_rand_spike(args.random_spike)
_is_batch = args.random_spike_batch == 'yes'
dropout = args.dropout
gpu_idx = args.gpu_idx
rotation = args.rotation
data_format = args.data_format
is_distillation = args.is_dis == 'yes'
is_data_gen = args.is_data_gen == 'yes'
ex_data_dir = args.ex_data_dir
ex_data_name = args.ex_data_name
ex_data_size = args.ex_data_size
ex_data_sel_rand = args.ex_data_sel_rand == 'yes'
pre_idx_path = args.pre_idx
setup_visibile_gpus(str(gpu_idx))
k.tensorflow_backend.set_session(tf.Session(config=gpu_config))
if not os.path.exists(save_model_dir):
os.makedirs(save_model_dir)
data = MNIST(data_dir, data_name, validation_size, model_meta=model_mnist_meta,
input_data_format=CHANNELS_LAST, output_data_format=data_format,
train_size=train_size, train_sel_rand=train_sel_rand)
if pre_idx_path is not None:
pre_idx = utils.load_model_idx(pre_idx_path)
data.apply_pre_idx(pre_idx)
if ex_data_dir is not None and ex_data_name is not None and ex_data_size > 0:
data.append_train_data(ex_data_dir, ex_data_name, ex_data_size,
input_data_format=CHANNELS_LAST, output_data_format=data_format, sel_rand=ex_data_sel_rand)
# config data if using transfer training here
is_trans = args.is_trans == 'yes'
if is_trans:
print("Get the soft label of the transfer model")
trans_random_spike = None if args.trans_random_spike is None else parse_rand_spike(args.trans_random_spike)
trans_model = MNISTModel(args.trans_model, None, output_logits=False,
input_data_format=data_format, data_format=data_format, dropout=0,
rand_params=trans_random_spike, is_batch=True)
predicted = trans_model.model.predict(data.train_data, batch_size=500, verbose=1)
train_data_acc = np.mean(np.argmax(predicted, 1) == np.argmax(data.train_labels, 1))
data.train_labels = predicted
print("trasfer model acc on training data:", train_data_acc)
if is_data_gen:
data_gen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=rotation,
shear_range=0.2,
zoom_range=0.2,
fill_mode='reflect',
width_shift_range=4,
height_shift_range=4,
horizontal_flip=False,
vertical_flip=False,
data_format=data_format
)
else:
data_gen = None
if is_distillation:
print("train init model")
train(data, save_model_dir + "/" + save_model_name + '_init',
[32, 32, 64, 64, 200, 200], para_random_spike, num_epochs=1, is_batch=_is_batch,
data_format=data_format, dropout=dropout, data_gen=data_gen)
print("train teacher model")
train(data, save_model_dir + "/" + save_model_name + '_teacher',
[32, 32, 64, 64, 200, 200], para_random_spike, num_epochs=50, is_batch=_is_batch,
data_format=data_format, dropout=dropout,
init_model=save_model_dir + "/" + save_model_name + '_init', train_temp=100, data_gen=data_gen)
# evaluate label with teacher model
model_teacher = MNISTModel(os.path.join(save_model_dir, save_model_name + '_teacher'), None, output_logits=True,
input_data_format=data_format, data_format=data_format, dropout=0,
rand_params=para_random_spike, is_batch=True)
predicted = model_teacher.model.predict(data.train_data, batch_size=500, verbose=1)
train_data_acc = np.mean(np.argmax(predicted, 1) == np.argmax(data.train_labels, 1))
print("train teacher acc:", train_data_acc)
with tf.Session() as sess:
y = sess.run(tf.nn.softmax(predicted/100))
print(y)
data.train_labels = y
print("train student model")
train(data, save_model_dir + "/" + save_model_name,
[32, 32, 64, 64, 200, 200], para_random_spike, num_epochs=50, is_batch=_is_batch,
data_format=data_format, dropout=dropout,
init_model=save_model_dir + "/" + save_model_name + '_init', train_temp=100, data_gen=data_gen)
else:
train(data, save_model_dir + "/" + save_model_name,
[32, 32, 64, 64, 200, 200], para_random_spike, num_epochs=50, is_batch=_is_batch,
data_format=data_format, dropout=dropout, data_gen=data_gen)
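# Example invocation of this training script (all paths, the script name, and
# the spike values below are hypothetical; the flags mirror the argparse
# definitions above, and --random_spike expects 16 comma-separated floats
# because rand_params[0] through rand_params[15] are consumed):
#
#   python train_mnist_rs.py --data_dir ./data --data_name mnist \
#       --model_dir ./models --model_name mnist_rs \
#       --random_spike "0.1,1.0,0.1,1.0,0.1,1.0,0.1,1.0,0.1,1.0,0.1,1.0,0.1,1.0,0.1,1.0" \
#       --dropout 0.5 --gpu_idx 0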
# ===== 6ea71b4513f1f9f11b82f5034de5e9e21242e450 | link_crawler.py | Stevearzh/greedy-spider | MIT | 3,151 bytes =====
import datetime
import re
import time
import urllib
from urllib import robotparser
from urllib.parse import urlparse
from downloader import Downloader
DEFAULT_DELAY = 5
DEFAULT_DEPTH = -1
DEFAULT_URL = -1
DEFAULT_AGENT = 'wswp'
DEFAULT_RETRY = 1
DEFAULT_TIMEOUT = 60
DEFAULT_IGNORE_ROBOTS = False
def link_crawler(seed_url, link_regex=None, delay=DEFAULT_DELAY, max_depth=DEFAULT_DEPTH,
max_urls=DEFAULT_URL, user_agent=DEFAULT_AGENT, proxies=None, num_retries=DEFAULT_RETRY,
timeout=DEFAULT_TIMEOUT, ignore_robots=DEFAULT_IGNORE_ROBOTS, scrape_callback=None, cache=None):
'''
Crawl from the given seed URL following links matched by link_regex
'''
    # the queue of URLs that still need to be crawled
    crawl_queue = [seed_url]
    # the URLs that have been seen and at what depth
    seen = {seed_url: 0}
    # track how many URLs have been downloaded
num_urls = 0
rp = get_robots(seed_url)
D = Downloader(delay=delay, user_agent=user_agent, proxies=proxies,
num_retries=num_retries, timeout=timeout, cache=cache)
while crawl_queue:
url = crawl_queue.pop()
depth = seen[url]
# check url passes robots.txt restrictions
if ignore_robots or rp.can_fetch(user_agent, url):
html = D(url)
links = []
if scrape_callback:
links.extend(scrape_callback(url, html) or [])
if depth != max_depth:
# can still crawl further
if link_regex:
# filter for links matching our regular expression
links.extend(link for link in get_links(html) if \
re.match(link_regex, link))
            for link in links:
                link = normalize(seed_url, link)
                # check whether we have already crawled this link
                if link not in seen:
                    seen[link] = depth + 1
                    # check that the link is within the same domain
                    if same_domain(seed_url, link):
                        # success: add this new link to the queue
                        crawl_queue.append(link)
            # check whether we have reached the download maximum
            num_urls += 1
            if num_urls == max_urls:
                break
else:
print('Blocked by robots.txt', url)
def normalize(seed_url, link):
'''
Normalize this URL by removing hash and adding domain
'''
link, _ = urllib.parse.urldefrag(link) # remove hash to avoid duplicates
return urllib.parse.urljoin(seed_url, link)
def same_domain(url1, url2):
'''
    Return True if both URLs belong to the same domain
'''
return urllib.parse.urlparse(url1).netloc == urllib.parse.urlparse(url2).netloc
def get_robots(url):
'''
Initialize robots parser for this domain
'''
rp = robotparser.RobotFileParser()
rp.set_url(urllib.parse.urljoin(url, '/robots.txt'))
rp.read()
return rp
def get_links(html):
'''
Return a list of links from html
'''
# a regular expression to extract all links from the webpage
webpage_regex = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
# list of all links from the webpage
return webpage_regex.findall(html)
if __name__ == '__main__':
# execute only if run as a script
pass
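    # Hypothetical usage sketch (the seed URL and regex are placeholders; the
    # Downloader class must come from the accompanying downloader module):
    #
    #   link_crawler('http://example.webscraping.com', link_regex='/(index|view)',
    #                delay=5, max_depth=2, user_agent='wswp')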
# ===== 6ea8ed0768136e88f53d0dbb5391ad2ceecced0d | python/create_account_with_captcha.py | shivanshbindal9/MediaWiki-Action-API-Code-Samples | MIT | 3,914 bytes =====
#!/usr/bin/python3
"""
create_account_with_captcha.py
MediaWiki Action API Code Samples
Demo of `createaccount` module: Create an account on a wiki with a special
authentication extension installed. This example considers a case of a wiki
where captcha is enabled through extensions like ConfirmEdit
(https://www.mediawiki.org/wiki/Extension:ConfirmEdit)
MIT license
"""
import requests
from flask import Flask, render_template, flash, request
S = requests.Session()
WIKI_URL = "https://test.wikipedia.org"
API_ENDPOINT = WIKI_URL + "/w/api.php"
# App config.
DEBUG = True
APP = Flask(__name__)
APP.config.from_object(__name__)
APP.config['SECRET_KEY'] = 'enter_your_secret_key'
@APP.route("/", methods=['GET', 'POST'])
def show_form():
""" Render form template and handle form submission request """
fields = get_form_fields()
captcha = fields['CaptchaAuthenticationRequest']
captcha_url = WIKI_URL + captcha['captchaInfo']['value']
captcha_id = captcha['captchaId']['value']
display_fields = []
user_fields = []
captcha_fields = []
for field in fields:
for name in fields[field]:
details = {
'name': name,
'type': fields[field][name]['type'],
'label': fields[field][name]['label']
}
if field != "CaptchaAuthenticationRequest":
user_fields.append(details)
else:
if name == 'captchaWord':
captcha_fields.append(details)
display_fields = user_fields + captcha_fields
if request.method == 'POST':
create_account(request.form, captcha_id)
return render_template('create_account_form.html', \
captcha=captcha_url, fields=display_fields)
def get_form_fields():
""" Fetch the form fields from `authmanagerinfo` module """
result = {}
response = S.get(url=API_ENDPOINT, params={
'action': 'query',
'meta': 'authmanagerinfo',
'amirequestsfor': 'create',
'format': 'json'
})
data = response.json()
query = data and data['query']
authmanagerinfo = query and query['authmanagerinfo']
fields = authmanagerinfo and authmanagerinfo['requests']
for field in fields:
if field['id'] in ('MediaWiki\\Auth\\UserDataAuthenticationRequest', \
'CaptchaAuthenticationRequest', 'MediaWiki\\Auth\\PasswordAuthenticationRequest'):
result[field['id']] = field['fields']
return result
def create_account(form, captcha_id):
""" Send a post request along with create account token, user information
and return URL to the API to create an account on a wiki """
createtoken = fetch_create_token()
response = S.post(url=API_ENDPOINT, data={
'action': 'createaccount',
'createtoken': createtoken,
'username': form['username'],
'password': form['password'],
'retype': form['retype'],
'email': form['email'],
'createreturnurl': 'http://127.0.0.1:5000/',
'captchaId': captcha_id,
'captchaWord': form['captchaWord'],
'format': 'json'
})
data = response.json()
createaccount = data['createaccount']
if createaccount['status'] == "PASS":
flash('Success! An account with username ' + \
form['username'] + ' has been created!')
else:
flash('Oops! Something went wrong -- ' + \
createaccount['messagecode'] + "." + createaccount['message'])
def fetch_create_token():
""" Fetch create account token via `tokens` module """
response = S.get(url=API_ENDPOINT, params={
'action': 'query',
'meta': 'tokens',
'type': 'createaccount',
'format': 'json'
})
data = response.json()
return data['query']['tokens']['createaccounttoken']
if __name__ == "__main__":
APP.run()
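# To try this locally (assuming Flask's default development server on port
# 5000, which matches the 'createreturnurl' used above):
#
#   $ python create_account_with_captcha.py
#   then open http://127.0.0.1:5000/ in a browser.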
# ===== 6eabff19e23935994d6606bc2eb537d62eca55d2 | referralnote/urls.py | fahimfarhan/cancer-web-app | MIT | 498 bytes | 5 stars =====
from django.conf.urls import url
from referralnote import views
app_name = 'referral_note'
#view_obj = views.ReferralNotes()
urlpatterns = [
url(r'^(?P<p_id>[0-9]+)/delete_referralnote/(?P<notenum>[0-9]+)$', views.delete_refnote,
name='delete_referralnote'),
url(r'^(?P<p_id>[0-9]+)/edit_referralnote/(?P<notenum>[0-9]+)$', views.edit_referralnote, name='edit_referralnote'),
url(r'^(?P<p_id>[0-9]+)/new_referralnote/$', views.new_referralnote, name='new_referralnote'),
]
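# Example paths these patterns resolve (p_id and notenum are the integer
# captures; the concrete numbers are hypothetical):
#   /12/delete_referralnote/3  -> views.delete_refnote(request, p_id='12', notenum='3')
#   /12/edit_referralnote/3    -> views.edit_referralnote(request, p_id='12', notenum='3')
#   /12/new_referralnote/      -> views.new_referralnote(request, p_id='12')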
# ===== 6eac2fd4f481feb51a0c938cf7a2f928ddd2ec46 | beetlesafari/io/_imread_raw.py | haesleinhuepf/beetle-safari | BSD-3-Clause | 742 bytes =====
# thanks to max9111, https://stackoverflow.com/questions/41651998/python-read-and-convert-raw-3d-image-file
import numpy as np
from functools import lru_cache
@lru_cache(maxsize=2)
def imread_raw(filename : str, width : int = 1, height : int = 1, depth : int = 1, dtype = np.uint16):
"""Loads a raw image file (3D) with given dimensions from disk
Parameters
----------
filename
width
height
depth
dtype
Returns
-------
numpy array with given dimensions containing pixels from specified file
"""
    # use a context manager so the file is closed even if reading fails
    with open(filename, 'rb') as f:  # open the raw file for binary reading
        img_arr = np.fromfile(f, dtype=dtype)
    img_arr = img_arr.reshape(depth, height, width)
return img_arr
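# Hypothetical usage (the file name and dimensions are placeholders; the raw
# file must contain exactly depth * height * width values of the given dtype):
#
#   vol = imread_raw("stack.raw", width=512, height=512, depth=100)
#   assert vol.shape == (100, 512, 512)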
# ===== 6eaef18c836b626ea67de2012928ab0468afa91b | highest_iso.py | chem-william/helix_fit | MIT | 10,336 bytes =====
import os
from ase.visualize import view
from mpl_toolkits.mplot3d import Axes3D # noqa
from scipy.optimize import curve_fit
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set(
style="ticks",
rc={
"font.family": "Arial",
"font.size": 40,
"axes.linewidth": 2,
"lines.linewidth": 5,
},
font_scale=3.5,
palette=sns.color_palette("Set2")
)
c = ["#007fff", "#ff3616", "#138d75", "#7d3c98", "#fbea6a"] # Blue, Red, Green, Purple, Yellow
import utilities
from Helix import Helix
import matplotlib
matplotlib.use("Qt5Agg")
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return rho, phi
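# Quick sanity checks of the polar conversion above:
assert np.allclose(cart2pol(1.0, 0.0), (1.0, 0.0))
assert np.allclose(cart2pol(0.0, 2.0), (2.0, np.pi / 2))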
def center_atoms(atoms, center):
x = center[0]
y = center[1]
z = center[2]
# Centering atoms around given atom
for idx, atom in enumerate(atoms):
atoms[idx].position[0] = atom.position[0] - x
atoms[idx].position[1] = atom.position[1] - y
atoms[idx].position[2] = atom.position[2] - z
return atoms
def print_jmol_str(line_values, center):
file = "analyzed/diffp_2me_homo-1"
print("*"*25)
print(f"Writing to {file}")
print("*"*25)
curve_str = f"draw curve1 CURVE curve width 0.3"
for value in line_values:
x = value[0] + center[0]
y = value[1] + center[1]
z = value[2] + center[2]
curve_str += f" {{ {x} {y} {z} }}"
with open(f"{file}/jmol_export.spt", "a") as f:
f.write(curve_str)
print(curve_str)
def remove_outlier(ordered):
# Not elegant, possibly slow, but it works
temp = []
for idx, value in enumerate(ordered[:, 2]):
if idx < len(ordered[:, 2]) - 1:
temp.append(abs(value - ordered[idx + 1, 2]))
std = np.std(temp)
mean = np.mean(temp)
# It lies much further down the z-axis
# than the rest of the points
if not (mean - std) < temp[0] < (mean + std):
return ordered[1:]
# If no outliers is found, return the original array
else:
return ordered
center_bottom_top = np.array([2, 9, 7])
handedness = None
truncation = [None, None]
file = "./8cum_me_homo_homo/homo.cube"
ax = plt.axes(projection='3d')
radius = 1.4
limits = 3
# Check that the analysis hasn't already been done
names = file.split("/")
folder = "/".join(names[-3:-1])
print(f"foldername: {folder}")
if os.path.exists(folder):
print(f"Found existing data files in {folder}")
planes = np.load(folder + "/planes.npy", allow_pickle=True)
atoms, _, _, center = np.load(
folder + "/atom_info.npy", allow_pickle=True
)
xyz_vec = np.load(folder + "/xyz_vec.npy", allow_pickle=True)
else:
atoms, all_info, xyz_vec = utilities.read_cube(file)
# Sort the data after z-value
all_info = all_info[all_info[:, 2].argsort()]
# Center of the molecule is chosen to be Ru
# center = atoms[3].position
center = atoms[center_bottom_top[0]].position
all_info[:, :3] = all_info[:, :3] - center
atoms = center_atoms(atoms, center)
planes = []
plane = []
prev_coord = all_info[0]
for coordinate in tqdm(all_info, desc="Finding planes.."):
if np.equal(coordinate[2], prev_coord[2]):
# we're in the same plane so add the coordinate
plane.append([coordinate[0],
coordinate[1],
coordinate[2],
coordinate[3]])
else:
plane = np.array(plane)
# Drop coordinates with isovalues == 0.0
plane = plane[np.where(plane[:, 3] != 0.0)]
if plane.size != 0:
planes.append(plane)
plane = []
prev_coord = coordinate
planes = np.array(planes)
mean_z = []
ordered = []
all_r = []
bottom_carbon = atoms[center_bottom_top[1]].position
top_carbon = atoms[center_bottom_top[2]].position
print('Cleaning values..')
for idx, plane in enumerate(planes):
if top_carbon[2] > plane[0, 2] > bottom_carbon[2]:
if idx < len(planes) - 1:
            # Find points with the most positive isovalue.
            # In rare cases there might be the same maximum at two locations;
            # that's why I just take the first one with [0][0].
maximum = np.amax(plane[:, 3])
max_index = np.where(plane[:, 3] == maximum)[0][0]
next_plane = planes[idx + 1]
next_maximum = np.amax(next_plane[:, 3])
next_index = np.where(next_plane[:, 3] == next_maximum)[0][0]
# Uncomment to find points with the most negative isovalue
# minimum = np.amin(plane[:, 3])
# min_index = np.where(plane[:, 3] == minimum)
# next_plane = planes[idx + 1]
# next_minimum = np.amin(next_plane[:, 3])
# next_index = np.where(next_plane[:, 3] == next_minimum)
current_iso_idx = max_index
next_iso_idx = next_index
# Check if point is within certain radius of the helical axis
if cart2pol(plane[current_iso_idx, 0], plane[current_iso_idx, 1])[0] < radius:
current_x = plane[current_iso_idx, 0].item()
current_y = plane[current_iso_idx, 1].item()
current_z = plane[current_iso_idx, 2].item()
current_iso = plane[current_iso_idx, 3].item()
next_x = next_plane[next_index, 0].item()
next_y = next_plane[next_index, 1].item()
next_z = next_plane[next_index, 2].item()
next_iso = next_plane[next_iso_idx, 3].item()
# Current point is beneath the next point
if (current_x == next_x) & (current_y == next_y):
delta_z = abs(next_z - current_z)
                        # Are they directly on top of each other?
if round(delta_z, 4) <= 2*round(xyz_vec[2], 4):
mean_z.append(current_z)
# They are not directly on top of each other
else:
ax.scatter(
plane[current_iso_idx, 0],
plane[current_iso_idx, 1],
plane[current_iso_idx, 2],
# c='purple',
c=c[0],
)
# To be used as an estimate of
# the radius when fitting the helix
all_r.append(
cart2pol(plane[current_iso_idx, 0], plane[current_iso_idx, 1])[0]
)
mean_z.append(current_z)
ordered.append(
[current_x, current_y, np.mean(mean_z), current_iso]
)
mean_z = []
# TODO: Maybe I'm skipping the last point? Does it even matter?
# else:
# prev_x = current_x
# prev_y = current_y
# prev_z = current_z
# prev_iso = current_iso
# current_x = plane[max_index, 0].item()
# current_y = plane[max_index, 1].item()
# current_z = plane[max_index, 2].item()
# current_iso = plane[max_index, 3].item()
# if cart2pol(current_x, current_y)[0] < radius:
# all_r.append(cart2pol(plane[max_index, 0], plane[max_index, 1])[0])
# if (current_x == prev_x) & (current_y == prev_y):
# delta_z = abs(prev_z - current_z)
# # Are they directly on top of each other?
# if round(delta_z, 4) <= 2*round(z_vec, 4):
# mean_z.append(current_z)
# ordered.append([current_x,
# current_y,
# np.mean(mean_z),
# current_iso])
# # They are not directly on top of each other
# else:
# mean_z.append(current_z)
# ordered.append([current_x,
# current_y,
# np.mean(mean_z),
# current_iso])
# mean_z = []
ordered = np.array(ordered)
mean_radius = np.mean(all_r)
# Check if the first point is an outlier
ordered = remove_outlier(ordered)
# ordered, mean_radius = np.load("orbital_16_helix.npy", allow_pickle=True)
# ax.plot([0, ordered[0, 0]], [0, ordered[0, 1]], [0, 0])
# Line that connects each data point
# ax.plot(
# ordered[truncation[0]:truncation[1], 0],
# ordered[truncation[0]:truncation[1], 1],
# ordered[truncation[0]:truncation[1], 2],
# color='blue'
# )
print('Fitting datapoints to helix..')
helix = Helix(
ordered[0:, :3],
fitting_method='ampgo',
radius=mean_radius,
handedness=handedness,
truncation=truncation,
)
out = helix.fit_helix()
fitted_values = helix.fitted_values
# print_jmol_str(fitted_values, center)
print('RMSD: {}'.format(helix.RMSD))
print(out)
print('handedness: {}'.format(helix.handedness))
delta_z = helix.get_statistics()
print('std: {}'.format(np.std(delta_z)))
print('mean: {}'.format(np.mean(delta_z)))
print(f'p-value: {helix.p_value}')
ax.plot(
fitted_values[:, 0],
fitted_values[:, 1],
fitted_values[:, 2],
)
ax.plot((0, helix.a[0]), (0, helix.a[1]), (0, helix.a[2]))
ax.plot((0, helix.v[0]), (0, helix.v[1]), (0, helix.v[2]))
ax.plot((0, helix.w[0]), (0, helix.w[1]), (0, helix.w[2]), color='black')
print('Plotting atoms..')
for atom in atoms:
if atom.symbol == 'C':
ax.scatter(
atom.position[0],
atom.position[1],
atom.position[2],
c='black'
)
if atom.symbol == 'Ru':
ax.scatter(
atom.position[0],
atom.position[1],
atom.position[2],
c='turquoise'
)
# if atom.symbol == 'P':
# ax.scatter3D(atom.position[0],
# atom.position[1],
# atom.position[2],
# c='orange')
ax.set_xlim([-limits, limits])
ax.set_ylim([-limits, limits])
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
plt.show()
# ===== 6eb3861496ef592c07af6ae7b192e47f4452b309 | secure_ml/attack/model_inversion.py | Koukyosyumei/secure_ml | MIT | 1,481 bytes | 10 stars =====
import torch
from ..attack.base_attack import BaseAttacker
class Model_inversion(BaseAttacker):
def __init__(self, target_model, input_shape):
"""implementation of model inversion attack
reference https://dl.acm.org/doi/pdf/10.1145/2810103.2813677
Args:
target_model: model of the victim
            input_shape: input shape of the target model
Attributes:
target_model: model of the victim
            input_shape: input shape of the target model
"""
super().__init__(target_model)
self.input_shape = input_shape
def attack(self, target_label,
lam, num_itr, process_func=lambda x: x):
"""Execute the model inversion attack on the target model.
Args:
            target_label (int): target label
lam (float) : step size
num_itr (int) : number of iteration
process_func (function) : default is identity function
Returns:
x_numpy (np.array) :
loss ([float]) :
"""
log = []
x = torch.zeros(self.input_shape, requires_grad=True)
for i in range(num_itr):
c = process_func(1 - self.target_model(x)[:, [target_label]])
c.backward()
grad = x.grad
            with torch.no_grad():
                x -= lam * grad
                # reset the accumulated gradient before the next iteration
                x.grad.zero_()
            log.append(c.item())
x_numpy = x.to('cpu').detach().numpy().copy()
return x_numpy, log
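# Hypothetical usage sketch (target_net stands for any trained classifier that
# maps a (1, 1, 28, 28) tensor to class probabilities; all names and values
# below are placeholders):
#
#   attacker = Model_inversion(target_net, input_shape=(1, 1, 28, 28))
#   x_rec, losses = attacker.attack(target_label=3, lam=0.1, num_itr=100)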
# ===== 6eb49308c616d061678bacc901cf34896446490d | tests/integration_tests/data/service_utils_integration_test/main_service.py | ZacharyATanenbaum/service_framework | MIT | 448 bytes | 1 star =====
""" Basic service for testing the service_utils run_main """
def main(to_send, config):
print('Hello World Main...')
connection_models = {
'out': {
'out_connection_1': {
'connection_type': 'requester',
'required_arguments': {
'this_is_a_test_arg': str,
},
'required_return_arguments': {
'this_is_a_return_arg': str,
},
}
}
}
# ===== 6eba2b9c96279d6bf89fd584cbe501fd83d1e0c7 | wtdb_test.py | hughgrigg/wtdb | MIT | 2,708 bytes =====
import wtdb
import unittest
class TestWtdbFunctions(unittest.TestCase):
def test_n_swaps_zero(self):
self.assertEqual(
frozenset(),
wtdb.n_swaps('foo', 'bar', 0),
)
def test_n_swaps_single(self):
self.assertSequenceEqual(
{
frozenset({'bar', 'foo'}), frozenset({'boo', 'far'}),
frozenset({'oo', 'fbar'}), frozenset({'bfoo', 'ar'}),
},
wtdb.n_swaps('foo', 'bar', 1),
)
def test_n_swaps_one_double(self):
self.assertSequenceEqual(
{
frozenset({'strain', 'team'}), frozenset({'train', 'steam'}),
frozenset({'srain', 'tteam'}), frozenset({'trsteam', 'ain'}),
frozenset({'stain', 'tream'}), frozenset({'tsteam', 'rain'}),
frozenset({'sttrain', 'eam'}), frozenset({'sain', 'trteam'}),
},
wtdb.n_swaps('steam', 'train', 2),
)
def test_order_pair(self):
self.assertSequenceEqual(
('national', 'rail'),
wtdb.order_pair(('rail', 'national'))
)
def test_order_pair_same_length(self):
self.assertSequenceEqual(
('steam', 'train'),
wtdb.order_pair(('train', 'steam'))
)
class TestWordSet(unittest.TestCase):
def test_find_swaps_none(self):
word_set = wtdb.WordSet()
word_set.add('foo')
word_set.add('bar')
self.assertListEqual([], list(word_set.find_swaps('hello')))
def test_find_swaps_single_letter(self):
word_set = wtdb.WordSet()
word_set.add('national')
word_set.add('rail')
word_set.add('rational')
self.assertListEqual(
[
(('national', 'rail'), ('rational', 'nail')),
],
sorted(word_set.find_swaps('nail')),
)
def test_find_swaps_double_letter(self):
word_set = wtdb.WordSet()
word_set.add('steam')
word_set.add('train')
word_set.add('team')
self.assertListEqual(
[
(('steam', 'train'), ('strain', 'team')),
],
sorted(word_set.find_swaps('strain')),
)
def test_validate_ok(self):
word_set = wtdb.WordSet()
word_set.add('foo')
word_set.add('bar')
self.assertTrue(word_set.validate('foo', 'bar'))
def test_validate_bad(self):
word_set = wtdb.WordSet()
word_set.add('foo')
word_set.add('bar')
self.assertFalse(word_set.validate('foo', 'bar', 'foobar'))
if __name__ == '__main__':
import doctest
doctest.testmod(wtdb)
unittest.main()
# ===== 6ebb9230b1ec2e150157978f3bf6129f5b2db4e9 | envisionpy/processor_network/BandstructureNetworkHandler.py | Vevn/ENVISIoN | BSD-2-Clause | 5,423 bytes =====
# ENVISIoN
#
# Copyright (c) 2019 Jesper Ericsson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##############################################################################################
# TODO: add hdf5 validation
import sys,os,inspect
import inviwopy
import numpy as np
import h5py
from .LinePlotNetworkHandler import LinePlotNetworkHandler
class BandstructureNetworkHandler(LinePlotNetworkHandler):
""" Handler class for charge visualization network.
Sets up and manages the charge visualization
"""
def __init__(self, hdf5_path, inviwoApp):
LinePlotNetworkHandler.__init__(self, inviwoApp)
self.setup_bandstructure_network(hdf5_path)
def get_ui_data(self):
# Return data required to fill user interface
return [
"bandstructure",
LinePlotNetworkHandler.get_ui_data(self)
]
# ------------------------------------------
# ------- Network building functions -------
def setup_bandstructure_network(self, hdf5_path, xpos=0, ypos=0):
with h5py.File(hdf5_path,"r") as h5:
# A bool that tells if the band structure should be normalized around the fermi energy.
has_fermi_energy = "/FermiEnergy" in h5
# Start building the Inviwo network.
h5source = self.add_h5source(hdf5_path, xpos, ypos)
ypos += 75
path_selection = self.add_processor("org.inviwo.hdf5.PathSelection", "Select Bandstructure", xpos, ypos)
self.network.addConnection(h5source.getOutport("outport"),
path_selection.getInport("inport"))
# if has_fermi_energy:
# fermi_point = self.add_processor("org.inviwo.HDF5ToPoint", "Fermi energy", xpos + 175, ypos)
# self.network.addConnection(h5source.getOutport("outport"),
# fermi_point.getInport("hdf5HandleFlatMultiInport"))
ypos += 75
all_children_processor = self.add_processor("org.inviwo.HDF5PathSelectionAllChildren", "Select all bands", xpos, ypos)
self.network.addConnection(path_selection.getOutport("outport"),
all_children_processor.getInport("hdf5HandleInport"))
ypos += 75
HDF5_to_function = self.add_processor("org.inviwo.HDF5ToFunction", "Convert to function", xpos, ypos)
self.network.addConnection(all_children_processor.getOutport("hdf5HandleVectorOutport"),
HDF5_to_function.getInport("hdf5HandleFlatMultiInport"))
ypos += 75
function_to_dataframe = self.get_processor("Function to dataframe")
self.network.addConnection(HDF5_to_function.getOutport("functionVectorOutport"),
function_to_dataframe.getInport("functionFlatMultiInport"))
# if has_fermi_energy:
# self.network.addConnection(fermi_point.getOutport("pointVectorOutport"),
# self.get_processor("Line plot").getInport("pointInport"))
if has_fermi_energy:
self.set_title("Energy - Fermi energy [eV]")
else:
self.set_title("Energy [eV]")
# energy_text_processor.font.fontSize.value = 20
# energy_text_processor.position.value = inviwopy.glm.vec2(0.31, 0.93)
# energy_text_processor.color.value = inviwopy.glm.vec4(0,0,0,1)
# Start modifying properties.
path_selection.selection.value = '/Bandstructure/Bands'
# HDF5_to_function.yPathSelectionProperty.value = '/Energy'
# self.toggle_all_y(True)
self.set_y_selection_type(2)
# background_processor.bgColor1.value = inviwopy.glm.vec4(1)
# background_processor.bgColor2.value = inviwopy.glm.vec4(1)
# canvas_processor.inputSize.dimensions.value = inviwopy.glm.ivec2(900, 700)
# if has_fermi_energy:
# fermi_point.pathSelectionProperty.value = '/FermiEnergy'
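# Hypothetical usage inside an Inviwo Python session (the HDF5 path is a
# placeholder; the second argument must be a running Inviwo application):
#
#   handler = BandstructureNetworkHandler("bandstructure.hdf5", inviwopy.app)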
# ===== 6ebdf946af2b4bf33b3b815af562307b2b1a73fd | smt/decoder/stackdecoder.py | kenkov/smt | MIT | 26,442 bytes | 83 stars =====
#! /usr/bin/env python
# coding:utf-8
from __future__ import division, print_function
import math
# sqlalchemy
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy import Column, TEXT, REAL, INTEGER
from sqlalchemy.orm import sessionmaker
from smt.db.tables import Tables
#from pprint import pprint
# prepare classes for sqlalchemy
class Phrase(declarative_base()):
__tablename__ = "phrase"
id = Column(INTEGER, primary_key=True)
lang1p = Column(TEXT)
lang2p = Column(TEXT)
class TransPhraseProb(declarative_base()):
__tablename__ = "phraseprob"
id = Column(INTEGER, primary_key=True)
lang1p = Column(TEXT)
lang2p = Column(TEXT)
p1_2 = Column(REAL)
p2_1 = Column(REAL)
def phrase_prob(lang1p, lang2p,
transfrom=2,
transto=1,
db="sqlite:///:memory:",
init_val=1.0e-10):
"""
"""
engine = create_engine(db)
Session = sessionmaker(bind=engine)
session = Session()
# search
query = session.query(TransPhraseProb).filter_by(lang1p=lang1p,
lang2p=lang2p)
if transfrom == 2 and transto == 1:
try:
            # Be careful! The order of the conditional probability is reversed
            # relative to transfrom/transto because of Bayes' rule
return query.one().p2_1
except sqlalchemy.orm.exc.NoResultFound:
return init_val
elif transfrom == 1 and transto == 2:
try:
return query.one().p1_2
except sqlalchemy.orm.exc.NoResultFound:
return init_val
def available_phrases(inputs, transfrom=2, transto=1, db="sqlite:///:memory:"):
"""
>>> decode.available_phrases(u"He is a teacher.".split(),
db_name="sqlite:///:db:"))
set([((1, u'He'),),
((1, u'He'), (2, u'is')),
((2, u'is'),),
((2, u'is'), (3, u'a')),
((3, u'a'),),
((4, u'teacher.'),)])
"""
engine = create_engine(db)
# create session
Session = sessionmaker(bind=engine)
session = Session()
available = set()
for i, f in enumerate(inputs):
f_rest = ()
for fr in inputs[i:]:
f_rest += (fr,)
rest_phrase = u" ".join(f_rest)
if transfrom == 2 and transto == 1:
query = session.query(Phrase).filter_by(lang2p=rest_phrase)
elif transfrom == 1 and transto == 2:
query = session.query(Phrase).filter_by(lang1p=rest_phrase)
lst = list(query)
if lst:
available.add(tuple(enumerate(f_rest, i+1)))
return available
class HypothesisBase(object):
def __init__(self,
db,
totalnumber,
sentences,
ngram,
ngram_words,
inputps_with_index,
outputps,
transfrom,
transto,
covered,
remained,
start,
end,
prev_start,
prev_end,
remain_phrases,
prob,
prob_with_cost,
prev_hypo,
cost_dict
):
self._db = db
self._totalnumber = totalnumber
self._sentences = sentences
self._ngram = ngram
self._ngram_words = ngram_words
self._inputps_with_index = inputps_with_index
self._outputps = outputps
self._transfrom = transfrom
self._transto = transto
self._covered = covered
self._remained = remained
self._start = start
self._end = end
self._prev_start = prev_start
self._prev_end = prev_end
self._remain_phrases = remain_phrases
self._prob = prob
self._prob_with_cost = prob_with_cost
self._prev_hypo = prev_hypo
self._cost_dict = cost_dict
self._output_sentences = outputps
@property
def db(self):
return self._db
@property
def totalnumber(self):
return self._totalnumber
@property
def sentences(self):
return self._sentences
@property
def ngram(self):
return self._ngram
@property
def ngram_words(self):
return self._ngram_words
@property
def inputps_with_index(self):
return self._inputps_with_index
@property
def outputps(self):
return self._outputps
@property
def transfrom(self):
return self._transfrom
@property
def transto(self):
return self._transto
@property
def covered(self):
return self._covered
@property
def remained(self):
return self._remained
@property
def start(self):
return self._start
@property
def end(self):
return self._end
@property
def prev_start(self):
return self._prev_start
@property
def prev_end(self):
return self._prev_end
@property
def remain_phrases(self):
return self._remain_phrases
@property
def prob(self):
return self._prob
@property
def prob_with_cost(self):
return self._prob_with_cost
@property
def prev_hypo(self):
return self._prev_hypo
@property
def cost_dict(self):
return self._cost_dict
@property
def output_sentences(self):
return self._output_sentences
def __unicode__(self):
d = [("db", self._db),
("sentences", self._sentences),
("inputps_with_index", self._inputps_with_index),
("outputps", self._outputps),
("ngram", self._ngram),
("ngram_words", self._ngram_words),
("transfrom", self._transfrom),
("transto", self._transto),
("covered", self._covered),
("remained", self._remained),
("start", self._start),
("end", self._end),
("prev_start", self._prev_start),
("prev_end", self._prev_end),
("remain_phrases", self._remain_phrases),
("prob", self._prob),
("prob_with_cost", self._prob_with_cost),
#("cost_dict", self._cost_dict),
#("prev_hypo", ""),
]
return u"Hypothesis Object\n" +\
u"\n".join([u" " + k + u": " +
unicode(v) for (k, v) in d])
def __str__(self):
return unicode(self).encode('utf-8')
def __hash__(self):
return hash(unicode(self))
class Hypothesis(HypothesisBase):
"""
Realize like the following class
>>> args = {"sentences": sentences,
... "inputps_with_index": phrase,
... "outputps": outputps,
... "covered": hyp0.covered.union(set(phrase)),
... "remained": hyp0.remained.difference(set(phrase)),
... "start": phrase[0][0],
... "end": phrase[-1][0],
... "prev_start": hyp0.start,
... "prev_end": hyp0.end,
... "remain_phrases": remain_phrases(phrase,
... hyp0.remain_phrases),
... "prev_hypo": hyp0
... }
>>> hyp1 = decode.HypothesisBase(**args)
"""
def __init__(self,
prev_hypo,
inputps_with_index,
outputps,
):
start = inputps_with_index[0][0]
end = inputps_with_index[-1][0]
prev_start = prev_hypo.start
prev_end = prev_hypo.end
args = {"db": prev_hypo.db,
"totalnumber": prev_hypo.totalnumber,
"prev_hypo": prev_hypo,
"sentences": prev_hypo.sentences,
"ngram": prev_hypo.ngram,
# set later
"ngram_words": prev_hypo.ngram_words,
"inputps_with_index": inputps_with_index,
"outputps": outputps,
"transfrom": prev_hypo.transfrom,
"transto": prev_hypo.transto,
"covered": prev_hypo.covered.union(set(inputps_with_index)),
"remained": prev_hypo.remained.difference(
set(inputps_with_index)),
"start": start,
"end": end,
"prev_start": prev_start,
"prev_end": prev_end,
"remain_phrases": self._calc_remain_phrases(
inputps_with_index,
prev_hypo.remain_phrases),
"cost_dict": prev_hypo.cost_dict,
# set later
"prob": 0,
"prob_with_cost": 0,
}
HypothesisBase.__init__(self, **args)
# set ngram words
self._ngram_words = self._set_ngram_words()
# set the exact probability
self._prob = self._cal_prob(start - prev_end)
# set the exact probability with cost
self._prob_with_cost = self._cal_prob_with_cost(start - prev_end)
# set the output phrases
self._output_sentences = prev_hypo.output_sentences + outputps
def _set_ngram_words(self):
lst = self._prev_hypo.ngram_words + list(self._outputps)
o_len = len(self._outputps)
return list(reversed(list(reversed(lst))[:o_len - 1 + self._ngram]))
def _cal_phrase_prob(self):
inputp = u" ".join(zip(*self._inputps_with_index)[1])
outputp = u" ".join(self._outputps)
if self._transfrom == 2 and self._transto == 1:
return phrase_prob(lang1p=outputp,
lang2p=inputp,
transfrom=self._transfrom,
transto=self._transto,
db=self._db,
init_val=-100)
elif self._transfrom == 1 and self._transto == 2:
return phrase_prob(lang1p=inputp,
lang2p=outputp,
transfrom=self._transfrom,
transto=self._transto,
db=self._db,
init_val=-100)
else:
raise Exception("specify transfrom and transto")
def _cal_language_prob(self):
nw = self.ngram_words
triwords = zip(nw, nw[1:], nw[2:])
prob = 0
for first, second, third in triwords:
prob += language_model(first, second, third, self._totalnumber,
transto=self._transto,
db=self._db)
return prob
def _cal_prob(self, dist):
val = self._prev_hypo.prob +\
self._reordering_model(0.1, dist) +\
self._cal_phrase_prob() +\
self._cal_language_prob()
return val
def _sub_cal_prob_with_cost(self, s_len, cvd):
insert_flag = False
lst = []
sub_lst = []
for i in range(1, s_len+1):
if i not in cvd:
insert_flag = True
else:
insert_flag = False
if sub_lst:
lst.append(sub_lst)
sub_lst = []
if insert_flag:
sub_lst.append(i)
else:
if sub_lst:
lst.append(sub_lst)
return lst
def _cal_prob_with_cost(self, dist):
s_len = len(self._sentences)
cvd = set(i for i, val in self._covered)
lst = self._sub_cal_prob_with_cost(s_len, cvd)
prob = self._cal_prob(dist)
prob_with_cost = prob
for item in lst:
start = item[0]
end = item[-1]
cost = self._cost_dict[(start, end)]
prob_with_cost += cost
return prob_with_cost
def _reordering_model(self, alpha, dist):
return math.log(math.pow(alpha, math.fabs(dist)))
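    # Equivalent closed form of the distortion penalty above:
    #   log(alpha ** |dist|) = |dist| * log(alpha)
    # e.g. alpha=0.1, dist=2 gives 2 * log(0.1), roughly -4.6.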
def _calc_remain_phrases(self, phrase, phrases):
"""
>>> res = remain_phrases(((2, u'is'),),
set([((1, u'he'),),
((2, u'is'),),
((3, u'a'),),
((2, u'is'),
(3, u'a')),
((4, u'teacher'),)]))
set([((1, u'he'),), ((3, u'a'),), ((4, u'teacher'),)])
>>> res = remain_phrases(((2, u'is'), (3, u'a')),
set([((1, u'he'),),
((2, u'is'),),
((3, u'a'),),
((2, u'is'),
(3, u'a')),
((4, u'teacher'),)]))
set([((1, u'he'),), ((4, u'teacher'),)])
"""
s = set()
for ph in phrases:
for p in phrase:
if p in ph:
break
else:
s.add(ph)
return s
def create_empty_hypothesis(sentences, cost_dict,
ngram=3, transfrom=2, transto=1,
db="sqlite:///:memory:"):
phrases = available_phrases(sentences,
db=db)
hyp0 = HypothesisBase(sentences=sentences,
db=db,
totalnumber=_get_total_number(transto=transto,
db=db),
inputps_with_index=(),
outputps=[],
ngram=ngram,
ngram_words=["</s>", "<s>"]*ngram,
transfrom=transfrom,
transto=transto,
covered=set(),
start=0,
end=0,
prev_start=0,
prev_end=0,
remained=set(enumerate(sentences, 1)),
remain_phrases=phrases,
prev_hypo=None,
prob=0,
cost_dict=cost_dict,
prob_with_cost=0)
#print(_get_total_number(transto=transto, db=db))
return hyp0
class Stack(set):
def __init__(self, size=10,
histogram_pruning=True,
threshold_pruning=False):
set.__init__(self)
self._min_hyp = None
self._max_hyp = None
self._size = size
self._histogram_pruning = histogram_pruning
self._threshold_pruning = threshold_pruning
def add_hyp(self, hyp):
#prob = hyp.prob
# for the first time
if self == set([]):
self._min_hyp = hyp
self._max_hyp = hyp
else:
raise Exception("Don't use add_hyp for nonempty stack")
#else:
# if self._min_hyp.prob > prob:
# self._min_hyp = hyp
# if self._max_hyp.prob < prob:
# self._max_hyp = hyp
self.add(hyp)
def _get_min_hyp(self):
        # assumes the stack holds at least one hypothesis
lst = list(self)
mn = lst[0]
for item in self:
if item.prob_with_cost < mn.prob_with_cost:
mn = item
return mn
def add_with_combine_prune(self, hyp):
prob_with_cost = hyp.prob_with_cost
if self == set([]):
self._min_hyp = hyp
self._max_hyp = hyp
else:
if self._min_hyp.prob_with_cost > prob_with_cost:
self._min_hyp = hyp
if self._max_hyp.prob_with_cost < prob_with_cost:
self._max_hyp = hyp
self.add(hyp)
# combine
for _hyp in self:
            if hyp.ngram_words[:-1] == _hyp.ngram_words[:-1] and \
                    hyp.end == _hyp.end:
if hyp.prob_with_cost > _hyp:
self.remove(_hyp)
self.add(hyp)
break
# histogram pruning
if self._histogram_pruning:
if len(self) > self._size:
self.remove(self._min_hyp)
self._min_hyp = self._get_min_hyp()
# threshold pruning
if self._threshold_pruning:
alpha = 1.0e-5
if hyp.prob_with_cost < self._max_hyp + math.log(alpha):
self.remove(hyp)
def _get_total_number(transto=1, db="sqlite:///:memory:"):
"""
return v
"""
Trigram = Tables().get_trigram_table('lang{}trigram'.format(transto))
# create connection in SQLAlchemy
engine = create_engine(db)
# create session
Session = sessionmaker(bind=engine)
session = Session()
# calculate total number
query = session.query(Trigram)
return len(list(query))
def language_model(first, second, third, totalnumber, transto=1,
db="sqlalchemy:///:memory:"):
class TrigramProb(declarative_base()):
__tablename__ = 'lang{}trigramprob'.format(transto)
id = Column(INTEGER, primary_key=True)
first = Column(TEXT)
second = Column(TEXT)
third = Column(TEXT)
prob = Column(REAL)
class TrigramProbWithoutLast(declarative_base()):
__tablename__ = 'lang{}trigramprob'.format(transto)
id = Column(INTEGER, primary_key=True)
first = Column(TEXT)
second = Column(TEXT)
prob = Column(REAL)
# create session
engine = create_engine(db)
Session = sessionmaker(bind=engine)
session = Session()
try:
# next line can raise error if the prob is not found
query = session.query(TrigramProb).filter_by(first=first,
second=second,
third=third)
item = query.one()
return item.prob
except sqlalchemy.orm.exc.NoResultFound:
query = session.query(TrigramProbWithoutLast
).filter_by(first=first,
second=second)
# I have to modify the database
item = query.first()
if item:
return item.prob
else:
return - math.log(totalnumber)
class ArgumentNotSatisfied(Exception):
pass
def _future_cost_estimate(sentences,
phrase_prob):
'''
warning:
pass the complete one_word_prob
'''
s_len = len(sentences)
cost = {}
one_word_prob = {(st, ed): prob for (st, ed), prob in phrase_prob.items()
if st == ed}
if set(one_word_prob.keys()) != set((x, x) for x in range(1, s_len+1)):
raise ArgumentNotSatisfied("phrase_prob doesn't satisfy the condition")
# add one word prob
for tpl, prob in one_word_prob.items():
index = tpl[0]
cost[(index, index)] = prob
for length in range(1, s_len+1):
for start in range(1, s_len-length+1):
end = start + length
try:
cost[(start, end)] = phrase_prob[(start, end)]
except KeyError:
cost[(start, end)] = -float('inf')
for i in range(start, end):
_val = cost[(start, i)] + cost[(i+1, end)]
if _val > cost[(start, end)]:
cost[(start, end)] = _val
return cost
def _create_estimate_dict(sentences,
phrase_prob,
init_val=-100):
one_word_prob_dict_nums = set(x for x, y in phrase_prob.keys() if x == y)
comp_dic = {}
# complete the one_word_prob
s_len = len(sentences)
for i in range(1, s_len+1):
if i not in one_word_prob_dict_nums:
comp_dic[(i, i)] = init_val
for key, val in phrase_prob.items():
comp_dic[key] = val
return comp_dic
def _get_total_number_for_fce(transto=1, db="sqlite:///:memory:"):
"""
return v
"""
# create connection in SQLAlchemy
engine = create_engine(db)
# create session
Session = sessionmaker(bind=engine)
session = Session()
tablename = 'lang{}unigram'.format(transto)
Unigram = Tables().get_unigram_table(tablename)
# calculate total number
query = session.query(Unigram)
sm = 0
totalnumber = 0
for item in query:
totalnumber += 1
sm += item.count
return {'totalnumber': totalnumber,
'sm': sm}
def _future_cost_langmodel(word,
tn,
transfrom=2,
transto=1,
alpha=0.00017,
db="sqlite:///:memory:"):
tablename = "lang{}unigramprob".format(transto)
# create session
engine = create_engine(db)
Session = sessionmaker(bind=engine)
session = Session()
UnigramProb = Tables().get_unigramprob_table(tablename)
query = session.query(UnigramProb).filter_by(first=word)
try:
item = query.one()
return item.prob
except sqlalchemy.orm.exc.NoResultFound:
sm = tn['sm']
totalnumber = tn['totalnumber']
return math.log(alpha) - math.log(sm + alpha*totalnumber)
def future_cost_estimate(sentences,
transfrom=2,
transto=1,
init_val=-100.0,
db="sqlite:///:memory:"):
# create phrase_prob table
engine = create_engine(db)
# create session
Session = sessionmaker(bind=engine)
session = Session()
phrases = available_phrases(sentences,
db=db)
tn = _get_total_number_for_fce(transto=transto, db=db)
covered = {}
for phrase in phrases:
phrase_str = u" ".join(zip(*phrase)[1])
if transfrom == 2 and transto == 1:
query = session.query(TransPhraseProb).filter_by(
lang2p=phrase_str).order_by(
sqlalchemy.desc(TransPhraseProb.p2_1))
elif transfrom == 1 and transto == 2:
query = session.query(TransPhraseProb).filter_by(
lang1p=phrase_str).order_by(
sqlalchemy.desc(TransPhraseProb.p1_2))
lst = list(query)
if lst:
# extract the maximum val
val = query.first()
start = zip(*phrase)[0][0]
end = zip(*phrase)[0][-1]
pos = (start, end)
if transfrom == 2 and transto == 1:
fcl = _future_cost_langmodel(word=val.lang1p.split()[0],
tn=tn,
transfrom=transfrom,
transto=transto,
alpha=0.00017,
db=db)
print(val.lang1p.split()[0], fcl)
covered[pos] = val.p2_1 + fcl
if transfrom == 1 and transto == 2:
covered[pos] = val.p1_2
# + language_model()
# estimate future costs
phrase_prob = _create_estimate_dict(sentences, covered)
print(phrase_prob)
return _future_cost_estimate(sentences,
phrase_prob)
def stack_decoder(sentence, transfrom=2, transto=1,
stacksize=10,
searchsize=10,
lang1method=lambda x: x,
lang2method=lambda x: x,
db="sqlite:///:memory:",
verbose=False):
# create phrase_prob table
engine = create_engine(db)
# create session
Session = sessionmaker(bind=engine)
session = Session()
if transfrom == 2 and transto == 1:
sentences = lang2method(sentence).split()
else:
sentences = lang1method(sentence).split()
# create stacks
len_sentences = len(sentences)
stacks = [Stack(size=stacksize,
histogram_pruning=True,
threshold_pruning=False,
) for i in range(len_sentences+1)]
cost_dict = future_cost_estimate(sentences,
transfrom=transfrom,
transto=transto,
db=db)
#create the initial hypothesis
hyp0 = create_empty_hypothesis(sentences=sentences,
cost_dict=cost_dict,
ngram=3,
transfrom=2,
transto=1,
db=db)
stacks[0].add_hyp(hyp0)
# main loop
for i, stack in enumerate(stacks):
for hyp in stack:
for phrase in hyp.remain_phrases:
phrase_str = u" ".join(zip(*phrase)[1])
if transfrom == 2 and transto == 1:
query = session.query(TransPhraseProb).filter_by(
lang2p=phrase_str).order_by(
sqlalchemy.desc(TransPhraseProb.p2_1))[:searchsize]
elif transfrom == 1 and transto == 2:
query = session.query(TransPhraseProb).filter_by(
lang1p=phrase_str).order_by(
sqlalchemy.desc(TransPhraseProb.p1_2))[:searchsize]
query = list(query)
for item in query:
if transfrom == 2 and transto == 1:
outputp = item.lang1p
elif transfrom == 1 and transto == 2:
outputp = item.lang2p
#print(u"calculating\n {0} = {1}\n in stack {2}".format(
# phrase, outputp, i))
if transfrom == 2 and transto == 1:
outputps = lang1method(outputp).split()
elif transfrom == 1 and transto == 2:
outputps = lang2method(outputp).split()
# place in stack
# and recombine with existing hypothesis if possible
new_hyp = Hypothesis(prev_hypo=hyp,
inputps_with_index=phrase,
outputps=outputps)
if verbose:
print(phrase, u' '.join(outputps))
print("loop: ", i, "len:", len(new_hyp.covered))
stacks[len(new_hyp.covered)].add_with_combine_prune(
new_hyp)
return stacks
if __name__ == '__main__':
#import doctest
#doctest.testmod()
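    # A minimal usage sketch (hypothetical sentence and database path; a
    # populated phrase/unigram database is assumed):
    #stacks = stack_decoder(u"example sentence",
    #                       transfrom=2,
    #                       transto=1,
    #                       db="sqlite:///phrases.db",
    #                       verbose=True)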
pass
| 32.444172
| 79
| 0.514182
| 2,802
| 26,442
| 4.619914
| 0.104211
| 0.017304
| 0.025029
| 0.011124
| 0.337968
| 0.295558
| 0.214909
| 0.182928
| 0.171572
| 0.155504
| 0
| 0.014761
| 0.382535
| 26,442
| 814
| 80
| 32.484029
| 0.778098
| 0.095984
| 0
| 0.322259
| 0
| 0
| 0.03259
| 0.000935
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081395
| false
| 0.003322
| 0.013289
| 0.039867
| 0.227575
| 0.008306
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ebe31c36f436e8ae484085447aaa84df7e9db45
| 6,711
|
py
|
Python
|
amoebaelib/generate_histogram_plot.py
|
laelbarlow/amoebae
|
3c6607bcb64a60baee2f19f0a25e14b325e9725d
|
[
"Apache-2.0"
] | 8
|
2020-07-16T21:36:38.000Z
|
2021-11-28T08:32:05.000Z
|
amoebaelib/generate_histogram_plot.py
|
laelbarlow/amoebae
|
3c6607bcb64a60baee2f19f0a25e14b325e9725d
|
[
"Apache-2.0"
] | null | null | null |
amoebaelib/generate_histogram_plot.py
|
laelbarlow/amoebae
|
3c6607bcb64a60baee2f19f0a25e14b325e9725d
|
[
"Apache-2.0"
] | 1
|
2020-07-31T21:21:15.000Z
|
2020-07-31T21:21:15.000Z
|
#!/usr/bin/env python3
# Copyright 2018 Lael D. Barlow
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Contains functions for generating histograms.
"""
import sys
import os
import re
import pylab
import subprocess
import numpy as np
def generate_histogram(title,
values,
num_bins,
output_filename
):
"""Take a list of values, a number of bins, and an output file name, and
generate a histogram using pylab and write it to the file path.
"""
# Make histogram of scores.
pylab.hist(values, bins=num_bins) #specify the number of bins for the histogram
pylab.title(title)
pylab.xlabel("Value")
pylab.ylabel("Number of values in bin")
#pylab.show() #can do this instead of the savefig method if just want to view
pylab.savefig(output_filename) #works for pdf or png
pylab.close()
def generate_double_histogram(title,
values1,
label1,
values2,
label2,
num_bins,
output_filename
):
"""Take two lists of values, a number of bins, and an output file name, and
generate a histogram using pylab and write it to the file path.
"""
pylab.style.use('seaborn-deep')
# Make histogram of scores.
#pylab.hist(values1, bins=num_bins, label=label1) #specify the number of bins for the histogram
#pylab.hist(values2, bins=num_bins, label=label2) #specify the number of bins for the histogram
pylab.hist([values1, values2], bins=num_bins, label=[label1, label2]) #specify the number of bins for the histogram
pylab.title(title)
pylab.xlabel("Value")
pylab.ylabel("Number of values in bin")
pylab.legend(loc='upper right')
#pylab.show() #can do this instead of the savefig method if just want to view
pylab.savefig(output_filename) #works for pdf or png
pylab.close()
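# A minimal usage sketch for generate_double_histogram (all values below are
# made up for illustration):
#import numpy as np
#vals1 = np.random.normal(0, 1, 500)
#vals2 = np.random.normal(1, 1, 500)
#generate_double_histogram('Two samples',
#                          vals1, 'sample A',
#                          vals2, 'sample B',
#                          30,
#                          'double_histogram.pdf')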
def autolabel_bars(rects, ax):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
def generate_bar_chart(title,
categories,
labels,
num_hits,
output_filename
):
"""Take data and use matplotlib to generate a bar chart and write to a
specified file path.
"""
## Simple bar chart.
#fig, ax = pylab.subplots()
#pylab.style.use('seaborn-deep')
#pylab.rcdefaults()
#fig, ax = pylab.subplots()
# Example data
#x_pos = np.arange(len(labels))
#ax.barh(y_pos, performance, xerr=error, align='center')
#ax.bar(x_pos, values, align='center')
#ax.set_xticks(x_pos)
#ax.set_xticklabels(labels)
#ax.set_ylabel('Positive hit count')
#ax.set_title(title)
#pylab.show()
#pylab.close()
pylab.style.use('seaborn-deep')
#categories = ['Prot', 'Nucl']
#labels = ['Non-redundant', 'Final positive']
#num_hits = [[35, 30],
# [12, 6]]
    # num_hits holds one sublist per category, each with one value per label.
    assert len(categories) == len(num_hits)
    for sublist in num_hits:
        assert len(sublist) == len(labels)
x = np.arange(len(labels)) # the label locations
width = 0.35 # the width of the bars
fig, ax = pylab.subplots()
rects_list = []
num = 0
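    # NOTE: the parity-based x offsets below assume exactly two categories;
    # with more than two, bars from different categories would overlap.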
for category, sublist in zip(categories, num_hits):
num += 1
if not (num % 2) == 0:
rects = ax.bar(x - width/2, sublist, width, label=category)
rects_list.append(rects)
else:
rects = ax.bar(x + width/2, sublist, width, label=category)
rects_list.append(rects)
# Add numbers to label individual bars.
for r in rects_list:
autolabel_bars(r, ax)
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Number of sequences')
ax.set_title(title)
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
fig.tight_layout()
#pylab.show()
pylab.savefig(output_filename) #works for pdf or png
pylab.close()
if __name__ == '__main__':
# Generate example plot.
## Define title for plot.
#title = "Histogram of random values in 30 bins"
## Define input data for example plot.
#mu, sigma = 0, 0.1 # mean and standard deviation
#s = np.random.normal(mu, sigma, 1000)
## Define output filepath.
#output_filepath = 'test_histogram_plot.pdf'
## Call function to generate plot.
#generate_histogram(title,
# s,
# 30,
# output_filepath
# )
## Open output file.
#subprocess.call(['open', output_filepath])
## Delete output file.
#os.remove(output_filepath)
#
#title = 'test bar chart'
#values = [20, 10]
#labels = ['prot', 'nucl']
#output_filename = 'test_bar_chart.pdf'
#generate_bar_chart(title,
# values,
# labels,
# output_filename
# )
## Open output file.
#subprocess.call(['open', output_filepath])
## Delete output file.
#os.remove(output_filepath)
# Test bar chart.
title = 'test bar chart'
categories = ['Prot', 'Nucl']
labels = ['Non-redundant', 'Final positive']
num_hits = [[35, 30],
[12, 6]]
output_filename = 'test_bar_chart.pdf'
generate_bar_chart(title,
categories,
labels,
num_hits,
output_filename
)
# Open output file.
subprocess.call(['open', output_filename])
# Delete output file.
os.remove(output_filename)
| 31.069444
| 119
| 0.58203
| 829
| 6,711
| 4.618818
| 0.289505
| 0.043876
| 0.018804
| 0.018804
| 0.462784
| 0.39906
| 0.375555
| 0.375555
| 0.375555
| 0.361974
| 0
| 0.01374
| 0.316793
| 6,711
| 215
| 120
| 31.213953
| 0.821374
| 0.477276
| 0
| 0.348315
| 0
| 0
| 0.064401
| 0
| 0
| 0
| 0
| 0
| 0.022472
| 1
| 0.044944
| false
| 0
| 0.067416
| 0
| 0.11236
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ebfe39e2bd770fe4ca5a9e9da4a3af5efa9f58a
| 12,838
|
py
|
Python
|
fast_segmentation/core/evaluate.py
|
eilonshi/tevel-segmentation
|
bf9168fafa181ff4eac1d1eba0b0f8a06f5daae1
|
[
"MIT"
] | null | null | null |
fast_segmentation/core/evaluate.py
|
eilonshi/tevel-segmentation
|
bf9168fafa181ff4eac1d1eba0b0f8a06f5daae1
|
[
"MIT"
] | null | null | null |
fast_segmentation/core/evaluate.py
|
eilonshi/tevel-segmentation
|
bf9168fafa181ff4eac1d1eba0b0f8a06f5daae1
|
[
"MIT"
] | null | null | null |
import os
import os.path as osp
import logging
import argparse
import math
import yaml
from tabulate import tabulate
from torch.utils.data import Dataset
from tqdm import tqdm
from typing import Tuple, List
import torch
import torch.nn as nn
import torch.nn.functional as functional
import torch.distributed as dist
from fast_segmentation.core.utils import get_next_file_name, delete_directory_content
from fast_segmentation.model_components.architectures import model_factory
from fast_segmentation.model_components.data_cv2 import get_data_loader
from fast_segmentation.model_components.logger import setup_logger
from fast_segmentation.core.consts import IGNORE_LABEL, NUM_CLASSES, BAD_IOU
from fast_segmentation.visualization.visualize import save_labels_mask_with_legend
def parse_args():
"""
Creates the parser for evaluation arguments
Returns:
The parser
"""
parse = argparse.ArgumentParser()
parse.add_argument('--local_rank', dest='local_rank',
type=int, default=-1)
parse.add_argument('--weight-path', dest='weight_pth', type=str,
default='/home/bina/PycharmProjects/fast-segmentation/models/8/best_model.pth')
parse.add_argument('--im_root', type=str, default='/home/bina/PycharmProjects/fast-segmentation/data')
parse.add_argument('--val_im_anns', type=str,
default='/home/bina/PycharmProjects/fast-segmentation/data/val.txt')
parse.add_argument('--false_analysis_path', type=str,
default='/home/bina/PycharmProjects/fast-segmentation/data/false_analysis')
parse.add_argument('--log_path', type=str,
default='/home/bina/PycharmProjects/fast-segmentation/logs/regular_logs')
parse.add_argument('--port', dest='port', type=int, default=44553, )
parse.add_argument('--model', dest='model', type=str, default='bisenetv2')
parse.add_argument('--config_path', type=str,
default='/home/bina/PycharmProjects/fast-segmentation/configs/main_cfg.yaml')
return parse.parse_args()
class MscEvalV0(object):
"""
"""
def __init__(self, scales=(1.,), flip=False, ignore_label=IGNORE_LABEL):
self.scales = scales
self.flip = flip
self.ignore_label = ignore_label
def __call__(self, net: nn.Module, data_loader, num_classes):
# evaluate
hist = torch.zeros(num_classes, num_classes).cuda().detach()
if dist.is_initialized() and dist.get_rank() != 0:
d_iter = enumerate(data_loader)
else:
d_iter = enumerate(tqdm(data_loader))
for i, (imgs, labels) in d_iter:
n, _, h, w = labels.shape
labels = labels.squeeze(1).cuda()
size = labels.size()[-2:]
probs = torch.zeros((n, num_classes, h, w), dtype=torch.float32).cuda().detach()
for scale in self.scales:
s_h, s_w = int(scale * h), int(scale * w)
im_sc = functional.interpolate(imgs, size=(s_h, s_w), mode='bilinear', align_corners=True)
im_sc = im_sc.cuda()
if self.flip:
im_sc = torch.flip(im_sc, dims=(3,))
logits = net(im_sc)[0]
if self.flip:
logits = torch.flip(logits, dims=(3,))
logits = functional.interpolate(logits, size=size, mode='bilinear', align_corners=True)
probs += torch.softmax(logits, dim=1)
# calc histogram of the predictions in each class
preds = torch.argmax(probs, dim=1)
relevant_labels = labels != self.ignore_label
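            # build a num_classes x num_classes confusion matrix: entry (i, j)
            # counts pixels with ground truth i predicted as class j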
hist += torch.bincount(labels[relevant_labels] * num_classes + preds[relevant_labels],
minlength=num_classes ** 2).view(num_classes, num_classes)
if dist.is_initialized():
dist.all_reduce(hist, dist.ReduceOp.SUM)
        # per-class IoU: the diagonal of the confusion matrix is the
        # intersection; row sum + column sum - diagonal is the union
        # (1e-6 guards against division by zero)
        ious = hist.diag() / (hist.sum(dim=0) + hist.sum(dim=1) - hist.diag() + 1e-6)
ious[ious != ious] = 0 # replace nan with zero
miou = ious.mean()
return miou.item()
class MscEvalCrop(object):
def __init__(self, crop_size: Tuple[int, int], crop_stride: float, false_analysis_path: str, flip: bool = True,
scales: Tuple = (0.5, 0.75, 1, 1.25, 1.5, 1.75), label_ignore: int = IGNORE_LABEL):
self.scales = scales
self.ignore_label = label_ignore
self.flip = flip
self.distributed = dist.is_initialized()
self.crop_size = crop_size if isinstance(crop_size, (list, tuple)) else (crop_size, crop_size)
self.crop_stride = crop_stride
self.false_analysis_path = false_analysis_path
def pad_tensor(self, in_tensor: torch.Tensor):
n, c, h, w = in_tensor.size()
crop_h, crop_w = self.crop_size
if crop_h < h and crop_w < w:
return in_tensor, [0, h, 0, w]
pad_h, pad_w = max(crop_h, h), max(crop_w, w)
out_tensor = torch.zeros(n, c, pad_h, pad_w).cuda()
out_tensor.requires_grad_(False)
margin_h, margin_w = pad_h - h, pad_w - w
hst, hed = margin_h // 2, margin_h // 2 + h
wst, wed = margin_w // 2, margin_w // 2 + w
out_tensor[:, :, hst:hed, wst:wed] = in_tensor
return out_tensor, [hst, hed, wst, wed]
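    # eval_chip sums the softmax output of a crop and of its horizontal flip
    # (then exponentiates) before crop_eval accumulates the result into the
    # sliding-window probability map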
def eval_chip(self, net: nn.Module, crop: torch.Tensor):
prob = net(crop)[0].softmax(dim=1)
if self.flip:
crop = torch.flip(crop, dims=(3,))
prob += net(crop)[0].flip(dims=(3,)).softmax(dim=1)
prob = torch.exp(prob)
return prob
def crop_eval(self, net: nn.Module, im: torch.Tensor, n_classes: int):
crop_h, crop_w = self.crop_size
stride_rate = self.crop_stride
im, indices = self.pad_tensor(im)
n, c, h, w = im.size()
stride_h = math.ceil(crop_h * stride_rate)
stride_w = math.ceil(crop_w * stride_rate)
n_h = math.ceil((h - crop_h) / stride_h) + 1
n_w = math.ceil((w - crop_w) / stride_w) + 1
prob = torch.zeros(n, n_classes, h, w).cuda()
prob.requires_grad_(False)
for i in range(n_h):
for j in range(n_w):
st_h, st_w = stride_h * i, stride_w * j
end_h, end_w = min(h, st_h + crop_h), min(w, st_w + crop_w)
st_h, st_w = end_h - crop_h, end_w - crop_w
chip = im[:, :, st_h:end_h, st_w:end_w]
prob[:, :, st_h:end_h, st_w:end_w] += self.eval_chip(net, chip)
hst, hed, wst, wed = indices
prob = prob[:, :, hst:hed, wst:wed]
return prob
def scale_crop_eval(self, net: nn.Module, im: torch.Tensor, scale: Tuple, n_classes: int):
n, c, h, w = im.size()
new_hw = [int(h * scale), int(w * scale)]
im = functional.interpolate(im, new_hw, mode='bilinear', align_corners=True)
prob = self.crop_eval(net, im, n_classes)
prob = functional.interpolate(prob, (h, w), mode='bilinear', align_corners=True)
return prob
@torch.no_grad()
def __call__(self, net: nn.Module, dl: Dataset, n_classes: int):
data_loader = dl if self.distributed and not dist.get_rank() == 0 else tqdm(dl)
hist = torch.zeros(n_classes, n_classes).cuda().detach()
hist.requires_grad_(False)
for i, (images, labels) in enumerate(data_loader):
images = images.cuda()
labels = labels.squeeze(1).cuda()
n, h, w = labels.shape
probs = torch.zeros((n, n_classes, h, w)).cuda()
probs.requires_grad_(False)
for sc in self.scales:
probs += self.scale_crop_eval(net, images, sc, n_classes)
torch.cuda.empty_cache()
preds = torch.argmax(probs, dim=1)
keep = labels != self.ignore_label
cur_hist = torch.zeros(n_classes, n_classes).cuda().detach()
bin_count = torch.bincount(labels[keep] * n_classes + preds[keep], minlength=n_classes ** 2). \
view(n_classes, n_classes)
cur_hist += bin_count
            # per-image mIoU from this image's histogram (not the running one)
            cur_miou = cur_hist.diag() / (cur_hist.sum(dim=0) + cur_hist.sum(dim=1) - cur_hist.diag())
cur_miou[cur_miou != cur_miou] = 0 # replace nan with zero
cur_miou = cur_miou.mean()
if cur_miou < BAD_IOU:
save_in_false_analysis(preds=preds, labels=labels, path=self.false_analysis_path)
hist += bin_count
if self.distributed:
dist.all_reduce(hist, dist.ReduceOp.SUM)
ious = hist.diag() / (hist.sum(dim=0) + hist.sum(dim=1) - hist.diag())
ious[ious != ious] = 0 # replace nan with zero
miou = ious.mean()
return miou.item()
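# A minimal usage sketch (net, data_loader and the output path are assumed to
# be set up elsewhere in the project):
#   evaluator = MscEvalCrop(crop_size=(512, 512), crop_stride=2. / 3,
#                           false_analysis_path='/tmp/false_analysis')
#   miou = evaluator(net, data_loader, NUM_CLASSES)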
def save_in_false_analysis(preds: torch.Tensor, labels: torch.Tensor, path: str):
delete_directory_content(path)
for i, (pred, label) in enumerate(zip(preds, labels)):
pred = pred.detach().cpu().numpy()
label = label.detach().cpu().numpy()
label_path = get_next_file_name(root_dir=path, prefix='label', suffix='.jpg')
pred_path = get_next_file_name(root_dir=path, prefix='pred', suffix='.jpg')
save_labels_mask_with_legend(mask=pred, save_path=pred_path)
save_labels_mask_with_legend(mask=label, save_path=label_path)
@torch.no_grad()
def eval_model(net: nn.Module, ims_per_gpu: int, crop_size: Tuple[int, int], im_root: str, im_anns: str,
false_analysis_path: str) -> Tuple[List[str], List[float]]:
is_dist = dist.is_initialized()
dl = get_data_loader(data_path=im_root, ann_path=im_anns, ims_per_gpu=ims_per_gpu, crop_size=crop_size, mode='val',
distributed=is_dist)
net.eval()
heads, mious = [], []
logger = logging.getLogger()
single_scale = MscEvalV0((1.,), False)
miou = single_scale(net, dl, NUM_CLASSES)
heads.append('single_scale')
mious.append(miou)
logger.info('single mIOU is: %s\n', miou)
single_crop = MscEvalCrop(crop_size=crop_size, crop_stride=2. / 3, flip=False, scales=(1.,),
label_ignore=IGNORE_LABEL, false_analysis_path=false_analysis_path)
miou = single_crop(net, dl, NUM_CLASSES)
heads.append('single_scale_crop')
mious.append(miou)
logger.info('single scale crop mIOU is: %s\n', miou)
ms_flip = MscEvalV0((0.5, 0.75, 1, 1.25, 1.5, 1.75), True)
miou = ms_flip(net, dl, NUM_CLASSES)
heads.append('ms_flip')
mious.append(miou)
logger.info('ms flip mIOU is: %s\n', miou)
ms_flip_crop = MscEvalCrop(crop_size=crop_size, crop_stride=2. / 3, flip=True,
scales=(0.5, 0.75, 1.0, 1.25, 1.5, 1.75), label_ignore=IGNORE_LABEL,
false_analysis_path=false_analysis_path)
miou = ms_flip_crop(net, dl, NUM_CLASSES)
heads.append('ms_flip_crop')
mious.append(miou)
logger.info('ms crop mIOU is: %s\n', miou)
return heads, mious
def evaluate(ims_per_gpu: int, crop_size: Tuple[int, int], weight_pth: str, model_type: str, im_root: str,
val_im_anns: str, false_analysis_path: str):
logger = logging.getLogger()
# model
logger.info('setup and restore model')
net = model_factory[model_type](NUM_CLASSES)
net.load_state_dict(torch.load(weight_pth))
net.cuda()
is_dist = dist.is_initialized()
if is_dist:
local_rank = dist.get_rank()
net = nn.parallel.DistributedDataParallel(net, device_ids=[local_rank, ], output_device=local_rank)
# evaluator
heads, mious = eval_model(net=net, ims_per_gpu=ims_per_gpu, im_root=im_root, im_anns=val_im_anns,
false_analysis_path=false_analysis_path, crop_size=crop_size)
logger.info(tabulate([mious], headers=heads, tablefmt='orgtbl'))
if __name__ == "__main__":
args = parse_args()
with open(args.config_path) as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
if not args.local_rank == -1:
torch.cuda.set_device(args.local_rank)
dist.init_process_group(backend='nccl',
init_method='tcp://127.0.0.1:{}'.format(args.port),
world_size=torch.cuda.device_count(),
rank=args.local_rank
)
if not osp.exists(args.log_path):
os.makedirs(args.log_path)
setup_logger('{}-eval'.format(args.model), args.log_path)
evaluate(ims_per_gpu=cfg['ims_per_gpu'], crop_size=cfg['crop_size'], weight_pth=args.weight_pth,
model_type=args.model, im_root=args.im_root, val_im_anns=args.val_im_anns,
false_analysis_path=args.false_analysis_path)
| 38.668675
| 119
| 0.621436
| 1,806
| 12,838
| 4.179402
| 0.150609
| 0.021198
| 0.033784
| 0.014308
| 0.352146
| 0.286963
| 0.205485
| 0.177663
| 0.139507
| 0.063328
| 0
| 0.01139
| 0.254557
| 12,838
| 331
| 120
| 38.785498
| 0.777325
| 0.019084
| 0
| 0.151261
| 0
| 0
| 0.062709
| 0.030837
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05042
| false
| 0
| 0.084034
| 0
| 0.180672
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ec38b237400c36dd79206a7de24521d924c26b5
| 940
|
py
|
Python
|
talent/urls.py
|
flannerykj/urbanapplause
|
c9b6c0f9a2f65b869fe1e6fa921972e7236e4fe5
|
[
"MIT"
] | null | null | null |
talent/urls.py
|
flannerykj/urbanapplause
|
c9b6c0f9a2f65b869fe1e6fa921972e7236e4fe5
|
[
"MIT"
] | null | null | null |
talent/urls.py
|
flannerykj/urbanapplause
|
c9b6c0f9a2f65b869fe1e6fa921972e7236e4fe5
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from . import views
app_name = 'talent'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^musicians/$', views.MusicianIndex.as_view(), name='musicians'),
url(r'^musicians/(?P<pk>[0-9]+)/$', views.MusicianDetail.as_view(), name='musician-detail'),
url(r'^musicians/(?P<pk>\d+)/edit/$', views.UpdateMusician.as_view(), name='musician-edit'),
url(r'^musicians/create/$', views.MusicianCreate.as_view(), name='musician-create'),
url(r'^musicians/(?P<pk>\d+)/delete/$', views.DeleteTalent.as_view(), name='musician-delete'),
url(r'^artists/$', views.ArtistIndex.as_view(), name='artists'),
url(r'^artists/(?P<pk>[0-9]+)/$', views.ArtistDetail.as_view(), name='artist-detail'),
url(r'^artists/(?P<pk>\d+)/edit/$', views.UpdateArtist.as_view(), name='artist-edit'),
url(r'^artists/create/$', views.ArtistCreate.as_view(), name='artist-create'),
]
| 55.294118
| 98
| 0.659574
| 131
| 940
| 4.648855
| 0.290076
| 0.065681
| 0.164204
| 0.118227
| 0.182266
| 0.055829
| 0
| 0
| 0
| 0
| 0
| 0.004684
| 0.091489
| 940
| 17
| 99
| 55.294118
| 0.708431
| 0
| 0
| 0
| 0
| 0
| 0.341126
| 0.147715
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ec4b74ab4db122d30b5b736d0e106eb22df1a59
| 11,282
|
py
|
Python
|
plazma.py
|
caos21/Grodi
|
3ae09f9283f3e1afdd641943e2244afc78511053
|
[
"Apache-2.0"
] | 2
|
2019-12-11T16:19:46.000Z
|
2020-08-19T20:14:18.000Z
|
plazma.py
|
caos21/Grodi
|
3ae09f9283f3e1afdd641943e2244afc78511053
|
[
"Apache-2.0"
] | null | null | null |
plazma.py
|
caos21/Grodi
|
3ae09f9283f3e1afdd641943e2244afc78511053
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Benjamin Santos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
""" This module contains the classes functions and helpers to compute
the plasma.
"""
__author__ = "Benjamin Santos"
__copyright__ = "Copyright 2019"
__credits__ = ["Benjamin Santos"]
__license__ = "Apache 2.0"
__version__ = "0.0.1"
__maintainer__ = "Benjamin Santos"
__email__ = "caos21@gmail.com"
__status__ = "Beta"
from collections import namedtuple
import numpy as np
import scipy.constants as const
import trazar as tzr
PI = const.pi
KE = 1.0/(4.0*PI*const.epsilon_0)
INVKE = 1.0/KE
KB = const.Boltzmann
QE = const.elementary_charge
ME = const.electron_mass
PlasmaSystem = namedtuple('System',
['length',
'radius',
'temperature',
'ion_temperature',
'pressure_torr',
'arsih4_ratio',
'armass',
'sih4mass',
'power',
'with_tunnel'])
def constant_rate(energy, avar, bvar, cvar):
""" Returns a constant rate a
"""
return avar*np.ones_like(energy)
def arrhenius_rate(energy, avar, bvar, cvar):
""" Returns the Arrhenius rate
"""
return avar * np.power(energy, cvar) * np.exp(-bvar/energy)
def a1expb_rate(energy, avar, bvar, cvar):
""" Returns a1expb rate
"""
return avar * (1.0 - np.exp(-bvar*energy))
class RateSpec:
""" Defines a rate
"""
def __init__(self, rate_function=None, avar=0.0, bvar=0.0, cvar=0.0, name=""):
self.rate_function = rate_function
self.avar = avar
self.bvar = bvar
self.cvar = cvar
self.name = name
def __call__(self, energy):
""" Returns the rate at mean electron energy value
"""
return self.rate_function(energy, self.avar, self.bvar, self.cvar)
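# A minimal usage sketch for RateSpec (the coefficients below are
# illustrative only):
#   k_ion = RateSpec(arrhenius_rate, avar=2.3e-14, bvar=17.44, cvar=0.5,
#                    name="ionization")
#   rate = k_ion(4.0)  # rate coefficient at a 4 eV mean electron energy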
class RatesMap:
""" Returns a dict of rates
"""
def __init__(self, rates_dict):
"""
"""
self.rates_dict = rates_dict
self.rates_map = dict()
def get_ratesmap(self):
""" Get the rates map
"""
for k, var in self.rates_dict.items():
if var[0] == "a1expb":
self.rates_map[k] = RateSpec(a1expb_rate, var[1], var[2], var[3], k)
if var[0] == "arrhenius":
self.rates_map[k] = RateSpec(arrhenius_rate, var[1], var[2], var[3], k)
if var[0] == "constant":
self.rates_map[k] = RateSpec(constant_rate, var[1], var[2], var[3], k)
return self.rates_map
def plot_rates(self, energy, savename="figx.eps"):
""" Plot the rates
"""
rates, labels = [], []
for k, var in self.rates_map.items():
rates.append(var(energy))
labels.append(var.name)
tzr.plot_plain(energy, rates, title="Rates",
axislabel=["Time (s)", r"Rate coefficient (m$^{3}$s$^{-1}$)"],
logx=False, logy=True, labels=labels,
ylim=[1e-18, 1e-12], savename=savename)
class PlasmaChem():
""" Plasma model
"""
def __init__(self, rates_map, plasmasystem):
self.rates_map = rates_map
self.plasmasystem = plasmasystem
self.electron_density = 1.0
self.nano_qdens = 0.0
self.nano_qdens_rate = 0.0
self.kbtg = KB * self.plasmasystem.temperature
self.ion_kbtg = KB * self.plasmasystem.ion_temperature
self.pressure = 133.32237 * self.plasmasystem.pressure_torr
self.reactor_volume = (self.plasmasystem.length*PI*self.plasmasystem.radius
*self.plasmasystem.radius)
self.reactor_area = self.plasmasystem.length*2.0*PI*self.plasmasystem.radius
self.ratio_av = self.reactor_area / self.reactor_volume
self.gas_dens = self.pressure / self.kbtg
self.nar = self.plasmasystem.arsih4_ratio * self.gas_dens
self.nsih4 = (1.0-self.plasmasystem.arsih4_ratio) * self.gas_dens
self.vth_ar = self.thermal_velocity(self.plasmasystem.armass)
self.vth_sih4 = self.thermal_velocity(self.plasmasystem.sih4mass)
self.flux_sih3 = self.flux_neutrals(self.plasmasystem.sih4mass)
self.flux_sih2 = self.flux_neutrals(self.plasmasystem.sih4mass)
self.flux_ar = self.flux_neutrals(self.plasmasystem.armass)
        ## From Lieberman, p. 80, eq. (117)
self.lambdai = 1. / (330 * self.plasmasystem.pressure_torr)
self.flux_arp = self.flux_ions(self.plasmasystem.armass, self.lambdai)
self.flux_sih3p = self.flux_ions(self.plasmasystem.sih4mass, 2.9e-3)
## peak voltage
self.vsheath = 0.25*100.0
self.density_sourcedrain = np.zeros(7)
self.past_plasmadensity = np.ones(7)
self.next_plasmadensity = np.zeros(7)
def thermal_velocity(self, mass):
""" computes the thermal velocity
"""
return np.sqrt(2.0*self.kbtg/mass)
def diffusion_neutrals(self, mass, lambdax=3.5*1e-3):
""" computes the diffusion coefficient for neutrals
"""
return self.kbtg*lambdax/(mass*self.thermal_velocity(mass))
def center2edge_neutrals(self, mass):
""" center to edge ratio for neutrals
"""
pfcn = (1.0 + (self.plasmasystem.length/2.0) * self.thermal_velocity(mass)
/ (4.0*self.diffusion_neutrals(mass)))
return 1.0/pfcn
def flux_neutrals(self, mass):
""" computes the neutral flux
"""
return 0.25 * self.center2edge_neutrals(mass) * self.thermal_velocity(mass)
def bohm_velocity(self, mass):
""" computes the Bohm velocity
"""
return np.sqrt(self.ion_kbtg/mass)
def center2edge_ions(self, lambdax):
""" center to edge ratio for ions
"""
pfcn = np.sqrt(3.0+(0.5*self.plasmasystem.length/lambdax))
return 1.0/pfcn
def flux_ions(self, mass, lambdax):
""" computes the ion flux
"""
return self.center2edge_ions(lambdax) * self.bohm_velocity(mass)
def ion_velocity(self, mass):
""" computes the ion velocity
"""
return np.sqrt(8.0*self.ion_kbtg/(PI*mass))
def get_system(self):
""" returns the system of equations
"""
return self.system
def system(self, time, nvector):
""" system of equations for the densities
"""
nel = nvector[0]
narp = nvector[1]
narm = nvector[2]
nsih3p = nvector[3]
nsih3 = nvector[4]
nsih2 = nvector[5]
neps = nvector[6]
energy = neps/nel
kel = self.rates_map["R1:kel"](energy)
kio = self.rates_map["R2:ki"](energy)
kex = self.rates_map["R3:kex"](energy)
kiarm = self.rates_map["R4:kiarm"](energy)
kelsih4 = self.rates_map["R5:kelsih4"](energy)
kdisih4 = self.rates_map["R6:kdisih4"](energy)
kdsih3 = self.rates_map["R7:kdsih3"](energy)
kdsih2 = self.rates_map["R8:kdsih2"](energy)
kisih3 = self.rates_map["R9:kisih3"](energy)
kv13 = self.rates_map["R10:kv13"](energy)
kv24 = self.rates_map["R11:kv24"](energy)
k12 = self.rates_map["R12:k12"](energy)
k13 = self.rates_map["R13:k13"](energy)
k14 = self.rates_map["R14:k14"](energy)
k15 = self.rates_map["R15:k15"](energy)
ekio = self.rates_map["R2:ki"].bvar
ekex = self.rates_map["R3:kex"].bvar
ekiarm = self.rates_map["R4:kiarm"].bvar
ekdisih4 = self.rates_map["R6:kdisih4"].bvar
ekdsih3 = self.rates_map["R7:kdsih3"].bvar
ekdsih2 = self.rates_map["R8:kdsih2"].bvar
ekisih3 = self.rates_map["R9:kisih3"].bvar
ekv13 = self.rates_map["R10:kv13"].bvar
ekv24 = self.rates_map["R11:kv24"].bvar
nar = self.nar
nsih4 = self.nsih4
flux_arp = self.flux_arp
flux_ar = self.flux_ar
flux_sih3p = self.flux_sih3p
flux_sih3 = self.flux_sih3
flux_sih2 = self.flux_sih2
ratio_av = self.ratio_av
sourcedrain = self.density_sourcedrain
with_tunnel = self.plasmasystem.with_tunnel
nsih3p = nel - narp - self.nano_qdens
dnel = (+kio*nar*nel
+ kiarm*nel*narm
+ kdisih4*nel*nsih4
+ kisih3*nel*nsih3
- flux_arp*ratio_av*narp
- flux_sih3p*ratio_av*nsih3p
- sourcedrain[0]*nel
+ with_tunnel*sourcedrain[4])
dnarp = (+kio*nar*nel
+ kiarm*nel*narm
- flux_arp*ratio_av*narp
- sourcedrain[1]*narp)
dnarm = (+ kex*nar*nel
- kiarm*narm*nel
- k12*narm*nsih4
- k13*narm*nsih4
- k14*narm*nsih3
- k15*narm*nsih2
- flux_ar*ratio_av*narm)
dnsih3p = (+ kdisih4*nel*nsih4
+ kisih3*nel*nsih3
- flux_sih3p*ratio_av*nsih3p)
dnsih3 = (+ kdsih3*nel*nsih4
- kisih3*nel*nsih3
+ k12*narm*nsih4
- k14*narm*nsih3
- flux_sih3*ratio_av*nsih3)
dnsih2 = (+ kdsih2*nel*nsih4
+ k13*narm*nsih4
+ k14*narm*nsih3
- k15*narm*nsih2
- flux_sih2*ratio_av*nsih2)
power = self.plasmasystem.power
reactor_volume = self.reactor_volume
vsheath = self.vsheath
armass = self.plasmasystem.armass
sih4mass = self.plasmasystem.sih4mass
dneps = (power/reactor_volume
- ekio*kio*nar*nel
- ekex*kex*nar*nel
- ekiarm*kiarm*narm*nel
- (5./3.)*self.bohm_velocity(armass)*ratio_av*neps
- QE*vsheath*self.bohm_velocity(armass)*ratio_av*nel
- (5./3.)*self.bohm_velocity(sih4mass)*ratio_av*neps
- QE*vsheath*self.bohm_velocity(sih4mass)*ratio_av*nel
- 3.0*(ME/armass)*kel*neps*nar
- 3.0*(ME/sih4mass)*kelsih4*neps*nsih4
- ekisih3*kisih3*nel*nsih3
- ekdisih4*kdisih4*nel*nsih4
- ekdsih3*kdsih3*nel*nsih4
- ekdsih2*kdsih2*nel*nsih4
- ekv13*kv13*nel*nsih4
- ekv24*kv24*nel*nsih4
- sourcedrain[6]*nel
+ with_tunnel*sourcedrain[5])
return np.nan_to_num([dnel, dnarp, dnarm, dnsih3p, dnsih3, dnsih2, dneps],
copy=False)
| 34.713846
| 87
| 0.576139
| 1,378
| 11,282
| 4.56894
| 0.214078
| 0.050032
| 0.060991
| 0.012071
| 0.286055
| 0.129924
| 0.074809
| 0.064327
| 0.021601
| 0.021601
| 0
| 0.043301
| 0.306063
| 11,282
| 324
| 88
| 34.820988
| 0.760889
| 0.123028
| 0
| 0.0553
| 0
| 0
| 0.047087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087558
| false
| 0
| 0.018433
| 0
| 0.18894
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ec843c15f7247a66d7807de9b1b0eb37d884ff5
| 3,598
|
py
|
Python
|
venvs/sitio_web/restaurantes/views.py
|
mmaguero/MII-SSBW16-17
|
25b6c340c63a2fbe8342b48ec7f730b68c58d1bc
|
[
"MIT"
] | 1
|
2017-04-22T11:02:38.000Z
|
2017-04-22T11:02:38.000Z
|
venvs/sitio_web/restaurantes/views.py
|
mmaguero/MII-SSBW16-17
|
25b6c340c63a2fbe8342b48ec7f730b68c58d1bc
|
[
"MIT"
] | 4
|
2017-06-17T16:10:45.000Z
|
2022-02-13T20:23:04.000Z
|
venvs/sitio_web/restaurantes/views.py
|
mmaguero/MII-SSBW16-17
|
25b6c340c63a2fbe8342b48ec7f730b68c58d1bc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.shortcuts import render, HttpResponse,redirect
from django.http import JsonResponse
from .forms import RestaurantesForm
from .models import restaurants, addr#, image
from django.contrib.auth.decorators import login_required
import logging
log = logging.getLogger(__name__)
# Create your views here.
def index(request):
log.info("INDEX - Hey there it works!!")
context = {
'menu': 'index'
}
#return HttpResponse('My Restaurants Manager')
return render(request,'index.html',context)
def test(request):
valor = 3
context = {
'variable': valor,
'resta': restaurants.objects[:5],
    }  # variables passed to the template
return render(request,'test.html', context)
@login_required
def listar(request):
log.info("LIST - Hey there it works!!")
context = {
'resta': restaurants.objects[:10],
'menu': 'list'
    }  # variables passed to the template
return render(request,'listar.html', context)
@login_required
def buscar(request):
log.info("SEARCH - Hey there it works!!")
cocina = request.GET.get('cocina')
lista=restaurants.objects(cuisine__icontains=cocina)
context = {
'resta': lista,
}
return render(request,'listar.html', context)
@login_required
def add(request):
log.info("ADD - Hey there it works!!")
formu = RestaurantesForm()
if request.method == "POST":
formu = RestaurantesForm(request.POST, request.FILES)
        if formu.is_valid():  # validates the form or records errors
            # individual fields
nombre = formu.cleaned_data['nombre']
cocina = formu.cleaned_data['cocina']
barrio = formu.cleaned_data['barrio']
calle = formu.cleaned_data['direccion']
imagen = request.FILES['imagen'] #formu.cleaned_data['imagen']
#tipo_foto = imagen.content_type
            # type and name
direc = addr(street=calle)
#i = image(extension=tipo_foto, img=imagen)
r = restaurants(name=nombre, cuisine=cocina, borough=barrio, address=direc , image=imagen)
r.save()
            # formu.save()  # if the form were bound to the model
            return redirect(index)
    # GET or error
context = {
'form': formu,
'menu': 'add',
}
return render(request, 'form.html', context)
# @login_required
# def update(request):
# log.info("UPD - Hey there it works!!")
# name = request.GET.get('name')
# obj=restaurants.objects(name=name)
# context = {
# 'resta': obj,
# }
# return render(request,'formUpdate.html', context)
# url
@login_required
def restaurant(request, name):
log.info("DETAIL - Hey there it works!!")
resta=restaurants.objects(name=name)[0]
context = {
'resta': resta
}
return render(request, 'detalle.html', context)
# retrieve the photo
@login_required
def imagen(request, name):
log.info("IMAGE - Hey there it works!!")
res = restaurants.objects(name=name)[0]
img = res.image.read()
return HttpResponse(img, content_type="image/" + res.image.format)
def r_ajax(request, name):
log.info("AJAX - Hey there it works!!")
resta = restaurants.objects(name=name)[0]
maps = '<iframe width="450" height="300" frameborder="0" style="border:0" src="https://maps.google.com/maps?q='+str(name) + ' ' + str(resta.address.street) + ' ' + str(resta.borough)+'&ie=UTF8&&output=embed" allowfullscreen></iframe>'
    return JsonResponse({'map': maps})  # could be a string or HTML
| 30.491525
| 248
| 0.636465
| 434
| 3,598
| 5.221198
| 0.345622
| 0.024713
| 0.035305
| 0.052957
| 0.182701
| 0.127538
| 0.127538
| 0.127538
| 0.127538
| 0.090026
| 0
| 0.006113
| 0.227071
| 3,598
| 117
| 249
| 30.752137
| 0.808702
| 0.196776
| 0
| 0.194805
| 0
| 0.012987
| 0.184836
| 0.020964
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103896
| false
| 0
| 0.077922
| 0
| 0.298701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ecb10199a77a73ed62e4b61289eba8a812c02c6
| 342
|
py
|
Python
|
products/urls.py
|
Tsatsubii/tsatsubii-helpdesk
|
baee05b4fd1aedfda8e4039c45f182f29e8db348
|
[
"MIT"
] | null | null | null |
products/urls.py
|
Tsatsubii/tsatsubii-helpdesk
|
baee05b4fd1aedfda8e4039c45f182f29e8db348
|
[
"MIT"
] | null | null | null |
products/urls.py
|
Tsatsubii/tsatsubii-helpdesk
|
baee05b4fd1aedfda8e4039c45f182f29e8db348
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import *
app_name = 'products'
urlpatterns = [
path('create', CreateProduct.as_view(), name='create'),
path('view/<int:pk>', ProductDetail.as_view(), name='detail'),
path('list', ProductList.as_view(), name='list'),
path('<int:pk>/update', ProductUpdate.as_view(), name='update'),
]
| 28.5
| 68
| 0.666667
| 44
| 342
| 5.068182
| 0.5
| 0.107623
| 0.179372
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 342
| 11
| 69
| 31.090909
| 0.750842
| 0
| 0
| 0
| 0
| 0
| 0.19883
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6eceb56b5b2b3973a3c6557e579442305e42471b
| 3,374
|
py
|
Python
|
asyncspotify/album.py
|
minibox24/asyncspotify
|
3767cf19cf598fb179883cffd878e2440c16a57c
|
[
"MIT"
] | 7
|
2020-06-16T21:24:42.000Z
|
2022-03-10T20:23:29.000Z
|
asyncspotify/album.py
|
minibox24/asyncspotify
|
3767cf19cf598fb179883cffd878e2440c16a57c
|
[
"MIT"
] | 13
|
2020-03-22T12:07:04.000Z
|
2021-08-15T19:06:57.000Z
|
asyncspotify/album.py
|
minibox24/asyncspotify
|
3767cf19cf598fb179883cffd878e2440c16a57c
|
[
"MIT"
] | 5
|
2020-03-22T18:21:55.000Z
|
2021-10-03T06:30:30.000Z
|
from datetime import datetime
from .mixins import ArtistMixin, ExternalIDMixin, ExternalURLMixin, ImageMixin, TrackMixin
from .object import SpotifyObject
from .track import SimpleTrack
class _BaseAlbum(SpotifyObject, TrackMixin, ImageMixin, ExternalURLMixin, ArtistMixin):
_type = 'album'
_track_class = SimpleTrack
__date_fmt = dict(year='%Y', month='%Y-%m', day='%Y-%m-%d')
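	# release_date_precision selects the strptime format used to parse
	# release_date: 'year' -> '%Y', 'month' -> '%Y-%m', 'day' -> '%Y-%m-%d'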
def __init__(self, client, data):
super().__init__(client, data)
TrackMixin.__init__(self, data)
ImageMixin.__init__(self, data)
ExternalURLMixin.__init__(self, data)
ArtistMixin.__init__(self, data)
self.album_group = data.pop('album_group', None) # can be None, though this is not specified in the API docs
self.album_type = data.pop('album_type')
self.available_markets = data.pop('available_markets', None)
self.release_date_precision = data.pop('release_date_precision')
if self.release_date_precision is None:
self.release_date = None
else:
try:
self.release_date = datetime.strptime(
data.pop('release_date'),
self.__date_fmt[self.release_date_precision]
)
except ValueError:
self.release_date = None
class SimpleAlbum(_BaseAlbum):
'''
Represents an Album object.
.. note::
To iterate all tracks, you have to use the ``async for`` construct or fill the object with ``.fill()`` before iterating ``.tracks``.
id: str
Spotify ID of the album.
name: str
Name of the album.
tracks: List[:class:`Track`]
List of tracks on the album.
artists: List[:class:`Artist`]
List of artists that appear on the album.
images: List[:class:`Image`]
List of associated images, such as album cover in different sizes.
track_count: int
The expected track count as advertised by the last paging object. ``is_filled()`` can return True even if fewer tracks than this exists in ``tracks``, since some fetched tracks from the API can be None for various reasons.
uri: str
Spotify URI of the album.
link: str
Spotify URL of the album.
type: str
Plaintext string of object type: ``album``.
album_type:
Type of album, e.g. ``album``, ``single`` or ``compilation``.
available_markets: List[str] or None
Markets where the album is available: ISO-3166-1_.
external_urls: dict
Dictionary that maps type to url.
release_date: `datetime <https://docs.python.org/3/library/datetime.html#module-datetime>`_
Date (and maybe time) of album release.
release_date_precision: str
Precision of ``release_date``. Can be ``year``, ``month``, or ``day``.
album_group: str or None
Type of album, e.g. ``album``, ``single``, ``compilation`` or ``appears_on``.
'''
class FullAlbum(_BaseAlbum, ExternalIDMixin):
'''
Represents a complete Album object.
This type has some additional attributes not existent in :class:`SimpleAlbum`.
genres: List[str]
List of genres associated with the album.
label: str
The label for the album.
popularity: int
An indicator of the popularity of the album, 0 being least popular and 100 being the most.
copyrights: dict
List of copyright objects.
external_ids: dict
Dictionary of external IDs.
'''
def __init__(self, client, data):
super().__init__(client, data)
ExternalIDMixin.__init__(self, data)
self.genres = data.pop('genres')
self.label = data.pop('label')
self.popularity = data.pop('popularity')
self.copyrights = data.pop('copyrights')
| 31.830189
| 224
| 0.727623
| 477
| 3,374
| 4.979036
| 0.339623
| 0.050947
| 0.037895
| 0.030316
| 0.053895
| 0.053895
| 0.053895
| 0.033684
| 0.033684
| 0
| 0
| 0.003537
| 0.162122
| 3,374
| 105
| 225
| 32.133333
| 0.836576
| 0.595436
| 0
| 0.162162
| 0
| 0
| 0.085357
| 0.015267
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.108108
| 0
| 0.324324
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ed14769a9b989c5617ba6fd0d9b5db6ab25f970
| 2,948
|
py
|
Python
|
tools_box/_hr/doctype/stationaries_log/stationaries_log.py
|
maisonarmani/Tools_Box
|
4f8cc3a0deac1be50a3ac80758a10608faf58454
|
[
"MIT"
] | 4
|
2017-09-25T23:34:08.000Z
|
2020-07-17T23:52:26.000Z
|
tools_box/_hr/doctype/stationaries_log/stationaries_log.py
|
maisonarmani/Tools_Box
|
4f8cc3a0deac1be50a3ac80758a10608faf58454
|
[
"MIT"
] | null | null | null |
tools_box/_hr/doctype/stationaries_log/stationaries_log.py
|
maisonarmani/Tools_Box
|
4f8cc3a0deac1be50a3ac80758a10608faf58454
|
[
"MIT"
] | 5
|
2017-06-02T01:58:32.000Z
|
2022-02-22T16:59:01.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, masonarmani38@gmail.com and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class StationariesLog(Document):
def on_submit(self):
for item in self.items_issued:
_create_bin_card(item, self)
def _create_bin_card(item, doc):
import datetime
last_value = _last_bin_card_value(item)
	if last_value[0] < 0:
		frappe.throw("No more stock available")
new_bin_card = frappe.new_doc("Stationaries Bin Card")
new_bin_card.date = datetime.datetime.today()
new_bin_card.item = item.item_issued
new_bin_card.value = item.pqty
new_bin_card.current_value = last_value[0] + item.pqty
new_bin_card.last_value = last_value[2]
new_bin_card.reference_doctype = doc.doctype
new_bin_card.reference_docname = doc.name
new_bin_card.ppu = last_value[1]
new_bin_card.count = last_value[0] + item.pqty
less = new_bin_card.count
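	# convert the accumulated loose count into issued units: every `ppu`
	# pieces make one unit, and the remainder becomes the new running count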
	if less >= new_bin_card.ppu:  # count and current value have to change
unit = int(less / new_bin_card.ppu)
if (less / new_bin_card.ppu) > 1:
new_count = less % new_bin_card.ppu
else:
new_count = less - new_bin_card.ppu
# set new values
new_bin_card.count = new_count
new_bin_card.current_value = new_count
# set item values
item.qty = unit
item.ppu = new_bin_card.ppu
# remove value from stock
_remove_unit(item)
new_bin_card.submit()
def _remove_unit(item):
wh = "Stationaries - GCL"
se = frappe.new_doc("Stock Entry")
se.purpose = "Material Issue"
se.title = "Material Issue"
se.from_warehouse = wh
# using the latest cost center for item
last_cost_center = frappe.get_list(doctype="Stock Entry Detail",
filters={"item_code": item.item_issued}, fields=['cost_center'],
order_by='creation')
d_cost_center = ""
if last_cost_center[0].get('cost_center') != None:
d_cost_center = last_cost_center[0].cost_center
it = frappe.get_list(doctype="Item", filters={"name": item.item_issued},
fields=['stock_uom, item_name'])
# set new item
item = dict(
f_warehouse=wh,
t_warehouse="",
qty=item.qty,
item_code=item.item_issued,
item_name=it[0].item_name,
uom=it[0].stock_uom,
cost_center=d_cost_center
)
se.append('items', item)
se.submit()
def _last_bin_card_value(item):
	# parameterized query to avoid SQL injection via the item name
	last_value = frappe.db.sql("SELECT `count`, ppu, current_value FROM `tabStationaries Bin Card` "
							   "WHERE item = %(item)s ORDER BY date DESC LIMIT 1",
							   {"item": item.item_issued})
if len(last_value):
return last_value[0]
return [0, item.ppu, 0]
| 30.708333
| 123
| 0.640095
| 413
| 2,948
| 4.27845
| 0.276029
| 0.102999
| 0.113186
| 0.0515
| 0.162422
| 0.049802
| 0.028297
| 0
| 0
| 0
| 0
| 0.010082
| 0.259837
| 2,948
| 95
| 124
| 31.031579
| 0.799725
| 0.090909
| 0
| 0
| 0
| 0
| 0.109363
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.060606
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ed320ed0b2ababd92ec43fa0249838c7f41091f
| 1,119
|
py
|
Python
|
build_scripts/yariv_shaders/yariv_to_hex.py
|
danilw/vulkan-shadertoy-launcher
|
8a8a00f2f32d5c4dc64b625a9bdfe4adcdc5c4ec
|
[
"MIT"
] | 37
|
2020-03-16T00:21:03.000Z
|
2022-03-04T23:30:30.000Z
|
build_scripts/yariv_shaders/yariv_to_hex.py
|
danilw/vulkan-shadertoy-launcher
|
8a8a00f2f32d5c4dc64b625a9bdfe4adcdc5c4ec
|
[
"MIT"
] | 1
|
2020-06-04T12:29:24.000Z
|
2021-03-14T21:31:55.000Z
|
example_game/shaders/yariv_to_hex.py
|
danilw/vulkan_shader_launcher
|
e41c5a9c0f35a72e12a5300f194e9faff83aa684
|
[
"MIT"
] | 2
|
2021-03-27T06:28:53.000Z
|
2021-09-05T20:29:36.000Z
|
import struct
import os
import sys
import subprocess
if len(sys.argv) != 2:
print('Usage: python %s filename \n output is *.spv *.yariv and *.hex file \n' % sys.argv[0])
quit()
inputfilepath = sys.argv[1]
outputname = os.path.basename(inputfilepath)
outdir = os.path.dirname(inputfilepath)
ginfile = os.path.basename(inputfilepath)
ooutdir = os.path.join(outdir,"bin");
spirvcompiler = 'glslangValidator'
if os.name == 'nt':
spirvcompiler += ".exe"
yariv_pack = './yariv_pack'
if os.name == 'nt':
    yariv_pack += ".exe"
if not os.path.isdir(ooutdir):
    os.mkdir(ooutdir, 0o0755)
subprocess.call([spirvcompiler,'-V100',inputfilepath,'-o',os.path.join(ooutdir,ginfile) + '.spv'])
subprocess.call([yariv_pack,os.path.join(ooutdir,ginfile) + '.spv'])
infile = open(os.path.join(ooutdir,ginfile) + '.yariv', 'rb')
outfilepath = os.path.join(ooutdir,outputname + '.hex')
outfile = open(outfilepath, 'w')
lineno = 1
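# emit every byte of the .yariv file as a comma-separated hex initializer
# list, 20 values per line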
while True:
    b = infile.read(1)
    if len(b) == 0:
        break
d, = struct.unpack('B', b)
outfile.write(hex(d) + ',')
    if lineno % 20 == 0:
        outfile.write('\n')
    lineno = lineno + 1
infile.close()
outfile.close()
| 23.808511
| 98
| 0.670241
| 158
| 1,119
| 4.727848
| 0.398734
| 0.072289
| 0.066934
| 0.091031
| 0.174029
| 0.141901
| 0
| 0
| 0
| 0
| 0
| 0.019916
| 0.147453
| 1,119
| 46
| 99
| 24.326087
| 0.763103
| 0
| 0
| 0.114286
| 0
| 0
| 0.12958
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.114286
| 0
| 0.114286
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ed529dee684fd60fac1d8d89d7b4a98c0265b6b
| 3,672
|
py
|
Python
|
gui/cli.py
|
HaoZeke/prest
|
eec6b34bde4e060f52a391662347918995ded245
|
[
"BSD-3-Clause"
] | null | null | null |
gui/cli.py
|
HaoZeke/prest
|
eec6b34bde4e060f52a391662347918995ded245
|
[
"BSD-3-Clause"
] | null | null | null |
gui/cli.py
|
HaoZeke/prest
|
eec6b34bde4e060f52a391662347918995ded245
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import sys
import logging
import argparse
import tqdm
import dataset.budgetary
from model import *
from test import MockWorker
from dataset import load_raw_csv
from gui.estimation import Options as EstimationOpts
from dataset.experimental_data import ExperimentalData
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
class ProgressWorker:
def __init__(self):
self.bar = None
self.size = None
self.last_value = 0
def set_work_size(self, size : int) -> None:
self.size = size
self.bar = tqdm.tqdm(total=size)
def set_progress(self, value : int) -> None:
self.bar.update(value - self.last_value)
self.last_value = value
def budgetary_consistency(args):
ds = dataset.budgetary.load_from_csv(args.fname_in)
dsc = ds.analysis_consistency(ProgressWorker(), None)
variant = dsc._get_export_variant(args.export_variant)
dsc.export(args.fname_out, '*.csv', variant, ProgressWorker())
def estimate(args):
rows = load_raw_csv(args.fname_in)
ds = ExperimentalData.from_csv('dataset', rows[1:], (0, 1, None, 2))
AVAILABLE_MODELS = [
preorder(strict=True, total=True),
preorder(strict=False, total=True),
unattractive(strict=True, total=True),
unattractive(strict=False, total=True),
preorder(strict=True, total=False),
preorder(strict=False, total=False),
UndominatedChoice(strict=True),
UndominatedChoice(strict=False),
PartiallyDominantChoice(fc=True),
PartiallyDominantChoice(fc=False),
Overload(PreorderParams(strict=True, total=True)),
Overload(PreorderParams(strict=False, total=True)),
StatusQuoUndominatedChoice(),
TopTwo(),
SequentiallyRationalizableChoice(),
]
if not args.models:
print('Please specify a model using -m:')
for m in AVAILABLE_MODELS:
print(' ' + str(m))
sys.exit(1)
if args.models == 'all':
models = AVAILABLE_MODELS
else:
models = [
m
for m in AVAILABLE_MODELS
if str(m) in args.models
]
dsm = ds.analysis_estimation(ProgressWorker(), EstimationOpts(
models=models,
disable_parallelism=args.sequential,
))
variant = dsm._get_export_variant(args.export_variant)
dsm.export(args.fname_out, '*.csv', variant, MockWorker())
def main(args):
if args.action == 'estimate':
estimate(args)
elif args.action == 'budgetary':
budgetary_consistency(args)
else:
raise Exception(f'unknown action: {args.action}')
if __name__ == '__main__':
ap = argparse.ArgumentParser()
sub = ap.add_subparsers(dest='action', help='subcommands')
sub.required = True
apE = sub.add_parser('estimate', help='model estimation')
apE.add_argument('fname_in', metavar='input.csv')
apE.add_argument('fname_out', metavar='output.csv')
apE.add_argument('-e', dest='export_variant',
default='compact (human-friendly)',
help='export variant [%(default)s]',
)
    apE.add_argument('-s', '--sequential', default=False, action='store_true', help='disable parallelism')
apE.add_argument('-m', dest='models', metavar='MODEL', nargs='+', help='model(s)')
apB = sub.add_parser('budgetary', help='budgetary consistency')
apB.add_argument('fname_in', metavar='input.csv')
apB.add_argument('fname_out', metavar='output.csv')
apB.add_argument('-e', dest='export_variant',
default='Summary',
help='export variant [%(default)s]',
)
main(ap.parse_args())
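# Example invocations (file names are placeholders; the exact module path for
# running this script may differ):
#   python cli.py estimate input.csv output.csv -m all
#   python cli.py budgetary input.csv output.csv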
| 31.655172
| 106
| 0.659586
| 434
| 3,672
| 5.419355
| 0.299539
| 0.044218
| 0.029762
| 0.024235
| 0.180272
| 0.159014
| 0.088435
| 0
| 0
| 0
| 0
| 0.002419
| 0.211874
| 3,672
| 115
| 107
| 31.930435
| 0.810297
| 0.005719
| 0
| 0.042105
| 0
| 0
| 0.119178
| 0
| 0.010526
| 0
| 0
| 0
| 0
| 1
| 0.063158
| false
| 0
| 0.105263
| 0
| 0.178947
| 0.021053
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ed8e2ed4b2704d5c1be4866ec0fcfee29634e45
| 1,932
|
py
|
Python
|
api.py
|
amagrabi/oeb-importer-proto
|
f93b1ac1834e10595c8d89e23cde1fadfc88d009
|
[
"Apache-2.0"
] | null | null | null |
api.py
|
amagrabi/oeb-importer-proto
|
f93b1ac1834e10595c8d89e23cde1fadfc88d009
|
[
"Apache-2.0"
] | null | null | null |
api.py
|
amagrabi/oeb-importer-proto
|
f93b1ac1834e10595c8d89e23cde1fadfc88d009
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Functions to make API calls.
@author: amagrabi
"""
import requests
def login(client_id, client_secret, project_key, scope, host = 'EU'):
    '''Authentication
Args:
client_id: client_id.
client_secret: client_secret.
project_key: project_key.
scope: Scope of access (read, write, etc.).
        host: 'EU' or 'US'.
Returns:
        Authentication data.
'''
headers = { 'Content-Type' : 'application/x-www-form-urlencoded' }
body = "grant_type=client_credentials&scope=%s" % scope
if host == 'EU':
url = "https://auth.sphere.io/oauth/token"
elif host == 'US':
url = "https://auth.commercetools.co/oauth/token"
else:
raise Exception("Host is unknown (has to be 'EU' or 'US').")
auth = (client_id, client_secret)
r = requests.post(url, data=body, headers=headers, auth=auth)
    if r.status_code == 200:
return r.json()
else:
raise Exception("Failed to get an access token. Are you sure you have added them to config.py?")
def query(endpoint, project_key, auth, host = 'EU'):
'''Fetch Data via API into Json-Format
Args:
endpoint: API endpoint (products, orders, etc.).
project_key: project_key.
auth: Login data.
        host: 'EU' or 'US'.
Returns:
Query output in json.
'''
headers = { "Authorization" : "Bearer %s" % auth["access_token"] }
if host == 'EU':
url = "https://api.sphere.io/%s/%s" % (project_key, endpoint)
elif host == 'US':
url = "https://api.commercetools.co/%s/%s" % (project_key, endpoint)
else:
raise Exception("Host is unknown (has to be 'EU' or 'US').")
r = requests.get(url, headers=headers)
data_json = r.json() # json-format as nested dict-/list-structure
return data_json
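# A minimal usage sketch (all credentials and keys below are placeholders):
#   auth = login('client-id', 'client-secret', 'project-key',
#                'manage_project:project-key')
#   orders = query('orders', 'project-key', auth)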
| 29.272727
| 104
| 0.59058
| 249
| 1,932
| 4.493976
| 0.413655
| 0.071492
| 0.050045
| 0.053619
| 0.205541
| 0.078642
| 0.078642
| 0.078642
| 0.078642
| 0.078642
| 0
| 0.003544
| 0.269669
| 1,932
| 66
| 105
| 29.272727
| 0.789511
| 0.30176
| 0
| 0.333333
| 0
| 0
| 0.343598
| 0.057536
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.037037
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ed978e5e0ccff6910a7ff36922b214818cfc125
| 769
|
py
|
Python
|
model/addParams.py
|
thegricean/modals
|
9bb267a64542ee30e2770d79d9cd5d9cce890be8
|
[
"MIT"
] | null | null | null |
model/addParams.py
|
thegricean/modals
|
9bb267a64542ee30e2770d79d9cd5d9cce890be8
|
[
"MIT"
] | null | null | null |
model/addParams.py
|
thegricean/modals
|
9bb267a64542ee30e2770d79d9cd5d9cce890be8
|
[
"MIT"
] | null | null | null |
import sys, re, string, numpy
p_strongs = numpy.arange(0.1, 0.9, 0.1)
costs = range(3, 10, 1)
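# Sweep the probability simplex (p_strong + p_mod + p_weak = 1) and the cost
# grid, writing one Church program per parameter setting.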
for p_s in p_strongs:
p_meds = numpy.arange(0.1, 1-p_s, 0.1)
for p_m in p_meds:
p_w = 1 - p_s - p_m
for cost in costs:
filename = str(p_s) + "_" + str(p_m) + "_" + str(p_w) + "_" + str(cost) + ".church"
wF = open("model_fits/" + filename, "w")
wF.write("(define p-strong " + str(p_s) + ")\n" + "(define p-mod " + str(p_m) + ")\n" + "(define p-weak " + str(p_w) + ")\n" + "(define cost " + str(cost) + ")\n")
f = open(sys.argv[1], "r")
for l in f:
wF.write(l)
f.close()
#print str(p_s) + "," + str(p_v) + "," + str(p_a) + "," + str(alpha)
| 40.473684
| 186
| 0.461638
| 128
| 769
| 2.59375
| 0.34375
| 0.108434
| 0.045181
| 0.078313
| 0.054217
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03301
| 0.330299
| 769
| 18
| 187
| 42.722222
| 0.61165
| 0.087126
| 0
| 0
| 0
| 0
| 0.134094
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ee3a4796a1381dccf1edb9593a4f340f39bac8a
| 9,750
|
py
|
Python
|
main_cifar10.py
|
snu-ccl/approxCNN
|
49cc0e6635682f678f8501424063102fe30d7dd6
|
[
"CECILL-B"
] | 1
|
2022-01-16T03:45:43.000Z
|
2022-01-16T03:45:43.000Z
|
main_cifar10.py
|
snu-ccl/approxCNN
|
49cc0e6635682f678f8501424063102fe30d7dd6
|
[
"CECILL-B"
] | null | null | null |
main_cifar10.py
|
snu-ccl/approxCNN
|
49cc0e6635682f678f8501424063102fe30d7dd6
|
[
"CECILL-B"
] | null | null | null |
from __future__ import print_function
from tqdm import *
import sys
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from models.resnet_cifar10 import *
from models.vgg_cifar10 import *
from models.utils_approx import rangeException
parser = argparse.ArgumentParser(description='Implementation of Section V-A for `Precise Approximation of Convolutional Neural'
                                             + ' Networks for Homomorphically Encrypted Data.`')
parser.add_argument('--mode', default='inf', dest='mode', type=str,
help='Program mode. `train`: train randomly initialized model, '\
'`inf`: inference the proposed approximate deep learning model')
parser.add_argument('--gpu', default=0, dest='gpuid', type=int,
help='ID of GPU that is used for training and inference.')
parser.add_argument('--backbone', default='resnet20', dest='backbone', type=str,
help='Backbone model.')
parser.add_argument('--approx_method', default='proposed', dest='approx_method', type=str,
help='Method of approximating non-arithmetic operations. `proposed`: proposed composition of minimax polynomials, '\
'`square`: approximate ReLU as x^2, `relu_aq`: approximate ReLU as 2^-3*x^2+2^-1*x+2^-2. '\
'For `square` and `relu_aq`, we use exact max-pooling function.')
parser.add_argument('--batch_inf', default=128, dest='batch_inf', type=int,
help='Batch size for inference.')
parser.add_argument('--alpha', default=14, dest='alpha', type=int,
help='The precision parameter. Integers from 4 to 14 can be used.')
parser.add_argument('--B_relu', default=50.0, dest='B_relu', type=float,
help='The bound of approximation range for the approximate ReLU function.')
parser.add_argument('--B_max', default=50.0, dest='B_max', type=float,
help='The bound of approximation range for the approximate max-pooling function.')
parser.add_argument('--B_search', default=5.0, dest='B_search', type=float,
                    help='The step size by which B is enlarged until all input values fall within the approximation range.')
parser.add_argument('--dataset_path', default='../dataset/CIFAR10', dest='dataset_path', type=str,
                    help='The path containing the CIFAR10 dataset.')
parser.add_argument('--params_name', default='ours', dest='params_name', type=str,
help='The pre-trained parameters file name. Please omit `.pt`.')
args = parser.parse_args()
torch.cuda.set_device(args.gpuid)
params_path = ''.join(['./pretrained/cifar10/', args.backbone, '_', args.params_name, '.pt'])
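# Standard CIFAR-10 pipeline: random crop + horizontal flip augmentation for
# training, per-channel mean/std normalization for both splits.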
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
cifar10_train = datasets.CIFAR10(args.dataset_path, train=True, download=True,
transform=transform_train)
loader_train = DataLoader(cifar10_train, batch_size=128)
cifar10_test = datasets.CIFAR10(args.dataset_path, train=False, download=True,
transform=transform_test)
loader_test = DataLoader(cifar10_test, batch_size=args.batch_inf)
dtype = torch.FloatTensor # the CPU datatype
gpu_dtype = torch.cuda.FloatTensor
def train(model, loss_fn, optimizer, scheduler, num_epochs=1):
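    """Train `model` on CIFAR-10 for `num_epochs`, printing test accuracy after each epoch."""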
for epoch in range(num_epochs):
print('Starting epoch %d / %d' % (epoch + 1, num_epochs))
model.train()
print('Training...')
for t, (x, y) in tqdm(enumerate(loader_train)):
torch.cuda.empty_cache()
x_var = Variable(x.cuda())
y_var = Variable(y.cuda().long())
scores = model(x_var)
loss = loss_fn(scores, y_var)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Evaluating...')
test_acc = check_accuracy(model, loader_test) * 100
        print('Loss: %.4f, test accuracy: %.2f' % (loss.item(), test_acc))
scheduler.step()
print('--------------------------')
def check_accuracy(model, loader, use_tqdm = False):
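    """Return the fraction of samples in `loader` that `model` classifies correctly."""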
num_correct = 0
num_samples = 0
model.eval()
torch.cuda.empty_cache()
with torch.no_grad():
for x, y in (tqdm(loader) if use_tqdm else loader):
x_var = Variable(x.cuda())
scores = model(x_var)
_, preds = scores.data.cpu().max(1)
num_correct += (preds == y).sum()
num_samples += preds.size(0)
acc = float(num_correct) / num_samples
return acc
def checking_batchsize_inference(model):
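    """Run one test batch through `model` to check that the inference batch size fits on the GPU."""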
model.eval()
with torch.no_grad():
for t, (x, y) in enumerate(loader_test):
x_var = Variable(x.cuda())
try:
_ = model(x_var)
except rangeException as e:
e.show()
                print('The validity of the batch size cannot be checked since the given B is too small.')
print('Please give larger B_relu or B_max.')
sys.exit("Terminated.")
except Exception:
                print('The batch size for INFERENCE seems to be too large for your GPU.')
print('Your current batch size is ' + str(args.batch_inf) + '. Try reducing `--batch_inf`.')
sys.exit("Terminated.")
break
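# Approximation settings: entry 0 configures the approximate ReLU (bound B_relu),
# entry 1 the approximate max-pooling (bound B_max).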
approx_dict_list = [{'alpha': args.alpha, 'B': args.B_relu, 'type': args.approx_method},
{'alpha': args.alpha, 'B': args.B_max, 'type': args.approx_method}]
if args.backbone == 'resnet20':
original_model = resnet20()
approx_model = resnet20(approx_dict_list)
elif args.backbone == 'resnet32':
original_model = resnet32()
approx_model = resnet32(approx_dict_list)
elif args.backbone == 'resnet44':
original_model = resnet44()
approx_model = resnet44(approx_dict_list)
elif args.backbone == 'resnet56':
original_model = resnet56()
approx_model = resnet56(approx_dict_list)
elif args.backbone == 'resnet110':
original_model = resnet110()
approx_model = resnet110(approx_dict_list)
elif args.backbone == 'vgg11bn':
original_model = vgg11_bn()
approx_model = vgg11_bn(approx_dict_list)
elif args.backbone == 'vgg13bn':
original_model = vgg13_bn()
approx_model = vgg13_bn(approx_dict_list)
elif args.backbone == 'vgg16bn':
original_model = vgg16_bn()
approx_model = vgg16_bn(approx_dict_list)
elif args.backbone == 'vgg19bn':
original_model = vgg19_bn()
    approx_model = vgg19_bn(approx_dict_list)
else:
    sys.exit('Unknown backbone: ' + args.backbone)
original_model.cuda()
approx_model.cuda()
if args.mode == 'train':
if args.params_name == 'ours':
print('Please set your own name or use another name rather than `ours` '
'to avoid overwriting our pre-trained parameters used in the paper.')
sys.exit("Terminated.")
print("Training random initialized", args.backbone, "for CIFAR10")
print("")
loss_fn = nn.CrossEntropyLoss().cuda()
optimizer = optim.SGD(original_model.parameters(), lr=1e-3, momentum=0.9, weight_decay=5e-3, nesterov=True)
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[100, 150], last_epoch=-1)
train(original_model, loss_fn, optimizer, scheduler, num_epochs=200)
torch.save(original_model.state_dict(), params_path)
print("Saved pre-trained parameters. Path:", params_path)
if args.mode == 'inf':
original_model.load_state_dict(torch.load(params_path))
approx_model.load_state_dict(torch.load(params_path))
print("Used pre-trained parameter:", params_path)
print('==========================')
print("Inference the pre-trained original", args.backbone, "for CIFAR10")
original_acc = check_accuracy(original_model, loader_test, use_tqdm=True) * 100
print("Test accuracy: %.2f" % original_acc)
print('==========================')
print("Inference the approximate", args.backbone, "with same pre-trained parameters for CIFAR10")
print("Precision parameter:", args.alpha)
print("")
# Check if given batch size is valid.
checking_batchsize_inference(approx_model)
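    # Retry loop: if an activation falls outside the approximation range, the
    # corresponding bound B is widened by B_search and inference is re-run.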
while True:
try:
print("Trying to approximate inference...")
print("with B_ReLU = %.1f," % approx_dict_list[0]['B'])
print("and B_max = %.1f," % approx_dict_list[1]['B'])
approx_acc = check_accuracy(approx_model, loader_test, use_tqdm=True) * 100
print("Approximation success!")
break
except rangeException as e:
e.show()
if e.type == 'relu':
print("We increase B_ReLU", args.B_search, "and try inference again.")
approx_dict_list[0]['B'] += args.B_search
elif e.type == 'max':
print("We increase B_maxpooling", args.B_search, "and try inference again.")
approx_dict_list[1]['B'] += args.B_search
print('--------------------------')
print("")
print("Test accuracy: %.2f" % approx_acc)
rate = (approx_acc - original_acc) / original_acc * 100
print("Difference from the baseline: %.2f%%" % rate)
| 41.489362
| 136
| 0.643282
| 1,234
| 9,750
| 4.91248
| 0.233387
| 0.034312
| 0.032333
| 0.023755
| 0.212141
| 0.180304
| 0.116628
| 0.088255
| 0.070934
| 0.056087
| 0
| 0.029478
| 0.224103
| 9,750
| 234
| 137
| 41.666667
| 0.771844
| 0.005333
| 0
| 0.19171
| 0
| 0.005181
| 0.249226
| 0.015061
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015544
| false
| 0
| 0.082902
| 0
| 0.103627
| 0.165803
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ee540409b07f789b14091c60316fa04528be96a
| 1,355
|
py
|
Python
|
annotation_predictor/concat_detection_records.py
|
Inch4Tk/label_server
|
3d0c39dd5a0c456794a1375051ca4f93a438ebf6
|
[
"MIT"
] | null | null | null |
annotation_predictor/concat_detection_records.py
|
Inch4Tk/label_server
|
3d0c39dd5a0c456794a1375051ca4f93a438ebf6
|
[
"MIT"
] | null | null | null |
annotation_predictor/concat_detection_records.py
|
Inch4Tk/label_server
|
3d0c39dd5a0c456794a1375051ca4f93a438ebf6
|
[
"MIT"
] | null | null | null |
import argparse
import json
import os
from datetime import datetime
from settings import annotation_predictor_metadata_dir
def concat_detection_record(record1: str, record2: str):
"""
    Merges two detection records (keys from record2 override duplicates in record1) and saves the result in a new file.
Args:
record1: Path to first record, saved in a json-file
record2: Path to second record, saved in a json-file
Returns: path to new file
"""
timestamp = datetime.now().strftime('%Y_%m_%d_%H%M%S')
filename = '{}.json'.format(timestamp)
path_to_file = os.path.join(annotation_predictor_metadata_dir, filename)
with open(record1, 'r') as f:
r1 = json.load(f)
with open(record2, 'r') as f:
r2 = json.load(f)
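    # dict.update merges the two records; keys present in both take record2's value.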
r1.update(r2)
with open(path_to_file, 'w') as f:
json.dump(r1, f)
return path_to_file
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Concatenate two detection records')
    parser.add_argument('path_to_record_1', type=str, metavar='path_to_record_1',
                        help='path to first detection record')
    parser.add_argument('path_to_record_2', type=str, metavar='path_to_record_2',
                        help='path to second detection record')
args = parser.parse_args()
concat_detection_record(args.path_to_record_1, args.path_to_record_2)
| 32.261905
| 81
| 0.678967
| 194
| 1,355
| 4.494845
| 0.381443
| 0.09633
| 0.082569
| 0.044725
| 0.176606
| 0.176606
| 0
| 0
| 0
| 0
| 0
| 0.016114
| 0.221402
| 1,355
| 42
| 82
| 32.261905
| 0.810427
| 0.15572
| 0
| 0
| 0
| 0
| 0.169811
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.192308
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6ee9460f4f92c18fd62442a8f5aa3304ac1ef00f
| 9,068
|
py
|
Python
|
phot/command_test.py
|
SilverRon/gppy
|
0ee56ca270af62afe1702fce37bef30add14f12a
|
[
"MIT"
] | 4
|
2019-05-08T08:08:59.000Z
|
2021-12-22T08:57:46.000Z
|
phot/command_test.py
|
SilverRon/gppy
|
0ee56ca270af62afe1702fce37bef30add14f12a
|
[
"MIT"
] | null | null | null |
phot/command_test.py
|
SilverRon/gppy
|
0ee56ca270af62afe1702fce37bef30add14f12a
|
[
"MIT"
] | 2
|
2019-05-08T08:09:02.000Z
|
2019-06-27T13:41:44.000Z
|
# PHOTOMETRY CODE (TEST) FOR PYTHON 3.X
# 2019.03.09
# GREGORY S.H. PAEK
#============================================================
import os, glob
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
from astropy.io import fits
#from imsng import zpcal
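# NOTE: the helper functions used below (secom, sqsum, ps1_query, ps1_Tonry,
# sdss_query, sdss_Blaton, apass_query, apass_Blaton, twomass_query, matching,
# star4zp, zpcal, zp_plot, targetfind, bkgest_mask, limitmag, plotshow) must be
# in scope, presumably from the imsng package referenced above.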
#============================================================
# USER SETTING
#============================================================
sharepath = '/home/sonic/Research/yourpy/config/'
configfile = sharepath+'targetphot.sex'
paramfile = sharepath+'targetphot.param'
nnwfile = sharepath+'targetphot.nnw'
convfile = sharepath+'targetphot.conv'
psfexconf_prese_conf = sharepath+'prepsfex.sex'
psfexconf_prese_param = sharepath+'prepsfex.param'
psfexconf_psfex_conf = sharepath+'default.psfex'
psfexconf_psfex_conv = sharepath+'default.conv'
obsinfo = ascii.read('/home/sonic/Research/table/obs.txt')
#------------------------------------------------------------
def puthdr(inim, hdrkey, hdrval, hdrcomment=''):
	"""Set (or update) header keyword `hdrkey` of image `inim` in place."""
fits.setval(inim, hdrkey, value=hdrval, comment=hdrcomment)
comment = inim+'\t'+'('+hdrkey+'\t'+str(hdrval)+')'
#print(comment)
#------------------------------------------------------------
# INITIAL INPUT
#------------------------------------------------------------
# TARGET COORD.
#ra1, de1 = 54.50492875, -26.94636444 # GRB 190114C
#ra1, de1 = 173.137, +27.699 # GRB 130427A
#ra1, de1 = 196.5942029, +20.35490083
#ra1, de1 = 223.3201092, 34.75006139 # AT2019ein
#ra1, de1 = 185.733875, 15.826 # SN2019ehk
ra1, de1 = 161.63775, 13.74194444
# IMAGES TO CALC.
#imlist = glob.glob('Calib-*com.fits')
os.system('ls *.fits')
imlist = glob.glob(input('image to process\t: '))
imlist.sort()
for img in imlist: print(img)
# REF. CATALOG
refcatname = 'PS1' #PS1/SDSS/APASS/2MASS
# RESULT FILE
f = open('phot.dat', 'w')
colline = '#obs\tdate-obs\taperture\tseeing\tzp\tzperr\tinstmag\tinstmagerr\tmag\tmagerr\n'
f.write(colline)
#============================================================
# MAIN COMMAND
#============================================================
imfail = []
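# Main loop: for each image run source extraction, query the reference catalog,
# fit a photometric zero point, and measure (or upper-limit) the target.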
for inim in imlist:
query_checklist = glob.glob('*.cat')
try:
hdr = fits.getheader(inim)
part = inim.split('-')
obs = part[1]
name = part[2]
exptime = part[6]
refmagkey = part[5]
refmagerkey = refmagkey+'err'
gain = obsinfo[obsinfo['obs']==obs]['gain'][0]
pixscale = obsinfo[obsinfo['obs']==obs]['pixelscale'][0]
# SourceEXtractor
intbl0, incat, fwhm_pix, fwhm_arcsec = secom(inim, gain=gain, pixscale=pixscale, det_sigma=3.0, backsize=str(64))
# APPROXIMATE CENTER POS. & DIST CUT
xim_cent, yim_cent = np.max(intbl0['X_IMAGE'])/2, np.max(intbl0['Y_IMAGE'])/2
im_dist = sqsum((xim_cent-intbl0['X_IMAGE']), (yim_cent-intbl0['Y_IMAGE']))
        indx_dist = np.where( im_dist < 0.99*(xim_cent+yim_cent)/2. ) # keep sources within 0.99 x the mean half-size of the frame
intbl = intbl0[indx_dist]
intbl.write(incat, format='ascii', overwrite=True)
# NEAR CENTER RA DEC
radeg = np.median(intbl['ALPHA_J2000'])
dedeg = np.median(intbl['DELTA_J2000'])
#------------------------------------------------------------
# REF. CATALOG QUERY
#------------------------------------------------------------
if refcatname == 'PS1':
if 'ps1-'+name+'.cat' not in query_checklist:
querytbl = ps1_query(name, radeg, dedeg, radius=0.65)
else:
querytbl = ascii.read('ps1-'+name+'.cat')
reftbl, refcat = ps1_Tonry(querytbl, name)
elif refcatname == 'SDSS':
if 'sdss-'+name+'.cat' not in query_checklist:
querytbl = sdss_query(name, radeg, dedeg)
else:
querytbl = ascii.read('sdss-'+name+'.cat')
reftbl, refcat = sdss_Blaton(querytbl, name)
elif refcatname == 'APASS':
if 'apass-'+name+'.cat' not in query_checklist:
querytbl = apass_query(name, radeg, dedeg)
else:
querytbl = ascii.read('apass-'+name+'.cat')
reftbl, refcat = apass_Blaton(querytbl, name)
elif refcatname == '2MASS':
if '2mass-'+name+'.cat' not in query_checklist:
querytbl = twomass_query(name, radeg, dedeg, band=refmagkey, radius=1.0)
else:
querytbl = ascii.read('2mass-'+name+'.cat')
reftbl, refcat = querytbl, '2mass-'+name+'.cat'
#------------------------------------------------------------
# MATCHING
#------------------------------------------------------------
merge_raw = matching(incat, refcat)
colnames = merge_raw.colnames
maglist = []
magerlist = []
for col in colnames:
if 'MAG_APER_7' in col:
#print(col)
maglist.append(col)
elif 'MAGERR_APER_7' in col:
#print(col)
magerlist.append(col)
#intbl = ascii.read(incat)
for i in range(0, len(maglist)):
mtbl = merge_raw
inmagkey = maglist[i]
inmagerkey = magerlist[i]
param_st4zp = dict( intbl=mtbl,
inmagerkey=inmagerkey,
refmagkey=refmagkey,
refmagerkey=refmagerkey,
refmaglower=13,
refmagupper=16.5,
refmagerupper=0.05,
inmagerupper=0.1,
class_star_cut=0.001)
stars_zp = star4zp(**param_st4zp)
#stars_zp = star4zp(mtbl, inmagerkey, refmagkey, refmagerkey, refmaglower=14, refmagupper=16.5, refmagerupper=0.05, inmagerupper=0.1, class_star_cut=0.001)
#stars_zp, stdnumb = star4zp(mtbl, inmagerkey, refmagkey, refmagerkey, refmaglower=14, refmagupper=18, refmagerupper=0.05, inmagerupper=0.1, class_star_cut=0.01)
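            # zpcal fits the photometric zero point from the selected stars;
            # intbl_alive / intbl_exile hold the stars kept / rejected by the fit.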
zp, zper, intbl_alive, intbl_exile = zpcal(stars_zp, inmagkey, inmagerkey, refmagkey, refmagerkey)
zp_plot(inim, inmagkey, zp, zper, intbl_alive[inmagkey], intbl_alive[refmagkey], intbl_alive['zp'], intbl_exile[inmagkey], intbl_exile[refmagkey], intbl_exile['zp'])
intbl['REAL_'+inmagkey] = zp + intbl[inmagkey]
intbl['REAL_'+inmagerkey] = sqsum(zper, intbl[inmagerkey])
#------------------------------------------------------------
# TARGET PHOT
#------------------------------------------------------------
ra2, de2 = intbl['ALPHA_J2000'], intbl['DELTA_J2000']
indx_target = targetfind(ra1, de1, ra2, de2, sep=10)
skymean, skymed, skysig = bkgest_mask(inim)
if len(indx_target[0]) == 0:
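            # No source matched near the target: report the 3-sigma limiting magnitude instead.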
aper = 2*fwhm_pix
ul = limitmag(3, zp, aper, skysig)
try:
comment = inim+'\t\t'+hdr['date-obs']+'\t\t'+'MAG_APER_7'+'\t\t'+str(round(fwhm_arcsec, 3))+'\t\t' \
+str(round(zp, 3))+'\t'+str(round(zper, 3)) \
+'\t--\t\t\t'+'--\t' \
+'\t'+str(round(ul, 3))+'\t'+'0'+'\n'
except:
comment = inim+'\t\t'+'MAG_APER_7'+'\t\t'+str(round(fwhm_arcsec, 3))+'\t\t' \
+str(round(zp, 3))+'\t'+str(round(zper, 3)) \
+'\t--\t\t\t'+'--\t' \
+'\t'+str(round(ul, 3))+'\t'+'0'+'\n'
print(comment)
f.write(comment)
else:
try:
comment = inim+'\t\t'+hdr['date-obs']+'\t\t'+'MAG_APER_7'+'\t\t'+str(round(fwhm_arcsec, 3))+'\t\t' \
+str(round(zp, 3))+'\t'+str(round(zper, 3)) \
+'\t'+str(round(intbl[indx_target]['MAG_APER_7'][0], 3))+'\t\t'+str(round(intbl[indx_target]['MAGERR_APER_7'][0], 3)) \
+'\t'+str(round(intbl[indx_target]['REAL_MAG_APER_7'][0], 3))+'\t'+str(round(intbl[indx_target]['REAL_MAGERR_APER_7'][0], 3))+'\n'
except:
comment = inim+'\t\t'+'MAG_APER_7'+'\t\t'+str(round(fwhm_arcsec, 3))+'\t\t' \
+str(round(zp, 3))+'\t'+str(round(zper, 3)) \
+'\t'+str(round(intbl[indx_target]['MAG_APER_7'][0], 3))+'\t\t'+str(round(intbl[indx_target]['MAGERR_APER_7'][0], 3)) \
+'\t'+str(round(intbl[indx_target]['REAL_MAG_APER_7'][0], 3))+'\t'+str(round(intbl[indx_target]['REAL_MAGERR_APER_7'][0], 3))+'\n'
print(comment)
f.write(comment)
# PLOT IMAGE
numb_list = intbl_alive['NUMBER']
xim_list = intbl_alive['X_IMAGE']
yim_list = intbl_alive['Y_IMAGE']
numb_addlist= intbl_exile['NUMBER']
xim_addlist = intbl_exile['X_IMAGE']
yim_addlist = intbl_exile['Y_IMAGE']
plotshow(inim, numb_list, xim_list, yim_list, add=True, numb_addlist=numb_addlist, xim_addlist=xim_addlist, yim_addlist=yim_addlist)
puthdr(inim, 'SEEING', round(fwhm_arcsec, 3), hdrcomment='SEEING [arcsec]')
puthdr(inim, 'PEEING', round(fwhm_pix, 3), hdrcomment='SEEING [pixel]')
puthdr(inim, 'STDNUMB', len(intbl_alive), hdrcomment='# OF STD STARS')
puthdr(inim, 'OPTZP', round(zp, 3), hdrcomment='2*SEEING DIAMETER')
puthdr(inim, 'OPTZPERR', round(zper, 3), hdrcomment='2*SEEING DIAMETER')
puthdr(inim, 'SKYSIG', round(skysig, 3), hdrcomment='SKY SIGMA VALUE')
puthdr(inim, 'SKYVAL', round(skymed, 3), hdrcomment='SKY MEDIAN VALUE')
    except Exception:
        imfail.append(inim)
#-------------------------------------------------------------------------#
f.close()
photbl = ascii.read('phot.dat')
#photbl[photbl['mag']>20]
print('='*60)
os.system('mkdir zpcal/;mv ./*zpcal.png ./zpcal/')
os.system('mkdir zpcal_test/;mv ./*zpcal_test.png ./zpcal_test/')
os.system('rm *aper.fits *xml snap*.fits psf-*.fits')
os.system('mkdir overview/;mv ./*png ./overview/')
os.system('cat phot.dat')
| 41.031674
| 168
| 0.585686
| 1,174
| 9,068
| 4.399489
| 0.260647
| 0.010068
| 0.038335
| 0.023233
| 0.279187
| 0.25789
| 0.241239
| 0.200968
| 0.16244
| 0.16244
| 0
| 0.041147
| 0.150419
| 9,068
| 220
| 169
| 41.218182
| 0.629283
| 0.209969
| 0
| 0.22293
| 0
| 0.006369
| 0.172433
| 0.020816
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006369
| false
| 0.038217
| 0.038217
| 0
| 0.044586
| 0.025478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6eef0375ed02fe1987ef09d32e826630ef2e2fef
| 4,599
|
py
|
Python
|
ponyo/train_vae_modules.py
|
ajlee21/ponyo
|
f68a461b2edd4ab3d9c0699a1e5e8dcd1308cc75
|
[
"BSD-3-Clause"
] | 1
|
2020-12-17T17:34:53.000Z
|
2020-12-17T17:34:53.000Z
|
ponyo/train_vae_modules.py
|
ajlee21/ponyo
|
f68a461b2edd4ab3d9c0699a1e5e8dcd1308cc75
|
[
"BSD-3-Clause"
] | 37
|
2020-06-15T18:15:10.000Z
|
2022-02-10T02:34:29.000Z
|
ponyo/train_vae_modules.py
|
ajlee21/ponyo
|
f68a461b2edd4ab3d9c0699a1e5e8dcd1308cc75
|
[
"BSD-3-Clause"
] | 3
|
2020-06-12T19:56:16.000Z
|
2021-04-21T15:22:33.000Z
|
"""
Author: Alexandra Lee
Date Created: 11 March 2020
Scripts related to training the VAE including
1. Normalizing gene expression data
2. Wrapper function to input training parameters and run vae
training in `vae.tybalt_2layer_model`
"""
from ponyo import vae, utils
import os
import pickle
import pandas as pd
from sklearn import preprocessing
import tensorflow as tf
import numpy as np
import random
import warnings
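# Snippet from the `warnings` module docs: raises a dummy DeprecationWarning
# inside a catch_warnings block where it is ignored (presumably intended to
# silence deprecation noise at import time).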
def fxn():
warnings.warn("deprecated", DeprecationWarning)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fxn()
def set_all_seeds(seed_val=42):
"""
This function sets all seeds to get reproducible VAE trained
models.
"""
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
os.environ["PYTHONHASHSEED"] = "0"
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(seed_val)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
random.seed(seed_val)
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
tf.set_random_seed(seed_val)
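    # tf.set_random_seed is the TensorFlow 1.x API; in TF 2.x the equivalent is tf.random.set_seed.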
def normalize_expression_data(
base_dir, config_filename, raw_input_data_filename, normalized_data_filename
):
"""
0-1 normalize the expression data.
Arguments
----------
base_dir: str
Root directory containing analysis subdirectories
config_filename: str
File containing user defined parameters
raw_input_data_filename: str
File containing raw expression data
    normalized_data_filename: str
        Output file containing normalized expression data
"""
# Read in config variables
params = utils.read_config(config_filename)
# Read data
data = pd.read_csv(raw_input_data_filename, header=0, sep="\t", index_col=0)
print(
"input: dataset contains {} samples and {} genes".format(
data.shape[0], data.shape[1]
)
)
# 0-1 normalize per gene
scaler = preprocessing.MinMaxScaler()
data_scaled_df = scaler.fit_transform(data)
data_scaled_df = pd.DataFrame(
data_scaled_df, columns=data.columns, index=data.index
)
print(
"Output: normalized dataset contains {} samples and {} genes".format(
data_scaled_df.shape[0], data_scaled_df.shape[1]
)
)
# Save scaler transform
scaler_filename = params["scaler_transform_filename"]
    with open(scaler_filename, "wb") as outfile:
        pickle.dump(scaler, outfile)
# Save scaled data
data_scaled_df.to_csv(normalized_data_filename, sep="\t", compression="xz")
def train_vae(config_filename, input_data_filename):
"""
Trains VAE model using parameters set in config file
Arguments
----------
config_filename: str
File containing user defined parameters
input_data_filename: str
File path corresponding to input dataset to use
"""
# Read in config variables
params = utils.read_config(config_filename)
# Load parameters
base_dir = os.path.abspath(os.path.join(os.getcwd(), "../"))
dataset_name = params["dataset_name"]
learning_rate = params["learning_rate"]
batch_size = params["batch_size"]
epochs = params["epochs"]
kappa = params["kappa"]
intermediate_dim = params["intermediate_dim"]
latent_dim = params["latent_dim"]
epsilon_std = params["epsilon_std"]
train_architecture = params["NN_architecture"]
validation_frac = params["validation_frac"]
# Read data
normalized_data = pd.read_csv(input_data_filename, header=0, sep="\t", index_col=0)
print(
"input dataset contains {} samples and {} genes".format(
normalized_data.shape[0], normalized_data.shape[1]
)
)
# Train (VAE)
vae.tybalt_2layer_model(
learning_rate,
batch_size,
epochs,
kappa,
intermediate_dim,
latent_dim,
epsilon_std,
normalized_data,
base_dir,
dataset_name,
train_architecture,
validation_frac,
)
| 27.375
| 112
| 0.68928
| 579
| 4,599
| 5.300518
| 0.355786
| 0.035191
| 0.033236
| 0.018573
| 0.222874
| 0.195503
| 0.162268
| 0.124471
| 0.090583
| 0.090583
| 0
| 0.012559
| 0.220918
| 4,599
| 167
| 113
| 27.538922
| 0.843985
| 0.378995
| 0
| 0.064935
| 0
| 0
| 0.123202
| 0.009222
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051948
| false
| 0
| 0.116883
| 0
| 0.168831
| 0.038961
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|