**Schema** (113 columns; one row per source file, with the file text in the `content` column):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| effective | string |
| hits | int64 |

Each `qsc_*_quality_signal` column also appears once more without the `_quality_signal` suffix (e.g. `qsc_code_num_words`); those mirror columns sit between `qsc_codepython_frac_lines_print_quality_signal` and `effective`, are typed int64, and are 0 in every record below, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`, which are typed and valued null.
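The records below are easiest to work with programmatically. A minimal loading-and-filtering sketch, assuming the table has been exported to a Parquet file; the file name `code_sample.parquet` and the thresholds are illustrative, not part of this dump:

```python
import pandas as pd

# Hypothetical export of this table -- substitute the actual file.
df = pd.read_parquet("code_sample.parquet")

# Keep Python files that parsed into an AST and are not dominated by
# duplicated 5-grams (both thresholds are illustrative choices).
mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
)
for _, row in df[mask].iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```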
**Record 1: `utils.py` from `kalpetros/greek-dictionary`**

| Field | Value |
|---|---|
| hexsha | `3ce1874797f955e0861f0ec1dfc943c5714b8253` |
| size | 6,192 |
| ext / lang | py / Python |
| repo path (same for stars, issues, forks) | `utils.py` |
| repo name | `kalpetros/greek-dictionary` |
| repo head hexsha | `962f36c299cbb46ffce9c7f78db7c9e513269499` |
| licenses | ["MIT"] |
| max_stars_count | 3 (events 2021-04-27T16:39:12.000Z to 2021-11-17T02:15:13.000Z) |
| max_issues_count | null |
| max_forks_count | 1 (events 2021-06-15T23:57:44.000Z to 2021-06-15T23:57:44.000Z) |

`content`:
```python
import click
import os
import requests
import shutil
import sys
import time

from bs4 import BeautifulSoup

alphabet = [
    {'letter': 'Α', 'pages': 31660},
    {'letter': 'Β', 'pages': 5050},
    {'letter': 'Γ', 'pages': 5890},
    {'letter': 'Δ', 'pages': 7130},
    {'letter': 'Ε', 'pages': 12530},
    {'letter': 'Ζ', 'pages': 1500},
    {'letter': 'Η', 'pages': 1310},
    {'letter': 'Θ', 'pages': 2300},
    {'letter': 'Ι', 'pages': 1720},
    {'letter': 'Κ', 'pages': 17700},
    {'letter': 'Λ', 'pages': 4740},
    {'letter': 'Μ', 'pages': 13020},
    {'letter': 'Ν', 'pages': 3790},
    {'letter': 'Ξ', 'pages': 5250},
    {'letter': 'Ο', 'pages': 4970},
    {'letter': 'Π', 'pages': 18560},
    {'letter': 'Ρ', 'pages': 2720},
    {'letter': 'Σ', 'pages': 14340},
    {'letter': 'Τ', 'pages': 7680},
    {'letter': 'Υ', 'pages': 3170},
    {'letter': 'Φ', 'pages': 5640},
    {'letter': 'Χ', 'pages': 5370},
    {'letter': 'Ψ', 'pages': 2080},
    {'letter': 'Ω', 'pages': 470},
]


def is_clean(word):
    """
    Check for profanity
    """
    clean = True
    profane_words = []

    if word in profane_words:
        clean = False

    return clean


def log(text, type):
    colors = {
        'success': 'green',
        'info': 'yellow',
        'warning': 'red'
    }
    click.secho(f'[{type}] - {text}', fg=colors[type])


def get_source(url):
    """
    Get page source for the given url
    """
    rs = requests.get(url)
    source = BeautifulSoup(rs.content, 'html.parser')
    return source


def parse(source):
    """
    Return words array for the given page source
    """
    children = source.find(id='lemmas').children
    words = []

    for node in children:
        dt = node.find('dt')
        if dt != -1:
            word = dt.find('b').text.strip(',')
            words.append(word)

    return words


def scrape(letter: str, pages: int):
    """
    Scrapes www.greek-language.gr to build
    a full list of modern Greek words
    https://www.greek-language.gr/greekLang/index.html
    """
    log(f'Getting letter {letter} words...', 'info')
    start = time.time()
    url = 'https://www.greek-language.gr/greekLang/modern_greek/tools/lexica/reverse/search.html'
    results = []
    page = 0

    while page <= int(pages):
        time.sleep(0.1)
        endpoint = f'{url}?start={page}&lq={letter}*'
        source = get_source(endpoint)
        words = parse(source)
        page = page + 10
        for word in words:
            results.append(word)

    end = time.time()
    total = end - start
    log(f'Got {letter} in {total}', 'success')
    return results


def get_data(file_name):
    """
    Return words in a given file
    """
    results = []

    if not os.path.isfile(file_name):
        return results

    try:
        with open(file_name, 'r') as words:
            for word in words:
                results.append(word.strip())
    except Exception as e:
        log(f'Could not get data {str(e)}', 'warning')

    return results


def check():
    """
    Check if necessary files exist
    """
    if not os.path.isfile('files/el.txt'):
        log('el.txt is missing from files. Please restore the repository.', 'warning')
        sys.exit(2)

    if not os.path.isdir('output'):
        log('Output folder is missing. Creating folder...', 'warning')
        os.mkdir('output')


def clean_output():
    """
    Delete output files and folder
    """
    if not os.path.isdir('output'):
        log('Working directory already clean...', 'info')
        return

    shutil.rmtree('output')
    log('Working directory clean', 'success')
    return


def romanize_words(words):
    """
    Romanize words
    """
    mappings = {
        'α': 'a', 'ά': 'a', 'β': 'v', 'γ': 'g', 'δ': 'd',
        'ε': 'e', 'έ': 'e', 'ζ': 'z', 'η': 'i', 'ή': 'i',
        'θ': 'th', 'ι': 'i', 'ί': 'i', 'ϊ': 'i', 'ΐ': 'i',
        'κ': 'k', 'λ': 'l', 'μ': 'm', 'ν': 'n', 'ξ': 'ks',
        'ο': 'o', 'ό': 'o', 'π': 'p', 'ρ': 'r', 'σ': 's',
        'ς': 's', 'τ': 't', 'υ': 'y', 'ύ': 'y', 'ϋ': 'y',
        'ΰ': 'y', 'φ': 'f', 'χ': 'h', 'x': 'h', 'ψ': 'ps',
        'ω': 'o', 'ώ': 'o',
        '-': '-', '!': '!', '.': '.', ',': ',', "'": "'"
    }
    results = []

    if not words:
        log('No data provided', 'info')
        return results

    for word in words:
        result = []
        chars = list(word.strip())
        for char in chars:
            try:
                char = char.lower()
                result.append(mappings[char])
            except Exception as e:
                log(f'Could not map {str(e)}', 'warning')
        word = ''.join(result)
        results.append(word)

    log('Romanized all words', 'success')
    return results


def export(file_name, words, file_type='txt'):
    """
    Create a words file
    """
    if not words:
        log('No data provided', 'warning')
        return

    check()
    log(f'Creating file {file_name}.{file_type}...', 'info')
    output = open(f'output/{file_name}.{file_type}', 'w')

    if file_type == 'json':
        output.write('[')

    for word in words:
        if file_type == 'txt':
            output.write(f'{word.strip()}\n')
        elif file_type == 'json':
            output.write(f'"{word.strip()}",\n')

    if file_type == 'json':
        output.write(']')

    output.close()
    log(f'Created {file_name}.{file_type}', 'success')
```
Per-file statistics and quality signals (names shortened from `qsc_code…_quality_signal`; zero-valued signals folded into the note below):

| Statistic | Value |
|---|---|
| avg_line_length | 18.211765 |
| max_line_length | 97 |
| alphanum_fraction | 0.439599 |
| num_words | 670 |
| num_chars | 6,192 |
| mean_word_length | 4.026866 |
| frac_words_unique | 0.346269 |
| frac_chars_top_2grams | 0.023721 |
| frac_chars_top_3grams | 0.013343 |
| frac_chars_top_4grams | 0.020756 |
| frac_chars_dupe_5grams | 0.159377 |
| frac_chars_dupe_6grams | 0.142328 |
| frac_chars_dupe_7grams | 0.083766 |
| frac_chars_dupe_8grams | 0.022239 |
| frac_chars_digital | 0.028378 |
| frac_chars_whitespace | 0.379683 |
| size_file_byte | 6,192 |
| num_lines | 339 |
| num_chars_line_max | 98 |
| num_chars_line_mean | 18.265487 |
| frac_chars_alphabet | 0.674043 |
| frac_chars_comments | 0.056363 |
| frac_lines_dupe_lines | 0.106299 |
| frac_lines_long_string | 0.003937 |
| frac_chars_string_length | 0.20179 |
| frac_chars_long_word_length | 0.019302 |
| cate_ast | 1 |
| frac_lines_func_ratio | 0.03937 |
| cate_var_zero | false |
| frac_lines_import | 0.027559 |
| score_lines_no_logic | 0.110236 |

All signals not listed are 0. The unsuffixed `qsc_*` mirror columns are 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective` = 1, `hits` = 0.
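Several of the surface statistics above can be recomputed from `content` directly. A sketch with assumed definitions (the dump does not define the signals, so these formulas are assumptions, and exact values may differ slightly, e.g. depending on whether newlines are counted):

```python
def surface_stats(content: str) -> dict:
    """Recompute a few per-file statistics (assumed definitions)."""
    lines = content.splitlines()
    n = len(content)
    return {
        "size": n,
        "num_lines": len(lines),
        "max_line_length": max((len(line) for line in lines), default=0),
        "avg_line_length": sum(len(line) for line in lines) / len(lines) if lines else 0.0,
        "alphanum_fraction": sum(c.isalnum() for c in content) / n if n else 0.0,
        "frac_chars_whitespace": sum(c.isspace() for c in content) / n if n else 0.0,
    }
```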
**Record 2: `Log1/HiPyQt3/HiPyQt38QTableWidget.py` from `codenara/PyQt1`**

| Field | Value |
|---|---|
| hexsha | `3ce959e8fac079b9e0e0bacc34e00bde93edb83c` |
| size | 1,937 |
| ext / lang | py / Python |
| repo path (same for stars, issues, forks) | `Log1/HiPyQt3/HiPyQt38QTableWidget.py` |
| repo name | `codenara/PyQt1` |
| repo head hexsha | `1550920577188e4d318b47fc69ba5ee243092d88` |
| licenses | ["MIT"] |
| max_stars_count / max_issues_count / max_forks_count | null / null / null |

`content`:
```python
# HiPyQt version 3.8
# use QTableWidget
# use QCheckBox
# use QPushButton

import sys
from PyQt5.QtWidgets import *


class MyWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Hi PyQt")
        self.setGeometry(50, 50, 400, 300)

        # QTableWidget
        self.tableWidget = QTableWidget(self)
        self.tableWidget.resize(290, 290)
        self.tableWidget.setRowCount(2)
        self.tableWidget.setColumnCount(2)
        self.tableWidget.setItem(0, 0, QTableWidgetItem("John"))
        self.tableWidget.setItem(0, 1, QTableWidgetItem("21"))
        self.tableWidget.setItem(1, 0, QTableWidgetItem("Paul"))
        self.tableWidget.setItem(1, 1, QTableWidgetItem("22"))
        horizontalHeaderLabels = ["Name", "Age"]
        self.tableWidget.setHorizontalHeaderLabels(horizontalHeaderLabels)
        verticalHeaderLabels = ["One", "Two"]
        self.tableWidget.setVerticalHeaderLabels(verticalHeaderLabels)

        # QCheckBox
        self.checkBox = QCheckBox("Editable", self)
        self.checkBox.move(300, 10)
        self.checkBox.resize(90, 30)
        self.checkBox.stateChanged.connect(self.checkBox_stateChanged)

        # QPushButton
        self.button = QPushButton("Resize", self)
        self.button.move(300, 50)
        self.button.resize(80, 30)
        self.button.clicked.connect(self.button_clicked)

    def checkBox_stateChanged(self):
        if self.checkBox.isChecked() == True:
            self.tableWidget.setEditTriggers(QAbstractItemView.AllEditTriggers)  # Enable editing
        else:
            self.tableWidget.setEditTriggers(QAbstractItemView.NoEditTriggers)  # Disable editing

    def button_clicked(self):
        self.tableWidget.resizeColumnsToContents()
        self.tableWidget.resizeRowsToContents()


if __name__ == "__main__":
    app = QApplication(sys.argv)
    myWindow = MyWindow()
    myWindow.show()
    app.exec()
```
Per-file statistics and quality signals (zero-valued signals omitted):

| Statistic | Value |
|---|---|
| avg_line_length | 32.283333 |
| max_line_length | 97 |
| alphanum_fraction | 0.674239 |
| num_words | 187 |
| num_chars | 1,937 |
| mean_word_length | 6.877005 |
| frac_words_unique | 0.42246 |
| frac_chars_top_2grams | 0.163297 |
| frac_chars_top_3grams | 0.068429 |
| frac_chars_top_4grams | 0.03577 |
| frac_chars_digital | 0.033619 |
| frac_chars_whitespace | 0.21683 |
| size_file_byte | 1,937 |
| num_lines | 59 |
| num_chars_line_max | 98 |
| num_chars_line_mean | 32.830508 |
| frac_chars_alphabet | 0.814107 |
| frac_chars_comments | 0.06763 |
| frac_chars_string_length | 0.030067 |
| cate_ast | 1 |
| frac_lines_func_ratio | 0.075 |
| cate_var_zero | false |
| frac_lines_import | 0.05 |
| score_lines_no_logic | 0.15 |

All other signals are 0; the unsuffixed `qsc_*` mirror columns are 0/null as in record 1; `effective` = 1, `hits` = 0.
**Record 3: `predictor.py` from `abhayraw1/crnn.pytorch`**

| Field | Value |
|---|---|
| hexsha | `3cea6fdbaa10d4f4a87f24213944a946b586b65c` |
| size | 1,346 |
| ext / lang | py / Python |
| repo path (same for stars, issues, forks) | `predictor.py` |
| repo name | `abhayraw1/crnn.pytorch` |
| repo head hexsha | `307f2dbf8163148d165ef15cdd522c7c137041e4` |
| licenses | ["MIT"] |
| max_stars_count / max_issues_count / max_forks_count | null / null / null |

`content`:
```python
import torch
from torch.autograd import Variable

from . import utils
from . import dataset
from PIL import Image
from pathlib import Path

from . import crnn

model_path = Path(__file__).parent/'data/crnn.pth'
alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'

model = crnn.CRNN(32, 1, 37, 256)
if torch.cuda.is_available():
    model = model.cuda()
print('loading pretrained model from %s' % model_path)
model.load_state_dict(torch.load(model_path))

converter = utils.strLabelConverter(alphabet)
transformer = dataset.resizeNormalize((100, 32))


def predict(img_path=None, arr=None):
    assert img_path is not None or arr is not None
    if arr is not None:
        image = Image.fromarray(arr)
    else:
        image = Image.open(img_path)
    image = image.convert('L')
    image = transformer(image)
    if torch.cuda.is_available():
        image = image.cuda()
    image = image.view(1, *image.size())
    image = Variable(image)

    model.eval()
    preds = model(image)

    _, preds = preds.max(2)
    preds = preds.transpose(1, 0).contiguous().view(-1)

    preds_size = Variable(torch.IntTensor([preds.size(0)]))
    raw_pred = converter.decode(preds.data, preds_size.data, raw=True)
    sim_pred = converter.decode(preds.data, preds_size.data, raw=False)
    # print('%-20s => %-20s' % (raw_pred, sim_pred))
    return sim_pred
```
Per-file statistics and quality signals (zero-valued signals omitted):

| Statistic | Value |
|---|---|
| avg_line_length | 28.041667 |
| max_line_length | 71 |
| alphanum_fraction | 0.696137 |
| num_words | 188 |
| num_chars | 1,346 |
| mean_word_length | 4.861702 |
| frac_words_unique | 0.37234 |
| frac_chars_top_2grams | 0.054705 |
| frac_chars_top_3grams | 0.02954 |
| frac_chars_top_4grams | 0.028446 |
| frac_chars_dupe_5grams | 0.14442 |
| frac_chars_dupe_6grams | 0.09628 |
| frac_chars_dupe_7grams | 0.09628 |
| frac_chars_dupe_8grams | 0.09628 |
| frac_chars_dupe_9grams | 0.09628 |
| frac_chars_digital | 0.029864 |
| frac_chars_whitespace | 0.179049 |
| size_file_byte | 1,346 |
| num_lines | 47 |
| num_chars_line_max | 72 |
| num_chars_line_mean | 28.638298 |
| frac_chars_alphabet | 0.797285 |
| frac_chars_comments | 0.034175 |
| frac_lines_dupe_lines | 0.055556 |
| frac_chars_string_length | 0.063223 |
| frac_chars_long_word_length | 0.027756 |
| frac_lines_assert | 0.027778 |
| cate_ast | 1 |
| frac_lines_func_ratio | 0.027778 |
| cate_var_zero | false |
| frac_lines_import | 0.194444 |
| score_lines_no_logic | 0.25 |
| frac_lines_print | 0.027778 |

All other signals are 0; the unsuffixed `qsc_*` mirror columns are 0/null as in record 1; `effective` = 1, `hits` = 0.
**Record 4: `src/graph_transpiler/webdnn/backend/webgl/kernels/split_axis.py` from `gunpowder78/webdnn`**

| Field | Value |
|---|---|
| hexsha | `3ced3da168b0c4d5fb8345ab35a6e8f79cade777` |
| size | 2,951 |
| ext / lang | py / Python |
| repo path (same for stars, issues, forks) | `src/graph_transpiler/webdnn/backend/webgl/kernels/split_axis.py` |
| repo name | `gunpowder78/webdnn` |
| repo head hexsha | `c659ea49007f91d178ce422a1eebe289516a71ee` |
| licenses | ["MIT"] |
| max_stars_count | 1 (events 2018-07-26T13:52:21.000Z to 2018-07-26T13:52:21.000Z) |
| max_issues_count / max_forks_count | null / null |

`content`:
```python
from typing import List, Sequence

from webdnn.backend.code_generator.injectors.kernel_name_injector import KernelNameInjector
from webdnn.backend.webgl.attributes.channel_mode import ChannelMode, ChannelModeEnum
from webdnn.backend.webgl.generator import WebGLDescriptorGenerator
from webdnn.backend.webgl.kernel import Kernel
from webdnn.backend.webgl.kernels.util import FragmentShaderPreamble, texture_stride, texture_shape
from webdnn.backend.webgl.uniform_injector import UniformInjector
from webdnn.graph.operators.split_axis import SplitAxis

template = FragmentShaderPreamble + """
%%UNIFORM(sampler2D, sampler_x)%%;

%%UNIFORM(vec2, texture_stride_y)%%;
%%UNIFORM(vec4, variable_shape_y)%%;
%%UNIFORM(vec4, variable_stride_y)%%;

%%UNIFORM(vec4, variable_shape_x)%%;
%%UNIFORM(vec4, variable_stride_x)%%;
%%UNIFORM(vec2, texture_stride_x)%%;
%%UNIFORM(vec2, texture_shape_x)%%;

%%UNIFORM(vec4, offset)%%;

void main() {
    vec4 variable_position_y = convert_position(gl_FragCoord.xy, texture_stride_y, variable_stride_y, variable_shape_y);
    vec4 variable_position_x = variable_position_y + offset;

    float x = texture2D(sampler_x, convert_coord(variable_position_x, variable_stride_x, texture_stride_x, texture_shape_x)).r;

    gl_FragColor = vec4(x, 0, 0, 0);
}
"""


def _pad_to_4d(arr: Sequence[int], val: int = 1):
    assert len(arr) <= 4, ValueError
    arr = list(arr)
    while len(arr) < 4:
        arr.append(val)
    return arr


@WebGLDescriptorGenerator.register_handler(SplitAxis)
def split_axis(op: SplitAxis) -> List[Kernel]:
    x = op.inputs["x"]
    ys = [op.outputs[f"y{i}"] for i in range(len(op.outputs))]
    sections = [0] + op.sections
    axis = op.axis

    kernels = []
    for i, y in enumerate(ys):
        assert x.order.check_same_axes(y.order)
        assert ChannelMode.get(x) == ChannelMode.get(y) == ChannelModeEnum.R

        name_injector = KernelNameInjector(op)
        uniform_injector = UniformInjector()

        offset = [sections[i] if a == axis else 0 for a in y.order.axes]

        uniform_injector.register({
            "sampler_x": x,
            "texture_stride_y": texture_stride(y),
            "variable_shape_y": _pad_to_4d(y.shape),
            "variable_stride_y": _pad_to_4d(y.stride),
            "texture_shape_x": texture_shape(x),
            "texture_stride_x": texture_stride(x),
            "variable_shape_x": _pad_to_4d([x.shape_dict[a] for a in y.order.axes]),
            "variable_stride_x": _pad_to_4d([x.stride_dict[a] for a in y.order.axes]),
            "offset": _pad_to_4d(offset, 0)
        })

        source = template
        source = uniform_injector.inject(source)
        source = name_injector.inject(source)

        kernel = Kernel(
            source,
            name_injector.name,
            uniform_injector.samplers,
            uniform_injector.uniforms,
            y
        )
        kernels.append(kernel)

    return kernels
```
Per-file statistics and quality signals (zero-valued signals omitted):

| Statistic | Value |
|---|---|
| avg_line_length | 32.788889 |
| max_line_length | 127 |
| alphanum_fraction | 0.686208 |
| num_words | 382 |
| num_chars | 2,951 |
| mean_word_length | 5.044503 |
| frac_words_unique | 0.253927 |
| frac_chars_top_2grams | 0.060716 |
| frac_chars_top_3grams | 0.052932 |
| frac_chars_top_4grams | 0.057084 |
| frac_chars_dupe_5grams | 0.167618 |
| frac_chars_dupe_6grams | 0.062273 |
| frac_chars_dupe_7grams | 0.021796 |
| frac_chars_dupe_8grams | 0.021796 |
| frac_chars_digital | 0.011849 |
| frac_chars_whitespace | 0.199254 |
| size_file_byte | 2,951 |
| num_lines | 89 |
| num_chars_line_max | 128 |
| num_chars_line_mean | 33.157303 |
| frac_chars_alphabet | 0.803639 |
| frac_chars_string_length | 0.280583 |
| frac_chars_long_word_length | 0.036937 |
| frac_lines_assert | 0.044776 |
| cate_ast | 1 |
| frac_lines_func_ratio | 0.029851 |
| cate_var_zero | false |
| frac_lines_import | 0.119403 |
| score_lines_no_logic | 0.179104 |

All other signals are 0; the unsuffixed `qsc_*` mirror columns are 0/null as in record 1; `effective` = 1, `hits` = 0.
**Record 5: `systemstat.py` from `asl97/asl97-i3bar-status-spacer`**

| Field | Value |
|---|---|
| hexsha | `3cedde962258fae75ef3400a99dada61c8a82bd1` |
| size | 1,244 |
| ext / lang | py / Python |
| repo path (same for stars, issues, forks) | `systemstat.py` |
| repo name | `asl97/asl97-i3bar-status-spacer` |
| repo head hexsha | `83245582cf8973b0d128b5ed806e776e00960c5e` |
| licenses | ["MIT"] |
| max_stars_count / max_issues_count / max_forks_count | null / null / null |

`content`:
```python
import time
import psutil


def _parsesendrecv(interface, new, old):
    up = max(new[interface].bytes_sent - old[interface].bytes_sent, -1)
    down = max(new[interface].bytes_recv - old[interface].bytes_recv, -1)
    return up, down


class _netlink:
    def __init__(self):
        self.old = psutil.net_io_counters(pernic=True)

    def get_status(self, exclude=[]):
        new = psutil.net_io_counters(pernic=True)
        o = []
        with open("/proc/net/route") as f:
            route = f.read()
        for interface in new:
            if interface in exclude or interface not in route:
                continue
            up, down = _parsesendrecv(interface, new, self.old)
            if up == -1:
                sup = "?K"
            else:
                sup = "%.1fK" % (up/1024)
            if down == -1:
                sdown = "?K"
            else:
                sdown = "%.1fK" % (down/1024)
            o.append((interface, sup, sdown))
        self.old = new
        return o


netlink = _netlink().get_status


def cpu():
    return psutil.cpu_percent()


def ram():
    mem = psutil.virtual_memory()
    return ((mem.used+mem.buffers)/mem.total)*100


def datetime():
    return time.strftime("%a %d/%m/%Y %H:%M:%S")
```
Per-file statistics and quality signals (zero-valued signals omitted):

| Statistic | Value |
|---|---|
| avg_line_length | 28.272727 |
| max_line_length | 73 |
| alphanum_fraction | 0.549035 |
| num_words | 157 |
| num_chars | 1,244 |
| mean_word_length | 4.22293 |
| frac_words_unique | 0.426752 |
| frac_chars_top_2grams | 0.084465 |
| frac_chars_top_3grams | 0.075415 |
| frac_chars_top_4grams | 0.060332 |
| frac_chars_dupe_5grams | 0.087481 |
| frac_chars_dupe_6grams | 0.087481 |
| frac_chars_digital | 0.02 |
| frac_chars_whitespace | 0.31672 |
| size_file_byte | 1,244 |
| num_lines | 43 |
| num_chars_line_max | 74 |
| num_chars_line_mean | 28.930233 |
| frac_chars_alphabet | 0.76 |
| frac_lines_dupe_lines | 0.054054 |
| frac_chars_string_length | 0.039389 |
| cate_ast | 1 |
| frac_lines_func_ratio | 0.162162 |
| cate_var_zero | false |
| frac_lines_import | 0.054054 |
| frac_lines_simplefunc | 0.054054 |
| score_lines_no_logic | 0.378378 |

All other signals are 0; the unsuffixed `qsc_*` mirror columns are 0/null as in record 1; `effective` = 1, `hits` = 0.
**Record 6: `tests/test_net.py` from `ciubecca/kalasanty`**

| Field | Value |
|---|---|
| hexsha | `3cefbde68b0741c1883ec538b390be6d177b8949` |
| size | 18,044 |
| ext / lang | py / Python |
| repo path (same for stars, issues, forks) | `tests/test_net.py` |
| repo name | `ciubecca/kalasanty` |
| repo head hexsha | `df99f6814f073f2fb0fbd271d2fbfccb209c4b45` |
| licenses | ["BSD-3-Clause"] |
| max_stars_count | 1 (events 2021-10-19T16:59:31.000Z to 2021-10-19T16:59:31.000Z) |
| max_issues_count | null |
| max_forks_count | 1 (events 2021-10-20T13:05:56.000Z to 2021-10-20T13:05:56.000Z) |

`content`:
```python
import os

import numpy as np
import h5py
import tempfile
import pytest

from keras import backend as K
from keras.layers import Input, Convolution3D, concatenate
from keras.models import Model
from keras.optimizers import Adam

import pybel
from tfbio.data import Featurizer

from kalasanty.net import dice_np, dice, dice_loss, ovl_np, ovl, ovl_loss, DataWrapper, UNet

path = os.path.dirname(os.path.realpath(__file__))
test_dataset = os.path.join(path, 'test_data.hdf')
protein_file = os.path.join(path, 'datasets', 'scpdb', '2qfo_1', 'protein.mol2')

featurizer = Featurizer(save_molecule_codes=False)
num_features = len(featurizer.FEATURE_NAMES)

input_shape = (1, 4, 2, 3, 1)
arr_zeros = np.zeros(input_shape)
arr_ones = np.ones(input_shape)


def teardown_function(function):
    K.clear_session()


@pytest.fixture(scope='function')
def data():
    data = DataWrapper(test_dataset, test_set=0.2, max_dist=52, scale=0.33)
    yield data
    data.close()


@pytest.mark.parametrize('smoothing', (0, 0.1, 0.001),
                         ids=lambda x: 'smoothing %s' % x)
def test_dice(smoothing):
    x = Input(input_shape[1:])
    m = Model(inputs=x, outputs=x)

    arr_random = np.random.choice([0, 1], size=input_shape,
                                  p=[0.75, 0.25])
    arrays = (arr_random, arr_zeros, arr_ones)
    arr_sum = arr_random.sum()
    ones_sum = arr_ones.sum()
    scores = (1.0, smoothing / (arr_sum + smoothing),
              (2 * arr_sum + smoothing) / (arr_sum + ones_sum + smoothing))

    m.compile(Adam(), lambda x, y: dice(x, y, smoothing_factor=smoothing))
    for array, score in zip(arrays, scores):
        score_keras = m.evaluate(arr_random, array, verbose=0)
        score_np = dice_np(arr_random, array, smoothing_factor=smoothing)
        assert np.allclose(score_keras, score_np, 6)
        assert np.allclose(score_keras, score, 6)


@pytest.mark.parametrize('smoothing', (0, 0.1, 0.001),
                         ids=lambda x: 'smoothing %s' % x)
def test_ovl(smoothing):
    x = Input(input_shape[1:])
    m = Model(inputs=x, outputs=x)

    arr_random = np.random.choice([0, 1], size=input_shape,
                                  p=[0.75, 0.25])
    arr_sum = arr_random.sum()
    ones_sum = arr_ones.sum()
    arrays = (arr_random, arr_zeros, arr_ones)
    scores = (1.0, smoothing / (arr_sum + smoothing),
              (arr_sum + smoothing) / (ones_sum + smoothing))

    m.compile(Adam(), lambda x, y: ovl(x, y, smoothing_factor=smoothing))
    for array, score in zip(arrays, scores):
        score_keras = m.evaluate(arr_random, array, verbose=0)
        score_np = ovl_np(arr_random, array, smoothing_factor=smoothing)
        assert np.allclose(score_keras, score_np, 6)
        assert np.allclose(score_keras, score, 6)


def test_unet_from_data_handle(data):
    with pytest.raises(ValueError, match='you must either provide'):
        UNet()
    with pytest.raises(TypeError, match='data_handle should be a DataWrapper'):
        UNet(data_handle='10gs')

    model = UNet(data_handle=data)
    assert model.data_handle == data
    assert model.scale == data.scale
    assert model.max_dist == data.max_dist
    assert len(model.inputs) == 1
    assert model.inputs[0].shape[-1] == data.x_channels
    assert len(model.outputs) == 1
    assert model.outputs[0].shape[-1] == data.y_channels


@pytest.mark.parametrize('box_size', (4, 16), ids=lambda x: 'box=%s' % x)
@pytest.mark.parametrize('i', (5, 1), ids=lambda x: 'i=%s' % x)
@pytest.mark.parametrize('o', (2, 1), ids=lambda x: 'o=%s' % x)
def test_unet_from_layers(box_size, i, o):
    inputs = Input([box_size] * 3 + [i])
    conv1 = Convolution3D(filters=3, kernel_size=1, activation='elu',
                          padding='same')(inputs)
    outputs = Convolution3D(filters=o, kernel_size=1, activation='sigmoid',
                            padding='same')(conv1)

    model = UNet(inputs=inputs, outputs=outputs, box_size=box_size,
                 input_channels=i, output_channels=o)
    assert hasattr(model, 'data_handle')
    assert model.data_handle is None

    with pytest.raises(ValueError, match='input should be 5D'):
        UNet(inputs=inputs[0], outputs=inputs)
    with pytest.raises(ValueError, match='output should be 5D'):
        UNet(inputs=inputs, outputs=outputs[1])
    with pytest.raises(ValueError, match='input and output shapes do not match'):
        UNet(inputs=inputs, outputs=concatenate([outputs, outputs], 1))


@pytest.mark.parametrize('box_size', (36, 144), ids=lambda x: 'box=%s' % x)
@pytest.mark.parametrize('o', (4, 2), ids=lambda x: 'o=%s' % x)
def test_unet_with_featurizer(box_size, o):
    f = Featurizer()
    i = len(f.FEATURE_NAMES)

    with pytest.raises(TypeError, match='should be a tfbio.data.Featurize'):
        UNet(box_size=box_size, input_channels=i, output_channels=o,
             scale=0.5, featurizer=1)

    model = UNet(box_size=box_size, input_channels=i, output_channels=o,
                 scale=0.5, featurizer=f)
    assert hasattr(model, 'data_handle')
    assert model.data_handle is None
    assert hasattr(model, 'featurizer')
    assert isinstance(model.featurizer, Featurizer)


@pytest.mark.parametrize('box_size', (8, 16), ids=lambda x: 'box=%s' % x)
@pytest.mark.parametrize('i_channels', ([5, 3], [2, 1, 1]),
                         ids=lambda x: 'i=' + ','.join([str(i) for i in x]))
@pytest.mark.parametrize('o_channels', ([3, 3], [2, 1, 4]),
                         ids=lambda x: 'o=' + ','.join([str(i) for i in x]))
def test_multiple_inputs_outputs(box_size, i_channels, o_channels):
    inputs = [Input([box_size] * 3 + [i]) for i in i_channels]
    conv1 = [Convolution3D(filters=3, kernel_size=1, activation='elu',
                           padding='same')(inp) for inp in inputs]
    conv1 = concatenate(conv1, axis=-1)
    outputs = [Convolution3D(filters=o, kernel_size=1, activation='sigmoid',
                             padding='same')(conv1) for o in o_channels]

    model = UNet(inputs=inputs, outputs=outputs, box_size=box_size,
                 input_channels=sum(i_channels),
                 output_channels=sum(o_channels))
    assert len(model.inputs) == len(i_channels)
    assert len(model.outputs) == len(o_channels)


@pytest.mark.parametrize('loss', (dice_loss, ovl_loss))
def test_training(data, loss):
    train_gen = data.batch_generator(batch_size=5)
    eval_gen = data.batch_generator(batch_size=5)
    test_gen = data.batch_generator(batch_size=2, subset='test')
    num_epochs = 2

    box_size = data.box_size
    input_channels = data.x_channels
    output_channels = data.y_channels

    inputs = Input((box_size, box_size, box_size, input_channels))
    outputs = Convolution3D(filters=output_channels, kernel_size=1,
                            activation='sigmoid')(inputs)
    model = UNet(inputs=inputs, outputs=outputs)

    model.compile(optimizer=Adam(lr=1e-6), loss=loss,
                  metrics=[dice, dice_loss, ovl, ovl_loss])

    model.fit_generator(train_gen, steps_per_epoch=2,
                        epochs=num_epochs, verbose=0)

    for scores in (model.evaluate_generator(eval_gen, steps=2),
                   model.evaluate_generator(test_gen, steps=1)):
        assert np.allclose(scores[1], -scores[2])
        assert np.allclose(scores[3], -scores[4])

    loss_change = model.history.history['loss']
    assert len(loss_change) == num_epochs
    assert (loss_change[0] != loss_change[1:]).all()


@pytest.mark.parametrize('kwargs, err', (
    ({'scale': 1.0}, ValueError),
    ({'max_dist': 35}, ValueError),
    ({'featurizer': 123}, TypeError),
    ({'featurizer': Featurizer()}, ValueError)
), ids=('wrong scale', 'wrong dist', 'wrong featurizer type',
        'wrong featurizer shape'))
@pytest.mark.parametrize('compiled', (True, False),
                         ids=('compiled', 'not compiled'))
@pytest.mark.filterwarnings('ignore:No training configuration found')
def test_load_wrong_args(data, kwargs, err, compiled):
    box_size = data.box_size
    i = data.x_channels
    o = data.y_channels

    model1 = UNet(box_size=box_size, input_channels=i,
                  output_channels=o, scale=data.scale,
                  data_handle=data)
    if compiled:
        model1.compile(optimizer=Adam(lr=1e-6),
                       loss='binary_crossentropy',
                       metrics=[dice, dice_loss, ovl, ovl_loss])

    with tempfile.NamedTemporaryFile(suffix='.hdf') as f:
        model1.save(f.name)
        with pytest.raises(err, match=list(kwargs)[0]):
            UNet.load_model(f.name, data_handle=data, **kwargs)


@pytest.mark.parametrize('kwargs', (
    {},
    {'max_dist': 52, 'scale': 0.33, 'featurizer': featurizer},
), ids=('no args', 'scale 1:3, dist=52, featurizer'))
@pytest.mark.parametrize('compiled', (True, False),
                         ids=('compiled', 'not compiled'))
@pytest.mark.filterwarnings('ignore:No training configuration found')
def test_save_load(data, kwargs, compiled):
    from keras.models import load_model as keras_load

    box_size = data.box_size
    i = data.x_channels
    o = data.y_channels

    model1 = UNet(box_size=box_size, input_channels=i,
                  output_channels=o, scale=data.scale,
                  data_handle=data)
    if compiled:
        model1.compile(optimizer=Adam(lr=1e-6),
                       loss='binary_crossentropy',
                       metrics=[dice, dice_loss, ovl, ovl_loss])
    weights1 = model1.get_weights()

    with tempfile.NamedTemporaryFile(suffix='.hdf') as f:
        model1.save(f.name)
        model2 = UNet.load_model(f.name, data_handle=data, **kwargs)
        weights2 = model2.get_weights()

    assert model1.to_json() == model2.to_json()
    for w1, w2 in zip(weights1, weights2):
        assert np.allclose(w1, w2)

    with tempfile.NamedTemporaryFile(suffix='.hdf') as f:
        model1.save_keras(f.name)
        model2 = keras_load(f.name)
        weights2 = model2.get_weights()
    for w1, w2 in zip(weights1, weights2):
        assert np.allclose(w1, w2)


@pytest.mark.parametrize('kwargs', (
    {'box_size': 30},
    {'input_channels': 1},
    {'output_channels': 4},
    {'scale': 2.0},
    {'featurizer': Featurizer()},
    {'inputs': Input([36] * 3 + [1])},
    {'outputs': Convolution3D(filters=3, kernel_size=1, activation='elu',
                              padding='same')(Input([36] * 3 + [1]))}
), ids=('box_size', 'input_channels', 'output_channels', 'scale', 'featurizer',
        'inputs, no outputs', 'outputs, no inputs'))
def test_incompatible_with_data_handle(data, kwargs):
    with pytest.raises(ValueError, match=list(kwargs)[0]):
        UNet(data_handle=data, **kwargs)


@pytest.mark.parametrize('input_shape, strides, message', (
    ([10] * 3 + [1], 1, 'input shape does not match box_size'),
    ([20] * 5 + [1], 1, 'input should be 5D'),
    ([20] * 3 + [1], 2, 'input and output shapes do not match'),
), ids=('box size', 'not 3D image', 'different shapes'))
def test_incompatible_layers_shapes(input_shape, strides, message):
    inputs = Input(input_shape)
    if message == 'input should be 5D':
        outputs = inputs
    else:
        outputs = Convolution3D(filters=3, kernel_size=1, activation='sigmoid',
                                padding='same', strides=strides)(inputs)
    with pytest.raises(ValueError, match=message):
        UNet(inputs=inputs, outputs=outputs, box_size=20)


@pytest.mark.parametrize('kwargs', (
    {'box_size': 30},
    {'input_channels': 1},
    {'output_channels': 4},
    {'featurizer': Featurizer()},
), ids=lambda x: ', '.join(str(k) for k in x))
def test_incompatible_with_layers(kwargs):
    inputs = Input([10] * 3 + [3])
    conv1 = Convolution3D(filters=3, kernel_size=1, activation='elu',
                          padding='same')(inputs)
    outputs = Convolution3D(filters=5, kernel_size=1, activation='sigmoid',
                            padding='same')(conv1)
    with pytest.raises(ValueError, match=list(kwargs)[0]):
        UNet(inputs=inputs, outputs=outputs, **kwargs)


def test_get_pockets_segmentation(data):
    with pytest.raises(ValueError, match='data_handle must be set'):
        model = UNet(box_size=data.box_size,
                     input_channels=data.x_channels,
                     output_channels=data.y_channels,
                     l2_lambda=1e-7)
        model.pocket_density_from_grid('10gs')
    with pytest.raises(ValueError, match='scale must be set'):
        model = UNet(box_size=data.box_size,
                     input_channels=data.x_channels,
                     output_channels=data.y_channels,
                     l2_lambda=1e-7, data_handle=data)
        model.scale = None
        model.pocket_density_from_grid('10gs')

    np.random.seed(42)
    model = UNet(box_size=data.box_size,
                 input_channels=data.x_channels,
                 output_channels=data.y_channels,
                 l2_lambda=1e-7, data_handle=data)
    model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')

    density, *_ = model.pocket_density_from_grid('10gs')
    with pytest.raises(ValueError, match='not supported'):
        model.get_pockets_segmentation(np.array([density] * 2), 0.6)

    pocket = model.get_pockets_segmentation(density, 0.6)
    assert pocket.shape == (data.box_size,) * 3
    assert pocket.max() > 0
    assert len(np.unique(pocket)) - 1 <= pocket.max()


def test_save_pockets_cmap(data):
    model = UNet(data_handle=data, l2_lambda=1e-7)
    model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')
    density, origin, step = model.pocket_density_from_grid('10gs')

    with pytest.raises(ValueError, match='saving more than one prediction'):
        model.save_density_as_cmap(np.concatenate((density, density)), origin,
                                   step)

    with tempfile.NamedTemporaryFile(suffix='.cmap') as cmap_file:
        fname = cmap_file.name
        model.save_density_as_cmap(density, origin, step, fname=fname)
        with h5py.File(fname, 'r') as f:
            assert 'Chimera' in f
            group = f['Chimera']
            assert len(group.keys()) == data.y_channels
            for i in range(data.y_channels):
                key = 'image%s' % (i + 1)
                assert key in group
                assert 'data_zyx' in group[key]
                dataset = group[key]['data_zyx'][:]
                assert np.allclose(density[0, ..., i].transpose([2, 1, 0]),
                                   dataset[:])


def test_save_pockets_cube(data):
    model = UNet(data_handle=data, l2_lambda=1e-7)
    model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')
    density, origin, step = model.pocket_density_from_grid('10gs')

    with pytest.raises(ValueError, match='saving more than one prediction'):
        model.save_density_as_cube(np.concatenate((density, density)), origin,
                                   step)
    with pytest.raises(NotImplementedError, match='saving multichannel'):
        model.save_density_as_cube(density, origin, step)

    density = density[..., [0]]
    with tempfile.NamedTemporaryFile(suffix='.cube') as cmap_file:
        fname = cmap_file.name
        model.save_density_as_cube(density, origin, step, fname=fname)
        with open(fname, 'r') as f:
            # skip header
            for _ in range(7):
                f.readline()
            values = np.array(f.read().split()).reshape(density.shape)
            assert np.allclose(density, values.astype(float))


@pytest.mark.parametrize('box_size', (36, 72), ids=lambda x: 'box=%s' % x)
@pytest.mark.parametrize('o', (1, 3), ids=lambda x: 'o=%s' % x)
def test_predict_mol(box_size, o):
    mol = next(pybel.readfile('mol2', protein_file))

    with pytest.raises(ValueError, match='featurizer must be set'):
        model = UNet(box_size=box_size, scale=0.5, input_channels=num_features,
                     output_channels=o)
        model.pocket_density_from_mol(mol)
    with pytest.raises(ValueError, match='scale must be set'):
        model = UNet(featurizer=featurizer, box_size=box_size,
                     input_channels=num_features, output_channels=o)
        model.pocket_density_from_mol(mol)

    model = UNet(featurizer=featurizer, box_size=box_size, scale=0.5,
                 output_channels=o)
    model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')

    with pytest.raises(TypeError, match='pybel.Molecule'):
        model.pocket_density_from_mol(protein_file)

    density, origin, step = model.pocket_density_from_mol(mol)
    assert (density > 0).any()


@pytest.mark.parametrize('box_size', (36, 72), ids=lambda x: 'box=%s' % x)
@pytest.mark.parametrize('o', (1, 2), ids=lambda x: 'o=%s' % x)
def test_predict_pocket_atoms(box_size, o):
    np.random.seed(42)
    mol = next(pybel.readfile('mol2', protein_file))

    model = UNet(featurizer=featurizer, box_size=box_size, scale=0.5,
                 output_channels=o)
    model.compile(optimizer=Adam(lr=1e-6), loss='binary_crossentropy')

    segmentation_kwargs = {'threshold': 0.55, 'min_size': 5}

    pocket_mols_atoms = model.predict_pocket_atoms(mol, dist_cutoff=3,
                                                   expand_residue=False,
                                                   **segmentation_kwargs)
    pocket_mols_residues = model.predict_pocket_atoms(mol, dist_cutoff=3,
                                                      expand_residue=True,
                                                      **segmentation_kwargs)

    assert len(pocket_mols_atoms) == len(pocket_mols_residues)
    assert len(pocket_mols_atoms) > 0
    for p1, p2 in zip(pocket_mols_atoms, pocket_mols_residues):
        assert isinstance(p1, pybel.Molecule)
        assert isinstance(p2, pybel.Molecule)
        assert len(p1.atoms) <= len(p2.atoms)
        res1 = set([res.idx for res in p1.residues])
        res2 = set([res.idx for res in p2.residues])
        assert res1 == res2
```
Per-file statistics and quality signals (zero-valued signals omitted):

| Statistic | Value |
|---|---|
| avg_line_length | 39.483589 |
| max_line_length | 92 |
| alphanum_fraction | 0.63323 |
| num_words | 2,383 |
| num_chars | 18,044 |
| mean_word_length | 4.624423 |
| frac_words_unique | 0.117919 |
| frac_chars_top_2grams | 0.034301 |
| frac_chars_top_3grams | 0.041924 |
| frac_chars_top_4grams | 0.033031 |
| frac_chars_dupe_5grams | 0.630581 |
| frac_chars_dupe_6grams | 0.579583 |
| frac_chars_dupe_7grams | 0.524864 |
| frac_chars_dupe_8grams | 0.465699 |
| frac_chars_dupe_9grams | 0.445009 |
| frac_chars_dupe_10grams | 0.409074 |
| frac_chars_digital | 0.024901 |
| frac_chars_whitespace | 0.232155 |
| size_file_byte | 18,044 |
| num_lines | 456 |
| num_chars_line_max | 93 |
| num_chars_line_mean | 39.570175 |
| frac_chars_alphabet | 0.77048 |
| frac_chars_comments | 0.00061 |
| frac_lines_dupe_lines | 0.381616 |
| frac_chars_string_length | 0.090012 |
| frac_lines_assert | 0.116992 |
| cate_ast | 1 |
| frac_lines_func_ratio | 0.052925 |
| cate_var_zero | false |
| frac_lines_import | 0.036212 |
| score_lines_no_logic | 0.089136 |

All other signals are 0; the unsuffixed `qsc_*` mirror columns are 0/null as in record 1; `effective` = 1, `hits` = 0.
**Record 7: `tests/test_bash_runner.py` from `rtmigo/svet` / `rtmigo/vien`**

| Field | Value |
|---|---|
| hexsha | `3cf130cd62278bdee384dab7ff29ec047f8b848a` |
| size | 2,256 |
| ext / lang | py / Python |
| repo path (same for stars, issues, forks) | `tests/test_bash_runner.py` |
| max_stars repo name | `rtmigo/svet` |
| max_issues / max_forks repo name | `rtmigo/vien` |
| repo head hexsha (all three) | `06f9c5be7706351c2ef93fae0f9fa97ee69593f7` |
| licenses | ["BSD-3-Clause"] |
| max_stars_count | 5 (events 2021-05-18T19:55:22.000Z to 2022-03-07T20:52:19.000Z) |
| max_issues_count | null |
| max_forks_count | 1 (events 2021-05-23T04:04:29.000Z to 2021-05-23T04:04:29.000Z) |

`content`:
```python
# SPDX-FileCopyrightText: (c) 2021 Artëm IG <github.com/rtmigo>
# SPDX-License-Identifier: BSD-3-Clause

import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from timeit import default_timer as timer

from tests.common import is_posix
from vien._bash_runner import *
from tests.time_limited import TimeLimited


@unittest.skipUnless(is_posix, "not POSIX")
class TestRunAsBash(unittest.TestCase):
    # python3 -m unittest svet.bash_runner_test

    def test_good_command_code_zero(self):
        bash_lines = [
            f'set -e',
            f"ls"]
        code = run_as_bash_script("\n".join(bash_lines), capture_output=True)
        self.assertEqual(code.returncode, 0)  # ok

    def test_bad_command_error_code(self):
        bash_lines = [
            f'set -e',
            f"ok_computer_make_me_happy"]
        code = run_as_bash_script("\n".join(bash_lines), capture_output=True)
        self.assertEqual(code.returncode, 127)  # error

    def test_alias_expansion(self):
        with TemporaryDirectory() as td:
            file_to_create = Path(td) / "to_be_or_not_to_be.txt"
            file_to_create_quoted = repr(str(file_to_create))
            bash_lines = [
                f'set -e',
                f"shopt -s expand_aliases",
                f'alias ohoho="echo"',  # this will work in bash, but not in sh
                f'ohoho "that is the answer" > {file_to_create_quoted}']
            self.assertFalse(file_to_create.exists())
            code = run_as_bash_script("\n".join(bash_lines),
                                      capture_output=True)
            self.assertEqual(code.returncode, 0)
            self.assertTrue(file_to_create.exists())
            self.assertEqual(file_to_create.read_text().strip(),
                             "that is the answer")

    def test_input_delay(self):
        start = timer()
        # run interactive shell end type "exit" after small delay
        with TimeLimited(seconds=10):  # safety net
            run_as_bash_script("exec bash", input="exit\n".encode(),
                               input_delay=1, timeout=10, capture_output=True)
        end = timer()
        self.assertGreater(end - start, 0.9)
        self.assertLess(end - start, 5)
```
Per-file statistics and quality signals (zero-valued signals omitted):

| Statistic | Value |
|---|---|
| avg_line_length | 37.6 |
| max_line_length | 79 |
| alphanum_fraction | 0.624113 |
| num_words | 291 |
| num_chars | 2,256 |
| mean_word_length | 4.597938 |
| frac_words_unique | 0.443299 |
| frac_chars_top_2grams | 0.03139 |
| frac_chars_top_3grams | 0.06278 |
| frac_chars_top_4grams | 0.044843 |
| frac_chars_dupe_5grams | 0.218236 |
| frac_chars_dupe_6grams | 0.218236 |
| frac_chars_dupe_7grams | 0.207025 |
| frac_chars_dupe_8grams | 0.178625 |
| frac_chars_dupe_9grams | 0.178625 |
| frac_chars_dupe_10grams | 0.178625 |
| frac_chars_digital | 0.011664 |
| frac_chars_whitespace | 0.277926 |
| size_file_byte | 2,256 |
| num_lines | 59 |
| num_chars_line_max | 80 |
| num_chars_line_mean | 38.237288 |
| frac_chars_alphabet | 0.809699 |
| frac_chars_comments | 0.113032 |
| frac_lines_dupe_lines | 0.222222 |
| frac_chars_string_length | 0.104418 |
| frac_chars_long_word_length | 0.035141 |
| frac_lines_assert | 0.177778 |
| cate_ast | 1 |
| frac_lines_func_ratio | 0.088889 |
| cate_var_zero | false |
| frac_lines_import | 0.155556 |
| score_lines_no_logic | 0.266667 |

All other signals are 0; the unsuffixed `qsc_*` mirror columns are 0/null as in record 1; `effective` = 1, `hits` = 0.
**Record 8: `adversarial/train_adversarial.py` from `liguge/` and `hectorLop/Conditional-Adversarial-Domain-Generalization-with-Single-Discriminator`**

| Field | Value |
|---|---|
| hexsha | `3cf1aac57cec16e9686acb6784d6d3e00f8dc890` |
| size | 8,825 |
| ext / lang | py / Python |
| repo path (same for stars, issues, forks) | `adversarial/train_adversarial.py` |
| max_stars / max_issues repo name | `liguge/Conditional-Adversarial-Domain-Generalization-with-Single-Discriminator` |
| max_forks repo name | `hectorLop/Conditional-Adversarial-Domain-Generalization-with-Single-Discriminator` |
| repo head hexsha (all three) | `e0f2cd042e2c124e73d2982af28fa270263180d8` |
| licenses | ["MIT"] |
| max_stars_count | 1 (events 2022-01-16T03:21:18.000Z to 2022-01-16T03:21:18.000Z) |
| max_issues_count | 1 (events 2022-03-29T10:50:48.000Z to 2022-03-30T07:14:56.000Z) |
| max_forks_count | 2 (events 2022-01-16T03:21:54.000Z to 2022-03-10T01:17:12.000Z) |

`content`:
```python
from typing import Dict, List, Tuple

import torch
import numpy as np
import argparse
from torch import nn
import yaml
import pandas as pd
from sklearn.metrics import roc_auc_score

from adversarial.adversarial import AdversarialNetwork, Classifier, Discriminator
from adversarial.dataset import (
    AdversarialDataset,
    get_transforms
)
from adversarial.config import Config
from adversarial.utils import (
    fix_all_seeds,
    freeze_unfreeze,
    get_ground_truth_vector
)
from torch.utils.data import DataLoader


def train_step(
    model : nn.Module,
    train_loader : DataLoader,
    config : Config,
    class_criterion : object,
    disc_criterion : object,
    extractor_criterion : object,
    optimizer : torch.optim.Optimizer
) -> Tuple[float, float, float, float]:
    model.train()

    class_loss_accum, disc_loss_accum, extr_loss_accum = 0., 0., 0.
    y_train = []
    preds = []

    for images, domains, labels in train_loader:
        images = images.to(config.DEVICE)
        domains = domains.to(config.DEVICE)
        labels = labels.to(config.DEVICE)

        # Set the gradients to zero before backprop step
        optimizer.zero_grad()

        # # # # # # # # # # # # # #
        # Step 1: Classification  #
        # # # # # # # # # # # # # #
        freeze_unfreeze(model.feature_extractor, True)
        freeze_unfreeze(model.discriminator, True)
        freeze_unfreeze(model.classifier, True)

        # Get predictions and calculate the loss
        y_preds_class = model(images)
        y_preds_class = y_preds_class.to(config.DEVICE)
        class_loss = class_criterion(y_preds_class.squeeze(), labels)
        class_loss_accum += class_loss.item()

        # Backward step
        class_loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        y_train.append(labels.detach().cpu().numpy())
        preds.append(y_preds_class.softmax(1).detach().cpu().numpy())

        # # # # # # # # # # # # #
        # Step 2: Discriminator #
        # # # # # # # # # # # # #
        freeze_unfreeze(model.feature_extractor, False)
        freeze_unfreeze(model.discriminator, True)
        freeze_unfreeze(model.classifier, True)

        # Get predictions and calculate the loss
        y_preds_disc = model.forward_disc(images)
        y_preds_disc = y_preds_disc.to(config.DEVICE)
        disc_loss = disc_criterion(y_preds_disc.squeeze(), domains)
        disc_loss_accum += disc_loss.item()

        # Backward step
        disc_loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        # # # # # # # # # # #
        # Step 3: Extractor #
        # # # # # # # # # # #
        freeze_unfreeze(model.feature_extractor, True)
        freeze_unfreeze(model.discriminator, False)
        freeze_unfreeze(model.classifier, True)

        # Get predictions and calculate the loss
        y_preds_extr = model.forward_disc(images)
        y_preds_extr = y_preds_extr.to(config.DEVICE)
        gt_vector = get_ground_truth_vector(labels, config.N_DOMAINS, config.N_CLASSES)
        gt_vector = gt_vector.to(config.DEVICE)
        extr_loss = extractor_criterion(y_preds_extr.squeeze(), gt_vector)
        extr_loss_accum += extr_loss.item()

        # Backward step
        extr_loss.backward()
        optimizer.step()
        optimizer.zero_grad()

    y_train = np.concatenate(y_train)
    preds = np.concatenate(preds)
    preds = preds[np.arange(len(preds)), preds.argmax(1)]
    auc = roc_auc_score(y_train, preds)

    return class_loss_accum, disc_loss_accum, extr_loss_accum, auc


def val_step(model : nn.Module, val_loader : DataLoader,
             config : Config, criterion : object) -> Tuple[float, float]:
    model.eval()
    preds = []
    epoch_loss = 0
    y_test = []

    with torch.no_grad():
        for images, domains, labels in val_loader:
            images = images.to(config.DEVICE)
            domains = domains.to(config.DEVICE)
            labels = labels.to(config.DEVICE)

            y_preds = model(images)
            y_preds = y_preds.to(config.DEVICE)

            loss = criterion(y_preds.squeeze(), labels)

            y_test.append(labels.cpu().numpy())
            preds.append(y_preds.softmax(1).cpu().numpy())

            epoch_loss += loss.item()

    y_test = np.concatenate(y_test)
    preds = np.concatenate(preds)
    preds = preds[np.arange(len(preds)), preds.argmax(1)]
    auc = roc_auc_score(y_test, preds)

    return epoch_loss, auc


def fit(
    model : nn.Module,
    train_loader : DataLoader,
    val_loader : DataLoader,
    config : Config,
    filepath : str
) -> Tuple[nn.Module, List[float], List[float]]:
    model = model.to(config.DEVICE)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=config.LEARNING_RATE,
                                momentum=config.MOMENTUM,
                                weight_decay=config.WEIGHT_DECAY)

    # Criterions for each step
    class_criterion = torch.nn.CrossEntropyLoss()
    disc_criterion = torch.nn.CrossEntropyLoss()
    extr_criterion = torch.nn.MSELoss()

    n_batches, n_batches_val = len(train_loader), len(val_loader)
    best_loss = np.inf
    val_loss_accum, train_loss_accum = [], []

    with torch.cuda.device(config.DEVICE):
        for epoch in range(1, config.EPOCHS + 1):
            class_loss, disc_loss, extr_loss, train_auc = train_step(model,
                                                                     train_loader,
                                                                     config,
                                                                     class_criterion,
                                                                     disc_criterion,
                                                                     extr_criterion,
                                                                     optimizer)
            class_loss = class_loss / n_batches
            disc_loss = disc_loss / n_batches
            extr_loss = extr_loss / n_batches

            val_loss, val_auc = val_step(model,
                                         val_loader,
                                         config,
                                         class_criterion)
            val_loss = val_loss / n_batches_val

            prefix = f"[Epoch {epoch:2d} / {config.EPOCHS:2d}]"
            print(prefix)
            print(f"{prefix} Train Class loss: {class_loss:7.5f}. Train Disc Loss: {disc_loss:7.5f}. Train Extr Loss: {extr_loss:7.5f}")
            print(f"{prefix} Val Class loss: {val_loss:7.5f}")
            print(f"{prefix} Train AUC-ROC: {train_auc:7.5f}. Val AUC-ROC: {val_auc:7.5f}")

            if val_loss < best_loss:
                best_loss = val_loss
                print(f'{prefix} Save Val loss: {val_loss:7.5f}')
                torch.save(model.state_dict(), filepath)

            print(prefix)

    return model, train_loss_accum, val_loss_accum


def get_loaders(df_train, df_val, config=Config):
    ds_train = AdversarialDataset(df_train, get_transforms(config, augment=True), config)
    dl_train = DataLoader(ds_train,
                          batch_size=config.BATCH_SIZE,
                          shuffle=True,
                          num_workers=0)

    ds_val = AdversarialDataset(df_val, get_transforms(config, augment=False), config)
    dl_val = DataLoader(ds_val,
                        batch_size=config.BATCH_SIZE,
                        shuffle=True,
                        num_workers=0)

    return dl_train, dl_val


def train(parameters : Dict):
    fix_all_seeds(3088)

    train = pd.read_csv(parameters['train_set'])
    val = pd.read_csv(parameters['val_set'])

    train_loader, val_loader = get_loaders(train, val)

    print('Getting the model')
    classifier = Classifier(256, 2)
    discriminator = Discriminator(256, 0.5, Config.N_DOMAINS, Config.N_CLASSES)
    model = AdversarialNetwork(discriminator, classifier,
                               parameters['model_name'], 2048)

    print('TRAINING')
    model, train_loss, val_loss = fit(model,
                                      train_loader,
                                      val_loader,
                                      Config,
                                      parameters['checkpoint'])


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', required=True, help='Config YAML file')
    args = parser.parse_args()

    with open(args.config) as file:
        params = yaml.load(file, Loader=yaml.FullLoader)

    train(params)
```
Per-file statistics and quality signals (zero-valued signals omitted):

| Statistic | Value |
|---|---|
| avg_line_length | 33.942308 |
| max_line_length | 136 |
| alphanum_fraction | 0.566799 |
| num_words | 962 |
| num_chars | 8,825 |
| mean_word_length | 4.95842 |
| frac_words_unique | 0.177755 |
| frac_chars_top_2grams | 0.022642 |
| frac_chars_top_3grams | 0.03522 |
| frac_chars_top_4grams | 0.01761 |
| frac_chars_dupe_5grams | 0.313417 |
| frac_chars_dupe_6grams | 0.272537 |
| frac_chars_dupe_7grams | 0.216352 |
| frac_chars_dupe_8grams | 0.207547 |
| frac_chars_dupe_9grams | 0.207547 |
| frac_chars_dupe_10grams | 0.17065 |
| frac_chars_digital | 0.008205 |
| frac_chars_whitespace | 0.33711 |
| size_file_byte | 8,825 |
| num_lines | 260 |
| num_chars_line_max | 137 |
| num_chars_line_mean | 33.942308 |
| frac_chars_alphabet | 0.807179 |
| frac_chars_comments | 0.041813 |
| frac_lines_dupe_lines | 0.255556 |
| frac_lines_long_string | 0.011111 |
| frac_chars_string_length | 0.047334 |
| cate_ast | 1 |
| frac_lines_func_ratio | 0.027778 |
| cate_var_zero | false |
| frac_lines_import | 0.072222 |
| score_lines_no_logic | 0.122222 |
| frac_lines_print | 0.044444 |

All other signals are 0; the unsuffixed `qsc_*` mirror columns are 0/null as in record 1; `effective` = 1, `hits` = 0.
**Record 9: `generate_md.py` from `wzyjerry/EPO-patent-process`**

| Field | Value |
|---|---|
| hexsha | `3cf1f4f9c94b916e1af4be610a5cfc8f880bc37a` |
| size | 18,425 |
| ext / lang | py / Python |
| repo path (same for stars, issues, forks) | `generate_md.py` |
| repo name | `wzyjerry/EPO-patent-process` |
| repo head hexsha | `686c0ea6d9122436071c809a238b8348cdf65120` |
| licenses | ["MIT"] |
| max_stars_count / max_issues_count / max_forks_count | null / null / null |

`content`:
def trans_date(field: dict) -> str:
text = str(field['date'])
return '%s.%s.%s' % (text[6:], text[4:6], text[:4])
def trans_4xx(field: dict, lang: str) -> str:
text = str(field['bnum'])
return '%s %s %s/%s' % (trans_date(field), labels['bulletin'][lang], text[:4], text[4:])
def trans_ipc(field: str) -> str:
field = field.split()
return '%s %s %s %s' % (field[0][1:], field[1][:2], field[1][2:], field[2])
def trans_ipcr(field: dict) -> str:
text = field['text'].split()
return '%s %s <sup>(%s.%s)</sup>' % (text[0], text[1], text[2][:4], text[2][4:6])
def trans_name(field: dict, out_str: bool) -> str:
if 'B725EP' in field:
return '<br>'.join(field['B725EP']['text'])
if 'sfx' in field:
sfx = field['sfx']
else:
sfx = ''
snm = field['snm'] + sfx
if 'adr' not in field or len(field['adr']) == 0:
return snm
adr = field['adr']
if out_str and 'str' in adr:
return '%s<br>%s<br>%s (%s)' % (snm, adr['str'], adr['city'], adr['ctry'])
else:
return '%s<br>%s (%s)' % (snm, adr['city'], adr['ctry'])
def trans_international_an(field: dict) -> str:
anum = field['B861']['dnum']['anum']
return 'PCT/%s/%s' % (anum[:6], anum[6:])
def trans_international_pn(field: dict) -> str:
B871 = field['B871']
pnum = B871['dnum']['pnum']
bnum = str(B871['bnum'])
return '%s %s/%s (%s Gazette %s/%s)' % (pnum[:2], pnum[2:6], pnum[6:], trans_date(B871), bnum[:4], bnum[4:])
def trans_doc(field: dict) -> str:
dnum = field['dnum']
anum = dnum['anum']
if 'pnum' in dnum:
pnum = dnum['pnum']
return '%s / %s' % (anum, format(int(pnum), ',').replace(',', ' '))
else:
return anum
labels = {
15: {
'de': [
'Korrekturinformation',
'Korrigierte Fassung Nr.',
'Korrekturen, siehe'
],
'en': [
'Correction information',
'Corrected version no',
'Corrections, see'
],
'fr': [
'Information de correction',
'Version corrigée no',
'Corrections, voir'
]
},
21: {
'de': 'Anmeldenummer',
'en': 'Application number',
'fr': 'Numéro de dépôt'
},
22: {
'de': 'Anmeldetag',
'en': 'Date of filing',
'fr': 'Date de dépôt'
},
30: {
'de': 'Priorität',
'en': 'Priority',
'fr': 'Priorité'
},
43: {
'de': {
'A1': 'Veröffentlichungstag',
'A3': 'Veröffentlichungstag A2',
'A8': 'Veröffentlichungstag',
'A9': 'Veröffentlichungstag',
'B1': 'Veröffentlichungstag der Anmeldung',
'B2': 'Veröffentlichungstag der Anmeldung',
'B3': 'Veröffentlichungstag der Anmeldung',
'B9': 'Veröffentlichungstag der Anmeldung'
},
'en': {
'A1': 'Date of publication',
'A3': 'Date of publication A2',
'A8': 'Date of publication',
'A9': 'Date of publication',
'B1': 'Date of publication of application',
'B2': 'Date of publication of application',
'B3': 'Date of publication of application',
'B9': 'Date of publication of application'
},
'fr': {
'A1': 'Date de publication',
'A3': 'Date de publication A2',
'A8': 'Date de publication',
'A9': 'Date de publication',
'B1': 'Date de publication de la demande',
'B2': 'Date de publication de la demande',
'B3': 'Date de publication de la demande',
'B9': 'Date de publication de la demande'
}
},
45: {
'de': {
'B1': 'Veröffentlichungstag und Bekanntmachung des Hinweises auf die Patenterteilung',
'B2': {
45: 'Hinweis auf die Patenterteilung',
47: 'Veröffentlichungstag und Bekanntmachung des Hinweises auf die Entscheidung über den Einspruch'
},
'B9': {
45: 'Hinweis auf die Patenterteilung',
47: 'Veröffentlichungstag und Bekanntmachung des Hinweises auf die Entscheidung über den Einspruch'
}
},
'en': {
'B1': 'Date of publication and mention of the grant of the patent',
'B2': {
45: 'Mention of the grant of the patent',
47: 'Date of publication and mention of the opposition decision:'
},
'B9': {
45: 'Mention of the grant of the patent',
47: 'Date of publication and mention of the opposition decision:'
}
},
'fr': {
'B1': 'Date de publication et mention de la délivrance du brevet',
'B2': {
45: 'Mention de la délivrance du brevet',
47: 'Date de publication et mention de la décision concernant l’opposition'
},
'B9': {
45: 'Mention de la délivrance du brevet',
47: 'Date de publication et mention de la décision concernant l’opposition'
}
}
},
48: {
'de': 'Corrigendum ausgegeben am',
'en': 'Corrigendum issued on',
'fr': 'Corrigendum publié le'
},
51: {
'de': 'Int Cl.',
'en': 'Int Cl.',
'fr': 'Int Cl.',
},
56: {
'de': 'Entgegenhaltungen',
'en': 'References cited',
'fr': 'Documents cités'
},
60: {
'de': 'Teilanmeldung',
'en': 'Divisional application',
'fr': 'Demande divisionnaire'
},
71: {
'de': 'Anmelder',
'en': 'Applicant',
'fr': 'Demandeur'
},
72: {
'de': 'Erfinder',
'en': 'Inventor',
'fr': 'Inventeur'
},
73: {
'de': 'Patentinhaber',
'en': 'Proprietor',
'fr': 'Titulaire'
},
74: {
'de': 'Vertreter',
'en': 'Representative',
'fr': 'Mandataire'
},
84: {
'de': [
'Benannte Vertragsstaaten',
'Benannte Erstreckungsstaaten',
'Benannte Validierungsstaaten'
],
'en': [
'Designated Contracting States',
'Designated Extension States',
'Designated Validation States'
],
'fr': [
'Etats contractants désignés',
'Etats d’extension désignés',
'Etats de validation désignés'
]
},
86: {
'de': 'Internationale Anmeldenummer',
'en': 'International application number',
'fr': 'Numéro de dépôt international'
},
87: {
'de': 'Internationale Veröffentlichungsnummer',
'en': 'International publication number',
'fr': 'Numéro de publication internationale'
},
88: {
'de': 'Veröffentlichungstag A3',
'en': 'Date of publication A3',
'fr': 'Date de publication A3'
},
'bulletin': {
'de': 'Patentblatt',
'en': 'Bulletin',
'fr': 'Bulletin'
},
'description': {
'de': 'Beschreibung',
'en': 'Description',
'fr': 'Description'
},
'remarks': {
'de': 'Bemerkungen',
'en': 'Remarks'
}
}
def generate_md(patent: str) -> str:
md = []
kind = patent['attr']['kind']
lang = patent['attr']['lang']
SDOBI = patent['SDOBI']
B000 = SDOBI['B000']
eptags = B000['eptags']
B100 = SDOBI['B100']
B200 = SDOBI['B200']
B400 = SDOBI['B400']
B500 = SDOBI['B500']
B700 = SDOBI['B700']
B800 = SDOBI['B800']
md.append('# (11)(19) **%s %s %s**' % (B100['B190'], format(int(B100['B110']), '0>7,').replace(',', ' '), B100['B130']))
if 'B120' in B100:
if 'B121EP' in B100['B120']:
md.append('## (12) **%s**<br>%s' % (B100['B120']['B121'], B100['B120']['B121EP']))
else:
md.append('## (12) **%s**' % B100['B120']['B121'])
if kind in ['A3']:
md.append('## (88) %s:<br>**%s**' % (labels[88][lang], trans_4xx(B800['B880'], lang)))
if kind in ['B1']:
md.append('## (45) %s:<br>**%s**' % (labels[45][lang][kind], trans_4xx(B400['B450'], lang)))
if kind in ['A8', 'A9', 'B9']:
B150 = B100['B150']
md.append('## (15) %s:<br>' % labels[15][lang][0])
B151 = B150['B151']
if B151[0] == 'W':
md.append('**%s %s (%s %s)**<br>' % (labels[15][lang][1], B151[1:], B151, B100['B132EP']))
else:
raise Exception('not W')
# TODO: Mismatch here. eg. EP10153923W1B9
# TODO: EP12812953W1B9
md.append('**%s**<br>' % labels[15][lang][2])
for B155 in B150['B155']:
if B155['B1551'] == lang:
if 'B153' in B150:
md.append('**%s  INID code(s)  %s**' % (B155['B1552'], B150['B153']))
elif 'B154' in B150:
for B154 in B150['B154']:
if B154['B1541'] == lang:
md.append('**%s**<br>**%s**' % (B155['B1552'], B154['B1542']))
else:
md.append('<br>**%s**<br>' % (B155['B1552']))
md.append('## (48) %s:<br>**%s**' % (labels[48][lang], trans_4xx(B400['B480'], lang)))
if kind in ['B2', 'B9']:
if 'B477' in B400:
md.append('## (45) %s<br>**%s**' % (labels[45][lang][kind][47], trans_4xx(B400['B477'], lang)))
md.append('## (45) %s<br>**%s**' % (labels[45][lang][kind][45], trans_4xx(B400['B450'], lang)))
if kind in ['B3']:
md.append('## (45) Date of publication and mention of the limitation decision:<br>')
for B4530EP in B400['B453EP']['B4530EP']:
md.append('1. **%s-%s %s**' % (B4530EP['kind'], B4530EP['attr']['limitation-sequence'], trans_4xx(B4530EP, lang)))
md.append('## (45) Mention of the grant of the patent:<br>**%s**' % trans_4xx(B400['B450'], lang))
if kind in ['A1', 'A3', 'A8', 'A9']:
md.append('## (43) %s:<br>**%s**' % (labels[43][lang][kind], trans_4xx(B400['B430'], lang)))
md.append('## (21) %s: **%s**' % (labels[21][lang], B200['B210']))
md.append('## (22) %s: **%s**' % (labels[22][lang], trans_date(B200['B220'])))
if 'B510' in B500:
B510 = B500['B510']
md.append('## (51) %s<sup>%s</sup>:' % (labels[51][lang], B510['B516']))
md.append('+ **%s**' % trans_ipc(B510['B511']))
if 'B512' in B510:
for B512 in B510['B512']:
md.append('+ %s' % trans_ipc(B512))
if 'B513' in B510:
for B513 in B510['B513']:
md.append('+ %s' % trans_ipc(B513))
if 'B514' in B510:
md.append('+ %s' % B510['B517EP'])
if 'B510EP' in B500:
md.append('## (51) %s:' % labels[51][lang])
for ipcr in B500['B510EP']:
md.append('+ ***%s***' % trans_ipcr(ipcr))
if 'B860' in B800:
md.append('## (86) %s:<br>**%s**' % (labels[86][lang], trans_international_an(B800['B860'])))
if 'B870' in B800:
md.append('## (87) %s:<br>**%s**' % (labels[87][lang], trans_international_pn(B800['B870'])))
md.append('***')
md.append('## (54)')
for B540 in B500['B540']:
if B540['B541'] == lang:
md.append('+ **%s**' % B540['B542'])
else:
md.append('+ %s' % B540['B542'])
md.append('***')
md.append('## (84) %s:' % labels[84][lang][0])
md.append('**%s**' % ' '.join(B800['B840']))
if 'B844EP' in B800:
md.append('<br>%s:<br>**%s**' % (labels[84][lang][1], ' '.join([x['ctry'] for x in B800['B844EP']['B845EP']])))
if 'B848EP' in B800:
md.append('<br>%s:<br>**%s**' % (labels[84][lang][2], ' '.join([x['ctry'] for x in B800['B848EP']['B849EP']])))
if 'B300' in SDOBI:
B300 = SDOBI['B300']
md.append('## (30) %s:' % labels[30][lang])
for priority in B300:
md.append('+ **%s %s %s**' % (trans_date(priority['B320']), priority['B330']['ctry'], priority['B310']))
if 'B600' in SDOBI:
B600 = SDOBI['B600']
if 'B620' in B600:
B620 = B600['B620']['parent']
md.append('## (62) Document number(s) of the earlier application(s) in accordance with Art. 76 EPC:')
for pdoc_list in B620['pdoc']:
for pdoc in pdoc_list:
md.append('+ **%s**' % trans_doc(pdoc))
if 'B270' in B200:
B270 = B200['B270']
md.append('## (27) Previously filed application:')
md.append('+ **%s %s %s**' % (trans_date(B270), B270['ctry'], B270['dnum']['anum']))
if kind in ['B1', 'B2', 'B3', 'B9']:
md.append('## (43) %s: **%s**' % (labels[43][lang][kind], trans_4xx(B400['B430'], lang)))
if 'B600' in SDOBI:
B600 = SDOBI['B600']
if 'B620EP' in B600:
B620EP = B600['B620EP']['parent']
md.append('## (60) %s:' % labels[60][lang])
for cdoc_list in B620EP['cdoc']:
for cdoc in cdoc_list:
md.append('+ **%s**' % trans_doc(cdoc))
if 'B710' in B700:
md.append('## (71) %s:' % labels[71][lang])
for applicant in B700['B710']:
if 'B716EP' in applicant:
md.append('+ **%s**<br>Designated Contracting States:<br>**%s**' % (trans_name(applicant, False), ' '.join(applicant['B716EP']['ctry'])))
else:
md.append('+ **%s**' % trans_name(applicant, False))
if 'B730' in B700:
md.append('## (73) %s:' % labels[73][lang])
for grantee in B700['B730']:
if 'B736EP' in grantee:
md.append('+ **%s**<br>Designated Contracting States:<br>**%s**' % (trans_name(grantee, False), ' '.join(grantee['B736EP']['ctry'])))
else:
md.append('+ **%s**' % trans_name(grantee, False))
md.append('## (72) %s:' % labels[72][lang])
for inventor in B700['B720']:
md.append('+ **%s**' % trans_name(inventor, False).strip())
if 'B740' in B700:
md.append('## (74) %s:' % labels[74][lang])
for agent in B700['B740']:
md.append('+ **%s**' % trans_name(agent, True))
if 'B560' in B500:
B560 = B500['B560']
md.append('## (56) %s:' % labels[56][lang])
if 'B561' in B560:
B561 = B560['B561']
for patent_citation in B561:
md.append('1. **%s**' % patent_citation['text'])
if 'B562' in B560:
B562 = B560['B562']
md.append('')
for patent_citation in B562:
md.append('+ **%s**' % patent_citation['text'])
if 'B050EP' in eptags or 'B053EP' in eptags or 'B070EP' in eptags:
md.append('<br><br><u>%s:</u>' % labels['remarks'][lang])
if 'B050EP' in eptags:
for B050EP in eptags['B050EP']:
md.append('+ %s' % B050EP['B052EP'])
if 'B053EP' in eptags:
for B053EP in eptags['B053EP']:
md.append('+ %s' % B053EP)
if 'B070EP' in eptags:
md.append('+ %s' % eptags['B070EP'])
md.append('***')
if 'abstract' in patent:
md.append('(57) ')
abstract = patent['abstract']
for abst in abstract:
for content in abst['content']:
md.append('%s<br>' % content['content'])
md.append('***')
if 'description' in patent:
md.append('**%s**<br>' % labels['description'][lang])
description = patent['description']
for content in description['content']:
if content['type'] == 'heading':
md.append('<br>%s<br>' % content['content'])
elif content['type'] == 'p':
md.append('**[%s]** %s<br>\n' % (content['attr']['num'], content['content']))
md.append('***')
if 'claims' in patent:
for claims in patent['claims']:
claims_title = 'Claims'
if claims['attr']['lang'] == 'de':
claims_title = 'Patentansprüche'
elif claims['attr']['lang'] == 'fr':
claims_title = 'Revendications'
md.append('### **%s**<br><br>' % claims_title)
for claim in claims['claim']:
md.append('1. %s<br><br>' % '<br>'.join(claim['claim_text']).replace('\n', '<br>'))
md.append('***')
if 'amended-claims' in patent:
amended_claims = patent['amended-claims']
for claims in amended_claims:
md.append('**%s**<br><br>' % claims['heading']['content'])
for claim in claims['claim']:
md.append('1. %s<br><br>' % '<br>'.join(claim['claim_text']).replace('\n', '<br>'))
if 'amended-claims-statement' in claims:
amended_claims_statement = claims['amended-claims-statement']
for item in amended_claims_statement:
for claims_statement in item['claims-statement']:
for content in claims_statement['content']:
if content['type'] == 'heading':
md.append('<br><br>**%s**<br><br>' % content['content'])
elif content['type'] == 'p':
md.append('%s<br>\n' % content['content'])
md.append('***')
if 'amended-claims-statement' in patent:
amended_claims_statement = patent['amended-claims-statement']
for item in amended_claims_statement:
for claims_statement in item['claims-statement']:
for content in claims_statement['content']:
if content['type'] == 'heading':
md.append('<br><br>**%s**<br><br>' % content['content'])
elif content['type'] == 'p':
md.append('%s<br>\n' % content['content'])
md.append('***')
if 'ep-reference-list' in patent:
ep_reference_list = patent['ep-reference-list']
for content in ep_reference_list['content']:
if content['type'] == 'heading':
md.append('<br><br>%s<br><br>' % content['content'])
elif content['type'] == 'p':
md.append('%s<br>' % content['content'])
return '\n'.join(md)
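# A standalone sketch (not part of the original file) of the publication-number
# formatting used in generate_md above: the '0>7,' spec zero-pads to seven
# characters and inserts comma group separators, which are then replaced with
# spaces. The sample value is hypothetical.
num = '1234567'  # hypothetical B110 value
print(format(int(num), '0>7,').replace(',', ' '))  # -> '1 234 567'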
3cf5831f266719f857798ff19bb7f65e432caf03 | 710 | py | Python | Python/287. FindTheDuplicateNumber.py | RaymondWaterlooLi/LeetCode-Solutions | 7973d2838b114f1dffc29f436fb660a96b51f660 | ["MIT"] | 263 | 2020-10-05T18:47:29.000Z | 2022-03-31T19:44:46.000Z | Python/287. FindTheDuplicateNumber.py | RaymondWaterlooLi/LeetCode-Solutions | 7973d2838b114f1dffc29f436fb660a96b51f660 | ["MIT"] | 1,264 | 2020-10-05T18:13:05.000Z | 2022-03-31T23:16:35.000Z | Python/287. FindTheDuplicateNumber.py | RaymondWaterlooLi/LeetCode-Solutions | 7973d2838b114f1dffc29f436fb660a96b51f660 | ["MIT"] | 760 | 2020-10-05T18:22:51.000Z | 2022-03-29T06:06:20.000Z |
#Given an array of integers nums containing n + 1 integers where each integer is in the range [1, n] inclusive.
#There is only one duplicate number in nums, return this duplicate number.
class Solution(object):
def findDuplicate(self, nums):
# binary-search over a sorted copy of nums to locate the duplicate
s = sorted(nums)  # sorting costs O(n log n); see the O(1)-space alternative below
a, b = 0, len(nums)
temp = (a + b) // 2
while True:  # using binary search to find duplicate
if s[temp] == temp and s[temp-1] == temp:
return s[temp]
if s[temp] == temp+1 and s[temp-1] == temp:
a = temp
else:
b = temp
temp = (a+b)//2
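# A hedged alternative (not from the original solution): Floyd's tortoise-and-hare
# cycle detection finds the duplicate in O(n) time and O(1) extra space, without
# sorting or modifying nums. Treating nums as a linked list (index -> nums[index]),
# the duplicate value is the entrance of the cycle.
def find_duplicate_floyd(nums):
    slow = fast = nums[0]
    while True:  # phase 1: advance at different speeds until the pointers meet
        slow = nums[slow]
        fast = nums[nums[fast]]
        if slow == fast:
            break
    slow = nums[0]
    while slow != fast:  # phase 2: the meeting point is the cycle entrance
        slow = nums[slow]
        fast = nums[fast]
    return slow

print(find_duplicate_floyd([1, 3, 4, 2, 2]))  # -> 2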
3cf74e26261f13d85a64a42ef32a7fccd8ef0a55 | 2,484 | py | Python | utils/evaluate_annotation.py | cltl-students/hamersma-agression-causes | 11cbfd94031a0a3c84a27afa20d8a539acdab609 | ["MIT"] | null | null | null | utils/evaluate_annotation.py | cltl-students/hamersma-agression-causes | 11cbfd94031a0a3c84a27afa20d8a539acdab609 | ["MIT"] | null | null | null | utils/evaluate_annotation.py | cltl-students/hamersma-agression-causes | 11cbfd94031a0a3c84a27afa20d8a539acdab609 | ["MIT"] | null | null | null |
import pandas as pd
from sklearn.metrics import cohen_kappa_score, confusion_matrix
import os
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
dirname = os.path.dirname(__file__)
def extract_annotations(files):
'''Function that takes a list of two annotation files as input and extracts lists of annotations for vims that are
annotated by both annotators.
:param files: list of files
:returns annotations_ann1: list of strings
:returns annotations_ann2: list of strings'''
file_ann1 = dirname +'/annotations/' + files[0]
file_ann2 = dirname + '/annotations/' + files[1]
ann1 = pd.read_excel(file_ann1, index_col=1).T.to_dict()
ann2 = pd.read_excel(file_ann2, index_col=1).T.to_dict()
annotations_ann1 = []
annotations_ann2 = []
for key, value in ann2.items():
label2 = value['Aggression']
label1 = ann1.get(key).get('Aggression')
annotations_ann1.append(label1)
annotations_ann2.append(label2)
return annotations_ann1, annotations_ann2
def calculate_score(ann1, ann2):
"""Function that calculates the inter agreement score using Cohen's Kappa, prints the scores and confusion matrix.
:param ann1: list of annotation labels
:param ann2: list of annotation labels """
agreement = [anno1 == anno2 for anno1, anno2 in zip(ann1, ann2)]
percentage = sum(agreement) / len(agreement)
print("Percentage Agreement: %.2f" % percentage)
termlabels = ['pos', 'neg']
kappa = cohen_kappa_score(ann1, ann2, labels=termlabels)
print("Cohen's Kappa: %.2f" % kappa)
confusions = confusion_matrix(ann1, ann2, labels=termlabels)
pandas_table = pd.DataFrame(confusions, index=termlabels, columns = ['pos', 'neg'])
group_names = ["True Pos", "False Neg", "False Pos", "True Neg"]
group_counts = ["{0: 0.0f}".format(value) for value in confusions.flatten()]
labels = [f"{v1} {v2}" for v1, v2 in zip(group_names, group_counts)]
labels = np.asarray(labels).reshape(2, 2)
sns.heatmap(pandas_table, annot=labels, fmt='', cmap = 'Blues')
plt.title("Confusion matrix annotations", size=12)
plt.show()
print(pandas_table)
def main():
files = ['202103022_chunks_annotated_Sanne.xlsx', '20210322_chunks_annotated_Zana.xlsx']
terms_an1, terms_an2 = extract_annotations(files)
calculate_score(terms_an1, terms_an2)
if __name__ == '__main__':
main()
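# A minimal, self-contained sketch (not part of the original script) of the
# agreement metrics computed above, on hypothetical toy labels:
def _toy_agreement_example():
    ann1 = ['pos', 'pos', 'neg', 'neg', 'pos']
    ann2 = ['pos', 'neg', 'neg', 'neg', 'pos']
    percentage = sum(a == b for a, b in zip(ann1, ann2)) / len(ann1)
    print("Percentage Agreement: %.2f" % percentage)  # 0.80
    print("Cohen's Kappa: %.2f" % cohen_kappa_score(ann1, ann2, labels=['pos', 'neg']))  # ~0.62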
3cf9d103d47dd847c7bbdc09c8f10bae634a2961 | 20,459 | py | Python | src/astrild/particles/halo.py | Christovis/wys-ars | bb15f2d392842f9b32de12b5db5c86079bc97105 | ["MIT"] | 3 | 2021-07-27T14:45:58.000Z | 2022-01-31T21:09:46.000Z | src/astrild/particles/halo.py | Christovis/wys-ars | bb15f2d392842f9b32de12b5db5c86079bc97105 | ["MIT"] | 1 | 2021-11-03T10:47:45.000Z | 2021-11-03T10:47:45.000Z | src/astrild/particles/halo.py | Christovis/wys-ars | bb15f2d392842f9b32de12b5db5c86079bc97105 | ["MIT"] | 1 | 2021-11-03T10:17:34.000Z | 2021-11-03T10:17:34.000Z |
import os
from gc import collect
from pathlib import Path
from typing import List, Optional, Tuple, Type, Union
from importlib import import_module
import yaml
import numpy as np
import pandas as pd
from sklearn.neighbors import BallTree
#from halotools.mock_observables import tpcf_multipole
from astrild.particles.ecosmog import Ecosmog
from astrild.particles.hutils import SubFind
from astrild.particles.hutils import Rockstar
#from astrild.particles.utils import TPCF
from astrild.utils import read_hdf5
from astrild.io import IO
dir_src = Path(__file__).parent.absolute()
default_halo_stats_config = dir_src / "configs/halo_stats.yaml"
dm_particle_mass = 7.98408e10 #[Msun/h]
class HalosWarning(BaseException):
pass
class Halos:
"""
Class to manage Rockstar & SubFind halos and get their statistics such as:
- halo mass fct.
- two point correlation fct.
- concentration mass relation
- pairwise velocity distribution
Attributes:
sim_type:
simulation:
Methods:
from_subfind:
from_rockstar:
from_dataframe:
from_file:
get_subfind_stats:
get_subfind_tpcf:
get_rockstar_stats:
get_rockstar_tpcf:
filter_resolved_subfind_halos:
filter_resolved_rockstar_halos:
_save_results:
_sort_statistics:
_create_filename:
"""
def __init__(
self,
halos: Union[read_hdf5.snapshot, pd.DataFrame],
simulation: Optional[Type[Ecosmog]] = None,
):
self.data = halos
self.sim = simulation
if hasattr(self.sim, "files") == False:
self.halotype = None
elif "fof" in list(self.sim.files.keys()):
self.halotype = "Arepo"
elif "halos" in list(self.sim.files.keys()):
self.halotype = "Rockstar"
@classmethod
def from_subfind(
cls, snap_nr: int, simulation: Optional[Type[Ecosmog]] = None,
) -> "Halos":
""" """
snapshot = read_hdf5.snapshot(
snap_nr,
simulation.dirs["sim"],
part_type_list=["dm"],
snapbases=["/snap-groupordered_"],
# check_total_particle_number=True,
# verbose=True,
)
snapshot.group_catalog(
[
"Group_M_Crit200",
"Group_R_Crit200",
"GroupPos",
"GroupVel",
"GroupFirstSub",
"GroupLenType",
"SubhaloVmax",
"SubhaloPos",
"SubhaloVel",
"SubhaloMass",
"SubhaloHalfmassRad",
]
)
if snapshot.cat["n_groups"] == 0:
snapshot = None
else:
snapshot.cat.update(
{
"SubhaloVmax": snapshot.cat["SubhaloVmax"][
(snapshot.cat["GroupFirstSub"][:]).astype(np.int64)
]
}
)
return cls(snapshot, simulation)
@classmethod
def from_rockstar(
cls, snap_nr: int, simulation: Optional[Type[Ecosmog]] = None,
) -> "Halos":
"""
Load halo data from Rockstar halo finder into pandas.DataFrame
Args:
snap_nr:
simulation:
"""
# TODO: currently only one directory supported, e.g. 012
files_path = simulation.files["halos"][str(snap_nr)]
first = True
for file_path in files_path:
snapshot_part = pd.read_csv(
file_path, header=0, skiprows=np.arange(1, 20), delim_whitespace=True,
)
if first:
    snapshot = snapshot_part
    first = False
else:
    snapshot = pd.concat([snapshot, snapshot_part], ignore_index=True)
return cls.from_dataframe(snapshot, simulation)
@classmethod
def from_file(
cls, filename: str, simulation: Optional[Type[Ecosmog]] = None,
) -> "Halos":
""" """
df = pd.read_hdf(filename, key="df")
return cls.from_dataframe(df, simulation)
@classmethod
def from_dataframe(
cls, df: pd.DataFrame, simulation: Optional[Type[Ecosmog]] = None,
) -> "Halos":
""" """
return cls(df, simulation)
def get_subfind_stats(
self, config_file: str = default_halo_stats_config, save: bool = True,
) -> None:
"""
Compute statistics of halos identified with SubFind from one or a
collection of simulations.
Args:
config_file:
path to a file that contains info on which statistics to
compute and their settings.
save:
whether to save results to file.
"""
# load settings (stg)
with open(config_file) as f:
statistics = yaml.load(f, Loader=yaml.FullLoader)
for name in statistics.keys():
statistics[name]["results"] = {"bins": {}, "values": {}}
# load particles/utils/stats.py package for dynamic function call
module = import_module("astrild.particles.hutils")
# sort statistics according to required halos resolutions
stat_names_ord = self._sort_statistics(statistics)
for snap_nr in self.sim.dir_nrs:
snapshot = self.get_subfind_halo_data(snap_nr)
if snapshot is None:
print(f"No sub- & halos found for snapshot {snap_nr}")
continue
resolution = 0
for stat_name in stat_names_ord:
if statistics[stat_name]["resolution"] != resolution:
resolution = int(statistics[stat_name]["resolution"])
snapshot = self.filter_resolved_subfind_halos(snapshot, resolution)
print(f" Compute {stat_name}")
clas = getattr(module, "SubFind")
fct = getattr(clas, stat_name)
bins, values = fct(snapshot, **statistics[stat_name]["args"])
if (bins is not None) and (values is not None):
statistics[stat_name]["results"]["bins"]["snap_%d" % snap_nr] = bins
statistics[stat_name]["results"]["values"][
"snap_%d" % snap_nr
] = values
collect()
if save:
self._save_results("subfind", statistics)
else:
self.statistics = statistics
def filter_resolved_subfind_halos(
self, snapshot: read_hdf5.snapshot, nr_particles: int,
) -> read_hdf5.snapshot:
"""
Filter halos with '> nr_particles' particles
Args:
Return:
"""
min_mass = dm_particle_mass * nr_particles
mass = snapshot.cat["Group_M_Crit200"][:] * snapshot.header.hubble # [Msun/h]
idx_groups = mass > min_mass
mass = snapshot.cat["SubhaloMass"][:] * snapshot.header.hubble # [Msun/h]
idx_subhalos = mass > min_mass
# idx = snapshot.cat["GroupLenType"][:, 1] > nr_particles
# idx = snapshot.cat["Group_M_Crit200"][:] > \
# 100*(snapshot.header.massarr[1] * 1e10 / snapshot.header.hubble)
return self.filter_subfind_and_fof_halos(snapshot, idx_groups, idx_subhalos)
def filter_nonzero_subfind_halos_size(
self, snapshot: read_hdf5.snapshot,
) -> read_hdf5.snapshot:
"""
Filter halos with non-zero size
Args:
Return:
"""
rad = snapshot.cat["Group_R_Crit200"][:] # [ckpc/h]
idx_groups = rad > 0
rad = snapshot.cat["SubhaloHalfmassRad"][:] # [ckpc/h]
idx_subhalos = rad > 0
return self.filter_subfind_and_fof_halos(snapshot, idx_groups, idx_subhalos)
def filter_subfind_and_fof_halos(
self,
snapshot: read_hdf5.snapshot,
idx_groups: np.ndarray,
idx_subhalos: np.ndarray,
) -> read_hdf5.snapshot:
""" Filter sub- and fof-halos by indices """
for key, value in snapshot.cat.items():
if "Group" in key:
idx = idx_groups
elif ("Subhalo" in key) and (len(snapshot.cat[key]) > len(idx_groups)):
idx = idx_subhalos
else:
HalosWarning(f"The key is {key} is a problem")
continue
if len(value.shape) == 0:
continue
elif len(value.shape) == 1:
snapshot.cat.update({key: value[idx]})
elif len(value.shape) == 2:
snapshot.cat.update({key: value[idx, :]})
else:
raise HalosWarning(
f"The group data {key} has weird dimensions: {value.shape}."
)
return snapshot
#def get_subfind_tpcf(
# self,
# subfind_type: str,
# config: dict,
# save: bool = True,
#) -> None:
# """
# Compute real- and redshift-space TPCF for halos. This computation is
# done using halotools.
# https://halotools.readthedocs.io/en/latest/index.html
# Args:
# subfind_type: ["Group", "Subhalo"]
# config:
# save:
# wether to save results to file.
# """
# tpcf = {}
# for l in config["multipoles"]:
# tpcf[str(l)] = {}
# multipoles = config["multipoles"]
# del config["multipoles"]
# for snap_nr in self.sim.dir_nrs:
# snapshot = self.get_subfind_halo_data(snap_nr)
#
# if snapshot is None:
# print(f"No sub- & halos found for snapshot {snap_nr}")
# continue
# snapshot = self.filter_resolved_subfind_halos(snapshot, 100)
#
# if subfind_type == "group":
# halo_pos = snapshot.cat["GroupPos"][:] * \
# snapshot.header.hubble / 1e3 #[Mpc/h]
# scale_factor = 1 / (1 + snapshot.header.redshift)
# print("test a -------", scale_factor)
# halo_vel = snapshot.cat["GroupVel"][:] / scale_factor #[km/s]
# if subfind_type == "subhalo":
# halo_pos = snapshot.cat["SubhaloPos"][:] * \
# snapshot.header.hubble / 1e3 #[Mpc/h]
# halo_vel = snapshot.cat["SubhaloVel"][:] #[km/s]
# s_bins, mu_range, tpcf_s= TPCF.compute(
# pos=halo_pos,
# vel=halo_vel,
# **config,
# multipole=l,
# )
# for l in multipoles:
# _tpcf = tpcf_multipole(tpcf_s, mu_range, order=l)
# tpcf[str(l)]["snap_%d" % snap_nr] = _tpcf
# print(l, "!!!!!!!!!!!! snap_%d" % snap_nr, _tpcf)
#
# tpcf["s_bins"] = s_bins
# if save:
# IO.save_tpcf(
# self.sim.dirs['out'],
# config,
# multipoles,
# "subfind",
# "_"+subfind_type,
# tpcf,
# )
# else:
# self.tpcf = tpcf
def get_rockstar_stats(
self,
config_file: str = default_halo_stats_config,
snap_nrs: Optional[List[int]] = None,
save: bool = True,
):
"""
Compute statistics of halos identified with Rockstar from one or a
collection of simulations.
rockstar:
https://bitbucket.org/gfcstanford/rockstar/src/main/
https://github.com/yt-project/rockstar
https://www.cosmosim.org/cms/documentation/database-structure/tables/rockstar/
Args:
config_file:
path to a file that contains info on which statistics to
compute and their settings.
save:
whether to save results to file.
"""
# load settings (stg)
with open(config_file) as f:
statistics = yaml.load(f, Loader=yaml.FullLoader)
for name in statistics.keys():
statistics[name]["results"] = {"bins": {}, "values": {}}
# load particles/utils/stats.py package for dynamic function call
module = import_module("astrild.particles.hutils")
# sort statistics according to required halo resolutions
stat_names_ord = self._sort_statistics(statistics)
if snap_nrs is None:
snap_nrs = self.sim.dir_nrs
for snap_nr in snap_nrs:
snapshot = self.get_rockstar_halo_data(
self.sim.files["halos"][str(snap_nr)]
)
if len(snapshot.index.values) == 0:
print(f"No sub- & halos found for snapshot {snap_nr}")
continue
resolution = 0
for stat_name in stat_names_ord:
if statistics[stat_name]["resolution"] != resolution:
resolution = int(statistics[stat_name]["resolution"])
snapshot = self.filter_resolved_rockstar_halos(
snapshot, resolution
)
print(f" Compute {stat_name}")
clas = getattr(module, "Rockstar")
fct = getattr(clas, stat_name)
if stat_name != "histograms":
bins, values = fct(snapshot, **statistics[stat_name]["args"])
if (bins is not None) and (values is not None):
statistics[stat_name]["results"]["bins"]["snap_%d" % snap_nr] = bins
statistics[stat_name]["results"]["values"][
"snap_%d" % snap_nr
] = values
else:
hist = fct(snapshot, **statistics[stat_name]["args"])
statistics[stat_name]["results"]["values"]["snap_%d" % snap_nr] = hist
if save:
self._save_results("rockstar", statistics)
else:
self.statistics = statistics
#def get_rockstar_tpcf(
# self,
# config: dict,
# snap_nrs: Optional[List[int]] = None,
# save: bool = True,
#) -> None:
# """
# Compute real- and redshift-space TPCF for halos. This computation is
# done using halotools.
# https://halotools.readthedocs.io/en/latest/index.html
# Args:
# config:
# save:
# wether to save results to file.
# """
# tpcf = {}
# for l in config["multipoles"]:
# tpcf[str(l)] = {}
# multipoles = config["multipoles"]
# del config["multipoles"]
#
# if snap_nrs is None:
# snap_nrs = self.sim.dir_nrs
# for snap_nr in snap_nrs:
# snapshot = self.get_rockstar_halo_data(
# self.sim.files["halos"][str(snap_nr)]
# )
#
# if snapshot is None:
# print(f"No sub- & halos found for snapshot {snap_nr}")
# continue
# snapshot = self.filter_resolved_rockstar_halos(snapshot, 100)
#
# halo_pos = snapshot[["x", "y", "z"]].values #[Mpc/h]
# halo_vel = snapshot[["vx", "vy", "vz"]].values #[km/s]
# s_bins, mu_range, tpcf_s= TPCF.compute(
# pos=halo_pos,
# vel=halo_vel,
# **config,
# )
# for l in multipoles:
# _tpcf = tpcf_multipole(tpcf_s, mu_range, order=l)
# tpcf[str(l)]["snap_%d" % snap_nr] = _tpcf
#
# tpcf["s_bins"] = s_bins
# if save:
# IO.save_tpcf(
# self.sim.dirs['out'],
# config,
# multipoles,
# "rockstar",
# "",
# tpcf,
# )
# else:
# self.tpcf = tpcf
def filter_resolved_rockstar_halos(
self, snapshot: pd.DataFrame, nr_particles: int,
) -> pd.DataFrame:
"""
Filter halos with '> nr_particles' particles
"""
min_mass = dm_particle_mass * nr_particles
return snapshot[snapshot["m200c"] > min_mass]
def _sort_statistics(self, statistics: dict) -> List[str]:
"""
Sort statistics by their required particle resolution
(low -to-> high).
"""
resolutions = np.zeros(len(list(statistics.keys())))
for idx, (_, stg) in enumerate(statistics.items()):
resolutions[idx] = int(stg["resolution"])
idxs = np.argsort(resolutions)
return [list(statistics.keys())[idx] for idx in idxs]
def _save_results(self, halofinder: str, methods: dict):
"""
Save results of each statistic of each simulations snapshot
for Rockstar and SubFind.
"""
for method, stg in methods.items():
if method != "histograms":
columns = list(stg["results"]["bins"].keys())
if len(self.sim.dir_nrs) > 1:
assert np.sum(stg["results"]["bins"][columns[0]]) == np.sum(
stg["results"]["bins"][columns[1]]
)
df = pd.DataFrame(
data=stg["results"]["values"], index=stg["results"]["bins"][columns[0]],
)
if "seperate" in list(stg["args"].keys()):
compare = np.sum(stg["args"]["seperate"]["compare"])
if compare == 2:
compare = "11"
if compare == 3:
compare = "12"
if compare == 4:
compare = "22"
else:
compare = "00"
file_out = f"{self.sim.dirs['out']}{halofinder}_{method}_{compare}.h5"
if os.path.exists(file_out):
os.remove(file_out)
print(f"Saving results to -> {file_out}")
df.to_hdf(file_out, key="df", mode="w")
else:
for snap_nr, stg_in_snap in stg["results"]["values"].items():
data = np.asarray(list(stg_in_snap.values())).T
columns = list(stg_in_snap.keys())
df = pd.DataFrame(data=data, columns=columns)
file_out = f"{self.sim.dirs['out']}{halofinder}_{method}" + \
"_{snap_nr}.h5"
if os.path.exists(file_out):
os.remove(file_out)
print(f"Saving results to -> {file_out}")
df.to_hdf(file_out, key="df", mode="w")
def _create_filename(self, file_in: str, quantity: str):
""" Create file-name for merged snapshots"""
quantity = quantity.replace("_", "")
file_out = file_in.split("/")[-1].replace("Ray", quantity)
file_out = file_out.replace(".h5", "_lt.fits")
if ("_lc" not in file_in) or ("zrange" not in file_in):
file_out = file_out.split("_")
box_string = [string for string in file_in.split("/") if "box" in string][0]
idx, string = [
(idx, "%s_" % box_string + string)
for idx, string in enumerate(file_out)
if "output" in string
][0]
file_out[idx] = string
file_out = "_".join(file_out)
return self.sim.dirs["out"] + file_out
@staticmethod
def get_nearest_neighbours(
df: pd.DataFrame,
target_id: int,
dmax: Optional[int] = None,
extent: Optional[int] = None,
) -> tuple:
"""
Args:
df: halo DataFrame
target_id: object id for which to find NNs
dmax: maximal distance between objects
Return:
indices and distances
"""
pos = df[["theta1_deg", "theta2_deg"]].values
pos_i = df[df["id"] == target_id][["theta1_deg", "theta2_deg"]].values
if dmax is None:
dmax = df[df["id"] == target_id]["r200_deg"].values
if extent is not None:
dmax *= extent
if len(pos_i.shape) == 1:
pos_i = pos_i[np.newaxis, :]
btree = BallTree(pos)
pairs = btree.query_radius(pos_i, dmax, return_distance=True,)
return pairs[0][0], pairs[1][0]
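# A hedged usage sketch (not part of the original module). The "m200c" column
# name matches what filter_resolved_rockstar_halos reads; the DataFrame itself
# is made up for illustration.
if __name__ == "__main__":
    toy = pd.DataFrame({"m200c": 10 ** np.random.uniform(11, 15, size=1000)})  # [Msun/h]
    halos = Halos.from_dataframe(toy)
    resolved = halos.filter_resolved_rockstar_halos(toy, nr_particles=100)
    print(f"{len(resolved)} of {len(toy)} halos have more than 100 particles")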
3cfcd1fb4a8c9717754df6618804de4a66eaa349 | 5,475 | py | Python | notebooks/working/_02_tb-Demo-visual-marginal-independence-tests.py | hassanobeid1994/tr_b_causal_2020 | 1ffaeb7dcefccf5e1f24c459e9a2f140b2a052a5 | ["MIT"] | null | null | null | notebooks/working/_02_tb-Demo-visual-marginal-independence-tests.py | hassanobeid1994/tr_b_causal_2020 | 1ffaeb7dcefccf5e1f24c459e9a2f140b2a052a5 | ["MIT"] | 89 | 2020-02-10T02:52:11.000Z | 2020-06-23T03:50:27.000Z | notebooks/working/_02_tb-Demo-visual-marginal-independence-tests.py | hassan-obeid/tr_b_causal_2020 | 1ffaeb7dcefccf5e1f24c459e9a2f140b2a052a5 | ["MIT"] | null | null | null |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py,md
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Purpose
# The point of this notebook is to demonstrate how to perform at least one type of visual, marginal independence test.
#
# In particular, the notebook will show how to visually test the following implication<br>
# $
# \begin{aligned}
# P \left( X_1 \mid X_2 \right) &= P \left( X_1 \right) \\
# \int x_1 P \left( X_1 \mid X_2 \right) \, \mathrm{d}x_1 &= \int x_1 P \left( X_1 \right) \, \mathrm{d}x_1 \\
# E \left[ X_1 \mid X_2 \right] &= E \left[ X_1 \right]
# \end{aligned}
# $
#
# In other words, if $X_1$ is marginally independent of $X_2$, then the expectation of $X_1$ conditional on $X_2$ is equal to the marginal expectation of $X_1$. This implies that shuffling / permuting the $X_2$ columns should make no difference to predicting $X_1$, beyond predicting the mean of $X_1$.
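# +
# A minimal, self-contained sketch (not part of the original notebook) of the
# permutation idea described above: if $X_1$ is independent of $X_2$, shuffling
# $X_2$ should leave any measure of association unchanged. The correlation
# statistic below is an illustrative stand-in for the visual test used later.
import numpy as np  # noqa: E402


def permutation_pvalue(x1, x2, num_permutations=100, seed=0):
    rng = np.random.RandomState(seed)
    observed = abs(np.corrcoef(x1, x2)[0, 1])
    permuted = np.array([
        abs(np.corrcoef(x1, rng.permutation(x2))[0, 1])
        for _ in range(num_permutations)
    ])
    # p-value: fraction of permutations at least as associated as the data
    return (permuted >= observed).mean()
# -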
# +
# Declare hyperparameters for testing
NUM_PERMUTATIONS = 100
# Declare the columns to be used for testing
x1_col = "num_licensed_drivers"
x2_col = "num_cars"
mode_id_col = "mode_id"
# Set the colors for plotting
permuted_color = "#a6bddb"
# Declare paths to data
DATA_PATH = "../../data/raw/spring_2016_all_bay_area_long_format_plus_cross_bay_col.csv"
# +
import sys # noqa: E402
import matplotlib.pyplot as plt # noqa: E402
import numpy as np # noqa: E402
import pandas as pd # noqa: E402
import seaborn as sbn # noqa: E402
from scipy.stats import multinomial # noqa: E402
from tqdm.notebook import tqdm # noqa: E402
# %matplotlib inline
sys.path.insert(0, "../../src/")
import testing.observable_independence as oi # noqa: E402
# -
# Load the raw data
df = pd.read_csv(DATA_PATH)
# +
title_str = "{} vs {}"
print(title_str.format(x1_col, x2_col))
drive_alone_filter = df[mode_id_col] == 1
license_array = df.loc[drive_alone_filter, x1_col].values
num_cars_array = df.loc[drive_alone_filter, x2_col].values
oi.visual_permutation_test(
license_array,
num_cars_array,
z_array=None,
seed=1038,
num_permutations=NUM_PERMUTATIONS,
permutation_color=permuted_color,
)
# -
# ## Test `visual_permutation_test`
# +
# Figure out how many observations to simulate, based on real data
num_drive_alone_obs = (df.mode_id == 1).sum()
# Determine how many simulations to carry out
NUM_TEST_SIM = 200
# Initialize an array to store the simulated p-values
test_p_vals = np.empty((NUM_TEST_SIM,), dtype=float)
# Set a random seed for reproducibility
np.random.seed(340)
# Compute the p-values of the visual permutation test when the
# null-hypothesis is true.
for i in tqdm(range(NUM_TEST_SIM)):
# Simulate data that, by construction, satisfies x2 indep x1
sim_x1 = 0.2 + 0.5 * np.random.normal(size=num_drive_alone_obs)
sim_x2 = -0.1 - 0.01 * np.random.uniform(size=num_drive_alone_obs)
# Determine which simulations to plot.
# Just plot 1 simulation for visual comparison with real data
current_close = i != 0
# Carry out the permutation test
current_p = oi.visual_permutation_test(
sim_x1,
sim_x2,
z_array=None,
seed=None,
progress=False,
verbose=False,
show=False,
close=current_close,
)
# Store the resulting p-values
test_p_vals[i] = current_p
# +
# Create a distribution of p-values that is, by construction, uniformly distributed
null_histogram_dist = multinomial(NUM_TEST_SIM, [0.1 for x in range(10)])
null_hist_samples = null_histogram_dist.rvs(100)
null_hist_mean = null_histogram_dist.mean()
null_hist_upper_bound = np.percentile(null_hist_samples, 95, axis=0)
null_hist_lower_bound = np.percentile(null_hist_samples, 5, axis=0)
# Plot the distribution of our test p-values versus the p-values from
# a uniform distribution
fig, ax = plt.subplots(figsize=(10, 6))
plot_categories = [0.05 + 0.1 * x for x in range(10)]
ax.fill_between(
plot_categories,
null_hist_upper_bound,
null_hist_lower_bound,
color=permuted_color,
label="Null 95% Distribution",
alpha=0.5,
zorder=2,
)
ax.hlines(null_hist_mean, 0, 1, label="Null Mean")
ax.hist(test_p_vals, bins=10, label="Observed", zorder=0)
ax.scatter(
plot_categories,
null_hist_upper_bound,
label="Null 95% Upper Bound",
color=permuted_color,
marker="+",
zorder=1,
)
ax.scatter(
plot_categories,
null_hist_lower_bound,
label="Null 5% Lower Bound",
color=permuted_color,
marker="*",
zorder=1,
)
ax.legend(loc=(1.05, 0.75))
ax.set_xlabel("p-values", fontsize=13)
ax.set_ylabel("Num Observations", rotation=0, labelpad=70, fontsize=13)
sbn.despine()
fig.show()
# -
# ## Conclusions
# - From the last plot, we can see that under the null hypothesis of $X_1$ independent of $X_2$, we get p-values that are close to uniformly distributed.<br>
# This means the permutation p-values in `visual_permutation_test` are unlikely to be overly-optimistic.<br>
# In other words, we can feel safe(r) about relying on this test to distinguish conditional dependence from independence.
# - From the first two plots of this notebook, we can see from applying the `visual_permutation_test` that the number of licensed drivers per household and number of automobiles per household are not marginally independent.
3cfd1eff7aa3274bf5ba215dcc74c84bcd761113 | 1,799 | py | Python | Labs/Lab-4.0 WiFi/5_wifi_logging.py | Josverl/MicroPython-Bootcamp | 29f5ccc9768fbea621029dcf6eea9c91ff84c1d5 | ["MIT"] | 4 | 2018-04-28T13:43:20.000Z | 2021-03-11T16:10:35.000Z | Labs/Lab-4.0 WiFi/5_wifi_logging.py | Josverl/MicroPython-Bootcamp | 29f5ccc9768fbea621029dcf6eea9c91ff84c1d5 | ["MIT"] | null | null | null | Labs/Lab-4.0 WiFi/5_wifi_logging.py | Josverl/MicroPython-Bootcamp | 29f5ccc9768fbea621029dcf6eea9c91ff84c1d5 | ["MIT"] | null | null | null |
# import the network module
# This module provides access to various network related functions and classes.
# https://github.com/loboris/MicroPython_ESP32_psRAM_LoBo/wiki/network
import network, utime  # pylint: disable=import-error
# ----------------------------------------------------------
# Define callback function used for monitoring wifi activity
# ----------------------------------------------------------
'''
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
'''
def wifi_cb(info):
_red = "\033[31m"
_cyan= "\033[36m"
_norm = "\033[00m"
if (info[2]):
msg = ", info: {}".format(info[2])
else:
msg = ""
print(_cyan+"I [WiFi] event: {} ({}){}".format( info[0], info[1], msg)+_norm)
# Enable callbacks
network.WLANcallback(wifi_cb)
# ----------------------------------------------------------
# create station interface - Standard WiFi client
wlan = network.WLAN(network.STA_IF)
wlan.active(False)
# activate the interface
wlan.active(True)
# connect to a known WiFi
wlan.connect('IOTBOOTCAMP', 'MicroPython')
# Note that this may take some time, so we need to wait
# Wait 5 sec or until connected
tmo = 50
while not wlan.isconnected():
utime.sleep_ms(100)
tmo -= 1
if tmo == 0:
break
# check if the station is connected to an AP
if wlan.isconnected():
print("=== Station Connected to WiFi \n")
else:
print("!!! Not able to connect to WiFi")
# gets or sets the interface's IP/netmask/gw/DNS addresses
# 'Raw'
print( wlan.ifconfig() )
#pretty
c = wlan.ifconfig()
print("IP:{0}, Network mask:{1}, Router:{2}, DNS: {3}".format( *c ))
3cfd92551f129b14e3271b5e4699d932dae50065 | 681 | py | Python | medium/1282.py | nkwib/leetcode | 73f7492ba208417d8bf8340b6bf9dc68a6ded7f7 | ["MIT"] | null | null | null | medium/1282.py | nkwib/leetcode | 73f7492ba208417d8bf8340b6bf9dc68a6ded7f7 | ["MIT"] | null | null | null | medium/1282.py | nkwib/leetcode | 73f7492ba208417d8bf8340b6bf9dc68a6ded7f7 | ["MIT"] | null | null | null |
from typing import List
class Solution:
def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
# chunk `source` into consecutive slices of length `step`
def slice_per(source, step):
for i in range(0, len(source), step):
yield source[i:i + step]
groups = {}
res = []
for index, person in enumerate(groupSizes, start=0):
if person in groups: groups[person].append(index)
else: groups[person] = [index]
for k in groups.keys():
group = list(slice_per(groups[k], k))
res.extend(group)
return res
groupSizes = [3,3,3,3,4,4,2,2,4,3,4,3,1]
print(Solution().groupThePeople(groupSizes))
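# An equivalent, slightly more idiomatic variant (not from the original file)
# using collections.defaultdict, which avoids the explicit key check and emits
# each group as soon as its bucket fills up:
from collections import defaultdict

def group_the_people(group_sizes: List[int]) -> List[List[int]]:
    buckets = defaultdict(list)
    res = []
    for index, size in enumerate(group_sizes):
        buckets[size].append(index)
        if len(buckets[size]) == size:  # bucket full: emit the group, start fresh
            res.append(buckets.pop(size))
    return res

print(group_the_people([3, 3, 3, 3, 4, 4, 2, 2, 4, 3, 4, 3, 1]))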
3cff24ff2a3befb7112dd8c73ae11e32acd5099b | 1,576 | py | Python | Code/Data_Collection/Web_Scraping/job_scraping/job_scraping/scrapy_crawler.py | gilnribeiro/Work-Project | 15ad906ef5e757daed1df9c7547e5703ad496930 | ["MIT"] | 1 | 2022-01-31T11:31:04.000Z | 2022-01-31T11:31:04.000Z | Code/Data_Collection/Web_Scraping/job_scraping/job_scraping/scrapy_crawler.py | gilnribeiro/Work-Project | 15ad906ef5e757daed1df9c7547e5703ad496930 | ["MIT"] | null | null | null | Code/Data_Collection/Web_Scraping/job_scraping/job_scraping/scrapy_crawler.py | gilnribeiro/Work-Project | 15ad906ef5e757daed1df9c7547e5703ad496930 | ["MIT"] | null | null | null |
# Import spiders
from .spiders.bons_empregos import BonsEmpregosSpider
from .spiders.cargadetrabalhos import CargaDeTrabalhosSpider
from .spiders.emprego_org import EmpregoOrgSpider
from .spiders.emprego_xl import EmpregoXlSpider
from .spiders.net_empregos import NetEmpregosSpider
from twisted.internet import reactor, defer
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from scrapy.utils.project import get_project_settings
# Make sure to be in the Data Collection directory
FOLDER_PATH = "/Users/gilnr/OneDrive - NOVASBE/Work Project/Code/Data/"
def main():
configure_logging()
settings = get_project_settings()
settings.set('FEED_FORMAT', 'jsonlines')
# settings.set('FEED_URI', 'result.json')
runner = CrawlerRunner(settings)
@defer.inlineCallbacks
def crawl():
settings.set('FEED_URI', FOLDER_PATH + "BonsEmpregos.json")
yield runner.crawl(BonsEmpregosSpider)
settings.set('FEED_URI', FOLDER_PATH + "CargaDeTrabalhos.json")
yield runner.crawl(CargaDeTrabalhosSpider)
settings.set('FEED_URI', FOLDER_PATH + "EmpregoOrg.json")
yield runner.crawl(EmpregoOrgSpider)
settings.set('FEED_URI', FOLDER_PATH + "EmpregoXl.json")
yield runner.crawl(EmpregoXlSpider)
settings.set('FEED_URI', FOLDER_PATH + "NetEmpregos.json")
yield runner.crawl(NetEmpregosSpider)
reactor.stop()
crawl()
reactor.run() # the script will block here until the last crawl call is finished
if __name__ == '__main__':
main()
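# A hedged note (not in the original file): each runner.crawl() call copies the
# settings when it builds its Crawler, which is why mutating FEED_URI between
# the yields above works. On Scrapy >= 2.1 the FEED_FORMAT / FEED_URI pair is
# deprecated in favour of the FEEDS setting; the equivalent of one pair of
# settings.set calls above would be:
#
#   settings.set('FEEDS', {FOLDER_PATH + 'BonsEmpregos.json': {'format': 'jsonlines'}})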
a70095a05438f3493dabb7b856707d3589d2cc37 | 2,302 | py | Python | sentiment/train/management/commands/train.py | mnvx/sentiment | b24fad4cfc67b0b443e8ab93b08ac1dbcb095a7c | ["MIT"] | null | null | null | sentiment/train/management/commands/train.py | mnvx/sentiment | b24fad4cfc67b0b443e8ab93b08ac1dbcb095a7c | ["MIT"] | null | null | null | sentiment/train/management/commands/train.py | mnvx/sentiment | b24fad4cfc67b0b443e8ab93b08ac1dbcb095a7c | ["MIT"] | null | null | null |
import configparser
import csv
from django.core.management.base import BaseCommand
import logging
import os
from ....common.catalog.sentiment_type import SentimentType
from ....common.catalog.source import Source
class Command(BaseCommand):
help = 'Train the sentiment classifier'
def add_arguments(self, parser):
parser.add_argument(
'type',
type=str,
help='Training data type',
choices=SentimentType.get_list()
)
parser.add_argument(
'--path',
type=str,
required=False,
help="Path to csv file with training data"
)
parser.add_argument(
'--source',
type=str,
required=False,
help="Source with training data",
choices=Source.get_list()
)
def handle(self, *args, **options):
if options['source'] is None and options['path'] is None:
message = "Can't run training. Set the --path or --source option."
logging.warning(message)
self.stdout.write(self.style.WARNING(message))
return
if options['source'] is not None and options['path'] is not None:
message = "Can't run training. Set only one of the --path or --source options."
logging.warning(message)
self.stdout.write(self.style.WARNING(message))
return
path = options['path']
if options['source'] is not None:
path = os.path.join(Source.get_path(options['source']), options['type'] + '.csv')
config_file = os.path.join(os.path.dirname(path), 'settings.ini')
config = configparser.ConfigParser()
config.read(config_file)
column_index = int(config['csv']['IndexOfColumnWithData'])
delimiter = config['csv']['Delimiter']
encoding = config['csv']['Encoding']
quote_char = config['csv']['QuoteChar']
with open(path, newline='', encoding=encoding) as csvfile:
reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quote_char)
for row in reader:
    print(row[column_index])
self.stdout.write('path: %s' % path)
self.stdout.write(self.style.SUCCESS('Success'))
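# A hypothetical settings.ini consistent with the keys read in handle() above
# (the section and key names come from the code; the values are assumptions):
#
#   [csv]
#   IndexOfColumnWithData = 3
#   Delimiter = ;
#   Encoding = utf-8
#   QuoteChar = "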
a70361c3e3b8431100d15650b5da10d40acb287d | 504 | py | Python | appzoo/utils/log/__init__.py | streamlit-badge-bot/AppZoo | 86547fdc5209fa137b0a6384d63e92f263c1e160 | ["MIT"] | 5 | 2020-11-05T12:13:45.000Z | 2021-11-19T12:26:49.000Z | appzoo/utils/log/__init__.py | streamlit-badge-bot/AppZoo | 86547fdc5209fa137b0a6384d63e92f263c1e160 | ["MIT"] | null | null | null | appzoo/utils/log/__init__.py | streamlit-badge-bot/AppZoo | 86547fdc5209fa137b0a6384d63e92f263c1e160 | ["MIT"] | 3 | 2020-11-23T23:06:34.000Z | 2021-04-18T02:12:40.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : tql-App.
# @File : __init__.py
# @Time : 2019-12-10 17:24
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description :
from loguru import logger
trace = logger.add('runtime_{time}.log', rotation="100 MB", retention='10 days')
logger.debug('this is a debug message')
if __name__ == '__main__':
@logger.catch()
def f():
1 / 0  # deliberately raises ZeroDivisionError so logger.catch logs the traceback
return 1111
print(f())
a704ebb77dcf3890670eefaa40d9424024056adf | 1,850 | py | Python | beast/tools/run/helper_functions.py | galaxyumi/beast | f5ce89d73c88ce481b04fc31a8c099c9c19041fb | ["BSD-3-Clause"] | 21 | 2017-03-18T13:46:06.000Z | 2022-02-21T16:02:10.000Z | beast/tools/run/helper_functions.py | galaxyumi/beast | f5ce89d73c88ce481b04fc31a8c099c9c19041fb | ["BSD-3-Clause"] | 673 | 2017-03-12T23:39:28.000Z | 2022-03-17T14:07:38.000Z | beast/tools/run/helper_functions.py | galaxyumi/beast | f5ce89d73c88ce481b04fc31a8c099c9c19041fb | ["BSD-3-Clause"] | 36 | 2017-03-18T18:00:35.000Z | 2021-09-22T06:35:55.000Z |
# other imports
from multiprocessing import Pool
def subcatalog_fname(full_cat_fname, source_density, sub_source_density):
"""
Return the name of a sub-catalog
Parameters
----------
full_cat_fname : string
name of the photometry catalog
source_density : string
the current source density bin
sub_source_density : string
the current sub-file for the source density bin
Returns
-------
string
the file name of the sub-catalog
"""
return full_cat_fname.replace(
".fits",
"_SD{}_sub{}.fits".format(source_density.replace("_", "-"), sub_source_density),
)
def parallel_wrapper(function, arg_tuples, nprocs=1):
"""
A wrapper to automatically either run the function as-is or run it with parallel processes
Parameters
----------
function : function
the function to be evaluated
arg_tuples : list of tuples
the input to the function (details of course depend on the function)
nprocs : int (default=1)
number of parallel processes (no parallelization if nprocs=1)
Returns
-------
nothing
"""
if nprocs > 1:
p = Pool(nprocs)
for r in p.starmap(function, arg_tuples):
print(r)
else:
for a in arg_tuples:
r = function(*a)
print(r)
def get_modelsubgridfiles(subgrid_names_file):
"""
Read in the file that has the list of subgridded physicsmodel files
Parameters
----------
subgrid_names_file : string
name of the file with the list of names
Returns
-------
list of strings
the names of the subgridded physicsmodel files
"""
with open(subgrid_names_file, "r") as f:
modelsedgridfiles = f.read().split("\n")[:-1]
return modelsedgridfiles
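# A minimal usage sketch (not part of the original module); `double` and the
# argument tuples are made up for illustration. Each tuple is unpacked into the
# function's positional arguments via Pool.starmap.
def double(x):
    return 2 * x

if __name__ == "__main__":
    parallel_wrapper(double, [(1,), (2,), (3,)], nprocs=2)  # prints 2, 4, 6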
a70572ac4f62a9762d70dcd70a9fd3e4dc437ab3 | 2,621 | py | Python | experiments/sparse_sparsity_fixed_results.py | Remi-Boutin/sparsebm | 5979eafff99d59a3b6edac586ee5658529763402 | ["MIT"] | 1 | 2021-09-22T23:25:25.000Z | 2021-09-22T23:25:25.000Z | experiments/sparse_sparsity_fixed_results.py | Remi-Boutin/sparsebm | 5979eafff99d59a3b6edac586ee5658529763402 | ["MIT"] | null | null | null | experiments/sparse_sparsity_fixed_results.py | Remi-Boutin/sparsebm | 5979eafff99d59a3b6edac586ee5658529763402 | ["MIT"] | 1 | 2021-09-08T13:25:15.000Z | 2021-09-08T13:25:15.000Z |
from matplotlib import rc
# rc("text", usetex=True)
import matplotlib
# font = {"size": 14}
# matplotlib.rc("font", **font)
import numpy as np
import matplotlib.pyplot as plt
import glob
import pickle
import time
import matplotlib.colors as mcolors
dataset_files = glob.glob("./experiments/results/sparsity_fixed/*.pkl")
from collections import defaultdict
time_results_sparse = defaultdict(list)
time_results_not_sparse = defaultdict(list)
cari_results_sparse = defaultdict(list)
cari_results_not_sparse = defaultdict(list)
e = 0.25
exponent = 5
connection_probabilities = (
np.array([[4 * e, e, e, e * 2], [e, e, e, e], [2 * e, e, 2 * e, 2 * e]])
/ 2 ** exponent
)
for file in dataset_files:
results = pickle.load(open(file, "rb"))
n1 = results["model"]["tau_1"].shape[0]
n2 = results["model"]["tau_2"].shape[0]
time_results_sparse[(n1, n2)].append(results["end_time"])
cari_results_sparse[(n1, n2)].append(results["co_ari"])
if results["end_time_not_sparse"]:
cari_results_not_sparse[(n1, n2)].append(results["co_ari_not_sparse"])
time_results_not_sparse[(n1, n2)].append(
results["end_time_not_sparse"]
)
xs = sorted(list(time_results_sparse.keys()), key=lambda x: x[0])
fig, ax = plt.subplots(1, 1, figsize=(7, 4))
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
xs_values = [a * a / 2 for a in np.array([a[0] for a in xs])]
ax.plot(
xs_values,
[np.median(time_results_sparse[x]) for x in xs],
marker="^",
markersize=7,
linewidth=0.5,
color=mcolors.TABLEAU_COLORS["tab:green"],
)
xs_value_not_sparse = [
a * a / 2
for a in np.array(
[a[0] for a in sorted(list(time_results_not_sparse.keys()))]
)
]
ax.plot(
xs_value_not_sparse,
[
np.median(time_results_not_sparse[x])
for x in sorted(list(time_results_not_sparse.keys()))
],
marker="*",
markersize=7,
linewidth=0.5,
color=mcolors.TABLEAU_COLORS["tab:blue"],
)
# ax.annotate(
# "OOM",
# (
# xs_value_not_sparse[-1],
# 20
# + np.median(
# time_results_not_sparse[
# sorted(list(time_results_not_sparse.keys()))[-1]
# ]
# ),
# ),
# color=mcolors.TABLEAU_COLORS["tab:blue"],
# )
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_ylabel("Execution time (sec.)")
ax.set_xlabel("Network size $(n_1 \cdot n_2)$")
# ax.ticklabel_format(style="sci", axis="x")
plt.show()
fig.savefig("experiments/results/sparsity_fixed.png")
print("Figure saved in " + "experiments/results/sparsity_fixed.png")
a70af31dd713880205073e138c1e10e6d9d8591d | 4,236 | py | Python | SerialController/Camera.py | Moi-poke/Poke-Controller-temp | b632f55eb6e5adc0f85f2ba6ef59c1230a5d5606 | ["MIT"] | 3 | 2021-04-23T06:30:36.000Z | 2022-01-04T09:10:25.000Z | SerialController/Camera.py | Moi-poke/Poke-Controller-temp | b632f55eb6e5adc0f85f2ba6ef59c1230a5d5606 | ["MIT"] | 1 | 2022-01-04T06:33:11.000Z | 2022-01-04T06:33:11.000Z | SerialController/Camera.py | Moi-poke/Poke-Controller-temp | b632f55eb6e5adc0f85f2ba6ef59c1230a5d5606 | ["MIT"] | 6 | 2021-10-03T05:42:50.000Z | 2022-03-15T00:29:09.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import datetime
import os
import numpy as np
from logging import getLogger, DEBUG, NullHandler
def imwrite(filename, img, params=None):
_logger = getLogger(__name__)
_logger.addHandler(NullHandler())
_logger.setLevel(DEBUG)
_logger.propagate = True
try:
ext = os.path.splitext(filename)[1]
result, n = cv2.imencode(ext, img, params)
if result:
with open(filename, mode='w+b') as f:
n.tofile(f)
return True
else:
return False
except Exception as e:
print(e)
_logger.error(f"Image Write Error: {e}")
return False
class Camera:
def __init__(self, fps=45):
self.camera = None
self.capture_size = (1280, 720)
# self.capture_size = (1920, 1080)
self.capture_dir = "Captures"
self.fps = int(fps)
self._logger = getLogger(__name__)
self._logger.addHandler(NullHandler())
self._logger.setLevel(DEBUG)
self._logger.propagate = True
def openCamera(self, cameraId):
if self.camera is not None and self.camera.isOpened():
self._logger.debug("Camera is already opened")
self.destroy()
if os.name == 'nt':
self._logger.debug("NT OS")
self.camera = cv2.VideoCapture(cameraId, cv2.CAP_DSHOW)
# self.camera = cv2.VideoCapture(cameraId)
else:
self._logger.debug("Not NT OS")
self.camera = cv2.VideoCapture(cameraId)
if not self.camera.isOpened():
print("Camera ID " + str(cameraId) + " can't open.")
self._logger.error(f"Camera ID {cameraId} cannot open.")
return
print("Camera ID " + str(cameraId) + " opened successfully")
self._logger.debug(f"Camera ID {cameraId} opened successfully.")
# print(self.camera.get(cv2.CAP_PROP_FRAME_WIDTH))
# self.camera.set(cv2.CAP_PROP_FPS, 60)
self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, self.capture_size[0])
self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, self.capture_size[1])
# self.camera.set(cv2.CAP_PROP_SETTINGS, 0)
def isOpened(self):
self._logger.debug("Camera is opened")
return self.camera.isOpened()
def readFrame(self):
_, self.image_bgr = self.camera.read()
return self.image_bgr
def saveCapture(self, filename=None, crop=None, crop_ax=None, img=None):
if crop_ax is None:
crop_ax = [0, 0, 1280, 720]
else:
pass
# print(crop_ax)
dt_now = datetime.datetime.now()
if filename is None or filename == "":
filename = dt_now.strftime('%Y-%m-%d_%H-%M-%S') + ".png"
else:
filename = filename + ".png"
if crop is None:
image = self.image_bgr
elif crop is 1 or crop is "1":
image = self.image_bgr[
crop_ax[1]:crop_ax[3],
crop_ax[0]:crop_ax[2]
]
elif crop is 2 or crop is "2":
image = self.image_bgr[
crop_ax[1]:crop_ax[1] + crop_ax[3],
crop_ax[0]:crop_ax[0] + crop_ax[2]
]
elif img is not None:
image = img
else:
image = self.image_bgr
if not os.path.exists(self.capture_dir):
os.makedirs(self.capture_dir)
self._logger.debug("Created Capture folder")
save_path = os.path.join(self.capture_dir, filename)
try:
imwrite(save_path, image)
self._logger.debug(f"Capture succeeded: {save_path}")
print('capture succeeded: ' + save_path)
except cv2.error as e:
print("Capture Failed")
self._logger.error(f"Capture Failed :{e}")
def destroy(self):
if self.camera is not None and self.camera.isOpened():
self.camera.release()
self.camera = None
self._logger.debug("Camera destroyed")
a70ebc7cdf0e76c3a3a02437342d60d6be4b5d1f | 4,513 | py | Python | test/test_cli.py | Datateer/upload-agent | 4684bcf902d6c54baefb08446252a69612bf15a0 | ["MIT"] | null | null | null | test/test_cli.py | Datateer/upload-agent | 4684bcf902d6c54baefb08446252a69612bf15a0 | ["MIT"] | 2 | 2021-02-05T18:58:23.000Z | 2021-02-14T15:23:46.000Z | test/test_cli.py | Datateer/upload-agent | 4684bcf902d6c54baefb08446252a69612bf15a0 | ["MIT"] | null | null | null |
import os
from pathlib import Path
from unittest.mock import patch
from click.testing import CliRunner
import pytest
from datateer.upload_agent.main import cli
from datateer.upload_agent.config import load_config, save_config, save_feed
import datateer.upload_agent.constants as constants
@pytest.fixture
def runner():
return CliRunner()
def test_command_config_upload_agent_handles_show_option(runner):
result = runner.invoke(cli, ['config', 'upload-agent', '--show'])
assert result.exit_code == 0
@patch('datateer.upload_agent.main.load_config')
def test_command_config_feed_handles_show_option(mock_load_config, config, runner):
mock_load_config.return_value = config
result = runner.invoke(cli, ['config', 'feed', '--show', 'SAMPLE-FEED-1'])
print(result.output)
assert result.exit_code == 0
@patch('datateer.upload_agent.main.load_config')
def test_command_config_feed_show_option_errors_if_not_exist(mock_load_config, config, runner):
mock_load_config.return_value = config
result = runner.invoke(cli, ['config', 'feed', '--show', 'NONEXISTENT-KEY'])
print(result.output)
assert result.exit_code == 1
assert 'Feed with key NONEXISTENT-KEY does not exist' in result.output
def test_command_upload_handles_feed_key_and_path_arguments(runner):
result = runner.invoke(cli, ['upload', 'FEED-KEY', 'PATH'])
print(result.output)
assert result.exit_code == 1
assert 'Feed with key FEED-KEY does not exist' in result.output
@patch.dict('datateer.upload_agent.main.config', constants.SAMPLE_CONFIG, clear=True)
def test_config_upload_agent_prompts_show_defaults_if_config_exists(runner, config):
defaults = config
result = runner.invoke(cli, ['config', 'upload-agent'], input='CLIENT-CODE\nRAW-BUCKET\nACCESS-KEY\nACCESS-SECRET')
print(result.output)
assert result.exit_code == 0
assert f'Client code [{defaults["client-code"]}]: CLIENT-CODE' in result.output
assert f'Raw bucket name [{defaults["upload-agent"]["raw-bucket"]}]: RAW-BUCKET' in result.output
assert f'Access key [{defaults["upload-agent"]["access-key"]}]: ACCESS-KEY' in result.output
assert f'Access secret [{defaults["upload-agent"]["access-secret"]}]: ACCESS-SECRET' in result.output
@patch.dict('datateer.upload_agent.main.config', {'client-code': 'TEST-CLIENT-CODE'}, clear=True)
@patch('datateer.upload_agent.main.load_config')
def test_config_feed_prompts(mock_load_config, runner, config):
mock_load_config.return_value = config
result = runner.invoke(cli, ['config', 'feed'], input='PROVIDER\nSOURCE\nFEED\nFEED-KEY')
print(config)
print(result.output)
assert result.exit_code == 0
assert 'Provider [SAMPLE-CLIENT-CODE]: PROVIDER' in result.output
assert 'Source: SOURCE' in result.output
assert 'Feed: FEED' in result.output
assert 'Feed key [FEED]: FEED-KEY' in result.output
@patch.dict('datateer.upload_agent.main.config', {'client-code': 'MY-TEST-CLIENT-CODE'})
@patch('datateer.upload_agent.main.load_config')
def test_config_feed_provider_code_defaults_to_client_code(mock_load_config, config, runner):
mock_load_config.return_value = config
result = runner.invoke(cli, ['config', 'feed', '--source', 'SOURCE', '--feed', 'FEED'], input='\n\n')
assert f'Provider [{config["client-code"]}]:' in result.output
assert f'Provider [{config["client-code"]}]: {config["client-code"]}' not in result.output # assert user did not type in a value
def test_config_feed_key_defaults_to_feed_code(runner):
result = runner.invoke(cli, ['config', 'feed', '--provider', 'PROVIDER', '--source', 'SOURCE', '--feed', 'FEED'])
assert 'Feed key [FEED]:' in result.output
assert 'Feed key [FEED]: FEED' not in result.output # user did not type in a value
@patch.dict('datateer.upload_agent.main.config', constants.SAMPLE_CONFIG, clear=True)
@patch('datateer.upload_agent.main.load_config')
def test_config_feed_handles_existing_feed_key(mock_load_config, runner, config):
mock_load_config.return_value = config
print(config)
result = runner.invoke(cli, ['config', 'feed', '--update', 'SAMPLE-FEED-1'], input='test\ntest\ntest\ntest\n')
print(result.output)
assert result.exit_code == 0
assert f'Provider [{constants.SAMPLE_FEED["provider"]}]:' in result.output
assert f'Source [{constants.SAMPLE_FEED["source"]}]:' in result.output
assert f'Feed [{constants.SAMPLE_FEED["feed"]}]:' in result.output
def test_show_version(runner):
pytest.skip()
| 41.40367
| 132
| 0.734766
| 636
| 4,513
| 5.022013
| 0.13522
| 0.082655
| 0.095805
| 0.068879
| 0.599249
| 0.539449
| 0.493112
| 0.424546
| 0.419224
| 0.381027
| 0
| 0.00228
| 0.125194
| 4,513
| 108
| 133
| 41.787037
| 0.806738
| 0.014181
| 0
| 0.346154
| 0
| 0
| 0.32081
| 0.178178
| 0
| 0
| 0
| 0
| 0.307692
| 1
| 0.141026
| false
| 0
| 0.102564
| 0.012821
| 0.25641
| 0.102564
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a70f8fbd9aef0f039b565e8b5e5bf81d26036760
| 14,899
|
py
|
Python
|
modron/characters.py
|
WardLT/play-by-post-helper
|
26df681f2a28510f88e552be628910e4e5fe57bb
|
[
"MIT"
] | null | null | null |
modron/characters.py
|
WardLT/play-by-post-helper
|
26df681f2a28510f88e552be628910e4e5fe57bb
|
[
"MIT"
] | 13
|
2020-04-08T02:56:58.000Z
|
2020-10-04T21:52:43.000Z
|
modron/characters.py
|
WardLT/play-by-post-helper
|
26df681f2a28510f88e552be628910e4e5fe57bb
|
[
"MIT"
] | null | null | null |
"""Saving and using information about characters"""
import json
import os
from enum import Enum
from typing import Dict, List, Optional, Tuple
import yaml
from pydantic import BaseModel, Field, validator
from modron.config import get_config
_config = get_config()
def _compute_mod(score: int) -> int:
"""Compute a mod given an ability score
Args:
score (int): Ability score
Returns:
(int) Modifier for that score
"""
return score // 2 - 5
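# Worked check of the formula above (illustrative values, not from the source):
#   _compute_mod(8) -> -1, _compute_mod(10) -> 0,
#   _compute_mod(15) -> 2, _compute_mod(20) -> 5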
class Ability(str, Enum):
"""Character abilities"""
STR = 'strength'
DEX = 'dexterity'
CON = 'constitution'
INT = 'intelligence'
WIS = 'wisdom'
CHA = 'charisma'
@classmethod
def match(cls, name: str) -> 'Ability':
"""Match a name to known ability
Args:
name (str): Name to be matched
Returns:
(Ability) Standardized version of that name
"""
name = name.lower()
matched_abilities = [x for x in cls.__members__.values() if x.startswith(name)]
assert len(matched_abilities) == 1, f"Unrecognized ability: {name}"
return matched_abilities[0]
_5e_skills = {
'acrobatics': Ability.DEX, 'animal handling': Ability.WIS, 'arcana': Ability.INT, 'athletics': Ability.STR,
'deception': Ability.CHA, 'history': Ability.INT, 'insight': Ability.WIS, 'intimidation': Ability.CHA,
'investigation': Ability.INT, 'medicine': Ability.WIS, 'nature': Ability.INT, 'perception': Ability.WIS,
'performance': Ability.CHA, 'persuasion': Ability.CHA, 'religion': Ability.INT, 'sleight of hand': Ability.DEX,
'stealth': Ability.DEX, 'survival': Ability.WIS
}
class Alignment(str, Enum):
"""Possible alignments"""
LAWFUL_GOOD = 'lawful good'
GOOD = 'good'
CHAOTIC_GOOD = 'chaotic good'
LAWFUL_NEUTRAL = 'lawful'
NEUTRAL = 'neutral'
CHAOTIC_NEUTRAL = 'chaotic neutral'
LAWFUL_EVIL = 'lawful evil'
EVIL = 'evil'
CHAOTIC_EVIL = 'chaotic evil'
_class_hit_die = {
'artificer': 8, 'barbarian': 12, 'bard': 8, 'cleric': 8, 'druid': 8, 'fighter': 10, 'monk': 8, 'paladin': 10,
'ranger': 10, 'rogue': 8, 'sorcerer': 6, 'warlock': 8, 'wizard': 6
}
"""Hit die for each 5E class"""
class Character(BaseModel):
"""A D&D 5th edition character sheet, in Python form.
This object stores only the mechanics-related aspects of a character sheet
that remain fixed between level-ups. For example, we store the hit point
maximum but not the current hit points, and the skill list but not the languages."""
# Basic information about the character
name: str = Field(..., description='Name of the character')
player: str = Field(None, description='Slack user ID of the player')
classes: Dict[str, int] = Field(..., description='Levels in different classes')
background: str = Field(None, description='Character background')
race: str = Field(None, description='Race of the character')
alignment: Alignment = Field(..., description='Alignment for the character')
# Attributes
strength: int = Field(..., description='Physical strength of the character', ge=0)
dexterity: int = Field(..., description='Gracefulness of the character', ge=0)
constitution: int = Field(..., description='Resistance to physical adversity', ge=0)
intelligence: int = Field(..., description='Ability to apply knowledge and skills', ge=0)
wisdom: int = Field(..., description='Aptitude towards using knowledge to make good decisions', ge=0)
charisma: int = Field(..., description='Proficiency with bringing people to agreement with you', ge=0)
# Combat attributes
speed: int = Field(30, description='Speed in feet per round')
armor_class: int = Field(..., description='Resistance to physical attacks.') # Eventually make derived
current_hit_points: Optional[int] = Field(..., description='Current hit points. Does not include temporary', ge=0)
hit_points: int = Field(..., description='Maximum number of hit points', gt=0)
temporary_hit_points: int = Field(0, description='Amount of temporary hit points.', ge=0)
hit_points_adjustment: int = Field(0, description='Adjustments to the hit point maximum. '
'Can be positive or negative')
# Abilities
saving_throws: List[Ability] = Field(..., description='Saving throws for which the character is proficient')
custom_skills: Dict[str, Ability] = Field(dict(), description='Skills not included in 5e. '
'Dictionary of skill names and associated ability')
proficiencies: List[str] = Field(..., description='Names of skills in which the character is proficient.')
expertise: List[str] = Field([], description='Skills in which the character is an expert')
@classmethod
def from_yaml(cls, path: str) -> 'Character':
"""Parse the character sheet from YAML
Args:
path: Path to the YAML file
"""
with open(path) as fp:
data = yaml.load(fp, yaml.SafeLoader)
return cls.parse_obj(data)
def to_yaml(self, path: str):
"""Save character sheet to a YAML file"""
with open(path, 'w') as fp:
data = json.loads(self.json())
yaml.dump(data, fp, indent=2)
# Validators for different fields
@validator('proficiencies', 'expertise', each_item=True)
def _val_lowercase(cls, v: str) -> str:
return v.lower()
@validator('custom_skills', 'classes')
def _val_dicts(cls, v: dict):
"""Makes keys for dictionaries """
return dict((k.lower(), v) for k, v in v.items())
# Derived quantities, such as modifiers
@property
def strength_mod(self) -> int:
return _compute_mod(self.strength)
@property
def dexterity_mod(self) -> int:
return _compute_mod(self.dexterity)
@property
def constitution_mod(self) -> int:
return _compute_mod(self.constitution)
@property
def intelligence_mod(self) -> int:
return _compute_mod(self.intelligence)
@property
def wisdom_mod(self) -> int:
return _compute_mod(self.wisdom)
@property
def charisma_mod(self) -> int:
return _compute_mod(self.charisma)
@property
def level(self) -> int:
return sum(self.classes.values())
@property
def proficiency_bonus(self) -> int:
return (self.level - 1) // 4 + 2
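# e.g. levels 1-4 give +2 and levels 5-8 give +3, matching the standard 5e table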
@property
def initiative(self) -> int:
return self.dexterity_mod
@property
def total_hit_points(self) -> int:
"""Current hit point amount, including temporary hit points"""
return self.current_hit_points + self.temporary_hit_points
@property
def current_hit_point_maximum(self) -> int:
"""Current hit point maximum"""
return self.hit_points + self.hit_points_adjustment
def heal(self, amount: int):
"""Heal the character by a certain amount
Args:
amount (int): Amount of healing
"""
assert amount >= 0, "Amount must be nonnegative"
if self.current_hit_points is None:
self.full_heal()
self.current_hit_points += amount
self.current_hit_points = min(self.current_hit_points, self.current_hit_point_maximum)
def harm(self, amount: int):
"""Apply damage to this character
Args:
amount (int): Amount of damage
"""
assert amount >= 0, "Damage must be nonnegative"
if self.current_hit_points is None:
self.full_heal()
# Damage hits the temporary first
amount_to_temp = min(self.temporary_hit_points, amount)
amount_to_base = amount - amount_to_temp
self.temporary_hit_points -= amount_to_temp
# Subtract off the remaining damage from the base hit points
self.current_hit_points -= amount_to_base
self.current_hit_points = max(0, self.current_hit_points)
def full_heal(self):
"""Heal character up to hit point maximum"""
self.current_hit_points = self.current_hit_point_maximum
def grant_temporary_hit_points(self, amount: int):
"""Grant temporary hit points
Args:
amount: Amount of HP to give to the character
"""
assert amount > 0, "Amount must be positive"
self.temporary_hit_points += amount
def remove_temporary_hit_points(self):
"""Remove all temporary hit points"""
self.temporary_hit_points = 0
def adjust_hit_point_maximum(self, amount: int):
"""Apply a change to the hit point maximum
Args:
amount: Amount to change the HP maximum
"""
self.hit_points_adjustment += amount
# Make sure the hit point maximum is zero or more
self.hit_points_adjustment = max(-self.hit_points, self.hit_points_adjustment)
# Make sure the hit points stays below the maximum
self.current_hit_points = min(
self.current_hit_point_maximum,
self.current_hit_points
)
def reset_hit_point_maximum(self):
"""Remove any adjustments to the hit point maximum"""
self.hit_points_adjustment = 0
def get_hit_die(self) -> Dict[str, int]:
"""Maximum hit die, computed based on class
Returns:
(dict) Where key is the hit die and value is the number
"""
output = {}
for cls, num in self.classes.items():
hit_die = f'd{_class_hit_die[cls]}'
if hit_die not in output:
output[hit_die] = num
else:
output[hit_die] += num
return output
# Skills and checks
def save_modifier(self, ability: str) -> int:
"""Get the modifier for a certain save type of save
Args:
ability (str): Ability to check. You can use the full name or
the first three letters. Not case-sensitive
Returns:
(int) Modifier for the roll
"""
# Get the modifier
mod = self.ability_modifier(ability)
# Match the name of the ability
matched_ability = Ability.match(ability)
# Add any proficiency bonus
if matched_ability.lower() in self.saving_throws:
mod += self.proficiency_bonus
return mod
def ability_modifier(self, ability: str) -> int:
"""Get the modifier for a certain ability
Args:
ability (str): Ability to check. You can use the full name or
the first three letters. Not case-sensitive
Returns:
(int) Modifier for the roll
"""
# Attempt to match the ability to the pre-defined list
ability = ability.lower()
matched_ability = Ability.match(ability)
# Look up the ability modifier
return getattr(self, f'{matched_ability}_mod')
def skill_modifier(self, name: str) -> int:
"""Get the skill modifier for a certain skill
First looks in custom skill list and then in the standard 5e skills.
In this way, you can define a character to use a non-standard ability
for a certain skill (as in how Monks can use Wisdom for many checks).
Args:
name (str): Name of the skill. Not case sensitive
"""
name_lower = name.lower()
# Determine which ability modifier to use
if name_lower in self.custom_skills:
ability = self.custom_skills[name_lower]
elif name_lower in _5e_skills:
ability = _5e_skills[name_lower]
else:
raise ValueError(f'Unrecognized skill: {name}')
mod = getattr(self, f'{ability}_mod')
# Add proficiency or expertise
if name_lower in self.expertise:
return mod + self.proficiency_bonus * 2
elif name_lower in self.proficiencies:
return mod + self.proficiency_bonus
else:
return mod
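# Hypothetical example (values assumed, not from the source): with a +3
# dexterity modifier and a +2 proficiency bonus, skill_modifier('stealth')
# returns 7 (3 + 2 * 2) for an expert, 5 for a proficient character,
# and 3 otherwise.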
def lookup_modifier(self, check: str) -> int:
"""Get the modifier for certain roll
Args:
check (str): Description of which check to make
Returns:
(int) Modifier for the d20 roll
"""
# Make it all lowercase
check = check.lower()
words = check.split(" ")
# Save
if 'save' in words:
return self.save_modifier(words[0])
# Ability check
try:
return self.ability_modifier(check)
except AssertionError:
pass # and try something else
# Skill
return self.skill_modifier(check)
def get_skills_by_ability(self, ability: str) -> Dict[str, str]:
"""List out the skills for this character that use a certain base ability
Args:
ability: Name of the ability
Returns:
Dictionary of the skill mapped to the level of skill (expert, proficient, untrained)
"""
# Match the ability
matched_ability = Ability.match(ability)
# Loop over the 5e skills
matched_skills = [skill for skill, attr in _5e_skills.items() if attr == matched_ability]
# Match the custom skills
matched_skills.extend([
skill for skill, attr in self.custom_skills.items() if attr == matched_ability
])
# Return the outputs
output = {}
for skill in matched_skills:
if skill in self.proficiencies:
output[skill] = "proficient"
elif skill in self.expertise:
output[skill] = "expert"
else:
output[skill] = "untrained"
return output
def list_available_characters(team_id: str, user_id: str) -> List[str]:
"""List the names of character sheets that are available to a user
Args:
team_id (str): ID of the Slack workspace
user_id (str): ID of the user in question
Returns:
([str]): List of characters available to this player
"""
# Get all characters for this team
sheets = _config.list_character_sheets(team_id)
# Return only the sheets
return [
os.path.basename(s)[:-4] # Remove the ".yml"
for s in sheets
if Character.from_yaml(s).player == user_id
]
def load_character(team_id: str, name: str) -> Tuple[Character, str]:
"""Load a character sheet
Args:
team_id (str): ID of the Slack workspace
name (str): Name of the character
Returns:
- (Character) Desired character sheet
- (str): Absolute path to the character sheet, in case you must save it later
"""
config = get_config()
sheet_path = config.get_character_sheet_path(team_id, name)
return Character.from_yaml(sheet_path), os.path.abspath(sheet_path)
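# Minimal usage sketch (team id and character name are hypothetical):
#   sheet, path = load_character('T0123456', 'example-character')
#   print(sheet.level, sheet.proficiency_bonus)
#   sheet.harm(4)
#   sheet.to_yaml(path)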
| 34.093822
| 118
| 0.627626
| 1,873
| 14,899
| 4.873465
| 0.194875
| 0.039439
| 0.026293
| 0.026293
| 0.224693
| 0.150745
| 0.114483
| 0.072305
| 0.059159
| 0.059159
| 0
| 0.005389
| 0.27767
| 14,899
| 436
| 119
| 34.172018
| 0.84278
| 0.25391
| 0
| 0.137615
| 0
| 0
| 0.143515
| 0.004128
| 0
| 0
| 0
| 0
| 0.022936
| 1
| 0.146789
| false
| 0.004587
| 0.03211
| 0.045872
| 0.490826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a7101a610a52017f13a5fe2d6d32d405867f9aef
| 1,558
|
py
|
Python
|
setup.py
|
Borsos/rubik
|
af220a142b81a8f5b5011e4e072be9e3d130e827
|
[
"Apache-2.0"
] | 1
|
2019-11-13T00:44:09.000Z
|
2019-11-13T00:44:09.000Z
|
setup.py
|
Borsos/rubik
|
af220a142b81a8f5b5011e4e072be9e3d130e827
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Borsos/rubik
|
af220a142b81a8f5b5011e4e072be9e3d130e827
|
[
"Apache-2.0"
] | 1
|
2019-11-13T00:47:16.000Z
|
2019-11-13T00:47:16.000Z
|
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
from distutils.core import setup
import os
import sys
scripts = [
'bin/rubik',
]
try:
dirname = os.path.dirname(os.path.abspath(sys.argv[0]))
py_dirname = dirname
sys.path.insert(0, py_dirname)
from rubik import conf
finally:
del sys.path[0]
setup(
name = "python-rubik",
version = conf.VERSION,
requires = [],
description = "Tool to read/write/visualize N-dimensional cubes",
author = "Simone Campagna",
author_email = "simone.campagna@tiscali.it",
url="https://github.com/simone-campagna/rubik",
download_url = 'https://github.com/simone-campagna/rubik/archive/{}.tar.gz'.format(conf.VERSION),
packages = ["rubik",
"rubik.application",
"rubik.application.help_functions",
"rubik.cubes",
"rubik.visualizer",
"rubik.visualizer.impl"
],
scripts = scripts,
package_data = {},
)
| 27.333333
| 101
| 0.668164
| 200
| 1,558
| 5.155
| 0.575
| 0.081474
| 0.025218
| 0.031038
| 0.069835
| 0.069835
| 0.069835
| 0
| 0
| 0
| 0
| 0.009076
| 0.22208
| 1,558
| 56
| 102
| 27.821429
| 0.841584
| 0.3543
| 0
| 0
| 0
| 0
| 0.328615
| 0.079879
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.121212
| 0
| 0.121212
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a710a43bb737f726810f9f83e8727afbf0fbd72e
| 5,130
|
py
|
Python
|
geco/mips/tests/test_set_cover.py
|
FreestyleBuild/GeCO
|
6db1a549b3145b3bc5d3025a9bccc03be6575564
|
[
"MIT"
] | 8
|
2020-12-16T09:59:05.000Z
|
2022-03-18T09:48:43.000Z
|
geco/mips/tests/test_set_cover.py
|
FreestyleBuild/GeCO
|
6db1a549b3145b3bc5d3025a9bccc03be6575564
|
[
"MIT"
] | 101
|
2020-11-09T10:20:03.000Z
|
2022-03-24T13:50:06.000Z
|
geco/mips/tests/test_set_cover.py
|
FreestyleBuild/GeCO
|
6db1a549b3145b3bc5d3025a9bccc03be6575564
|
[
"MIT"
] | 3
|
2021-04-06T13:26:03.000Z
|
2022-03-22T13:22:16.000Z
|
import collections
import itertools
import pytest
from geco.mips.set_cover.yang import *
from geco.mips.set_cover.sun import *
from geco.mips.set_cover.orlib import *
from geco.mips.set_cover.gasse import *
"""
Generic Tests
"""
def test_set_cover_solution_1():
model = set_cover([1], [{0}])
model.optimize()
assert model.getStatus() == "optimal"
assert model.getObjVal() == 1
def test_set_cover_solution_2():
model = set_cover([1, 1, 1], [{0}, {1}, {2}])
model.optimize()
assert model.getStatus() == "optimal"
assert model.getObjVal() == 3
"""
Yang Tests
"""
@pytest.mark.parametrize(
"m,seed", itertools.product([10, 100, 200], [0, 1, 1337, 53115])
)
def test_yang_set_cover_creation(m, seed):
model = yang_instance(m, seed)
assert model.getNVars() == 10 * m
assert model.getNConss() == m
assert model.getObjectiveSense() == "minimize"
@pytest.mark.parametrize(
"m,seed1,seed2",
itertools.product([10, 100, 200], [0, 1, 1337, 53115], [0, 1, 1337, 53115]),
)
def test_yang_parameter(m, seed1, seed2):
params1 = yang_params(m, seed=seed1)
params2 = yang_params(m, seed=seed2)
same_seeds_produce_same_params = seed1 == seed2 and params1 == params2
different_seeds_produce_different_params = seed1 != seed2 and params1 != params2
assert same_seeds_produce_same_params or different_seeds_produce_different_params
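# i.e. parameter equality must track seed equality: equal seeds yield equal
# params and different seeds yield different params.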
"""
Sun Tests
"""
@pytest.mark.parametrize(
"n,m,seed", itertools.product([10, 100, 200], [10, 100, 200], [0, 1, 1337, 53115])
)
def test_sun_set_cover_creation(n, m, seed):
model = sun_instance(n, m, seed)
assert model.getNVars() == n
assert model.getNConss() == m
assert model.getObjectiveSense() == "minimize"
@pytest.mark.parametrize(
"n,m,seed1,seed2",
itertools.product(
[10, 100, 200], [10, 100, 200], [0, 1, 1337, 53115], [0, 1, 1337, 53115]
),
)
def test_sun_params(n, m, seed1, seed2):
params1 = sun_params(n, m, seed=seed1)
params2 = sun_params(n, m, seed=seed2)
same_seeds_produce_same_params = seed1 == seed2 and params1 == params2
different_seeds_produce_different_params = seed1 != seed2 and params1 != params2
assert same_seeds_produce_same_params or different_seeds_produce_different_params
@pytest.mark.parametrize(
"n,m,seed", itertools.product([10, 100, 200], [10, 100, 200], [0, 1, 1337, 53115])
)
def test_sun_at_least_two_elements_in_set(n, m, seed):
_, sets = sun_params(n, m, seed=seed)
counter = collections.defaultdict(int)
for s in sets:
for e in s:
counter[e] += 1
assert all([count >= 2 for count in counter.values()])
@pytest.mark.parametrize(
"n,base_n,base_m,seed1,seed2",
itertools.product(
[10, 100, 200],
[1, 5, 9],
[10, 100, 200],
[0, 1, 1337, 53115],
[0, 1, 1337, 53115],
),
)
def test_expand_sun_params(n, base_n, base_m, seed1, seed2):
base_costs1, base_sets1 = sun_params(base_n, base_m, seed1)
base_costs2, base_sets2 = sun_params(base_n, base_m, seed2)
params1 = costs1, sets1 = expand_sun_params((n,), (base_costs1, base_sets1), seed1)
params2 = costs2, sets2 = expand_sun_params((n,), (base_costs2, base_sets2), seed2)
# test seeding
same_seeds_produce_same_params = seed1 == seed2 and params1 == params2
different_seeds_produce_different_params = seed1 != seed2 and params1 != params2
assert same_seeds_produce_same_params or different_seeds_produce_different_params
# test correct size
assert len(costs1) == len(costs2) == n
assert len(sets1) == len(sets2) == base_m
"""
OR-Library tests
"""
def test_scp_orlib():
instance_name = "scp41.txt"
instance = orlib_instance(instance_name)
assert instance.getNVars() == 1000
assert instance.getNConss() == 200
def test_rail_orlib():
instance_name = "rail507.txt"
instance = orlib_instance(instance_name)
assert instance.getNVars() == 63009
assert instance.getNConss() == 507
"""
Gasse tests
"""
@pytest.mark.parametrize(
"nrows,ncols,density,seed1,seed2",
itertools.product(
[100, 200],
[10, 100, 200],
[0.2, 0.3, 0.5],
[0, 1, 1337, 53115],
[0, 1, 1337, 53115],
),
)
def test_gasse_params(nrows, ncols, density, seed1, seed2):
params1 = gasse_params(nrows, ncols, density, seed=seed1)
params2 = gasse_params(nrows, ncols, density, seed=seed2)
same_seeds_produce_same_params = seed1 == seed2 and params1 == params2
different_seeds_produce_different_params = seed1 != seed2 and params1 != params2
assert same_seeds_produce_same_params or different_seeds_produce_different_params
@pytest.mark.parametrize(
"nrows,ncols,density,seed",
itertools.product(
[100, 200],
[50, 70],
[0.2, 0.3, 0.5],
[0, 1, 1337, 53115],
),
)
def test_gasse_instance(nrows, ncols, density, seed):
model = gasse_instance(nrows, ncols, density, max_coef=10, seed=seed)
assert model.getNVars() == ncols
assert model.getNConss() == nrows
assert model.getObjectiveSense() == "minimize"
| 28.5
| 87
| 0.670175
| 703
| 5,130
| 4.678521
| 0.14367
| 0.048647
| 0.021891
| 0.040134
| 0.690179
| 0.595014
| 0.520523
| 0.501064
| 0.483734
| 0.394345
| 0
| 0.089809
| 0.194737
| 5,130
| 179
| 88
| 28.659218
| 0.706367
| 0.005848
| 0
| 0.424
| 0
| 0
| 0.038015
| 0.016407
| 0
| 0
| 0
| 0
| 0.192
| 1
| 0.096
| false
| 0
| 0.056
| 0
| 0.152
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a711b022a699f3a1657ba1bf4a22b34ce38cfe57
| 2,878
|
py
|
Python
|
hcplot/scales/colors/hue.py
|
bernhard-42/hcplot
|
1c791e2b19b173b9b98a3d8914095e3c372c9de4
|
[
"Apache-2.0"
] | null | null | null |
hcplot/scales/colors/hue.py
|
bernhard-42/hcplot
|
1c791e2b19b173b9b98a3d8914095e3c372c9de4
|
[
"Apache-2.0"
] | null | null | null |
hcplot/scales/colors/hue.py
|
bernhard-42/hcplot
|
1c791e2b19b173b9b98a3d8914095e3c372c9de4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Bernhard Walter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...utils.color import hcl2rgb, rgb2str
import numpy as np
class HueColors(object):
"""
"Escaping RGBland: Selecting Colors for Statistical Graphics"
Achim Zeileis, Wirtschaftsuniversität Wien
Kurt Hornik, Wirtschaftsuniversität Wien
Paul Murrell, The University of Auckland
https://eeecon.uibk.ac.at/~zeileis/papers/Zeileis+Hornik+Murrell-2009.pdf
"""
#
# Accessors
#
@classmethod
def qual(cls, h=(0, 360), c=100, l=65, sizeOrSeries=5, asString=False):
size = sizeOrSeries if isinstance(sizeOrSeries, int) else (len(sizeOrSeries))
d = (h[1] - h[0]) // (size - 1)
result = [hcl2rgb(h[0] + d * i, c, l) for i in range(size)]
return rgb2str(result) if asString else result
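# e.g. qual(sizeOrSeries=3) spaces hues evenly at h = 0, 180 and 360 degrees,
# each converted from HCL to RGB at fixed chroma and luminance.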
@classmethod
def seq(cls, h=260, c=(30, 90), l=(30, 90), fl=None, fc=None, sizeOrSeries=5, asString=False):
size = sizeOrSeries if isinstance(sizeOrSeries, int) else (len(sizeOrSeries))
if isinstance(c, int):
crange = [c] * size
else:
if fc is None:
crange = np.linspace(c[0], c[1], size)
else:
d = c[0] - c[1]
crange = [c[1] + d * fc(x) for x in np.linspace(1, 0, size)]
if isinstance(l, int):
lrange = [l] * size
else:
if fl is None:
lrange = np.linspace(l[0], l[1], size)
else:
d = l[0] - l[1]
lrange = [l[1] + d * fl(x) for x in np.linspace(1, 0, size)]
result = [hcl2rgb(h, ci, li) for ci, li in zip(crange, lrange)]
return rgb2str(result) if asString else result
@classmethod
def div(cls, h=[260, 0], c=(100, 0, 100), l=(30, 90, 30), fc=None, fl=None,
sizeOrSeries=7, asString=False):
size = sizeOrSeries if isinstance(sizeOrSeries, int) else (len(sizeOrSeries))
s = size // 2 + 1
result = cls.seq(h[0], c[:2], l[:2], fc=fc, fl=fl, sizeOrSeries=s)[:-1] + \
list(reversed(cls.seq(h[1], (c[2], c[1]), (l[2], l[1]), sizeOrSeries=s, fc=fc, fl=fl)))
return rgb2str(result) if asString else result
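# div builds a diverging palette from two sequential ramps that meet at the
# midpoint; the first ramp drops its last entry so the midpoint appears once.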
#
# Info
#
@classmethod
def info(cls):
pass
@classmethod
def toDF(cls, typ):
pass
#
# Quick Accessor
#
def getBrewer(typ, palette, size):
return getattr(HueColors, typ)(palette, size)
| 30.294737
| 99
| 0.592078
| 406
| 2,878
| 4.197044
| 0.374384
| 0.035211
| 0.056338
| 0.051056
| 0.174296
| 0.174296
| 0.174296
| 0.174296
| 0.174296
| 0.1473
| 0
| 0.041386
| 0.277971
| 2,878
| 94
| 100
| 30.617021
| 0.778633
| 0.297429
| 0
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0.045455
| 0.045455
| 0.022727
| 0.295455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a7143837d4f1b09881e05cb620fce36372532de7
| 2,010
|
py
|
Python
|
alipay/aop/api/domain/AlipayEcoCityserviceIndustryEnergySendModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayEcoCityserviceIndustryEnergySendModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayEcoCityserviceIndustryEnergySendModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.EnergyExtRequest import EnergyExtRequest
class AlipayEcoCityserviceIndustryEnergySendModel(object):
def __init__(self):
self._ext_info = None
self._outer_no = None
self._scene = None
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
if isinstance(value, EnergyExtRequest):
self._ext_info = value
else:
self._ext_info = EnergyExtRequest.from_alipay_dict(value)
@property
def outer_no(self):
return self._outer_no
@outer_no.setter
def outer_no(self, value):
self._outer_no = value
@property
def scene(self):
return self._scene
@scene.setter
def scene(self, value):
self._scene = value
def to_alipay_dict(self):
params = dict()
if self.ext_info:
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.outer_no:
if hasattr(self.outer_no, 'to_alipay_dict'):
params['outer_no'] = self.outer_no.to_alipay_dict()
else:
params['outer_no'] = self.outer_no
if self.scene:
if hasattr(self.scene, 'to_alipay_dict'):
params['scene'] = self.scene.to_alipay_dict()
else:
params['scene'] = self.scene
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEcoCityserviceIndustryEnergySendModel()
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'outer_no' in d:
o.outer_no = d['outer_no']
if 'scene' in d:
o.scene = d['scene']
return o
| 26.8
| 69
| 0.584577
| 244
| 2,010
| 4.557377
| 0.184426
| 0.100719
| 0.079137
| 0.048561
| 0.203237
| 0.149281
| 0
| 0
| 0
| 0
| 0
| 0.000727
| 0.31592
| 2,010
| 74
| 70
| 27.162162
| 0.808
| 0.020896
| 0
| 0.118644
| 0
| 0
| 0.064155
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.152542
| false
| 0
| 0.050847
| 0.050847
| 0.322034
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a715a55b0649d434e3e3db7475617b277a5112ae
| 1,657
|
py
|
Python
|
project_receipt/receipt/urls.py
|
Guilouf/django-receipt
|
fb42de12311cd1a20cc28c74a732d818f28ef551
|
[
"Apache-2.0"
] | null | null | null |
project_receipt/receipt/urls.py
|
Guilouf/django-receipt
|
fb42de12311cd1a20cc28c74a732d818f28ef551
|
[
"Apache-2.0"
] | 8
|
2021-02-01T12:47:02.000Z
|
2021-12-13T09:34:38.000Z
|
project_receipt/receipt/urls.py
|
Guilouf/django-receipt
|
fb42de12311cd1a20cc28c74a732d818f28ef551
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from receipt import views
urlpatterns = [
path('', views.ReceiptList.as_view(), name='home'),
path('receipt/', views.ReceiptList.as_view(), name='receipt_list'),
path('receipt/create', views.ReceiptCreate.as_view(), name='receipt_create'),
path('receipt/<int:pk>/edit', views.ReceiptUpdate.as_view(), name='receipt_update'),
path('establishment/', views.EstablishmentList.as_view(), name='establishment_list'),
path('establishment/create', views.EstablishmentCreate.as_view(), name='establishment_create'),
path('establishment/<int:pk>/edit', views.EstablishmentUpdate.as_view(), name='establishment_update'),
path('establishment/<int:pk>', views.EstablishmentDetail.as_view(), name='establishment_detail'),
path('establishment/<int:pk>/add_receipt', views.ReceiptFromEstablishmentCreate.as_view(),
name='establishment_add_receipt'),
path('company/', views.CompanyList.as_view(), name='company_list'),
path('company/create', views.CompanyCreate.as_view(), name='company_create'),
path('company/<int:pk>/edit', views.CompanyUpdate.as_view(), name='company_update'),
path('company/<int:pk>', views.CompanyDetail.as_view(), name='company_detail'),
path('company/<int:pk>/add_establishment', views.EstablishmentFromCompanyCreate.as_view(),
name='company_add_establishment'),
path('tag/', views.TagList.as_view(), name='tag_list'),
path('tag/create', views.TagCreate.as_view(), name='tag_create'),
path('tag/<int:pk>/edit', views.TagUpdate.as_view(), name='tag_update'),
path('tag/<int:pk>', views.TagDetail.as_view(), name='tag_detail'),
]
| 61.37037
| 106
| 0.719976
| 202
| 1,657
| 5.712871
| 0.19802
| 0.093588
| 0.155979
| 0.099653
| 0.045061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095353
| 1,657
| 26
| 107
| 63.730769
| 0.769847
| 0
| 0
| 0
| 0
| 0
| 0.33796
| 0.126132
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a71e3a4361a99f178927d847326e3096eeaee755
| 4,216
|
py
|
Python
|
utils/common/_common.py
|
Pzqqt/Django_Transportation_Management_System
|
f4f0905d8e007920ae190252eeaefbc6ee67ed85
|
[
"MIT"
] | null | null | null |
utils/common/_common.py
|
Pzqqt/Django_Transportation_Management_System
|
f4f0905d8e007920ae190252eeaefbc6ee67ed85
|
[
"MIT"
] | null | null | null |
utils/common/_common.py
|
Pzqqt/Django_Transportation_Management_System
|
f4f0905d8e007920ae190252eeaefbc6ee67ed85
|
[
"MIT"
] | null | null | null |
from functools import partial
from itertools import chain
from collections import UserList
import logging
import traceback
from django import forms
from django.db.models import Model
from django.core.validators import validate_comma_separated_integer_list
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.fields.related import ForeignKey
from django.http import JsonResponse
from django.utils import timezone
class UnescapedDjangoJSONEncoder(DjangoJSONEncoder):
""" 自定义的JSON编码器, 强制ensure_ascii为False, 避免中文字符被编码为乱码 """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Force ensure_ascii to False
self.ensure_ascii = False
UnescapedJsonResponse = partial(JsonResponse, encoder=UnescapedDjangoJSONEncoder)
class SortableModelChoiceField(forms.ModelChoiceField):
"""
为ModelChoiceField的choices进行排序是件很麻烦的事
尽管我们可以对queryset属性使用`order_by`进行排序
但是还需要考虑对数据库的优化(尽可能避免explain中出现`using filesort`)
因此, 我们在ModelChoiceIterator中添加一个额外可选的属性, 以允许在遍历choices时对其进行排序
这是在应用层的排序, 意在减少数据库的压力
"""
class _ModelChoiceIterator(forms.models.ModelChoiceIterator):
class _FakeQuerySet(UserList):
_prefetch_related_lookups = ()
def iterator(self):
yield from self
def __iter__(self):
sort_key = self.field.sort_key
if sort_key is not None:
# After sorted() (which executes the database query immediately), _prefetch_related_lookups no longer matters
self.queryset = self._FakeQuerySet(sorted(self.queryset, key=sort_key))
return super().__iter__()
iterator = _ModelChoiceIterator
def __init__(self, queryset, **kwargs):
# Pop sort_key before calling super(), since the base field does not accept it
self.sort_key = kwargs.pop("sort_key", None)
super().__init__(queryset, **kwargs)
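# Hypothetical usage sketch (model and key function are assumed, not from the source):
#   field = SortableModelChoiceField(queryset=SomeModel.objects.all(),
#                                    sort_key=lambda obj: obj.name.lower())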
def multi_lines_log(logger: logging.Logger, string: str, level=logging.INFO):
""" 记录多行日志 """
for line in string.splitlines():
logger.log(level, line)
def traceback_log(logger: logging.Logger, level=logging.ERROR):
""" 记录异常栈 """
multi_lines_log(logger=logger, string=traceback.format_exc(), level=level)
def traceback_and_detail_log(request, logger: logging.Logger, level=logging.ERROR):
""" 记录异常栈和其他一些详细信息 """
logger.log(level, "=" * 100)
logger.log(level, "Exception:")
logger.log(level, "Time: %s" % timezone.make_naive(timezone.now()).strftime("%Y-%m-%d %H:%M:%S"))
logger.log(level, "Url: %s" % request.path)
logger.log(level, "Method: %s" % request.method)
logger.log(level, "Cookies: %s" % request.COOKIES)
logger.log(level, "Session: %s" % dict(request.session.items()))
if request.method == "POST":
logger.log(level, "Post data: %s" % request.POST.dict())
logger.log(level, "")
traceback_log(logger=logger, level=level)
logger.log(level, "=" * 100)
def validate_comma_separated_integer_list_and_split(string: str, auto_strip=True) -> list:
""" 判断字符串是否是一个以逗号分隔的数字列表
如果是, 则自动进行分割并返回列表; 如果不是, 则抛出ValidationError异常
:param string: 要解析的字符串
:param auto_strip: 为True时则提前对string进行strip(默认)
:return: list
"""
if auto_strip:
string = string.strip()
validate_comma_separated_integer_list(string)
return [int(x) for x in string.split(',')]
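# e.g. validate_comma_separated_integer_list_and_split(' 1,2,3 ') -> [1, 2, 3]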
def model_to_dict_(instance: Model) -> dict:
""" Django有一个内置的django.forms.models.model_to_dict方法(以下简称原model_to_dict方法)
可以方便地把模型转为字典, 但是有一个坑, 被标记为不可编辑(editable为False)的字段不会包含在输出的字典中
原model_to_dict方法仅在初始化ModelForm时被使用, 为了安全起见, 这样做无可厚非
但是我们想要的"模型转为字典"的方法应该包含模型的所有字段
所以我们参考原model_to_dict方法编写了新的model_to_dict_方法
比起原model_to_dict方法缺少了fields和exclude参数, 因为我们暂时不需要
"""
opts = instance._meta
data = {}
for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many):
# For one-to-one and many-to-one foreign keys, return the related model object
# (many-to-many relations are handled correctly by the else branch).
# Note: a ForeignKey's attname is "<field_name>_id", so calling value_from_object
# would return the id of the related object instead.
if isinstance(f, ForeignKey):
data[f.name] = getattr(instance, f.name, None)
else:
data[f.name] = f.value_from_object(instance)
return data
def del_session_item(request, *items):
""" 从request会话中删除键值 """
for item in items:
request.session.pop(item, None)
| 35.728814
| 101
| 0.708491
| 468
| 4,216
| 6.168803
| 0.410256
| 0.034292
| 0.053343
| 0.030135
| 0.059231
| 0.024939
| 0
| 0
| 0
| 0
| 0
| 0.001745
| 0.184298
| 4,216
| 117
| 102
| 36.034188
| 0.837744
| 0.225569
| 0
| 0.029851
| 0
| 0
| 0.032463
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.149254
| false
| 0
| 0.179104
| 0
| 0.447761
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a72993531283fe9cd45b23f3481f393933bdc390
| 15,777
|
py
|
Python
|
main.py
|
chilipolygon/Amazon-Requests-Module
|
20fcfa9b9764e097bc107aa9dc5b0db772ce3ad9
|
[
"Apache-2.0"
] | 3
|
2022-01-18T20:54:08.000Z
|
2022-02-05T23:27:13.000Z
|
main.py
|
chilipolygon/Amazon-Requests-Module
|
20fcfa9b9764e097bc107aa9dc5b0db772ce3ad9
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
chilipolygon/Amazon-Requests-Module
|
20fcfa9b9764e097bc107aa9dc5b0db772ce3ad9
|
[
"Apache-2.0"
] | null | null | null |
# ---------------------
from bs4 import BeautifulSoup as bs
import requests
import urllib3
import urllib
from urllib.parse import unquote
import re
import os
import sys
import json
import time
from colorama import Fore, init
from pprint import pprint
from datetime import datetime
import uuid
import threading
# ----------------------
from dhooks import Webhook, Embed
# ---------------------
init(autoreset=True)
urllib3.disable_warnings()
os.system('cls' if os.name == 'nt' else 'clear')
# ---------------------
# MUST HAVE PRIME
# MUST HAVE ONE CLICK
# MUST SELECT "Keep me signed in"
# MUST USE AGED ACCOUNT
# ====================================
# MUST HAVE THESE FOR BEST SUCCESS
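# Overall flow (as implemented below): productPage -> promoPage/clipCoupon if an
# unclipped coupon is found -> addToCart -> claimCode (optional promo code) ->
# checkSummary -> checkout -> sendWebhook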
class main:
def __init__(self, sku, code, account) -> None:
self.account = account
with open('./appdata/cookies.json') as f:
self.cookies = json.load(f)
self.sku = sku
self.code = code
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[+] Making Session')
self.session = requests.Session()
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[+] Fetching Cookies')
for cookie in self.cookies:
self.session.cookies.set(
self.cookies[cookie]['name'], self.cookies[cookie]['value'])
self.productPage()
def productPage(self):
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[+] Getting Product Page')
self.asin_page = self.session.get(
'https://smile.amazon.com/dp/' + str(self.sku),
headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36"}
)
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[*] Getting Product Price:', end=" ")
soup = bs(self.asin_page.text, "lxml")
self.og_price = soup.find(
'span', {'class': 'a-offscreen'}).getText().strip()
print(f'{self.og_price}')
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[*] Getting Session ID:', end=" ")
self.session_id = self.asin_page.text.split(
'id="session-id" name="session-id" value="')[1].split('"')[0]
print(f'{self.session_id}')
try:
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[*] Getting Offer Id:', end=" ")
self.offerListingId = re.search(
"&offerListingId=(.*?)\&", self.asin_page.text).group(1)
print(f'{self.offerListingId}')
self.promoPage()  # if we find an OID, the listing has an UNREDEEMED coupon
except Exception:  # raised when the coupon is already redeemed OR there is no coupon
print(Fore.RED + '[-] Coupon Clipped')
self.addToCart()
def promoPage(self):
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36",
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'none',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'accept-encoding': 'gzip, deflate, br',
}
# Use a local variable so we do not shadow the productPage method on the instance
offers_page = self.session.get(
f'https://smile.amazon.com/gp/aod/ajax/ref=auto_load_aod?asin={self.sku}', headers=headers)
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[+] Getting Promo Object')
self.promoObj = {
'promoId': re.search("&promotionId=(.*?)\&", offers_page.text).group(1),
'merchantID': re.search(";seller=(.*?)\&", offers_page.text).group(1),
'sku': re.search("&sku=(.*?)\&", offers_page.text).group(1),
'anti-csrftoken-a2z': re.search("&anti-csrftoken-a2z=(.*?)\'", offers_page.text).group(1)
}
for i in self.promoObj:
print(Fore.WHITE + f"Session: {self.account} || " + Fore.YELLOW +
f'[*] {i.title()}: ' + Fore.WHITE + f'{self.promoObj[i]}')
self.clipCoupon()
# ---------------------
def clipCoupon(self):
headers = {
'anti-csrftoken-a2z': unquote(self.promoObj['anti-csrftoken-a2z']),
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Dest': 'empty',
'x-requested-with': 'XMLHttpRequest',
'referer': f'https://www.amazon.com/dp/{self.sku}'
}
params = {
'promotionId': self.promoObj['promoId'],
'asin': self.sku,
'offerListingId': self.offerListingId,
'sku': self.promoObj['sku'],
'anti-csrftoken-a2z': unquote(self.promoObj['anti-csrftoken-a2z']),
'source': 'dp_cxcw'
}
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[+] Clipping Coupon')
promoUrl = f'https://www.amazon.com/promotion/redeem/?{urllib.parse.urlencode(params)}'
while True:
clipCoupon = self.session.get(promoUrl, headers=headers)
if 'SUCCESS' in clipCoupon.text:
print(
Fore.WHITE + f"Session: {self.account} || " + Fore.GREEN + '[+] Coupon Clipped')
break
time.sleep(0.5)  # avoid hammering the endpoint while retrying
self.addToCart()
def addToCart(self):
headers = {
'Connection': 'keep-alive',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"',
'x-amz-checkout-entry-referer-url': 'https://smile.amazon.com/dp/' + self.sku,
'x-amz-turbo-checkout-dp-url': 'https://smile.amazon.com/dp/' + self.sku,
'sec-ch-ua-mobile': '?0',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36',
'x-amz-support-custom-signin': '1',
'x-amz-checkout-csrf-token': self.session_id,
'Origin': 'https://smile.amazon.com',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Dest': 'empty',
'Referer': 'https://smile.amazon.com/dp/' + self.sku
}
payload = {
'addressID': 'nmqgnomolpkq',
'isAsync': '1',
'quantity.1': '1',
}
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[+] Adding To Cart')
while True:
try:
self.session_atc = self.session.post(
f'https://smile.amazon.com/checkout/turbo-initiate?ref_=dp_start-bbf_1_glance_buyNow_2-1&referrer=detail&pipelineType=turbo&clientId=retailwebsite&weblab=RCX_CHECKOUT_TURBO_DESKTOP_PRIME_87783&temporaryAddToCart=1&asin.1={self.sku}',
data=payload, headers=headers
)
if self.session_atc.status_code == 200:
break
except requests.exceptions.RequestException:  # the original except on a status-code comparison was invalid Python
pass  # fall through to the retry path below
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.RED + '[-] Error Adding To Cart', end=" ")
time.sleep(1)
print(
Fore.WHITE + f"Session: {self.account} || " + Fore.RED + '[-] Retrying', end=" ")
print(
Fore.WHITE + f"Session: {self.account} || " + Fore.GREEN + '[+] Added to Cart')
checkout_url_tuple = re.search(
'\/(.*)shipmentId=(.*)\d', self.session_atc.text).group(0)
self.checkout_url_str = ''.join(checkout_url_tuple)
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[*] Getting PID:', end=" ")
self.pid = re.search(
"pid=(.*?)\&", str(self.checkout_url_str)).group(1)
print(f'{self.pid}')
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[*] Getting Anti CSRF:', end=" ")
self.AntiCSRF = re.search(
"anti-csrftoken-a2z'.value='(.*?)\'", str(self.session_atc.text)).group(1)
print(f'{self.AntiCSRF}') # use this to checkout
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[*] Getting SID:', end=" ")
self.sid = re.search(
"'CacheDetection.RequestID': \"(.*?)\",", self.session_atc.text).group(1)
print(f'{self.sid}')
if not self.code: # check if there is no code
print(
Fore.WHITE + f"Session: {self.account} || " + Fore.RED + '[-] No Code Found')
self.checkSummary()
else:
self.claimCode()
def claimCode(self):
if not self.code:  # the original check ('' in self.code) was always True, so the code was never applied
return
else:
headers = {
'Connection': 'keep-alive',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"',
'x-amz-checkout-entry-referer-url': 'https://smile.amazon.com/dp/' + self.sku,
'anti-csrftoken-a2z': self.AntiCSRF,
'sec-ch-ua-mobile': '?0',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36',
'x-amz-checkout-csrf-token': self.session_id,
'Origin': 'https://smile.amazon.com',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Dest': 'empty',
'Referer': 'https://smile.amazon.com/checkout/pay?pid=' + self.pid + '&pipelineType=turbo&clientId=retailwebsite&temporaryAddToCart=1&hostPage=detail&weblab=RCX_CHECKOUT_TURBO_DESKTOP_PRIME_87783'
}
payload = {
'claimcode': self.code,
'isClientTimeBased': '1'
}
print(
Fore.WHITE + f"Session: {self.account} || " + Fore.YELLOW + '[*] Applying Code')
claimurl = f'https://smile.amazon.com/checkout/pay/add-gc-promo?ref_=chk_pay_addGcPromo&referrer=pay&temporaryAddToCart=1&hostPage=detail&weblab=RCX_CHECKOUT_TURBO_DESKTOP_PRIME_87783&_srcRID={self.sid}&clientId=retailwebsite&pipelineType=turbo&pid={self.pid}'
claim = self.session.post(
claimurl, headers=headers, data=payload, allow_redirects=True)
with open("./html/claimCode.html", "w", encoding='utf-8') as f:
f.write(claim.text)
self.checkSummary()
def checkSummary(self):
headers = {
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'none',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36'
}
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.GREEN + '[+] Reviewing Summary')
summaryUrl = f'https://www.amazon.com/checkout/ordersummary?ref_=chk_spc_select__summary&referrer=spc&pid={self.pid}&pipelineType=turbo&clientId=retailwebsite&temporaryAddToCart=1&hostPage=detail&weblab=RCX_CHECKOUT_TURBO_DESKTOP_PRIME_87783'
summary = self.session.get(summaryUrl, headers=headers)
soup = bs(summary.text, "lxml")
self.finalPrice = soup.find(
'td', {'class': 'a-color-price a-text-right a-align-bottom a-text-bold a-nowrap'}).getText().strip()
print(Fore.WHITE + f"Session: {self.account} || " + Fore.YELLOW +
'[+] Order Total: ' + Fore.WHITE + f'{self.finalPrice}')
self.checkout()
def checkout(self):
print(Fore.WHITE +
f"Session: {self.account} || " + Fore.GREEN + '[+] Checking Out')
headers = {
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9',
'sec-ch-ua-mobile': '?0',
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36',
'anti-csrftoken-a2z': self.AntiCSRF
}
payload = {
'x-amz-checkout-csrf-token': self.session_id,
'ref_': 'chk_summary_placeOrder',
'referrer': 'summary',
'pid': self.pid,
'pipelineType': 'turbo',
'clientId': 'retailwebsite',
'temporaryAddToCart': 1,
'hostPage': 'detail',
'weblab': 'RCX_CHECKOUT_TURBO_DESKTOP_PRIME_87783',
'isClientTimeBased': 1
}
params = {
'ref_': 'chk_summary_placeOrder',
'_srcRID': self.sid,
'clientId': 'retailwebsite',
'pipelineType': 'turbo',
'pid': self.pid
}
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[*] Status: ', end=' ')
checkoutUrl = f'https://www.amazon.com/checkout/spc/place-order?{urllib.parse.urlencode(params)}'
checkout = self.session.post(
checkoutUrl, data=payload, headers=headers)
if checkout.status_code == 200:
print(Fore.GREEN + 'Success')
self.sendWebhook(self.sku, self.finalPrice)
else:
print(f'something went wrong {checkout.text}')
def sendWebhook(self, sku, finalPrice):
soup = bs(self.asin_page.text, "lxml")
title = soup.find('span', {'id': 'productTitle'}).getText().strip()
a = soup.find('div', {'id': 'imgTagWrapperId'})
img = a.img['src'] if a is not None and a.img else ''
price = soup.find('span', {'class': 'a-offscreen'}).getText().strip()
product_url = f'https://www.amazon.com/dp/{sku}?tag=Chili'
with open('./appdata/config.json') as f:
data = json.load(f)
url = data['webhook']
hook = Webhook(url)
embed = Embed(
color=0x8AFF8A,
timestamp='now'
)
embed.set_title(
title='🎉Successful Checkout')
embed.set_thumbnail(img)
embed.add_field(
name='Item', value=f'[{title}]({product_url})', inline=False)
embed.add_field(name='Original Price', value=f'{price}', inline=False)
embed.add_field(name='Check Out Price',
value=f'{finalPrice}', inline=False)
embed.add_field(
name='Account', value=f'||{self.account.replace(".json", "")}||', inline=False)
embed.set_footer(
text='Made by #chili9999')
print(Fore.GREEN + '[+] Sending Webhook')
hook.send(embed=embed)
def callback(account: str):
sku = input('Put in a product asin:')
promo = input('Put in a product promo code, if none, press Enter:')
threads = []
threads.append(threading.Thread(
target=main, args=[sku, promo, account]))
for thread in threads:
thread.start()
time.sleep(.1)
for thread in threads:
thread.join()
if __name__ == "__main__":
with open('./appdata/config.json') as f:
account = json.load(f)['account']
callback(account)
# asin, promo code, email
# if you don't have a promocode, leave it as ''
| 41.518421
| 272
| 0.548647
| 1,811
| 15,777
| 4.725014
| 0.202098
| 0.027346
| 0.029216
| 0.040318
| 0.511745
| 0.454248
| 0.422695
| 0.404698
| 0.397102
| 0.310506
| 0
| 0.026249
| 0.280408
| 15,777
| 379
| 273
| 41.627968
| 0.727385
| 0.03353
| 0
| 0.319749
| 0
| 0.047022
| 0.38562
| 0.050624
| 0
| 0
| 0.000525
| 0
| 0
| 1
| 0.031348
| false
| 0.003135
| 0.053292
| 0
| 0.090909
| 0.106583
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a72d7496d5e3f428cdf8342b764e52a9a68ac6a0
| 3,092
|
py
|
Python
|
cdparser/Features.py
|
opengulf/nyc-directories-support-scripts
|
e22582b8f4cb3c365e9aac1d860d9c36831277a5
|
[
"MIT"
] | 1
|
2021-09-07T20:41:00.000Z
|
2021-09-07T20:41:00.000Z
|
cdparser/Features.py
|
opengulf/nyc-directories-support-scripts
|
e22582b8f4cb3c365e9aac1d860d9c36831277a5
|
[
"MIT"
] | null | null | null |
cdparser/Features.py
|
opengulf/nyc-directories-support-scripts
|
e22582b8f4cb3c365e9aac1d860d9c36831277a5
|
[
"MIT"
] | 2
|
2021-09-07T20:49:14.000Z
|
2021-11-05T02:03:47.000Z
|
from functools import partial
class Features:
@staticmethod
def __emit_word_features(rel_pos, word):
features = {}
for f in Features.__word_feature_functions().items():
features.update({str(rel_pos) + ":" + f[0]: f[1](word)})
return features
@staticmethod
def get_word_features(sentence, i):
features = {}
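# Collect features from a window of two tokens on either side of position i,
# clipped to the sentence bounds; each neighbor's features are keyed by its
# relative position (see __emit_word_features).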
for x in range(i - 2, i + 3):
if 0 <= x < len(sentence):
features.update(Features.__emit_word_features(-(i - x), sentence[x][0]))
if i == 0:
features.update({'BOS' : True})
if i == len(sentence) - 1:
features.update({'EOS': True})
return features
@staticmethod
def __word_feature_functions():
return {
"word.junior": Features.__is_junior_token,
"word.widow.token": Features.__is_widow_token,
"word.contains.digit": Features.__contains_digit,
"word.is.delimiter": Features.__is_delimiter,
"word.is.start.token": Features.__is_start,
"word.is.end.token": Features.__is_end,
"word.is.lower": str.islower,
"word.is.title": str.istitle,
"word.is.upper": str.isupper,
"word.substr[-2:]" : partial(Features.__substr, 2),
"word.substr[-1:]": partial(Features.__substr, 1)
}
@staticmethod
def get_sentence_features(sentence):
return [Features.get_word_features(sentence, i) for i in range(len(sentence))]
@staticmethod
def get_sentence_labels(sentence):
return [label for token, label in sentence]
@staticmethod
def get_sentence_tokens(sentence):
return [token for token, label in sentence]
@staticmethod
def __contains_digit(input):
for c in input:
if c.isdigit():
return True
return False
@staticmethod
def __substr(amount, word):
return word[amount:]
@staticmethod
def __is_start(input):
if input == "START":
return True
return False
@staticmethod
def __is_end(input):
if input == "END":
return True
return False
@staticmethod
def __is_delimiter(input):
for c in input:
if c == '.' or c == ',':
return True
return False
@staticmethod
def __is_known_position_adj(input):
if len(input) == 1:
if input == 'h' or input == 'r':
return True
return False
@staticmethod
def __is_junior_token(input):
dc = input.lower()
if dc == "jr":
return True
return False
@staticmethod
def __segment_of_sentence(sent, i, div):
sent_length = len(sent)
pos = i + 1
for j in range(1,div + 1):
if pos <= j*(sent_length / float(div)):
return j
@staticmethod
def __is_widow_token(input):
dc = input.lower()
if dc == "wid" or dc == "widow":
return True
return False
| 28.366972
| 88
| 0.559185
| 356
| 3,092
| 4.617978
| 0.210674
| 0.136861
| 0.068127
| 0.089416
| 0.293796
| 0.237226
| 0.193431
| 0
| 0
| 0
| 0
| 0.007726
| 0.330207
| 3,092
| 109
| 89
| 28.366972
| 0.786094
| 0
| 0
| 0.397849
| 0
| 0
| 0.064339
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16129
| false
| 0
| 0.010753
| 0.053763
| 0.419355
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a733182bb7d063e48b371c3b9b8871a0afe48521
| 19,712
|
py
|
Python
|
dashboard/api/config.py
|
x3niasweden/fomalhaut-panel
|
8b4b3d81e2c91bef8f24ccbaf9cf898a47ac38a6
|
[
"MIT"
] | 14
|
2017-08-01T08:28:00.000Z
|
2020-08-29T06:55:16.000Z
|
dashboard/api/config.py
|
x3niasweden/fomalhaut-panel
|
8b4b3d81e2c91bef8f24ccbaf9cf898a47ac38a6
|
[
"MIT"
] | 1
|
2021-03-29T06:16:34.000Z
|
2021-03-29T06:16:34.000Z
|
dashboard/api/config.py
|
x3niasweden/fomalhaut-panel
|
8b4b3d81e2c91bef8f24ccbaf9cf898a47ac38a6
|
[
"MIT"
] | 12
|
2017-07-18T02:59:03.000Z
|
2021-03-23T04:04:58.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# created by restran on 2016/1/2
from __future__ import unicode_literals, absolute_import
import traceback
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import csrf_protect
from django.db import transaction
from cerberus import Validator
import redis
from fomalhaut import settings
from ..forms import *
from common.utils import http_response_json, json_dumps, json_loads
from accounts.decorators import login_required
from common.utils import error_404
logger = logging.getLogger(__name__)
@login_required
@require_http_methods(["GET"])
def get_model_data(request, model_name):
logger.debug('run api_get_model_data')
return_data = {'success': False, 'msg': ''}
get_default_form = request.GET.get('get_default_form', False)
if model_name == 'client':
model = Client
model_form = ClientForm
data = model.get_all_in_json()
elif model_name == 'endpoint':
model = Endpoint
model_form = EndpointForm
data = model.get_all_in_json()
elif model_name == 'client_endpoint':
model = ClientEndpoint
model_form = None
client_id = request.GET.get('client_id')
data = model.get_all_in_json(client_id)
else:
model = None
model_form = None
data = []
if model is None:
raise error_404(request)
# Fetch a default form value used for adding and editing data
if get_default_form:
t = model_form.get_default_form_json()
return_data['default_form'] = t
return_data['data'] = data
return_data['success'] = True
return http_response_json(return_data)
def do_create_or_update_model_data(request, model_name, is_update, post_data, form):
return_data = {'success': False, 'msg': ''}
if model_name == 'client_endpoint':
client_id = post_data.get('client_id', [])
endpoints = post_data.get('endpoints', [])
client = Client.get_client(client_id)
if client is None:
return_data['msg'] = 'Submitted data is invalid: client_id does not exist'
return return_data
ClientEndpoint.objects.filter(client_id=client_id).delete()
endpoint_list = []
for t in endpoints:
ce = ClientEndpoint(client=client, endpoint_id=t['id'], enable=t['enable'])
endpoint_list.append(ce)
# bulk_create does not return ids
ClientEndpoint.objects.bulk_create(endpoint_list)
return_data['success'] = True
return_data['data'] = ClientEndpoint.get_all_in_json(client_id)
return return_data
else:
form_is_valid = form.is_valid()
return_validation = {}
acl_rules = post_data.get('acl_rules', [])
if model_name == 'endpoint':
acl_rules_validation = {'data': [], 'has_error': False, 'errors': ''}
for t in acl_rules:
tf = ACLRuleForm(t)
if not tf.is_valid():
acl_rules_validation['has_error'] = True
acl_rules_validation['errors'] = 'ACL rule data is empty or invalid'
break
return_validation['acl_rules'] = acl_rules_validation
form_is_valid = form_is_valid and not acl_rules_validation['has_error']
elif model_name == 'client_endpoint':
pass
if form_is_valid:
# logger.debug(form.cleaned_data)
logger.debug('form is valid')
entry = form.save(commit=False)
if model_name == 'endpoint':
entry.save()
acl_rules = [ACLRule(endpoint_id=entry.id,
re_uri=t['re_uri'],
is_permit=t['is_permit'])
for t in acl_rules]
# Delete the old rules
ACLRule.objects.filter(endpoint_id=entry.id).delete()
# Create the ACLRule entries
ACLRule.objects.bulk_create(acl_rules)
entry.acl_rules = acl_rules
else:
entry.save()
return_data['success'] = True
return_data['data'] = entry.to_json_dict()
logger.debug(return_data['data'])
else:
return_data['msg'] = 'Submitted data is invalid'
logger.debug('form is not valid')
logger.debug(form.get_form_json())
return_validation.update(form.get_form_json())
return_data['data'] = return_validation
return return_data
@login_required
@csrf_protect
@require_http_methods(["POST"])
def create_model_data(request, model_name):
"""
Create or update model data
:param request:
:param model_name:
:return:
"""
logger.debug('run api_create_model_data')
post_data = json_loads(request.body)
logger.debug(post_data)
if model_name == 'client':
form = ClientForm(post_data['data'])
elif model_name == 'endpoint':
form = EndpointForm(post_data['data'])
elif model_name == 'client_endpoint':
form = None
else:
form = None
return_data = do_create_or_update_model_data(
request, model_name, False, post_data, form)
return http_response_json(return_data)
@login_required
@csrf_protect
@require_http_methods(["POST"])
def update_model_data(request, model_name, entry_id):
"""
Create or update model data
:param request:
:param model_name:
:param entry_id:
:return:
"""
logger.debug('run api_update_model_data')
return_data = {'success': False, 'msg': ''}
if model_name == 'client':
model = Client
model_form = ClientForm
elif model_name == 'endpoint':
model = Endpoint
model_form = EndpointForm
elif model_name == 'client_endpoint':
model = None
model_form = None
else:
model = None
model_form = None
post_data = json_loads(request.body)
logger.debug(post_data)
if model_name != 'client_endpoint':
try:
entry = model.objects.get(id=entry_id)
except model.DoesNotExist:  # DoesNotExist is defined on the concrete model class, not on models.Model
return_data['msg'] = 'Data does not exist'
return http_response_json(return_data)
if model_name == 'client':
form = model_form(post_data['data'], instance=entry)
elif model_name == 'endpoint':
form = model_form(post_data['data'], instance=entry)
else:
form = None
else:
form = None
return_data = do_create_or_update_model_data(
request, model_name, True, post_data, form)
return http_response_json(return_data)
@login_required
@csrf_protect
@require_http_methods(["POST"])
def delete_model_data(request, model_name, entry_id=None):
"""
Delete model data
:param request:
:param model_name:
:param entry_id:
:return:
"""
logger.debug('run api_delete_model_data')
return_data = {'success': False, 'msg': ''}
if model_name == 'client':
model = Client
elif model_name == 'endpoint':
model = Endpoint
elif model_name == 'client_endpoint':
model = ClientEndpoint
else:
model = None
if model and entry_id is not None:
try:
entry = model.objects.get(id=entry_id)
entry.delete()
return_data['success'] = True
except model.DoesNotExist:  # DoesNotExist is defined on the concrete model class, not on models.Model
return_data['msg'] = u'Data does not exist'
return http_response_json(return_data)
@login_required
@csrf_protect
@require_http_methods(["POST"])
def update_enable_state_model_data(request, model_name, entry_id=None):
"""
Toggle the enable button and update the enabled state
:param request:
:param model_name:
:param entry_id:
:return:
"""
logger.debug('run api_update_enable_state_model_data')
return_data = {'success': False, 'msg': ''}
if model_name == 'client':
model = Client
elif model_name == 'endpoint':
model = Endpoint
elif model_name == 'client_endpoint':
model = ClientEndpoint
else:
model = None
post_data = json_loads(request.body)
if model and entry_id:
try:
model.objects.filter(id=entry_id).update(enable=post_data['enable'])
return_data['success'] = True
except Exception as e:
logger.error(str(e))  # e.message is Python 2 only
return_data['msg'] = u'Failed to update the enabled state'
return http_response_json(return_data)
def do_import_config(upload_file):
"""
Import configuration from a JSON file
:param upload_file:
:return:
"""
file_contents = upload_file.read()
try:
json_data = json_loads(file_contents)
except Exception as e:
logger.error(str(e))  # e.message is Python 2 only
return False, u'The uploaded file is not JSON or is malformed', []
json_data_schema = {
'clients': {
'type': 'list',
'required': True,
'schema': {
'type': 'dict',
'schema': {
'id': {
'type': 'integer',
'required': True,
},
'name': {
'type': 'string',
'required': True,
},
'app_id': {
'type': 'string',
'required': True,
},
'secret_key': {
'type': 'string',
'required': True,
},
'enable': {
'type': 'boolean',
'required': True,
},
'memo': {
'type': 'string',
'required': True,
}
}
}
},
'client_endpoints': {
'type': 'list',
'required': True,
'schema': {
'type': 'dict',
'schema': {
'id': {
'type': 'integer',
'required': True,
},
'client_id': {
'type': 'integer',
'required': True,
},
'endpoint_id': {
'type': 'integer',
'required': True,
},
'enable': {
'type': 'boolean',
'required': True,
}
}
}
},
'endpoints': {
'type': 'list',
'required': True,
'schema': {
'type': 'dict',
'schema': {
'id': {
'type': 'integer',
'required': True,
},
'unique_name': {
'type': 'string',
'required': True,
},
'name': {
'type': 'string',
'required': True,
},
'version': {
'type': 'string',
'required': True,
},
'url': {
'type': 'string',
'required': True,
},
'memo': {
'type': 'string',
'required': True,
},
'async_http_connect_timeout': {
'type': 'integer',
'required': True,
},
'async_http_request_timeout': {
'type': 'integer',
'required': True,
},
'enable_acl': {
'type': 'boolean',
'required': True,
},
'acl_rules': {
'type': 'list',
'required': True,
'schema': {
'type': 'dict',
'schema': {
'is_permit': {
'type': 'boolean',
'required': True,
},
're_uri': {
'type': 'string',
'required': True,
}
}
}
}
}
}
}
}
validator = Validator(json_data_schema, allow_unknown=True)
if not validator.validate(json_data):
errors = []
for (k, v) in validator.errors.items():
errors.append('%s: %s' % (k, v))
return False, 'The uploaded JSON config file is malformed; export a JSON config file first, then edit it', errors
else:
success, msg, errors = False, '', []
try:
# An exception inside the block triggers an automatic rollback
with transaction.atomic():
# Clear old data, excluding Client and Endpoint records
ClientEndpoint.objects.all().delete()
ACLRule.objects.all().delete()
old_client_list = Client.objects.all()
old_client_dict = {}
for t in old_client_list:
old_client_dict[t.app_id] = t
old_endpoint_list = Endpoint.objects.all()
old_endpoint_dict = {}
for t in old_endpoint_list:
old_endpoint_dict[t.unique_name] = t
new_client_dict = {}
for t in json_data['clients']:
# del t['id']
old_client = old_client_dict.get(t['app_id'])
# If an identical one already exists, update it
if old_client is not None:
form = ClientForm(t, instance=old_client)
del old_client_dict[t['app_id']]
else:
form = ClientForm(t)
if not form.is_valid():
errors = []
form_errors = form.get_form_json()
for (k, v) in form_errors.items():
if v['has_error']:
errors.append('%s: %s' % (k, v['errors']))
msg, errors = 'The uploaded JSON config file is malformed; export a JSON config file first, then edit it', errors
raise Exception('error')
client = form.save()
new_client_dict[t['id']] = client
new_endpoint_dict = {}
for t in json_data['endpoints']:
# del t['id']
old_endpoint = old_endpoint_dict.get(t['unique_name'])
# If an identical one already exists, update it
if old_endpoint is not None:
form = EndpointForm(t, instance=old_endpoint)
del old_endpoint_dict[t['unique_name']]
else:
form = EndpointForm(t)
if not form.is_valid():
errors = []
form_errors = form.get_form_json()
for (k, v) in form_errors.items():
if v['has_error']:
errors.append('%s: %s' % (k, v['errors']))
msg, errors = 'The uploaded JSON config file is malformed; export a JSON config file first, then edit it', errors
raise Exception('error')
endpoint = form.save(commit=False)
endpoint.save()
new_endpoint_dict[t['id']] = endpoint
acl_rules = t['acl_rules']
for y in acl_rules:
# del t['id']
tf = ACLRuleForm(y)
if not tf.is_valid():
msg, errors = 'The uploaded JSON config file is malformed; export a JSON config file first, then edit it', \
['ACL rule data is empty or invalid']
raise Exception('error')
acl_rules = [ACLRule(endpoint_id=endpoint.id,
re_uri=t['re_uri'], is_permit=t['is_permit'])
for t in acl_rules]
# Create the ACLRule entries
ACLRule.objects.bulk_create(acl_rules)
# Match the correct client_endpoint records using the new ids
client_endpoint_list = []
for t in json_data['client_endpoints']:
client = new_client_dict.get(t['client_id'])
endpoint = new_endpoint_dict.get(t['endpoint_id'])
enable = t['enable']
ce = ClientEndpoint(client=client, endpoint=endpoint, enable=enable)
client_endpoint_list.append(ce)
ClientEndpoint.objects.bulk_create(client_endpoint_list)
# Delete Clients that are absent from the imported config
Client.objects.filter(id__in=[t.id for t in old_client_dict.values()]).delete()
# Delete Endpoints that are absent from the imported config
Endpoint.objects.filter(id__in=[t.id for t in old_endpoint_dict.values()]).delete()
success, msg = True, u'Configuration imported successfully'
except Exception as e:
logger.error(str(e))  # e.message is Python 2 only
return success, msg, errors
@login_required
@csrf_protect
@require_http_methods(["POST"])
def import_config(request):
"""
Upload a file and import the configuration
"""
if request.FILES:
success, msg, errors = False, '', []
for _file in request.FILES:
# Chunked upload is disabled, so each uploaded file arrives complete
# Only one file may be selected; break after processing it
success, msg, errors = do_import_config(request.FILES[_file])
break
return http_response_json({'success': success, 'msg': msg, 'errors': errors})
else:
raise error_404(request)
@login_required
@csrf_protect
@require_http_methods(["POST"])
def transfer_to_redis(request):
"""
Sync the configuration data to Redis
"""
success, msg = False, ''
try:
config_data = get_config_redis_json()
logger.debug(config_data)
r = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT,
db=settings.REDIS_DB, password=settings.REDIS_PASSWORD)
# transaction=True is the default
pipe = r.pipeline(transaction=True)
# Bulk-delete keys matching a pattern
pattern_delete_lua = """
local keys = redis.call('keys', ARGV[1])
for i = 1, table.getn(keys) do
redis.call('del', keys[i])
end
"""
pattern_delete = r.register_script(pattern_delete_lua)
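# The registered Lua script deletes every key matching the pattern passed in ARGV[1];
# KEYS is unused, so an empty key list is supplied and the call is buffered on the pipeline.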
pattern_delete(keys=[''], args=['%s:*' % settings.CLIENT_CONFIG_REDIS_PREFIX], client=pipe)
for t in config_data:
logger.debug(t)
#
# client = {}
# for k, v in t.iteritems():
# if k != 'endpoints':
# client[k] = v
pipe.set('%s:%s' % (settings.CLIENT_CONFIG_REDIS_PREFIX, t['app_id']), json_dumps(t))
# for s in t['endpoints']:
# pipe.set('%s:%s:%s:%s' % (settings.PROXY_CONFIG_REDIS_PREFIX, t['access_key'], s['name'], s['version']),
# json_dumps(s))
# pipe.delete('config:*')
# the EXECUTE call sends all buffered commands to the server, returning
# a list of responses, one for each command.
pipe.execute()
success = True
except Exception as e:
msg = 'An exception occurred while syncing config data to Redis'
logger.error(str(e))  # e.message is Python 2 only
logger.error(traceback.format_exc())
return http_response_json({'success': success, 'msg': msg})
| 33.241147
| 122
| 0.491985
| 1,928
| 19,712
| 4.782676
| 0.130187
| 0.033185
| 0.022774
| 0.023859
| 0.523913
| 0.438239
| 0.380761
| 0.318621
| 0.261251
| 0.202581
| 0
| 0.001527
| 0.401887
| 19,712
| 592
| 123
| 33.297297
| 0.780577
| 0.057325
| 0
| 0.511013
| 0
| 0
| 0.112272
| 0.008109
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019824
| false
| 0.004405
| 0.03304
| 0
| 0.0837
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a733c76add330a704c87d51a39a3121429990715
| 2,209
|
py
|
Python
|
WX_BG.py
|
boristown/WX_BG
|
c715d1f3ffeef60187be0289f26549204d6b963f
|
[
"MIT"
] | 1
|
2019-08-17T23:21:28.000Z
|
2019-08-17T23:21:28.000Z
|
WX_BG.py
|
boristown/WX_BG
|
c715d1f3ffeef60187be0289f26549204d6b963f
|
[
"MIT"
] | null | null | null |
WX_BG.py
|
boristown/WX_BG
|
c715d1f3ffeef60187be0289f26549204d6b963f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# filename: WX_BG.py
import prices
import glob
import prediction
import os
import time
import random
# Price data files
prices_file_pattern = "Output\\prices\\*.csv"
# Prediction data files
predict_file_pattern = "Output\\predict\\*.csv"
# Second-pass price data files
prices_file_second_pattern = "Output\\prices_second\\*.csv"
# Second-pass prediction data files
predict_file_second_pattern = "Output\\predict_second\\*.csv"
modeStr = {0: "v1", 1:"v2"}
predict_batch_size = 10000
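# Main loop: clear stale CSV files, read prices, wait for the prediction
# files to appear, then collect the results and sleep before repeating.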
while True:
'''
randint = random.randint(0, 9)
if randint == 0:
modeType = 0
else:
modeType = 1
'''
modeType = 1
print( "mode = " + modeStr[modeType] )
# Delete old price data
prices_files = glob.glob(prices_file_pattern)
for prices_file in prices_files:
os.remove(prices_file)
prices_files_second = glob.glob(prices_file_second_pattern)
for prices_file_second in prices_files_second:
os.remove(prices_file_second)
# Delete old prediction data
predict_files = glob.glob(predict_file_pattern)
for predict_file in predict_files:
os.remove(predict_file)
predict_files_second = glob.glob(predict_file_second_pattern)
for predict_file_second in predict_files_second:
os.remove(predict_file_second)
time.sleep(10)
print("正在读取价格……")
#读取价格并生成输入数据
if modeType == 0:
symbol_id_list = prices.read_prices()
else:
symbol_id_list = prices.read_pricehistory(predict_batch_size)
try:
if len(symbol_id_list) == 0:
continue
except:
continue
print("正在执行预测……")
# 预测并读取结果
while True:
time.sleep(1)
predict_files = glob.glob(predict_file_pattern)
predict_files_second = glob.glob(predict_file_second_pattern)
if len(predict_files) == 0 or len(predict_files_second) == 0:
continue
print("检测到预测文件:", predict_files[0])
print("检测到预测文件2:", predict_files_second[0])
time.sleep(2)
if modeType == 0:
prediction.get_prediction(symbol_id_list, predict_files[0])
else:
prediction.get_predictionhistory(symbol_id_list, predict_files[0], predict_files_second[0])
break
print("预测执行完毕!")
time.sleep(20)
| 26.939024
| 103
| 0.663649
| 280
| 2,209
| 4.982143
| 0.246429
| 0.111828
| 0.077419
| 0.05448
| 0.193548
| 0.162007
| 0.126165
| 0.071685
| 0.071685
| 0
| 0
| 0.019492
| 0.23359
| 2,209
| 81
| 104
| 27.271605
| 0.797401
| 0.044817
| 0
| 0.236364
| 0
| 0
| 0.076301
| 0.050531
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.109091
| 0
| 0.109091
| 0.109091
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a734a04a2790536248f0af4b3c7aedde27c72873
| 929
|
py
|
Python
|
hyppo/d_variate/tests/test_dhsic.py
|
zdbzdb123123/hyppo
|
c22dcfb7bdf25c9945e6d4ddd7c6bfe5fcdd0cde
|
[
"MIT"
] | 116
|
2020-02-28T10:29:22.000Z
|
2022-03-22T12:19:39.000Z
|
hyppo/d_variate/tests/test_dhsic.py
|
zdbzdb123123/hyppo
|
c22dcfb7bdf25c9945e6d4ddd7c6bfe5fcdd0cde
|
[
"MIT"
] | 253
|
2020-02-17T16:18:56.000Z
|
2022-03-30T16:55:02.000Z
|
hyppo/d_variate/tests/test_dhsic.py
|
zdbzdb123123/hyppo
|
c22dcfb7bdf25c9945e6d4ddd7c6bfe5fcdd0cde
|
[
"MIT"
] | 27
|
2020-03-02T21:07:41.000Z
|
2022-03-08T08:33:23.000Z
|
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from ...tools import linear, power
from .. import dHsic # type: ignore
class TestdHsicStat:
@pytest.mark.parametrize("n, obs_stat", [(100, 0.04561), (200, 0.03911)])
@pytest.mark.parametrize("obs_pvalue", [1 / 1000])
def test_linear_oned(self, n, obs_stat, obs_pvalue):
np.random.seed(123456789)
x, y = linear(n, 1)
stat, pvalue = dHsic(gamma=0.5).test(x, y)
assert_almost_equal(stat, obs_stat, decimal=2)
assert_almost_equal(pvalue, obs_pvalue, decimal=2)
class TestdHsicTypeIError:
def test_oned(self):
np.random.seed(123456789)
est_power = power(
"dhsic",
sim_type="multi",
sim="multimodal_independence",
n=100,
p=1,
alpha=0.05,
)
assert_almost_equal(est_power, 0.05, decimal=2)
| 27.323529
| 77
| 0.620022
| 124
| 929
| 4.475806
| 0.427419
| 0.086486
| 0.122523
| 0.075676
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082969
| 0.260495
| 929
| 33
| 78
| 28.151515
| 0.724891
| 0.012917
| 0
| 0.076923
| 0
| 0
| 0.059016
| 0.025137
| 0
| 0
| 0
| 0
| 0.153846
| 1
| 0.076923
| false
| 0
| 0.192308
| 0
| 0.346154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a7351f98fb299d1d929cbe7b4a8c9742f60b725d
| 2,844
|
py
|
Python
|
Pages/showHistory.py
|
ajaydeepsingh/ATLZoo
|
ab5ba27dc8602da39ce8bb47c4a050ff09d79b82
|
[
"MIT"
] | null | null | null |
Pages/showHistory.py
|
ajaydeepsingh/ATLZoo
|
ab5ba27dc8602da39ce8bb47c4a050ff09d79b82
|
[
"MIT"
] | null | null | null |
Pages/showHistory.py
|
ajaydeepsingh/ATLZoo
|
ab5ba27dc8602da39ce8bb47c4a050ff09d79b82
|
[
"MIT"
] | null | null | null |
from tkinter import *
from PIL import ImageTk, Image
import pymysql
from tkinter import messagebox
from tkinter import ttk
from datetime import datetime, timedelta
import decimal
import sys
class ATLzooShowHistory:
def __init__(self):
self.createShowHistoryWindow()
self.buildShowHistoryWindow(self.showHistoryWindow)
self.showHistoryWindow.mainloop()
sys.exit()
def createShowHistoryWindow(self):
self.showHistoryWindow=Toplevel()
self.showHistoryWindow.title("Zoo Atlanta")
self.showHistoryWindow.geometry("800x600")
def buildShowHistoryWindow(self, showHistoryWindow):
titleLabel= Label(showHistoryWindow,text = "Show History", font = "Verdana 16 bold ")
titleLabel.grid(row=1,column=2,sticky=W+E)
# Labels
showLabel = Label(showHistoryWindow,text = "Name")
showLabel.grid(row=2,column=0,pady=10)
self.showNameString = StringVar()
showNameEntry = Entry(showHistoryWindow, textvariable=self.showNameString, width=20)
showNameEntry.grid(row=2,column=1,pady=10)
exhibitLabel = Label(showHistoryWindow,text = "Exhibit")
exhibitLabel.grid(row=2,column=2,pady=10)
exhibitDefault = StringVar()
exhibitDefault.set("options")
exhibitMenu = OptionMenu(showHistoryWindow, exhibitDefault, "this","will","have","options","later")
exhibitMenu.grid(row=2, column=3,pady=10)
dateLabel = Label(showHistoryWindow,text = "Date")
dateLabel.grid(row=3, column=0,pady=10)
#showDateEntry = CalendarDialog.main()
showDateEntry= Entry(showHistoryWindow)
showDateEntry.grid(row=3, column=1,pady=10)
# Button
findShowsButton = Button(showHistoryWindow, text="Search", command=self.showHistoryWindowFindShowsButtonClicked)
findShowsButton.grid(row=3,column=2,pady=10)
selectShowTree = ttk.Treeview(showHistoryWindow, columns=("Name", "Exhibit", "Date"))
selectShowTree.heading('#0', text = "Name")
selectShowTree.heading('#1', text = "Exhibit")
selectShowTree.heading('#2', text = "Date")
selectShowTree.column('#0', width = 200, anchor = "center")
selectShowTree.column('#1', width = 200, anchor = "center")
selectShowTree.column('#2', width = 200, anchor = "center")
selectShowTree.place(x=20, y=130,width=600)
backButton = Button(showHistoryWindow, text="Back", command=self.showHistoryWindowBackButtonClicked)
backButton.place(x=310,y=370)
def showHistoryWindowFindShowsButtonClicked(self):
self.showHistoryWindow.destroy()
self.createShowsDetailWindow()
def showHistoryWindowBackButtonClicked(self):
self.showHistoryWindow.withdraw()
import visitorFunctionality
a = ATLzooShowHistory()
| 37.92
| 120
| 0.688819
| 275
| 2,844
| 7.109091
| 0.374545
| 0.085934
| 0.053197
| 0.028645
| 0.058312
| 0.040921
| 0
| 0
| 0
| 0
| 0
| 0.030197
| 0.196554
| 2,844
| 74
| 121
| 38.432432
| 0.825383
| 0.017581
| 0
| 0
| 0
| 0
| 0.058085
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092593
| false
| 0
| 0.148148
| 0
| 0.259259
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a739f43b0588186a90f5d8f8245209820d58a6a6
| 1,683
|
py
|
Python
|
setup.py
|
eltonn/toki
|
22efd9ce84414380904e3a5ac84e84de9bdb5bce
|
[
"Apache-2.0"
] | 1
|
2020-11-30T16:52:50.000Z
|
2020-11-30T16:52:50.000Z
|
setup.py
|
eltonn/toki
|
22efd9ce84414380904e3a5ac84e84de9bdb5bce
|
[
"Apache-2.0"
] | 7
|
2020-05-29T23:22:21.000Z
|
2020-11-30T20:49:37.000Z
|
setup.py
|
eltonn/toki
|
22efd9ce84414380904e3a5ac84e84de9bdb5bce
|
[
"Apache-2.0"
] | 1
|
2020-04-29T21:59:25.000Z
|
2020-04-29T21:59:25.000Z
|
"""The setup script."""
from setuptools import find_packages, setup
with open('README.md') as readme_file:
readme = readme_file.read()
with open('docs/release-notes.md') as history_file:
history = history_file.read()
requirements = []
dev_requirements = [
# lint and tools
'black',
'flake8',
'isort',
'mypy',
'pre-commit',
'seed-isort-config',
# publishing
're-ver',
'twine',
# docs
'jupyter-book',
'Sphinx>=2.0,<3',
# tests
'responses',
# devops
'docker-compose',
]
extra_requires = {'dev': requirements + dev_requirements}
setup(
author="Ivan Ogasawara",
author_email='ivan.ogasawara@gmail.com',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Toki: Database Expression API",
install_requires=requirements,
license="Apache Software License 2.0",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='toki',
name='toki',
packages=find_packages(include=['toki']),
test_suite='tests',
extras_require=extra_requires,
url='https://github.com/toki-project/toki',
version='0.0.1',
zip_safe=False,
)
| 26.296875
| 61
| 0.616756
| 185
| 1,683
| 5.513514
| 0.556757
| 0.130392
| 0.171569
| 0.127451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017028
| 0.232323
| 1,683
| 63
| 62
| 26.714286
| 0.772446
| 0.036839
| 0
| 0
| 0
| 0
| 0.430788
| 0.027933
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.019608
| 0
| 0.019608
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a73aed88b329c068d8782d3c38cdfcf8ff4be7a3
| 3,109
|
py
|
Python
|
dq0/sdk/estimators/data_handler/csv.py
|
gradientzero/dq0-sdk
|
90856dd5ac56216971ffe33004447fd037a21660
|
[
"0BSD"
] | 2
|
2020-09-16T09:28:00.000Z
|
2021-03-18T21:26:29.000Z
|
dq0/sdk/estimators/data_handler/csv.py
|
gradientzero/dq0-sdk
|
90856dd5ac56216971ffe33004447fd037a21660
|
[
"0BSD"
] | 22
|
2020-04-15T10:19:33.000Z
|
2022-03-12T00:20:57.000Z
|
dq0/sdk/estimators/data_handler/csv.py
|
gradientzero/dq0-sdk
|
90856dd5ac56216971ffe33004447fd037a21660
|
[
"0BSD"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Base data handler.
Copyright 2021, Gradient Zero
All rights reserved
"""
import logging
import dq0.sdk
from dq0.sdk.estimators.data_handler.base import BasicDataHandler
import pandas as pd
from sklearn.model_selection import train_test_split
logger = logging.getLogger(__name__)
class CSVDataHandler(BasicDataHandler):
"""Basic CSV Data Handler for all estimators"""
def __init__(self, pipeline_steps=None, pipeline_config_path=None, transformers_root_dir='.', log_key_string='.'):
super().__init__(pipeline_steps=pipeline_steps, pipeline_config_path=pipeline_config_path, transformers_root_dir=transformers_root_dir,
log_key_string=log_key_string)
self.log_key_string = log_key_string
def setup_data(self, data_source, train_size=0.66, **kwargs):
""" Setup data from CSV file. Using the CSV data source.
"""
# Check if the data source is of expected type
if not isinstance(data_source, dq0.sdk.data.text.csv.CSV):
raise ValueError("data_source attached to estimator and handled by the CSV data handler is not of Type: dq0.sdk.data.text.csv.CSV but: {}".format(type(data_source))) # noqa
if not hasattr(data_source, 'feature_cols') and not hasattr(data_source, 'target_cols'):
raise ValueError("CSV data source has not attribute feature_cols or target_cols. Please set this values on init or in the metadata")
self.data = super().setup_data(data_source=data_source, **kwargs)
# Check type of data, must be pandas.DataFrame
if not isinstance(self.data, pd.DataFrame):
raise ValueError("Data loaded is not of type pandas.DataFrame, but: {}".format(type(self.data)))
# run pipeline
if self.pipeline is not None:
self.data = self.pipeline.fit_transform(self.data)
X = self._get_X(self.data, data_source.feature_cols)
y = self._get_y(self.data, data_source.target_cols)
X_train, X_test, y_train, y_test = self._train_test_split(X, y, train_size=train_size)
return X_train, X_test, y_train, y_test
def get_input_dim(self, X):
if not len(X.shape) == 2:
raise ValueError("Feature Vector X is not 2-dim. The CSVDataHandler can only handle 2-dim DFs")
return X.shape[-1]
def get_output_dim(self, y):
return len(y.unique())
def _get_X(self, data, feature_cols):
"""Get X features vectors assuming data is a Pandas DataFrame"""
return data[feature_cols]
def _get_y(self, data, target_cols):
"""Get y target vector assuming data is a Pandas DataFrame"""
if len(target_cols) == 1:
return data[target_cols[-1]]
else:
raise ValueError("CSVDataHandler currently only supports one target_col (Check Metadata!); len(target_cols): {}".format(len(target_cols)))
def _train_test_split(self, X, y, train_size=0.66):
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_size)
return X_train, X_test, y_train, y_test
| 42.013514
| 184
| 0.690254
| 458
| 3,109
| 4.445415
| 0.279476
| 0.063851
| 0.02947
| 0.021611
| 0.179764
| 0.179764
| 0.082515
| 0.082515
| 0.060904
| 0.060904
| 0
| 0.008568
| 0.211644
| 3,109
| 73
| 185
| 42.589041
| 0.822113
| 0.13477
| 0
| 0.04878
| 0
| 0.02439
| 0.179082
| 0.009406
| 0
| 0
| 0
| 0
| 0
| 1
| 0.170732
| false
| 0
| 0.121951
| 0.02439
| 0.463415
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
595209a149b488a190b55a28e227e0653341e30a
| 407
|
py
|
Python
|
core/utils/template_updater.py
|
blockomat2100/vulnman
|
835ff3aae1168d8e2fa5556279bc86efd2e46472
|
[
"MIT"
] | 3
|
2021-12-22T07:02:24.000Z
|
2022-01-27T20:19:11.000Z
|
core/utils/template_updater.py
|
vulnman/vulnman
|
d48ee022bc0e4368060a990a527b1c7a5e437504
|
[
"MIT"
] | 44
|
2021-12-14T07:24:29.000Z
|
2022-03-23T07:01:16.000Z
|
core/utils/template_updater.py
|
blockomat2100/vulnman
|
835ff3aae1168d8e2fa5556279bc86efd2e46472
|
[
"MIT"
] | 1
|
2022-01-21T16:29:56.000Z
|
2022-01-21T16:29:56.000Z
|
import os
from django.conf import settings
from git import Repo
def update_vulnerability_templates():
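# If the template repo was already cloned, pull the latest changes;
# otherwise clone it fresh from the configured remote.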
template_dir = os.path.join(
settings.BASE_DIR, "resources/vuln_templates")
if os.path.isdir(template_dir):
repo = Repo(template_dir)
origin = repo.remotes.origin
origin.pull()
else:
Repo.clone_from(settings.VULNERABILITY_TEMPLATE_REPO, template_dir)
| 27.133333
| 75
| 0.712531
| 52
| 407
| 5.365385
| 0.5
| 0.157706
| 0.107527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.203931
| 407
| 14
| 76
| 29.071429
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0.058968
| 0.058968
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.25
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5952c5d9520173eb54626c3cf8e791dbdc5d7f03
| 656
|
py
|
Python
|
pages/basket_page.py
|
Espad/stepik_autotests_final_tasks
|
2d9e3408766cc00387a8ddd656006556cce567b4
|
[
"MIT"
] | null | null | null |
pages/basket_page.py
|
Espad/stepik_autotests_final_tasks
|
2d9e3408766cc00387a8ddd656006556cce567b4
|
[
"MIT"
] | null | null | null |
pages/basket_page.py
|
Espad/stepik_autotests_final_tasks
|
2d9e3408766cc00387a8ddd656006556cce567b4
|
[
"MIT"
] | null | null | null |
from .base_page import BasePage
from .locators import BasketPageLocators
class BasketPage(BasePage):
def should_be_empty_basket_message(self):
assert self.is_element_present(*BasketPageLocators.BASKET_EMPTY_MESSAGE), \
"Empty basket message element not found on page"
assert self.browser.find_element(*BasketPageLocators.BASKET_EMPTY_MESSAGE).text == "Your basket is empty. Continue shopping", \
"Invalid Basket empty message"
def should_be_empty_basket(self):
assert self.is_not_element_present(*BasketPageLocators.BASKET_ITEM_EXIST_SELECTOR), \
"Busket is not empty, but should be"
| 41
| 135
| 0.745427
| 80
| 656
| 5.85
| 0.4375
| 0.051282
| 0.115385
| 0.068376
| 0.094017
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184451
| 656
| 15
| 136
| 43.733333
| 0.874766
| 0
| 0
| 0
| 0
| 0
| 0.224085
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 1
| 0.181818
| false
| 0
| 0.181818
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
595945cb1c25f789695dd2fae8ba200ee3b77c80
| 1,454
|
py
|
Python
|
trypython/extlib/aiohttp/aiohttp01.py
|
devlights/try-python-extlib
|
9bfb649d3f5b249b67991a30865201be794e29a9
|
[
"MIT"
] | null | null | null |
trypython/extlib/aiohttp/aiohttp01.py
|
devlights/try-python-extlib
|
9bfb649d3f5b249b67991a30865201be794e29a9
|
[
"MIT"
] | null | null | null |
trypython/extlib/aiohttp/aiohttp01.py
|
devlights/try-python-extlib
|
9bfb649d3f5b249b67991a30865201be794e29a9
|
[
"MIT"
] | null | null | null |
"""
Sample for the aiohttp module.
Covers basic usage.
REFERENCES:: http://bit.ly/2O2lmeU
http://bit.ly/2O08oy3
"""
import asyncio
from asyncio import Future
from typing import List, Dict
import aiohttp
from trypython.common.commoncls import SampleBase
async def fetch_async(index: int, url: str) -> Dict:
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
html = await response.read()
return {
'index': index,
'resp': response,
'length': len(html),
'url': url
}
def build_futures() -> List[Future]:
urls = [
'https://www.google.co.jp/',
'https://stackoverflow.com/',
'https://www.yahoo.co.jp/',
'https://devlights.hatenablog.com/',
'https://docs.python.org/3.7/index.html',
'https://docs.python.org/ja/3/'
]
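# asyncio.ensure_future wraps each coroutine in a Task so all requests
# are scheduled concurrently before being awaited via asyncio.wait.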
futures = [asyncio.ensure_future(fetch_async(i, url)) for i, url in enumerate(urls, start=1)]
return futures
class Sample(SampleBase):
def exec(self):
# Use asyncio.gather instead if you want the results in the original order
future = asyncio.wait(build_futures(), return_when=asyncio.ALL_COMPLETED)
done, pending = asyncio.get_event_loop().run_until_complete(future)
for r in done:
tr = r.result()
print(f'{tr["index"]} {tr["url"]} {tr["length"]} bytes')
def go():
obj = Sample()
obj.exec()
| 25.068966
| 97
| 0.592847
| 174
| 1,454
| 4.890805
| 0.528736
| 0.016451
| 0.021152
| 0.042303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009381
| 0.26685
| 1,454
| 57
| 98
| 25.508772
| 0.788931
| 0.096974
| 0
| 0
| 0
| 0
| 0.183142
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.138889
| 0
| 0.305556
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
595abb6fdb13a008e2f80cf057085a05a97b14a8
| 1,860
|
py
|
Python
|
models.py
|
camerongray1515/HackDee-2015
|
6459c5bd3ad895e0a216ff61342eb73877dc9ee5
|
[
"MIT"
] | null | null | null |
models.py
|
camerongray1515/HackDee-2015
|
6459c5bd3ad895e0a216ff61342eb73877dc9ee5
|
[
"MIT"
] | 1
|
2015-04-04T20:55:52.000Z
|
2015-12-17T23:35:08.000Z
|
models.py
|
camerongray1515/HackDee-2015
|
6459c5bd3ad895e0a216ff61342eb73877dc9ee5
|
[
"MIT"
] | null | null | null |
from sqlalchemy import Column, String, Boolean, ForeignKey, Integer
from sqlalchemy.orm import relationship
from database import Base
from string import ascii_letters
from random import choice
class Playlist(Base):
__tablename__ = "playlists"
id = Column(String, primary_key=True)
name = Column(String)
def __init__(self, name):
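# Generate a random 5-letter id, retrying until it does not collide
# with an existing playlist row.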
generate = True
while generate:
random_string = "".join(choice(ascii_letters) for i in range(5))
p = Playlist.query.get(random_string)
# Only set value and exit loop if the id is not already in use
if p is None:
generate = False
self.id = random_string
self.name = name
@staticmethod
def get_videos(playlist_id):
videos = Video.query.filter(Video.playlist_id==playlist_id).order_by("rank desc")
playlist = []
for video in videos:
playlist_entry = {
"playlist_id": playlist_id,
"slug": video.slug,
"thumbnail_url": video.thumbnail_url,
"title": video.title,
"rank": video.rank
}
playlist.append(playlist_entry)
return playlist
def __repr__(self):
return "<Playlist ID:{0}, Name:{1}>".format(self.id, self.name)
class Video(Base):
__tablename__ = "video"
id = Column(Integer, primary_key=True)
playlist_id = Column(String, ForeignKey(Playlist.id))
playlist = relationship("Playlist")
slug = Column(String)
thumbnail_url = Column(String)
title = Column(String)
rank = Column(Integer)
def __init__(self, playlist_id, slug, thumbnail_url, title):
self.playlist_id = playlist_id
self.slug = slug
self.thumbnail_url = thumbnail_url
self.title = title
self.rank = 0
| 29.0625
| 89
| 0.614516
| 216
| 1,860
| 5.078704
| 0.333333
| 0.100273
| 0.065634
| 0.054695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003044
| 0.293548
| 1,860
| 63
| 90
| 29.52381
| 0.831811
| 0.032258
| 0
| 0
| 0
| 0
| 0.052836
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081633
| false
| 0
| 0.102041
| 0.020408
| 0.489796
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
595ecf0b3419dbc932591ff7beb5487e3db35f47
| 932
|
py
|
Python
|
script/rmLinebyIndFile.py
|
ASLeonard/danbing-tk
|
15540124ff408777d0665ace73698b0c2847d1cc
|
[
"BSD-3-Clause"
] | 17
|
2020-08-16T14:28:11.000Z
|
2022-03-23T23:30:47.000Z
|
script/rmLinebyIndFile.py
|
ASLeonard/danbing-tk
|
15540124ff408777d0665ace73698b0c2847d1cc
|
[
"BSD-3-Clause"
] | 7
|
2021-01-25T15:26:18.000Z
|
2022-03-31T14:30:46.000Z
|
script/rmLinebyIndFile.py
|
ASLeonard/danbing-tk
|
15540124ff408777d0665ace73698b0c2847d1cc
|
[
"BSD-3-Clause"
] | 2
|
2020-11-01T20:41:38.000Z
|
2021-05-29T03:22:24.000Z
|
#!/usr/bin/env python3
import sys
import numpy as np
if len(sys.argv) == 1 or sys.argv[1] == "-h" or sys.argv[1] == "--help":
print(
"""
Remove line indices (0-based) specified in 'index.txt'
usage: program [-k] index.txt inFile
-k Keep line indices in 'index.txt' instead of removing them.
""")
sys.exit()
rm = True
idxf = ""
infile = ""
for i, v in enumerate(sys.argv):
if i == 0:
continue
elif v == "-k":
rm = False
elif not idxf:
idxf = v
elif not infile:
infile = v
else:
assert False, f"too many arguments {v}"
if not idxf:
assert False, "index.txt not specified"
if not infile:
assert False, "inFile not specified"
ids = set(np.loadtxt(idxf, dtype=int, ndmin=1).tolist())
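# (ind not in ids) == rm keeps a line when either removing (rm=True) and its
# index is absent from index.txt, or keeping (-k, rm=False) and it is present.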
with open(infile) as f:
ind = 0
for line in f:
if (ind not in ids) == rm:
print(line, end='')
ind += 1
| 22.731707
| 78
| 0.55794
| 141
| 932
| 3.687943
| 0.453901
| 0.053846
| 0.046154
| 0.038462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01391
| 0.305794
| 932
| 40
| 79
| 23.3
| 0.789799
| 0.022532
| 0
| 0
| 0
| 0
| 0.104167
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.064516
| 0.064516
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5960088035b5df4aefdc1abf2b6dd9894a0c53be
| 5,978
|
py
|
Python
|
estimators.py
|
RakitinDen/pytorch-recursive-gumbel-max-trick
|
44f9854020e727946a074a6e53b20dd593f96cc1
|
[
"Apache-2.0"
] | 20
|
2021-12-03T13:20:17.000Z
|
2022-03-20T18:58:06.000Z
|
estimators.py
|
RakitinDen/pytorch-recursive-gumbel-max-trick
|
44f9854020e727946a074a6e53b20dd593f96cc1
|
[
"Apache-2.0"
] | null | null | null |
estimators.py
|
RakitinDen/pytorch-recursive-gumbel-max-trick
|
44f9854020e727946a074a6e53b20dd593f96cc1
|
[
"Apache-2.0"
] | null | null | null |
# Estimators are partially based on the "estimators.py" from the following repositories:
# https://github.com/agadetsky/pytorch-pl-variance-reduction
# https://github.com/sdrobert/pydrobert-pytorch
import torch
def uniform_to_exp(logits, uniform=None, enable_grad=False):
'''
Converts a tensor of independent uniform samples into a tensor of independent exponential samples
Tensor 'logits' contains log-means of the exponential distributions
Parameters of the exponentials can be represented as
lambda = exp(-logit), since expected value is equal to 1/lambda
'''
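# Inverse-CDF sketch: if U ~ Uniform(0, 1), then -log(U) ~ Exp(1), and scaling
# by the mean exp(logit) yields an exponential with rate exp(-logit).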
if uniform is not None:
assert uniform.size() == logits.size()
else:
uniform = torch.distributions.utils.clamp_probs(torch.rand_like(logits))
exp = torch.exp(logits + torch.log(-torch.log(uniform)))
if enable_grad:
exp.requires_grad_(True)
return exp
def reattach_exp_to_new_logits(logits, exp):
'''
Creates a new tensor of exponential variables that depends on logits in the same way
as if it was obtained by transforming uniform samples via 'uniform_to_exp'
Used in 'relax' to obtain gradient for the detached version of the logits
'''
exp = torch.exp(torch.log(exp.detach()) + logits - logits.detach())
return exp
def E_reinforce(loss_value, logits, exp, plus_samples=1, mask_unused_values=None, **kwargs):
'''
Returns the REINFORCE [williams1992] gradient estimate with respect to the exponential score
grad = loss(X) * (d / d logits) log p(E ; logits)
If plus_samples > 1, the estimate is E-REINFORCE+ / E-REINFORCE with LOO baseline [kool2019buy, richter2020vargrad]
'''
batch_size = logits.shape[0] // plus_samples
loss_value = loss_value.detach()
exp = exp.detach()
log_prob = -logits - torch.exp(torch.log(exp) - logits)
if mask_unused_values is not None:
log_prob = mask_unused_values(log_prob, **kwargs)
dims_except_batch = tuple(-i for i in range(1, logits.ndimension()))
log_prob = log_prob.sum(dim=dims_except_batch)
score = torch.autograd.grad([log_prob], [logits], grad_outputs=torch.ones_like(log_prob))[0]
if plus_samples > 1:
score_shape = (batch_size, plus_samples) + logits.shape[1:]
score = score.view(score_shape)
loss_value = loss_value.view(batch_size, plus_samples)
loss_value = loss_value - loss_value.mean(dim=-1)[:, None]
for i in range(logits.ndimension() - 1):
loss_value = loss_value.unsqueeze(-1)
grad = (loss_value * score).sum(dim=1) / (plus_samples - 1)
else:
for i in range(logits.ndimension() - 1):
loss_value = loss_value.unsqueeze(-1)
grad = loss_value * score
return grad
def T_reinforce(loss_value, struct_var, logits, f_log_prob, plus_samples=1, **kwargs):
'''
Returns the REINFORCE [williams1992] gradient estimate with respect to the score function of the execution trace
grad = loss(X) * (d / d logits) log p(T ; logits)
If plus_samples > 1, the estimate is T-REINFORCE+ / T-REINFORCE with LOO baseline [kool2019buy, richter2020vargrad]
'''
batch_size = logits.shape[0] // plus_samples
loss_value = loss_value.detach()
struct_var = struct_var.detach()
log_prob = f_log_prob(struct_var, logits, **kwargs)
score = torch.autograd.grad([log_prob], [logits], grad_outputs=torch.ones_like(log_prob))[0]
if plus_samples > 1:
score_shape = (batch_size, plus_samples) + logits.shape[1:]
score = score.view(score_shape)
loss_value = loss_value.view(batch_size, plus_samples)
loss_value = loss_value - loss_value.mean(dim=-1)[:, None]
for i in range(logits.ndimension() - 1):
loss_value = loss_value.unsqueeze(-1)
grad = (loss_value * score).sum(dim=1) / (plus_samples - 1)
else:
for i in range(logits.ndimension() - 1):
loss_value = loss_value.unsqueeze(-1)
grad = loss_value * score
return grad
def relax(loss_value, struct_var, logits, exp, critic, f_log_prob, f_cond, uniform=None, **kwargs):
'''
Returns the RELAX [grathwohl2017backpropagation] gradient estimate
grad = (loss(X(T)) - c(e_2)) * (d / d logits) log p(T ; logits) - (d / d logits) c(e_2) + (d / d logits) c(e_1)
e_1 ~ p(E ; logits) - exponential sample
T = T(e_1) - execution trace of the algorithm
X = X(T) - structured variable, obtained as the output of the algorithm
e_2 ~ p(E | T ; logits) - conditional exponential sample
c(.) - critic (typically, a neural network)
e_1 and e_2 are sampled using the reparameterization trick
(d / d logits) c(e_1) and (d / d logits) c(e_2) are the reparameterization gradients
In code, exp := e_1, cond_exp := e_2
'''
loss_value = loss_value.detach()
struct_var = struct_var.detach()
logits = logits.detach().requires_grad_(True)
exp = reattach_exp_to_new_logits(logits, exp)
cond_exp = f_cond(struct_var, logits, uniform, **kwargs)
baseline_exp = critic(exp)
baseline_cond = critic(cond_exp).squeeze()
diff = loss_value - baseline_cond
log_prob = f_log_prob(struct_var, logits, **kwargs)
score, = torch.autograd.grad(
[log_prob],
[logits],
grad_outputs = torch.ones_like(log_prob)
)
d_baseline_exp, = torch.autograd.grad(
[baseline_exp],
[logits],
create_graph=True,
retain_graph=True,
grad_outputs=torch.ones_like(baseline_exp)
)
d_baseline_cond, = torch.autograd.grad(
[baseline_cond],
[logits],
create_graph=True,
retain_graph=True,
grad_outputs=torch.ones_like(baseline_cond)
)
for i in range(logits.ndimension() - 1):
diff = diff.unsqueeze(-1)
grad = diff * score + d_baseline_exp - d_baseline_cond
assert grad.size() == logits.size()
return grad
| 36.674847
| 119
| 0.666109
| 837
| 5,978
| 4.561529
| 0.197133
| 0.075432
| 0.044264
| 0.061289
| 0.502095
| 0.469094
| 0.456784
| 0.427973
| 0.399686
| 0.399686
| 0
| 0.015125
| 0.225828
| 5,978
| 162
| 120
| 36.901235
| 0.809853
| 0.321512
| 0
| 0.550562
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022472
| 1
| 0.05618
| false
| 0
| 0.011236
| 0
| 0.123596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
596098c174bcd92a072f4a63dcf655eaaf7c83e8
| 1,332
|
py
|
Python
|
squareroot.py
|
martinaobrien/pands-problem-sets
|
5928f9ed2a743f46a9615f41192fd6dfb810b73c
|
[
"CNRI-Python"
] | null | null | null |
squareroot.py
|
martinaobrien/pands-problem-sets
|
5928f9ed2a743f46a9615f41192fd6dfb810b73c
|
[
"CNRI-Python"
] | null | null | null |
squareroot.py
|
martinaobrien/pands-problem-sets
|
5928f9ed2a743f46a9615f41192fd6dfb810b73c
|
[
"CNRI-Python"
] | null | null | null |
#Martina O'Brien 10/3/2019
#Problem Set 7 - squareroots
#Programming Code to determining the squareroots of positive floating point numbers
## Reference for try and except: https://www.w3schools.com/python/python_try_except.asp
while True: # this loop will run to allow the user to input a value again if they do not enter a positive integer
try:
num = input("Please enter a positive number: ") # Here the user will enter positive number.
number = float(num) # using a float(num) to allow numbers with decimal points
except ValueError:
print('Sorry this is not a number. Can you please try again and enter a positive number.')
# If the value is entered is correct then the value will move to the next statement.
continue #continue to the next interation of the loop
if number <= 0:
print('Please enter a number greater than zero')
# to ensure that the user inputs a positive number
continue # ask again; otherwise the loop would fall through with a non-positive number
break
# break from the while loop once a valid positive number is entered
number_sqrt = (number ** 0.5)
# Using ** 0.5 gives the squareroot of the num inputted
# Using %0.1f returns the answers to one decimal point
print("The square root of %0.1f is approx %0.1f" %(number, number_sqrt))
# print the result of the variable to one decimal place.
| 45.931034
| 114
| 0.693694
| 209
| 1,332
| 4.401914
| 0.464115
| 0.026087
| 0.045652
| 0.043478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01992
| 0.246246
| 1,332
| 28
| 115
| 47.571429
| 0.896414
| 0.600601
| 0
| 0
| 0
| 0
| 0.393443
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5961e885fedcd68b3653416c363d4e461726bdc8
| 5,578
|
py
|
Python
|
pywbemtools/pywbemlistener/_context_obj.py
|
pywbem/pywbemtools
|
6b7c3f124324fd3ab7cffb82bc98c8f9555317e4
|
[
"Apache-2.0"
] | 8
|
2017-04-01T13:55:00.000Z
|
2022-03-15T18:28:47.000Z
|
pywbemtools/pywbemlistener/_context_obj.py
|
pywbem/pywbemtools
|
6b7c3f124324fd3ab7cffb82bc98c8f9555317e4
|
[
"Apache-2.0"
] | 918
|
2017-03-03T14:29:03.000Z
|
2022-03-29T15:32:16.000Z
|
pywbemtools/pywbemlistener/_context_obj.py
|
pywbem/pywbemtools
|
6b7c3f124324fd3ab7cffb82bc98c8f9555317e4
|
[
"Apache-2.0"
] | 2
|
2020-01-17T15:56:46.000Z
|
2020-02-12T18:49:30.000Z
|
# (C) Copyright 2021 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Click context object for the pywbemlistener command.
"""
from __future__ import absolute_import, print_function, unicode_literals
import os
import click_spinner
class ContextObj(object):
# pylint: disable=useless-object-inheritance, too-many-instance-attributes
"""
Click context object for the pywbemlistener command.
This object is attached to the Click context, and is used as follows:
- Contains all general options for use by command functions.
- Serves as the central object for executing command functions.
- Has support for starting and stopping the Click spinner.
"""
spinner_envvar = 'PYWBEMLISTENER_SPINNER'
def __init__(self, output_format, logdir, verbose, pdb, warn):
"""
Parameters:
output_format (:term:`string` or `None`):
Value of --output-format general option, or `None` if not specified.
logdir (:term:`string` or `None`):
Value of --logdir general option, or `None` if not specified.
verbose (int):
Verbosity. See VERBOSE_* constants for a definition.
pdb (:class:`py:bool`):
Indicates whether the --pdb general option was specified.
warn (:class:`py:bool`):
Indicates whether the --warn general option was specified.
"""
self._output_format = output_format
self._logdir = logdir
self._verbose = verbose
self._pdb = pdb
self._warn = warn
self._spinner_enabled = None # Deferred init in getter
self._spinner_obj = click_spinner.Spinner()
def __repr__(self):
return 'ContextObj(at {:08x}, output_format={s.output_format}, ' \
'logdir={s.logdir}, verbose={s.verbose}, pdb={s.pdb}, ' \
'warn={s.warn}, spinner_enabled={s.spinner_enabled})' \
.format(id(self), s=self)
@property
def output_format(self):
"""
:term:`string`: String defining the output format requested. This may
be `None` meaning that the default format should be used or may be
one of the values in the TABLE_FORMATS variable.
"""
return self._output_format
@property
def logdir(self):
"""
:term:`string`: Path name of log directory for the 'run' command,
or `None` for no logging.
"""
return self._logdir
@property
def verbose(self):
"""
int: Verbosity. See VERBOSE_* constants for a definition.
"""
return self._verbose
@property
def pdb(self):
"""
bool: Indicates whether to break in the debugger.
"""
return self._pdb
@property
def warn(self):
"""
bool: Indicates whether to enable Python warnings.
"""
return self._warn
@property
def spinner_enabled(self):
"""
:class:`py:bool`: Indicates and controls whether the spinner is enabled.
If the spinner is enabled, subcommands will display a spinning wheel
while waiting for completion.
This attribute can be modified.
The initial state of the spinner is enabled, but it can be disabled by
setting the {0} environment variable to 'false', '0', or the empty
value.
""".format(self.spinner_envvar)
# Deferred initialization
if self._spinner_enabled is None:
value = os.environ.get(self.spinner_envvar, None)
if value is None:
# Default if not set
self._spinner_enabled = True
elif value == '0' or value == '' or value.lower() == 'false':
self._spinner_enabled = False
else:
self._spinner_enabled = True
return self._spinner_enabled
@spinner_enabled.setter
def spinner_enabled(self, enabled):
"""Setter method; for a description see the getter method."""
self._spinner_enabled = enabled
def spinner_start(self):
"""
Start the spinner, if the spinner is enabled.
"""
if self.spinner_enabled:
self._spinner_obj.start()
def spinner_stop(self):
"""
Stop the spinner, if the spinner is enabled.
"""
if self.spinner_enabled:
self._spinner_obj.stop()
def execute_cmd(self, cmd):
"""
Call the command function for a command, after enabling the spinner
(except when in debug mode) and after entering debug mode if desired.
"""
if not self.pdb:
self.spinner_start()
try:
if self.pdb:
import pdb # pylint: disable=import-outside-toplevel
pdb.set_trace() # pylint: disable=forgotten-debug-statement
cmd() # The command function for the pywbemlistener command
finally:
if not self.pdb:
self.spinner_stop()
| 31.693182
| 80
| 0.620115
| 674
| 5,578
| 5.020772
| 0.32641
| 0.052009
| 0.047872
| 0.028073
| 0.182624
| 0.155437
| 0.11052
| 0.065012
| 0.038416
| 0.038416
| 0
| 0.003307
| 0.295267
| 5,578
| 175
| 81
| 31.874286
| 0.857543
| 0.435281
| 0
| 0.184615
| 0
| 0
| 0.079352
| 0.037969
| 0
| 0
| 0
| 0
| 0
| 1
| 0.184615
| false
| 0
| 0.061538
| 0.015385
| 0.384615
| 0.015385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5963d226f34e95078375678dfe6099b78982408c
| 573
|
py
|
Python
|
userbot/modules/trd.py
|
LUCKYRAJPUTOP/VibeXUserbot
|
257c86ff1775592688815435d8c5ce91e1dd299e
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/trd.py
|
LUCKYRAJPUTOP/VibeXUserbot
|
257c86ff1775592688815435d8c5ce91e1dd299e
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/trd.py
|
LUCKYRAJPUTOP/VibeXUserbot
|
257c86ff1775592688815435d8c5ce91e1dd299e
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
import asyncio
from asyncio import sleep
from random import choice
from userbot.events import register
T_R_D = [
"@PrajjuS",
"@Vin02vin",
"@Iamsaisharan",
"@venomsamurai",
]
@register(outgoing=True, pattern="^.trd$")
async def truthrdare(trd):
"""Truth or Dare"""
await trd.edit("`Choosing Name...`")
await sleep(1.5)
await trd.edit("`..............`")
await sleep(1.5)
msg = await trd.edit("`Name is.....`")
await sleep(3)
await trd.delete()
await msg.reply("**∆ Truth or Dare ∆**\n\n__Name:__ " + choice(T_R_D))
| 22.92
| 74
| 0.602094
| 77
| 573
| 4.402597
| 0.506494
| 0.094395
| 0.106195
| 0.070796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015317
| 0.202443
| 573
| 24
| 75
| 23.875
| 0.722101
| 0
| 0
| 0.1
| 0
| 0
| 0.238267
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
596512b76ad497342148f69daf0ea980f36bbf49
| 2,384
|
py
|
Python
|
collectors/nct/collector.py
|
almeidaah/collectors
|
f03096855b8d702969d22af0b20a4d6a0d820bd0
|
[
"MIT"
] | 17
|
2016-06-28T21:20:21.000Z
|
2022-03-02T16:31:25.000Z
|
collectors/nct/collector.py
|
almeidaah/collectors
|
f03096855b8d702969d22af0b20a4d6a0d820bd0
|
[
"MIT"
] | 41
|
2016-04-04T10:36:45.000Z
|
2017-04-24T10:04:57.000Z
|
collectors/nct/collector.py
|
kenferrara/collectors
|
e6c1f45df3a1ffd5d60dada1816484812eb51417
|
[
"MIT"
] | 25
|
2016-05-18T09:27:42.000Z
|
2021-03-21T14:44:31.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import zipfile
import logging
import requests
import tempfile
import contextlib
from .parser import parse_record
from .. import base
logger = logging.getLogger(__name__)
# Module API
def collect(conf, conn, nct_xml_dump_url):
'''
Downloads and parse data from NCT's XML dump. Considering you want the data
from 2017-01-01 until 2017-02-01, the XML dump can be downloaded from:
https://clinicaltrials.gov/search?resultsxml=True&rcv_s=01/01/2017&rcv_e=01/02/2017
'''
base.helpers.start(conf, 'nct', {'url': nct_xml_dump_url})
with tempfile.TemporaryFile() as fp:
_download_to_file(nct_xml_dump_url, fp)
file_count = 0
for identifier, record_fp in _iter_nct_dump_files(fp):
base.config.SENTRY.extra_context({
'url': nct_xml_dump_url,
'identifier': identifier,
})
rec = parse_record(record_fp)
query = {'nct_id': rec['nct_id']}
if rec.table in conn['warehouse'].tables:
existing = conn['warehouse'][rec.table].find_one(**query)
if existing:
rec['nct_id'] = existing['nct_id']
rec.write(conf, conn)
file_count += 1
logger.info('Collected %s NCT records', file_count)
base.helpers.stop(conf, 'nct', {
'url': nct_xml_dump_url,
'collected': file_count,
})
def _download_to_file(url, fp):
CHUNK_SIZE = 1024 * 1024 # 1 MB
bytes_to_mb = lambda value: value / 1048576.0
with contextlib.closing(requests.get(url, stream=True)) as response:
completed_bytes = 0
chunk_count = 0
for block in response.iter_content(CHUNK_SIZE):
fp.write(block)
completed_bytes += len(block)
chunk_count += 1
if chunk_count % 1000 == 0:
logger.debug('Downloaded %.2f MB', bytes_to_mb(completed_bytes))
fp.seek(0)
def _iter_nct_dump_files(fp):
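# Yields (identifier, open file object) pairs for each XML record in the
# zip archive; the identifier is the filename without its extension.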
with zipfile.ZipFile(fp) as archive:
for filename in archive.namelist():
identifier = filename.split('.')[0]
with archive.open(filename, 'rU') as rec_file:
yield identifier, rec_file
| 32.657534
| 87
| 0.633389
| 314
| 2,384
| 4.544586
| 0.382166
| 0.034338
| 0.035039
| 0.04555
| 0.068676
| 0.032235
| 0.032235
| 0
| 0
| 0
| 0
| 0.035897
| 0.263842
| 2,384
| 72
| 88
| 33.111111
| 0.777208
| 0.113255
| 0
| 0.074074
| 0
| 0
| 0.05795
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.203704
| 0
| 0.259259
| 0.018519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5969ba0b61715dcc3c0755544d810b16a9ba7f4b
| 6,116
|
py
|
Python
|
src/contexts/context_local_structure.py
|
aindrila-ghosh/SmartReduce
|
b2b28055bc0b269155270c1f8206445e405e8d9b
|
[
"MIT"
] | null | null | null |
src/contexts/context_local_structure.py
|
aindrila-ghosh/SmartReduce
|
b2b28055bc0b269155270c1f8206445e405e8d9b
|
[
"MIT"
] | null | null | null |
src/contexts/context_local_structure.py
|
aindrila-ghosh/SmartReduce
|
b2b28055bc0b269155270c1f8206445e405e8d9b
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import Isomap
from scipy.spatial.distance import pdist
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score, LeaveOneOut
RANDOM_STATE = 42
def calculate_pairwise_distances(df_for_Box_Plot_features, points, distance='euclidean'):
"""
Computes Pairwise euclidean distances
Parameters
----------
df_for_Box_Plot_features : list
original features
points : nD array
embedding
distance: String
distance, default value is "euclidean"
Returns
----------
distance_original : nD array
euclidean distances in the original dataset
distance_embeddings : nD array
euclidean distances in the embedding
"""
distance_original = pdist(df_for_Box_Plot_features, metric=distance)
distance_embeddings = pdist(points, metric=distance)
return distance_original, distance_embeddings
def calculate_geodesic_distance(df_for_Box_Plot_features, points):
"""
Computes Pairwise geodesic distances
Parameters
----------
df_for_Box_Plot_features : list
original features
points : nD array
embedding
Returns
----------
geo_distance_original : nD array
geodesic distances in the original dataset
geo_distance_embeddings : nD array
geodesic distances in the embedding
"""
    embedding = Isomap(n_components=2)
    embedding.fit(df_for_Box_Plot_features)
    unsquareform = lambda a: a[np.nonzero(np.triu(a, 1))]  ## condense the square distance matrix: keep the nonzero upper triangle
    geo_distance_original = unsquareform(embedding.dist_matrix_)  ## condensed matrix of pairwise geodesic distances among points
    embedding1 = Isomap(n_components=2)
    embedding1.fit(points)
    embedding1.dist_matrix_[embedding1.dist_matrix_ == 0] = -9999  ## mark 0 distances with a sentinel so unsquareform does not drop them
    geo_distance_embeddings = unsquareform(embedding1.dist_matrix_)  ## condensed matrix of pairwise geodesic distances among points
    geo_distance_embeddings[geo_distance_embeddings == -9999] = 0  ## turn the sentinel values back into 0 distances
return geo_distance_original, geo_distance_embeddings
def generate_histograms(distance_original, distance_embeddings, no_of_bins):
"""
Generates histograms
Parameters
----------
distance_original : nD array
original distances
distance_embeddings : nD array
embedding distances
no_of_bins : integer
number of bins in the histogram
Returns
----------
bin_edges_original : list
bin edges
"""
countsOriginal, bin_edges_original = np.histogram(distance_original, bins = no_of_bins)
#print("Original Distance Binned Element Counts: ", countsOriginal)
countsEmbedding, bin_edges_embedding = np.histogram(distance_embeddings, bins = no_of_bins)
#print("Embedding Distance Binned Element Counts: ", countsEmbedding)
    plt.figure()
    plt.hist(distance_original, bins=no_of_bins)
    plt.title("Pairwise distances in original data")  # set the title before show(), or it never appears
    plt.show()
    plt.figure()
    plt.hist(distance_embeddings, bins=no_of_bins)
    plt.title("Pairwise distances in embeddings")
    plt.show()
return bin_edges_original
def calculate_box_plot_details(distance_original, distance_embeddings, bin_edges_original):
    """
    Computes the details of the Box-plots: groups the embedding distances by
    the histogram bin that the corresponding original distance falls into.
    """
    inds_original = np.digitize(distance_original, bins=bin_edges_original)
    ##print("number of bins = ", np.unique(inds_original))
    # np.digitize returns bin indices in 1..len(bin_edges_original) here, so a
    # list of lists replaces the 51 hard-coded global arrays.
    data_to_plot = [[] for _ in range(len(bin_edges_original))]
    for ind, dist in zip(inds_original, distance_embeddings):
        data_to_plot[ind - 1].append(dist)
    return data_to_plot
def generate_box_plots(data_to_plot):
    """
    Generates Box-plots
    """
    fig = plt.figure(1, figsize=(14, 10))
    # Create an axes instance
    ax = fig.add_subplot(111)
    # Create the boxplot; patch_artist=True makes the boxes fillable patches
    bp = ax.boxplot(data_to_plot, patch_artist=True)
    ## change outline color, fill color and linewidth of the boxes
    for box in bp['boxes']:
        box.set(color='#7570b3', linewidth=2)  # outline color
        box.set(facecolor='#1b9e77')           # fill color
    ## change color and linewidth of the whiskers
    for whisker in bp['whiskers']:
        whisker.set(color='#7570b3', linewidth=2)
    ## change color and linewidth of the caps
    for cap in bp['caps']:
        cap.set(color='#7570b3', linewidth=2)
    ## change color and linewidth of the medians
    for median in bp['medians']:
        median.set(color='#b2df8a', linewidth=2)
    ## change the style of fliers and their fill
    for flier in bp['fliers']:
        flier.set(marker='o', color='#e7298a', alpha=0.5)
    # Save the figure only after the styling is applied
    fig.savefig('fig1.png', bbox_inches='tight')
def gen_error_1_NN(embedding, labels):
"""
Computes 1-NN generalization error
Parameters
----------
embedding : nD array
embedding
labels : list
original labels
Returns
----------
gen_error : float
generalization error
"""
model = KNeighborsClassifier(n_neighbors=1)
    loo = LeaveOneOut()
    scores = cross_val_score(model, X=embedding, y=labels, cv=loo)
    gen_error = 1 - np.mean(scores)
return gen_error
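# Minimal end-to-end sketch (illustrative, not part of the original module):
# random data stands in for the real features and labels; any (n_samples,
# n_features) matrix with matching labels should work the same way.
if __name__ == '__main__':
    rng = np.random.RandomState(RANDOM_STATE)
    X = rng.rand(60, 5)
    emb = Isomap(n_components=2).fit_transform(X)
    d_orig, d_emb = calculate_pairwise_distances(X, emb)
    edges = generate_histograms(d_orig, d_emb, no_of_bins=50)
    generate_box_plots(calculate_box_plot_details(d_orig, d_emb, edges))
    print('1-NN generalization error:', gen_error_1_NN(emb, rng.randint(0, 2, size=60)))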
| 28.985782
| 137
| 0.680347
| 745
| 6,116
| 5.404027
| 0.311409
| 0.062593
| 0.011923
| 0.017884
| 0.23696
| 0.211624
| 0.121709
| 0.121709
| 0.121709
| 0.121709
| 0
| 0.036099
| 0.225474
| 6,116
| 210
| 138
| 29.12381
| 0.813806
| 0.332243
| 0
| 0.028986
| 0
| 0
| 0.045977
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.086957
| 0
| 0.246377
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
596bbf6cce06d70f6a325d7a5bf75a3e2280c89c
| 1,110
|
py
|
Python
|
hparams.py
|
TanUkkii007/vqvae
|
6ac433490fd827174e5b925780d32bea14bfb097
|
[
"MIT"
] | 2
|
2019-03-30T16:49:11.000Z
|
2019-12-18T22:50:56.000Z
|
hparams.py
|
TanUkkii007/vqvae
|
6ac433490fd827174e5b925780d32bea14bfb097
|
[
"MIT"
] | null | null | null |
hparams.py
|
TanUkkii007/vqvae
|
6ac433490fd827174e5b925780d32bea14bfb097
|
[
"MIT"
] | 1
|
2020-01-06T12:37:00.000Z
|
2020-01-06T12:37:00.000Z
|
import tensorflow as tf
default_params = tf.contrib.training.HParams(
# Encoder
encoder_num_hiddens=128,
encoder_num_residual_hiddens=32,
encoder_num_residual_layers=2,
# Decoder
decoder_num_hiddens=128,
decoder_num_residual_hiddens=32,
decoder_num_residual_layers=2,
embedding_dim=64,
num_embeddings=512,
commitment_cost=0.25,
# VectorQuantizer
vector_quantizer="VectorQuantizer",
sampling_count=10,
# Training
batch_size=32,
learning_rate=3e-4,
save_summary_steps=100,
save_checkpoints_steps=500,
keep_checkpoint_max=200,
keep_checkpoint_every_n_hours=1,
log_step_count_steps=1,
shuffle_buffer_size=4,
# Validation
num_evaluation_steps=32,
eval_start_delay_secs=3600, # 1h: disable time based evaluation
eval_throttle_secs=86400, # 24h: disable time based evaluation
# Misc
logfile="log.txt",
)
def hparams_debug_string(hparams):
values = hparams.values()
hp = [' %s: %s' % (name, values[name]) for name in sorted(values)]
return 'Hyperparameters:\n' + '\n'.join(hp)
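# Minimal usage sketch (illustrative, not part of the original module); assumes
# a TensorFlow 1.x environment where tf.contrib.training.HParams is available.
if __name__ == '__main__':
    hp = default_params.parse('batch_size=64,learning_rate=1e-4')
    print(hparams_debug_string(hp))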
| 23.617021
| 71
| 0.711712
| 145
| 1,110
| 5.117241
| 0.6
| 0.059299
| 0.03504
| 0.053908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058231
| 0.195496
| 1,110
| 46
| 72
| 24.130435
| 0.772676
| 0.112613
| 0
| 0
| 0
| 0
| 0.05123
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.033333
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
596db7d21a1d0b9384a4b3ba2a66f7f8e7dbfeba
| 1,080
|
py
|
Python
|
coroutines.py
|
PraveenMathew92/python-chatroom-asyncio
|
8b3048f17b76e649aff6bcbb7d084362cab32b58
|
[
"MIT"
] | null | null | null |
coroutines.py
|
PraveenMathew92/python-chatroom-asyncio
|
8b3048f17b76e649aff6bcbb7d084362cab32b58
|
[
"MIT"
] | null | null | null |
coroutines.py
|
PraveenMathew92/python-chatroom-asyncio
|
8b3048f17b76e649aff6bcbb7d084362cab32b58
|
[
"MIT"
] | null | null | null |
"""
File to demonstrate the coroutines api in python
"""
import asyncio
async def coroutine(caller):
    print(f'entering {caller}')
await asyncio.sleep(1)
print(f'exited {caller}')
"""
asyncio.run takes a coroutine and
A RuntimeWarning is generated if the coroutine is not awaited
Eg: coroutine('without_run')
"""
asyncio.run(coroutine('coroutine_call'))
"""
create_task creates a task which runs a coroutine in the event loop
"""
async def task_runner():
task = asyncio.create_task(coroutine('task_call'))
await task
asyncio.run(task_runner())
print("""
\t\t\tRunning with gather task
""")
async def gather_runner():
"""
asyncio.gather takes in a bunch of coroutines and runs them concurrently
"""
await asyncio.gather(
(coroutine('gather')),
(task_runner()))
asyncio.run(gather_runner())
"""
OUTPUT:
entering $coroutine_call
exited coroutine_call
entering $task_call
exited task_call
Running with gather task
entering $gather
entering $task_call
exited gather
exited task_call
"""
| 16.363636
| 76
| 0.694444
| 142
| 1,080
| 5.169014
| 0.366197
| 0.054496
| 0.038147
| 0.059946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001153
| 0.197222
| 1,080
| 66
| 77
| 16.363636
| 0.845444
| 0.044444
| 0
| 0
| 0
| 0
| 0.185039
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5972ea55ea758af92089d41c09629539cc06ea40
| 12,048
|
py
|
Python
|
test/test_subprocess.py
|
python-useful-helpers/exec-helpers
|
3e0adfa7dded72ac1c9c93bd88db070f4c9050b6
|
[
"Apache-2.0"
] | 12
|
2018-03-23T23:37:40.000Z
|
2021-07-16T16:07:28.000Z
|
test/test_subprocess.py
|
penguinolog/exec-helpers
|
0784a4772f6e9937540b266fdbb1f5a060fd4b76
|
[
"Apache-2.0"
] | 111
|
2018-03-26T14:10:52.000Z
|
2021-07-12T07:12:45.000Z
|
test/test_subprocess.py
|
penguinolog/exec-helpers
|
0784a4772f6e9937540b266fdbb1f5a060fd4b76
|
[
"Apache-2.0"
] | 6
|
2018-03-26T13:37:21.000Z
|
2018-09-07T03:35:09.000Z
|
# Copyright 2018 - 2020 Alexey Stepanov aka penguinolog.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Standard Library
import logging
import random
import subprocess
import typing
from unittest import mock
# External Dependencies
import pytest
# Package Implementation
import exec_helpers
from exec_helpers import _subprocess_helpers
from exec_helpers import proc_enums
from exec_helpers.subprocess import SubprocessExecuteAsyncResult
pytestmark = pytest.mark.skip("Rewrite whole execute tests.")
# All test coroutines will be treated as marked.
command = "ls ~\nline 2\nline 3\nline с кирилицей"
command_log = f"Executing command:\n{command.rstrip()!r}\n"
print_stdin = 'read line; echo "$line"'
default_timeout = 60 * 60 # 1 hour
class FakeFileStream:
"""Mock-like object for stream emulation."""
def __init__(self, *args):
self.__src = list(args)
self.closed = False
def __iter__(self):
"""Normally we iter over source."""
for _ in range(len(self.__src)):
yield self.__src.pop(0)
def fileno(self):
return hash(tuple(self.__src))
def close(self):
"""We enforce close."""
self.closed = True
def read_stream(stream: FakeFileStream):
return tuple([line for line in stream])
configs = {
"positive_simple": dict(
ec=0, stdout=(b" \n", b"2\n", b"3\n", b" \n"), stderr=(), stdin=None, open_stdout=True, open_stderr=True
),
"with_stderr": dict(
ec=0,
stdout=(b" \n", b"2\n", b"3\n", b" \n"),
stderr=(b" \n", b"0\n", b"1\n", b" \n"),
stdin=None,
open_stdout=True,
open_stderr=True,
),
"negative": dict(
ec=1,
stdout=(b" \n", b"2\n", b"3\n", b" \n"),
stderr=(b" \n", b"0\n", b"1\n", b" \n"),
stdin=None,
open_stdout=True,
open_stderr=True,
),
"with_stdin_str": dict(
ec=0, stdout=(b" \n", b"2\n", b"3\n", b" \n"), stderr=(), stdin="stdin", open_stdout=True, open_stderr=True
),
"with_stdin_bytes": dict(
ec=0, stdout=(b" \n", b"2\n", b"3\n", b" \n"), stderr=(), stdin=b"stdin", open_stdout=True, open_stderr=True
),
"with_stdin_bytearray": dict(
ec=0,
stdout=(b" \n", b"2\n", b"3\n", b" \n"),
stderr=(),
stdin=bytearray(b"stdin"),
open_stdout=True,
open_stderr=True,
),
"no_stderr": dict(
ec=0, stdout=(b" \n", b"2\n", b"3\n", b" \n"), stderr=(), stdin=None, open_stdout=True, open_stderr=False
),
"no_stdout": dict(ec=0, stdout=(), stderr=(), stdin=None, open_stdout=False, open_stderr=False),
}
def pytest_generate_tests(metafunc):
"""Tests parametrization."""
if "run_parameters" in metafunc.fixturenames:
metafunc.parametrize(
"run_parameters",
[
"positive_simple",
"with_stderr",
"negative",
"with_stdin_str",
"with_stdin_bytes",
"with_stdin_bytearray",
"no_stderr",
"no_stdout",
],
indirect=True,
)
@pytest.fixture
def run_parameters(request):
"""Tests configuration apply."""
return configs[request.param]
@pytest.fixture
def exec_result(run_parameters):
return exec_helpers.ExecResult(
cmd=command,
stdin=run_parameters["stdin"],
stdout=tuple([line for line in run_parameters["stdout"]]) if run_parameters["stdout"] else None,
stderr=tuple([line for line in run_parameters["stderr"]]) if run_parameters["stderr"] else None,
exit_code=run_parameters["ec"],
)
@pytest.fixture
def execute(mocker, exec_result):
return mocker.patch("exec_helpers.subprocess.Subprocess.execute", name="execute", return_value=exec_result)
@pytest.fixture
def popen(mocker, run_parameters):
mocker.patch("psutil.Process")
def create_mock(
ec: typing.Union[exec_helpers.ExitCodes, int] = exec_helpers.ExitCodes.EX_OK,
stdout: typing.Optional[typing.Tuple] = None,
stderr: typing.Optional[typing.Tuple] = None,
**kwargs,
):
"""Parametrized code."""
proc = mock.Mock()
proc.configure_mock(pid=random.randint(1025, 65536))
if stdout is None:
proc.configure_mock(stdout=None)
else:
proc.attach_mock(FakeFileStream(*stdout), "stdout")
if stderr is None:
proc.configure_mock(stderr=None)
else:
proc.attach_mock(FakeFileStream(*stderr), "stderr")
proc.attach_mock(mock.Mock(return_value=int(ec)), "wait")
proc.configure_mock(returncode=int(ec))
run_shell = mocker.patch("subprocess.Popen", name="popen", return_value=proc)
return run_shell
return create_mock(**run_parameters)
def test_001_execute_async(popen, subprocess_logger, run_parameters) -> None:
"""Test low level API."""
runner = exec_helpers.Subprocess()
res = runner._execute_async(
command,
stdin=run_parameters["stdin"],
open_stdout=run_parameters["open_stdout"],
open_stderr=run_parameters["open_stderr"],
)
assert isinstance(res, SubprocessExecuteAsyncResult)
assert res.interface.wait() == run_parameters["ec"]
assert res.interface.returncode == run_parameters["ec"]
stdout = run_parameters["stdout"]
stderr = run_parameters["stderr"]
if stdout is not None:
assert read_stream(res.stdout) == stdout
else:
assert res.stdout is stdout
if stderr is not None:
assert read_stream(res.stderr) == stderr
else:
assert res.stderr is stderr
if run_parameters["stdin"] is None:
stdin = None
elif isinstance(run_parameters["stdin"], bytes):
stdin = run_parameters["stdin"]
elif isinstance(run_parameters["stdin"], str):
stdin = run_parameters["stdin"].encode(encoding="utf-8")
else:
stdin = bytes(run_parameters["stdin"])
if stdin:
assert res.stdin is None
popen.assert_called_once_with(
args=[command],
stdout=subprocess.PIPE if run_parameters["open_stdout"] else subprocess.DEVNULL,
stderr=subprocess.PIPE if run_parameters["open_stderr"] else subprocess.DEVNULL,
stdin=subprocess.PIPE,
shell=True,
cwd=run_parameters.get("cwd", None),
env=run_parameters.get("env", None),
universal_newlines=False,
**_subprocess_helpers.subprocess_kw,
)
if stdin is not None:
res.interface.stdin.write.assert_called_once_with(stdin)
res.interface.stdin.close.assert_called_once()
def test_002_execute(popen, subprocess_logger, exec_result, run_parameters) -> None:
"""Test API without checkers."""
runner = exec_helpers.Subprocess()
res = runner.execute(
command,
stdin=run_parameters["stdin"],
open_stdout=run_parameters["open_stdout"],
open_stderr=run_parameters["open_stderr"],
)
assert isinstance(res, exec_helpers.ExecResult)
assert res == exec_result
popen().wait.assert_called_once_with(timeout=default_timeout)
assert subprocess_logger.mock_calls[0] == mock.call.log(level=logging.DEBUG, msg=command_log)
def test_003_context_manager(mocker, popen, subprocess_logger, exec_result, run_parameters) -> None:
"""Test context manager for threads synchronization."""
lock_mock = mocker.patch("threading.RLock")
with exec_helpers.Subprocess() as runner:
res = runner.execute(command, stdin=run_parameters["stdin"])
        lock_mock.acquire.assert_called_once()
        lock_mock.release.assert_called_once()
assert isinstance(res, exec_helpers.ExecResult)
assert res == exec_result
def test_004_check_call(execute, exec_result, subprocess_logger) -> None:
"""Test exit code validator."""
runner = exec_helpers.Subprocess()
if exec_result.exit_code == exec_helpers.ExitCodes.EX_OK:
assert runner.check_call(command, stdin=exec_result.stdin) == exec_result
else:
with pytest.raises(exec_helpers.CalledProcessError) as e:
runner.check_call(command, stdin=exec_result.stdin)
exc: exec_helpers.CalledProcessError = e.value
assert exc.cmd == exec_result.cmd
assert exc.returncode == exec_result.exit_code
assert exc.stdout == exec_result.stdout_str
assert exc.stderr == exec_result.stderr_str
assert exc.result == exec_result
assert exc.expected == (proc_enums.EXPECTED,)
assert subprocess_logger.mock_calls[-1] == mock.call.error(
msg=f"Command {exc.result.cmd!r} returned exit code {exc.result.exit_code!s} "
f"while expected {exc.expected!r}"
)
def test_005_check_call_no_raise(execute, exec_result, subprocess_logger) -> None:
"""Test exit code validator in permissive mode."""
runner = exec_helpers.Subprocess()
res = runner.check_call(command, stdin=exec_result.stdin, raise_on_err=False)
assert res == exec_result
if exec_result.exit_code != exec_helpers.ExitCodes.EX_OK:
expected = (proc_enums.EXPECTED,)
assert subprocess_logger.mock_calls[-1] == mock.call.error(
msg=f"Command {res.cmd!r} returned exit code {res.exit_code!s} while expected {expected!r}"
)
def test_006_check_call_expect(execute, exec_result, subprocess_logger) -> None:
"""Test exit code validator with custom return codes."""
runner = exec_helpers.Subprocess()
assert runner.check_call(command, stdin=exec_result.stdin, expected=[exec_result.exit_code]) == exec_result
def test_007_check_stderr(execute, exec_result, subprocess_logger) -> None:
"""Test STDERR content validator."""
runner = exec_helpers.Subprocess()
if not exec_result.stderr:
assert runner.check_stderr(command, stdin=exec_result.stdin, expected=[exec_result.exit_code]) == exec_result
else:
with pytest.raises(exec_helpers.CalledProcessError) as e:
runner.check_stderr(command, stdin=exec_result.stdin, expected=[exec_result.exit_code])
exc: exec_helpers.CalledProcessError = e.value
assert exc.result == exec_result
assert exc.cmd == exec_result.cmd
assert exc.returncode == exec_result.exit_code
assert exc.stdout == exec_result.stdout_str
assert exc.stderr == exec_result.stderr_str
assert exc.result == exec_result
assert subprocess_logger.mock_calls[-1] == mock.call.error(
msg=f"Command {exc.result.cmd!r} output contains STDERR while not expected\n"
f"\texit code: {exc.result.exit_code!s}"
)
def test_008_check_stderr_no_raise(execute, exec_result, subprocess_logger) -> None:
"""Test STDERR content validator in permissive mode."""
runner = exec_helpers.Subprocess()
assert (
runner.check_stderr(command, stdin=exec_result.stdin, expected=[exec_result.exit_code], raise_on_err=False)
== exec_result
)
def test_009_call(popen, subprocess_logger, exec_result, run_parameters) -> None:
"""Test callable."""
runner = exec_helpers.Subprocess()
res = runner(
command,
stdin=run_parameters["stdin"],
open_stdout=run_parameters["open_stdout"],
open_stderr=run_parameters["open_stderr"],
)
assert isinstance(res, exec_helpers.ExecResult)
assert res == exec_result
popen().wait.assert_called_once_with(timeout=default_timeout)
| 34.820809
| 117
| 0.664011
| 1,549
| 12,048
| 4.962556
| 0.183344
| 0.05724
| 0.030051
| 0.028099
| 0.508781
| 0.458827
| 0.422792
| 0.392351
| 0.34734
| 0.30376
| 0
| 0.009221
| 0.216882
| 12,048
| 345
| 118
| 34.921739
| 0.805511
| 0.099187
| 0
| 0.321429
| 0
| 0.007937
| 0.101301
| 0.011338
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.079365
| false
| 0
| 0.039683
| 0.015873
| 0.150794
| 0.003968
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
597345ee49817e67d67ebede702d14893a6e8c4d
| 4,732
|
py
|
Python
|
Lib/featureMan/familyFeatures.py
|
typoman/featureman
|
f115ea8d3faae042845cfca9502d91da88405c68
|
[
"MIT"
] | 13
|
2019-07-21T14:00:49.000Z
|
2019-07-29T21:43:03.000Z
|
Lib/featureMan/familyFeatures.py
|
typoman/featureman
|
f115ea8d3faae042845cfca9502d91da88405c68
|
[
"MIT"
] | 1
|
2019-07-28T12:06:23.000Z
|
2019-07-28T12:06:23.000Z
|
Lib/featureMan/familyFeatures.py
|
typoman/featureman
|
f115ea8d3faae042845cfca9502d91da88405c68
|
[
"MIT"
] | null | null | null |
from featureMan.otSingleSubFeatures import *
from featureMan.otNumberFeatures import *
from featureMan.otLanguages import *
from featureMan.otLocalized import *
from featureMan.otLigatureFeatures import *
from featureMan.otMark import mark
from featureMan.otSyntax import fontDic, GDEF
from featureMan.otKern import kern
from featureMan.otCursive import cursive
def l2str(l):
return '\n'.join(l)
def generateFeatures(f, marksToSkip=None, include=None, base="", path=""):
from time import time
start = time()
    if marksToSkip is None:
marksToSkip = set("a c d e i k l n o r s t u y z A C D E G I J K L N O R S T U Y Z dotlessi acute breve caron cedilla circumflex dieresis dotaccent grave hungarumlaut macron ogonek ring tilde acute.case breve.case caron.case circumflex.case dieresis.case dotaccent.case grave.case hungarumlaut.case macron.case ring.case tilde.case caronslovak commaturnedtop commaaccent".split(" "))
fDic = fontDic(f, marksToSkip)
aaltSet = set()
interpretTime = time()
print("Elapsed time for interpreting the ufo data: %s" %(interpretTime - start))
marksSet = set()
basesSet = set()
ligaturesSet = set()
componentsSet = set()
classes = {}
allFeatures = [
ccmpFeature, smcpFeature, caseFeature, arabicFeatures,
lnumFeature, onumFeature, pnumFeature, tnumFeature,
zeroFeature, localized,
ss01Feature, ss02Feature, ss03Feature, ss04Feature, ss05Feature, ss06Feature, ss07Feature,
ss08Feature, ss09Feature, ss10Feature, ss11Feature, ss12Feature, ss13Feature, ss14Feature,
ss15Feature, ss16Feature, ss17Feature, ss18Feature, ss19Feature, ss20Feature,
rligFeature, ligaFeature, dligFeature,
cursive, kern, mark
]
middleSyntax = []
for feaClass in allFeatures:
fea = feaClass(fDic, classes)
feaSyntax = fea.syntax()
if feaSyntax:
middleSyntax.append((fea.tag, feaSyntax))
classes.update(fea.classes)
aaltSet.update(fea.aalt)
marksSet.update(fea.mark)
basesSet.update(fea.base)
componentsSet.update(fea.component)
ligaturesSet.update(fea.ligature)
gdef = GDEF(basesSet, ligaturesSet, marksSet, componentsSet, fDic.glyphs)
finalAalt = aaltFeature(aaltSet)
langs = languages(fDic)
allFeaturesSyntax = []
allFeaturesSyntax.append(('logs' , l2str(fDic.log)))
allFeaturesSyntax.append(('lang' , langs.syntax()))
allFeaturesSyntax.append(('aalt' , finalAalt.syntax()))
allFeaturesSyntax.extend(middleSyntax)
allFeaturesSyntax.append(('gdef', gdef.syntax()))
finaFea = base
if include is not None:
if type(include) is str:
include = set(include.split(","))
elif type(include) is list:
include = set(include)
finaFea += l2str([f[1] for f in allFeaturesSyntax if f[0] in include])
else:
finaFea += l2str([f[1] for f in allFeaturesSyntax])
featTime = time()
print("Elapsed time for generating the features: %s" %(featTime - interpretTime))
fontName = ''
fontPath = ''
if f.path:
fontName = f.path.split("/")[-1].split('.')[0]
fontPath = '/'.join(f.path.split("/")[:-1])
if path:
fontPath = path
feaPath = '%s_features.fea' %(fontPath+'/'+fontName)
relativePath = '%s_features.fea' %fontName
with open(feaPath, 'w') as File:
File.write(finaFea)
f.features.text = 'include(%s);' %relativePath
f.features.changed()
print("Elapsed time for saving the features: %s" %(time() - featTime))
print("Elapsed time for the whole process: %s" %(time() - start))
if __name__ == '__main__':
import argparse
from fontParts.fontshell.font import RFont
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--ufo", help="Path to the ufo file.", type=str)
parser.add_argument("-b", "--base", help="Base features to include in the begining. It can be used to add some manual features at top of the feature file.", type=str, default="")
parser.add_argument("-o", "--only", help="Only unclude the comma seperated feature tags written here. For example: mark,gdef", type=str)
parser.add_argument("-p", "--path", help="Path to save the feature file at, default path is next to the UFO.", type=str)
args = parser.parse_args()
if args.ufo is not None:
f = RFont(args.ufo)
generateFeatures(f, marksToSkip=None, base=args.base, include=args.only, path=args.path)
else:
print('You need a UFO for the familyFeatures module to work. Use the following command for help:\npython3 "/path/to/repo/Lib/featureMan/familyFeatures.py" -h')
| 40.793103
| 391
| 0.674134
| 571
| 4,732
| 5.56042
| 0.383538
| 0.039685
| 0.031496
| 0.023937
| 0.059213
| 0.029606
| 0.029606
| 0.029606
| 0.006299
| 0.006299
| 0
| 0.013658
| 0.210904
| 4,732
| 115
| 392
| 41.147826
| 0.836636
| 0
| 0
| 0.020833
| 0
| 0.03125
| 0.223373
| 0.010144
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020833
| false
| 0
| 0.125
| 0.010417
| 0.15625
| 0.052083
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5975a408ae1c989c338845f71aa3900205bb24fd
| 15,265
|
py
|
Python
|
FFSP/FFSP_MatNet/FFSPModel.py
|
MinahPark/MatNet
|
63342de76f6a982bdfb5c1e8d5930d64ec3efa61
|
[
"MIT"
] | 18
|
2021-11-22T09:37:52.000Z
|
2022-03-31T03:48:00.000Z
|
FFSP/FFSP_MatNet/FFSPModel.py
|
MinahPark/MatNet
|
63342de76f6a982bdfb5c1e8d5930d64ec3efa61
|
[
"MIT"
] | 1
|
2021-12-04T05:14:26.000Z
|
2021-12-14T03:04:55.000Z
|
FFSP/FFSP_MatNet/FFSPModel.py
|
MinahPark/MatNet
|
63342de76f6a982bdfb5c1e8d5930d64ec3efa61
|
[
"MIT"
] | 5
|
2021-12-15T01:56:02.000Z
|
2022-03-07T13:13:05.000Z
|
"""
The MIT License
Copyright (c) 2021 MatNet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from FFSPModel_SUB import AddAndInstanceNormalization, FeedForward, MixedScore_MultiHeadAttention
class FFSPModel(nn.Module):
def __init__(self, **model_params):
super().__init__()
self.model_params = model_params
stage_cnt = self.model_params['stage_cnt']
self.stage_models = nn.ModuleList([OneStageModel(stage_idx, **model_params) for stage_idx in range(stage_cnt)])
def pre_forward(self, reset_state):
stage_cnt = self.model_params['stage_cnt']
for stage_idx in range(stage_cnt):
problems = reset_state.problems_list[stage_idx]
model = self.stage_models[stage_idx]
model.pre_forward(problems)
def soft_reset(self):
# Nothing to reset
pass
def forward(self, state):
batch_size = state.BATCH_IDX.size(0)
pomo_size = state.BATCH_IDX.size(1)
stage_cnt = self.model_params['stage_cnt']
action_stack = torch.empty(size=(batch_size, pomo_size, stage_cnt), dtype=torch.long)
prob_stack = torch.empty(size=(batch_size, pomo_size, stage_cnt))
for stage_idx in range(stage_cnt):
model = self.stage_models[stage_idx]
action, prob = model(state)
action_stack[:, :, stage_idx] = action
prob_stack[:, :, stage_idx] = prob
gathering_index = state.stage_idx[:, :, None]
# shape: (batch, pomo, 1)
action = action_stack.gather(dim=2, index=gathering_index).squeeze(dim=2)
prob = prob_stack.gather(dim=2, index=gathering_index).squeeze(dim=2)
# shape: (batch, pomo)
return action, prob
class OneStageModel(nn.Module):
def __init__(self, stage_idx, **model_params):
super().__init__()
self.model_params = model_params
machine_cnt_list = self.model_params['machine_cnt_list']
machine_cnt = machine_cnt_list[stage_idx]
embedding_dim = self.model_params['embedding_dim']
self.encoder = FFSP_Encoder(**model_params)
self.decoder = FFSP_Decoder(**model_params)
self.encoded_col = None
# shape: (batch, machine_cnt, embedding)
self.encoded_row = None
# shape: (batch, job_cnt, embedding)
def pre_forward(self, problems):
# problems.shape: (batch, job_cnt, machine_cnt)
batch_size = problems.size(0)
job_cnt = problems.size(1)
machine_cnt = problems.size(2)
embedding_dim = self.model_params['embedding_dim']
row_emb = torch.zeros(size=(batch_size, job_cnt, embedding_dim))
# shape: (batch, job_cnt, embedding)
col_emb = torch.zeros(size=(batch_size, machine_cnt, embedding_dim))
# shape: (batch, machine_cnt, embedding)
seed_cnt = self.model_params['one_hot_seed_cnt']
rand = torch.rand(batch_size, seed_cnt)
batch_rand_perm = rand.argsort(dim=1)
rand_idx = batch_rand_perm[:, :machine_cnt]
b_idx = torch.arange(batch_size)[:, None].expand(batch_size, machine_cnt)
m_idx = torch.arange(machine_cnt)[None, :].expand(batch_size, machine_cnt)
col_emb[b_idx, m_idx, rand_idx] = 1
# shape: (batch, machine_cnt, embedding)
self.encoded_row, self.encoded_col = self.encoder(row_emb, col_emb, problems)
# encoded_row.shape: (batch, job_cnt, embedding)
# encoded_col.shape: (batch, machine_cnt, embedding)
self.decoder.set_kv(self.encoded_row)
def forward(self, state):
batch_size = state.BATCH_IDX.size(0)
pomo_size = state.BATCH_IDX.size(1)
encoded_current_machine = self._get_encoding(self.encoded_col, state.stage_machine_idx)
# shape: (batch, pomo, embedding)
all_job_probs = self.decoder(encoded_current_machine,
ninf_mask=state.job_ninf_mask)
# shape: (batch, pomo, job)
if self.training or self.model_params['eval_type'] == 'softmax':
while True: # to fix pytorch.multinomial bug on selecting 0 probability elements
job_selected = all_job_probs.reshape(batch_size * pomo_size, -1).multinomial(1) \
.squeeze(dim=1).reshape(batch_size, pomo_size)
# shape: (batch, pomo)
job_prob = all_job_probs[state.BATCH_IDX, state.POMO_IDX, job_selected] \
.reshape(batch_size, pomo_size)
# shape: (batch, pomo)
                job_prob[state.finished] = 1  # do not backprop finished episodes
if (job_prob != 0).all():
break
else:
job_selected = all_job_probs.argmax(dim=2)
# shape: (batch, pomo)
job_prob = torch.zeros(size=(batch_size, pomo_size)) # any number is okay
return job_selected, job_prob
def _get_encoding(self, encoded_nodes, node_index_to_pick):
# encoded_nodes.shape: (batch, problem, embedding)
# node_index_to_pick.shape: (batch, pomo)
batch_size = node_index_to_pick.size(0)
pomo_size = node_index_to_pick.size(1)
embedding_dim = self.model_params['embedding_dim']
gathering_index = node_index_to_pick[:, :, None].expand(batch_size, pomo_size, embedding_dim)
# shape: (batch, pomo, embedding)
picked_nodes = encoded_nodes.gather(dim=1, index=gathering_index)
# shape: (batch, pomo, embedding)
return picked_nodes
########################################
# ENCODER
########################################
class FFSP_Encoder(nn.Module):
def __init__(self, **model_params):
super().__init__()
encoder_layer_num = model_params['encoder_layer_num']
self.layers = nn.ModuleList([EncoderLayer(**model_params) for _ in range(encoder_layer_num)])
def forward(self, row_emb, col_emb, cost_mat):
# col_emb.shape: (batch, col_cnt, embedding)
# row_emb.shape: (batch, row_cnt, embedding)
# cost_mat.shape: (batch, row_cnt, col_cnt)
for layer in self.layers:
row_emb, col_emb = layer(row_emb, col_emb, cost_mat)
return row_emb, col_emb
class EncoderLayer(nn.Module):
def __init__(self, **model_params):
super().__init__()
self.row_encoding_block = EncodingBlock(**model_params)
self.col_encoding_block = EncodingBlock(**model_params)
def forward(self, row_emb, col_emb, cost_mat):
# row_emb.shape: (batch, row_cnt, embedding)
# col_emb.shape: (batch, col_cnt, embedding)
# cost_mat.shape: (batch, row_cnt, col_cnt)
row_emb_out = self.row_encoding_block(row_emb, col_emb, cost_mat)
col_emb_out = self.col_encoding_block(col_emb, row_emb, cost_mat.transpose(1, 2))
return row_emb_out, col_emb_out
class EncodingBlock(nn.Module):
def __init__(self, **model_params):
super().__init__()
self.model_params = model_params
embedding_dim = self.model_params['embedding_dim']
head_num = self.model_params['head_num']
qkv_dim = self.model_params['qkv_dim']
self.Wq = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
self.Wk = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
self.Wv = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
self.mixed_score_MHA = MixedScore_MultiHeadAttention(**model_params)
self.multi_head_combine = nn.Linear(head_num * qkv_dim, embedding_dim)
self.add_n_normalization_1 = AddAndInstanceNormalization(**model_params)
self.feed_forward = FeedForward(**model_params)
self.add_n_normalization_2 = AddAndInstanceNormalization(**model_params)
def forward(self, row_emb, col_emb, cost_mat):
# NOTE: row and col can be exchanged, if cost_mat.transpose(1,2) is used
# input1.shape: (batch, row_cnt, embedding)
# input2.shape: (batch, col_cnt, embedding)
# cost_mat.shape: (batch, row_cnt, col_cnt)
head_num = self.model_params['head_num']
q = reshape_by_heads(self.Wq(row_emb), head_num=head_num)
# q shape: (batch, head_num, row_cnt, qkv_dim)
k = reshape_by_heads(self.Wk(col_emb), head_num=head_num)
v = reshape_by_heads(self.Wv(col_emb), head_num=head_num)
# kv shape: (batch, head_num, col_cnt, qkv_dim)
out_concat = self.mixed_score_MHA(q, k, v, cost_mat)
# shape: (batch, row_cnt, head_num*qkv_dim)
multi_head_out = self.multi_head_combine(out_concat)
# shape: (batch, row_cnt, embedding)
out1 = self.add_n_normalization_1(row_emb, multi_head_out)
out2 = self.feed_forward(out1)
out3 = self.add_n_normalization_2(out1, out2)
return out3
# shape: (batch, row_cnt, embedding)
########################################
# Decoder
########################################
class FFSP_Decoder(nn.Module):
def __init__(self, **model_params):
super().__init__()
self.model_params = model_params
embedding_dim = self.model_params['embedding_dim']
head_num = self.model_params['head_num']
qkv_dim = self.model_params['qkv_dim']
self.encoded_NO_JOB = nn.Parameter(torch.rand(1, 1, embedding_dim))
self.Wq_1 = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
self.Wq_2 = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
self.Wq_3 = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
self.Wk = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
self.Wv = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
self.multi_head_combine = nn.Linear(head_num * qkv_dim, embedding_dim)
self.k = None # saved key, for multi-head attention
self.v = None # saved value, for multi-head_attention
self.single_head_key = None # saved key, for single-head attention
def set_kv(self, encoded_jobs):
# encoded_jobs.shape: (batch, job, embedding)
batch_size = encoded_jobs.size(0)
embedding_dim = self.model_params['embedding_dim']
head_num = self.model_params['head_num']
encoded_no_job = self.encoded_NO_JOB.expand(size=(batch_size, 1, embedding_dim))
encoded_jobs_plus_1 = torch.cat((encoded_jobs, encoded_no_job), dim=1)
# shape: (batch, job_cnt+1, embedding)
self.k = reshape_by_heads(self.Wk(encoded_jobs_plus_1), head_num=head_num)
self.v = reshape_by_heads(self.Wv(encoded_jobs_plus_1), head_num=head_num)
# shape: (batch, head_num, job+1, qkv_dim)
self.single_head_key = encoded_jobs_plus_1.transpose(1, 2)
# shape: (batch, embedding, job+1)
def forward(self, encoded_machine, ninf_mask):
# encoded_machine.shape: (batch, pomo, embedding)
# ninf_mask.shape: (batch, pomo, job_cnt+1)
head_num = self.model_params['head_num']
# Multi-Head Attention
#######################################################
q = reshape_by_heads(self.Wq_3(encoded_machine), head_num=head_num)
# shape: (batch, head_num, pomo, qkv_dim)
out_concat = self._multi_head_attention_for_decoder(q, self.k, self.v,
rank3_ninf_mask=ninf_mask)
# shape: (batch, pomo, head_num*qkv_dim)
mh_atten_out = self.multi_head_combine(out_concat)
# shape: (batch, pomo, embedding)
# Single-Head Attention, for probability calculation
#######################################################
score = torch.matmul(mh_atten_out, self.single_head_key)
# shape: (batch, pomo, job_cnt+1)
sqrt_embedding_dim = self.model_params['sqrt_embedding_dim']
logit_clipping = self.model_params['logit_clipping']
score_scaled = score / sqrt_embedding_dim
# shape: (batch, pomo, job_cnt+1)
score_clipped = logit_clipping * torch.tanh(score_scaled)
score_masked = score_clipped + ninf_mask
probs = F.softmax(score_masked, dim=2)
# shape: (batch, pomo, job_cnt+1)
return probs
def _multi_head_attention_for_decoder(self, q, k, v, rank2_ninf_mask=None, rank3_ninf_mask=None):
# q shape: (batch, head_num, n, qkv_dim) : n can be either 1 or PROBLEM_SIZE
# k,v shape: (batch, head_num, job_cnt+1, qkv_dim)
# rank2_ninf_mask.shape: (batch, job_cnt+1)
# rank3_ninf_mask.shape: (batch, n, job_cnt+1)
batch_size = q.size(0)
n = q.size(2)
job_cnt_plus_1 = k.size(2)
head_num = self.model_params['head_num']
qkv_dim = self.model_params['qkv_dim']
sqrt_qkv_dim = self.model_params['sqrt_qkv_dim']
score = torch.matmul(q, k.transpose(2, 3))
# shape: (batch, head_num, n, job_cnt+1)
score_scaled = score / sqrt_qkv_dim
if rank2_ninf_mask is not None:
score_scaled = score_scaled + rank2_ninf_mask[:, None, None, :].expand(batch_size, head_num, n, job_cnt_plus_1)
if rank3_ninf_mask is not None:
score_scaled = score_scaled + rank3_ninf_mask[:, None, :, :].expand(batch_size, head_num, n, job_cnt_plus_1)
weights = nn.Softmax(dim=3)(score_scaled)
# shape: (batch, head_num, n, job_cnt+1)
out = torch.matmul(weights, v)
# shape: (batch, head_num, n, qkv_dim)
out_transposed = out.transpose(1, 2)
# shape: (batch, n, head_num, qkv_dim)
out_concat = out_transposed.reshape(batch_size, n, head_num * qkv_dim)
# shape: (batch, n, head_num*qkv_dim)
return out_concat
########################################
# NN SUB FUNCTIONS
########################################
def reshape_by_heads(qkv, head_num):
# q.shape: (batch, n, head_num*key_dim) : n can be either 1 or PROBLEM_SIZE
batch_s = qkv.size(0)
n = qkv.size(1)
q_reshaped = qkv.reshape(batch_s, n, head_num, -1)
# shape: (batch, n, head_num, key_dim)
q_transposed = q_reshaped.transpose(1, 2)
# shape: (batch, head_num, n, key_dim)
return q_transposed
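# Minimal shape check (illustrative, not part of the original module): confirms
# the (batch, n, head_num*key_dim) -> (batch, head_num, n, key_dim) layout that
# the attention blocks above rely on.
if __name__ == '__main__':
    qkv = torch.rand(2, 5, 4 * 16)  # batch=2, n=5, head_num=4, key_dim=16
    assert reshape_by_heads(qkv, head_num=4).shape == (2, 4, 5, 16)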
| 39.141026
| 123
| 0.648411
| 2,088
| 15,265
| 4.42433
| 0.133621
| 0.061702
| 0.053583
| 0.02533
| 0.492639
| 0.384066
| 0.319658
| 0.264992
| 0.235224
| 0.192141
| 0
| 0.00849
| 0.228431
| 15,265
| 389
| 124
| 39.241645
| 0.775853
| 0.244546
| 0
| 0.268041
| 0
| 0
| 0.025486
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097938
| false
| 0.005155
| 0.020619
| 0
| 0.195876
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5975bf51cf6b40314443cbac07c50fa49c107d36
| 1,697
|
py
|
Python
|
compose.py
|
lvyufeng/mindspore_poems
|
2f46afa290a8065cd1c774c26a96be76da30873e
|
[
"MIT"
] | null | null | null |
compose.py
|
lvyufeng/mindspore_poems
|
2f46afa290a8065cd1c774c26a96be76da30873e
|
[
"MIT"
] | null | null | null |
compose.py
|
lvyufeng/mindspore_poems
|
2f46afa290a8065cd1c774c26a96be76da30873e
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore import load_checkpoint, load_param_into_net
from src.model import RNNModel, RNNModelInfer
from src.utils import process_poems
start_token = 'B'
end_token = 'E'
model_dir = './ckpt/'
corpus_file = './data/poems.txt'
def to_word(predict, vocabs):
t = np.cumsum(predict)
s = np.sum(predict)
sample = int(np.searchsorted(t, np.random.rand(1) * s))
    if sample >= len(vocabs):  # np.searchsorted can return len(t), which would index past the end
        sample = len(vocabs) - 1
return vocabs[sample]
def gen_poem(begin_word):
print('## loading corpus from %s' % model_dir)
poems_vector, word_int_map, vocabularies = process_poems(corpus_file)
print(len(vocabularies))
rnn_model = RNNModel(len(vocabularies), rnn_size=128, model='lstm')
    param_dict = load_checkpoint(
        os.path.join(model_dir, 'poems.6.ckpt'))  # no placeholder needed, so a plain string suffices
param_not_load = load_param_into_net(rnn_model, param_dict)
print(param_not_load)
rnn_model = RNNModelInfer(rnn_model)
x = np.array([list(map(word_int_map.get, start_token))])
predict = rnn_model(Tensor(x, mindspore.int32))
word = begin_word or to_word(predict.asnumpy(), vocabularies)
poem_ = ''
i = 0
while word != end_token:
poem_ += word
i += 1
if i > 24:
break
x = np.array([[word_int_map[word]]])
predict = rnn_model(Tensor(x, mindspore.int32))
word = to_word(predict.asnumpy(), vocabularies)
return poem_
if __name__ == '__main__':
    begin_char = input('## (type quit to exit) please input the first character: ')
if begin_char == 'quit':
exit()
poem = gen_poem(begin_char)
print(poem)
| 30.303571
| 83
| 0.669417
| 240
| 1,697
| 4.483333
| 0.383333
| 0.04461
| 0.036245
| 0.02974
| 0.133829
| 0.074349
| 0.074349
| 0.074349
| 0
| 0
| 0
| 0.010487
| 0.213318
| 1,697
| 56
| 84
| 30.303571
| 0.795506
| 0
| 0
| 0.041667
| 0
| 0
| 0.079505
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.145833
| 0
| 0.229167
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5976b5eadcdfa649651a6db9b9bd714639c5b347
| 1,523
|
py
|
Python
|
pychemia/core/from_file.py
|
petavazohi/PyChemia
|
e779389418771c25c830aed360773c63bb069372
|
[
"MIT"
] | 67
|
2015-01-31T07:44:55.000Z
|
2022-03-21T21:43:34.000Z
|
pychemia/core/from_file.py
|
petavazohi/PyChemia
|
e779389418771c25c830aed360773c63bb069372
|
[
"MIT"
] | 13
|
2016-06-03T19:07:51.000Z
|
2022-03-31T04:20:40.000Z
|
pychemia/core/from_file.py
|
petavazohi/PyChemia
|
e779389418771c25c830aed360773c63bb069372
|
[
"MIT"
] | 37
|
2015-01-22T15:37:23.000Z
|
2022-03-21T15:38:10.000Z
|
import os
import sys
from pychemia import HAS_PYMATGEN, pcm_log
from .structure import Structure
from pychemia.code.vasp import read_poscar
from pychemia.code.abinit import AbinitInput
def structure_from_file(structure_file):
"""
Attempts to reconstruct a PyChemia Structure from the contents of any given file. Valid entries
:param structure_file: The path to a file where the structure can be reconstructed
:type structure_file: str
:return: PyChemia Structure if succeed, None otherwise
"""
st = None
basename = os.path.basename(structure_file)
if not os.path.isfile(structure_file):
raise ValueError("ERROR: Could not open file '%s'" % structure_file)
if basename[-4:].lower() == 'json':
st = Structure.load_json(structure_file)
elif basename[-3:].lower() == 'cif' and HAS_PYMATGEN:
import pychemia.external.pymatgen
st = pychemia.external.pymatgen.cif2structure(structure_file)[0]
elif 'poscar' in basename.lower():
st = read_poscar(structure_file)
elif 'contcar' in basename.lower():
st = read_poscar(structure_file)
elif 'abinit' in basename.lower():
av = AbinitInput(structure_file)
st = av.get_structure()
else:
try:
st = read_poscar(structure_file)
except ValueError:
            raise ValueError('Could not parse file as POSCAR')
if st is None:
pcm_log.debug("ERROR: Could not extract structure from file '%s'" % structure_file)
return st
| 37.146341
| 99
| 0.692055
| 199
| 1,523
| 5.170854
| 0.38191
| 0.164237
| 0.049563
| 0.061224
| 0.109815
| 0.08552
| 0.08552
| 0.08552
| 0.08552
| 0
| 0
| 0.00337
| 0.220617
| 1,523
| 40
| 100
| 38.075
| 0.863521
| 0.170716
| 0
| 0.096774
| 0
| 0
| 0.111741
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.225806
| 0
| 0.290323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59792e136f9480b5e034aa6d01981255bd1bfdd7
| 992
|
py
|
Python
|
snptools/vc_matrix.py
|
pvanheus/variant_exploration_with_tralynca
|
4ffadc29c19d68909beed2254646e36513311847
|
[
"MIT"
] | null | null | null |
snptools/vc_matrix.py
|
pvanheus/variant_exploration_with_tralynca
|
4ffadc29c19d68909beed2254646e36513311847
|
[
"MIT"
] | null | null | null |
snptools/vc_matrix.py
|
pvanheus/variant_exploration_with_tralynca
|
4ffadc29c19d68909beed2254646e36513311847
|
[
"MIT"
] | null | null | null |
from os import listdir
import os.path
import pandas as pd
from .count_variants_per_gene import process_vcf
from .genetree import make_gene_tree
def make_variant_count_matrix(input_directory, output_filename):
    gene_tree = make_gene_tree()
    locus_names = sorted([interval.data['locus'] for interval in gene_tree])
    matrix = []
    sample_names = []
    for filename in sorted(listdir(input_directory)):
        if filename.endswith('.vcf.gz') or filename.endswith('.vcf'):
            path = os.path.join(input_directory, filename)
            counts = process_vcf(path, gene_tree)
            matrix.append([counts.get(locus, 0) for locus in locus_names])
            sample_names.append(filename.split('.')[0])
    data = pd.DataFrame(matrix, index=sample_names, columns=locus_names)
    data.to_csv(output_filename)
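# Minimal usage sketch (illustrative, not part of the original module): the
# directory and output file names are hypothetical placeholders; the function
# scans the directory for *.vcf/*.vcf.gz files.
if __name__ == '__main__':
    make_variant_count_matrix('vcfs/', 'variant_counts.csv')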
| 41.333333
| 93
| 0.676411
| 130
| 992
| 4.953846
| 0.376923
| 0.062112
| 0.118012
| 0.059006
| 0.26087
| 0.26087
| 0.26087
| 0.26087
| 0.26087
| 0.26087
| 0
| 0.002581
| 0.21875
| 992
| 24
| 94
| 41.333333
| 0.828387
| 0
| 0
| 0
| 0
| 0
| 0.028226
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.25
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5979cf5bed5000445a52e27786a6829f4458f888
| 481
|
py
|
Python
|
oarepo_records_draft/merge.py
|
oarepo/invenio-records-draft
|
6d77309996c58fde7731e5f182e9cd5400f81f14
|
[
"MIT"
] | 1
|
2020-06-03T14:44:49.000Z
|
2020-06-03T14:44:49.000Z
|
oarepo_records_draft/merge.py
|
oarepo/invenio-records-draft
|
6d77309996c58fde7731e5f182e9cd5400f81f14
|
[
"MIT"
] | 7
|
2020-06-02T14:45:48.000Z
|
2021-11-16T08:38:47.000Z
|
oarepo_records_draft/merge.py
|
oarepo/invenio-records-draft
|
6d77309996c58fde7731e5f182e9cd5400f81f14
|
[
"MIT"
] | 1
|
2019-08-15T07:59:48.000Z
|
2019-08-15T07:59:48.000Z
|
from deepmerge import Merger
def list_merge(config, path, base, nxt):
for k in range(0, min(len(base), len(nxt))):
if isinstance(base[k], (dict, list, tuple)):
draft_merger.merge(base[k], nxt[k])
else:
base[k] = nxt[k]
for k in range(len(base), len(nxt)):
base.append(nxt[k])
return base
draft_merger = Merger(
[
(list, [list_merge]),
(dict, ["merge"])
],
["override"],
["override"]
)
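# Minimal usage sketch (illustrative, not part of the original module): merge
# returns (and mutates) the base object; nested lists are merged element-wise
# by list_merge above, and conflicting scalars are overridden.
if __name__ == '__main__':
    base = {'a': [1, {'x': 1}], 'b': 'keep'}
    nxt = {'a': [9, {'y': 2}, 3], 'b': 'override'}
    print(draft_merger.merge(base, nxt))
    # -> {'a': [9, {'x': 1, 'y': 2}, 3], 'b': 'override'}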
| 20.913043
| 52
| 0.534304
| 64
| 481
| 3.953125
| 0.421875
| 0.059289
| 0.047431
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00295
| 0.295218
| 481
| 22
| 53
| 21.863636
| 0.743363
| 0
| 0
| 0
| 0
| 0
| 0.043659
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
597bfa5b6f7cdb21349ef3d1cce73227ae2c86fc
| 4,951
|
py
|
Python
|
source/01_make_coordinates/make_coordinates.py
|
toshi-k/kaggle-airbus-ship-detection-challenge
|
872a160057592022488b1772b6c7a8982677d1dc
|
[
"Apache-2.0"
] | 90
|
2018-11-17T21:37:41.000Z
|
2021-11-24T11:55:34.000Z
|
source/01_make_coordinates/make_coordinates.py
|
jackweiwang/kaggle-airbus-ship-detection-challenge
|
872a160057592022488b1772b6c7a8982677d1dc
|
[
"Apache-2.0"
] | 3
|
2018-11-27T14:23:15.000Z
|
2020-03-09T09:23:25.000Z
|
source/01_make_coordinates/make_coordinates.py
|
jackweiwang/kaggle-airbus-ship-detection-challenge
|
872a160057592022488b1772b6c7a8982677d1dc
|
[
"Apache-2.0"
] | 14
|
2018-11-17T21:37:44.000Z
|
2020-11-30T02:22:28.000Z
|
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from PIL import Image
from lib.img2_coord_ica import img2_coord_iter, coord2_img
from lib.log import Logger
# ref: https://www.kaggle.com/paulorzp/run-length-encode-and-decode
def rle_decode(mask_rle, shape=(768, 768)):
"""
Args:
mask_rle: run-length as string formated (start length)
shape: (height,width) of array to return
Returns:
numpy array, 1 - mask, 0 - background
"""
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
for lo, hi in zip(starts, ends):
img[lo:hi] = 255
return img.reshape(shape).T
def main_test():
i = 5304 # 11, 15, 16, 5398
image_id = segmentations.iloc[i, 0]
truth_img = rle_decode(segmentations.iloc[i, 1])
print(np.max(truth_img))
coord = img2_coord_iter(truth_img / 255.0, threshold=0.05)
reconst_img = coord2_img(*coord)
sse = np.sum((reconst_img - truth_img) ** 2)
print('sum of squared error: {}'.format(sse))
os.makedirs('_result_sample', exist_ok=True)
Image.fromarray(reconst_img).save(os.path.join('_result_sample', image_id[:-4] + '_reconstruct.png'), format='PNG')
Image.fromarray(truth_img).save(os.path.join('_result_sample', image_id[:-4] + '_truth.png'), format='PNG')
def main():
logger = Logger('coord_ica')
list_mean_x = list()
list_mean_y = list()
list_height = list()
list_aspect_ratio = list()
list_rotate = list()
num_error = 0
num_zero_ship = 0
os.makedirs('_error_imgs', exist_ok=True)
sse_array = np.array([])
for i, image_id in tqdm(enumerate(segmentations.ImageId), total=len(segmentations)):
encoded = segmentations.iloc[i, 1]
if encoded == '':
list_mean_x.append(np.nan)
list_mean_y.append(np.nan)
list_height.append(np.nan)
list_aspect_ratio.append(np.nan)
list_rotate.append(np.nan)
num_zero_ship += 1
continue
truth_img = rle_decode(encoded)
        reconst_img = np.zeros(truth_img.shape)  # initialize
        matching_degree = 0.0  # initialize so the check after the except branch cannot raise NameError
        threshold_iter = 0.95
        threshold_last = 0.6
truth_img_norm = truth_img / 255.0
try:
mean_x, mean_y, height, aspect_ratio, rotate, img_size = img2_coord_iter(truth_img_norm, threshold_iter)
reconst_img = coord2_img(mean_x, mean_y, height, aspect_ratio, rotate, img_size)
reconst_img_norm = reconst_img / 255.0
sse = np.sum((reconst_img_norm - truth_img_norm) ** 2)
sse_array = np.append(sse_array, sse)
area_intersect = np.sum(truth_img_norm * reconst_img_norm)
area_union = np.sum(truth_img_norm) + np.sum(reconst_img_norm) - area_intersect
matching_degree = area_intersect / area_union
if matching_degree < threshold_last:
logger.info('[{}] sse: {} matching_degree: {}'.format(image_id, sse, matching_degree))
raise RuntimeError
list_mean_x.append(mean_x)
list_mean_y.append(mean_y)
list_height.append(height)
list_aspect_ratio.append(aspect_ratio)
list_rotate.append(rotate)
except (RuntimeError, ValueError):
num_error += 1
list_mean_x.append(np.nan)
list_mean_y.append(np.nan)
list_height.append(np.nan)
list_aspect_ratio.append(np.nan)
list_rotate.append(np.nan)
if matching_degree < threshold_last:
try:
Image.fromarray(reconst_img).save(
os.path.join('_error_imgs', image_id[:-4] + '_deg{:.3f}_re.png'.format(matching_degree)))
Image.fromarray(truth_img).save(
os.path.join('_error_imgs', image_id[:-4] + '_deg{:.3f}_truth.png'.format(matching_degree)))
            except Exception:  # saving the debug images must never abort the run
                pass
logger.info('mean of reconstruct error: {:.3f}'.format(np.mean(sse_array)))
logger.info('num zero ship: {0:d} / {1:d}'.format(num_zero_ship, len(segmentations)))
logger.info('num_error: {0:d} / {1:d}'.format(num_error, len(segmentations)))
result = pd.DataFrame()
result['ImageID'] = segmentations.ImageId
result['x'] = list_mean_y
result['y'] = list_mean_x
result['height'] = list_height
result['width'] = [height / ratio for height, ratio in zip(list_height, list_aspect_ratio)]
result['rotate'] = list_rotate
result.to_csv('../../input/coordinates.csv', index=False, float_format='%.4f')
if __name__ == '__main__':
segmentations = pd.read_csv('../../dataset/train_ship_segmentations_v2.csv')
print(segmentations.head())
segmentations = segmentations.fillna('')
# main_test()
main()
| 30.006061
| 119
| 0.626338
| 675
| 4,951
| 4.33037
| 0.238519
| 0.038317
| 0.037633
| 0.041054
| 0.255559
| 0.189531
| 0.180636
| 0.180636
| 0.153267
| 0.153267
| 0
| 0.021925
| 0.244597
| 4,951
| 164
| 120
| 30.189024
| 0.759626
| 0.05413
| 0
| 0.137255
| 0
| 0
| 0.086957
| 0.015497
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0.009804
| 0.068627
| 0
| 0.107843
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
597e7da85300fb6bd6d365c07bb2ba1dbac55565
| 1,598
|
py
|
Python
|
scripts/combine_errors.py
|
nbren12/nn_atmos_param
|
cb138f0b211fd5743e56ad659aec38c082d2b3ac
|
[
"MIT"
] | 4
|
2018-09-16T20:55:57.000Z
|
2020-12-06T11:27:50.000Z
|
scripts/combine_errors.py
|
nbren12/nn_atmos_param
|
cb138f0b211fd5743e56ad659aec38c082d2b3ac
|
[
"MIT"
] | 5
|
2018-04-07T07:40:39.000Z
|
2018-06-20T06:56:08.000Z
|
scripts/combine_errors.py
|
nbren12/nn_atmos_param
|
cb138f0b211fd5743e56ad659aec38c082d2b3ac
|
[
"MIT"
] | null | null | null |
import numpy as np
import re
import json
import xarray as xr
import pandas as pd
def read_train_loss(epoch, fname,
variables=['test_loss', 'train_loss']):
"""Read the loss.json file for the current epochs test and train loss"""
df = pd.read_json(fname)
epoch_means = df.groupby('epoch').mean()
# need to look for epoch-1 because this data is accumulated over the whole first epoch
if epoch > 0:
return epoch_means.loc[epoch-1][variables].to_dict()
else:
return {'test_loss': np.nan, 'train_loss': np.nan}
errors = []
dims = []
pattern = re.compile("data/output/model.(.*?)/(.*?)/(.*?)/error.nc")
for f in snakemake.input:
m = pattern.search(f)
if m:
model, seed, epoch = m.groups()
ds = xr.open_dataset(f)
arg_file = f"data/output/model.{model}/{seed}/arguments.json"
args = json.load(open(arg_file))
# nhidden is a list, so need to just take the first element
# since all the neural networks I fit are single layer
args['nhidden'] = args['nhidden'][0]
args.pop('seed', None)
ds = ds.assign(**args)
loss_file = f"data/output/model.{model}/{seed}/loss.json"
train_error = read_train_loss(int(epoch), loss_file)
ds = ds.assign(**train_error)
# append to lists
dims.append((model, seed, int(epoch)))
errors.append(ds)
names = ['model', 'seed', 'epoch']
dim = pd.MultiIndex.from_tuples(dims, names=names)
dim.name = 'tmp'
ds = xr.concat(errors, dim=dim).unstack('tmp')
ds.to_netcdf(snakemake.output[0])
| 30.150943
| 90
| 0.627034
| 236
| 1,598
| 4.15678
| 0.432203
| 0.045872
| 0.045872
| 0.030581
| 0.059123
| 0.059123
| 0.059123
| 0
| 0
| 0
| 0
| 0.004055
| 0.228411
| 1,598
| 52
| 91
| 30.730769
| 0.791565
| 0.174593
| 0
| 0
| 0
| 0
| 0.163359
| 0.101527
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.138889
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5980a13b88db20b5e773819c926a4981f53bb21e
| 1,611
|
py
|
Python
|
mu.py
|
cool2645/shadowsocksrr
|
0a594857f4c3125ab14d27d7fd8143291b7c9fee
|
[
"Apache-2.0"
] | 2
|
2018-05-14T10:41:38.000Z
|
2020-05-22T12:40:57.000Z
|
mu.py
|
cool2645/shadowsocksrr
|
0a594857f4c3125ab14d27d7fd8143291b7c9fee
|
[
"Apache-2.0"
] | null | null | null |
mu.py
|
cool2645/shadowsocksrr
|
0a594857f4c3125ab14d27d7fd8143291b7c9fee
|
[
"Apache-2.0"
] | 1
|
2018-09-22T16:15:14.000Z
|
2018-09-22T16:15:14.000Z
|
import db_transfer
import config
import logging
from musdk.client import Client
class MuApiTransfer(db_transfer.TransferBase):
client = None
users = []
def __init__(self):
super(MuApiTransfer, self).__init__()
self.pull_ok = False
self.port_uid_table = {}
self.init_mu_client()
def init_mu_client(self):
mu_url = config.mu_uri
mu_token = config.token
node_id = config.node_id
mu_client = Client(mu_url, node_id, mu_token)
self.client = mu_client
def pull_db_all_user(self):
print("pull all users...")
return self.pull_db_users()
def pull_db_users(self):
users = self.client.get_users_res()
if users is None:
return self.users
for user in users:
self.port_uid_table[user['port']] = user['id']
self.users = users
return users
def update_all_user(self, dt_transfer):
print('call update all user')
print(dt_transfer)
update_transfer = {}
logs = []
        for port in dt_transfer.keys():  # 'port' instead of 'id' avoids shadowing the builtin
            transfer = dt_transfer[port]
            if transfer[0] + transfer[1] < 1024:
                continue
            update_transfer[port] = transfer
            uid = self.port_uid_table[port]
            log = self.client.gen_traffic_log(uid, transfer[0], transfer[1])
            logs.append(log)
print("logs ", logs)
ok = self.client.update_traffic(logs)
if ok is False:
logging.error("update traffic failed...")
return {}
return update_transfer
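For orientation, a hedged sketch of how update_all_user appears to be meant to be called: entries totalling under 1 KiB are skipped. This assumes config.mu_uri, config.token and config.node_id are populated; the port numbers below are made up.
transfer = MuApiTransfer()
transfer.pull_db_all_user()             # fills port_uid_table from the mu API
dt_transfer = {
    8388: (512, 300),                   # < 1024 bytes total: skipped
    8389: (40960, 102400),              # reported via gen_traffic_log
}
reported = transfer.update_all_user(dt_transfer)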
| 28.767857
| 76
| 0.590937
| 202
| 1,611
| 4.455446
| 0.267327
| 0.035556
| 0.036667
| 0.053333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00724
| 0.314091
| 1,611
| 55
| 77
| 29.290909
| 0.80724
| 0
| 0
| 0
| 0
| 0
| 0.044693
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104167
| false
| 0
| 0.083333
| 0
| 0.354167
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
598126ffcc8da7b8ff9a91f8f601f2ef5306a660
| 2,001
|
py
|
Python
|
tests/test_json.py
|
NyntoFive/data_extractor
|
965e12570d6b7549aa2f8b3bd1951e06b010c444
|
[
"MIT"
] | null | null | null |
tests/test_json.py
|
NyntoFive/data_extractor
|
965e12570d6b7549aa2f8b3bd1951e06b010c444
|
[
"MIT"
] | null | null | null |
tests/test_json.py
|
NyntoFive/data_extractor
|
965e12570d6b7549aa2f8b3bd1951e06b010c444
|
[
"MIT"
] | null | null | null |
# Standard Library
import json
# Third Party Library
import pytest
from jsonpath_rw.lexer import JsonPathLexerError
# First Party Library
from data_extractor.exceptions import ExprError, ExtractError
from data_extractor.json import JSONExtractor
@pytest.fixture(scope="module")
def text():
return """
{
"foo": [
{
"baz": 1
},
{
"baz": 2
}
]
}
"""
@pytest.fixture(scope="module")
def element(text):
return json.loads(text)
@pytest.mark.parametrize(
"expr,expect",
[
("foo[*].baz", [1, 2]),
("foo.baz", []),
("foo[0].baz", [1]),
("foo[1].baz", [2]),
("foo[2].baz", []),
],
ids=repr,
)
def test_extract(element, expr, expect):
assert expect == JSONExtractor(expr).extract(element)
@pytest.mark.parametrize(
"expr,expect",
[
("foo[*].baz", 1),
("foo.baz", "default"),
("foo[0].baz", 1),
("foo[1].baz", 2),
("foo[2].baz", "default"),
],
ids=repr,
)
def test_extract_first(element, expr, expect):
assert expect == JSONExtractor(expr).extract_first(element, default="default")
@pytest.mark.parametrize("expr", ["foo.baz", "foo[2].baz"], ids=repr)
def test_extract_first_without_default(element, expr):
extractor = JSONExtractor(expr)
with pytest.raises(ExtractError) as catch:
extractor.extract_first(element)
exc = catch.value
assert len(exc.extractors) == 1
assert exc.extractors[0] is extractor
assert exc.element is element
@pytest.mark.parametrize("expr", ["foo..", "a[]", ""], ids=repr)
def test_invalid_jsonpath_expr(element, expr):
extractor = JSONExtractor(expr)
with pytest.raises(ExprError) as catch:
extractor.extract(element)
exc = catch.value
assert exc.extractor is extractor
    # NOTE: the Exception fallback makes this assertion trivially true;
    # JsonPathLexerError is the interesting case
    assert isinstance(exc.exc, (JsonPathLexerError, Exception))
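Outside pytest, the extractor exercised above can be used directly; a minimal sketch assuming the data_extractor package is installed:
import json
from data_extractor.json import JSONExtractor

data = json.loads('{"foo": [{"baz": 1}, {"baz": 2}]}')
print(JSONExtractor("foo[*].baz").extract(data))                        # [1, 2]
print(JSONExtractor("foo.baz").extract_first(data, default="default"))  # 'default'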
| 23.267442
| 82
| 0.590705
| 224
| 2,001
| 5.205357
| 0.258929
| 0.030875
| 0.072041
| 0.085763
| 0.506003
| 0.35506
| 0.328473
| 0.328473
| 0.039451
| 0.039451
| 0
| 0.012089
| 0.255872
| 2,001
| 85
| 83
| 23.541176
| 0.770987
| 0.027986
| 0
| 0.215385
| 0
| 0
| 0.192169
| 0
| 0
| 0
| 0
| 0
| 0.107692
| 1
| 0.092308
| false
| 0
| 0.076923
| 0.030769
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59814b4554d683700762543937d73f8de4e2078a
| 938
|
py
|
Python
|
demo/predictions/visualize.py
|
qixuxiang/maskrcnn_tianchi_stage2
|
52023b64268dc91f0b5b9f085203ab00a542458a
|
[
"MIT"
] | null | null | null |
demo/predictions/visualize.py
|
qixuxiang/maskrcnn_tianchi_stage2
|
52023b64268dc91f0b5b9f085203ab00a542458a
|
[
"MIT"
] | null | null | null |
demo/predictions/visualize.py
|
qixuxiang/maskrcnn_tianchi_stage2
|
52023b64268dc91f0b5b9f085203ab00a542458a
|
[
"MIT"
] | null | null | null |
import numpy as np
from PIL import Image
npy_file1 = './prediction/1110_1.npy'
npy_file2 = './prediction/1110_2.npy'
npy_file3 = './prediction/1110_3.npy'
npy_file4 = './prediction/1110_4.npy'
npy_file5 = './prediction/1110_5.npy'
arr1 = np.load(npy_file1)
arr2 = np.load(npy_file2)
arr3 = np.load(npy_file3)
arr4 = np.load(npy_file4)
arr5 = np.load(npy_file5)
print(sum(sum(arr1)))
print(sum(sum(arr2)))
print(sum(sum(arr3)))
print(sum(sum(arr4)))
print(sum(sum(arr5)))
arr1 = 50*arr1
arr2 = 50*arr2
arr3 = 50*arr3
arr4 = 50*arr4
arr5 = 50*arr5
img1 = Image.fromarray(arr1).convert("L")
img2 = Image.fromarray(arr2).convert("L")
img3 = Image.fromarray(arr3).convert("L")
img4 = Image.fromarray(arr4).convert("L")
img5 = Image.fromarray(arr5).convert("L")
img1.save("./test_pic/test1.png")
img2.save("./test_pic/test2.png")
img3.save("./test_pic/test3.png")
img4.save("./test_pic/test4.png")
img5.save("./test_pic/test5.png")
| 26.055556
| 41
| 0.715352
| 160
| 938
| 4.06875
| 0.29375
| 0.107527
| 0.069124
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099299
| 0.08742
| 938
| 35
| 42
| 26.8
| 0.661215
| 0
| 0
| 0
| 0
| 0
| 0.234542
| 0.122601
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.151515
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5985441293e6489af243c2cd16aa10e62e49c056
| 16,658
|
py
|
Python
|
gamestonk_terminal/cryptocurrency/due_diligence/pycoingecko_view.py
|
clairvoyant/GamestonkTerminal
|
7b40cfe61b32782e36f5de8a08d075532a08c294
|
[
"MIT"
] | null | null | null |
gamestonk_terminal/cryptocurrency/due_diligence/pycoingecko_view.py
|
clairvoyant/GamestonkTerminal
|
7b40cfe61b32782e36f5de8a08d075532a08c294
|
[
"MIT"
] | null | null | null |
gamestonk_terminal/cryptocurrency/due_diligence/pycoingecko_view.py
|
clairvoyant/GamestonkTerminal
|
7b40cfe61b32782e36f5de8a08d075532a08c294
|
[
"MIT"
] | null | null | null |
"""CoinGecko view"""
__docformat__ = "numpy"
import argparse
from typing import List, Tuple
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import matplotlib.pyplot as plt
from tabulate import tabulate
import mplfinance as mpf
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
plot_autoscale,
)
from gamestonk_terminal.feature_flags import USE_ION as ion
import gamestonk_terminal.cryptocurrency.due_diligence.pycoingecko_model as gecko
from gamestonk_terminal.cryptocurrency.dataframe_helpers import wrap_text_in_df
register_matplotlib_converters()
# pylint: disable=inconsistent-return-statements
# pylint: disable=R0904, C0302
def load(other_args: List[str]):
"""Load selected Cryptocurrency. You can pass either symbol of id of the coin
Parameters
----------
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="load",
description="""Load cryptocurrency, from CoinGecko.
You will have access to a lot of statistics on that coin like price data,
coin development stats, social media and many others. Loading coin
also will open access to technical analysis menu.""",
)
parser.add_argument(
"-c",
"--coin",
required="-h" not in other_args,
type=str,
dest="coin",
help="Coin to load data for (symbol or coin id). You can use either symbol of the coin or coinId"
"You can find all coins using command `coins` or visit https://www.coingecko.com/en. "
"To use load a coin use command load -c [symbol or coinId]",
)
try:
if other_args:
if "-" not in other_args[0]:
other_args.insert(0, "-c")
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
coin = gecko.Coin(ns_parser.coin)
print("")
return coin
except KeyError:
print(f"Could not find coin with the id: {ns_parser.coin}", "\n")
return None
except SystemExit:
print("")
return None
except Exception as e:
print(e, "\n")
return None
def chart(coin: gecko.Coin, other_args: List[str]):
"""Plots chart for loaded cryptocurrency
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="chart",
description="""
Display chart for loaded coin. You can specify currency vs which you want
to show chart and also number of days to get data for.
By default currency: usd and days: 30.
        E.g. if you loaded Bitcoin in the previous step and you want to see its price vs ethereum
in last 90 days range use `chart --vs eth --days 90`
""",
)
parser.add_argument(
"--vs", default="usd", dest="vs", help="Currency to display vs coin"
)
parser.add_argument(
"-d", "--days", default=30, dest="days", help="Number of days to get data for"
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.get_coin_market_chart(ns_parser.vs, ns_parser.days)
df = df["price"].resample("1D").ohlc().ffill()
df.columns = [
"Open",
"High",
"Low",
"Close",
]
        # build the title as a plain string; the original trailing comma made
        # this a one-element tuple that then had to be unwrapped in mpf.plot
        title = (
            f"\n{coin.coin_symbol}/{ns_parser.vs} from {df.index[0].strftime('%Y/%m/%d')} "
            f"to {df.index[-1].strftime('%Y/%m/%d')}"
        )
        mpf.plot(
            df,
            type="candle",
            volume=False,
            title=title,
xrotation=20,
style="binance",
figratio=(10, 7),
figscale=1.10,
figsize=(plot_autoscale()),
update_width_config=dict(
candle_linewidth=1.0, candle_width=0.8, volume_linewidth=1.0
),
)
if ion:
plt.ion()
plt.show()
print("")
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def load_ta_data(coin: gecko.Coin, other_args: List[str]) -> Tuple[pd.DataFrame, str]:
"""Load data for Technical Analysis
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
Returns
    -------
Tuple[pd.DataFrame, str]
dataframe with prices
quoted currency
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="ta",
description="""
Loads data for technical analysis. You can specify currency vs which you want
to show chart and also number of days to get data for.
By default currency: usd and days: 30.
        E.g. if you loaded Bitcoin in the previous step and you want to see its price vs ethereum
in last 90 days range use `ta --vs eth --days 90`
""",
)
parser.add_argument(
"--vs", default="usd", dest="vs", help="Currency to display vs coin"
)
parser.add_argument(
"-d", "--days", default=30, dest="days", help="Number of days to get data for"
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return pd.DataFrame(), ""
df = coin.get_coin_market_chart(ns_parser.vs, ns_parser.days)
df = df["price"].resample("1D").ohlc().ffill()
df.columns = [
"Open",
"High",
"Low",
"Close",
]
df.index.name = "date"
return df, ns_parser.vs
except SystemExit:
print("")
return pd.DataFrame(), ""
except Exception as e:
print(e, "\n")
return pd.DataFrame(), ""
def info(coin: gecko.Coin, other_args: List[str]):
"""Shows basic information about loaded coin
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="info",
description="""
Shows basic information about loaded coin like:
Name, Symbol, Description, Market Cap, Public Interest, Supply, and Price related metrics
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = wrap_text_in_df(coin.base_info, w=80)
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def web(coin: gecko.Coin, other_args: List[str]):
"""Shows found websites corresponding to loaded coin
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="web",
description="""Websites found for given Coin. You can find there urls to
homepage, forum, announcement site and others.""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.websites
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def social(coin: gecko.Coin, other_args: List[str]):
"""Shows social media corresponding to loaded coin
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="social",
description="""Shows social media corresponding to loaded coin. You can find there name of
telegram channel, urls to twitter, reddit, bitcointalk, facebook and discord.""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.social_media
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def dev(coin: gecko.Coin, other_args: List[str]):
"""Shows developers data for loaded coin
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="dev",
description="""Developers data for loaded coin. If the development data is available you can see
how the code development of given coin is going on.
There are some statistics that shows number of stars, forks, subscribers, pull requests,
commits, merges, contributors on github.""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.developers_data
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def ath(coin: gecko.Coin, other_args: List[str]):
"""Shows all time high data for loaded coin
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="ath",
description="""All time high data for loaded coin""",
)
parser.add_argument(
"--vs", dest="vs", help="currency", default="usd", choices=["usd", "btc"]
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.all_time_high(currency=ns_parser.vs)
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def atl(coin: gecko.Coin, other_args: List[str]):
"""Shows all time low data for loaded coin
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="atl",
description="""All time low data for loaded coin""",
)
parser.add_argument(
"--vs", dest="vs", help="currency", default="usd", choices=["usd", "btc"]
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.all_time_low(currency=ns_parser.vs)
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def score(coin: gecko.Coin, other_args: List[str]):
"""Shows different kind of scores for loaded coin
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="score",
description="""
            In this view you can find different kinds of scores for the loaded coin.
            These scores represent different rankings, sentiment metrics, some user stats and others.
            You will see CoinGecko scores, Developer Scores, Community Scores, Sentiment, Reddit scores
            and many others.
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.scores
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def bc(coin: gecko.Coin, other_args: List[str]):
"""Shows urls to blockchain explorers
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="bc",
description="""
            Blockchain explorer URLs for the loaded coin. These are sites like etherscan.io or polkascan.io
            on which you can see all blockchain data e.g. all txs, all tokens, all contracts...
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.blockchain_explorers
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def market(coin: gecko.Coin, other_args: List[str]):
"""Shows market data for loaded coin
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="market",
description="""
            Market data for the loaded coin. There you can find metrics like:
Market Cap, Supply, Circulating Supply, Price, Volume and many others.
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.market_data
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
| 27.308197
| 117
| 0.551327
| 1,786
| 16,658
| 5.015677
| 0.166853
| 0.040188
| 0.034829
| 0.042867
| 0.666667
| 0.653159
| 0.646796
| 0.625251
| 0.591762
| 0.591762
| 0
| 0.005174
| 0.350222
| 16,658
| 609
| 118
| 27.353038
| 0.822432
| 0.119642
| 0
| 0.65122
| 0
| 0.014634
| 0.266301
| 0.007307
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029268
| false
| 0
| 0.026829
| 0
| 0.102439
| 0.087805
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5985716e3511f569993e2ea970c450df3042b443
| 701
|
py
|
Python
|
source/loaders/tploaders.py
|
rodsom22/gcn_refinement
|
b1b76811b145a2fa7e595cc6d131d75c0553d5a3
|
[
"MIT"
] | 24
|
2020-05-04T20:24:35.000Z
|
2022-03-21T07:57:02.000Z
|
source/loaders/tploaders.py
|
rodsom22/gcn_refinement
|
b1b76811b145a2fa7e595cc6d131d75c0553d5a3
|
[
"MIT"
] | 3
|
2020-09-02T15:54:10.000Z
|
2021-05-27T03:09:31.000Z
|
source/loaders/tploaders.py
|
rodsom22/gcn_refinement
|
b1b76811b145a2fa7e595cc6d131d75c0553d5a3
|
[
"MIT"
] | 6
|
2020-08-03T21:01:37.000Z
|
2021-02-04T02:24:46.000Z
|
"""
Data loaders based on tensorpack
"""
import numpy as np
from utilities import nparrays as arrtools
def get_pancreas_generator(sample_name, volumes_path, references_path):
sample_vol_name = volumes_path + sample_name[0]
reference_vol_name = references_path + sample_name[1]
volume = np.load(sample_vol_name)
reference = np.load(reference_vol_name)
reference[reference != 0] = 1
y, x, z = volume.shape
for i in range(z):
vol_slice = volume[:, :, i]
reference_slice = reference[:, :, i]
vol_slice = arrtools.extend2_before(vol_slice)
reference_slice = arrtools.extend2_before(reference_slice)
        yield [vol_slice, reference_slice]
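Sketch of consuming the generator; the file names and directory layout are assumptions that follow the (volume, reference) pairing above:
gen = get_pancreas_generator(("vol01.npy", "ref01.npy"),
                             "volumes/", "references/")
for vol_slice, ref_slice in gen:
    print(vol_slice.shape, ref_slice.shape)  # padded 2-D slice pairs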
| 29.208333
| 71
| 0.706134
| 94
| 701
| 4.978723
| 0.425532
| 0.059829
| 0.064103
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010733
| 0.202568
| 701
| 23
| 72
| 30.478261
| 0.826476
| 0.045649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5986b5465c4c37fe33e19dc8df090df96c8f030d
| 3,137
|
py
|
Python
|
deep_learning/dl.py
|
remix-yh/moneycount
|
e8f35549ef96b8ebe6ca56417f0833f519179173
|
[
"MIT"
] | null | null | null |
deep_learning/dl.py
|
remix-yh/moneycount
|
e8f35549ef96b8ebe6ca56417f0833f519179173
|
[
"MIT"
] | 7
|
2020-09-26T00:46:23.000Z
|
2022-02-10T01:08:15.000Z
|
deep_learning/dl.py
|
remix-yh/moneycount
|
e8f35549ef96b8ebe6ca56417f0833f519179173
|
[
"MIT"
] | null | null | null |
import os
import io
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
from keras.applications.imagenet_utils import preprocess_input
from keras.backend.tensorflow_backend import set_session
from keras.preprocessing import image
import numpy as np
import tensorflow as tf
from ssd_v2 import SSD300v2
from ssd_utils import BBoxUtility
voc_classes = ['10', '100', '5', 'Boat', 'Bottle',
'Bus', 'Car', 'Cat', 'Chair', 'Cow', 'Diningtable',
               'Dog', 'Horse', 'Motorbike', 'Person', 'Pottedplant',
'Sheep', 'Sofa', 'Train', 'Tvmonitor']
NUM_CLASSES = len(voc_classes) + 1
def initialize(weight_file_path):
np.set_printoptions(suppress=True)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.45
set_session(tf.Session(config=config))
input_shape = (300, 300, 3)
model = SSD300v2(input_shape, num_classes=NUM_CLASSES)
model.load_weights(weight_file_path, by_name=True)
return model
def predict(model, img):
inputs = []
plt.cla()
img = image.img_to_array(img)
img = np.asarray(img)
inputs.append(img.copy())
inputs = np.asarray(inputs)
inputs = preprocess_input(inputs)
preds = model.predict(inputs, batch_size=1, verbose=1)
bbox_util = BBoxUtility(NUM_CLASSES)
results = bbox_util.detection_out(preds)
# Parse the outputs.
det_label = results[0][:, 0]
det_conf = results[0][:, 1]
det_xmin = results[0][:, 2]
det_ymin = results[0][:, 3]
det_xmax = results[0][:, 4]
det_ymax = results[0][:, 5]
    top_indices = [i for i, conf in enumerate(det_conf) if conf >= 0.6]  # confidence threshold
top_conf = det_conf[top_indices]
top_label_indices = det_label[top_indices].tolist()
top_xmin = det_xmin[top_indices]
top_ymin = det_ymin[top_indices]
top_xmax = det_xmax[top_indices]
top_ymax = det_ymax[top_indices]
colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
plt.imshow(img / 255.)
currentAxis = plt.gca()
money_total = 0
money_num_list = [10, 100, 5]
for i in range(top_conf.shape[0]):
xmin = int(round(top_xmin[i] * img.shape[1]))
ymin = int(round(top_ymin[i] * img.shape[0]))
xmax = int(round(top_xmax[i] * img.shape[1]))
ymax = int(round(top_ymax[i] * img.shape[0]))
score = top_conf[i]
label = int(top_label_indices[i])
label_name = voc_classes[label - 1]
display_txt = '{:0.2f}, {}'.format(score, label_name)
coords = (xmin, ymin), xmax-xmin+1, ymax-ymin+1
color = colors[label]
currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
currentAxis.text(xmin, ymin, display_txt, bbox={'facecolor':color, 'alpha':0.5})
money_total = money_total + money_num_list[label - 1]
plt.title(f'Total:{money_total} yen')
canvas = FigureCanvasAgg(currentAxis.figure)
buf = io.BytesIO()
plt.savefig(buf)
buf.seek(0)
return buf
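Assumed end-to-end usage of initialize and predict; the weight file and input image names are placeholders:
from keras.preprocessing import image

model = initialize("weights_SSD300.hdf5")  # hypothetical weight file
img = image.load_img("coins.jpg", target_size=(300, 300))
buf = predict(model, img)
with open("annotated.png", "wb") as out:
    out.write(buf.read())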
| 31.37
| 95
| 0.667198
| 450
| 3,137
| 4.464444
| 0.371111
| 0.034843
| 0.025884
| 0.009955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02913
| 0.201148
| 3,137
| 100
| 96
| 31.37
| 0.772546
| 0.006694
| 0
| 0
| 0
| 0
| 0.048812
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.179487
| 0
| 0.230769
| 0.012821
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
598974722569cb3c84cf300f7c787f22839c151a
| 2,255
|
py
|
Python
|
authors/tests/test_article_filters.py
|
andela/ah-backend-odin
|
0e9ef1a10c8a3f6736999a5111736f7bd7236689
|
[
"BSD-3-Clause"
] | null | null | null |
authors/tests/test_article_filters.py
|
andela/ah-backend-odin
|
0e9ef1a10c8a3f6736999a5111736f7bd7236689
|
[
"BSD-3-Clause"
] | 43
|
2018-10-25T10:14:52.000Z
|
2022-03-11T23:33:46.000Z
|
authors/tests/test_article_filters.py
|
andela/ah-backend-odin
|
0e9ef1a10c8a3f6736999a5111736f7bd7236689
|
[
"BSD-3-Clause"
] | 4
|
2018-10-29T07:04:58.000Z
|
2020-04-02T14:15:10.000Z
|
from . import BaseAPITestCase
class TestArticleFilters(BaseAPITestCase):
def setUp(self):
super().setUp()
self.authenticate()
def test_it_filters_articles_by_article_title(self):
self.create_article()
self.create_article(title="Some article with another title")
response = self.client.get(
"/api/articles/?title=Some article with another title"
)
self.assertEqual(len(response.data['results']), 1)
def test_it_filters_articles_by_article_tag(self):
self.create_article()
self.create_article(tagList=['learning', 'django'])
self.create_article(tagList=['learning', 'vuejs', "aws", "jest"])
response = self.client.get("/api/articles/?tag=learning")
self.assertEqual(len(response.data['results']), 2)
def test_it_filters_articles_by_article_description(self):
description = "Testing django apps"
self.create_article(description=description)
response = self.client.get(
f"/api/articles/?description={description}"
)
self.assertEqual(len(response.data['results']), 1)
def test_it_filters_articles_by_author_username(self):
        self.create_articles_with_different_authors()
response = self.client.get("/api/articles/?author=krm")
self.assertEqual(len(response.data['results']), 1)
def test_it_filters_articles_by_author_email(self):
        self.create_articles_with_different_authors()
response = self.client.get("/api/articles/?author=krm@example.com")
self.assertEqual(len(response.data['results']), 1)
    def create_articles_with_different_authors(self):
self.create_article()
self.authenticate(
{"username": "krm", "email": "krm@example.com"}
)
self.create_article()
def create_article(self, **kwargs):
article = {
"title": "How to train your dragon",
"description": "Ever wonder how?",
"body": "You have to believe",
"tagList": ["reactjs", "angularjs", "dragons"],
"published": True
}
data = {**article}
data.update(kwargs)
self.client.post("/api/articles/", {"article": data})
| 36.370968
| 75
| 0.640355
| 250
| 2,255
| 5.572
| 0.276
| 0.071788
| 0.097631
| 0.05743
| 0.594401
| 0.519024
| 0.400574
| 0.293611
| 0.264178
| 0.264178
| 0
| 0.002872
| 0.227938
| 2,255
| 61
| 76
| 36.967213
| 0.797243
| 0
| 0
| 0.24
| 0
| 0
| 0.208426
| 0.068293
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.16
| false
| 0
| 0.02
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
598d5551f035952fc6ef820f0bbd414d1bb129f0
| 720
|
py
|
Python
|
myexporter/tcpexporter.py
|
abh15/flower
|
7e1ab9393e0494f23df65bfa4f858cc35fea290e
|
[
"Apache-2.0"
] | null | null | null |
myexporter/tcpexporter.py
|
abh15/flower
|
7e1ab9393e0494f23df65bfa4f858cc35fea290e
|
[
"Apache-2.0"
] | null | null | null |
myexporter/tcpexporter.py
|
abh15/flower
|
7e1ab9393e0494f23df65bfa4f858cc35fea290e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import subprocess
import time
from prometheus_client import start_http_server, Gauge
def getstat():
s=subprocess.getoutput('ss -i -at \'( dport = :x11 or sport = :x11 )\' | awk \'FNR == 3 { print $4}\'')
if s == "":
return(0.0,"")
else:
rtt=s.lstrip("rtt:")
r=rtt.split("/", 1)[0]
l=subprocess.getoutput('ss -i -at \'( dport = :x11 or sport = :x11 )\' | awk \'FNR == 2 { print $5}\'')
label=l.split(":", 1)[0]
return(float(r),label)
start_http_server(9200)
latencygauge = Gauge('tcprtt', 'provides rtt to fed server using ss',['cohort'])
while True:
stat, lbl= getstat()
latencygauge.labels(cohort=lbl).set(stat)
time.sleep(2)
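Once running, the exporter serves the standard Prometheus text format on port 9200; a small self-check, assuming the script is up locally:
import urllib.request

body = urllib.request.urlopen("http://localhost:9200/metrics").read().decode()
print([line for line in body.splitlines() if line.startswith("tcprtt")])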
| 32.727273
| 112
| 0.590278
| 102
| 720
| 4.117647
| 0.568627
| 0.042857
| 0.071429
| 0.104762
| 0.228571
| 0.228571
| 0.228571
| 0.228571
| 0.228571
| 0.228571
| 0
| 0.042553
| 0.216667
| 720
| 22
| 113
| 32.727273
| 0.702128
| 0.023611
| 0
| 0
| 0
| 0
| 0.129445
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.157895
| 0
| 0.210526
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
598f144f73e5a69e09521df868c498cc54751d48
| 516
|
py
|
Python
|
tests/features/steps/roman.py
|
TestowanieAutomatyczneUG/laboratorium_14-maciejSzcz
|
b92186c574d3f21acd9f3e913e1a8ddcb5ec81fd
|
[
"MIT"
] | null | null | null |
tests/features/steps/roman.py
|
TestowanieAutomatyczneUG/laboratorium_14-maciejSzcz
|
b92186c574d3f21acd9f3e913e1a8ddcb5ec81fd
|
[
"MIT"
] | null | null | null |
tests/features/steps/roman.py
|
TestowanieAutomatyczneUG/laboratorium_14-maciejSzcz
|
b92186c574d3f21acd9f3e913e1a8ddcb5ec81fd
|
[
"MIT"
] | null | null | null |
from behave import *
use_step_matcher("re")
@given("user inputs (?P<number>.+) and (?P<guess>.+)")
def step_impl(context, number, guess):
context.number = int(number)
context.user_guess = guess
@when("we run the converter")
def step_impl(context):
    try:
        # context.roman is expected to be attached elsewhere (e.g. environment.py)
        context.res = context.roman.check_guess(context.number, context.user_guess)
except TypeError as e:
context.e = e
@then("the result should be (?P<value>.+)")
def step_impl(context, value):
assert str(context.res) == value
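The "re" step matcher binds named groups to keyword arguments of the step function; the same mechanism in plain Python, for illustration:
import re

m = re.match(r"user inputs (?P<number>.+) and (?P<guess>.+)",
             "user inputs 14 and XIV")
print(m.groupdict())  # {'number': '14', 'guess': 'XIV'}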
| 24.571429
| 83
| 0.672481
| 74
| 516
| 4.581081
| 0.513514
| 0.061947
| 0.097345
| 0.159292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178295
| 516
| 20
| 84
| 25.8
| 0.799528
| 0
| 0
| 0
| 0
| 0
| 0.193798
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.2
| false
| 0
| 0.066667
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
599099e8cbd4ce7be2457cb90f171f8cb872d8d1
| 1,266
|
py
|
Python
|
main.py
|
AbirLOUARD/AspiRobot
|
0ea78bfd7c20f1371c01a0e912f5e92bed6648b7
|
[
"MIT"
] | 1
|
2022-03-31T18:37:11.000Z
|
2022-03-31T18:37:11.000Z
|
main.py
|
AbirLOUARD/AspiRobot
|
0ea78bfd7c20f1371c01a0e912f5e92bed6648b7
|
[
"MIT"
] | null | null | null |
main.py
|
AbirLOUARD/AspiRobot
|
0ea78bfd7c20f1371c01a0e912f5e92bed6648b7
|
[
"MIT"
] | null | null | null |
import functions
import Aspirobot
import time
import os
import Manoir
import Capteur
import Etat
import threading
import Case
from threading import Thread
manor_size = 5
gameIsRunning = True
clearConsole = lambda: os.system('cls' if os.name in ('nt', 'dos') else 'clear')
manoir = Manoir.Manoir(manor_size, manor_size)
caseRobot = Case.Case(1, 1)
agent = Aspirobot.Aspirobot(manoir, caseRobot)
manoir.draw()
"""
while (gameIsRunning):
clearConsole()
if (functions.shouldThereBeANewDirtySpace(dirtys_number)):
functions.generateDirt(manor_dirty)
dirtys_number += 1
if (functions.shouldThereBeANewLostJewel(jewels_number)):
functions.generateJewel(manor_jewel)
jewels_number += 1
functions.drawManor(manor_dirty, manor_jewel)
time.sleep(pause_length)
"""
for _ in range(10):
    manoir.initialisation()
def runAgent():
while True:
agent.run(3)
def runManoir():
while True:
#clearConsole()
manoir.ModifierPositionRobot(agent.getCase())
manoir.run()
if __name__ == "__main__":
t1 = Thread(target = runAgent)
t2 = Thread(target = runManoir)
    t1.daemon = True
    t2.daemon = True
t1.start()
t2.start()
    # wait on the workers instead of burning CPU in a busy loop; both are
    # daemons, so Ctrl+C still terminates the program
    t1.join()
    t2.join()
| 21.827586
| 80
| 0.691153
| 147
| 1,266
| 5.816327
| 0.442177
| 0.031579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014866
| 0.203002
| 1,266
| 57
| 81
| 22.210526
| 0.832507
| 0.011058
| 0
| 0.083333
| 0
| 0
| 0.024561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0.027778
| 0.277778
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
599104a205da723279b528df24bd43e2dcb5bdbb
| 1,169
|
py
|
Python
|
docs/src/newsgroups_data.py
|
vishalbelsare/RLScore
|
713f0a402f7a09e41a609f2ddcaf849b2021a0a7
|
[
"MIT"
] | 61
|
2015-03-06T08:48:01.000Z
|
2021-04-26T16:13:07.000Z
|
docs/src/newsgroups_data.py
|
andrecamara/RLScore
|
713f0a402f7a09e41a609f2ddcaf849b2021a0a7
|
[
"MIT"
] | 5
|
2016-09-08T15:47:00.000Z
|
2019-02-25T17:44:55.000Z
|
docs/src/newsgroups_data.py
|
vishalbelsare/RLScore
|
713f0a402f7a09e41a609f2ddcaf849b2021a0a7
|
[
"MIT"
] | 31
|
2015-01-28T15:05:33.000Z
|
2021-04-16T19:39:48.000Z
|
import numpy as np
from scipy import sparse as sp
from rlscore.utilities import multiclass
def load_newsgroups():
T = np.loadtxt("train.data")
#map indices from 1...n to 0...n-1
rows = T[:,0] -1
cols = T[:,1] -1
vals = T[:,2]
X_train = sp.coo_matrix((vals, (rows, cols)))
X_train = X_train.tocsc()
T = np.loadtxt("test.data")
#map indices from 1...n to 0...n-1
rows = T[:,0] -1
cols = T[:,1] -1
vals = T[:,2]
X_test = sp.coo_matrix((vals, (rows, cols)))
X_test = X_test.tocsc()
#X_test has additional features not present in X_train
X_test = X_test[:,:X_train.shape[1]]
Y_train = np.loadtxt("train.label", dtype=int)
Y_train = multiclass.to_one_vs_all(Y_train, False)
Y_test = np.loadtxt("test.label", dtype=int)
Y_test = multiclass.to_one_vs_all(Y_test, False)
return X_train, Y_train, X_test, Y_test
def print_stats():
X_train, Y_train, X_test, Y_test = load_newsgroups()
print("Train X dimensions %d %d" %X_train.shape)
print("Test X dimensions %d %d" %X_test.shape)
print("Number of labels %d" %Y_train.shape[1])
if __name__=="__main__":
print_stats()
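The loaders expect whitespace-separated triplet files in the classic 20-newsgroups layout, one (1-based document id, 1-based term id, count) row per line; the excerpt below is an assumption for illustration:
# train.data (assumed layout):
#   1 7 2
#   1 19 1
#   2 7 4
X_train, Y_train, X_test, Y_test = load_newsgroups()  # files must be in cwd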
| 30.763158
| 58
| 0.638152
| 202
| 1,169
| 3.455446
| 0.292079
| 0.06447
| 0.025788
| 0.051576
| 0.363897
| 0.323782
| 0.26361
| 0.194842
| 0.131805
| 0.131805
| 0
| 0.019481
| 0.209581
| 1,169
| 37
| 59
| 31.594595
| 0.735931
| 0.101796
| 0
| 0.206897
| 0
| 0
| 0.108883
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.103448
| 0
| 0.206897
| 0.172414
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59962bcd6324fb181e2aeed2776a6d4ee13fa678
| 1,245
|
py
|
Python
|
5hours/14_dictionaries.py
|
matiasmasca/python
|
7631583820d51e3132bdb793fed28cc83f4877a2
|
[
"MIT"
] | null | null | null |
5hours/14_dictionaries.py
|
matiasmasca/python
|
7631583820d51e3132bdb793fed28cc83f4877a2
|
[
"MIT"
] | null | null | null |
5hours/14_dictionaries.py
|
matiasmasca/python
|
7631583820d51e3132bdb793fed28cc83f4877a2
|
[
"MIT"
] | null | null | null |
# like Ruby hashes: stores "key" "value" pairs
# just like a real dictionary: there is the word, which is the key, and the definition, which would be the value
# keys have to be unique
nombre_de_diccionario = {} #curly brackets.
monthConversions = {
"Jan": "January",
"Feb": "February",
"Mar": "March",
"Apr": "April",
"May": "May",
"Jun": "June",
"Jul": "July",
"Ago": "August",
"Sep": "September",
"Oct": "October",
"Nov": "November",
"Dic": "December",
}
# accessing the dictionary's values
# there are several ways
# put the key inside brackets
print(monthConversions["Mar"])
# get() lets you define which value to return when the key is missing
print(monthConversions.get("Nov"))
print(monthConversions.get("Mat"))
print(monthConversions.get("Mat", "No es una clave valida"))
# keys can be numeric, and the values can be of different types
monthConversions = {
1: ("January", "Enero", "Janeiro"), # un tupla
2: ["February", "Febrero", "Fevereiro"], #una lista
3: "March",
4: "April",
5: "May",
6: "June",
7: "July",
8: "August",
9: "September",
10: "October",
11: "November",
12: "December",
}
print(monthConversions[1])
print(monthConversions[1][1])
print(monthConversions[2][2])
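A natural next step after individual lookups: iterating over keys and values (note the values above deliberately mix tuples, lists and strings).
for number, names in monthConversions.items():
    print(number, "->", names)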
| 23.055556
| 98
| 0.654618
| 164
| 1,245
| 4.957317
| 0.579268
| 0.180812
| 0.088561
| 0.066421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019455
| 0.174297
| 1,245
| 53
| 99
| 23.490566
| 0.771401
| 0.343775
| 0
| 0.055556
| 0
| 0
| 0.306351
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.194444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5997a4ecb7f8086a5d0b295c0471521ff04b54f7
| 6,985
|
py
|
Python
|
graph/__init__.py
|
worldwise001/stylometry
|
b5a4cc98fb8dfb6d1600d41bb15c96aeaf4ecb72
|
[
"MIT"
] | 14
|
2015-02-24T16:14:07.000Z
|
2022-02-19T21:49:55.000Z
|
graph/__init__.py
|
worldwise001/stylometry
|
b5a4cc98fb8dfb6d1600d41bb15c96aeaf4ecb72
|
[
"MIT"
] | 1
|
2015-02-25T09:45:13.000Z
|
2015-02-25T09:45:13.000Z
|
graph/__init__.py
|
worldwise001/stylometry
|
b5a4cc98fb8dfb6d1600d41bb15c96aeaf4ecb72
|
[
"MIT"
] | 4
|
2015-11-20T10:47:11.000Z
|
2021-03-30T13:14:20.000Z
|
import matplotlib
matplotlib.use('Agg')
import statsmodels.api as sm
import numpy as np
from scipy.stats import linregress
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
def hist_prebin(filename, values, width=1, x_title='', y_title='', title=None):
if title is None:
title = filename
left = [ v[0] for v in values ]
height = [ v[1] for v in values ]
plt.figure(figsize=(24,18), dpi=600)
    # NOTE: ``left=`` is the pre-matplotlib-3.0 keyword; newer releases
    # renamed it to ``x=``
    plt.bar(left=left, height=height, width=width)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.title(title)
plt.savefig('%s.png' % filename, format='png')
plt.savefig('%s.eps' % filename, format='eps')
def hist(filename, values, x_title='', y_title='', title=None):
if title is None:
title = filename
plt.figure(figsize=(24,18), dpi=600)
plt.hist(values, bins=20)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.title(title)
plt.savefig('%s.png' % filename, format='png')
plt.savefig('%s.eps' % filename, format='eps')
def generate(filename, rows, columns, x_title='', y_title='', title=None):
rows_num = range(1, len(rows)+1)
if title is None:
title = filename
plt.figure(figsize=(24,18), dpi=600)
plt.scatter(rows_num, columns)
locs, labels = plt.xticks(rows_num, rows)
plt.setp(labels, rotation=90)
plt.plot(rows_num, columns)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.title(title)
plt.savefig('%s.png' % filename, format='png')
plt.savefig('%s.eps' % filename, format='eps')
def scatter(filename, x, y, line=True, xr=None, yr=None, x_title='', y_title='', title=None):
if title is None:
title = filename
plt.figure(figsize=(24,18), dpi=600)
plt.scatter(x, y)
if xr is not None:
plt.xlim(xr)
if yr is not None:
plt.ylim(yr)
if line:
est = sm.OLS(y, sm.add_constant(x)).fit()
x_prime = np.linspace(min(x), max(x), 100)[:, np.newaxis]
x_prime = sm.add_constant(x_prime)
y_hat = est.predict(x_prime)
line_plot1 = plt.plot(x_prime[:, 1], y_hat, 'r', alpha=0.9, label='r^2 = %s' % est.rsquared)
#res = linregress(x,y)
#line_plot2 = plt.plot([min(x), max(x)], [res[0]*min(x)+res[1], res[0]*max(x)+res[1]],
# 'g', alpha=0.9, label='r^2 = %s' % res[2])
plt.legend(['r^2 = %s' % est.rsquared])
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.title(title)
plt.savefig('%s.png' % filename, format='png')
plt.savefig('%s.eps' % filename, format='eps')
plt.close()
def roc(filename, y_truth, y_predicted, title=None):
fpr, tpr, _ = roc_curve(y_truth, y_predicted, 1)
roc_auc = auc(fpr, tpr)
if title is None:
title = filename
plt.figure(figsize=(24,18), dpi=600)
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0,1], [0,1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC: %s' % title)
plt.legend(loc="lower right")
plt.savefig('%s.png' % filename, format='png')
plt.savefig('%s.eps' % filename, format='eps')
def rocs(filename, y_truths, y_predicteds, labels, title=None):
if title is None:
title = filename
plt.figure(figsize=(24,18), dpi=600)
for i in range(0, len(y_truths)):
fpr, tpr, _ = roc_curve(y_truths[i], y_predicteds[i], 1)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, label='%s (area = %0.2f)' % (labels[i], roc_auc))
plt.plot([0,1], [0,1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC: %s' % title)
plt.legend(loc="lower right")
plt.savefig('%s.png' % filename, format='png')
plt.savefig('%s.eps' % filename, format='eps')
def setboxcol(bp, i, col):
plt.setp(bp['boxes'][i], color=col)
plt.setp(bp['caps'][i*2], color=col)
plt.setp(bp['caps'][i*2+1], color=col)
plt.setp(bp['whiskers'][i*2], color=col)
plt.setp(bp['whiskers'][i*2+1], color=col)
plt.setp(bp['fliers'][i*2], color=col)
plt.setp(bp['fliers'][i*2+1], color=col)
plt.setp(bp['medians'][i], color=col)
def boxplot_single(filename, data, xr=None, yr=None, x_title='', y_title='', title=None):
if title is None:
title = filename
author_labels = []
author_data = []
for author in data:
author_labels.append(author)
author_data.append(data[author])
for start in range(0, len(data), 50):
end = start+50
if end > len(data):
end = len(data)
width = end-start
fig = plt.figure(figsize=(width,12), dpi=600)
ax = plt.axes()
bp = plt.boxplot(author_data[start:end], positions=range(1, width+1), widths = 0.8)
plt.xlim(0, width+1)
ax.set_xticklabels(author_labels[start:end], rotation=70)
ax.set_xticks(range(1, width+1))
if xr is not None:
plt.xlim(xr)
if yr is not None:
plt.ylim(yr)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.title(title)
plt.tight_layout()
plt.savefig('%s_%d.png' % (filename,start), format='png')
plt.savefig('%s_%d.eps' % (filename,start), format='eps')
plt.close()
def boxplot(filename, data, groups, x_title='', y_title='', title=None):
if title is None:
title = filename
plt.figure(figsize=(1.5*len(data)+3,12), dpi=600)
ax = plt.axes()
colors=['blue', 'red', 'green']*10
i = 1
k = 0
interval = len(groups)
print(groups)
author_labels = []
author_label_pos = []
for author in data:
author_labels.append(author)
author_data = []
if interval == 0:
interval = len(data[author])
cols = []
for src_reddit in data[author]:
author_data.append(data[author][src_reddit])
print(groups.index(src_reddit))
cols.append(colors[groups.index(src_reddit)])
pos = [ i+j for j in range(0, interval) ]
bp = plt.boxplot(author_data, positions=pos, widths = 0.8)
for m in range(0, interval):
setboxcol(bp, m, cols[m])
author_label_pos.append(i + (interval/2.0))
i += interval + 1
k += 1
plt.xlim(0, i)
ax.set_xticklabels(author_labels, rotation=70)
ax.set_xticks(author_label_pos)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.title(title)
plt.tight_layout()
hB, = plt.plot([1,1],'b-')
hR, = plt.plot([1,1],'r-')
hG, = plt.plot([1,1],'g-')
plt.legend((hB, hR, hG),(groups[0], groups[1], groups[2]))
hB.set_visible(False)
hR.set_visible(False)
hG.set_visible(False)
plt.savefig('%s.png' % filename, format='png')
plt.savefig('%s.eps' % filename, format='eps')
plt.close()
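Illustrative call with synthetic data (array sizes and the output file name are assumptions); it requires the same statsmodels/matplotlib stack imported above:
import numpy as np

x = np.random.rand(50)
y = 2 * x + np.random.normal(scale=0.1, size=50)
scatter("fit_demo", x, y, x_title="x", y_title="y")  # writes fit_demo.png/.eps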
| 30.502183
| 100
| 0.591553
| 1,077
| 6,985
| 3.751161
| 0.16156
| 0.039604
| 0.043564
| 0.025743
| 0.582673
| 0.515099
| 0.481683
| 0.474257
| 0.425
| 0.425
| 0
| 0.030654
| 0.234073
| 6,985
| 228
| 101
| 30.635965
| 0.724486
| 0.024338
| 0
| 0.505435
| 0
| 0
| 0.057701
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048913
| false
| 0
| 0.038043
| 0
| 0.086957
| 0.01087
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59995210d6ac282b5113ee3252c96de5a50256f9
| 2,251
|
py
|
Python
|
test/test_component.py
|
gadalang/gada
|
2dd4f4dfd5b7390c06307040cad23203a015f7a4
|
[
"MIT"
] | null | null | null |
test/test_component.py
|
gadalang/gada
|
2dd4f4dfd5b7390c06307040cad23203a015f7a4
|
[
"MIT"
] | null | null | null |
test/test_component.py
|
gadalang/gada
|
2dd4f4dfd5b7390c06307040cad23203a015f7a4
|
[
"MIT"
] | 1
|
2021-06-15T13:52:33.000Z
|
2021-06-15T13:52:33.000Z
|
__all__ = ["ComponentTestCase"]
import os
import sys
import yaml
import unittest
from gada import component
from test.utils import TestCaseBase
class ComponentTestCase(TestCaseBase):
def test_load(self):
"""Test loading the testnodes package that is in PYTHONPATH."""
# Load component configuration
config = self.write_config_and_load(TestCaseBase.CONFIG_NODES)
self.assertEqual(config["runner"], "generic", "incorrect configuration")
# Get node configuration
node_config = component.get_node_config(config, "hello")
self.assertEqual(
node_config["runner"], "generic", "incorrect node configuration"
)
self.assertEqual(node_config["bin"], "python", "incorrect node configuration")
self.assertEqual(
node_config["argv"],
r"${comp_dir}/__init__.py ${argv}",
"incorrect node configuration",
)
def test_load_not_found(self):
"""Test loading a package that is not in the PYTHONPATH."""
with self.assertRaises(Exception):
comp = component.load("invalid")
def test_load_config(self):
"""Test loading config.yml file from testnodes package."""
config = self.write_config_and_load(TestCaseBase.CONFIG_NO_NODES)
self.assertEqual(
config, TestCaseBase.CONFIG_NO_NODES, "incorrect loaded configuration"
)
def test_load_config_empty(self):
"""Test loading an existing but empty config.yml file."""
with open(TestCaseBase.CONFIG_YML, "w+") as f:
f.write("")
config = self.load_config()
self.assertIsNotNone(config, "invalid configuration")
def test_load_config_not_found(self):
"""Test loading a non existing config.yml file."""
self.remove_config()
with self.assertRaises(Exception):
component.load_config(sys)
def test_get_node_config_not_found(self):
"""Test loading a config.yml file with unknown node."""
config = self.write_config_and_load(TestCaseBase.CONFIG_NODES)
with self.assertRaises(Exception):
component.get_node_config(config, "invalid")
if __name__ == "__main__":
unittest.main()
| 32.157143
| 86
| 0.662372
| 255
| 2,251
| 5.603922
| 0.278431
| 0.055983
| 0.062981
| 0.044087
| 0.357593
| 0.23373
| 0.216935
| 0.103569
| 0.071379
| 0
| 0
| 0
| 0.236784
| 2,251
| 69
| 87
| 32.623188
| 0.831781
| 0.161706
| 0
| 0.181818
| 0
| 0
| 0.147709
| 0.012399
| 0
| 0
| 0
| 0
| 0.204545
| 1
| 0.136364
| false
| 0
| 0.136364
| 0
| 0.295455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
599a3aac676f1bdb004c22bf7034b685260f3101
| 17,820
|
py
|
Python
|
color pattern with threading.py
|
HashtagInnovator/Alpha-Star
|
f69a35b1924320dfec9610d6b61acae8d9de4afa
|
[
"Apache-2.0"
] | null | null | null |
color pattern with threading.py
|
HashtagInnovator/Alpha-Star
|
f69a35b1924320dfec9610d6b61acae8d9de4afa
|
[
"Apache-2.0"
] | null | null | null |
color pattern with threading.py
|
HashtagInnovator/Alpha-Star
|
f69a35b1924320dfec9610d6b61acae8d9de4afa
|
[
"Apache-2.0"
] | null | null | null |
import time
import random
from playsound import playsound
from threading import Thread
i = -1
l = 0
count = 0
class loops:
def loop(self):
print(" ", end="")
def A(self):
        global i
        global l
for j in range(i, 5):
for k in range(4, i, -1):
print(" ", end="")
print("*", end="")
if i != 0:
l = 1
for q in range(0, l):
if (i == 3):
print(" *" * 3, end="")
else:
print(" " * (i + (i - 1)), end="*")
for k in range(4, i, -1):
print(" ", end="")
x.loop()
return
def B(self):
global i
for j in range(i, 6):
print("*", end="")
if (i == 0 or i == 2 or i == 4):
print(" *" * 3, end=" ")
else:
print(" " * 6, end="*")
x.loop()
return
def C(self):
global i
for i in range(i, 5):
if (i == 0 or i == 4):
print(" " * 2, end=" *" * 3)
elif (i == 1 or i == 3):
print(" " * 1, end="*")
print(" " * 5, end=" ")
else:
print("*", end=" " *7)
x.loop()
return
def D(self):
global i
for i in range(i, 5):
print("*", end=" ")
if (i == 0 or i == 4):
print("* " * 2, end=" " * 1)
elif (i == 1 or i == 3):
print(" " * 4, end="*")
else:
print(" " * 3, end=" *")
x.loop()
return
def E(self):
global i
for i in range(i, 5):
if (i == 0 or i == 2 or i == 4):
print("* " * 3, end="*")
else:
print("* ", end=" " * 5)
x.loop()
return
def F(self):
global i
for i in range(i, 5):
if (i == 0):
print("* " * 3, end="*")
elif (i == 2):
print("* " * 3, end=" ")
else:
print("* ", end=" " * 5)
x.loop()
return
def G(self):
global i
for i in range(i, 5):
if (i == 0):
print(" " * 2, end=" *" * 3)
print(" ", end="")
elif (i == 4):
print(" " * 2, end=" * " * 2)
print(" ", end="")
elif (i == 1):
print(" " * 1, end="*")
print(" " * 7, end="")
elif (i == 3):
print(" " * 1, end="*")
print(" " * 5, end=" *")
else:
print("*", end=" " * 2)
print(" *" * 3, end="")
x.loop()
return
def H(self):
global i
for i in range(i, 5):
if (i == 2):
print("* " * 3, end="*")
else:
print("*", end=" " * 5)
print("*", end="")
x.loop()
return
def I(self):
global i
for i in range(i, 5):
if (i == 0 or i == 4):
print("* " * 3, end="*")
else:
print(" " * 3, end="*")
print(" " * 3, end="")
x.loop()
return
def J(self):
global i
for i in range(i, 5):
if (i == 0):
print("* " * 3, end="*")
elif (i == 3 or i == 2):
print("* ", end=" *")
print(" " * 3, end="")
elif (i == 4):
print(" ", end="*")
print(" " * 2, end="")
else:
print(" " * 3, end="*")
print(" " * 3, end="")
x.loop()
return
def K(self):
global i
for i in range(i, 5):
if i == 0 or i == 4:
print("*", end=" " * 3)
print("*", end="")
elif i == 1 or i == 3:
print("*", end=" " * 2)
print("* ", end=" ")
else:
print("* ", end=" *")
print(" ", end=" ")
x.loop()
return
def L(self):
global i
for i in range(i,5):
if(i==4):
print("* "*3,end="*")
else:
print("* ",end=" "*5)
x.loop()
return
def M(self):
global i
for i in range(i,5):
print("* ",end="")
if(i==1):
print("* ",end=" * ")
elif(i==2):
print(" "*2,end="* ")
else:
print(" "*3,end="")
print("*",end="")
x.loop()
return
def N(self):
global i
for i in range(i,5):
print("*",end="")
if(i==0 ):
print(" "*3,end="")
else:
print(" "*i,end="*")
print(" "*(5-i),end="")
print("*",end="")
x.loop()
return
def O(self):
global i
for i in range(i,5):
if(i==0 or i==4):
print(" "*4,end="*")
print(" "*3,end=" ")
elif(i==2):
print("*",end=" "*7)
print("*",end="")
else:
print(" ",end="*")
print(" ",end="* ")
x.loop()
return
def P(self):
global i
for i in range(i,5):
print("*",end="")
if(i==0 or i==2):
print(" *"*3,end=" ")
elif(i==1):
print(" "*6,end="*")
else:
print(" "*7,end="")
x.loop()
return
def Q(self):
global i
for i in range(i,5):
if(i==0):
print(" "*4,end="*")
print(" "*3,end=" ")
elif(i==4):
print(" "*4,end="*")
print(" "*3,end="*")
elif(i==2):
print("*",end=" "*7)
print("*",end="")
elif(i==3):
print(" ",end="*")
print(" "*3,end="* * ")
else:
print(" ",end="*")
print(" ",end="* ")
x.loop()
return
def R(self):
global i
for i in range(i,5):
print("*",end="")
if(i==0 or i==2):
print(" *"*3,end=" ")
elif(i==1):
print(" "*6,end="*")
else:
print(" "*i,end=" *")
print(" ",end=" "*(4-i))
x.loop()
return
def S(self):
global i
for i in range(i, 5):
if (i == 0):
print(" " * 2, end="* " * 3)
print("", end="")
elif (i == 4):
print(" ", end="* " * 3)
print("", end="")
elif (i == 1):
print("*", end=" " * 7)
elif (i == 2):
print(" ", end="*")
print(" " * 4, end="")
else:
print("*", end=" " * 6)
print("*", end="")
x.loop()
return
def T(self):
global i
for i in range(i, 5):
if (i == 0):
print("* " * 3, end="*")
else:
print(" " * 2, end=" *")
print(" " * 2, end=" ")
x.loop()
return
def U(self):
global i
for i in range(i, 5):
if (i == 4):
print(" " * 2, end="* " * 2)
print(" " * 2, end="")
elif (i == 3):
print(" ", end="*")
print(" " * 4, end="*")
print(" ", end="")
else:
print("* ", end=" " * 5)
print("*", end="")
x.loop()
return
def V(self):
global i
for i in range(i, 5):
if (i == 0):
print("*", end=" " * 7)
print("*", end="")
elif (i == 1):
print(" *", end=" " * 5)
print("*", end=" ")
elif (i == 2):
print(" *", end=" " * 3)
print("*", end=" ")
elif (i == 3):
print(" *", end=" ")
print("*", end=" ")
else:
print(" " * 4, end="*")
print(" " * 4, end="")
x.loop()
return
def W(self):
global i
for i in range(i, 5):
if (i == 0):
print("*", end=" " * 11)
print("*", end="")
elif i == 1:
print(" *", end=" " * 9)
print("", end="* ")
elif (i == 2):
print(" * ", end=" *")
print(" ", end=" ")
elif (i == 3):
print(" " * 3, end="*")
print(" * * ", end=" " * 2)
else:
print(" " * 3, end=" *")
print(" *", end=" " * 4)
x.loop()
return
def X(self):
global i
for i in range(i, 5):
if (i == 0 or i == 4):
print("*", end=" " * 5)
print("*", end="")
elif (i == 1 or i == 3):
print(" *", end=" " * 3)
print("* ", end="")
else:
print(" " * 3, end="*")
print(" " * 3, end="")
x.loop()
return
def Y(self):
global i
for i in range(i, 5):
if (i == 0):
print("*", end=" " * 5)
print("*", end="")
elif (i == 1):
print(" *", end=" " * 3)
print("* ", end="")
else:
print(" " * 3, end="*")
print(" " * 3, end="")
x.loop()
return
def Z(self):
global i
for i in range(i, 5):
if (i == 0 or i == 4):
print("* " * 3, end="*")
elif (i == 1):
print(" " * 5, end="*")
print(" ", end="")
elif (i == 2):
print(" " * 3, end="*")
print(" " * 2, end=" ")
else:
print(" " * 1, end="*")
print(" " * 3, end=" ")
x.loop()
return
print()
def play():
soun = input("ENTER SOUND")
time.sleep(1.8)
print("\n"*30)
# CHANGE DIRECTORY HERE ................................................................
playsound("C:\\Users\\chetan\\Desktop\\language\\playsound\\" + soun + ".mp3")
# CHANGE DIRECTORY HERE.................................................................
time.sleep(1.1)
x = loops()
# DRIVER CODE
n = input("ENTER YOUR TEXT")
print("type any song name from here ...")
lis=["birth",'rider','standard','teri mitti me','chitrakaar']
print(lis)
#WE CAN ADD birthday and rider SONG HERE
thread=Thread(target=play)
thread.start()
time.sleep(7)
k = len(n)
s = 0.5
palette = [30, 31, 32, 33, 34, 35, 36, 37]  # ANSI foreground colour codes
colors = {}  # one colour counter per letter, instead of 26 separate variables
for o in range(5):
    i = i + 1  # `i` is assumed to be initialised earlier in the script
    for f in range(k):
        ch = n[f].upper()
        if "A" <= ch <= "Z":
            # The first occurrence of a letter picks a random base colour;
            # every occurrence after that shifts its colour code by one.
            if ch not in colors:
                colors[ch] = random.choice(palette)
            colors[ch] += 1
            print("\033[1;{}m".format(colors[ch]), end="")
            time.sleep(s)
            getattr(x, ch)()  # dispatches to x.A() ... x.Z()
        elif ch == " ":
            x.loop()
            x.loop()
    print()
time.sleep(6)
print("\n"*8)
print('THANK YOU ', end='', flush=True)
for _ in range(8):  # the loop variable must not be `x`, which still holds the loops() instance
    for frame in r'-\|/-\|/':
        print('\b', frame, sep='', end='', flush=True)
        time.sleep(0.2)
print('\b ')
thread.join()
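The colour trick above is plain ANSI SGR escape codes. A minimal, self-contained sketch of the "\033[1;{code}m" sequence used in the loop (assumes a terminal that understands SGR sequences, which most Unix terminals do):

# Print a sample word in each of the eight basic ANSI foreground colours.
for code in range(30, 38):
    print("\033[1;{}mcolour {}\033[0m".format(code, code))  # bold colour, then reset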
| 26.322009
| 129
| 0.306285
| 2,048
| 17,820
| 2.665039
| 0.071777
| 0.111396
| 0.056064
| 0.047637
| 0.633199
| 0.538476
| 0.423232
| 0.360572
| 0.329425
| 0.317332
| 0
| 0.046841
| 0.48844
| 17,820
| 676
| 130
| 26.360947
| 0.551887
| 0.01257
| 0
| 0.580101
| 0
| 0
| 0.048209
| 0.002786
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047218
| false
| 0
| 0.008432
| 0
| 0.10118
| 0.305228
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
599d3203f355bf0108b50dc6b8026b093b4736fc
| 395
|
py
|
Python
|
scripts/test_web3.py
|
AeneasHe/eth-brownie-enhance
|
e53995924ffb93239b9fab6c1c1a07e9166dd1c6
|
[
"MIT"
] | 1
|
2021-10-04T23:34:14.000Z
|
2021-10-04T23:34:14.000Z
|
scripts/test_web3.py
|
AeneasHe/eth-brownie-enhance
|
e53995924ffb93239b9fab6c1c1a07e9166dd1c6
|
[
"MIT"
] | null | null | null |
scripts/test_web3.py
|
AeneasHe/eth-brownie-enhance
|
e53995924ffb93239b9fab6c1c1a07e9166dd1c6
|
[
"MIT"
] | null | null | null |
import wpath
from web3 import Web3, HTTPProvider, IPCProvider, WebsocketProvider
def get_web3_by_http_rpc():
    address = "http://47.243.92.131:8545"
    print("===>address:", address)
    p = HTTPProvider(address)
    web3 = Web3(p)
    return web3
w3 = get_web3_by_http_rpc()
eth = w3.eth
r = eth.getBalance("0x3d32aA995FdD334c671C2d276345DE6fe2F46D88")  # camelCase API from web3.py v5; v6 renamed it to eth.get_balance
print(r)
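A hedged usage sketch building on the helper above (web3.py v5 API, matching the camelCase getBalance call; v6 renamed these to is_connected, get_balance, and from_wei). The node URL and account are the ones hard-coded in the script:

w3 = get_web3_by_http_rpc()
if w3.isConnected():
    wei = w3.eth.getBalance("0x3d32aA995FdD334c671C2d276345DE6fe2F46D88")
    print(Web3.fromWei(wei, "ether"))  # balance in ether, as a Decimal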
| 18.809524
| 67
| 0.721519
| 52
| 395
| 5.326923
| 0.5
| 0.057762
| 0.101083
| 0.129964
| 0.115523
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154545
| 0.164557
| 395
| 20
| 68
| 19.75
| 0.684848
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0.106329
| 0
| 0
| 0.106329
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.230769
| 0
| 0.384615
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
599f0418376070df049179da7c8e1b8f17a142f2
| 834
|
py
|
Python
|
models/sklearn_model.py
|
Ailln/stock-prediction
|
9de77de5047446ffceeed83cb610c7edd2cb1ad3
|
[
"MIT"
] | 11
|
2020-07-11T06:14:29.000Z
|
2021-12-02T08:48:53.000Z
|
models/sklearn_model.py
|
HaveTwoBrush/stock-prediction
|
9de77de5047446ffceeed83cb610c7edd2cb1ad3
|
[
"MIT"
] | null | null | null |
models/sklearn_model.py
|
HaveTwoBrush/stock-prediction
|
9de77de5047446ffceeed83cb610c7edd2cb1ad3
|
[
"MIT"
] | 8
|
2020-04-15T14:29:47.000Z
|
2021-12-19T09:26:53.000Z
|
from sklearn import svm
from sklearn import ensemble
from sklearn import linear_model
class Model(object):
    def __init__(self):
        self.model_dict = {
            "SGDRegressor": linear_model.SGDRegressor(max_iter=1000),
            "HuberRegressor": linear_model.HuberRegressor(),
            "LinearRegression": linear_model.LinearRegression(),
            "LinearSVR": svm.LinearSVR(),
            "BaggingRegressor": ensemble.BaggingRegressor(),
            "AdaBoostRegressor": ensemble.AdaBoostRegressor(),
            "ExtraTreesRegressor": ensemble.ExtraTreesRegressor(),
            "RandomForestRegressor": ensemble.RandomForestRegressor(),
            "GradientBoostingRegressor": ensemble.GradientBoostingRegressor()
        }

    def sklearn_model(self, model_name):
        return self.model_dict[model_name]
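A short usage sketch on toy data; the model names are exactly the keys of model_dict above:

import numpy as np

X = np.arange(10, dtype=float).reshape(-1, 1)
y = 2.0 * X.ravel() + 1.0
reg = Model().sklearn_model("LinearRegression")
reg.fit(X, y)
print(reg.predict([[12.0]]))  # approximately [25.]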
| 37.909091
| 77
| 0.681055
| 67
| 834
| 8.268657
| 0.402985
| 0.079422
| 0.092058
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006173
| 0.223022
| 834
| 21
| 78
| 39.714286
| 0.848765
| 0
| 0
| 0
| 0
| 0
| 0.178657
| 0.055156
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.166667
| 0.055556
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59a0a3b7aa59f29b5ba0e35ea23ff02112e179f9
| 1,023
|
py
|
Python
|
00Python/day05/basic02.py
|
HaoZhang95/PythonAndMachineLearning
|
b897224b8a0e6a5734f408df8c24846a98c553bf
|
[
"MIT"
] | 937
|
2019-05-08T08:46:25.000Z
|
2022-03-31T12:56:07.000Z
|
00Python/day05/basic02.py
|
Sakura-gh/Python24
|
b97e18867264a0647d5645c7d757a0040e755577
|
[
"MIT"
] | 47
|
2019-09-17T10:06:02.000Z
|
2022-03-11T23:46:52.000Z
|
00Python/day05/basic02.py
|
Sakura-gh/Python24
|
b97e18867264a0647d5645c7d757a0040e755577
|
[
"MIT"
] | 354
|
2019-05-10T02:15:26.000Z
|
2022-03-30T05:52:57.000Z
|
"""
list元素的排序
sort() 默认无参数是从小到大
reversed(list) 整个列表直接反过来,返回值是一个新的list
"""
import random
a_list = []
for i in range(10):
    a_list.append(random.randint(0, 200))
print(a_list)
a_list.sort()
print(a_list)
a_list.sort(reverse=True) # descending, from largest to smallest
print(a_list)
new_list = reversed(a_list) # [12,10,7,9] -> [9,7,10,12]
print(new_list)  # prints a list_reverseiterator object; wrap it in list() to see the elements
"""
一个学校,三个办公室, 八位老师进行随机分配办公室
"""
school = [[], [], []]
teacher_list = list("ABCDEFGH")
for name in teacher_list:
    index = random.randint(0, 2)
    school[index].append(name)
print(school)
"""
字符串表示:"", '', """"""
list表示:[], 可修改
元组的表示:(), 元组的元素不能进行修改,
元组中如果只有一个元素的话,后面加上逗号表明是一个tuple,否则就是元素真实类型
"""
a_tuple = (1, 3.14, "Hello", True)
empty_tuple = ()
empty_tuple2 = tuple()
# Special cases
b_tuple = (1) # type = int
c_tuple = (1,) # type = tuple
"""
访问元组tuple
查询的话和list一样使用count, index
"""
print(a_tuple[2])
# a_tuple[1] = "haha"  # would raise TypeError: tuple elements cannot be reassigned, because tuples are immutable
print(a_tuple.count(1)) # 1 occurs twice in the tuple, because True equals 1 in Python
print(a_tuple.index(3.14))
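Since reversed() returns an iterator rather than a list, a small sketch of the difference (slicing is the usual idiom when a real reversed list is wanted):

nums = [12, 10, 7, 9]
rev = reversed(nums)
print(rev)          # <list_reverseiterator object at 0x...>
print(list(rev))    # [9, 7, 10, 12]
print(nums[::-1])   # [9, 7, 10, 12] -- a new list, no iterator involved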
| 18.267857
| 60
| 0.641251
| 139
| 1,023
| 4.568345
| 0.467626
| 0.062992
| 0.047244
| 0.034646
| 0.059843
| 0.059843
| 0
| 0
| 0
| 0
| 0
| 0.042959
| 0.180841
| 1,023
| 55
| 61
| 18.6
| 0.714797
| 0.200391
| 0
| 0.166667
| 0
| 0
| 0.156112
| 0.060383
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.033333
| 0
| 0.033333
| 0.266667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59a69dfbb3f7dfb97929bbbc436b9c105fe9fa48
| 1,643
|
py
|
Python
|
ThreeBotPackages/unlock_service/scripts/restore.py
|
threefoldfoundation/tft-stellar
|
b36460e8dba547923778273b53fe4f0e06996db0
|
[
"Apache-2.0"
] | 7
|
2020-02-05T16:10:46.000Z
|
2021-04-28T10:39:20.000Z
|
ThreeBotPackages/unlock_service/scripts/restore.py
|
threefoldfoundation/tft-stellar
|
b36460e8dba547923778273b53fe4f0e06996db0
|
[
"Apache-2.0"
] | 379
|
2020-01-13T10:22:21.000Z
|
2022-03-23T08:59:57.000Z
|
ThreeBotPackages/unlock_service/scripts/restore.py
|
threefoldfoundation/tft-stellar
|
b36460e8dba547923778273b53fe4f0e06996db0
|
[
"Apache-2.0"
] | 3
|
2020-01-24T09:56:44.000Z
|
2020-08-03T21:02:38.000Z
|
#!/usr/bin/env python
# pylint: disable=no-value-for-parameter
import click
import os
import sys
import requests
import json
UNLOCK_SERVICE_DEFAULT_HOSTS = {"test": "https://testnet.threefold.io", "public": "https://tokenservices.threefold.io"}
@click.command()
@click.option("--source", default="export_data", help="Sourcefile to import data from")
@click.option("--network", type=click.Choice(["test", "public"], case_sensitive=False), default="public")
@click.option("--unlock_service_host", default=None, help="Destination to restore to (overrides the network parameter)")
def import_unlockhash_transaction_data(source, network, unlock_service_host):
    if not unlock_service_host:
        unlock_service_host = UNLOCK_SERVICE_DEFAULT_HOSTS[network]
    print(f"Restoring data to {unlock_service_host} from {source}\n")
    restored = []
    with open(source, mode="r") as f:
        for line in f.readlines():
            if line.strip() == "":
                continue
            unlockhash_transaction_data = json.loads(line)
            unlockhash = unlockhash_transaction_data.get("unlockhash")
            transaction_xdr = unlockhash_transaction_data.get("transaction_xdr")
            if unlockhash in restored:
                continue
            r = requests.post(
                f"{unlock_service_host}/threefoldfoundation/unlock_service/create_unlockhash_transaction",
                json={"unlockhash": unlockhash, "transaction_xdr": transaction_xdr},
            )
            r.raise_for_status()
            restored.append(unlockhash)

if __name__ == "__main__":
    import_unlockhash_transaction_data()
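A hedged invocation sketch using click's test runner (assumes the script is importable as a module; against a real service it issues actual HTTP POSTs):

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(import_unlockhash_transaction_data,
                       ["--source", "export_data", "--network", "test"])
print(result.exit_code, result.output)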
| 37.340909
| 120
| 0.684114
| 187
| 1,643
| 5.748663
| 0.42246
| 0.108837
| 0.094884
| 0.046512
| 0.043721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.200243
| 1,643
| 43
| 121
| 38.209302
| 0.818113
| 0.03591
| 0
| 0.0625
| 0
| 0
| 0.269279
| 0.08091
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.25
| 0
| 0.28125
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59a7951eb259bc0943a926370fa409960f8cba7c
| 4,984
|
py
|
Python
|
pgdiff/diff/PgDiffConstraints.py
|
Onapsis/pgdiff
|
ee9f618bc339cbfaf7967103e95f9650273550f8
|
[
"MIT"
] | 2
|
2020-05-11T16:42:48.000Z
|
2020-08-27T04:11:49.000Z
|
diff/PgDiffConstraints.py
|
Gesha3809/PgDiffPy
|
00466429d0385eb999c32addcbe6e2746782cb5d
|
[
"MIT"
] | 1
|
2018-04-11T18:19:33.000Z
|
2018-04-13T15:18:40.000Z
|
diff/PgDiffConstraints.py
|
Gesha3809/PgDiffPy
|
00466429d0385eb999c32addcbe6e2746782cb5d
|
[
"MIT"
] | 1
|
2018-04-11T15:09:22.000Z
|
2018-04-11T15:09:22.000Z
|
from PgDiffUtils import PgDiffUtils
class PgDiffConstraints(object):
    @staticmethod
    def createConstraints(writer, oldSchema, newSchema, primaryKey, searchPathHelper):
        for newTableName, newTable in newSchema.tables.items():
            oldTable = None
            if oldSchema is not None:
                oldTable = oldSchema.tables.get(newTableName)
            # Add new constraints
            for constraint in PgDiffConstraints.getNewConstraints(oldTable, newTable, primaryKey):
                searchPathHelper.outputSearchPath(writer)
                writer.writeln(constraint.getCreationSQL())

    @staticmethod
    def dropConstraints(writer, oldSchema, newSchema, primaryKey, searchPathHelper):
        for newTableName in newSchema.tables:
            oldTable = None
            if oldSchema is not None:
                oldTable = oldSchema.tables.get(newTableName)
            newTable = newSchema.tables[newTableName]
            # Drop constraints that no longer exist or were modified
            for constraint in PgDiffConstraints.getDropConstraints(oldTable, newTable, primaryKey):
                searchPathHelper.outputSearchPath(writer)
                writer.writeln(constraint.getDropSQL())

    @staticmethod
    def alterComments(writer, oldSchema, newSchema, searchPathHelper):
        if oldSchema is None:
            return
        for oldTableName, oldTable in oldSchema.tables.items():
            newTable = newSchema.tables.get(oldTableName)
            if newTable is None:
                continue
            for oldConstraintName, oldConstraint in oldTable.constraints.items():
                newConstraint = newTable.constraints.get(oldConstraintName)
                if newConstraint is None:
                    continue
                # sbSQL = []
                if (oldConstraint.comment is None
                        and newConstraint.comment is not None
                        or oldConstraint.comment is not None
                        and newConstraint.comment is not None
                        and oldConstraint.comment != newConstraint.comment):
                    searchPathHelper.outputSearchPath(writer)
                    writer.write("COMMENT ON ")
                    if newConstraint.isPrimaryKeyConstraint():
                        writer.write("INDEX ")
                        writer.write(PgDiffUtils.getQuotedName(newConstraint.name))
                    else:
                        writer.write("CONSTRAINT ")
                        writer.write(PgDiffUtils.getQuotedName(newConstraint.name))
                        writer.write(" ON ")
                        writer.write(PgDiffUtils.getQuotedName(newConstraint.tableName))
                    writer.write(" IS ")
                    writer.write(newConstraint.comment)
                    writer.writeln(';')
                elif oldConstraint.comment is not None and newConstraint.comment is None:
                    searchPathHelper.outputSearchPath(writer)
                    writer.write("COMMENT ON ")
                    if newConstraint.isPrimaryKeyConstraint():
                        writer.write("INDEX ")
                        writer.write(PgDiffUtils.getQuotedName(newConstraint.name))
                    else:
                        writer.write("CONSTRAINT ")
                        writer.write(PgDiffUtils.getQuotedName(newConstraint.name))
                        writer.write(" ON ")
                        writer.write(PgDiffUtils.getQuotedName(newConstraint.tableName))
                    writer.writeln(" IS NULL;")

    @staticmethod
    def getNewConstraints(oldTable, newTable, primaryKey):
        result = []
        if newTable is not None:
            if oldTable is None:
                for constraintName, constraint in newTable.constraints.items():
                    if constraint.isPrimaryKeyConstraint() == primaryKey:
                        result.append(constraint)
            else:
                for constraintName, constraint in newTable.constraints.items():
                    if (constraint.isPrimaryKeyConstraint() == primaryKey
                            and (constraintName not in oldTable.constraints
                                 or oldTable.constraints[constraintName] != constraint)):
                        result.append(constraint)
        return result

    @staticmethod
    def getDropConstraints(oldTable, newTable, primaryKey):
        result = list()
        if newTable is not None and oldTable is not None:
            for constraintName in oldTable.constraints:
                oldConstraint = oldTable.constraints[constraintName]
                newConstraint = newTable.constraints.get(constraintName)
                if (oldConstraint.isPrimaryKeyConstraint() == primaryKey
                        and (newConstraint is None or newConstraint != oldConstraint)):
                    result.append(oldConstraint)
        return result
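An illustrative sketch of getNewConstraints, with hypothetical stand-ins for the real schema objects (the actual table and constraint classes live elsewhere in pgdiff):

class FakeConstraint:
    def __init__(self, name, pk=False):
        self.name, self.pk = name, pk
    def isPrimaryKeyConstraint(self):
        return self.pk
    def __eq__(self, other):
        return (self.name, self.pk) == (other.name, other.pk)

class FakeTable:
    def __init__(self, constraints):
        self.constraints = constraints

old = FakeTable({"uq_a": FakeConstraint("uq_a")})
new = FakeTable({"uq_a": FakeConstraint("uq_a"), "uq_b": FakeConstraint("uq_b")})
added = PgDiffConstraints.getNewConstraints(old, new, primaryKey=False)
print([c.name for c in added])  # ['uq_b'] -- only the constraint missing from old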
| 41.190083
| 99
| 0.58427
| 386
| 4,984
| 7.544041
| 0.178756
| 0.06044
| 0.027816
| 0.072115
| 0.50206
| 0.487981
| 0.487981
| 0.428571
| 0.428571
| 0.331731
| 0
| 0
| 0.347311
| 4,984
| 121
| 100
| 41.190083
| 0.895174
| 0.016453
| 0
| 0.404494
| 0
| 0
| 0.015922
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05618
| false
| 0
| 0.011236
| 0
| 0.11236
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59a8688939bcf65bd9fa72756ce61831127d2530
| 7,715
|
py
|
Python
|
experiments/old_code/result_scripts.py
|
hytsang/cs-ranking
|
241626a6a100a27b96990b4f199087a6dc50dcc0
|
[
"Apache-2.0"
] | null | null | null |
experiments/old_code/result_scripts.py
|
hytsang/cs-ranking
|
241626a6a100a27b96990b4f199087a6dc50dcc0
|
[
"Apache-2.0"
] | null | null | null |
experiments/old_code/result_scripts.py
|
hytsang/cs-ranking
|
241626a6a100a27b96990b4f199087a6dc50dcc0
|
[
"Apache-2.0"
] | 1
|
2018-10-30T08:57:14.000Z
|
2018-10-30T08:57:14.000Z
|
import inspect
import logging
import os
from itertools import product
import numpy as np
import pandas as pd
from skopt import load, dump
from csrank.constants import OBJECT_RANKING
from csrank.util import files_with_same_name, create_dir_recursively, rename_file_if_exist
from experiments.util import dataset_options_dict, rankers_dict, lp_metric_dict
DIR_NAME = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
def log_best_params(file):
opt = load(file)
if "ps" in opt.acq_func:
best_i = np.argmin(np.array(opt.yi)[:, 0])
best_loss = opt.yi[best_i]
best_params = opt.Xi[best_i]
logger.info(
"Best parameters so far with a loss for file {} of {:.4f} time of {:.4f}:\n {}".format(
os.path.basename(file), best_loss[0],
best_loss[1],
best_params))
else:
best_i = np.argmin(opt.yi)
best_loss = opt.yi[best_i]
best_params = opt.Xi[best_i]
logger.info(
"Best parameters so far with a loss for file {} of {:.4f}:\n {}".format(os.path.basename(file), best_loss,
best_params))
return best_loss
def remove_redundant_optimizer_models(model_path, files_list):
logger.info('Results Files {} for Path {}'.format(files_list, os.path.basename(model_path)))
minimum_error = 50000
if len(files_list) >= 2:
for file in files_list:
try:
opt = load(file)
best_loss = log_best_params(file)
if best_loss < minimum_error:  # note: with acq_func "ps", log_best_params returns a [loss, time] pair, so only a scalar loss compares cleanly here
minimum_error = best_loss
if (file != model_path):
logger.info('Writing from the file {} to {}'.format(os.path.basename(file),
os.path.basename(model_path)))
os.remove(model_path)
dump(opt, model_path)
except KeyError:
logger.error('Cannot open the file {}'.format(file))
except ValueError:
logger.error('Cannot open the file {}'.format(file))
elif len(files_list) == 1:
file = files_list[0]
try:
best_loss = log_best_params(file)
except KeyError:
logger.error('Cannot open the file {}'.format(file))
except ValueError:
logger.error('Cannot open the file {}'.format(file))
if len(files_list) != 0:
files_list.remove(model_path)
for file in files_list:
logger.error('Removing the File {}'.format(file))
os.remove(file)
def remove_redundant_log_files(logs_path, logs_files_list, ranker_name, dataset):
logger.info('Log Files {} for Path {}'.format(logs_files_list, os.path.basename(logs_path)))
minimum_error = 50000
if len(logs_files_list) >= 2:
for file in logs_files_list:
lines = np.array([line.rstrip('\n') for line in open(file)])
out = 'zero_one_rank_loss'
matching = [s for s in lines if out in s]
try:
logger.info("For File {} the error is {}".format(file, matching))
err = float(matching[0].split(out + ' : ')[-1])
logger.info("For File {} the zero one rank errro is {}".format(file, err))
if err <= minimum_error:
minimum_error = err
if (file != logs_path):
logger.info('Renaming from the file {} to {}'.format(os.path.basename(file),
os.path.basename(logs_path)))
os.remove(logs_path)
os.system('mv {} {}'.format(file, logs_path))
except IndexError:
logger.error('error {} in ranker {} is not evaluated for dataset {}'.format(out, ranker_name, dataset))
except ValueError:
logger.error('error {} in ranker {} is not evaluated for dataset {}'.format(out, ranker_name, dataset))
def remove_redundant_results():
for dataset, ranker_name in product(dataset_options.keys(), ranker_options.keys()):
model_path = os.path.join(DIR_NAME, 'optimizer_results_single_fold', '{}_{}'.format(dataset, ranker_name))
files_list = files_with_same_name(model_path)
remove_redundant_optimizer_models(model_path, files_list)
logs_path = os.path.join(DIR_NAME, 'logs_single_fold', '{}_{}.log'.format(dataset, ranker_name))
logs_files_list = files_with_same_name(logs_path)
remove_redundant_log_files(logs_path, logs_files_list, ranker_name, dataset)
def generate_concise_results_for_dataset(dataset='medoid', directory='logs_single_fold', result_directory='results'):
ranker_names = list(ranker_options.keys())
ranker_names.sort()
metric_names.sort()
data = []
data.append(['**************', dataset.upper(), '**************', ""])
for ranker_name in ranker_names:
try:
log_path = os.path.join(DIR_NAME, directory, '{}_{}.log'.format(dataset, ranker_name))
lines = np.array([line.rstrip('\n') for line in open(log_path)])
except FileNotFoundError:
logger.error('File {} is not found'.format(log_path))
data.append(['NE' for i in range(len(metric_names))])
continue
one_row = []
for out in metric_names:
try:
matching = [s for s in lines if out in s][0]
if out in matching:
one_row.append(matching.split(out + ' : ')[-1])
except IndexError:
logger.error('error {} in ranker {} is not evaluated for dataset {}'.format(out, ranker_name, dataset))
one_row.append('NE')
data.append(one_row)
columns = [name.upper() for name in metric_names]
indexes = [name.upper() for name in ranker_names]
indexes.insert(0, 'DATASET')
dataFrame = pd.DataFrame(data, index=indexes, columns=columns)
file_path = os.path.join(DIR_NAME, result_directory, '{}.csv'.format(dataset))
create_dir_recursively(file_path, True)
dataFrame.to_csv(file_path)
return dataFrame
def create_concise_results(result_directory='results', directory='logs_single_fold'):
df_list = []
datasets = list(dataset_options.keys())
datasets.sort()
for dataset in datasets:
dataFrame = generate_concise_results_for_dataset(dataset=dataset, directory=directory,
result_directory=result_directory)
df_list.append(dataFrame)
full_df = pd.concat(df_list)
fout = os.path.join(DIR_NAME, result_directory, 'complete_results.csv')
full_df.to_csv(fout)
def configure_logging():
log_path = os.path.join(DIR_NAME, 'results', 'compiling_result.log')
create_dir_recursively(log_path, True)
log_path = rename_file_if_exist(log_path)
global logger
logging.basicConfig(filename=log_path, level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(name='Compiling Results')
if __name__ == '__main__':
configure_logging()
dataset_options = dataset_options_dict[OBJECT_RANKING]
ranker_options = rankers_dict[OBJECT_RANKING]
metric_names = list(lp_metric_dict[OBJECT_RANKING].keys())
remove_redundant_results()
create_concise_results()
# create_concise_results(result_directory='logs_new_experiments', directory='logs_new_experiments')
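The selection in log_best_params is a plain argmin over recorded losses; a minimal sketch with lists standing in for a skopt OptimizeResult (opt.Xi = evaluated parameter sets, opt.yi = their losses):

import numpy as np

Xi = [{"lr": 0.1}, {"lr": 0.01}, {"lr": 0.001}]
yi = [0.42, 0.17, 0.25]
best_i = np.argmin(yi)
print("best loss {:.4f} with params {}".format(yi[best_i], Xi[best_i]))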
| 43.835227
| 119
| 0.608425
| 960
| 7,715
| 4.647917
| 0.179167
| 0.03429
| 0.025101
| 0.017481
| 0.422008
| 0.355446
| 0.284626
| 0.259525
| 0.23801
| 0.23801
| 0
| 0.004839
| 0.276734
| 7,715
| 175
| 120
| 44.085714
| 0.794803
| 0.012573
| 0
| 0.231788
| 0
| 0.006623
| 0.123687
| 0.003808
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046358
| false
| 0
| 0.066225
| 0
| 0.125828
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59a98cedbef2ddabf9e787d32a317a09b1db8b5e
| 13,108
|
py
|
Python
|
notochord/features/BagOfWords.py
|
jroose/notochord
|
da9a6ff5d0fabbf0694d0bee1b81a240b66fa006
|
[
"MIT"
] | null | null | null |
notochord/features/BagOfWords.py
|
jroose/notochord
|
da9a6ff5d0fabbf0694d0bee1b81a240b66fa006
|
[
"MIT"
] | null | null | null |
notochord/features/BagOfWords.py
|
jroose/notochord
|
da9a6ff5d0fabbf0694d0bee1b81a240b66fa006
|
[
"MIT"
] | null | null | null |
from .. import schema, App, QueryCache, batcher, grouper, insert_ignore, export, lookup, persist, lookup_or_persist, ABCArgumentGroup, WorkOrderArgs, filter_widgets, temptable_scope, FeatureCache
from ..ObjectStore import ABCObjectStore
from sqlalchemy import Column, Integer, String, Float, ForeignKey, UnicodeText, Unicode, LargeBinary, Boolean, Index
import collections
import csv
import os
import re
import sqlalchemy
import sys
import tempfile
import time
import stat
from sklearn.feature_extraction.text import CountVectorizer
re_word = re.compile(r'[a-zA-Z]+')
__all__ = []
class BagOfWordsArgs(ABCArgumentGroup):
def __call__(self, group):
group.add_argument("--output-feature-set", type=unicode, action="store", metavar="NAME", default=None, help="Name of output feature set (required)")
group.add_argument("--input-feature-set", type=unicode, action="store", metavar="NAME", default=None, help="Name of input feature set (required)")
group.add_argument("--input-feature", type=unicode, action="store", metavar="NAME", default=None, help="Name of input feature")
group.add_argument("--chunk-size", type=int, action="store", metavar="INT", default=None, help="Number or widgets per chunk")
@export
class BagOfWords(App):
@staticmethod
def build_parser_groups():
return [BagOfWordsArgs(), WorkOrderArgs()] + App.build_parser_groups()
def __init__(self, datadir, input_feature_set=None, output_feature_set=None, input_feature=None, min_idwidget=None, max_idwidget=None, datasources=None, chunk_size=None, **kwargs):
super(BagOfWords, self).__init__(datadir, **kwargs)
self.config['output_feature_set'] = output_feature_set or self.config['output_feature_set']
self.config['input_feature_set'] = input_feature_set or self.config['input_feature_set']
self.config['input_feature'] = input_feature or self.config.get('input_feature')
self.config['datasources'] = datasources or self.config.get('datasources')
self.config["chunk_size"] = chunk_size or self.config.get('chunk_size', 1024)
self.config['min_idwidget'] = (min_idwidget, None)[min_idwidget is None]
self.config['max_idwidget'] = (max_idwidget, None)[max_idwidget is None]
def main(self):
import MySQLdb
from warnings import filterwarnings
filterwarnings('ignore', category = MySQLdb.Warning)
import sqlalchemy
from sqlalchemy import Column, literal, tuple_, insert
from ..schema import widget as t_w
from ..schema import widget_feature as t_wf
from ..schema import feature as t_f
from ..schema import feature_set as t_fs
from ..schema import datasource as t_ds
from ..schema import object_store as t_os
with self.session_scope() as session:
self.log.info("Preparing")
fs_in = lookup(session, t_fs, name=self.config['input_feature_set'])
if fs_in is None: raise KeyError("Invalid feature set: '{}'".format(self.config['input_feature_set']))
fs_out = lookup_or_persist(session, t_fs, name=self.config['output_feature_set'])
if fs_out is None: raise KeyError("Invalid feature set: '{}'".format(self.config['output_feature_set']))
os_in = lookup(session, t_os, idobject_store=fs_in.idobject_store)
if fs_in.idobject_store is None or os_in is None:
raise ValueError("Feature set '{}' has no associated object store".format(self.config['input_feature_set']))
else:
object_store = ABCObjectStore.open(session, os_in.name)
f_in = lookup(session, t_f, name=self.config['input_feature'], idfeature_set=fs_in.idfeature_set)
if f_in is None:
if self.config['input_feature'] is not None:
raise KeyError("Invalid feature: '{}' for feature_set '{}'".format(self.config['input_feature'], self.config['input_feature_set']))
else:
raise KeyError("Invalid feature_set '{}' has no default feature".format(self.config['input_feature_set']))
q_w = session.query(t_w.idwidget)
q_w = filter_widgets(
q_w,
min_idwidget = self.config['min_idwidget'],
max_idwidget = self.config['max_idwidget'],
datasources = self.config['datasources']
)
q_wf = session.query(t_wf.idwidget, t_wf.idfeature) \
.join(t_w, t_w.idwidget == t_wf.idwidget) \
.join(t_f, t_f.idfeature == t_wf.idfeature) \
.filter(t_f.idfeature_set == fs_out.idfeature_set)
if self.config['min_idwidget'] is not None:
q_wf = q_wf.filter(t_w.idwidget >= self.config['min_idwidget'])
if self.config['max_idwidget'] is not None:
q_wf = q_wf.filter(t_w.idwidget < self.config['max_idwidget'])
if self.config['datasources'] is not None and len(self.config['datasources']) > 0:
q_wf = q_wf.join(t_ds, t_ds.iddatasource == t_w.iddatasource)
q_wf = q_wf.filter(t_ds.name.in_(self.config['datasources']))
self.log.info("Deleting old features")
#q_del = q_wf.delete()
#q_del = t_wf.__table__.delete() \
# .where(tuple_(t_wf.idwidget, t_wf.idfeature).in_(q_wf))
#self.log.debug("Delete widget query: {}".format(q_del.compile(bind=session.bind)))
#session.execute(q_del)
q_w = session.query(t_w.idwidget, t_w.uuid)
q_w = filter_widgets(
q_w,
min_idwidget = self.config['min_idwidget'],
max_idwidget = self.config['max_idwidget'],
datasources = self.config['datasources']
)
q_w = q_w.join(t_wf, t_wf.idwidget == t_w.idwidget) \
.filter(t_wf.idfeature == f_in.idfeature)
class tmp_upload(schema.TableBase):
idtmp_upload = Column(t_f.idfeature.type, nullable=False, primary_key=True)
idwidget = Column(t_wf.idwidget.type, nullable=False)
idfeature = Column(t_wf.idfeature.type, nullable=False)
value = Column(t_wf.value.type, nullable=False)
__table_args__ = ({'prefixes':["TEMPORARY"]},)
__tablename__ = "tmp_upload"
class tmp_wf(schema.TableBase):
idwidget = Column(Integer, ForeignKey('widget.idwidget', onupdate='RESTRICT', ondelete='CASCADE'), primary_key=True, nullable=False)
idfeature = Column(Integer, ForeignKey('feature.idfeature', onupdate='RESTRICT', ondelete='CASCADE'), primary_key=True, nullable=False)
value = Column(Float, nullable=True)
__table_args__ = ({'prefixes':["TEMPORARY"]},)
__tablename__ = "tmp_widget_feature"
self.log.info("Beginning Execution")
self.log.debug("Widget query: {}".format(q_w.statement.compile(bind=session.bind)))
FC = FeatureCache(1024*1024, log=self.log)
count_time = 0.0
feature_time = 0.0
widget_time = 0.0
upload_time = 0.0
primary_key_time = 0.0
num_widgets = 0
num_widget_features = 0
start_time = time.time()
if session.bind.dialect.name.lower() == 'mysql':
session.execute("SET @@foreign_key_checks=0;")
session.execute("ALTER TABLE widget_feature DISABLE KEYS;")
insert_fout, insert_file = tempfile.mkstemp()
os.close(insert_fout)
os.chmod(insert_file, stat.S_IREAD | stat.S_IWRITE | stat.S_IROTH)
begin_time = time.time()
for it, result_chunk in enumerate(grouper(q_w, self.config['chunk_size'])):
start_time = time.time()
self.log.info("Executing chunk {}".format(it))
upload_chunk = []
N = 0
words = []
widgets = []
values = []
for row in result_chunk:
if row is not None:
idwidget, uuid = row
content = object_store.get(uuid, feature=f_in.name)
if content is None:
continue
cnt = collections.Counter(x.group(0).lower() for x in re_word.finditer(content))
words.extend(cnt.iterkeys())
values.extend(cnt.itervalues())
widgets.extend(idwidget for _ in xrange(len(cnt)))
N += len(cnt)
end_time = time.time()
count_time += (end_time - start_time)
start_time = time.time()
self.log.info("Getting feature id's")
word_idents = FC(session, fs_out.idfeature_set, (w for w in words))
self.log.info("Copying into upload_chunk")
upload_chunk = [dict(idwidget=widgets[it], idfeature=word_idents[it], value=values[it]) for it in xrange(N)]
num_widget_features += len(upload_chunk)
end_time = time.time()
feature_time += (end_time - start_time)
start_time = time.time()
dialect = session.bind.dialect.name
with temptable_scope(session, tmp_upload), temptable_scope(session, tmp_wf):
self.log.info("Uploading widget_feature chunk of size: {}".format(len(upload_chunk)))
session.bulk_insert_mappings(tmp_upload, upload_chunk)
end_time = time.time()
upload_time += (end_time - start_time)
start_time = time.time()
self.log.info("Constructing primary key")
insert_stmt = insert_ignore(tmp_wf, dialect).from_select(
[tmp_wf.idwidget, tmp_wf.idfeature, tmp_wf.value],
session.query(tmp_upload.idwidget, tmp_upload.idfeature, tmp_upload.value) \
.select_from(tmp_upload) \
)
session.execute(insert_stmt)
end_time = time.time()
primary_key_time += (end_time - start_time)
start_time = time.time()
if session.bind.dialect.name.lower() == 'mysql':
with open(insert_file, 'w') as fout:
csvout = csv.writer(fout, delimiter=',', escapechar='\\')
for row in session.query(tmp_wf.idwidget, tmp_wf.idfeature, tmp_wf.value):
csvout.writerow(tuple(row))
del csvout
self.log.info("Temp file size: {}".format(os.path.getsize(insert_file)))
insert_stmt = sqlalchemy.text(r"""
LOAD DATA CONCURRENT LOCAL INFILE '{insert_file}'
IGNORE
INTO TABLE widget_feature
FIELDS TERMINATED BY ','
OPTIONALLY ENCLOSED BY '"'
ESCAPED BY '\\'
LINES TERMINATED BY '\n'
(idwidget, idfeature, value)
""".format(insert_file=insert_file))
else:
insert_stmt = insert_ignore(t_wf, dialect).from_select(
[t_wf.idwidget, t_wf.idfeature, t_wf.value],
session.query(tmp_wf.idwidget, tmp_wf.idfeature, tmp_wf.value)
)
start_time = time.time()
self.log.info("Transferring into place")
session.execute(insert_stmt)
end_time = time.time()
widget_time += (end_time - start_time)
num_widgets += len(result_chunk)
self.log.info("Average Times: {} {} {} {} {} {}".format(count_time / num_widgets, feature_time / num_widgets, upload_time / num_widgets, primary_key_time / num_widgets, widget_time / num_widgets, num_widget_features / num_widgets))
self.log.info("Average Rate: {}".format(num_widgets / (time.time() - begin_time)))
self.log.info("Max Rate: {}".format(num_widgets / widget_time))
if session.bind.dialect.name.lower() == 'mysql':
session.execute("ALTER TABLE widget_feature ENABLE KEYS;")
session.execute("SET @@foreign_key_checks=1;")
os.remove(insert_file)
tmp_upload.metadata.remove(tmp_upload.__table__)
tmp_upload.metadata.remove(tmp_wf.__table__)
if __name__ == "__main__":
A = BagOfWords.from_args(sys.argv[1:])
A.run()
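The per-widget feature extraction above boils down to a regex tokeniser feeding a Counter (note the file itself is Python 2: unicode, xrange, iterkeys). A Python 3 sketch of just that step:

import collections
import re

re_word = re.compile(r"[a-zA-Z]+")

def bag_of_words(content):
    # lower-cased token -> occurrence count, as built for each widget above
    return collections.Counter(m.group(0).lower() for m in re_word.finditer(content))

print(bag_of_words("The cat saw the CAT"))  # Counter({'the': 2, 'cat': 2, 'saw': 1})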
| 51.403922
| 251
| 0.580333
| 1,515
| 13,108
| 4.772277
| 0.171617
| 0.049793
| 0.019779
| 0.033472
| 0.329737
| 0.283541
| 0.209959
| 0.188658
| 0.167082
| 0.138036
| 0
| 0.003319
| 0.310345
| 13,108
| 254
| 252
| 51.606299
| 0.79646
| 0.016555
| 0
| 0.161905
| 0
| 0
| 0.151094
| 0.00357
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019048
| false
| 0
| 0.109524
| 0.004762
| 0.152381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59ac1cf688342acfde23c07e10ca2e33caf1f078
| 450
|
py
|
Python
|
trains/ATIO.py
|
Columbine21/TFR-Net
|
1da01577542e7f477fdf7323ec0696aebc632357
|
[
"MIT"
] | 7
|
2021-11-19T01:32:01.000Z
|
2021-12-16T11:42:44.000Z
|
trains/ATIO.py
|
Columbine21/TFR-Net
|
1da01577542e7f477fdf7323ec0696aebc632357
|
[
"MIT"
] | 2
|
2021-11-25T08:28:08.000Z
|
2021-12-29T08:42:55.000Z
|
trains/ATIO.py
|
Columbine21/TFR-Net
|
1da01577542e7f477fdf7323ec0696aebc632357
|
[
"MIT"
] | 1
|
2021-12-02T09:42:51.000Z
|
2021-12-02T09:42:51.000Z
|
"""
AIO -- All Trains in One
"""
from trains.baselines import *
from trains.missingTask import *
__all__ = ['ATIO']
class ATIO():
    def __init__(self):
        self.TRAIN_MAP = {
            # single-task
            'tfn': TFN,
            'mult': MULT,
            'misa': MISA,
            # missing-task
            'tfr_net': TFR_NET,
        }

    def getTrain(self, args):
        return self.TRAIN_MAP[args.modelName.lower()](args)
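A hedged usage sketch (assumes the trains package and its trainer classes are importable; modelName is the only field getTrain itself reads):

from types import SimpleNamespace

args = SimpleNamespace(modelName="TFN")  # plus whatever fields the chosen trainer expects
trainer = ATIO().getTrain(args)          # 'tfn' -> TFN, so this returns TFN(args)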
| 19.565217
| 59
| 0.52
| 49
| 450
| 4.530612
| 0.591837
| 0.09009
| 0.108108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.346667
| 450
| 22
| 60
| 20.454545
| 0.755102
| 0.111111
| 0
| 0
| 0
| 0
| 0.056266
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.153846
| 0.076923
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59ad06dd6ba9abadeea6a1f889a37f3edb2cafd7
| 4,928
|
py
|
Python
|
split_data.py
|
Anchorboy/PR_FinalProject
|
e744723c9c9dd55e6995ae5929eb45f90c70819b
|
[
"MIT"
] | null | null | null |
split_data.py
|
Anchorboy/PR_FinalProject
|
e744723c9c9dd55e6995ae5929eb45f90c70819b
|
[
"MIT"
] | null | null | null |
split_data.py
|
Anchorboy/PR_FinalProject
|
e744723c9c9dd55e6995ae5929eb45f90c70819b
|
[
"MIT"
] | null | null | null |
import os
import cv2
import random
import shutil
import numpy as np
def split_img(input_path):
split_ratio = 0.8
for dir_name in xrange(10):
dir_name += 1
dir_name = str(dir_name)
dir_path = os.path.join(input_path, dir_name)
img_in_class = os.listdir(dir_path)
rand_train_img = set(random.sample(img_in_class, int(len(img_in_class) * split_ratio)))
rand_test_img = set(img_in_class) - rand_train_img
for img_name in rand_train_img:
img_path = os.path.join(dir_path, img_name)
if not os.path.exists("train/"+dir_name):
os.mkdir("train/"+dir_name)
shutil.copyfile(img_path, "train/"+dir_name+"/"+img_name)
for img_name in rand_test_img:
img_path = os.path.join(dir_path, img_name)
if not os.path.exists("test/"+dir_name):
os.mkdir("test/"+dir_name)
shutil.copyfile(img_path, "test/"+dir_name+"/"+img_name)
def split_data(samples):
split_rate = 0.6
train_all = []
test_all = []
for class_id, img_in_class in enumerate(samples):
rand_ind = [ i for i in xrange(len(img_in_class)) ]
rand_train_ind = set(random.sample(rand_ind, int(len(img_in_class) * split_rate)))
rand_test_ind = set(rand_ind) - rand_train_ind
# train_in_class = []
# test_in_class = []
for ind in rand_train_ind:
img_vec = img_in_class[ind]
img_vec = img_vec.reshape(img_vec.shape[0] * img_vec.shape[1] * img_vec.shape[2],)
train_all.append((class_id, img_vec))
for ind in rand_test_ind:
img_vec = img_in_class[ind]
img_vec = img_vec.reshape(img_vec.shape[0] * img_vec.shape[1] * img_vec.shape[2],)
test_all.append((class_id, img_vec))
# train_all.append(train_in_class)
# test_all.append(test_in_class)
return train_all, test_all
def read_img(input_path):
img_size = (200, 200)
sample_all = []
for dir_name in xrange(10):
dir_name += 1
dir_name = str(dir_name)
dir_path = os.path.join(input_path, dir_name)
img_in_class = []
for img_name in os.listdir(dir_path):
img_path = os.path.join(dir_path, img_name)
img_vec = cv2.imread(img_path, flags=1)
# print img_vec.shape
# res = cv2.resize(img_vec, (int(img_vec.shape[0]*0.5), int(img_vec.shape[1]*0.5)), interpolation=cv2.INTER_CUBIC)
res = cv2.resize(img_vec, img_size, interpolation=cv2.INTER_CUBIC)
nor_res = np.zeros_like(res)
nor_res = cv2.normalize(src=res, dst=nor_res, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
img_in_class.append(nor_res)
sample_all.append(img_in_class)
train_all, test_all = split_data(sample_all)
return train_all, test_all
def read_data():
img_size = (200, 200)
train_all = []
test_all = []
current_base = os.path.abspath('.')
train_path = os.path.join(current_base, "train")
test_path = os.path.join(current_base, "test")
# read train
for dir_name in os.listdir(train_path):
dir_path = os.path.join(train_path, dir_name)
img_in_class = []
for img_name in os.listdir(dir_path):
img_path = os.path.join(dir_path, img_name)
img_vec = cv2.imread(img_path, flags=1)
# print img_vec.shape
# res = cv2.resize(img_vec, (int(img_vec.shape[0]*0.5), int(img_vec.shape[1]*0.5)), interpolation=cv2.INTER_CUBIC)
res = cv2.resize(img_vec, img_size, interpolation=cv2.INTER_CUBIC)
nor_res = np.zeros_like(res)
nor_res = cv2.normalize(src=res, dst=nor_res, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
img_in_class.append(nor_res)
train_all.append(img_in_class)
# read test
for dir_name in os.listdir(test_path):
dir_path = os.path.join(test_path, dir_name)
img_in_class = []
for img_name in os.listdir(dir_path):
img_path = os.path.join(dir_path, img_name)
img_vec = cv2.imread(img_path, flags=1)
# print img_vec.shape
# res = cv2.resize(img_vec, (int(img_vec.shape[0]*0.5), int(img_vec.shape[1]*0.5)), interpolation=cv2.INTER_CUBIC)
res = cv2.resize(img_vec, img_size, interpolation=cv2.INTER_CUBIC)
nor_res = np.zeros_like(res)
nor_res = cv2.normalize(src=res, dst=nor_res, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
img_in_class.append(nor_res)
test_all.append(img_in_class)
return train_all, test_all
if __name__ == "__main__":
current_base = os.path.abspath('.')
input_base = os.path.join(current_base, 'data')
split_img(input_base)
# train_all, test_all = read_data()
# print train_all
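A Python 3 sketch of the same split (the file above is Python 2: note xrange). Using makedirs(exist_ok=True) also covers the case the original misses, where the top-level train/ and test/ folders do not yet exist:

import os
import random
import shutil

def split_images(input_path, ratio=0.8):
    for dir_name in sorted(os.listdir(input_path)):
        src = os.path.join(input_path, dir_name)
        names = sorted(os.listdir(src))
        random.shuffle(names)
        cut = int(len(names) * ratio)
        for subset, chosen in (("train", names[:cut]), ("test", names[cut:])):
            dst = os.path.join(subset, dir_name)
            os.makedirs(dst, exist_ok=True)
            for img_name in chosen:
                shutil.copyfile(os.path.join(src, img_name), os.path.join(dst, img_name))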
| 36.503704
| 126
| 0.631494
| 773
| 4,928
| 3.699871
| 0.108668
| 0.067133
| 0.062937
| 0.053846
| 0.729371
| 0.663287
| 0.566783
| 0.534965
| 0.534965
| 0.534965
| 0
| 0.023281
| 0.250406
| 4,928
| 135
| 127
| 36.503704
| 0.750947
| 0.116071
| 0
| 0.526882
| 0
| 0
| 0.013358
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043011
| false
| 0
| 0.053763
| 0
| 0.129032
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59adc6e4725be00b3a4565680e9bf5a9aec1470e
| 2,507
|
py
|
Python
|
src/eval_command.py
|
luoyan407/n-reference
|
f486b639dc824d296fe0e5ab7a4959e2aef7504c
|
[
"MIT"
] | 7
|
2020-07-14T02:50:13.000Z
|
2021-05-11T05:50:51.000Z
|
src/eval_command.py
|
luoyan407/n-reference
|
f486b639dc824d296fe0e5ab7a4959e2aef7504c
|
[
"MIT"
] | 1
|
2020-12-29T07:25:00.000Z
|
2021-01-05T01:15:47.000Z
|
src/eval_command.py
|
luoyan407/n-reference
|
f486b639dc824d296fe0e5ab7a4959e2aef7504c
|
[
"MIT"
] | 3
|
2021-02-25T13:58:01.000Z
|
2021-08-10T05:49:27.000Z
|
import os, sys
srcFolder = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'src')
sys.path.append(srcFolder)
from metrics import nss
from metrics import auc
from metrics import cc
from utils import *
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='Evaluate predicted saliency map')
parser.add_argument('--output', type=str, default='')
parser.add_argument('--fixation_folder', type=str, default='')
parser.add_argument('--salmap_folder', type=str, default='')
parser.add_argument('--split_file', type=str, default='')
parser.add_argument('--fxt_loc_name', type=str, default='fixationPts')
parser.add_argument('--fxt_size', type=str, default='',
                    help="fixation resolution as two space-separated ints: '600 800' | '480 640' | '320 640'")
parser.add_argument('--appendix', type=str, default='')
parser.add_argument('--file_extension', type=str, default='jpg')
args = parser.parse_args()
if args.fxt_size != '':
    spl_tokens = args.fxt_size.split()
    args.fxt_size = (int(spl_tokens[0]), int(spl_tokens[1]))
else:
    args.fxt_size = (480, 640)
fixation_folder = args.fixation_folder
salmap_folder = args.salmap_folder
fxtimg_type = detect_images_type(fixation_folder)
split_file = args.split_file
if split_file != '' and os.path.isfile(split_file):
    npzfile = np.load(split_file)
    salmap_names = [os.path.join(salmap_folder, x) for x in npzfile['val_imgs']]
    gtsal_names = [os.path.join(fixation_folder, x[:x.find('.')+1]+fxtimg_type) for x in npzfile['val_imgs']]
    fxtpts_names = [os.path.join(fixation_folder, '{}mat'.format(x[:x.find('.')+1])) for x in npzfile['val_imgs']]
else:
    salmap_names = load_allimages_list(salmap_folder)
    gtsal_names = []
    fxtpts_names = []
    for sn in salmap_names:
        file_name = sn.split('/')[-1]
        gtsal_names.append(os.path.join(fixation_folder, '{}{}'.format(file_name[:file_name.find('.')+1], fxtimg_type)))
        fxtpts_names.append(os.path.join(fixation_folder, '{}mat'.format(file_name[:file_name.find('.')+1])))
nss_score, _ = nss.compute_score(salmap_names, fxtpts_names, image_size=args.fxt_size, fxt_field_in_mat=args.fxt_loc_name)
cc_score, _ = cc.compute_score(salmap_names, gtsal_names, image_size=args.fxt_size)
auc_score, _ = auc.compute_score(salmap_names, fxtpts_names, image_size=args.fxt_size, fxt_field_in_mat=args.fxt_loc_name)
with open(args.output, 'a') as f:
    f.write('{:0.4f}, {:0.4f}, {:0.4f}{}\n'.format(
        nss_score, auc_score, cc_score, args.appendix))
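The path rewriting above leans on a small slicing idiom: keep everything up to and including the first dot, then append the new suffix. A one-line sketch:

file_name = "img001.jpg"
print(file_name[:file_name.find('.') + 1] + "mat")  # img001.mat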
| 45.581818
| 122
| 0.717591
| 379
| 2,507
| 4.480211
| 0.248021
| 0.031802
| 0.080094
| 0.058893
| 0.358068
| 0.358068
| 0.233216
| 0.095406
| 0.095406
| 0.095406
| 0
| 0.016735
| 0.118069
| 2,507
| 55
| 123
| 45.581818
| 0.751244
| 0
| 0
| 0.041667
| 0
| 0
| 0.111643
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.145833
| 0
| 0.145833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59af05716663597c09c673680d272fcbf76c4851
| 294
|
py
|
Python
|
Graficos/grafico_barras.py
|
brendacgoncalves97/Graficos
|
250715bf8a0be9b9d39116be396d84512c79d45f
|
[
"MIT"
] | 1
|
2021-07-14T13:33:02.000Z
|
2021-07-14T13:33:02.000Z
|
Graficos/grafico_barras.py
|
brendacgoncalves97/Graficos
|
250715bf8a0be9b9d39116be396d84512c79d45f
|
[
"MIT"
] | null | null | null |
Graficos/grafico_barras.py
|
brendacgoncalves97/Graficos
|
250715bf8a0be9b9d39116be396d84512c79d45f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Import the plotting library
import matplotlib.pyplot as plt
x = [1, 2, 3, 4, 5]
y = [2, 3, 7, 1, 0]
titulo = "Gráfico de barras"
eixoX = "EixoX"
eixoY = "EixoY"
# Labels
plt.title(titulo)
plt.xlabel(eixoX)
plt.ylabel(eixoY)
plt.bar(x, y)
plt.show()
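A hedged variant sketch, reusing the same x and y, for machines without a display (savefig writes the chart to disk instead of opening a window):

plt.bar(x, y)
plt.savefig("grafico_barras.png", dpi=150, bbox_inches="tight")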
| 16.333333
| 32
| 0.602041
| 47
| 294
| 3.765957
| 0.680851
| 0.022599
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048246
| 0.22449
| 294
| 18
| 33
| 16.333333
| 0.72807
| 0.187075
| 0
| 0
| 0
| 0
| 0.123288
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59ba9203063b76fa754fc6f24d65541dacb224e0
| 2,786
|
py
|
Python
|
features/steps/new-providers.py
|
lilydartdev/ppe-inventory
|
aaec9839fe324a3f96255756c15de45853bbb940
|
[
"MIT"
] | 2
|
2020-10-06T11:33:02.000Z
|
2021-10-10T13:10:12.000Z
|
features/steps/new-providers.py
|
foundry4/ppe-inventory
|
1ee782aeec5bd3cd0140480f9bf58396eb11403b
|
[
"MIT"
] | 1
|
2020-04-23T22:19:17.000Z
|
2020-04-23T22:19:17.000Z
|
features/steps/new-providers.py
|
foundry4/ppe-inventory
|
1ee782aeec5bd3cd0140480f9bf58396eb11403b
|
[
"MIT"
] | 3
|
2020-05-26T11:41:40.000Z
|
2020-06-29T08:53:34.000Z
|
from behave import *
from google.cloud import datastore
import os
import uuid
import pandas as pd
@given('site "{site}" exists')
def step_impl(context, site):
    print(f'STEP: Given provider {site} exists')
    context.domain = os.getenv('DOMAIN')
    # Instantiates a client
    datastore_client = datastore.Client()
    context.site_one = site
    provider_key = datastore_client.key('Site', context.site_one)
    datastore_client.delete(provider_key)
    entity = datastore.Entity(key=provider_key)
    entity['site'] = context.site_one
    code = str(uuid.uuid4())
    entity['code'] = code
    context.site_one_link = 'https://' + context.domain + '/register?site=' + site + '&code=' + code
    entity['link'] = context.site_one_link
    datastore_client.put(entity)

@step('site "{site}" does not exist')
def step_impl(context, site):
    print(f'STEP: And site {site} does not exist')
    context.provider_two = site
    # Instantiates a client
    datastore_client = datastore.Client()
    datastore_client.delete(datastore_client.key('Site', site))

@step("both sites are included in the input file")
def step_impl(context):
    context.file = 'features/resources/input-file.xlsx'
    print(f'STEP: And both sites are included in the input file at {context.file}')

@when("the input file is processed")
def step_impl(context):
    print(u'STEP: When the input file is processed')
    context.output_file = 'features/resources/output-file.xlsx'
    command = f'python3 scripts/new-providers/new-providers.py {context.domain} {context.file} {context.output_file}'
    print(command)
    os.system(command)

@then('site "{site}" is updated with the original link')
def step_impl(context, site):
    print(f'STEP: And site {site} is updated with the original link')
    # Instantiates a client
    datastore_client = datastore.Client()
    key = datastore_client.key('Site', site)
    assert key is not None
    entity = datastore_client.get(key)
    assert entity['link'] == context.site_one_link
    print(entity)

@then('site "{site}" is created with a new link')
def step_impl(context, site):
    print(f'STEP: Then {site} is created')
    # Instantiates a client
    datastore_client = datastore.Client()
    key = datastore_client.key('Site', site)
    assert key is not None
    entity = datastore_client.get(key)
    assert entity['link'] == 'https://' + context.domain + '/register?site=' + site + '&code=' + entity['code']
    print(entity)

@step('site "{site}" appears in the output file as "{status}"')
def step_impl(context, site, status):
    print(f'STEP: And site {site} appears in the output file as {status}')
    df = pd.read_excel(context.output_file)
    row = df.loc[df['site'] == site]
    print(row)
    assert row['comment'].values[0] == status
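A sketch of the Site upsert these steps perform, using the same google-cloud-datastore calls (it needs real GCP credentials, so treat it as illustrative):

from google.cloud import datastore
import uuid

client = datastore.Client()
key = client.key('Site', 'example-site')
entity = datastore.Entity(key=key)
entity.update({'site': 'example-site', 'code': str(uuid.uuid4())})
client.put(entity)
print(client.get(key))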
| 34.395062
| 117
| 0.693108
| 390
| 2,786
| 4.85641
| 0.215385
| 0.134636
| 0.099789
| 0.066526
| 0.538015
| 0.491552
| 0.43189
| 0.380148
| 0.253432
| 0.192186
| 0
| 0.001304
| 0.174444
| 2,786
| 80
| 118
| 34.825
| 0.822174
| 0.031228
| 0
| 0.290323
| 0
| 0.016129
| 0.31997
| 0.047513
| 0
| 0
| 0
| 0
| 0.080645
| 1
| 0.112903
| false
| 0
| 0.080645
| 0
| 0.193548
| 0.177419
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59bafbd060c805be29e0312f879c03efc18325bc
| 2,137
|
py
|
Python
|
params.py
|
adarshchbs/disentanglement
|
77e74409cd0220dbfd9e2809688500dcb2ecf5a5
|
[
"MIT"
] | null | null | null |
params.py
|
adarshchbs/disentanglement
|
77e74409cd0220dbfd9e2809688500dcb2ecf5a5
|
[
"MIT"
] | null | null | null |
params.py
|
adarshchbs/disentanglement
|
77e74409cd0220dbfd9e2809688500dcb2ecf5a5
|
[
"MIT"
] | null | null | null |
import os
gpu_flag = False
gpu_name = 'cpu'
x_dim = 2048
num_class = 87
num_query = 5
batch_size = 84
eval_batch_size = 128
glove_dim = 200
pretrain_lr = 1e-4
num_epochs_pretrain = 20
eval_step_pre = 1
fusion_iter_len = 100000
# num_epochs_pretrain = 30
num_epochs_style = 30
num_epochs_fusion = 50
log_step_pre = 60
folder_path = os.path.dirname(os.path.realpath(__file__)) + '/'
path_class_list = folder_path + 'extra/common_class_list.txt'
dir_saved_model = folder_path + 'saved_model_qd/'
dir_saved_feature = folder_path + 'saved_features_qd/'
dir_dataset = folder_path + 'dataset/'
dir_extra = folder_path + 'extra/'
os.makedirs( dir_saved_model, exist_ok = True)
os.makedirs( dir_saved_feature, exist_ok = True )
os.makedirs( dir_extra, exist_ok = True )
path_model_image = dir_saved_model + 'resnet_50_image.pt'
path_model_sketchy = dir_saved_model + 'resnet_50_sketchy.pt'
path_z_encoder_sketchy = dir_saved_model + 'z_encoder_sketch.pt'
path_s_encoder_sketchy = dir_saved_model + 's_encoder_sketch.pt'
path_adv_model_sketchy = dir_saved_model + 'adv_sketch.pt'
path_recon_model_sketchy = dir_saved_model + 'reconstruck_sketch.pt'
path_z_encoder_image = dir_saved_model + 'z_encoder_image.pt'
path_s_encoder_image = dir_saved_model + 's_encoder_image.pt'
path_adv_model_image = dir_saved_model + 'adv_image.pt'
path_recon_model_image = dir_saved_model + 'reconstruck_image.pt'
path_fusion_model = dir_saved_model + 'fusion_model.pt'
path_image_dataset = dir_dataset + 'images/'
path_sketchy_dataset = dir_dataset + 'sketchy/'
path_quickdraw_dataset = dir_dataset + 'quick_draw/'
path_image_features = dir_saved_feature + 'image_features.p'
path_sketchy_features = dir_saved_feature + 'sketchy_features.p'
path_quickdraw_features = dir_saved_feature + 'quick_draw_features.p'
path_image_file_list = dir_extra + 'images_file_list.p'
path_sketchy_file_list = dir_extra + 'sketchy_file_list.p'
path_quickdraw_file_list = dir_extra + 'quick_draw_file_list.p'
path_model = folder_path + 'resnet_50_da.pt'
path_sketch_z_encoder = folder_path + 'sketch_encoder.pt'
path_glove_vector = folder_path + 'glove_vector'
| 29.273973
| 69
| 0.801591
| 342
| 2,137
| 4.473684
| 0.22807
| 0.094118
| 0.110458
| 0.058824
| 0.224837
| 0.031373
| 0
| 0
| 0
| 0
| 0
| 0.021042
| 0.110435
| 2,137
| 72
| 70
| 29.680556
| 0.783798
| 0.011231
| 0
| 0
| 0
| 0
| 0.215538
| 0.043108
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.020833
| 0
| 0.020833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59bbb20f29672cea5fbe599708a44a6f4792d1f5
| 17,567
|
py
|
Python
|
tests/views/view_test_case.py
|
BMeu/Aerarium
|
119946cead727ef68b5ecea339990d982c006391
|
[
"MIT"
] | null | null | null |
tests/views/view_test_case.py
|
BMeu/Aerarium
|
119946cead727ef68b5ecea339990d982c006391
|
[
"MIT"
] | 139
|
2018-12-26T07:54:31.000Z
|
2021-06-01T23:14:45.000Z
|
tests/views/view_test_case.py
|
BMeu/Aerarium
|
119946cead727ef68b5ecea339990d982c006391
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from typing import Any
from typing import Dict
from typing import Optional
from typing import Set
from unittest import TestCase
from flask import abort
from app import create_app
from app import db
from app.configuration import TestConfiguration
from app.userprofile import Permission
from app.userprofile import Role
from app.userprofile import User
class ViewTestCase(TestCase):
"""
This class is a base test case for all view tests, providing helpful methods that are needed in many situations
when testing views.
"""
# region Test Setup
def setUp(self) -> None:
"""
Prepare the test cases.
"""
self.app = create_app(TestConfiguration)
self.client = self.app.test_client()
self.app_context = self.app.app_context()
self.app_context.push()
self.request_context = self.app.test_request_context()
self.request_context.push()
db.create_all()
def tearDown(self) -> None:
"""
Clean up after each test case.
"""
db.session.remove()
db.drop_all()
self.request_context.pop()
self.app_context.pop()
# endregion
# region Route Accessing
def get(self, url: str, expected_status: int = 200, follow_redirects: bool = True) -> str:
"""
Access the given URL via HTTP GET. Assert that the returned status code is the given one.
The status code is checked using `self.assertEqual`.
:param url: The URL to access.
:param expected_status: The status code that should be returned. Defaults to `200`.
:param follow_redirects: Set to `False` if redirects by the route should not be followed. Defaults to
`True`.
:return: The response of accessing the URL as a string.
"""
response = self.client.get(url, follow_redirects=follow_redirects)
data = response.get_data(as_text=True)
self.assertEqual(expected_status, response.status_code, msg='Expected Status Code')
return data
def post(self, url: str, data: Dict[str, Any] = None, expected_status: int = 200, follow_redirects: bool = True) \
-> str:
"""
Access the given URL via HTTP POST, sending the given data. Assert that the returned status code is the
given one.
The status code is checked using `self.assertEqual`.
:param url: The URL to access.
:param data: The data to send in the POST request. Defaults to `dict()`.
:param expected_status: The status code that should be returned. Defaults to `200`.
:param follow_redirects: Set to `False` if redirects by the route should not be followed. Defaults to
`True`.
:return: The response of accessing the URL as a string.
"""
if data is None:
data = dict()
response = self.client.post(url, follow_redirects=follow_redirects, data=data)
data = response.get_data(as_text=True)
self.assertEqual(expected_status, response.status_code, msg='Expected Status Code')
return data
# TODO: Rename assert_allowed_methods().
def check_allowed_methods(self, url: str, allowed_methods: Optional[Set[str]] = None, allow_options: bool = True) \
-> None:
"""
Check if the given URL can be accessed only by the specified methods.
This method will assert that the URL can be accessed by all HTTP methods listed in `allowed_methods` by
checking that a request via each of these methods to the URL does not return a response code of 405.
Likewise, it will test that all methods not listed in `allowed_methods` return a response code of 405.
Flask by default allows 'OPTIONS'. This method follows this behaviour and automatically adds 'OPTIONS' to
the set of allowed methods unless configured otherwise.
Flask also automatically allows 'HEAD' if 'GET' is allowed. This method follows this behaviour and always
adds 'HEAD' to the set of allowed methods if 'GET' is included in the set.
:param url: The URL to check.
:param allowed_methods: A set of all HTTP methods via which the URL can be accessed. If the set is not given
or an empty set of allowed methods is passed, 'GET' will automatically be allowed
to mimic Flask's behaviour. Defaults to `None`.
:param allow_options: If this parameter is set to `True`, 'OPTIONS' will automatically be added to the set
of allowed methods to follow Flask's behaviour. Defaults to `True`.
"""
all_methods = ['DELETE', 'GET', 'HEAD', 'OPTIONS', 'PATCH', 'POST', 'PUT']
# By default, 'GET' is the only allowed method.
if allowed_methods is None or not allowed_methods:
allowed_methods = {'GET'}
# If 'GET' is allowed, Flask also allows 'HEAD' automatically.
if 'GET' in allowed_methods:
allowed_methods.add('HEAD')
# Follow Flask's behaviour and add 'OPTIONS' by default.
if allow_options:
allowed_methods.add('OPTIONS')
prohibited_methods = [method for method in all_methods if method not in allowed_methods]
for allowed_method in allowed_methods:
status_code = self._get_status_code_for_method(url, allowed_method)
self.assertNotEqual(405, status_code, f'{allowed_method} {url} is not allowed, but should be.')
for prohibited_method in prohibited_methods:
status_code = self._get_status_code_for_method(url, prohibited_method)
self.assertEqual(405, status_code, f'{prohibited_method} {url} is allowed, but should not be.')
def _get_status_code_for_method(self, url: str, method: str) -> int:
"""
Access the given URL via the given HTTP method and return the response status code.
:param url: The URL to access.
:param method: The HTTP method used to access the URL.
:return: The HTTP status code that accessing the URL via the method returned.
:raise ValueError: if the HTTP method is invalid.
"""
if method == 'DELETE':
response = self.client.delete(url)
elif method == 'GET':
response = self.client.get(url)
elif method == 'HEAD':
response = self.client.head(url)
elif method == 'OPTIONS':
response = self.client.options(url)
elif method == 'PATCH':
response = self.client.patch(url)
elif method == 'POST':
response = self.client.post(url)
elif method == 'PUT':
response = self.client.put(url)
else:
raise ValueError(f'Invalid HTTP method {method}')
return response.status_code
# endregion
# region Permissions
def assert_no_permission_required(self, url: str, method: str = 'GET') -> None:
"""
Assert that accessing the URL via the given method requires no permission at all and that accessing the URL
with any permission is actually possible.
The test checks for a response code of 403. This can lead to a false positive if a route aborts with an
error 403.
:param url: The URL to access.
:param method: The HTTP method to access the URL by. Defaults to `'GET'`.
"""
allowed_permissions = Permission.get_permissions(include_empty_permission=True, all_combinations=True)
self._assert_permissions(url, allowed_permissions, method)
def assert_permission_required(self, url: str, permission: Permission, method: str = 'GET') -> None:
"""
Assert that accessing the URL via the given method requires the specified permission and that accessing the
URL with the permission is actually possible.
The test checks for a response code of 403. This can lead to a false positive if a route does not require
the specified permission but aborts with an error 403 in some other case.
:param url: The URL to access.
:param permission: The permission that must be required to access the URL.
:param method: The HTTP method to access the URL by. Defaults to `'GET'`.
"""
all_permissions = Permission.get_permissions(include_empty_permission=True, all_combinations=True)
allowed_permissions = {p for p in all_permissions if p.includes_permission(permission)}
self._assert_permissions(url, allowed_permissions, method)
def assert_permission_required_one_of(self, url: str, *permissions: Permission, method: str = 'GET') -> None:
"""
Assert that accessing the URL via the given method requires one of the specified permissions and that
accessing the URL with the permission is actually possible, while accessing the URL with any other
permission fails.
        :param url: The URL to access.
:param permissions: The permissions that must be required to access the URL.
:param method: The HTTP method to access the URL by. Defaults to `'GET'`.
"""
all_permissions = Permission.get_permissions(include_empty_permission=True, all_combinations=True)
        allowed_permissions = set()
for permission in permissions:
allowed_permissions.update({p for p in all_permissions if p.includes_permission(permission)})
self._assert_permissions(url, allowed_permissions, method)
def assert_permission_required_all(self, url: str, *permissions: Permission, method: str = 'GET') -> None:
"""
Assert that accessing the URL via the given method requires all of the specified permissions and that
accessing the URL with the permissions is actually possible.
:param url: The URL to access.
:param permissions: The permissions that must be required to access the URL.
:param method: The HTTP method to access the URL by. Defaults to `'GET'`.
"""
all_permissions = Permission.get_permissions(include_empty_permission=True, all_combinations=True)
        # Combine the required permissions into a single permission; accessing
        # the URL must require all of them at once.
        allowed_permission = Permission(0)
for permission in permissions:
allowed_permission |= permission
allowed_permissions = {p for p in all_permissions if p.includes_permission(allowed_permission)}
self._assert_permissions(url, allowed_permissions, method)
def _assert_permission_grants_access(self, url: str, permission: Permission, method: str = 'GET') -> None:
"""
Assert that the given permission is sufficient to access the given URL.
:param url: The URL to access.
        :param permission: The permission that should be able to access the URL.
:param method: The HTTP method to access the URL by. Defaults to `'GET'`.
"""
# Create and log in a user with the given permission.
role = self.create_role(permission)
user = self.create_and_login_user(role=role)
# Ensure that accessing the URL with the given permission is possible.
status_code = self._get_status_code_for_method(url, method)
self.assertNotEqual(403, status_code,
f'{method} {url} must be accessible with permission {permission}, but it is not.')
# Delete the user and role so that this method can be called multiple times in the same test.
# Since the role might be the only role with permissions to edit roles, we cannot use role.delete() which will
# fail in such a case.
user._delete()
db.session.delete(role)
db.session.commit()
def _assert_permission_does_not_grant_access(self, url: str, permission: Permission, method: str = 'GET') -> None:
"""
Assert that the given permission is not sufficient to access the given URL.
:param url: The URL to access.
        :param permission: The permission that should not be able to access the URL.
:param method: The HTTP method to access the URL by. Defaults to `'GET'`.
"""
# Create and log in a user with the given permission.
role = self.create_role(permission)
user = self.create_and_login_user(role=role)
# Ensure that accessing the URL with the given permission is impossible.
status_code = self._get_status_code_for_method(url, method)
self.assertEqual(403, status_code,
f'{method} {url} must not be accessible with permission {permission}, but it is.')
# Delete the user and role so that this method can be called multiple times in the same test.
# Since the role might be the only role with permissions to edit roles, we cannot use role.delete() which will
# fail in such a case.
user._delete()
db.session.delete(role)
db.session.commit()
def _assert_permissions(self, url: str, allowed_permissions: Set[Permission], method: str) -> None:
"""
Assert that the given URL can be accessed with the allowed permissions, but not with any other permission.
:param url: The URL to access.
        :param allowed_permissions: Set of permissions that must be able to access the URL.
:param method: The HTTP method to access the URL by.
"""
for allowed_permission in allowed_permissions:
self._assert_permission_grants_access(url, allowed_permission, method)
all_permissions = Permission.get_permissions(include_empty_permission=True, all_combinations=True)
prohibited_permissions = {permission for permission in all_permissions if permission not in allowed_permissions}
for prohibited_permission in prohibited_permissions:
self._assert_permission_does_not_grant_access(url, prohibited_permission, method)
# endregion
# region Application Entities
@staticmethod
def create_user(email: str, name: str, password: str, role: Optional[Role] = None) -> User:
"""
Create a user with the given parameters. If a role is given, assign the role to the user. Commit this user
to the DB.
:param email: The email address of the user.
:param name: The name of the user.
:param password: The password of the user.
:param role: The role for the user. Defaults to `None`.
:return: The created user.
"""
user = User(email, name)
user.set_password(password)
if role:
user.role = role
db.session.add(user)
db.session.commit()
return user
def create_and_login_user(self,
email: str = 'doe@example.com',
name: str = 'Jane Doe',
password: str = 'ABC123!',
role: Optional[Role] = None
) -> User:
"""
Create a user with the given parameters and log them in. If a role is given, assign the role to the user.
The user is committed to the DB.
:param email: The email address of the user. Defaults to `'doe@example.com'`.
:param name: The name of the user. Defaults to `'Jane Doe'`.
:param password: The password of the user. Defaults to `'ABC123!'`.
:param role: The role for the user. Defaults to `None`.
:return: The created user.
"""
user = self.create_user(email, name, password, role)
self.client.post('/user/login', follow_redirects=True, data=dict(
email=email,
password=password,
))
return user
@staticmethod
def create_role(*permissions: Permission, name: str = 'Test Role') -> Role:
"""
Create a role with the given permissions.
:param permissions: The permissions of the role.
:param name: The name of the new role. Defaults to `'Test Role'`.
:return: The created role.
"""
role = Role(name)
for permission in permissions:
role.permissions |= permission
db.session.add(role)
db.session.commit()
return role
# endregion
# region Routes
@staticmethod
def aborting_route(code: int) -> None:
"""
A route handler that aborts with the given status code.
:param code: The status code of the HTTP response.
"""
abort(code)
@staticmethod
def example_route() -> str:
"""
A route handler that returns an example string as its response.
:return: 'Hello, world!'
"""
return 'Hello, world!'
# endregion
# region Other Helper Methods
@classmethod
def get_false(cls) -> bool:
"""
Get `False`. Useful for mocking.
This method must be a class method so that it can be used in the patch decorator when mocking another
method.
:return: `False`
"""
return False
# endregion
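

# Usage sketch (not part of the original module): how a concrete test case
# might combine the helpers above. `RoutesTestCase` stands in for the base
# class that actually defines them, and the URL and permission names are
# placeholders rather than the application's real ones.
#
# class RoleRoutesTest(RoutesTestCase):
#
#     def test_role_list_requires_permission(self):
#         self.assert_permission_required('/admin/roles', Permission.EDIT_ROLE)
#
#     def test_login_is_public(self):
#         self.assert_no_permission_required('/user/login', method='GET')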
| 40.383908
| 120
| 0.632322
| 2,239
| 17,567
| 4.866012
| 0.112997
| 0.02313
| 0.016154
| 0.01799
| 0.569252
| 0.527031
| 0.48738
| 0.469298
| 0.461404
| 0.461404
| 0
| 0.004039
| 0.29527
| 17,567
| 434
| 121
| 40.476959
| 0.87601
| 0.430637
| 0
| 0.237179
| 0
| 0
| 0.058414
| 0
| 0
| 0
| 0
| 0.002304
| 0.121795
| 1
| 0.121795
| false
| 0.032051
| 0.076923
| 0
| 0.25641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59bd0619a2a8bf9b935ee21c0cf4d04a4238a3ac
| 1,483
|
py
|
Python
|
packtype/union.py
|
Intuity/packtype
|
bcd74dad8388883ddb4cfde40e1a11a14282dcbd
|
[
"Apache-2.0"
] | 1
|
2021-09-08T21:42:33.000Z
|
2021-09-08T21:42:33.000Z
|
packtype/union.py
|
Intuity/packtype
|
bcd74dad8388883ddb4cfde40e1a11a14282dcbd
|
[
"Apache-2.0"
] | 2
|
2021-12-30T17:43:04.000Z
|
2021-12-30T18:10:14.000Z
|
packtype/union.py
|
Intuity/packtype
|
bcd74dad8388883ddb4cfde40e1a11a14282dcbd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021, Peter Birch, mailto:peter@lightlogic.co.uk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .container import Container
from .scalar import Scalar
from .struct import Struct
class Union(Container):
""" Packed data structure formed of other structures or unions """
def __init__(self, name, fields, desc=None):
""" Initialise union with name and fields
Args:
name : Name of the container
fields: Dictionary of fields
desc : Optional description
"""
# Perform container construction
super().__init__(name, fields, desc=desc, legal=[Scalar, Struct, Union])
# Check all fields are the same width
widths = [x._pt_width for x in self._pt_values()]
assert len(set(widths)) == 1, \
f"Unmatched widths of fields in union {self._pt_name}: {widths}"
# Calculate the width
self._pt_width = next((x._pt_width for x in self._pt_values()))
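

# Standalone sketch of the invariant enforced above (an illustration, not part
# of packtype's public API): every field placed in a union must report the
# same bit width, otherwise the union is ill-formed.
def _check_union_widths(fields):
    """ Return the common width of all fields, or raise if the widths differ """
    widths = {field._pt_width for field in fields.values()}
    assert len(widths) == 1, f"Unmatched widths: {sorted(widths)}"
    return widths.pop()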
| 39.026316
| 80
| 0.688469
| 207
| 1,483
| 4.835749
| 0.550725
| 0.05994
| 0.025974
| 0.031968
| 0.051948
| 0.051948
| 0.051948
| 0.051948
| 0.051948
| 0
| 0
| 0.007874
| 0.229265
| 1,483
| 37
| 81
| 40.081081
| 0.867892
| 0.585974
| 0
| 0
| 0
| 0
| 0.111927
| 0
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.1
| false
| 0
| 0.3
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59bd6a738434b3879975e016eb21f88fd1d0fd13
| 644
|
py
|
Python
|
ExerciseFiles/Ch03/03_07/03_07_Start.py
|
rlwheelwright/PY3_StandardLib
|
0d9acc02f5ca934eab774bbdd5acc3c92eff7191
|
[
"Apache-2.0"
] | null | null | null |
ExerciseFiles/Ch03/03_07/03_07_Start.py
|
rlwheelwright/PY3_StandardLib
|
0d9acc02f5ca934eab774bbdd5acc3c92eff7191
|
[
"Apache-2.0"
] | null | null | null |
ExerciseFiles/Ch03/03_07/03_07_Start.py
|
rlwheelwright/PY3_StandardLib
|
0d9acc02f5ca934eab774bbdd5acc3c92eff7191
|
[
"Apache-2.0"
] | null | null | null |
# Zipfile Module
import zipfile
# Open and List
zip = zipfile.ZipFile('Archive.zip', 'r')
print(zip.namelist()) # Lists everything within zip file
# Metadata in the zip folder
for meta in zip.infolist(): # List of the metadata within zip file
print(meta)
info = zip.getinfo("purchased.txt")
# Access to files in zip folder
print(zip.read("wishlist.txt"))
with zip.open('wishlist.txt') as f: # the context manager closes the file for us
print(f.read())
# Extracting files
zip.extract('purchased.txt') # Extract just the one file
input("Continue? Press any key.")
zip.extractall() # Extracts everything
# Closing the zip
zip.close()
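
# Writing side (a complementary sketch using the same stdlib module):
with zipfile.ZipFile('Backup.zip', 'w', compression=zipfile.ZIP_DEFLATED) as zf:
    zf.write('purchased.txt')                      # add an existing file
    zf.writestr('notes.txt', 'Created by script')  # add a file from a string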
| 24.769231
| 79
| 0.718944
| 97
| 644
| 4.773196
| 0.505155
| 0.071274
| 0.056156
| 0.077754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150621
| 644
| 25
| 80
| 25.76
| 0.846435
| 0.428571
| 0
| 0
| 0
| 0
| 0.241573
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0.307692
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59bee126e5a1aef1b499b08431cc09a3c72eb295
| 4,059
|
py
|
Python
|
code/src/algorithm/algo.py
|
haloship/rec-sys-dynamics
|
886095eca8c71cc2f30d64f0b1da9a0a8f2f37f5
|
[
"MIT"
] | null | null | null |
code/src/algorithm/algo.py
|
haloship/rec-sys-dynamics
|
886095eca8c71cc2f30d64f0b1da9a0a8f2f37f5
|
[
"MIT"
] | null | null | null |
code/src/algorithm/algo.py
|
haloship/rec-sys-dynamics
|
886095eca8c71cc2f30d64f0b1da9a0a8f2f37f5
|
[
"MIT"
] | null | null | null |
"""Recommendation Algorithm Base Class
This module provides a base class for algorithms that use sparse matrices.
The required packages can be found in requirements.txt
"""
import pandas as pd
import numpy as np
from lenskit import batch, topn, util
from lenskit import crossfold as xf
from lenskit.algorithms import Recommender, Predictor, als, basic, user_knn
from lenskit.data import sparse_ratings
from scipy import sparse
from sklearn.metrics.pairwise import cosine_similarity
from abc import ABCMeta, abstractmethod
# Visualizations and debugging
import plotly.graph_objs as go
import logging
class SparseBasedAlgo(Recommender, Predictor, metaclass=ABCMeta):
# def __init__(self):
# pass
@abstractmethod
def __str__(self):
"""Name of the class"""
@abstractmethod
def get_num_users(self):
"""Get the number of users in the recommender system"""
@abstractmethod
def get_num_items(self):
"""Get the number of items in the recommender system"""
@abstractmethod
def fit(self, ratings, **kwargs):
"""Fit the algorithm over the initial dataset
:param ratings: user item ratings in a dataframe
:type ratings: pandas DataFrame
"""
@abstractmethod
def update(self):
"""Refit the algorithm over the internal sparse matrix"""
# Add a user to the ratings matrix
def add_user(self, user_id):
# Check if user_id to be added already exists
try:
            assert user_id not in self.user_index_, \
                "User ID already exists! Not adding anything..."
except AssertionError as e:
print(e)
exit(1)
        # Build a sparse row of zeros with one column per item
tmp_sparse_row = sparse.csr_matrix(np.zeros((1, len(self.item_index_))))
# Vertically stack temporary matrix to original matrix
self.rating_matrix_ = sparse.vstack([self.rating_matrix_, tmp_sparse_row])
# Update user index
self.user_index_ = self.user_index_.append(pd.Index([user_id]))
# Add an item to the ratings matrix
def add_item(self, item_id):
# Check if item_id to be added already exists
try:
            assert item_id not in self.item_index_, "Item ID already exists!"
except AssertionError as e:
print(e)
exit(1)
        # Build a sparse column of zeros with one row per user
        tmp_sparse_col = sparse.csr_matrix(np.zeros((len(self.user_index_), 1)))
        # Horizontally stack the temporary column onto the original matrix
self.rating_matrix_ = sparse.hstack(
[self.rating_matrix_, tmp_sparse_col]
).tocsr()
# Update item index
self.item_index_ = self.item_index_.append(pd.Index([item_id]))
# Add a user-item interaction for existing users and items
def add_interactions(self, user_id, item_id, rating):
# Check if inputs are lists and all input list lengths are equal
assert type(user_id) == list, "Input user_id is not a list"
assert type(item_id) == list, "Input item_id is not a list"
assert type(rating) == list, "Input rating is not a list"
assert (
len(user_id) == len(item_id) == len(rating)
), "Input lists are not of the same length"
# Build a temporary sparse LIL matrix
tmp_ratings = sparse.lil_matrix(self.rating_matrix_.shape)
for i in range(len(user_id)):
# Obtain locations from ID
(user_pos,) = np.where(self.user_index_ == user_id[i])[0]
(item_pos,) = np.where(self.item_index_ == item_id[i])[0]
# If rating does not exist
if self.rating_matrix_[user_pos, item_pos] == 0:
# Fill into temporary sparse matrix
tmp_ratings[user_pos, item_pos] = rating[i]
# Convert temporary LIL to CSR
tmp_ratings = tmp_ratings.tocsr()
# Add temporary CSR to main ratings matrix
self.rating_matrix_ += tmp_ratings
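

# Standalone sketch of the row/column append pattern used by add_user and
# add_item above (illustration only; relies on the scipy/numpy imports at the
# top of this module):
if __name__ == "__main__":
    ratings = sparse.csr_matrix(np.eye(2))          # 2 users x 2 items
    new_row = sparse.csr_matrix(np.zeros((1, 2)))   # empty profile for a new user
    ratings = sparse.vstack([ratings, new_row])     # now 3 x 2
    new_col = sparse.csr_matrix(np.zeros((3, 1)))   # empty column for a new item
    ratings = sparse.hstack([ratings, new_col]).tocsr()
    print(ratings.shape)                            # (3, 3)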
| 32.733871
| 84
| 0.652624
| 551
| 4,059
| 4.637024
| 0.281307
| 0.025832
| 0.043836
| 0.034442
| 0.277104
| 0.192564
| 0.143249
| 0.126027
| 0.100196
| 0.100196
| 0
| 0.002358
| 0.268785
| 4,059
| 124
| 85
| 32.733871
| 0.858491
| 0.301059
| 0
| 0.254237
| 0
| 0
| 0.067729
| 0
| 0
| 0
| 0
| 0
| 0.135593
| 1
| 0.135593
| false
| 0
| 0.186441
| 0
| 0.338983
| 0.033898
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59bf1bf5bc46d061cd8f9152d683ef35b28e4ff5
| 8,871
|
py
|
Python
|
resources/tvdbsimple/user.py
|
sergserg2/script.uptodate.imdb.ratings
|
091cafc2b2249dc757f877136b55fee86083c140
|
[
"Apache-2.0"
] | null | null | null |
resources/tvdbsimple/user.py
|
sergserg2/script.uptodate.imdb.ratings
|
091cafc2b2249dc757f877136b55fee86083c140
|
[
"Apache-2.0"
] | null | null | null |
resources/tvdbsimple/user.py
|
sergserg2/script.uptodate.imdb.ratings
|
091cafc2b2249dc757f877136b55fee86083c140
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
This module implements the User functionality of TheTVDb API.
Allows retrieving, adding and deleting user favorites and ratings.
See [Users API section](https://api.thetvdb.com/swagger#!/Users)
"""
from .base import TVDB
class User(TVDB):
"""
User class to retrieve, add and delete user favorites and ratings.
Requires username and user-key.
"""
_BASE_PATH = 'user'
_URLS = {
'info': '',
'favorites': '/favorites',
'alter_favorite': '/favorites/{id}'
}
def __init__(self, user, key):
"""
Initialize the User class.
        `user` is the username for login. `key` is the user key needed to
        authenticate as the user; you can find it in the
        [account info](http://thetvdb.com/?tab=userinfo) under account identifier.
"""
super(User, self).__init__(user=user, key=key)
self.Ratings = User_Ratings(user, key)
"""
        Allows retrieving, adding and deleting user ratings.
"""
def info(self):
"""
Get the basic user info and set its values to local attributes.
Returns a dict with all the information of the user.
For example
#!python
>>> import tvdbsimple as tvdb
>>> tvdb.KEYS.API_KEY = 'YOUR_API_KEY'
>>> user = tvdb.User('username', 'userkey')
>>> response = user.info()
>>> user.userName
'username'
"""
path = self._get_path('info')
response = self._GET(path)
self._set_attrs_to_values(response)
return response
def favorites(self):
"""
        Get a list of the user's favorite series and
        set it to the `favorites` attribute.
Returns a list of the favorite series ids.
For example
#!python
>>> import tvdbsimple as tvdb
>>> tvdb.KEYS.API_KEY = 'YOUR_API_KEY'
>>> user = tvdb.User('username', 'userkey')
>>> response = user.favorites()
>>> user.favorites[0]
'73545'
"""
path = self._get_path('favorites')
response = self._GET(path)
self._set_attrs_to_values(response)
return self._clean_return(response)
    def _clean_return(self, jsn):
if 'favorites' in jsn:
return jsn['favorites']
return jsn
def add_favorite(self, id):
"""
        Add a series to the user's favorite series by its series id.
`id` is the series id you want to add to favorites.
Returns the updated list of the favorite series ids.
For example
#!python
>>> import tvdbsimple as tvdb
>>> tvdb.KEYS.API_KEY = 'YOUR_API_KEY'
>>> user = tvdb.User('username', 'userkey')
>>> response = user.add_favorite(78804)
>>> response[-1]
'78804'
"""
path = self._get_path('alter_favorite').format(id=id)
return self._clean_return(self._PUT(path))
def delete_favorite(self, id):
"""
        Delete a series from the user's favorite series by its series id.
`id` is the series id you want to delete from favorites.
Returns the updated list of the favorite series ids.
For example
#!python
>>> import tvdbsimple as tvdb
>>> tvdb.KEYS.API_KEY = 'YOUR_API_KEY'
>>> user = tvdb.User('username', 'userkey')
>>> response = user.delete_favorite(78804)
>>> response[-1]
'73545'
"""
path = self._get_path('alter_favorite').format(id=id)
return self._clean_return(self._DELETE(path))
class User_Ratings(TVDB):
"""
    Class that organizes user ratings. Allows retrieving, adding and deleting user ratings.
Requires username and user-key.
"""
_BASE_PATH = 'user/ratings'
_URLS = {
'all': '',
'query': '/query',
'query_params': '/query/params',
'add': '/{itemType}/{itemId}/{itemRating}',
'delete': '/{itemType}/{itemId}'
}
_PAGES = -1
_PAGES_LIST = {}
_FILTERS = {}
def __init__(self, user, key, **kwargs):
"""
Initialize the class.
        `user` is the username for login. `key` is the user key needed to
        authenticate as the user; you can find it in the
        [account info](http://thetvdb.com/?tab=userinfo) under account identifier.
It's possible to provide `itemType` that filters ratings by type.
Can be either 'series', 'episode', or 'banner'.
"""
super(User_Ratings, self).__init__(user=user, key=key)
self._FILTERS = {}
self.update_filters(**kwargs)
def update_filters(self, **kwargs):
"""
Set the filters for the user rating.
It's possible to provide `itemType` that filters ratings by type.
Can be either 'series', 'episode', or 'banner'.
"""
self._FILTERS = kwargs
def query_params(self):
"""
        Get the query parameters allowed for filtering and set them to the `query_params` attribute.
Returns a list of parameters you can set to filters.
"""
path = self._get_id_path('query_params')
response = self._GET(path)
self._set_attrs_to_values({'query_params': response})
return response
def pages(self):
"""
Get the number of rating pages available for filtered ratings of the specific user.
Returns the number of rating pages available with current filters.
"""
if self._PAGES < 0:
self.page(1)
return self._PAGES
def add(self, type, id, rating):
"""
        Add a new rating to the user's ratings.
`type` is the item type of the item you want to rate. Can be either
'series', 'episode', or 'image'.
`id` is the ID of the item that you want to rate.
`rating` is the `integer` rating you want to set.
Returns a list with the new updated rating.
For example
#!python
>>> import tvdbsimple as tvdb
>>> tvdb.KEYS.API_KEY = 'YOUR_API_KEY'
>>> rtn = tvdb.User_Ratings('username', 'userkey')
>>> response = rtn.add('series', 78804, 8)
"""
path = self._get_path('add').format(itemType=type, itemId=id, itemRating=rating)
return self._PUT(path)
def delete(self, type, id):
"""
        Delete an existing user rating.
`type` is the item type of the item rating you want to delete. Can be either
'series', 'episode', or 'image'.
`id` is the ID of the item rating that you want to delete.
Returns an empty dictionary.
For example
#!python
>>> import tvdbsimple as tvdb
>>> tvdb.KEYS.API_KEY = 'YOUR_API_KEY'
>>> rtn = tvdb.User_Ratings('username', 'userkey')
>>> response = rtn.delete('series', 78804)
"""
path = self._get_path('delete').format(itemType=type, itemId=id)
return self._DELETE(path)
def all(self):
"""
        Get the full rating list filtered for the user and add it
        to the `ratings` attribute.
Returns a list of ratings info.
For example
#!python
>>> import tvdbsimple as tvdb
>>> tvdb.KEYS.API_KEY = 'YOUR_API_KEY'
        >>> rtn = tvdb.User_Ratings('phate89', '3EF7CF9BBC8BB430')
>>> response = rtn.all()
>>> rtn.ratings[0]['ratingType']
'episode'
"""
ratings = []
for i in range (1, self.pages()+1):
ratings.extend(self.page(i))
self._set_attrs_to_values({'ratings': ratings})
return ratings
def page(self, page):
"""
Get the rating list for a specific page for the user.
`page` is the rating page number.
        Returns a list of the ratings available in the page.
"""
if page in self._PAGES_LIST:
return self._PAGES_LIST[page]
if self._FILTERS:
path = self._get_path('query')
else:
path = self._get_path('all')
filters = self._FILTERS.copy()
filters['page'] = page
response = self._GET(path, params=filters, cleanJson=False)
if 'links' in response and 'last' in response['links']:
self._PAGES = response['links']['last']
self._PAGES_LIST[page] = response['data']
return response['data']
def __iter__(self):
for i in range (1, self.pages()+1):
yield self.page(i)
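

# Usage sketch (not part of the library; the credentials are placeholders):
#
#   ratings = User_Ratings('username', 'userkey', itemType='series')
#   for page in ratings:          # __iter__ yields one page of ratings at a time
#       for entry in page:
#           print(entry['ratingType'])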
| 29.868687
| 94
| 0.557547
| 1,057
| 8,871
| 4.551561
| 0.14948
| 0.02619
| 0.027437
| 0.024943
| 0.523592
| 0.477863
| 0.45105
| 0.438994
| 0.404282
| 0.360216
| 0
| 0.009627
| 0.332544
| 8,871
| 297
| 95
| 29.868687
| 0.802905
| 0.492053
| 0
| 0.149425
| 0
| 0
| 0.092919
| 0.00986
| 0
| 0
| 0
| 0
| 0
| 1
| 0.172414
| false
| 0
| 0.011494
| 0
| 0.436782
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59bf391cc29d920dbbc64d180ce68aef3842279a
| 14,178
|
py
|
Python
|
LR35902Arch.py
|
Lukas-Dresel/binja-GameBoy_LR35902
|
f4d34b0477c20d353f45e731a2a68ee83e5509e3
|
[
"MIT"
] | null | null | null |
LR35902Arch.py
|
Lukas-Dresel/binja-GameBoy_LR35902
|
f4d34b0477c20d353f45e731a2a68ee83e5509e3
|
[
"MIT"
] | null | null | null |
LR35902Arch.py
|
Lukas-Dresel/binja-GameBoy_LR35902
|
f4d34b0477c20d353f45e731a2a68ee83e5509e3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import re
from binaryninja.log import log_info
from binaryninja.architecture import Architecture
from binaryninja.function import RegisterInfo, InstructionInfo, InstructionTextToken
from binaryninja.enums import InstructionTextTokenType, BranchType, FlagRole, LowLevelILFlagCondition
from . import LR35902IL
from .hardware_documentation import IO_REGISTERS as IO_REGS
from lr35902dis.lr35902 import *
CC_TO_STR = {
CC.ALWAYS:'1', CC.NOT_Z:'nz', CC.Z:'z',
CC.NOT_C:'nc', CC.C:'c'
}
class LR35902(Architecture):
name = 'LR35902'
address_size = 2
default_int_size = 1
instr_alignment = 1
max_instr_length = 4
# register related stuff
regs = {
# main registers
'AF': RegisterInfo('AF', 2),
'BC': RegisterInfo('BC', 2),
'DE': RegisterInfo('DE', 2),
'HL': RegisterInfo('HL', 2),
# main registers (sub)
"A": RegisterInfo("AF", 1, 1),
"F": RegisterInfo("AF", 1, 0),
"B": RegisterInfo("BC", 1, 1),
"C": RegisterInfo("BC", 1, 0),
"D": RegisterInfo("DE", 1, 1),
"E": RegisterInfo("DE", 1, 0),
"H": RegisterInfo("HL", 1, 1),
"L": RegisterInfo("HL", 1, 0),
"Flags": RegisterInfo("AF", 0),
# index registers
'SP': RegisterInfo('SP', 2),
# program counter
'PC': RegisterInfo('PC', 2),
# status
# 'status': RegisterInfo('status', 1)
}
stack_pointer = "SP"
IO_REGISTERS = IO_REGS
#------------------------------------------------------------------------------
# FLAG fun
#------------------------------------------------------------------------------
flags = ['z', 'h', 'n', 'c']
# remember, class None is default/integer
semantic_flag_classes = ['class_bitstuff']
# flag write types and their mappings
flag_write_types = ['dummy', '*', 'c', 'z', 'not_c']
flags_written_by_flag_write_type = {
'dummy': [],
'*': ['z', 'h', 'n', 'c'],
'c': ['c'],
'z': ['z'],
'not_c': ['z', 'h', 'n'] # eg: LR35902's DEC
}
semantic_class_for_flag_write_type = {
# by default, everything is type None (integer)
# '*': 'class_integer',
# 'c': 'class_integer',
# 'z': 'class_integer',
# 'cszpv': 'class_integer',
# 'not_c': 'class_integer'
}
# groups and their mappings
semantic_flag_groups = ['group_e', 'group_ne', 'group_lt']
flags_required_for_semantic_flag_group = {
'group_lt': ['c'],
'group_e': ['z'],
'group_ne': ['z']
}
flag_conditions_for_semantic_flag_group = {
#'group_e': {None: LowLevelILFlagCondition.LLFC_E},
#'group_ne': {None: LowLevelILFlagCondition.LLFC_NE}
}
# roles
flag_roles = {
'z': FlagRole.ZeroFlagRole,
'h': FlagRole.HalfCarryFlagRole,
'n': FlagRole.SpecialFlagRole, # set if last instruction was a subtraction (incl. CP)
'c': FlagRole.CarryFlagRole
}
# MAP (condition x class) -> flags
def get_flags_required_for_flag_condition(self, cond, sem_class):
#LogDebug('incoming cond: %s, incoming sem_class: %s' % (str(cond), str(sem_class)))
if sem_class == None:
lookup = {
# Z, zero flag for == and !=
LowLevelILFlagCondition.LLFC_E: ['z'],
LowLevelILFlagCondition.LLFC_NE: ['z'],
# H, half carry for ???
# P, parity for ???
# s> s>= s< s<= done by sub and overflow test
#if cond == LowLevelILFlagCondition.LLFC_SGT:
#if cond == LowLevelILFlagCondition.LLFC_SGE:
#if cond == LowLevelILFlagCondition.LLFC_SLT:
#if cond == LowLevelILFlagCondition.LLFC_SLE:
# C, for these
LowLevelILFlagCondition.LLFC_UGE: ['c'],
LowLevelILFlagCondition.LLFC_ULT: ['c']
}
if cond in lookup:
return lookup[cond]
return []
#------------------------------------------------------------------------------
# CFG building
#------------------------------------------------------------------------------
def get_instruction_info(self, data, addr):
decoded = decode(data, addr)
# on error, return nothing
if decoded.status == DECODE_STATUS.ERROR or decoded.len == 0:
return None
# on non-branching, return length
result = InstructionInfo()
result.length = decoded.len
if decoded.typ != INSTRTYPE.JUMP_CALL_RETURN:
return result
# jp has several variations
if decoded.op == OP.JP:
(oper_type, oper_val) = decoded.operands[0]
# jp pe,0xDEAD
if oper_type == OPER_TYPE.COND:
assert decoded.operands[1][0] == OPER_TYPE.ADDR
result.add_branch(BranchType.TrueBranch, decoded.operands[1][1])
result.add_branch(BranchType.FalseBranch, addr + decoded.len)
# jp hl
elif oper_type in [OPER_TYPE.REG]:
result.add_branch(BranchType.IndirectBranch)
# jp 0xDEAD
elif oper_type == OPER_TYPE.ADDR:
result.add_branch(BranchType.UnconditionalBranch, oper_val)
else:
print(f"Missed JP handling type: {decoded}")
raise Exception('handling JP')
# jr can be conditional
elif decoded.op == OP.JR:
(oper_type, oper_val) = decoded.operands[0]
# jr c,0xdf07
if oper_type == OPER_TYPE.COND:
assert decoded.operands[1][0] == OPER_TYPE.ADDR
result.add_branch(BranchType.TrueBranch, decoded.operands[1][1])
result.add_branch(BranchType.FalseBranch, addr + decoded.len)
# jr 0xdf07
elif oper_type == OPER_TYPE.ADDR:
result.add_branch(BranchType.UnconditionalBranch, oper_val)
else:
raise Exception('handling JR')
# call can be conditional
elif decoded.op == OP.CALL:
(oper_type, oper_val) = decoded.operands[0]
# call c,0xdf07
if oper_type == OPER_TYPE.COND:
assert decoded.operands[1][0] == OPER_TYPE.ADDR
result.add_branch(BranchType.CallDestination, decoded.operands[1][1])
# call 0xdf07
elif oper_type == OPER_TYPE.ADDR:
result.add_branch(BranchType.CallDestination, oper_val)
else:
raise Exception('handling CALL')
# ret can be conditional
elif decoded.op == OP.RET:
if decoded.operands and decoded.operands[0][0] == OPER_TYPE.COND:
                # conditional returns don't end the block
pass
else:
result.add_branch(BranchType.FunctionReturn)
# ret from interrupts
elif decoded.op == OP.RETI:
result.add_branch(BranchType.FunctionReturn)
return result
#------------------------------------------------------------------------------
# STRING building, disassembly
#------------------------------------------------------------------------------
def reg2str(self, reg):
reg_name = reg.name if isinstance(reg, REG) else reg
# enum AF_ should be returned as AF'
return reg_name if reg_name[-1] != '_' else reg_name[:-1]+"'"
# from api/python/function.py:
#
# TextToken Text that doesn't fit into the other tokens
# InstructionToken The instruction mnemonic
# OperandSeparatorToken The comma or whatever else separates tokens
# RegisterToken Registers
# IntegerToken Integers
# PossibleAddressToken Integers that are likely addresses
# BeginMemoryOperandToken The start of memory operand
# EndMemoryOperandToken The end of a memory operand
# FloatingPointToken Floating point number
def get_instruction_text(self, data, addr):
decoded = decode(data, addr)
if decoded.status != DECODE_STATUS.OK or decoded.len == 0:
return None
result = []
# opcode
result.append(InstructionTextToken( \
InstructionTextTokenType.InstructionToken, decoded.op.name))
# space for operand
if decoded.operands:
result.append(InstructionTextToken(InstructionTextTokenType.TextToken, ' '))
# operands
for i, operand in enumerate(decoded.operands):
(oper_type, oper_val) = operand
if oper_type == OPER_TYPE.REG:
result.append(InstructionTextToken( \
InstructionTextTokenType.RegisterToken, self.reg2str(oper_val)))
elif oper_type == OPER_TYPE.REG_DEREF:
toks = [
(InstructionTextTokenType.BeginMemoryOperandToken, '('),
(InstructionTextTokenType.RegisterToken, self.reg2str(oper_val)),
(InstructionTextTokenType.EndMemoryOperandToken, ')'),
]
result.extend([InstructionTextToken(*ts) for ts in toks])
elif oper_type in {OPER_TYPE.REG_DEREF_DEC, OPER_TYPE.REG_DEREF_INC}:
update = '-' if oper_type == OPER_TYPE.REG_DEREF_DEC else '+'
toks = [
(InstructionTextTokenType.BeginMemoryOperandToken, '('),
(InstructionTextTokenType.RegisterToken, self.reg2str(oper_val)),
(InstructionTextTokenType.TextToken, update),
(InstructionTextTokenType.EndMemoryOperandToken, ')'),
]
result.extend([InstructionTextToken(*ts) for ts in toks])
elif oper_type in {OPER_TYPE.REG_DEREF_FF00}:
toks = [
(InstructionTextTokenType.BeginMemoryOperandToken, '('),
(InstructionTextTokenType.PossibleAddressToken, '0xFF00', 0xFF00),
(InstructionTextTokenType.TextToken, '+'),
(InstructionTextTokenType.RegisterToken, self.reg2str(oper_val)),
(InstructionTextTokenType.EndMemoryOperandToken, ')'),
]
result.extend([InstructionTextToken(*ts) for ts in toks])
elif oper_type == OPER_TYPE.ADDR:
oper_val = oper_val & 0xFFFF
txt = '0x%04x' % oper_val
result.append(InstructionTextToken( \
InstructionTextTokenType.PossibleAddressToken, txt, oper_val))
elif oper_type == OPER_TYPE.ADDR_DEREF:
txt = '0x%04x' % oper_val
toks = [
(InstructionTextTokenType.BeginMemoryOperandToken, '('),
(InstructionTextTokenType.PossibleAddressToken, txt, oper_val),
(InstructionTextTokenType.EndMemoryOperandToken, ')'),
]
result.extend([InstructionTextToken(*ts) for ts in toks])
elif oper_type == OPER_TYPE.ADDR_DEREF_FF00:
val = 0xFF00 + (oper_val & 0xff)
txt = '0x{:04x}'.format(val)
toks = [
(InstructionTextTokenType.BeginMemoryOperandToken, '('),
(InstructionTextTokenType.PossibleAddressToken, txt, val),
(InstructionTextTokenType.EndMemoryOperandToken, ')'),
]
result.extend([InstructionTextToken(*ts) for ts in toks])
elif oper_type == OPER_TYPE.IMM:
if oper_val == 0:
txt = '0'
elif oper_val >= 16:
txt = '0x%x' % oper_val
else:
txt = '%d' % oper_val
result.append(InstructionTextToken( \
InstructionTextTokenType.IntegerToken, txt, oper_val))
elif oper_type == OPER_TYPE.COND:
txt = CC_TO_STR[oper_val]
result.append(InstructionTextToken( \
InstructionTextTokenType.TextToken, txt))
elif oper_type == OPER_TYPE.SP_OFFSET:
offset = '{:+02x}'.format(oper_val)
sign, offset = offset[0], offset[1:]
toks = [
(InstructionTextTokenType.BeginMemoryOperandToken, '('),
(InstructionTextTokenType.RegisterToken, 'SP'),
(InstructionTextTokenType.TextToken, sign),
(InstructionTextTokenType.IntegerToken, offset, abs(oper_val)),
(InstructionTextTokenType.EndMemoryOperandToken, ')'),
]
result.extend([InstructionTextToken(*ts) for ts in toks])
else:
raise Exception('unknown operand type: ' + str(oper_type))
# if this isn't the last operand, add comma
if i < len(decoded.operands)-1:
result.append(InstructionTextToken( \
InstructionTextTokenType.OperandSeparatorToken, ','))
return result, decoded.len
#------------------------------------------------------------------------------
# LIFTING
#------------------------------------------------------------------------------
def get_flag_write_low_level_il(self, op, size, write_type, flag, operands, il):
flag_il = LR35902IL.gen_flag_il(op, size, write_type, flag, operands, il)
if flag_il:
return flag_il
return Architecture.get_flag_write_low_level_il(self, op, size, write_type, flag, operands, il)
def get_instruction_low_level_il(self, data, addr, il):
decoded = decode(data, addr)
if decoded.status != DECODE_STATUS.OK or decoded.len == 0:
return None
LR35902IL.gen_instr_il(addr, decoded, il)
return decoded.len
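

# Binary Ninja architectures are typically registered when the plugin loads,
# usually from the plugin's __init__.py. A sketch of that call (exactly where
# it lives in this plugin is an assumption; this file does not show it):
#
#   LR35902.register()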
| 38.010724
| 103
| 0.544576
| 1,340
| 14,178
| 5.598507
| 0.21194
| 0.049054
| 0.030392
| 0.031991
| 0.464276
| 0.400027
| 0.350706
| 0.280325
| 0.268328
| 0.268328
| 0
| 0.01475
| 0.311398
| 14,178
| 372
| 104
| 38.112903
| 0.753662
| 0.192834
| 0
| 0.331878
| 0
| 0
| 0.029474
| 0
| 0
| 0
| 0.002463
| 0
| 0.0131
| 1
| 0.026201
| false
| 0.004367
| 0.034935
| 0
| 0.19214
| 0.004367
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59c3e17fa8af255a1451fdfdd7e503426c323a8e
| 5,023
|
py
|
Python
|
src/main.py
|
possoj/Mobile-URSONet
|
1db664091f4a0daa2925174a67c21d20a1ed4db3
|
[
"MIT"
] | null | null | null |
src/main.py
|
possoj/Mobile-URSONet
|
1db664091f4a0daa2925174a67c21d20a1ed4db3
|
[
"MIT"
] | null | null | null |
src/main.py
|
possoj/Mobile-URSONet
|
1db664091f4a0daa2925174a67c21d20a1ed4db3
|
[
"MIT"
] | null | null | null |
"""
Copyright (c) 2022 Julien Posso
"""
import torch
import optuna
from config import Config
from pose_net import POSENet
from submission import SubmissionWriter
from print_results import print_training_loss, print_training_score, print_beta_tuning, print_error_distance
import os
import numpy as np
import random
def main():
# SELECT DEVICE AUTOMATICALLY: if available, select the GPU with the most available memory, else select the CPU
if torch.cuda.is_available():
if torch.cuda.device_count() > 1:
# The following branch works with nvidia 470 drivers + cuda 11.4
            # Does not work with nvidia 510 + cuda 11.6: free memory is replaced with reserved memory
# get the GPU id with max memory available
os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >../tmp')
memory_available = [int(x.split()[2]) for x in open('../tmp', 'r').readlines()]
os.remove('../tmp') # remove the temporary file
gpu_id = np.argmax(memory_available)
device = torch.device(f"cuda:{gpu_id}")
print(f"Device used: {device} with {memory_available[gpu_id]} MB memory available")
else:
device = torch.device("cuda")
else:
device = torch.device("cpu")
print("Device used:", device)
# Create config and Pose estimation class
config = Config(device)
pose_estimation = POSENet(config)
# Set manual seeds for reproducibility. See https://pytorch.org/docs/stable/notes/randomness.html#reproducibility
torch.manual_seed(config.SEED)
random.seed(config.SEED) # Python random module.
np.random.seed(config.SEED) # Numpy module.
torch.use_deterministic_algorithms(True)
if torch.cuda.is_available():
torch.cuda.manual_seed(config.SEED)
torch.cuda.manual_seed_all(config.SEED) # if you are using multi-GPU.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"
# Count number of model parameters
pytorch_total_params = pose_estimation.get_n_params()
print(f"Number of trainable parameters in the model :{pytorch_total_params:,}")
# Submissions on ESA website
sub = SubmissionWriter()
if config.HPARAM_TUNING:
print("hyperparameter tuning")
sampler = optuna.samplers.TPESampler(seed=config.SEED) # Make the sampler behave in a deterministic way.
study = optuna.create_study(sampler=sampler, direction="minimize")
print(f"Sampler is {study.sampler.__class__.__name__}")
study.optimize(pose_estimation.objective, n_trials=config.N_TRIALS)
data = study.trials_dataframe(("number", "value", "intermediate_values", "datetime_start", "datetime_complete",
"duration", "params", "user_attrs", "system_attrs", "state"))
data.to_csv('../optuna_tuning/hyperparameter_tuning_result.csv', encoding='utf-8')
pruned_trials = [t for t in study.trials if t.state == optuna.structs.TrialState.PRUNED]
complete_trials = [t for t in study.trials if t.state == optuna.structs.TrialState.COMPLETE]
print("Study statistics: ")
print(" Number of finished trials: ", len(study.trials))
print(" Number of pruned trials: ", len(pruned_trials))
print(" Number of complete trials: ", len(complete_trials))
print("Best trial:")
trial = study.best_trial
print(" Value: ", trial.value)
print(" Params: ")
for key, value in trial.params.items():
print(" {}: {}".format(key, value))
if config.TRAINING:
print("Training...")
model, loss, score = pose_estimation.train()
# Save model
model.cpu()
torch.save(model.state_dict(), config.MODEL_PATH)
# Print training
print_training_loss(loss, show=False, save=True)
print_training_score(score, show=False, save=True)
else:
# If not training, try to load the model from config.MODEL_PATH instead of saving the model to config.MODEL_PATH
model = pose_estimation.get_model()
model.load_state_dict(torch.load(config.MODEL_PATH))
# Move model to GPU if needed and prepare it for evaluation
model.to(config.DEVICE)
model.eval()
if config.EVALUATION:
print("Evaluation on valid and real set")
pose_estimation.evaluate('valid')
pose_estimation.evaluate('real')
if config.EVAL_SUBMIT:
print("Evaluation for submission...")
pose_estimation.evaluate_submit(sub)
sub.export(out_dir='../submissions/', suffix="pytorch")
sub.reset()
if config.EVAL_DISTANCE:
print("Evaluate by distance...")
ori_err, pos_err, distance = pose_estimation.eval_error_distance()
print_error_distance(ori_err, pos_err, distance, show=False, save=True)
print("The end!")
if __name__ == '__main__':
main()
| 39.865079
| 120
| 0.665539
| 639
| 5,023
| 5.073552
| 0.334898
| 0.043183
| 0.021592
| 0.015731
| 0.078347
| 0.051203
| 0.03393
| 0.03393
| 0.03393
| 0.03393
| 0
| 0.006433
| 0.226359
| 5,023
| 125
| 121
| 40.184
| 0.827844
| 0.174398
| 0
| 0.057471
| 0
| 0
| 0.190927
| 0.038088
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011494
| false
| 0
| 0.103448
| 0
| 0.114943
| 0.252874
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59c55dd1d2d8290c9a483ce4efd76296c72441ee
| 1,482
|
py
|
Python
|
run_time/src/gae_server/third_party/old-fonttools-master/Lib/fontTools/misc/fixedTools.py
|
moyogo/tachyfont
|
05c8b3e7357e7a13af37ef81b719a0ff749105a5
|
[
"Apache-2.0"
] | 2
|
2019-05-24T18:19:18.000Z
|
2020-09-17T10:23:13.000Z
|
run_time/src/gae_server/third_party/old-fonttools-master/Lib/fontTools/misc/fixedTools.py
|
moyogo/tachyfont
|
05c8b3e7357e7a13af37ef81b719a0ff749105a5
|
[
"Apache-2.0"
] | 9
|
2019-06-15T21:31:27.000Z
|
2021-05-08T18:55:51.000Z
|
run_time/src/gae_server/third_party/old-fonttools-master/Lib/fontTools/misc/fixedTools.py
|
moyogo/tachyfont
|
05c8b3e7357e7a13af37ef81b719a0ff749105a5
|
[
"Apache-2.0"
] | null | null | null |
"""fontTools.misc.fixedTools.py -- tools for working with fixed numbers.
"""
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
__all__ = [
"fixedToFloat",
"floatToFixed",
]
def fixedToFloat(value, precisionBits):
"""Converts a fixed-point number to a float, choosing the float
    that has the shortest decimal representation. E.g. to convert a
fixed number in a 2.14 format, use precisionBits=14. This is
pretty slow compared to a simple division. Use sporadically.
>>> fixedToFloat(13107, 14)
0.8
>>> fixedToFloat(0, 14)
0.0
>>> fixedToFloat(0x4000, 14)
1.0
"""
if not value: return 0.0
scale = 1 << precisionBits
value /= scale
eps = .5 / scale
digits = (precisionBits + 2) // 3
fmt = "%%.%df" % digits
lo = fmt % (value - eps)
hi = fmt % (value + eps)
out = []
length = min(len(lo), len(hi))
for i in range(length):
if lo[i] != hi[i]:
            break
out.append(lo[i])
outlen = len(out)
if outlen < length:
out.append(max(lo[outlen], hi[outlen]))
return float(strjoin(out))
def floatToFixed(value, precisionBits):
"""Converts a float to a fixed-point number given the number of
precisionBits. Ie. int(round(value * (1<<precisionBits))).
>>> floatToFixed(0.8, 14)
13107
>>> floatToFixed(1.0, 14)
16384
>>> floatToFixed(1, 14)
16384
>>> floatToFixed(0, 14)
0
"""
return int(round(value * (1<<precisionBits)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22.454545
| 72
| 0.669366
| 209
| 1,482
| 4.660287
| 0.425837
| 0.01848
| 0.053388
| 0.055441
| 0.055441
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056432
| 0.18691
| 1,482
| 65
| 73
| 22.8
| 0.751867
| 0.439946
| 0
| 0
| 0
| 0
| 0.048101
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.1
| 0
| 0.233333
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59c911d5e0c8485540b7cc8e8e2d8d57369a43f1
| 725
|
py
|
Python
|
Class3/selenium_waits.py
|
techsparksguru/python_ci_automation
|
65e66266fdf2c14f593c6f098a23770621faef41
|
[
"MIT"
] | null | null | null |
Class3/selenium_waits.py
|
techsparksguru/python_ci_automation
|
65e66266fdf2c14f593c6f098a23770621faef41
|
[
"MIT"
] | 9
|
2020-02-13T09:14:12.000Z
|
2022-01-13T03:17:03.000Z
|
Class3/selenium_waits.py
|
techsparksguru/python_ci_automation
|
65e66266fdf2c14f593c6f098a23770621faef41
|
[
"MIT"
] | 1
|
2021-03-10T03:27:37.000Z
|
2021-03-10T03:27:37.000Z
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# import selenium exceptions module
from selenium.common.exceptions import *
browser = webdriver.Chrome()
browser.get("http://www.seleniumframework.com/python-course/")
browser.maximize_window()
browser.implicitly_wait(10)
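# Note: mixing an implicit wait (above) with explicit WebDriverWait calls
# (below) is generally discouraged, since it can make total wait times
# unpredictable; both are shown here purely for demonstration.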
try:
element = WebDriverWait(browser, 10).until(
EC.presence_of_all_elements_located((By.TAG_NAME, "randomtag"))
)
except TimeoutException:
print("Encounted timeout while waiting for the element")
else:
print("Successfully located all the elements by tag name")
browser.quit()
| 30.208333
| 71
| 0.787586
| 92
| 725
| 6.119565
| 0.565217
| 0.106572
| 0.111901
| 0.099467
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006289
| 0.122759
| 725
| 24
| 72
| 30.208333
| 0.878931
| 0.045517
| 0
| 0
| 0
| 0
| 0.219971
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.277778
| 0
| 0.277778
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59cb2e8f401ebf77f3a739d719001e9eb7a93754
| 1,670
|
py
|
Python
|
stacker/lookups/cognito-user-pool-app-client-secret-lookup.py
|
pataraco/hart_challenge
|
47872a17b5ade54620df92a27d0ece2a8dfa6f07
|
[
"MIT"
] | null | null | null |
stacker/lookups/cognito-user-pool-app-client-secret-lookup.py
|
pataraco/hart_challenge
|
47872a17b5ade54620df92a27d0ece2a8dfa6f07
|
[
"MIT"
] | 2
|
2020-04-15T16:39:18.000Z
|
2021-05-11T15:24:56.000Z
|
stacker/lookups/cognito-user-pool-app-client-secret-lookup.py
|
pataraco/scripts
|
14ac6e10369ad3cc56eb7ce45adc87acd8935b60
|
[
"MIT"
] | 1
|
2017-05-28T10:45:14.000Z
|
2017-05-28T10:45:14.000Z
|
"""Stacker custom lookup to get a Cognito User Pool App Client Secret."""
import logging
from stacker.session_cache import get_session
TYPE_NAME = 'CognitoUserPoolAppClientSecret'
LOGGER = logging.getLogger(__name__)
def handler(value, provider, **kwargs): # pylint: disable=W0613
""" Lookup a Cognito User Pool App Client secret by UserPoolId::AppClientId.
Need to specify the Cognito User Pool ID and App Client ID
Region is obtained from the environment file
[in the environment file]:
region: us-west-2
For example:
[in the stacker yaml (configuration) file]:
lookups:
        CognitoUserPoolAppClientSecret: lookups.cognito-user-pool-app-client-secret-lookup.handler
stacks:
variables:
AppClientSecret: ${CognitoUserPoolAppClientSecret ${user-pool-id}::${app-client-id}}
"""
user_pool_id = value.split('::')[0]
app_client_id = value.split('::')[1]
session = get_session(provider.region)
cognito_client = session.client('cognito-idp')
try:
desc_user_pool_client_output = cognito_client.describe_user_pool_client(
ClientId=app_client_id,
UserPoolId=user_pool_id)
except Exception as e:
LOGGER.error('could not describe user pool client: %s', e)
return 'error: could not describe user pool client'
secret = desc_user_pool_client_output['UserPoolClient'].get('ClientSecret')
if secret:
LOGGER.debug('found user pool app client secret')
return secret
else:
LOGGER.debug('did not find user pool app client secret')
return 'not found'
| 32.745098
| 94
| 0.673653
| 202
| 1,670
| 5.425743
| 0.415842
| 0.094891
| 0.063869
| 0.062044
| 0.217153
| 0.173358
| 0.120438
| 0
| 0
| 0
| 0
| 0.005499
| 0.237725
| 1,670
| 50
| 95
| 33.4
| 0.85546
| 0.356287
| 0
| 0
| 0
| 0
| 0.230769
| 0.029586
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.086957
| 0
| 0.26087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59cbb8472e1057ce2bd992ed7975f6ff17e93e2b
| 4,202
|
py
|
Python
|
src/year2018/day07a.py
|
lancelote/advent_of_code
|
06dda6ca034bc1e86addee7798bb9b2a34ff565b
|
[
"Unlicense"
] | 10
|
2017-12-11T17:54:52.000Z
|
2021-12-09T20:16:30.000Z
|
src/year2018/day07a.py
|
lancelote/advent_of_code
|
06dda6ca034bc1e86addee7798bb9b2a34ff565b
|
[
"Unlicense"
] | 260
|
2015-12-09T11:03:03.000Z
|
2021-12-12T14:32:23.000Z
|
src/year2018/day07a.py
|
lancelote/advent_of_code
|
06dda6ca034bc1e86addee7798bb9b2a34ff565b
|
[
"Unlicense"
] | null | null | null |
r"""2018 - Day 7 Part 1: The Sum of Its Parts.
You find yourself standing on a snow-covered coastline; apparently, you landed
a little off course. The region is too hilly to see the North Pole from here,
but you do spot some Elves that seem to be trying to unpack something that
washed ashore. It's quite cold out, so you decide to risk creating a paradox by
asking them for directions.
"Oh, are you the search party?" Somehow, you can understand whatever Elves from
the year 1018 speak; you assume it's Ancient Nordic Elvish. Could the device on
your wrist also be a translator? "Those clothes don't look very warm; take
this." They hand you a heavy coat.
"We do need to find our way back to the North Pole, but we have higher
priorities at the moment. You see, believe it or not, this box contains
something that will solve all of Santa's transportation problems - at least,
that's what it looks like from the pictures in the instructions." It doesn't
seem like they can read whatever language it's in, but you can: "Sleigh kit.
Some assembly required."
"'Sleigh'? What a wonderful name! You must help us assemble this 'sleigh' at
once!" They start excitedly pulling more parts out of the box.
The instructions specify a series of steps and requirements about which steps
must be finished before others can begin (your puzzle input). Each step is
designated by a single letter. For example, suppose you have the following
instructions:
Step C must be finished before step A can begin.
Step C must be finished before step F can begin.
Step A must be finished before step B can begin.
Step A must be finished before step D can begin.
Step B must be finished before step E can begin.
Step D must be finished before step E can begin.
Step F must be finished before step E can begin.
Visually, these requirements look like this:
  -->A--->B--
 /    \      \
C      -->D----->E
 \           /
  ---->F-----
Your first goal is to determine the order in which the steps should be
completed. If more than one step is ready, choose the step which is first
alphabetically. In this example, the steps would be completed as follows:
Only C is available, and so it is done first.
Next, both A and F are available. A is first alphabetically, so it is done
next.
Then, even though F was available earlier, steps B and D are now also
available, and B is the first alphabetically of the three.
After that, only D and F are available. E is not available because only
some of its prerequisites are complete. Therefore, D is completed next.
F is the only choice, so it is done next.
Finally, E is completed.
So, in this example, the correct order is CABDFE.
In what order should the steps in your instructions be completed?
"""
from collections import defaultdict
from string import ascii_uppercase
from typing import DefaultDict
from typing import Generator
from typing import Optional
from typing import Set
Step = str
Parents = DefaultDict[Step, Set[Step]]
StepGenerator = Generator[Step, None, None]
def process_date(data: str) -> Parents:
"""Generate a dict of step: parents from raw data."""
parents: Parents = defaultdict(set)
for line in data.strip().split("\n"):
parent, child = line[5], line[36]
parents[child].add(parent)
return parents
def next_step(
parents: Parents, done: Set[Step], todo: Set[Step]
) -> Optional[Step]:
"""Get next available step to take."""
ready = set()
for step in todo:
if parents[step].issubset(done):
ready.add(step)
return min(ready) if ready else None
def ordered_steps(parents: Parents, steps: str) -> StepGenerator:
"""Yield next available step in the correct order."""
done: Set[Step] = set()
todo: Set[Step] = set(steps)
while todo:
new_step = next_step(parents, done, todo)
if new_step:
yield new_step
done.add(new_step)
todo.remove(new_step)
def solve(task: str, steps=ascii_uppercase) -> str:
"""Find the sequence of steps."""
parents = process_date(task)
return "".join(ordered_steps(parents, steps))
| 36.859649
| 79
| 0.706568
| 675
| 4,202
| 4.379259
| 0.371852
| 0.016238
| 0.037889
| 0.054127
| 0.090325
| 0.080853
| 0.080853
| 0.061231
| 0.050068
| 0
| 0
| 0.003984
| 0.223465
| 4,202
| 113
| 80
| 37.185841
| 0.901931
| 0.705616
| 0
| 0
| 0
| 0
| 0.001653
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.166667
| 0
| 0.361111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59cffb75f77e318a5d2a6eaf36504527fb164588
| 12,140
|
py
|
Python
|
functions_utils.py
|
b-mu/kbfgs_neurips2020_public
|
f9e8300211dee764e0a669d50a7176f83a28034a
|
[
"MIT"
] | null | null | null |
functions_utils.py
|
b-mu/kbfgs_neurips2020_public
|
f9e8300211dee764e0a669d50a7176f83a28034a
|
[
"MIT"
] | null | null | null |
functions_utils.py
|
b-mu/kbfgs_neurips2020_public
|
f9e8300211dee764e0a669d50a7176f83a28034a
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import scipy
import copy
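
# Convention used throughout this module: parameters and gradients are
# represented as a list over layers, where each element is a dict with keys
# 'W' (weight matrix) and 'b' (bias vector); see get_zero_torch for the
# canonical construction.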
def get_loss_from_z(model, z, t, reduction):
if model.name_loss == 'multi-class classification':
criterion = torch.nn.CrossEntropyLoss()
loss = criterion(z, t.type(torch.LongTensor).to(z.device))
elif model.name_loss == 'binary classification':
loss = torch.nn.BCEWithLogitsLoss(reduction = reduction)(z, t.float().unsqueeze_(1))
if reduction == 'none':
loss = loss.squeeze(1)
elif model.name_loss == 'logistic-regression':
if reduction == 'none':
loss = torch.nn.BCEWithLogitsLoss(reduction = reduction)(z, t.float())
loss = torch.sum(loss, dim=1)
elif reduction == 'mean':
loss = torch.nn.BCEWithLogitsLoss(reduction = 'sum')(z, t.float())
loss = loss / z.size(0) / z.size(1)
elif reduction == 'sum':
loss = torch.nn.BCEWithLogitsLoss(reduction = reduction)(z, t.float())
elif model.name_loss == 'logistic-regression-sum-loss':
if reduction == 'none':
loss = torch.nn.BCEWithLogitsLoss(reduction = reduction)(z, t.float())
loss = torch.sum(loss, dim=1)
elif reduction == 'mean':
loss = torch.nn.BCEWithLogitsLoss(reduction = 'sum')(z, t.float())
loss = loss / z.size(0)
elif reduction == 'sum':
loss = torch.nn.BCEWithLogitsLoss(reduction = reduction)(z, t.float())
elif model.name_loss == 'linear-regression-half-MSE':
if reduction == 'mean':
loss = torch.nn.MSELoss(reduction = 'sum')(z, t) / 2
loss = loss / z.size(0)
elif reduction == 'none':
loss = torch.nn.MSELoss(reduction = 'none')(z, t) / 2
loss = torch.sum(loss, dim=1)
elif model.name_loss == 'linear-regression':
if reduction == 'mean':
loss = torch.nn.MSELoss(reduction = 'sum')(z, t)
loss = loss / z.size(0)
elif reduction == 'none':
loss = torch.nn.MSELoss(reduction = 'none')(z, t)
loss = torch.sum(loss, dim=1)
return loss
def get_zero_torch(params):
layers_params = params['layers_params']
device = params['device']
delta = []
for l in range(len(layers_params)):
delta_l = {}
delta_l['W'] = torch.zeros(layers_params[l]['output_size'], layers_params[l]['input_size'], device=device)
delta_l['b'] = torch.zeros(layers_params[l]['output_size'], device=device)
delta.append(delta_l)
return delta
def get_subtract(model_grad, delta, params):
diff_p = get_zero(params)
for l in range(params['numlayers']):
for key in diff_p[l]:
diff_p[l][key] = np.subtract(model_grad[l][key], delta[l][key])
return diff_p
def get_subtract_torch(model_grad, delta):
diff_p = []
for l in range(len(model_grad)):
diff_p_l = {}
for key in model_grad[l]:
diff_p_l[key] = torch.sub(model_grad[l][key], delta[l][key])
diff_p.append(diff_p_l)
return diff_p
def get_plus(model_grad, delta):
sum_p = []
for l in range(len(model_grad)):
sum_p_l = {}
for key in model_grad[l]:
sum_p_l[key] = np.add(model_grad[l][key], delta[l][key])
sum_p.append(sum_p_l)
return sum_p
def get_plus_torch(model_grad, delta):
sum_p = []
for l in range(len(model_grad)):
sum_p_l = {}
for key in model_grad[l]:
sum_p_l[key] = model_grad[l][key] + delta[l][key]
sum_p.append(sum_p_l)
return sum_p
def get_if_nan(p):
    # True if any entry in any layer contains a NaN
    for l in range(len(p)):
        for key in p[l]:
            if torch.isnan(p[l][key]).any():
                return True
    return False
def get_torch_tensor(p, params):
p_torch = []
for l in range(len(p)):
p_torch_l = {}
for key in p[l]:
p_torch_l[key] = torch.from_numpy(p[l][key]).to(params['device'])
p_torch.append(p_torch_l)
return p_torch
def get_plus_scalar(alpha, model_grad):
sum_p = []
for l in range(len(model_grad)):
sum_p_l = {}
for key in model_grad[l]:
sum_p_l[key] = model_grad[l][key] + alpha
sum_p.append(sum_p_l)
return sum_p
def get_multiply_scalar(alpha, delta):
alpha_p = []
for l in range(len(delta)):
alpha_p_l = {}
for key in delta[l]:
alpha_p_l[key] = alpha * delta[l][key]
alpha_p.append(alpha_p_l)
return alpha_p
def get_multiply_scalar_no_grad(alpha, delta):
alpha_p = []
for l in range(len(delta)):
alpha_p_l = {}
for key in delta[l]:
alpha_p_l[key] = alpha * delta[l][key].data
alpha_p.append(alpha_p_l)
return alpha_p
def get_multiply_scalar_blockwise(alpha, delta, params):
alpha_p = []
for l in range(params['numlayers']):
alpha_p_l = {}
for key in delta[l]:
alpha_p_l[key] = alpha[l] * delta[l][key]
alpha_p.append(alpha_p_l)
return alpha_p
def get_multiply_torch(alpha, delta):
alpha_p = []
for l in range(len(delta)):
alpha_p_l = {}
for key in delta[l]:
alpha_p_l[key] = torch.mul(alpha[l][key], delta[l][key])
alpha_p.append(alpha_p_l)
return alpha_p
def get_multiply(alpha, delta):
alpha_p = []
for l in range(len(delta)):
alpha_p_l = {}
for key in delta[l]:
alpha_p_l[key] = np.multiply(alpha[l][key], delta[l][key])
alpha_p.append(alpha_p_l)
return alpha_p
def get_weighted_sum_batch(hat_v, batch_grads_test, params):
    # weighted sum of per-example gradients with weights hat_v (shape [N]);
    # indexed assuming get_zero returns the layer-list-of-{'W', 'b'}-dicts
    # layout of get_zero_torch, matching its usage in get_subtract above
    alpha_p = get_zero(params)
    for l in range(params['numlayers']):
        alpha_p[l]['W'] = np.sum(hat_v[:, None, None] * batch_grads_test['W'][l], axis=0)
        alpha_p[l]['b'] = np.sum(hat_v[:, None] * batch_grads_test['b'][l], axis=0)
    return alpha_p
def get_opposite(delta):
numlayers = len(delta)
p = []
for l in range(numlayers):
p_l = {}
for key in delta[l]:
p_l[key] = -delta[l][key]
p.append(p_l)
return p
def get_model_grad(model, params):
model_grad_torch = []
for l in range(model.numlayers):
model_grad_torch_l = {}
for key in model.layers_weight[l]:
model_grad_torch_l[key] = copy.deepcopy(model.layers_weight[l][key].grad)
model_grad_torch.append(model_grad_torch_l)
return model_grad_torch
def get_regularized_loss_and_acc_from_x_whole_dataset(model, x, t, reduction, params):
    # Evaluate regularized loss and accuracy over the whole dataset in
    # minibatches of size N1; a trailing remainder of fewer than N1
    # examples is skipped, and the per-batch results are averaged.
    N1 = params['N1']
    N1 = np.minimum(N1, len(x))
    i = 0
    device = params['device']
    list_loss = []
    list_acc = []
    model.eval()
    while i + N1 <= len(x):
        # with torch.no_grad():
        z, test_a, test_h = model.forward(torch.from_numpy(x[i: i+N1]).to(device))
        torch_t_mb = torch.from_numpy(t[i: i+N1]).to(params['device'])
        list_loss.append(
            get_regularized_loss_from_z(model, z, torch_t_mb,
                                        reduction, params['tau']).item())
        list_acc.append(
            get_acc_from_z(model, params, z, torch_t_mb))
        i += N1
    model.train()
    return sum(list_loss) / len(list_loss), sum(list_acc) / len(list_acc)
def get_regularized_loss_from_z(model, z, t, reduction, tau):
    loss = get_loss_from_z(model, z, t, reduction)
    # add the L2 regularizer 0.5 * tau * ||theta||^2, computed as the dot
    # product of the weights with themselves
    loss += 0.5 * tau *\
        get_dot_product_torch(model.layers_weight, model.layers_weight)
    return loss
def get_if_stop(args, i, iter_per_epoch, timesCPU):
    # stop either after a fixed number of epochs or after a CPU-time
    # budget, depending on args['if_max_epoch']
    if args['if_max_epoch']:
        return i >= int(args['max_epoch/time'] * iter_per_epoch)
    else:
        return timesCPU[-1] >= args['max_epoch/time']
def get_square(delta_1):
    numlayers = len(delta_1)
    square_p = []
    for l in range(numlayers):
        square_p_l = {}
        for key in delta_1[l]:
            square_p_l[key] = np.square(delta_1[l][key])
        square_p.append(square_p_l)
    return square_p
def get_square_torch(delta_1):
    numlayers = len(delta_1)
    square_p = []
    for l in range(numlayers):
        square_p_l = {}
        for key in delta_1[l]:
            square_p_l[key] = torch.mul(delta_1[l][key], delta_1[l][key])
        square_p.append(square_p_l)
    return square_p
def get_sqrt(delta_1):
    sqrt_p = []
    for l in range(len(delta_1)):
        sqrt_p_l = {}
        for key in delta_1[l]:
            sqrt_p_l[key] = np.sqrt(delta_1[l][key])
        sqrt_p.append(sqrt_p_l)
    return sqrt_p
def get_sqrt_torch(delta_1):
    sqrt_p = []
    for l in range(len(delta_1)):
        sqrt_p_l = {}
        for key in delta_1[l]:
            sqrt_p_l[key] = torch.sqrt(delta_1[l][key])
        sqrt_p.append(sqrt_p_l)
    return sqrt_p
def get_max_with_0(delta_1):
    # elementwise max(x, 0), i.e. a ReLU applied blockwise
    max_p = []
    for l in range(len(delta_1)):
        max_p_l = {}
        for key in delta_1[l]:
            max_p_l[key] = F.relu(delta_1[l][key])
        max_p.append(max_p_l)
    return max_p
def get_divide(delta_1, delta_2):
    numlayers = len(delta_1)
    div_p = []
    for l in range(numlayers):
        div_p_l = {}
        for key in delta_1[l]:
            div_p_l[key] = np.divide(delta_1[l][key], delta_2[l][key])
        div_p.append(div_p_l)
    return div_p
def get_divide_torch(delta_1, delta_2):
    numlayers = len(delta_1)
    div_p = []
    for l in range(numlayers):
        div_p_l = {}
        for key in delta_1[l]:
            div_p_l[key] = torch.div(delta_1[l][key], delta_2[l][key])
        div_p.append(div_p_l)
    return div_p
def get_dot_product_torch(delta_1, delta_2):
dot_product = 0
for l in range(len(delta_1)):
for key in delta_1[l]:
dot_product += torch.sum(torch.mul(delta_1[l][key], delta_2[l][key]))
return dot_product
def get_dot_product_blockwise_torch(delta_1, delta_2):
dot_product = []
for l in range(len(delta_1)):
dot_product_l = 0
for key in delta_1[l]:
dot_product_l += torch.sum(torch.mul(delta_1[l][key], delta_2[l][key]))
dot_product.append(dot_product_l)
return dot_product
def get_dot_product_batch(model_grad, batch_grads_test, params):
    # per-example dot products between model_grad and each gradient in the
    # batch; returns an array of length N (the batch size)
    dot_product = np.zeros(len(batch_grads_test['W'][0]))
    for l in range(params['numlayers']):
        dot_product += np.sum(
            np.sum(np.multiply(model_grad['W'][l][None, :], batch_grads_test['W'][l]), axis=-1), axis=-1)
        dot_product += np.sum(np.multiply(model_grad['b'][l][None, :], batch_grads_test['b'][l]), axis=-1)
    return dot_product
def get_acc_from_z(model, params, z, torch_t):
    if model.name_loss == 'multi-class classification':
        y = z.argmax(dim=1)
        acc = torch.mean((y == torch_t).float())
    elif model.name_loss == 'binary classification':
        z_1 = torch.sigmoid(z)
        y = (z_1 > 0.5)
        y = y[:, 0]
        # compare predictions against the targets in numpy
        acc = np.mean(y.cpu().data.numpy() == torch_t.cpu().data.numpy())
    elif model.name_loss in ['logistic-regression',
                             'logistic-regression-sum-loss']:
        # for the regression-style losses, "acc" is really a mean squared error
        z_sigmoid = torch.sigmoid(z)
        criterion = nn.MSELoss(reduction='mean')
        acc = criterion(z_sigmoid, torch_t)
    elif model.name_loss in ['linear-regression',
                             'linear-regression-half-MSE']:
        acc = nn.MSELoss(reduction='mean')(z, torch_t)
    acc = acc.item()
    return acc
def get_homo_grad(model_grad_N1, params):
    # concatenate each layer's bias gradient as an extra column of the
    # weight gradient (homogeneous coordinates)
    homo_model_grad_N1 = []
    for l in range(params['numlayers']):
        homo_model_grad_N1_l = torch.cat((model_grad_N1[l]['W'], model_grad_N1[l]['b'].unsqueeze(1)), dim=1)
        homo_model_grad_N1.append(homo_model_grad_N1_l)
    return homo_model_grad_N1
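def _example_sgd_step(layers_weight, model_grad, lr):
    # Hedged usage sketch (not part of the original file): the helpers above
    # all operate on the same layer-list-of-{'W', 'b'}-dicts layout, so they
    # compose into optimizer-style updates. This computes w - lr * grad for
    # inputs shaped like the outputs of get_model_grad / get_zero_torch.
    step = get_multiply_scalar(lr, model_grad)
    return get_subtract_torch(layers_weight, step)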
| 31.28866
| 114
| 0.590198
| 1,826
| 12,140
| 3.673604
| 0.07667
| 0.017293
| 0.02415
| 0.044275
| 0.660256
| 0.608229
| 0.545617
| 0.461688
| 0.434556
| 0.418008
| 0
| 0.010804
| 0.2757
| 12,140
| 387
| 115
| 31.369509
| 0.752076
| 0.004366
| 0
| 0.503185
| 0
| 0
| 0.043226
| 0.008943
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101911
| false
| 0
| 0.019108
| 0
| 0.235669
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
59d34d92fd7a34250d7ea36c0e7b42576a6d3121
| 1,332
|
py
|
Python
|
p352_Data_Stream_as_Disjoint_Intervals.py
|
bzhou26/leetcode_sol
|
82506521e2cc412f96cd1dfc3c8c3ab635f67f73
|
[
"MIT"
] | null | null | null |
p352_Data_Stream_as_Disjoint_Intervals.py
|
bzhou26/leetcode_sol
|
82506521e2cc412f96cd1dfc3c8c3ab635f67f73
|
[
"MIT"
] | null | null | null |
p352_Data_Stream_as_Disjoint_Intervals.py
|
bzhou26/leetcode_sol
|
82506521e2cc412f96cd1dfc3c8c3ab635f67f73
|
[
"MIT"
] | null | null | null |
'''
- Leetcode problem: 352
- Difficulty: Hard
- Brief problem description:
Given a data stream input of non-negative integers a1, a2, ..., an, ..., summarize the numbers seen so far as a list of
disjoint intervals.
For example, suppose the integers from the data stream are 1, 3, 7, 2, 6, ..., then the summary will be:
[1, 1]
[1, 1], [3, 3]
[1, 1], [3, 3], [7, 7]
[1, 3], [7, 7]
[1, 3], [6, 7]
Follow up:
    What if there are lots of merges and the number of disjoint intervals is small compared to the data stream's size?
- Solution Summary:
    Push each incoming value onto a min-heap as a one-element interval [val, val];
    getIntervals() then pops the heap in sorted order and lazily merges
    overlapping or adjacent intervals.
- Used Resources:
--- Bo Zhou
'''
import heapq
from typing import List
class SummaryRanges:
    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.ih = []  # interval heap: min-heap of [start, end] pairs
    def addNum(self, val: int) -> None:
        # O(log n): store each value as a one-element interval
        heapq.heappush(self.ih, [val, val])
    def getIntervals(self) -> List[List[int]]:
        # pop intervals in sorted order, merging any that overlap or touch
        newh = []
        while self.ih:
            newInter = heapq.heappop(self.ih)
            if newh and newh[-1][1] + 1 >= newInter[0]:
                newh[-1][1] = max(newh[-1][1], newInter[1])
            else:
                # items arrive in increasing order, so this push keeps
                # newh sorted (equivalent to an append here)
                heapq.heappush(newh, newInter)
        self.ih = newh
        return self.ih
# Your SummaryRanges object will be instantiated and called as such:
# obj = SummaryRanges()
# obj.addNum(val)
# param_2 = obj.getIntervals()
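if __name__ == '__main__':
    # Hedged demo (not part of the original solution): replays the stream
    # from the docstring above; the final summary should be [[1, 3], [6, 7]].
    obj = SummaryRanges()
    for v in [1, 3, 7, 2, 6]:
        obj.addNum(v)
    print(obj.getIntervals())  # -> [[1, 3], [6, 7]]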
| 23.368421
| 119
| 0.594595
| 191
| 1,332
| 4.120419
| 0.497382
| 0.02033
| 0.011436
| 0.010165
| 0.011436
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04111
| 0.26952
| 1,332
| 57
| 120
| 23.368421
| 0.767729
| 0.574324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|