Dataset schema (column | dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
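As a quick sanity check of the schema, the snippet below sketches how rows of this table could be filtered on a few of the quality-signal columns. It is a minimal sketch only: the Parquet file name is hypothetical, and only the column names and the lang value "Python" come from the schema and rows shown here.

import pandas as pd

# Hypothetical file name; only the column names below come from the schema above.
df = pd.read_parquet("code_with_quality_signals.parquet")

# Keep Python files that are mostly alphanumeric and not dominated by repeated 5-grams.
mask = (
    (df["lang"] == "Python")
    & (df["alphanum_fraction"] > 0.5)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.3)
)
print(df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size"]].head())

The raw rows of the table follow below.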
256135f3261bda49e4b410a35a4a8f8355d98ad8
| 722
|
py
|
Python
|
rpi/tcp_server.py
|
nicolasGibaud7/App-domotic
|
aee4d80aa05a39388efd92ab9ecf9b5dd1460322
|
[
"MIT"
] | 4
|
2020-01-01T15:22:55.000Z
|
2020-01-10T09:34:26.000Z
|
rpi/tcp_server.py
|
nicolasGibaud7/App-domotic
|
aee4d80aa05a39388efd92ab9ecf9b5dd1460322
|
[
"MIT"
] | 2
|
2020-01-01T15:16:02.000Z
|
2020-01-02T13:56:29.000Z
|
rpi/tcp_server.py
|
nicolasGibaud7/App-domotic
|
aee4d80aa05a39388efd92ab9ecf9b5dd1460322
|
[
"MIT"
] | null | null | null |
import socket
import sys
IP_ADDR = "192.168.1.19"
TCP_PORT = 10000
if __name__ == "__main__":
    # Create TCP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Associate the socket with the server address
    server_address = (IP_ADDR, TCP_PORT)
    print("Start TCP server at address {} on port {} ".format(server_address[0], server_address[1]))
    sock.bind(server_address)
    # TCP server mode
    sock.listen(1)
    while True:
        connection, client_address = sock.accept()
        while True:
            print("Connection from {} ".format(client_address))
            data = connection.recv(16)
            print("Data : %s" % data)
            if not data:
                # The client closed its end; stop reading and release the connection
                break
        connection.close()
| 26.740741
| 100
| 0.631579
| 92
| 722
| 4.73913
| 0.5
| 0.149083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035514
| 0.259003
| 722
| 26
| 101
| 27.769231
| 0.779439
| 0.108033
| 0
| 0.111111
| 0
| 0
| 0.140625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
256327adbdadb9819f932122ab31855bfe822e1d
| 2,011
|
py
|
Python
|
List Comprehensions/examples.py
|
mervatkheir/kite-python-blog-post-code
|
9a331e5d327cd27c6ecd72926f3e74afd252efb5
|
[
"MIT"
] | 238
|
2018-10-10T18:50:40.000Z
|
2022-02-09T21:26:24.000Z
|
List Comprehensions/examples.py
|
mrrizal/kite-python-blog-post-code
|
597f2d75b2ad5dda97e9b19f6e9c7195642e1739
|
[
"MIT"
] | 38
|
2019-12-04T22:42:45.000Z
|
2022-03-12T00:04:57.000Z
|
List Comprehensions/examples.py
|
mrrizal/kite-python-blog-post-code
|
597f2d75b2ad5dda97e9b19f6e9c7195642e1739
|
[
"MIT"
] | 154
|
2018-11-11T22:48:09.000Z
|
2022-03-22T07:12:18.000Z
|
"""
List Comprehensions Examples
"""
my_list = []
# my_list.append()
# my_list.extend()
"""
When to use ListComps
"""
phones = [
{
'number': '111-111-1111',
'label': 'phone',
'extension': '1234',
},
{
'number': '222-222-2222',
'label': 'mobile',
'extension': None,
}
]
my_phone_list = []
for phone in phones:
my_phone_list.append(phone['number'])
# List Comprehension
[phone['number'] for phone in phones]
"""
Advanced Usage
"""
# Build an explicit nested list
table = [
[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
]
fields = ['x', 'y', 'z']
rows = [1, 2, 3]
table = []
for r in rows:
row = []
for field in fields:
row.append(field)
table.append(row)
[field for field in fields]
[row for row in rows]
table = [[field for field in fields] for row in rows]
"""
Dictionary Comprehensions
"""
[{str(item): item} for item in [1, 2, 3, ]]
dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
double_dict1 = {k: v * 2 for (k, v) in dict1.items()}
dict_map = {
'apple' : 1,
'cherry': 2,
'earwax': 3,
}
{v:k for (k, v) in dict_map.items()}
items = dict_map.items()
"""
Logical Comparisons
"""
values = [1,2,3]
[i for i in values if i < 3]
[k for k, v in dict_map.items() if v < 3]
"""
Performance, Spongecase Example
"""
original_string = 'hello world'
spongecase_letters = []
for index, letter in enumerate(original_string):
if index % 2 == 1:
spongecase_letters.append(letter.upper())
else:
spongecase_letters.append(letter)
spongecase_string = ''.join(spongecase_letters)
# hElLo wOrLd
def spongecase(index, letter):
if index % 2 == 1:
return letter.upper()
else:
return letter
original_string = 'hello world'
spongecase_letters = []
for index, letter in enumerate(original_string):
transformed_letter = spongecase(index, letter)
spongecase_letters.append(transformed_letter)
spongecase_string = ''.join(spongecase_letters)
# hElLo wOrLd
| 15.960317
| 53
| 0.604674
| 274
| 2,011
| 4.339416
| 0.310219
| 0.100084
| 0.015139
| 0.04037
| 0.318755
| 0.264929
| 0.257359
| 0.257359
| 0.134567
| 0.134567
| 0
| 0.038936
| 0.233715
| 2,011
| 125
| 54
| 16.088
| 0.732641
| 0.067131
| 0
| 0.230769
| 0
| 0
| 0.08056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015385
| false
| 0
| 0
| 0
| 0.046154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2564c2f1d6dd5e44be1def881988d5a419b3038e
| 2,549
|
py
|
Python
|
ImageDenoising/network/denoising.py
|
jiunbae/ITE4053
|
873d53493b7588f67406e0e6ed0e74e5e3f957bc
|
[
"MIT"
] | 5
|
2019-06-20T09:54:04.000Z
|
2021-06-15T04:22:49.000Z
|
ImageDenoising/network/denoising.py
|
jiunbae/ITE4053
|
873d53493b7588f67406e0e6ed0e74e5e3f957bc
|
[
"MIT"
] | null | null | null |
ImageDenoising/network/denoising.py
|
jiunbae/ITE4053
|
873d53493b7588f67406e0e6ed0e74e5e3f957bc
|
[
"MIT"
] | 1
|
2019-04-19T04:52:34.000Z
|
2019-04-19T04:52:34.000Z
|
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import models as KM
from tensorflow.keras import layers as KL
class DenoisingNetwork(object):
def __new__(cls, mode: str) \
-> KM.Model:
assert mode in ['base', 'skip', 'bn']
inputs = KL.Input(shape=[None, None, 3],
name="input_image")
x = inputs
x = KL.Conv2D(64, (3, 3), padding="SAME",
kernel_initializer='random_uniform',
bias_initializer='zeros',
name="layer1")(x)
if mode == 'bn':
x = KL.BatchNormalization()(x)
x = KL.ReLU()(x)
x = KL.Conv2D(64, (3, 3), padding="SAME",
kernel_initializer='random_uniform',
bias_initializer='zeros',
name="layer2")(x)
if mode == 'bn':
x = KL.BatchNormalization()(x)
x = KL.ReLU()(x)
x = KL.Conv2D(64, (3, 3), padding="SAME",
kernel_initializer='random_uniform',
bias_initializer='zeros',
name="layer3")(x)
if mode == 'bn':
x = KL.BatchNormalization()(x)
x = KL.ReLU()(x)
x = KL.Conv2D(64, (3, 3), padding="SAME",
kernel_initializer='random_uniform',
bias_initializer='zeros',
name="layer4")(x)
if mode == 'bn':
x = KL.BatchNormalization()(x)
x = KL.ReLU()(x)
x = KL.Conv2D(3, (3, 3), padding="SAME",
kernel_initializer='random_uniform',
bias_initializer='zeros',
name="layer5")(x)
if mode == 'bn':
x = KL.BatchNormalization()(x)
x = KL.ReLU()(x)
if mode == 'skip' or mode == 'bn':
x = KL.average([x, inputs])
return KM.Model(inputs=inputs, outputs=x,
name='denoising')
@staticmethod
def loss(y_true: tf.Tensor, y_pred: tf.Tensor) \
-> tf.Tensor:
return K.mean(K.square(y_pred - y_true))
@classmethod
def metric(cls, y_true: tf.Tensor, y_pred: tf.Tensor) \
-> tf.Tensor:
return tf.image.psnr(y_true, y_pred, max_val=1.)
@classmethod
def compile(cls, model, optimizer, loss, metric)\
-> None:
model.compile(optimizer=optimizer,
loss=loss,
metrics=[metric])
| 33.986667
| 59
| 0.488035
| 282
| 2,549
| 4.308511
| 0.262411
| 0.039506
| 0.02963
| 0.044444
| 0.545679
| 0.545679
| 0.545679
| 0.545679
| 0.545679
| 0.545679
| 0
| 0.019534
| 0.377403
| 2,549
| 74
| 60
| 34.445946
| 0.746062
| 0
| 0
| 0.515625
| 0
| 0
| 0.074931
| 0
| 0
| 0
| 0
| 0
| 0.015625
| 1
| 0.0625
| false
| 0
| 0.0625
| 0.03125
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
256670e4e127db5ef91b0b78cc07a367f32674c1
| 884
|
py
|
Python
|
utils/timer.py
|
YorkSu/hat
|
b646b6689f3d81c985ed13f3d5c23b6c717fd07d
|
[
"Apache-2.0"
] | 1
|
2019-04-10T04:49:30.000Z
|
2019-04-10T04:49:30.000Z
|
utils/timer.py
|
Suger131/HAT-tf2.0
|
b646b6689f3d81c985ed13f3d5c23b6c717fd07d
|
[
"Apache-2.0"
] | null | null | null |
utils/timer.py
|
Suger131/HAT-tf2.0
|
b646b6689f3d81c985ed13f3d5c23b6c717fd07d
|
[
"Apache-2.0"
] | 1
|
2019-06-14T05:53:42.000Z
|
2019-06-14T05:53:42.000Z
|
import time
class Timer(object):
def __init__(self, Log, *args, **kwargs):
self.Log = Log
return super().__init__(*args, **kwargs)
@property
def time(self):
return time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
def mktime(self, timex):
return time.mktime(time.strptime(timex, '%Y-%m-%d-%H-%M-%S'))
def timer(self, text, func, *args, **kwargs):
start_time = self.time
self.Log(start_time, _T=f'{text} Start:')
result = func(*args, **kwargs)
stop_time = self.time
self.Log(stop_time, _T=f'{text} Stop:')
cost_time = self.mktime(stop_time) - self.mktime(start_time)
self.Log(cost_time, _T=f'{text} cost time (second):')
time_dict = {f'{text}_start_time'.upper(): start_time,
f'{text}_stop_time'.upper(): stop_time,
f'{text}_cost_time'.upper(): cost_time}
return time_dict, result
| 31.571429
| 65
| 0.623303
| 132
| 884
| 3.94697
| 0.265152
| 0.122841
| 0.06334
| 0.057582
| 0.095969
| 0.023033
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193439
| 884
| 28
| 66
| 31.571429
| 0.730715
| 0
| 0
| 0
| 0
| 0
| 0.151412
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.045455
| 0.090909
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
256a8cd6b55c2a6f3936b57c2975d63cfcb67d9a
| 4,050
|
py
|
Python
|
tests/test_functional.py
|
tirkarthi/humpty
|
8652cf7b18a09d1a1d73465afd38581ef4e2369e
|
[
"BSD-3-Clause"
] | 14
|
2015-09-05T20:20:50.000Z
|
2021-04-08T08:53:20.000Z
|
tests/test_functional.py
|
tirkarthi/humpty
|
8652cf7b18a09d1a1d73465afd38581ef4e2369e
|
[
"BSD-3-Clause"
] | 6
|
2017-05-12T20:46:40.000Z
|
2020-02-08T05:05:03.000Z
|
tests/test_functional.py
|
tirkarthi/humpty
|
8652cf7b18a09d1a1d73465afd38581ef4e2369e
|
[
"BSD-3-Clause"
] | 8
|
2017-02-13T15:38:53.000Z
|
2020-11-11T20:16:58.000Z
|
# -*- coding: utf-8 -*-
"""
"""
from __future__ import absolute_import
from contextlib import contextmanager
import imp
import posixpath
from zipfile import ZipFile
from click.testing import CliRunner
import pkginfo
import pytest
from six import PY3
def test_pyfile_compiled(packages, tmpdir):
packages.require_eggs('dist1')
unzip = False
if PY3:
# Python >= 3.2 doesn't seem to run .pyc files from PEP 3147
# (__pycache__) repository directories.
unzip = True
venv = packages.get_venv('dist1', unzip=unzip)
assert venv.run("__import__('dist1').test_is_compiled()") == 0
@pytest.fixture
def dist1_metadata(packages):
egg = packages.get_egg('dist1')
return pkginfo.BDist(str(egg))
def test_summary(dist1_metadata):
assert dist1_metadata.summary == "A dummy distribution"
def test_description(dist1_metadata):
assert dist1_metadata.description.rstrip() \
== u"Long description.\n\nGruß."
def test_script_wrapper(packages):
packages.require_eggs('dist1')
venv = packages.get_venv('dist1')
assert venv.call(['dist1_wrapper']) == 42
def test_old_style_script(packages):
packages.require_eggs('dist1')
venv = packages.get_venv('dist1')
assert venv.call(['dist1_script']) == 42
def test_namespace_package(packages):
packages.require_eggs('dist1', 'dist2')
venv = packages.get_venv('dist2')
prog = (
'import sys\n'
'from dist2.plugins.builtin import the_answer\n'
'sys.exit(the_answer)\n'
)
assert venv.run(prog) == 42
def test_namespace_stubs_in_egg(packages):
dist2_egg = packages.get_egg('dist2')
dist2_stubs = with_byte_compiled(['dist2/__init__.py',
'dist2/plugins/__init__.py'])
with fileobj(ZipFile(str(dist2_egg))) as zf:
files_in_egg = dist2_stubs.intersection(zf.namelist())
# Make sure we generated the stubs (or not, depending on python
# version)
stubs_in_egg = files_in_egg.intersection(dist2_stubs)
assert stubs_in_egg == dist2_stubs
# Make sure we didn't copy the .pth file that the wheel installer
# creates for the namespaces
assert not any(fn.lower().endswith('.pth')
for fn in files_in_egg)
def test_extension(packages):
packages.require_eggs('extension_dist')
venv = packages.get_venv('extension_dist')
assert venv.run("__import__('extension_dist').test_extension()") == 0
def test_eager_resources(packages):
packages.require_eggs('extension_dist')
venv = packages.get_venv('extension_dist')
assert venv.run("__import__('extension_dist').test_eager_resources()") == 0
def test_extras(packages):
packages.require_eggs('dist1', 'extension_dist')
venv = packages.get_venv('dist1[extras]')
assert venv.run("__import__('dist1').test_extras()") == 0
def test_no_extras(packages):
packages.require_eggs('dist1', 'extension_dist')
venv = packages.get_venv('dist1')
assert venv.run("__import__('dist1').test_no_extras()") == 0
def test_main(packages, tmpdir):
from humpty import main
wheel = packages.get_wheel('dist1')
runner = CliRunner()
result = runner.invoke(main, ['-d', str(tmpdir), str(wheel)])
assert result.exit_code == 0
eggs = list(tmpdir.listdir(fil="*.egg"))
assert len(eggs) == 1
egg = eggs[0]
assert egg.isfile()
assert egg.fnmatch("dist1-*")
@contextmanager
def fileobj(fp):
try:
yield fp
finally:
fp.close()
def with_byte_compiled(paths):
""" Augment PATHS with paths of byte-compiled files.
"""
get_tag = getattr(imp, 'get_tag', None)
compiled = set()
for path in paths:
head, tail = posixpath.split(path)
root, ext = posixpath.splitext(tail)
if ext == '.py':
if get_tag:
root = '%s.%s' % (root, get_tag())
head = posixpath.join(head, '__pycache__')
compiled.add(posixpath.join(head, root + '.pyc'))
return compiled.union(paths)
| 27.739726
| 79
| 0.66716
| 523
| 4,050
| 4.913958
| 0.307839
| 0.032685
| 0.059144
| 0.059144
| 0.280156
| 0.233463
| 0.204669
| 0.200778
| 0.200778
| 0.200778
| 0
| 0.018369
| 0.206914
| 4,050
| 145
| 80
| 27.931034
| 0.781756
| 0.082716
| 0
| 0.123711
| 0
| 0
| 0.166712
| 0.079026
| 0
| 0
| 0
| 0
| 0.164948
| 1
| 0.154639
| false
| 0
| 0.175258
| 0
| 0.350515
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
256b989b63c37dd38e854142d7a19f85d5f03b4f
| 1,401
|
py
|
Python
|
diy_gym/addons/debug/joint_trace.py
|
wassname/diy-gym
|
83232ae6971341a86683d316feecf4d34d3caf47
|
[
"MIT"
] | null | null | null |
diy_gym/addons/debug/joint_trace.py
|
wassname/diy-gym
|
83232ae6971341a86683d316feecf4d34d3caf47
|
[
"MIT"
] | null | null | null |
diy_gym/addons/debug/joint_trace.py
|
wassname/diy-gym
|
83232ae6971341a86683d316feecf4d34d3caf47
|
[
"MIT"
] | null | null | null |
import pybullet as p
from gym import spaces
import pybullet_planning as pbp
import numpy as np
from diy_gym.addons.addon import Addon
class JointTrace(Addon):
"""
JointTrace
A trace that follows a joint's movements
"""
def __init__(self, parent, config):
super().__init__(parent, config)
self.uid = parent.uid
joint_info = [p.getJointInfo(self.uid, i) for i in range(p.getNumJoints(self.uid))]
if 'joint' in config:
joints = [config.get('joint')]
elif 'joints' in config:
joints = config.get('joints')
else:
joints = [info[1].decode('UTF-8') for info in joint_info]
self.joint_ids = [info[0] for info in joint_info if info[1].decode('UTF-8') in joints and info[3] > -1]
self.last = None
def reset(self):
p.removeAllUserDebugItems()
self.last = None
def update(self, action):
# A colored trace for each joint
joint_pos = np.array([pbp.get_link_pose(self.uid, i)[0] for i in self.joint_ids])
if self.last is not None:
n = len(joint_pos)
for i in range(n):
p.addUserDebugLine(
lineFromXYZ=joint_pos[i],
lineToXYZ=self.last[i], lineColorRGB=[(n-i)/(n+1), 0.9, i/(n+1)], lineWidth=1, lifeTime=360)
self.last = joint_pos
| 31.133333
| 120
| 0.581727
| 194
| 1,401
| 4.092784
| 0.381443
| 0.050378
| 0.02267
| 0.027708
| 0.141058
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 0.303355
| 1,401
| 44
| 121
| 31.840909
| 0.797131
| 0.057102
| 0
| 0.066667
| 0
| 0
| 0.024596
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.166667
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
256c54c224c3656056ad73a0292f2c0577a7fce0
| 1,612
|
py
|
Python
|
ngraph/flex/flexargparser.py
|
NervanaSystems/ngraph-python
|
ac032c83c7152b615a9ad129d54d350f9d6a2986
|
[
"Apache-2.0"
] | 18
|
2018-03-19T04:16:49.000Z
|
2021-02-08T14:44:58.000Z
|
ngraph/flex/flexargparser.py
|
rsumner31/ngraph
|
5e5c9bb9f24d95aee190b914dd2d44122fc3be53
|
[
"Apache-2.0"
] | 2
|
2019-04-16T06:41:49.000Z
|
2019-05-06T14:08:13.000Z
|
ngraph/flex/flexargparser.py
|
rsumner31/ngraph
|
5e5c9bb9f24d95aee190b914dd2d44122fc3be53
|
[
"Apache-2.0"
] | 11
|
2018-06-16T15:59:08.000Z
|
2021-03-06T00:45:30.000Z
|
from __future__ import print_function
import ngraph.transformers as ngt
from ngraph.flex.names import flex_gpu_transformer_name
import argparse
class FlexNgraphArgparser():
"""
Flex specific command line args
"""
@staticmethod
def setup_flex_args(argParser):
"""
Add flex specific arguments to other default args used by ngraph
"""
# use fixed point for flex backend
argParser.add_argument('--fixed_point',
action="store_true",
help=argparse.SUPPRESS)
# turn on flex verbosity for debug
argParser.add_argument('--flex_verbose',
action="store_true",
help=argparse.SUPPRESS)
# collect flex data and save it to h5py File
argParser.add_argument('--collect_flex_data',
action="store_true",
default=argparse.SUPPRESS)
@staticmethod
def make_and_set_transformer_factory(args):
flex_args = ('fixed_point', 'flex_verbose', 'collect_flex_data')
# default value for all flex args if not given, confusing with store_true in add_argument
default = False
if args.backend == flex_gpu_transformer_name:
flex_args_dict = dict((a, getattr(args, a, default)) for a in flex_args)
factory = ngt.make_transformer_factory(args.backend, **flex_args_dict)
else:
factory = ngt.make_transformer_factory(args.backend)
ngt.set_transformer_factory(factory)
| 36.636364
| 97
| 0.614144
| 179
| 1,612
| 5.27933
| 0.396648
| 0.050794
| 0.063492
| 0.046561
| 0.165079
| 0.165079
| 0.091005
| 0
| 0
| 0
| 0
| 0.000904
| 0.313896
| 1,612
| 43
| 98
| 37.488372
| 0.853526
| 0.182382
| 0
| 0.269231
| 0
| 0
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.153846
| 0
| 0.269231
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2571f7e0a4f394d6c21f691f7de829e3237dd090
| 8,442
|
py
|
Python
|
models/linnet.py
|
mengxiangke/bsn
|
df6458a44b8d8b442c086e158366dd296fab54cc
|
[
"Apache-2.0"
] | 5
|
2020-09-19T18:05:08.000Z
|
2022-01-23T14:55:07.000Z
|
models/linnet.py
|
mengxiangke/bsn
|
df6458a44b8d8b442c086e158366dd296fab54cc
|
[
"Apache-2.0"
] | null | null | null |
models/linnet.py
|
mengxiangke/bsn
|
df6458a44b8d8b442c086e158366dd296fab54cc
|
[
"Apache-2.0"
] | 7
|
2020-09-19T18:05:11.000Z
|
2021-12-28T02:41:12.000Z
|
import os
from os.path import join as pjoin
import time
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import CosineAnnealingLR
try:
from .radam import RAdam
except (ImportError, ModuleNotFoundError) as err:
from radam import RAdam
try:
from torch.nn import Flatten
except ImportError:
class Flatten(nn.Module):
__constants__ = ['start_dim', 'end_dim']
def __init__(self, start_dim=1, end_dim=-1):
super(Flatten, self).__init__()
self.start_dim = start_dim
self.end_dim = end_dim
def forward(self, input):
return input.flatten(self.start_dim, self.end_dim)
class HPF(nn.Conv1d):
def __init__(self,
in_channels,
out_channels,
kernel_size,
*args,
**kwargs):
super(HPF, self).__init__(in_channels,
out_channels,
kernel_size,
*args, bias=False, **kwargs)
self.hpf_kernel = np.array([[[ 1, -1, 0, 0, 0]],
[[ 1, -2, 1, 0, 0]],
[[ 1, -3, 3, -1, 0]],
[[ 1, -4, 6, -4, 1]]])
self.weight.data = torch.tensor(self.hpf_kernel,
dtype=self.weight.dtype)
def initialize_parameters(self):
device = next(iter(self.parameters())).device
self.weight.data = torch.tensor(self.hpf_kernel,
dtype=self.weight.dtype,
device=device)
# The following settings do not allow training the HPF.
#self.bias.data.fill_(0)
#self.hpf.bias.requires_grad = False
class TLU(nn.Module):
def __init__(self, thr=3.0):
"""truncated linear unit (TLU)
"""
super(TLU, self).__init__()
self.thr = thr
def forward(self, x):
return x.clamp(-self.thr, self.thr) #torch.min(torch.max(x, -self.thr), self.thr)
class Group1(nn.Module):
def __init__(self):
super(Group1, self).__init__()
self.module = nn.Sequential(nn.Conv1d(4, 8, 1),
TLU(3.0),
nn.Conv1d(8, 8, 5, padding=2),
nn.Conv1d(8, 16, 1))
def forward(self, x):
return self.module(x)
class Group2(nn.Module):
def __init__(self):
super(Group2, self).__init__()
self.module = nn.Sequential(nn.Conv1d(16, 16, 5, padding=2),
nn.ReLU(),
nn.Conv1d(16, 32, 1),
nn.ReLU(),
nn.AvgPool1d(3, stride=2, padding=1))
def forward(self, x):
return self.module(x)
class Group3(nn.Module):
def __init__(self):
super(Group3, self).__init__()
self.module = nn.Sequential(nn.Conv1d(32, 32, 5, padding=2),
nn.ReLU(),
nn.Conv1d(32, 64, 1),
nn.ReLU(),
nn.AvgPool1d(3, stride=2, padding=1))
def forward(self, x):
return self.module(x)
class Group4(nn.Module):
def __init__(self):
super(Group4, self).__init__()
self.module = nn.Sequential(nn.Conv1d(64, 64, 5, padding=2),
nn.ReLU(),
nn.Conv1d(64, 128, 1),
nn.ReLU(),
nn.AvgPool1d(3, stride=2, padding=1))
def forward(self, x):
return self.module(x)
class Group5(nn.Module):
def __init__(self):
super(Group5, self).__init__()
self.module = nn.Sequential(nn.Conv1d(128, 128, 5, padding=2),
nn.ReLU(),
nn.Conv1d(128, 256, 1),
nn.ReLU(),
nn.AvgPool1d(3, stride=2, padding=1))
def forward(self, x):
return self.module(x)
class Group6(nn.Module):
def __init__(self):
super(Group6, self).__init__()
self.module = nn.Sequential(nn.Conv1d(256, 256, 5, padding=2),
nn.ReLU(),
nn.Conv1d(256, 512, 1),
nn.ReLU(),
nn.AdaptiveAvgPool1d(1))
def forward(self, x):
return self.module(x)
class Classifier(nn.Module):
def __init__(self, n_classes=2):
super(Classifier, self).__init__()
self.module = nn.Sequential(Flatten(1),
nn.Linear(512, n_classes))
def forward(self, x):
return self.module(x)
class LinNet(nn.Module):
@staticmethod
def get_optimizer(model, lr):
return RAdam(model.parameters(), lr=lr, weight_decay=1e-5)
@staticmethod
def get_lr_scheduler(optimizer):
return CosineAnnealingLR(optimizer, T_max=20, eta_min=1e-7)
def __str__(self):
return self._name
def __init__(self, n_classes=2):
super(LinNet, self).__init__()
self._name = "linnet"
# HPF
self.hpf = HPF(1, 4, 5, padding=2)
self.group1 = Group1()
self.group2 = Group2()
self.group3 = Group3()
self.group4 = Group4()
self.group5 = Group5()
self.group6 = Group6()
self.classifier = Classifier(n_classes)
self.initialize_parameters()
def forward(self, x):
y = self.hpf(x)
g1 = self.group1(y)
g2 = self.group2(g1)
g3 = self.group3(g2)
g4 = self.group4(g3)
g5 = self.group5(g4)
g6 = self.group6(g5)
logits = self.classifier(g6)
return logits
def initialize_parameters(self):
"""
In the original paper, Lin et al. initialize parameters as follows:
Conv1d: Xavier uniform initializer with zero biases
"""
"""
[Original]
for m in self.modules():
if isinstance(m, HPF):
self.hpf.initialize_parameters()
elif isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight,
mode='fan_in',
nonlinearity='relu')
nn.init.constant_(m.bias.data, val=1e-3)
elif isinstance(m, nn.Linear):
# Zero mean Gaussian with std 0.01
nn.init.normal_(m.weight, 0.0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 1e-3)
"""
# The following settings are the same as those of BSN.
for m in self.modules():
if isinstance(m, HPF):
self.hpf.initialize_parameters()
if isinstance(m, nn.Conv1d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias.data)
elif isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias.data)
def initialize_curriculum_learning(self):
for m in self.modules():
if isinstance(m, HPF):
self.hpf.initialize_parameters()
elif isinstance(m, nn.Linear):
# Zero mean Gaussian with std 0.01
nn.init.normal_(m.weight, 0.0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 1e-3)
if __name__ == "__main__":
model = LinNet()
n_ch = 1
for i in range(1, 2):
x = torch.randn(1, n_ch, i*16000)
t_beg = time.time()
out = model(x)
t_end = time.time()
print("LinNet model output:", out)
print("Execution time:", t_end - t_beg)
# end of for
| 31.977273
| 98
| 0.47394
| 940
| 8,442
| 4.079787
| 0.18617
| 0.043807
| 0.031552
| 0.035202
| 0.482399
| 0.449544
| 0.396089
| 0.334811
| 0.275359
| 0.265711
| 0
| 0.045907
| 0.416844
| 8,442
| 263
| 99
| 32.098859
| 0.73309
| 0.045605
| 0
| 0.365169
| 0
| 0
| 0.008857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.151685
| false
| 0
| 0.073034
| 0.067416
| 0.365169
| 0.011236
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2516c459b4df1dceb074080d5a8ce6f229681ed
| 16,278
|
py
|
Python
|
mvmm/multi_view/SpectralPenSearchByBlockMVMM.py
|
idc9/mvmm
|
64fce755a7cd53be9b08278484c7a4c77daf38d1
|
[
"MIT"
] | 1
|
2021-08-17T13:22:54.000Z
|
2021-08-17T13:22:54.000Z
|
mvmm/multi_view/SpectralPenSearchByBlockMVMM.py
|
idc9/mvmm
|
64fce755a7cd53be9b08278484c7a4c77daf38d1
|
[
"MIT"
] | null | null | null |
mvmm/multi_view/SpectralPenSearchByBlockMVMM.py
|
idc9/mvmm
|
64fce755a7cd53be9b08278484c7a4c77daf38d1
|
[
"MIT"
] | null | null | null |
from sklearn.base import clone
import pandas as pd
from abc import ABCMeta
from time import time
from datetime import datetime
import numpy as np
from sklearn.model_selection import ParameterGrid
from sklearn.base import BaseEstimator, MetaEstimatorMixin
from mvmm.utils import get_seeds
from mvmm.multi_view.utils import linspace_zero_to, \
expspace_zero_to, polyspace_zero_to
from mvmm.multi_view.block_diag.graph.linalg import geigh_Lsym_bp_smallest
from mvmm.multi_view.block_diag.utils import asc_sort
from mvmm.clustering_measures import unsupervised_cluster_scores, \
several_unsupervised_cluster_scores, MEASURE_MIN_GOOD
class SpectralPenSearchByBlockMVMM(MetaEstimatorMixin, BaseEstimator,
metaclass=ABCMeta):
"""
Does a grid search over the continuous hyper-parameter for the spectral penalized MVMM. Stores the best MVMM for each block.
Parameters
----------
base_mvmm_0:
Unconstrained MVMM.
base_wbd_mvmm: mvmm.multi_view.BlockDiagMVMM.BlockDiagMVMM()
The base class for the spectral penalized MVMM
eval_weights:
The weights to put on the generalized eigenvalues.
adapt_expon:
max_n_blocks:
Maximum number of blocks to get, i.e. the number of eigenvalues to penalize.
user_eval_weights:
(Optional) User-provided eigenvalue weights.
pen_max: str, float
Largest penalty value to try. If 'default', an automatic, educated guess is made.
n_pen_seq: int
Number of penalty values to try.
user_pen_vals: None, list
(Optional) User provided penalty values to try
default_c: float
Multiplicative factor for inferring pen_max with the default method.
pen_seq_spacing: str
How to space the penalty values along the penalty sequence.
n_init: int
Number of random initializations.
random_state: None, int
Random seed.
select_metric: str
How to pick the best model for each fixed number of blocks.
metrics2compute: list of str
Model selection measures to compute for tracking purposes.
verbosity: int
Level of printout
"""
def __init__(self, base_mvmm_0, base_wbd_mvmm,
eval_weights='adapt', adapt_expon=1,
max_n_blocks='default', user_eval_weights=None,
pen_max='default', n_pen_seq=100, user_pen_vals=None,
# adapt_pen=False, pen_incr=.5, max_n_pen_incr=200,
default_c=100, pen_seq_spacing='lin',
n_init=1, random_state=None,
select_metric='bic',
metrics2compute=['aic', 'bic'],
verbosity=0):
self.base_mvmm_0 = base_mvmm_0
self.base_wbd_mvmm = base_wbd_mvmm
self.eval_weights = eval_weights
self.adapt_expon = adapt_expon
self.max_n_blocks = max_n_blocks
self.user_eval_weights = user_eval_weights
self.pen_max = pen_max
self.n_pen_seq = n_pen_seq
self.user_pen_vals = user_pen_vals
self.default_c = default_c
self.pen_seq_spacing = pen_seq_spacing
assert pen_seq_spacing in ['lin', 'quad', 'exp']
# self.adapt_pen = adapt_pen
# self.pen_incr = pen_incr
# self.max_n_pen_incr = max_n_pen_incr
# if self.adapt_pen:
# assert self.user_pen_vals is None
self.random_state = random_state
self.n_init = n_init
self.select_metric = select_metric
self.metrics2compute = metrics2compute
self.verbosity = verbosity
def get_pen_seq_from_max(self, pen_max):
if self.pen_seq_spacing == 'lin':
return linspace_zero_to(stop=pen_max,
num=self.n_pen_seq)
elif self.pen_seq_spacing == 'quad':
return polyspace_zero_to(stop=pen_max,
num=self.n_pen_seq,
deg=2)
elif self.pen_seq_spacing == 'exp':
return expspace_zero_to(stop=pen_max,
num=self.n_pen_seq,
base=10)
@property
def n_pen_vals_(self):
if self.user_pen_vals is not None:
return len(self.user_pen_vals) + 1
else:
return self.n_pen_seq + 1
@property
def param_grid_(self):
"""
List of all parameter settings
"""
if hasattr(self, 'est_n_blocks_'):
param_grid = {'n_blocks': self.est_n_blocks_}
return list(ParameterGrid(param_grid))
else:
return None
def get_default_pen_max(self, model, X):
# set up temp model
temp_model = clone(model)
temp_model.view_models_ = \
[temp_model.base_view_models[v]
for v in range(temp_model.n_views)]
temp_model.initialize_parameters(X)
eval_pen_default = temp_model.\
get_eval_pen_guess(X=X, c=self.default_c,
use_bipt_sp=True,
K='default')
if self.verbosity >= 1:
print('default pen val', eval_pen_default)
return eval_pen_default
def fit(self, X):
# assert all(self.pen_vals_[1:] > 0)
# assert len(np.unique(self.pen_vals)) == len(self.pen_vals)
init_seeds = get_seeds(n_seeds=self.n_init,
random_state=self.random_state)
fit_data = pd.DataFrame()
n_blocks_best_models = {}
n_blocks_best_idx = {}
init_adapt_weights = []
for init in range(self.n_init):
if self.verbosity >= 1:
current_time = datetime.now().strftime("%H:%M:%S")
print('Initialization {}/{} at {}'.
format(init + 1, self.n_init, current_time))
# max number of evals to penalize
if self.max_n_blocks == 'default':
K = min(self.base_mvmm_0.n_view_components)
else:
K = int(self.max_n_blocks)
for pen_idx in range(self.n_pen_vals_):
if self.verbosity >= 1:
current_time = datetime.now().strftime("%H:%M:%S")
print('Penalty {}/{} at {}'.
format(pen_idx + 1, self.n_pen_vals_,
current_time))
data = {'pen_idx': pen_idx, 'init': init}
start_time = time()
if pen_idx == 0:
pen_val = None
# fit model
fit_model = clone(self.base_mvmm_0)
fit_model.set_params(random_state=init_seeds[init],
n_init=1)
fit_model.fit(X)
# get current parameter values for warm starting
current_view_params = fit_model._get_parameters()['views']
current_bd_weights = fit_model.weights_mat_
current_bd_weights = current_bd_weights * \
self.base_wbd_mvmm.epsilon_tilde / \
current_bd_weights.sum()
# track data
data['n_blocks'] = 1
data['n_steps'] = fit_model.opt_data_['n_steps']
# compute adaptive weights
if self.eval_weights == 'adapt':
evals = geigh_Lsym_bp_smallest(X=self.bd_weights_,
rank=K,
zero_tol=1e-10,
method='tsym')
# deal with 0 evals by artificially setting
# them to the smallest non-zero eval
zero_evals = evals < 1e-6
if np.mean(zero_evals) == 1:
# edge case: if all evals are 0, just use uniform weights
evals = np.ones(len(evals))
else:
evals[zero_evals] = min(evals[~zero_evals])
# clip for numerical stability
eval_weights = (1 / evals) ** self.adapt_expon
init_adapt_weights.append(eval_weights)
else:
# setup and fit model
fit_model = clone(self.base_wbd_mvmm)
params = {'init_params_method': 'user',
'init_params_value': current_view_params,
'init_weights_method': 'user',
'init_weights_value': current_bd_weights
# 'eval_pen_base': pen_val,
}
params.update({'n_pen_tries': 1,
'n_init': 1,
# 'fine_tune_n_steps': None
})
fit_model.set_params(**params)
# set eval weights
if self.user_eval_weights:
eval_weights = self.user_eval_weights
elif self.eval_weights == 'adapt':
eval_weights = init_adapt_weights[init]
elif self.eval_weights == 'uniform':
eval_weights = np.ones(K)
elif self.eval_weights == 'lin':
eval_weights = 1 / np.arange(1, K + 1)
elif self.eval_weights == 'quad':
eval_weights = (1 / np.arange(1, K + 1)) ** 2
elif self.eval_weights == 'exp':
eval_weights = .5 ** np.arange(1, K + 1)
else:
raise ValueError("invalid input for eval_weights: {}"
.format(self.eval_weights))
def process(x):
x = np.clip(x, a_min=0, a_max=1e5)
return asc_sort(x * len(x) / np.sum(x))
# eval_weights = np.clip(eval_weights, a_min=0, a_max=1e5)
# superficial normalization step keeps
# penalty value reasonable
# eval_weights *= K / np.sum(eval_weights)
# eval_weights = desc_sort(eval_weights)
eval_weights = process(eval_weights)
fit_model.set_params(eval_weights=eval_weights)
# set penalty sequence for this initialization
if pen_idx == 1:
if self.user_pen_vals is not None:
pen_seq = np.sort(self.user_pen_vals)
elif self.pen_max == 'default':
# compute default max penalty
default_pen_max = \
self.get_default_pen_max(model=fit_model, X=X)
pen_seq = self.\
get_pen_seq_from_max(pen_max=default_pen_max)
elif self.pen_max != 'default':
pen_seq = self.\
get_pen_seq_from_max(pen_max=self.pen_max)
pen_seq = np.concatenate([[None], pen_seq])
# set penalty value
pen_val = pen_seq[pen_idx]
fit_model.set_params(eval_pen_base=pen_val)
fit_model.fit(X)
# get current parameter values for warm starting
current_view_params = fit_model._get_parameters()['views']
current_bd_weights = fit_model.bd_weights_
# track data
data['pen_val'] = pen_val
data['n_blocks'] = fit_model.opt_data_['n_blocks_est']
data['n_steps'] = \
fit_model.opt_data_['adpt_opt_data']['n_steps']
# store tracking data
data['fit_time'] = time() - start_time
tracking_data = fit_model.compute_tracking_data(X)
data['loss_val'] = tracking_data['loss_val']
data['obs_nll'] = tracking_data['obs_nll']
# TODO: possibly precompute distances
model_sel_scores = \
unsupervised_cluster_scores(X=X,
estimator=fit_model,
measures=self.metrics2compute)
for measure in model_sel_scores.keys():
data[measure] = model_sel_scores[measure]
# data['bic'] = fit_model.bic(X)
# data['aic'] = fit_model.aic(X)
fit_data = fit_data.append(data, ignore_index=True)
# save this model if it is the best
current_n_blocks = data['n_blocks'] # current n_blocks
# get th
block_scores = fit_data.query("n_blocks == @current_n_blocks")
if MEASURE_MIN_GOOD[self.select_metric]:
best_idx = block_scores[self.select_metric].idxmin()
else:
best_idx = block_scores[self.select_metric].idxmax()
# best_idx = fit_data.\
# query("n_blocks == @n_blocks")[self.select_metric].\
# idxmin()
if fit_data.loc[best_idx, 'init'] == init:
n_blocks_best_models[current_n_blocks] = fit_model
n_blocks_best_idx[current_n_blocks] = best_idx
self.est_n_blocks_ = np.sort(list(n_blocks_best_models.keys()))
self.estimators_ = [n_blocks_best_models[n_blocks]
for n_blocks in self.est_n_blocks_]
int_cols = ['init', 'pen_idx', 'n_blocks', 'n_steps']
fit_data[int_cols] = fit_data[int_cols].astype(int)
self.init_fit_data_ = fit_data
self.fit_init_best_idxs = [n_blocks_best_idx[n_blocks]
for n_blocks in self.est_n_blocks_]
if self.eval_weights == 'adapt':
self.init_adapt_weights_ = init_adapt_weights
self.model_sel_scores_ = \
several_unsupervised_cluster_scores(X=X,
estimators=self.estimators_,
measures=self.metrics2compute)
return self
def check_fit(self):
return hasattr(self, 'estimators_')
@property
def best_idx_(self):
"""
Index of selected model.
"""
if self.check_fit():
if MEASURE_MIN_GOOD[self.select_metric]:
return self.model_sel_scores_[self.select_metric].idxmin()
else:
return self.model_sel_scores_[self.select_metric].idxmax()
else:
return None
@property
def best_estimator_(self):
"""
Selected estimator.
"""
if self.check_fit():
return self.estimators_[self.best_idx_]
else:
return None
def predict(self, X):
"""
Predict the labels for the data samples in X using trained model.
"""
return self.best_estimator_.predict(X)
def predict_proba(self, X):
"""
Predict posterior probability of each component given the data.
"""
return self.best_estimator_.predict_proba(X)
def sample(self, n_samples=1):
"""
Generate random samples from the fitted Gaussian distribution.
"""
return self.best_estimator_.sample(n_samples=n_samples)
def score(self, X, y=None):
"""
Compute the per-sample average log-likelihood of the given data X.
"""
return self.best_estimator_.score(X)
def score_samples(self, X):
"""
Compute the weighted log probabilities for each sample.
"""
return self.best_estimator_.score_samples(X)
| 37.42069
| 128
| 0.531699
| 1,843
| 16,278
| 4.37873
| 0.170917
| 0.051797
| 0.012268
| 0.011152
| 0.219827
| 0.13891
| 0.121933
| 0.092193
| 0.070136
| 0.070136
| 0
| 0.006958
| 0.390773
| 16,278
| 434
| 129
| 37.506912
| 0.806796
| 0.181226
| 0
| 0.172131
| 0
| 0
| 0.042921
| 0
| 0
| 0
| 0
| 0.002304
| 0.004098
| 1
| 0.061475
| false
| 0
| 0.053279
| 0.004098
| 0.204918
| 0.012295
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c251ec2f4862db71edcfa85809de82aead64c14b
| 812
|
py
|
Python
|
tests/unit/providers/traversal/test_delegate_py3.py
|
YelloFam/python-dependency-injector
|
541131e33858ee1b8b5a7590d2bb9f929740ea1e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/providers/traversal/test_delegate_py3.py
|
YelloFam/python-dependency-injector
|
541131e33858ee1b8b5a7590d2bb9f929740ea1e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/providers/traversal/test_delegate_py3.py
|
YelloFam/python-dependency-injector
|
541131e33858ee1b8b5a7590d2bb9f929740ea1e
|
[
"BSD-3-Clause"
] | null | null | null |
"""Delegate provider traversal tests."""
from dependency_injector import providers
def test_traversal_provider():
another_provider = providers.Provider()
provider = providers.Delegate(another_provider)
all_providers = list(provider.traverse())
assert len(all_providers) == 1
assert another_provider in all_providers
def test_traversal_provider_and_overriding():
provider1 = providers.Provider()
provider2 = providers.Provider()
provider3 = providers.Provider()
provider3.override(provider2)
provider = providers.Delegate(provider1)
provider.override(provider3)
all_providers = list(provider.traverse())
assert len(all_providers) == 3
assert provider1 in all_providers
assert provider2 in all_providers
assert provider3 in all_providers
| 24.606061
| 51
| 0.752463
| 88
| 812
| 6.738636
| 0.295455
| 0.161889
| 0.094435
| 0.084317
| 0.290051
| 0.178752
| 0.178752
| 0.178752
| 0.178752
| 0
| 0
| 0.017804
| 0.169951
| 812
| 32
| 52
| 25.375
| 0.862018
| 0.041872
| 0
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c256ecf86fa244e6c6873a974253c22509fa427e
| 3,380
|
py
|
Python
|
source_dir/densenet_3d_estimator.py
|
ffeijoo/3d-DenseNet
|
baec68af07294ac5e432096055909ff08ea2e81c
|
[
"MIT"
] | null | null | null |
source_dir/densenet_3d_estimator.py
|
ffeijoo/3d-DenseNet
|
baec68af07294ac5e432096055909ff08ea2e81c
|
[
"MIT"
] | null | null | null |
source_dir/densenet_3d_estimator.py
|
ffeijoo/3d-DenseNet
|
baec68af07294ac5e432096055909ff08ea2e81c
|
[
"MIT"
] | null | null | null |
import os
import tensorflow as tf
from densenet_3d_model import DenseNet3D
def model_fn(features, labels, mode, params):
# Define the model
model = DenseNet3D(
video_clips=features['video_clips'], labels=labels, **params)
# Get the prediction result
if mode == tf.estimator.ModeKeys.PREDICT:
model.is_training = False
return _predict_result(model)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=model.losses,
train_op=model.train_op,
eval_metric_ops={'eval_accuracy': model.accuracy})
def _predict_result(model):
predictions = {'prediction': model.prediction, 'probability': model.probability, 'logits': model.logits}
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.PREDICT, predictions=predictions)
def serving_input_fn(params):
inputs = {
'video_clips':
tf.placeholder(
tf.float32,
shape=[
None, params['num_frames_per_clip'], params['height'],
params['width'], params['channel']
])
}
return tf.estimator.export.build_raw_serving_input_receiver_fn(inputs)()
def train_input_fn(training_dir, params):
directory = os.path.join(training_dir, 'train.tfrecord')
return _build_tfrecord_dataset(directory, params['train_total_video_clip'],
**params)
def eval_input_fn(evaluating_dir, params):
directory = os.path.join(evaluating_dir, 'eval.tfrecord')
return _build_tfrecord_dataset(directory, params['eval_total_video_clip'],
**params)
def _build_tfrecord_dataset(directory, total_clip_num, batch_size, **params):
'''
Buffer the training dataset into a TFRecordDataset with the following video clip shape
[num_frames_per_clip, height, width, channel]
ex: [16, 100, 120, 3]
'''
print('Building dataset, number of clips: ' + str(total_clip_num))
dataset = tf.data.TFRecordDataset(directory)
dataset = dataset.shuffle(buffer_size=total_clip_num)
dataset = dataset.map(
map_func=
lambda serialized_example: _parser(serialized_example, **params))
dataset = dataset.repeat()
iterator = dataset.batch(batch_size=batch_size).make_one_shot_iterator()
clips, labels = iterator.get_next()
return {'video_clips': clips}, labels
def _parser(serialized_example, num_frames_per_clip, **params):
features = tf.parse_single_example(
serialized_example,
features={
'clip/width': tf.FixedLenFeature([], tf.int64),
'clip/height': tf.FixedLenFeature([], tf.int64),
'clip/channel': tf.FixedLenFeature([], tf.int64),
'clip/raw': tf.FixedLenFeature([num_frames_per_clip], tf.string),
'clip/label': tf.FixedLenFeature([], tf.int64)
})
def mapping_func(image):
return _decode_image(image, **params)
clip = tf.map_fn(mapping_func, features['clip/raw'], dtype=tf.float32)
return clip, features['clip/label']
def _decode_image(image, channel, width, height, **params):
image = tf.image.decode_jpeg(image, channels=channel)
# This set_shape step is necessary for the last trainsition_layer_to_classes layer in the model
image.set_shape([height, width, channel])
image = tf.cast(image, tf.float32)
return image
| 34.845361
| 108
| 0.671598
| 401
| 3,380
| 5.42394
| 0.306733
| 0.025287
| 0.022069
| 0.029425
| 0.207816
| 0.070805
| 0.045057
| 0
| 0
| 0
| 0
| 0.009815
| 0.216272
| 3,380
| 97
| 109
| 34.845361
| 0.81125
| 0.083432
| 0
| 0.059701
| 0
| 0
| 0.092418
| 0.013993
| 0
| 0
| 0
| 0
| 0
| 1
| 0.134328
| false
| 0
| 0.044776
| 0.014925
| 0.328358
| 0.014925
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2611c72bea7ee655df6077231d5fe5c6f79d18c
| 2,973
|
py
|
Python
|
2021/day.3.py
|
craignicol/adventofcode
|
41ea3325adeb373dccc70d36a9a685eaf13359eb
|
[
"Apache-2.0"
] | null | null | null |
2021/day.3.py
|
craignicol/adventofcode
|
41ea3325adeb373dccc70d36a9a685eaf13359eb
|
[
"Apache-2.0"
] | null | null | null |
2021/day.3.py
|
craignicol/adventofcode
|
41ea3325adeb373dccc70d36a9a685eaf13359eb
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
from statistics import mode
def execute():
with open('./input/day.3.txt') as inp:
lines = inp.readlines()
data = [l.strip() for l in lines if len(l.strip()) > 0]
return power_consumption(data), life_support_rating(data)
tests_failed = 0
tests_executed = 0
def verify(a, b):
global tests_executed
global tests_failed
tests_executed += 1
if (a == b):
print("✓")
return
tests_failed += 1
print (locals())
example1= """00100
11110
10110
10111
10101
01111
00111
11100
10000
11001
00010
01010""".split('\n')
def sum_parts(diagnostics):
exploded = [s[:] for s in diagnostics]
accumulator = [0] * len(exploded[0])
for next in exploded:
for i, v in enumerate(next):
accumulator[i] += int(v)
return accumulator
powers = [8192, 4096, 2048, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
def convert_to_int(exploded):
return sum([a for (a,b) in zip(powers[-len(exploded):], exploded) if b])
def epsilon_rate(parts, length):
exploded = [s < length / 2 for s in parts]
return convert_to_int(exploded)
def gamma_rate(parts, length):
exploded = [s > length // 2 for s in parts]
return convert_to_int(exploded)
def power_consumption(diagnostics):
sp = sum_parts(diagnostics)
return epsilon_rate(sp, len(diagnostics)) * gamma_rate(sp, len(diagnostics))
def match_bit_criteria(bitcount, diagnostics, default):
bits = powers[-bitcount:]
while len(diagnostics) > 1:
f = bits.pop(0)
bit_matches = [d&f for d in diagnostics]
if (bit_matches.count(0) == len(bit_matches)/2):
criteria = f
else:
criteria = mode(bit_matches)
if (default == 1) :
diagnostics = [d for d in diagnostics if d&f == criteria&f]
else:
diagnostics = [d for d in diagnostics if d&f != criteria&f]
return diagnostics[0]
def oxygen_generator_rating(parts, diagnostics):
return match_bit_criteria(len(parts), diagnostics, 1)
def co2_scrubber_rating(parts, diagnostics):
return match_bit_criteria(len(parts), diagnostics, 0)
def life_support_rating(diagnostics):
sp = sum_parts(diagnostics)
values = [int(d, 2) for d in diagnostics]
return oxygen_generator_rating(sp, values) * co2_scrubber_rating(sp, values)
def test_cases():
verify(sum_parts(example1), [7,5,8,7,5])
verify(gamma_rate(sum_parts(example1), len(example1)), 0b10110)
verify(epsilon_rate(sum_parts(example1), len(example1)), 0b01001)
verify(power_consumption(example1), 198)
verify(oxygen_generator_rating(sum_parts(example1), [int(d,2) for d in example1]), 0b10111)
verify(co2_scrubber_rating(sum_parts(example1), [int(d,2) for d in example1]), 0b01010)
verify(life_support_rating(example1), 230)
print("Failed {} out of {} tests. ".format(tests_failed, tests_executed))
if __name__ == "__main__":
test_cases()
print(execute())
| 29.147059
| 95
| 0.667003
| 421
| 2,973
| 4.548694
| 0.299287
| 0.03342
| 0.018799
| 0.035509
| 0.308094
| 0.264752
| 0.226632
| 0.226632
| 0.226632
| 0.226632
| 0
| 0.070913
| 0.207871
| 2,973
| 102
| 96
| 29.147059
| 0.741826
| 0.007064
| 0
| 0.073171
| 0
| 0
| 0.042683
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146341
| false
| 0
| 0.012195
| 0.036585
| 0.292683
| 0.04878
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c263e873beab15ef3148ddea30b0dcbd4c5dcb1c
| 6,194
|
py
|
Python
|
src/propagation.py
|
haoningwu3639/EE229_Project_VideoStabilization
|
74603e9dc5f10b3deffb2f4e0753c15dc8b9a92d
|
[
"MIT"
] | 1
|
2021-06-13T06:32:29.000Z
|
2021-06-13T06:32:29.000Z
|
src/propagation.py
|
haoningwu3639/EE229_Project_VideoStabilization
|
74603e9dc5f10b3deffb2f4e0753c15dc8b9a92d
|
[
"MIT"
] | null | null | null |
src/propagation.py
|
haoningwu3639/EE229_Project_VideoStabilization
|
74603e9dc5f10b3deffb2f4e0753c15dc8b9a92d
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
from scipy.signal import medfilt
from utils import init_dict, l2_dst
def keypoint_transform(H, keypoint):
"""
Input:
H: homography matrix of dimension (3*3)
keypoint: the (x, y) point to be transformed
Output:
keypoint_trans: Transformed point keypoint_trans = H * (keypoint, 1)
"""
keypoint = np.append(keypoint, 1)
a, b, c = np.dot(H, keypoint)
keypoint_trans = np.array([[a/c, b/c]]).flatten()
return keypoint_trans
def propagate(input_points, output_points, input_frame, PATCH_SIZE=16, PROP_R=300):
"""
Input:
input_points: points in input_frame which are matched feature points with output_frame
output_points: points in input_frame which are matched feature points with intput_frame
input_frame
H: the homography between input and output points
Output:
x_motion_patch, y_motion_patch: Motion patch in x-direction and y-direction for input_frame
"""
cols, rows = input_frame.shape[1] // PATCH_SIZE, input_frame.shape[0] // PATCH_SIZE
x_motion = init_dict(cols, rows)
y_motion = init_dict(cols, rows)
temp_x_motion = init_dict(cols, rows)
temp_y_motion = init_dict(cols, rows)
# pre-warping with global homography
H, _ = np.array(cv2.findHomography(input_points, output_points, cv2.RANSAC))
for i in range(rows):
for j in range(cols):
point = np.array([[PATCH_SIZE * j, PATCH_SIZE * i]])
point_trans = keypoint_transform(H, point)
x_motion[i, j] = point.flatten()[0] - point_trans[0]
y_motion[i, j] = point.flatten()[1] - point_trans[1]
# distribute feature motion vectors
for i in range(rows):
for j in range(cols):
vertex = np.array([[PATCH_SIZE * j, PATCH_SIZE * i]])
for in_point, out_point in zip(input_points, output_points):
# velocity = point - feature point in current frame
distance = l2_dst(in_point, vertex)
if distance < PROP_R:
point_trans = keypoint_transform(H, in_point)
temp_x_motion[i, j] = [out_point[0] - point_trans[0]]
temp_y_motion[i, j] = [out_point[1] - point_trans[1]]
# Apply one Median Filter on obtained motion for each vertex
x_motion_patch = np.zeros((rows, cols), dtype=float)
y_motion_patch = np.zeros((rows, cols), dtype=float)
for key in x_motion.keys():
temp_x_motion[key].sort()
temp_y_motion[key].sort()
x_motion_patch[key] = x_motion[key] + temp_x_motion[key][len(temp_x_motion[key]) // 2]
y_motion_patch[key] = y_motion[key] + temp_y_motion[key][len(temp_y_motion[key]) // 2]
# Apply the other Median Filter over the motion patch for outliers
x_motion_patch = medfilt(x_motion_patch, kernel_size=[3, 3])
y_motion_patch = medfilt(y_motion_patch, kernel_size=[3, 3])
return x_motion_patch, y_motion_patch
def vertex_motion_path(x_path, y_path, x_motion_patch, y_motion_patch):
"""
Input:
x_path: motion path along x_direction
y_path: motion path along y_direction
x_motion_patch: obtained motion patch along x_direction
y_motion_patch: obtained motion patch along y_direction
Output:
x_paths, y_paths: Updated x_paths, y_paths with new x_motion_patch, y_motion_patch added to the last x_paths, y_paths
"""
x_path_new = x_path[:, :, -1] + x_motion_patch
y_path_new = y_path[:, :, -1] + y_motion_patch
x_paths = np.concatenate((x_path, np.expand_dims(x_path_new, axis=2)), axis=2)
y_paths = np.concatenate((y_path, np.expand_dims(y_path_new, axis=2)), axis=2)
return x_paths, y_paths
def warp_frame(frame, x_motion_patch, y_motion_patch, PATCH_SIZE=16):
"""
Input:
frame is the current frame
x_motion_patch: the motion_patch to be warped on frame along x-direction
y_motion_patch: the motion patch to be warped on frame along y-direction
Output:
new_frame: a warped frame according to given motion patches x_motion_patch, y_motion_patch
"""
map_x = np.zeros((frame.shape[0], frame.shape[1]), np.float32)
map_y = np.zeros((frame.shape[0], frame.shape[1]), np.float32)
for i in range(x_motion_patch.shape[0] - 1):
for j in range(x_motion_patch.shape[1] - 1):
x, y = int(j * PATCH_SIZE), int(i * PATCH_SIZE)
x_next, y_next = int((j+1) * PATCH_SIZE), int((i+1) * PATCH_SIZE)
src = np.array(
[[x, y], [x, y_next], [x_next, y], [x_next, y_next]]
)
dst = np.array(
[[x + x_motion_patch[i, j], y + y_motion_patch[i, j]],
[x + x_motion_patch[i+1, j], y_next + y_motion_patch[i+1, j]],
[x_next + x_motion_patch[i, j+1], y + y_motion_patch[i, j+1]],
[x_next + x_motion_patch[i+1, j+1], y_next + y_motion_patch[i+1, j+1]]]
)
H, _ = cv2.findHomography(src, dst, cv2.RANSAC)
for k in range(y, y_next):
for l in range(x, x_next):
x_res, y_res, w_res = np.dot(H, np.append(np.array([[l, k]]), 1))
if w_res != 0:
x_res, y_res = x_res / (w_res*1.0), y_res / (w_res*1.0)
else:
x_res, y_res = l, k
map_x[k, l] = x_res
map_y[k, l] = y_res
# repeat motion vectors for remaining frame in x-direction
for j in range(PATCH_SIZE*x_motion_patch.shape[1], map_x.shape[1]):
map_x[:, j] = map_x[:, PATCH_SIZE * x_motion_patch.shape[0] - 1]
map_y[:, j] = map_y[:, PATCH_SIZE * x_motion_patch.shape[0] - 1]
# repeat motion vectors for remaining frame in y-direction
for i in range(PATCH_SIZE*x_motion_patch.shape[0], map_x.shape[0]):
map_x[i, :] = map_x[PATCH_SIZE * x_motion_patch.shape[0] - 1, :]
map_y[i, :] = map_y[PATCH_SIZE * x_motion_patch.shape[0] - 1, :]
# deforms patch
new_frame = cv2.remap(frame, map_x, map_y, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
return new_frame
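A minimal usage sketch for warp_frame (not part of the original module; the frame size, patch grid, and shift values below are illustrative assumptions): warping one synthetic frame with a constant per-vertex motion patch shows how the vertex grid maps onto cv2.remap coordinates.
if __name__ == "__main__":
    import numpy as np  # also imported at the top of this module
    # Build a flat gray frame and a (rows x cols) vertex grid covering it.
    demo_frame = np.full((480, 640, 3), 127, dtype=np.uint8)
    rows, cols = 480 // 16 + 1, 640 // 16 + 1
    # Constant motion: every vertex moves 2 px right and 1 px up.
    x_shift = np.full((rows, cols), 2.0)
    y_shift = np.full((rows, cols), -1.0)
    warped = warp_frame(demo_frame, x_shift, y_shift, PATCH_SIZE=16)
    print(warped.shape)  # same spatial size as the input frame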
| 37.768293
| 121
| 0.628996
| 979
| 6,194
| 3.712972
| 0.144025
| 0.145254
| 0.082531
| 0.037414
| 0.41403
| 0.368088
| 0.216506
| 0.195323
| 0.131499
| 0.131499
| 0
| 0.01804
| 0.257184
| 6,194
| 163
| 122
| 38
| 0.772006
| 0.240555
| 0
| 0.049383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049383
| false
| 0
| 0.049383
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c267cbc162d3355bf7a9a7568e5120c20f9a8b94
| 15,306
|
py
|
Python
|
src/utils/scout_compiler.py
|
CheckPointSW/Scour
|
2f9391da45803b44181f7973e4e7c93bc2208252
|
[
"MIT"
] | 152
|
2018-08-13T05:48:59.000Z
|
2022-03-30T15:18:44.000Z
|
src/utils/scout_compiler.py
|
CheckPointSW/Scour
|
2f9391da45803b44181f7973e4e7c93bc2208252
|
[
"MIT"
] | 7
|
2019-08-29T15:24:41.000Z
|
2021-05-04T06:38:49.000Z
|
src/utils/scout_compiler.py
|
CheckPointSW/Scour
|
2f9391da45803b44181f7973e4e7c93bc2208252
|
[
"MIT"
] | 21
|
2018-08-13T19:11:29.000Z
|
2022-02-28T15:25:47.000Z
|
import os
import struct
from .compilation.scout_flags import *
from .compilation.scout_files import *
from .compilation.arc_intel import arcIntel
from .compilation.arc_arm import arcArm, arcArmThumb
from .compilation.arc_mips import arcMips
from .context_creator import *
###################################
## Architecture Configurations ##
###################################
# Using an enum to support feature extensions
ARC_INTEL = arcIntel.name()
ARC_ARM = arcArm.name()
ARC_ARM_THUMB = arcArmThumb.name()
ARC_MIPS = arcMips.name()
arc_factory = {
ARC_INTEL: arcIntel,
ARC_ARM: arcArm,
ARC_ARM_THUMB: arcArmThumb,
ARC_MIPS: arcMips,
}
arc_flags = {
ARC_INTEL: (flag_arc_intel,),
ARC_ARM: (flag_arc_arm,),
ARC_ARM_THUMB: (flag_arc_arm, flag_arc_thumb),
ARC_MIPS: (flag_arc_mips,),
}
#################
## Utilities ##
#################
def systemLine(line, logger):
"""Issue (and debug trace) a systen line.
Args:
line (string): cmd line to be executed
logger (logger, elementals): logger to be used by the function (elementals)
"""
logger.debug(line)
os.system(line)
###############################
## The full Scout Compiler ##
###############################
class scoutCompiler:
"""A class representing the Scout Compiler object, which manages the entire compilation logic.
Attributes
----------
logger (logger): (elementals) logger
target_arc (targetArc): target architecture instance to hold CPU-specific configurations
project_folder (str): path to the user's working folder
scout_folder (str): path to Scout's base folder
config_flags (list): list of Scout configuration flags, accumulated along the process
is_32_bits (bool): True iff we are going to compile a 32-bits binary
is_little_endian (bool): True iff we are going to compile a Little Endian binary
is_pic (bool): True iff we are going to compile a PIC binary blob
full_got (bytes): blob containing the GOT function address table for a PIC compilation
global_vars (bytes): blob containing the global variables content for a PIC compilation
Notes
-----
This class serves as the main object to be used by the user when compiling an executable or
a Position-Independent-Code (PIC) Scout binary.
"""
def __init__(self, logger):
"""Construct the basic Scout compiler object.
Args:
logger (logger): (elementals) logger
"""
self.logger = logger
self.target_arc = None
self.project_folder = None
self.scout_folder = None
self.config_flags = []
self.is_32_bits = True
self.is_little_endian = True
self.is_pic = False
self.full_got = b''
self.global_vars = b''
def setArc(self, arc, is_pic, is_32_bits=True, is_little_endian=True, is_native=False):
"""Set the target's architecture specifications.
Args:
arc (string, enum): name of the target architecture (should be a key of arc_factory)
is_pic (bool): True iff compiling a position independent blob
is_32_bits (bool, optional): True iff the architecture is 32 bit, otherwise it will be 64 bits (True by default)
is_little_endian (bool, optional): True iff the architecture is little endian, otherwise it will be big endian (True by default)
is_native (bool, optional): True iff should use the native compilation programs, regardless of the arc (False by default)
"""
# Sanity check
if arc not in arc_factory.keys():
self.logger.error("Unknown architecture: \"%s\". Supported options are: \"%s\"", arc, ', '.join(arc_factory.keys()))
# Apply the chosen settings
self.is_pic = is_pic
self.target_arc = arc_factory[arc](is_pic)
if is_native:
self.config_flags.append(flag_native_compiler)
else:
self.target_arc.setNotNative()
# Configure the architecture
self.target_arc.setEndianness(is_little_endian)
self.target_arc.setBitness(is_32_bits)
self.is_32_bits = is_32_bits
self.is_little_endian = is_little_endian
# Store the values for the configuration flags
self.config_flags.append(flag_32_bit if is_32_bits else flag_64_bit)
self.config_flags.append(flag_little_endian if is_little_endian else flag_big_endian)
self.config_flags += list(arc_flags[arc])
if self.is_pic:
self.config_flags.append(flag_pic_code)
def setScoutMode(self, is_user):
"""Set the target's permission level.
Args:
is_user (bool): True iff the scout will run in user mode, otherwise it will assume kernel mode permissions
"""
self.config_flags.append(flag_mode_user if is_user else flag_mode_kernel)
def setWorkingDirs(self, project_dir, scout_dir, include_dirs=[]):
"""Set the paths for the used directories.
Args:
project_dir (string): path to the project's directory
scout_dir (string): path to the directory of the basic Scout (Example: ".../src/scout")
include_dirs (list, optional): list of additional include directories
"""
self.project_folder = project_dir
self.scout_folder = scout_dir
# Ends with "/scout" (and not "/scout/")
if scout_dir.endswith(os.path.sep + "scout"):
main_folder = os.path.sep.join(scout_dir.split(os.path.sep)[:-1])
else:
main_folder = scout_dir + os.path.sep + ".."
self.target_arc.compile_flags += ['I' + x for x in [self.project_folder, main_folder] + include_dirs]
def addScoutFlags(self, flags):
"""Add the flags regarding the target's specifications.
Args:
flags (list): list of configuration flags (strings)
"""
self.config_flags += flags
def addCompilationFlags(self, user_compile_flags=[], user_link_flags=[]):
"""Add custom compilation / linking flags.
Args:
user_compile_flags (list, optional): list of compiler flags (without the '-' prefix)
user_link_flags (list, optional) list of linker flags (without the '-' prefix)
"""
self.target_arc.compile_flags += user_compile_flags
self.target_arc.link_flags += user_link_flags
def verifyScoutFlags(self):
"""Check that all of the configuration flags are set correctly."""
if flag_mode_user not in self.config_flags and flag_mode_kernel not in self.config_flags:
self.logger.warning("Missing Scout flag - unknown permission mode. Defaulting to USER-MODE (low privileges)")
def generateFlagsFile(self):
"""Generate the architecture's "flags.h" file."""
# Verify the flags
self.verifyScoutFlags()
# Verify we know where to store this file
if self.project_folder is None:
self.logger.error("Working directories are NOT defined...")
return
flag_path = os.path.join(self.project_folder, FLAGS_FILE_NAME)
self.logger.info(f"Generating the {flag_path} file")
fd = open(flag_path, "w")
# file prefix
fd.write("#ifndef __SCOUT__FLAGS__H__\n")
fd.write("#define __SCOUT__FLAGS__H__\n")
fd.write('\n')
# auto-generation comment
fd.write("/* This file is AUTO-GENERATED, please do NOT edit it manually */\n")
# The actual flags
for flag in self.config_flags:
fd.write(f"#define {flag}\n")
# file suffix
fd.write("\n")
fd.write("#endif /* _SCOUT__FLAGS__H__ */")
# can close the file
fd.close()
def populateGOT(self, scout_got, project_got, project_vars_size=0, is_host_thumb=False):
"""Populate the PIC context with the GOT entries, and capacity for global variables.
Args:
scout_got (list): list of (virtual) addresses according to Scout's GOT order
project_got (list): list of additional memory addresses for symbols used in the project's GOT
project_vars_size (int, optional): size (in bytes) of the project's global variables (0 by default)
is_host_thumb (bool, optional): True iff the host process is a Thumb binary (False by default)
"""
# Sanity Check #1 - PIC Compilation
if not self.is_pic:
self.logger.error("Can't populate a PIC context (GOT and globals) for a non-PIC compilation!")
return
# Sanity Check #2 - GOT Size
expected_size = scout_got_base_size_mmap if flag_mmap in self.config_flags else scout_got_base_size
if len(scout_got) != expected_size:
self.logger.error(f"Wrong size for Scout's GOT: Expected {expected_size} entries, and got {len(scout_got)}!")
return
format = ("<" if self.is_little_endian else ">") + ("L" if self.is_32_bits else "Q")
self.full_got = b''.join([struct.pack(format, func + (1 if is_host_thumb else 0)) for func in scout_got + project_got])
# Calculate the size for the global variables
size_globals = project_vars_size
# The base loaders don't use global variables, only the full scout
if flag_loader not in self.config_flags:
if flag_instructions in self.config_flags:
if self.is_32_bits:
size_globals += scout_instructions_globals_32_size
if flag_dynamic_buffers not in self.config_flags:
size_globals += scout_static_buffers_32_size
else:
size_globals += scout_instructions_globals_64_size
if flag_dynamic_buffers not in self.config_flags:
size_globals += scout_static_buffers_64_size
# Now generate the blob
self.global_vars = b'\x00' * size_globals
def compile(self, scout_files, project_files, elf_file):
"""Compile the "Scout" project, according to the PIC setup that was defined earlier.
Args:
scout_files (list): list of file paths for scout's code (*.c) files
project_files (list): list of file paths for the project's code (*.c) files
elf_file (string): path to the (created) compiled ELF file
Note:
If this is a PIC compilation, the final binary file will be named to match the ELF
file. For example: "project.elf" => "project.bin".
Return Value:
Name of the PIC binary file (in PIC compilations), None otherwise.
"""
self.logger.addIndent()
# 1. Auto-Generate the flags.h file
self.generateFlagsFile()
# 2. Prepare the list of compilation files
compilation_files = [os.path.join(self.scout_folder, f) for f in scout_files] + project_files
# 3. Prepare the compilation & linking flags
compile_flags, link_flags = self.target_arc.prepareFlags()
#############################
## Compiling an Executable ##
#############################
if not self.is_pic:
# 4. Re-organize the linker flags
fixed_link_flags = "".join("-Wl,-" + x for x in link_flags.split("-")[1:])
# 5. Compile together all of the file (and that's it)
self.logger.info(f"Compiling the *.c files, linking them together and creating: {elf_file}")
systemLine(f"{self.target_arc.compiler_path} {compile_flags} {' '.join(compilation_files)} {fixed_link_flags} -o {elf_file}", self.logger)
self.logger.removeIndent()
return None
###########################
## Compiling a PIC Scout ##
###########################
# 4. Generate all of the *.S files
self.logger.info("Compiling the *.c files")
compile_flags, link_flags = self.target_arc.prepareFlags()
s_files = []
for c_file in compilation_files:
local_out_file = ".".join(c_file.split(".")[:-1]) + ".S"
systemLine(f"{self.target_arc.compiler_path} -S -c {compile_flags} {c_file} -o {local_out_file}", self.logger)
s_files.append(local_out_file)
# 5. Work-around GCC's bugs
# We can afford these changes due to the following:
# a) We only perform them on PIC compilations
# b) PIC compilations don't contain string literals, so we won't conflict with them
# c) Our strings are very specific, so they (probably) won't conflict with something else
self.logger.info("Fixing the *.S files to work around GCC's bugs")
for s_file in s_files:
fd = open(s_file, "r")
content_lines = fd.readlines()
fd.close()
new_content_lines = []
for content in content_lines:
# Makes sure that only our special "_start" will be at the beginning of the compiled blob
# This is needed because gcc tends to place "Main" in .text.startup section, instead of our _start.
if ".section .text.startup" in content and "Scout" not in content:
continue
content = content.replace(".space #", ".space ").replace(".space $", ".space ")
# Mips: convert the calls to relative (PIC)
if self.target_arc.name() == ARC_MIPS:
content = content.replace("\tjal\t", "\tbal\t").replace("\tj\t", "\tb\t")
# save the modified line
new_content_lines.append(content)
fd = open(s_file, "w")
fd.writelines(new_content_lines)
fd.close()
# 6. Generate all of the *.o files
self.logger.info("Compiling the *.S files")
o_files = []
for s_file in s_files:
local_out_file = ".".join(s_file.split(".")[:-1]) + ".o"
systemLine(f"{self.target_arc.compiler_path} -c {compile_flags} {s_file} -o {local_out_file}", self.logger)
o_files.append(local_out_file)
# 7. Link together all of the *.o files
self.logger.info(f"Linking together all of the files, creating: {elf_file}")
systemLine(f"{self.target_arc.linker_path} {link_flags} {' '.join(o_files)} -o {elf_file}", self.logger)
# 8. Objcopy the content to the actual wanted file
if elf_file.split('.')[-1].lower() == "elf":
binary_file = '.'.join(elf_file.split('.')[:-1] + ['bin'])
else:
binary_file = elf_file + ".bin"
self.logger.info(f"Extracting the final binary to: {binary_file}")
systemLine(f"{self.target_arc.objcopy_path} -O binary -j .text -j .rodata {' '.join(self.target_arc.objcopy_flags)} {elf_file} {binary_file}", self.logger)
# 9. Place the PIC context inside the file
placeContext(self.full_got, self.global_vars, binary_file, self.logger)
self.logger.removeIndent()
return binary_file
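A hedged usage sketch for scoutCompiler (not taken from the project's examples; the elementals Logger construction, directory paths, and source file names below are placeholder assumptions, not real repository files):
from elementals import Logger  # assumption: the docstrings above reference an "elementals" logger

demo_logger = Logger("scout_compiler_demo")
compiler = scoutCompiler(demo_logger)
compiler.setArc(ARC_INTEL, is_pic=False, is_32_bits=False)
compiler.setScoutMode(is_user=True)
compiler.setWorkingDirs(project_dir="/path/to/project", scout_dir="/path/to/src/scout")
# scout_files / project_files are placeholder names only
compiler.compile(scout_files=["some_scout_module.c"],
                 project_files=["/path/to/project/project_main.c"],
                 elf_file="demo_scout.elf")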
| 43.731429
| 163
| 0.616425
| 2,001
| 15,306
| 4.530735
| 0.191404
| 0.02537
| 0.024377
| 0.013126
| 0.185749
| 0.105228
| 0.089676
| 0.051291
| 0.026362
| 0.016104
| 0
| 0.00567
| 0.27401
| 15,306
| 349
| 164
| 43.856734
| 0.810205
| 0.347707
| 0
| 0.121212
| 0
| 0.030303
| 0.15341
| 0.028151
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.048485
| 0
| 0.151515
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c26ac1a91dabdb0034c28b5241ea7cfad78d438f
| 3,375
|
py
|
Python
|
jscatter/jscatter_test.py
|
flekschas/jupyter-scatter
|
550eceb2311b0394caad83dbb399ed2f29e55af6
|
[
"Apache-2.0"
] | 23
|
2021-02-03T02:05:47.000Z
|
2022-03-17T14:53:39.000Z
|
jscatter/jscatter_test.py
|
manzt/jupyter-scatter
|
c38f94abfb655e03f407e7fcec80a883439796b5
|
[
"Apache-2.0"
] | 5
|
2021-02-04T22:19:35.000Z
|
2022-03-07T04:49:31.000Z
|
jscatter/jscatter_test.py
|
manzt/jupyter-scatter
|
c38f94abfb655e03f407e7fcec80a883439796b5
|
[
"Apache-2.0"
] | 1
|
2021-06-15T14:14:47.000Z
|
2021-06-15T14:14:47.000Z
|
import numpy as np
import pandas as pd
from .jscatter import Scatter, component_idx_to_name
from .utils import minmax_scale
def test_component_idx_to_name():
assert 'valueA' == component_idx_to_name(2)
assert 'valueB' == component_idx_to_name(3)
assert component_idx_to_name(4) is None
assert component_idx_to_name(1) is None
assert component_idx_to_name(None) is None
def test_scatter_numpy():
x = np.random.rand(500)
y = np.random.rand(500)
scatter = Scatter(x, y)
widget = scatter.widget
widget_data = np.asarray(widget.points)
assert (500, 4) == widget_data.shape
assert np.allclose(minmax_scale(x, (-1,1)), widget_data[:,0])
assert np.allclose(minmax_scale(y, (-1,1)), widget_data[:,1])
assert np.sum(widget_data[:,2:]) == 0
def get_df():
num_groups = 8
data = np.random.rand(500, 7)
data[:,2] *= 100
data[:,3] *= 100
data[:,3] = data[:,3].astype(int)
data[:,4] = np.round(data[:,4] * (num_groups - 1)).astype(int)
data[:,5] = np.repeat(np.arange(100), 5).astype(int)
data[:,6] = np.resize(np.arange(5), 500).astype(int)
df = pd.DataFrame(
data,
columns=['a', 'b', 'c', 'd', 'group', 'connect', 'connect_order']
)
df['group'] = df['group'].astype('int').astype('category').map(lambda c: chr(65 + c), na_action=None)
df['connect'] = df['connect'].astype('int')
df['connect_order'] = df['connect_order'].astype('int')
return df
def test_scatter_pandas():
df = get_df()
scatter = Scatter(data=df, x='a', y='b')
widget = scatter.widget
widget_data = np.asarray(widget.points)
assert (500, 4) == np.asarray(widget.points).shape
assert np.allclose(minmax_scale(df['a'].values, (-1,1)), widget_data[:,0])
assert np.allclose(minmax_scale(df['b'].values, (-1,1)), widget_data[:,1])
def test_scatter_point_encoding_updates():
df = get_df()
scatter = Scatter(data=df, x='a', y='b')
widget = scatter.widget
widget_data = np.asarray(widget.points)
assert len(scatter._encodings.data) == 0
assert np.sum(widget_data[:,2:]) == 0
scatter.color(by='group')
widget_data = np.asarray(widget.points)
assert 'color' in scatter._encodings.visual
assert 'group' in scatter._encodings.data
assert np.sum(widget_data[:,2]) > 0
assert np.sum(widget_data[:,3]) == 0
scatter.opacity(by='c')
widget_data = np.asarray(widget.points)
assert 'opacity' in scatter._encodings.visual
assert 'c' in scatter._encodings.data
assert np.sum(widget_data[:,3]) > 0
scatter.size(by='c')
widget_data = np.asarray(widget.points)
assert 'size' in scatter._encodings.visual
assert 'c' in scatter._encodings.data
assert np.sum(widget_data[:,3]) > 0
def test_scatter_connection_encoding_updates():
df = get_df()
scatter = Scatter(data=df, x='a', y='b')
widget = scatter.widget
scatter.connect(by='connect')
widget_data = np.asarray(widget.points)
assert widget_data.shape == (500, 5)
assert np.all(
df['connect'].values == widget_data[:,4].astype(df['connect'].dtype)
)
scatter.connect(order='connect_order')
widget_data = np.asarray(widget.points)
assert widget_data.shape == (500, 6)
assert np.all(
df['connect_order'].values == widget_data[:,5].astype(df['connect_order'].dtype)
)
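A minimal usage sketch derived from the tests above (the top-level import path is an assumption; the tests themselves use a relative import):
import numpy as np
from jscatter import Scatter  # assumed public import path

x, y = np.random.rand(200), np.random.rand(200)
scatter = Scatter(x, y)   # x and y are min-max scaled to [-1, 1] internally, per the tests above
scatter.widget            # in a Jupyter cell this renders the interactive scatter plot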
| 31.25
| 105
| 0.647704
| 497
| 3,375
| 4.2334
| 0.173038
| 0.109316
| 0.064164
| 0.089829
| 0.554183
| 0.502852
| 0.430133
| 0.389734
| 0.372148
| 0.313688
| 0
| 0.030105
| 0.183111
| 3,375
| 107
| 106
| 31.542056
| 0.733043
| 0
| 0
| 0.309524
| 0
| 0
| 0.061055
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.071429
| false
| 0
| 0.047619
| 0
| 0.130952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c26b638d6a13eb8cf7404de0290463e08f694482
| 10,917
|
py
|
Python
|
py-world/world/main.py
|
Coastchb/Tacotron-2
|
0a61c8ff4fadfbd9d4157ee93b875e7d79fd750c
|
[
"MIT"
] | null | null | null |
py-world/world/main.py
|
Coastchb/Tacotron-2
|
0a61c8ff4fadfbd9d4157ee93b875e7d79fd750c
|
[
"MIT"
] | null | null | null |
py-world/world/main.py
|
Coastchb/Tacotron-2
|
0a61c8ff4fadfbd9d4157ee93b875e7d79fd750c
|
[
"MIT"
] | null | null | null |
import logging
import sys
from typing import Iterable
# 3rd party imports
import numpy as np
# import matplotlib.pyplot as plt
from scipy.io.wavfile import read as wavread
# local imports
from .dio import dio
from .stonemask import stonemask
from .harvest import harvest
from .cheaptrick import cheaptrick
from .d4c import d4c
from .d4cRequiem import d4cRequiem
from .get_seeds_signals import get_seeds_signals
from .synthesis import synthesis
from .synthesisRequiem import synthesisRequiem
from .swipe import swipe
class World(object):
def get_f0(self, fs: int, x: np.ndarray, f0_method: str = 'harvest', f0_floor: int = 71, f0_ceil: int = 800,
channels_in_octave: int = 2, target_fs: int = 4000, frame_period: int = 5) -> tuple:
"""
:param fs: sample frequency
:param x: signal
:param f0_method: f0 extraction method: dio, harvest
:param f0_floor: smallest f0
:param f0_ceil: largest f0
:param channels_in_octave:
:param target_fs: downsampled frequency for f0 extraction
:param frame_period: in ms
:return:
"""
if f0_method == 'dio':
source = dio(x, fs, f0_floor, f0_ceil, channels_in_octave, target_fs, frame_period)
source['f0'] = stonemask(x, fs, source['temporal_positions'], source['f0'])
elif f0_method == 'harvest':
source = harvest(x, fs, f0_floor, f0_ceil, frame_period)
elif f0_method == 'swipe':
source = swipe(fs, x, plim=[f0_floor, f0_ceil],sTHR=0.3)
else:
raise Exception
return source['temporal_positions'], source['f0'], source['vuv'] # or a dict
def get_spectrum(self, fs: int, x: np.ndarray, f0_method: str = 'harvest', f0_floor: int = 71, f0_ceil: int = 800,
channels_in_octave: int = 2, target_fs: int = 4000, frame_period: int = 5, fft_size=None) -> dict:
'''
This function extracts the pitch-synchronous WORLD spectrogram
:param fs: sampling frequency
:param x: signal (in float)
:param f0_method: dio, harvest, swipe
:param f0_floor: f0 min
:param f0_ceil: f0 max
:param frame_period: frame shift
:param fft_size: fourier transform length
:param channels_in_octave: channels per octave
:return:
'''
if f0_method == 'dio':
source = dio(x, fs, f0_floor, f0_ceil, channels_in_octave, target_fs, frame_period)
source['f0'] = stonemask(x, fs, source['temporal_positions'], source['f0'])
elif f0_method == 'harvest':
source = harvest(x, fs, f0_floor, f0_ceil, frame_period)
elif f0_method == 'swipe':
source = swipe(fs, x, plim=[f0_floor, f0_ceil],sTHR=0.3)
else:
raise Exception
filter = cheaptrick(x, fs, source, fft_size=fft_size)
return {'f0': source['f0'],
'temporal_positions': source['temporal_positions'],
'fs': fs,
'ps spectrogram': filter['ps spectrogram'],
'spectrogram': filter['spectrogram']}
def encode_w_gvn_f0(self, fs: int, x: np.ndarray, source: dict, fft_size=None, is_requiem: bool=False) -> dict:
'''
Do WORLD pitch-synchronous analysis with given F0 contour
:param fs: sampling rate
:param x: signal
:param source: a dictionary containing source['temporal_positions'] (time in seconds), source['f0'] (F0 contour) and source['vuv'] (voiced/unvoiced flags)
:param fft_size: length of Fourier transform
:return: a dictionary containing WORLD's components
'''
assert np.all(source['f0'] >= 3 * fs / fft_size)
filter = cheaptrick(x, fs, source, fft_size=fft_size)
if is_requiem:
source = d4cRequiem(x, fs, source, fft_size=fft_size)
else:
source = d4c(x, fs, source, fft_size_for_spectrum=fft_size)
return {'temporal_positions': source['temporal_positions'],
'vuv': source['vuv'],
'f0': source['f0'],
'fs': fs,
'spectrogram': filter['spectrogram'],
'aperiodicity': source['aperiodicity'],
'coarse_ap': source['coarse_ap'],
'is_requiem': is_requiem
}
def encode(self, fs: int, x: np.ndarray, f0_method: str = 'harvest', f0_floor: int = 71, f0_ceil: int = 800,
channels_in_octave: int = 2, target_fs: int = 4000, frame_period: int = 5,
allowed_range: float = 0.1, fft_size=None, is_requiem: bool=False) -> dict:
'''
encode speech to excitation signal, f0, spectrogram
:param fs: sample frequency
:param x: signal
:param f0_method: f0 extraction method: dio, harvest
:param f0_floor: smallest f0
:param f0_ceil: largest f0
:param channels_in_octave: number of channels per octave
:param target_fs: downsampled frequency for f0 extraction
:param frame_period: in ms
:param allowed_range:
:param fft_size: length of Fourier transform
:return: a dictionary containing WORLD components
'''
if fft_size is not None:
f0_floor = 3.0 * fs / fft_size
if f0_method == 'dio':
source = dio(x, fs,
f0_floor=f0_floor, f0_ceil=f0_ceil, channels_in_octave=channels_in_octave, target_fs=target_fs,
frame_period=frame_period, allowed_range=allowed_range)
source['f0'] = stonemask(x, fs, source['temporal_positions'], source['f0'])
elif f0_method == 'harvest':
source = harvest(x, fs,
f0_floor=f0_floor, f0_ceil=f0_ceil, frame_period=frame_period)
elif f0_method == 'swipe':
source = swipe(fs, x, plim=[f0_floor, f0_ceil], sTHR=0.3)
else:
raise Exception
filter = cheaptrick(x, fs, source, fft_size=fft_size)
if is_requiem:
source = d4cRequiem(x, fs, source, fft_size=fft_size)
else:
source = d4c(x, fs, source, fft_size_for_spectrum=fft_size)
return {'temporal_positions': source['temporal_positions'],
'vuv': source['vuv'],
'fs': filter['fs'],
'f0': source['f0'],
'aperiodicity': source['aperiodicity'],
'ps spectrogram': filter['ps spectrogram'],
'spectrogram': filter['spectrogram'],
'is_requiem': is_requiem
}
def scale_pitch(self, dat: dict, factor: float) -> dict:
'''
the function does pitch scaling
:param dat: WORLD components (F0, spectrogram, aperiodicity)
:param factor: scaling factor
:return: scaled pitch.
'''
dat['f0'] *= factor
return dat
def set_pitch(self, dat: dict, time: np.ndarray, value: np.ndarray) -> dict:
raise NotImplementedError # TODO: need to resample to set values at given temporal positions (which are presumably shared with the spectrogram)
dat['f0'] = value
dat['temporal_positions'] = time
return dat
def scale_duration(self, dat: dict, factor: float) -> dict:
'''
the function does duration scaling
:param dat: WORLD components (F0, spectrogram, aperiodicity)
:param factor: scaling factor
:return: scaled event times to speed up or slow down the speech
'''
dat['temporal_positions'] *= factor
return dat
def modify_duration(self, dat: dict, from_time: Iterable, to_time: Iterable) -> dict:
end = dat['temporal_positions'][-1]
assert np.all(np.diff(from_time)) > 0
assert np.all(np.diff(to_time)) > 0
assert from_time[0] > 0
assert from_time[-1] < end
from_time = np.r_[0, from_time, end]
if to_time[-1] == -1:
to_time[-1] = end
dat['temporal_positions'] = np.interp(dat['temporal_positions'], from_time, to_time)
return dat
def warp_spectrum(self, dat: dict, factor: float) -> dict:
dat['spectrogram'][:] = np.array([np.interp((np.arange(0, len(s)) / len(s)) ** factor,
(np.arange(0, len(s)) / len(s)),
s)
for s in dat['spectrogram'].T]).T
return dat
def decode(self, dat: dict) -> dict:
'''
This function combines WORLD components (F0, spectrogram, and aperiodicity) to synthesize the sound signal
:param dat: contains WORLD components
:return: a dictionary contains synthesized speech and WORLD components
'''
if dat['is_requiem']:
seeds_signals = get_seeds_signals(dat['fs'])
y = synthesisRequiem(dat, dat, seeds_signals)
else:
y = synthesis(dat, dat)
m = np.max(np.abs(y))
if m > 1.0:
logging.info('rescaling waveform')
y /= m
dat['out'] = y
return dat
def draw(self, x: np.ndarray, dat: dict):
'''
An example of visualizing WORLD components, the original signal, and the synthesized signal
'''
from matplotlib import pyplot as plt
fs = dat['fs']
time = dat['temporal_positions']
y = dat['out']
fig, ax = plt.subplots(nrows=5, figsize=(8, 6), sharex=True)
ax[0].set_title('input signal and resynthesized-signal')
ax[0].plot(np.arange(len(x)) / fs, x, alpha=0.5)
ax[0].plot(np.arange(len(y)) / fs, y, alpha=0.5)
ax[0].set_xlabel('time (s)')
ax[0].legend(['original', 'synthesis'])
X = dat['ps spectrogram']
X = np.where(X==0, sys.float_info.epsilon, X)
ax[1].set_title('pitch-synchronous spectrogram')
ax[1].imshow(20 * np.log10(np.abs(X[:X.shape[0] // 2, :])), cmap=plt.cm.gray_r, origin='lower',
extent=[0, len(x) / fs, 0, fs / 2], aspect='auto')
ax[1].set_ylabel('frequency (Hz)')
ax[2].set_title('phase spectrogram')
ax[2].imshow(np.diff(np.unwrap(np.angle(X[:X.shape[0] // 2, :]), axis=1), axis=1), cmap=plt.cm.gray_r,
origin='lower',
extent=[0, len(x) / fs, 0, fs / 2], aspect='auto')
ax[2].set_ylabel('frequency (Hz)')
ax[3].set_title('WORLD spectrogram')
Y = dat['spectrogram']
Y = np.where(Y < sys.float_info.epsilon, sys.float_info.epsilon, Y)
ax[3].imshow(20 * np.log10(Y), cmap=plt.cm.gray_r, origin='lower',
extent=[0, len(x) / fs, 0, fs / 2], aspect='auto')
ax[3].set_ylabel('frequency (Hz)')
ax[4].set_title('WORLD fundamental frequency')
ax[4].plot(time, dat['f0'])
ax[4].set_ylabel('frequency (Hz)')
plt.show()
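A hedged usage sketch for the World class above (the wav path is a placeholder and the normalisation assumes a 16-bit PCM file):
if __name__ == '__main__':
    fs, x_int16 = wavread('speech.wav')      # placeholder path; wavread is imported at the top of this module
    x = x_int16 / 32768.0                    # the analysis functions expect a float signal
    vocoder = World()
    dat = vocoder.encode(fs, x, f0_method='harvest')
    dat = vocoder.scale_pitch(dat, 1.2)      # raise F0 by 20 %
    dat = vocoder.decode(dat)                # dat['out'] now holds the resynthesised waveform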
| 42.644531
| 152
| 0.582944
| 1,394
| 10,917
| 4.422525
| 0.167145
| 0.026115
| 0.017518
| 0.018978
| 0.502352
| 0.455637
| 0.44558
| 0.436334
| 0.406164
| 0.392863
| 0
| 0.026172
| 0.29651
| 10,917
| 255
| 153
| 42.811765
| 0.776563
| 0.202895
| 0
| 0.374233
| 0
| 0
| 0.113881
| 0
| 0
| 0
| 0
| 0.003922
| 0.030675
| 1
| 0.067485
| false
| 0
| 0.09816
| 0
| 0.226994
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c26b881427d152a0f3576dc1d7e1e0a52917ad82
| 8,165
|
py
|
Python
|
src/universal_build/helpers/build_docker.py
|
prototypefund/universal-build
|
809e641d5cf9dc1378cd0e0e3ea6e79f773ae4e7
|
[
"MIT"
] | 17
|
2020-11-20T15:58:02.000Z
|
2022-02-06T19:18:20.000Z
|
src/universal_build/helpers/build_docker.py
|
prototypefund/universal-build
|
809e641d5cf9dc1378cd0e0e3ea6e79f773ae4e7
|
[
"MIT"
] | 3
|
2021-02-17T13:47:44.000Z
|
2021-10-14T13:53:15.000Z
|
src/universal_build/helpers/build_docker.py
|
prototypefund/universal-build
|
809e641d5cf9dc1378cd0e0e3ea6e79f773ae4e7
|
[
"MIT"
] | 6
|
2020-11-23T09:51:26.000Z
|
2022-02-11T13:46:57.000Z
|
"""Utilities to help building Docker images."""
import argparse
import os
import subprocess
from typing import List, Optional
from universal_build import build_utils
FLAG_DOCKER_IMAGE_PREFIX = "docker_image_prefix"
def parse_arguments(
input_args: List[str] = None, argument_parser: argparse.ArgumentParser = None
) -> dict:
"""Parses all arguments and returns a sanitized & augmented list of arguments.
Sanitized means that, for example, the version is already checked and set depending on our build guidelines.
If arguments are not valid, exit the script run.
Args:
input_args (List[str], optional): List of arguments that are used instead of the arguments passed to the process. Defaults to `None`.
argument_parser (argparse.ArgumentParser, optional): An argument parser which is passed as a parents parser to the default ArgumentParser to be able to use additional flags besides the default ones.
Returns:
dict: The parsed default arguments that are already checked for validity.
"""
if argument_parser is None:
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument(
"--" + FLAG_DOCKER_IMAGE_PREFIX.replace("_", "-"),
help="Provide a prefix for a Docker image, e.g. 'mltooling/' or even a repository path. When leaving blank, the default Dockerhub Repository is used.",
required=False,
default="",
)
return build_utils.parse_arguments(
input_args=input_args, argument_parser=argument_parser
)
def check_image(
image: str, trivy: bool = True, exit_on_error: bool = True
) -> subprocess.CompletedProcess:
"""Run vulnerability checks on Dockerimage.
Args:
image (str): The name of the docker image to check.
trivy (bool, optional): Activate trivy vulnerability check. Defaults to `True`.
exit_on_error (bool, optional): If `True`, exit process as soon as an error occurs.
"""
build_utils.log("Run vulnerability checks on docker image:")
if trivy and build_utils.command_exists("trivy", exit_on_error=exit_on_error):
return build_utils.run(
f"trivy image --timeout=20m0s --exit-code 1 --severity HIGH,CRITICAL {image}",
exit_on_error=exit_on_error,
)
return subprocess.CompletedProcess(args="", returncode=-1, stdout="", stderr="")
# TODO: Implement dockle container scan
def lint_dockerfile(
hadolint: bool = True, dockerfile: str = "Dockerfile", exit_on_error: bool = True
) -> None:
"""Run hadolint on the Dockerfile.
Args:
hadolint (bool, optional): Activate hadolint dockerfile linter. Defaults to `True`.
dockerfile (str, optional): Specify a specific Dockerfile. If not specified, the default `Dockerfile` will be used.
exit_on_error (bool, optional): Exit process if an error occurs. Defaults to `True`.
"""
build_utils.log("Run linters and style checks:")
if hadolint and build_utils.command_exists("hadolint", exit_on_error=exit_on_error):
config_file_arg = ""
if os.path.exists(".hadolint.yml"):
config_file_arg = "--config=.hadolint.yml"
build_utils.run(
f"hadolint {config_file_arg} {dockerfile}", exit_on_error=exit_on_error
)
def get_image_name(name: str, tag: str, image_prefix: str = "") -> str:
"""Get a valid versioned image name.
Args:
name (str): Name of the docker image.
tag (str): Version to use for the tag.
image_prefix (str, optional): The prefix added to the name to indicate an organization on DockerHub or a completely different repository.
Returns:
str: a valid docker image name based on: prefix/name:tag
"""
versioned_tag = name.strip() + ":" + tag.strip()
if image_prefix:
versioned_tag = image_prefix.strip().rstrip("/") + "/" + versioned_tag
return versioned_tag
def build_docker_image(
name: str,
version: str,
build_args: str = "",
docker_image_prefix: str = "",
dockerfile: Optional[str] = None,
additional_build_args: str = "",
exit_on_error: bool = True,
) -> subprocess.CompletedProcess:
"""Build a docker image from a Dockerfile in the working directory.
Args:
name (str): Name of the docker image.
version (str): Version to use as tag.
build_args (str, optional): Add additional build arguments for docker build.
docker_image_prefix (str, optional): The prefix added to the name to indicate an organization on DockerHub or a completely different repository.
dockerfile (str, optional): Specify a specific Dockerfile. If not specified, the default `Dockerfile` will be used.
exit_on_error (bool, optional): If `True`, exit process as soon as an error occurs.
Returns:
subprocess.CompletedProcess: Returns the CompletedProcess object of the `docker build ...` command.
"""
# Check if docker exists on the system
build_utils.command_exists("docker", exit_on_error=exit_on_error)
versioned_tag = get_image_name(name=name, tag=version)
latest_tag = get_image_name(name=name, tag="latest")
dockerfile_command = ""
if dockerfile:
dockerfile_command = " -f " + dockerfile
completed_process = build_utils.run(
"docker build "
+ dockerfile_command
+ "-t "
+ versioned_tag
+ " -t "
+ latest_tag
+ " "
+ build_args
+ " ./",
exit_on_error=exit_on_error,
)
if completed_process.returncode > 0:
build_utils.log(f"Failed to build Docker image {versioned_tag}")
return completed_process
if docker_image_prefix:
remote_versioned_tag = get_image_name(
name=name, tag=version, image_prefix=docker_image_prefix
)
build_utils.run(
"docker tag " + versioned_tag + " " + remote_versioned_tag,
exit_on_error=exit_on_error,
)
return completed_process
def release_docker_image(
name: str, version: str, docker_image_prefix: str, exit_on_error: bool = True
) -> subprocess.CompletedProcess:
"""Push a Docker image to a repository.
Args:
name (str): The name of the image. Must not be prefixed!
version (str): The tag used for the image.
docker_image_prefix (str): The prefix added to the name to indicate an organization on DockerHub or a completely different repository.
exit_on_error (bool, optional): Exit process if an error occurs. Defaults to `True`.
Returns:
subprocess.CompletedProcess: Returns the CompletedProcess object of the `docker push ...` command.
"""
# Check if docker exists on the system
build_utils.command_exists("docker", exit_on_error=exit_on_error)
if not docker_image_prefix:
build_utils.log(
"The flag --docker-image-prefix cannot be blank when pushing a Docker image."
)
build_utils.exit_process(build_utils.EXIT_CODE_GENERAL)
versioned_tag = get_image_name(name=name, tag=version)
remote_versioned_tag = get_image_name(
name=name, tag=version, image_prefix=docker_image_prefix
)
build_utils.run(
"docker tag " + versioned_tag + " " + remote_versioned_tag,
exit_on_error=exit_on_error,
)
completed_process = build_utils.run(
"docker push " + remote_versioned_tag, exit_on_error=exit_on_error
)
if completed_process.returncode > 0:
build_utils.log(f"Failed to release Docker image {name}:{version}")
# Only push version with latest tag if no suffix is added (pre-release)
if "-" not in version:
remote_latest_tag = get_image_name(
name=name, tag="latest", image_prefix=docker_image_prefix
)
build_utils.log(
"Release Docker image with latest tag as well: " + remote_latest_tag
)
build_utils.run(
"docker tag " + versioned_tag + " " + remote_latest_tag,
exit_on_error=exit_on_error,
)
build_utils.run("docker push " + remote_latest_tag, exit_on_error=exit_on_error)
return completed_process
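A hedged usage sketch for these helpers (the image name, version, and prefix are placeholders; the import path mirrors the module's location under src/, and the assumption that parse_arguments returns a dict keyed by flag name follows its docstring):
from universal_build.helpers import build_docker

args = build_docker.parse_arguments()
prefix = args.get(build_docker.FLAG_DOCKER_IMAGE_PREFIX, "")  # assumes dict keys match flag names
build_docker.lint_dockerfile()                                # runs hadolint if it is installed
result = build_docker.build_docker_image("my-tool", "0.1.0", docker_image_prefix=prefix)
if result.returncode == 0:
    build_docker.check_image(build_docker.get_image_name("my-tool", "0.1.0", prefix))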
| 37.113636
| 205
| 0.679731
| 1,064
| 8,165
| 5.015038
| 0.168233
| 0.035982
| 0.065967
| 0.033733
| 0.490442
| 0.440217
| 0.397114
| 0.37069
| 0.334708
| 0.26593
| 0
| 0.00112
| 0.23466
| 8,165
| 219
| 206
| 37.283105
| 0.852776
| 0.369259
| 0
| 0.275
| 0
| 0.008333
| 0.148997
| 0.008717
| 0
| 0
| 0
| 0.004566
| 0
| 1
| 0.05
| false
| 0
| 0.041667
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c26e8a076cd054bdeb3d8edfa2f30d5c046667f6
| 1,121
|
py
|
Python
|
src/genie/libs/parser/ios/tests/ShowProcessesCpuSorted/cli/equal/golden_output_1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/ios/tests/ShowProcessesCpuSorted/cli/equal/golden_output_1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/ios/tests/ShowProcessesCpuSorted/cli/equal/golden_output_1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
expected_output = {
"sort": {
1: {
"invoked": 3321960,
"usecs": 109,
"tty": 0,
"one_min_cpu": 0.54,
"process": "PIM Process",
"five_min_cpu": 0.48,
"runtime": 362874,
"pid": 368,
"five_sec_cpu": 1.03,
},
2: {
"invoked": 1466728,
"usecs": 2442,
"tty": 0,
"one_min_cpu": 0.87,
"process": "IOSv e1000",
"five_min_cpu": 2.77,
"runtime": 3582279,
"pid": 84,
"five_sec_cpu": 0.55,
},
3: {
"invoked": 116196,
"usecs": 976,
"tty": 0,
"one_min_cpu": 0.07,
"process": "OSPF-1 Hello",
"five_min_cpu": 0.07,
"runtime": 113457,
"pid": 412,
"five_sec_cpu": 0.15,
},
},
"five_sec_cpu_total": 4,
"five_min_cpu": 9,
"one_min_cpu": 4,
"nonzero_cpu_processes": ["PIM Process", "IOSv e1000", "OSPF-1 Hello"],
"five_sec_cpu_interrupts": 0,
}
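Illustrative only (not part of the expected-output file): the golden structure above is a plain nested dict, so individual fields can be read directly.
top_process = expected_output["sort"][1]["process"]         # "PIM Process"
total_five_sec_cpu = expected_output["five_sec_cpu_total"]  # 4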
| 26.069767
| 75
| 0.407672
| 119
| 1,121
| 3.579832
| 0.411765
| 0.112676
| 0.08216
| 0.070423
| 0.098592
| 0.098592
| 0
| 0
| 0
| 0
| 0
| 0.164297
| 0.435326
| 1,121
| 42
| 76
| 26.690476
| 0.508689
| 0
| 0
| 0.071429
| 0
| 0
| 0.317574
| 0.039251
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c26f5c129b7cbf79a66da9961a7b6a906731cbb8
| 4,702
|
py
|
Python
|
watcher_metering/publisher/publisher.py
|
b-com/watcher-metering
|
7c09b243347146e5a421700d5b07d1d0a5c4d604
|
[
"Apache-2.0"
] | 2
|
2015-10-22T19:44:57.000Z
|
2017-06-15T15:01:07.000Z
|
watcher_metering/publisher/publisher.py
|
b-com/watcher-metering
|
7c09b243347146e5a421700d5b07d1d0a5c4d604
|
[
"Apache-2.0"
] | 1
|
2015-10-26T13:52:58.000Z
|
2015-10-26T13:52:58.000Z
|
watcher_metering/publisher/publisher.py
|
b-com/watcher-metering
|
7c09b243347146e5a421700d5b07d1d0a5c4d604
|
[
"Apache-2.0"
] | 4
|
2015-10-10T13:59:39.000Z
|
2020-05-29T11:47:07.000Z
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from oslo_log import log
from six.moves.queue import Queue
from watcher_metering.publisher.base import PublisherServerBase
from watcher_metering.publisher.worker import Worker
LOG = log.getLogger(__name__)
class Publisher(PublisherServerBase):
def __init__(self, use_nanoconfig_service, publisher_endpoint,
nanoconfig_service_endpoint, nanoconfig_update_endpoint,
nanoconfig_profile, metrics_store, max_queue_size,
max_worker, min_worker=5):
"""
:param use_nanoconfig_service: Indicates whether or not it should use a
nanoconfig service
:type use_nanoconfig_service: bool
:param publisher_endpoint: Publisher server URI
:type publisher_endpoint: str
:param nanoconfig_service_endpoint: Nanoconfig service URI
:type nanoconfig_service_endpoint: str
:param nanoconfig_update_endpoint: Nanoconfig update service URI
:type nanoconfig_update_endpoint: str
:param nanoconfig_profile: Nanoconfig profile URI
:type nanoconfig_profile: str
:param max_queue_size: Max size for the message queue
:type max_queue_size: int
:param max_worker: Max number of worker to be spawned at a given time
:type max_worker: int
:param min_worker: Min number of worker to be spawned at a given time
:type min_worker: int
"""
super(Publisher, self).__init__(
use_nanoconfig_service, publisher_endpoint,
nanoconfig_service_endpoint, nanoconfig_update_endpoint,
nanoconfig_profile
)
self.max_queue_size = max_queue_size
self.metrics_store = metrics_store
self.min_worker = min_worker
self.max_worker = max_worker
self.msg_queue = Queue(self.max_queue_size)
self.workers = []
@property
def num_workers(self):
return len(self.workers)
def on_receive(self, msg):
LOG.debug('[Publisher] Queue msg size = %s | workers = %s',
self.msg_queue.qsize(), self.num_workers)
try:
self.check_workers_alive()
self.adjust_pool_size()
except OSError as exc:
LOG.exception(exc)
LOG.error("[Publisher] Error upon receiving a message")
self.msg_queue.put(msg)
def check_workers_alive(self):
# Because we can create new workers in this loop, we create a copy
# --> We would otherwise risk looping over newly created workers...
worker_items = self.workers[:]
for worker_thread in worker_items:
if not worker_thread.is_alive():
self.workers.pop(self.workers.index(worker_thread))
self.start_worker()
def adjust_pool_size(self):
needed_size = self.msg_queue.qsize() + self.min_worker
if abs(needed_size - self.num_workers) > self.min_worker * 2:
LOG.debug(("[Publisher] Auto adjust pool size needed size is `%s` "
"and the current size is `%s`"),
needed_size, self.num_workers)
while self.num_workers > min(self.min_worker, needed_size):
self.stop_worker()
# Create enough, but not too many
while self.num_workers < min(self.max_worker, needed_size):
self.start_worker()
def start_worker(self):
LOG.debug("[Publisher] starting worker")
worker = Worker(self.msg_queue, self.metrics_store)
worker.start()
self.workers.append(worker)
def stop_worker(self):
if self.num_workers:
LOG.debug("[Publisher] stopping worker")
worker = self.workers.pop(-1) # Pops the last worker
worker.stop()
def stop(self):
super(Publisher, self).stop()
join_threads = []
for worker in self.workers:
t = Thread(target=worker.stop)
t.start()
join_threads.append(t)
for join_thread in join_threads:
join_thread.join()
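A hedged construction sketch for Publisher (the endpoint strings are placeholders and the metrics store is stubbed with None; the real store object comes from watcher_metering's configuration):
publisher = Publisher(
    use_nanoconfig_service=False,
    publisher_endpoint="tcp://0.0.0.0:12345",   # placeholder endpoint
    nanoconfig_service_endpoint="",
    nanoconfig_update_endpoint="",
    nanoconfig_profile="",
    metrics_store=None,                          # placeholder; Worker expects a real store
    max_queue_size=50,
    max_worker=20,
)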
| 38.227642
| 79
| 0.652276
| 595
| 4,702
| 4.954622
| 0.294118
| 0.057666
| 0.024423
| 0.035617
| 0.145862
| 0.115332
| 0.097693
| 0.097693
| 0.097693
| 0.097693
| 0
| 0.003497
| 0.270098
| 4,702
| 122
| 80
| 38.540984
| 0.855478
| 0.317312
| 0
| 0.057143
| 0
| 0
| 0.073684
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.071429
| 0.014286
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2722d474ea8fa2b576a6ea93761caf6c92cb828
| 5,547
|
py
|
Python
|
export_pdf_decaissement.py
|
Ciwara/DE-ENCAISSEMENT
|
bd816b40c857a768e866535b46b30ae6fb5020e9
|
[
"Apache-2.0"
] | null | null | null |
export_pdf_decaissement.py
|
Ciwara/DE-ENCAISSEMENT
|
bd816b40c857a768e866535b46b30ae6fb5020e9
|
[
"Apache-2.0"
] | null | null | null |
export_pdf_decaissement.py
|
Ciwara/DE-ENCAISSEMENT
|
bd816b40c857a768e866535b46b30ae6fb5020e9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Fad
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4
# setup the empty canvas
from io import FileIO as file
from reportlab.platypus import Flowable
# from Common.pyPdf import PdfFileWriter, PdfFileReader
from PyPDF2 import PdfFileWriter, PdfFileReader
from reportlab.lib import colors
from reportlab.platypus import Table, TableStyle, Paragraph
# from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.units import inch
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from num2words import num2words
# from configuration import Config
from Common.ui.util import formatted_number
from Common.ui.util import get_temp_filename
class flowable_rect(Flowable):
def __init__(self, text, text2="", chck=0):
Flowable.__init__(self)
self.width = 10
self.height = 10
self.text = text
self.text2 = text2
self.chck = 0
def draw(self):
self.canv.rect(0, 0, self.width, self.height, fill=0)
self.canv.drawString(13, 0, self.text)
if self.text2 != "":
self.canv.rect(0, 15, self.width, self.height, fill=0)
self.canv.drawString(13, 15, self.text2)
def pdFview(filename, invoice):
"""
This view was created for generating the PDF.
"""
styles = getSampleStyleSheet()
# styleN = styles["BodyText"]
styleBH = styles["Normal"]
if not filename:
filename = get_temp_filename('pdf')
PDFSOURCE = 'static/encaissement_source.pdf'
TMP_FILE = 'static/tmp.pdf'
DEFAULT_FONT_SIZE = 11
FONT_BOLD = 'Helvetica-Bold'
FONT = 'Helvetica'
# FONT = 'Courier-Bold'
# A simple function to return a leading 0 on any single digit int.
# Input PDF
input1 = PdfFileReader(file(PDFSOURCE, "rb"))
# Output PDF
output = PdfFileWriter()
# Get the number of pages
n_pages = input1.getNumPages()
# For each page
y = 750
x = 40
recever_name = Paragraph('''{}'''.format(invoice.recever_name), styleBH)
description = Paragraph('''{}'''.format(invoice.description), styleBH)
date_valeur = invoice.date.strftime("%d - %b - %Y")
for i in range(n_pages):
# Get the page from the initial document (input1)
page = input1.getPage(i)
p = canvas.Canvas(TMP_FILE, pagesize=A4)
p.setFont(FONT_BOLD, 12)
p.drawString(x + 300, y - 60, "DECAISEMENT N° :")
p.drawString(x + 300, y - 80, "BAMAKO le ")
p.setFont(FONT, 12)
p.drawString(x + 420, y - 60, invoice.number)
p.drawString(x + 380, y - 80, date_valeur)
ldata = []
ht = invoice.amount
amount = str(formatted_number(ht))
ldata.append(['', "DESIGNATION", 'MONTANT', 'NOM'])
ldata.append(["MONTANT", description, amount, recever_name])
ldata.append(["TAUX", "", "", "MONTANT"])
ldata.append(["VALEUR", "", "", amount])
row = 0.8
col = 1.5
btable = Table(
ldata,
colWidths=[col * inch, 2.8 * inch, col * inch, col * inch],
rowHeights=[0.5 * inch, row * inch, row * inch, row * inch])
btable.setStyle(
TableStyle(
[("BOX", (0, 0), (-1, -1), 0.25, colors.black),
('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
('ALIGN', (0, 1), (-1, -1), "RIGHT"),
('BACKGROUND', (0, 0), (-1, 0), colors.white),
('TEXTCOLOR', (0, 0), (-1, 0), colors.black),
('FONTSIZE', (0, 0), (-1, 0), 14),
('FONTNAME', (0, 0), (-1, -1), FONT_BOLD),
# ('BACKGROUND', (1, 1), (1, 1), colors.black),
('ALIGN', (1, 0), (1, -1), 'LEFT')])
)
a_w = 800
a_h = y - 320
w, h = btable.wrap(a_w, a_h)
btable.drawOn(p, 40, a_h)
ht_en_lettre = num2words(ht, lang='fr')
y = a_h - 15
ht_en_lettre1, ht_en_lettre2 = controle_caratere(ht_en_lettre + " franc CFA", 55, 40)
p.drawString(x, y - 30, "Arrêté la présente facture à la somme de : {}".format(ht_en_lettre1.title()))
p.drawString(x, y - 45, (ht_en_lettre2))
y -= 90
p.drawString(x + 230, y - 20, str(invoice.num_client))
p.setFont(FONT_BOLD, 12)
p.drawString(x, y, "Signature Client")
p.drawString(x + 220, y, "Numéro Client")
p.drawString(x + 440, y, "Signature")
p.showPage()
# Save the page
p.save()
# Create the watermark
watermark = PdfFileReader(file(TMP_FILE, "rb"))
# Merge the initial page with the watermark
page.mergePage(watermark.getPage(0))
# Add the new page to the output
output.addPage(page)
# New PDF
file_dest = filename + ".pdf"
output_stream = file(file_dest, u"wb")
output.write(output_stream)
output_stream.close()
return file_dest
def controle_caratere(lettre, nb_controle, nb_limite):
"""
This function splits a character string according to the given number of characters
and carries the remainder over to the next line.
"""
lettre = lettre
if len(lettre) <= nb_controle:
ch = lettre
ch2 = u""
return ch, ch2
else:
ch = ch2 = u""
for n in lettre.split(u" "):
if len(ch) <= nb_limite:
ch = ch + u" " + n
else:
ch2 = ch2 + u" " + n
return ch, ch2
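An illustrative call to controle_caratere (the values are made up): strings longer than nb_controle are split into a head of roughly nb_limite characters and a tail carried to the next line; note the leading spaces produced by the word-by-word join.
line1, line2 = controle_caratere("deux cent cinquante mille francs CFA", 20, 18)
# line1 -> " deux cent cinquante"
# line2 -> " mille francs CFA"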
| 33.415663
| 110
| 0.581035
| 714
| 5,547
| 4.42577
| 0.35014
| 0.03481
| 0.037975
| 0.013291
| 0.110443
| 0.058228
| 0.058228
| 0.058228
| 0.027848
| 0.027848
| 0
| 0.041404
| 0.28592
| 5,547
| 165
| 111
| 33.618182
| 0.756122
| 0.138453
| 0
| 0.051724
| 0
| 0
| 0.07152
| 0.006367
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.103448
| 0
| 0.172414
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c274168a8c5a204b07518e6afd5c3fd89f5eb019
| 9,073
|
py
|
Python
|
pose/datasets/real_animal_all.py
|
chaneyddtt/UDA-Animal-Pose
|
f1ebfda860a2585c60fe86ce1632e910ac97ebc5
|
[
"MIT"
] | 61
|
2021-03-30T08:34:24.000Z
|
2022-03-30T02:45:46.000Z
|
pose/datasets/real_animal_all.py
|
chaneyddtt/UDA-Animal-Pose
|
f1ebfda860a2585c60fe86ce1632e910ac97ebc5
|
[
"MIT"
] | 13
|
2021-04-10T12:46:58.000Z
|
2022-03-11T10:40:02.000Z
|
pose/datasets/real_animal_all.py
|
chaneyddtt/UDA-Animal-Pose
|
f1ebfda860a2585c60fe86ce1632e910ac97ebc5
|
[
"MIT"
] | 2
|
2021-07-22T04:53:44.000Z
|
2022-02-15T14:19:02.000Z
|
from __future__ import print_function, absolute_import
import random
import torch.utils.data as data
from pose.utils.osutils import *
from pose.utils.transforms import *
from scipy.io import loadmat
import argparse
class Real_Animal_All(data.Dataset):
def __init__(self, is_train=True, is_aug=False, **kwargs):
print()
print("==> real_animal_all")
self.img_folder = kwargs['image_path'] # root image folders
self.is_train = is_train # training set or test set
self.inp_res = kwargs['inp_res']
self.out_res = kwargs['out_res']
self.sigma = kwargs['sigma']
self.scale_factor = kwargs['scale_factor']
self.rot_factor = kwargs['rot_factor']
self.label_type = kwargs['label_type']
self.animal = ['horse', 'tiger'] if kwargs['animal'] == 'all' else [kwargs['animal']] # train on single or all animal categories
self.train_on_all_cat = kwargs['train_on_all_cat'] # train on a single or all categories; decides which mean file to load
self.is_aug = is_aug
# create train/val split
self.train_img_set = []
self.valid_img_set = []
self.train_pts_set = []
self.valid_pts_set = []
self.load_animal()
self.mean, self.std = self._compute_mean()
def load_animal(self):
# generate train/val data
for animal in sorted(self.animal):
img_list = [] # img_list contains all image paths
anno_list = [] # anno_list contains all anno lists
range_path = os.path.join(self.img_folder, 'behaviorDiscovery2.0/ranges', animal, 'ranges.mat')
landmark_path = os.path.join(self.img_folder, 'behaviorDiscovery2.0/landmarks', animal)
range_file = loadmat(range_path)
frame_num = 0
train_idxs = np.load('./data/real_animal/' + animal + '/train_idxs_by_video.npy')
valid_idxs = np.load('./data/real_animal/' + animal + '/valid_idxs_by_video.npy')
for video in range_file['ranges']:
# range_file['ranges'] is a numpy array [Nx3]: shot_id, start_frame, end_frame
shot_id = video[0]
landmark_path_video = os.path.join(landmark_path, str(shot_id) + '.mat')
if not os.path.isfile(landmark_path_video):
continue
landmark_file = loadmat(landmark_path_video)
for frame in range(video[1], video[2] + 1): # ??? video[2]+1
frame_id = frame - video[1]
img_name = animal + '/' + '0' * (8 - len(str(frame))) + str(frame) + '.jpg'
img_list.append([img_name, shot_id, frame_id])
coord = landmark_file['landmarks'][frame_id][0][0][0][0]
vis = landmark_file['landmarks'][frame_id][0][0][0][1]
landmark = np.hstack((coord, vis))
landmark_18 = landmark[:18, :]
if animal == 'horse':
anno_list.append(landmark_18)
elif animal == 'tiger':
landmark_18 = landmark_18[
np.array([1, 2, 3, 4, 5, 6, 7, 8, 15, 16, 17, 18, 13, 14, 9, 10, 11, 12]) - 1]
anno_list.append(landmark_18)
frame_num += 1
for idx in range(train_idxs.shape[0]):
train_idx = train_idxs[idx]
self.train_img_set.append(img_list[train_idx])
self.train_pts_set.append(anno_list[train_idx])
for idx in range(valid_idxs.shape[0]):
valid_idx = valid_idxs[idx]
self.valid_img_set.append(img_list[valid_idx])
self.valid_pts_set.append(anno_list[valid_idx])
print('Animal:{}, number of frames:{}, train: {}, valid: {}'.format(animal, frame_num,
train_idxs.shape[0], valid_idxs.shape[0]))
print('Total number of frames:{}, train: {}, valid {}'.format(len(img_list), len(self.train_img_set),
len(self.valid_img_set)))
def _compute_mean(self):
animal = 'all' if self.train_on_all_cat else self.animal[0] # which mean file to load
meanstd_file = './data/synthetic_animal/' + animal + '_combineds5r5_texture' + '/mean.pth.tar'
if isfile(meanstd_file):
print('load from mean file:', meanstd_file)
meanstd = torch.load(meanstd_file)
else:
print("generate mean file")
mean = torch.zeros(3)
std = torch.zeros(3)
for index in self.train_list:
a = self.img_list[index][0]
img_path = os.path.join(self.img_folder, 'behaviorDiscovery2.0', a)
img = load_image_ori(img_path) # CxHxW
mean += img.view(img.size(0), -1).mean(1)
std += img.view(img.size(0), -1).std(1)
mean /= len(self.train_list)
std /= len(self.train_list)
meanstd = {
'mean': mean,
'std': std,
}
torch.save(meanstd, meanstd_file)
print(' Real animal mean: %.4f, %.4f, %.4f' % (meanstd['mean'][0], meanstd['mean'][1], meanstd['mean'][2]))
print(' Real animal std: %.4f, %.4f, %.4f' % (meanstd['std'][0], meanstd['std'][1], meanstd['std'][2]))
return meanstd['mean'], meanstd['std']
def __getitem__(self, index):
sf = self.scale_factor
rf = self.rot_factor
img_list = self.train_img_set if self.is_train else self.valid_img_set
anno_list = self.train_pts_set if self.is_train else self.valid_pts_set
try:
a = img_list[index][0]
except IndexError:
print(index)
img_path = os.path.join(self.img_folder, 'behaviorDiscovery2.0', a)
img = load_image_ori(img_path) # CxHxW
pts = anno_list[index].astype(np.float32)
x_vis = pts[:, 0][pts[:, 0] > 0]
y_vis = pts[:, 1][pts[:, 1] > 0]
try:
# generate bounding box using keypoints
height, width = img.size()[1], img.size()[2]
y_min = float(max(np.min(y_vis) - 15, 0.0))
y_max = float(min(np.max(y_vis) + 15, height))
x_min = float(max(np.min(x_vis) - 15, 0.0))
x_max = float(min(np.max(x_vis) + 15, width))
except ValueError:
print(img_path, index)
# Generate center and scale for image cropping,
# adapted from human pose https://github.com/princeton-vl/pose-hg-train/blob/master/src/util/dataset/mpii.lua
c = torch.Tensor(((x_min + x_max) / 2.0, (y_min + y_max) / 2.0))
s = max(x_max - x_min, y_max - y_min) / 200.0 * 1.25
# For single-animal pose estimation with a centered/scaled figure
nparts = pts.shape[0]
pts = torch.Tensor(pts)
r = 0
if self.is_aug and self.is_train:
# print('augmentation')
s = s * torch.randn(1).mul_(sf).add_(1).clamp(1 - sf, 1 + sf)[0]
r = torch.randn(1).mul_(rf).clamp(-2 * rf, 2 * rf)[0] if random.random() <= 0.6 else 0
# Flip
if random.random() <= 0.5:
img = torch.from_numpy(fliplr(img.numpy())).float()
pts = shufflelr_ori(pts, width=img.size(2), dataset='real_animal')
c[0] = img.size(2) - c[0]
# Color
img[0, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)
img[1, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)
img[2, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)
# Prepare image and groundtruth map
inp = crop_ori(img, c, s, [self.inp_res, self.inp_res], rot=r)
inp = color_normalize(inp, self.mean, self.std)
# Generate ground truth
tpts = pts.clone()
tpts_inpres = pts.clone()
target = torch.zeros(nparts, self.out_res, self.out_res)
target_weight = tpts[:, 2].clone().view(nparts, 1)
for i in range(nparts):
if tpts[i, 1] > 0:
tpts[i, 0:2] = to_torch(transform(tpts[i, 0:2] + 1, c, s, [self.out_res, self.out_res], rot=r))
tpts_inpres[i, 0:2] = to_torch(transform(tpts_inpres[i, 0:2] + 1, c, s, [self.inp_res, self.inp_res], rot=r))
target[i], vis = draw_labelmap_ori(target[i], tpts[i] - 1, self.sigma, type=self.label_type)
target_weight[i, 0] *= vis
# Meta info
meta = {'index': index, 'center': c, 'scale': s,
'pts': pts, 'tpts': tpts, 'target_weight': target_weight, 'pts_256': tpts_inpres}
return inp, target, meta
def __len__(self):
if self.is_train:
return len(self.train_img_set)
else:
return len(self.valid_img_set)
def real_animal_all(**kwargs):
return Real_Animal_All(**kwargs)
real_animal_all.njoints = 18 # ugly but works
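# --- Hypothetical usage sketch (added for illustration; not part of the
# original file). The keyword arguments passed to real_animal_all below are
# assumptions, since the __init__ of Real_Animal_All is not shown in this
# excerpt, but __getitem__/__len__ above make the dataset compatible with a
# standard torch DataLoader:
#
#     dataset = real_animal_all(is_train=True, is_aug=True, animal=['horse'])
#     loader = torch.utils.data.DataLoader(dataset, batch_size=6, shuffle=True)
#     for inp, target, meta in loader:
#         pass  # inp: normalized crops, target: per-joint heatmaps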
| 45.139303
| 136
| 0.554502
| 1,235
| 9,073
| 3.868826
| 0.193522
| 0.024487
| 0.013813
| 0.015697
| 0.224152
| 0.166388
| 0.130808
| 0.105693
| 0.080578
| 0.061323
| 0
| 0.030671
| 0.310041
| 9,073
| 200
| 137
| 45.365
| 0.732588
| 0.082222
| 0
| 0.063694
| 0
| 0
| 0.086496
| 0.01807
| 0.006369
| 0
| 0
| 0
| 0
| 1
| 0.038217
| false
| 0
| 0.044586
| 0.006369
| 0.121019
| 0.070064
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2769ae34a085e912e6eacf2499ecd7dc14d3eeb
| 492
|
py
|
Python
|
cap6/ex6.py
|
felipesch92/livroPython
|
061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307
|
[
"MIT"
] | null | null | null |
cap6/ex6.py
|
felipesch92/livroPython
|
061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307
|
[
"MIT"
] | null | null | null |
cap6/ex6.py
|
felipesch92/livroPython
|
061b1c095c3ec2d25fb1d5fdfbf9e9dbe10b3307
|
[
"MIT"
] | null | null | null |
p = [1, 4, 9, 10, 20, 25]
e1 = int(input('Primeiro elemento: '))
e2 = int(input('Segundo elemento: '))
x = 0
achou = False
primeiro = 0
while x < len(p):
if p[x] == e1:
print(f'Elemento 1 encontrado na posição {x} da lista!')
if primeiro == 0:
primeiro = 1
if p[x] == e2:
print(f'Elemento 2 encontrado na posição {x} da lista!')
if primeiro == 0:
primeiro = 2
x += 1
print(f'Foi encontrado primeiro o {primeiro} elemento!')
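# Worked example (added for illustration; not part of the original exercise):
# with the list above, entering e1 = 20 and e2 = 4 prints
#   Elemento 2 encontrado na posição 1 da lista!
#   Elemento 1 encontrado na posição 4 da lista!
#   Foi encontrado primeiro o 2 elemento!
# because value 4 (position 1) is reached before value 20 (position 4).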
| 27.333333
| 64
| 0.556911
| 75
| 492
| 3.653333
| 0.413333
| 0.09854
| 0.029197
| 0.145985
| 0.335766
| 0.335766
| 0.335766
| 0.335766
| 0.335766
| 0.335766
| 0
| 0.063584
| 0.296748
| 492
| 17
| 65
| 28.941176
| 0.728324
| 0
| 0
| 0.117647
| 0
| 0
| 0.355691
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.176471
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c278440f2c1e433cd58705bc68bd303258b8e21b
| 8,084
|
py
|
Python
|
lib/datasets/myvg.py
|
zhydong/faster-rcnn.pytorch
|
36fa8b9718228edb4702b039deab924c40b973f5
|
[
"MIT"
] | null | null | null |
lib/datasets/myvg.py
|
zhydong/faster-rcnn.pytorch
|
36fa8b9718228edb4702b039deab924c40b973f5
|
[
"MIT"
] | null | null | null |
lib/datasets/myvg.py
|
zhydong/faster-rcnn.pytorch
|
36fa8b9718228edb4702b039deab924c40b973f5
|
[
"MIT"
] | null | null | null |
"""
Visual Genome in Scene Graph Generation by Iterative Message Passing split
"""
import os
import cv2
import json
import h5py
import pickle
import numpy as np
import scipy.sparse
import os.path as osp
from datasets.imdb import imdb
from model.utils.config import cfg
from IPython import embed
class vg_sggimp(imdb):
"""
Visual Genome with sgg split
"""
def __init__(self, split):
"""
Args:
split: integer, 0 is training, 1 is val, 2 is test
"""
imdb.__init__(self, 'vg_sggimp')
# TODO: add file existence asserts
# load files
self.data_dir = osp.join(cfg.DATA_DIR, 'visual_genome')
self.cache_dir = osp.join(cfg.DATA_DIR, 'cache')
self.anno_dir = osp.join(self.data_dir, 'sggimp')
self.img_dir = osp.join(self.data_dir, 'VG_100K')
with open(osp.join(self.anno_dir, 'VG-SGG-dicts.json'), 'r') as f:
self.vg_dicts = json.load(f)
with open(osp.join(self.anno_dir, 'image_data.json'), 'r') as f:
self.img_meta = json.load(f)
self.vg_h5 = h5py.File(osp.join(self.anno_dir, 'VG-SGG.h5'))
# filter boxes
# filter_corrupted_imgs()
# corrupted files: 1592.jpg 1722.jpg 4616.jpg 4617.jpg
del self.img_meta[1591], self.img_meta[1720]
del self.img_meta[4613], self.img_meta[4613]
self.imgs_path = []
for meta in self.img_meta:
fn = '{}.jpg'.format(meta['image_id'])
img_path = osp.join(self.img_dir, fn)
if osp.exists(img_path):
self.imgs_path.append(img_path)
self.imgs_path = np.array(self.imgs_path)
self.img_meta = np.array(self.img_meta)
assert len(self.imgs_path) == 108073
# there are 108073 images now
# load statistics
self.idx_to_labels = dict(map(lambda x: (int(x[0]), x[1]), self.vg_dicts['idx_to_label'].items()))
self.idx_to_labels.update({0: 'background'})
self.idx_to_predicates = dict(map(lambda x: (int(x[0]), x[1]), self.vg_dicts['idx_to_predicate'].items()))
self.idx_to_predicates.update({0: '__irrelevant__'})
self.nr_predicates = len(self.idx_to_predicates)
self._classes = tuple(self.idx_to_labels.values())
# shape : (NumOfImages, )
self.img_to_first_box = self.vg_h5['img_to_first_box'].value
self.img_to_last_box = self.vg_h5['img_to_last_box'].value
self.img_to_first_rel = self.vg_h5['img_to_first_rel'].value
self.img_to_last_rel = self.vg_h5['img_to_last_rel'].value
# shape: (NumOfBoxes, 4)
self.bboxes = self.vg_h5['boxes_%s' % cfg.BOX_SCALE_H5].value
# convert from xcenter, ycenter, w, h to x0, y0, x1, y1
self.bboxes[:, :2] = self.bboxes[:, :2] - np.floor(self.bboxes[:, 2:] / 2)
self.bboxes[:, 2:] += self.bboxes[:, :2] - 1
# shape: (NumOfBoxes, )
self.bbox_labels = self.vg_h5['labels'].value
# predicates, shape: (NumOfRelationships, )
self.predicates = self.vg_h5['predicates'].value
# box relationships, shape: (NumOfRelationships, 2)
# specify the ids of two boxes related to the relationship
# e.g. bbox_rels[0] is [boxid1, boxid2]
self.bbox_rels = self.vg_h5['relationships'].value
# split, shape (NumOfImages, )
self.split_indicator = self.vg_h5['split'].value
self.split_data(split)
self.filter_invalid_box()
# set imdb attributes
self._image_index = np.arange(len(self.img_meta))
self._image_ext = '.jpg'
def image_id_at(self, idx):
"""
Args:
idx: integer, image index
"""
return self.img_meta[idx]['image_id']
def image_path_at(self, idx):
"""
Args:
idx: integer, image index
"""
return self.imgs_path[idx]
def split_data(self, split):
"""
Args:
split: integer, 0,1,2, train val test
"""
split_mask = self.split_indicator == split
self._filter(split_mask)
def filter_invalid_box(self):
"""
delete images that have no boxes
"""
valid_mask = self.img_to_first_box >= 0
assert np.all(
valid_mask == (self.img_to_last_box >= 0)
)
self._filter(valid_mask)
def _filter(self, mask):
"""
Args:
mask: numpy array of boolean
"""
self.img_to_first_box = self.img_to_first_box[mask]
self.img_to_last_box = self.img_to_last_box[mask]
self.img_to_first_rel = self.img_to_first_rel[mask]
self.img_to_last_rel = self.img_to_last_rel[mask]
self.imgs_path = self.imgs_path[mask]
self.img_meta = self.img_meta[mask]
def gt_roidb(self):
cache_path = osp.join(self.cache_dir, '%s_roidb.pkl' % self._name)
if osp.exists(cache_path):
print('load roidb from cache pickle file')
with open(cache_path, 'rb') as f:
roidb = pickle.load(f)
return roidb
roidb = [self._load_vg_anno(i) for i in self.image_index]
with open(cache_path, 'wb') as f:
pickle.dump(roidb, f)
return roidb
@staticmethod
def get_size_after_resizing(height, width, scale):
if height > width:
return int(scale), int(width / height * scale)
else:
return int(height / width * scale), int(scale)
def _load_vg_anno(self, idx):
"""
load visual genome annotations of image with index `idx`
you should know the difference between image index and image id
image id is in annotation file, image index is the index of img_meta
Args:
idx: integer, index of image
"""
idx_roidb = {}
# image annotations
height, width = self.img_meta[idx]['height'], self.img_meta[idx]['width']
img_scales = max(height, width) / cfg.BOX_SCALE_H5
# bounding boxes annotations
bboxes = self.bboxes[self.img_to_first_box[idx]: self.img_to_last_box[idx] + 1, :]
# bboxes are stored at cfg.BOX_SCALE_H5 resolution (typically 1024)
# original image max size: max(h, w)
# original bboxes = bboxes in 1024-size image * max(h, w) / 1024
bboxes = bboxes * img_scales
bboxes = bboxes.astype('int32')
bbox_labels = self.bbox_labels[self.img_to_first_box[idx]: self.img_to_last_box[idx] + 1]
overlaps = np.zeros((bboxes.shape[0], self.num_classes))
for ci, o in enumerate(overlaps):
o[bbox_labels[ci]] = 1.
overlaps = scipy.sparse.csr_matrix(overlaps)
seg_areas = np.multiply(bboxes[:, 2] - bboxes[:, 0] + 1,
bboxes[:, 3] - bboxes[:, 1] + 1)
# relation annotations
rels = []
first_rel_idx = self.img_to_first_rel[idx]
last_rel_idx = self.img_to_last_rel[idx]
if first_rel_idx >= 0:
assert last_rel_idx >= 0
predicates = self.predicates[first_rel_idx: last_rel_idx + 1]
bbox_rels = self.bbox_rels[first_rel_idx: last_rel_idx + 1]
# img_to_first_box validness has been checked
bbox_rels -= self.img_to_first_box[idx]
assert bbox_rels.shape[0] == predicates.shape[0]
for ri, predicate in enumerate(predicates):
rels.append([bbox_rels[ri][0], predicate, bbox_rels[ri][1]])
rels = np.array(rels)
idx_roidb.update(
{
'boxes': bboxes,
'gt_classes': bbox_labels,
'gt_rels': rels,
'gt_overlaps': overlaps,
'seg_areas': seg_areas,
'flipped': False,
'width': width,
'height': height
}
)
return idx_roidb
if __name__ == '__main__':
fuck = vg_sggimp(0)
embed(header='myvg.py in lib/datasets')
| 36.579186
| 114
| 0.588323
| 1,102
| 8,084
| 4.072595
| 0.211434
| 0.057709
| 0.042112
| 0.034314
| 0.235963
| 0.18672
| 0.098039
| 0.07041
| 0.07041
| 0.07041
| 0
| 0.022312
| 0.295893
| 8,084
| 220
| 115
| 36.745455
| 0.766163
| 0.169594
| 0
| 0.014925
| 0
| 0
| 0.065671
| 0
| 0
| 0
| 0
| 0.004545
| 0.029851
| 1
| 0.067164
| false
| 0
| 0.08209
| 0
| 0.208955
| 0.007463
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c279e40781e717d53e7e9d1d3467cb0c61eb0740
| 6,213
|
py
|
Python
|
src/rfid/__init__.py
|
whaleygeek/SL030
|
ff96337cd1619b4a5bd8097a5d5dd0455d2e1674
|
[
"MIT"
] | null | null | null |
src/rfid/__init__.py
|
whaleygeek/SL030
|
ff96337cd1619b4a5bd8097a5d5dd0455d2e1674
|
[
"MIT"
] | 8
|
2020-11-14T11:01:38.000Z
|
2020-11-18T15:06:07.000Z
|
src/rfid/__init__.py
|
whaleygeek/SL030
|
ff96337cd1619b4a5bd8097a5d5dd0455d2e1674
|
[
"MIT"
] | 2
|
2020-07-23T14:41:31.000Z
|
2020-11-19T13:19:38.000Z
|
# SL030 RFID reader driver for skpang supplied SL030 Mifare reader
# (c) 2013-2014 Thinking Binaries Ltd, David Whale
#===============================================================================
# CONFIGURATION
#
# You can change these configuration items either by editing them in this
# file, or by referring to the module by name inside your own program.
# e.g.
# import rfid
# rfid.CFGEN_GPIO = False
# set to True to detect card presence by using GPIO
# set to False to detect card presence by reading card status
CFGEN_GPIO = True
# Set to the GPIO required to monitor the tag detect (OUT) line
CFG_TAG_DETECT = 4
# The I2C address of the SL030 RFID tag reader
CFG_ADDRESS = 0x50
# How often to poll (in seconds) for a tag present
CFG_TAG_PRESENT_POLL_TIME = 0.01
# How often to poll (in seconds) for a tag absent
CFG_TAG_ABSENT_POLL_TIME = 0.5
# Set to True to throw an exception when an error is printed
# Set to False to just print the error
CFGEN_EXCEPTIONS = True
# The function called when an error occurs in this module
# you can replace this with a function of your own to handle errors
def error(str):
print("ERROR:" + str)
if CFGEN_EXCEPTIONS:
raise ValueError(str)
#===============================================================================
# SETUP
try:
import ci2c # python2
except ImportError:
from . import ci2c # python3
import time
CMD_SELECT_MIFARE = 0x01
CMD_GET_FIRMWARE = 0xF0
WR_RD_DELAY = 0.05
ci2c.initDefaults()
#===============================================================================
# UTILITIES
def typename(type):
if (type == 0x01):
return "mifare 1k, 4byte UID"
elif (type == 0x02):
return "mifare 1k, 7byte UID"
elif (type == 0x03):
return "mifare UltraLight, 7 byte UID"
elif (type == 0x04):
return "mifare 4k, 4 byte UID"
elif (type == 0x05):
return "mifare 4k, 7 byte UID"
elif (type == 0x06):
return "mifare DesFilre, 7 byte UID"
elif (type == 0x0A):
return "other"
else:
return "unknown:" + str(type)
#===============================================================================
# class-based interface.
# If for some reason you had multiple SL030's with different addresses,
# you could use this to have multiple instances. It's not really written
# that way yet as CFG_ADDRESS is global, but it's easy to change if you
# did want more than one reader, or if you wanted different types of readers
# that implemented this same interface and were interchangeable at product
# install time.
# The gpio parameter in __init__ can be used to provide an alternative GPIO
# implementation or to share an application wide GPIO object.
class SL030:
def __init__(self, gpio=None):
self.type = None
self.uid = None
self.GPIO = gpio
if CFGEN_GPIO:
if gpio == None:
# use default RPi.GPIO, if nothing else provided
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
self.GPIO = GPIO
self.GPIO.setup(CFG_TAG_DETECT, GPIO.IN)
def tagIsPresent(self):
if CFGEN_GPIO:
return self.GPIO.input(CFG_TAG_DETECT) == False
else:
return self.select_mifare()
def waitTag(self):
while not self.tagIsPresent():
time.sleep(CFG_TAG_PRESENT_POLL_TIME)
def waitNoTag(self):
while self.tagIsPresent():
time.sleep(CFG_TAG_ABSENT_POLL_TIME)
def validateVer(self, ver):
first = ver[0]
if first != ord('S'):
if first == ord('S') + 0x80:
error("validateVer:Corruption from device detected")
else:
error("validateVer:unrecognised device")
def tostr(self, ver):
verstr = ""
for b in ver:
verstr += chr(b)
return verstr
def getFirmware(self):
# Tx ADDRESS, 1, CMD_GET_FIRMWARE
result = ci2c.write(CFG_ADDRESS, [1, CMD_GET_FIRMWARE])
time.sleep(WR_RD_DELAY)
if result != 0:
error("getFirmware:Cannot read, result=" + str(result))
return None
result, buf = ci2c.read(CFG_ADDRESS, 15)
if result != 0:
error("getFirmware:Cannot write, result=" + str(result))
return None
ver = buf[3:]
self.validateVer(ver)
return self.tostr(ver)
def readMifare(self):
result = ci2c.write(CFG_ADDRESS, [1, CMD_SELECT_MIFARE])
time.sleep(WR_RD_DELAY)
if result != 0:
error("readMifare:Cannot read, result=" + str(result))
return False
result, buf = ci2c.read(CFG_ADDRESS, 15)
if result != 0:
error("readMifare:Cannot write, result=" + str(result))
return False
length = buf[0]
cmd = buf[1]
status = buf[2]
if (status != 0x00):
self.uid = None
self.type = None
return False
# uid length varies with type, and the type byte comes after the uid
uid = buf[3:length]
type = buf[length]
self.type = type
self.uid = uid
return True
def getUID(self):
return self.uid
def getUniqueId(self):
uidstr = ""
for b in self.uid:
uidstr += "%02X" % b
return uidstr
def getType(self):
return self.type
#===============================================================================
# class-less interface
#
# Useful if you want kids to use the interface and don't want the complexity
# of classes. It also allows us to hide some of the more complex functions
# and provide simpler documentation strings
instance = SL030()
def tagIsPresent():
"""Check if there is a tag present or not"""
return instance.tagIsPresent()
def waitTag():
"""Wait until a tag is present"""
instance.waitTag()
def waitNoTag():
"""Wait until there is no longer a tag present"""
instance.waitNoTag()
def readMifare():
"""Try to read this as a mifare tag. Returns False if not a mifare"""
return instance.readMifare()
def getUID():
"""Get the unique ID number of the card"""
return instance.getUID()
def getUniqueId():
"""Get the unique ID number of the card as a printable string"""
return instance.getUniqueId()
def getType():
"""Get the type number of the card"""
return instance.getType()
def getTypeName():
"""Get a string representing the name of the type of card in use"""
return typename(instance.getType())
# END
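# --- Hypothetical usage sketch (not part of the original driver). It relies
# only on the class-less interface defined above and, like the module itself,
# assumes an SL030 reader wired to the configured I2C bus and GPIO pin.
if __name__ == "__main__":
    while True:
        waitTag()                      # block until a tag is in the field
        if readMifare():               # try to select it as a Mifare tag
            print("tag %s (%s)" % (getUniqueId(), getTypeName()))
        waitNoTag()                    # wait for the tag to be removed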
| 26.21519
| 80
| 0.63013
| 859
| 6,213
| 4.488941
| 0.317811
| 0.010892
| 0.017116
| 0.01556
| 0.210581
| 0.155342
| 0.084544
| 0.069502
| 0.054461
| 0.022303
| 0
| 0.022273
| 0.21954
| 6,213
| 236
| 81
| 26.326271
| 0.772943
| 0.404474
| 0
| 0.167939
| 0
| 0
| 0.100996
| 0.012728
| 0
| 0
| 0.013282
| 0
| 0
| 1
| 0.160305
| false
| 0
| 0.038168
| 0.015267
| 0.412214
| 0.007634
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c27a8632891f52402501c008dbb668b6e46297a0
| 3,821
|
py
|
Python
|
pyjapt/lexing.py
|
alejandroklever/pyjapt
|
21b11fd4b5b21cabcc59673538c473e33af9e646
|
[
"MIT"
] | 8
|
2020-07-23T06:19:28.000Z
|
2021-11-06T04:26:47.000Z
|
pyjapt/lexing.py
|
alejandroklever/PyJapt
|
21b11fd4b5b21cabcc59673538c473e33af9e646
|
[
"MIT"
] | null | null | null |
pyjapt/lexing.py
|
alejandroklever/PyJapt
|
21b11fd4b5b21cabcc59673538c473e33af9e646
|
[
"MIT"
] | null | null | null |
import re
from typing import List, Any, Generator, Tuple, Pattern, Optional, Callable, Dict
class Token:
"""
A Token class.
Parameters
----------
lex: str
Token's lexeme.
token_type: Enum
Token's type.
"""
def __init__(self, lex, token_type, line=0, column=0):
"""
:param lex: str
:param token_type: Enum
:param line: int
:param column: int
"""
self.lex: str = lex
self.token_type: Any = token_type
self.line: int = line
self.column: int = column
def __str__(self):
return f'{self.token_type}: {self.lex}'
def __repr__(self):
return str(self)
@property
def is_valid(self):
return True
class Lexer:
def __init__(self, table: List[Tuple[str, str]], eof: str,
token_rules: Optional[Dict[str, Callable[['Lexer'], Optional[Token]]]] = None,
error_handler: Optional[Callable[['Lexer'], None]] = None):
if token_rules is None:
token_rules = {}
if error_handler is None:
error_handler = self.error
self.lineno: int = 1 # Current line number
self.column: int = 1 # Current column in the line
self.position: int = 0 # Current position in recognition
self.text = '' # current text
self.token: Token = Token('', '', 0, 0) # Current token in recognition
self.pattern: Pattern = self._build_regex(table)
self.token_rules = token_rules # type: Dict[str, Callable[['Lexer'], Optional[Token]]]
self.contain_errors: bool = False
self.error_handler = error_handler if error_handler is not None else self.error
self.eof: str = eof
self._errors: List[Tuple[int, int, str]] = []
def tokenize(self, text: str) -> Generator[Token, None, None]:
self.text = text
while self.position < len(text):
match = self.pattern.match(text, pos=self.position)
if match is None:
self.contain_errors = True
self.token = Token(text[self.position], None, self.lineno, self.column)
self.error_handler(self)
continue
lexeme = match.group()
token_type = match.lastgroup if match.lastgroup is not None else match.group()
self.token = Token(lexeme, token_type, self.lineno, self.column)
if token_type in self.token_rules:
token = self.token_rules[token_type](self)
if token is not None and isinstance(token, Token):
yield token
continue
yield self.token
self.position = match.end()
self.column += len(match.group())
yield Token('$', self.eof, self.lineno, self.column)
@property
def errors(self, clean: bool = True):
return [(m if clean else (r, c, m)) for r, c, m in sorted(self._errors)]
def add_error(self, line: int, col: int, error_msg: str):
self._errors.append((line, col, error_msg))
@staticmethod
def error(lexer: 'Lexer') -> None:
lexer.add_error(
lexer.token.line,
lexer.token.column,
f'Tokenization error: unexpected symbol "{lexer.token.lex}" '
f'at line "{lexer.token.line}" and column "{lexer.token.column}"'
)
lexer.position += len(lexer.token.lex)
lexer.column += len(lexer.token.lex)
@staticmethod
def _build_regex(table: List[Tuple[str, str]]) -> Pattern:
return re.compile('|'.join(
[('(?P<%s>%s)' % (name, regex) if name is not None else '(%s)' % regex) for name, regex in table]))
def __call__(self, text: str) -> List[Token]:
return list(self.tokenize(text))
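# --- Hypothetical usage sketch (not part of the original module). The token
# table and the whitespace-skipping rule below are illustrative; note that a
# token rule is responsible for advancing the lexer position itself.
if __name__ == '__main__':
    def skip_space(lexer: Lexer) -> Optional[Token]:
        lexer.position += len(lexer.token.lex)
        lexer.column += len(lexer.token.lex)
        return None  # returning None discards the token

    table = [('num', r'\d+'), ('plus', r'\+'), ('space', r' +')]
    lexer = Lexer(table, eof='EOF', token_rules={'space': skip_space})
    for tok in lexer('1 + 23'):
        print(tok)  # num: 1, plus: +, num: 23, EOF: $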
| 33.517544
| 111
| 0.576289
| 478
| 3,821
| 4.485356
| 0.196653
| 0.041978
| 0.024254
| 0.026586
| 0.04944
| 0.030784
| 0
| 0
| 0
| 0
| 0
| 0.002631
| 0.303585
| 3,821
| 113
| 112
| 33.814159
| 0.803082
| 0.092384
| 0
| 0.078947
| 0
| 0
| 0.053318
| 0.006517
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144737
| false
| 0
| 0.026316
| 0.078947
| 0.276316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c27b06a45e2113932d5e033fa31486f2b933313d
| 3,975
|
py
|
Python
|
src/jobs/management/commands/try_retrain.py
|
fleur101/predict-python
|
d40c876d919232bbb77904e050b182c875bc36fa
|
[
"MIT"
] | 12
|
2018-06-27T08:09:18.000Z
|
2021-10-10T22:19:04.000Z
|
src/jobs/management/commands/try_retrain.py
|
fleur101/predict-python
|
d40c876d919232bbb77904e050b182c875bc36fa
|
[
"MIT"
] | 17
|
2018-06-12T17:36:11.000Z
|
2020-11-16T21:23:22.000Z
|
src/jobs/management/commands/try_retrain.py
|
fleur101/predict-python
|
d40c876d919232bbb77904e050b182c875bc36fa
|
[
"MIT"
] | 16
|
2018-08-02T14:40:17.000Z
|
2021-11-12T12:28:46.000Z
|
import random
from django.core.management.base import BaseCommand
from pandas import Series
from src.cache.cache import put_labelled_logs
from src.core.core import get_encoded_logs
from src.jobs.models import Job
from src.jobs.tasks import prediction_task
from src.runtime.tasks import create_prediction_job
from src.utils.django_orm import duplicate_orm_row
class Command(BaseCommand):
help = 'tries to deliver an explanation of a random prediction of the trained model'
def handle(self, *args, **kwargs):
TARGET_JOB = 439
initial_job_obj = Job.objects.filter(pk=TARGET_JOB)[0]
# todo: return performances
print('Initial Job:', initial_job_obj.evaluation.classificationmetrics) # TODO future bug
training_df_old, test_df_old = get_encoded_logs(initial_job_obj)
training_df = training_df_old.copy()
test_df = test_df_old.copy()
# todo: what should I randomise?
TARGETS = [
[('prefix_1', 2)], # <- simple pattern
[('prefix_2', 3)], # <- simple pattern
[('prefix_3', 2),
('prefix_4', 3),] # <- complex pattern
]
for target in TARGETS:
if len(target) == 1:
target = target[0]
for df in [training_df, test_df]:
m_col = df[target[0]]
del df[target[0]]
target_values1 = list(set(m_col.values))
df[target[0]] = m_col.apply(
lambda x:
x if (x != target[1])
else random.choice(target_values1)
)
elif len(target) > 1:
for df in [training_df, test_df]:
m_col = df[[column for column, _ in target]]
possible_values = {}
for column, _ in target:
possible_values[column] = list(set(df[column]))
del df[column]
df[[column for column, _ in target]] = m_col.apply(
lambda x:
x if any([x[column] != value for column, value in target])
else Series({
column: random.choice(possible_values[column])
for column, value in target
}),
axis=1)
else:
raise Exception('target list with unexpected value')
assert not training_df.equals(training_df_old)
assert not test_df.equals(test_df_old)
# todo: save new dataset in memory and create split to use it
initial_split_obj = initial_job_obj.split
new_split = duplicate_orm_row(initial_split_obj)
train_log = duplicate_orm_row(new_split.train_log)
test_log = duplicate_orm_row(new_split.test_log)
# TODO future bug creates shadows
train_log.name = 'RETRAIN' + train_log.name
train_log.path = 'cache/log_cache/' + train_log.name
train_log.properties = {}
test_log.name = 'RETRAIN' + test_log.name
test_log.path = 'cache/log_cache/' + test_log.name
test_log.properties = {}
new_split.train_log = train_log
new_split.test_log = test_log
new_split.additional_columns = None
new_split.save()
prediction_job = create_prediction_job(initial_job_obj, initial_job_obj.encoding.prefix_length)
prediction_job.split = new_split
prediction_job.split.save()
prediction_job.save()
put_labelled_logs(prediction_job, training_df, test_df)
# todo: build model
prediction_task(prediction_job.id, do_publish_result=False)
prediction_job.refresh_from_db()
# todo: return performances
print('Retrain Job:', prediction_job.evaluation.classificationmetrics)
print('Done, cheers!')
| 38.592233
| 103
| 0.585409
| 477
| 3,975
| 4.622642
| 0.295597
| 0.058957
| 0.035374
| 0.021769
| 0.179138
| 0.109297
| 0.043537
| 0.026304
| 0.026304
| 0.026304
| 0
| 0.008277
| 0.331321
| 3,975
| 103
| 104
| 38.592233
| 0.821294
| 0.066164
| 0
| 0.051282
| 0
| 0
| 0.060238
| 0
| 0
| 0
| 0
| 0.009709
| 0.025641
| 1
| 0.012821
| false
| 0
| 0.115385
| 0
| 0.153846
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c27ba0e5861f097686336335cdd99739a27bfdc4
| 1,646
|
py
|
Python
|
flydra_camnode/flydra_camnode/camnode_utils.py
|
elhananby/flydra
|
09b86859b1863700cdea0bbcdd4758da6c83930b
|
[
"Apache-2.0",
"MIT"
] | 45
|
2017-08-25T06:46:56.000Z
|
2021-08-29T16:42:49.000Z
|
flydra_camnode/flydra_camnode/camnode_utils.py
|
elhananby/flydra
|
09b86859b1863700cdea0bbcdd4758da6c83930b
|
[
"Apache-2.0",
"MIT"
] | 7
|
2017-10-16T10:46:20.000Z
|
2020-12-03T16:42:55.000Z
|
flydra_camnode/flydra_camnode/camnode_utils.py
|
elhananby/flydra
|
09b86859b1863700cdea0bbcdd4758da6c83930b
|
[
"Apache-2.0",
"MIT"
] | 21
|
2018-04-11T09:06:40.000Z
|
2021-12-26T23:38:40.000Z
|
#emacs, this is -*-Python-*- mode
from __future__ import division
from __future__ import with_statement
import contextlib
import threading, Queue
class ChainLink(object):
"""essentially a linked list of threads"""
def __init__(self):
self._queue = Queue.Queue()
self._lock = threading.Lock()
# start: vars access controlled by self._lock
self._next = None
# end: vars access controlled by self._lock
def fire(self, buf):
"""fire a listener in new thread. Threadsafe"""
self._queue.put( buf )
def append_link(self, chain ):
if not isinstance(chain,ChainLink):
raise ValueError("%s is not instance of ChainLink"%(str(chain),))
with self._lock:
if self._next is None:
self._next = chain
return
else:
next = self._next
next.append_link( chain )
def get_buf(self,blocking=True):
"""called from client thread to get a buffer"""
if blocking:
return self._queue.get()
else:
return self._queue.get_nowait()
def end_buf(self,buf):
"""called from client thread to release a buffer"""
with self._lock:
next = self._next
if next is not None:
next.fire(buf)
else:
pool = buf.get_pool()
pool.return_buffer( buf )
@contextlib.contextmanager
def use_buffer_from_chain(link,blocking=True):
"""manage access to the buffer"""
buf = link.get_buf(blocking=blocking)
try:
yield buf
finally:
link.end_buf(buf)
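# --- Hypothetical usage sketch (not part of the original module). The dummy
# buffer/pool classes stand in for flydra's real buffer-pool objects; they
# only provide the get_pool()/return_buffer() methods that end_buf() expects.
class _DummyPool(object):
    def return_buffer(self, buf):
        print('buffer returned to pool')

class _DummyBuf(object):
    def __init__(self, pool):
        self._pool = pool
    def get_pool(self):
        return self._pool

if __name__ == '__main__':
    link = ChainLink()
    link.fire(_DummyBuf(_DummyPool()))        # producer side: enqueue a buffer
    with use_buffer_from_chain(link) as buf:  # consumer side: borrow and release
        pass                                  # process the buffer here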
| 27.433333
| 77
| 0.592345
| 202
| 1,646
| 4.633663
| 0.361386
| 0.042735
| 0.034188
| 0.047009
| 0.115385
| 0.064103
| 0
| 0
| 0
| 0
| 0
| 0
| 0.313487
| 1,646
| 59
| 78
| 27.898305
| 0.828319
| 0.191373
| 0
| 0.170732
| 0
| 0
| 0.023791
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146341
| false
| 0
| 0.097561
| 0
| 0.341463
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c27c8a4376df70bee9dccb2ba1429b510d6719df
| 3,439
|
py
|
Python
|
app.py
|
Nerolation/Ethereum-Notary-Service-Prototype
|
ea5487a29813caee1e4be9edac495d89010c593e
|
[
"MIT"
] | null | null | null |
app.py
|
Nerolation/Ethereum-Notary-Service-Prototype
|
ea5487a29813caee1e4be9edac495d89010c593e
|
[
"MIT"
] | null | null | null |
app.py
|
Nerolation/Ethereum-Notary-Service-Prototype
|
ea5487a29813caee1e4be9edac495d89010c593e
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, request, redirect, logging, make_response, json
from ethw3 import genkey, create_chain_data, verify_chain_data, create_acct, mine, history_slice
from utils_s3 import load_from_fetchlist
# Initialize flask and other global variables
app = Flask(__name__)
address, username, addr, priv, contVer, web3Ver = None,None,None,None,None,None
sig = []
txHash = []
status,status2 = 0,0
recordDict, matchedData = {}, {}
entryList = []
@app.route('/')
def render_index():
mine()
recordDict, history = load_from_fetchlist(history=True)
history = history_slice(history, 20)
global address, sig, txHash, username, status, contVer, web3Ver, addr, priv, status2, matchedData
_sig, _address, _txHash, _username, _status, _contVer, _web3Ver, _addr, _priv, _status2, _matchedData = sig, address, txHash, username, status, contVer, web3Ver, addr, priv, status2, matchedData
address, username, contVer, web3Ver, addr, priv = tuple([None]*6)
sig, txHash = [], []
status, status2 = 0,0
matchedData = {}
return render_template("index.html",
entryList = recordDict,
history = history,
txhash = _txHash,
address = _addr,
username = _username,
sig=_sig,
privkey=_priv,
showStatus = _status,
web3Ver = _web3Ver,
contVer = _contVer,
status2 = _status2,
matchedData = _matchedData)
@app.route('/submit', methods=['POST'])
def hash_to_chain():
global sig, txHash, username, address
sig, address, txHash, username = None,None,None,None
test = 'check' in request.form
fsFile = request.files["file"].read()
pkey = request.form.get("pkey")
username = request.form.get("name")
cookie_value = create_acct(pkey).address
if username == "":
username = "Anonymous"
username, sig, address, txHash = create_chain_data(fsFile, pkey, username, test)
resp = make_response(redirect('/', code=302))
resp.set_cookie("ID", cookie_value, max_age=60*1)
return resp
@app.route("/verify", methods=["POST"])
def verify_from_chain():
global status, contVer, web3Ver
status, contVer, web3Ver = None,None,None
test = 'check2' in request.form
print(test)
fsFile = request.files["file"].read()
sig = request.form.get("sig")
if fsFile == b'':
web3Ver = "nofile"
status = 0
return redirect("/", code=302)
status, contVer, web3Ver = verify_chain_data(fsFile, sig, test)
return redirect("/", code=302)
@app.route('/generate', methods=['POST', "GET"])
def generatekeypair():
global addr, priv
addr, priv = genkey()
return redirect("/", code=302)
@app.route("/whosigned", methods=["POST"])
def direct_verify():
global status2, contVer, matchedData
status, contVer = None,None
test = 'check3' in request.form
sig = request.form.get("sig")
status2, contVer, matchedData= verify_chain_data(sig=sig, test=test)
return redirect("/", code=302)
if __name__ == '__main__':
app.run(debug=True)
gunicorn_logger = logging.getLogger('gunicorn.error')
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
| 37.791209
| 198
| 0.628962
| 385
| 3,439
| 5.438961
| 0.272727
| 0.042025
| 0.040115
| 0.030564
| 0.234479
| 0.146132
| 0.095511
| 0.095511
| 0.095511
| 0.095511
| 0
| 0.01935
| 0.248619
| 3,439
| 90
| 199
| 38.211111
| 0.791022
| 0.012213
| 0
| 0.123457
| 0
| 0
| 0.043004
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061728
| false
| 0
| 0.037037
| 0
| 0.17284
| 0.012346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c27d5e7133ef2989b1ce8e0b881cabce3f8f0dab
| 8,800
|
py
|
Python
|
python/dsbox/planner/common/library.py
|
RqS/dsbox-ta2
|
43800d4365a154684fa5b9551c2c1cd21ec7139c
|
[
"MIT"
] | null | null | null |
python/dsbox/planner/common/library.py
|
RqS/dsbox-ta2
|
43800d4365a154684fa5b9551c2c1cd21ec7139c
|
[
"MIT"
] | null | null | null |
python/dsbox/planner/common/library.py
|
RqS/dsbox-ta2
|
43800d4365a154684fa5b9551c2c1cd21ec7139c
|
[
"MIT"
] | null | null | null |
import json
import os
from datetime import date
from typing import List, Dict
from d3m_metadata.metadata import PrimitiveMetadata, PrimitiveFamily, PrimitiveAlgorithmType
from d3m import index
from dsbox.planner.common.primitive import Primitive
from dsbox.schema.profile_schema import DataProfileType as dpt
from collections import defaultdict
class D3MPrimitiveLibrary(object):
'''Creates a primitive library based on primitives_repo or d3m.index'''
def __init__(self):
self.api_version = ''
# List of all primitives, except those in black list
self.primitives : List[Primitive] = []
# List of black listed primitives, e.g. pickling problems
self.black_list_package : List[str] = []
self.primitive_by_package : Dict[str, Primitive] = {}
self.primitives_by_family : Dict[PrimitiveFamily, List[Primitive]] = defaultdict(list)
self.primitives_by_type : Dict[PrimitiveAlgorithmType, List[Primitive]] = defaultdict(list)
def has_api_version(self, primitives_repo_dir, api_version):
return api_version in os.listdir(primitives_repo_dir)
def load_from_directory(self, primitives_repo_dir, api_version=''):
'''Load primitive description from filesystem.
E.g. from repo https://gitlab.datadrivendiscovery.org/jpl/primitives_repo'''
# Useful for debugging
listing = os.listdir(primitives_repo_dir)
if api_version:
if not api_version in listing:
raise ValueError('API version {} not found'.format(api_version))
else:
date_str = [x[1:] for x in listing if x.startswith('v')]
if not date_str:
raise ValueError('No API version found under {}'.format(primitives_repo_dir))
dates = [date(*(map(int, x.split('.')))) for x in date_str]
vdate = sorted(dates)[-1]
api_version = 'v{}.{}.{}'.format(vdate.year, vdate.month, vdate.day)
self.api_version = api_version
api_dir = os.path.join(primitives_repo_dir, self.api_version)
for team in os.listdir(api_dir):
team_dir = os.path.join(api_dir, team)
for module in os.listdir(team_dir):
module_dir = os.path.join(team_dir, module)
version = self._get_latest_version(os.listdir(module_dir))
primitive_file = os.path.join(module_dir, version, 'primitive.json')
with open(primitive_file) as fp:
d3m_metadata = PrimitiveMetadata(json.load(fp))
primitive = self._create_primitive_desc(d3m_metadata)
if primitive.cls in self.black_list_package:
print('Black listing primitive: {}'.format(primitive.name))
else:
self.primitives.append(primitive)
self._setup()
def load_from_d3m_index(self):
'''Load primitive description from installed python packages'''
for primitive_path, primitive_type in index.search().items():
primitive = self._create_primitive_desc(primitive_type.metadata)
if primitive.cls in self.black_list_package:
print('Black listing primitive: {}'.format(primitive.name))
else:
self.primitives.append(primitive)
self._setup()
def get_primitives_by_family(self, family : PrimitiveFamily) -> List[Primitive]:
return self.primitives_by_family[family]
def has_primitive_by_package(self, path):
return path in self.primitive_by_package
def get_primitive_by_package(self, path):
return self.primitive_by_package[path]
def augment_with_primitive_profiler(self, profiler_json_file):
'''Augment primitive with its requirements using Daniel's primitive profiler output'''
with open(profiler_json_file) as fp:
primitive_profiles = json.load(fp)
for package, profile in primitive_profiles.items():
if not self.has_primitive_by_package(package):
print('Cannot find class: {}'.format(package))
continue
primitive = self.get_primitive_by_package(package)
if 'Requirements' in profile:
# Note: Cannot use {PrimitivePrecondition[x]: True for x in ...}, because of the extra "POSITIVE_VALUES"
primitive.addPrecondition({x : True
for x in profile['Requirements']})
if 'Error' in profile:
primitive.addErrorCondition({x:True for x in profile['Error']})
def add_custom_primitive(self, class_str):
mod, cls = class_str.rsplit('.', 1)
try:
import importlib
module = importlib.import_module(mod)
primitive_type = getattr(module, cls)
primitive = self._create_primitive_desc(primitive_type.metadata)
# Modify to actual python path
primitive.cls = class_str
self.primitives.append(primitive)
self.primitive_by_package[class_str] = primitive
return primitive
except Exception as e:
print('Failed to add primitive: {}'.format(e))
return None
def _get_latest_version(self, versions : List[str]):
version_tuples = [v.split('.') if not v.startswith('v') else v[1:].split('.') for v in versions]
version_tuples = list(map(lambda x : list(map(int, x)), version_tuples))
latest_tuple = sorted(version_tuples)[-1]
index = version_tuples.index(latest_tuple)
return versions[index]
def _create_primitive_desc(self, d3m : PrimitiveMetadata):
primitive = Primitive(d3m.query()['id'], d3m.query()['name'], d3m.query()['python_path'])
primitive.d3m_metadata = d3m
return primitive
def load_black_list(self, jsonfile):
"""Black list primitives that do not work properly"""
with open(jsonfile) as json_data:
black_list = json.load(json_data)
names = []
for pdict in black_list:
# pid = pdict['Id']
name = pdict["Name"]
cls = pdict["Class"]
self.black_list_package.append(cls)
names.append(name)
print('Primitives to black list: {}'.format(names))
def is_black_listed(self, cls):
return cls in self.black_list_package
def _setup(self):
for p in self.primitives:
self.primitive_by_package[p.cls] = p
self.primitives_by_family[p.getFamily()].append(p)
types = p.getAlgorithmTypes()
for entry in types:
if isinstance(types[0], str):
self.primitives_by_type[entry].append(p)
else:
self.primitives_by_type[entry.value].append(p)
class PrimitiveLibrary(object):
"""
Creates a Library of Primitives given the location of a library json
"""
def __init__(self, location):
self.primitives = []
self.json = self.loadjson(location)
for p in self.json:
prim = Primitive(p['Id'], p['Name'], p['Class'])
for precstr in p.get('Requirements', []):
prec = self.parseProfile(precstr)
if prec:
prim.addPrecondition(prec)
for effectstr in p.get('Effects', []):
effect = self.parseProfile(effectstr)
if effect:
prim.addEffect(effect)
prim.type = p.get('LearningType', None)
prim.task = p.get('Task', None)
prim.column_primitive = p.get('RequiresColumnData', False)
prim.is_persistent = (prim.task == "Modeling") or (not p.get('NotPersistent', False))
prim.unified_interface = p.get('UnifiedInterface', False)
prim.init_args = p.get('InitArguments', [])
prim.init_kwargs = p.get('InitKeywordArguments', {})
self.primitives.append(prim)
def parseProfile(self, profile):
value = True
if profile.startswith('!'):
value = False
profile = profile[1:]
if hasattr(dpt, profile):
return {getattr(dpt, profile): value}
return None
def loadjson(self, jsonfile):
with open(jsonfile) as json_data:
d = json.load(json_data)
json_data.close()
return d
def getPrimitivesByEffect(self, effect, value):
plist = [];
for primitive in self.primitives:
if (primitive.preconditions.get(effect, False) != value and
primitive.effects.get(effect, False) == value):
plist.append(primitive)
return plist
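# --- Hypothetical usage sketch (not part of the original module). It assumes
# d3m primitives are installed locally so that load_from_d3m_index() can
# discover them, and simply reports how many primitives fall into each family.
if __name__ == '__main__':
    library = D3MPrimitiveLibrary()
    library.load_from_d3m_index()
    for family, prims in library.primitives_by_family.items():
        print('{}: {} primitives'.format(family, len(prims)))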
| 42.307692
| 112
| 0.611932
| 1,009
| 8,800
| 5.166501
| 0.212091
| 0.04297
| 0.031076
| 0.019183
| 0.150201
| 0.116056
| 0.070209
| 0.070209
| 0.05141
| 0.05141
| 0
| 0.003197
| 0.289091
| 8,800
| 207
| 113
| 42.512077
| 0.830083
| 0.081023
| 0
| 0.12963
| 0
| 0
| 0.050672
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.067901
| 0.030864
| 0.271605
| 0.030864
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c27e29fee5d31c11bd5413203986db3871f9139b
| 3,610
|
py
|
Python
|
gmsh_cad/emi_system_gap.py
|
MiroK/emi-cylinders
|
ccbbfa51003fc4fe8abc257dee916e229398c520
|
[
"MIT"
] | null | null | null |
gmsh_cad/emi_system_gap.py
|
MiroK/emi-cylinders
|
ccbbfa51003fc4fe8abc257dee916e229398c520
|
[
"MIT"
] | null | null | null |
gmsh_cad/emi_system_gap.py
|
MiroK/emi-cylinders
|
ccbbfa51003fc4fe8abc257dee916e229398c520
|
[
"MIT"
] | 1
|
2018-05-30T14:26:59.000Z
|
2018-05-30T14:26:59.000Z
|
from dolfin import *
parameters['form_compiler']['representation'] = 'uflacs'
parameters['form_compiler']['cpp_optimize'] = True
parameters['form_compiler']['cpp_optimize_flags'] = '-O3 -ffast-math -march=native'
parameters['ghost_mode'] = 'shared_facet'
mesh_file = 'cell_grid.h5'
comm = mpi_comm_world()
h5 = HDF5File(comm, mesh_file, 'r')
mesh = Mesh()
h5.read(mesh, 'mesh', False)
# The mesh comes in micrometers. Below it is more convenient to work in cm
mesh.coordinates()[:] *= 1E-4
# Facets in the mesh have tags 0, 1, 2, 3. Tag 1 marks interfaces between
# cells and the exterior, tag 3 marks cell-cell interfaces, and tag 2 marks
# boundary facets of the domain - this is where zero DirichletBCs are
# typically applied for the potential
surfaces = MeshFunction('size_t', mesh, mesh.topology().dim()-1)
h5.read(surfaces, 'facet')
# The domain is split into 2 subdomains marked as 1 and 2 (cell interior,
# cell exterior). These differ by conductivities
volumes = MeshFunction('size_t', mesh, mesh.topology().dim())
h5.read(volumes, 'physical')
cell = mesh.ufl_cell()
# We have 3 spaces S for sigma = -kappa*grad(u) [~electric field]
# U for potential u
# Q for transmebrane potential p
Sel = FiniteElement('RT', cell, 1)
Vel = FiniteElement('DG', cell, 0)
Qel = FiniteElement('Discontinuous Lagrange Trace', cell, 0)
W = FunctionSpace(mesh, MixedElement([Sel, Vel, Qel]))
sigma, u, p = TrialFunctions(W)
tau, v, q = TestFunctions(W)
# Grounding for potential
bcs = [DirichletBC(W.sub(2), Constant(0), surfaces, 2)]
# Make measures aware of subdomains
dx = Measure('dx', domain=mesh, subdomain_data=volumes)
dS = Measure('dS', domain=mesh, subdomain_data=surfaces)
ds = Measure('ds', domain=mesh, subdomain_data=surfaces)
# Normal of the INTERIOR surface. Note that the 1, 2 marking of volumes makes
# the 2-cells the '+' cells w.r.t. the surface, so n('+') would be their
# outer normal (that is, an outer normal of the outside). ('-') makes the
# orientation right
n = FacetNormal(mesh)('-')
# Now onto the weak form
# Electric properties of membrane and interior/exterior
C_m = Constant(1) # 1 mu F / cm^2 @ 1
C_mcc = Constant(1.1) # @ 3
cond_int = Constant(5) # 5 mS / cm
cond_ext = Constant(20) # 20 mS / cm
# Time step
dt_fem = Constant(1E-3) # ms
# The source term as a function Q is coming from ODE solver. Here it is
# just some random function
Q = FunctionSpace(mesh, Qel)
p0 = interpolate(Constant(1), Q)
# And additional source on the boundary is the ionic current. For simplicity
I_ion = p0
# The source term for cell-cell interface
I_gap = 2*p0
# The system
a = ((1/cond_int)*inner(sigma, tau)*dx(1)+(1/cond_ext)*inner(sigma, tau)*dx(2)
- inner(div(tau), u)*dx(1) - inner(div(tau), u)*dx(2)
+ inner(p('+'), dot(tau('+'), n))*dS(1)
+ inner(p('+'), dot(tau('+'), n))*dS(3)
- inner(div(sigma), v)*dx(1) - inner(div(sigma), v)*dx(2)
+ inner(q('+'), dot(sigma('+'), n))*dS(1)
+ inner(q('+'), dot(sigma('+'), n))*dS(3)
- (C_m/dt_fem)*inner(q('+'), p('+'))*dS(1)
- (C_mcc/dt_fem)*inner(q('+'), p('+'))*dS(3))
L = (inner(q('+'), I_ion('+')-(C_m/dt_fem)*p0('+'))*dS(1)
+ inner(q('+'), I_gap('+')-(C_mcc/dt_fem)*p0('+'))*dS(3))
# Additional terms to set to zero the dofs of W.sub(2) which are not on
# the interfaces
a -= inner(p('+'), q('+'))*dS(0) + inner(p, q)*ds(2)
L -= inner(Constant(0)('+'), q('+'))*dS(0) + inner(Constant(0), q)*ds(2)
A, b = PETScMatrix(), PETScVector()
assemble_system(a, L, bcs, A_tensor=A, b_tensor=b)
info("size(A) = %d" % A.size(0))
| 38.404255
| 84
| 0.650139
| 585
| 3,610
| 3.94188
| 0.365812
| 0.015611
| 0.028621
| 0.029922
| 0.17693
| 0.107546
| 0.06765
| 0.036427
| 0
| 0
| 0
| 0.024757
| 0.172022
| 3,610
| 93
| 85
| 38.817204
| 0.746738
| 0.370637
| 0
| 0
| 0
| 0
| 0.113839
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.019231
| 0
| 0.019231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2818a738014513d0cd2309428321fbec20d821d
| 2,855
|
py
|
Python
|
commands/load_metadata/products.py
|
DataViva/dataviva-scripts
|
1e36f11e2849c33b8118cefe1755d312b19c0ecd
|
[
"MIT"
] | 10
|
2015-05-20T14:41:23.000Z
|
2020-05-27T22:36:19.000Z
|
commands/load_metadata/products.py
|
DataViva/dataviva-scripts
|
1e36f11e2849c33b8118cefe1755d312b19c0ecd
|
[
"MIT"
] | 11
|
2018-05-17T14:30:58.000Z
|
2018-09-06T21:20:34.000Z
|
commands/load_metadata/products.py
|
DataViva/dataviva-scripts
|
1e36f11e2849c33b8118cefe1755d312b19c0ecd
|
[
"MIT"
] | 12
|
2015-07-14T13:46:41.000Z
|
2019-09-20T00:47:10.000Z
|
import click
import pandas
import pickle
import json
from clients import s3, redis
@click.command()
@click.option('--both', 'upload', flag_value='s3_and_redis', default=True, help='Upload metadata to both s3 and Redis')
@click.option('--s3', 'upload', flag_value='only_s3', help='Upload metadata only to s3')
@click.option('--redis', 'upload', flag_value='only_redis', help='Upload metadata only to Redis')
def products(upload):
csv = s3.get('metadata/hs.csv')
df = pandas.read_csv(
csv,
sep=';',
header=0,
names=['id', 'name_pt', 'name_en', 'profundidade_id', 'profundidade'],
converters={
"id": str
}
)
products = {}
product_sections = {}
product_chapters = {}
for _, row in df.iterrows():
if row['profundidade'] == 'Seção':
product_section_id = row['id']
product_section = {
'id': product_section_id,
'name_pt': row["name_pt"],
'name_en': row["name_en"],
}
if upload != 'only_s3':
redis.set('product_section/' + str(product_section_id),
json.dumps(product_section, ensure_ascii=False))
product_sections[product_section_id] = product_section
elif row['profundidade'] == 'Capítulo':
product_chapter_id = row['id'][2:]
product_chapter = {
'id': product_chapter_id,
'name_pt': row["name_pt"],
'name_en': row["name_en"],
}
if upload != 'only_s3':
redis.set('product_chapter/' + str(product_chapter_id),
json.dumps(product_chapter, ensure_ascii=False))
product_chapters[product_chapter_id] = product_chapter
for _, row in df.iterrows():
if row['profundidade'] == 'Posição':
product_id = row['id'][2:]
product_section_id = row["id"][:2]
product_chapter_id = row["id"][2:4]
product = {
'id': row['id'][2:],
'name_pt': row["name_pt"],
'name_en': row["name_en"],
'product_section': product_sections[product_section_id],
'product_chapter': product_chapters[product_chapter_id],
}
products[product_id] = product
if upload != 'only_s3':
redis.set('product/' + str(product_id), json.dumps(product, ensure_ascii=False))
if upload != 'only_redis':
s3.put('product.json', json.dumps(products, ensure_ascii=False))
s3.put('product_section.json', json.dumps(
product_sections, ensure_ascii=False))
s3.put('product_chapter.json', json.dumps(
product_chapters, ensure_ascii=False))
click.echo("Products loaded.")
| 33.588235
| 119
| 0.561121
| 322
| 2,855
| 4.720497
| 0.198758
| 0.110526
| 0.073684
| 0.026316
| 0.454605
| 0.296711
| 0.196711
| 0.146053
| 0.1
| 0.1
| 0
| 0.01
| 0.299475
| 2,855
| 84
| 120
| 33.988095
| 0.75
| 0
| 0
| 0.161765
| 0
| 0
| 0.190193
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014706
| false
| 0
| 0.073529
| 0
| 0.088235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c282ea17115bac2dbe64555ce16709d1698a3231
| 5,158
|
py
|
Python
|
docker/entrypoint_benchmark.py
|
augustoproiete-forks/OasisLMF--OasisLMF
|
560749e9dd7d8bd84307cd2767517b3e1d3a1c01
|
[
"BSD-3-Clause"
] | 88
|
2018-03-24T11:57:10.000Z
|
2022-03-21T13:04:41.000Z
|
docker/entrypoint_benchmark.py
|
augustoproiete-forks/OasisLMF--OasisLMF
|
560749e9dd7d8bd84307cd2767517b3e1d3a1c01
|
[
"BSD-3-Clause"
] | 558
|
2018-03-14T14:16:30.000Z
|
2022-03-29T12:48:14.000Z
|
docker/entrypoint_benchmark.py
|
augustoproiete-forks/OasisLMF--OasisLMF
|
560749e9dd7d8bd84307cd2767517b3e1d3a1c01
|
[
"BSD-3-Clause"
] | 41
|
2018-04-09T11:13:12.000Z
|
2021-10-05T14:43:11.000Z
|
#!/usr/bin/env python3
import argparse
import os
import io
import subprocess
import sys
from tabulate import tabulate
def parse_args():
desc = (
'Performance testing script for OasisLMF input file generation. '
'This script expects a set of nested sub-directories, each containing '
'acc.csv, loc.csv and keys.csv'
)
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-t', '--time-threshold', default=300, type=int, help='Maximum time for each file generation test')
parser.add_argument('-d', '--test-directory', default='.', type=str, help='File path of the test data directory')
parser.add_argument('-o', '--output-directory', default='/tmp/oasis-files', type=str, help='Filepath to generate oasisfiles in')
parser.add_argument('-l', '--log-output', default='/var/report/oasisfiles_benchmark.log', type=str, help='Log file path')
parser.add_argument('-a', '--extra-oasislmf-args', default='', type=str, help='Additional arguments to run oasislmf with')
return vars(parser.parse_args())
def run_command(cmd_str):
resp = subprocess.run(cmd_str.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=False)
stdout = resp.stdout.decode('utf-8').strip()
print(stdout)
resp.check_returncode()
return stdout
def parse_gen_output(stdout_string):
runtime_list = [l for l in stdout_string.split('\n') if 'COMPLETED' in l]
t_breakdown = dict()
total = runtime_list.pop().rsplit(' ').pop()
t_breakdown['total'] = float(total[:-1])
for l in runtime_list:
line = l.split(' ')
func = line[-3]
time = line[-1]
t_breakdown[func] = float(time[:-1])
return t_breakdown
def tabulate_data(test_results, output_fp=None):
input_sizes = sorted(list(test_results.keys()))
time_values = dict()
func_names = test_results[input_sizes[0]].keys()
for f in func_names:
name = f.split('.')[-1:].pop()
time_values[name] = list()
for n in input_sizes:
for f in func_names:
name = f.split('.')[-1:].pop()
time_values[name].append(test_results[n][f])
timing_tbl = tabulate(
[[k] + time_values[k] for k in time_values],
headers=['portfolio size'] + input_sizes,
tablefmt="rst")
# if set, write the test summary table to the log file
if output_fp:
log_path = os.path.abspath(output_fp)
log_dir = os.path.dirname(log_path)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
with io.open(log_path, 'w') as log:
log.write(timing_tbl)
print(timing_tbl)
def run_tests(test_dir, run_dir, log_fp, oasis_args, threshold=None):
'''
Output of each run entry in `results`
In [3]: example_run
Out[3]:
{'total': 88.63,
'oasislmf.manager.__init__': 0.0,
'oasislmf.model_preparation.gul_inputs.get_gul_input_items': 16.05,
'oasislmf.model_preparation.gul_inputs.write_items_file': 3.84,
'oasislmf.model_preparation.gul_inputs.write_coverages_file': 1.88,
'oasislmf.model_preparation.gul_inputs.write_gul_input_files': 5.94,
'oasislmf.model_preparation.summaries.get_summary_mapping': 0.8,
'oasislmf.model_preparation.summaries.write_mapping_file': 6.77,
'oasislmf.model_preparation.il_inputs.get_il_input_items': 30.42,
'oasislmf.model_preparation.il_inputs.write_fm_policytc_file': 8.49,
'oasislmf.model_preparation.il_inputs.write_fm_profile_file': 1.59,
'oasislmf.model_preparation.il_inputs.write_fm_programme_file': 7.52,
'oasislmf.model_preparation.il_inputs.write_fm_xref_file': 2.98,
'oasislmf.model_preparation.il_inputs.write_il_input_files': 21.44}
'''
sub_dirs = next(os.walk(test_dir))[1]
test_data = dict()
results = dict()
for d in sub_dirs:
loc_fp = os.path.join(test_dir, d, 'loc.csv')
acc_fp = os.path.join(test_dir, d, 'acc.csv')
keys_fp = os.path.join(test_dir, d, 'keys.csv')
n_sample = sum(1 for line in open(loc_fp)) -1
cmd_str = f'oasislmf model generate-oasis-files -x {loc_fp} -y {acc_fp} -z {keys_fp} --oasis-files-dir {run_dir} {oasis_args} --verbose'
test_data[n_sample] = cmd_str
for t in sorted(test_data.keys()):
print('Running: ')
print(f"cmd = {test_data[t]}")
print(f'size = {t}')
print(f't_max = {threshold}')
stdout = run_command(test_data[t])
run = parse_gen_output(stdout)
results[t] = run
print(f"t_total = {run['total']}\n")
# If given, check that the threshold isn't exceeded
if threshold:
if run['total'] > threshold:
print('FAILED\n')
tabulate_data(results, log_fp)
sys.exit(1)
else:
print('PASSED\n')
tabulate_data(results, log_fp)
return results
if __name__ == "__main__":
args = parse_args()
run_tests(args['test_directory'],
args['output_directory'],
args['log_output'],
args['extra_oasislmf_args'],
args['time_threshold'])
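# --- Hypothetical invocation example (not part of the original script):
#
#   python entrypoint_benchmark.py -d ./test_data -o /tmp/oasis-files -t 600
#
# where every sub-directory of ./test_data contains loc.csv, acc.csv and
# keys.csv; the summary table is written to the path given by --log-output.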
| 35.819444
| 144
| 0.647732
| 726
| 5,158
| 4.374656
| 0.283747
| 0.053212
| 0.09068
| 0.049118
| 0.178841
| 0.158375
| 0.095088
| 0.027078
| 0.027078
| 0.027078
| 0
| 0.01535
| 0.216945
| 5,158
| 143
| 145
| 36.06993
| 0.770983
| 0.203373
| 0
| 0.063158
| 0
| 0.010526
| 0.208095
| 0.014154
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0.010526
| 0.063158
| 0
| 0.157895
| 0.094737
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2844b9558a0aad3fcd5e9e967cacb650e5737e3
| 1,427
|
py
|
Python
|
GUI/index.py
|
Abhishek2019/Speech
|
416827a02279cdafd268ef2748d4f4f52b0f0e15
|
[
"MIT"
] | null | null | null |
GUI/index.py
|
Abhishek2019/Speech
|
416827a02279cdafd268ef2748d4f4f52b0f0e15
|
[
"MIT"
] | null | null | null |
GUI/index.py
|
Abhishek2019/Speech
|
416827a02279cdafd268ef2748d4f4f52b0f0e15
|
[
"MIT"
] | null | null | null |
# from tkinter import *
# root = Tk()
# frametop = Frame(root)
# framebottom = Frame(root)
# frameleft = Frame(framebottom)
# frameright = Frame(framebottom)
# text = Text(frametop)
# scroll = Scrollbar(frametop, command=text.yview)
# btn1 = Button(frameleft, text="Course")
# btn2 = Button(frameleft, text="Abscences")
# btn3 = Button(frameright, text="Notes")
# btn4 = Button(frameright, text="Return")
# text['yscrollcommand'] = scroll.set
# frametop.pack(side=TOP, fill=BOTH, expand=1)
# framebottom.pack(side=BOTTOM, fill=BOTH, expand=1)
# frameleft.pack(side=LEFT, fill=BOTH, expand=1)
# frameright.pack(side=RIGHT, fill=BOTH, expand=1)
# text.pack(side=TOP, fill=BOTH, padx=5, pady=5, expand=1)
# scroll.pack(side=BOTTOM, fill=BOTH, padx=5, pady=5, expand=1)
# btn1.pack(side=TOP, fill=BOTH, padx=5, pady=5, expand=1)
# btn2.pack(side=BOTTOM, fill=BOTH, padx=5, pady=5, expand=1)
# btn3.pack(side=TOP, fill=BOTH, padx=5, pady=5, expand=1)
# btn4.pack(side=BOTTOM, fill=BOTH, padx=5, pady=5, expand=1)
# root.mainloop()
from tkinter import *
root = Tk()
button_frame = Frame(root)
button_frame.pack(fill=X, side=BOTTOM)
reset_button = Button(button_frame, text='Reset')
run_button = Button(button_frame, text='Run')
button_frame.columnconfigure(0, weight=1)
button_frame.columnconfigure(1, weight=1)
reset_button.grid(row=0, column=0, sticky=W+E)
run_button.grid(row=0, column=1, sticky=W+E)
root.mainloop()
| 29.122449
| 63
| 0.716889
| 220
| 1,427
| 4.604545
| 0.236364
| 0.078973
| 0.071076
| 0.076999
| 0.40079
| 0.222113
| 0.222113
| 0.222113
| 0.222113
| 0.222113
| 0
| 0.029898
| 0.10932
| 1,427
| 49
| 64
| 29.122449
| 0.767113
| 0.684653
| 0
| 0
| 0
| 0
| 0.018779
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c288905ef79b8a14c466f9e48a449c4d916507ed
| 7,815
|
py
|
Python
|
summer21/mpqa_dataprocessing/mpqa3_to_dict.py
|
gu-sentiment-2021/sent
|
a3874a7286c965684d92fcf78e4091ad3a33aae1
|
[
"MIT"
] | null | null | null |
summer21/mpqa_dataprocessing/mpqa3_to_dict.py
|
gu-sentiment-2021/sent
|
a3874a7286c965684d92fcf78e4091ad3a33aae1
|
[
"MIT"
] | null | null | null |
summer21/mpqa_dataprocessing/mpqa3_to_dict.py
|
gu-sentiment-2021/sent
|
a3874a7286c965684d92fcf78e4091ad3a33aae1
|
[
"MIT"
] | null | null | null |
# mpqa3_to_dict helps to convert MPQA stand-off format to python dictionaries.
# It provides the following functionalities:
# 1) Clean up the MPQA 3.0 corpus
# 2) Convert an MPQA document to a dictionary
# 3) Convert an entire corpus to a dictionary
import os
import re
HAS_LIST_OF_IDS = [ # These attributes may have any number of ids. (>= 0)
"nested-source", "attitude-link", "insubstantial",
"sTarget-link", "newETarget-link", "eTarget-link",
"target-speech-link"
]
class mpqa3_to_dict:
"""
mpqa3_to_dict helps to clean up the corpus and convert MPQA stand-off format to python dictionaries.
"""
corpus_name = "" # Name of the corpus from which the documents were drawn.
mpqa_dir = "mpqa_dataprocessing\\database.mpqa.cleaned" # mpqa root directory
def __init__(self, corpus_name="", mpqa_dir="mpqa_dataprocessing\\database.mpqa.cleaned"):
self.corpus_name = corpus_name
self.mpqa_dir = mpqa_dir
def __cleanup_data(self, anno_lines): ### IT ACTUALLY DOES NOTHING AT THE MOMENT !!!
"""
It cleans up the annotation lines by correcting misspelled values, attributes and more.
:param anno_lines: a list of the annotation lines of a document
:return: The cleaned up list of the annotation lines of the document
"""
return anno_lines
def doc_to_dict(self, docname, cleaning=True):
"""
It converts an MPQA document to a python dictionary.
:param docname: The name of the document to be converted.
:param cleaning: It cleans up the data, if set to true.
:return: A python dictionary representing the document.
"""
# example: ./docs/20011024/21.53.09-11428
with open(os.path.join(self.mpqa_dir, "docs", docname)) as doc_file:
doc_text = doc_file.read()
# example: ./man_anns/20011024/21.53.09-11428/gateman.mpqa.lre.3.0
anno_lines = []
with open(os.path.join(self.mpqa_dir, "man_anns", docname, "gateman.mpqa.lre.3.0")) as anno_file:
anno_lines = anno_file.readlines()
if cleaning: # Clean up the data, if requested.
anno_lines = self.__cleanup_data(anno_lines)
# Final output
output = {
"agent": [],
"expressive-subjectivity": [],
"direct-subjective": [],
"objective-speech-event": [],
"attitude": [],
"targetFrame": [],
"sTarget": [],
"eTarget": [],
"sentence": [],
"supplementaryAttitude": [],
"supplementaryExpressive-subjectivity": [],
"target-speech": [],
"annotations": {}
}
# Process all annotation lines
for anno in anno_lines:
if len(anno) < 1: # If the line is empty then skip it.
continue
if anno[0] == '#': # If it is a comment then skip it.
continue
# Parsing the main components of an annotation line.
line_id, span, anno_type, attributes = anno.split('\t')
# Converting span to a tuple of ints.
span = span.split(',')
span = (int(span[0]), int(span[1]))
# Removes ' \n' at the end of the string.
attributes = attributes.strip()
# A temporary variable for an annotation line before knowing its ID.
temp_dict = {
"anno-type": anno_type,
"head": doc_text[span[0]:span[1]],
"line-id": int(line_id),
"span-in-doc": span,
}
# Process all attributes
if len(attributes) == 0: # example: split annotation
continue
            # Split on whitespace that lies outside of double quotes (space as the delimiter).
            attributes = attributes.strip()
            # The lookahead group is non-capturing so that re.split does not also return
            # captured fragments (or None) alongside the attribute strings.
            attributes = re.split(r' (?=(?:[^"]*"[^"]*")*[^"]*$)', attributes)
for attribute in attributes:
key, val = attribute.split('=')
key, val = key.strip(), val.strip()
val = val[1:-1] # Removes double quotation marks
if key in HAS_LIST_OF_IDS:
temp_dict[key] = [] if val == "none" or val == "" else [v.strip() for v in val.split(',')]
else:
temp_dict[key] = val
            # By now we usually know the identifier assigned to the annotation,
            # except for some of the agents and the sentences
id = temp_dict.pop("id", line_id)
# Updating the final output
output["annotations"][id] = temp_dict
if anno_type in output:
output[anno_type].append(id)
else: # If there's a new type of annotation, warn us in red!
output[anno_type] = [id]
print("\033[91m <UNKNOWN ANNO: {}>\033[00m".format(anno_type))
# Set sentence-id, sentence and span-in-sentence
for key in output["annotations"].keys():
if key in output["sentence"]:
continue # Skip changing sentences
# Search for the corresponding sentence
for sentence_id in output["sentence"]:
                # Checks whether the annotation lies within this sentence
if output["annotations"][sentence_id]["span-in-doc"][0] <= output["annotations"][key]["span-in-doc"][0]\
and output["annotations"][sentence_id]["span-in-doc"][1] >= output["annotations"][key]["span-in-doc"][1]:
output["annotations"][key]["sentence-id"] = sentence_id
output["annotations"][key]["text"] = output["annotations"][sentence_id]["head"]
output["annotations"][key]["span-in-sentence"] = (
output["annotations"][key]["span-in-doc"][0] - output["annotations"][sentence_id]["span-in-doc"][0],
output["annotations"][key]["span-in-doc"][1] - output["annotations"][sentence_id]["span-in-doc"][0]
)
break
return output
def corpus_to_dict(self, doclist=None, doclist_filename='doclist.3.0', cleaning=True):
"""
It converts an entire list of MPQA documents to a python dictionary.
:param doclist: The list of document names to be converted. If set, doclist_filename will be ignored.
:param doclist_filename: The name of the file which contains a list of the document names.
:param cleaning: It cleans up the data, if set to true.
:return: A python dictionary representing the corpus.
"""
if doclist is None:
doclist = self.__doclistfile_to_doclist(doclist_filename)
output = {
"corpus": self.corpus_name, # Name of the corpus from which the documents were drawn.
"doclist": doclist, # List of the document names.
"docs": {} # Dictionary of document annotations in dictionary format.
}
for docname in doclist:
output["docs"][docname] = self.doc_to_dict(docname, cleaning)
return output
def __doclistfile_to_doclist(self, doclist_filename='doclist.3.0'):
"""
An auxiliary function for converting a file of a list of document names to a list of document names.
:param doclist_filename: The name of the file which contains a list of the document names.
:return: A python list containing the document names.
"""
# example: ./doclist.3.0
doclist = []
with open(os.path.join(self.mpqa_dir, doclist_filename)) as doclist_file:
for doc in doclist_file.readlines():
doclist.append(doc[:-1]) # Removes \n at the end of the line
return doclist
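# A minimal usage sketch (not part of the original module). It assumes the MPQA 3.0
# corpus has been extracted under the default mpqa_dir; the corpus name and the
# document name below are illustrative (the document name is taken from the example
# path mentioned inside doc_to_dict).
if __name__ == "__main__":
    converter = mpqa3_to_dict(corpus_name="mpqa_3.0")
    doc = converter.doc_to_dict("20011024/21.53.09-11428")
    # Annotations are keyed by id; the per-type lists (e.g. "attitude") hold those ids.
    print(len(doc["sentence"]), "sentences,", len(doc["annotations"]), "annotations")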
| 47.078313
| 124
| 0.583109
| 955
| 7,815
| 4.671204
| 0.235602
| 0.016812
| 0.018157
| 0.012329
| 0.325263
| 0.237839
| 0.226182
| 0.19054
| 0.136516
| 0.121946
| 0
| 0.015697
| 0.307102
| 7,815
| 165
| 125
| 47.363636
| 0.808126
| 0.346129
| 0
| 0.116505
| 0
| 0
| 0.172477
| 0.043273
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048544
| false
| 0
| 0.019417
| 0
| 0.135922
| 0.009709
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c28cb4aefdf64fe8ea7d0c518f1c67e77950a4da
| 2,534
|
py
|
Python
|
marketgrab/views.py
|
colinmcglone/window-time
|
74ed90440b9bb93fa569534c7557972242569d3a
|
[
"MIT"
] | null | null | null |
marketgrab/views.py
|
colinmcglone/window-time
|
74ed90440b9bb93fa569534c7557972242569d3a
|
[
"MIT"
] | null | null | null |
marketgrab/views.py
|
colinmcglone/window-time
|
74ed90440b9bb93fa569534c7557972242569d3a
|
[
"MIT"
] | null | null | null |
import os
from django.conf import *
from django.shortcuts import render_to_response, render
from django.http import HttpResponse
from .models import Data, MovingAvg, Movements, Sigma
from datetime import datetime
from django.template import RequestContext
def index(request):
ticker = Data.objects.values_list('ticker').distinct()
market = []
for t in ticker:
t = t[0]
price = Data.objects.filter(ticker=t).latest('date').aclose_price
date = Data.objects.filter(ticker=t).latest('date').date
move_price = Movements.objects.filter(ticker=t, series='market').latest('date').price
move_percent = Movements.objects.filter(ticker=t, series='market').latest('date').percent
move_zscore = Movements.objects.filter(ticker=t, series='market').latest('date').zvalue
spans = MovingAvg.objects.values_list('span').distinct()
i = {
'index':t,
'price':price,
'date':date,
'move_price':move_price,
'move_percent':round(move_percent, 4),
'move_zscore':round(move_zscore, 4),
'hist':'marketgrab/'+t+'_hist.png'
}
market.append(i)
for s in spans:
s = s[0]
avg_price = Movements.objects.filter(ticker=t, series=s).latest('date').price
avg_percent = Movements.objects.filter(ticker=t, series=s).latest('date').percent
zscore = Movements.objects.filter(ticker=t, series=s).latest('date').zvalue
a = {
'ticker':t,
'span':s,
'price':avg_price,
'percent':round(avg_percent, 4),
'zscore':round(zscore, 4)
}
            # Py3-compatible: use the next() builtin rather than the removed generator .next() method
            next(item for item in market if item['index'] == t)[str(s) + '_avg'] = a
context = RequestContext(request, {'market':market})
return render_to_response('marketgrab/index.html', context_instance = context)
def detail(request, t):
if Data.objects.filter(ticker=t).exists():
response = "You're looking at the details for %s."
else:
response = "Sorry, cannot find data for %s."
return HttpResponse(response % t)
def graphs(request):
path = os.path.abspath(os.path.join(settings.BASE_DIR, '..', 'public/static/marketgrab'))
images = []
for f in os.listdir(path):
if f.endswith("png"):
images.append("marketgrab/%s" % f)
context = {'images': images}
return render(request, 'marketgrab/graphs.html', context)
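# A minimal URLconf sketch (an assumption, not part of the original file) showing one way
# the three views above might be wired up in a marketgrab/urls.py; the ticker pattern used
# for detail() is illustrative.
from django.conf.urls import url
from . import views

urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^graphs/$', views.graphs, name='graphs'),
    url(r'^(?P<t>[^/]+)/$', views.detail, name='detail'),
]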
| 33.786667
| 97
| 0.609708
| 308
| 2,534
| 4.938312
| 0.282468
| 0.050625
| 0.112426
| 0.118343
| 0.275477
| 0.259698
| 0.259698
| 0.191322
| 0.191322
| 0
| 0
| 0.003156
| 0.249803
| 2,534
| 74
| 98
| 34.243243
| 0.796949
| 0
| 0
| 0
| 0
| 0
| 0.131413
| 0.02644
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.122807
| 0
| 0.22807
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c291f9afa8d4a69dbe3f4791438b896f2870685a
| 1,690
|
py
|
Python
|
setup.py
|
krismolendyke/den
|
aa18bb3ffc07688dbe5f9cbea9ba39fb9b67d37d
|
[
"MIT"
] | 6
|
2015-06-20T21:54:21.000Z
|
2017-11-29T03:00:15.000Z
|
setup.py
|
krismolendyke/den
|
aa18bb3ffc07688dbe5f9cbea9ba39fb9b67d37d
|
[
"MIT"
] | 1
|
2017-02-13T09:08:54.000Z
|
2017-02-13T09:33:46.000Z
|
setup.py
|
krismolendyke/den
|
aa18bb3ffc07688dbe5f9cbea9ba39fb9b67d37d
|
[
"MIT"
] | null | null | null |
"""setuptools entry point."""
from codecs import open
from os import path
from setuptools import find_packages, setup
HERE = path.abspath(path.dirname(__file__))
with open(path.join(HERE, "README.rst"), encoding="utf-8") as f:
LONG_DESCRIPTION = f.read()
with open(path.join(HERE, "src", "den", "VERSION")) as version_file:
VERSION = version_file.read().strip()
setup(
name="den",
version=VERSION,
description="Den is a home for your home's data.",
long_description=LONG_DESCRIPTION,
author="Kris Molendyke",
author_email="kris@k20e.com",
url="https://git.io/k20e",
license="MIT",
classifiers=[
"Development Status :: 3 - Alpha", "Intended Audience :: Developers", "Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6"
],
keywords="nest thermostat smoke alarm camera weather propane monitor",
packages=find_packages("src"),
package_dir={"": "src"},
install_requires=["backoff>=1.3.2", "influxdb>=3.0", "python-forecastio>=1.3.5", "requests>=2.0"],
extras_require={
"dev": [
"tox",
"yapf",
],
"doc": [
"Sphinx",
"alabaster",
"sphinx-argparse",
"sphinx-autobuild",
],
"notebook": ["jupyter", ],
"test": [
"coverage",
"prospector",
"mock",
"responses",
],
},
package_data={},
include_package_data=True,
data_files=[],
test_suite="tests",
python_requires=">=2.7",
entry_points={"console_scripts": ["den = den.__main__:main", ], }, )
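# A hedged usage note (not part of the original file): with the extras_require layout
# above, the package and its optional extras can typically be installed from the project
# root with, for example:
#   pip install -e ".[dev,test]"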
| 29.649123
| 118
| 0.57929
| 187
| 1,690
| 5.090909
| 0.59893
| 0.047269
| 0.078782
| 0.033613
| 0.042017
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019033
| 0.253846
| 1,690
| 56
| 119
| 30.178571
| 0.735924
| 0.013609
| 0
| 0.08
| 0
| 0
| 0.344973
| 0.014449
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.06
| 0
| 0.06
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c293b3a4ad307a538250b63b7b3b8429f3fda47c
| 25,807
|
py
|
Python
|
apgl/util/Util.py
|
mathemaphysics/APGL
|
6ca7c176e04017feeae00c4cee069fd126df0fbc
|
[
"BSD-3-Clause"
] | 13
|
2015-02-19T14:39:09.000Z
|
2021-04-12T01:22:32.000Z
|
apgl/util/Util.py
|
mathemaphysics/APGL
|
6ca7c176e04017feeae00c4cee069fd126df0fbc
|
[
"BSD-3-Clause"
] | 1
|
2020-07-29T07:09:33.000Z
|
2020-07-29T07:09:33.000Z
|
apgl/util/Util.py
|
mathemaphysics/APGL
|
6ca7c176e04017feeae00c4cee069fd126df0fbc
|
[
"BSD-3-Clause"
] | 7
|
2015-03-16T07:26:49.000Z
|
2021-01-12T06:57:27.000Z
|
'''
Created on 31 Jul 2009
@author: charanpal
'''
from __future__ import print_function
import sys
import os
import numpy
from contextlib import contextmanager
import numpy.random as rand
import logging
import scipy.linalg
import scipy.sparse as sparse
import scipy.special
import pickle
from apgl.util.Parameter import Parameter
class Util(object):
'''
    A class with some generally useful functions that don't fit anywhere else. Not very OO, unfortunately.
'''
def __init__(self):
'''
Constructor
'''
pass
@staticmethod
def histogram(v):
"""
Compute a histogram based on all unique elements in vector v
"""
if v.ndim != 1:
raise ValueError("Input must be a dimension 1 vector")
uniqElements = numpy.unique(v)
numElements = uniqElements.shape[0]
hist = numpy.zeros(numElements)
for i in range(0, numElements):
hist[i] = sum(v == uniqElements[i])
return (hist, uniqElements)
@staticmethod
def mode(v):
"""
        Returns the mode of a 1D vector; if several values are equally frequent, the first one is returned.
"""
if v.ndim != 1:
raise ValueError("Input must be a dimension 1 vector")
uniqElements = numpy.unique(v)
freqs = numpy.zeros(uniqElements.shape[0])
for i in range(uniqElements.shape[0]):
freqs[i] = numpy.sum(v == uniqElements[i])
return uniqElements[numpy.argmax(freqs)]
@staticmethod
def sampleWithoutReplacement(sampleSize, totalSize):
"""
        Create a list of integers from 0 to totalSize, and take a random sample of size
        sampleSize. The sample is returned in sorted order.
"""
perm = rand.permutation(totalSize)
perm = perm[0:sampleSize]
perm = numpy.sort(perm)
return perm
@staticmethod
def randNormalInt(mean, sd, min, max):
"""
Returns a normally distributed integer within a range (inclusive of min, max)
"""
i = round(rand.normal(mean, sd));
while i<min or i>max:
            i = round(rand.normal(mean, sd));  # use the numpy.random alias imported as 'rand'
return i
@staticmethod
def computeMeanVar(X):
mu = numpy.mean(X, 0)
X2 = X - mu
sigma = numpy.dot(X2.T, X2)/X.shape[0]
return (mu, sigma)
@staticmethod
def iterationStr(i, step, maxIter, preStr="Iteration: "):
outputStr = ""
if maxIter == 1:
outputStr = preStr + str(i) + " (1.0)"
elif i % step == 0:
#frm = inspect.stack()[1]
#mod = inspect.getmodule(frm[0])
#logging.info(mod.__name__ + ": " + str(i) + " (" + str(float(i)/maxIter) + ")")
outputStr = preStr + str(i) + " (" + str("%.3f" % (float(i)/(maxIter-1))) + ")"
elif i == maxIter-1:
outputStr = preStr + str(i) + " (" + str("%.3f" % (float(i)/(maxIter-1))) + ")"
else:
raise ValueError("Got invalid input: " + str((i, step, maxIter)))
return outputStr
@staticmethod
def printIteration(i, step, maxIter, preStr="Iteration: "):
if i % step == 0 or i==maxIter-1:
logging.debug(Util.iterationStr(i, step, maxIter, preStr))
@staticmethod
def printConciseIteration(i, step, maxIter, preStr="Iteration: "):
if i==0:
print(Util.iterationStr(i, step, maxIter, preStr), end=""),
elif i!=maxIter-1:
print(Util.iterationStr(i, step, maxIter, " "), end="")
else:
print(Util.iterationStr(i, step, maxIter, " "))
@staticmethod
def abstract():
"""
This is a method to be put in abstract methods so that they are identified
as such when called.
"""
import inspect
caller = inspect.getouterframes(inspect.currentframe())[1][3]
raise NotImplementedError("Method " + caller + ' must be implemented in subclass')
@staticmethod
def rank(A, tol=1e-8):
"""
Kindly borrowed from the following forum thread:
http://mail.scipy.org/pipermail/numpy-discussion/2008-February/031218.html
"""
s = numpy.linalg.svd(A, compute_uv=False)
return numpy.sum(numpy.where(s>tol, 1, 0))
@staticmethod
def randomChoice(V, n=1):
"""
Make a random choice from a vector V of values which are unnormalised
probabilities. Return the corresponding index. For example if v = [1, 2, 4]
        then the probabilities of the indices respectively are [1/7, 2/7, 4/7]. The
parameter n is the number of random choices to make. If V is a matrix,
then the rows are taken as probabilities, and a choice is made for each
row.
"""
Parameter.checkClass(V, numpy.ndarray)
if V.shape[0]==0:
return -1
if V.ndim == 1:
cumV = numpy.cumsum(V)
p = numpy.random.rand(n)*cumV[-1]
return numpy.searchsorted(cumV, p)
elif V.ndim == 2:
cumV = numpy.cumsum(V, 1)
P = numpy.random.rand(V.shape[0], n)*numpy.array([cumV[:, -1]]).T
inds = numpy.zeros(P.shape, numpy.int)
for i in range(P.shape[0]):
inds[i, :] = numpy.searchsorted(cumV[i, :], P[i, :])
return inds
else:
raise ValueError("Invalid number of dimensions")
@staticmethod
def fitPowerLaw(x, xmin):
"""
Take a sample of data points which are drawn from a power law probability
distribution (p(x) = (x/xmin)**-alpha) and return the exponent. This works
best for continuous data.
"""
x = x[x >= xmin]
n = x.shape[0]
lnSum = n / numpy.sum(numpy.log(x/xmin))
#gamma = 1 + lnSum
gamma = lnSum
return gamma
@staticmethod
def fitDiscretePowerLaw(x, xmins = None):
"""
Take a sample of discrete data points which are drawn from a power law probability
distribution (p(x) = x-alpha / zeta(alpha, xmin)) and return the exponent.
If xmins is supplied then it searches through the set of xmins rather than
using all possible xmins. Most of the time it helps to keep xmins low.
        Returns the goodness of fit, the best alpha and xmin. If there is only one unique
        value of x then (-1, -1, min(x)) is returned.
"""
xmax = numpy.max(x)
        if xmins is None:
xmin = numpy.max(numpy.array([numpy.min(x), 1]))
xmins = numpy.arange(xmin, xmax)
#Note that x must have at least 2 unique elements
if xmins.shape[0] == 0:
return -1, -1, numpy.min(x)
alphas = numpy.arange(1.5, 3.5, 0.01)
ksAlpha = numpy.zeros((xmins.shape[0], 2))
for j in range(xmins.shape[0]):
xmin = xmins[j]
z = x[x >= xmin]
n = z.shape[0]
sumLogx = numpy.sum(numpy.log(z))
likelyhoods = numpy.zeros(alphas.shape[0])
for i in range(alphas.shape[0]):
likelyhoods[i] = -n*numpy.log(scipy.special.zeta(alphas[i], xmin)) -alphas[i]*sumLogx
k = numpy.argmax(likelyhoods)
#Compute KS statistic
cdf = numpy.cumsum(numpy.bincount(z)[xmin:xmax]/float(n))
fit = numpy.arange(xmin, xmax)**-alphas[k] /scipy.special.zeta(alphas[k], xmin)
fit = numpy.cumsum(fit)
ksAlpha[j, 0] = numpy.max(numpy.abs(cdf - fit))
ksAlpha[j, 1] = alphas[k]
i = numpy.argmin(ksAlpha[:, 0])
return ksAlpha[i, 0], ksAlpha[i, 1], xmins[i]
@staticmethod
def entropy(v):
"""
        Compute the information entropy of a vector of observations of a random variable,
using the log to the base 2.
"""
items = numpy.unique(v)
infEnt = 0
for i in items:
prob = numpy.sum(v==i)/float(v.shape[0])
infEnt -= prob * numpy.log2(prob)
return infEnt
@staticmethod
def expandIntArray(v):
"""
Take a vector of integers and expand it into a vector with counts of the
corresponding integers. For example, with v = [1, 3, 2, 4], the expanded
vector is [0, 1, 1, 1, 2, 2, 3, 3, 3, 3].
"""
Parameter.checkClass(v, numpy.ndarray)
Parameter.checkList(v, Parameter.checkInt, [0, float('inf')])
w = numpy.zeros(numpy.sum(v), numpy.int)
currentInd = 0
for i in range(v.shape[0]):
w[currentInd:currentInd+v[i]] = i
currentInd += v[i]
return w
@staticmethod
def random2Choice(V, n=1):
"""
Make a random binary choice from a vector V of values which are unnormalised
probabilities. Return the corresponding index. For example if v = [1, 2]
        then the probabilities of the indices respectively are [1/3, 2/3]. The
parameter n is the number of random choices to make. If V is a matrix,
then the rows are taken as probabilities, and a choice is made for each
row.
"""
Parameter.checkClass(V, numpy.ndarray)
if V.ndim == 1 and V.shape[0] != 2:
raise ValueError("Function only works on binary probabilities")
if V.ndim == 2 and V.shape[1] != 2:
raise ValueError("Function only works on binary probabilities")
if V.ndim == 1:
cumV = numpy.cumsum(V)
p = numpy.random.rand(n)*cumV[-1]
cumV2 = numpy.ones(n)*cumV[0] - p
return numpy.array(cumV2 <= 0, numpy.int)
elif V.ndim == 2:
cumV = numpy.cumsum(V, 1)
P = numpy.random.rand(V.shape[0], n)*numpy.array([cumV[:, -1]]).T
cumV2 = numpy.outer(cumV[:, 0], numpy.ones(n)) - P
return numpy.array(cumV2 <= 0, numpy.int)
else:
raise ValueError("Invalid number of dimensions")
@staticmethod
def loadPickle(filename):
"""
Loads a pickled file with the given filename.
"""
file = open(filename, 'rb')
obj = pickle.load(file)
file.close()
#logging.debug("Loaded " + filename + " with object " + str(type(obj)))
return obj
@staticmethod
def savePickle(obj, filename, overwrite=True, debug=False):
if os.path.isfile(filename) and not overwrite:
raise IOError("File exists: " + filename)
file = open(filename, 'wb')
pickle.dump(obj, file)
file.close()
if debug:
logging.debug("Saved " + filename + " object type " + str(type(obj)))
@staticmethod
def incompleteCholesky(X, k):
"""
        Compute the incomplete Cholesky decomposition of a positive semi-definite
square matrix X. Use an approximation of k rows.
"""
if X.shape[0] != X.shape[1]:
raise ValueError("X must be a square matrix")
ell = X.shape[0]
R = numpy.zeros((k, ell))
d = numpy.diag(X)
aInd = numpy.argmax(d)
a = d[aInd]
nu = numpy.zeros(k)
for j in range(k):
nu[j] = numpy.sqrt(a)
for i in range(ell):
R[j, i] = (X[aInd, i] - R[:, i].T.dot(R[:, aInd]))/nu[j]
d[i] = d[i] - R[j, i]**2
aInd = numpy.argmax(d)
a = d[aInd]
return R
@staticmethod
def incompleteCholesky2(X, k):
"""
        Compute the incomplete Cholesky decomposition of a positive semi-definite
square matrix X. Use an approximation of k rows.
"""
ell = X.shape[0]
A = numpy.zeros((ell, k))
Xj = X
Xaj = numpy.zeros((ell, k))
for j in range(k):
d = numpy.diag(Xj)
ind = numpy.argmax(d)
A[ind, j] = 1/numpy.sqrt(Xj[ind, ind])
Xaj[:, j] = Xj.dot(A[:, j])
Xj = Xj - numpy.outer(Xaj[:, j], Xaj[:, j])/numpy.dot(A[:, j].T, Xaj[:, j])
return Xaj.T
@staticmethod
def indEig(s, U, inds):
"""
        Take the output of numpy.linalg.eig and return the eigenvalues and eigenvectors
        sorted in the order given by inds.
"""
U = U[:, inds]
s = s[inds]
return s, U
@staticmethod
def indSvd(P, s, Q, inds):
"""
        Take the output of numpy.linalg.svd and return the singular values and vectors
        sorted in the order given by inds.
"""
if inds.shape[0] != 0:
P = P[:, inds]
s = s[inds]
Q = Q.conj().T
Q = Q[:, inds]
else:
P = numpy.zeros((P.shape[0], 0))
s = numpy.zeros(0)
Q = Q.conj().T
Q = numpy.zeros((Q.shape[0], 0))
return P, s, Q
@staticmethod
def svd(A, eps=10**-8, tol=10**-8):
"""
        Wrapper for 'svd_from_eigh' to work on the smallest dimension of A
"""
if A.shape[0] > A.shape[1]:
return Util.svd_from_eigh(A, eps)
else:
P, s, Qh = Util.svd_from_eigh(A.conj().T, eps, tol)
return Qh.conj().T, s.conj(), P.conj().T
@staticmethod
def svd_from_eigh(A, eps=10**-8, tol=10**-8):
"""
        Find the SVD of an ill-conditioned matrix A. This uses numpy.linalg.eig
        but conditions the matrix first, so it is not as precise as numpy.linalg.svd;
        it can be useful when svd does not converge. Uses the eigenvectors of A^T*A and
        returns singular vectors corresponding to nonzero singular values.
Note: This is slightly different to linalg.svd which returns zero singular
values.
"""
AA = A.conj().T.dot(A)
lmbda, Q = scipy.linalg.eigh(AA + eps*numpy.eye(A.shape[1]))
lmbda = lmbda-eps
inds = numpy.arange(lmbda.shape[0])[lmbda>tol]
lmbda, Q = Util.indEig(lmbda, Q, inds)
sigma = lmbda**0.5
P = A.dot(Q) / sigma
Qh = Q.conj().T
if __debug__:
if not scipy.allclose(A, (P*sigma).dot(Qh), atol=tol):
logging.warn(" SVD obtained from EVD is too poor")
Parameter.checkArray(P, softCheck=True, arrayInfo="P in svd_from_eigh()")
if not Parameter.checkOrthogonal(P, tol=tol, softCheck=True, arrayInfo="P in svd_from_eigh()", investigate=True):
print("corresponding sigma: ", sigma)
Parameter.checkArray(sigma, softCheck=True, arrayInfo="sigma in svd_from_eigh()")
Parameter.checkArray(Qh, softCheck=True, arrayInfo="Qh in svd_from_eigh()")
if not Parameter.checkOrthogonal(Qh.conj().T, tol=tol, softCheck=True, arrayInfo="Qh.H in svd_from_eigh()"):
print("corresponding sigma: ", sigma)
return P, sigma, Qh
@staticmethod
def safeSvd(A, eps=10**-8, tol=10**-8):
"""
Compute the SVD of a matrix using scipy.linalg.svd, and if convergence fails
revert to Util.svd.
"""
# check input matrix
if __debug__:
if not Parameter.checkArray(A, softCheck = True):
logging.info("... in Util.safeSvd")
try:
# run scipy.linalg.svd
try:
P, sigma, Qh = scipy.linalg.svd(A, full_matrices=False)
except scipy.linalg.LinAlgError as e:
logging.warn(str(e))
raise Exception('SVD decomposition has to be computed from EVD decomposition')
# --- only when the SVD decomposition comes from scipy.linalg.svd ---
# clean output singular values (sometimes scipy.linalg.svd returns NaN or negative singular values, let's remove them)
inds = numpy.arange(sigma.shape[0])[sigma > tol]
if inds.shape[0] < sigma.shape[0]:
P, sigma, Q = Util.indSvd(P, sigma, Qh, inds)
Qh = Q.conj().T
# an expensive check but we really need it
# rem: A*s = A.dot(diag(s)) ; A*s[:,new] = diag(s).dot(A)
if not scipy.allclose(A, (P*sigma).dot(Qh)):
logging.warn(" After cleaning singular values from scipy.linalg.svd, the SVD decomposition is too far from the original matrix")
# numpy.savez("matrix_leading_to_bad_SVD.npz", A)
raise Exception('SVD decomposition has to be computed from EVD decomposition')
# check scipy.linalg.svd output matrices (expensive)
if __debug__:
badAnswerFromScipySvd = False
if not Parameter.checkArray(P, softCheck=True, arrayInfo="P in Util.safeSvd()"):
badAnswerFromScipySvd = True
if not Parameter.checkArray(sigma, softCheck = True, arrayInfo="sigma in Util.safeSvd()"):
badAnswerFromScipySvd = True
if not Parameter.checkArray(Qh, softCheck = True, arrayInfo="Qh in Util.safeSvd()"):
badAnswerFromScipySvd = True
if badAnswerFromScipySvd:
logging.warn(" After cleaning singular values from scipy.linalg.svd, the SVD decomposition still contains 'NaN', 'inf' or complex values")
raise Exception('SVD decomposition has to be computed from EVD decomposition')
except Exception as inst:
if inst.args != ('SVD decomposition has to be computed from EVD decomposition',):
raise
logging.warn(" Using EVD method to compute the SVD.")
P, sigma, Qh = Util.svd(A, eps, tol)
# check Util.svd output matrices (expensive)
if __debug__:
badAnswerFromUtilSvd = False
if not Parameter.checkArray(P, softCheck = True):
logging.info("... in P in Util.safeSvd")
badAnswerFromUtilSvd = True
# print nan_rows in P: numpy.isnan(P).sum(0).nonzero()
if not Parameter.checkArray(sigma, softCheck = True):
logging.info("... in sigma in Util.safeSvd")
badAnswerFromUtilSvd = True
# print numpy.isnan(sigma).nonzero()
if not Parameter.checkArray(Qh, softCheck = True):
logging.info("... in Q in Util.safeSvd")
badAnswerFromUtilSvd = True
# blop = numpy.isnan(Qh).sum(1)
# print blop.nonzero()
# print blop[blop.nonzero()]
if badAnswerFromUtilSvd:
logging.warn(" SVD decomposition obtained from EVD decomposition contains 'NaN', 'inf' or real values")
from sandbox.util.ProfileUtils import ProfileUtils
if ProfileUtils.memory() > 10**9:
ProfileUtils.memDisplay(locals())
return P, sigma, Qh
@staticmethod
def safeEigh(a, b=None, lower=True, eigvals_only=False, overwrite_a=False, overwrite_b=False, turbo=True, eigvals=None, type=1):
"""
Compute the EigenDecomposition of a hermitian matrix using scipy.linalg.eigh,
and if convergence fails revert to scipy.linalg.eig.
"""
try:
return scipy.linalg.eigh(a, b=b, lower=lower, eigvals_only=eigvals_only, overwrite_a=overwrite_a, overwrite_b=overwrite_b, turbo=turbo, eigvals=eigvals) #, type=type) I do not know how to manage it
except:
if __debug__:
logging.warning(" scipy.linalg.eigh raised an error, scipy.linalg.eig() is used instead")
lmbda, q = scipy.linalg.eig(a, b=b, overwrite_a=overwrite_a, overwrite_b=overwrite_b)
            if eigvals is None:
eigvals = (0, len(lmbda))
if eigvals_only:
return lmbda[eigvals[0]:eigvals[1]]
else :
return lmbda[eigvals[0]:eigvals[1]], q[eigvals[0]:eigvals[1]]
@staticmethod
def powerLawProbs(alpha, zeroVal=0.5, maxInt=100):
"""
Generate a vector of power law probabilities such that p(x) = C x^-alpha for some
C and 0 < x <= maxInt. The value of zeroVal^-alpha is the probability to assign
to x==0.
"""
p = numpy.arange(0, maxInt, dtype=numpy.float)
p[0] = zeroVal
p = p ** -alpha
p /= p.sum()
return p
@staticmethod
def matrixPower(A, n):
"""
Compute the matrix power of A using the exponent n. The computation simply
        evaluates the eigendecomposition of A and then powers the eigenvalue
matrix accordingly.
Warning: if at least one eigen-value is negative, n should be an integer.
"""
Parameter.checkClass(A, numpy.ndarray)
tol = 10**-10
lmbda, V = scipy.linalg.eig(A)
lmbda[numpy.abs(lmbda) <= tol] = 0
lmbda[numpy.abs(lmbda) > tol] = lmbda[numpy.abs(lmbda) > tol]**n
if n >= 0:
return (V*lmbda).dot(numpy.linalg.inv(V))
else:
A = scipy.linalg.pinv(A)
n = numpy.abs(n)
lmbda, V = scipy.linalg.eig(A)
lmbda[numpy.abs(lmbda) > tol] = lmbda[numpy.abs(lmbda) > tol]**n
return (V*lmbda).dot(numpy.linalg.inv(V))
@staticmethod
def matrixPowerh(A, n):
"""
Compute the matrix power of A using the exponent n. The computation simply
        evaluates the eigendecomposition of A and then powers the eigenvalue
matrix accordingly.
This version assumes that A is hermitian.
Warning: if at least one eigen-value is negative, n should be an integer.
"""
Parameter.checkClass(A, numpy.ndarray)
tol = 10**-10
lmbda, V = scipy.linalg.eigh(A)
lmbda[numpy.abs(lmbda) < tol] = 0
lmbda[numpy.abs(lmbda) > tol] = lmbda[numpy.abs(lmbda) > tol]**n
        # The next line relies on eigh returning an orthonormal basis (even when an
        # eigen-subspace has dimension >= 2); to be precise, it uses dsyevd, which returns an orthonormal matrix.
return (V*lmbda).dot(V.T)
@staticmethod
def extendArray(A, newShape, val=0):
"""
Take a 2D matrix A and extend the shape to newShape adding zeros to the
right and bottom of it. One can optionally pass in scalar or array val
and this will be broadcast into the new array.
"""
tempA = numpy.ones(newShape)*val
tempA[0:A.shape[0], 0:A.shape[1]] = A
return tempA
@staticmethod
def distanceMatrix(U, V):
"""
        Compute a distance matrix between an n x d matrix U and an m x d matrix V, such
        that D_ij = ||u_i - v_j||.
"""
if U.shape[1] != V.shape[1]:
raise ValueError("Arrays must have the same number of columns")
normU = numpy.sum(U**2, 1)
normV = numpy.sum(V**2, 1)
D = numpy.outer(normU, numpy.ones(V.shape[0])) - 2*U.dot(V.T) + numpy.outer(numpy.ones(U.shape[0]), normV)
#Fix for slightly negative numbers
D[D<0] = 0
try:
D **= 0.5
except FloatingPointError:
numpy.set_printoptions(suppress=True, linewidth=200, threshold=2000)
print(D.shape)
print(D)
raise
return D
@staticmethod
def cumMin(v):
"""
        Find the cumulative minimum of a 1d array v, i.e. the minimum of each prefix
        subarray starting from the 1st element.
"""
u = numpy.zeros(v.shape[0])
for i in range(v.shape[0]):
u[i] = numpy.min(v[0:i+1])
return u
@staticmethod
def argsort(seq):
"""
Find the indices of a sequence after being sorted. Code taken from
http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python
"""
return sorted(range(len(seq)), key = seq.__getitem__)
@staticmethod
@contextmanager
def suppressStdout():
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
@staticmethod
@contextmanager
def suppressStderr():
with open(os.devnull, "w") as devnull:
old_stderr = sys.stderr
sys.stderr = devnull
try:
yield
finally:
sys.stderr = old_stderr
@staticmethod
def powerEigs(A, eps=0.001):
"""
Compute the largest eigenvector of A using power iteration. Returns
the eigenvector and corresponding eigenvalue.
"""
v = numpy.random.rand(A.shape[1])
oldV = v
error = eps+1
while error > eps:
v = A.dot(v)
v = v/numpy.sqrt((v**2).sum())
error = numpy.linalg.norm(oldV - v)
oldV = v
return v.T.dot(A).dot(v), v
@staticmethod
def argmaxN(a, N):
"""
        Return the indices of the top N largest elements of numpy array a
"""
b = numpy.zeros(N, numpy.int)
tempA = a.copy()
minA = numpy.min(a)
for i in range(N):
idx = numpy.argmax(tempA)
b[i] = idx
tempA[idx] = minA
return b
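# A short usage sketch (not part of the original module) illustrating two of the helpers
# above; the weights in the randomChoice example mirror the [1/7, 2/7, 4/7] case from its
# docstring, and the inputs themselves are illustrative.
if __name__ == "__main__":
    print(Util.argsort([0.3, 0.1, 0.2]))      # [1, 2, 0]
    probs = numpy.array([1.0, 2.0, 4.0])
    print(Util.randomChoice(probs, n=5))      # 5 indices drawn with weights 1/7, 2/7, 4/7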
| 35.016282
| 209
| 0.551168
| 3,282
| 25,807
| 4.308349
| 0.17794
| 0.037129
| 0.003819
| 0.006223
| 0.357992
| 0.32744
| 0.283239
| 0.263154
| 0.212447
| 0.181117
| 0
| 0.016031
| 0.337699
| 25,807
| 736
| 210
| 35.063859
| 0.811257
| 0.242454
| 0
| 0.320186
| 0
| 0.00464
| 0.078187
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088167
| false
| 0.00232
| 0.032483
| 0
| 0.213457
| 0.025522
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c29512076f4adfe1c703eb019e1315c92cfb30fe
| 3,342
|
py
|
Python
|
tasks/utilities/runner.py
|
faisaltheparttimecoder/carelogBackend
|
b0635e72338e14dad24f1ee0329212cd60a3e83a
|
[
"MIT"
] | 1
|
2020-04-09T11:45:14.000Z
|
2020-04-09T11:45:14.000Z
|
tasks/utilities/runner.py
|
faisaltheparttimecoder/carelogBackend
|
b0635e72338e14dad24f1ee0329212cd60a3e83a
|
[
"MIT"
] | 2
|
2020-06-05T18:04:30.000Z
|
2021-06-10T20:11:46.000Z
|
tasks/utilities/runner.py
|
faisaltheparttimecoder/carelogBackend
|
b0635e72338e14dad24f1ee0329212cd60a3e83a
|
[
"MIT"
] | null | null | null |
import datetime, os
from django.contrib.auth.models import User
from products.lib.data_load import LoadProducts
from zendesk.lib.load_tickets import LoadTickets
from tasks.engine.maintenance import Maintenance
from tasks.models import LastRun
class TaskRunner:
def __init__(self):
"""
Initialize the variables.
"""
self.admin_username = 'admin'
self.admin_email = 'admin@email.com'
self.admin_pass = os.environ['ADMIN_PASS']
self.when_to_run = {
'refresh_pivotal_products_table': 3600, # Every Hour
'table_maintenance': 86400, # Once a day
'load_ticket_data': 900, # Every 15 minutes
}
def update_last_run_time(self, component):
"""
        Once the component run is completed, update the LastRun table; if the
        component is running for the first time, create its record, which sets the time to now.
"""
if LastRun.objects.filter(component=component).count() == 0:
run = LastRun(component=component)
run.save()
else:
LastRun.objects.filter(component=component).update(
last_run=datetime.datetime.now()
)
def check_last_run_table(self, component):
"""
        Get the date/time of the last run for the given component.
"""
last_record_time = '2000-01-01 00:00:00'
last_record_time = datetime.datetime.strptime(last_record_time, "%Y-%m-%d %H:%M:%S")
last_record_time = (datetime.datetime.now() - last_record_time).total_seconds()
last_run = LastRun.objects.filter(component=component).values('last_run')
for last_run in last_run:
last_record_time = (datetime.datetime.now() - last_run['last_run']).total_seconds()
return last_record_time
def create_super_user(self):
"""
        Create the superuser if it does not exist
"""
if User.objects.filter(username=self.admin_username).count() == 0:
User.objects.create_superuser(self.admin_username, self.admin_email, self.admin_pass)
def run_refresh_pivotal_products_table(self):
"""
Run the component to load all the pivotal products
"""
if self.when_to_run['refresh_pivotal_products_table'] < self.check_last_run_table('refresh_pivotal_products_table'):
LoadProducts().load_data_to_db()
self.update_last_run_time('refresh_pivotal_products_table')
def run_check_for_table_maintenance(self):
"""
        Run the component that performs table maintenance
"""
if self.when_to_run['table_maintenance'] < self.check_last_run_table('table_maintenance'):
Maintenance().run_table_maintenance()
self.update_last_run_time('table_maintenance')
def run_load_ticket_table(self):
"""
        Run the component that loads ticket data
"""
if self.when_to_run['load_ticket_data'] < self.check_last_run_table('load_ticket_data'):
LoadTickets().extract_data()
self.update_last_run_time('load_ticket_data')
def run_task(self):
"""
Execute all the tasks ...
"""
self.create_super_user()
self.run_refresh_pivotal_products_table()
self.run_check_for_table_maintenance()
self.run_load_ticket_table()
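# A minimal usage sketch (an assumption, not part of the original module): a cron job or
# management command would construct the runner and call run_task(); the LastRun
# bookkeeping above decides which components actually execute. This assumes Django
# settings are already configured and ADMIN_PASS is set in the environment.
if __name__ == "__main__":
    TaskRunner().run_task()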
| 37.550562
| 124
| 0.648115
| 413
| 3,342
| 4.937046
| 0.254237
| 0.054929
| 0.048063
| 0.079451
| 0.33693
| 0.205493
| 0.194213
| 0.093183
| 0.053948
| 0.053948
| 0
| 0.01201
| 0.252543
| 3,342
| 88
| 125
| 37.977273
| 0.804243
| 0.131957
| 0
| 0
| 0
| 0
| 0.123429
| 0.044346
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0.038462
| 0.115385
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c298455b91c04670dd6ada8face196e4608ff57c
| 1,667
|
py
|
Python
|
code/mlp-test.py
|
asdlei99/firewall
|
fd2819fab4cfde9989350397300efd4321e197fa
|
[
"MIT"
] | 1
|
2020-03-01T21:17:01.000Z
|
2020-03-01T21:17:01.000Z
|
code/mlp-test.py
|
asdlei99/firewall
|
fd2819fab4cfde9989350397300efd4321e197fa
|
[
"MIT"
] | null | null | null |
code/mlp-test.py
|
asdlei99/firewall
|
fd2819fab4cfde9989350397300efd4321e197fa
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 23 10:51:14 2018
@author: peter
"""
from sklearn.feature_extraction.text import TfidfVectorizer
import os
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn import metrics
import urllib.parse
from sklearn.externals import joblib
def loadFile(name):  # read a file and return its unique, URL-decoded lines
directory = str(os.getcwd())
filepath = os.path.join(directory, name)
with open(filepath, 'r', encoding='UTF-8') as f:
data = f.readlines()
data = list(set(data))
result = []
for d in data:
d = str(urllib.parse.unquote(d))
result.append(d)
return result
badQueries = loadFile('badqueries.txt')    # load malicious requests
validQueries = loadFile('goodqueries.txt') # load normal requests
# Remove duplicates
badQueries = list(set(badQueries))
validQueries = list(set(validQueries))
allQueries = badQueries + validQueries
# Assign labels (1 = malicious, 0 = benign)
yBad = [1 for i in range(0, len(badQueries))]
yGood = [0 for i in range(0, len(validQueries))]
y = yBad + yGood
queries = allQueries
# Feature extraction with TF-IDF
vectorizer = TfidfVectorizer(min_df = 0.0, analyzer="char", sublinear_tf=True, ngram_range=(1,3))
X = vectorizer.fit_transform(queries)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
badCount = len(badQueries)
validCount = len(validQueries)
# Load the trained MLP model
mlp = joblib.load("mlp-module.m")
predicted=mlp.predict(X_test)
print("Bad samples: %d" % badCount)
print("Good samples: %d" % validCount)
print("Accuracy: %f" % mlp.score(X_test, y_test))
print("Precision: %f" % metrics.precision_score(y_test, predicted))
print("Recall: %f" % metrics.recall_score(y_test, predicted))
| 27.327869
| 98
| 0.720456
| 238
| 1,667
| 4.945378
| 0.487395
| 0.046729
| 0.023789
| 0.018692
| 0.025489
| 0.025489
| 0
| 0
| 0
| 0
| 0
| 0.018271
| 0.146371
| 1,667
| 61
| 99
| 27.327869
| 0.808855
| 0.068386
| 0
| 0
| 0
| 0
| 0.076023
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.179487
| 0
| 0.230769
| 0.128205
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c298b609bf3203502c3817910d7a265918d513ee
| 3,176
|
py
|
Python
|
crud.py
|
wileeam/discount-code-service
|
74ccd0564115c636ed8d825e41d8e7d1bec33ded
|
[
"Apache-2.0"
] | null | null | null |
crud.py
|
wileeam/discount-code-service
|
74ccd0564115c636ed8d825e41d8e7d1bec33ded
|
[
"Apache-2.0"
] | null | null | null |
crud.py
|
wileeam/discount-code-service
|
74ccd0564115c636ed8d825e41d8e7d1bec33ded
|
[
"Apache-2.0"
] | null | null | null |
import random
import string
from sqlalchemy.orm import Session
import models, schemas
def get_brand(db: Session, brand_id: int):
return db.query(models.Brand).filter(models.Brand.id == brand_id).first()
def get_brand_by_name(db: Session, name: str):
return db.query(models.Brand).filter(models.Brand.name == name).first()
def get_brands(db: Session, skip: int = 0, limit: int = 10):
return db.query(models.Brand).offset(skip).limit(limit).all()
def create_brand(db: Session, brand: schemas.BrandCreate):
db_brand = models.Brand(name=brand.name)
db.add(db_brand)
db.commit()
db.refresh(db_brand)
return db_brand
def get_discounts_by_brand(db: Session,
brand_id: int,
skip: int = 0,
limit: int = 100):
db_brand = get_brand(db, brand_id)
return db.query(
models.Discount).filter(models.Discount.owner_id ==
db_brand.id).offset(skip).limit(limit).all()
def get_discounts_by_brand_name(db: Session,
brand_name: str,
active: bool = True,
skip: int = 0,
limit: int = 100):
db_brand = get_brand_by_name(db, brand_name)
return db.query(models.Discount).filter(
models.Discount.owner_id == db_brand.id).filter(
models.Discount.is_active == active).offset(skip).limit(
limit).all()
def get_active_discounts_by_brand(db: Session,
brand_id: int,
skip: int = 0,
limit: int = 100):
db_brand = get_brand(db, brand_id)
return db.query(models.Discount).filter(
models.Discount.owner_id == db_brand.id).filter(
models.Discount.is_active == True).offset(skip).limit(limit).all()
def get_discount(db: Session, brand_id: int):
active_discounts = get_active_discounts_by_brand(db, brand_id)
if len(active_discounts) > 0:
discount = active_discounts[0]
discount.is_active = False
db.commit()
else:
discount = None
return discount
def get_discounts(db: Session, skip: int = 0, limit: int = 100):
return db.query(models.Discount).offset(skip).limit(limit).all()
def create_brand_discounts(db: Session,
number: int,
brand_id: int,
length: int = 8):
db_brand = get_brand(db, brand_id)
for i in range(number):
code = ''.join(
random.choices(string.ascii_uppercase + string.digits, k=length))
description = f"Use the following code to get a discount with {db_brand.name}"
db_discount = models.Discount(code=code,
description=description,
is_active=True,
owner_id=brand_id)
db.add(db_discount)
db.commit()
db.refresh(db_discount)
return f"Successfully generated {number} discount codes for {db_brand.name}."
| 34.521739
| 86
| 0.573992
| 389
| 3,176
| 4.501285
| 0.18509
| 0.071959
| 0.05197
| 0.075957
| 0.551685
| 0.463735
| 0.429469
| 0.337521
| 0.248429
| 0.248429
| 0
| 0.010228
| 0.322733
| 3,176
| 91
| 87
| 34.901099
| 0.803812
| 0
| 0
| 0.271429
| 0
| 0
| 0.040302
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.057143
| 0.057143
| 0.342857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c29a89217076d97f8ff62faec004446052c3802d
| 14,431
|
py
|
Python
|
consai2_game/scripts/example/actions/defense.py
|
ibis-ssl/consai2-ibis
|
2b7d67007703fa49fc7290e92e12481ba48a9a93
|
[
"MIT"
] | 4
|
2019-12-16T12:17:32.000Z
|
2020-02-15T04:45:47.000Z
|
consai2_game/scripts/example/actions/defense.py
|
ibis-ssl/consai2-ibis
|
2b7d67007703fa49fc7290e92e12481ba48a9a93
|
[
"MIT"
] | null | null | null |
consai2_game/scripts/example/actions/defense.py
|
ibis-ssl/consai2-ibis
|
2b7d67007703fa49fc7290e92e12481ba48a9a93
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2019 SSL-Roots
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# coding: UTF-8
# defense.py defines actions that do not kick the ball
import math
import rospy
import sys,os
from consai2_msgs.msg import BallInfo, RobotInfo
from consai2_msgs.msg import ControlTarget
from geometry_msgs.msg import Pose2D
sys.path.append(os.pardir)
from field import Field
from observer import Observer
import role
import tool
# Switch behaviour depending on the defense role
def defense_decision(my_role, ball_info, control_target, my_pose, defense_num, robot_info, zone_enable=False):
    # Defense in front of the goal
    if role.ROLE_ID['ROLE_DEFENSE_GOAL_1'] <= my_role <= role.ROLE_ID['ROLE_DEFENSE_GOAL_2']:
        return defense_goal(my_pose, ball_info, control_target, my_role, defense_num)
    # Zone defense
    elif role.ROLE_ID['ROLE_DEFENSE_ZONE_1'] <= my_role <= role.ROLE_ID['ROLE_DEFENSE_ZONE_4']:
        return defense_zone(my_pose, ball_info, control_target, my_role, defense_num, robot_info['their'], zone_enable)
    # Otherwise (unexpected role) stay where we are
    else:
        control_target.path = []
        control_target.path.append(my_pose)
        return control_target
# Defense in front of the goal
def defense_goal(my_pose, ball_info, control_target, my_role, defense_num):
    # The goal defenders track along the edge of the penalty area.
    # They move relative to the intersection of the line joining the ball and the goal
    # with the penalty-area boundary; depending on the ball position they stand in front
    # of the left, front or right edge of the area.
    # Distance to keep from the penalty area
    MARGIN_LINE = 0.2
    # Distance between the two robots when defending with two
    MARGIN_ROBOT = 0
    # Extra offset used to speed up temporarily, e.g. when switching
    # from the left edge to the front edge
    MARGIN_FOR_SPEED = 0.5
    # With two or more defenders, adjust MARGIN_ROBOT
    if defense_num > 1:
        if my_role == role.ROLE_ID["ROLE_DEFENSE_GOAL_1"]:
            MARGIN_ROBOT = 0.15
        else:
            MARGIN_ROBOT = -0.15
    # Ball position
    ball_pose = ball_info.pose
    # Flags describing where the ball is
    ball_is_center = False
    ball_is_left = False
    ball_is_right = False
    # Flags describing where this robot is
    my_pose_is_left = False
    my_pose_is_right = False
    # Flags describing where the robot should move to
    target_is_center = False
    target_is_left = False
    target_is_right = False
    # Penalty-area geometry taken from the Field information
    # Left corner of the penalty area
    left_penalty_corner = Field.penalty_pose('our', 'upper_front')
    # Right corner of the penalty area
    right_penalty_corner = Field.penalty_pose('our', 'lower_front')
    # Intersection of the left penalty-area edge with the goal line
    left_penalty_goalside = Field.penalty_pose('our', 'upper_back')
    # Intersection of the right penalty-area edge with the goal line
    right_penalty_goalside = Field.penalty_pose('our', 'lower_back')
    # Centre of the goal
    goal_center = Field.goal_pose('our', 'center')
    # Angle from the goal centre to the left penalty corner
    angle_to_left_penalty_corner = tool.get_angle(goal_center, left_penalty_corner)
    # Angle from the goal centre to the right penalty corner
    angle_to_right_penalty_corner = tool.get_angle(goal_center, right_penalty_corner)
    # Angle from this robot to the ball (so the robot can face the ball)
    angle_to_ball = tool.get_angle(my_pose, ball_pose)
    # Transform into a frame centred on the left corner, with the goal behind
    trans_left = tool.Trans(left_penalty_corner, angle_to_left_penalty_corner)
    tr_left_ball_pose = trans_left.transform(ball_pose)
    # Transform into a frame centred on the right corner, with the goal behind
    trans_right = tool.Trans(right_penalty_corner, angle_to_right_penalty_corner)
    tr_right_ball_pose = trans_right.transform(ball_pose)
    # Decide where the ball is
    if tr_left_ball_pose.y > 0:
        ball_is_left = True
    elif tr_right_ball_pose.y < 0:
        ball_is_right = True
    else:
        ball_is_center = True
    # ---------------------------------------------------------
    # Kicker and dribbler are OFF
    control_target.kick_power = 0.0
    control_target.dribble_power = 0.0
    # The ball is in the centre
    if ball_is_center:
        # Intersection of the front penalty-area edge (left corner to right corner)
        # with the line from the goal centre to the ball
        target_pose = tool.get_intersection(left_penalty_corner, right_penalty_corner,
                                            goal_center, ball_pose)
        if target_pose is not None:
            # Stay out of the penalty area: add MARGIN_LINE
            target_pose.x += MARGIN_LINE
            # The robot is behind the front edge
            if my_pose.x < left_penalty_corner.x:
                # Rush to the front
                target_pose.x += MARGIN_FOR_SPEED
                # Move along the penalty area
                if my_pose.y > 0:
                    target_pose.y = left_penalty_corner.y + MARGIN_LINE
                else:
                    target_pose.y = right_penalty_corner.y - MARGIN_LINE
            else:
                target_pose.y += MARGIN_ROBOT
        else:
            target_pose = Pose2D()
    # The ball is on the left
    elif ball_is_left:
        # Intersection of the left penalty-area edge with the line
        # from the goal centre to the ball
        target_pose = tool.get_intersection(left_penalty_corner, left_penalty_goalside,
                                            goal_center, ball_pose)
        if target_pose is not None:
            # Stay out of the penalty area: add MARGIN_LINE
            target_pose.y += MARGIN_LINE
            # The robot is not on the left side
            if my_pose.y < left_penalty_corner.y:
                # Not on the left and behind the area: follow the right side
                if my_pose.x < left_penalty_corner.x and my_pose.y < 0:
                    target_pose.x = left_penalty_corner.x + MARGIN_FOR_SPEED
                    target_pose.y = right_penalty_corner.y - MARGIN_LINE
                # Otherwise rush to the left side
                else:
                    target_pose.x = left_penalty_corner.x + MARGIN_LINE
                    target_pose.y += MARGIN_FOR_SPEED
            else:
                target_pose.x -= MARGIN_ROBOT
        else:
            target_pose = Pose2D()
    # The ball is on the right
    elif ball_is_right:
        target_pose = tool.get_intersection(right_penalty_corner, right_penalty_goalside,
                                            goal_center, ball_pose)
        if target_pose is not None:
            # Stay out of the penalty area: subtract MARGIN_LINE
            target_pose.y -= MARGIN_LINE
            # The robot is not on the right side
            if my_pose.y > right_penalty_corner.y:
                # Not on the right and behind the area: follow the left side
                if my_pose.x < left_penalty_corner.x and my_pose.y > 0:
                    target_pose.x = left_penalty_corner.x + MARGIN_FOR_SPEED
                    target_pose.y = left_penalty_corner.y + MARGIN_LINE
                # Otherwise rush to the right side
                else:
                    target_pose.x = right_penalty_corner.x + MARGIN_LINE
                    target_pose.y -= MARGIN_FOR_SPEED
            else:
                target_pose.x += MARGIN_ROBOT
        else:
            target_pose = Pose2D()
    # Do not leave the field
    if target_pose.x < goal_center.x:
        target_pose.x = goal_center.x
    # Face the ball
    target_pose.theta = angle_to_ball
    control_target.path = []
    control_target.path.append(target_pose)
    return control_target
# Zone defense
def defense_zone(my_pose, ball_info, control_target, my_role, defense_num, their_robot_info, zone_enable):
    # Number of robots assigned to goal defense
    GOAL_DEFENSE_NUM = 2
    # current defenders - goal defenders = robots available for zone defense
    ZONE_DEFENSE_NUM = defense_num - GOAL_DEFENSE_NUM
    # ROLE_ID at which zone defense starts
    ZONE_START_ROLE_ID = role.ROLE_ID["ROLE_DEFENSE_ZONE_1"]
    # Waiting position for the zone offence robot
    ZONE_OFFENCE_POSE = Pose2D(3,0,0)
    # Margin around the centre line
    MARGIN_CENTER = 0.6
    # Margin for moving slightly forward
    MARGIN_LITTLE_FORWARD = 1.0
    # Dribble power
    DRIBBLE_POWER = 0.6
    # Ball position
    ball_pose = ball_info.pose
    # Penalty-area geometry taken from the Field information
    # Field width
    field_width = Field.field('width')
    # Half of the field width
    half_field_width = float(field_width) / 2
    # Quarter of the field width
    quarter_field_width = float(field_width) / 4
    # Field length
    field_length = Field.field('length')
    # Quarter of the field length, i.e. half the length of our side
    half_our_field_length = -float(field_length) / 4
    # Centre of the goal
    goal_center = Field.goal_pose('our', 'center')
    # Corners of the penalty area
    left_penalty_corner = Field.penalty_pose('our', 'upper_front')
    right_penalty_corner = Field.penalty_pose('our', 'lower_front')
    # Angle from this robot to the ball (so the robot can face the ball)
    angle_to_ball = tool.get_angle(my_pose, ball_pose)
    # Angle from the goal centre to the ball
    angle_to_ball_from_goal = tool.get_angle(goal_center, ball_pose)
    # ID used for zone defense
    zone_id = None
    # Initialise the target pose
    target_pose = Pose2D()
    # ---------------------------------------------------------
    # Kicker and dribbler are OFF
    control_target.kick_power = 0.0
    control_target.dribble_power = 0.0
    # Flag marking whether this robot acts as the zone offence robot
    my_role_is_offence = False
    # When the ball is in the opponent half, send one robot out of the zone
    # as a zone offence robot. A margin around the centre line is used so that
    # we do not push forward e.g. during the opponent's kick-off.
    if ZONE_DEFENSE_NUM > 1 and ball_pose.x > MARGIN_CENTER:
        # One fewer zone defender
        ZONE_DEFENSE_NUM -= 1
        # Shift the ROLE_ID at which zone defense starts
        ZONE_START_ROLE_ID = role.ROLE_ID["ROLE_DEFENSE_ZONE_2"]
        # Send ROLE_DEFENSE_ZONE_1 out as the zone offence robot
        if my_role == role.ROLE_ID["ROLE_DEFENSE_ZONE_1"]:
            my_role_is_offence = True
    # I am the zone offence robot
    if my_role_is_offence:
        zone_id = 0
        target_pose = ZONE_OFFENCE_POSE
        # The attacker usually goes for the ball, so move towards the side
        # away from the ball in order to pick up loose balls.
        if ball_pose.y > 0:
            target_pose.y = - quarter_field_width
        else:
            target_pose.y = quarter_field_width
        # Face the ball
        target_pose.theta = angle_to_ball
    # Everyone except the zone offence robot
    if ZONE_DEFENSE_NUM > 0 and not my_role_is_offence:
        step = float(field_width) / (ZONE_DEFENSE_NUM * 2)
        # Split the field width into equal parts, one pair per zone defender
        split_field = [i * step - half_field_width for i in range(0,(ZONE_DEFENSE_NUM * 2 + 1))]
        # Centres of those zones
        split_field_center = [i * step - half_field_width for i in range(0,(ZONE_DEFENSE_NUM * 2)) \
                if i % 2 != 0]
        # try-block to guard against index errors
        try:
            # Compute the zone ID.
            # With 8 robots alive and no zone offence robot
            # the zone IDs are 0..3.
            zone_id = my_role - ZONE_START_ROLE_ID
            # Use the centre of the zone as the target position
            target_pose.y = split_field_center[zone_id]
            # Check for opponents inside my zone:
            # take the poses from their_robot_info and keep those that are
            # in our half and inside the width of my zone.
            # If none match, the list is empty.
            invader_pose = [i.pose for i in their_robot_info \
                    if split_field[zone_id * 2] < i.pose.y < split_field[(zone_id + 1) * 2] and \
                    i.pose.x < 0]
            # The ball is inside my zone and zone_enable is set
            if(zone_enable and \
                    ball_pose.x < 0 and \
                    split_field[zone_id * 2] < ball_pose.y < split_field[(zone_id + 1) * 2]):
                trans = tool.Trans(ball_pose, angle_to_ball_from_goal)
                target_pose = trans.inverted_transform(Pose2D(-0.9, 0, 0))
            # The ball is not in my zone but an opponent is: cut in
            elif zone_enable and invader_pose != []:
                # Move between the opponent and the ball
                angle_to_ball_from_invader = tool.get_angle(invader_pose[0], ball_pose)
                trans = tool.Trans(invader_pose[0], angle_to_ball_from_invader)
                target_pose = trans.inverted_transform(Pose2D(0.5, 0, 0))
            else:
                # When the ball is in the opponent half, push the defense slightly forward
                if ball_pose.x > MARGIN_CENTER:
                    target_pose.x = half_our_field_length + MARGIN_LITTLE_FORWARD
                else:
                    target_pose.x = half_our_field_length
        except IndexError:
            target_pose = my_pose
        target_pose.theta = angle_to_ball
    # If the ball is coming towards us, receive it
    if zone_id is not None:
        receive_ball_result, receive_target_pose = update_receive_ball(ball_info, my_pose, zone_id)
        if receive_ball_result:
            # Spin up the dribbler
            control_target.dribble_power = DRIBBLE_POWER
            target_pose = receive_target_pose
    # Never enter the penalty area
    if((left_penalty_corner.y + 0.2 > target_pose.y > right_penalty_corner.y - 0.2) and \
            target_pose.x < left_penalty_corner.x + 0.3):
        target_pose.x = half_our_field_length
    control_target.path = []
    control_target.path.append(target_pose)
    return control_target
# Class holding the ball-receiving state
class Receiving(object):
    _recenving = [False] * role.ZONE_DEFENSE_NUM
    @classmethod
    def update_receiving(cls, zone_id, param):
        Receiving._recenving[zone_id] = param
    @classmethod
    def receiving(cls, zone_id):
        return Receiving._recenving[zone_id]
def update_receive_ball(ball_info, my_pose, zone_id):
    # Ball position
    ball_pose = ball_info.pose
    # Ball velocity
    ball_vel = ball_info.velocity
    # Distance within which the ball is judged receivable
    _can_receive_dist = 1.0
    # Hysteresis
    _can_receive_hysteresis = 0.3
    result = False
    target_pose = Pose2D()
    # The ball is moving
    if Observer.ball_is_moving():
        # Angle of the ball velocity vector
        angle_velocity = tool.get_angle_from_center(ball_vel)
        trans = tool.Trans(ball_pose, angle_velocity)
        tr_pose = trans.transform(my_pose)
        # Perpendicular distance to the line of the ball velocity
        fabs_y = math.fabs(tr_pose.y)
        # Judge that we can receive the ball
        if Receiving.receiving(zone_id) == False and \
                fabs_y < _can_receive_dist - _can_receive_hysteresis:
            Receiving.update_receiving(zone_id, True)
        # Judge that we can no longer receive the ball
        elif Receiving.receiving(zone_id) == True and \
                fabs_y > _can_receive_dist + _can_receive_hysteresis:
            Receiving.update_receiving(zone_id, False)
        # We can receive and we are in the direction the ball is travelling
        if Receiving.receiving(zone_id) and tr_pose.x > 0.0:
            tr_pose.y = 0.0
            inv_pose = trans.inverted_transform(tr_pose)
            angle_to_ball = tool.get_angle(inv_pose, ball_pose)
            target_pose = Pose2D(inv_pose.x, inv_pose.y, angle_to_ball)
            result = True
    return result, target_pose
| 34.857488
| 119
| 0.654147
| 1,759
| 14,431
| 5.026151
| 0.192723
| 0.058817
| 0.036534
| 0.012668
| 0.426309
| 0.372356
| 0.312861
| 0.264789
| 0.245334
| 0.177356
| 0
| 0.011367
| 0.26845
| 14,431
| 413
| 120
| 34.941889
| 0.825992
| 0.224101
| 0
| 0.281818
| 0
| 0
| 0.02424
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027273
| false
| 0
| 0.045455
| 0.004545
| 0.113636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c29b8867909e2528de5c43aad2904d281f32bd76
| 454
|
py
|
Python
|
python/py-collections/most-commons.py
|
PingHuskar/hackerrank
|
1bfdbc63de5d0f94cd9e6ae250476b4a267662f2
|
[
"Unlicense"
] | 41
|
2018-05-11T07:54:34.000Z
|
2022-03-29T19:02:32.000Z
|
python/py-collections/most-commons.py
|
PingHuskar/hackerrank
|
1bfdbc63de5d0f94cd9e6ae250476b4a267662f2
|
[
"Unlicense"
] | 2
|
2021-09-13T10:03:26.000Z
|
2021-10-04T10:21:05.000Z
|
python/py-collections/most-commons.py
|
PingHuskar/hackerrank
|
1bfdbc63de5d0f94cd9e6ae250476b4a267662f2
|
[
"Unlicense"
] | 21
|
2019-01-23T19:06:59.000Z
|
2021-12-23T16:03:47.000Z
|
# Python > Collections > Company Logo
# Print the number of character occurrences in descending order.
#
# https://www.hackerrank.com/challenges/most-commons/problem
#
from collections import Counter
from itertools import groupby
name = input()
nb = 0
for c, g in groupby(Counter(name).most_common(), key=lambda x: x[1]):
for l in sorted(map(lambda x: x[0], g)):
print(l, c)
nb += 1
if nb == 3: break
if nb == 3: break
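# Worked example (illustrative): for the input "aabbbccde" the three most common
# characters, with ties broken alphabetically, are printed as:
#   b 3
#   a 2
#   c 2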
| 23.894737
| 69
| 0.65859
| 70
| 454
| 4.257143
| 0.628571
| 0.04698
| 0.053691
| 0.067114
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016949
| 0.220264
| 454
| 18
| 70
| 25.222222
| 0.824859
| 0.345815
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2a18f5087df24218cafcfe623033e7eac9d54d7
| 16,181
|
py
|
Python
|
Kafka/automated/dedup_test.py
|
allensanborn/ChaosTestingCode
|
36682e9ec70659f8e6a684e53fff6968bb5d15a2
|
[
"MIT"
] | 73
|
2018-10-17T19:48:44.000Z
|
2022-03-24T10:28:32.000Z
|
Kafka/automated/dedup_test.py
|
allensanborn/ChaosTestingCode
|
36682e9ec70659f8e6a684e53fff6968bb5d15a2
|
[
"MIT"
] | 1
|
2019-03-04T07:15:29.000Z
|
2019-03-04T07:31:49.000Z
|
Kafka/automated/dedup_test.py
|
allensanborn/ChaosTestingCode
|
36682e9ec70659f8e6a684e53fff6968bb5d15a2
|
[
"MIT"
] | 35
|
2018-10-20T23:37:57.000Z
|
2022-03-30T13:48:57.000Z
|
#!/usr/bin/env python
from confluent_kafka import Producer, Consumer, KafkaError
import sys
import time
import subprocess
from datetime import datetime
import threading
from collections import defaultdict
import re
import uuid
def log(text, to_file=False):
global output_file
print(text)
if to_file:
output_file.write(f"{text}\n")
output_file.flush()
def log_order(text):
global order_file
time_now = datetime.now().strftime('%H:%M:%S')
print(text)
order_file.write(f"{time_now}: {text}\n")
def create_cluster():
subprocess.call(["./setup-dedup-test-run.sh"])
def kill_tcp_connections_of_leader():
global leader
port = ""
if leader == "kafka1":
port = "9092"
elif leader == "kafka2":
port = "9093"
elif leader == "kafka3":
port = "9094"
cmd = f"sudo timeout 10s sudo tcpkill -i docker0 -9 port {port}"
subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
# not used at this time
def blackhole_leader():
global leader
ip = ""
if leader == "kafka1":
ip = "172.17.0.3"
elif leader == "kafka2":
ip = "172.17.0.4"
elif leader == "kafka3":
ip = "172.17.0.5"
cmd = f"sudo ip route add blackhole {ip}"
subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
time.sleep(5)
cmd = f"sudo ip route delete blackhole {ip}"
subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
def create_topic(topic):
bash_command = f"bash create-topic-print-leader.sh {topic}"
process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
leader_num = output.decode('ascii').replace('\n', '')
leader = f"kafka{leader_num}"
return leader
def get_live_nodes():
bash_command = "bash ../cluster/list-live-nodes.sh"
process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
nodes_line = output.decode('ascii').replace('\n', '')
return nodes_line.split(' ')
def kill_partition_leader():
global leader
subprocess.call(["./execute-chaos.sh", "kill-specific-node", leader])
def start_downed_broker():
global leader
subprocess.call(["./execute-chaos.sh", "start-specific-node", leader])
def get_broker_ips():
bash_command = "bash ../cluster/list-broker-ips.sh"
process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
nodes_line = output.decode('ascii').replace('\n', '')
return nodes_line.rstrip(' ').replace(' ',',')
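# Runs the KafkaDedup jar to produce count messages; when 50000 sends are reported it triggers the configured chaos action (kill the leader or kill its TCP connections), then loads the positively and negatively acked message ids from the producer output files.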
def produce_with_java(topic, count, bootstrap_servers, pos_acked_file_path, neg_acked_file_path, enable_idempotency):
global messages_sent, messages_pos_acked, messages_neg_acked
cmd = f"java -jar ../KafkaDedup/build/libs/KafkaDedup-all-1.0.jar {topic} {count} {bootstrap_servers} {pos_acked_file_path} {neg_acked_file_path} {enable_idempotency}"
process = subprocess.Popen(
cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
action_taken = False
while True:
out = process.stdout.readline().decode("ascii")
if out == '' and process.poll() is not None:
break
if out != '':
if out.startswith("50000") and not action_taken:
action_taken = True
if test_type == "kill-leader":
print(f"Killing partition leader: {leader}")
r = threading.Thread(target=kill_partition_leader)
r.start()
else:
print(f"Preparing to kill client connections to partition leader: {leader}")
r = threading.Thread(target=kill_tcp_connections_of_leader)
r.start()
elif out == "FINISHED":
break
print(out)
for i in range(0, count):
messages_sent[str(i)] = list()
# load pos acked
file = open(pos_acked_file_path)
for line in file:
messages_pos_acked.add(str(int(line)))
# load neg acked
file = open(neg_acked_file_path)
for line in file:
messages_neg_acked.add(str(int(line)))
# def delivery_report(err, msg):
# global messages_pos_acked, messages_neg_acked, send_count, ack_count, pos_ack_count, neg_ack_count, action_mark, action_performed, topic, test_type
# ack_count += 1
# if err:
# neg_ack_count += 1
# value = int(msg.value())
# messages_neg_acked.add(value)
# else:
# pos_ack_count += 1
# value = int(msg.value())
# messages_pos_acked.add(value)
# if ack_count % 50000 == 0:
# log(f"Send count: {str(send_count)} Ack count: {str(ack_count)} Pos: {str(pos_ack_count)} Neg: {str(neg_ack_count)}")
# if ack_count > action_mark and action_performed == False:
# action_performed = True
# if test_type == "kill-leader":
# print(f"Preparing to kill partition leader: {leader}")
# r = threading.Thread(target=kill_partition_leader)
# r.start()
# else:
# print(f"Preparing to kill client connections to partition leader: {leader}")
# r = threading.Thread(target=kill_tcp_connections_of_leader)
# r.start()
# def produce():
# global send_count, ack_count, pos_ack_count, neg_ack_count, messages_sent, messages_pos_acked, partitions, leader
# dedup = dedup_enabled == "true"
# acks_mode = "all"
# bootstrap_servers = get_broker_ips()
# log(f"Producer bootstrap.servers: {bootstrap_servers}")
# producer = Producer({'bootstrap.servers': bootstrap_servers,
# 'message.send.max.retries': 3,
# 'max.in.flight.requests.per.connection': 5,
# #'enable.idempotence': dedup,
# 'default.topic.config': { 'request.required.acks': acks_mode }})
# # send the first message synchronously, to ensure everything is running ok
# producer.produce(topic, str(send_count).encode('utf-8'), callback=delivery_report)
# send_count += 1
# messages_sent[send_count] = list()
# producer.poll(0)
# producer.flush()
# partitions = get_isolate_from_zk_partitions(leader)
# print("Started producing")
# # send bulk of messages asynchronously in order to achieve high message rate
# while send_count < count-1:
# producer.poll(0)
# if send_count - ack_count >= 10000: # ensure we don't have more than 10k in flight at a time
# time.sleep(0.1)
# #print("Sleeping")
# else:
# producer.produce(topic, str(send_count).encode('utf-8'), callback=delivery_report)
# messages_sent[send_count] = list()
# send_count += 1
# # send last message in order to block until acked
# # this way we ensure all messages are acked by the end of this function
# producer.produce(topic, str(send_count).encode('utf-8'), callback=delivery_report)
# send_count += 1
# messages_sent[send_count] = list()
# producer.poll(0)
# time.sleep(5)
# producer.flush()
# log(f"Send count: {str(send_count)} Ack count: {str(ack_count)} Pos: {str(pos_ack_count)} Neg: {str(neg_ack_count)}")
def partition_assignment(consumer, partitions):
for p in partitions:
p.offset = 0
log("Partition assigned")
consumer.assign(partitions)
def read():
global received_count, messages_sent, topic, duplicate_jump_forward, duplicate_jump_back, jump_forward, jump_back
bootstrap_servers = get_broker_ips()
log(f"Consumer bootstrap.servers: {bootstrap_servers}")
consumer = Consumer({
'bootstrap.servers': bootstrap_servers,
'group.id': str(uuid.uuid1()),
'api.version.request': True,
'enable.auto.commit': True,
'auto.offset.reset': 'earliest'
})
log(f"Subscribing to {topic}")
consumer.subscribe([topic], on_assign=partition_assignment)
no_msg_count = 0
last_payload = -1
in_dup_block = False
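# Consume until partition EOF, counting duplicates and ordering anomalies (forward/backward jumps) relative to the previously consumed payload.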
while True:
try:
msg = consumer.poll(2.0)
if msg is None:
log("No messages")
no_msg_count += 1
if no_msg_count > 30:
log("Aborting test, no messages to consume")
sys.exit(1)
continue
no_msg_count = 0
if msg.error():
if msg.error().code() == KafkaError._PARTITION_EOF:
log("Consumed all messages")
break
else:
log(msg.error())
break
received_count += 1
msg_offset = msg.offset()
current_payload = int(msg.value())
current_payload_str = str(current_payload)
seen = len(messages_sent[current_payload_str]) > 0
if seen:
if last_payload >= current_payload:
duplicate_jump_back += 1
jump = str(last_payload - current_payload)
log_order(f"Test run: {test_run} DUPLICATE BLOCK - JUMP BACKWARDS {jump} ({str(last_payload)} -> {str(current_payload)})")
elif last_payload + 1 < current_payload:
duplicate_jump_forward += 1
jump = str(current_payload - last_payload)
log_order(f"Test run: {test_run} DUPLICATE BLOCK - JUMP FORWARDS {jump} ({str(last_payload)} -> {str(current_payload)})")
if not seen:
if last_payload >= current_payload:
jump_back += 1
jump = str(last_payload - current_payload)
log_order(f"Test run: {test_run} JUMP BACKWARDS {jump} ({str(last_payload)} -> {str(current_payload)})")
elif last_payload + 1 < current_payload:
jump_forward += 1
jump = str(current_payload - last_payload)
log_order(f"Test run: {test_run} JUMP FORWARDS {jump} ({str(last_payload)} -> {str(current_payload)})")
if current_payload_str in messages_sent:
messages_sent[current_payload_str].append(msg_offset)
last_payload = current_payload
if received_count % 50000 == 0:
log(f"Received: {received_count} Curr Offset: {msg_offset}")
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:{1!r}"
message = template.format(type(ex).__name__, ex.args)
log(message)
log(f"Read phase complete with message {msg.offset()}")
consumer.close()
topic_prefix = sys.argv[1]
test_num = int(sys.argv[2])
count = int(sys.argv[3])
action_mark = int(sys.argv[4])
test_type = sys.argv[5]
leader = ""
# create log files
start_time = datetime.now().strftime('%H:%M:%S')
output_file_w = open(f"test-output/{topic_prefix}_dedup_output.txt", "w")
output_file_w.write("DedupEnabled,TestRun,SendCount,AckCount,PosAckCount,NegAckCount,Received,NotReceived,ReceivedNoAck,MsgsWithDups,DJF,DJB,JF,JB\n")
output_file = open(f"test-output/{topic_prefix}_dedup_output.txt", "a")
order_file_w = open(f"test-output/{topic_prefix}_order_output.txt", "w")
order_file_w.write("Log of duplicate blocks and out-of-order messages\n")
order_file = open(f"test-output/{topic_prefix}_order_output.txt", "a")
dedup_enabled_values = ["false", "true"]
timeout_values = [60000, 0]
for i in range(2):
test_run = 1
dedup_enabled = dedup_enabled_values[i]
timeout = timeout_values[i]
log(f"Running {test_num} runs with deduplication enabled = {dedup_enabled}")
create_cluster()
while test_run <= test_num:
# run test
topic = f"{topic_prefix}_{str(test_run)}_dedup_{dedup_enabled}"
leader = create_topic(topic)
duplicate_jump_forward = 0
duplicate_jump_back = 0
jump_forward = 0
jump_back = 0
# send_count = 0
# ack_count = 0
# pos_ack_count = 0
# neg_ack_count = 0
# action_performed = False
# - CHAOS VARIABLES
partitions = list()
log(f"")
log(f"Test Run #{test_run} on topic {topic} ------------")
# - WRITE PHASE --------------------
log("-------------------------------------------------")
log("WRITE PHASE")
log("-------------------------------------------------")
messages_sent = defaultdict(list)
messages_pos_acked = set()
messages_neg_acked = set()
# try:
# produce()
# print("Produce ended")
# except KeyboardInterrupt:
# log("Producer cancelled")
# sys.exit(1)
# except Exception as ex:
# template = "An exception of type {0} occurred. Arguments:{1!r}"
# message = template.format(type(ex).__name__, ex.args)
# log("The producer has failed!!!")
# log(message)
# sys.exit(1)
pos_acked_file = f"producer-output/{topic}_pos_acked.txt"
neg_acked_file = f"producer-output/{topic}_neg_acked.txt"
try:
bootstrap_servers = get_broker_ips()
produce_with_java(topic, count, bootstrap_servers, pos_acked_file, neg_acked_file, dedup_enabled)
log("Produce ended")
except KeyboardInterrupt:
log("Producer cancelled")
sys.exit(1)
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:{1!r}"
message = template.format(type(ex).__name__, ex.args)
log("The Java producer has failed!!!")
log(message)
sys.exit(1)
# - READ PHASE --------------------
if test_type == "kill-leader":
start_downed_broker()
time.sleep(10)
log("-------------------------------------------------")
log("READ PHASE")
log("-------------------------------------------------")
received_count = 0
try:
read()
except KeyboardInterrupt:
log("Reader cancelled")
sys.exit(1)
not_received = 0
received_no_ack = 0
msgs_with_dups = 0
received = 0
for msg_val, msg_ids in messages_sent.items():
received += len(msg_ids)
if len(msg_ids) == 0 and msg_val in messages_pos_acked:
not_received += 1
elif len(msg_ids) == 1 and msg_val not in messages_pos_acked:
received_no_ack += 1
elif len(msg_ids) > 1:
msgs_with_dups += 1
send_count = len(messages_sent)
ack_count = len(messages_pos_acked) + len(messages_neg_acked)
pos_ack_count = len(messages_pos_acked)
neg_ack_count = len(messages_neg_acked)
log("Results --------------------------------------------")
log(f"Final send count: {str(send_count)}")
log(f"Final ack count: {str(ack_count)}")
log(f"Final positive ack count: {str(pos_ack_count)}")
log(f"Final negative ack count: {str(neg_ack_count)}")
log(f"Messages received: {str(received)}")
log(f"Acked messages missing: {str(not_received)}")
log(f"Non-acked messages received: {str(received_no_ack)}")
log(f"Duplicates: {msgs_with_dups}")
log(f"Duplicate Jump Forward: {duplicate_jump_forward}")
log(f"Duplicate Jump Back: {duplicate_jump_back}")
log(f"Non-Duplicate Jump Forward: {jump_forward}")
log(f"Non-Duplicate Jump Back: {jump_back}")
log("----------------------------------------------------")
log(f"{dedup_enabled},{str(test_run)},{str(send_count)},{str(ack_count)},{str(pos_ack_count)},{str(neg_ack_count)},{str(received)},{str(not_received)},{str(received_no_ack)},{str(msgs_with_dups)},{str(duplicate_jump_forward)},{str(duplicate_jump_back)},{str(jump_forward)},{str(jump_back)}", True)
time.sleep(20)
test_run += 1
| 37.630233
| 305
| 0.596131
| 1,980
| 16,181
| 4.653535
| 0.167172
| 0.030389
| 0.017365
| 0.011721
| 0.474929
| 0.394834
| 0.348817
| 0.33232
| 0.297482
| 0.280117
| 0
| 0.013132
| 0.265867
| 16,181
| 430
| 306
| 37.630233
| 0.762522
| 0.219022
| 0
| 0.254545
| 0
| 0.025455
| 0.270966
| 0.108339
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047273
| false
| 0
| 0.032727
| 0
| 0.090909
| 0.021818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2a47a378106287329bd3e25e1d300fbd9312bc2
| 643
|
py
|
Python
|
apps/store/permissions.py
|
JimenezJC/cozy-exchange
|
131576e8159df8bab2ff680283ed55e66abaaa1d
|
[
"MIT"
] | null | null | null |
apps/store/permissions.py
|
JimenezJC/cozy-exchange
|
131576e8159df8bab2ff680283ed55e66abaaa1d
|
[
"MIT"
] | null | null | null |
apps/store/permissions.py
|
JimenezJC/cozy-exchange
|
131576e8159df8bab2ff680283ed55e66abaaa1d
|
[
"MIT"
] | null | null | null |
from rest_framework.permissions import BasePermission, SAFE_METHODS
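# Object-level DRF permissions: safe (read-only) methods are always allowed; writes require the requesting user to be the seller, or either the buyer or the seller for IsBuyerOrSeller.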
class IsOwnerOrReadOnly(BasePermission):
message = 'You must be the owner of this object'
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
return obj.seller == request.user
class IsBuyerOrSeller(BasePermission):
message = 'You must either be the buyer or the seller of this listing'
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
return (obj.seller == request.user) or (obj.buyer == request.user)
| 33.842105
| 74
| 0.709176
| 83
| 643
| 5.39759
| 0.457831
| 0.073661
| 0.107143
| 0.125
| 0.464286
| 0.464286
| 0.464286
| 0.464286
| 0.464286
| 0.464286
| 0
| 0
| 0.219285
| 643
| 18
| 75
| 35.722222
| 0.89243
| 0
| 0
| 0.461538
| 0
| 0
| 0.14619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.076923
| 0
| 0.846154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2a9d8d15587245ae91d5e2b5d778ffa6fc78c2f
| 13,246
|
py
|
Python
|
sg_covid_impact/complexity.py
|
nestauk/sg_covid_impact
|
0d52e643280cc6b06611759d4464dec82949ae05
|
[
"MIT"
] | 2
|
2020-10-19T16:30:59.000Z
|
2021-03-17T13:11:50.000Z
|
sg_covid_impact/complexity.py
|
nestauk/sg_covid_impact
|
0d52e643280cc6b06611759d4464dec82949ae05
|
[
"MIT"
] | 67
|
2020-10-07T09:34:38.000Z
|
2021-04-06T08:46:49.000Z
|
sg_covid_impact/complexity.py
|
nestauk/sg_covid_impact
|
0d52e643280cc6b06611759d4464dec82949ae05
|
[
"MIT"
] | null | null | null |
import logging
import numpy as np
import pandas as pd
import scipy.stats as ss
from scipy.linalg import eig
from numba import jit
import sg_covid_impact
# from mi_scotland.utils.pandas import preview
logger = logging.getLogger(__name__)
np.seterr(all="raise") # Raise errors on floating point errors
def process_complexity(df, dataset, year, geo_type, cluster, PCI=False):
"""Calculate complexity variables aggregated over the columns.
Calculates: size, complexity index, complexity outlook index
Args:
df (pandas.DataFrame): Long dataframe
Expected columns: `{"geo_nm", "geo_cd", cluster, "value"}`
year (str): Year
dataset (str): Name of dataset
geo_type (str): Type of regional geography
cluster (str): Name of cluster column to use to pivot on
PCI (bool, optional): If True, calculate product complexity by
transposing input
# TODO refactor outside of function
Returns:
pandas.DataFrame
"""
X = (
df.pipe(pivot_area_cluster, cluster).fillna(0)
# Transpose if PCI
.pipe(lambda x: x.T if PCI else x)
)
X.index.name = "cluster"
size = X.sum(1).to_frame("size")
complexity = (
X.pipe(create_lq, binary=True)
.pipe(calc_eci, sign_correction=X.sum(1))
.pipe(lambda x: x.rename(columns={"eci": "pci"}) if PCI else x)
)
outlook = X.pipe(complexity_outlook_index).to_frame("coi" if not PCI else "poi")
return (
size.join(complexity)
.join(outlook)
.assign(year=year, geo_type=geo_type, source=dataset, cluster_type=cluster)
)
def _melt_keep_index(df, value_name="value"):
""" Fully melt a dataframe keeping index, setting new index as all but `value` """
id_vars = df.index.names
return (
df.reset_index()
.melt(id_vars=id_vars, value_name=value_name)
.set_index([*id_vars, df.columns.name])
)
def process_complexity_unit(df, dataset, year, geo_type, cluster):
"""Calculate unaggregated complexity analysis variables
Calculates: raw value, location quotient, RCA?, distance, opportunity outlook gain
Args:
df (pandas.DataFrame): Long dataframe
Expected columns: `{"geo_nm", "geo_cd", cluster, "value"}`
year (str): Year
dataset (str): Name of dataset
geo_type (str): Type of regional geography
cluster (str): Name of cluster column to use to pivot on
Returns:
pandas.DataFrame
"""
X = df.pipe(pivot_area_cluster, cluster).fillna(0)
X.columns.name = "cluster"
# Index: year, location, cluster, geo_type
# value, LQ, RCA?, distance, OOG
value = X.pipe(_melt_keep_index, "value")
lq = X.pipe(create_lq).pipe(_melt_keep_index, "lq")
has_rca = (lq > 1).rename(columns={"lq": "has_rca"})
d = X.pipe(distance).pipe(_melt_keep_index, "distance")
omega = 1 - X.pipe(proximity_density).pipe(_melt_keep_index, "omega")
oog = opportunity_outlook_gain(X).pipe(_melt_keep_index, "oog")
return (
pd.concat([value, lq, has_rca, d, omega, oog], axis=1)
.assign(year=year, geo_type=geo_type, source=dataset, cluster_type=cluster)
.pipe(preview)
)
@jit(nopython=True)
def _proximity_matrix(M):
""" `proximity_matrix` helper function """
n_c, n_p = M.shape
phi = np.empty((n_p, n_p), dtype=np.float64)
k = M.sum(0) # Ubiquity
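# The proximity matrix is symmetric, so only the lower triangle is computed and mirrored into the upper triangle.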
for i in range(n_p):
Mci = M[:, i]
for j in range(n_p):
if j > i:
continue
Mcj = M[:, j]
m = max([k[i], k[j]])
if m == 0:
v = np.nan
else:
v = (Mci * Mcj).sum() / m
phi[i, j] = v
phi[j, i] = v
return phi
def proximity_matrix(X, threshold=1):
""" Calculates proximity matrix
Proximity between entries calculates the probability that given a revealed
comparative advantage (RCA) in entity `j`, a location also has a RCA in
entity `i`.
The same probability is calculated with `i` and `j` permuted, and the
minimum of the two probabilities is then taken.
.. math::
\\large{ \\phi_{ij} = \\min\\left\\{\\mathbb{P}(\\text{RCA}_i \\geq 1 |
\\text{RCA}_j \\geq 1), \\mathbb{P}(\\text{RCA}_j \\geq 1 |
\\text{RCA}_i \\geq 1)\\right\\} } \\\\
\\large{ \\phi_{ij} = \\frac{\\sum_c M_{ci} * M_{cj}}{\\max(k_i, k_j)} }
k_j = \\sum_c M_{cj}
Args:
X (pandas.DataFrame): Activity matrix [m x n]
threshold (float, optional): Binarisation threshold for location quotient.
Returns:
pandas.DataFrame [n x n]
"""
M = create_lq(X, binary=True, threshold=threshold)
return pd.DataFrame(_proximity_matrix(M.values), index=M.columns, columns=M.columns)
def proximity_density(X, threshold=1):
"""Calculate proximity density
.. math:
\\omega_{ik} = \\frac{ \\sum_j M_{ij} \\phi_{jk}}{\\sum_j \\phi_{jk}}
Args:
X (pandas.DataFrame): Activity matrix [m x n]
threshold (float, optional): Binarisation threshold for location quotient.
Returns:
pandas.DataFrame [m x n]
"""
M = create_lq(X, binary=True, threshold=threshold)
phi = proximity_matrix(X, threshold)
return (M @ phi) / phi.sum(axis=0)
def distance(X, threshold=1):
"""Distance: 1 - proximity density w/ existing capabilities as NaN
Args:
X (pandas.DataFrame): [locations x activities]
threshold (float, optional): Binarisation threshold for location
quotient.
Returns:
pandas.DataFrame [locations x activities]
"""
M = create_lq(X, threshold, binary=True)
phi = proximity_matrix(X, threshold)
return (((1 - M) @ phi) / phi.sum(axis=1)) * M.applymap(
lambda x: np.nan if x == 1 else 1
)
def complexity_outlook_index(X, threshold=1):
"""Calculate economic complexity outlook index
Args:
X (pandas.DataFrame): [locations x activities]
threshold (float, optional): Binarisation threshold for location
quotient.
Returns:
pandas.Series [locations]
"""
M = create_lq(X, threshold, binary=True)
d = distance(X, threshold)
PCI = calc_eci(M.T, sign_correction=X.sum(0))
if PCI.shape[0] != M.shape[1]:
M = M.loc[:, PCI.index]
d = d.loc[:, PCI.index]
return ((1 - d) * (1 - M) * PCI.values.T).sum(axis=1)
def opportunity_outlook_gain(X, threshold=1):
"""Calculate opportunity outlook gain
Value for existing capabilities is NaN.
Args:
X (pandas.DataFrame): [locations x activities]
threshold (float, optional): Binarisation threshold for location
quotient.
Returns:
pandas.DataFrame [locations x activities]
"""
M = create_lq(X, threshold, binary=True)
phi = proximity_matrix(X, threshold)
d = distance(X, threshold)
PCI = calc_eci(M.T, sign_correction=X.sum(0))
if PCI.shape[0] != M.shape[1]:
M = M.loc[:, PCI.index]
phi = phi.loc[PCI.index, PCI.index]
d = d.loc[:, PCI.index]
return (
(1 - M) * PCI.values.T @ (phi / phi.sum(0)) - ((1 - d) * PCI.values.T)
) * M.applymap(lambda x: np.nan if x == 1 else 1)
def pivot_area_cluster(df, cluster, aggfunc=sum):
"""Convert long data into a matrix, pivoting on `cluster`
For example, take BRES/IDBR data at Local authority (LAD) geographic level
and SIC4 sectoral level to create matrix with elements representing the
activity level for a given LAD-SIC4 combination.
Args:
df (pandas.DataFrame): Long dataframe
Expected Columns: `{"geo_nm", "geo_cd", cluster}`
cluster (str): Column of the sector type to pivot on
aggfunc (function, optional): Aggregation function passed to
`pandas.DataFrame.pivot_table`.
Returns:
pandas.DataFrame: [number areas x number cluster]
Note: Fills missing values with zero
"""
return (
df
# Fill missing values with zeros
.fillna(0)
# Pivot to [areas x sectors]
.pivot_table(
index=["geo_cd", "geo_nm"],
columns=cluster,
values="value",
fill_value=0,
aggfunc=aggfunc,
)
)
def create_lq(X, threshold=1, binary=False):
"""Calculate the location quotient.
Divides the share of activity in a location by the share of activity in
the UK total.
Args:
X (pandas.DataFrame): Rows are locations, columns are sectors,
and values are activity in a given sector at a location.
threshold (float, optional): Binarisation threshold.
binary (bool, optional): If True, binarise matrix at `threshold`.
Returns:
pandas.DataFrame
#UTILS
"""
Xm = X.values
with np.errstate(invalid="ignore"): # Accounted for divide by zero
X = pd.DataFrame(
(Xm * Xm.sum()) / (Xm.sum(1)[:, np.newaxis] * Xm.sum(0)),
index=X.index,
columns=X.columns,
).fillna(0)
return (X > threshold).astype(float) if binary else X
def calc_fitness(X, n_iters):
"""Calculate the fitness metric of economic complexity
Args:
X (pandas.DataFrame): Rows are locations, columns are sectors,
and values are activity in a given sector at a location.
n_iters (int): Number of iterations to calculate fitness for
Returns:
pandas.DataFrame
#UTILS
"""
X = _drop_zero_rows_cols(X)
x = np.ones(X.shape[0])
for n in range(1, n_iters):
x = (X.values / (X.values / x[:, np.newaxis]).sum(0)).sum(1)
x = x / x.mean()
return pd.DataFrame(np.log(x), index=X.index, columns=["fitness"])
def calc_fit_plus(X, n_iters, correction=True):
"""Calculate the fitness+ (ECI+) metric of economic complexity
Args:
X (pandas.Dataframe): Rows are locations, columns are sectors,
and values are activity in a given sector at a location.
n_iters (int): Number of iterations to calculate fitness for
correction (bool, optional): If true, apply logarithmic correction.
Returns:
pandas.Dataframe
#UTILS
"""
X = _drop_zero_rows_cols(X)
if X.dtypes[0] == bool:
norm_mean = np.mean
else:
norm_mean = ss.gmean
x = X.values.sum(axis=1)
x = x / norm_mean(x)
for n in range(1, n_iters):
x = (X.values / (X.values / x[:, np.newaxis]).sum(0)).sum(1)
x = x / norm_mean(x)
if correction:
x = np.log(x) - np.log((X / X.sum(0)).sum(1))
else:
pass # x = np.log(x)
return pd.DataFrame(x, index=X.index, columns=["fit_p"])
def calc_eci(X, sign_correction=None):
"""Calculate the original economic complexity index (ECI).
Args:
X (pandas.DataFrame): Rows are locations, columns are sectors,
and values are activity in a given sector at a location.
sign_correction (pd.Series, optional): Array to correlate with ECI
to calculate sign correction. Typically, ubiquity. If None, uses
the sum over columns of the input data.
Returns:
pandas.DataFrame
#UTILS
"""
X = _drop_zero_rows_cols(X)
C = np.diag(1 / X.sum(1)) # Diagonal entries k_C
P = np.diag(1 / X.sum(0)) # Diagonal entries k_P
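# Method-of-reflections ECI: H = C @ X @ P @ X.T; the second eigenvector returned by eig is taken as the raw index, then standardised and sign-corrected below.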
H = C @ X.values @ P @ X.T.values
w, v = eig(H, left=False, right=True)
eci = pd.DataFrame(v[:, 1].real, index=X.index, columns=["eci"])
# Positively correlate `sign_correction` (some proxy for diversity) w/ ECI
if sign_correction is None:
sign_correction = X.sum(1)
else:
sign_correction = sign_correction.loc[X.index]
sign = np.sign(np.corrcoef(sign_correction, eci.eci.values)[0, 1])
logger.info(f"CI sign: {sign}")
return (eci - eci.mean()) / eci.std() * sign
def _drop_zero_rows_cols(X):
"""Drop regions/entities with no activity
Fully zero column/row means ECI cannot be calculated
"""
nz_rows = X.sum(1) > 0
has_zero_rows = nz_rows.sum() != X.shape[0]
if has_zero_rows:
logger.warning(f"Dropping all zero rows: {X.loc[~nz_rows].index.values}")
X = X.loc[nz_rows]
nz_cols = X.sum(0) > 0
has_zero_cols = nz_cols.sum() != X.shape[1]
if has_zero_cols:
logger.warning(f"Dropping all zero cols: {X.loc[:, ~nz_cols].columns.values}")
X = X.loc[:, nz_cols]
return X
def simple_diversity(X):
"""Generate two simple measures of diversity
The first measure is the number of areas engaging in an activity
The second measure is the number of areas with a revealed comparative advantage
Args:
X (pandas.DataFrame): Rows are locations, columns are sectors,
and values are activity in a given sector at a location.
Returns:
pandas.DataFrame
#UTILS
"""
div_1 = X.pipe(lambda x: np.sum(x > 0, axis=1)).to_frame("div_n_active")
div_2 = (
X.pipe(create_lq, binary=True, threshold=1).sum(axis=1).to_frame("div_n_RCA")
)
return pd.concat([div_1, div_2], axis=1)
| 29.968326
| 88
| 0.616337
| 1,855
| 13,246
| 4.29434
| 0.173046
| 0.048958
| 0.033141
| 0.025107
| 0.429199
| 0.382375
| 0.34459
| 0.340949
| 0.340949
| 0.334923
| 0
| 0.008394
| 0.262494
| 13,246
| 441
| 89
| 30.036281
| 0.807043
| 0.448437
| 0
| 0.215909
| 0
| 0
| 0.039071
| 0.008297
| 0
| 0
| 0
| 0.002268
| 0
| 1
| 0.090909
| false
| 0.005682
| 0.039773
| 0
| 0.221591
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2aaa982479408d6fca2ceb47bf8d2f924d7e364
| 768
|
py
|
Python
|
Exercicios/Ex019.py
|
RenanRibeiroDaSilva/Meu-Aprendizado-Python
|
280bf2ad132ae0d26255e70b894fa7dbb69a5d01
|
[
"MIT"
] | 2
|
2021-05-21T23:17:44.000Z
|
2021-05-22T04:34:37.000Z
|
Exercicios/Ex019.py
|
RenanRibeiroDaSilva/Meu-Aprendizado-Python
|
280bf2ad132ae0d26255e70b894fa7dbb69a5d01
|
[
"MIT"
] | null | null | null |
Exercicios/Ex019.py
|
RenanRibeiroDaSilva/Meu-Aprendizado-Python
|
280bf2ad132ae0d26255e70b894fa7dbb69a5d01
|
[
"MIT"
] | null | null | null |
'''Ex 019 - A teacher wants to pick one of his four students at random to erase the board.
Write a program that helps him by reading the students' names and printing the chosen student's name on the screen.'''
print('-' * 15, '>Ex 19<', '-' * 15)
from random import choice
# Using random.choice to draw the chosen student.
# Reading input.
aluno1 = str(input('Enter the name of the student: '))
aluno2 = str(input('Enter the name of the student: '))
aluno3 = str(input('Enter the name of the student: '))
aluno4 = str(input('Enter the name of the student: '))
# Building a list so one of the entered names can be chosen.
lista = [aluno1, aluno2, aluno3, aluno4]
# Using choice to draw one name from the list.
escolhido = choice(lista)
# Printing the result for the user.
print('The chosen student was {}'.format(escolhido))
| 33.391304
| 100
| 0.716146
| 121
| 768
| 4.545455
| 0.479339
| 0.054545
| 0.063636
| 0.109091
| 0.189091
| 0.189091
| 0.189091
| 0
| 0
| 0
| 0
| 0.02673
| 0.171875
| 768
| 23
| 101
| 33.391304
| 0.83805
| 0.496094
| 0
| 0
| 0
| 0
| 0.314815
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.222222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2afa0144857d385ec53c489e4695b2ff1d1fdcf
| 1,327
|
py
|
Python
|
alipay/aop/api/domain/AlipayOpenAuthUserauthTokenCreateModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayOpenAuthUserauthTokenCreateModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayOpenAuthUserauthTokenCreateModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenAuthUserauthTokenCreateModel(object):
def __init__(self):
self._scopes = None
self._user_id = None
@property
def scopes(self):
return self._scopes
@scopes.setter
def scopes(self, value):
self._scopes = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.scopes:
if hasattr(self.scopes, 'to_alipay_dict'):
params['scopes'] = self.scopes.to_alipay_dict()
else:
params['scopes'] = self.scopes
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenAuthUserauthTokenCreateModel()
if 'scopes' in d:
o.scopes = d['scopes']
if 'user_id' in d:
o.user_id = d['user_id']
return o
| 23.696429
| 65
| 0.57046
| 159
| 1,327
| 4.528302
| 0.251572
| 0.125
| 0.097222
| 0.036111
| 0.188889
| 0.108333
| 0
| 0
| 0
| 0
| 0
| 0.001119
| 0.3263
| 1,327
| 55
| 66
| 24.127273
| 0.804251
| 0.03165
| 0
| 0.097561
| 0
| 0
| 0.062451
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.170732
| false
| 0
| 0.04878
| 0.04878
| 0.365854
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2b463e3b92836e2fb5a6f0fa7a7587ea2477928
| 750
|
py
|
Python
|
advanced/image_processing/examples/plot_blur.py
|
rossbar/scipy-lecture-notes
|
7f74e6925721c43bd81bf0bee34b4805ac4a3b57
|
[
"CC-BY-4.0"
] | 2,538
|
2015-01-01T04:58:41.000Z
|
2022-03-31T21:06:05.000Z
|
advanced/image_processing/examples/plot_blur.py
|
rossbar/scipy-lecture-notes
|
7f74e6925721c43bd81bf0bee34b4805ac4a3b57
|
[
"CC-BY-4.0"
] | 362
|
2015-01-18T14:16:23.000Z
|
2021-11-18T16:24:34.000Z
|
advanced/image_processing/examples/plot_blur.py
|
rossbar/scipy-lecture-notes
|
7f74e6925721c43bd81bf0bee34b4805ac4a3b57
|
[
"CC-BY-4.0"
] | 1,127
|
2015-01-05T14:39:29.000Z
|
2022-03-25T08:38:39.000Z
|
"""
Blurring of images
===================
An example showing various processes that blur an image.
"""
import scipy.misc
from scipy import ndimage
import matplotlib.pyplot as plt
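# Load the sample image and compare Gaussian blurs (sigma=3 and sigma=5) with a uniform (box) filter of size 11.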
face = scipy.misc.face(gray=True)
blurred_face = ndimage.gaussian_filter(face, sigma=3)
very_blurred = ndimage.gaussian_filter(face, sigma=5)
local_mean = ndimage.uniform_filter(face, size=11)
plt.figure(figsize=(9, 3))
plt.subplot(131)
plt.imshow(blurred_face, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(132)
plt.imshow(very_blurred, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(133)
plt.imshow(local_mean, cmap=plt.cm.gray)
plt.axis('off')
plt.subplots_adjust(wspace=0, hspace=0., top=0.99, bottom=0.01,
left=0.01, right=0.99)
plt.show()
| 23.4375
| 63
| 0.716
| 122
| 750
| 4.319672
| 0.483607
| 0.056926
| 0.051233
| 0.074004
| 0.288425
| 0.174573
| 0.174573
| 0.174573
| 0.125237
| 0
| 0
| 0.043741
| 0.116
| 750
| 31
| 64
| 24.193548
| 0.751131
| 0.128
| 0
| 0.15
| 0
| 0
| 0.013932
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.15
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2b8cf5ed62085b93846cc634a5c0abe566a9d50
| 4,376
|
py
|
Python
|
smartsnippets_inherit/cms_plugins.py
|
pbs/django-cms-smartsnippets
|
61727dbdf44678ebd7df3fbeca8e7e190e364cc8
|
[
"BSD-3-Clause"
] | 5
|
2015-08-06T14:47:00.000Z
|
2021-02-17T19:18:27.000Z
|
smartsnippets_inherit/cms_plugins.py
|
pbs/django-cms-smartsnippets
|
61727dbdf44678ebd7df3fbeca8e7e190e364cc8
|
[
"BSD-3-Clause"
] | 11
|
2015-03-10T23:16:40.000Z
|
2018-07-01T22:44:55.000Z
|
smartsnippets_inherit/cms_plugins.py
|
pbs/django-cms-smartsnippets
|
61727dbdf44678ebd7df3fbeca8e7e190e364cc8
|
[
"BSD-3-Clause"
] | 5
|
2015-06-04T17:35:34.000Z
|
2018-02-08T15:43:59.000Z
|
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.plugins.utils import downcast_plugins
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from smartsnippets_inherit.models import InheritPageContent
from smartsnippets_inherit.forms import InheritPageForm
from smartsnippets_inherit.settings import USE_BOOTSTRAP_ACE
from smartsnippets.settings import inherit_variable_pattern
from smartsnippets.models import Variable, SmartSnippetPointer
from contextlib import contextmanager
from itertools import chain
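# Temporarily point request.current_page at the inherited page so plugins render in that page's context, restoring the original page afterwards.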
@contextmanager
def current_page(request, page):
original_page = getattr(request, 'current_page', None)
try:
setattr(request, 'current_page', page)
yield
finally:
setattr(request, 'current_page', original_page)
class PageInheritPlugin(CMSPluginBase):
model = InheritPageContent
name = "Inherit Content from Page"
render_template = 'smartsnippets/plugin.html'
change_form_template = 'admin/smartsnippets_inherit/plugininherit_change_form.html'
admin_preview = False
form = InheritPageForm
page_only = True
def render_inherited(self, context, instance):
content = ''
if not instance.from_page.published:
return content
inherited = instance.get_placeholder()
if not inherited:
return content
# prepare variables to be passed to the context with different values
new_vars = {}
for overwrite_var in instance.overwrite_variables.all():
var = overwrite_var.to_variable()
context_var = inherit_variable_pattern.format(identifier=var.pk)
new_vars[context_var] = var.formatted_value
with current_page(context.get('request'), instance.from_page):
# inject new variables in context
# so that snippet plugin render can pick them up
context.update({name: value for name, value in new_vars.items()})
# render plugins from the inherited section
# with the updated context
content = inherited.render(context, None)
# remove overwritten data from context
for name in new_vars.keys():
if name in context:
del context[name]
return content
def render(self, context, instance, placeholder):
context.update({'content': self.render_inherited(context, instance)})
return context
def get_form(self, request, obj=None, **kwargs):
formCls = super(PageInheritPlugin, self).get_form(
request, obj, **kwargs)
formCls.current_page = self.cms_plugin_instance.page or self.page
formCls.use_ace_theme = USE_BOOTSTRAP_ACE
return formCls
def change_view(self, request, object_id, *args, **kwargs):
extra_context = kwargs.get('extra_context', None) or {}
try:
plugin = InheritPageContent.objects.get(id=object_id)
placeholder = plugin.get_placeholder()
extra_context.update({
'snippet_plugins': self.get_inherited_snippets(placeholder)
})
except (InheritPageContent.DoesNotExist, ):
pass
kwargs['extra_context'] = extra_context
return super(PageInheritPlugin, self).change_view(
request, object_id, *args, **kwargs)
def get_inherited_snippets(self, placeholder):
if not placeholder or not placeholder.page:
return []
def can_be_overwritten(plg):
return (
plg.__class__ is SmartSnippetPointer and
plg.variables.exists()
)
page = placeholder.page
slot = placeholder.slot
pages = chain([page], page.get_cached_ancestors(ascending=True))
for ancestor in pages:
placeholder = ancestor.placeholders.filter(slot=slot)[:1]
if not placeholder:
continue
placeholder = placeholder[0]
plugins = downcast_plugins(placeholder.get_plugins())
if not plugins:
continue
return sorted(
filter(can_be_overwritten, plugins),
key=lambda plg: plg.position,
)
return []
plugin_pool.register_plugin(PageInheritPlugin)
| 37.084746
| 87
| 0.662934
| 469
| 4,376
| 6.008529
| 0.30064
| 0.023421
| 0.02555
| 0.017743
| 0.017743
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000622
| 0.265539
| 4,376
| 117
| 88
| 37.401709
| 0.876167
| 0.058044
| 0
| 0.095745
| 0
| 0
| 0.04836
| 0.02017
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074468
| false
| 0.010638
| 0.12766
| 0.010638
| 0.393617
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2bbc6212ba14cce222e1171cae69fdb2905ea98
| 727
|
py
|
Python
|
uploadHelpers.py
|
BNUZ-China/iGem-Wiki
|
18216737bbd1d5316e5302ff7202a9fa139ad033
|
[
"MIT"
] | 1
|
2021-08-28T15:06:10.000Z
|
2021-08-28T15:06:10.000Z
|
uploadHelpers.py
|
BNUZ-China/iGem-Wiki
|
18216737bbd1d5316e5302ff7202a9fa139ad033
|
[
"MIT"
] | null | null | null |
uploadHelpers.py
|
BNUZ-China/iGem-Wiki
|
18216737bbd1d5316e5302ff7202a9fa139ad033
|
[
"MIT"
] | null | null | null |
import os
from subprocess import run
import pyperclip
import webbrowser
from urllib import parse
location = 'production'
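# For each file in the folder, open the matching iGEM wiki template edit URL in the browser and copy the file contents to the clipboard for manual pasting.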
def runOnSingleFolder(folder):
file_list = os.listdir(os.path.join(location, folder))
for file in file_list:
file_noextend = file[:-(len(folder) + 1)]
url = f'https://2021.igem.org/wiki/index.php?title=Template:BNUZ-China/{folder}/{parse.quote(file_noextend)}&action=edit'
webbrowser.open(url)
print(url)
with open(os.path.join(location, folder, file), encoding='utf-8') as f:
content = f.read()
pyperclip.copy(content)
print('The corresponding js code has been copied to the clipboard; paste it into the opened page, then press Enter when done')
input()
runOnSingleFolder('js')
runOnSingleFolder('css')
| 29.08
| 129
| 0.672627
| 92
| 727
| 5.271739
| 0.608696
| 0.041237
| 0.041237
| 0.074227
| 0.098969
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010292
| 0.198074
| 727
| 24
| 130
| 30.291667
| 0.821612
| 0
| 0
| 0
| 0
| 0.05
| 0.218707
| 0.037139
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.25
| 0
| 0.3
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2bd92ea5b65d1f42b8e2aa98a412fc4debb102e
| 1,180
|
py
|
Python
|
Snake.py
|
ZippyCodeYT/Zippy_Codes
|
91101085194ba2f30c74a82639b4730d52bb76dc
|
[
"CC-BY-4.0"
] | 64
|
2021-07-11T17:56:42.000Z
|
2022-03-28T14:17:53.000Z
|
Snake.py
|
ZippyCodeYT/Zippy_Codes
|
91101085194ba2f30c74a82639b4730d52bb76dc
|
[
"CC-BY-4.0"
] | 9
|
2021-07-10T23:26:39.000Z
|
2022-03-04T17:39:57.000Z
|
Snake.py
|
ZippyCodeYT/Ursina_Codes
|
91101085194ba2f30c74a82639b4730d52bb76dc
|
[
"CC-BY-4.0"
] | 57
|
2021-07-14T17:09:46.000Z
|
2022-03-31T08:55:51.000Z
|
from ursina import *
app = Ursina()
snake = Entity(model='cube', texture='assets\\snake', scale=0.4, z=-1, collider='box')
ground = Entity(model='cube', texture='grass',rotation=(90,0,0),scale=(5,1,5), z=1)
apple = Entity(model='cube', texture='assets\\apple', scale=0.4, position=(1,-1,-1), collider='mesh')
body = [Entity(model='cube', scale =0.2, texture='assets\\body') for i in range(14)]
camera.orthographic = True
camera.fov = 8
from random import randint
dx = dy = 0
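# dx/dy hold the snake's current direction; update() runs every frame, handling apple collisions, growing the body, and shifting each segment to follow the head.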
def update():
info = snake.intersects()
if info.hit:
apple.x = randint(-4,4)/2
apple.y = randint(-4,4)/2
new = Entity(model='cube', z = -1, scale=0.2, texture='assets\\body')
body.append(new)
for i in range(len(body)-1,0,-1):
pos = body[i-1].position
body[i].position = pos
body[0].x = snake.x
body[0].y = snake.y
snake.x += time.dt * dx
snake.y += time.dt * dy
def input(key):
global dx,dy
for x,y,z in zip(['d','a'],[2,-2],[270,90]):
if key==x:
snake.rotation_z = z
dx = y
dy = 0
for x,y,z in zip(['w','s'],[2,-2],[180,0]):
if key == x:
snake.rotation_z = z
dy = y
dx = 0
app.run()
| 16.857143
| 101
| 0.572881
| 206
| 1,180
| 3.271845
| 0.320388
| 0.081602
| 0.111276
| 0.097923
| 0.249258
| 0.166172
| 0.062315
| 0
| 0
| 0
| 0
| 0.056399
| 0.218644
| 1,180
| 69
| 102
| 17.101449
| 0.67462
| 0
| 0
| 0.108108
| 0
| 0
| 0.073721
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.054054
| 0
| 0.108108
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2c31ca71ec1d801042e3c41eac4e04e937da0de
| 11,186
|
py
|
Python
|
instance_selection/_DROP3.py
|
dpr1005/Semisupervised-learning-and-instance-selection-methods
|
646d9e729c85322e859928e71a3241f2aec6d93d
|
[
"MIT"
] | 3
|
2021-12-10T09:04:18.000Z
|
2022-01-22T15:03:19.000Z
|
instance_selection/_DROP3.py
|
dpr1005/Semisupervised-learning-and-instance-selection-methods
|
646d9e729c85322e859928e71a3241f2aec6d93d
|
[
"MIT"
] | 107
|
2021-12-02T07:43:11.000Z
|
2022-03-31T11:02:46.000Z
|
instance_selection/_DROP3.py
|
dpr1005/Semisupervised-learning-and-instance-selection-methods
|
646d9e729c85322e859928e71a3241f2aec6d93d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Filename: DROP3.py
# @Author: Daniel Puente Ramírez
# @Time: 31/12/21 16:00
# @Version: 5.0
import copy
from sys import maxsize
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
from .utils import transform
class DROP3:
"""
Wilson, D. R., & Martinez, T. R. (2000). Reduction techniques for
instance-based learning algorithms. Machine learning, 38(3), 257-286.
Parameters
----------
nearest_neighbors : int, default=3
Number to use as nearest neighbors when computing distances.
power_parameter : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance (l2)
for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
"""
def __init__(self, nearest_neighbors=3, power_parameter=2):
"""
The function takes in two parameters, nearest_neighbors and
power_parameter, and assigns them to the attributes nearest_neighbors
and power_parameter
:param nearest_neighbors: The number of nearest neighbors to use when
calculating the weights, defaults to 3 (optional)
:param power_parameter: This is the exponent that is used to calculate
the weights, defaults to 2 (optional)
"""
self.nearest_neighbors = nearest_neighbors
self.power_parameter = power_parameter
self.x_attr = None
def filter(self, samples, y):
"""
Implementation of DROP3.
The Decremental Reduction Optimization Procedure (DROP) algorithms base
their selection rule in terms of the partner and associate concept.
At the very beginning a Wilson Editing algorithm is performed in order
to remove any noise that may be contained in the data. This is followed by
the DROP algorithm, in which an instance will be removed if its
associates are correctly classified without the instance.
:param samples: DataFrame.
:param y: DataFrame.
:return: the input dataset with the remaining samples.
"""
(
initial_distances,
initial_samples,
initial_targets,
knn,
samples_info,
) = self._create_variables(samples, y)
self._find_associates(
initial_distances, initial_samples, initial_targets, knn, samples_info
)
initial_distances.sort(key=lambda x: x[2], reverse=True)
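# Instances are now ordered so that those farthest from their nearest enemy (closest sample of another class) are considered for removal first.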
removed = 0
size = len(initial_distances)
for index_x in range(size):
x_sample = initial_distances[index_x - removed][0]
with_, without = self._with_without(tuple(x_sample), samples_info)
if without >= with_:
initial_distances = (
initial_distances[: index_x - removed]
+ initial_distances[index_x - removed + 1:]
)
removed += 1
for a_associate_of_x in samples_info[(tuple(x_sample))][1]:
a_neighs, remaining_samples = self._remove_from_neighs(
a_associate_of_x, initial_distances, samples_info, x_sample
)
knn = NearestNeighbors(
n_neighbors=self.nearest_neighbors + 2,
n_jobs=1,
p=self.power_parameter,
)
knn.fit(remaining_samples)
_, neigh_ind = knn.kneighbors([a_associate_of_x])
possible_neighs = [initial_distances[x][0]
for x in neigh_ind[0]]
self._find_new_neighs(
a_associate_of_x, a_neighs, possible_neighs, samples_info
)
new_neigh = a_neighs[-1]
samples_info[tuple(new_neigh)][1].append(a_associate_of_x)
samples = pd.DataFrame(
[x for x, _, _ in initial_distances], columns=self.x_attr
)
y = pd.DataFrame([x for _, x, _ in initial_distances])
return samples, y
def _create_variables(self, samples, y):
"""
> It takes in the samples and targets, and returns the initial
distances, samples, targets, knn, and samples_info
:param samples: the data
:param y: the target variable
:return: initial_distances, initial_samples, initial_targets, knn,
samples_info
"""
self.x_attr = samples.keys()
samples = transform(samples, y)
s = copy.deepcopy(samples)
initial_samples = s["data"]
initial_targets = s["target"]
initial_samples, samples_index = np.unique(
ar=initial_samples, return_index=True, axis=0
)
initial_targets = initial_targets[samples_index]
knn = NearestNeighbors(
n_neighbors=self.nearest_neighbors + 2, n_jobs=1, p=self.power_parameter
)
knn.fit(initial_samples)
samples_info = {
tuple(x): [[], [], y] for x, y in zip(initial_samples, initial_targets)
}
initial_distances = []
return initial_distances, initial_samples, initial_targets, knn, samples_info
@staticmethod
def _find_new_neighs(a_associate_of_x, a_neighs, possible_neighs, samples_info):
"""
> The function takes a sample, finds its neighbors, and then checks if
any of the neighbors are not already in the list of neighbors. If
they are not, then they are added to the list of neighbors
:param a_associate_of_x: the sample we are looking for neighbors for
:param a_neighs: the list of neighbors of a_associate_of_x
:param possible_neighs: a list of all the possible neighbors of a given
point
:param samples_info: a dictionary with the following structure:
"""
for pos_neigh in possible_neighs[1:]:
was_in = False
for old_neigh in a_neighs:
if np.array_equal(old_neigh, pos_neigh):
was_in = True
break
if not was_in:
a_neighs.append(pos_neigh)
break
samples_info[tuple(a_associate_of_x)][0] = a_neighs
@staticmethod
def _remove_from_neighs(
a_associate_of_x, initial_distances, samples_info, x_sample
):
"""
> It removes the sample `x_sample` from the list of neighbors of
`a_associate_of_x` and returns the updated list of neighbors of
`a_associate_of_x` and the updated list of remaining samples
:param a_associate_of_x: the sample that is the associate of x
:param initial_distances: a list of tuples of the form (sample,
distance, associate)
:param samples_info: a dictionary of the form
{(x,y):[neighs,distances,associate]}
:param x_sample: the sample we want to find the nearest neighbor for
:return: the new list of neighbors of a_associate_of_x, and the list of
remaining samples.
"""
a_neighs = samples_info[tuple(a_associate_of_x)][0]
index_to_use = 0
for index_a, neigh in enumerate(a_neighs):
index_to_use = index_a
if np.array_equal(neigh, x_sample):
break
a_neighs = a_neighs[:index_to_use] + a_neighs[index_to_use + 1:]
remaining_samples = [x for x, _, _ in initial_distances]
return a_neighs, remaining_samples
@staticmethod
def _find_associates(
initial_distances, initial_samples, initial_targets, knn, samples_info
):
"""
For each sample in the initial set, find the closest sample from the
other class and store it in the initial_distances list
:param initial_distances: a list of lists, each list containing a
sample, its target, and its distance to the nearest sample of a
different class
:param initial_samples: the samples that we want to find the nearest
neighbors for
:param initial_targets: the labels of the initial samples
:param knn: the k-nearest neighbors model
:param samples_info: a dictionary that stores the neighbors of each
sample and the samples that are neighbors of each sample
"""
for x_sample, x_target in zip(initial_samples, initial_targets):
min_distance = maxsize
for y_sample, y_label in zip(initial_samples, initial_targets):
if x_target != y_label:
xy_distance = np.linalg.norm(x_sample - y_sample)
if xy_distance < min_distance:
min_distance = xy_distance
initial_distances.append([x_sample, x_target, min_distance])
_, neigh_ind = knn.kneighbors([x_sample])
x_neighs = [initial_samples[x] for x in neigh_ind[0][1:]]
samples_info[tuple(x_sample)][0] = x_neighs
for neigh in x_neighs[:-1]:
samples_info[tuple(neigh)][1].append(x_sample)
@staticmethod
def _with_without(x_sample, samples_info):
"""
For each sample in the dataset, we find its associates and then for each
associate, we find its neighbors. We then find the class with the most
number of neighbors and compare it with the class of the associate. If
they are the same, we increment the `with_` variable. If they are not
the same, we increment the `without` variable
:param x_sample: the sample we're looking at
:param samples_info: a dictionary of the form {(x,y):[neighbors,
associates, target]}
:return: The number of times the target class of the sample is the most
common class among its neighbors, with and without the sample itself.
"""
index_a = 0
with_ = 0
without = 0
x_associates = samples_info[x_sample][1]
associates_targets = [samples_info[tuple(x)][2] for x in x_associates]
associates_neighs = [samples_info[tuple(x)][0] for x in x_associates]
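# Count how many associates are correctly classified by a majority vote of their neighbours with x_sample included (with_) versus with it excluded (without).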
for _, a_target, a_neighs in zip(
x_associates, associates_targets, associates_neighs
):
neighs_targets = np.ravel(
np.array([samples_info[tuple(x)][2] for x in a_neighs])
).astype(int)
neighs_targets = neighs_targets.tolist()
count = np.bincount(neighs_targets[:-1])
max_class = np.where(count == np.amax(count))[0][0]
if max_class == a_target:
with_ += 1
for index_a, neigh in enumerate(a_neighs):
if np.array_equal(neigh, x_sample):
break
count = np.bincount(
neighs_targets[:index_a] + neighs_targets[index_a + 1:]
)
max_class = np.where(count == np.amax(count))[0][0]
if max_class == a_target:
without += 1
return with_, without
| 39.111888
| 85
| 0.614339
| 1,421
| 11,186
| 4.616467
| 0.187192
| 0.045274
| 0.029268
| 0.029726
| 0.347713
| 0.273323
| 0.228506
| 0.207774
| 0.161738
| 0.15122
| 0
| 0.010308
| 0.314858
| 11,186
| 285
| 86
| 39.249123
| 0.845642
| 0.361345
| 0
| 0.170068
| 0
| 0
| 0.001544
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.040816
| 0
| 0.122449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2c78be72ea72b242adb4ca29ed829fd6b4d5b20
| 1,445
|
py
|
Python
|
set4/challenge27.py
|
solfer/cryptopals_python
|
6b22981a663b3dd2ef5fb5c30b1a6dc13eb0af1a
|
[
"MIT"
] | null | null | null |
set4/challenge27.py
|
solfer/cryptopals_python
|
6b22981a663b3dd2ef5fb5c30b1a6dc13eb0af1a
|
[
"MIT"
] | null | null | null |
set4/challenge27.py
|
solfer/cryptopals_python
|
6b22981a663b3dd2ef5fb5c30b1a6dc13eb0af1a
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python3
from Crypto.Cipher import AES
from random import randint
# https://www.cryptopals.com/sets/4/challenges/27
# Recover the key from CBC with IV=Key
import sys
sys.path.append('..')
from cryptopals import ctr,xor,random_aes_key,cbc_decrypt,cbc_encrypt
def random_aes_key(blocksize=16):
# build a random key byte-by-byte using randint
return bytes(randint(0, 255) for _ in range(blocksize))
def detect_high_ascii(text):
for c in text:
if c >= 0x80:
return True
return False
def f1(plaintext):
global key
aes_ecb = AES.new(key, AES.MODE_ECB)
return cbc_encrypt(aes_ecb,plaintext,IV)
def f2(ciphertext):
global key
aes_ecb = AES.new(key, AES.MODE_ECB)
plaintext = cbc_decrypt(aes_ecb,ciphertext,IV)
if detect_high_ascii(plaintext):
return plaintext
else:
return False
def blockfy(data, blocklen=16):
return [data[i:i+blocklen] for i in range(0,len(data),blocklen)]
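# CBC key-recovery when IV == key: submit C1 || 0^16 || C1 to the decryptor and XOR the first and third plaintext blocks to recover the IV, i.e. the key.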
def main():
blocksize = 16
global key
global IV
#key = random_aes_key(blocksize)
key = "YELLOW SUBMARINE"
IV = bytearray(key,"ascii")
INPUT = bytearray("A"*32,"ascii")
ciphertext = f1(INPUT)
temp = blockfy(ciphertext)
x = temp[0]
x.extend(bytearray(16))
x.extend(temp[0])
#x = temp[0] + "\x00"*16 + temp[0]
r = f2(x)
if r:
error = r
else:
print ("Bad luck!")
exit()
p = blockfy(error)
k = xor(p[0],p[2])
print (k)
main()
| 21.567164
| 69
| 0.632526
| 213
| 1,445
| 4.192488
| 0.389671
| 0.026876
| 0.040314
| 0.047032
| 0.076148
| 0.076148
| 0.076148
| 0.076148
| 0.076148
| 0.076148
| 0
| 0.029331
| 0.244983
| 1,445
| 66
| 70
| 21.893939
| 0.789184
| 0.115571
| 0
| 0.1875
| 0
| 0
| 0.029851
| 0
| 0
| 0
| 0.003142
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.083333
| 0.041667
| 0.354167
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2c80dfda0a5984d9ce2a209c4604c7a22beaa47
| 577
|
wsgi
|
Python
|
testproject/testproject.wsgi
|
c4mb0t/django-setman
|
6551e3f6367bf8ee7c8f91e893c9e8439428f28a
|
[
"BSD-3-Clause"
] | 1
|
2015-05-30T15:05:14.000Z
|
2015-05-30T15:05:14.000Z
|
testproject/testproject.wsgi
|
c4mb0t/django-setman
|
6551e3f6367bf8ee7c8f91e893c9e8439428f28a
|
[
"BSD-3-Clause"
] | null | null | null |
testproject/testproject.wsgi
|
c4mb0t/django-setman
|
6551e3f6367bf8ee7c8f91e893c9e8439428f28a
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import sys
DIRNAME = os.path.abspath(os.path.dirname(__file__))
rel = lambda *x: os.path.abspath(os.path.join(DIRNAME, *x))
PROJECT_DIR = rel('..')
activate_this = rel('env', 'bin', 'activate_this.py')
# Activate virtualenv
execfile(activate_this, {'__file__': activate_this})
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
os.environ['PYTHON_EGG_CACHE'] = '/srv/python_eggs/'
# Need to add upper-level dir to syspath to reproduce dev Django environ
sys.path.append(PROJECT_DIR)
from django.core.handlers.wsgi import WSGIHandler
application = WSGIHandler()
| 26.227273
| 72
| 0.753899
| 83
| 577
| 5.012048
| 0.53012
| 0.057692
| 0.0625
| 0.072115
| 0.091346
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105719
| 577
| 21
| 73
| 27.47619
| 0.806202
| 0.155979
| 0
| 0
| 0
| 0
| 0.196281
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2caaf55603ef2c7129fc78578663a36d8c83697
| 8,057
|
py
|
Python
|
ntp/modules/generate.py
|
Michiel29/ntp-release
|
567bf1ca823eeef5eeb2d63bbe16023ea63af766
|
[
"Apache-2.0"
] | 3
|
2019-07-03T11:25:12.000Z
|
2019-11-28T20:24:03.000Z
|
ntp/modules/generate.py
|
Michiel29/ntp-release
|
567bf1ca823eeef5eeb2d63bbe16023ea63af766
|
[
"Apache-2.0"
] | null | null | null |
ntp/modules/generate.py
|
Michiel29/ntp-release
|
567bf1ca823eeef5eeb2d63bbe16023ea63af766
|
[
"Apache-2.0"
] | null | null | null |
"""Functions for generating random data with injected relationships"""
from itertools import product
import os
import json
import re
import random
import numpy as np
from numpy import random as rd
from scipy.special import comb
from ntp.util.util_kb import load_from_list
def gen_relationships(n_pred, n_rel, body_predicates=1):
"""
Generates random relationships between predicates of the form goal predicate <-- {set of body predicates}.
Goal predicates have a higher number than body predicates.
Args:
n_pred: number of total predicates
n_rel: number of relationships
body_predicates: number of body predicates for each relationship
Returns:
Dict, entries where keys are goal predicates and values are list of body predicates
"""
relationship_dict = {}
n_rel_possible = comb(n_pred, body_predicates + 1)
pred_probs = [comb(i, body_predicates)/n_rel_possible for i in range(n_pred)]
relationship_head_array = list(rd.choice(n_pred, size=n_rel, replace=False, p=pred_probs))
relationship_body_array = [set(rd.choice(range(relationship_head_array[i]), size=body_predicates, replace=False)) for i in range(len(relationship_head_array))]
for i in range(n_rel):
relationship_dict[relationship_head_array[i]] = relationship_body_array[i]
return relationship_dict
def gen_simple(n_pred, relationship_dict, p_normal, p_relationship, n_constants, order=1):
"""
Generates random truth values for predicates for a set number of constants, and given some relationships
Args:
n_pred: number of total predicates
relationship_dict: Dict of relationships
p_normal: probability of predicate truth given no relationship/relationship body not true
p_relationship: probability of goal predicate truth given body predicate truth
n_constants: number of constants
order: order of predicate (unary, binary)
Returns:
Numpy array where value j, i corresponds to the truth value of predicate i for constant j
"""
# Checks whether body predicates for a particular relationship hold for a particular constant
    def body_holds(data, body_predicates, index):
holds = True
for predicate in body_predicates:
if data[index + (predicate,)] != 1:
holds = False
break
return holds
data = np.zeros([n_constants] * order + [n_pred])
for predicate in range(n_pred):
for index in product(*[range(n_constants) for i in range(order)]):
if predicate in relationship_dict:
if body_holds(data, relationship_dict[predicate], index):
data[index + (predicate,)] = rd.binomial(1, p_relationship)
continue
# Set variable normally if predicate from relationship doesn't hold
data[index + (predicate,)] = rd.binomial(1, p_normal)
return data
def write_data(data):
"""Convert numpy array of data into list of strings that the ntp algorithm can read"""
shape = np.shape(data)
text_list = []
for pred in range(shape[-1]):
for index in product(*[range(dim_size) for dim_size in shape[:-1]]):
if data[index + (pred,)] == 1:
write_string = "Predicate" + str(pred) + "("
for const in index:
write_string += "Constant" + str(const) + ","
write_string = write_string[:-1] + ").\n"
text_list.append(write_string)
return text_list
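# Illustrative pipeline sketch (added): how the helpers above fit together.
# rels = gen_relationships(n_pred=6, n_rel=2)
# data = gen_simple(6, rels, p_normal=0.1, p_relationship=0.9, n_constants=5, order=1)
# facts = write_data(data)
# Each emitted fact follows the pattern "Predicate<p>(Constant<c1>,...,Constant<ck>).\n",
# e.g. "Predicate3(Constant0).\n" for a unary predicate.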
def write_relationships(relationships, path):
"""write relationship dict to file"""
with open(path, "w") as f:
json.dump(relationships, f)
return
def write_simple_templates(n_rules, body_predicates=1, order=1):
"""Generate rule template of form C < A ^ B of varying size and order"""
text_list = []
const_term = "("
for i in range(order):
const_term += chr(ord('X') + i) + ","
const_term = const_term[:-1] + ")"
write_string = "{0} #1{1} :- #2{1}".format(n_rules, const_term)
if body_predicates > 1:
for i in range(body_predicates - 1):
write_string += ", #" + str(i + 3) + const_term
text_list.append(write_string)
return text_list
def gen_transitivity(n_preds, n_rules, n_constants, p_base, max_iterations=1):
"""Generate data with transitivity relationships, and also rule templates"""
# active predicate is predicate 0 WLOG
active_values = np.random.binomial(1, p_base, size=[n_constants, n_constants])
edges = [(i, j) for i in range(n_constants) for j in range(n_constants) if active_values[i, j] == 1]
closure = set(edges)
while True:
new_edges = set((x,w) for x,y in closure for q,w in closure if q == y)
closure_until_now = closure | new_edges
if closure_until_now == closure:
break
closure = closure_until_now
edges = list(closure)
active_values[tuple(np.transpose(edges))] = 1
values = np.random.binomial(1, p_base, size=[n_constants, n_constants, n_preds])
values[:, :, 0] = active_values
fact_list = write_data(values)
template = "{0} #1(X, Z) :- #1(X, Y), #1(Y, Z).".format(n_rules)
return fact_list, template
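# Worked example of the closure loop above (added for clarity): starting from
# edges {(0, 1), (1, 2)}, the first pass composes (0, 1) with (1, 2) and adds (0, 2);
# the second pass adds nothing new, so the loop stops with closure {(0, 1), (1, 2), (0, 2)}.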
def text_to_id(fact):
"""Given a fact in text form, convert to predicate and constant numbers"""
    reduced = re.sub(r"[^0-9\(,]", '', fact)
    reduced_split = tuple(re.split(r"[\(,]", reduced))
predicate = int(reduced_split[0])
constants = tuple([int(constant_text) for constant_text in reduced_split[1:]])
return predicate, constants
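# Example (added): text_to_id("Predicate3(Constant0,Constant7).\n") strips every character
# except digits, '(' and ',', leaving "3(0,7", and returns (3, (0, 7)).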
def gen_constant_dict(train_list):
"""Convert list of facts in text form to a dictionary of predicate truth values by constant"""
constant_dict = {}
for fact in train_list:
predicate, constants = text_to_id(fact)
if not constants in constant_dict:
constant_dict[constants] = set([predicate])
else:
constant_dict[constants].add(predicate)
return constant_dict
def test_fact_active(constant_dict, constants, predicate, relationships):
"""Given relationships, determine whether the truth value of a fact could be predicted by a relationship"""
if predicate in relationships:
if all(body_pred in constant_dict[constants] for body_pred in relationships[predicate]):
return True
return False
def count_active(constant_dict, relationships):
"""Given relationships and a dataset of constants, determine for how many facts the truth value could be predicted by a relationship"""
active_facts = 0
for constants, predicates in constant_dict.items():
for predicate in relationships:
if predicate in predicates and all(body_pred in predicates for body_pred in relationships[predicate]):
active_facts += 1
return active_facts
def gen_test_kb(train_list, n_test, test_active_only=False, relationships=None):
"""Given a list of facts, choose some facts to be split off to a test dataset in such a way that there is at least one training fact left for each constant"""
constant_dict = gen_constant_dict(train_list)
random.shuffle(train_list)
constant_set = set()
new_train_list = []
test_list = []
for fact in train_list:
predicate, constants = text_to_id(fact)
if test_active_only:
if test_fact_active(constant_dict, constants, predicate, relationships) and len(test_list) < n_test:
test_list.append(fact)
continue
else:
if all(constant in constant_set for constant in constants) and len(test_list) < n_test:
test_list.append(fact)
continue
else:
for constant in constants:
constant_set.add(constant)
new_train_list.append(fact)
train_list = new_train_list
test_kb = load_from_list(test_list)
return test_kb, train_list
| 37.129032
| 163
| 0.666749
| 1,099
| 8,057
| 4.707916
| 0.183803
| 0.040588
| 0.008118
| 0.014882
| 0.177039
| 0.146115
| 0.120603
| 0.096637
| 0.074604
| 0.058369
| 0
| 0.006255
| 0.245997
| 8,057
| 216
| 164
| 37.300926
| 0.845432
| 0.251582
| 0
| 0.153846
| 0
| 0.007692
| 0.016672
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092308
| false
| 0
| 0.069231
| 0
| 0.261538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2cabc8b7c10f234c2f764e400a0eb0ee368ade4
| 1,116
|
py
|
Python
|
accounts/tests/test_account_views.py
|
borzecki/django-paymate
|
960e1dcce2682e57374663d87e47c5cff0c7aae4
|
[
"MIT"
] | null | null | null |
accounts/tests/test_account_views.py
|
borzecki/django-paymate
|
960e1dcce2682e57374663d87e47c5cff0c7aae4
|
[
"MIT"
] | null | null | null |
accounts/tests/test_account_views.py
|
borzecki/django-paymate
|
960e1dcce2682e57374663d87e47c5cff0c7aae4
|
[
"MIT"
] | null | null | null |
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from accounts.models import Account
from accounts.serializers import AccountSerializer
from .utils import create_accounts
class AccountViewsTests(APITestCase):
def test_create_account(self):
"""
Ensure we can create a new account object.
"""
url = reverse('accounts-list')
data = {'name': 'Test'}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Account.objects.count(), 1)
self.assertEqual(Account.objects.get().name, 'Test')
def test_account_list(self):
"""
Ensure GET endpoint is returning all serialized accounts.
"""
create_accounts(10)
url = reverse('accounts-list')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, AccountSerializer(Account.objects.all(), many=True).data)
| 32.823529
| 97
| 0.689964
| 131
| 1,116
| 5.770992
| 0.419847
| 0.099206
| 0.09127
| 0.058201
| 0.140212
| 0.140212
| 0.140212
| 0.140212
| 0.140212
| 0
| 0
| 0.010181
| 0.207885
| 1,116
| 33
| 98
| 33.818182
| 0.845023
| 0.089606
| 0
| 0.1
| 0
| 0
| 0.047472
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.1
| false
| 0
| 0.3
| 0
| 0.45
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2cb04716bb5f1c7ce9e0998301f2ac347c3c6dd
| 202
|
py
|
Python
|
CTF/Pico2017/level_two/forensics/little_school_bus/solve.py
|
RegaledSeer/netsecnoobie
|
d3366937ec8c67a9742f61e47698239ae693af49
|
[
"MIT"
] | null | null | null |
CTF/Pico2017/level_two/forensics/little_school_bus/solve.py
|
RegaledSeer/netsecnoobie
|
d3366937ec8c67a9742f61e47698239ae693af49
|
[
"MIT"
] | null | null | null |
CTF/Pico2017/level_two/forensics/little_school_bus/solve.py
|
RegaledSeer/netsecnoobie
|
d3366937ec8c67a9742f61e47698239ae693af49
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
FILE_PATH = "./littleschoolbus.bmp"
with open(FILE_PATH,"rb") as f:
    data = bytearray(f.read())
result = ""
for byte in data[54:]:
    result += str(byte & 1)
print(result)
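# Follow-up sketch (added, not part of the original solver): the slice [54:] above skips
# the standard 54-byte BMP header, and the printed bit string is typically decoded 8 bits
# at a time into ASCII to reveal the flag. Assuming most-significant-bit-first packing:
flag = "".join(chr(int(result[i:i + 8], 2)) for i in range(0, len(result) - 7, 8))
print(flag)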
| 14.428571
| 35
| 0.633663
| 30
| 202
| 4.2
| 0.766667
| 0.126984
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024242
| 0.183168
| 202
| 13
| 36
| 15.538462
| 0.739394
| 0.084158
| 0
| 0
| 0
| 0
| 0.125
| 0.11413
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2ce62208e5d0f3a5f97c461255fe7d85b8afbee
| 13,528
|
py
|
Python
|
custom_utils/crop4patches.py
|
ziming-liu/ObjectDet
|
6e25fa784114b9773b052d9d5465aa6fed93468a
|
[
"Apache-2.0"
] | null | null | null |
custom_utils/crop4patches.py
|
ziming-liu/ObjectDet
|
6e25fa784114b9773b052d9d5465aa6fed93468a
|
[
"Apache-2.0"
] | null | null | null |
custom_utils/crop4patches.py
|
ziming-liu/ObjectDet
|
6e25fa784114b9773b052d9d5465aa6fed93468a
|
[
"Apache-2.0"
] | null | null | null |
import numpy
import os
import json
import cv2
import csv
import os.path as osp
import mmcv
import numpy as np
def isgood(w,h):
if w<2 or h<2:
return False
if w /h >10.0 or h/w >10.0:
return False
return True
def bbox_iou(box1, box2):
b1_x1, b1_y1, b1_x2, b1_y2 = box1
b2_x1, b2_y1, b2_x2, b2_y2 = box2
    #get the coordinates of the intersection rectangle
inter_rect_x1 = max(b1_x1, b2_x1)
inter_rect_y1 = max(b1_y1, b2_y1)
inter_rect_x2 = min(b1_x2, b2_x2)
inter_rect_y2 = min(b1_y2, b2_y2)
#Intersection area
inter_width = inter_rect_x2 - inter_rect_x1 + 1
inter_height = inter_rect_y2 - inter_rect_y1 + 1
if inter_width > 0 and inter_height > 0:#strong condition
inter_area = inter_width * inter_height
#Union Area
b1_area = (b1_x2 - b1_x1 + 1)*(b1_y2 - b1_y1 + 1)
b2_area = (b2_x2 - b2_x1 + 1)*(b2_y2 - b2_y1 + 1)
iou = inter_area / (b1_area + b2_area - inter_area)
else:
iou = 0
return iou
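# Worked example (added): for box1 = (0, 0, 10, 10) and box2 = (5, 5, 15, 15) the
# intersection is 6 * 6 = 36 (the "+ 1" makes the pixel coordinates inclusive), each box
# covers 11 * 11 = 121, so IoU = 36 / (121 + 121 - 36) ≈ 0.175.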
def save_newanno(message, path):
    with open(path,'a') as ann: # open in append mode
ann.write(message)
ann.write('\n')
def crop4patches(img_prefix,img_writen,istrain=True):
if not os.path.exists(img_writen+'annotations/'):
os.makedirs(img_writen+'annotations/')
if not os.path.exists(img_writen+'images/'):
os.makedirs(img_writen+'images/')
img_infos = []
img_file = img_prefix+'images/'
all_imgs_files = os.listdir(img_file)
for img_file in all_imgs_files:
img_id = img_file.split('.')[0]
anno_name ='annotations/{}.txt'.format(img_id)
img_name = 'images/{}.jpg'.format(img_id)
        # print(filename)
print("dealing with {}".format(img_name))
img_path = osp.join(img_prefix, img_name)
anno_path = osp.join(img_prefix,anno_name)
img = cv2.imread(img_path)
h,w,c = img.shape
print("h {}".format(h))
print("w {}".format(w))
patch_width = int(w) // 2
patch_height = int(h) // 2
bboxes = []
bboxes.append(np.array([0,0,patch_width,patch_height]))
bboxes.append(np.array([0,patch_height,patch_width,h]))
bboxes.append(np.array([patch_width,0,w,patch_height]))
bboxes.append(np.array([patch_width,patch_height,w,h]))
padw = (w-patch_width)//2
padh = (h-patch_height)//2
if istrain:
bboxes.append(np.array([padw,padh,w-padw,h-padh]))
bboxes = np.array(bboxes)
img_patches = mmcv.imcrop(img,bboxes,scale=1.0)
for jj in range(len(img_patches)):
if istrain:
assert (len(img_patches)) == 5
else:
assert (len(img_patches)) == 4
cv2.imwrite(img_writen+'images/{}_{}.jpg'.format(img_id,jj+1),img_patches[jj])
with open(anno_path,'r') as ann:
note = ann.readlines()
        # compute the annotations for the central patch
if istrain:
for item in note:
values_str = item.split(',')#list()
bbox_left,bbox_top,bbox_width,bbox_height,score,object_category,\
truncation,occulusion = int(values_str[0]),int(values_str[1]),\
int(values_str[2]),int(values_str[3]),int(values_str[4]),int(values_str[5]),\
int(values_str[6]),int(values_str[7])
# in central patch
if bbox_left>padw and bbox_top>padh and bbox_left<w-padw and bbox_top < h-padh:
if bbox_left+bbox_width>w-padw or bbox_top+bbox_height>h-padh:
if bbox_iou((bbox_left,bbox_top,bbox_left+bbox_width,bbox_top+bbox_height),\
(bbox_left,bbox_top,min(w-padw,bbox_left+bbox_width),min(h-padh,bbox_top+bbox_height))) > 0.5:
message = str(bbox_left-padw)+','+str(bbox_top-padh)+','+str(min(w-padw,bbox_left+bbox_width)-bbox_left)+','+str(min(h-padh,bbox_top+bbox_height)-bbox_top)\
+','+str(score)+','+str(object_category)+','+str(1)+','+str(occulusion)
path = img_writen+'annotations/{}_{}.txt'.format(img_id,5)
save_newanno(message,path)
continue
else:
continue
else:
message = str(bbox_left-padw)+','+str(bbox_top-padh)+','+str(min(w-padw,bbox_left+bbox_width)-bbox_left)+','+str(min(h-padh,bbox_top+bbox_height)-bbox_top)\
+','+str(score)+','+str(object_category)+','+str(truncation)+','+str(occulusion)
path = img_writen+'annotations/{}_{}.txt'.format(img_id,5)
#print("5loc {}".format(message))
save_newanno(message,path)
continue
for item in note:
values_str = item.split(',')#list()
bbox_left,bbox_top,bbox_width,bbox_height,score,object_category,\
truncation,occulusion = int(values_str[0]),int(values_str[1]),\
int(values_str[2]),int(values_str[3]),int(values_str[4]),int(values_str[5]),\
int(values_str[6]),int(values_str[7])
            if bbox_left < patch_width and bbox_top < patch_height:# top-left patch
                if bbox_left+bbox_width> patch_width or bbox_top+bbox_height > patch_height:# extends past the patch boundary
if bbox_iou((bbox_left,bbox_top,bbox_left+bbox_width,bbox_top+bbox_height),\
(bbox_left,bbox_top,min(patch_width,bbox_left+bbox_width),min(patch_height,bbox_top+bbox_height))) > 0.5:
#save
message = str(bbox_left-0)+','+str(bbox_top-0)+','+str(min(patch_width,bbox_left+bbox_width)-bbox_left)+','+str(min(patch_height,bbox_top+bbox_height)-bbox_top)\
+','+str(score)+','+str(object_category)+','+str(1)+','+str(occulusion)
path = img_writen+'annotations/{}_{}.txt'.format(img_id,1)
save_newanno(message,path)
continue
                    else:# don't save
continue
                else: # fully inside, save directly
message = str(bbox_left-0)+','+str(bbox_top-0)+','+str(min(patch_width,bbox_left+bbox_width)-bbox_left)+','+str(min(patch_height,bbox_top+bbox_height)-bbox_top)\
+','+str(score)+','+str(object_category)+','+str(truncation)+','+str(occulusion)
path = img_writen+'annotations/{}_{}.txt'.format(img_id,1)
save_newanno(message,path)
#print("1loc {}".format(message))
continue
            # bottom-left patch
if bbox_left< patch_width and bbox_top >= patch_height:
                if bbox_top+bbox_height > h:# original annotation is out of range
raise IOError
                if bbox_left+bbox_width > patch_width:# extends past the patch boundary
if bbox_iou((bbox_left,bbox_top,bbox_left+bbox_width,bbox_top+bbox_height),\
(bbox_left,bbox_top,min(patch_width,bbox_left+bbox_width),min(patch_height,bbox_top+bbox_height))) > 0.5:
#save
message = str(bbox_left-0)+','+str(bbox_top-patch_height)+','+str(min(patch_width,bbox_left+bbox_width)-bbox_left)+','+str(min(h,bbox_top+bbox_height)-bbox_top)\
+','+str(score)+','+str(object_category)+','+str(1)+','+str(occulusion)
path = img_writen+'annotations/{}_{}.txt'.format(img_id,2)
save_newanno(message,path)
continue
                    else:# don't save
continue
else:
#save
message = str(bbox_left-0)+','+str(bbox_top-patch_height)+','+str(min(patch_width,bbox_left+bbox_width)-bbox_left)+','+str(min(h,bbox_top+bbox_height)-bbox_top)\
+','+str(score)+','+str(object_category)+','+str(truncation)+','+str(occulusion)
path = img_writen+'annotations/{}_{}.txt'.format(img_id,2)
save_newanno(message,path)
#print("2loc {}".format(message))
continue
            # top-right patch
if bbox_left >= patch_width and bbox_top < patch_height:
if bbox_left + bbox_width > w:
raise IOError
                if bbox_top + bbox_height > patch_height:# extends past the patch boundary
if bbox_iou((bbox_left,bbox_top,bbox_left+bbox_width,bbox_top+bbox_height),\
(bbox_left,bbox_top,min(patch_width,bbox_left+bbox_width),min(patch_height,bbox_top+bbox_height))) > 0.5:
#save
message = str(bbox_left-patch_width)+','+str(bbox_top-0)+','+str(min(w,bbox_left+bbox_width)-bbox_left)+','+str(min(patch_height,bbox_top+bbox_height)-bbox_top)\
                        +','+str(score)+','+str(object_category)+','+str(1)+','+str(occulusion)# must be marked as truncated
path = img_writen+'annotations/{}_{}.txt'.format(img_id,3)
save_newanno(message,path)
continue
                    else:# don't save
continue
else:
#save
message = str(bbox_left-patch_width)+','+str(bbox_top-0)+','+str(min(w,bbox_left+bbox_width)-bbox_left)+','+str(min(patch_height,bbox_top+bbox_height)-bbox_top)\
+','+str(score)+','+str(object_category)+','+str(truncation)+','+str(occulusion)
path = img_writen+'annotations/{}_{}.txt'.format(img_id,3)
save_newanno(message,path)
#print("3loc {}".format(message))
continue
            # bottom-right patch
if bbox_left >= patch_width and bbox_top >= patch_height:
if bbox_left+bbox_width>w or bbox_height+bbox_top>h:
raise IOError
                # the fourth (bottom-right) region can never extend past the boundary
message = str(bbox_left-patch_width)+','+str(bbox_top-patch_height)+','+str(bbox_width)+','+str(bbox_height)\
+','+str(score)+','+str(object_category)+','+str(truncation)+','+str(occulusion)
path = img_writen+'annotations/{}_{}.txt'.format(img_id,4)
save_newanno(message,path)
#print("4loc {}".format(message))
continue
        # check whether a patch image ended up with no annotation file; if so, delete it
for jj in range(len(img_patches)):
if istrain:
assert (len(img_patches)) == 5
else:
assert (len(img_patches)) == 4
if not os.path.exists(img_writen+'annotations/{}_{}.txt'.format(img_id,jj+1)):
os.remove(img_writen+'images/{}_{}.jpg'.format(img_id,jj+1))
#path = img_writen+'annotations/{}_{}.txt'.format(img_id,jj+1)
                #with open(path,'w') as ann: # append mode
# pass
#print("empty {}".format('annotations/{}_{}.jpg'.format(img_id,jj+1)))
new_list = os.listdir(img_writen+'images/')
new_list_show = []
new_list_show.extend(new_list[:100])
new_list_show.extend(new_list[500:600])
for ii,item in enumerate(new_list_show):
showimg = cv2.imread(img_writen+'images/'+item)
id = item.split('.')[0]
annotation = img_writen+'annotations/'+id+'.txt'
#if not os.path.exists(annotation):
# continue
with open(annotation,'r') as ann:
note = ann.readlines()
bboxes = []
for jj in note:
values_str = jj.split(',')#list()
bbox_left,bbox_top,bbox_width,bbox_height,score,object_category,\
truncation,occulusion = int(values_str[0]),int(values_str[1]),\
int(values_str[2]),int(values_str[3]),int(values_str[4]),int(values_str[5]),\
int(values_str[6]),int(values_str[7])
bboxes.append(np.array([bbox_left,bbox_top,bbox_left+bbox_width,bbox_top+bbox_height]))
bboxes = np.array(bboxes)
print('/home/share2/VisDrone2019/vispatch/'+item)
if istrain:
mmcv.imshow_bboxes(showimg,bboxes,show=False,out_file='/home/share2/VisDrone2019/TASK1/trainpatch/'+item)
else:
mmcv.imshow_bboxes(showimg,bboxes,show=False,out_file='/home/share2/VisDrone2019/TASK1/valpatch/'+item)
if __name__ == '__main__':
import fire
fire.Fire()
#img_prefix = '/home/share2/VisDrone2019/TASK1/VisDrone2019-DET-val/'
#img_writen= '/home/share2/VisDrone2019/TASK1/VisDrone2019-DET-val-patches/'
#crop4patches(img_prefix=img_prefix,img_writen=img_writen,istrain=False)
| 53.05098
| 190
| 0.53563
| 1,647
| 13,528
| 4.139648
| 0.112325
| 0.066882
| 0.059842
| 0.054855
| 0.675125
| 0.64095
| 0.600176
| 0.567762
| 0.559255
| 0.533441
| 0
| 0.023387
| 0.329908
| 13,528
| 254
| 191
| 53.259843
| 0.728737
| 0.070373
| 0
| 0.514851
| 0
| 0
| 0.046514
| 0.0268
| 0
| 0
| 0
| 0
| 0.019802
| 1
| 0.019802
| false
| 0
| 0.044554
| 0
| 0.084158
| 0.019802
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2d008457b1988d06b4f36156a0cb0305d850324
| 1,121
|
py
|
Python
|
rabbitgetapi/__main__.py
|
Sidon/get-rabbitmq-messages
|
8feff8c9b9edee863d875966f5e5f3a5eb6ab06a
|
[
"MIT"
] | 11
|
2022-01-10T13:49:39.000Z
|
2022-01-11T05:57:45.000Z
|
rabbitgetapi/__main__.py
|
Sidon/get-rabbitmq-messages
|
8feff8c9b9edee863d875966f5e5f3a5eb6ab06a
|
[
"MIT"
] | null | null | null |
rabbitgetapi/__main__.py
|
Sidon/get-rabbitmq-messages
|
8feff8c9b9edee863d875966f5e5f3a5eb6ab06a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyleft 2021 Sidon Duarte
#
import http
import sys
from typing import Any
import colorama
import requests
from rabbitgetapi import cli
from rabbitgetapi import exceptions
from rabbitgetapi import build_parser
def main() -> Any:
try:
result = cli.dispatch(sys.argv[1:])
except requests.HTTPError as exc:
status_code = exc.response.status_code
status_phrase = http.HTTPStatus(status_code).phrase
result = (
f"{exc.__class__.__name__}: {status_code} {status_phrase} "
f"from {exc.response.url}\n"
f"{exc.response.reason}"
)
except exceptions.GetRmqApiException as exc:
result = f"{exc.__class__.__name__}: {exc.args[0]}"
return _format_error(result) if isinstance(result, str) else result
def _format_error(message: str) -> str:
pre_style, post_style = "", ""
if not cli.args.no_color:
colorama.init()
pre_style, post_style = colorama.Fore.RED, colorama.Style.RESET_ALL
return f"{pre_style}{message}{post_style}"
if __name__ == "__main__":
sys.exit(main())
| 26.069767
| 75
| 0.674398
| 145
| 1,121
| 4.917241
| 0.455172
| 0.056101
| 0.092567
| 0.061711
| 0.053296
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007973
| 0.216771
| 1,121
| 42
| 76
| 26.690476
| 0.8041
| 0.042819
| 0
| 0
| 0
| 0
| 0.169159
| 0.096262
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.266667
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2d2914bf2009ddae6cb71f0693560922df3f83f
| 12,182
|
py
|
Python
|
SST/datasets/wrapperpolicy.py
|
shaoshitong/torchdistill
|
709ca2d59442090d73a554d363e4c5e37538c707
|
[
"MIT"
] | 1
|
2022-03-25T05:05:55.000Z
|
2022-03-25T05:05:55.000Z
|
SST/datasets/wrapperpolicy.py
|
shaoshitong/torchdistill
|
709ca2d59442090d73a554d363e4c5e37538c707
|
[
"MIT"
] | null | null | null |
SST/datasets/wrapperpolicy.py
|
shaoshitong/torchdistill
|
709ca2d59442090d73a554d363e4c5e37538c707
|
[
"MIT"
] | null | null | null |
import os
import math
import random
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.datasets
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import *
from torchvision import datasets, transforms
from PIL import Image, ImageEnhance, ImageOps
from torchdistill.datasets.wrapper import register_dataset_wrapper, BaseDatasetWrapper
def rotate_with_fill(img, magnitude):
rot = img.convert('RGBA').rotate(magnitude)
return Image.composite(rot, Image.new('RGBA', rot.size, (128,) * 4), rot).convert(img.mode)
def shearX(img,magnitude,fillcolor):
return img.transform(img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),Image.BICUBIC, fillcolor=fillcolor)
def shearY(img,magnitude,fillcolor):
return img.transform(img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),Image.BICUBIC, fillcolor=fillcolor)
def translateX(img,magnitude,fillcolor):
return img.transform( img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),fillcolor=fillcolor)
def translateY(img,magnitude,fillcolor):
return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),fillcolor=fillcolor)
def rotate(img,magnitude,fillcolor):
return rotate_with_fill(img, magnitude)
def color(img,magnitude,fillcolor):
return ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1]))
def posterize(img,magnitude,fillcolor):
return ImageOps.posterize(img, magnitude)
def solarize(img,magnitude,fillcolor):
return ImageOps.solarize(img, magnitude)
def contrast(img,magnitude,fillcolor):
return ImageEnhance.Contrast(img).enhance(1 + magnitude * random.choice([-1, 1]))
def sharpness(img,magnitude,fillcolor):
return ImageEnhance.Sharpness(img).enhance(1 + magnitude * random.choice([-1, 1]))
def brightness(img,magnitude,fillcolor):
return ImageEnhance.Brightness(img).enhance(1 + magnitude * random.choice([-1, 1]))
def autocontrast(img,magnitude,fillcolor):
return ImageOps.autocontrast(img)
def equalize(img,magnitude,fillcolor):
return ImageOps.equalize(img)
def invert(img,magnitude,fillcolor):
return ImageOps.invert(img)
def rand_bbox(size, lam):
W = size[1]
H = size[2]
cut_rat = np.sqrt(1. - lam)
    cut_w = int(W * cut_rat)
    cut_h = int(H * cut_rat)
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
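# Note (added): rand_bbox implements CutMix-style box sampling -- the box area is roughly
# (1 - lam) of the image, e.g. lam = 0.75 gives cut_rat = 0.5 and a box of about half the
# width and half the height, clipped to the image borders.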
class SubPolicy:
def __init__(self, p1, operation1, magnitude_idx1, fillcolor=(128, 128, 128)):
self.fillcolor=fillcolor
ranges = {
'shearX': np.linspace(0, 0.3, 10),
'shearY': np.linspace(0, 0.3, 10),
'translateX': np.linspace(0, 150 / 331, 10),
'translateY': np.linspace(0, 150 / 331, 10),
'rotate': np.linspace(0, 30, 10),
'color': np.linspace(0.0, 0.9, 10),
            'posterize': np.round(np.linspace(8, 4, 10), 0).astype(int),
'solarize': np.linspace(256, 0, 10),
'contrast': np.linspace(0.0, 0.9, 10),
'sharpness': np.linspace(0.0, 0.9, 10),
'brightness': np.linspace(0.0, 0.9, 10),
'autocontrast': [0] * 10,
'equalize': [0] * 10,
'invert': [0] * 10
}
func = {
'shearX': shearX,
'shearY': shearY,
'translateX': translateX,
'translateY': translateY,
'rotate': rotate,
'color': color,
'posterize': posterize,
'solarize': solarize,
'contrast': contrast,
'sharpness': sharpness,
'brightness': brightness,
'autocontrast': autocontrast,
'equalize': equalize,
'invert': invert
}
self.p1 = p1
self.operation1 = func[operation1]
self.magnitude1 = ranges[operation1][magnitude_idx1]
def __call__(self, img):
label=0
if random.random() < self.p1:
img = self.operation1(img, self.magnitude1,self.fillcolor)
label=1
return img,label
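# Illustrative usage (added): a SubPolicy applies its single operation with probability p1
# and reports whether it fired, e.g.
# policy = SubPolicy(0.5, 'rotate', 2)   # rotate by about 6.7 degrees (linspace(0, 30, 10)[2])
# img_out, applied = policy(img)         # applied is 1 if the rotation ran, else 0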
@register_dataset_wrapper
class PolicyDataset(BaseDatasetWrapper):
def __init__(self,org_dataset,mixcut=False,mixcut_prob=0.1,beta=0.3):
super(PolicyDataset, self).__init__(org_dataset)
self.transform=org_dataset.transform
org_dataset.transform=None
self.policies = [
SubPolicy(0.5, 'invert', 7),
SubPolicy(0.5, 'rotate', 2),
SubPolicy(0.5, 'sharpness', 1),
SubPolicy(0.5, 'shearY', 8),
SubPolicy(0.5, 'autocontrast', 8),
SubPolicy(0.5, 'color', 3),
SubPolicy(0.5, 'sharpness', 9),
SubPolicy(0.5, 'equalize', 5),
SubPolicy(0.5, 'contrast', 7),
SubPolicy(0.5, 'translateY', 3),
SubPolicy(0.5, 'brightness',6),
SubPolicy(0.5, 'solarize', 2),
SubPolicy(0.5, 'translateX',3),
SubPolicy(0.5, 'shearX', 8),
]
self.policies_len=len(self.policies)
self.beta=beta
self.mixcut_prob=mixcut_prob
self.mixcut=mixcut
def __getitem__(self, index):
sample, target_a, supp_dict = super(PolicyDataset, self).__getitem__(index)
sample=self.transform(sample).detach()
r = np.random.rand(1)
if self.mixcut and self.beta > 0 and r < self.mixcut_prob:
lam = np.random.beta(self.beta, self.beta)
rand_index=random.randint(0,len(self)-1)
rsample,target_b, supp_dict = super(PolicyDataset, self).__getitem__(rand_index)
rsample = self.transform(rsample)
bbx1, bby1, bbx2, bby2 = rand_bbox(sample.size(), lam)
sample[ :, bbx1:bbx2, bby1:bby2] = rsample[ :, bbx1:bbx2, bby1:bby2]
# adjust lambda to exactly match pixel ratio
lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (sample.size()[-1] * sample.size()[-2]))
target=F.one_hot(torch.LongTensor([target_a]),10)*lam+ F.one_hot(torch.LongTensor([target_b]),10)*(1.-lam)
else:
target=target_a
new_sample=transforms.ToPILImage()(sample)
policy_index = torch.zeros(self.policies_len).float()
for i in range(self.policies_len):
new_sample,label=self.policies[i](new_sample)
policy_index[i]=label
new_sample=self.transform(new_sample).detach()
if not isinstance(target,torch.Tensor):
target=F.one_hot(torch.LongTensor([target]),10)
target=target.expand(2,-1) # 2,1
policy_target=torch.stack([torch.zeros(self.policies_len).float(),policy_index],0) # 2, policy_len
target=torch.cat([target,policy_target],1) # 2,num_classes+policy_len
sample=torch.stack([
sample,
new_sample,
])
return sample,target,supp_dict
@register_dataset_wrapper
class PolicyDatasetC100(BaseDatasetWrapper):
def __init__(self,org_dataset,mixcut=False,mixcut_prob=0.1,beta=0.3):
super(PolicyDatasetC100, self).__init__(org_dataset)
self.transform=org_dataset.transform
org_dataset.transform=None
self.policies = [
SubPolicy(0.5,'autocontrast', 2),
SubPolicy(0.5, 'contrast', 3),
SubPolicy(0.5, 'posterize', 0),
SubPolicy(0.5, 'solarize', 4),
SubPolicy(0.5, 'translateY', 8),
SubPolicy(0.5, 'shearX', 5),
SubPolicy(0.5, 'color', 3),
SubPolicy(0.5, 'shearY', 0),
SubPolicy(0.5, 'translateX', 1),
SubPolicy(0.5, 'sharpness', 5),
SubPolicy(0.5, 'invert', 4),
SubPolicy(0.5, 'color', 4),
SubPolicy(0.5, 'equalize', 8),
SubPolicy(0.5, 'rotate', 3),
]
self.beta=beta
self.mixcut_prob=mixcut_prob
self.mixcut=mixcut
self.policies_len=len(self.policies)
def __getitem__(self, index):
sample, target_a, supp_dict = super(PolicyDatasetC100, self).__getitem__(index)
sample=self.transform(sample).detach()
r = np.random.rand(1)
if self.mixcut and self.beta > 0 and r < self.mixcut_prob:
lam = np.random.beta(self.beta, self.beta)
rand_index=random.randint(0,len(self)-1)
rsample,target_b, supp_dict = super(PolicyDatasetC100, self).__getitem__(rand_index)
rsample = self.transform(rsample)
bbx1, bby1, bbx2, bby2 = rand_bbox(sample.size(), lam)
sample[ :, bbx1:bbx2, bby1:bby2] = rsample[ :, bbx1:bbx2, bby1:bby2]
# adjust lambda to exactly match pixel ratio
lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (sample.size()[-1] * sample.size()[-2]))
target=F.one_hot(torch.LongTensor([target_a]),100)*lam+ F.one_hot(torch.LongTensor([target_b]),100)*(1.-lam)
else:
target=target_a
new_sample=transforms.ToPILImage()(sample)
policy_index = torch.zeros(self.policies_len).float()
for i in range(self.policies_len):
new_sample,label=self.policies[i](new_sample)
policy_index[i]=label
new_sample=self.transform(new_sample).detach()
if not isinstance(target,torch.Tensor):
target=F.one_hot(torch.LongTensor([target]),100)
target=target.expand(2,-1) # 2,1
policy_target=torch.stack([torch.zeros(self.policies_len).float(),policy_index],0) # 2, policy_len
target=torch.cat([target,policy_target],1) # 2,num_classes+policy_len
sample=torch.stack([
sample,
new_sample,
])
return sample,target,supp_dict
def policy_classes_compute(hot):
l=hot.shape[0]
exp=torch.arange(0,l)
weight=2**exp
return (hot*weight).sum().long()
@register_dataset_wrapper
class ICPDataset(BaseDatasetWrapper):
def __init__(self,org_dataset):
super(ICPDataset, self).__init__(org_dataset)
self.transform=org_dataset.transform
org_dataset.transform=None
self.policies = [
SubPolicy(0.5, 'invert', 7),
SubPolicy(0.5, 'rotate', 2),
SubPolicy(0.5, 'sharpness', 1),
SubPolicy(0.5, 'shearY', 8),
SubPolicy(0.5, 'autocontrast', 8),
SubPolicy(0.5, 'color', 3),
SubPolicy(0.5, 'sharpness', 9),
SubPolicy(0.5, 'equalize', 5),
SubPolicy(0.5, 'contrast', 7),
SubPolicy(0.5, 'translateY', 3),
SubPolicy(0.5, 'brightness',6),
SubPolicy(0.5, 'solarize', 2),
SubPolicy(0.5, 'translateX',3),
SubPolicy(0.5, 'shearX', 8),
]
self.policies_len=len(self.policies)
def __getitem__(self, index):
sample,target,supp_dict=super(ICPDataset, self).__getitem__(index)
policy_index=torch.zeros(self.policies_len).float()
new_sample=sample
for i in range(self.policies_len):
new_sample,label=self.policies[i](new_sample)
policy_index[i]=label
new_sample=self.transform(new_sample).detach()
sample=self.transform(sample).detach()
if isinstance(target,torch.Tensor) and target.ndim==2 and target.shape[-1]!=1:
target=target.argmax(1)
elif not isinstance(target,torch.Tensor):
target=torch.LongTensor([target])
identity_target=torch.LongTensor([index]).unsqueeze(0).expand(2,-1)
classes_target=target.unsqueeze(0).expand(2,-1) # 2,1
policy_target = torch.stack([torch.zeros(self.policies_len).int(), policy_index.int()], 0) # 2, policy_len
        target=torch.cat([identity_target,classes_target,policy_target],1) # 2, 2+policy_len
sample=torch.stack([
sample,
new_sample,
])
return sample,target,supp_dict
| 40.471761
| 136
| 0.613692
| 1,551
| 12,182
| 4.693746
| 0.112186
| 0.057692
| 0.063462
| 0.051923
| 0.674038
| 0.601923
| 0.562363
| 0.540522
| 0.514011
| 0.493681
| 0
| 0.044679
| 0.244869
| 12,182
| 300
| 137
| 40.606667
| 0.746712
| 0.015843
| 0
| 0.456929
| 0
| 0
| 0.047424
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093633
| false
| 0
| 0.05618
| 0.052434
| 0.243446
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2d36fb4456d02f1a3cbf08824eb8cded948400d
| 3,029
|
py
|
Python
|
{{cookiecutter.project_slug}}/backend/app/app/tests/crud/test_item.py
|
Gjacquenot/full-stack-fastapi-couchbase
|
5df16af2ffcb22d141c5e689a220611005747939
|
[
"MIT"
] | 353
|
2019-01-03T09:53:17.000Z
|
2022-03-27T12:24:45.000Z
|
{{cookiecutter.project_slug}}/backend/app/app/tests/crud/test_item.py
|
Gjacquenot/full-stack-fastapi-couchbase
|
5df16af2ffcb22d141c5e689a220611005747939
|
[
"MIT"
] | 21
|
2019-01-06T21:50:40.000Z
|
2021-08-19T11:33:15.000Z
|
{{cookiecutter.project_slug}}/backend/app/app/tests/crud/test_item.py
|
Gjacquenot/full-stack-fastapi-couchbase
|
5df16af2ffcb22d141c5e689a220611005747939
|
[
"MIT"
] | 72
|
2019-03-07T21:59:55.000Z
|
2022-03-18T04:59:22.000Z
|
from app import crud
from app.db.database import get_default_bucket
from app.models.config import ITEM_DOC_TYPE
from app.models.item import ItemCreate, ItemUpdate
from app.tests.utils.user import create_random_user
from app.tests.utils.utils import random_lower_string
def test_create_item():
title = random_lower_string()
description = random_lower_string()
id = crud.utils.generate_new_id()
item_in = ItemCreate(title=title, description=description)
bucket = get_default_bucket()
user = create_random_user()
item = crud.item.upsert(
bucket=bucket, id=id, doc_in=item_in, owner_username=user.username, persist_to=1
)
assert item.id == id
assert item.type == ITEM_DOC_TYPE
assert item.title == title
assert item.description == description
assert item.owner_username == user.username
def test_get_item():
title = random_lower_string()
description = random_lower_string()
id = crud.utils.generate_new_id()
item_in = ItemCreate(title=title, description=description)
bucket = get_default_bucket()
user = create_random_user()
item = crud.item.upsert(
bucket=bucket, id=id, doc_in=item_in, owner_username=user.username, persist_to=1
)
stored_item = crud.item.get(bucket=bucket, id=id)
assert item.id == stored_item.id
assert item.title == stored_item.title
assert item.description == stored_item.description
assert item.owner_username == stored_item.owner_username
def test_update_item():
title = random_lower_string()
description = random_lower_string()
id = crud.utils.generate_new_id()
item_in = ItemCreate(title=title, description=description)
bucket = get_default_bucket()
user = create_random_user()
item = crud.item.upsert(
bucket=bucket, id=id, doc_in=item_in, owner_username=user.username, persist_to=1
)
description2 = random_lower_string()
item_update = ItemUpdate(description=description2)
item2 = crud.item.update(
bucket=bucket,
id=id,
doc_in=item_update,
owner_username=item.owner_username,
persist_to=1,
)
assert item.id == item2.id
assert item.title == item2.title
assert item.description == description
assert item2.description == description2
assert item.owner_username == item2.owner_username
def test_delete_item():
title = random_lower_string()
description = random_lower_string()
id = crud.utils.generate_new_id()
item_in = ItemCreate(title=title, description=description)
bucket = get_default_bucket()
user = create_random_user()
item = crud.item.upsert(
bucket=bucket, id=id, doc_in=item_in, owner_username=user.username, persist_to=1
)
item2 = crud.item.remove(bucket=bucket, id=id, persist_to=1)
item3 = crud.item.get(bucket=bucket, id=id)
assert item3 is None
assert item2.id == id
assert item2.title == title
assert item2.description == description
assert item2.owner_username == user.username
| 35.22093
| 88
| 0.719379
| 407
| 3,029
| 5.113022
| 0.115479
| 0.06247
| 0.081691
| 0.061509
| 0.606439
| 0.581932
| 0.540605
| 0.508409
| 0.476694
| 0.476694
| 0
| 0.008509
| 0.18521
| 3,029
| 85
| 89
| 35.635294
| 0.834684
| 0
| 0
| 0.441558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.246753
| 1
| 0.051948
| false
| 0
| 0.077922
| 0
| 0.12987
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2d52797a4915efe6cf6a4bf7bb065954ba40d31
| 12,271
|
py
|
Python
|
03_ML_training.py
|
YunxiaoRen/ML-iAMR
|
6bab74b4dccb5da8bc6155a7ee7ffa9d4811b894
|
[
"MIT"
] | 4
|
2021-10-10T15:31:23.000Z
|
2022-02-10T00:17:55.000Z
|
03_ML_training.py
|
YunxiaoRen/ML-iAMR
|
6bab74b4dccb5da8bc6155a7ee7ffa9d4811b894
|
[
"MIT"
] | null | null | null |
03_ML_training.py
|
YunxiaoRen/ML-iAMR
|
6bab74b4dccb5da8bc6155a7ee7ffa9d4811b894
|
[
"MIT"
] | 2
|
2021-12-07T22:04:54.000Z
|
2022-02-10T07:14:42.000Z
|
##**************************************************************************************##
## Step1. Load Packages and Input Data ##
##**************************************************************************************##
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm,metrics
from sklearn.svm import SVC,LinearSVC
from sklearn.model_selection import KFold,StratifiedKFold
from sklearn.metrics import matthews_corrcoef,auc, roc_curve,plot_roc_curve, plot_precision_recall_curve,classification_report, confusion_matrix,average_precision_score, precision_recall_curve
from pandas.core.frame import DataFrame
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import imblearn
from collections import Counter
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
############################# Step2: input data processing #####################
## giessen data
gi_data = np.load("/gi_CIP_FCGR200/alt_cnn_input.npy")
gi_pheno = pd.read_csv("CIP_gi_pheno.csv",index_col=0)
gi_data.shape,gi_pheno.shape
gi_data2 = gi_data.reshape(900,40000)
gi_pheno2 = gi_pheno.values
gi_pheno3 = gi_pheno2.reshape(900,)
gi_data2.shape,gi_pheno3.shape
X = gi_data2
y = gi_pheno3
X.shape,y.shape
## pubdata
pub_data = np.load("/pub_CIP_FCGR200/alt_cnn_input.npy")
pub_pheno = pd.read_csv("CIP_pub_pheno.csv",index_col=0)
pub_data.shape
pub_data2 = pub_data.reshape(1496,40000)
pub_pheno2 = pub_pheno.values
pub_pheno3 = pub_pheno2.reshape(1496,)
pub_data2.shape,pub_pheno3.shape
x_test = pub_data2
y_test = pub_pheno3
undersample = RandomUnderSampler(sampling_strategy='majority')
pub_x_under,pub_y_under=undersample.fit_resample(pub_data2,pub_pheno3)
print(Counter(pub_y_under))
##**************************************************************************************##
## Step2. Training and evaluation of RF,LR, SVM ##
##**************************************************************************************##
## cross validation
cv = StratifiedKFold(n_splits=5)
rf = RandomForestClassifier(n_estimators=200, random_state=0)
lr = LogisticRegression(solver = 'lbfgs',max_iter=1000)
svm = SVC(kernel='linear', probability=True)
##*************** F1 + ROC curve
rf_tprs = []
rf_prs = []
rf_roc_aucs = []
rf_pr_aucs = []
rf_f1_matrix_out = []
rf_f1_report_out = []
rf_MCC_out = []
rf_pred_cls_out = []
rf_pred_prob_out = []
rf_y_test_out = []
rf_mean_fpr = np.linspace(0, 1, 100)
rf_mean_recall = np.linspace(0, 1, 100)
## LR
lr_tprs = []
lr_prs = []
lr_roc_aucs = []
lr_pr_aucs = []
lr_f1_matrix_out = []
lr_f1_report_out = []
lr_MCC_out = []
lr_pred_cls_out = []
lr_pred_prob_out = []
lr_y_test_out = []
lr_mean_fpr = np.linspace(0, 1, 100)
lr_mean_recall = np.linspace(0, 1, 100)
## SVM
svm_tprs = []
svm_prs = []
svm_roc_aucs = []
svm_pr_aucs = []
svm_f1_matrix_out = []
svm_f1_report_out = []
svm_MCC_out = []
svm_pred_cls_out = []
svm_pred_prob_out = []
svm_y_test_out = []
svm_mean_fpr = np.linspace(0, 1, 100)
svm_mean_recall = np.linspace(0, 1, 100)
fig,[ax1,ax2,ax3] = plt.subplots(nrows=1,ncols=3,figsize=(15, 4))
for i, (train, test) in enumerate(cv.split(X, y)):
## train the new model
rf.fit(X[train], y[train])
## roc curve
rf_viz = plot_roc_curve(rf, X[test], y[test],name='K fold {}'.format(i),alpha=0.3, lw=1,ax=ax1)
rf_interp_tpr = np.interp(rf_mean_fpr, rf_viz.fpr, rf_viz.tpr)
rf_interp_tpr[0] = 0.0
rf_tprs.append(rf_interp_tpr)
rf_roc_aucs.append(rf_viz.roc_auc)
## evaluation metrics
rf_pred_cls = rf.predict(X[test])
rf_pred_prob = rf.predict_proba(X[test])[:,1]
rf_f1_matrix = confusion_matrix(y[test],rf_pred_cls)
rf_f1_report = classification_report(y[test],rf_pred_cls)
rf_MCC = matthews_corrcoef(y[test],rf_pred_cls)
### save evalu_metrics out
rf_pred_cls_out.append(rf_pred_cls)
rf_pred_prob_out.append(rf_pred_prob)
rf_f1_matrix_out.append(rf_f1_matrix)
rf_f1_report_out.append(rf_f1_report)
rf_MCC_out.append(rf_MCC)
rf_y_test_out.append(y[test])
## LR
lr.fit(X[train], y[train])
## roc curve
lr_viz = plot_roc_curve(lr, X[test], y[test],name='K fold {}'.format(i),alpha=0.3, lw=1,ax=ax2)
lr_interp_tpr = np.interp(lr_mean_fpr, lr_viz.fpr, lr_viz.tpr)
lr_interp_tpr[0] = 0.0
lr_tprs.append(lr_interp_tpr)
lr_roc_aucs.append(lr_viz.roc_auc)
## evaluation metrics
lr_pred_cls = lr.predict(X[test])
lr_pred_prob = lr.predict_proba(X[test])[:,1]
lr_f1_matrix = confusion_matrix(y[test],lr_pred_cls)
lr_f1_report = classification_report(y[test],lr_pred_cls)
lr_MCC = matthews_corrcoef(y[test],lr_pred_cls)
### save evalu_metrics out
lr_pred_cls_out.append(lr_pred_cls)
lr_pred_prob_out.append(lr_pred_prob)
lr_f1_matrix_out.append(lr_f1_matrix)
lr_f1_report_out.append(lr_f1_report)
lr_MCC_out.append(lr_MCC)
lr_y_test_out.append(y[test])
## SVM
svm.fit(X[train], y[train])
## roc curve
svm_viz = plot_roc_curve(svm, X[test], y[test],name='K fold {}'.format(i),alpha=0.3, lw=1,ax=ax3)
svm_interp_tpr = np.interp(svm_mean_fpr, svm_viz.fpr, svm_viz.tpr)
svm_interp_tpr[0] = 0.0
svm_tprs.append(svm_interp_tpr)
svm_roc_aucs.append(svm_viz.roc_auc)
## evaluation metrics
svm_pred_cls = svm.predict(X[test])
svm_pred_prob = svm.predict_proba(X[test])[:,1]
svm_f1_matrix = confusion_matrix(y[test],svm_pred_cls)
svm_f1_report = classification_report(y[test],svm_pred_cls)
svm_MCC = matthews_corrcoef(y[test],svm_pred_cls)
### save evalu_metrics out
svm_pred_cls_out.append(svm_pred_cls)
svm_pred_prob_out.append(svm_pred_prob)
svm_f1_matrix_out.append(svm_f1_matrix)
svm_f1_report_out.append(svm_f1_report)
svm_MCC_out.append(svm_MCC)
svm_y_test_out.append(y[test])
#### save predit_prob out
np.save("CIP_gi_FCGR_RF_y_pred_prob_out.npy",rf_pred_prob_out)
np.save("CIP_gi_FCGR_RF_y_test_out.npy",rf_y_test_out)
np.save("CIP_gi_FCGR_LR_y_pred_prob_out.npy",lr_pred_prob_out)
np.save("CIP_gi_FCGR_LR_y_test_out.npy",lr_y_test_out)
np.save("CIP_gi_FCGR_SVM_y_pred_prob_out.npy",svm_pred_prob_out)
np.save("CIP_gi_FCGR_SVM_y_test_out.npy",svm_y_test_out)
#### evaluation
rf_eva_pred_prob = rf.predict_proba(pub_data2)[:,1]
lr_eva_pred_prob = lr.predict_proba(pub_data2)[:,1]
svm_eva_pred_prob = svm.predict_proba(pub_data2)[:,1]
np.save("CIP_FCGR_RF_test_y_pred_prob.npy",rf_eva_pred_prob)
np.save("CIP_FCGR_LR_test_y_pred_prob.npy",lr_eva_pred_prob)
np.save("CIP_FCGR_SVM_test_y_pred_prob.npy",svm_eva_pred_prob)
np.save("CIP_FCGR_test_y_out.npy",pub_pheno3)
#### evaluation for under sample
#pub_x_under,pub_y_under
rf_eva_under_pred_prob = rf.predict_proba(pub_x_under)[:,1]
lr_eva_under_pred_prob = lr.predict_proba(pub_x_under)[:,1]
svm_eva_under_pred_prob = svm.predict_proba(pub_x_under)[:,1]
##**************************************************************************************##
## Step3. Training and evaluation of CNN ##
##**************************************************************************************##
############################# Step1: load package #####################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from keras.utils import to_categorical
from keras.models import Sequential
from tensorflow.keras import activations
from sklearn.model_selection import KFold,StratifiedKFold
from keras.layers import Dense,Dropout, Flatten, Conv1D, Conv2D, MaxPooling1D,MaxPooling2D
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from keras.layers import BatchNormalization
############################# Step2: load metrics function #####################
### F1 score, precision, recall and accuracy metrics
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
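# Worked check (added): with precision = 0.8 and recall = 0.6 the harmonic mean is
# 2 * (0.8 * 0.6) / (0.8 + 0.6) = 0.96 / 1.4 ≈ 0.686, which is what f1_m returns
# (up to K.epsilon()).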
############################# Step3: input data processing #####################
X.shape,y.shape,pub_data2.shape,pub_pheno3.shape
#((900, 40000),(900,), (1496, 40000), (1496,))
x_train,x_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=123)
x_train.shape,x_test.shape,y_train.shape,y_test.shape
#((720, 40000), (180, 40000), (720,), (180,))
inputs = x_train.reshape(720,200,200,1)
inputs = inputs.astype('float32')
targets = to_categorical(y_train)
inputs.shape,targets.shape
x_test2 = x_test.reshape(180,200,200,1)
x_test2 = x_test2.astype('float32')
y_test2 = to_categorical(y_test)
pub_x_test = pub_data2.reshape(1496,200,200,1)
pub_x_test = pub_x_test.astype('float32')
pub_y_test = pub_pheno3
############################# Step4: model training #####################
batch_size = 8
no_classes = 2
no_epochs = 50
verbosity = 1
num_folds = 5
# Define the K-fold Cross Validator
kfold = KFold(n_splits=num_folds, shuffle=True)
# K-fold Cross Validation model evaluation
fold_no = 1
model_history=[]
for train, test in kfold.split(inputs, targets):
model = Sequential()
model.add(Conv2D(filters=8, kernel_size=3,activation='relu', input_shape=(200,200,1)))
model.add(BatchNormalization())
model.add(Conv2D(filters=8, kernel_size=3, padding='same', activation='relu'))
#model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2)))
model.add(Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
#model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(2,activation='softmax'))
# Compile the model
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['acc',f1_m,precision_m, recall_m])
# Generate a print
print('--------------------------------')
print(f'Training for fold {fold_no} ...')
## checkpoint for saving model
filepath="CIP_gi_FCGR_CNN_weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True,mode='max')
callbacks_list = [checkpoint]
# Fit data to model
train_model = model.fit(inputs[train], targets[train],batch_size=batch_size,epochs=no_epochs,callbacks=callbacks_list,verbose=verbosity,validation_data=(inputs[test], targets[test]))
model_history.append(train_model.history)
# Increase fold number
fold_no = fold_no + 1
########## (2) save model
model.save_weights('CIP_gi_FCGR_CNN.model.h5')
# save model history
from pandas.core.frame import DataFrame
model_out = DataFrame(model_history)
model_out.to_csv("CIP_gi_FCGR_CNN_model_history_out.csv",index=False)
############# Evaluation on pub data
### ROC
y_pred_keras = model.predict_proba(pub_x_test)
### evaluation for under-sample
undersample = RandomUnderSampler(sampling_strategy='majority')
pub_x_under,pub_y_under=undersample.fit_resample(pub_data2,pub_pheno3)
print(Counter(pub_y_under))
pub_x_under = pub_x_under.reshape(534,200,200,1)
pub_x_under = pub_x_under.astype('float32')
y_pred_keras = model.predict_proba(pub_x_under)
| 39.079618
| 193
| 0.677043
| 1,856
| 12,271
| 4.13847
| 0.140086
| 0.021482
| 0.017185
| 0.009113
| 0.408931
| 0.327562
| 0.217289
| 0.17719
| 0.12928
| 0.118995
| 0
| 0.03163
| 0.142042
| 12,271
| 313
| 194
| 39.204473
| 0.697948
| 0.142776
| 0
| 0.105727
| 0
| 0
| 0.074832
| 0.053757
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013216
| false
| 0
| 0.123348
| 0
| 0.14978
| 0.017621
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2d5cfe13e3252b73bc2d506fd5f87805ad7437d
| 6,660
|
py
|
Python
|
gdalhelpers/functions/create_points_at_angles_distance_in_direction.py
|
JanCaha/gdalhelpers
|
925ecb2552b697b5970617484f1fc259f844ba04
|
[
"MIT"
] | null | null | null |
gdalhelpers/functions/create_points_at_angles_distance_in_direction.py
|
JanCaha/gdalhelpers
|
925ecb2552b697b5970617484f1fc259f844ba04
|
[
"MIT"
] | null | null | null |
gdalhelpers/functions/create_points_at_angles_distance_in_direction.py
|
JanCaha/gdalhelpers
|
925ecb2552b697b5970617484f1fc259f844ba04
|
[
"MIT"
] | null | null | null |
from osgeo import ogr
from typing import List, Union
import math
import os
import warnings
import numpy as np
from gdalhelpers.checks import values_checks, datasource_checks, layer_checks
from gdalhelpers.helpers import layer_helpers, datasource_helpers, geometry_helpers
def create_points_at_angles_distance_in_direction(start_points: ogr.DataSource,
main_direction_point: ogr.DataSource,
distance: Union[int, float] = 10,
angle_offset: Union[int, float] = 10,
angle_density: Union[int, float] = 1,
angles_specification_degrees: bool = True,
input_points_id_field: str = None) -> ogr.DataSource:
"""
    Function that generates, for every `Feature` in `start_points`, a set of points at the specified `distance` in the direction of
`main_direction_point`.
Parameters
----------
start_points : ogr.DataSource
Points to generate new points around. Can be of geometrical types: `ogr.wkbPoint, ogr.wkbPoint25D,
ogr.wkbPointM, ogr.wkbPointZM`.
main_direction_point : ogr.DataSource
Layer with single feature that specifies the direction in which the new points are generated.
distance : float or int
Distance at which the new points are generated. Default value is `10` and it is specified in units of layer
`start_points`.
angle_offset : float or int
Specification of angle offset on each side from `main_direction_point`. The points are generated in interval
`[main_angle - angle_offset, main_angle + angle_offset]`, where `main_angle` is angle between specific feature
        of `start_points` and `main_direction_point`. Default value is `10`, which gives an overall angular width of `20`.
angle_density : float or int
        How densely points are generated within the interval given by `angle_offset`. Default value is `1`.
angles_specification_degrees : bool
        Are the angles specified in degrees? Default value is `True`; if `False`, the values are in radians.
input_points_id_field : str
        Name of an ID (or other integer) field from `start_points` that should be carried over to the resulting DataSource.
Returns
-------
ogr.DataSource
Virtual `ogr.DataSource` in memory with one layer (named `points`) containing the points.
Raises
------
    Various errors can be raised while checking the validity of the inputs.
Warns
-------
UserWarning
        If the field with the given name (`input_points_id_field`) is not present, or if it is not of type `ogr.OFTInteger`.
"""
output_points_ds = datasource_helpers.create_temp_gpkg_datasource()
datasource_checks.check_is_ogr_datasource(start_points, "start_points")
datasource_checks.check_is_ogr_datasource(main_direction_point, "main_direction_point")
values_checks.check_value_is_zero_or_positive(distance, "distance")
values_checks.check_number(angle_offset, "angle_offset")
values_checks.check_number(angle_density, "angle_density")
if angles_specification_degrees:
angle_offset = ((2*math.pi)/360)*angle_offset
angle_density = ((2*math.pi)/360)*angle_density
input_points_layer = start_points.GetLayer()
layer_checks.check_is_layer_geometry_type(input_points_layer, "input_points_layer", [ogr.wkbPoint, ogr.wkbPoint25D,
ogr.wkbPointM, ogr.wkbPointZM])
input_points_srs = input_points_layer.GetSpatialRef()
main_point_layer = main_direction_point.GetLayer()
layer_checks.check_is_layer_geometry_type(main_point_layer, "main_point_layer", [ogr.wkbPoint, ogr.wkbPoint25D,
ogr.wkbPointM, ogr.wkbPointZM])
layer_checks.check_number_of_features(main_point_layer, "main_point_layer", 1)
if input_points_id_field is not None:
if not layer_checks.does_field_exist(input_points_layer, input_points_id_field):
            warnings.warn(
                "Field {0} does not exist in {1}. Defaulting to FID.".format(input_points_id_field,
                                                                             os.path.basename(start_points.GetDescription()))
            )
            input_points_id_field = None
else:
if not layer_checks.is_field_of_type(input_points_layer, input_points_id_field, ogr.OFTInteger):
                warnings.warn(
                    "Field {0} in {1} is not `Integer`. Defaulting to FID.".format(input_points_id_field,
                                                                                   os.path.basename(start_points.GetDescription()))
                )
                input_points_id_field = None
if input_points_id_field is None:
field_name_id = "input_point_FID"
else:
field_name_id = "input_point_ID"
field_name_angle = "angle"
layer_helpers.create_layer_points(output_points_ds, input_points_srs, "points")
output_points_layer = output_points_ds.GetLayer()
fields = {field_name_id: ogr.OFTInteger,
field_name_angle: ogr.OFTReal}
layer_helpers.add_fields_from_dict(output_points_layer, fields)
output_points_def = output_points_layer.GetLayerDefn()
for main_feature in main_point_layer:
main_geom = main_feature.GetGeometryRef()
for feature in input_points_layer:
geom = feature.GetGeometryRef()
if input_points_id_field is None:
f_id = feature.GetFID()
else:
f_id = feature.GetField(input_points_id_field)
main_angle = geometry_helpers.angle_points(geom, main_geom)
angles = np.arange(main_angle - angle_offset,
np.nextafter(main_angle + angle_offset, np.Inf),
step=angle_density)
for angle in angles:
p = geometry_helpers.point_at_angle_distance(geom, distance, angle)
output_point_feature = ogr.Feature(output_points_def)
output_point_feature.SetGeometry(p)
values = {field_name_id: f_id,
field_name_angle: angle}
layer_helpers.add_values_from_dict(output_point_feature, values)
output_points_layer.CreateFeature(output_point_feature)
return output_points_ds
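# Hedged usage sketch (added; the file names below are hypothetical, not from the module):
# from osgeo import ogr
# starts = ogr.Open("start_points.gpkg")
# direction = ogr.Open("direction_point.gpkg")
# result_ds = create_points_at_angles_distance_in_direction(starts, direction,
#                                                           distance=50, angle_offset=15,
#                                                           angle_density=5)
# points_layer = result_ds.GetLayer()   # single layer named "points"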
| 43.815789
| 131
| 0.640841
| 793
| 6,660
| 5.064313
| 0.218159
| 0.062998
| 0.042082
| 0.058267
| 0.332918
| 0.231076
| 0.166335
| 0.12002
| 0.086155
| 0.038845
| 0
| 0.006578
| 0.292342
| 6,660
| 151
| 132
| 44.10596
| 0.845534
| 0.261862
| 0
| 0.166667
| 0
| 0
| 0.054275
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012821
| false
| 0
| 0.102564
| 0
| 0.128205
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2d8aaeb7cd07de199497544ee9bb719305bd800
| 1,380
|
py
|
Python
|
polybot/views/ingest.py
|
evanpcosta/IEEEPolybot
|
75fd70680f4f9fec8b1b77b4e116e4869eb8c079
|
[
"Apache-2.0"
] | null | null | null |
polybot/views/ingest.py
|
evanpcosta/IEEEPolybot
|
75fd70680f4f9fec8b1b77b4e116e4869eb8c079
|
[
"Apache-2.0"
] | null | null | null |
polybot/views/ingest.py
|
evanpcosta/IEEEPolybot
|
75fd70680f4f9fec8b1b77b4e116e4869eb8c079
|
[
"Apache-2.0"
] | 1
|
2021-03-07T20:46:43.000Z
|
2021-03-07T20:46:43.000Z
|
"""Routes related to ingesting data from the robot"""
import os
import logging
from pathlib import Path
from flask import Blueprint, request, current_app
from pydantic import ValidationError
from werkzeug.utils import secure_filename
from polybot.models import UVVisExperiment
logger = logging.getLogger(__name__)
bp = Blueprint('ingest', __name__, url_prefix='/ingest')
@bp.route('/', methods=('POST',))
def upload_data():
"""Intake a file from the robot and save it to disk"""
# Check the format of the request
if 'file' not in request.files:
logger.info('Bad request, missing the file')
return {
'success': False,
'error': 'File not included in the message'
}
try:
metadata = UVVisExperiment.parse_obj(request.form)
except ValidationError as exc:
logger.info('Bad request, failed validation')
return {
'success': False,
'error': str(exc)
}
# Save the file somewhere accessible
filename = secure_filename(f'{metadata.name}.csv')
os.makedirs(current_app.config['UPLOAD_FOLDER'], exist_ok=True)
output_path = Path(current_app.config['UPLOAD_FOLDER']) / filename
logger.info(f'Saving file to: {output_path}')
file = request.files['file']
file.save(output_path)
return {'success': True, 'filename': output_path.name}
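# Hedged client-side sketch (added; host/port and file name are assumptions, not from the
# original module): the robot would POST the CSV plus the UVVisExperiment metadata as form
# fields, e.g. with requests:
# import requests
# with open("run-001.csv", "rb") as fp:
#     reply = requests.post("http://localhost:5000/ingest/",
#                           data={"name": "run-001"},   # plus any other UVVisExperiment fields
#                           files={"file": fp})
# print(reply.json())   # {"success": True, "filename": "run-001.csv"} on success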
| 30
| 70
| 0.674638
| 173
| 1,380
| 5.248555
| 0.491329
| 0.044053
| 0.026432
| 0.044053
| 0.061674
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216667
| 1,380
| 45
| 71
| 30.666667
| 0.839963
| 0.118841
| 0
| 0.125
| 0
| 0
| 0.19103
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.21875
| 0
| 0.34375
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2db78f1fd6b3b030ac80b311ec8e5f6c6ad3962
| 1,572
|
py
|
Python
|
test/test_mpdstats.py
|
dfc/beets
|
96c5121f65b9477e9b424f166dc57369b6457e42
|
[
"MIT"
] | 1
|
2017-11-15T23:24:35.000Z
|
2017-11-15T23:24:35.000Z
|
test/test_mpdstats.py
|
dfc/beets
|
96c5121f65b9477e9b424f166dc57369b6457e42
|
[
"MIT"
] | null | null | null |
test/test_mpdstats.py
|
dfc/beets
|
96c5121f65b9477e9b424f166dc57369b6457e42
|
[
"MIT"
] | null | null | null |
# This file is part of beets.
# Copyright 2015
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from mock import Mock
from test._common import unittest
from test.helper import TestHelper
from beets.library import Item
from beetsplug.mpdstats import MPDStats
class MPDStatsTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.load_plugins('mpdstats')
def tearDown(self):
self.teardown_beets()
self.unload_plugins()
def test_update_rating(self):
item = Item(title='title', path='', id=1)
item.add(self.lib)
log = Mock()
mpdstats = MPDStats(self.lib, log)
self.assertFalse(mpdstats.update_rating(item, True))
self.assertFalse(mpdstats.update_rating(None, True))
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
| 30.230769
| 71
| 0.720738
| 205
| 1,572
| 5.390244
| 0.526829
| 0.049774
| 0.023529
| 0.052489
| 0.063348
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003962
| 0.197201
| 1,572
| 51
| 72
| 30.823529
| 0.871632
| 0.382952
| 0
| 0
| 0
| 0
| 0.027197
| 0
| 0
| 0
| 0
| 0
| 0.08
| 1
| 0.16
| false
| 0
| 0.24
| 0.04
| 0.48
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2dc055259ce8bd609c68240256323675bd4a1ec
| 1,236
|
py
|
Python
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/cloudsign/models/StampInfo.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 14
|
2018-04-19T09:53:56.000Z
|
2022-01-27T06:05:48.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/cloudsign/models/StampInfo.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 15
|
2018-09-11T05:39:54.000Z
|
2021-07-02T12:38:02.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/cloudsign/models/StampInfo.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 33
|
2018-04-20T05:29:16.000Z
|
2022-02-17T09:10:05.000Z
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class StampInfo(object):
def __init__(self, stampId=None, stampName=None, stampContent=None, stampDigest=None, createTime=None):
"""
:param stampId: (Optional) stamp ID
:param stampName: (Optional) stamp name
:param stampContent: (Optional) stamp image (base64)
:param stampDigest: (Optional) stamp digest
:param createTime: (Optional) stamp upload time
"""
self.stampId = stampId
self.stampName = stampName
self.stampContent = stampContent
self.stampDigest = stampDigest
self.createTime = createTime
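# Illustrative usage (not part of the generated SDK file): StampInfo is a plain value
# object, so it can be constructed directly and serialized via its __dict__.
if __name__ == "__main__":
    info = StampInfo(stampId="stamp-001", stampName="demo seal")
    print(vars(info))  # the remaining fields default to None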
| 34.333333
| 107
| 0.706311
| 156
| 1,236
| 5.570513
| 0.583333
| 0.069045
| 0.029919
| 0.036824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011329
| 0.214401
| 1,236
| 35
| 108
| 35.314286
| 0.883625
| 0.662621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2de7d7431503150ac6343d65fe89abecb277cb0
| 3,462
|
py
|
Python
|
authors/apps/likedislike/tests/test_likedislike.py
|
andela/ah-code-titans
|
4f1fc77c2ecdf8ca15c24327d39fe661eac85785
|
[
"BSD-3-Clause"
] | null | null | null |
authors/apps/likedislike/tests/test_likedislike.py
|
andela/ah-code-titans
|
4f1fc77c2ecdf8ca15c24327d39fe661eac85785
|
[
"BSD-3-Clause"
] | 20
|
2018-11-26T16:22:46.000Z
|
2018-12-21T10:08:25.000Z
|
authors/apps/likedislike/tests/test_likedislike.py
|
andela/ah-code-titans
|
4f1fc77c2ecdf8ca15c24327d39fe661eac85785
|
[
"BSD-3-Clause"
] | 3
|
2019-01-24T15:39:42.000Z
|
2019-09-25T17:57:08.000Z
|
from rest_framework import status
from django.urls import reverse
from authors.apps.articles.models import Article
from authors.base_test_config import TestConfiguration
slug = None
class TestLikeDislike(TestConfiguration):
"""
Class to test for liking and disliking of articles.
"""
def create_article(self):
"""
Method to create an article first and return a token.
"""
article = {
"article": {
"title": "How To Train Your Dragon",
"description": "Ever wonder how?",
"body": "It takes a Jacobian"
}
}
# register the user and verify email
self.email_verification(self.reg_user)
# login the registered user
response = self.login(self.log_user)
# grab the token from the response data
token = response.data["token"]
# Create an article using the authentication token
self.client.post(
reverse("articles"),
article,
content_type='application/json',
HTTP_AUTHORIZATION='Token ' + token
)
return token
def test_article_like(self):
"""
Test if an article can be liked.
"""
# create an article and get user token
token = self.create_article()
# get the article slug
article = Article.objects.all().first()
global slug
slug = article.slug
# set the url
url = '/api/articles/{}/like/'.format(slug)
like_response = self.client.post(
url,
content_type='application/json',
HTTP_AUTHORIZATION='Token ' + token
)
# Test for correct response
self.assertEqual(like_response.status_code, status.HTTP_201_CREATED)
# Test response data to see if the article has 1 like
self.assertEqual(like_response.data["total_likes"], 1)
def test_dislike_article(self):
"""
Test if an article can be disliked
"""
token = self.create_article()
url = '/api/articles/{}/dislike/'.format(slug)
like_response = self.client.post(
url,
content_type='application/json',
HTTP_AUTHORIZATION='Token ' + token
)
# Test for correct response
self.assertEqual(like_response.status_code, status.HTTP_201_CREATED)
# Test response data to see if the article has 1 dislike
self.assertEqual(like_response.data["total_dislikes"], 1)
def test_for_already_liked_article(self):
"""
Test deletion of vote.
If a user likes an article they have already liked,
the vote is removed
"""
token = self.create_article()
url = '/api/articles/{}/like/'.format(slug)
# Post a like to the article
self.client.post(
url,
content_type='application/json',
HTTP_AUTHORIZATION='Token ' + token
)
# Like the article twice
like_response = self.client.post(
url,
content_type='application/json',
HTTP_AUTHORIZATION='Token ' + token
)
# Test for correct response
self.assertEqual(like_response.status_code, status.HTTP_201_CREATED)
# Test response data to see if the article has 0 likes
self.assertEqual(like_response.data["total_likes"], 0)
| 28.61157
| 76
| 0.593299
| 391
| 3,462
| 5.127877
| 0.2711
| 0.053865
| 0.056858
| 0.080798
| 0.501746
| 0.501746
| 0.46783
| 0.367082
| 0.340648
| 0.340648
| 0
| 0.006383
| 0.321202
| 3,462
| 120
| 77
| 28.85
| 0.846809
| 0.223859
| 0
| 0.435484
| 0
| 0
| 0.123573
| 0.027155
| 0
| 0
| 0
| 0
| 0.096774
| 1
| 0.064516
| false
| 0
| 0.064516
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2dfea80584df5547d3541ae560b3208410a1788
| 3,875
|
py
|
Python
|
source/yahoo_finance.py
|
mengwangk/myinvestor-toolkit
|
3dca9e1accfccf1583dcdbec80d1a0fe9dae2e81
|
[
"MIT"
] | 7
|
2019-10-13T18:58:33.000Z
|
2021-08-07T12:46:22.000Z
|
source/yahoo_finance.py
|
mengwangk/myinvestor-toolkit
|
3dca9e1accfccf1583dcdbec80d1a0fe9dae2e81
|
[
"MIT"
] | 7
|
2019-12-16T21:25:34.000Z
|
2022-02-10T00:11:22.000Z
|
source/yahoo_finance.py
|
mengwangk/myinvestor-toolkit
|
3dca9e1accfccf1583dcdbec80d1a0fe9dae2e81
|
[
"MIT"
] | 4
|
2020-02-01T11:23:51.000Z
|
2021-12-13T12:27:18.000Z
|
"""
=======================
Yahoo Finance source
=======================
"""
import re
import requests
import time
from json import loads
from bs4 import BeautifulSoup
from yahoofinancials import YahooFinancials
# Yahoo Finance data source
class YahooFinanceSource(YahooFinancials):
def __init__(self, ticker):
super(YahooFinanceSource, self).__init__(ticker)
# private static method to scrap data from yahoo finance
@staticmethod
def _scrape_dividend_data(url, tech_type, statement_type):
response = requests.get(url)
time.sleep(7)
soup = BeautifulSoup(response.content, "html.parser")
script = soup.find("script", text=re.compile("root.App.main")).text
data = loads(re.search(r"root\.App\.main\s+=\s+(\{.*\})", script).group(1))
stores = data["context"]["dispatcher"]["stores"]["HistoricalPriceStore"]
return stores
# Private Method to clean the dates of the newly returns historical stock data into readable format
def _clean_historical_div_data(self, hist_data):
data = {}
for k, v in hist_data.items():
if 'date' in k.lower():
cleaned_date = self.format_date(v, 'standard')
dict_ent = {k: {u'' + 'formatted_date': cleaned_date, 'date': v}}
data.update(dict_ent)
elif isinstance(v, list):
sub_dict_list = []
for sub_dict in v:
type = sub_dict.get('type', '')
if (type.upper() == 'DIVIDEND'):
sub_dict[u'' + 'formatted_date'] = self.format_date(sub_dict['date'], 'standard')
sub_dict_list.append(sub_dict)
dict_ent = {k: sub_dict_list}
data.update(dict_ent)
else:
dict_ent = {k: v}
data.update(dict_ent)
return data
# Private method to get time interval code
def _build_historical_dividend_url(self, ticker, hist_oj, filter='div'):
url = self._BASE_YAHOO_URL + ticker + '/history?period1=' + str(hist_oj['start']) + '&period2=' + \
str(hist_oj['end']) + '&interval=' + hist_oj['interval'] + '&filter=' + filter + '&frequency=' + \
hist_oj['interval']
return url
# Private Method to take scrapped data and build a data dictionary with
def _create_dict_ent_div(self, ticker, statement_type, tech_type, report_name, hist_obj):
up_ticker = ticker.upper()
YAHOO_URL = self._build_historical_dividend_url(up_ticker, hist_obj)
re_data = self._scrape_dividend_data(YAHOO_URL, tech_type, statement_type)
cleaned_re_data = self._clean_historical_div_data(re_data)
dict_ent = {up_ticker: cleaned_re_data}
return dict_ent
# Public Method for user to get historical stock dividend data
def get_historical_stock_dividend_data(self, start_date, end_date, time_interval):
interval_code = self.get_time_code(time_interval)
start = self.format_date(start_date, 'unixstamp')
end = self.format_date(end_date, 'unixstamp')
hist_obj = {'start': start, 'end': end, 'interval': interval_code}
data = self.get_stock_dividend_data('history', hist_obj=hist_obj)
return data
# Public Method to get stock data
def get_stock_dividend_data(self, statement_type='history', tech_type='', report_name='', hist_obj={}):
data = {}
if isinstance(self.ticker, str):
dict_ent = self._create_dict_ent_div(self.ticker, statement_type, tech_type, report_name, hist_obj)
data.update(dict_ent)
else:
for tick in self.ticker:
dict_ent = self._create_dict_ent_div(tick, statement_type, tech_type, report_name, hist_obj)
data.update(dict_ent)
return data
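# Hedged usage sketch (not from the original repository). The ticker, date strings and
# interval are placeholders; format_date(..., 'unixstamp') from YahooFinancials is
# assumed here to accept 'YYYY-MM-DD' input.
if __name__ == "__main__":
    source = YahooFinanceSource('AAPL')
    dividends = source.get_historical_stock_dividend_data('2018-01-01', '2019-01-01', 'weekly')
    print(list(dividends.keys()))  # one entry per ticker, e.g. ['AAPL']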
| 40.789474
| 112
| 0.627097
| 482
| 3,875
| 4.755187
| 0.248963
| 0.045812
| 0.030541
| 0.037086
| 0.198953
| 0.132635
| 0.116492
| 0.08726
| 0.08726
| 0.08726
| 0
| 0.001733
| 0.255484
| 3,875
| 94
| 113
| 41.223404
| 0.792721
| 0.116645
| 0
| 0.179104
| 0
| 0
| 0.087709
| 0.008214
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104478
| false
| 0
| 0.089552
| 0
| 0.298507
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2e195ab4b278f23e01854b0146790e6742d3324
| 26,510
|
py
|
Python
|
photoz.py
|
martinkilbinger/shapepipe_photoz
|
da4547774f6d599fb0106273eb8ab9819b7fd9eb
|
[
"MIT"
] | null | null | null |
photoz.py
|
martinkilbinger/shapepipe_photoz
|
da4547774f6d599fb0106273eb8ab9819b7fd9eb
|
[
"MIT"
] | null | null | null |
photoz.py
|
martinkilbinger/shapepipe_photoz
|
da4547774f6d599fb0106273eb8ab9819b7fd9eb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 12 10:02:58 2020
@author: Xavier Jimenez
"""
#------------------------------------------------------------------#
# # # # # Imports # # # # #
#------------------------------------------------------------------#
import numpy as np
import os
import shutil
import glob
import pandas as pd
import importlib
from joblib import Parallel, delayed
from tqdm import tqdm
import argparse
import warnings
warnings.filterwarnings('ignore')
from functions import *
#------------------------------------------------------------------#
# # # # # Create catalog # # # # #
#------------------------------------------------------------------#
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--nodes", required=False, type=int, nargs="?", const=1)
parser.add_argument("-s", "--survey", required=False, type=str, nargs="?", const='test')
parser.add_argument("-c", "--clean", required=False, type=bool, nargs="?", const=False)
parser.add_argument("-m", "--make", required=False, type=bool, nargs="?", const=False)
parser.add_argument("-j", "--join", required=False, type=bool, nargs="?", const=False)
parser.add_argument("-g", "--generate_plots", required=False, type=bool, nargs="?", const=False)
parser.add_argument("-p", "--preprocess", required=False, type=str, nargs="?", const=None)
parser.add_argument("-l", "--learning", required=False, type=bool, nargs="?", const=False)
parser.add_argument("-o", "--optimize", required=False, type=str, nargs="?", const=None)
parser.add_argument("-a", "--algorithm", required=False, type=str, nargs="?", const='RF')
parser.add_argument("-i", "--input", required=False, type=str)
args = parser.parse_args()
#------------------------------------------------------------------#
# # # # # PS3PI # # # # #
#------------------------------------------------------------------#
path = os.getcwd() + '/'
if args.input is None:
import params
else:
params = importlib.import_module(args.input)
if args.nodes is None:
args.nodes = 1
if args.algorithm is None:
args.algorithm = 'RF'
if args.survey is None:
args.survey = 'test'
if args.survey == 'test':
print('Modules loaded properly')
if args.preprocess is None:
args.preprocess = 'drop'
elif args.survey == 'ps3pi_cfis' or args.survey == 'unions':
bands = params.bands
output_path = params.output_path
output_name = params.output_name
temp_path = params.temp_path
#------------------------------------------------------------------#
# # # # # CLEAN # # # # #
#------------------------------------------------------------------#
if args.clean == True:
GenFiles = GenerateFiles(args.survey, bands, temp_path, output_name, output_path)
GenFiles.clean_temp_directories()
GenFiles.make_directories()
#------------------------------------------------------------------#
# # # # # MAKE INDIVIDUAL TILE CATALOGS # # # # #
#------------------------------------------------------------------#
if args.make == True:
spectral_path = params.spectral_path
spectral_names = params.spectral_names
path_to_tile_run = params.path_to_tile_run
spectral_surveys = params.spectral_surveys
vignet = params.vignet
cat = MakeCatalogs(args.survey, bands, temp_path, output_name, output_path)
for i in range(len(spectral_names)):
cat.make_survey_catalog(spectral_path, spectral_names[i])
if params.input_path == None:
out_dir = os.listdir(path_to_tile_run + args.survey + '/%s/output/'%(spectral_surveys[i]))[-1]
input_path = path_to_tile_run + args.survey + '/%s/output/%s/paste_cat_runner/output/'%(spectral_surveys[i], out_dir)
else:
input_path = params.input_path
paste_dir = os.listdir(input_path)
Parallel(n_jobs=args.nodes)(delayed(cat.make_catalog)(p, paste_dir, input_path, spectral_names[i], vignet=vignet) for p in tqdm(range(len(paste_dir))))
#------------------------------------------------------------------#
# # # # # JOIN INDIVIDUAL TILE CATALOGS # # # # #
#------------------------------------------------------------------#
if args.join == True:
vignet = params.vignet
cat = MakeCatalogs(args.survey, bands, temp_path, output_name, output_path)
cat.merge_catalogs(vignet=vignet)
#------------------------------------------------------------------#
# # # # # SAVE FIGURES # # # # #
#------------------------------------------------------------------#
if args.generate_plots == True:
spectral_names = params.spectral_names
GenPlot = GeneratePlots(args.survey, bands, temp_path, output_name=output_name, spectral_names=spectral_names, output_path=output_path)
GenPlot.plot_matched_z_spec_hist()
GenPlot.plot_unmatched_z_spec_hist()
#------------------------------------------------------------------#
# # # # # MACHINE LEARNING ALGORITHMS # # # # #
#------------------------------------------------------------------#
if args.learning == True:
GenFiles = GenerateFiles(args.survey, bands, path, output_name, output_path=output_path)
GenFiles.make_directories(output=True)
path_to_csv = params.path_to_csv
spectral_names = params.spectral_names
weights = params.weights
cv = params.cv
max_evals = params.max_evals
feature_engineering = params.feature_engineering
feature_importance = params.feature_importance
plot = params.plot
if path_to_csv is None:
if args.survey == 'ps3pi_cfis':
path_to_csv = output_path + 'output/' + args.survey + '/' + output_name + '/files/' + output_name + '.csv'
ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, output_path=output_path, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
df, df_unmatched = ML.merge_cfis_r_cfht_u_medium_deep_i_g_z()
if feature_engineering == True:
# df_list = ML.feature_engineering(df, bands=['r', 'u', 'i', 'z', 'g'])
df_list = ML.feature_engineering(df, bands=['r', 'u', 'i', 'z', 'g'], color_order=['i', 'g' , 'r', 'z', 'u'])
else:
df_list = [df]
# print(df.head(10))
if plot == True:
ML.plot_corrmat(df)
GenPlot = GeneratePlots(args.survey, bands, temp_path, output_name=output_name, output_path=output_path, spectral_names=spectral_names)
# GenPlot.plot_mags(df, df_unmatched)
elif args.survey == 'unions':
path_to_csv = output_path + 'output/' + args.survey + '/' + output_name + '/files/' + output_name + '.csv'
ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, output_path=output_path, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
df = ML.dataframe()
df_unmatched = ML.unmatched_dataframe()
df = ML.gal_g()
if plot == True:
ML.plot_corrmat(df)
GenPlot = GeneratePlots(args.survey, bands, temp_path, output_name=output_name, output_path=output_path, spectral_names=spectral_names)
GenPlot.plot_mags(df, df_unmatched)
else:
raise TypeError("--survey needs to be set to 'unions' or 'ps3pi_cfis', please specify the full path to your DataFrame")
elif path_to_csv is not None:
ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
df = ML.dataframe()
# ML.plot_corrmat(df)
algs = {'RF': RandomForest, 'ANN': ArtificialNeuralNetwork, 'LASSO': LassoRegression, 'ENET': ElasticNetRegression,
'XGB':XGBoost, 'KRR':KernelRidgeRegression, 'SVR': SupportVectorRegression, 'LGB': LightGBM, 'GBR': GradientBoostingRegression}
if args.algorithm == 'BEST':
algs = {'RF': RandomForest, 'ANN': ArtificialNeuralNetwork, 'SVR': SupportVectorRegression, 'GBR': GradientBoostingRegression}
best_score = 1
best_alg = 'none'
# alg_names = np.array(list(algs.items()))[:,1]
if weights == True:
cat = MakeCatalogs(args.survey, bands, temp_path, output_name, output_path)
weights = cat.compute_weights(df, column = 'r')
elif type(weights) == str:
weights = np.load(weights)
else:
weights = None
global_score = 1
best_dict = pd.DataFrame(data={}, index=['score', 'score std'])
y_pred_dict = {}
y_test_dict = {}
for alg_name in algs:
best_score= 1
alg = algs[alg_name]
print('[Feature engineering]')
print('---------------------------------------------------------------')
for df in df_list:
method = alg(survey = args.survey, bands = bands, output_name = output_name, temp_path=temp_path, dataframe=df, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
score = method.score()
print(list(df.columns))
print('[preprocess] %s'%score[4])
print('[%s '%alg_name +'score] {:.3f} ± {:.3f}'.format(score[5], score[6]))
if score[5] < best_score:
print('[NEW BEST]')
print("%s: "%alg_name + "Sigma: {:.3f} ± {:.4f}, outlier rate: {:.3f} ± {:.3f} % ".format(score[0], score[1], score[2]*100, score[3]*100), end='\r')
best_score = score[5]
best_score_std = score[6]
bscore = score
df_best = df
best_columns = df.columns
best_preprocess = score[4]
best_dict[alg_name] = [best_score, best_score_std]
method = alg(survey = args.survey, bands = bands, output_name = output_name, temp_path=temp_path, dataframe=df_best, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=best_preprocess, n_jobs=args.nodes)
_, y_pred, y_test = method.model()
y_pred_dict[alg] = y_pred
y_test_dict[alg] = y_test
break
best_dict.to_csv(path + 'output/%s/%s/files/'%(args.survey, output_name) + 'Best_scores_' + output_name + '.csv', index=False)
# score = method.score()
print('---------------------------------------------------------------')
print("%s: "%alg_name + "Sigma: {:.3f} ± {:.4f}, outlier rate: {:.3f} ± {:.3f} % ".format(bscore[0], bscore[1], bscore[2]*100, bscore[3]*100))
if best_score < global_score:
global_score = best_score
global_score_std = best_score_std
gscore = bscore
best_alg = alg_name
df_global = df_best
global_columns = best_columns
global_preprocess = best_preprocess
print('[NEW BEST] %s'%best_alg + ' score: {:.3f} ± {:.3f}'.format(global_score, global_score_std))
print('---------------------------------------------------------------')
best_dict.sort_values(by = 'score', axis = 1, inplace=True)
print(best_dict.head())
df_best = df_global
alg = algs[best_alg]
method = alg(survey = args.survey, bands = bands, output_name = output_name, temp_path=temp_path, dataframe=df_best, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
if feature_importance == True:
if best_alg != 'ANN':
method.permutation()
if plot == True:
method.plot(lim=1.8)
print('---------------------------------------------------------------')
print('[BEST] preprocess: %s'%global_preprocess)
print('[BEST] score: {:.3f} ± {:.3f}'.format(global_score, global_score_std))
print(list(global_columns))
print("[%s] "%args.algorithm + "%s: "%best_alg + "Sigma: {:.3f} ± {:.4f}, outlier rate: {:.3f} ± {:.3f} % ".format(gscore[0], gscore[1], gscore[2]*100, bscore[3]*100))
print('---------------------------------------------------------------')
else:
try:
alg = algs[args.algorithm]
except:
raise TypeError('MLM is not defined')
if weights == True:
cat = MakeCatalogs(args.survey, bands, temp_path, output_name, output_path)
weights = cat.compute_weights(df, column = 'r')
elif type(weights) == str:
weights = np.load(weights)
else:
weights = None
best_score = 1
print('[Feature engineering]')
print('---------------------------------------------------------------')
for df in df_list:
method = alg(survey = args.survey, bands = bands, output_name = output_name, temp_path=temp_path, dataframe=df, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
# method.plot(lim=1.8)
# method.permutation()
# df = method.filter()
# df.drop(columns=['r-z'], inplace=True)
score = method.score(df)
print(list(df.columns))
print('[preprocess] %s'%score[4])
print('[%s '%args.algorithm + 'score] {:.3f} ± {:.3f}'.format(score[5], score[6]))
if score[5] < best_score:
print('[NEW BEST]')
print("%s: "%args.algorithm + "Sigma: {:.3f} ± {:.4f}, outlier rate: {:.3f} ± {:.3f} % ".format(score[0], score[1], score[2]*100, score[3]*100))
best_score = score[5]
best_score_std = score[6]
bscore = score
df_best = df
best_columns = df.columns
best_preprocess = score[4]
# break
method = alg(survey = args.survey, bands = bands, output_name = output_name, temp_path=temp_path, dataframe=df_best, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
if feature_importance == True:
if args.algorithm != 'ANN':
method.permutation()
if plot == True:
method.plot(lim=1.5)
if params.morph_importance == True and params.weights == False and args.algorithm == 'RF':
method.morph_importance(df_best)
print('---------------------------------------------------------------')
print('[BEST] preprocess: %s'%best_preprocess)
print('[BEST] score: {:.3f} ± {:.3f}'.format(best_score, best_score_std))
print(list(best_columns))
print("%s: "%args.algorithm + "Sigma: {:.3f} ± {:.4f}, outlier rate: {:.3f} ± {:.3f} % ".format(bscore[0], bscore[1], bscore[2]*100, bscore[3]*100))
print('---------------------------------------------------------------')
#------------------------------------------------------------------#
# # # # # OPTIMIZE LEARNING ALGORITHMS # # # # #
#------------------------------------------------------------------#
if args.optimize == 'HyperOpt' or args.optimize == 'RandomSearch' or args.optimize == 'GridSearch':
# GenFiles = GenerateFiles(args.survey, bands, path, output_name, output_path=output_path)
# GenFiles.make_directories(output=True)
# path_to_csv = params.path_to_csv
# max_evals = params.max_evals
weights = params.weights
# cv = params.cv
algs = {'RF': RandomForestOptimizer, 'SVR': SVROptimizer, 'XGB': XGBoostOptimizer, 'KRR': KRROptimizer, 'ANN': ANNOptimizer}
try:
alg = algs[args.algorithm]
except:
raise ValueError('Method does not have an optimization algorithm')
if weights == True:
cat = MakeCatalogs(args.survey, bands, temp_path, output_name, output_path)
weights = cat.compute_weights(df_best, column = 'r')
elif type(weights) == str:
weights = np.load(weights)
else:
weights = None
print('[%s] optimization'%args.optimize)
# if args.algorithm == 'ANN':
# ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, validation_set=True)
# X_train, X_val, X_test, Y_train, Y_val, Y_test = ML.data()
# X_train, Y_train, X_val, Y_val = data()
# trials = Trials()
# _, best_model = optim.minimize(model=model,data=data,algo=tpe.suggest, max_evals=max_evals, trials=trials)
# Y_pred = best_model.predict(X_test, verbose = 0)
# print(type(Y_pred), type(Y_test))
# sigma, eta = sigma_eta(Y_test.to_numpy().flatten(), Y_pred.flatten())
# print("%s Opt : "%args.algorithm + "Sigma: {:.3f}, outlier rate: {:.3f} % ".format(sigma, eta*100))
# ML.plot_zphot_zspec(Y_pred.flatten(), method='ANN_Opt', lim=1.8)
# ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
# df = ML.dataframe()
# ML.plot_corrmat(df)
# ModelOptimizer = alg(survey = args.survey, bands = bands, output_name = output_name, dataframe=df, path_to_csv=None, validation_set=False)
# _, sigma, eta = ModelOptimizer.best_params(max_evals=10)
# print("%s Opt : "%args.algorithm + "Sigma: {:.3f}, outlier rate: {:.3f} % ".format(sigma, eta*100))
# if path_to_csv is None:
# path_to_csv = output_path + 'output/' + args.survey + '/' + output_name + '/files/' + output_name + '.csv'
# ML = LearningAlgorithms(survey = args.survey, bands = bands, dataframe=df_best, output_name = output_name, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
# df, df_unmatched = ML.merge_cfis_r_cfht_u_medium_deep_i_g_z()
# ML.plot_corrmat(df_best, figure_name=args.algorithm+'_best_corrmat')
ModelOptimizer = alg(survey = args.survey, bands = bands, output_name = output_name, dataframe=df_best, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=best_preprocess, n_jobs=args.nodes)
# ModelOptimizer.debug()
_, sigma, eta, score = ModelOptimizer.best_params(max_evals=max_evals, method=args.optimize)
print('---------------------------------------------------------------')
print('[BEST OPT] score: {:.3f}'.format(score))
print("%s %s : "%(args.algorithm, args.optimize) + "Sigma: {:.3f}, outlier rate: {:.3f} % ".format(sigma, eta*100))
print('---------------------------------------------------------------')
# elif path_to_csv is not None:
# ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
# df = ML.dataframe()
# ML.plot_corrmat(df)
# ModelOptimizer = alg(survey = args.survey, bands = bands, output_name = output_name, dataframe=df, path_to_csv=None, validation_set=False, output_path=output_path, sample_weight=weights, cv=cv, preprocessing=args.preprocess, n_jobs=args.nodes)
# _, sigma, eta = ModelOptimizer.best_params(max_evals=max_evals, method=args.optimize)
# print("%s %s : "%(args.algorithm, args.optimize) + "Sigma: {:.3f}, outlier rate: {:.3f} % ".format(sigma, eta*100))
# else:
# ML = LearningAlgorithms(survey = args.survey, bands = bands, path_to_csv = path_to_csv, output_name = output_name)
# df = ML.dataframe()
# df = ML.preprocess(df, method = args.preprocess)
# ML.plot_corrmat(df)
# ModelOptimizer = alg(survey = args.survey, bands = bands, output_name = output_name, dataframe=df, path_to_csv=False, validation_set=False)
# _, sigma, eta = ModelOptimizer.best_params(max_evals=max_evals, method=args.optimize)
# print("%s %s : "%(args.algorithm, args.optimize) + "Sigma: {:.3f}, outlier rate: {:.3f} % ".format(sigma, eta*100))
#------------------------------------------------------------------#
# # # # # UNIONS # # # # #
#------------------------------------------------------------------#
elif args.survey == 'unions_deprecated':
spectral_path = '/home/mkilbing/astro/data/CFIS/spectro_surveys/'
spectral_names = ['data_DR14_LRG_N', 'data_DR14_LRG_S', 'galaxy_DR12v5_CMASSLOWZTOT_North', 'galaxy_DR12v5_CMASSLOWZTOT_South','sdss_main_gal']
# spectral_names = ['sdss_main_gal']
spectral_surveys = ['SDSS', 'SDSS', 'eBOSS', 'eBOSS', 'SDSS_2']
# spectral_surveys = ['SDSS_2']
output_name = 'CFIS_matched_eBOSS_SDSS_catalog_RUIZ'
# output_name = 'CFIS_matched_SDSS_2_catalog_RUIZ'
output_path = path
temp_path = '/n17data/jimenez/temp/'
bands = ['R', 'U', 'I', 'Z']
# out_dir = os.listdir("/n17data/jimenez/shaperun_unions/output_%s/"%(spectral_surveys[i]))[-1]
# path_to_tile_run = '/n17data/jimenez/shaperun/'
# input_path = path_to_tile_run + args.survey + '/%s/output/%s/paste_cat_runner/output/'%(spectral_surveys[i], out_dir)
# paste_dir = os.listdir(input_path)
if args.clean == True:
GenFiles = GenerateFiles(args.survey, bands, temp_path)
GenFiles.clean_temp_directories()
GenFiles.make_directories()
elif args.make == True:
cat = MakeCatalogs(args.survey, bands, temp_path)
# vignet = [False, False, False, False, False]
for i in range(len(spectral_names)):
cat.make_survey_catalog(spectral_path, spectral_names[i])
out_dir = os.listdir("/n17data/jimenez/shaperun_unions/output_%s/"%(spectral_surveys[i]))[-1]
paste_dir = os.listdir('/n17data/jimenez/shaperun_unions/output_%s/%s/paste_cat_runner/output/'%(spectral_surveys[i], out_dir))
input_path = '/n17data/jimenez/shaperun_unions/output_%s/%s/paste_cat_runner/output/'%(spectral_surveys[i], out_dir)
Parallel(n_jobs=args.nodes)(delayed(cat.make_catalog)(p, paste_dir, input_path, spectral_names[i], vignet=False) for p in tqdm(range(len(paste_dir))))
elif args.join == True:
cat = MakeCatalogs(args.survey, bands, temp_path)
cat.merge_catalogs(output_name, vignet=False)
elif args.generate_plots == True:
GenPlot = GeneratePlots(args.survey, bands, temp_path, csv_name=output_name, spectral_names=spectral_names)
# GenPlot.plot_d2d()
GenPlot.plot_matched_r_i_i_z()
GenPlot.plot_matched_u_r_r_i()
GenPlot.plot_matched_z_spec_hist()
# GenPlot.plot_unmatched_r_i_i_z()
# GenPlot.plot_unmatched_u_r_r_i()
GenPlot.plot_unmatched_z_spec_hist()
# if args.survey != 'unions' or args.survey != 'ps3pi_cfis':
# print("Survey must either be 'unions' or 'ps3pi_cfis'")
# raise SyntaxError("Survey must either be 'unions' or 'ps3pi_cfis'")
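# Note on the boolean flags above (illustrative snippet, not part of the original script):
# argparse applies type=bool to the raw command-line string, so any non-empty value --
# including the literal "False" -- parses as True; only omitting the value falls back to const.
import argparse
_p = argparse.ArgumentParser()
_p.add_argument("-c", "--clean", required=False, type=bool, nargs="?", const=False)
print(_p.parse_args(["-c", "False"]).clean)  # True, because bool("False") is True
print(_p.parse_args(["-c"]).clean)           # False, the const value
print(_p.parse_args([]).clean)               # None, flag omitted entirely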
| 54.102041
| 289
| 0.515805
| 2,766
| 26,510
| 4.714389
| 0.110991
| 0.047546
| 0.044172
| 0.030675
| 0.67385
| 0.640567
| 0.615951
| 0.602377
| 0.572316
| 0.562653
| 0
| 0.010234
| 0.295964
| 26,510
| 489
| 290
| 54.212679
| 0.687634
| 0.228631
| 0
| 0.412186
| 0
| 0.003584
| 0.117851
| 0.050423
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.064516
| 0.132616
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2e55b26934d85e03276f6736007bed25c578301
| 1,348
|
py
|
Python
|
network/fs_net_repo/PoseTs.py
|
lolrudy/GPV_pose
|
f326a623b3e45e6edfc1963b068e8e7aaea2bfff
|
[
"MIT"
] | 10
|
2022-03-16T02:14:56.000Z
|
2022-03-31T19:01:34.000Z
|
network/fs_net_repo/PoseTs.py
|
lolrudy/GPV_pose
|
f326a623b3e45e6edfc1963b068e8e7aaea2bfff
|
[
"MIT"
] | 1
|
2022-03-18T06:43:16.000Z
|
2022-03-18T06:56:35.000Z
|
network/fs_net_repo/PoseTs.py
|
lolrudy/GPV_pose
|
f326a623b3e45e6edfc1963b068e8e7aaea2bfff
|
[
"MIT"
] | 2
|
2022-03-19T13:06:28.000Z
|
2022-03-19T16:08:18.000Z
|
import torch.nn as nn
import torch
import torch.nn.functional as F
import absl.flags as flags
from absl import app
FLAGS = flags.FLAGS
# Point_center encodes the segmented point cloud
# with one more conv layer than the original paper
class Pose_Ts(nn.Module):
def __init__(self):
super(Pose_Ts, self).__init__()
self.f = FLAGS.feat_c_ts
self.k = FLAGS.Ts_c
self.conv1 = torch.nn.Conv1d(self.f, 1024, 1)
self.conv2 = torch.nn.Conv1d(1024, 256, 1)
self.conv3 = torch.nn.Conv1d(256, 256, 1)
self.conv4 = torch.nn.Conv1d(256, self.k, 1)
self.drop1 = nn.Dropout(0.2)
self.bn1 = nn.BatchNorm1d(1024)
self.bn2 = nn.BatchNorm1d(256)
self.bn3 = nn.BatchNorm1d(256)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = F.relu(self.bn3(self.conv3(x)))
x = self.drop1(x)
x = self.conv4(x)
x = x.squeeze(2)
x = x.contiguous()
xt = x[:, 0:3]
xs = x[:, 3:6]
return xt, xs
def main(argv):
feature = torch.rand(3, 3, 1000)
obj_id = torch.randint(low=0, high=15, size=[3, 1])
net = Pose_Ts()
out = net(feature)  # Pose_Ts.forward() only takes the feature tensor; obj_id is not used
t = 1
if __name__ == "__main__":
app.run(main)
| 25.433962
| 55
| 0.58457
| 217
| 1,348
| 3.516129
| 0.37788
| 0.02097
| 0.068152
| 0.039318
| 0.028834
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079673
| 0.273739
| 1,348
| 52
| 56
| 25.923077
| 0.699694
| 0.068991
| 0
| 0
| 0
| 0
| 0.00639
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.128205
| 0
| 0.25641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2e64fced5d7c9dff05319da1da37700db19293c
| 2,653
|
py
|
Python
|
gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/analysis/exportXGBoostNode.py
|
t-triobox/gQuant
|
6ee3ba104ce4c6f17a5755e7782298902d125563
|
[
"Apache-2.0"
] | null | null | null |
gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/analysis/exportXGBoostNode.py
|
t-triobox/gQuant
|
6ee3ba104ce4c6f17a5755e7782298902d125563
|
[
"Apache-2.0"
] | null | null | null |
gQuant/plugins/gquant_plugin/greenflow_gquant_plugin/analysis/exportXGBoostNode.py
|
t-triobox/gQuant
|
6ee3ba104ce4c6f17a5755e7782298902d125563
|
[
"Apache-2.0"
] | null | null | null |
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.portsSpecSchema import (ConfSchema,
PortsSpecSchema)
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
from greenflow.dataframe_flow.util import get_file_path
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from ..node_hdf_cache import NodeHDFCacheMixin
class XGBoostExportNode(TemplateNodeMixin, NodeHDFCacheMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.INPUT_PORT_NAME = 'model_in'
self.OUTPUT_PORT_NAME = 'filename'
port_type = PortsSpecSchema.port_type
port_inports = {
self.INPUT_PORT_NAME: {
port_type: ["xgboost.Booster", "builtins.dict"]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: ["builtins.str"]
}
}
cols_required = {}
addition = {}
meta_inports = {
self.INPUT_PORT_NAME: cols_required
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_ADDITION,
MetaDataSchema.META_REF_INPUT: self.INPUT_PORT_NAME,
MetaDataSchema.META_DATA: addition
}
}
self.template_ports_setup(
in_ports=port_inports,
out_ports=port_outports
)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "XGBoost Export Configure",
"type": "object",
"description": """Export the xgboost model to a file
""",
"properties": {
"path": {
"type": "string",
"description":
"""The output filepath for the xgboost
model"""
}
},
"required": ["path"],
}
ui = {}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
dump the model into the file
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
model = inputs[self.INPUT_PORT_NAME]
if isinstance(model, dict):
model = model['booster']
pathname = get_file_path(self.conf['path'])
model.save_model(pathname)
return {self.OUTPUT_PORT_NAME: pathname}
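# The node's process() boils down to Booster.save_model(); a standalone sketch of that
# underlying xgboost call with placeholder data (not part of the greenflow plugin):
import numpy as np
import xgboost as xgb

X = np.random.rand(100, 4)
y = np.random.rand(100)
booster = xgb.train({'objective': 'reg:squarederror'}, xgb.DMatrix(X, label=y), num_boost_round=5)
booster.save_model('model_out.json')  # the same operation XGBoostExportNode applies to its input model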
| 31.211765
| 74
| 0.547682
| 243
| 2,653
| 5.72428
| 0.329218
| 0.051761
| 0.07908
| 0.093458
| 0.071891
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.366001
| 2,653
| 84
| 75
| 31.583333
| 0.826992
| 0.043724
| 0
| 0.030303
| 0
| 0
| 0.088321
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.090909
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2e989f1d471ff586e3048f193d3b0ec35055cc5
| 623
|
py
|
Python
|
Python/main.py
|
mrn4344/Mandelbrot
|
8958b6453b3feafa1329fa18dc2822ab8985cb41
|
[
"MIT"
] | null | null | null |
Python/main.py
|
mrn4344/Mandelbrot
|
8958b6453b3feafa1329fa18dc2822ab8985cb41
|
[
"MIT"
] | null | null | null |
Python/main.py
|
mrn4344/Mandelbrot
|
8958b6453b3feafa1329fa18dc2822ab8985cb41
|
[
"MIT"
] | null | null | null |
import mandelbrot as mand
from PIL import Image
width = 1280
height = 720
scale = 2
def pixelToCoord( pos ):
(x, y) = pos
return ( 4*(x/height - 0.5)/scale , -4*(y/height - 0.5)/scale)
def main():
me = mand.mandelbrot(2)
img = Image.new('RGB', (width,height), color = 'white')
for y in range(0, height):
for x in range(0,width):
c = pixelToCoord((x,y))
if(me.isInSet(complex(c[0], c[1]), 1024)):
img.putpixel((x,y), (0,0,0))
if y%25 == 0:
print("Row " + str(y))
img.save("output2.png")
if __name__ == "__main__":
main()
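# Quick check of the pixel-to-plane mapping above (illustrative, values worked out by hand):
# with height = 720 and scale = 2,
#   pixelToCoord((0, 0))     -> (-1.0, 1.0)
#   pixelToCoord((360, 360)) -> (0.0, -0.0)
# so the top-left pixel sits at -1+1j and column x = 360 lies on the imaginary axis.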
| 22.25
| 67
| 0.5313
| 95
| 623
| 3.4
| 0.494737
| 0.018576
| 0.049536
| 0.080495
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06772
| 0.288925
| 623
| 27
| 68
| 23.074074
| 0.6614
| 0
| 0
| 0
| 0
| 0
| 0.049759
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.095238
| 0
| 0.238095
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2ea645b92efeff22da8081f24ec4c1af5469ade
| 1,699
|
py
|
Python
|
blockformer/position/relative_position_bias.py
|
colinski/blockformer
|
56be6abc08dc25ab97c526384e9c69f6c814c3ed
|
[
"MIT"
] | null | null | null |
blockformer/position/relative_position_bias.py
|
colinski/blockformer
|
56be6abc08dc25ab97c526384e9c69f6c814c3ed
|
[
"MIT"
] | null | null | null |
blockformer/position/relative_position_bias.py
|
colinski/blockformer
|
56be6abc08dc25ab97c526384e9c69f6c814c3ed
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.utils.weight_init import trunc_normal_
#adapted from open-mmlab implementation of swin transformer
class RelativePositionBias(nn.Module):
def __init__(self,
window_size=(7, 7),
num_heads=8
):
super().__init__()
self.window_size = window_size
self.num_heads = num_heads
# define parameter table and idx of relative position bias
Wh, Ww = self.window_size
num_rows = (2 * Wh - 1) * (2 * Ww - 1)
self.relative_position_bias_table = nn.Parameter(
torch.zeros(num_rows, num_heads)
)
rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww)
rel_position_index = rel_index_coords + rel_index_coords.T
rel_position_index = rel_position_index.flip(1).contiguous().view(-1)
self.register_buffer('relative_position_index', rel_position_index)
self.init_weights()
def init_weights(self): #important!
trunc_normal_(self.relative_position_bias_table, std=0.02)
def forward(self, *args, **kwargs):
Wh, Ww = self.window_size
bias = self.relative_position_bias_table[self.relative_position_index]
bias = bias.view(Wh * Ww, Wh * Ww, -1)
bias = bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
bias = bias.unsqueeze(0) # 1 nH Wh*Ww Wh*Ww
return bias
@staticmethod
def double_step_seq(step1, len1, step2, len2):
seq1 = torch.arange(0, step1 * len1, step1)
seq2 = torch.arange(0, step2 * len2, step2)
return (seq1[:, None] + seq2[None, :]).reshape(1, -1)
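# Illustrative usage (not part of the original module): the forward pass ignores its
# arguments and returns the learned bias reshaped for broadcasting over attention logits.
if __name__ == "__main__":
    rpb = RelativePositionBias(window_size=(7, 7), num_heads=8)
    bias = rpb()
    print(bias.shape)  # torch.Size([1, 8, 49, 49]) -> (1, nH, Wh*Ww, Wh*Ww)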
| 38.613636
| 78
| 0.638611
| 235
| 1,699
| 4.370213
| 0.344681
| 0.031159
| 0.054528
| 0.070107
| 0.195716
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03083
| 0.255444
| 1,699
| 43
| 79
| 39.511628
| 0.781028
| 0.093584
| 0
| 0.055556
| 0
| 0
| 0.014984
| 0.014984
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.305556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2ecefbb6392e5044c1bce089bc79ba2086836e6
| 1,714
|
py
|
Python
|
ka_model.py
|
ycjing/AmalgamateGNN.PyTorch
|
f99a60b374d23002d53385f23da2d540d964c7c2
|
[
"MIT"
] | 15
|
2021-06-25T05:02:37.000Z
|
2022-03-20T08:34:15.000Z
|
ka_model.py
|
ycjing/AmalgamateGNN.PyTorch
|
f99a60b374d23002d53385f23da2d540d964c7c2
|
[
"MIT"
] | 2
|
2022-01-21T05:14:17.000Z
|
2022-03-23T09:24:45.000Z
|
ka_model.py
|
ycjing/AmalgamateGNN.PyTorch
|
f99a60b374d23002d53385f23da2d540d964c7c2
|
[
"MIT"
] | 1
|
2021-08-18T06:28:58.000Z
|
2021-08-18T06:28:58.000Z
|
import torch
from utils import get_teacher1, get_teacher2, get_student
def collect_model(args, data_info_s, data_info_t1, data_info_t2):
"""This is the function that constructs the dictionary containing the models and the corresponding optimizers
Args:
args (parse_args): parser arguments
data_info_s (dict): the dictionary containing the data information of the student
data_info_t1 (dict): the dictionary containing the data information of teacher #1
data_info_t2 (dict): the dictionary containing the data information of teacher #2
Returns:
dict: model dictionary ([model_name][model/optimizer])
"""
device = torch.device("cpu") if args.gpu < 0 else torch.device("cuda:" + str(args.gpu))
# initialize the two teacher GNNs and the student GNN
s_model = get_student(args, data_info_s)
s_model.to(device)
t1_model = get_teacher1(args, data_info_t1)
t1_model.to(device)
t2_model = get_teacher2(args, data_info_t2)
t2_model.to(device)
# define the corresponding optimizers of the teacher GNNs and the student GNN
params = s_model.parameters()
s_model_optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
t1_model_optimizer = None
t2_model_optimizer = None
# construct the model dictionary containing the models and the corresponding optimizers
model_dict = {}
model_dict['s_model'] = {'model':s_model, 'optimizer':s_model_optimizer}
model_dict['t1_model'] = {'model':t1_model, 'optimizer':t1_model_optimizer}
model_dict['t2_model'] = {'model':t2_model, 'optimizer':t2_model_optimizer}
return model_dict
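# Sketch of how the returned dictionary is typically consumed (hypothetical call site;
# args and the data_info_* dicts come from the surrounding training script):
# model_dict = collect_model(args, data_info_s, data_info_t1, data_info_t2)
# student    = model_dict['s_model']['model']
# optimizer  = model_dict['s_model']['optimizer']
# teacher1   = model_dict['t1_model']['model']  # the teachers' 'optimizer' entries are None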
| 41.804878
| 113
| 0.713536
| 240
| 1,714
| 4.85
| 0.270833
| 0.120275
| 0.098797
| 0.089347
| 0.27921
| 0.27921
| 0.232818
| 0.232818
| 0.092784
| 0
| 0
| 0.018315
| 0.203617
| 1,714
| 41
| 114
| 41.804878
| 0.834432
| 0.405484
| 0
| 0
| 0
| 0
| 0.074566
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.105263
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2f080fa5d08bb1269862977727df7460da362c1
| 445
|
py
|
Python
|
probs/prob9.py
|
mattrid93/ProjectEuler
|
3e1cf1bad9581e526b37d17e20b5fe8af837c1c6
|
[
"MIT"
] | null | null | null |
probs/prob9.py
|
mattrid93/ProjectEuler
|
3e1cf1bad9581e526b37d17e20b5fe8af837c1c6
|
[
"MIT"
] | null | null | null |
probs/prob9.py
|
mattrid93/ProjectEuler
|
3e1cf1bad9581e526b37d17e20b5fe8af837c1c6
|
[
"MIT"
] | null | null | null |
"""Problem 9: Special Pythagorean triplet.
Brute force."""
import unittest
def find_triple(s):
"""Returns abc where a^2+b^2=c^2 with a+b+c=s."""
a, b, c = 998, 1, 1
while b < 999:
if a**2 + b**2 == c**2:
return a*b*c
if a == 1:
c += 1
b = 1
a = 1000 - b - c
else:
b += 1
a -= 1
if __name__ == "__main__":
print(find_triple(1000))
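# Sanity check (worked by hand, not part of the original solution file): the triple summing
# to 1000 is (a, b, c) = (200, 375, 425), since 200**2 + 375**2 == 425**2 == 180625 and
# 200 + 375 + 425 == 1000, so find_triple(1000) should return 200*375*425 == 31875000.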
| 20.227273
| 53
| 0.440449
| 70
| 445
| 2.657143
| 0.457143
| 0.043011
| 0.048387
| 0.043011
| 0.064516
| 0.064516
| 0
| 0
| 0
| 0
| 0
| 0.10566
| 0.404494
| 445
| 21
| 54
| 21.190476
| 0.596226
| 0.217978
| 0
| 0
| 0
| 0
| 0.023739
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.2
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2f1d876ec603c325d5fd840f0aed40ac0a43ab5
| 998
|
py
|
Python
|
cleanup.py
|
DuncteBot/tf2-transformer-chatbot
|
0e364da0537717de025314d40c5b0423891f9dc4
|
[
"MIT"
] | null | null | null |
cleanup.py
|
DuncteBot/tf2-transformer-chatbot
|
0e364da0537717de025314d40c5b0423891f9dc4
|
[
"MIT"
] | null | null | null |
cleanup.py
|
DuncteBot/tf2-transformer-chatbot
|
0e364da0537717de025314d40c5b0423891f9dc4
|
[
"MIT"
] | null | null | null |
import sqlite3
from helpers import get_db_path, get_timeframes
from traceback import print_tb
timeframes = get_timeframes()
print(timeframes)
for timeframe in timeframes:
with sqlite3.connect(get_db_path(timeframe)) as connection:
try:
c = connection.cursor()
print("Cleanin up!")
c.execute('BEGIN TRANSACTION')
# Remove values that we don't want
sql = "DELETE FROM parent_reply WHERE parent IS NULL OR parent == 'False' OR parent == '0'"
c.execute(sql)
connection.commit()
# c.execute("VACUUM")
# connection.commit()
sql = "SELECT COUNT(comment_id) FROM parent_reply"
c.execute(sql)
result = c.fetchone()
if result is not None:
res = result[0]
print(f'Cleanup done, paired rows: {res}')
except Exception as e:
print('Something broke')
print(e)
print('Done')
| 28.514286
| 103
| 0.576152
| 116
| 998
| 4.87069
| 0.560345
| 0.056637
| 0.031858
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005997
| 0.331663
| 998
| 34
| 104
| 29.352941
| 0.841079
| 0.072144
| 0
| 0.083333
| 0
| 0
| 0.221258
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0.291667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2f341062556abc813aaebd4a88c681a262c4eb7
| 8,059
|
py
|
Python
|
visualization/plots.py
|
yc14600/beta3_IRT
|
7c3d87b2f04fc9ad7bf59db5d60166df5ca47dc6
|
[
"MIT"
] | 7
|
2019-06-26T15:23:14.000Z
|
2021-12-28T14:16:24.000Z
|
visualization/plots.py
|
yc14600/beta3_IRT
|
7c3d87b2f04fc9ad7bf59db5d60166df5ca47dc6
|
[
"MIT"
] | null | null | null |
visualization/plots.py
|
yc14600/beta3_IRT
|
7c3d87b2f04fc9ad7bf59db5d60166df5ca47dc6
|
[
"MIT"
] | 4
|
2019-08-29T19:07:35.000Z
|
2021-12-28T19:22:11.000Z
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
import seaborn as sns
import pandas as pd
import glob
import re
from itertools import combinations
import matplotlib
matplotlib.rcParams['text.usetex'] = True
def plot_probabilities(X, probabilities, titles, suptitle):
norm = plt.Normalize(0, 1)
n = len(titles)
nrows = int(np.ceil(n / 2))
sns.set_context('paper')
cmap = sns.cubehelix_palette(rot=-.5,light=1.5,dark=-.5,as_cmap=True)
f, axarr = plt.subplots(nrows, min(n,2))
if n < 2:
axarr.scatter(X[:, 0], X[:, 1], c=probabilities[0],
cmap=cmap, norm=norm, edgecolor='k',s=60)
axarr.set_title(titles[0])
#f.set_size_inches(8, 8)
else:
i = j = 0
for idx, t in enumerate(titles):
axarr[i, j].scatter(X[:, 0], X[:, 1], c=probabilities[idx],
cmap=cmap, norm=norm, edgecolor='k')
axarr[i, j].set_title(t)
j += 1
if j == 2:
j = 0
i += 1
if n % 2 != 0:
axarr[-1, -1].axis('off')
f.set_size_inches(10, 30)
f.suptitle(suptitle)
f.subplots_adjust(hspace=0.7)
return f
def plot_parameters(X, delta, a):
sns.set_context('paper')
cmap1 = sns.cubehelix_palette(rot=-.5,light=1.5,dark=-.5,as_cmap=True)
gs = gridspec.GridSpec(2, 2, height_ratios=[4, 2])
f = plt.figure(figsize=(12,6))
axarr = np.array([[None]*2]*2)
for i in range(2):
for j in range(2):
axarr[i,j] = plt.subplot(gs[i*2+j])
axarr[0, 0].scatter(X[:, 0], X[:, 1], c=delta, cmap=cmap1,
edgecolor='k',s=40)
axarr[0, 0].set_title('$\mathbf{\delta}$ (Difficulty)',fontsize=16)
axarr[0, 1].scatter(X[:, 0], X[:, 1], c=a, cmap=cmap1,
edgecolor='k',s=40)
axarr[0, 1].set_title('$\mathbf{a}$ (Discrimination)',fontsize=16)
#axarr[1, 0].hist(delta,bins=100)
sns.distplot(delta,bins=100,ax=axarr[1,0])
axarr[1, 0].set_title('Histogram of $\mathbf{\delta}$',fontsize=16)
#axarr[1, 1].hist(a,bins=100)
sns.distplot(a,bins=100,ax=axarr[1,1])
axarr[1, 1].set_title('Histogram of $\mathbf{a}$',fontsize=16)
f.suptitle('IRT item parameters')
#f.set_size_inches(20, 20)
f.subplots_adjust(hspace=0.3)
return f
def plot_noisy_points(xtest, disc=None):
sns.set_context('paper')
cls = sns.color_palette("BuGn_r")
lgd = []
f = plt.figure()
plt.scatter(xtest.x[xtest.noise==0],xtest.y[xtest.noise==0],facecolors='none',edgecolors='k',s=60)
lgd.append('non-noise item')
plt.scatter(xtest.x[xtest.noise>0],xtest.y[xtest.noise>0],c=cls[3],s=60)
lgd.append('noise item')
if not disc is None:
plt.scatter(xtest.x[disc<0],xtest.y[disc<0],c=cls[0],marker='+',facecolors='none')
lgd.append('detected noise item')
plt.title('True and detected noise items')
l = plt.legend(lgd,frameon=True,fontsize=12)
l.get_frame().set_edgecolor('g')
return f
def plot_item_parameters_corr(irt_prob_avg,difficulty,noise,disc=None):
sns.set_context('paper')
cls = sns.color_palette("BuGn_r")
lgd = []
f = plt.figure()
plt.xlim([0.,1.])
plt.ylim([0.,1.])
plt.scatter(irt_prob_avg[noise>0],difficulty[noise>0],c=cls[3],s=60)
lgd.append('noise item')
if not disc is None:
plt.scatter(irt_prob_avg[disc<0],difficulty[disc<0],c=cls[0],marker='+',facecolors='none')
lgd.append('detected noise item')
plt.scatter(irt_prob_avg[noise==0],difficulty[noise==0],facecolors='none',edgecolors='k',s=60)
lgd.append('non-noise item')
plt.title('Correlation between difficulty and response')
plt.xlabel('Average response',fontsize=14)
plt.ylabel('Difficulty',fontsize=14)
l=plt.legend(lgd,frameon=True,fontsize=12)
l.get_frame().set_edgecolor('g')
return f
def vis_performance(gather_prec,gather_recal,path,asd='as1@5',vtype='nfrac'):
fig = plt.figure()
plt.plot(gather_recal.index, gather_recal.mean(axis=1),marker='o')
plt.plot(gather_prec.index, gather_prec.mean(axis=1),marker='^')
plt.errorbar(gather_recal.index, gather_recal.mean(axis=1), gather_recal.std(axis=1), linestyle='None')
plt.errorbar(gather_prec.index, gather_prec.mean(axis=1), gather_prec.std(axis=1), linestyle='None')
if vtype=='nfrac':
plt.title('Precision and recall under different noise fractions')
plt.xlabel('Noise fraction (percentile)')
plt.ylim(-0.05,1.1)
plt.yticks(np.arange(0,1.2,0.2))
plt.legend(['Recall','Precision'],loc=0)
plt.savefig(path+'gathered_dnoise_performance_nfrac_'+asd+'.pdf')
elif vtype=='astd':
plt.title('Precision and recall under different prior SD')
plt.xlabel('Prior standard deviation of discrimination')
plt.xlim(0.5,3.25)
plt.ylim(-0.05,1.1)
plt.yticks(np.arange(0,1.2,0.2))
plt.legend(['Recall','Precision'],loc=0)
plt.savefig(path+'gathered_dnoise_performance_asd_nfrac20.pdf')
plt.close(fig)
def gather_vary_nfrac(path,dataset,a_prior_std=1.5,clcomb='79',mcomb='m10',idx = [2,5,10,20,30,40,50,55]):
prefix = path+'dnoise_performance_'+dataset+'_s400_'
files = glob.glob(prefix+'*.txt')
#print(len(files))
asd = 'as'+str(a_prior_std).replace('.','@')
files = filter(lambda f: '_'+mcomb+'_' in f and asd in f and 'cl'+clcomb in f , files)
gather_prec = pd.DataFrame(index=idx)
gather_recal = pd.DataFrame(index=idx)
pfix1 = 'precision = '
pfix2 = 'recall = '
err_files = []
for f in files:
parse = re.split('_|\.',f[len(prefix)+1:])
#print(parse)
frac = int(parse[0])
#print(frac)
if frac not in idx:
continue
seed = parse[1]
with open(f,'r') as fr:
l = fr.readlines()
gather_prec.loc[frac,seed] = float(l[0][len(pfix1):])
gather_recal.loc[frac,seed] = float(l[1][len(pfix2):])
if np.isnan(gather_prec.loc[frac,seed]) or \
np.isnan(gather_recal.loc[frac,seed]):
print('find nan:',parse)
err_files.append('./test_data/noise_test/'+dataset+'/bc4/'+mcomb+'/'+parse[2]+'/irt_data_'+dataset+'_s400_f'+parse[0]+'_'+parse[1]+'_'+parse[2]+'_'+mcomb+'.csv')
return gather_prec,gather_recal,err_files
def vis_avg_all_clscombs_perform(dataset='mnist',a_prior_std=1.5,mcomb='m10',rpath='./results/bc4/mnist/m10/'):
errs = []
gather_precs=None
gather_recals=None
gather_prec_allcl = pd.DataFrame()
gather_recal_allcl = pd.DataFrame()
asd = 'as'+str(a_prior_std).replace('.','@')
for i,cls in enumerate(combinations(np.arange(10),2)):
#print(i)
cl1, cl2 = cls[0],cls[1]
comb = str(cl1)+str(cl2)
path = rpath+'cl'+comb+'/'
gather_prec,gather_recal, err = gather_vary_nfrac(path,dataset,a_prior_std,clcomb=comb,mcomb=mcomb)
if len(err)==0:
vis_performance(gather_prec,gather_recal,path,asd=asd)
errs+=err
if gather_precs is None:
gather_precs = gather_prec
gather_recals = gather_recal
gather_prec_allcl = pd.DataFrame(index=gather_prec.index)
gather_recal_allcl = pd.DataFrame(index=gather_recal.index)
else:
gather_precs+=gather_prec
gather_recals+=gather_recal
gather_prec_allcl[comb] = gather_prec.values.mean(axis=1)
gather_recal_allcl[comb] = gather_recal.values.mean(axis=1)
gather_precs /= i
gather_recals /= i
#vis_performance(gather_precs,gather_recals,rpath)
vis_performance(gather_prec_allcl,gather_recal_allcl,rpath,asd=asd)
if len(errs) > 0:
with open('./retest.sh','w') as wf:
for ef in errs:
wf.writelines('python betairt_test.py '+ef+' a_prior_std:'+str(a_prior_std)+'\n')
| 36.631818
| 173
| 0.618191
| 1,207
| 8,059
| 3.992544
| 0.212096
| 0.041502
| 0.013073
| 0.014941
| 0.470845
| 0.365844
| 0.350073
| 0.313136
| 0.240506
| 0.240506
| 0
| 0.037975
| 0.215784
| 8,059
| 220
| 174
| 36.631818
| 0.724525
| 0.025437
| 0
| 0.217143
| 0
| 0
| 0.11662
| 0.015804
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.057143
| 0
| 0.125714
| 0.005714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2f74385f195f0884b6d65f78882d41fbb6267cb
| 19,448
|
py
|
Python
|
models/transformer/transformer.py
|
lsgai/selene
|
ad23904cad2a5a292732ff350e7689c0b9e511f4
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
models/transformer/transformer.py
|
lsgai/selene
|
ad23904cad2a5a292732ff350e7689c0b9e511f4
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
models/transformer/transformer.py
|
lsgai/selene
|
ad23904cad2a5a292732ff350e7689c0b9e511f4
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME, BertConfig
from pytorch_transformers.modeling_bert import *
from pytorch_transformers.tokenization_bert import BertTokenizer
import pytorch_transformers.optimization
class BertEmbeddingsDNA(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddingsDNA, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
# label should not need to have ordering ?
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.config = config
if self.config.aa_type_emb:
print ('\n\nturn on the token-type style embed.\n\n')
## use 4 groups + 1 extra; the special tokens must map to all-zero, so CLS SEP PAD --> group 0
## 20 major amino acids --> 4 major groups
## or... mutation / no mutation --> 2 major groups; set "no mutation" = 0 as the base case
## we did not see experiments with AA-type embeddings greatly improve outcomes
## !! notice that the padding_idx=0 row will not stay zero after (re)initialization, so it MUST be MANUALLY RESET to 0
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
# if token_type_ids is None:
# token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
if self.config.aa_type_emb:
# @token_type_ids is batch x aa_len x domain_type --> output batch x aa_len x domain_type x dim
token_type_embeddings = self.token_type_embeddings(token_type_ids)
## must sum over domain (additive effect)
token_type_embeddings = torch.sum(token_type_embeddings,dim=2) # get batch x aa_len x dim
embeddings = words_embeddings + position_embeddings + token_type_embeddings
else:
embeddings = words_embeddings + position_embeddings # + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertEmbeddingsLabel(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddingsLabel, self).__init__()
print('TODO BertEmbeddingsLabel config, should log')
print(config)
self.config = config
self.word_embeddings = nn.Embedding(config.label_size, config.hidden_size) ## , padding_idx=0
# label should not need to have ordering ?
# self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
# self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
if self.config.scale_label_vec:
## if we freeze, then we will not use any layer norm. let's try using the vectors as they are.
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
## should always drop to avoid overfit
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None):
# seq_length = input_ids.size(1)
# if position_ids is None:
# position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
# position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
# if token_type_ids is None:
# token_type_ids = torch.zeros_like(input_ids)
# embeddings = self.word_embeddings(input_ids)
# position_embeddings = self.position_embeddings(position_ids)
# token_type_embeddings = self.token_type_embeddings(token_type_ids)
# embeddings = words_embeddings # + position_embeddings + token_type_embeddings
# if self.config.scale_label_vec:
# embeddings = self.LayerNorm(embeddings)
## should always drop to avoid overfit
# embeddings = self.dropout(embeddings)
##!! COMMENT we always use all the labels, so that we do not need to specify label-indexing.
## need only call @self.word_embeddings.weight
embeddings = self.LayerNorm(self.word_embeddings.weight) if self.config.scale_label_vec else self.word_embeddings.weight  ## LayerNorm only exists when scale_label_vec is set
embeddings = embeddings.expand(input_ids.shape[0],-1,-1) ## batch x num_label x dim
embeddings = self.dropout( embeddings )
return embeddings
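## Shape sketch (editor's note, added for clarity): with config.label_size = L
## and config.hidden_size = H, the forward pass above ignores the values inside
## @input_ids and only uses its batch size B:
##   self.word_embeddings.weight                -> (L, H)  one vector per label
##   optional LayerNorm (when scale_label_vec)  -> (L, H)
##   .expand(B, -1, -1)                         -> (B, L, H)  same label vectors for every example
## so every example in the batch sees the full, fixed label set.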
class BertModel2Emb(BertPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during Bert pretraining. This output is usually *not* a good summary
of the semantic content of the input, you're often better with averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape
``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config):
super(BertModel2Emb, self).__init__(config)
self.embeddings = BertEmbeddingsDNA(config)
self.embeddings_label = BertEmbeddingsLabel(config) ## label takes its own emb layer
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.embeddings.word_embeddings
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.embeddings.word_embeddings = new_embeddings
return self.embeddings.word_embeddings
def _resize_label_embeddings(self, new_num_tokens):
old_embeddings = self.embeddings_label.word_embeddings
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.embeddings_label.word_embeddings = new_embeddings
return self.embeddings_label.word_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def resize_label_embeddings(self, new_num_tokens=None):
""" Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
Arguments:
new_num_tokens: (`optional`) int:
New number of tokens in the embedding matrix. Increasing the size will add newly initialized
vectors at the end. Reducing the size will remove vectors from the end.
If not provided or None: does nothing and just returns a pointer to the input tokens
``torch.nn.Embeddings`` Module of the model.
Return: ``torch.nn.Embeddings``
Pointer to the input tokens Embeddings Module of the model
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_label_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.label_size = new_num_tokens
base_model.label_size = new_num_tokens
# Tie weights again if needed
if hasattr(self, 'tie_weights'):
self.tie_weights()
return model_embeds
def forward(self, input_ids, input_DNA, label_index_id, attention_mask=None, token_type_ids=None,
position_ids=None, head_mask=None):
##!! to avoid a lot of re-structuring, let's define @input_ids=>protein_vector from interaction network
## assume @input_ids is batch x 1 x dim, each batch is a protein so it has 1 vector
# if attention_mask is None:
# attention_mask = torch.ones_like(input_ids) ## probably don't need this very much.
# if we pass in mask and token_type, which we always do for batch mode
# # if token_type_ids is None:
# # token_type_ids = torch.zeros_like(input_ids)
# # We create a 3D attention mask from a 2D tensor mask.
# # Sizes are [batch_size, 1, 1, to_seq_length]
# # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# # this attention mask is more simple than the triangular masking of causal attention
# # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
# extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# # masked positions, this operation will create a tensor which is 0.0 for
# # positions we want to attend and -10000.0 for masked positions.
# # Since we are adding it to the raw scores before the softmax, this is
# # effectively the same as removing these entirely.
# extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
# extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
#print('Bert forward')
#print('input_DNA')
#print(input_DNA)
#print('____')
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
## need to split the @input_ids into AA side and label side, @input_DNA @label_index_id
## COMMENT
embedding_output = self.embeddings(input_DNA, position_ids=position_ids, token_type_ids=token_type_ids)
embedding_output_label = self.embeddings_label(label_index_id, position_ids=None, token_type_ids=None)
# concat into the original embedding
if self.config.ppi_front:
## masking may vary, because some proteins don't have vec emb
embedding_output = torch.cat([input_ids,embedding_output,embedding_output_label], dim=1)
## we add protein_vector as variable @input_ids
else:
## COMMENT
embedding_output = torch.cat([embedding_output,embedding_output_label], dim=1)
## @embedding_output is batch x num_aa x dim so append @embedding_output_label to dim=1
## (basically adding more words to @embedding_output)
# @embedding_output is just some type of embedding, the @encoder will apply attention weights
encoder_outputs = self.encoder(embedding_output,
attention_mask=None,
head_mask=head_mask)
## @extended_attention_mask must mask using the entire set of sequence + label input
sequence_output = encoder_outputs[0]
# pooled_output = self.pooler(sequence_output)
outputs = (sequence_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here pooled_output
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
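## Layout sketch (editor's note; shapes inferred from the code above):
##   embedding_output       : (batch, seq_len, hidden)    DNA/AA token embeddings
##   embedding_output_label : (batch, num_label, hidden)  one vector per label
##   concatenated input     : (batch, [1 +] seq_len + num_label, hidden)
##     the optional leading block of size 1 is the protein vector (@input_ids)
##     and is only present when config.ppi_front is set.
## The encoder preserves this layout, so downstream code can slice off the
## positions after the sequence to read one contextualised vector per label.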
class ElementWiseMultiplyLayer(nn.Module):
def __init__(self, hidden_size, n_genomic_features):
super(ElementWiseMultiplyLayer, self).__init__()
self.embedding_dim = hidden_size
self.num_labels = n_genomic_features ## about 919 for histone marks
self.weightMat = nn.Parameter(torch.Tensor(self.num_labels, self.embedding_dim)) # define the trainable parameter
self.bias = nn.Parameter(torch.Tensor(n_genomic_features))
def forward(self, x):
# x is [batch, nlabel, hidden]. element-wise mult and sum over hidden
return torch.sum(x * self.weightMat, dim = 2) + self.bias
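## Equivalent formulation (editor's note): for every label k,
##   out[b, k] = sum_h x[b, k, h] * weightMat[k, h] + bias[k]
## i.e. an independent dot product per label; with einsum this would read
##   torch.einsum('bkh,kh->bk', x, self.weightMat) + self.bias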
class TokenClassificationBase (BertPreTrainedModel):
## !! we change this to do 1-hot prediction
## take in K labels so we have vector of 1-hot length K
## for each label, we get a vector output from BERT, then we predict 0/1
def __init__(self, config_name, sequence_length, n_genomic_features):
## create config object base on path name. bert needs config object
self.config = BertConfig.from_pretrained(config_name)
super(TokenClassificationBase, self).__init__(self.config)
self.sequence_length = sequence_length
self.num_labels = n_genomic_features ## about 919 for histone marks
self.bert = BertModel2Emb(self.config)
self.dropout = nn.Dropout(self.config.hidden_dropout_prob)
# this classifier uses same weights for all labels
self.classifier = nn.Sequential(nn.Linear(self.config.hidden_size, 1),
nn.Sigmoid())
# this classifier using CLS embedding with different weights for each label
#self.classifier2 = nn.Sequential(nn.Linear(self.config.hidden_size, self.num_labels),
# nn.Sigmoid())
# uses label embeddings and learns different weights for each
print('TokenClassificationBase: Using classifier1')
self.classifier3 = nn.Sequential(
ElementWiseMultiplyLayer(self.config.hidden_size, self.num_labels),
nn.Sigmoid()
)
self.init_weights() # https://github.com/lonePatient/Bert-Multi-Label-Text-Classification/issues/19
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def _init_weights(self, module):
# https://github.com/huggingface/transformers/blob/master/src/transformers/modeling_bert.py#L535
""" Initialize the weights, including for our custom layer """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, ElementWiseMultiplyLayer):
module.weightMat.data.normal_(mean=0.0, std=self.config.initializer_range)
module.bias.data.zero_()
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def forward(self, x):
##!! @x in Transformer is batch x word_indexing
## COMMENT: original model must take only x=batch x 4 x 1000 because @selene pipeline requires only this input
## default @x is DNA + label --> so it is already an embedding
## COMMENT convert @x into word-indexing style. so we want @x = [[1,1,2,2,...], [3,3,4,4,...]] --> batch x seq_len
##!! @label_index_id can be determined ahead of time
# label_index_id = self.label_range.expand(real_batch_size,-1) ## batch x num_label ... 1 row for 1 ob in batch
## COMMENT use @x as indexing-style
##!! observe that we pass in @x twice. this is a trick to get batch_size.
outputs = self.bert(None, x, x, position_ids=None, token_type_ids=None)
sequence_output = outputs[0][:,self.sequence_length::,:] ## last layer.
## last layer outputs is batch_num x num_label x hidden_dim
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output).squeeze(2) ## want batch x len x 1 --> batch x num_label
#logits = self.classifier3(sequence_output) ## want batch x len x 1 --> batch x num_label
#cls_output = outputs[0][:,0,:]
#cls_output = self.dropout(cls_output)
#print('TODO cls', cls_output.shape)
#logits = self.classifier2(cls_output)
#print('TODO logits', logits.shape)
#print(logits.shape)
return logits # batch x num_label
def criterion():
return nn.BCELoss()
def get_optimizer(lr):
# adam with L2 norm
#return (torch.optim.Adam, {"lr": lr, "weight_decay": 1e-6})
#https://github.com/datduong/BertGOAnnotation/blob/master/finetune/RunTokenClassifyProtData.py#L313
# Prepare optimizer and schedule (linear warmup and decay)
'''
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
'''
return (pytorch_transformers.optimization.AdamW, {"lr":lr, "weight_decay": 1e-6})
# using deepsea optimizer
#return (torch.optim.SGD,
# {"lr": lr, "weight_decay": 1e-6, "momentum": 0.9})
| 47.783784
| 121
| 0.722285
| 2,744
| 19,448
| 4.925656
| 0.190962
| 0.020642
| 0.014205
| 0.011986
| 0.349216
| 0.295132
| 0.27116
| 0.230098
| 0.19636
| 0.182302
| 0
| 0.009456
| 0.189737
| 19,448
| 406
| 122
| 47.901478
| 0.848268
| 0.535736
| 0
| 0.207547
| 0
| 0
| 0.018452
| 0.002768
| 0
| 0
| 0
| 0.002463
| 0
| 1
| 0.106918
| false
| 0
| 0.09434
| 0.012579
| 0.301887
| 0.031447
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2f98c67de6fff06f026a352c43e196aef39bfda
| 1,166
|
py
|
Python
|
setup.py
|
jackschultz/dbactor
|
57ca01bb257d92b32d6003b56cec69e930b6ea73
|
[
"MIT"
] | 2
|
2021-11-18T09:35:42.000Z
|
2021-11-18T14:46:30.000Z
|
setup.py
|
jackschultz/dbactor
|
57ca01bb257d92b32d6003b56cec69e930b6ea73
|
[
"MIT"
] | null | null | null |
setup.py
|
jackschultz/dbactor
|
57ca01bb257d92b32d6003b56cec69e930b6ea73
|
[
"MIT"
] | null | null | null |
from setuptools import setup
__version__ = '0.0.3'
REQUIRES = ['psycopg2-binary']
EXTRAS_REQUIRE = {
'sqlalchemy': ['sqlalchemy'],
'jinjasql': ['jinjasql'],
'pandas': ['jinjasql', 'pandas'],
}
extras_lists = [vals for k, vals in EXTRAS_REQUIRE.items()]
# flattening the values in EXTRAS_REQUIRE from popular stack overflow question 952914
all_extras_require = list(set([item for sublist in extras_lists for item in sublist]))
EXTRAS_REQUIRE['all'] = all_extras_require
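# e.g. with the dict above, EXTRAS_REQUIRE['all'] ends up as
# ['sqlalchemy', 'jinjasql', 'pandas'] in some arbitrary order (the set() only
# drops the duplicate 'jinjasql').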
TESTS_REQUIRE = REQUIRES + all_extras_require + ['pytest', 'testing.postgresql']
setup_dict = dict(name='dbactor',
version=__version__,
description='DBActor: ORM helper and alternative',
long_description=open('README.md').read(),
url='http://github.com/jackschultz/dbactor',
author='Jack Schultz',
author_email='jackschultz23@gmail.com',
license='MIT',
install_requires=REQUIRES,
extras_require=EXTRAS_REQUIRE,
tests_require=TESTS_REQUIRE,
packages=['dbactor'])
setup(**setup_dict)
| 36.4375
| 86
| 0.628645
| 123
| 1,166
| 5.715447
| 0.536585
| 0.16643
| 0.068279
| 0.071124
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013825
| 0.255575
| 1,166
| 31
| 87
| 37.612903
| 0.796083
| 0.071184
| 0
| 0
| 0
| 0
| 0.218316
| 0.021277
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.04
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2fb06c89af3c0d869e1710b20eb4d1e629dd002
| 725
|
py
|
Python
|
CV0101EN-09.02-frames_to_video.py
|
reddyprasade/Computer-Vision-with-Python
|
8eebec61f0fdacb05e122460d6845a32ae506c8f
|
[
"Apache-2.0"
] | null | null | null |
CV0101EN-09.02-frames_to_video.py
|
reddyprasade/Computer-Vision-with-Python
|
8eebec61f0fdacb05e122460d6845a32ae506c8f
|
[
"Apache-2.0"
] | null | null | null |
CV0101EN-09.02-frames_to_video.py
|
reddyprasade/Computer-Vision-with-Python
|
8eebec61f0fdacb05e122460d6845a32ae506c8f
|
[
"Apache-2.0"
] | null | null | null |
import cv2
import numpy as np
import os
def frames_to_video(inputpath,outputpath,fps):
image_array = []
files = [f for f in os.listdir(inputpath) if os.path.isfile(os.path.join(inputpath, f))]
files.sort(key = lambda x: int(x[5:-4]))
for i in range(len(files)):
img = cv2.imread(inputpath + files[i])
size = (img.shape[1],img.shape[0])
img = cv2.resize(img,size)
image_array.append(img)
fourcc = cv2.VideoWriter_fourcc('D', 'I', 'V', 'X')
out = cv2.VideoWriter(outputpath,fourcc, fps, size)
for i in range(len(image_array)):
out.write(image_array[i])
out.release()
inputpath = 'folder path'
outpath = 'video file path/video.mp4'
fps = 29
frames_to_video(inputpath,outpath,fps)
| 29
| 75
| 0.66069
| 113
| 725
| 4.159292
| 0.477876
| 0.085106
| 0.055319
| 0.093617
| 0.059574
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020443
| 0.190345
| 725
| 24
| 76
| 30.208333
| 0.780239
| 0
| 0
| 0
| 0
| 0
| 0.055172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.142857
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2febe7880974ca6e91553584ed0bba9eac9b426
| 5,303
|
py
|
Python
|
pbt/estimator_worker.py
|
Octavian-ai/mac-graph
|
3ef978e8a6f79f2dcc46783d34f01934aabf7f19
|
[
"Unlicense"
] | 116
|
2018-07-11T13:19:56.000Z
|
2021-07-26T17:22:44.000Z
|
pbt/estimator_worker.py
|
Octavian-ai/mac-graph
|
3ef978e8a6f79f2dcc46783d34f01934aabf7f19
|
[
"Unlicense"
] | 1
|
2019-02-11T02:25:02.000Z
|
2019-02-11T17:05:19.000Z
|
pbt/estimator_worker.py
|
Octavian-ai/mac-graph
|
3ef978e8a6f79f2dcc46783d34f01934aabf7f19
|
[
"Unlicense"
] | 21
|
2018-10-11T23:03:22.000Z
|
2021-07-14T22:42:08.000Z
|
import tensorflow as tf
import numpy as np
import traceback
import os.path
import uuid
from .worker import Worker
from .param import *
from .params import *
import logging
logger = logging.getLogger(__name__)
class HeartbeatHook(tf.train.SessionRunHook):
def __init__(self, heartbeat, should_continue):
self.heartbeat = heartbeat
self.should_continue = should_continue
def after_run(self, run_context, run_values):
self.heartbeat()
try:
self.should_continue()
except StopIteration:
run_context.request_stop()
def end(self, session):
self.heartbeat()
class HeatbeatSaverListener(tf.train.CheckpointSaverListener):
def __init__(self, heartbeat):
self.heartbeat = heartbeat
def after_save(self, session, global_step_value):
self.heartbeat()
def resize_and_load(var, val, sess):
o_shape = var.get_shape().as_list()
i_shape = list(val.shape)
if o_shape != i_shape:
resize_dim = 1 # may not always hold true, assumption for now
delta = o_shape[resize_dim] - i_shape[resize_dim]
if delta != 0:
tf.logging.info("reshape var {} by {}".format(var.name, deta))
if delta < 0:
val = val[:,:o_shape[1]]
elif delta > 0:
val = np.pad(val, ((0,0),(0, delta)), 'reflect')
var.load(val, sess)
def gen_scaffold(params):
def init_fn(scaffold, session):
tf.logging.info("Running Scaffold init_fn", params)
vs = params["vars"]
if vs is not None:
for var in tf.trainable_variables():
if var.name in vs:
val = vs[var.name]
resize_and_load(var, val, session)
# return tf.train.Scaffold(init_fn=lambda scaffold, session: True)
return tf.train.Scaffold(init_fn=init_fn)
class MetricHook(tf.train.SessionRunHook):
def __init__(self, metrics, cb, key=0):
self.metrics = metrics
self.cb = cb
self.key = key
self.readings = []
def before_run(self, run_context):
return tf.train.SessionRunArgs(self.metrics)
def after_run(self, run_context, run_values):
if run_values.results is not None:
self.readings.append(run_values.results[self.key][1])
def end(self, session):
if len(self.readings) > 0:
self.cb(np.average(self.readings))
self.readings.clear()
class EstimatorWorker(Worker):
def __init__(self, init_params, hyperparam_spec):
self.estimator = None
self.trained = False
if init_params["use_warm_start"]:
assert "model_id" in hyperparam_spec, "Warm start requires model_id hyperparam"
super().__init__(init_params, hyperparam_spec)
def setup_estimator(self):
if self.init_params["use_warm_start"] and self.warm_start_dir is not None:
model_dir = self.model_dir
warm_start = self.warm_start_dir
else:
model_dir = os.path.join(self.init_params["model_dir"], self.init_params["run"], str(uuid.uuid4()))
warm_start = None
self.estimator = tf.estimator.Estimator(
model_fn=self.init_params["model_fn"],
model_dir=model_dir,
config=self.init_params.get("run_config", None),
params=vars(self.friendly_params),
warm_start_from=warm_start
)
self.trained = False
def ensure_warm(self):
if self.estimator is None:
self.setup_estimator()
# We need to warm up the estimator
if not self.init_params["use_warm_start"] and not self.trained:
self.do_step(1, lambda:None, lambda:None)
def extract_vars(self):
if "vars" in self._params:
self.ensure_warm()
var_names = self.estimator.get_variable_names()
vals = {k:self.estimator.get_variable_value(k) for k in var_names}
self._params["vars"] = VariableParam(vals)
# --------------------------------------------------------------------------
# Worker class stub impl
# --------------------------------------------------------------------------
def pre_params_get(self):
if not self.init_params["use_warm_start"]:
self.extract_vars()
def post_params_set(self):
self.setup_estimator()
def do_step(self, steps, heartbeat, should_continue):
# We lazily initialise the estimator as during unpickling we may not have all the params
if self.estimator is None:
self.setup_estimator()
self.estimator.train(
self.init_params["train_input_fn"](self.friendly_params),
steps=steps,
hooks=[HeartbeatHook(heartbeat, should_continue)],
saving_listeners=[HeatbeatSaverListener(heartbeat)],
)
# TODO: put heartbeat and should_continue into a hook
heartbeat()
self.trained = True
def do_eval(self):
self.ensure_warm()
return self.estimator.evaluate(self.init_params["eval_input_fn"](self.friendly_params))
# --------------------------------------------------------------------------
# Pickling
# --------------------------------------------------------------------------
def __getstate__(self):
return {
"_params": self.params,
"results": self.results,
"id": self.id,
"total_steps": self.total_steps,
"recent_steps": self.recent_steps,
"time_started": self.time_started,
}
def __setstate__(self, state):
self.id = state.get("id", uuid.uuid4())
self.time_started = 0
self.performance = (0,0)
self.total_steps = state.get("total_steps", 0)
self.recent_steps = state.get("recent_steps", 0)
self.results = state.get("results", {})
self._params = state.get("_params", {})
self.estimator = None
self.trained = False
| 24.896714
| 103
| 0.666227
| 715
| 5,303
| 4.709091
| 0.239161
| 0.03564
| 0.04158
| 0.020196
| 0.158598
| 0.125928
| 0.07128
| 0.061776
| 0
| 0
| 0
| 0.004294
| 0.165567
| 5,303
| 212
| 104
| 25.014151
| 0.75661
| 0.115595
| 0
| 0.134328
| 0
| 0
| 0.066938
| 0
| 0
| 0
| 0
| 0.004717
| 0.007463
| 1
| 0.164179
| false
| 0
| 0.059701
| 0.014925
| 0.283582
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c01243ea6bcaf63004fe1fe3e588e8eca1e226b
| 4,064
|
py
|
Python
|
tracer/main.py
|
LzVv123456/Deep-Reinforced-Tree-Traversal
|
8e117590c8cd51c9fc9c033232658876160fa638
|
[
"MIT"
] | 20
|
2021-07-08T08:33:27.000Z
|
2022-01-14T03:27:35.000Z
|
tracer/main.py
|
abcxubu/Deep-Reinforced-Tree-Traversal
|
8e117590c8cd51c9fc9c033232658876160fa638
|
[
"MIT"
] | 1
|
2021-10-01T12:39:11.000Z
|
2021-10-01T13:19:43.000Z
|
tracer/main.py
|
abcxubu/Deep-Reinforced-Tree-Traversal
|
8e117590c8cd51c9fc9c033232658876160fa638
|
[
"MIT"
] | 3
|
2021-07-08T07:34:48.000Z
|
2022-01-10T11:41:59.000Z
|
import os
import glob
import yaml
import torch
import argparse
from addict import Dict
from dataset import *
from init import *
from utilities import *
from train import *
def parse_args():
parser = argparse.ArgumentParser(description='infer')
parser.add_argument('--config', type=str, default='./tracer/train_config.yaml',
help='path to config file')
return parser.parse_args()
def main(args):
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# load config file
cfgs = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
cfgs = Dict(cfgs)
# get train, val dataset
train_data_path = sorted(glob.glob(cfgs.train_path + '/**/'))
train_set = GetDateset(train_data_path, cfgs)
train_loader = DataLoader(train_set, batch_size=1, shuffle=True, num_workers=0)
# initialize everything
policy_net, target_net, optimizer, scheduler, memory, steps_done, start_epoch = \
inilization(cfgs, args)
state_dict = {}
all_trace_length = 0
all_tree_length = 0
# train epoch
for epoch in range(start_epoch, cfgs.epoch):
print('epoch: {}'.format(epoch))
for _, sample in enumerate(train_loader):
env, tree, start_pts, name = tensor_to_numpy(sample)
# prepare training regions and data mode
training_list = prepare_training_area(start_pts)
# save some statistic values
state_dict = prepare_stat_dict(state_dict, name)
all_trace_length = 0
all_tree_length = 0
for item in training_list:
print('training information', item)
start_num, region = item
training_agent = Training_Agent(args, cfgs, target_net, policy_net,
env, tree, start_num, steps_done, optimizer, scheduler, memory)
target_net, policy_net, trace_trajectory, STEPS_DONE = training_agent.train()
region_tree, _ = get_region_tree(start_num, tree)
match_rate = get_match_rate(region_tree, trace_trajectory, cfgs.match_dist)
print('match rate', np.round(match_rate * 100, 2))
if region == 'l':
state_dict[name]['LCA progress'].append(np.round(match_rate*100, 2))
elif region == 'r':
state_dict[name]['RCA progress'].append(np.round(match_rate*100, 2))
all_tree_length += len(region_tree)
all_trace_length += len(region_tree) * match_rate
all_finish_rate = np.round(all_trace_length / all_tree_length * 100, 2)
state_dict[name]['ALL progress'].append(all_finish_rate)
if len(state_dict[name]['LCA progress']) > 0:
state_dict[name]['LCA average finish rate'] = sum(state_dict[name]['LCA progress'])/len(state_dict[name]['LCA progress'])
if len(state_dict[name]['RCA progress']) > 0:
state_dict[name]['RCA average finish rate'] = sum(state_dict[name]['RCA progress'])/len(state_dict[name]['RCA progress'])
state_dict[name]['ALL average finish rate'] = sum(state_dict[name]['ALL progress'])/len(state_dict[name]['ALL progress'])
# print stat dict
for key in sorted(state_dict.keys()):
print(key, state_dict[key])
# Update the target network
if epoch % cfgs.update_epoch == 0:
target_net.load_state_dict(policy_net.state_dict())
# save model
if (epoch+1)%cfgs.save_freq==0:
if not os.path.exists(cfgs.save_path):
os.makedirs(cfgs.save_path)
torch.save({
'model_state_dict': target_net.module.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'frames': memory.frame,
'steps': steps_done,
'epochs': epoch
}, cfgs.save_path + '/agent_' + str(epoch+1) + '.pth')
if __name__ == '__main__':
args = parse_args()
main(args)
| 37.62963
| 137
| 0.615404
| 513
| 4,064
| 4.625731
| 0.272904
| 0.094817
| 0.082174
| 0.033713
| 0.230089
| 0.148757
| 0.094817
| 0.053097
| 0
| 0
| 0
| 0.009466
| 0.272146
| 4,064
| 108
| 138
| 37.62963
| 0.792765
| 0.04749
| 0
| 0.054054
| 0
| 0
| 0.09943
| 0.006732
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.135135
| 0
| 0.175676
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c0675ff607912b34920445802ae59f9d31371c8
| 4,222
|
py
|
Python
|
test/functional/bsv-protoconf.py
|
bxlkm1/yulecoin
|
3605faf2ff2e3c7bd381414613fc5c0234ad2936
|
[
"OML"
] | 8
|
2019-08-02T02:49:42.000Z
|
2022-01-17T15:51:48.000Z
|
test/functional/bsv-protoconf.py
|
bxlkm1/yulecoin
|
3605faf2ff2e3c7bd381414613fc5c0234ad2936
|
[
"OML"
] | null | null | null |
test/functional/bsv-protoconf.py
|
bxlkm1/yulecoin
|
3605faf2ff2e3c7bd381414613fc5c0234ad2936
|
[
"OML"
] | 4
|
2019-08-02T02:50:44.000Z
|
2021-05-28T03:21:38.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin SV developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time, math
from test_framework.blocktools import create_block, create_coinbase
class BsvProtoconfTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.setup_nodes()
def run_test(self):
# Testing scope: our maximal protocol message length is smaller than the remote node's, and the remote node has to respect it.
ELEMENTS_PER_1MiB = 29126
ELEMENTS_PER_2MiB = 58254
expected_inv_len = CInv.estimateMaxInvElements(LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH) #29126 elements
assert_equal(expected_inv_len, ELEMENTS_PER_1MiB)
logger.info("Our max message size: {} B, which represents {} elements. ".format(LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH, expected_inv_len))
test_node = NodeConnCB()
wanted_inv_lengths = []
def on_getdata(conn, message):
wanted_inv_lengths.append(len(message.inv))
test_node.on_getdata = on_getdata
connections = []
connections.append(
NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
test_node.add_connection(connections[0])
NetworkThread().start() # Start up network handling in another thread
def send_protoconf_default_msg_length(conn):
conn.send_message(msg_protoconf(CProtoconf(1, LEGACY_MAX_PROTOCOL_PAYLOAD_LENGTH)))
test_node.send_protoconf = send_protoconf_default_msg_length
# 0. Prepare initial block. Needed so that GETDATA can be sent back.
self.nodes[0].generate(1)
# 1. Receive bitcoind's protoconf and save max_recv_payload_length.
test_node.wait_for_protoconf()
max_recv_payload_length = test_node.last_message["protoconf"].protoconf.max_recv_payload_length
maxInvElements = CInv.estimateMaxInvElements(max_recv_payload_length) #58254
assert_equal(maxInvElements, ELEMENTS_PER_2MiB)
logger.info("Received bitcoind max message size: {} B, which represents {} elements. ".format(max_recv_payload_length, maxInvElements))
# 2. Send bitcoind Inv message.
test_node.send_message(msg_inv([CInv(1, i) for i in range(0, maxInvElements)]))
test_node.sync_with_ping()
assert_equal(len(self.nodes[0].listbanned()), 0) #not banned
# 2.1. Receive GetData.
test_node.wait_for_getdata()
# 2.2. We should receive 2 GetData messages with 1MB size (29126 elements) and 1 GetData message with 2 elements.
assert_equal(wanted_inv_lengths[0], expected_inv_len)
assert_equal(wanted_inv_lengths[1], expected_inv_len)
assert_equal(wanted_inv_lengths[2], 2)
assert_equal(len(wanted_inv_lengths), 3)
### TEST WITH maxInvElements - 1, maxInvElements and maxInvElements + 1
# 1. Send bitcoind Inv message that is smaller than max_recv_payload_length.
test_node.send_message(msg_inv([CInv(1, i) for i in range(0, maxInvElements - 1)]))
test_node.sync_with_ping()
assert_equal(len(self.nodes[0].listbanned()), 0) #not banned
# 2. Send bitcoind Inv message that is equal to max_recv_payload_length.
test_node.send_message(msg_inv([CInv(1, i) for i in range(0, maxInvElements)]))
test_node.sync_with_ping()
assert_equal(len(self.nodes[0].listbanned()), 0) #not banned
# 3. Send bitcoind Inv message that is larger than max_recv_payload_length.
test_node.send_message(msg_inv([CInv(1, i) for i in range(0, maxInvElements + 1)]))
test_node.wait_for_disconnect()
assert(self.nodes[0].closed)# disconnected
assert_equal(len(self.nodes[0].listbanned()), 1) #banned
logger.info("Banned nodes : {}".format(self.nodes[0].listbanned()))
if __name__ == '__main__':
BsvProtoconfTest().main()
| 43.979167
| 143
| 0.709853
| 571
| 4,222
| 4.982487
| 0.283713
| 0.044991
| 0.02812
| 0.056239
| 0.400703
| 0.296661
| 0.247452
| 0.235501
| 0.175747
| 0.175747
| 0
| 0.026277
| 0.197774
| 4,222
| 95
| 144
| 44.442105
| 0.813699
| 0.243013
| 0
| 0.140351
| 0
| 0
| 0.05454
| 0
| 0
| 0
| 0
| 0
| 0.192982
| 1
| 0.087719
| false
| 0
| 0.087719
| 0
| 0.192982
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c09b1ff084d1e9df9670c57209d4a2a65e97d3c
| 9,838
|
py
|
Python
|
actor_critic/trainer.py
|
zamlz/dlcampjeju2018-I2A-cube
|
85ae7a2084ca490ea685ff3d30e82720fb58c0ea
|
[
"MIT"
] | 14
|
2018-07-19T03:56:45.000Z
|
2019-10-01T12:09:01.000Z
|
actor_critic/trainer.py
|
zamlz/dlcampjeju2018-I2A-cube
|
85ae7a2084ca490ea685ff3d30e82720fb58c0ea
|
[
"MIT"
] | null | null | null |
actor_critic/trainer.py
|
zamlz/dlcampjeju2018-I2A-cube
|
85ae7a2084ca490ea685ff3d30e82720fb58c0ea
|
[
"MIT"
] | null | null | null |
import gym
import numpy as np
import tensorflow as tf
import time
from actor_critic.policy import A2CBuilder
from actor_critic.util import discount_with_dones, cat_entropy, fix_tf_name
from common.model import NetworkBase
from common.multiprocessing_env import SubprocVecEnv
from tqdm import tqdm
class ActorCritic(NetworkBase):
def __init__(self, sess, a2c_arch, ob_space, ac_space,
pg_coeff=1.0, vf_coeff=0.5, ent_coeff=0.01, max_grad_norm=0.5,
lr=7e-4, alpha=0.99, epsilon=1e-5, summarize=False):
self.sess = sess
self.nact = ac_space.n
self.ob_space = ob_space
# Actions, Advantages, and Reward
self.actions = tf.placeholder(tf.int32, [None], name='actions')
self.advantages = tf.placeholder(tf.float32, [None], name='advantages')
self.rewards = tf.placeholder(tf.float32, [None], name='rewards')
self.depth = tf.placeholder(tf.float32, [None], name='scramble_depth')
# setup the models
self.step_model = A2CBuilder(self.sess, a2c_arch, ob_space, ac_space, reuse=False)
self.train_model = A2CBuilder(self.sess, a2c_arch, ob_space, ac_space, reuse=True)
# Negative log probs of actions
neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.train_model.pi, labels=self.actions)
# Policy Gradients Loss, Value Function Loss, Entropy, and Full Loss
self.pg_loss = tf.reduce_mean(self.advantages * neglogpac)
self.vf_loss = tf.reduce_mean(tf.square(tf.squeeze(self.train_model.vf) - self.rewards) / 2.0)
self.entropy = tf.reduce_mean(cat_entropy(self.train_model.pi))
self.loss = pg_coeff*self.pg_loss - ent_coeff*self.entropy + vf_coeff*self.vf_loss
self.mean_rew = tf.reduce_mean(self.rewards)
self.mean_depth = tf.reduce_mean(self.depth)
# Find the model parameters and their gradients
with tf.variable_scope('a2c_model'):
self.params = tf.trainable_variables()
grads = tf.gradients(self.loss, self.params)
if max_grad_norm is not None:
grads, grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads = list(zip(grads, self.params))
# Setup the optimizer
trainer = tf.train.RMSPropOptimizer(learning_rate=lr, decay=alpha, epsilon=epsilon)
self.opt = trainer.apply_gradients(grads)
# For some awesome tensorboard stuff
if summarize:
tf.summary.scalar('Loss', self.loss)
tf.summary.scalar('Entropy', self.entropy)
tf.summary.scalar('Policy Gradient Loss', self.pg_loss)
tf.summary.scalar('Value Function Loss', self.vf_loss)
tf.summary.scalar('Rewards', self.mean_rew)
tf.summary.scalar('Depth', self.mean_depth)
# fix tf scopes if we are loading a scope that is different from the saved instance
#name_scope = tf.contrib.framework.get_name_scope()
#if len(name_scope) != 0:
# self.params = { fix_tf_name(v.name, name_scope): v for v in self.params }
#else:
# self.params = { fix_tf_name(v.name): v for v in self.params }
# Initialize the tensorflow saver
self.saver = tf.train.Saver(self.params, max_to_keep=5)
# Single training step
def train(self, obs, rewards, masks, actions, values, depth, step, summary_op=None):
advantages = rewards - values
feed_dict = {
self.actions: actions,
self.advantages: advantages,
self.rewards: rewards,
self.depth: depth,
}
inputs = self.train_model.get_inputs()
mapped_input = self.train_model.transform_input(obs)
for transformed_input, inp in zip(mapped_input, inputs):
feed_dict[inp] = transformed_input
ret_vals = [
self.loss,
self.pg_loss,
self.vf_loss,
self.entropy,
self.mean_rew,
self.mean_depth,
self.opt,
]
if summary_op is not None:
ret_vals.append(summary_op)
return self.sess.run(ret_vals, feed_dict=feed_dict)
# Given an observation, perform an action
def act(self, obs, stochastic=True):
return self.step_model.step(obs, stochastic=stochastic)
# Return the value of the value function
def critique(self, obs):
return self.step_model.value(obs)
# The function that trains the a2c model
def train(env_fn = None,
spectrum = False,
a2c_arch = None,
nenvs = 16,
nsteps = 100,
max_iters = 1e6,
gamma = 0.99,
pg_coeff = 1.0,
vf_coeff = 0.5,
ent_coeff = 0.01,
max_grad_norm = 0.5,
lr = 7e-4,
alpha = 0.99,
epsilon = 1e-5,
log_interval = 100,
summarize = True,
load_path = None,
log_path = None,
cpu_cores = 1):
# Construct the vectorized parallel environments
envs = [ env_fn for _ in range(nenvs) ]
envs = SubprocVecEnv(envs)
# Set some random seeds for the environment
envs.seed(0)
if spectrum:
envs.spectrum()
ob_space = envs.observation_space.shape
nw, nh, nc = ob_space
ac_space = envs.action_space
obs = envs.reset()
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=cpu_cores,
intra_op_parallelism_threads=cpu_cores )
tf_config.gpu_options.allow_growth = True
with tf.Session(config=tf_config) as sess:
actor_critic = ActorCritic(sess, a2c_arch, ob_space, ac_space,
pg_coeff, vf_coeff, ent_coeff, max_grad_norm,
lr, alpha, epsilon, summarize)
load_count = 0
if load_path is not None:
actor_critic.load(load_path)
print('Loaded a2c')
summary_op = tf.summary.merge_all()
writer = tf.summary.FileWriter(log_path, graph=sess.graph)
sess.run(tf.global_variables_initializer())
batch_ob_shape = (-1, nw, nh, nc)
dones = [False for _ in range(nenvs)]
episode_rewards = np.zeros((nenvs, ))
final_rewards = np.zeros((nenvs, ))
print('a2c Training Start!')
print('Model will be saved on intervals of %i' % (log_interval))
for i in tqdm(range(load_count + 1, int(max_iters) + 1), ascii=True, desc='ActorCritic'):
# Create the minibatch lists
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_depth = [], [], [], [], [], []
total_reward = 0
for n in range(nsteps):
# Get the actions and values from the actor critic, we don't need neglogp
actions, values, neglogp = actor_critic.act(obs)
mb_obs.append(np.copy(obs))
mb_actions.append(actions)
mb_values.append(values)
mb_dones.append(dones)
obs, rewards, dones, info = envs.step(actions)
total_reward += np.sum(rewards)
episode_rewards += rewards
masks = 1 - np.array(dones)
final_rewards *= masks
final_rewards += (1 - masks) * episode_rewards
episode_rewards *= masks
mb_rewards.append(rewards)
mb_depth.append(np.array([ info_item['scramble_depth'] for info_item in info ]))
mb_dones.append(dones)
# Convert batch steps to batch rollouts
mb_obs = np.asarray(mb_obs, dtype=np.float32).swapaxes(1,0).reshape(batch_ob_shape)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1,0)
mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1,0)
mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1,0)
mb_dones = np.asarray(mb_dones, dtype=np.float32).swapaxes(1,0)
mb_depth = np.asarray(mb_depth, dtype=np.int32).swapaxes(1,0)
mb_masks = mb_dones[:, :-1]
mb_dones = mb_dones[:, 1:]
last_values = actor_critic.critique(obs).tolist()
# discounting
for n, (rewards, d, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
rewards = rewards.tolist()
d = d.tolist()
if d[-1] == 0:
rewards = discount_with_dones(rewards+[value], d+[0], gamma)[:-1]
else:
rewards = discount_with_dones(rewards, d, gamma)
mb_rewards[n] = rewards
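# Worked example (editor's note, assuming discount_with_dones follows the usual
# baselines-style implementation where the return is reset wherever dones == 1):
# with gamma = 0.99, rewards = [0, 0, 1], dones = [0, 0, 0] and bootstrap
# value = 0.5,
#   discount_with_dones([0, 0, 1, 0.5], [0, 0, 0, 0], 0.99)[:-1]
#     = [0.99**2 * 1 + 0.99**3 * 0.5,  0.99 * 1 + 0.99**2 * 0.5,  1 + 0.99 * 0.5]
# i.e. each entry is the discounted sum of future rewards from that step onward.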
# Flatten the whole minibatch
mb_rewards = mb_rewards.flatten()
mb_actions = mb_actions.flatten()
mb_values = mb_values.flatten()
mb_masks = mb_masks.flatten()
mb_depth = mb_depth.flatten()
# Save the information to tensorboard
if summarize:
loss, policy_loss, value_loss, policy_ent, mrew, mdp, _, summary = actor_critic.train(
mb_obs, mb_rewards, mb_masks, mb_actions, mb_values, mb_depth, i, summary_op)
writer.add_summary(summary, i)
else:
loss, policy_loss, value_loss, policy_ent, mrew, mdp, _ = actor_critic.train(
mb_obs, mb_rewards, mb_masks, mb_actions, mb_values, mb_depth, i)
if i % log_interval == 0:
actor_critic.save(log_path, i)
actor_critic.save(log_path, 'final')
print('a2c model is finished training')
| 37.838462
| 102
| 0.593617
| 1,249
| 9,838
| 4.470777
| 0.224179
| 0.021669
| 0.015043
| 0.012536
| 0.186246
| 0.145595
| 0.11909
| 0.08793
| 0.087213
| 0.063037
| 0
| 0.016207
| 0.310124
| 9,838
| 259
| 103
| 37.984556
| 0.806542
| 0.103883
| 0
| 0.034286
| 0
| 0
| 0.026861
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.051429
| 0.011429
| 0.102857
| 0.022857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c0bbff19246f88fe29603b2519f950e3178d9cc
| 23,504
|
py
|
Python
|
src/model_ode.py
|
fkhiro/kws-ode
|
5751f9b665511908b26e77f6ea5a97bf87823aab
|
[
"MIT"
] | 5
|
2020-08-12T07:24:12.000Z
|
2022-02-23T14:04:16.000Z
|
src/model_ode.py
|
fkhiro/kws-ode
|
5751f9b665511908b26e77f6ea5a97bf87823aab
|
[
"MIT"
] | null | null | null |
src/model_ode.py
|
fkhiro/kws-ode
|
5751f9b665511908b26e77f6ea5a97bf87823aab
|
[
"MIT"
] | 1
|
2020-09-03T07:28:19.000Z
|
2020-09-03T07:28:19.000Z
|
from enum import Enum
import hashlib
import math
import os
import random
import re
from chainmap import ChainMap
from torch.autograd import Variable
import librosa
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from .manage_audio import AudioPreprocessor
from torchdiffeq import odeint_adjoint as odeint
import pickle
class SimpleCache(dict):
def __init__(self, limit):
super().__init__()
self.limit = limit
self.n_keys = 0
def __setitem__(self, key, value):
if key in self.keys():
super().__setitem__(key, value)
elif self.n_keys < self.limit:
self.n_keys += 1
super().__setitem__(key, value)
return value
class ConfigType(Enum):
ODE_TCNN = "ode-tcnn"
ODE_TDNN = "ode-tdnn"
def find_model(conf):
if isinstance(conf, ConfigType):
conf = conf.value
if conf.startswith("ode-tcnn"):
print("ODE-TCNN")
return SpeechOdeTCNNModel
elif conf.startswith("ode-tdnn"):
print("ODE-TDNN")
return SpeechOdeTDNNModel
print("model is not specified.")
return None
def find_config(conf):
if isinstance(conf, ConfigType):
conf = conf.value
return _configs[conf]
def truncated_normal(tensor, std_dev=0.01):
tensor.zero_()
tensor.normal_(std=std_dev)
while torch.sum(torch.abs(tensor) > 2 * std_dev) > 0:
t = tensor[torch.abs(tensor) > 2 * std_dev]
t.zero_()
tensor[torch.abs(tensor) > 2 * std_dev] = torch.normal(t, std=std_dev)
class BNStatistics(object):
def __init__(self, max_t):
self.max_t = max_t
self.mean_t = [None] * self.max_t
self.var_t = [None] * self.max_t
self.count = [0] * self.max_t
self.poly_coeff_mean = None # for polyfit
self.poly_coeff_var = None # for polyfit
def reset(self):
del self.mean_t
del self.var_t
del self.count
del self.poly_coeff_mean
del self.poly_coeff_var
self.mean_t = [None] * self.max_t
self.var_t = [None] * self.max_t
self.count = [0] * self.max_t
self.poly_coeff_mean = None
self.poly_coeff_var = None
def average(self):
for i in range(self.max_t):
if self.count[i] > 0:
self.mean_t[i] = self.mean_t[i] / self.count[i]
self.var_t[i] = self.var_t[i] / self.count[i]
class SerializableModule(nn.Module):
def __init__(self):
super().__init__()
self.item_list = []
self.odefunc = None
def save(self, filename):
torch.save(self.state_dict(), filename)
def load(self, filename):
self.load_state_dict(torch.load(filename, map_location=lambda storage, loc: storage))
def switch_forward(self):
self.odefunc.bForward = True
def switch_backward(self):
self.odefunc.bForward = False
def init_bn_statistics(self, odefunc, item_list, max_t):
self.odefunc = odefunc
self.item_list = item_list
for item in self.item_list:
self.odefunc.bn_statistics[item] = BNStatistics(max_t)
def save_bn_statistics(self, filename):
f_pickle = open(filename, "wb")
pickle.dump(self.odefunc.bn_statistics, f_pickle)
f_pickle.close()
def load_bn_statistics(self, filename):
f_pickle = open(filename, "rb")
self.odefunc.bn_statistics = pickle.load(f_pickle)
f_pickle.close()
def reset_bn_statistics(self):
for item in self.item_list:
self.odefunc.bn_statistics[item].reset()
def average_bn_statistics(self):
for item in self.item_list:
self.odefunc.bn_statistics[item].average()
class ODEBlock(nn.Module):
def __init__(self, odefunc, it=1, tol=1e-3):
super(ODEBlock, self).__init__()
self.odefunc = odefunc
self.integration_time = torch.tensor([0, it]).float()
self.tol = tol
def forward(self, x):
self.integration_time = self.integration_time.type_as(x)
out = odeint(self.odefunc, x, self.integration_time, rtol=self.tol, atol=self.tol)
return out[1]
def set_integration_time(self, it):
self.integration_time = torch.tensor([0, it]).float()
@property
def nfe(self):
return self.odefunc.nfe
@nfe.setter
def nfe(self, value):
self.odefunc.nfe = value
def complement_run_bn(data, max_t, t):
low = None
high = None
tl = t - 1
while tl >= 0:
if type(data[tl]) == torch.Tensor:
low = data[tl]
break
tl -= 1
th = t + 1
while th < max_t:
if type(data[th]) == torch.Tensor:
high = data[th]
break
th += 1
if type(low) != torch.Tensor:
if type(high) != torch.Tensor:
print("Complement failed ({} {}) ...".format(tl, th))
exit()
else:
print("low is not found, and thus high ({}) is used in stead.".format(th))
return high
elif type(high) != torch.Tensor:
if type(low) != torch.Tensor:
print("Complement failed ({} {}) ...".format(tl, th))
exit()
else:
print("high is not found, and thus low ({}) is used in stead.".format(tl))
return low
return low + (high-low)*(float(t-tl)/float(th-tl))
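# Worked example (editor's note): if running BN statistics were only recorded
# at t = 10 and t = 20 (integration times 0.10 and 0.20), a query at t = 14
# returns
#   low + (high - low) * (14 - 10) / (20 - 10) = low + 0.4 * (high - low)
# i.e. simple linear interpolation between the nearest recorded time points.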
def complement_simple(norm, bn_statistics, tm):
t = round(tm.item()*100)
mean_t = bn_statistics.mean_t
var_t = bn_statistics.var_t
if t >= len(mean_t):
print("t is too large ({} >= {})".format(t, len(mean_t)))
t = len(mean_t) - 1
if type(mean_t[t]) != torch.Tensor:
print("complement at t = {}".format(t))
max_t = len(mean_t)
mean_t[t] = complement_run_bn(mean_t, max_t, t)
var_t[t] = complement_run_bn(var_t, max_t, t)
norm.running_mean = mean_t[t]
norm.running_var = var_t[t]
def calc_poly_coeff(data):
dtype = None
device = None
x = []
y = None
for i in range(len(data)):
if type(data[i]) == torch.Tensor:
dtype = data[i].dtype
device = data[i].device
x.append(i/100.0)
if type(y) != np.ndarray:
y = data[i].cpu().numpy()
else:
y = np.vstack((y, data[i].cpu().numpy()))
x = np.array(x)
coef = np.polyfit(x,y,2)
y_pred = coef[0].reshape(1,-1)*(x**2).reshape(-1,1) + coef[1].reshape(1,-1)*x.reshape(-1,1) + coef[2].reshape(1,-1)*np.ones((len(x),1))
y_bar = np.mean(y, axis=0) * np.ones((len(x),1))
r2 = np.ones(y.shape[1]) - np.sum((y-y_pred)**2, axis=0) / np.sum((y-y_bar)**2, axis=0)
t_coef = torch.from_numpy(coef)
if type(device) == torch.device:
t_coef = t_coef.to(device)
if type(dtype) == torch.dtype:
t_coef = t_coef.to(dtype)
return t_coef
def complement_polyfit2(norm, bn_statistics, t):
if type(bn_statistics.poly_coeff_mean) != torch.Tensor:
print("Calculating polynomial coefficients...")
bn_statistics.poly_coeff_mean = calc_poly_coeff(bn_statistics.mean_t)
bn_statistics.poly_coeff_var = calc_poly_coeff(bn_statistics.var_t)
norm.running_mean = bn_statistics.poly_coeff_mean[0]*(t**2) + bn_statistics.poly_coeff_mean[1]*t + bn_statistics.poly_coeff_mean[2]
norm.running_var = bn_statistics.poly_coeff_var[0]*(t**2) + bn_statistics.poly_coeff_var[1]*t + bn_statistics.poly_coeff_var[2]
complement_simple(norm, bn_statistics, t)
def collect_statistics(norm, mean_t, var_t, count, tm):
t = round(tm.item()*100)
if t >= len(mean_t):
print("list index out of range: {} > {}".format(t, len(mean_t)))
return
if type(mean_t[t]) != torch.Tensor:
mean_t[t] = torch.zeros(norm.num_features)
var_t[t] = torch.zeros(norm.num_features)
mean_t[t] += norm.running_mean
var_t[t] += norm.running_var
count[t] += 1
def run_norm(x, t, norm, bn_statistics, training, bForward, complement_statistics_func=complement_simple):
if training:
if bForward:
norm.running_mean.zero_()
norm.running_var.fill_(1)
norm.num_batches_tracked.zero_()
else:
complement_statistics_func(norm, bn_statistics, t)
norm.num_batches_tracked.zero_()
out = norm(x)
if training and bForward:
collect_statistics(norm, bn_statistics.mean_t, bn_statistics.var_t, bn_statistics.count, t)
return out
bn_complement_func = { "complement": complement_simple, "polyfit2": complement_polyfit2 }
class TCNN_ODEfunc(nn.Module):
def __init__(self, n_maps):
super(TCNN_ODEfunc, self).__init__()
self.norm1 = nn.BatchNorm2d(n_maps, affine=False, momentum=None)
self.conv1 = nn.Conv2d(n_maps, n_maps, (9, 1), padding=(4,0), dilation=1, bias=False)
self.norm2 = nn.BatchNorm2d(n_maps, affine=False, momentum=None)
self.conv2 = nn.Conv2d(n_maps, n_maps, (9, 1), padding=(4,0), dilation=1, bias=False)
self.norm3 = nn.BatchNorm2d(n_maps, affine=False, momentum=None)
self.conv3 = nn.Conv2d(n_maps, n_maps, (1, 1), dilation=1, bias=False)
self.bn_statistics = {}
self.bForward = True
self.nfe = 0
def forward(self, t, x):
self.nfe += 1
out = self.conv1(x)
out = run_norm(out, t, self.norm1, self.bn_statistics["norm1"], self.training, self.bForward)
out = F.relu(out)
out = self.conv2(out)
out = run_norm(out, t, self.norm2, self.bn_statistics["norm2"], self.training, self.bForward)
# branch
out3 = self.conv3(x)
out3 = run_norm(out3, t, self.norm3, self.bn_statistics["norm3"], self.training, self.bForward)
out3 = F.relu(out3)
out = F.relu(out + out3)
return out
class SpeechOdeTCNNModel(SerializableModule):
def __init__(self, config):
it = config["integration_time"]
super().__init__()
n_labels = config["n_labels"]
n_mels = config["n_mels"]
n_maps = config["n_feature_maps"]
it = config["integration_time"]
tol = config["tol"]
print("n_mels = {} --> n_maps = {}".format(n_mels, n_maps))
self.conv0 = nn.Conv2d(n_mels, n_maps, (3, 1), padding=(1,0), dilation=1, bias=False)
self.norm_in = nn.BatchNorm2d(n_maps, affine=False)
if "res_pool" in config:
self.pool = nn.AvgPool2d(config["res_pool"])
self.odeblock = ODEBlock(TCNN_ODEfunc(n_maps), it, tol)
self.output = nn.Linear(n_maps, n_labels)
self.init_bn_statistics(self.odeblock.odefunc, ["norm1", "norm2", "norm3"], int(it*100)+100)
def forward(self, x):
x = x.unsqueeze(3)
x = self.conv0(x)
x = F.relu(self.norm_in(x))
if hasattr(self, "pool"):
x = self.pool(x)
x = self.odeblock(x)
x = x.view(x.size(0), x.size(1), -1) # shape: (batch, feats, o3)
x = torch.mean(x, 2)
return self.output(x)
# TDNN is based on the following implementation:
# https://github.com/cvqluu/TDNN
class TDNN(nn.Module):
def __init__(
self,
input_dim=23,
output_dim=512,
context_size=5,
stride=1,
dilation=1,
padding=0
):
'''
TDNN as defined by https://www.danielpovey.com/files/2015_interspeech_multisplice.pdf
Affine transformation not applied globally to all frames but smaller windows with local context
batch_norm: True to include batch normalisation after the non linearity
Context size and dilation determine the frames selected
(although context size is not really defined in the traditional sense)
For example:
context size 5 and dilation 1 is equivalent to [-2,-1,0,1,2]
context size 3 and dilation 2 is equivalent to [-2, 0, 2]
context size 1 and dilation 1 is equivalent to [0]
'''
super(TDNN, self).__init__()
self.context_size = context_size
self.stride = stride
self.input_dim = input_dim
self.output_dim = output_dim
self.dilation = dilation
self.padding = padding
self.kernel = nn.Linear(input_dim*context_size, output_dim)
# Xavier initialization
nn.init.xavier_normal_(self.kernel.weight)
def forward(self, x):
'''
input: size (batch, seq_len, input_features)
output: size (batch, new_seq_len, output_features)
'''
_, _, d = x.shape
assert (d == self.input_dim), 'Input dimension was wrong. Expected ({}), got ({})'.format(self.input_dim, d)
x = x.unsqueeze(1)
# Unfold input into smaller temporal contexts
x = F.unfold(
x,
(self.context_size, self.input_dim),
#stride=(1,self.input_dim),
stride=(self.stride,self.input_dim),
dilation=(self.dilation,1),
padding=(self.padding,0)
)
x = x.transpose(1,2)
x = self.kernel(x)
return x
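# Hedged illustration (added for clarity; `_tdnn_context_offsets` is a hypothetical
# helper, not part of the original TDNN implementation): the frame offsets implied
# by an odd context_size and a dilation, matching the examples in the docstring above.
def _tdnn_context_offsets(context_size, dilation):
    # context_size=5, dilation=1 -> [-2, -1, 0, 1, 2]
    # context_size=3, dilation=2 -> [-2, 0, 2]
    # context_size=1, dilation=1 -> [0]
    half = (context_size - 1) // 2
    return [offset * dilation for offset in range(-half, half + 1)]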
class TDNN_ODEfunc(nn.Module):
def __init__(self, n_maps, window):
super(TDNN_ODEfunc, self).__init__()
self.norm1 = nn.BatchNorm1d(n_maps, affine=False, momentum=None)
self.tdnn1 = TDNN(input_dim=n_maps, output_dim=n_maps, context_size=window, stride=1, dilation=1, padding=int((window-1)/2))
self.bn_statistics = {}
self.bForward = True
self.nfe = 0
def forward(self, t, x):
self.nfe += 1
out = self.tdnn1(x)
out = F.relu(out)
out = out.transpose(1,2)
out = run_norm(out, t, self.norm1, self.bn_statistics["norm1"], self.training, self.bForward)
out = out.transpose(1,2)
return out
class SpeechOdeTDNNModel(SerializableModule):
def __init__(self, config):
it = config["integration_time"]
super().__init__()
n_labels = config["n_labels"]
n_mels = config["n_mels"]
n_maps = config["n_feature_maps"]
tol = config["tol"]
print("n_mels = {} --> n_maps = {}".format(n_mels, n_maps))
print("sub_sampe: window = {}, stride = {}".format(config["sub_sample_window"], config["sub_sample_stride"]))
print("tdnn: window = {}".format(config["tdnn_window"]))
self.tdnn0 = TDNN(input_dim=n_mels, output_dim=n_maps, context_size=config["sub_sample_window"], stride=config["sub_sample_stride"], dilation=1, padding=int((config["sub_sample_window"]-1)/2))
self.norm_in = nn.BatchNorm1d(n_maps, affine=False)
self.odeblock = ODEBlock(TDNN_ODEfunc(n_maps, config["tdnn_window"]), it, tol)
self.output = nn.Linear(n_maps, n_labels)
self.init_bn_statistics(self.odeblock.odefunc, ["norm1"], int(it*100)+100)
def forward(self, x):
x = F.relu(self.tdnn0(x))
x = x.transpose(1,2)
x = self.norm_in(x)
x = x.transpose(1,2)
x = self.odeblock(x)
x = torch.mean(x, 1)
return self.output(x)
class DatasetType(Enum):
TRAIN = 0
DEV = 1
TEST = 2
class SpeechDataset(data.Dataset):
LABEL_SILENCE = "__silence__"
LABEL_UNKNOWN = "__unknown__"
def __init__(self, data, set_type, config):
super().__init__()
self.audio_files = list(data.keys())
self.set_type = set_type
self.audio_labels = list(data.values())
config["bg_noise_files"] = list(filter(lambda x: x.endswith("wav"), config.get("bg_noise_files", [])))
self.bg_noise_audio = [librosa.core.load(file, sr=16000)[0] for file in config["bg_noise_files"]]
self.unknown_prob = config["unknown_prob"]
self.silence_prob = config["silence_prob"]
self.noise_prob = config["noise_prob"]
self.input_length = config["input_length"]
self.timeshift_ms = config["timeshift_ms"]
self._audio_cache = SimpleCache(config["cache_size"])
self._file_cache = SimpleCache(config["cache_size"])
n_unk = len(list(filter(lambda x: x == 1, self.audio_labels)))
self.n_silence = int(self.silence_prob * (len(self.audio_labels) - n_unk))
self.n_mels = config["n_mels"]
self.hop_ms = config["hop_ms"]
self.n_fft = config["n_fft"]
self.audio_processor = AudioPreprocessor(n_mels=self.n_mels, n_dct_filters=config["n_dct_filters"], hop_ms=self.hop_ms, n_fft=self.n_fft)
self.audio_preprocess_type = config["audio_preprocess_type"]
@staticmethod
def default_config():
config = {}
config["group_speakers_by_id"] = True
config["silence_prob"] = 0.1
config["noise_prob"] = 0.8
config["n_dct_filters"] = 40
config["input_length"] = 16000
config["n_mels"] = 40
config["timeshift_ms"] = 100
config["unknown_prob"] = 0.1
config["train_pct"] = 80
config["dev_pct"] = 10
config["test_pct"] = 10
config["wanted_words"] = ["command", "random"]
config["data_folder"] = "/data/speech_dataset"
config["audio_preprocess_type"] = "MFCCs"
return config
def collate_fn(self, data):
x = None
y = []
for audio_data, label in data:
if self.audio_preprocess_type == "MFCCs":
#audio_tensor = torch.from_numpy(self.audio_processor.compute_mfccs(audio_data).reshape(1, 101, 40))
audio_tensor = torch.from_numpy(self.audio_processor.compute_mfccs(audio_data).reshape(1, (1000//self.hop_ms)+1, self.n_mels))
x = audio_tensor if x is None else torch.cat((x, audio_tensor), 0)
elif self.audio_preprocess_type == "MFCC_TCNN":
audio_tensor = torch.from_numpy(self.audio_processor.compute_mfccs(audio_data).T)
x = audio_tensor if x is None else torch.cat((x, audio_tensor), 0)
elif self.audio_preprocess_type == "PCEN":
audio_tensor = torch.from_numpy(np.expand_dims(audio_data, axis=0))
audio_tensor = self.audio_processor.compute_pcen(audio_tensor)
x = audio_tensor if x is None else torch.cat((x, audio_tensor), 0)
y.append(label)
return x, torch.tensor(y)
def _timeshift_audio(self, data):
shift = (16000 * self.timeshift_ms) // 1000
shift = random.randint(-shift, shift)
a = -min(0, shift)
b = max(0, shift)
data = np.pad(data, (a, b), "constant")
return data[:len(data) - a] if a else data[b:]
def load_audio(self, example, silence=False):
if silence:
example = "__silence__"
if random.random() < 0.7:
try:
return self._audio_cache[example]
except KeyError:
pass
in_len = self.input_length
if self.bg_noise_audio:
bg_noise = random.choice(self.bg_noise_audio)
a = random.randint(0, len(bg_noise) - in_len - 1)
bg_noise = bg_noise[a:a + in_len]
else:
bg_noise = np.zeros(in_len)
if silence:
data = np.zeros(in_len, dtype=np.float32)
else:
file_data = self._file_cache.get(example)
data = librosa.core.load(example, sr=16000)[0] if file_data is None else file_data
self._file_cache[example] = data
data = np.pad(data, (0, max(0, in_len - len(data))), "constant")
if self.set_type == DatasetType.TRAIN:
data = self._timeshift_audio(data)
if random.random() < self.noise_prob or silence:
a = random.random() * 0.1
data = np.clip(a * bg_noise + data, -1, 1)
self._audio_cache[example] = data
return data
@classmethod
def splits(cls, config):
folder = config["data_folder"]
wanted_words = config["wanted_words"]
unknown_prob = config["unknown_prob"]
train_pct = config["train_pct"]
dev_pct = config["dev_pct"]
test_pct = config["test_pct"]
words = {word: i + 2 for i, word in enumerate(wanted_words)}
words.update({cls.LABEL_SILENCE:0, cls.LABEL_UNKNOWN:1})
sets = [{}, {}, {}]
unknowns = [0] * 3
bg_noise_files = []
unknown_files = []
for folder_name in os.listdir(folder):
path_name = os.path.join(folder, folder_name)
is_bg_noise = False
if os.path.isfile(path_name):
continue
if folder_name in words:
label = words[folder_name]
elif folder_name == "_background_noise_":
is_bg_noise = True
else:
label = words[cls.LABEL_UNKNOWN]
for filename in os.listdir(path_name):
wav_name = os.path.join(path_name, filename)
if is_bg_noise and os.path.isfile(wav_name):
bg_noise_files.append(wav_name)
continue
elif label == words[cls.LABEL_UNKNOWN]:
unknown_files.append(wav_name)
continue
if config["group_speakers_by_id"]:
hashname = re.sub(r"_nohash_.*$", "", filename)
max_no_wavs = 2**27 - 1
bucket = int(hashlib.sha1(hashname.encode()).hexdigest(), 16)
bucket = (bucket % (max_no_wavs + 1)) * (100. / max_no_wavs)
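# Note (added for clarity): hashing the speaker id (the filename with its _nohash_
# suffix stripped) and mapping it deterministically to [0, 100) puts every recording
# of a speaker into the same split, so speakers never leak between train, dev and test.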
if bucket < dev_pct:
tag = DatasetType.DEV
elif bucket < test_pct + dev_pct:
tag = DatasetType.TEST
else:
tag = DatasetType.TRAIN
sets[tag.value][wav_name] = label
for tag in range(len(sets)):
unknowns[tag] = int(unknown_prob * len(sets[tag]))
random.shuffle(unknown_files)
a = 0
for i, dataset in enumerate(sets):
b = a + unknowns[i]
unk_dict = {u: words[cls.LABEL_UNKNOWN] for u in unknown_files[a:b]}
dataset.update(unk_dict)
a = b
train_cfg = ChainMap(dict(bg_noise_files=bg_noise_files), config)
test_cfg = ChainMap(dict(bg_noise_files=bg_noise_files, noise_prob=0), config)
datasets = (cls(sets[0], DatasetType.TRAIN, train_cfg), cls(sets[1], DatasetType.DEV, test_cfg),
cls(sets[2], DatasetType.TEST, test_cfg))
return datasets
def __getitem__(self, index):
if index >= len(self.audio_labels):
return self.load_audio(None, silence=True), 0
return self.load_audio(self.audio_files[index]), self.audio_labels[index]
def __len__(self):
return len(self.audio_labels) + self.n_silence
_configs = {
ConfigType.ODE_TCNN.value: dict(n_labels=12, n_feature_maps=20, res_pool=(4, 1), use_dilation=False),
ConfigType.ODE_TDNN.value: dict(n_labels=12, n_feature_maps=32, sub_sample_window=3, sub_sample_stride=3, tdnn_window=3),
}
| 35.185629
| 200
| 0.592282
| 3,172
| 23,504
| 4.163619
| 0.130517
| 0.035436
| 0.008329
| 0.014311
| 0.347846
| 0.250776
| 0.20103
| 0.181949
| 0.151056
| 0.13084
| 0
| 0.019033
| 0.284675
| 23,504
| 667
| 201
| 35.238381
| 0.76649
| 0.042503
| 0
| 0.209213
| 0
| 0
| 0.060754
| 0.001878
| 0
| 0
| 0
| 0
| 0.001919
| 1
| 0.090211
| false
| 0.001919
| 0.03263
| 0.003839
| 0.211132
| 0.028791
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c0d6af23938ca6fed73a619af2c2521273b4c43
| 7,642
|
py
|
Python
|
tests/test_snapshot.py
|
arkadiam/virt-backup
|
b3e8703ae3ab0f792f5d68913ecf5e7270acea46
|
[
"BSD-2-Clause-FreeBSD"
] | 54
|
2019-06-21T23:29:02.000Z
|
2022-03-28T14:30:44.000Z
|
tests/test_snapshot.py
|
arkadiam/virt-backup
|
b3e8703ae3ab0f792f5d68913ecf5e7270acea46
|
[
"BSD-2-Clause-FreeBSD"
] | 28
|
2019-08-18T01:01:25.000Z
|
2021-07-14T17:39:42.000Z
|
tests/test_snapshot.py
|
arkadiam/virt-backup
|
b3e8703ae3ab0f792f5d68913ecf5e7270acea46
|
[
"BSD-2-Clause-FreeBSD"
] | 12
|
2019-07-12T10:16:03.000Z
|
2022-03-09T05:33:30.000Z
|
import json
import os
import arrow
import libvirt
import pytest
from virt_backup.backups import DomBackup
from virt_backup.domains import get_xml_block_of_disk
from virt_backup.backups.snapshot import DomExtSnapshot, DomExtSnapshotCallbackRegistrer
from virt_backup.exceptions import DiskNotFoundError, SnapshotNotStarted
from helper.virt_backup import MockSnapshot
class TestDomExtSnapshot:
snapshot_helper = None
@pytest.fixture(autouse=True)
def gen_snapshot_helper(self, build_mock_domain):
dom = build_mock_domain
callbacks_registrer = DomExtSnapshotCallbackRegistrer(dom._conn)
self.snapshot_helper = DomExtSnapshot(
dom=dom,
callbacks_registrer=callbacks_registrer,
disks={
"vda": {"src": "/vda.qcow2", "type": "qcow2"},
"vdb": {"src": "/vdb.qcow2", "type": "qcow2"},
},
)
def test_snapshot_logic_date(self, monkeypatch):
"""
Check that the snapshot date lies between the pre- and post-snapshot timestamps
"""
pre_snap_date = arrow.now()
metadatas = self.start_snapshot(monkeypatch)
post_snap_date = arrow.now()
snapshot_date = metadatas["date"]
assert snapshot_date >= pre_snap_date
assert snapshot_date <= post_snap_date
def test_snapshot_disks_infos(self, monkeypatch):
"""
Check that the returned metadatas dict contains the necessary info
"""
metadatas = self.start_snapshot(monkeypatch)
assert len(self.snapshot_helper.disks) == len(metadatas["disks"])
for disk in self.snapshot_helper.disks:
assert sorted(("snapshot", "src", "type")) == sorted(
metadatas["disks"][disk].keys()
)
def test_snapshot_correct_snapshot_path(self, monkeypatch):
"""
Check that the snapshot is created in the same path as its source disk
"""
metadatas = self.start_snapshot(monkeypatch)
for disk in metadatas["disks"].values():
assert os.path.dirname(disk["src"]) == os.path.dirname(disk["snapshot"])
def start_snapshot(self, monkeypatch):
monkeypatch.setattr(
self.snapshot_helper, "external_snapshot", lambda: MockSnapshot("123")
)
return self.snapshot_helper.start()
def test_external_snapshot(self):
snap = self.snapshot_helper.external_snapshot()
assert isinstance(snap, MockSnapshot)
def test_external_snapshot_quiesce_fallback(self):
tried = {"quiesce": False}
def mock_quiesce_failure(_, flags):
if (flags & libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) != 0:
tried["quiesce"] = True
raise libvirt.libvirtError("quiesce error")
return MockSnapshot("123")
self.snapshot_helper.dom.set_mock_snapshot_create(mock_quiesce_failure)
self.snapshot_helper.quiesce = True
snap = self.snapshot_helper.external_snapshot()
assert tried["quiesce"]
assert isinstance(snap, MockSnapshot)
def test_get_snapshot_flags(self):
flags = self.snapshot_helper._get_snapshot_flags()
assert flags == (
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY
+ libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_ATOMIC
+ libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA
)
def test_get_snapshot_flags_quiesce(self):
flags = self.snapshot_helper._get_snapshot_flags(quiesce=True)
assert (flags & libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) != 0
def test_gen_libvirt_snapshot_xml(self):
expected_xml = (
"<domainsnapshot>\n"
" <description>Pre-backup external snapshot</description>\n"
" <disks>\n"
' <disk name="vda" snapshot="external"/>\n'
' <disk name="vdb" snapshot="external"/>\n'
' <disk name="vdz" snapshot="no"/>\n'
" </disks>\n"
"</domainsnapshot>\n"
)
assert self.snapshot_helper.gen_libvirt_snapshot_xml() == expected_xml
def test_gen_libvirt_snapshot_xml_ignored_disk(self):
self.snapshot_helper.disks.pop("vdb")
expected_xml = (
"<domainsnapshot>\n"
" <description>Pre-backup external snapshot</description>\n"
" <disks>\n"
' <disk name="vda" snapshot="external"/>\n'
' <disk name="vdb" snapshot="no"/>\n'
' <disk name="vdz" snapshot="no"/>\n'
" </disks>\n"
"</domainsnapshot>\n"
)
assert self.snapshot_helper.gen_libvirt_snapshot_xml() == expected_xml
def test_manually_pivot_disk(self, build_mock_libvirtconn):
self.snapshot_helper.conn = build_mock_libvirtconn
self.snapshot_helper._manually_pivot_disk("vda", "/testvda", "qcow2")
dom_xml = self.snapshot_helper.dom.XMLDesc()
assert self.get_src_for_disk(dom_xml, "vda") == "/testvda"
def get_src_for_disk(self, dom_xml, disk):
elem = get_xml_block_of_disk(dom_xml, disk)
return elem.xpath("source")[0].get("file")
def test_manually_pivot_disk_libvirt_2(self, build_mock_libvirtconn):
"""
Test manual pivot with libvirt < 3.0
"""
conn = build_mock_libvirtconn
conn._libvirt_version = 2000000
conn._domains.append(self.snapshot_helper.dom)
return self.test_manually_pivot_disk(conn)
def test_manually_pivot_unexistant_disk(self):
with pytest.raises(DiskNotFoundError):
self.snapshot_helper._manually_pivot_disk("sda", "/testvda", "qcow2")
def test_clean_no_metadata(self):
with pytest.raises(SnapshotNotStarted):
self.snapshot_helper.clean()
def test_clean(self, monkeypatch, tmpdir):
snapdir = self.prepare_test_clean(monkeypatch, tmpdir)
self.snapshot_helper.clean()
assert len(snapdir.listdir()) == 0
def prepare_test_clean(self, monkeypatch, tmpdir):
snapshots = self.create_temp_snapshot_files(tmpdir)
self.mock_pivot_mechanism(monkeypatch)
# set the domain inactive to avoid the blockcommit
self.snapshot_helper.dom.set_state(0, 0)
self.snapshot_helper.metadatas = {
"date": arrow.now(),
"disks": {
disk: {"src": prop["src"], "snapshot": snapshots[disk], "type": "qcow2"}
for disk, prop in self.snapshot_helper.disks.items()
},
}
return tmpdir.join("snaps")
def create_temp_snapshot_files(self, tmpdir):
tmpdir = tmpdir.mkdir("snaps")
self.snapshot_helper.dom.set_storage_basedir(os.path.abspath(str(tmpdir)))
snapshots = {}
# swap disk and snapshots, to just change the domain basedir
for disk, prop in self.snapshot_helper.disks.items():
dom_disk_path = (
(get_xml_block_of_disk(self.snapshot_helper.dom.XMLDesc(), disk))
.xpath("source")[0]
.get("file")
)
tmpdir.join(os.path.basename(dom_disk_path)).write("")
prop["snapshot"] = dom_disk_path
disk_path = tmpdir.join("{}.qcow2.{}".format(disk, "123"))
prop["src"] = str(disk_path)
snapshots[disk] = prop["snapshot"]
return snapshots
def mock_pivot_mechanism(self, monkeypatch):
monkeypatch.setattr(
self.snapshot_helper, "_qemu_img_commit", lambda *args: None
)
monkeypatch.setattr(
self.snapshot_helper, "_manually_pivot_disk", lambda *args: None
)
| 36.390476
| 88
| 0.634258
| 853
| 7,642
| 5.411489
| 0.193435
| 0.094021
| 0.113085
| 0.027296
| 0.39818
| 0.269281
| 0.207756
| 0.156412
| 0.119151
| 0.101386
| 0
| 0.006003
| 0.258833
| 7,642
| 209
| 89
| 36.564593
| 0.808969
| 0.038995
| 0
| 0.193548
| 0
| 0
| 0.112138
| 0.022483
| 0
| 0
| 0
| 0
| 0.090323
| 1
| 0.135484
| false
| 0
| 0.064516
| 0
| 0.251613
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c0ff50a90211a83518224c4a9e7cb96da0fbca0
| 1,015
|
py
|
Python
|
DongbinNa/17/pt.py
|
wonnerky/coteMaster
|
360e491e6342c1ee42ff49750b838a2ead865613
|
[
"Apache-2.0"
] | null | null | null |
DongbinNa/17/pt.py
|
wonnerky/coteMaster
|
360e491e6342c1ee42ff49750b838a2ead865613
|
[
"Apache-2.0"
] | null | null | null |
DongbinNa/17/pt.py
|
wonnerky/coteMaster
|
360e491e6342c1ee42ff49750b838a2ead865613
|
[
"Apache-2.0"
] | null | null | null |
# NxN test tube; every second each virus spreads up/down/left/right, lower-numbered viruses have priority
# For each second (for loop), spread starting from the lowest number; stop if the cell already holds a virus or lies outside the matrix
# Record the coordinates of each virus type
n, k = map(int, input().split())
matrix = []
for _ in range(n):
matrix.append(list(map(int, input().split())))
s, x, y = map(int, input().split())
# up, down, left, right
dx = [-1, 1, 0, 0] # up/down
dy = [0, 0, -1, 1] # left/right
# dict of virus coordinates, initial state
virus = {}
for i in range(k):
virus[i+1] = []
for i in range(n):
for j in range(n):
if matrix[i][j] != 0:
virus[matrix[i][j]].append((i,j))
def move(cord):
x, y = cord
v_num = matrix[x][y]
for i in range(4):
n_x = x+dx[i]
n_y = y+dy[i]
if n_x == n or n_y == n or n_x < 0 or n_y < 0:
continue
if matrix[n_x][n_y] == 0:
matrix[n_x][n_y] = v_num
virus[v_num].append((n_x, n_y))
for _ in range(s):
for idx in sorted(virus.keys()):
for cord in list(virus[idx]):  # iterate over a snapshot so cells infected this second don't spread until the next second
move(cord)
# answer; input coordinates are 1-indexed, so (1,1) is the top-left cell
print(matrix[x-1][y-1])
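# Worked example (added for illustration, traced by hand from the rules above):
# n=3, k=3 with the grid
#   1 0 2
#   0 0 0
#   3 0 0
# and s=2, x=3, y=2. In second 1 viruses 1, 2, 3 claim their free neighbours in
# order of priority; after second 2 the cell (3, 2) holds virus 3, so the program prints 3.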
| 23.604651
| 57
| 0.519212
| 196
| 1,015
| 2.602041
| 0.336735
| 0.082353
| 0.023529
| 0.094118
| 0.039216
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025424
| 0.302463
| 1,015
| 43
| 58
| 23.604651
| 0.694915
| 0.180296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0
| 0
| 0.033333
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c11ff715822a78e65219cb047fa20aeb18248ac
| 7,843
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/pavelib/i18n.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/pavelib/i18n.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/pavelib/i18n.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2022-02-06T10:48:15.000Z
|
2022-02-06T10:48:15.000Z
|
"""
Internationalization tasks
"""
import re
import subprocess
import sys
from path import Path as path
from paver.easy import cmdopts, needs, sh, task
from .utils.cmd import django_cmd
from .utils.envs import Env
from .utils.timer import timed
try:
from pygments.console import colorize
except ImportError:
colorize = lambda color, text: text
DEFAULT_SETTINGS = Env.DEVSTACK_SETTINGS
@task
@needs(
"pavelib.prereqs.install_prereqs",
"pavelib.i18n.i18n_validate_gettext",
)
@cmdopts([
("verbose", "v", "Sets 'verbose' to True"),
])
@timed
def i18n_extract(options):
"""
Extract localizable strings from sources
"""
verbose = getattr(options, "verbose", None)
cmd = "i18n_tool extract"
if verbose:
cmd += " -v"
sh(cmd)
@task
@timed
def i18n_fastgenerate():
"""
Compile localizable strings from sources without re-extracting strings first.
"""
sh("i18n_tool generate")
@task
@needs("pavelib.i18n.i18n_extract")
@timed
def i18n_generate():
"""
Compile localizable strings from sources, extracting strings first.
"""
sh("i18n_tool generate")
@task
@needs("pavelib.i18n.i18n_extract")
@timed
def i18n_generate_strict():
"""
Compile localizable strings from sources, extracting strings first.
Complains if files are missing.
"""
sh("i18n_tool generate --strict")
@task
@needs("pavelib.i18n.i18n_extract")
@timed
def i18n_dummy():
"""
Simulate international translation by generating dummy strings
corresponding to source strings.
"""
sh("i18n_tool dummy")
# Need to then compile the new dummy strings
sh("i18n_tool generate")
@task
@needs(
"pavelib.prereqs.install_prereqs",
)
@timed
def i18n_compilejs(options): # lint-amnesty, pylint: disable=unused-argument
"""
Generating djangojs.js files using django-statici18n
"""
settings = 'devstack_docker'
# Generate static i18n JS files.
for system in ['lms', 'cms']:
sh(django_cmd(system, settings, 'compilejsi18n'))
@task
@timed
def i18n_validate_gettext():
"""
Make sure GNU gettext utilities are available
"""
returncode = subprocess.call(['which', 'xgettext'])
if returncode != 0:
msg = colorize(
'red',
"Cannot locate GNU gettext utilities, which are "
"required by django for internationalization.\n (see "
"https://docs.djangoproject.com/en/dev/topics/i18n/"
"translation/#message-files)\nTry downloading them from "
"http://www.gnu.org/software/gettext/ \n"
)
sys.stderr.write(msg)
sys.exit(1)
@task
@timed
def i18n_validate_transifex_config():
"""
Make sure config file with username/password exists
"""
home = path('~').expanduser()
config = home / '.transifexrc'
if not config.isfile() or config.getsize() == 0:
msg = colorize(
'red',
"Cannot connect to Transifex, config file is missing"
" or empty: {config} \nSee "
"http://help.transifex.com/features/client/#transifexrc \n".format(
config=config,
)
)
sys.stderr.write(msg)
sys.exit(1)
@task
@needs("pavelib.i18n.i18n_validate_transifex_config")
@timed
def i18n_transifex_push():
"""
Push source strings to Transifex for translation
"""
sh("i18n_tool transifex push")
@task
@needs("pavelib.i18n.i18n_validate_transifex_config")
@timed
def i18n_transifex_pull():
"""
Pull translated strings from Transifex
"""
sh("i18n_tool transifex pull")
@task
@timed
def i18n_rtl():
"""
Pull all RTL translations (reviewed AND unreviewed) from Transifex
"""
sh("i18n_tool transifex rtl")
print("Now generating langugage files...")
sh("i18n_tool generate --rtl")
print("Committing translations...")
sh('git clean -fdX conf/locale')
sh('git add conf/locale')
sh('git commit --amend')
@task
@timed
def i18n_ltr():
"""
Pull all LTR translations (reviewed AND unreviewed) from Transifex
"""
sh("i18n_tool transifex ltr")
print("Now generating langugage files...")
sh("i18n_tool generate --ltr")
print("Committing translations...")
sh('git clean -fdX conf/locale')
sh('git add conf/locale')
sh('git commit --amend')
@task
@needs(
"pavelib.i18n.i18n_clean",
"pavelib.i18n.i18n_transifex_pull",
"pavelib.i18n.i18n_extract",
"pavelib.i18n.i18n_dummy",
"pavelib.i18n.i18n_generate_strict",
)
@timed
def i18n_robot_pull():
"""
Pull source strings, generate po and mo files, and validate
"""
# sh('paver test_i18n')
# Tests were removed from repo, but there should still be tests covering the translations
# TODO: Validate the recently pulled translations, and give a bail option
sh('git clean -fdX conf/locale/rtl')
sh('git clean -fdX conf/locale/eo')
print("\n\nValidating translations with `i18n_tool validate`...")
sh("i18n_tool validate")
con = input("Continue with committing these translations (y/n)? ")
if con.lower() == 'y':
sh('git add conf/locale')
sh('git add cms/static/js/i18n')
sh('git add lms/static/js/i18n')
sh(
'git commit --message='
'"Update translations (autogenerated message)" --edit'
)
@task
@timed
def i18n_clean():
"""
Clean the i18n directory of artifacts
"""
sh('git clean -fdX conf/locale')
@task
@needs(
"pavelib.i18n.i18n_clean",
"pavelib.i18n.i18n_extract",
"pavelib.i18n.i18n_transifex_push",
)
@timed
def i18n_robot_push():
"""
Extract new strings, and push to transifex
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
@task
@needs(
"pavelib.i18n.i18n_validate_transifex_config",
"pavelib.i18n.i18n_generate",
)
@timed
def i18n_release_push():
"""
Push release-specific resources to Transifex.
"""
resources = find_release_resources()
sh("i18n_tool transifex push " + " ".join(resources))
@task
@needs(
"pavelib.i18n.i18n_validate_transifex_config",
)
@timed
def i18n_release_pull():
"""
Pull release-specific translations from Transifex.
"""
resources = find_release_resources()
sh("i18n_tool transifex pull " + " ".join(resources))
def find_release_resources():
"""
Validate the .tx/config file for release files, returning the resource names.
For working with release files, the .tx/config file should have exactly
two resources defined named "release-*". Check that this is true. If
there's a problem, print messages about it.
Returns a list of resource names, or raises ValueError if .tx/config
doesn't have two resources.
"""
# An entry in .tx/config for a release will look like this:
#
# [edx-platform.release-dogwood]
# file_filter = conf/locale/<lang>/LC_MESSAGES/django.po
# source_file = conf/locale/en/LC_MESSAGES/django.po
# source_lang = en
# type = PO
#
# [edx-platform.release-dogwood-js]
# file_filter = conf/locale/<lang>/LC_MESSAGES/djangojs.po
# source_file = conf/locale/en/LC_MESSAGES/djangojs.po
# source_lang = en
# type = PO
rx_release = r"^\[([\w-]+\.release-[\w-]+)\]$"
with open(".tx/config") as tx_config:
resources = re.findall(rx_release, tx_config.read(), re.MULTILINE)
if len(resources) == 2:
return resources
if not resources: # lint-amnesty, pylint: disable=no-else-raise
raise ValueError("You need two release-* resources defined to use this command.")
else:
msg = "Strange Transifex config! Found these release-* resources:\n" + "\n".join(resources)
raise ValueError(msg)
| 23.694864
| 99
| 0.651409
| 968
| 7,843
| 5.174587
| 0.282025
| 0.037333
| 0.050908
| 0.035935
| 0.37772
| 0.3334
| 0.293671
| 0.256938
| 0.204432
| 0.136355
| 0
| 0.025578
| 0.227336
| 7,843
| 330
| 100
| 23.766667
| 0.80099
| 0.280632
| 0
| 0.428571
| 0
| 0
| 0.40207
| 0.126058
| 0.005495
| 0
| 0
| 0.00303
| 0
| 1
| 0.098901
| false
| 0.005495
| 0.054945
| 0
| 0.159341
| 0.027473
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c1262e89c4802e8d7e590c6c84ac0e62c5a4169
| 2,020
|
py
|
Python
|
sympy/parsing/autolev/test-examples/ruletest9.py
|
Michal-Gagala/sympy
|
3cc756c2af73b5506102abaeefd1b654e286e2c8
|
[
"MIT"
] | null | null | null |
sympy/parsing/autolev/test-examples/ruletest9.py
|
Michal-Gagala/sympy
|
3cc756c2af73b5506102abaeefd1b654e286e2c8
|
[
"MIT"
] | null | null | null |
sympy/parsing/autolev/test-examples/ruletest9.py
|
Michal-Gagala/sympy
|
3cc756c2af73b5506102abaeefd1b654e286e2c8
|
[
"MIT"
] | null | null | null |
import sympy.physics.mechanics as _me
import sympy as _sm
import math as m
import numpy as _np
frame_n = _me.ReferenceFrame('n')
frame_a = _me.ReferenceFrame('a')
a = 0
d = _me.inertia(frame_a, 1, 1, 1)
point_po1 = _me.Point('po1')
point_po2 = _me.Point('po2')
particle_p1 = _me.Particle('p1', _me.Point('p1_pt'), _sm.Symbol('m'))
particle_p2 = _me.Particle('p2', _me.Point('p2_pt'), _sm.Symbol('m'))
c1, c2, c3 = _me.dynamicsymbols('c1 c2 c3')
c1_d, c2_d, c3_d = _me.dynamicsymbols('c1_ c2_ c3_', 1)
body_r_cm = _me.Point('r_cm')
body_r_cm.set_vel(frame_n, 0)
body_r_f = _me.ReferenceFrame('r_f')
body_r = _me.RigidBody('r', body_r_cm, body_r_f, _sm.symbols('m'), (_me.outer(body_r_f.x,body_r_f.x),body_r_cm))
point_po2.set_pos(particle_p1.point, c1*frame_a.x)
v = 2*point_po2.pos_from(particle_p1.point)+c2*frame_a.y
frame_a.set_ang_vel(frame_n, c3*frame_a.z)
v = 2*frame_a.ang_vel_in(frame_n)+c2*frame_a.y
body_r_f.set_ang_vel(frame_n, c3*frame_a.z)
v = 2*body_r_f.ang_vel_in(frame_n)+c2*frame_a.y
frame_a.set_ang_acc(frame_n, (frame_a.ang_vel_in(frame_n)).dt(frame_a))
v = 2*frame_a.ang_acc_in(frame_n)+c2*frame_a.y
particle_p1.point.set_vel(frame_a, c1*frame_a.x+c3*frame_a.y)
body_r_cm.set_acc(frame_n, c2*frame_a.y)
v_a = _me.cross(body_r_cm.acc(frame_n), particle_p1.point.vel(frame_a))
x_b_c = v_a
x_b_d = 2*x_b_c
a_b_c_d_e = x_b_d*2
a_b_c = 2*c1*c2*c3
a_b_c += 2*c1
a_b_c = 3*c1
q1, q2, u1, u2 = _me.dynamicsymbols('q1 q2 u1 u2')
q1_d, q2_d, u1_d, u2_d = _me.dynamicsymbols('q1_ q2_ u1_ u2_', 1)
x, y = _me.dynamicsymbols('x y')
x_d, y_d = _me.dynamicsymbols('x_ y_', 1)
x_dd, y_dd = _me.dynamicsymbols('x_ y_', 2)
yy = _me.dynamicsymbols('yy')
yy = x*x_d**2+1
m = _sm.Matrix([[0]])
m[0] = 2*x
m = m.row_insert(m.shape[0], _sm.Matrix([[0]]))
m[m.shape[0]-1] = 2*y
a = 2*m[0]
m = _sm.Matrix([1,2,3,4,5,6,7,8,9]).reshape(3, 3)
m[0,1] = 5
a = m[0, 1]*2
force_ro = q1*frame_n.x
torque_a = q2*frame_n.z
force_ro = q1*frame_n.x + q2*frame_n.y
f = force_ro*2
| 36.071429
| 113
| 0.688119
| 463
| 2,020
| 2.63067
| 0.164147
| 0.098522
| 0.034483
| 0.036946
| 0.300493
| 0.231527
| 0.137931
| 0.102627
| 0.08046
| 0.042693
| 0
| 0.063212
| 0.130693
| 2,020
| 55
| 114
| 36.727273
| 0.63041
| 0
| 0
| 0
| 0
| 0
| 0.047328
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.074074
| 0
| 0.074074
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c137c12cabff00b49311cbc274302f573ef641a
| 3,830
|
py
|
Python
|
tests/test_asm_stats.py
|
hall-lab/tenx-gcp
|
f204e60cc5efb543a524df9cdbd44d0a8c590673
|
[
"MIT"
] | null | null | null |
tests/test_asm_stats.py
|
hall-lab/tenx-gcp
|
f204e60cc5efb543a524df9cdbd44d0a8c590673
|
[
"MIT"
] | null | null | null |
tests/test_asm_stats.py
|
hall-lab/tenx-gcp
|
f204e60cc5efb543a524df9cdbd44d0a8c590673
|
[
"MIT"
] | null | null | null |
import filecmp, os, tempfile, unittest
from click.testing import CliRunner
from tenx.asm_stats import asm_stats_cmd, get_contig_lengths, get_scaffold_and_contig_lengths, get_stats, length_buckets
class AsmStatsTest(unittest.TestCase):
def setUp(self):
self.data_dn = os.path.join(os.path.dirname(__file__), "data", "asm-stats")
self.fasta1_fn = os.path.join(self.data_dn, "asm.fasta")
self.fasta2_fn = os.path.join(self.data_dn, "asm.scaffolded.fasta")
self.fasta2_stats_fn = os.path.join(self.data_dn, "asm.scaffolded.fasta.stats")
self.expected_scaffolds = [ 17004, 350002, 1000001]
self.expected_contigs = [1, 2001, 5001, 10001, 100001, 250001, 1000001]
self.temp_d = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_d.cleanup()
def test1_get_contig_lengths(self):
contigs = get_contig_lengths(self.fasta1_fn)
self.assertEqual(contigs, self.expected_contigs)
def test1_get_scaffold_and_contig_lengths(self):
scaffolds, contigs = get_scaffold_and_contig_lengths(self.fasta2_fn, 2)
self.assertEqual(scaffolds, self.expected_scaffolds)
self.assertEqual(contigs, self.expected_contigs)
def test2_get_stats(self):
total = sum(self.expected_scaffolds)
count = len(self.expected_scaffolds)
expected_stats = {
"total": total,
"count": count,
"mean": int(total/count),
"max": self.expected_scaffolds[-1],
"genome_n50": int(total/2),
"n50_length": total,
}
for b in length_buckets():
expected_stats["_".join([str(b), "count"])] = 0
expected_stats["_".join([str(b), "length"])] = 0
expected_stats["1000000_length"] = self.expected_scaffolds[-1]
expected_stats["1000000_count"] = 1
expected_stats["250000_length"] = self.expected_scaffolds[1]
expected_stats["250000_count"] = 1
expected_stats["10000_length"] = self.expected_scaffolds[0]
expected_stats["10000_count"] = 1
stats = get_stats(self.expected_scaffolds)
self.assertEqual(stats, expected_stats)
total = sum(self.expected_contigs)
count = len(self.expected_contigs)
expected_stats = {
"total": total,
"count": count,
"mean": int(total/count),
"max": self.expected_contigs[-1],
"genome_n50": int(total/2),
"n50_length": total,
}
for b in length_buckets():
expected_stats["_".join([str(b), "count"])] = 1
expected_stats["_".join([str(b), "length"])] = b + 1
stats = get_stats(self.expected_contigs)
self.assertEqual(stats, expected_stats)
def test4_asm_stats_cmd(self):
runner = CliRunner()
result = runner.invoke(asm_stats_cmd, ["--help"])
self.assertEqual(result.exit_code, 0)
result = runner.invoke(asm_stats_cmd, [])
self.assertEqual(result.exit_code, 2)
stats_fn = os.path.join(self.temp_d.name, "stats.txt")
result = runner.invoke(asm_stats_cmd, ["-i", self.fasta2_fn, "-n", 2, "-o", stats_fn, "-f", "quick"])
try:
self.assertEqual(result.exit_code, 0)
except:
print(result.output)
raise
self.assertEqual(filecmp.cmp(stats_fn, self.fasta2_stats_fn), True)
result = runner.invoke(asm_stats_cmd, ["-i", self.fasta2_fn, "-n", 2, "-o", stats_fn, "-f", "json"])
self.assertEqual(result.exit_code, 0)
result = runner.invoke(asm_stats_cmd, ["-i", self.fasta2_fn, "-n", 2, "-o", stats_fn, "-f", "yaml"])
self.assertEqual(result.exit_code, 0)
# -- AsmStatsTest
if __name__ == '__main__':
unittest.main(verbosity=2)
#-- __main__
| 40.315789
| 120
| 0.629243
| 475
| 3,830
| 4.795789
| 0.212632
| 0.084284
| 0.082968
| 0.046093
| 0.546093
| 0.479368
| 0.355136
| 0.280509
| 0.269535
| 0.269535
| 0
| 0.045051
| 0.234987
| 3,830
| 94
| 121
| 40.744681
| 0.732423
| 0.006789
| 0
| 0.282051
| 0
| 0
| 0.079716
| 0.00684
| 0
| 0
| 0
| 0
| 0.141026
| 1
| 0.076923
| false
| 0
| 0.038462
| 0
| 0.128205
| 0.012821
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c1416cedaf37318b018aae01bda9b0f41f3ed30
| 3,435
|
py
|
Python
|
utils.py
|
zexihuang/raft-blockchain
|
a2f7365e10f5a5334c59bac6b551648bae04e2e8
|
[
"Apache-2.0"
] | 1
|
2021-06-04T03:05:06.000Z
|
2021-06-04T03:05:06.000Z
|
utils.py
|
zexihuang/raft-blockchain
|
a2f7365e10f5a5334c59bac6b551648bae04e2e8
|
[
"Apache-2.0"
] | null | null | null |
utils.py
|
zexihuang/raft-blockchain
|
a2f7365e10f5a5334c59bac6b551648bae04e2e8
|
[
"Apache-2.0"
] | null | null | null |
import socket
import pickle
import random
import string
import time
import hashlib
import os
BUFFER_SIZE = 65536
def send_message(msg, port):
# Set up a socket to send to the given port
s_temp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s_temp.connect((socket.gethostname(), port))
# encode and send message
msg = pickle.dumps(msg)
s_temp.send(msg)
# Receive ack.
ack = pickle.loads(s_temp.recv(BUFFER_SIZE))
# message_logger.info(f'Port {port} sends {ack}\n')
s_temp.close()
def receive_message(connection):
# Receive message and send acknowledgement.
header, sender, receiver, message = pickle.loads(connection.recv(BUFFER_SIZE))
connection.send(pickle.dumps('ACK'))
return header, sender, receiver, message
def generate_random_string_with_ending(length, ending):
found = False
s = ""
while not found:
s = ''.join(random.choices(string.ascii_lowercase + string.digits, k=length))
if s[-1] in ending:
found = True
return s
def get_hash(transactions, nonce):
will_encode = str((tuple(transactions), nonce))
return hashlib.sha3_256(will_encode.encode('utf-8')).hexdigest()
def read_first_blockchain():
def prepare_block(blockchain, transactions, term):
found = False
nonce = None
while not found:
nonce = generate_random_string_with_ending(length=6, ending={'0', '1', '2'})
cur_pow = get_hash(transactions, nonce)
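# Note (added for clarity): the digest's last hex character is roughly uniform
# over '0'-'f', so each candidate nonce passes the '0'-'2' check below with
# probability of about 3/16.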
if '2' >= cur_pow[-1] >= '0':
found = True
phash = None
if len(blockchain) > 0:
previous_nonce = blockchain[-1]['nonce']
previous_transactions = blockchain[-1]['transactions']
phash = get_hash(previous_transactions, previous_nonce)
return {'term': term, 'phash': phash, 'nonce': nonce, 'transactions': transactions}
if not os.path.exists('first_blockchain_processed.pkl'):
blockchain = []
file_path = 'first_blockchain.txt'
with open(file_path, 'r') as _file:
term = -1
transactions = []
for line in _file.readlines():
sender, receiver, amount = map(int, tuple(line.split()))
transaction_id = time.time()
transaction = (transaction_id, (sender, receiver, amount))
transactions.append(transaction)
if len(transactions) == 3:
# block is finished, find nonce...
block = prepare_block(blockchain, transactions, term)
blockchain.append(block)
transactions = []
if len(transactions) > 0:
transactions += [None for _ in range(3 - len(transactions))]
block = prepare_block(blockchain, transactions, term)
blockchain.append(block)
with open('first_blockchain_processed.pkl', 'wb') as _fb:
pickle.dump(blockchain, _fb)
def blockchain_print_format(blockchain):
blockchain_str = ""
for i, block in enumerate(blockchain):
term = block['term']
transactions = block['transactions']
new_block_str = f'[({term}) {[transaction[1] for transaction in transactions if transaction is not None]}]'
if i < len(blockchain) - 1:
new_block_str += ' -> '
blockchain_str += new_block_str
return blockchain_str
| 32.714286
| 115
| 0.616885
| 394
| 3,435
| 5.215736
| 0.317259
| 0.012165
| 0.032117
| 0.049635
| 0.115815
| 0.097324
| 0.062287
| 0.062287
| 0.062287
| 0
| 0
| 0.010822
| 0.273654
| 3,435
| 104
| 116
| 33.028846
| 0.812826
| 0.057642
| 0
| 0.157895
| 0
| 0
| 0.076471
| 0.018576
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092105
| false
| 0
| 0.092105
| 0
| 0.25
| 0.013158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c14181d8879fcc2609ab9415e7fe2cdbb328098
| 3,850
|
py
|
Python
|
api/data_refinery_api/test/test_dataset_stats.py
|
AlexsLemonade/refinebio
|
52f44947f902adedaccf270d5f9dbd56ab47e40a
|
[
"BSD-3-Clause"
] | 106
|
2018-03-05T16:24:47.000Z
|
2022-03-19T19:12:25.000Z
|
api/data_refinery_api/test/test_dataset_stats.py
|
AlexsLemonade/refinebio
|
52f44947f902adedaccf270d5f9dbd56ab47e40a
|
[
"BSD-3-Clause"
] | 1,494
|
2018-02-27T17:02:21.000Z
|
2022-03-24T15:10:30.000Z
|
api/data_refinery_api/test/test_dataset_stats.py
|
AlexsLemonade/refinebio
|
52f44947f902adedaccf270d5f9dbd56ab47e40a
|
[
"BSD-3-Clause"
] | 15
|
2019-02-03T01:34:59.000Z
|
2022-03-29T01:59:13.000Z
|
import json
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from data_refinery_api.test.test_api_general import API_VERSION
from data_refinery_common.models import (
Experiment,
ExperimentOrganismAssociation,
ExperimentSampleAssociation,
Organism,
Sample,
)
class APITestCases(APITestCase):
def setUp(self):
self.homo_sapiens = Organism(name="HOMO_SAPIENS", taxonomy_id=9606, is_scientific_name=True)
self.homo_sapiens.save()
def test_dataset_stats(self):
""" Test the dataset stats endpoint """
gallus_gallus = Organism(name="GALLUS_GALLUS", taxonomy_id=9031, is_scientific_name=True)
gallus_gallus.save()
equus_ferus = Organism(name="EQUUS_FERUS", taxonomy_id=1114792, is_scientific_name=True)
equus_ferus.save()
ex = Experiment()
ex.accession_code = "XYZ123"
ex.title = "XYZ123"
ex.description = "XYZ123"
ex.technology = "MICROARRAY"
ex.submitter_institution = "XYZ123"
ex.save()
ex2 = Experiment()
ex2.accession_code = "ABC789"
ex2.title = "ABC789"
ex2.description = "ABC789"
ex2.technology = "RNA-SEQ"
ex2.submitter_institution = "Funkytown"
ex2.save()
sample1 = Sample()
sample1.title = "1"
sample1.accession_code = "1"
sample1.platform_name = "AFFY"
sample1.is_processed = True
sample1.organism = self.homo_sapiens
sample1.save()
sample2 = Sample()
sample2.title = "2"
sample2.accession_code = "2"
sample2.platform_name = "ILLUMINA"
sample2.is_processed = True
sample2.organism = gallus_gallus
sample2.save()
sample3 = Sample()
sample3.title = "3"
sample3.accession_code = "3"
sample3.platform_name = "ILLUMINA"
sample3.is_processed = True
sample3.organism = gallus_gallus
sample3.save()
xoa = ExperimentOrganismAssociation()
xoa.experiment = ex
xoa.organism = self.homo_sapiens
xoa.save()
xoa = ExperimentOrganismAssociation()
xoa.experiment = ex2
xoa.organism = gallus_gallus
xoa.save()
xoa = ExperimentOrganismAssociation()
xoa.experiment = ex2
xoa.organism = equus_ferus
xoa.save()
experiment_sample_association = ExperimentSampleAssociation()
experiment_sample_association.sample = sample1
experiment_sample_association.experiment = ex
experiment_sample_association.save()
experiment_sample_association = ExperimentSampleAssociation()
experiment_sample_association.sample = sample2
experiment_sample_association.experiment = ex2
experiment_sample_association.save()
experiment_sample_association = ExperimentSampleAssociation()
experiment_sample_association.sample = sample3
experiment_sample_association.experiment = ex2
experiment_sample_association.save()
jdata = json.dumps(
{"email_address": "baz@gmail.com", "data": {"XYZ123": ["1"], "ABC789": ["2"]}}
)
response = self.client.post(
reverse("create_dataset", kwargs={"version": API_VERSION}),
jdata,
content_type="application/json",
)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.json()["data"], json.loads(jdata)["data"])
good_id = response.json()["id"]
# Check that we can fetch these sample details via samples API
response = self.client.get(
reverse("samples", kwargs={"version": API_VERSION}), {"dataset_id": good_id}
)
self.assertEqual(response.json()["count"], 2)
| 32.627119
| 100
| 0.647013
| 387
| 3,850
| 6.22739
| 0.268734
| 0.079668
| 0.13444
| 0.024896
| 0.256846
| 0.236515
| 0.236515
| 0.236515
| 0.236515
| 0.097925
| 0
| 0.031097
| 0.256623
| 3,850
| 117
| 101
| 32.905983
| 0.810971
| 0.024416
| 0
| 0.168421
| 0
| 0
| 0.066684
| 0
| 0
| 0
| 0
| 0
| 0.031579
| 1
| 0.021053
| false
| 0
| 0.063158
| 0
| 0.094737
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c15bfba4b8c0e66ef69eb440d0dc33cc1bed1d7
| 4,804
|
py
|
Python
|
hetzner_fix_report/hetzner_fix_report.py
|
flxai/hetzner-fix-report
|
ab484a3463ed0efc6f14ebd7b45d1b2c1281fb0b
|
[
"MIT"
] | 2
|
2020-06-20T21:50:38.000Z
|
2020-06-22T08:37:11.000Z
|
hetzner_fix_report/hetzner_fix_report.py
|
flxai/hetzner-fix-report
|
ab484a3463ed0efc6f14ebd7b45d1b2c1281fb0b
|
[
"MIT"
] | 4
|
2020-07-01T21:59:08.000Z
|
2020-07-05T11:33:59.000Z
|
hetzner_fix_report/hetzner_fix_report.py
|
flxai/hetzner-fix-report
|
ab484a3463ed0efc6f14ebd7b45d1b2c1281fb0b
|
[
"MIT"
] | null | null | null |
import pdftotext
import sys
import numpy as np
import pandas as pd
import regex as re
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def get_server_type(server_type_str):
"""Check wether string is contained"""
server_type_list = server_type_str.split(' ')
if len(server_type_list) < 2:
if server_type_str == 'Backup':
return 'backup'
else:
return 'unknown'
return server_type_list[1].split('-')[0]
def regex_match(server_type_str, regex, ret_id=1):
"""Applies a regular expression and returns a match """
m = re.match(regex, server_type_str)
return np.NaN if m is None else m.group(ret_id)
def regex_search(server_type_str, regex, ret_id=1):
"""Applies a regular expression and returns a match """
m = re.search(regex, server_type_str)
return np.NaN if m is None else m.group(ret_id)
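# Hedged illustration (added for clarity; the sample strings are hypothetical):
# both helpers return np.NaN when the pattern does not match, so downstream
# pandas code can rely on NaN handling, e.g.
#   regex_match('Cloud server #1234 "web-1"', r'.*#([0-9]+) ".*')  -> '1234'
#   regex_match('Backup', r'.*#([0-9]+) ".*')                      -> nan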
def hetzner_fix_report(csv_path, pdf_path):
# Column keys for the original, malformed CSV
df_keys = [
'server_type_str',
'comment',
'date_from',
'date_to',
'quantity',
'price',
'price_net',
'empty',
]
# Keys' new order
df_keys_reorder = ['server_id', 'name', 'project', 'type', 'quantity', 'usage_hours', 'price', 'price_max',
'price_net', 'price_gross', 'vat', 'date_from', 'date_to', 'is_backup', 'is_server', 'is_ceph']
# Load the original, malformed CSV
df = pd.read_csv(csv_path, sep=',', names=df_keys)
# Whether entry is a backup
df['is_backup'] = df.server_type_str.apply(lambda x: 'Backup' in x)
# Whether entry is a server instance
df['is_server'] = df.server_type_str.apply(lambda x: 'Server' in x)
# Whether entry uses Ceph
df['is_ceph'] = df.server_type_str.apply(lambda x: 'ceph' in x)
# Server types according to https://www.hetzner.de/cloud
df['type'] = df.server_type_str.apply(get_server_type)
# Hetzner's instance id
df['server_id'] = df.comment.apply(lambda x: regex_match(x, r'.*#([0-9]+) ".*'))
# Maximum price for hourly rated servers
df['price_max'] = df.comment.apply(lambda x: regex_search(x,
r'(?:period|Zeitraum).*?((?:€\s*[\d.]+)|(?:[\d,]+\s*€))'))
df_price_max_mask = ~df.price_max.isna()
df.loc[df_price_max_mask, 'price_max'] = \
df.price_max.loc[df_price_max_mask].apply(lambda x: float(x.replace('€', '').replace(',', '.')))
# Set server name
df['name'] = df.comment.apply(lambda x: regex_match(x, r'.+"([^"]+)"'))
df.loc[df['name'] == 'instance', 'name'] = np.nan
# Usage in hours
df['usage_hours'] = df.comment.apply(lambda x: regex_search(x, r'(?:Usage|Nutzung):.*?(\d+)\s*h'))
# Drop unnecessary columns
df.drop(['comment', 'server_type_str', 'empty'], axis=1, inplace=True)
# Combine with pdf to get project names
with open(pdf_path, 'rb') as f:
pdf = pdftotext.PDF(f)
# Collect VAT value
vat = None
for page in pdf:
m = re.search(r'(USt\.|VAT) \(([0-9.,]+) ?%\)', page)
if m is not None:
vat = float(m[2])
break
else:
m = re.search(r'Tax rate.*\n.*?([\d.,]+) ?%', page)
if m is not None:
vat = float(m[1])
if vat is None:
eprint('VAT information could not be found!')
sys.exit(1)
df['vat'] = vat / 100
df['price_net'] = df.quantity * df.price
df['price_gross'] = df.price_net * (1 + df.vat)
# Collect individual projects' names
projects = []
for page in pdf:
projects += re.findall(r'Proje[ck]t "([^"]+)"', page)
projects = np.array(projects)
# Collect individual projects' string locations
page_factor = 1e6
projects_loc = []
for project in projects:
for i, page in enumerate(pdf):
loc = page.find(project)
if loc != -1:
# Add page offset to make locations comparable
projects_loc.append(loc + i * page_factor)
projects_loc = np.array(projects_loc)
# Collect individual server ids' string locations and map them to nearest previous project name
df['project'] = np.nan
sid_loc = []
for idx, sid in df.server_id[df.server_id.notnull()].items():
for i, page in enumerate(pdf):
loc = page.find(sid)
if loc == -1:
continue
# Add page offset to make locations comparable
loc = np.array(loc + i * page_factor)
sid_loc.append(loc)
diff_loc = projects_loc - loc
project_name = projects[np.where(diff_loc < 0, diff_loc, -np.inf).argmax()]
df.loc[idx, 'project'] = project_name
# Reorder columns
df = df[df_keys_reorder]
return df
| 32.90411
| 118
| 0.59159
| 683
| 4,804
| 4.004392
| 0.256223
| 0.065814
| 0.061792
| 0.021938
| 0.259232
| 0.239488
| 0.239488
| 0.182084
| 0.182084
| 0.090676
| 0
| 0.006206
| 0.262073
| 4,804
| 145
| 119
| 33.131034
| 0.764457
| 0.165279
| 0
| 0.107527
| 0
| 0
| 0.150943
| 0.020881
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053763
| false
| 0
| 0.053763
| 0
| 0.172043
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c16620f0a89c9e70bfae221558f9859765dc5b0
| 3,705
|
py
|
Python
|
src/random_forest.py
|
rrozema12/Data-Mining-Final-Project
|
4848f3daed4b75879b626c5dc460e8dbd70ae861
|
[
"MIT"
] | 1
|
2018-02-04T01:10:20.000Z
|
2018-02-04T01:10:20.000Z
|
src/random_forest.py
|
rrozema12/Data-Mining-Final-Project
|
4848f3daed4b75879b626c5dc460e8dbd70ae861
|
[
"MIT"
] | null | null | null |
src/random_forest.py
|
rrozema12/Data-Mining-Final-Project
|
4848f3daed4b75879b626c5dc460e8dbd70ae861
|
[
"MIT"
] | null | null | null |
# random_forest.py
# does the random forest calculations
import decision_tree
import partition
import heapq
import table_utils
import classifier_util
from homework_util import strat_folds
def run_a_table(table, indexes, class_index, N, M, F):
""" Takes a table, splits it into a training and test set. Creates a
random forest for the training set. Then tests the forest off of
the test set
:param table: a table of values
:param indexes: The indexes to partition on
:param class_index: The index of the label to predict
:param N: Number of trees to produce
:param M: Number of the best trees to choose
:param F: Subset size of random attributes
:return: A list of (actual, predicted) label tuples for the test set, plus
the training and test sets:
[(actual1, predicted1), (actual2, predicted2), ...], training, test
"""
domains = table_utils.get_domains(table, indexes)
folds = strat_folds(table, class_index, 3)
training = folds[0]
training.extend(folds[1])
test = folds[2]
forest = _random_forest(training, indexes, class_index, domains, N, M, F)
return [(row[class_index], predict_label(forest, row)) for row in test], \
training, test
def _random_forest(table, indexes, class_index, att_domains, N, M, F):
""" Generates a random forest classifier for a given table
:param table: a table
:param indexes: a list of indexes to partition on
:param class_index: the index of the class label to predict
:param N: Number of trees to produce
:param M: Number of the best trees to choose
:param F: Subset size of random attributes
:return: A list of (accuracy, tree) tuples for the M most accurate trees
[(accuracy1, tree1), ... , (accuracyM, treeM)]
"""
# We store the accuracies and trees in a priority queue
# lower numbers = higher priority
priority_queue = [] # see: https://docs.python.org/3/library/heapq.html#basic-examples
attributes = indexes
# Uses a training set and an out-of-bag remainder set from bootstrapping to create and score each tree
bags = partition.bagging(table, N)
for bag_set in bags:
tree = decision_tree.tdidt_RF(bag_set[0], attributes, att_domains, class_index, F)
acc = _accuracy_for_tree(tree,class_index, bag_set[1])
heapq.heappush(priority_queue, (acc, tree))
# push onto the priority queue
# heapq is a min-heap, so the lowest-accuracy trees sit at the front;
# popping N - M of them leaves only the M most accurate trees
for i in range(N - M):
heapq.heappop(priority_queue)
# Now our priority queue will be our list that we can return
return priority_queue
def _accuracy_for_tree(tree, class_index, test_set):
labels = decision_tree.classify_with_tree(tree, class_index, test_set)
return classifier_util.accuracy(labels)
def predict_label(forest, instance):
""" predicts the label of an instance given a forest using weighted
voting with accuracies
:param forest: a list of lists in te form returned by random_forest()
:param instance: an row to have a class label predicted
:return: a class label
"""
labels = {}
for acc_and_tree in forest:
prediction = decision_tree.get_label(acc_and_tree[1], instance)
# totals the accuracy predicted for each label
try:
labels[prediction] += acc_and_tree[0]
except KeyError:
labels[prediction] = acc_and_tree[0]
# gets the label with the highest predicted value
highest_value = 0
highest_label = 0
for current_label, value in labels.items():
if value > highest_value:
highest_label = current_label
return highest_label
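# Hedged usage sketch (added for illustration; `my_table` and the N/M/F values are
# placeholders, not part of the original module):
#   results, training, test = run_a_table(my_table, indexes=[0, 1, 2],
#                                         class_index=3, N=20, M=7, F=2)
#   # results is a list of (actual, predicted) pairs for the held-out fold,
#   # produced by the M best of N bagged trees via accuracy-weighted voting.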
| 35.970874
| 90
| 0.691768
| 540
| 3,705
| 4.618519
| 0.3
| 0.044106
| 0.011227
| 0.021652
| 0.189254
| 0.189254
| 0.131516
| 0.131516
| 0.131516
| 0.131516
| 0
| 0.006349
| 0.234818
| 3,705
| 102
| 91
| 36.323529
| 0.873369
| 0.48583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093023
| false
| 0
| 0.139535
| 0
| 0.325581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c186e241fa2559c5801595eef7a0db1d8af608a
| 18,320
|
py
|
Python
|
run.py
|
RafaelCenzano/Corona-Virus-Email-Updater
|
2d5bc071ab21fe8df358689862a019d400c73cd5
|
[
"MIT"
] | 3
|
2020-03-10T13:52:37.000Z
|
2020-03-15T17:19:39.000Z
|
run.py
|
RafaelCenzano/Corona-Virus-Email-Updater
|
2d5bc071ab21fe8df358689862a019d400c73cd5
|
[
"MIT"
] | null | null | null |
run.py
|
RafaelCenzano/Corona-Virus-Email-Updater
|
2d5bc071ab21fe8df358689862a019d400c73cd5
|
[
"MIT"
] | 2
|
2020-03-10T13:52:29.000Z
|
2022-01-13T19:58:28.000Z
|
import requests
import json
import os
from bs4 import BeautifulSoup as bs
from secret import *
from smtplib import SMTP
from datetime import datetime
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
def maths(num1, num2, num3=None):
num1 = int(''.join(num1.split(',')))
num2 = int(''.join(num2.split(',')))
if num3:
num3 = int(''.join(num3.split(',')))
num = '{:,}'.format(num1 - num2 - num3)
else:
num = '{:,}'.format(num1 - num2)
return num
def scraper():
r = requests.get(
'https://www.cdc.gov/coronavirus/2019-ncov/cases-updates/cases-in-us.html')
page = r.text
soup = bs(page, 'lxml')
nowFormatted = datetime.now().strftime('%-m/%-d/%y %-I:%M %p')
totals = soup.findAll(attrs={'class': 'count'})
newCasesData = soup.findAll(attrs={'class': 'new-cases'})
newCasesText = newCasesData[0].text
newCases = newCasesText[:len(newCasesText) - 11]
newDeathsText = newCasesData[1].text
newDeaths = newDeathsText[:len(newDeathsText) - 12]
r2 = requests.get(
'https://www.sfchronicle.com/bayarea/article/Coronavirus-live-updates-news-bay-area-15237940.php')
page2 = r2.text
soup2 = bs(page2, 'lxml')
pTags = soup2.findAll('p')
californiaParts = pTags[3].text[2:].split()
californiaCases = californiaParts[0]
californiaDeaths = californiaParts[len(californiaParts) - 2]
bayAreaParts = pTags[4].text[2:].split()
bayAreaCases = bayAreaParts[0]
bayAreaDeaths = bayAreaParts[len(bayAreaParts) - 2]
with open(jsonFilePath, 'r') as jsonFile:
jsonDataRead = json.load(jsonFile)
try:
calCasesToday = int(''.join(californiaCases.split(',')))
except BaseException:
calCasesToday = jsonDataRead['calCasesToday']
try:
calDeathsToday = int(''.join(californiaDeaths.split(',')))
except BaseException:
calDeathsToday = jsonDataRead['calDeathsToday']
try:
baCasesToday = int(''.join(bayAreaCases.split(',')))
except BaseException:
baCasesToday = jsonDataRead['baCasesToday']
bayAreaCases = jsonDataRead['baCasesToday']
try:
baDeathsToday = int(''.join(bayAreaDeaths.split(',')))
except BaseException:
baDeathsToday = jsonDataRead['baDeathsToday']
bayAreaDeaths = jsonDataRead['baDeathsToday']
r3 = requests.get('https://www.worldometers.info/coronavirus/')
page3 = r3.text
soup3 = bs(page3, 'lxml')
spanTags = soup3.findAll('span')
totalsWorld = soup3.findAll('div', attrs={'class': 'number-table-main'})
worldCases = spanTags[4].text
worldDeaths = spanTags[5].text
worldRecoveries = spanTags[6].text
mildCasesWorld = spanTags[8].text
criticalCasesWorld = spanTags[9].text
recoveredWorld = spanTags[11].text
currentWorldCases = totalsWorld[0].text
currentWorldClosed = totalsWorld[1].text
worldCasesToday = int(''.join(worldCases.split(',')))
worldDeathsToday = int(''.join(worldDeaths.split(',')))
worldRecoveriesToday = int(''.join(worldRecoveries.split(',')))
if os.path.isfile(jsonFilePath) == False:
jsonData = {
'other': {
'currentWorldCases': currentWorldCases,
'uscases': totals[0].text,
'usnewcases': newCases,
'usenewdeaths': newDeaths,
'usdeaths': totals[1].text},
'past': {
'calCasesToday': calCasesToday,
'calDeathsToday': calDeathsToday,
'baCasesToday': baCasesToday,
'baDeathsToday': baDeathsToday,
'worldCases': worldCasesToday,
'worldDeaths': worldDeathsToday,
'worldRecoveries': worldRecoveriesToday},
'past2': {
'calCasesToday': calCasesToday,
'calDeathsToday': calDeathsToday,
'baCasesToday': baCasesToday,
'baDeathsToday': baDeathsToday,
'worldCases': worldCasesToday,
'worldDeaths': worldDeathsToday,
'worldRecoveries': worldRecoveriesToday}}
with open(jsonFilePath, 'w') as jsonFile:
json.dump(jsonData, jsonFile)
calDifferenceCases = 'Unknown'
calDifferenceDeaths = 'Unknown'
baDifferenceCases = 'Unknown'
baDifferencesDeaths = 'Unknown'
wDifferenceCases = 'Unknown'
wDifferenceDeath = 'Unknown'
wDifferenceRecoveries = 'Unknown'
calDifferenceCases1 = 'Unknown'
calDifferenceDeaths1 = 'Unknown'
baDifferenceCases1 = 'Unknown'
baDifferencesDeaths1 = 'Unknown'
wDifferenceCases1 = 'Unknown'
wDifferenceDeath1 = 'Unknown'
wDifferenceRecoveries1 = 'Unknown'
else:
with open(jsonFilePath, 'r') as jsonFile:
jsonDataFile = json.load(jsonFile)
calDifferenceCases = '{:,}'.format(
calCasesToday - jsonDataFile['past']['calCasesToday'])
calDifferenceDeaths = '{:,}'.format(
calDeathsToday - jsonDataFile['past']['calDeathsToday'])
baDifferenceCases = '{:,}'.format(
baCasesToday - jsonDataFile['past']['baCasesToday'])
baDifferencesDeaths = '{:,}'.format(
baDeathsToday - jsonDataFile['past']['baDeathsToday'])
wDifferenceCases = '{:,}'.format(
worldCasesToday - int(jsonDataFile['past']['worldCases']))
wDifferenceDeath = '{:,}'.format(
worldDeathsToday - int(jsonDataFile['past']['worldDeaths']))
wDifferenceRecoveries = '{:,}'.format(
worldRecoveriesToday - int(jsonDataFile['past']['worldRecoveries']))
calDifferenceCases1 = '{:,}'.format(
jsonDataFile['past']['calCasesToday'] -
jsonDataFile['past2']['calCasesToday'])
calDifferenceDeaths1 = '{:,}'.format(
jsonDataFile['past']['calDeathsToday'] -
jsonDataFile['past2']['calDeathsToday'])
baDifferenceCases1 = '{:,}'.format(
jsonDataFile['past']['baCasesToday'] -
jsonDataFile['past2']['baCasesToday'])
baDifferencesDeaths1 = '{:,}'.format(
jsonDataFile['past']['baDeathsToday'] -
jsonDataFile['past2']['baDeathsToday'])
wDifferenceCases1 = '{:,}'.format(
jsonDataFile['past']['worldCases'] - int(jsonDataFile['past2']['worldCases']))
wDifferenceDeath1 = '{:,}'.format(
jsonDataFile['past']['worldDeaths'] - int(jsonDataFile['past2']['worldDeaths']))
wDifferenceRecoveries1 = '{:,}'.format(
jsonDataFile['past']['worldRecoveries'] - int(jsonDataFile['past2']['worldRecoveries']))
pastWorldCases = jsonDataFile['other']['currentWorldCases']
pastUsCases = jsonDataFile['other']['uscases']
pastUsNewCases = jsonDataFile['other']['usnewcases']
pastUsDeaths = jsonDataFile['other']['usdeaths']
pastUsNewDeaths = jsonDataFile['other']['usenewdeaths']
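# Roll the history forward: copy 'past' into 'past2', then store today's figures in 'past'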
jsonDataFile['past2']['calCasesToday'] = jsonDataFile['past']['calCasesToday']
jsonDataFile['past2']['calDeathsToday'] = jsonDataFile['past']['calDeathsToday']
jsonDataFile['past2']['baCasesToday'] = jsonDataFile['past']['baCasesToday']
jsonDataFile['past2']['baDeathsToday'] = jsonDataFile['past']['baDeathsToday']
jsonDataFile['past2']['worldCases'] = jsonDataFile['past']['worldCases']
jsonDataFile['past2']['worldDeaths'] = jsonDataFile['past']['worldDeaths']
jsonDataFile['past2']['worldRecoveries'] = jsonDataFile['past']['worldRecoveries']
jsonDataFile['past']['calCasesToday'] = calCasesToday
jsonDataFile['past']['calDeathsToday'] = calDeathsToday
jsonDataFile['past']['baCasesToday'] = baCasesToday
jsonDataFile['past']['baDeathsToday'] = baDeathsToday
jsonDataFile['past']['worldCases'] = worldCasesToday
jsonDataFile['past']['worldDeaths'] = worldDeathsToday
jsonDataFile['past']['worldRecoveries'] = worldRecoveriesToday
jsonDataFile['other'] = {
'currentWorldCases': currentWorldCases,
'uscases': totals[0].text,
'usnewcases': newCases,
'usenewdeaths': newDeaths,
'usdeaths': totals[1].text}
with open(jsonFilePath, 'w') as jsonFile:
json.dump(jsonDataFile, jsonFile)
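# Plain-text body of the report email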
emailMessage = (f'''
Hello,
Update: {nowFormatted}
World Data from WorldOMeter:
Total cases since outbreak: {worldCases}, Yesterday: {maths(worldCases,wDifferenceCases)}
Total current cases: {currentWorldCases}, Yesterday: {pastWorldCases}
New cases: {wDifferenceCases}, Yesterday: {wDifferenceCases1}
Total closed cases: {currentWorldClosed}, Yesterday: {maths(currentWorldClosed,wDifferenceDeath,wDifferenceRecoveries)}
Total deaths: {worldDeaths}, Yesterday: {maths(worldDeaths,wDifferenceDeath)}
New deaths: {wDifferenceDeath}, Yesterday: {wDifferenceDeath1}
Total Recoveries: {worldRecoveries}, Yesterday: {maths(worldRecoveries,wDifferenceRecoveries)}
New Recoveries: {wDifferenceRecoveries}, Yesterday: {wDifferenceRecoveries1}
United States Data from CDC:
Total cases: {totals[0].text}, Yesterday: {pastUsCases}
New cases: {newCases}, Yesterday: {pastUsNewCases}
Total deaths: {totals[1].text}, Yesterday: {pastUsDeaths}
New deaths: {newDeaths}, Yesterday: {pastUsNewDeaths}
California Data from SF Chronicle:
Total cases: {californiaCases}, Yesterday: {maths(californiaCases,calDifferenceCases)}
New cases: {calDifferenceCases}, Yesterday: {calDifferenceCases1}
Total deaths: {californiaDeaths}, Yesterday: {maths(californiaDeaths,calDifferenceDeaths)}
New deaths: {calDifferenceDeaths}, Yesterday: {calDifferenceDeaths1}
Bay Area from SF Chronicle:
Total cases: {bayAreaCases}, Yesterday: {maths(bayAreaCases,baDifferenceCases)}
New cases: {baDifferenceCases}, Yesterday: {baDifferenceCases1}
Total deaths: {bayAreaDeaths}, Yesterday: {maths(bayAreaDeaths,baDifferencesDeaths)}
New deaths: {baDifferencesDeaths}, Yesterday: {baDifferencesDeaths1}
- COVID-19 Reporter
(Created by Rafael Cenzano)''')
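# HTML alternative of the same report, laid out as tables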
emailMessageHtml = (f'''
<html lang="en">
<head></head>
<body>
<p>Hello,</p>
<p>Update: {nowFormatted}</p>
<br>
<h2>World Data from <a href="https://www.worldometers.info/coronavirus/" target="_blank">WorldOMeter</a>:</h2>
<table border="0" cellpadding="4px" cellspacing="0" height="auto" width="auto%">
<tr>
<td align="center" valign="top">Info</td>
<td align="center" valign="top">Today's Data</td>
<td align="center" valign="top">Yesterday's Data</td>
</tr>
<tr>
<td align="left" valign="top">Cases since outbreak</td>
<td align="left" valign="top">{worldCases}</td>
<td align="left" valign="top">{maths(worldCases,wDifferenceCases)}</td>
</tr>
<tr>
<td align="left" valign="top">Current Cases</td>
<td align="left" valign="top">{currentWorldCases}</td>
<td align="left" valign="top">{pastWorldCases}</td>
</tr>
<tr>
<td align="left" valign="top">New Cases</td>
<td align="left" valign="top">{wDifferenceCases}</td>
<td align="left" valign="top">{wDifferenceCases1}</td>
</tr>
<tr>
<td align="left" valign="top">Closed Cases</td>
<td align="left" valign="top">{currentWorldClosed}</td>
<td align="left" valign="top">{maths(currentWorldClosed,wDifferenceDeath,wDifferenceRecoveries)}</td>
</tr>
<tr>
<td align="left" valign="top">Total Deaths</td>
<td align="left" valign="top">{worldDeaths}</td>
<td align="left" valign="top">{maths(worldDeaths,wDifferenceDeath)}</td>
</tr>
<tr>
<td align="left" valign="top">New Deaths</td>
<td align="left" valign="top">{wDifferenceDeath}</td>
<td align="left" valign="top">{wDifferenceDeath1}</td>
</tr>
<tr>
<td align="left" valign="top">Total Recoveries</td>
<td align="left" valign="top">{worldRecoveries}</td>
<td align="left" valign="top">{maths(worldRecoveries,wDifferenceRecoveries)}</td>
</tr>
<tr>
<td align="left" valign="top">New Recoveries</td>
<td align="left" valign="top">{wDifferenceRecoveries}</td>
<td align="left" valign="top">{wDifferenceRecoveries1}</td>
</tr>
</table>
<br>
<h2>United States Data from <a href="https://www.cdc.gov/coronavirus/2019-ncov/cases-updates/cases-in-us.html" target="_blank">CDC</a>:</h2>
<table border="0" cellpadding="4px" cellspacing="0" height="auto" width="auto%">
<tr>
<td align="center" valign="top">Info</td>
<td align="center" valign="top">Today's Data</td>
<td align="center" valign="top">Yesterday's Data</td>
</tr>
<tr>
<td align="left" valign="top">Total Cases</td>
<td align="left" valign="top">{totals[0].text}</td>
<td align="left" valign="top">{pastUsCases}</td>
</tr>
<tr>
<td align="left" valign="top">New Cases</td>
<td align="left" valign="top">{newCases}</td>
<td align="left" valign="top">{pastUsNewCases}</td>
</tr>
<tr>
<td align="left" valign="top">Total Deaths</td>
<td align="left" valign="top">{totals[1].text}</td>
<td align="left" valign="top">{pastUsDeaths}</td>
</tr>
<tr>
<td align="left" valign="top">New Deaths</td>
<td align="left" valign="top">{newDeaths}</td>
<td align="left" valign="top">{pastUsNewDeaths}</td>
</tr>
</table>
<br>
<h2>California Data from <a href="https://www.sfchronicle.com/bayarea/article/Coronavirus-live-updates-news-bay-area-15237940.php" target="_blank">SF Chronicle</a>:</h2>
<table border="0" cellpadding="4px" cellspacing="0" height="auto" width="auto%">
<tr>
<td align="center" valign="top">Info</td>
<td align="center" valign="top">Today's Data</td>
<td align="center" valign="top">Yesterday's Data</td>
</tr>
<tr>
<td align="left" valign="top">Total Cases</td>
<td align="left" valign="top">{californiaCases}</td>
<td align="left" valign="top">{maths(californiaCases,calDifferenceCases)}</td>
</tr>
<tr>
<td align="left" valign="top">New Cases</td>
<td align="left" valign="top">{calDifferenceCases}</td>
<td align="left" valign="top">{calDifferenceCases1}</td>
</tr>
<tr>
<td align="left" valign="top">Total Deaths</td>
<td align="left" valign="top">{californiaDeaths}</td>
<td align="left" valign="top">{maths(californiaDeaths,calDifferenceDeaths)}</td>
</tr>
<tr>
<td align="left" valign="top">New Deaths</td>
<td align="left" valign="top">{calDifferenceDeaths}</td>
<td align="left" valign="top">{calDifferenceDeaths1}</td>
</tr>
</table>
<br>
<h2>Bay Area from <a href="https://www.sfchronicle.com/bayarea/article/Coronavirus-live-updates-news-bay-area-15237940.php" target="_blank">SF Chronicle</a>:</h2>
<table border="0" cellpadding="4px" cellspacing="0" height="auto" width="auto%">
<tr>
<td align="center" valign="top">Info</td>
<td align="center" valign="top">Today's Data</td>
<td align="center" valign="top">Yesterday's Data</td>
</tr>
<tr>
<td align="left" valign="top">Total Cases</td>
<td align="left" valign="top">{bayAreaCases}</td>
<td align="left" valign="top">{maths(bayAreaCases,baDifferenceCases)}</td>
</tr>
<tr>
<td align="left" valign="top">New Cases</td>
<td align="left" valign="top">{baDifferenceCases}</td>
<td align="left" valign="top">{baDifferenceCases1}</td>
</tr>
<tr>
<td align="left" valign="top">Total Deaths</td>
<td align="left" valign="top">{bayAreaDeaths}</td>
<td align="left" valign="top">{maths(bayAreaDeaths,baDifferencesDeaths)}</td>
</tr>
<tr>
<td align="left" valign="top">New Deaths</td>
<td align="left" valign="top">{baDifferencesDeaths}</td>
<td align="left" valign="top">{baDifferencesDeaths1}</td>
</tr>
</table>
<br>
<h4>- COVID-19 Reporter</h4>
<p>(Created by <a href="https://rafaelcenzano.com" target="_blank">Rafael Cenzano</a>)</p>
</body>
</html>''')
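# Email each recipient a multipart/alternative message via Gmail SMTP with STARTTLS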
for recieverEmail in recieverEmails:
msg = MIMEMultipart('alternative')
msg['From'] = f'COVID-19 Reporter <{senderEmail}>'
msg['To'] = recieverEmail
msg['Subject'] = f'CoronaVirus update: {nowFormatted}'
part1 = MIMEText(emailMessage, 'plain')
part2 = MIMEText(emailMessageHtml, 'html')
msg.attach(part1)
msg.attach(part2)
message = msg.as_string()
smtp_server = SMTP('smtp.gmail.com', 587)
smtp_server.ehlo_or_helo_if_needed()
smtp_server.starttls()
smtp_server.ehlo_or_helo_if_needed()
smtp_server.login(senderEmail, senderPassword)
smtp_server.sendmail(senderEmail, recieverEmail, message)
smtp_server.quit()
print(f'Email sent to {recieverEmail} @ {nowFormatted}')
if __name__ == '__main__':
scraper()
| 41.922197
| 177
| 0.593177
| 1,686
| 18,320
| 6.428233
| 0.152432
| 0.046503
| 0.060897
| 0.094113
| 0.380605
| 0.332718
| 0.301163
| 0.26444
| 0.252445
| 0.236021
| 0
| 0.012393
| 0.251255
| 18,320
| 436
| 178
| 42.018349
| 0.777721
| 0
| 0
| 0.330667
| 0
| 0.029333
| 0.587445
| 0.134279
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005333
| false
| 0.002667
| 0.024
| 0
| 0.032
| 0.002667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c19d7164f4d767fbe5d4431bf900ccb1c4a00d6
| 6,494
|
py
|
Python
|
Machine_Learning/Feature_Tutorials/04-tensorflow-ai-optimizer/files/application/app_mt.py
|
dankernel/Vitis-Tutorials
|
558791a2350327ea275917db890797a895d0fac2
|
[
"Apache-2.0"
] | null | null | null |
Machine_Learning/Feature_Tutorials/04-tensorflow-ai-optimizer/files/application/app_mt.py
|
dankernel/Vitis-Tutorials
|
558791a2350327ea275917db890797a895d0fac2
|
[
"Apache-2.0"
] | null | null | null |
Machine_Learning/Feature_Tutorials/04-tensorflow-ai-optimizer/files/application/app_mt.py
|
dankernel/Vitis-Tutorials
|
558791a2350327ea275917db890797a895d0fac2
|
[
"Apache-2.0"
] | null | null | null |
'''
Copyright 2020 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Author: Mark Harvey, Xilinx Inc
'''
from ctypes import *
import cv2
import numpy as np
import runner
import os
import xir.graph
import pathlib
import xir.subgraph
import threading
import time
import sys
import argparse
divider = '-----------------------------------------------'
def preprocess_fn(image_path):
'''
Image pre-processing.
Rearranges from BGR to RGB then normalizes to range 0:1
input arg: path of image file
return: numpy array
'''
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = image/255.0
return image
def get_subgraph (g):
'''
interrogate model file to return subgraphs
Returns a list of subgraph objects
'''
sub = []
root = g.get_root_subgraph()
sub = [ s for s in root.children
if s.metadata.get_attr_str ("device") == "DPU"]
return sub
def runDPU(id,start,dpu,img):
'''
DPU execution - called in thread from app function.
Arguments:
id: integer to identify thread - not currently used
start: Start index for writes to out_q.
dpu: runner
img: list of pre-processed images to pass into DPU
'''
''' input/output tensor information
get_input_tensors() and get_output_tensors() return lists of tensor objects.
The lists will contain one element for each input or output of the network.
The shape of each tensor object is (batch,height,width,channels)
For Edge DPU, batchsize is always 1.
'''
inputTensors = dpu.get_input_tensors()
outputTensors = dpu.get_output_tensors()
#print('Input tensor :',inputTensors[0].name,inputTensors[0].shape)
#print('Output Tensor:',outputTensors[0].name,outputTensors[0].shape)
outputSize = outputTensors[0].dims[1]*outputTensors[0].dims[2]*outputTensors[0].dims[3]
shapeIn = inputTensors[0].shape
shapeOut = outputTensors[0].shape
for i in range(len(img)):
'''prepare lists of np arrays to hold input & output tensors '''
inputData = []
inputData.append(img[i].reshape(shapeIn))
outputData = []
outputData.append(np.empty((shapeOut), dtype = np.float32, order = 'C'))
'''start DPU, wait until it finishes '''
job_id = dpu.execute_async(inputData,outputData)
dpu.wait(job_id)
''' output data shape is currently (batch,height,width,channels)
so flatten it into (batch,height*width*channels)'''
outputData[0] = outputData[0].reshape(1, outputSize)
''' store results in global lists '''
out_q[start+i] = outputData[0][0]
return
def app(image_dir,threads,model):
'''
main application function
'''
listimage=os.listdir(image_dir)
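# Process at most the first 2500 images (runTotal is capped by the slice below)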
runTotal = len(listimage[:2500])
print('Found',len(listimage),'images - processing',runTotal,'of them')
''' global list that all threads can write results to '''
global out_q
out_q = [None] * runTotal
''' get a list of subgraphs from the compiled model file '''
g = xir.graph.Graph.deserialize(pathlib.Path(model))
subgraphs = get_subgraph (g)
print('Found',len(subgraphs),'subgraphs in',model)
''' preprocess images '''
print('Pre-processing',runTotal,'images...')
img = []
for i in range(runTotal):
path = os.path.join(image_dir,listimage[i])
img.append(preprocess_fn(path))
''' create dpu runners
Each thread receives a dpu runner.
Each dpu runner executes a subgraph
'''
all_dpu_runners = []
for i in range(threads):
all_dpu_runners.append(runner.Runner(subgraphs[0], "run"))
''' create threads
Each thread receives a section of the preprocessed images list as input and
will write results into the corresponding section of the global out_q list.
'''
threadAll = []
start=0
for i in range(threads):
if (i==threads-1):
end = len(img)
else:
end = start+(len(img)//threads)
in_q = img[start:end]
t1 = threading.Thread(target=runDPU, args=(i,start,all_dpu_runners[i], in_q))
threadAll.append(t1)
start=end
'''run threads '''
print('Starting',threads,'threads...')
time1 = time.time()
for x in threadAll:
x.start()
for x in threadAll:
x.join()
time2 = time.time()
threads_time = time2 - time1
''' post-processing '''
classes = ['dog','cat']
correct = 0
wrong = 0
for i in range(len(out_q)):
argmax = np.argmax((out_q[i]))
prediction = classes[argmax]
ground_truth, _ = listimage[i].split('.',1)
if (ground_truth==prediction):
correct += 1
else:
wrong += 1
accuracy = correct/len(out_q)
print (divider)
print('Correct:',correct,'Wrong:',wrong,'Accuracy:', accuracy)
print (divider)
fps = float(runTotal / threads_time)
print('FPS: %.2f, total frames: %.0f, total time: %.3f seconds' %(fps,runTotal,threads_time))
print (divider)
return
# only used if script is run as 'main' from command line
def main():
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--image_dir',type=str,default='images', help='Path to folder of images. Default is images')
ap.add_argument('-t', '--threads', type=int,default=1, help='Number of threads. Default is 1')
ap.add_argument('-m', '--model', type=str,default='model_dir/dpu_alexnet_np.elf', help='Path of folder with .elf or .xmodel. Default is model_dir/dpu_alexnet_np.elf')
args = ap.parse_args()
print (divider)
print ('Command line options:')
print (' --image_dir : ', args.image_dir)
print (' --threads : ', args.threads)
print (' --model : ', args.model)
print (divider)
app(args.image_dir,args.threads,args.model)
if __name__ == '__main__':
main()
| 29.788991
| 172
| 0.647213
| 874
| 6,494
| 4.729977
| 0.322654
| 0.007741
| 0.007257
| 0.013304
| 0.034833
| 0.011127
| 0
| 0
| 0
| 0
| 0
| 0.012363
| 0.227749
| 6,494
| 217
| 173
| 29.926267
| 0.811964
| 0.191407
| 0
| 0.118182
| 0
| 0
| 0.12979
| 0.025464
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.109091
| 0
| 0.190909
| 0.136364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c1bc99ed022294f204e51cb23b911f2274cbb0b
| 525
|
py
|
Python
|
examples/cuda/bfs/py/vcache.py
|
bespoke-silicon-group/bsg_replicant
|
cadd8dcb3fb1382adf39479cdd9bc7463f269fa0
|
[
"BSD-3-Clause"
] | 12
|
2020-03-27T13:15:54.000Z
|
2022-03-25T14:22:26.000Z
|
examples/cuda/bfs/py/vcache.py
|
bespoke-silicon-group/bsg_f1
|
08b7be7162719b92b4796f18b0caad263f90ea2f
|
[
"BSD-3-Clause"
] | 255
|
2019-05-10T01:08:51.000Z
|
2020-01-29T18:45:32.000Z
|
examples/cuda/bfs/py/vcache.py
|
bespoke-silicon-group/bsg_replicant
|
cadd8dcb3fb1382adf39479cdd9bc7463f269fa0
|
[
"BSD-3-Clause"
] | 8
|
2020-02-21T18:28:34.000Z
|
2021-07-24T00:22:29.000Z
|
from vcache_utils import VCacheStats
from bfs_common import BFSParameters
import sys
import pandas as pd
class BFSVCacheStats(VCacheStats):
def _subclass_init_add_group_by_fields(self):
self._parameters = BFSParameters(self.filename)
self._parameters.updateDataFrame(self._data)
self._group_by_fields += self._parameters.parameters
return
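# Aggregate the diffed vcache stats from every file passed on the command line into one summary CSV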
data = pd.DataFrame()
for filename in sys.argv[1:]:
data = data.append(BFSVCacheStats(filename).diffed_data)
data.to_csv("vcache.summary.csv")
| 29.166667
| 60
| 0.76381
| 67
| 525
| 5.731343
| 0.552239
| 0.109375
| 0.067708
| 0.088542
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002247
| 0.152381
| 525
| 17
| 61
| 30.882353
| 0.860674
| 0
| 0
| 0
| 0
| 0
| 0.034286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.285714
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c1bdfa67d70b8200b8f300c42147c7b6f88c84a
| 14,511
|
py
|
Python
|
fgread/read.py
|
FASTGenomics/jupyter-fgread-py
|
400eb54e2376a8a3afaa674397617fa64c33a280
|
[
"MIT"
] | 1
|
2019-12-09T17:41:09.000Z
|
2019-12-09T17:41:09.000Z
|
fgread/read.py
|
FASTGenomics/jupyter-fgread-py
|
400eb54e2376a8a3afaa674397617fa64c33a280
|
[
"MIT"
] | 2
|
2019-09-26T13:49:56.000Z
|
2020-08-06T15:10:17.000Z
|
fgread/read.py
|
FASTGenomics/jupyter-fgread-py
|
400eb54e2376a8a3afaa674397617fa64c33a280
|
[
"MIT"
] | null | null | null |
import json
import logging
import re
from pathlib import Path
from typing import Optional, Union
import pandas as pd
from . import DOCSURL, DS_URL_PREFIX, readers
# configure logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
DEFAULT_READERS = {
"loom": readers.read_loom_to_anndata,
"rds": readers.read_seurat_to_anndata,
"h5ad": readers.read_anndata_to_anndata,
"hdf5": readers.read_10xhdf5_to_anndata,
"h5": readers.read_10xhdf5_to_anndata,
"tsv": readers.read_densetsv_to_anndata,
"csv": readers.read_densecsv_to_anndata,
}
DATA_DIR = Path("/fastgenomics/data")
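# Preferred column order for the datasets overview DataFrame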
DF_SORT_ORDER = [
"title",
"id",
"organism",
"tissue",
"numberOfCells",
"numberOfGenes",
"path",
"numberOfExpressionDataFiles",
"expressionDataFileNames",
"numberOfMetaDataFiles",
"metaDataFileNames",
"expressionDataFileInfos",
"metaDataFileInfos",
]
def get_datasets_df(data_dir: Path = DATA_DIR) -> pd.DataFrame:
"""Constructs a :py:func:`pandas.DataFrame` from all available datasets.
Parameters
----------
data_dir : Path, optional
Directory containing the datasets, e.g. ``fastgenomics/data``, by default DATA_DIR
Returns
-------
pd.DataFrame
A pandas DataFrame containing all available datasets
"""
ds_paths = get_ds_paths(data_dir=data_dir)
ds_df = pd.DataFrame()
for ds_path in ds_paths:
with open(ds_path / "dataset_info.json") as f:
info_df = json.load(f)
info_df["path"] = str(ds_path)
info_df["numberOfExpressionDataFiles"] = len(
info_df["expressionDataFileInfos"]
)
info_df["numberOfMetaDataFiles"] = len(info_df["metaDataFileInfos"])
_ = info_df.pop("schemaVersion", None)
ds_df = ds_df.append(info_df, ignore_index=True)
# sort colnames
col_names = ds_df.columns.values.tolist()
col_names_sorted = [name for name in DF_SORT_ORDER if name in col_names]
[col_names.remove(name) for name in DF_SORT_ORDER if name in col_names]
col_names_sorted.extend(col_names)
ds_df = ds_df[col_names_sorted]
# Format types
ds_df = ds_df.astype(
{
"numberOfCells": "int32",
"numberOfGenes": "int32",
"numberOfExpressionDataFiles": "int32",
"numberOfMetaDataFiles": "int32",
}
)
return ds_df
def ds_info(
ds: Optional[str] = None,
pretty: Optional[bool] = None,
output: Optional[bool] = None,
data_dir: Path = DATA_DIR,
) -> pd.DataFrame:
"""Get information on all available datasets in this analysis.
Parameters
----------
ds : Optional[str], optional
A single dataset ID or dataset title. If set, only this dataset will be displayed. Recommended to use with ``pretty``, by default None
pretty : bool, optional
Whether to display nicely formatted output; defaults to True when ``ds`` is given, otherwise False
output : bool, optional
Whether to return a DataFrame; defaults to True when ``ds`` is omitted, otherwise False
data_dir : Path, optional
Directory containing the datasets, e.g. ``fastgenomics/data``, by default DATA_DIR
Returns
-------
pd.DataFrame
A pandas DataFrame containing all, or a single dataset (depends on ``ds``)
"""
if pretty is None:
pretty = ds is not None
if output is None:
output = ds is None
if not pretty and not output:
logger.warning(
'You have set "pretty" and "output" to false. Hence, this function will do/return nothing.'
)
return
try:
ds_df = get_datasets_df(data_dir=data_dir)
except NoDatasetsError as err:
logger.warning(err)
return pd.DataFrame()
def add_url(title, id):
return f'<a href="{DS_URL_PREFIX}{id}" target="_blank">{title}</a>'
def disp_pretty_df(df, index=True, header=True):
try:
from IPython.display import display, Markdown
df_html = df.to_html(
render_links=True,
escape=False,
header=header,
index=index,
justify="center",
)
display(Markdown(df_html))
except:
logger.warning(
"IPython not available. Pretty printing only works in Jupyter Notebooks."
)
if ds:
single_ds_df = select_ds_id(ds, df=ds_df)
single_ds_df["expressionDataFileNames"] = ", ".join(
[
expr["name"]
for expr in single_ds_df.loc[0, "expressionDataFileInfos"]
]
)
single_ds_df["metaDataFileNames"] = ", ".join(
[expr["name"] for expr in single_ds_df.loc[0, "metaDataFileInfos"]]
)
# Sort columns
single_col_names = single_ds_df.columns.values.tolist()
single_col_names_sorted = [
name for name in DF_SORT_ORDER if name in single_col_names
]
[
single_col_names.remove(name)
for name in DF_SORT_ORDER
if name in single_col_names
]
single_col_names_sorted.extend(single_col_names)
single_ds_df = single_ds_df[single_col_names_sorted]
if pretty:
pretty_df = single_ds_df
pretty_df["expressionDataFileNames"] = "<br>".join(
[
expr["name"]
for expr in pretty_df.loc[0, "expressionDataFileInfos"]
]
)
pretty_df["metaDataFileNames"] = ", ".join(
[expr["name"] for expr in pretty_df.loc[0, "metaDataFileInfos"]]
)
empty_cols = [
col for col in pretty_df.columns if pretty_df.loc[0, col] == ""
]
pretty_df = pretty_df.drop(
labels=["expressionDataFileInfos", "metaDataFileInfos"]
+ empty_cols,
axis=1,
errors="ignore",
)
pretty_df.loc[0, "title"] = pretty_df.apply(
lambda x: add_url(x.title, x.id), axis=1
).squeeze()
disp_pretty_df(pretty_df.T, header=False)
if output:
return single_ds_df
else:
if pretty:
pretty_df = ds_df.drop(
labels=[
"description",
"license",
"preprocessing",
"citation",
"webLink",
"file",
"expressionDataFileInfos",
"metaDataFileInfos",
],
axis=1,
errors="ignore",
)
pretty_df["title"] = pretty_df.apply(
lambda x: add_url(x.title, x.id), axis=1
)
disp_pretty_df(pretty_df)
if output:
return ds_df
def load_data(
ds: Optional[str] = None,
data_dir: Path = DATA_DIR,
additional_readers: dict = {},
expression_file: Optional[str] = None,
as_format: Optional[str] = None,
):
"""This function loads a single dataset into an AnnData object.
If there are multiple datasets available you need to specify one by setting
``ds`` to a dataset `id` or dataset `title`.
To get an overview of available datasets use :py:func:`ds_info`
Parameters
----------
ds : str, optional
A single dataset ID or dataset title to select a dataset to be loaded.
If only one dataset is available you do not need to set this parameter, by default None
data_dir : Path, optional
Directory containing the datasets, e.g. ``fastgenomics/data``, by default DATA_DIR
additional_readers : dict, optional
Used to specify your own readers for the specific data set format.
Dict key needs to be file extension (e.g., h5ad), dict value a function.
Still experimental, by default {}
expression_file : str, optional
The name of the expression file to load.
Only needed when there are multiple expression files in a dataset.
as_format: str, optional
Specifies which reader should be used for this dataset. Overwrites the auto-detection
of the format. Possible parameters are the file extensions of our supported data
formats: ``h5ad``, ``h5``, ``hdf5``, ``loom``, ``rds``, ``csv``, ``tsv``.
Returns
-------
AnnData Object
A single AnnData object with dataset id in `obs` and all dataset metadata in `uns`
Examples
--------
To use a custom reader for files with the extension ".fg", you have to define a function first:
>>> def my_loader(file):
... anndata = magic_file_loading(file)
... return anndata
You can then use this reader like this:
>>> fgread.load_data("my_dataset", additional_readers={"fg": my_loader})
"""
readers = {**DEFAULT_READERS, **additional_readers}
if ds:
single_df = select_ds_id(ds, df=get_datasets_df(data_dir=data_dir))
else:
single_df = get_datasets_df(data_dir=data_dir)
if len(single_df) > 1:
raise RuntimeError(
"There is more than one dataset available in this analysis. "
"Please select one by its ID or title. "
'You can list available datasets by using "fgread.ds_info()".'
)
exp_count = single_df.loc[0, "numberOfExpressionDataFiles"]
meta_count = single_df.loc[0, "numberOfMetaDataFiles"]
if exp_count == 0:
raise TypeError(
f"There is no expression data available in this data set.\n"
f"Metadata files: {meta_count}."
)
exp_files = [
exp["name"] for exp in single_df.loc[0, "expressionDataFileInfos"]
]
if expression_file:
if expression_file in exp_files:
file = expression_file
else:
raise KeyError(
f'Expression file "{expression_file}" not found in dataset. '
f"Available expression files are: {exp_files}."
)
else:
if exp_count == 1:
file = single_df.loc[0, "expressionDataFileInfos"][0]["name"]
else:
raise TypeError(
f"There are {exp_count} expression data files in this dataset. "
'Please specify which one you want to load using the parameter "expression_file". '
f"Available expression files are: {exp_files}."
)
title = single_df.loc[0, "title"]
ds_id = single_df.loc[0, "id"]
path = single_df.loc[0, "path"]
metadata_dict = single_df.loc[0].to_dict()
if as_format:
format = as_format.lower()
else:
try:
format = file.rsplit(".", 1)[1].lower()
logger.info(f'Expression file "{file}" with format "{format}".')
except ValueError as e:
raise ValueError(
f'The expression file "{file}" has no valid file suffix.'
).with_traceback(e.__traceback__)
if format in readers:
if meta_count != 0:
logger.info(
f"There are {meta_count} metadata files in this dataset. "
"This data will not be integrated into the anndata object."
)
logger.info(
f'Loading file "{file}" from dataset "{title}" in format "{format}" from directory "{path}"...\n'
)
adata = readers[format](Path(path) / file)
adata.uns["ds_metadata"] = {ds_id: {"title": title}}
adata.uns["ds_metadata_raw"] = {ds_id: str(metadata_dict)}
adata.obs["fg_id"] = ds_id
n_genes = adata.shape[1]
n_cells = adata.shape[0]
logger.info(
f'Loaded dataset "{title}" with {n_cells} cells and {n_genes} genes.\n'
f"==================================================================\n"
)
return adata
else:
raise KeyError(
f'Unsupported file format "{format}", use one of {list(readers)}. '
f'You can force the usage of a specific reader by setting "as_format" to a supported format. '
f"In addition, you can also implement your own reading function. See {DOCSURL} for more information."
)
def select_ds_id(ds: str, df: pd.DataFrame = None) -> pd.DataFrame:
"""Select a single dataset from a pandas DataFrame by its ID or title
Parameters
----------
ds : str
A single dataset ID or dataset title for selection
df : pd.DataFrame, optional
A pandas DataFrame from which a single entry is selected, by default None
Returns
-------
pd.DataFrame
A pandas DataFrame with only the selected dataset.
"""
single_df = df.loc[(df["id"] == ds) | (df["title"] == ds)].reset_index(
drop=True
)
len_df = len(single_df)
if len_df == 1:
return single_df.copy()
elif len_df == 0:
add_err = ""
if not ds.startswith("dataset-"):
add_err = " Please note that dataset titles can be changed by the owner. To be safe, you might want to consider dataset IDs instead."
raise KeyError("Your selection matches no datasets." + add_err)
else:
from IPython.display import display
display(single_df)
raise KeyError(
f"Your selection matches {len_df} datasets. Please make sure to select exactly one."
)
def get_ds_paths(data_dir: Union[str, Path] = DATA_DIR) -> list:
"""Gets available datasets for this analysis from path.
Parameters
----------
data_dir : Union[str,Path], optional
Directory containing the datasets, e.g. "fastgenomics/data", by default DATA_DIR
Returns
-------
list
A list of dataset paths
"""
data_dir = Path(data_dir)
if not data_dir.exists():
raise NoDatasetsError(
f'There are no datasets attached to this analysis. Path "{data_dir}" does not exist.'
)
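# Dataset directories are expected to be named dataset_NNNN (four digits), as matched by the regex below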
paths = [
Path(subdir)
for subdir in sorted(data_dir.iterdir())
if subdir.is_dir() and re.match(r"^dataset_\d{4}$", subdir.name)
]
if not paths:
raise NoDatasetsError(
f'There are no datasets attached to this analysis. Path "{data_dir}" is empty.'
)
return paths
class NoDatasetsError(Exception):
"""Raised when no datasets are attached"""
pass
| 32.175166
| 145
| 0.591551
| 1,769
| 14,511
| 4.691916
| 0.18372
| 0.026145
| 0.01012
| 0.011566
| 0.248434
| 0.197108
| 0.171446
| 0.152169
| 0.144096
| 0.126747
| 0
| 0.005056
| 0.304803
| 14,511
| 450
| 146
| 32.246667
| 0.817704
| 0.226242
| 0
| 0.168385
| 0
| 0.010309
| 0.257448
| 0.052041
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024055
| false
| 0.003436
| 0.027491
| 0.003436
| 0.085911
| 0.003436
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c1dc30f32a47cfe9ea5fa235e76eff1529c75dd
| 4,368
|
py
|
Python
|
iotapy/storage/providers/types/transaction_metadata.py
|
aliciawyy/iota-python
|
b8d421acf94ccd9e7374f799fbe496f6d23e3cf3
|
[
"MIT"
] | 34
|
2017-10-24T15:04:02.000Z
|
2021-09-05T17:46:43.000Z
|
iotapy/storage/providers/types/transaction_metadata.py
|
aliciawyy/iota-python
|
b8d421acf94ccd9e7374f799fbe496f6d23e3cf3
|
[
"MIT"
] | 8
|
2017-12-18T21:53:08.000Z
|
2021-06-01T21:24:31.000Z
|
iotapy/storage/providers/types/transaction_metadata.py
|
aliciawyy/iota-python
|
b8d421acf94ccd9e7374f799fbe496f6d23e3cf3
|
[
"MIT"
] | 11
|
2017-12-18T22:02:29.000Z
|
2020-11-10T17:58:22.000Z
|
# -*- coding: utf-8 -*-
import struct
import iota
from iotapy.storage import converter as conv
TRANSACTION_METADATA_TRITS_LENGTH = 1604
HASH_BYTES_LENGTH = 49
HASH_TRITS_LENGTH = 243
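# Record layout: 49-byte hash fields (address, bundle, trunk, branch, legacy tag, tag) interleaved with
# big-endian 64-bit ('>q') and 32-bit ('>l') integers, a single solid flag byte, and trailing sender bytes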
def get_key(bytes_: bytes):
# Convert key bytes to iota.TransactionHash
if not isinstance(bytes_, bytes):
raise TypeError
key = iota.TransactionHash.from_trits(conv.from_binary_to_trits(bytes_, HASH_TRITS_LENGTH))
return key
def get(bytes_: bytes, key=None):
if bytes_ is None:
return None
if not isinstance(bytes_, bytes):
raise TypeError
i = 0
address = iota.Address.from_trits(conv.from_binary_to_trits(bytes_[:HASH_BYTES_LENGTH], HASH_TRITS_LENGTH))
i += HASH_BYTES_LENGTH
bundle = iota.BundleHash.from_trits(conv.from_binary_to_trits(bytes_[i:i + HASH_BYTES_LENGTH], HASH_TRITS_LENGTH))
i += HASH_BYTES_LENGTH
trunk = iota.TransactionHash.from_trits(conv.from_binary_to_trits(bytes_[i:i + HASH_BYTES_LENGTH], HASH_TRITS_LENGTH))
i += HASH_BYTES_LENGTH
branch = iota.TransactionHash.from_trits(conv.from_binary_to_trits(bytes_[i:i + HASH_BYTES_LENGTH], HASH_TRITS_LENGTH))
i += HASH_BYTES_LENGTH
legacy_tag = iota.Hash.from_trits(conv.from_binary_to_trits(bytes_[i:i + HASH_BYTES_LENGTH], HASH_TRITS_LENGTH))
i += HASH_BYTES_LENGTH
value = struct.unpack('>q', bytes_[i:i + 8])[0]
i += 8
current_index = struct.unpack('>q', bytes_[i:i + 8])[0]
i += 8
last_index = struct.unpack('>q', bytes_[i:i + 8])[0]
i += 8
timestamp = struct.unpack('>q', bytes_[i:i + 8])[0]
i += 8
tag = iota.Hash.from_trits(conv.from_binary_to_trits(bytes_[i:i + HASH_BYTES_LENGTH], HASH_TRITS_LENGTH))
i += HASH_BYTES_LENGTH
attachment_timestamp = struct.unpack('>q', bytes_[i:i + 8])[0]
i += 8
attachment_timestamp_lower_bound = struct.unpack('>q', bytes_[i:i + 8])[0]
i += 8
attachment_timestamp_upper_bound = struct.unpack('>q', bytes_[i:i + 8])[0]
i += 8
validity = struct.unpack('>l', bytes_[i:i + 4])[0]
i += 4
type_ = struct.unpack('>l', bytes_[i:i + 4])[0]
i += 4
arrival_time = struct.unpack('>q', bytes_[i:i + 8])[0]
i += 8
height = struct.unpack('>q', bytes_[i:i + 8])[0]
i += 8
# Solidity flag (single byte)
solid = bytes_[i] == 1
i += 1
snapshot = struct.unpack('>l', bytes_[i:i + 4])[0]
i += 4
sender = bytes_[i:]
return {
'address': address,
'bundle_hash': bundle,
'trunk_transaction_hash': trunk,
'branch_transaction_hash': branch,
'legacy_tag': legacy_tag,
'value': value,
'current_index': current_index,
'last_index': last_index,
'timestamp': timestamp,
'tag': tag,
'attachment_timestamp': attachment_timestamp,
'attachment_timestamp_lower_bound': attachment_timestamp_lower_bound,
'attachment_timestamp_upper_bound': attachment_timestamp_upper_bound,
'validity': validity,
'type': type_,
'arrival_time': arrival_time,
'height': height,
'solid': solid,
'snapshot': snapshot,
'sender': sender
}
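# Serialize transaction metadata back to bytes using the same field order that get() reads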
def save(value: iota.Transaction):
buf = b''
buf += conv.from_trits_to_binary(value.address.as_trits())
buf += conv.from_trits_to_binary(value.bundle_hash.as_trits())
buf += conv.from_trits_to_binary(value.trunk_transaction_hash.as_trits())
buf += conv.from_trits_to_binary(value.branch_transaction_hash.as_trits())
buf += conv.from_trits_to_binary(iota.Hash.from_trits(value.legacy_tag.as_trits()).as_trits())
buf += struct.pack('>q', value.value)
buf += struct.pack('>q', value.current_index)
buf += struct.pack('>q', value.last_index)
buf += struct.pack('>q', value.timestamp)
buf += conv.from_trits_to_binary(iota.Hash.from_trits(value.tag.as_trits()).as_trits())
buf += struct.pack('>q', value.attachment_timestamp)
buf += struct.pack('>q', value.attachment_timestamp_lower_bound)
buf += struct.pack('>q', value.attachment_timestamp_upper_bound)
buf += struct.pack('>l', value.validity)
buf += struct.pack('>l', value.type)
buf += struct.pack('>q', value.arrival_time)
buf += struct.pack('>q', value.height)
buf += struct.pack('>?', value.solid)
buf += struct.pack('>l', value.snapshot)
buf += value.sender
return buf
| 35.225806
| 123
| 0.66163
| 617
| 4,368
| 4.385737
| 0.116694
| 0.042129
| 0.043976
| 0.065041
| 0.654472
| 0.597931
| 0.551737
| 0.477088
| 0.477088
| 0.449372
| 0
| 0.013924
| 0.194368
| 4,368
| 123
| 124
| 35.512195
| 0.755044
| 0.017628
| 0
| 0.217822
| 0
| 0
| 0.069046
| 0.025426
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029703
| false
| 0
| 0.029703
| 0
| 0.09901
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6c1f784f7fc92dd4f1d6302efb41edae068a6f5e
| 5,980
|
py
|
Python
|
Student Database/last.py
|
manas1410/Miscellaneous-Development
|
8ffd2b586cb05b12ed0855d97c3015c8bb2a6c01
|
[
"MIT"
] | null | null | null |
Student Database/last.py
|
manas1410/Miscellaneous-Development
|
8ffd2b586cb05b12ed0855d97c3015c8bb2a6c01
|
[
"MIT"
] | null | null | null |
Student Database/last.py
|
manas1410/Miscellaneous-Development
|
8ffd2b586cb05b12ed0855d97c3015c8bb2a6c01
|
[
"MIT"
] | null | null | null |
from tkinter import*
import website
import tkinter.font as font
from PIL import ImageTk,Image
import os
import sqlite3
import webbrowser
def main():
cgnc=Tk()
cgnc.title('Show')
cgnc.iconbitmap("logo/spectrumlogo.ico")
f=font.Font(family='Bookman Old Style',size=10,weight='bold')
f1=font.Font(family='Bookman Old Style',size=10)
db=sqlite3.connect("mark_list.db")
#cursor
c=db.cursor()
#query the database
c.execute("SELECT *,oid FROM mark_list")
records=c.fetchall()
l=len(records)
ch=records[l-1][4]
ma=records[l-1][5]
co=records[l-1][6]
us=records[l-1][0]
#commit_changes
db.commit()
#close connection
db.close()
def cgpa():
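# CGPA: average of the three subject marks divided by 9.5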
cg1=((ch+ma+co)/3)/9.5
cg="{:.2f}".format(cg1)
db=sqlite3.connect("mark_list.db")
c=db.cursor()
#query the database
c.execute("SELECT *,oid FROM mark_list")
records=c.fetchall()
l=len(records)
n6=records[l-1][1]
c.execute("""UPDATE mark_list SET cgpa=? WHERE name=?""",(cg,n6))
#commit_changes
db.commit()
#close connection
db.close()
entry.delete(0,END)
entry.insert(0,cg)
def grad():
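# Map the average mark to a letter grade: O (90-100), E (80s), A (70s), B (60s), C (50s), D (40s), F (below 40)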
av=((ch+ma+co)/3)
if av<=100 and av>=90:
gr='O'
elif av<90 and av>=80:
gr='E'
elif av<80 and av>=70:
gr='A'
elif av<70 and av>=60:
gr='B'
elif av<60 and av>=50:
gr='C'
elif av<50 and av>=40:
gr='D'
elif av<40:
gr='F'
db=sqlite3.connect("mark_list.db")
c=db.cursor()
#query the database
c.execute("SELECT *,oid FROM mark_list")
records=c.fetchall()
l=len(records)
n6=records[l-1][1]
c.execute("""UPDATE mark_list SET grade=? WHERE name=?""",(gr,n6))
#commit_changes
db.commit()
#close connection
db.close()
entry.delete(0,END)
entry.insert(0,gr)
#buttons
cgpa=Button(cgnc,text='CGPA',bg='yellow',fg='black',borderwidth=3,padx=25,pady=20,command=cgpa,font=f)
cgpa.grid(row=0,column=0)
grade=Button(cgnc,text='GRADE',bg='yellow',fg='black',borderwidth=3,padx=20,pady=20,command=grad,font=f)
grade.grid(row=0,column=1)
Label(cgnc,text="\n").grid(row=1)
def new():
db=sqlite3.connect("mark_list.db")
#cursor
c=db.cursor()
#insert into tabels
c.execute("INSERT INTO mark_list VALUES(:user_name,:name,:registration_no,:branch,:chemistry,:math,:computer,:cgpa,:grade)",
{
'user_name':us,
'name':' ',
'registration_no':' ',
'branch':' ',
'chemistry':0,
'math':0,
'computer':0,
'cgpa':0,
'grade':' '
})
#commit_changes
db.commit()
#close connection
db.close()
cgnc.destroy()
import input_details
input_details.main()
def close():
os._exit(1)
new_input=Button(cgnc,text='New Input',bg='yellow',fg='black',borderwidth=3,padx=10,pady=20,command=new,font=f)
new_input.grid(row=2,column=0)
close=Button(cgnc,text='Close',bg='yellow',fg='black',borderwidth=3,command=close,padx=20,pady=20,font=f)
close.grid(row=2,column=1)
Label(cgnc,text="\n").grid(row=3)
entry=Entry(cgnc,borderwidth=3,width=44)
entry.grid(row=4,column=0,columnspan=2,padx=20)
def show_en():
show_ent=Toplevel()
show_ent.geometry("600x450")
db=sqlite3.connect("mark_list.db")
#cursor
c=db.cursor()
#query the database
c.execute("SELECT *,oid FROM mark_list")
records=c.fetchall()
f=font.Font(family='Bookman Old Style',size=10,weight='bold')
l=len(records)
Label(show_ent,text="Username",font=f,fg='red').grid(row=0,column=0)
Label(show_ent,text="Name",font=f,fg='red').grid(row=0,column=1)
Label(show_ent,text="Registration ID",font=f,fg='red').grid(row=0,column=2)
Label(show_ent,text="Branch",font=f,fg='red').grid(row=0,column=3)
Label(show_ent,text="Chemistry",font=f,fg='red').grid(row=0,column=4)
Label(show_ent,text="Math",font=f,fg='red').grid(row=0,column=5)
Label(show_ent,text="Computer",font=f,fg='red').grid(row=0,column=6)
Label(show_ent,text="Cgpa",font=f,fg='red').grid(row=0,column=7)
Label(show_ent,text="Grade",font=f,fg='red').grid(row=0,column=8)
r=1
r1=0
for record in records:
if(records[l-1][0]==record[0]):
l1=list(record)
for c in range(0,9):
Label(show_ent,text=l1[c],fg='blue',font=f1).grid(row=r1+1,column=c)
r+=1
r=r+1
r1=r1+1
#commit_changes
db.commit()
#close connection
db.close()
show=Button(cgnc,text='Show Entries',bg='yellow',fg='black',borderwidth=3,command=show_en,padx=84,pady=5,font=f)
show.grid(row=5,column=0,columnspan=2,padx=40)
fo=font.Font(family='36 DAYS',size=10)
def call(url):
webbrowser.open_new(url)
Label(cgnc,text="\nVisit our club website:",fg='blue',font=fo).grid(row=6,column=0,columnspan=2)
l=Label(cgnc,text="https://spectrumcet.com/",fg='blue',font=fo)
l.bind("<Button-1>",lambda x:call('https://spectrumcet.com/'))
l.grid(row=7,column=0,columnspan=2)
mainloop()
if __name__=='__main__':
main()
| 30.824742
| 133
| 0.533779
| 831
| 5,980
| 3.78219
| 0.203369
| 0.044543
| 0.027999
| 0.048998
| 0.484569
| 0.44448
| 0.440025
| 0.388801
| 0.25008
| 0.25008
| 0
| 0.041129
| 0.300669
| 5,980
| 193
| 134
| 30.984456
| 0.710426
| 0.044314
| 0
| 0.269504
| 0
| 0.007092
| 0.1501
| 0.019989
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049645
| false
| 0
| 0.056738
| 0
| 0.106383
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|