| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
aef488759816cabfb40bd3b6063dcdfb1b53455d | 3,216 | py | Python | ane_research/utils/kendall_top_k.py | michaeljneely/sparse-attention-explanation | 658b181f67963fe22dd0489bd9b37bdbd05110c1 | ["MIT"] | 2 | 2020-03-25T22:13:09.000Z | 2021-01-06T04:28:03.000Z | ane_research/utils/kendall_top_k.py | michaeljneely/sparse-attention-explanation | 658b181f67963fe22dd0489bd9b37bdbd05110c1 | ["MIT"] | null | null | null | ane_research/utils/kendall_top_k.py | michaeljneely/sparse-attention-explanation | 658b181f67963fe22dd0489bd9b37bdbd05110c1 | ["MIT"] | null | null | null |
'''Top-k kendall-tau distance.
This module generalises kendall-tau as defined in [1].
It returns a score in [-1, 1]: 1 for identical (in the sense of top-k) lists and -1 for completely different ones.
Example:
    Simply call kendall_top_k with two same-length arrays of ratings (or rankings), the number of top elements k (default is the maximum possible), and p (default is 0.5, see [1]) as parameters:
import kendall
a = np.array([1,2,3,4,5])
b = np.array([5,4,3,2,1])
kendall.kendall_top_k(a,b,k=4)
Author: Alessandro Checco
https://github.com/AlessandroChecco
References
[1] Fagin, Ronald, Ravi Kumar, and D. Sivakumar. 'Comparing top k lists.' SIAM Journal on Discrete Mathematics 17.1 (2003): 134-160.
'''
# pylint: disable=E1101
# pylint incorrectly identifies some types as tuples
import math
import numpy as np
import scipy.stats as stats
import scipy.special as special
def kendall_top_k(a, b, k=None, kIsNonZero=False, p=0.5):
    '''
    kendall_top_k(np.array, np.array, k, p)
    This function generalises kendall-tau as defined in
    [1] Fagin, Ronald, Ravi Kumar, and D. Sivakumar. 'Comparing top k lists.' SIAM Journal on Discrete Mathematics 17.1 (2003): 134-160.
    The normalized distance is rescaled to a correlation-style score: it returns 1 for identical (in the sense of top-k) lists and -1 for completely different ones.
    Example:
        Simply call it with two same-length arrays of ratings (or rankings),
        the number of top elements k (default is the maximum possible), and p (default is 0.5, see [1]) as parameters:
        $ a = np.array([1,2,3,4,5])
        $ b = np.array([5,4,3,2,1])
        $ kendall_top_k(a,b,k=4)
    If the kIsNonZero option is True, k is set to the number of non-zero values in a or b, whichever is smaller.
    '''
    a = np.array(a)
    b = np.array(b)
    if a.size != b.size:
        raise ValueError('The two arrays need to have the same length')
    if kIsNonZero:
        k = min(np.count_nonzero(a), np.count_nonzero(b))
    elif k is None:
        k = a.size
    k = min(k, a.size)
a_top_k = np.argpartition(a,-k)[-k:]
b_top_k = np.argpartition(b,-k)[-k:]
common_items = np.intersect1d(a_top_k,b_top_k)
only_in_a = np.setdiff1d(a_top_k, common_items)
only_in_b = np.setdiff1d(b_top_k, common_items)
# case 1
kendall = (1 - (stats.kendalltau(a[common_items], b[common_items])[0] / 2 + 0.5)) * common_items.size**2
if np.isnan(kendall): # degenerate case with only one item (not defined by Kendall)
#print('DEGENERATE CASE <= 1 in common')
kendall = 0
#case 2 (& 3 ?)
test = 0
for i in common_items:
for j in only_in_a:
if a[i] < a[j]:
test += 1
for j in only_in_b:
if b[i] < b[j]:
test += 1
kendall += test
# case 4
kendall += 2 * p * special.binom(k-common_items.size, 2)
# case 3
kendall /= (only_in_a.size + only_in_b.size + common_items.size)**2 #normalization
kendall = -2 * kendall + 1 # change to correct range
return (kendall, k)
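A minimal usage sketch for the function above (assumes only NumPy and SciPy, as the module already imports):

import numpy as np
a = np.array([1, 2, 3, 4, 5])
b = np.array([5, 4, 3, 2, 1])  # fully reversed ratings
score, k_used = kendall_top_k(a, b, k=4)
print(score, k_used)  # score is negative (the top-4 lists largely disagree), k_used == 4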
| 34.212766 | 204 | 0.634639 | 536 | 3,216 | 3.720149 | 0.270522 | 0.032096 | 0.027583 | 0.018054 | 0.430291 | 0.418255 | 0.411234 | 0.368104 | 0.338014 | 0.338014 | 0 | 0.036792 | 0.247823 | 3,216 | 93 | 205 | 34.580645 | 0.787516 | 0.557525 | 0 | 0.055556 | 0 | 0 | 0.029608 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.111111 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
aef7c4bd6270658e2d5f6a301a21f1fd8ae19292 | 619 | py | Python | test/math/test_matmul.py | ctgk/bayes | 96eab9305eaeecc5a5b032cdf92a8285de4f60bf | ["MIT"] | 21 | 2019-01-08T05:58:41.000Z | 2021-11-26T14:24:11.000Z | test/math/test_matmul.py | ctgk/bayes | 96eab9305eaeecc5a5b032cdf92a8285de4f60bf | ["MIT"] | null | null | null | test/math/test_matmul.py | ctgk/bayes | 96eab9305eaeecc5a5b032cdf92a8285de4f60bf | ["MIT"] | 11 | 2019-05-04T13:44:19.000Z | 2021-08-05T04:26:19.000Z |
import unittest
import numpy as np
import bayesnet as bn
class TestMatMul(unittest.TestCase):
def test_matmul(self):
x = np.random.rand(10, 3)
y = np.random.rand(3, 5)
g = np.random.rand(10, 5)
xp = bn.Parameter(x)
z = xp @ y
self.assertTrue((z.value == x @ y).all())
z.backward(g)
self.assertTrue((xp.grad == g @ y.T).all())
yp = bn.Parameter(y)
z = x @ yp
self.assertTrue((z.value == x @ y).all())
z.backward(g)
self.assertTrue((yp.grad == x.T @ g).all())
if __name__ == '__main__':
unittest.main()
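The identities asserted above (d(x @ y)/dx = g @ y.T and d(x @ y)/dy = x.T @ g for an upstream gradient g) can be checked numerically with plain NumPy; this finite-difference sketch is independent of the bayesnet package:

import numpy as np
x, y, g = np.random.rand(4, 3), np.random.rand(3, 2), np.random.rand(4, 2)
loss = lambda xv: np.sum(g * (xv @ y))  # scalar loss whose gradient w.r.t. x is g @ y.T
eps = 1e-6
num_grad = np.zeros_like(x)
for i in range(x.shape[0]):
    for j in range(x.shape[1]):
        xp = x.copy()
        xp[i, j] += eps
        num_grad[i, j] = (loss(xp) - loss(x)) / eps  # forward difference
assert np.allclose(num_grad, g @ y.T, atol=1e-4)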
| 22.925926 | 51 | 0.534733 | 91 | 619 | 3.538462 | 0.384615 | 0.173913 | 0.111801 | 0.086957 | 0.304348 | 0.304348 | 0.304348 | 0.304348 | 0.304348 | 0.304348 | 0 | 0.018519 | 0.3021 | 619 | 26 | 52 | 23.807692 | 0.726852 | 0 | 0 | 0.2 | 0 | 0 | 0.012924 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
aef90fca9fea526b2891e7df58b0f264aee383cd | 2,387 | py | Python | test.py | VegaSera/SWNDiscordBot2 | cb73b9d51591b6af9f2a1a603ea0dd8a7161020c | ["MIT"] | 2 | 2020-09-08T18:08:55.000Z | 2021-06-22T17:13:32.000Z | test.py | VegaSera/SWNDiscordBot2 | cb73b9d51591b6af9f2a1a603ea0dd8a7161020c | ["MIT"] | null | null | null | test.py | VegaSera/SWNDiscordBot2 | cb73b9d51591b6af9f2a1a603ea0dd8a7161020c | ["MIT"] | 1 | 2020-06-30T19:12:27.000Z | 2020-06-30T19:12:27.000Z |
class char:
def __init__(self):
self.str = 15
self.dex = 15
self.con = 14
self.wis = 15
self.int = 15
self.cha = 15
def raise_stat(self):
stats = [self.str, self.dex, self.con, self.int, self.wis, self.cha]
min_stat = min(stats)
for index, value in enumerate(stats):
if value == min_stat:
if index == 0:
#self.verbose_log += f"Free 14 - Raised Strength from {self.str} to 14."
self.str = 14
break
elif index == 1:
#self.verbose_log += f"Free 14 - Raised Dexterity from {self.dex} to 14."
self.dex = 14
break
elif index == 2:
#self.verbose_log += f"Free 14 - Raised Constitution from {self.con} to 14."
self.con = 14
break
elif index == 3:
#self.verbose_log += f"Free 14 - Raised Intelligence from {self.int} to 14."
self.int = 14
break
elif index == 4:
#self.verbose_log += f"Free 14 - Raised Wisdom from {self.wis} to 14."
self.wis = 14
break
elif index == 5:
#self.verbose_log += f"Free 14 - Raised Charisma from {self.cha} to 14."
self.cha = 14
break
print("Prints after for loop")
def change_stat(self):
self.cha = 15
newchar = char()
#newchar.raise_stat()
# print(newchar.cha)
# newchar.raise_stat()
# print(newchar.cha)
#
# class_type = None
# list(class_type)
# print(class_type, type(class_type))
#
# listthing = [0, 1, 2, 3, 4, 5]
#
# for i in listthing:
# if i == 2:
# listthing.append(1)
# elif i == 1:
# print("I FOUND A ONE! HOPEFULLY I'LL FIND ANOTHER")
# elif i == 3:
# listthing.remove(i)
# print(listthing)
#
# import random
#
# featuredict = {1:"Amphibian",2:"Bird",3:"Fish",4:"Insect",5:"Mammal",6:"Reptile",7:"Spider",8:"Exotic"}
# print(random.choice(featuredict))
def returns_tuple():
a = 5
b = 6
c = 7
return a, b, c
print("Function output", returns_tuple())
x, y, z = returns_tuple()
print("x =", x)
print("y =", y)
print("z =", z)
| 27.436782 | 105 | 0.492669 | 305 | 2,387 | 3.780328 | 0.291803 | 0.036427 | 0.072853 | 0.078057 | 0.194276 | 0.194276 | 0.140503 | 0 | 0 | 0 | 0 | 0.052027 | 0.379975 | 2,387 | 87 | 106 | 27.436782 | 0.727027 | 0.40553 | 0 | 0.222222 | 0 | 0 | 0.032421 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088889 | false | 0 | 0 | 0 | 0.133333 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
aeffe251e30362d499c33484220e03c6b09531a5 | 987 | py | Python | extracting_information/extract_payments.py | ErikOSorensen/mmrisk_instrument | 3a1bf587ec08362a4c24f8a40064142a5307c94c | ["BSD-3-Clause"] | null | null | null | extracting_information/extract_payments.py | ErikOSorensen/mmrisk_instrument | 3a1bf587ec08362a4c24f8a40064142a5307c94c | ["BSD-3-Clause"] | null | null | null | extracting_information/extract_payments.py | ErikOSorensen/mmrisk_instrument | 3a1bf587ec08362a4c24f8a40064142a5307c94c | ["BSD-3-Clause"] | null | null | null |
from mmr2web.models import *
import datetime
def get_payments_file(nok_per_usd=9.1412):
"""Default exchange rate taken from Norges Bank, Nov 22, 2019."""
    total_payment = 0
    with open("payments_mmrisk.csv", "w") as payments_out:
        payments_out.write("amount,message\n")
        for s in Situation.objects.filter(selected=True):
            if s.choice_risk:
                amount = DICE[s.die.dienumber]['eyes'][s.draw-1] / nok_per_usd
                message = "In mmr2 - someone decided to throw a dice on your behalf."
                if amount == 0:
                    amount = 0.01
                    message = "In mmr2 - someone decided to throw a dice on your behalf and you were unlucky."
            else:
                amount = s.safe_amount / nok_per_usd
                message = "In mmr2 - someone decided for the safe amount on your behalf."
            payments_out.write("%3.2f,%s\n" % (amount, message))
            total_payment += amount
return total_payment
get_payments_file()
| 37.961538 | 105 | 0.637285 | 140 | 987 | 4.35 | 0.528571 | 0.07225 | 0.044335 | 0.052545 | 0.220033 | 0.220033 | 0.220033 | 0.220033 | 0.124795 | 0 | 0 | 0.030178 | 0.261398 | 987 | 25 | 106 | 39.48 | 0.805213 | 0.059777 | 0 | 0 | 0 | 0 | 0.265727 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.095238 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4e002a3d2a0b17bea2d95b12a32b8e97ea924162 | 1,488 | py | Python | tests/extmod/uasyncio_threadsafeflag.py | ProofDx/micropython | 321d1897c34f16243edf2c94913d7cf877a013d1 | ["MIT"] | 13,648 | 2015-01-01T01:34:51.000Z | 2022-03-31T16:19:53.000Z | tests/extmod/uasyncio_threadsafeflag.py | ProofDx/micropython | 321d1897c34f16243edf2c94913d7cf877a013d1 | ["MIT"] | 7,092 | 2015-01-01T07:59:11.000Z | 2022-03-31T23:52:18.000Z | tests/extmod/uasyncio_threadsafeflag.py | ProofDx/micropython | 321d1897c34f16243edf2c94913d7cf877a013d1 | ["MIT"] | 4,942 | 2015-01-02T11:48:50.000Z | 2022-03-31T19:57:10.000Z |
# Test ThreadSafeFlag class
try:
import uasyncio as asyncio
except ImportError:
print("SKIP")
raise SystemExit
import micropython
try:
micropython.schedule
except AttributeError:
print("SKIP")
raise SystemExit
try:
# Unix port can't select/poll on user-defined types.
import uselect as select
poller = select.poll()
poller.register(asyncio.ThreadSafeFlag())
except TypeError:
print("SKIP")
raise SystemExit
async def task(id, flag):
print("task", id)
await flag.wait()
print("task", id, "done")
def set_from_schedule(flag):
print("schedule")
flag.set()
print("schedule done")
async def main():
flag = asyncio.ThreadSafeFlag()
# Set the flag from within the loop.
t = asyncio.create_task(task(1, flag))
print("yield")
await asyncio.sleep(0)
print("set event")
flag.set()
print("yield")
await asyncio.sleep(0)
print("wait task")
await t
# Set the flag from scheduler context.
print("----")
t = asyncio.create_task(task(2, flag))
print("yield")
await asyncio.sleep(0)
print("set event")
micropython.schedule(set_from_schedule, flag)
print("yield")
await asyncio.sleep(0)
print("wait task")
await t
# Flag already set.
print("----")
print("set event")
flag.set()
t = asyncio.create_task(task(3, flag))
print("yield")
await asyncio.sleep(0)
print("wait task")
await t
asyncio.run(main())
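ThreadSafeFlag is MicroPython-specific; under CPython the closest rough analogue of the in-loop pattern exercised above is asyncio.Event. This is a sketch, not a drop-in equivalent (Event is not thread-safe by itself):

import asyncio

async def task(id, event):
    print("task", id)
    await event.wait()
    print("task", id, "done")

async def main():
    event = asyncio.Event()
    t = asyncio.create_task(task(1, event))
    await asyncio.sleep(0)  # let the task start and block on wait()
    event.set()
    await t

asyncio.run(main())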
| 18.6 | 56 | 0.633065 | 192 | 1,488 | 4.869792 | 0.296875 | 0.057754 | 0.080214 | 0.117647 | 0.396791 | 0.255615 | 0.255615 | 0.255615 | 0.255615 | 0.255615 | 0 | 0.007048 | 0.237231 | 1,488 | 79 | 57 | 18.835443 | 0.81674 | 0.105511 | 0 | 0.589286 | 0 | 0 | 0.099623 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017857 | false | 0 | 0.071429 | 0 | 0.089286 | 0.357143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4e039a12924bbf9ee1073f9918fa1b333ccf4193 | 4,370 | py | Python | Python/biopsy/binding_hit.py | JohnReid/biopsy | 1eeb714ba5b53f2ecf776d865d32e2078cbc0338 | ["MIT"] | null | null | null | Python/biopsy/binding_hit.py | JohnReid/biopsy | 1eeb714ba5b53f2ecf776d865d32e2078cbc0338 | ["MIT"] | null | null | null | Python/biopsy/binding_hit.py | JohnReid/biopsy | 1eeb714ba5b53f2ecf776d865d32e2078cbc0338 | ["MIT"] | null | null | null |
#
# Copyright John Reid 2006
#
from _biopsy import *
def _hit_str( hit ):
return ",".join( [
hit.binder,
str( hit.location.position ),
str( hit.location.positive_strand ),
str( hit.p_binding )
] )
Hit.__str__ = _hit_str
def _location_start( location ):
return location.position
HitLocation.start = _location_start
def _location_end( location ):
return location.position + location.length
HitLocation.end = _location_end
def _location_overlap( location1, location2 ):
"""Do two hits overlap?"""
if location1.position < location2.position:
return location1.end() > location2.position
else:
return location2.end() > location1.position
HitLocation.overlap = _location_overlap
def _location_separation( location1, location2 ):
"""The separation between two locations"""
if location1.position >= location2.end():
return location1.position - location2.end()
else:
return location2.position - location1.end()
HitLocation.separation = _location_separation
def _hits_str( hits ):
return '\n'.join( [ str( hit ) for hit in hits ] )
HitVec.__str__ = _hits_str
def get_char_for_hit( hit ):
return hit.binder
def get_score_for_hit( hit ):
# return math.log( hit.p_binding )
return hit.p_binding
def get_max_p_binding_over_hits( hits ):
"""Takes a list of hits and returns a dictionary mapping binder names to max( p(binding) ) across all hits"""
result = { }
for hit in hits:
        if hit.binder not in result or result[hit.binder] < hit.p_binding:  # dict.has_key was removed in Python 3
result[hit.binder] = hit.p_binding
return result
def find_pair_in_analysis(
analysis,
pair,
max_separation = None,
separation = None
):
"""Finds in which analyses a pair of TFs bind
analysis: Analysis
pair: A tuple ( binder1, binder2, orientation1, orientation2 )
max_separation: If specified determines maximum separation
separation: If specified determines exact separation (over-rides max_separation)
Returns a list of keys for the analyses
"""
result = { }
for k in analysis.get_keys():
hits = analysis.get_hits_for( k )
found_pairs = find_pair_in_hits( hits, pair, max_separation, separation )
if found_pairs:
result[ k ] = found_pairs
return result
def find_pair_in_hits(
hits,
pair,
max_separation = None,
separation = None
):
"""Finds the locations where a pair of TFs bind in a sequence of hits
hits: The hits
pair: A tuple ( binder1, binder2, orientation1, orientation2 )
max_separation: If specified determines maximum separation
separation: If specified determines exact separation (overrides max_separation)
returns a sequence of pairs of hits that satisfy the criteria
"""
( binder1, binder2, orientation1, orientation2 ) = pair
result = [ ]
for h1 in hits:
if binder1 != h1.binder: continue
for h2 in hits:
if binder2 != h2.binder: continue
if h1.location.overlap( h2.location ): continue
distance = h1.location.separation( h2.location )
if None != separation and separation != distance: continue
if None != max_separation and max_separation < distance: continue
if h1.location.position < h2.location.position:
if (
h1.location.positive_strand != orientation1
or
h2.location.positive_strand != orientation2
): continue
else:
if (
h1.location.positive_strand == orientation1
or
h2.location.positive_strand == orientation2
): continue
result.append( ( h1, h2 ) )
return result
def hit_over_threshold_predicate(threshold):
"@return: A function that returns True if the hit is over the threshold given."
def predicate(hit):
"@return: True iff the hit's score is above the threshold."
return hit.p_binding >= threshold
return predicate
def hits_above_threshold(hits, threshold):
"@return: Those hits above the threshold."
return filter(hit_over_threshold_predicate(threshold), hits)
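The threshold helpers at the end are the only part usable without the compiled _biopsy module; a self-contained sketch with a stand-in hit object (_StubHit is purely illustrative):

class _StubHit:
    """Illustrative stand-in for _biopsy's Hit; only p_binding is needed here."""
    def __init__(self, p_binding):
        self.p_binding = p_binding

hits = [_StubHit(0.3), _StubHit(0.9), _StubHit(0.6)]
strong = list(hits_above_threshold(hits, 0.5))  # filter() returns an iterator in Python 3
assert [h.p_binding for h in strong] == [0.9, 0.6]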
| 30.774648 | 113 | 0.643936 | 516 | 4,370 | 5.282946 | 0.209302 | 0.04292 | 0.024211 | 0.045488 | 0.285767 | 0.25055 | 0.216801 | 0.19369 | 0.168012 | 0.168012 | 0 | 0.017094 | 0.277117 | 4,370 | 141 | 114 | 30.992908 | 0.845837 | 0.240732 | 0 | 0.23913 | 0 | 0 | 0.051815 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.152174 | false | 0 | 0.01087 | 0.065217 | 0.336957 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4e0443002a9f7388df8a4ecc7a67f5770910ff51 | 8,384 | py | Python | epithet/epithet.py | mitodl/epithet | 4f95054fbdfbae0e9d6db2e3309993d00a8a6867 | ["MIT"] | null | null | null | epithet/epithet.py | mitodl/epithet | 4f95054fbdfbae0e9d6db2e3309993d00a8a6867 | ["MIT"] | null | null | null | epithet/epithet.py | mitodl/epithet | 4f95054fbdfbae0e9d6db2e3309993d00a8a6867 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import click
from github import Github
from github.GithubException import RateLimitExceededException
def main():
cli(obj={})
def get_repos(key, org, repo, url):
if url:
g = Github(key, base_url=url)
else:
g = Github(key)
if org:
g_org = g.get_organization(login=org)
else:
g_org = g.get_user()
if repo:
repos = [g_org.get_repo(repo)]
else:
repos = g_org.get_repos()
return repos
@click.group()
@click.option('--key', envvar='EPITHET_KEY', help="Github OAuth Token")
@click.option('--dryrun', is_flag=True, help="Don't actually change or create labels")
@click.option('--url', help="API URL - change if GitHub Enterprise")
@click.pass_context
def cli(ctx, key, dryrun, url):
if not key:
click.echo("You must provide a GitHub API v3 key")
return
ctx.obj['dryrun'] = dryrun
ctx.obj['url'] = url
ctx.obj['key'] = key
@cli.command()
@click.option('--label', '-l', is_flag=True, help="List labels", default=False)
@click.option('--milestone', '-m', is_flag=True, help='List milestones', default=False)
@click.option('--org', '-o', help="Organization to get repos from")
@click.option('--repo', '-r', help="Optionally select a single repo")
@click.pass_context
def list(ctx, label, milestone, org, repo):
if not label and not milestone:
click.echo("--label or --milestone required")
return
for repo in get_repos(ctx.obj['key'], org, repo, ctx.obj['url']):
click.echo("\n * {}:\n".format(repo.name))
if label:
for label in repo.get_labels():
click.echo(" - {} ({})".format(label.name, label.color))
if milestone:
for milestone in repo.get_milestones():
click.echo(" - {} ({})".format(milestone.title))
@cli.command()
@click.option('--label', '-l', is_flag=True, help="Add label", default=False)
@click.option('--milestone', '-m', is_flag=True, help='Add milestone', default=False)
@click.option('--org', '-o', help="Organization")
@click.option('--repo', '-r', help="Optionally select a single repo")
@click.option('--name', '-n', help="Name of new label")
@click.option('--color', '-c', help="Color of new label")
@click.pass_context
def add(ctx, label, milestone, org, repo, name, color):
if not label and not milestone:
click.echo("--label or --milestone required")
return
for repo in get_repos(ctx.obj['key'], org, repo, ctx.obj['url']):
click.echo(" * Checking {}".format(repo.name))
if label:
click.echo("Adding a label with name: {} and color: {}".format(name, color))
labels = {label.name: label for label in repo.get_labels()}
if name.lower() in [l.lower() for l in labels.keys()]:
click.echo(
" - Found {} on {} (Dryrun: {})".format(
name, repo.name, ctx.obj['dryrun']
)
)
if name not in labels.keys():
for labelname, label in labels.items():
if labelname.lower() == name.lower():
labels[labelname].edit(name=name, color=color)
elif labels[name].color != color and not ctx.obj['dryrun'] \
and not repo.archived:
labels[name].edit(name=name, color=color)
else:
click.echo(
" - Creating {} on {} (Dryrun: {})".format(
name, repo.name, ctx.obj['dryrun']
)
)
if not ctx.obj['dryrun'] and not repo.archived:
repo.create_label(name=name, color=color)
if milestone:
click.echo("Adding a milestone with name: {}".format(name))
milestones = {milestone.title: milestone
for milestone in repo.get_milestones()}
if name.lower() in [m.lower() for m in milestones.keys()]:
click.echo(
" - Found {} on {} (Dryrun: {})".format(
name, repo.name, ctx.obj['dryrun']
)
)
else:
click.echo(
" - Creating {} on {} (Dryrun: {})".format(
name, repo.name, ctx.obj['dryrun']
)
)
if not ctx.obj['dryrun'] and not repo.archived:
repo.create_milestone(title=name)
@cli.command()
@click.option('--label', '-l', is_flag=True, help="Delete label", default=False)
@click.option('--milestone', '-m', is_flag=True, help='Delete milestones', default=False)
@click.option('--org', '-o', help="Organization")
@click.option('--repo', '-r', help="Optionally select a single repo")
@click.option('--name', '-n', help="Name of label or milestone to delete")
@click.pass_context
def delete(ctx, label, milestone, org, repo, name):
if not label and not milestone:
click.echo("--label or --milestone required")
return
for repo in get_repos(ctx.obj['key'], org, repo, ctx.obj['url']):
click.echo(" * Checking {}".format(repo.name))
if label:
click.echo("Deleting label: {}".format(name))
labels = {}
for label in repo.get_labels():
labels[label.name] = label
if name in labels:
click.echo(
" - Found {} on {}, deleting (Dryrun: {})".format(
labels[name].name, repo.name, ctx.obj['dryrun']
)
)
if not ctx.obj['dryrun']:
labels[name].delete()
if milestone:
click.echo("Deleting milestone: {}".format(name))
milestones = {}
for milestone in repo.get_milestones():
milestones[milestone.title] = milestone
if name in milestones:
click.echo(
" - Found {} on {}, deleting (Dryrun: {})".format(
milestones[name].title, repo.name, ctx.obj['dryrun']
)
)
if not ctx.obj['dryrun']:
milestones[name].delete()
@cli.command()
@click.option('--label', '-l', is_flag=True, help="Update label", default=False)
@click.option('--milestone', '-m', is_flag=True, help='Update milestone', default=False)
@click.option('--org', '-o', help="Organization")
@click.option('--repo', '-r', help="Optionally select a single repo")
@click.option('--name', '-n', help="Name of the existing label")
@click.option('--new-name', help="New name of the label")
@click.pass_context
def update(ctx, label, milestone, org, repo, name, new_name):
if not label and not milestone:
click.echo("--label or --milestone required")
return
for repo in get_repos(ctx.obj['key'], org, repo, ctx.obj['url']):
click.echo(" * Checking {}".format(repo.name))
if label:
click.echo("Updating label {}".format(name))
labels = {}
for label in repo.get_labels():
labels[label.name] = label
if name in labels:
                click.echo(
                    " - Found {} on {}, updating to {} (Dryrun: {})".format(
                        labels[name].name, repo.name, new_name, ctx.obj['dryrun']
                    )
                )
if labels[name].name != new_name and not ctx.obj['dryrun']:
labels[name].edit(name=new_name, color=labels[name].color)
else:
click.echo("{} not found, did you mean 'add'?".format(name))
if milestone:
click.echo("Updating milestone with name: {}".format(name))
milestones = {}
for milestone in repo.get_milestones():
milestones[milestone.title] = milestone
if name in milestones:
                click.echo(
                    " - Found {} on {}, updating to {} (Dryrun: {})".format(
                        milestones[name].title, repo.name, new_name, ctx.obj['dryrun']
                    )
                )
else:
click.echo("{} not found, did you mean 'add'?".format(name))
if __name__ == "__main__":
    main()
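A quick way to exercise the CLI above without installing it is click's built-in test runner; the token and organization here are placeholders, and --dryrun keeps the invocation side-effect free:

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(
    cli,
    ["--key", "<token>", "--dryrun", "list", "--label", "--org", "example-org"],
    obj={},
)
print(result.output)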
| 40.699029 | 89 | 0.532085 | 980 | 8,384 | 4.49898 | 0.109184 | 0.055115 | 0.040826 | 0.028578 | 0.687231 | 0.646859 | 0.608528 | 0.564981 | 0.54797 | 0.517804 | 0 | 0.000347 | 0.311904 | 8,384 | 205 | 90 | 40.897561 | 0.763911 | 0.002505 | 0 | 0.505319 | 0 | 0 | 0.1928 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037234 | false | 0.026596 | 0.015957 | 0 | 0.085106 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4e04cfd6696b1d79b63702e52778fdde33cbdd79 | 1,876 | py | Python | Tarea1/utilities.py | aleluman/CC5114 | aae4ea9faf0a7cb3eb3bf53f8eecaf209aebf4d6 | ["MIT"] | null | null | null | Tarea1/utilities.py | aleluman/CC5114 | aae4ea9faf0a7cb3eb3bf53f8eecaf209aebf4d6 | ["MIT"] | null | null | null | Tarea1/utilities.py | aleluman/CC5114 | aae4ea9faf0a7cb3eb3bf53f8eecaf209aebf4d6 | ["MIT"] | null | null | null |
import numpy as np
def normalize(matrix, nh=1, nl=0):
"""Normalizes each column in a matrix by calculating its maximum
and minimum values, the parameters nh and nl specify the final range
of the normalized values"""
return (matrix - matrix.min(0)) * ((nh - nl) / matrix.ptp(0)) + nl
def one_hot_encoding(array):
"""Encodes each unique label in 'array' in a vector of the same length as
the number of unique labels. This vector is filled with zeros and a 1
representing the position assigned to the label"""
labels = np.unique(array)
number_of_labels = labels.size
encoded = {}
for i in range(number_of_labels):
encoding = np.zeros(number_of_labels)
encoding[i] = 1
encoded[labels[i]] = encoding
return encoded
def encode(array, encoding):
"""Encodes 'array' with the encoding specified in encoding.
This value must be a dictionary"""
encoded = []
for i in array:
encoded.append(encoding[i])
return encoded
def load_data_wrapper(name, input_cols, output_col, output_type="float", delimiter=None):
"""Wrapper to load the desired data in an easier way. It returns the normalized and encoded
data, alongside with the size of the values in the inputs and outputs to initialize
the neural network correctly"""
data_x = np.loadtxt(name, usecols=input_cols, delimiter=delimiter)
data_x = normalize(data_x)
data_y = np.loadtxt(name, usecols=output_col, delimiter=delimiter, dtype=output_type)
encoding = one_hot_encoding(data_y)
data_y = encode(data_y, encoding)
# x_len will be the number of input neurons, and y_len the number of output neurons
x_len = np.shape(data_x)[1]
y_len = np.shape(data_y)[1]
data = [[np.reshape(x, (x_len, 1)), np.reshape(y, (y_len, 1))] for x, y in zip(data_x, data_y)]
return data, x_len, y_len
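A small usage sketch of the helpers above (the expected values were worked out by hand):

import numpy as np
m = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
print(normalize(m))  # each column rescaled to [0, 1]: first column becomes 0, 0.5, 1
enc = one_hot_encoding(np.array(["cat", "dog", "cat"]))
print(enc["cat"], enc["dog"])       # [1. 0.] [0. 1.]
print(encode(["dog", "cat"], enc))  # [array([0., 1.]), array([1., 0.])]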
| 39.083333 | 99 | 0.695096 | 297 | 1,876 | 4.265993 | 0.340067 | 0.037885 | 0.026046 | 0.020521 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006757 | 0.211087 | 1,876 | 47 | 100 | 39.914894 | 0.849324 | 0.382729 | 0 | 0.074074 | 0 | 0 | 0.004509 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0.037037 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4e051ec8fbfa4fdbb801b562f9028e2cec2f9219 | 1,304 | py | Python | tests/test_searcher.py | jrdelmar/cbis | 6cce46680555d622ecea88f2ee2721209810abbe | ["MIT"] | 1 | 2019-03-19T14:10:19.000Z | 2019-03-19T14:10:19.000Z | tests/test_searcher.py | jrdelmar/cbis | 6cce46680555d622ecea88f2ee2721209810abbe | ["MIT"] | 14 | 2020-01-28T22:38:54.000Z | 2022-03-11T23:43:34.000Z | tests/test_searcher.py | jrdelmar/cbis | 6cce46680555d622ecea88f2ee2721209810abbe | ["MIT"] | null | null | null |
from pyimagesearch.searcher import Searcher
from pyimagesearch.utils import *
import pytest
indexPath = "D:/APP/cbis/"
verbose = True
# Tests for the Searcher class
@pytest.fixture
def searcher():
return Searcher(indexPath, verbose)
pred_file = "D://APP//cbis//tests//out//predictions_test.csv"
top_k = 20
def test_search_gun( searcher ):
threshold = 0.50
image_list = searcher.search_gun(pred_file, top_k, threshold)
assert len(image_list) == 1
assert image_list[0][3] == 'gun'
def test_search_not_gun(searcher):
threshold = 0.70
search_list = ['wooden_spoon']
image_list = searcher.search_list(pred_file,search_list, top_k, threshold)
assert len(image_list) == 2
assert image_list[0][3] == 'wooden_spoon'
def test_search_not_gun1(searcher):
threshold = 0.80
search_list = ['wooden_spoon', 'revolver']
image_list = searcher.search_list(pred_file,search_list, top_k, threshold)
assert len(image_list) == 1
assert image_list[0][3] == 'revolver'
def test_search_not_gun2(searcher):
threshold = 0.70
search_list = ['wooden_spoon', 'revolver']
image_list = searcher.search_list(pred_file,search_list, top_k, threshold)
assert len(image_list) == 3
assert image_list[0][3] == 'wooden_spoon'
assert image_list[2][3] == 'revolver'
| 29.636364 | 78 | 0.713957 | 188 | 1,304 | 4.68617 | 0.244681 | 0.132804 | 0.085131 | 0.104427 | 0.533485 | 0.533485 | 0.533485 | 0.469921 | 0.400681 | 0.400681 | 0 | 0.027498 | 0.163344 | 1,304 | 43 | 79 | 30.325581 | 0.780018 | 0.013037 | 0 | 0.323529 | 0 | 0 | 0.119751 | 0.036547 | 0 | 0 | 0 | 0 | 0.264706 | 1 | 0.147059 | false | 0 | 0.088235 | 0.029412 | 0.264706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4e08b9785d412b27c9f6fb1800aa24f2a6fc367a | 9,484 | py | Python | ntfs.py | kartone/INDXRipper | 88e663115b8705b1bb153b28fd74f943c515b9ca | ["MIT"] | null | null | null | ntfs.py | kartone/INDXRipper | 88e663115b8705b1bb153b28fd74f943c515b9ca | ["MIT"] | null | null | null | ntfs.py | kartone/INDXRipper | 88e663115b8705b1bb153b28fd74f943c515b9ca | ["MIT"] | null | null | null |
"""
Provides functions for working with NTFS volumes
Author: Harel Segev
05/16/2020
"""
from construct import Struct, Padding, Computed, IfThenElse, BytesInteger, Const, Enum, Array, FlagsEnum, Switch, Tell
from construct import PaddedString, Pointer, Seek, Optional, StopIf, RepeatUntil, Padded
from construct import Int8ul, Int16ul, Int32ul, Int64ul, Int8sl
from dataruns import get_dataruns, NonResidentStream
from sys import exit as sys_exit
class EmptyNonResidentAttributeError(ValueError):
pass
BOOT_SECTOR = Struct(
"OffsetInImage" / Tell,
Padding(3),
"Magic" / Optional(Const(b'NTFS')),
StopIf(lambda this: this.Magic is None),
Padding(4),
"BytsPerSec" / Int16ul,
"SecPerClus" / Int8ul,
"BytsPerClus" / Computed(lambda this: this.BytsPerSec * this.SecPerClus),
Padding(34),
"MftClusNumber" / Int64ul,
Padding(8),
"BytsOrClusPerRec" / Int8sl,
"BytsPerRec" / IfThenElse(
lambda this: this.BytsOrClusPerRec > 0,
Computed(lambda this: this.BytsOrClusPerRec * this.BytsPerClus),
Computed(lambda this: 2 ** abs(this.BytsOrClusPerRec)),
),
Padding(3),
"BytsOrClusPerIndx" / Int8sl,
"BytsPerIndx" / IfThenElse(
lambda this: this.BytsOrClusPerIndx > 0,
Computed(lambda this: this.BytsOrClusPerIndx * this.BytsPerClus),
Computed(lambda this: 2 ** abs(this.BytsOrClusPerIndx)),
),
"BytsPerMftChunk" / IfThenElse(
lambda this: this.BytsPerClus > this.BytsPerRec,
Computed(lambda this: this.BytsPerClus),
Computed(lambda this: this.BytsPerRec)
),
)
FILE_REFERENCE = Struct(
"FileRecordNumber" / BytesInteger(6, swapped=True, signed=False),
"SequenceNumber" / Int16ul
)
FILE_RECORD_HEADER = Struct(
"OffsetInChunk" / Tell,
"Magic" / Optional(Const(b'FILE')),
StopIf(lambda this: this.Magic is None),
"UpdateSequenceOffset" / Int16ul,
"UpdateSequenceSize" / Int16ul,
Padding(8),
"SequenceNumber" / Int16ul,
Padding(2),
"FirstAttributeOffset" / Int16ul,
"Flags" / FlagsEnum(Int16ul, IN_USE=1, DIRECTORY=2),
Padding(8),
"BaseRecordReference" / FILE_REFERENCE,
Seek(lambda this: this.UpdateSequenceOffset + this.OffsetInChunk),
"UpdateSequenceNumber" / Int16ul,
"UpdateSequenceArray" / Array(lambda this: this.UpdateSequenceSize - 1, Int16ul)
)
FILE_RECORD_HEADERS = Struct(
"RecordHeaders" / Array(
lambda this: this._.records_per_chunk,
Padded(lambda this: this._.bytes_per_record, FILE_RECORD_HEADER)
)
)
ATTRIBUTE_HEADER = Struct(
"EndOfRecordSignature" / Optional(Const(b'\xFF\xFF\xFF\xFF')),
StopIf(lambda this: this.EndOfRecordSignature is not None),
"OffsetInChunk" / Tell,
"Type" / Enum(Int32ul, FILE_NAME=0x30, INDEX_ALLOCATION=0xA0, DATA=0x80),
"Length" / Int32ul,
"Residence" / Enum(Int8ul, RESIDENT=0x00, NON_RESIDENT=0x01),
"NameLength" / Int8ul,
"NameOffset" / Int16ul,
"AttributeName" / Pointer(lambda this: this.NameOffset + this.OffsetInChunk,
PaddedString(lambda this: 2 * this.NameLength, "utf16")),
Padding(4),
"Metadata" / Switch(
lambda this: this.Residence,
{
"RESIDENT":
Struct(
"AttributeLength" / Int32ul,
"AttributeOffset" / Int16ul,
),
"NON_RESIDENT":
Struct(
Padding(16),
"DataRunsOffset" / Int16ul,
Padding(6),
"AllocatedSize" / Int64ul,
"RealSize" / Int64ul,
)
}
),
Seek(lambda this: this.Length + this.OffsetInChunk)
)
ATTRIBUTE_HEADERS = Struct(
Seek(lambda this: this._.offset),
"AttributeHeaders" / RepeatUntil(lambda obj, lst, ctx: obj.EndOfRecordSignature is not None, ATTRIBUTE_HEADER)
)
FILENAME_ATTRIBUTE = Struct(
"ParentDirectoryReference" / FILE_REFERENCE,
Padding(56),
"FilenameLengthInCharacters" / Int8ul,
"FilenameNamespace" / Enum(Int8ul, POSIX=0, WIN32=1, DOS=2, WIN32_DOS=3),
"FilenameInUnicode" / PaddedString(lambda this: this.FilenameLengthInCharacters * 2, "utf16")
)
def get_boot_sector(raw_image, partition_offset):
raw_image.seek(partition_offset)
return BOOT_SECTOR.parse_stream(raw_image)
def panic_on_invalid_boot_sector(vbr):
if vbr["Magic"] is None:
sys_exit("INDXRipper: error: invalid volume boot record")
def get_mft_offset(vbr):
return vbr["MftClusNumber"] * vbr["BytsPerClus"] + vbr["OffsetInImage"]
def get_first_mft_chunk(vbr, raw_image):
raw_image.seek(get_mft_offset(vbr))
return bytearray(raw_image.read(vbr["BytsPerMftChunk"]))
def get_record_headers(mft_chunk, vbr):
return FILE_RECORD_HEADERS.parse(
mft_chunk,
bytes_per_record=vbr["BytsPerRec"],
records_per_chunk=vbr["BytsPerMftChunk"] // vbr["BytsPerRec"]
)["RecordHeaders"]
def is_valid_record_signature(record_header):
return record_header["Magic"] is not None
def apply_record_fixup(mft_chunk, record_header, vbr):
usn = record_header["UpdateSequenceNumber"]
first_fixup_offset = record_header["OffsetInChunk"] + vbr["BytsPerSec"] - 2
end_of_record_offset = record_header["OffsetInChunk"] + vbr["BytsPerRec"]
for i, usn_offset in enumerate(range(first_fixup_offset, end_of_record_offset, vbr["BytsPerSec"])):
if Int16ul.parse(mft_chunk[usn_offset:usn_offset + 2]) != usn:
return False
mft_chunk[usn_offset:usn_offset + 2] = Int16ul.build(record_header["UpdateSequenceArray"][i])
return True
def apply_fixup(mft_chunk, record_headers, vbr):
for record_header in record_headers:
if is_valid_record_signature(record_header):
record_header["IsValidFixup"] = apply_record_fixup(mft_chunk, record_header, vbr)
def is_valid_fixup(record_header):
return record_header["IsValidFixup"]
def is_used(record_header):
return record_header["Flags"]["IN_USE"]
def is_directory(record_header):
return record_header["Flags"]["DIRECTORY"]
def get_sequence_number(record_header):
if is_used(record_header):
return record_header["SequenceNumber"]
else:
return record_header["SequenceNumber"] - 1
def is_base_record(record_header):
return record_header["BaseRecordReference"]["FileRecordNumber"] == 0
def get_base_record_reference(record_header):
base_reference = record_header["BaseRecordReference"]
return base_reference["FileRecordNumber"], base_reference["SequenceNumber"]
def get_attribute_headers(mft_chunk, record_header):
first_attribute_offset = record_header["FirstAttributeOffset"] + record_header["OffsetInChunk"]
res = ATTRIBUTE_HEADERS.parse(mft_chunk, offset=first_attribute_offset)
return res["AttributeHeaders"][:-1]
def get_resident_attribute(mft_chunk, attribute_header):
offset = attribute_header["OffsetInChunk"] + attribute_header["Metadata"]["AttributeOffset"]
return mft_chunk[offset: offset + attribute_header["Metadata"]["AttributeLength"]]
def get_attribute_type(attribute_header):
return attribute_header["Type"]
def get_attribute_name(attribute_header):
return attribute_header["AttributeName"]
def is_resident(attribute_header):
return attribute_header["Residence"]["RESIDENT"]
def get_attribute_header(attribute_headers, attribute_type):
for attribute_header in attribute_headers:
if attribute_header["Type"] == attribute_type:
yield attribute_header
def parse_filename_attribute(filename_attribute):
return FILENAME_ATTRIBUTE.parse(filename_attribute)
def get_non_resident_attribute(vbr, raw_image, mft_chunk, attribute_header, is_allocated):
dataruns_offset_in_chunk = attribute_header["OffsetInChunk"] + attribute_header["Metadata"]["DataRunsOffset"]
dataruns = get_dataruns(mft_chunk, dataruns_offset_in_chunk)
if not dataruns:
raise EmptyNonResidentAttributeError
return NonResidentStream(vbr["BytsPerClus"], vbr["OffsetInImage"], raw_image, dataruns, is_allocated)
def panic_on_invalid_first_record(record_header):
    if not is_valid_record_signature(record_header):
        sys_exit("INDXRipper: error: invalid 'FILE' signature in first file record")
    if not is_valid_fixup(record_header):
        sys_exit("INDXRipper: error: fixup validation failed for first file record")
def get_mft_data_attribute(vbr, raw_image):
panic_on_invalid_boot_sector(vbr)
mft_chunk = get_first_mft_chunk(vbr, raw_image)
record_headers = get_record_headers(mft_chunk, vbr)
apply_fixup(mft_chunk, record_headers, vbr)
panic_on_invalid_first_record(record_headers[0])
attribute_headers = get_attribute_headers(mft_chunk, record_headers[0])
mft_data_attribute_header = next(get_attribute_header(attribute_headers, "DATA"))
return get_non_resident_attribute(vbr, raw_image, mft_chunk, mft_data_attribute_header, True)
def get_mft_chunks(vbr, mft_data_attribute_stream):
while current_chunk := mft_data_attribute_stream.read(vbr["BytsPerMftChunk"]):
yield current_chunk
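A usage sketch chaining the pieces above (the image path and partition offset are hypothetical; this needs a raw image of an NTFS volume):

with open("ntfs.img", "rb") as raw_image:  # hypothetical image path
    vbr = get_boot_sector(raw_image, partition_offset=0)
    mft_data = get_mft_data_attribute(vbr, raw_image)  # also validates the boot sector
    for chunk in get_mft_chunks(vbr, mft_data):
        mft_chunk = bytearray(chunk)
        record_headers = get_record_headers(mft_chunk, vbr)
        apply_fixup(mft_chunk, record_headers, vbr)
        for header in record_headers:
            if is_valid_record_signature(header) and is_valid_fixup(header):
                print(is_directory(header), get_sequence_number(header))
        break  # first chunk only, for illustration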
| 33.75089 | 119 | 0.690953 | 1,034 | 9,484 | 6.077369 | 0.185687 | 0.063017 | 0.044558 | 0.022915 | 0.253978 | 0.173775 | 0.0993 | 0.038829 | 0.013367 | 0 | 0 | 0.016247 | 0.208245 | 9,484 | 280 | 120 | 33.871429 | 0.820615 | 0.00833 | 0 | 0.090909 | 0 | 0 | 0.153981 | 0.005491 | 0 | 0 | 0.002197 | 0 | 0 | 1 | 0.126263 | false | 0.005051 | 0.025253 | 0.055556 | 0.267677 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4e0c94378cede26866700f316056f4a9b045008f | 486 | py | Python | writer.py | ZitRos/edu-text-analysis | a03f22f9c6e72e4cac4d38b9e963d1554cae35d0 | ["MIT"] | 9 | 2017-11-28T22:42:06.000Z | 2021-01-27T05:05:52.000Z | writer.py | ZitRos/edu-text-analysis | a03f22f9c6e72e4cac4d38b9e963d1554cae35d0 | ["MIT"] | null | null | null | writer.py | ZitRos/edu-text-analysis | a03f22f9c6e72e4cac4d38b9e963d1554cae35d0 | ["MIT"] | 1 | 2022-02-08T21:55:29.000Z | 2022-02-08T21:55:29.000Z |
import xlsxwriter
from slugify import slugify
import os
def write_to_xlsx(filename, title="Worksheet", data=None):
    directory = os.path.dirname(filename)
    if directory and not os.path.exists(directory):  # dirname is '' for bare filenames; makedirs('') would raise
        os.makedirs(directory)
    workbook = xlsxwriter.Workbook(filename)
    worksheet = workbook.add_worksheet(slugify(title)[:28])  # Excel sheet names are length-limited
    for row_count, row in enumerate(data or []):
        for cell_count, cell in enumerate(row):
            worksheet.write(row_count, cell_count, cell)
    workbook.close()
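Usage is a single call (assuming the xlsxwriter and slugify packages imported above are installed; the output path and rows below are made up):

write_to_xlsx("out/report.xlsx", title="My Report",
              data=[["name", "score"], ["alice", 10], ["bob", 7]])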
| 24.3 | 58 | 0.746914 | 71 | 486 | 4.985915 | 0.450704 | 0.067797 | 0.050847 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014493 | 0.148148 | 486 | 19 | 59 | 25.578947 | 0.84058 | 0 | 0 | 0 | 0 | 0 | 0.018519 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.176471 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4e0ca604df69608c9b3245228eab46db3a285865 | 4,251 | py | Python | src/4. Ajuste de curvas/Metodos/MC_multilineal.py | thonyblaz/Numerical-Methods | fdeccb9e2eba4a1eb7892ab3a55bd6169c430502 | ["MIT"] | 1 | 2021-04-24T20:47:26.000Z | 2021-04-24T20:47:26.000Z | src/4. Ajuste de curvas/Metodos/MC_multilineal.py | Desarrollador2021/Numerical-Methods | fdeccb9e2eba4a1eb7892ab3a55bd6169c430502 | ["MIT"] | null | null | null | src/4. Ajuste de curvas/Metodos/MC_multilineal.py | Desarrollador2021/Numerical-Methods | fdeccb9e2eba4a1eb7892ab3a55bd6169c430502 | ["MIT"] | 1 | 2021-04-24T20:47:03.000Z | 2021-04-24T20:47:03.000Z |
import numpy as np
def sisEcua(mat_A, mat_B):
a_inv = np.linalg.inv(mat_A)
C = a_inv.dot(mat_B.T)
return C
def matrices(sm, smm, smy, smn, datos, cant_datos):
dimension = datos+1
s = (dimension, dimension)
mat_A = np.zeros(s)
mat_B = np.matrix(smy)
    # counters
n = len(smn)
fin = datos-1
con_master = fin-1
ini = 0
fil = 1
col = 1
    # first entry placed
mat_A[0][0] = cant_datos
for i in range(0, datos):
mat_A[i+1][i+1] = smm[i]
mat_A[0][i+1] = sm[i]
mat_A[i+1][0] = sm[i]
    # placement of the variables multiplied by other variables
for i in range(1, datos):
for j in range(ini, fin):
mat_A[i][col+1] = smn[j]
mat_A[col+1][i] = smn[j]
col += 1
fil += 1
col = col-con_master
ini = fin
fin = fin+con_master
con_master -= 1
    # to inspect the matrices:
# print(mat_A)
# print(mat_B)
return sisEcua(mat_A, mat_B)
def multilineal(var_dependiente, var_independiente, nombre_variables):
variables = len(nombre_variables)
sis_ecuaciones = len(nombre_variables)+1
cant_datos = len(var_dependiente)
    # auxiliary vectors
    var_al_cuadrado = []
    var_por_y = []
    var_multiplicadas = []
    # vectors holding the sums
    suma_var_al_cuadrado = []
    suma_var = []
    suma_por_y = []
    suma_de_var_por_var = []
    # dependent variable
    y = np.array(var_dependiente)
    sum_y = np.sum(y)
    suma_por_y.append(sum_y)
    # products m*n, m*p and n*p
    k = 1
    # operations
for var_i in range(variables):
m = np.array(var_independiente[var_i])
y_por_m = y*m
m_cuadrado = m*m
        # append the m**2 and m*y terms
var_al_cuadrado.append(m_cuadrado)
var_por_y.append(y_por_m)
        # sums
suma_mm = np.sum(m_cuadrado)
suma_var_al_cuadrado.append(suma_mm)
suma_m = np.sum(m)
suma_var.append(suma_m)
suma_my = np.sum(y_por_m)
suma_por_y.append(suma_my)
        # products with each remaining variable
for i in range(k, variables):
n = np.array(var_independiente[i])
multipl = m*n
var_multiplicadas.append(multipl)
            # sum of the products
suma_mn = np.sum(multipl)
suma_de_var_por_var.append(suma_mn)
k += 1
""" #para visualizar las sumatorias
print(var_al_cuadrado)
print(var_por_y)
print(var_multiplicadas)
print(suma_var)
print(suma_var_al_cuadrado)
print(suma_por_y)
print(suma_de_var_por_var) """
    resultado = matrices(suma_var, suma_var_al_cuadrado,
                         suma_por_y, suma_de_var_por_var, variables, cant_datos)
    # final results
    ecuacion_final = 'y = '
    print('\n COEFFICIENTS OF THE MULTIPLE LINEAR FIT\n')
    for i in range(0, variables + 1):
        solucion = float(resultado[i])
        sol_redondeada = "{0:.7f}".format(solucion)
        print(f' a{i} = {sol_redondeada} ')
        if i > 0:
            ec = ' + ' + str(sol_redondeada) + '*' + str(nombre_variables[i - 1])
        else:
            ec = str(sol_redondeada)
        ecuacion_final = ecuacion_final + ec
    print('\n The fitted equation is:\n')
    print(f' {ecuacion_final}')
    print('\nNote: y = dependent variable')
# test data
# set 1
""" agua = [27.5, 28, 28.8, 29.1, 30, 31, 32]
cal = [2, 3.5, 4.5, 2.5, 8.5, 10.5, 13.5]
puzo = [18, 16.5, 10.5, 2.5, 9, 4.5, 1.5]
dr = [5, 2, 3, 4, 1, 2, 3]
gh = [7, 2, 1, 1, 1, 6, 7]
puzos = [15, 15.5, 11.5, 5, 5, 3, 1]
variables_data = [cal, puzo]
variable = ['u', 'v']
variables_data = [cal, puzo, dr, gh, puzos]
variable = ['u', 'v', 'w', 'z', 's'] """
# set 2
""" u=[0.02,0.02,0.02,0.02,0.1,0.1,0.1,0.1,0.18,0.18,0.18,0.18]
v=[1000,1100,1200,1300,1000,1100,1200,1300,1000,1100,1200,1300]
fuv=[78.9,65.1,55.2,56.4,80.9,69.7,57.4,55.4,85.3,71.8,60.7,58.9]
variables_data = [u,v]
variable = ['u', 'v'] """
""" agua = [27.5, 28, 28.8, 29.1, 30, 31, 32]
cal = [2, 3.5, 4.5, 2.5, 8.5, 10.5, 13.5]
puzo = [18, 16.5, 10.5, 2.5, 9, 4.5, 1.5]
variables_data = [cal, puzo]
variable = ['u', 'v']
multilineal(agua, variables_data, variable) """
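A quick sanity check with synthetic data where y = 1 + 2u + 3v exactly, so the printed coefficients should come out close to a0 = 1, a1 = 2, a2 = 3:

u = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
v = [1.0, 0.0, 2.0, 1.0, 3.0, 2.0]
y = [1 + 2 * ui + 3 * vi for ui, vi in zip(u, v)]
multilineal(y, [u, v], ['u', 'v'])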
| 28.152318 | 71 | 0.584098 | 713 | 4,251 | 3.302945 | 0.225806 | 0.018684 | 0.038641 | 0.018684 | 0.180042 | 0.128238 | 0.123567 | 0.089172 | 0.049257 | 0.049257 | 0 | 0.086329 | 0.266996 | 4,251 | 150 | 72 | 28.34 | 0.669448 | 0.092919 | 0 | 0 | 0 | 0 | 0.066834 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036585 | false | 0 | 0.012195 | 0 | 0.073171 | 0.060976 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4e14a820dce8b0c05972db39e72bc127d5d06743 | 3,550 | py | Python | vcf_reader.py | ZhiGroup/ROH-DICE | 5a2edfd04e285fe1f40bb199117c03a33b176984 | ["MIT"] | 1 | 2021-09-01T15:46:26.000Z | 2021-09-01T15:46:26.000Z | vcf_reader.py | ZhiGroup/ROH-DICE | 5a2edfd04e285fe1f40bb199117c03a33b176984 | ["MIT"] | 1 | 2021-05-21T13:13:55.000Z | 2021-05-25T17:56:06.000Z | vcf_reader.py | ZhiGroup/ROH-DICE | 5a2edfd04e285fe1f40bb199117c03a33b176984 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created By : Ardalan Naseri
# Created Date: Mon September 21 2020
# =============================================================================
"""The module is a VCF reader to parse input VCF file."""
import gzip
import random
def eff_split(string_input, array_vals, delimiter='\t'):
    counter = 0
    start_pos = 0
    while start_pos < len(string_input) - 1:
        end_pos = start_pos + 1
        while end_pos < len(string_input) and string_input[end_pos] != delimiter and start_pos != end_pos:
            end_pos += 1
        array_vals[counter] = string_input[start_pos:end_pos]
        start_pos = end_pos + 1
        counter += 1
class VCFReader:
def __init__(self, vcf_input_compressed):
        self.vcf_file = gzip.open(vcf_input_compressed, "rt")  # text mode: readline() must return str for the '#CHROM' check below
self.samples = []
self.done = False
self.vals = []
self.genome_pos = []
self.valid = True
self.entries_started = False
self.inter_vals = None
self._line = None
def set_samples(self):
done = False
while not done:
line = self.vcf_file.readline()
if not line:
done = True
self.done = True
continue
if '#CHROM' in line:
self.entries_started = True
i = 9
_values = line.replace("\n", "").split()
while i < len(_values):
self.samples.append(_values[i])
i += 1
self.vals = [0] * len(self.samples)
self.inter_vals = ['0|1'] * (len(self.samples) + 9)
done = True
def read_next_site(self):
site_counter = 0
line = self.vcf_file.readline().replace("\n", "")
self._line = line
self.valid = True
if not line:
self.done = True
self.vcf_file.close()
return False
if self.entries_started:
eff_split(line, self.inter_vals, '\t')
_pos = self.inter_vals[1]
alt = self.inter_vals[4]
if len(alt.split(',')) > 1:
self.valid = False
return True
i = 2
while i < len(self.inter_vals) and self.inter_vals[i] != 'GT':
i += 1
i += 1
if i >= len(self.inter_vals):
self.valid = False
return True
tags = self.inter_vals[7]
if len(self.inter_vals[3]) > 1 or len(self.inter_vals[4]) > 1:
self.valid = False
return True
i = 9
site_values = ''
j = 0
while i < len(self.inter_vals):
site_values = self.inter_vals[i].replace("\n", '').split("|")
if site_values[0] == '.' or len(site_values) < 2 or (len(site_values) > 1 and site_values[1] == '.'):
self.valid = False
return True
al1 = int(site_values[0])
al2 = int(site_values[1])
if al1 == al2:
self.vals[j] = al1
else:
self.vals[j] = random.randint(0, 1)
j = j + 1
i += 1
self.genome_pos.append(self.inter_vals[1])
site_counter = site_counter + 1
return True
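An end-to-end sketch that writes a tiny one-site, two-sample VCF to a temporary gzip file and reads it back (assumes the text-mode gzip.open shown in the constructor above; the header is abbreviated to the mandatory columns):

import gzip, os, tempfile

fd, path = tempfile.mkstemp(suffix=".vcf.gz")
os.close(fd)
with gzip.open(path, "wt") as f:
    f.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tS1\tS2\n")
    f.write("1\t12345\t.\tA\tG\t.\t.\t.\tGT\t0|0\t1|1\n")

reader = VCFReader(path)
reader.set_samples()
print(reader.samples)  # ['S1', 'S2']
while reader.read_next_site():
    if reader.valid:
        print(reader.genome_pos[-1], reader.vals)  # 12345 [0, 1]
os.remove(path)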
| 31.415929 | 117 | 0.468169 | 411 | 3,550 | 3.861314 | 0.231144 | 0.079395 | 0.114682 | 0.05041 | 0.131065 | 0.076244 | 0.032766 | 0 | 0 | 0 | 0 | 0.023842 | 0.385634 | 3,550 | 112 | 118 | 31.696429 | 0.703806 | 0.089014 | 0 | 0.261364 | 0 | 0 | 0.007757 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.022727 | 0 | 0.147727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4e15597d3a91189d8d9a4e8575fb172c9d0972ad | 2,865 | py | Python | neighbor/tests.py | Elianehbmna/Neighborhood | 3e684fe813904f10fca7f3ea8c71adb1f2bc6a3d | ["MIT"] | null | null | null | neighbor/tests.py | Elianehbmna/Neighborhood | 3e684fe813904f10fca7f3ea8c71adb1f2bc6a3d | ["MIT"] | 5 | 2020-02-12T03:17:58.000Z | 2021-09-08T01:23:33.000Z | neighbor/tests.py | Elianehbmna/Neighbourhood | 3e684fe813904f10fca7f3ea8c71adb1f2bc6a3d | ["MIT"] | null | null | null |
from django.test import TestCase
from django.contrib.auth.models import User
from .models import Profile, Neighbourhood, Post, Business
# Create your tests here.
class ProfileTestClass(TestCase):
'''
Test case for the Profile class
'''
def setUp(self):
'''
Method that creates an instance of Profile class
'''
# Create instance of Profile class
self.new_profile = Profile(bio="I am superwoman")
def test_instance(self):
'''
Test case to check if self.new_profile in an instance of Profile class
'''
self.assertTrue(isinstance(self.new_profile, Profile))
def test_get_other_profiles(self):
'''
Test case to check if all profiles are gotten from the database
'''
self.eliane = User(username="elly")
self.eliane.save()
self.eliane = User(username="habibi")
self.eliane.save()
self.test_profile = Profile(user=self.eliane, bio="Another Profile")
gotten_profiles = Profile.get_other_profiles(self.eliane.id)
profiles = Profile.objects.all()
class NeighbourhoodTestClass(TestCase):
    '''
    Test case for the Neighbourhood class
    '''
    def setUp(self):
        '''
        Method that creates an instance of the Image class
        '''
        # Create an Image instance
        # NOTE: Image is not imported from .models above, so this test fails as written
        self.new_Image = Image(
            caption='hey')
def test_instance(self):
'''
Test case to check if self.new_Image in an instance of Image class
'''
self.assertTrue(isinstance(self.new_Image, Image))
class PostTestClass(TestCase):
    '''
    Test case for the Comment class
    '''
    def setUp(self):
        '''
        Method that creates an instance of the Comment class
        '''
        # Create a Comment instance
        # NOTE: Comment is not imported from .models above, so these tests fail as written
        self.new_comment = Comment(
            comment_content='hey')
def test_instance(self):
'''
Test case to check if self.new_comment in an instance of Comment class
'''
self.assertTrue(isinstance(self.new_comment, Comment))
def test_get_Image_comments(self):
'''
Test case to check if get Image comments is getting comments for a specific Image
'''
self.eliane = User(username="eli")
self.eliane.save()
self.eliane = User(username="habibi")
self.eliane.save()
self.test_profile = Profile(user=self.eliane, bio="Another Profile")
self.test_Image = Image(user=self.eliane, caption="Another Profile")
self.test_comment = Comment(
Image=self.test_Image, comment_content="Wow")
gotten_comments = Comment.get_Image_comments(self.test_Image.id)
comments = Comment.objects.all()
# No comments were saved so expect True
self.assertTrue(len(gotten_comments) == len(comments))
| 26.045455 | 89 | 0.622339 | 345 | 2,865 | 5.072464 | 0.22029 | 0.068571 | 0.041143 | 0.04 | 0.490857 | 0.386857 | 0.301143 | 0.301143 | 0.301143 | 0.301143 | 0 | 0 | 0.282373 | 2,865 | 109 | 90 | 26.284404 | 0.851167 | 0.261431 | 0 | 0.35 | 0 | 0 | 0.046809 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.2 | false | 0 | 0.075 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4e1a4e1f3d76e5fdbb618878f0f9c68ef36c94ef | 13,944 | py | Python | src/flintfiller/dataframe_to_frame_parser.py | discipl/flintfiller | 15d220c980a962ac2c4b7ac232f091666ab24e66 | ["Apache-2.0"] | null | null | null | src/flintfiller/dataframe_to_frame_parser.py | discipl/flintfiller | 15d220c980a962ac2c4b7ac232f091666ab24e66 | ["Apache-2.0"] | null | null | null | src/flintfiller/dataframe_to_frame_parser.py | discipl/flintfiller | 15d220c980a962ac2c4b7ac232f091666ab24e66 | ["Apache-2.0"] | null | null | null |
"""
Copyright (C) 2020 Nederlandse Organisatie voor Toegepast Natuur-
wetenschappelijk Onderzoek TNO / TNO, Netherlands Organisation for
applied scientific research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Maaike de Boer, Roos Bakker
@contact: maaike.deboer@tno.nl, roos.bakker@tno.nl
"""
# This script transforms POS-tagged text into a FLINT frame.
import ast
import json
from typing import Tuple
import pandas as pd
action_verbs = ['aanbrengen', 'aanwijzen', 'achterwege blijven', 'afnemen', 'afwijken', 'afwijzen',
'ambtshalve verlenen', 'ambtshalve verlengen', 'annuleren', 'behandelen', 'beheren', 'bepalen',
'beperken', 'betreden', 'beveiligen', 'bevelen', 'bevorderen', 'bieden gelegenheid', 'bijhouden',
'buiten behandeling stellen', 'buiten werking stellen', 'doorzoeken', 'erop wijzen',
'gebruiken maken van', 'gedwongen ontruimen', 'geven', 'heffen', 'in bewaring stellen',
'in de gelegenheid stellen zich te doen horen', 'in kennis stellen', 'in werking doen treden',
'in werking stellen', 'indienen', 'innemen', 'instellen', 'intrekken', 'invorderen', 'inwilligen',
'maken', 'naar voren brengen', 'nemen', 'niet in behandeling nemen', 'niet-ontvankelijk verklaren',
'nogmaals verlengen', 'om niet vervoeren', 'onderwerpen', 'onderzoeken', 'ongewenstverklaren',
'onmiddellijk bepalen', 'onmiddellijk verlaten', 'ontnemen', 'ontvangen', 'opheffen', 'opleggen',
'oproepen', 'overbrengen', 'overdragen', 'plaatsen', 'schorsen', 'schriftelijk in kennis stellen',
'schriftelijk laten weten', 'schriftelijk mededelen', 'schriftelijk naar voren brengen', 'signaleren',
'sluiten', 'staande houden', 'stellen', 'straffen', 'ter hand stellen', 'teruggeven',
'tijdelijk in bewaring nemen', 'toetsen', 'toezenden', 'uitstellen', 'uitvaardigen', 'uitzetten',
'van rechtswege verkrijgen', 'vaststellen', 'vergelijken', 'verhalen', 'verhogen', 'verklaren',
'verkorten', 'verkrijgen', 'verlaten', 'verlenen', 'verlengen', 'verplichten', 'verschaffen',
'verstrekken', 'verzoeken', 'voegen', 'vorderen', 'vragen', 'willigen', 'weigeren', 'wijzigen']
set_propernouns = ["PRP", "PRP$", "NNP", "NNPS"]
list_act = []
list_fact = []
facts_list = []  # reset per run in create_flint_frames; a 'global' statement at module level is a no-op
def read_csv_to_df(csv_file):
datafrm = pd.read_csv(csv_file)
print("csv loaded from " + csv_file)
return datafrm
def write_df_to_csv(df, fle):
df.to_csv(fle)
print("df written to " + fle)
def get_empty_flint_frame_format() -> dict:
flint_frame = {
"acts": [],
"facts": [],
"duties": []
}
return flint_frame
def get_empty_act_frame() -> dict:
act_frame = {
"act": "",
"actor": "",
"action": "",
"object": "",
"recipient": "",
"preconditions": {
"expression": "LITERAL",
"operand": True
},
"create": [],
"terminate": [],
"sources": [], # with validFrom, validTo, citation juriconnect and text
"explanation": ""
}
return act_frame
def get_empty_fact_frame() -> dict:
fact_frame = {
"fact": "",
"function": [],
"sources": [], # with validFrom, validTo, citation juriconnect and text
"explanation": ""
}
return fact_frame
def get_source_dict(row, text, name_law) -> dict:
source_dict = {"validFrom": row["Versie"]}
try:
source_dict["citation"] = "art. " + row['jci 1.3'].split("artikel=")[1].split('&')[0] + "lid " + \
row['jci 1.3'].split("lid=")[1].split('&')[0] + ", " + name_law
    except IndexError:
        # if split("lid=") yields no second part, do not add the "lid" segment
source_dict["citation"] = "art. " + row['jci 1.3'].split("artikel=")[1].split('&')[0] + ", " + name_law
source_dict['text'] = text.replace('\n', '').replace('\r', '').replace("\t", " ")
source_dict['juriconnect'] = row['jci 1.3']
return source_dict
def create_fact_or_act_function(list_text: list) -> dict:
fact_function = {"expression": "AND"}
fact_function_operands = []
for fct in list_text:
try:
if 'Onderdeel' not in fct and 'Lid' not in fct and len(fct) > 3:
fact_function_operands.append(
"[" + fct.replace('\n', '').replace('\r', '').split(";")[0].replace("\t", "")[1:] + "]") # .
        except IndexError:
            # if the fact is empty, the sliced split result is missing; skip it
            pass
# get rid of the empty list at the beginning
if len(fact_function_operands) > 1:
fact_function_operands.pop(0)
fact_function["operands"] = fact_function_operands
else:
fact_function = {
"expression": "LITERAL",
"operand": True
}
return fact_function
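# Illustration (not from the source; the fragments below are hypothetical): given
# pre-split text parts, create_fact_or_act_function drops the leading fragment,
# brackets the rest, and joins them under an AND expression, e.g.
#   create_fact_or_act_function([' ignored', ' aanvraag; rest', ' besluit'])
#   -> {"expression": "AND", "operands": ["[aanvraag]", "[besluit]"]}
# With fewer than two surviving operands it falls back to
# {"expression": "LITERAL", "operand": True}.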
def get_object_and_actor(orig, tags) -> Tuple[str, str]:
vp_found = False
obj = ""
actor_num = -1
# check the index of the verb
for i in range(0, len(tags)):
try:
# find the VP
if tags[i][0] == "VP" and (tags[i][len(tags[i][0])][0] == orig):
vp_found = True
for num in range(1, len(tags[i])):
# get the first NP; this is the object
# TODO: version 2: create better code using dependencies to determine the object and actor
if not vp_found:
# bug fix: no lower, because the link to the actor is gone then
obj += " " + (str(tags[i][num][0]))
# only add NPs if they are in the same sentence as the VP of the act
if "$" in str(tags[i][num][0]) and not vp_found:
obj = ""
# try to find the actor and recipient
# Hack: make a list of characters and check whether the first is uppercased (capitalized)
if tags[i][num][1] in set_propernouns and list(tags[i][num][0])[0].isupper() and actor_num < 0:
list_non_actors = ['Onderdeel', 'Lid', 'Indien', 'Tenzij', 'Onverminderd', 'Nadat']
if not (any(non_actor in tags[i][num][0] for non_actor in list_non_actors)):
# print(tags[i][num])
actor_num = i
        except IndexError:
            # if tags[i][len(tags[i][0])][0] or tags[i][0] does not exist, skip this entry
            pass
# the actor is the NP of the actor_num (number in the tags)
actor = ""
# fixed bug: bigger than -1 if the word occurs as the first word
if actor_num > -1:
# range starts with 1, because 0 is the type NP
for nr in range(1, len(tags[actor_num])):
actor += " " + tags[actor_num][nr][0]
# hacks to get a better object
if len(actor) > 1 and actor in obj:
obj = obj.replace(actor, "")
if "kan" in obj:
obj = obj.replace("kan", "")
return actor, obj
def check_infinitive(inf, row) -> bool:
    # Parenthesized: the original chained `and not a or b or c`, which Python parses
    # as `(... and not a) or b or c` -- the opposite of the stated rule that verbs
    # preceded by a determiner ('het', 'de', 'een') are not acts.
    return inf in action_verbs and not (
        ("het " + inf) in row['Brontekst'] or
        ("de " + inf) in row['Brontekst'] or
        ("een " + inf) in row['Brontekst'])
# This is a first version!
def get_acts(row, verbs, tags, flint_frames, name_law) -> dict:
# for each verb (if one verb this also works)
for infinitive, original in verbs.items():
# if the verb is in the first part (before the :) (could be more verbs)
parts = row['Brontekst'].split(":")
# print("verbs found " + infinitive + " " + original)
# addition to wrong parsing:
# acts are not those that have a determiner before it; Dutch determiners are 'de',
# 'het' and 'een'
# acts are not those that have 'indien' as a form of 'indienen'
if check_infinitive(infinitive, row) and not original == 'indien':
act_frame = get_empty_act_frame()
# print("act found: " + original)
list_act.append([original, row['Brontekst']])
# print(original + "\t" + row['text:'])
act_frame['action'] = "[" + infinitive + "]"
# if we know that there should be preconditions, add them
if ":" in row['Brontekst'] and original in parts[0]:
act_function = create_fact_or_act_function(''.join(parts[1:]).split("$"))
act_frame['preconditions'] = act_function
# print(''.join(parts[1:]).split("$$"))
# print(act_function)
# TODO in version 2: make a fact of the pre-condition
# get_empty_fact_frame()
actor, obj = get_object_and_actor(original, tags)
# hack: first character is a space; use from second on
act_frame['actor'] = "[" + actor[1:] + "]"
act_frame['act'] = "<<" + infinitive + obj.lower() + ">>"
act_frame['object'] = "[" + obj[1:].lower() + "]"
# TODO in version 2: make code better; now only vreemdeling as recipient
if "vreemdeling" in row['Brontekst']:
act_frame['recipient'] = "[vreemdeling]"
source_dict_act = get_source_dict(row, row['Brontekst'], name_law)
act_frame['sources'].append(source_dict_act)
flint_frames['acts'].append(act_frame)
return flint_frames
def get_facts(row, part, name_law) -> dict:
global facts_list
fact_frame = get_empty_fact_frame()
source_dict = get_source_dict(row, part, name_law)
fact_frame['sources'].append(source_dict)
    # The fact has to be enclosed in brackets
fact_frame['fact'] = "[" + part.split(":")[0][1:] + "]"
facts_list.append(part.split(":")[0][1:])
# create the function. In case of Artikel 1 this is the (one) definition that is after the :
list_defs = [part.split(":")[1]]
fact_function = create_fact_or_act_function(list_defs)
fact_frame['function'] = fact_function
return fact_frame
def create_flint_frames(df, name_law) -> dict:
flint_frames = get_empty_flint_frame_format()
global facts_list
facts_list = []
# loop through the rows and create acts and facts as we go
for index, row in df.iterrows():
# we start with Facts that are present in the First Article
# try:
# Bug Fix: able to handle all prefixes before Artikel1
if str(row['Nummer'].split("/")[len(row['Nummer'].split("/")) - 1]) == 'Artikel1' and type(
row['Brontekst']) != float:
for part in row['Brontekst'].split("$"):
if ":" in part and not "Onderdeel" in part:
if part.split(":")[0][1:] not in facts_list and len(part.split(":")[1]) > 2:
# Facts
list_fact.append([part.split(":")[0][1:], part.split(":")[1].split(";")[0]])
# print(part.split(":")[0][1:] + "\t" + part.split(":")[1].split(";")[0])
fact_frame = get_facts(row, part, name_law)
flint_frames['facts'].append(fact_frame)
# Acts: only if we have verbs
if not "[]" == row['verbs']:
            # hack: convert back to a dict / list if we load a dataframe from another format
verbs = ast.literal_eval(row['verbs'])
tags = ast.literal_eval(row['tags'])
# because more than one act_frame could be created, go on the level of the flint_frames
flint_frames = get_acts(row, verbs, tags, flint_frames, name_law)
        else:
            pass  # no acts in this row
return flint_frames
def write_flint_frames_to_json(flint_frames, flint_file):
with open(str(flint_file), 'w') as f:
json.dump(flint_frames, f)
print("flint frames written to " + str(flint_file))
def dataframe_to_frame_parser(csv_file, output_file):
name_law = csv_file.split("_")[len(csv_file.split("_")) - 1].split(".")[0]
if name_law == 'postagged':
name_law = csv_file.split("_")[len(csv_file.split("_")) - 2].split(".")[0]
pos_tagged_df = read_csv_to_df(str(csv_file))
# print(name_law)
flint_frames = create_flint_frames(pos_tagged_df, name_law)
write_flint_frames_to_json(flint_frames, output_file)
# if __name__ == '__main__':
# method = "TOGS"
# base = 'C:\\Users\\boermhtd\\PycharmProjects\\calculemus\\nlp\\data\\csv_files\\postagged\\'
# if method == "TOGS":
# csv_file = base + 'BWBR0043324_2020-03-31_0_TOGS_postagged.csv'
# elif method == "TOZO":
# csv_file = base + 'BWBR0043402_2020-04-22_0_TOZO_postagged.csv'
# elif method == "AWB":
# csv_file = base + 'BWBR0005537_2020-04-15_0_AWB_postagged.csv'
#
# #'BWBR0011823_2019-02-27_Vreemdelingenwet_postagged.csv'
#
# output_file = method + '_new.json'
# dataframe_to_frame_parser(csv_file, output_file)
#
# act_file = "acts_" + method + ".csv"
# df_act = pd.DataFrame(list_act, columns = ['action', 'sentence'])
# df_act.to_csv(act_file, index=False)
#
# fact_file = "facts_" + method + ".csv"
# df_fact = pd.DataFrame(list_fact, columns = ['fact', 'definition'])
# df_fact.to_csv(fact_file, index=False)
# # df = read_csv_to_df(str(csv_file))
# flint_frames = create_flint_frames(df)
# write_flint_frames_to_json(flint_frames)
| 41.748503
| 118
| 0.590863
| 1,763
| 13,944
| 4.514464
| 0.25865
| 0.030406
| 0.005277
| 0.00691
| 0.155547
| 0.102274
| 0.079281
| 0.063073
| 0.047493
| 0.029652
| 0
| 0.014638
| 0.270009
| 13,944
| 333
| 119
| 41.873874
| 0.767266
| 0.302137
| 0
| 0.143617
| 0
| 0
| 0.207934
| 0
| 0
| 0
| 0
| 0.003003
| 0
| 1
| 0.074468
| false
| 0
| 0.021277
| 0.005319
| 0.154255
| 0.015957
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e1b7e1efb40a138e872299167e3dc139051bf3e
| 4,677
|
py
|
Python
|
tools/webcam/webcam_apis/nodes/mmdet_node.py
|
pallgeuer/mmpose
|
d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd
|
[
"Apache-2.0"
] | null | null | null |
tools/webcam/webcam_apis/nodes/mmdet_node.py
|
pallgeuer/mmpose
|
d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd
|
[
"Apache-2.0"
] | null | null | null |
tools/webcam/webcam_apis/nodes/mmdet_node.py
|
pallgeuer/mmpose
|
d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Union
import numpy as np
from .builder import NODES
from .node import MultiInputNode, Node
try:
from mmdet.apis import inference_detector, init_detector
has_mmdet = True
except (ImportError, ModuleNotFoundError):
has_mmdet = False
@NODES.register_module()
class DetectorNode(Node):
def __init__(self,
name: str,
model_config: str,
model_checkpoint: str,
input_buffer: str,
output_buffer: Union[str, List[str]],
enable_key: Optional[Union[str, int]] = None,
enable: bool = True,
device: str = 'cuda:0'):
# Check mmdetection is installed
assert has_mmdet, \
f'MMDetection is required for {self.__class__.__name__}.'
super().__init__(name=name, enable_key=enable_key, enable=enable)
self.model_config = model_config
self.model_checkpoint = model_checkpoint
self.device = device.lower()
# Init model
self.model = init_detector(
self.model_config, self.model_checkpoint, device=self.device)
# Register buffers
self.register_input_buffer(input_buffer, 'input', trigger=True)
self.register_output_buffer(output_buffer)
def bypass(self, input_msgs):
return input_msgs['input']
def process(self, input_msgs):
input_msg = input_msgs['input']
img = input_msg.get_image()
preds = inference_detector(self.model, img)
det_result = self._post_process(preds)
input_msg.add_detection_result(det_result, tag=self.name)
return input_msg
def _post_process(self, preds):
if isinstance(preds, tuple):
dets = preds[0]
segms = preds[1]
else:
dets = preds
segms = [None] * len(dets)
det_model_classes = self.model.CLASSES
if isinstance(det_model_classes, str):
det_model_classes = (det_model_classes, )
assert len(dets) == len(det_model_classes)
assert len(segms) == len(det_model_classes)
result = {'preds': [], 'model_cfg': self.model.cfg.copy()}
for i, (cls_name, bboxes,
masks) in enumerate(zip(det_model_classes, dets, segms)):
if masks is None:
masks = [None] * len(bboxes)
else:
assert len(masks) == len(bboxes)
preds_i = [{
'cls_id': i,
'label': cls_name,
'bbox': bbox,
'mask': mask,
} for (bbox, mask) in zip(bboxes, masks)]
result['preds'].extend(preds_i)
return result
@NODES.register_module()
class MultiFrameDetectorNode(DetectorNode, MultiInputNode):
"""Detect hand with one frame in a video clip. The length of clip is
decided on the frame rate and the inference speed of detector.
Parameters:
inference_frame (str): indicate the frame selected in a clip to run
detect hand. Can be set to ('begin', 'mid', 'last').
Default: 'mid'.
"""
def __init__(self,
name: str,
model_config: str,
model_checkpoint: str,
input_buffer: str,
output_buffer: Union[str, List[str]],
inference_frame: str = 'mid',
enable_key: Optional[Union[str, int]] = None,
device: str = 'cuda:0'):
DetectorNode.__init__(
self,
name,
model_config,
model_checkpoint,
input_buffer,
output_buffer,
enable_key,
device=device)
self.inference_frame = inference_frame
def process(self, input_msgs):
"""Select frame and detect hand."""
input_msg = input_msgs['input']
if self.inference_frame == 'last':
key_frame = input_msg[-1]
elif self.inference_frame == 'mid':
key_frame = input_msg[len(input_msg) // 2]
elif self.inference_frame == 'begin':
key_frame = input_msg[0]
else:
raise ValueError(f'Invalid inference_frame {self.inference_frame}')
img = key_frame.get_image()
preds = inference_detector(self.model, img)
det_result = self._post_process(preds)
imgs = [frame.get_image() for frame in input_msg]
key_frame.set_image(np.stack(imgs, axis=0))
key_frame.add_detection_result(det_result, tag=self.name)
return key_frame
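# A minimal usage sketch (not from the source; the config paths and buffer names
# below are hypothetical). Nodes in the mmpose webcam tooling are typically built
# from config dicts through the NODES registry:
#   det_node = NODES.build(dict(
#       type='DetectorNode',
#       name='Detector',
#       model_config='configs/mmdet/some_detector.py',      # hypothetical path
#       model_checkpoint='checkpoints/some_detector.pth',   # hypothetical path
#       input_buffer='_input_',
#       output_buffer='det_result'))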
| 32.034247
| 79
| 0.584135
| 537
| 4,677
| 4.839851
| 0.26257
| 0.030781
| 0.0404
| 0.018469
| 0.259331
| 0.183147
| 0.183147
| 0.158523
| 0.158523
| 0.124663
| 0
| 0.002517
| 0.320505
| 4,677
| 145
| 80
| 32.255172
| 0.815293
| 0.092795
| 0
| 0.278846
| 0
| 0
| 0.044006
| 0.011418
| 0
| 0
| 0
| 0
| 0.038462
| 1
| 0.057692
| false
| 0.009615
| 0.057692
| 0.009615
| 0.173077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e1e6490f04076ef930623904d9e0fdabc66c26f
| 1,325
|
py
|
Python
|
gryphon/fsm/machine.py
|
vittorfp/labskit_cli
|
28e109b4a9f36a03d499eb953e04a4fb787632fe
|
[
"MIT"
] | null | null | null |
gryphon/fsm/machine.py
|
vittorfp/labskit_cli
|
28e109b4a9f36a03d499eb953e04a4fb787632fe
|
[
"MIT"
] | 1
|
2022-03-08T14:54:26.000Z
|
2022-03-08T15:02:52.000Z
|
gryphon/fsm/machine.py
|
vittorfp/labskit_cli
|
28e109b4a9f36a03d499eb953e04a4fb787632fe
|
[
"MIT"
] | null | null | null |
class HaltSignal(Exception):
def __init__(self):
super().__init__()
class Machine:
def __init__(self, initial_state, possible_states):
self.history = [initial_state.name]
self.possible_states = possible_states
self.current_state = initial_state
def find_state_by_name(self, name):
filtered = list(filter(lambda x: name == x.name, self.possible_states))
        if filtered:
            return filtered[0]
else:
names = [
p.name
for p in self.possible_states
]
raise RuntimeError(f"State '{name}' not found in possible states: {names}")
def run_interaction(self, context: dict):
context = self.current_state.on_start(context)
transition = self.current_state.check_transitions(context)
        # Halt must be checked before dereferencing the transition; the original read
        # transition.next_state first, raising AttributeError instead of HaltSignal
        if transition is None:
            raise HaltSignal()
        self.current_state = self.find_state_by_name(transition.next_state)
        context = transition.callback(context)
self.history.append(self.current_state.name)
return context
def run(self):
context = {}
while self.current_state and not self.current_state.is_final_state():
context = self.run_interaction(context)
self.current_state.on_start(context)
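# A minimal sketch (not from the source) of the state/transition API that Machine
# relies on: states expose name, on_start, check_transitions and is_final_state;
# transitions carry next_state and callback. Everything below that Machine itself
# does not reference is an assumption for illustration.
#
#   from collections import namedtuple
#   Transition = namedtuple("Transition", ["next_state", "callback"])
#
#   class State:
#       def __init__(self, name, next_name=None):
#           self.name = name
#           self.next_name = next_name
#       def on_start(self, context):
#           return context
#       def check_transitions(self, context):
#           if self.next_name is None:
#               return None
#           return Transition(self.next_name, lambda ctx: ctx)
#       def is_final_state(self):
#           return self.next_name is None
#
#   start, end = State("start", next_name="end"), State("end")
#   machine = Machine(start, [start, end])
#   machine.run()            # walks start -> end
#   print(machine.history)   # ['start', 'end']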
| 29.444444
| 87
| 0.636226
| 154
| 1,325
| 5.201299
| 0.344156
| 0.109863
| 0.1598
| 0.086142
| 0.092385
| 0.092385
| 0.092385
| 0
| 0
| 0
| 0
| 0.001042
| 0.275472
| 1,325
| 44
| 88
| 30.113636
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.039305
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15625
| false
| 0
| 0
| 0
| 0.28125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e1ef7bc29a97c874523d2f21ef24ab69fc641da
| 708
|
py
|
Python
|
cursos_complementarios/estructuras_datos_lineales_python/modulo_II_arrays/utils/cube.py
|
EdinsonRequena/articicial-inteligence-and-data-science
|
953566220e64cbd8f732c2667b818da807bb54c0
|
[
"MIT"
] | 30
|
2020-06-19T16:21:04.000Z
|
2022-02-19T01:48:39.000Z
|
cursos_complementarios/estructuras_datos_lineales_python/modulo_II_arrays/utils/cube.py
|
Samsuesca/articicial-inteligence-and-data-science
|
953566220e64cbd8f732c2667b818da807bb54c0
|
[
"MIT"
] | 87
|
2021-02-12T04:42:13.000Z
|
2021-09-20T04:25:29.000Z
|
cursos_complementarios/estructuras_datos_lineales_python/modulo_II_arrays/utils/cube.py
|
Samsuesca/articicial-inteligence-and-data-science
|
953566220e64cbd8f732c2667b818da807bb54c0
|
[
"MIT"
] | 11
|
2020-08-13T04:04:01.000Z
|
2022-01-20T20:10:43.000Z
|
from .array import Array
from .grid import Grid
class Cube(object):
"""three-dimensional array"""
def __init__(self, nrows, ncols, deep, value=None) -> None:
"""Initializes the Cube with nrows, ncols, deep and optional value"""
self.data = Array(deep)
for i in range(deep):
self.data[i] = Grid(nrows, ncols, value)
def __getdeep__(self) -> int:
"""Return the whole cube"""
return len(self.data)
def __str__(self) -> str:
"""Return the cube as a string"""
result = ""
for array in range(self.__getdeep__()):
result += self.data[array].__str__()
result += "\n"
return str(result)
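# Usage sketch (not from the source): a cube of two 2x3 grids filled with zeros
#   cube = Cube(2, 3, 2, value=0)
#   cube.__getdeep__()   # -> 2
#   print(cube)          # prints each 2x3 grid, one grid per block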
| 27.230769
| 77
| 0.574859
| 88
| 708
| 4.397727
| 0.431818
| 0.082687
| 0.072351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.292373
| 708
| 25
| 78
| 28.32
| 0.772455
| 0.193503
| 0
| 0
| 0
| 0
| 0.003643
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.133333
| 0
| 0.533333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e26c8f3d5348e863a10d16b62007dbfcaa204c5
| 1,126
|
py
|
Python
|
setup.py
|
TimSusa/aptly-api-cli
|
011ba8e7f464726b336b53f6b2cbdc4490b5180c
|
[
"MIT"
] | 17
|
2016-03-15T10:07:27.000Z
|
2022-03-07T17:55:01.000Z
|
setup.py
|
TimSusa/aptly-api-cli
|
011ba8e7f464726b336b53f6b2cbdc4490b5180c
|
[
"MIT"
] | 2
|
2016-03-15T12:50:58.000Z
|
2018-04-17T03:45:17.000Z
|
setup.py
|
TimSusa/aptly-api-cli
|
011ba8e7f464726b336b53f6b2cbdc4490b5180c
|
[
"MIT"
] | 5
|
2017-05-07T20:01:49.000Z
|
2018-06-06T13:43:02.000Z
|
try:
from setuptools import setup, find_packages
from pkg_resources import Requirement, resource_filename
except ImportError:
    # distutils.core provides no find_packages (or pkg_resources); fall back to a
    # plain setup and the explicit package list recorded in the comment below
    from distutils.core import setup
    def find_packages():
        return ['aptly_cli', 'aptly_cli.api', 'aptly_cli.cli', 'aptly_cli.util']
setup(
name='Aptly-Api-Cli',
version='0.1',
url='https://github.com/TimSusa/aptly_api_cli',
license='MIT',
keywords="aptly aptly-server debian",
author='Tim Susa',
author_email='timsusa@gmx.de',
description='This cli executes remote calls to the Aptly server, without blocking the Aptly database.',
long_description=__doc__,
packages=find_packages(),
package_dir={'aptly_cli': 'aptly_cli'},
# packages=['aptly_cli', 'aptly_cli.api', 'aptly_cli.cli', 'aptly_cli.util'],
# py_modules=['aptly_cli.api.api', 'cli'],
entry_points={
'console_scripts': [
'aptly-cli=aptly_cli.cli.cli:main'
]
},
# data_files=[
# ('configs', ['configs/aptly-cli.conf']),
# ],
# package_data={'configs': ['aptly_cli/configs/aptly-cli.conf']},
platforms='any'
)
# resource_filename only exists on the setuptools import path above
if 'resource_filename' in globals():
    filename = resource_filename(Requirement.parse("Aptly-Api-Cli"), "configs/aptly-cli.conf")
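# Typical install-and-run sketch (assumption: standard setuptools workflow):
#   pip install .
#   aptly-cli     # the console script declared in entry_points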
| 33.117647
| 107
| 0.667851
| 141
| 1,126
| 5.12766
| 0.48227
| 0.143845
| 0.060858
| 0.06639
| 0.060858
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002167
| 0.180284
| 1,126
| 33
| 108
| 34.121212
| 0.781148
| 0.214032
| 0
| 0
| 0
| 0
| 0.338269
| 0.061503
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.16
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e27d12ca0167eeef14eeab8dc9bfe483d5dc2db
| 417
|
py
|
Python
|
2018-04/2018-04-11.py
|
shangpf1/python_study
|
6730519ce7b5cf4612e1c778ae5876cfbb748a4f
|
[
"MIT"
] | null | null | null |
2018-04/2018-04-11.py
|
shangpf1/python_study
|
6730519ce7b5cf4612e1c778ae5876cfbb748a4f
|
[
"MIT"
] | null | null | null |
2018-04/2018-04-11.py
|
shangpf1/python_study
|
6730519ce7b5cf4612e1c778ae5876cfbb748a4f
|
[
"MIT"
] | null | null | null |
class Employee:
    def __init__(self, first, last, pay):
        self.first = first
        self.last = last
        self.email = first + last + '@123.com'
        self.pay = pay
    def fullname(self):
        return '{} {}'.format(self.first, self.last)
emp_1 = Employee('hello','world',1900)
emp_2 = Employee('test','world',2000)
print(emp_1)
print(emp_2)
print(emp_1.fullname())
print(emp_2.fullname())
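# Expected output: two default object reprs such as
#   <__main__.Employee object at 0x...>
# followed by the formatted names:
#   hello world
#   test world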
| 18.130435
| 52
| 0.606715
| 58
| 417
| 4.189655
| 0.396552
| 0.131687
| 0.106996
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0.22542
| 417
| 22
| 53
| 18.954545
| 0.69969
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0.071429
| 0.214286
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e298d0ad6de43261aab1a6d6e7529e6494b22c8
| 658
|
py
|
Python
|
src/heatmap.py
|
JsPatenaude/INF8808_projet
|
601a7505188f379365a32594b484cee3d924a52a
|
[
"MIT"
] | null | null | null |
src/heatmap.py
|
JsPatenaude/INF8808_projet
|
601a7505188f379365a32594b484cee3d924a52a
|
[
"MIT"
] | null | null | null |
src/heatmap.py
|
JsPatenaude/INF8808_projet
|
601a7505188f379365a32594b484cee3d924a52a
|
[
"MIT"
] | null | null | null |
import plotly.express as px
from preprocess import PreprocessHeatmap
def get_figure(df):
pp = PreprocessHeatmap()
heatmap_df = pp.preprocess_heatmap(df)
hover_template = \
'''
<b style="font-size: 20px;>%{x}, %{y}h00</b>
<br>
<span style="font-size: 16px;>%{z:.0f} likes générés</span>
<extra></extra>
'''
fig = px.imshow(heatmap_df)
fig.update_layout(
xaxis_title='Jour de la semaine',
yaxis_title='Heure de la journée',
yaxis_nticks=24,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
fig.update_traces(hovertemplate=hover_template)
return fig
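# Usage sketch (assumption: df is the preprocessed posts DataFrame the app loads):
#   fig = get_figure(df)
#   fig.show()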
| 26.32
| 63
| 0.62766
| 90
| 658
| 4.444444
| 0.611111
| 0.03
| 0.03
| 0.065
| 0.075
| 0.075
| 0
| 0
| 0
| 0
| 0
| 0.033399
| 0.226444
| 658
| 25
| 64
| 26.32
| 0.752456
| 0
| 0
| 0
| 0
| 0
| 0.124506
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e2a78cc73dc66dd46aa1290d150d2b082861993
| 13,985
|
py
|
Python
|
client/forumgame.py
|
codingforhelp/fbserv
|
b09cc2ce20eaa3714e80d23e0f5741f144d2eed2
|
[
"MIT"
] | 5
|
2019-01-31T08:09:53.000Z
|
2020-04-13T22:48:25.000Z
|
client/forumgame.py
|
codingforhelp/fbserv
|
b09cc2ce20eaa3714e80d23e0f5741f144d2eed2
|
[
"MIT"
] | 2
|
2021-04-30T21:04:37.000Z
|
2021-06-01T23:42:18.000Z
|
client/forumgame.py
|
codingforhelp/fbserv
|
b09cc2ce20eaa3714e80d23e0f5741f144d2eed2
|
[
"MIT"
] | 3
|
2019-08-04T07:51:58.000Z
|
2022-02-25T13:39:30.000Z
|
from dom import e, Div, TextInput, Button, TextArea
from basicboard import BasicBoard
from connection import getconn
from utils import queryparams, random, setseed
mainseed = 80
class Forumnode(e):
    def __init__(self, root, args=None):
        super().__init__("div")
        if args is None:
            args = {}  # avoid a shared mutable default argument
self.root = root
self.move = args["move"]
self.uci = args["uci"]
self.comment = args["comment"]
if not self.comment:
self.comment = ""
self.owner = args["owner"]
self.fen = args["fen"]
self.parent = args["parent"]
self.isadmin = args["isadmin"]
self.halfmoveno = args["halfmoveno"]
if not self.halfmoveno:
self.halfmoveno = -1
self.childs = []
self.build()
def toobj(self):
moveobjs = {}
for child in self.childs:
moveobjs[child.move] = child.toobj()
return {
"uci": self.uci,
"comment": self.comment,
"owner": self.owner,
"fen": self.fen,
"moves": moveobjs
}
def appendchild(self, node):
node.halfmoveno = self.halfmoveno + 1
node.build()
self.childs.append(node)
self.containerdiv.a(node)
if len(self.childs) > 1:
rgb = "rgb({},{},{})".format(int(random()*128 + 127),int(random()*128 + 127),int(random()*128 + 127))
self.containerdiv.bc(rgb).bds("solid").bdw(10).bdr(20).bdc(rgb)
def addnode(self):
input = window.prompt("Move:uci:owner:fen", "")
if input:
self.root.shift()
parts = input.split(":")
self.appendchild(Forumnode(self.root, {
"move": parts[0],
"uci": None,
"comment": "",
"uci": parts[0],
"owner": parts[2],
"fen": parts[2],
"parent": self,
"isadmin": self.isadmin
}))
self.root.parse()
def edituci(self):
input = window.prompt("Uci", "")
if input:
self.uci = input
self.setboard()
self.ucidiv.html(self.uci)
self.root.parse()
def editfen(self):
input = window.prompt("Fen", "")
if input:
self.fen = input
self.setboard()
self.root.parse()
def setmovelabel(self):
if self.halfmoveno < 0:
moveno = ""
        elif (self.halfmoveno % 2) == 0:
            # str() and integer division keep this valid in CPython as well as under
            # Transcrypt, where JS semantics let a number concatenate with a string
            moveno = str((self.halfmoveno + 2) // 2) + ". "
        else:
            moveno = str((self.halfmoveno + 1) // 2) + ".. "
self.movelabeldiv.html("{}{}".format(moveno, self.move))
def editsan(self):
input = window.prompt("San", "")
if input:
self.move = input
self.setmovelabel()
self.root.parse()
def editcomment(self):
input = window.prompt("Comment", self.comment)
if input:
self.comment = input
self.commentdiv.html(self.comment)
self.root.parse()
def editowner(self):
input = window.prompt("Owner", "")
if input:
self.owner = input
self.ownerdiv.html(self.owner)
self.root.parse()
def movecallback(self, variantkey, fen, uci):
        if self.root.reqfenunderway:  # the in-progress flag lives on the root Forumgame, not on the node
print("a fen request is in progress, cannot start a new one")
return
self.root.shift()
self.root.reqfenunderway = True
self.root.reqnode = self
getconn().sioreq({
"kind": "forumgamemove",
"owner": "forumgame",
"moveuci": uci,
"variantkey": variantkey,
"fen": fen
})
def bbdragstart(self, ev):
ev.stopPropagation()
def setboard(self):
initobj = {
"fen": self.fen,
"squaresize": 20,
"showfen": False,
"movecallback": self.movecallback,
"variantkey": "atomic"
}
if self.uci:
initobj["positioninfo"] = {
"genmove": {
"uci": self.uci
}
}
b = BasicBoard(initobj)
b.cp().ae("dragstart", self.bbdragstart)
self.boarddiv.x().a(b)
def analyzelocal(self):
try:
self.root.mainboard.variantchanged("atomic", self.fen)
self.root.parenttabpane.selectbykey("board")
        except Exception:
            pass  # the main board or tab pane may not be available yet
def analyzelichess(self):
window.open("https://lichess.org/analysis/atomic/" + self.fen, "_blank")
def delchilds(self):
self.childs = []
self.root.rebuild(mainseed)
def delme(self):
parent = self.parent
if parent:
newchilds = []
for child in parent.childs:
print("child", child.move, child.uci)
if not ( child == self ):
newchilds.append(child)
parent.childs = newchilds
self.root.rebuild(mainseed)
def serializefunc(self):
self.root.rebuild(mainseed + 1)
self.root.store()
def serialize(self):
self.infohook.html("serializing")
setTimeout(self.serializefunc, 100)
def copysrc(self):
self.root.copysrc()
def copylink(self):
ti = TextInput()
self.linktexthook.a(ti)
ti.setText("https://fbserv.herokuapp.com/analysis/atomic/" + self.fen.replace(" ", "%20"))
ti.e.select()
document.execCommand("copy")
self.linktexthook.x()
def build(self):
self.movediv = Div().disp("flex").fd("row").ai("center")
self.movedescdiv = Div().bc("#eee").w(110).maw(110).pad(3)
self.movelabeldiv = Div().fw("bold").pad(3).ff("monospace")
self.setmovelabel()
self.ownerdiv = Div().html(self.owner).ff("monospace").fs("10").c("#007")
self.ucidiv = Div().ff("monospace").fs("12").pad(3)
self.commentdiv = Div().fs("12").pad(5).html(self.comment)
if self.uci:
self.ucidiv.html(self.uci)
self.movedescdiv.a([self.movelabeldiv, self.ownerdiv, self.commentdiv])
self.movedescdiv.a(Button("Analyze local", self.analyzelocal).mar(2))
self.movedescdiv.a(Button("Analyze lichess", self.analyzelichess).mar(2))
self.infohook = Div().ff("monospace").pad(3).c("#007").fw("bold").html("built")
if self.isadmin:
self.movedescdiv.a(self.infohook)
self.linktexthook = Div()
self.movedescdiv.a(self.ucidiv)
self.movedescdiv.a(Button("+", self.addnode).pad(5))
self.movedescdiv.a(Button("san", self.editsan).pad(5))
self.movedescdiv.a(Button("uci", self.edituci).pad(5))
self.movedescdiv.a(Button("fen", self.editfen).pad(5))
self.movedescdiv.a(Button("comment", self.editcomment).pad(5))
self.movedescdiv.a(Button("owner", self.editowner).pad(5))
self.movedescdiv.a(Button("serialize", self.serialize).pad(5).bc("#ffa"))
self.movedescdiv.a(Button("copy", self.copysrc).pad(5).bc("#afa"))
self.movedescdiv.a(self.linktexthook)
self.movedescdiv.a(Button("link", self.copylink).pad(5).bc("#aff"))
self.movedescdiv.a(Button("delchilds", self.delchilds).pad(5).bc("#faa"))
self.movedescdiv.a(Button("delme", self.delme).pad(5).bc("#faa"))
self.boarddiv = Div().pad(2)
self.movecontainerdiv = Div().disp("flex").fd("row").ai("center")
self.movecontainerdiv.a([self.movedescdiv, self.boarddiv])
self.containerdiv = Div().disp("flex").fd("column").ai("flex-start")
self.movediv.a([self.movecontainerdiv, self.containerdiv])
self.setboard()
self.x().a(self.movediv)
self.mw(600)
class Forumgame(e):
def __init__(self):
super().__init__("div")
self.messagediv = Div().disp("inline-block").pad(3).ff("monospace")
self.contentdiv = Div()
self.a([self.messagediv, self.contentdiv])
        self.reqfenunderway = False
        self.reqnode = None
        self.dragunderway = False  # initialize so mousemove can test it before any drag starts
self.requestforumgame()
self.ae("mousemove", self.mousemove)
self.ae("mouseup", self.mouseup)
self.ae("mouseleave", self.mouseleave)
def copysrc(self):
self.textarea.e.select()
document.execCommand("copy")
window.alert("Copied source to clipboard, {} characters.".format(len(self.textarea.getText())))
def mousemove(self, ev):
if self.dragunderway:
dx = ev.clientX - self.dragstartx
dy = ev.clientY - self.dragstarty
self.parenttabpane.contentdiv.e.scrollTop = self.scrolltop + 20 * dy
self.parenttabpane.contentdiv.e.scrollLeft = self.scrollleft + 20 * dx
def mouseup(self, ev):
self.dragunderway = False
def mouseleave(self, ev):
self.dragunderway = False
def parse(self):
obj = self.rootnode.toobj()
text = JSON.stringify(obj, None, 2)
self.textarea.setText(text)
return text
def store(self):
self.parenttabpane.contentdiv.bc("#faa")
self.messagediv.html("Parsing JSON")
try:
obj = JSON.parse(self.textarea.getText())
self.messagediv.html("Storing JSON")
getconn().sioreq({
"kind": "setforumgame",
"owner": "forumgame",
"forumgame": obj
})
except:
self.messagediv.html("Error: could not parse JSON")
return
def requestforumgame(self):
getconn().sioreq({
"kind": "getforumgame",
"owner": "forumgame"
})
def buildrec(self, parentnode, tree):
__pragma__("jsiter")
if not tree["moves"]:
return
for move in tree["moves"]:
moveobj = tree["moves"][move]
node = Forumnode(self, {
"move": move,
"uci": moveobj["uci"],
"comment": moveobj["comment"],
"owner": moveobj["owner"],
"fen": moveobj["fen"],
"parent": parentnode,
"isadmin": self.isadmin
})
parentnode.appendchild(node)
self.buildrec(node, moveobj)
__pragma__("nojsiter")
def build(self, text, seed):
setseed(seed)
self.contentdiv.x().pad(3)
self.textarea = TextArea().w(1000).h(200)
self.textarea.setText(text)
self.controlpanel = Div()
self.controlpanel.a(Button("Store", self.store))
if self.isadmin:
self.contentdiv.a(self.textarea)
self.contentdiv.a(self.controlpanel)
self.rootnode = Forumnode(self, {
"move": "startpos",
"uci": None,
"owner": "Wolfram_EP",
"comment": "Forum game",
"fen": "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1",
"parent": None,
"isadmin": self.isadmin
})
self.contentdiv.a(self.rootnode)
self.buildrec(self.rootnode, self.forumgame)
#self.rootnode.e.scrollIntoView(True)
self.parenttabpane.setscroll()
self.contentdiv.sa("draggable", True).cm().ae("dragstart", self.dragstart)
def dragstart(self, ev):
ev.preventDefault()
self.dragstartx = ev.clientX
self.dragstarty = ev.clientY
self.scrolltop = self.parenttabpane.contentdiv.e.scrollTop
self.scrollleft = self.parenttabpane.contentdiv.e.scrollLeft
self.dragunderway = True
def rebuild(self, seed):
text = self.parse()
self.forumgame = JSON.parse(text)
self.build(text, seed)
def shift(self):
sl = self.parenttabpane.contentdiv.e.scrollLeft
self.parenttabpane.contentdiv.e.scrollLeft = sl + 300
def siores(self, response):
if response["kind"] == "setforumgame":
self.forumgame = response["forumgame"]
self.messagediv.html("Forumgame loaded")
self.isadmin = response["isadmin"]
if queryparams.get("noadmin", "false") == "true":
self.isadmin = False
self.build(JSON.stringify(self.forumgame, None, 2), mainseed)
self.parenttabpane.contentdiv.bc("#def")
if response["kind"] == "setforumgamedone":
self.messagediv.html("Stored, refreshing")
self.requestforumgame()
if response["kind"] == "setforumgamefen":
posinfo = response["positioninfo"]
fen = response["fen"]
san = posinfo["genmove"]["san"]
uci = posinfo["genmove"]["uci"]
rp = self.reqnode.parent
owner = None
if rp:
owner = rp.owner
if not owner:
owner = window.prompt("Owner", "?")
if not owner:
owner = "?"
self.reqnode.appendchild(Forumnode(self, {
"move": san,
"uci": uci,
"comment": "",
"owner": owner,
"fen": fen,
"parent": self.reqnode,
"isadmin": self.isadmin
}))
self.parse()
| 36.609948
| 125
| 0.512835
| 1,388
| 13,985
| 5.148415
| 0.198847
| 0.02239
| 0.038063
| 0.040022
| 0.133361
| 0.095018
| 0.014134
| 0.014134
| 0
| 0
| 0
| 0.012072
| 0.348445
| 13,985
| 382
| 126
| 36.609948
| 0.772169
| 0.002574
| 0
| 0.209302
| 0
| 0
| 0.096119
| 0.003084
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104651
| false
| 0.002907
| 0.011628
| 0
| 0.136628
| 0.005814
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e2ce6d71349214a1161e5b470a89bc7da49773f
| 6,513
|
py
|
Python
|
tests/mathbot_tests.py
|
RubyMarsden/Crayfish
|
33bbb1248beec2fc40eee59e462711dd8cbc33da
|
[
"MIT"
] | null | null | null |
tests/mathbot_tests.py
|
RubyMarsden/Crayfish
|
33bbb1248beec2fc40eee59e462711dd8cbc33da
|
[
"MIT"
] | 8
|
2021-03-19T06:35:48.000Z
|
2021-03-31T14:23:24.000Z
|
tests/mathbot_tests.py
|
RubyMarsden/Crayfish
|
33bbb1248beec2fc40eee59e462711dd8cbc33da
|
[
"MIT"
] | null | null | null |
import unittest
from models import settings
from models.mathbot import *
from models.settings import U238_DECAY_CONSTANT, U238_DECAY_CONSTANT_ERROR, TH232_DECAY_CONSTANT, \
TH232_DECAY_CONSTANT_ERROR
class MathbotTests(unittest.TestCase):
########################################
### Outlier resistant mean and stdev ###
########################################
def test_outlier_resistant_mean_no_outliers_allowed(self):
test_data = [1, 1, 2, 1, 4, 1, 2, 3, 9, 2]
mean, st_dev = calculate_outlier_resistant_mean_and_st_dev(test_data, 0)
self.assertEqual(np.mean(test_data), mean)
self.assertEqual(np.std(test_data), st_dev)
def test_outlier_resistant_mean_zeros(self):
test_data = [0] * 10
self.assertEqual((0, 0), calculate_outlier_resistant_mean_and_st_dev(test_data, 2))
def test_outlier_resistant_mean_empty_set(self):
self.assertRaises(IndexError, calculate_outlier_resistant_mean_and_st_dev, [], 2)
def test_outlier_resistant_mean_one_higher_outlier(self):
test_data = [1, 1, 1, 1, 1, 1, 1, 1, 1, 40]
mean, st_dev = calculate_outlier_resistant_mean_and_st_dev(test_data, 1)
mean_2, st_dev_2 = calculate_outlier_resistant_mean_and_st_dev(test_data, 2)
self.assertEqual(1, mean)
self.assertEqual(0, st_dev)
self.assertEqual(1, mean_2)
self.assertEqual(0, st_dev_2)
def test_outlier_resistant_mean_one_lower_outlier(self):
test_data = [1, 40, 40, 40, 40, 40, 40, 40, 40, 40]
mean, st_dev = calculate_outlier_resistant_mean_and_st_dev(test_data, 1)
mean_2, st_dev_2 = calculate_outlier_resistant_mean_and_st_dev(test_data, 2)
self.assertEqual(40, mean)
self.assertEqual(0, st_dev)
self.assertEqual(40, mean_2)
self.assertEqual(0, st_dev_2)
def test_outlier_resistant_mean_two_outliers(self):
test_data = [1, 40, 40, 40, 40, 40, 40, 40, 40, 400]
mean, st_dev = calculate_outlier_resistant_mean_and_st_dev(test_data, 1)
mean_2, st_dev_2 = calculate_outlier_resistant_mean_and_st_dev(test_data, 2)
self.assertEqual(np.mean(test_data), mean)
self.assertEqual(np.std(test_data), st_dev)
self.assertEqual(40, mean_2)
self.assertEqual(0, st_dev_2)
#######################
### Relative errors ###
#######################
def test_relative_errors_zero_case(self):
self.assertEqual(0, relative_error(0, 4))
def test_relative_errors_general(self):
self.assertEqual(0.1, relative_error(10, 1))
############################
### Errors in quadrature ###
############################
def test_errors_in_quadrature_single_error(self):
self.assertEqual(1, errors_in_quadrature([1]))
def test_errors_in_quadrature_general(self):
self.assertEqual(13, errors_in_quadrature([5, 12]))
def test_errors_in_quadrature_negative(self):
self.assertEqual(13, errors_in_quadrature([-5, 12]))
def test_errors_in_quadrature_decimals(self):
self.assertEqual(0.2, errors_in_quadrature([0.1, 0.1, 0.1, 0.1]))
########################################
### Interpolate to exponential curve ###
########################################
def test_interpolate_to_exponential(self):
a, b, y_est_rounded, y_est_rounded_uncertainty = interpolate_to_exponential((0, 10), 3, (1, 5), 2, 0.5)
self.assertEqual(10, a)
self.assertAlmostEqual(-0.693147180559945, b, 14)
self.assertAlmostEqual(7.07106781186548, y_est_rounded, 14)
self.assertAlmostEqual(1.76776695296637, y_est_rounded_uncertainty, 14)
def test_interpolate_to_exponential_invalid_points(self):
self.assertRaises(AssertionError, interpolate_to_exponential, (0, 0), 0, (0, 0), 0, 0)
self.assertRaises(AssertionError, interpolate_to_exponential, (0, 10), 0, (1, 5), 0, 2)
######################
### Activity ratio ###
######################
def test_activity_ratio_general(self):
ratio, ratio_uncertainty = activity_ratio(
cps_mass_1=10,
cps_mass_1_uncertainty=1,
decay_constant_1=2,
decay_constant_1_uncertainty=0.2,
cps_mass_2=20,
cps_mass_2_uncertainty=2,
decay_constant_2=5,
decay_constant_2_uncertainty=0.5
)
self.assertEqual(0.2, ratio)
self.assertAlmostEqual(0.04, ratio_uncertainty, 16)
def test_activity_ratio_data_values(self):
# using data from Heidelberg University 05/2020
ratio, ratio_uncertainty = activity_ratio(
cps_mass_1=12.0540007,
cps_mass_1_uncertainty=0.01,
decay_constant_1=U238_DECAY_CONSTANT,
decay_constant_1_uncertainty=U238_DECAY_CONSTANT_ERROR,
cps_mass_2=10,
cps_mass_2_uncertainty=0.01,
decay_constant_2=TH232_DECAY_CONSTANT,
decay_constant_2_uncertainty=TH232_DECAY_CONSTANT_ERROR
)
self.assertAlmostEqual(3.77943781422436, ratio, 14)
self.assertAlmostEqual(0.00531355971346501, ratio_uncertainty, 14)
def test_activity_ratio_invalid_input(self):
self.assertRaises(AssertionError, activity_ratio,
cps_mass_1=-1,
cps_mass_1_uncertainty=0.01,
decay_constant_1=U238_DECAY_CONSTANT,
decay_constant_1_uncertainty=U238_DECAY_CONSTANT_ERROR,
cps_mass_2=10,
cps_mass_2_uncertainty=0.01,
decay_constant_2=TH232_DECAY_CONSTANT,
decay_constant_2_uncertainty=TH232_DECAY_CONSTANT_ERROR
)
#########################
### Age from gradient ###
#########################
def test_age_from_gradient_zero_uncertainty(self):
age, uncertainty = calculate_age_from_values(0.5, 0, 1, 0, 0, 0)
self.assertEqual(-math.log(0.5) / settings.TH230_DECAY_CONSTANT, age)
self.assertEqual(uncertainty, 0)
def test_age_from_gradient_more_realistic(self):
age, uncertainty = calculate_age_from_values(3.02, 0.05, 6.33, 0.16, 0.32, 0.01)
self.assertEqual(-math.log(1 - (3.02 - 0.32)/(6.33 - 0.32)) / settings.TH230_DECAY_CONSTANT, age)
self.assertAlmostEqual(2459.439109, uncertainty, 6)
if __name__ == '__main__':
unittest.main()
| 40.453416
| 111
| 0.634423
| 831
| 6,513
| 4.583634
| 0.139591
| 0.088737
| 0.084012
| 0.023103
| 0.580992
| 0.502232
| 0.47414
| 0.415857
| 0.375689
| 0.36545
| 0
| 0.081621
| 0.223092
| 6,513
| 160
| 112
| 40.70625
| 0.671146
| 0.028558
| 0
| 0.305556
| 0
| 0
| 0.001349
| 0
| 0
| 0
| 0
| 0
| 0.342593
| 1
| 0.175926
| false
| 0
| 0.037037
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e2d4927d418a10f01fca137a00d8c7a207d49a7
| 2,748
|
py
|
Python
|
flask_modular_auth/manager.py
|
fabian-rump/flask_modular_auth
|
509def7b2cb366cba5d0d18187d99932c8ca00ef
|
[
"MIT"
] | null | null | null |
flask_modular_auth/manager.py
|
fabian-rump/flask_modular_auth
|
509def7b2cb366cba5d0d18187d99932c8ca00ef
|
[
"MIT"
] | null | null | null |
flask_modular_auth/manager.py
|
fabian-rump/flask_modular_auth
|
509def7b2cb366cba5d0d18187d99932c8ca00ef
|
[
"MIT"
] | null | null | null |
from .abstract import AbstractAuthProvider, AbstractUnauthenticatedEntity
from .utils import _context_processor
from flask import _request_ctx_stack, has_request_context
class AuthManager:
def __init__(self, app=None, unauthorized_callback=None, unauthenticated_entity_class=None):
self._auth_providers = []
if unauthenticated_entity_class:
self._unauthenticated_entity_class = unauthenticated_entity_class
else:
self._unauthenticated_entity_class = AbstractUnauthenticatedEntity
self._unauthorized_callback = unauthorized_callback
if app is not None:
self.init_app(app)
def init_app(self, app):
app.auth_manager = self
app.context_processor(_context_processor)
def set_unauthenticated_entity_class(self, unauthenticated_entity_class):
self._unauthenticated_entity_class = unauthenticated_entity_class
def unauthorized(self):
if has_request_context() and hasattr(_request_ctx_stack.top, 'unauthorized_callback'):
return _request_ctx_stack.top.unauthorized_callback()
elif self._unauthorized_callback:
return self._unauthorized_callback()
else:
return 'Not authorized', 403
def get_auth_providers(self):
"""
Get a list of all registered authentication providers.
        :return: List of authentication providers
"""
return self._auth_providers
def register_auth_provider(self, auth_provider):
"""
Register an authentication provider with the manager.
:param auth_provider: A valid authentication provider (i.e. an instance of a subclass of AbstractAuthenticationProvider)
"""
if auth_provider.__class__ == AbstractAuthProvider:
raise RuntimeError('Tried to add AbstractAuthProvider. Please add an implementing subclass object instead.')
elif not isinstance(auth_provider, AbstractAuthProvider):
raise ValueError('Tried to add an object which is no valid AuthProvider. Object should be instantiated from a subclass of AbstractAuthProvider.')
else:
self._auth_providers.append(auth_provider)
def _load_authenticated_entity(self):
ctx = _request_ctx_stack.top
if not self._auth_providers:
raise RuntimeError('Please register at least one authentication provider to get authenticated entities.')
for auth_provider in self._auth_providers:
entity = auth_provider.get_authenticated_entity()
if entity:
ctx.authenticated_entity = entity
return True
ctx.authenticated_entity = self._unauthenticated_entity_class()
return False
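# A minimal wiring sketch (not from the source; SessionAuthProvider is a stand-in
# name for any concrete AbstractAuthProvider subclass the application provides):
#   from flask import Flask
#   app = Flask(__name__)
#   auth_manager = AuthManager(app)
#   auth_manager.register_auth_provider(SessionAuthProvider())
# On each request, _load_authenticated_entity walks the registered providers in
# order and falls back to the configured unauthenticated entity class.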
| 42.9375
| 157
| 0.71361
| 294
| 2,748
| 6.346939
| 0.292517
| 0.11254
| 0.139335
| 0.080386
| 0.144695
| 0.144695
| 0.103966
| 0.087889
| 0.087889
| 0.087889
| 0
| 0.00142
| 0.231441
| 2,748
| 63
| 158
| 43.619048
| 0.882102
| 0.098617
| 0
| 0.111111
| 0
| 0.022222
| 0.136288
| 0.026098
| 0
| 0
| 0
| 0
| 0
| 1
| 0.155556
| false
| 0
| 0.066667
| 0
| 0.377778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e30d02b5676aa65a9e86f44cc1848fd4a7d7bb2
| 13,400
|
py
|
Python
|
models/iscnet/modules/relation_model.py
|
blakeyy/Relational-RfDNet
|
72f4e35601e963c91515f40707174c0d79cb5403
|
[
"MIT"
] | 1
|
2022-03-31T13:00:15.000Z
|
2022-03-31T13:00:15.000Z
|
models/iscnet/modules/relation_model.py
|
blakeyy/Relational-RfDNet
|
72f4e35601e963c91515f40707174c0d79cb5403
|
[
"MIT"
] | null | null | null |
models/iscnet/modules/relation_model.py
|
blakeyy/Relational-RfDNet
|
72f4e35601e963c91515f40707174c0d79cb5403
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from net_utils.nn_distance import nn_distance
from net_utils.relation_tool import PositionalEmbedding
from models.registers import MODULES
from models.iscnet.modules.proposal_module import decode_scores
from configs.scannet_config import ScannetConfig #param2obb
@MODULES.register_module
class RelationalProposalModule(nn.Module):
def __init__(self, cfg, optim_spec = None):
'''
Relation-based Proposal Module to enhance detected proposals.
        :param cfg: configuration file.
:param optim_spec: optimizer parameters.
'''
super(RelationalProposalModule, self).__init__()
'''Optimizer parameters used in training'''
self.optim_spec = optim_spec
self.cfg = cfg
'''Parameters'''
self.num_class = cfg.dataset_config.num_class
self.num_heading_bin = cfg.dataset_config.num_heading_bin
self.num_size_cluster = cfg.dataset_config.num_size_cluster
appearance_feature_dim = cfg.config['model']['relation_module']['appearance_feature_dim']
key_feature_dim = cfg.config['model']['relation_module']['key_feature_dim']
geo_feature_dim = cfg.config['model']['relation_module']['geo_feature_dim']
self.isDuplication = cfg.config['model']['relation_module']['isDuplication']
self.Nr = cfg.config['model']['relation_module']['n_relations']
self.dim_g = geo_feature_dim
'''Modules'''
self.gamma = nn.Parameter(torch.ones(1)) # requires_grad is True by default for Parameter
nn.init.constant_(self.gamma, 0.0)
#if self.cfg.config['model']['relation_module']['use_learned_pos_embed']:
# self.pos_embedding = PositionEmbeddingLearned(6, geo_feature_dim)
self.relation = nn.ModuleList()
for N in range(self.Nr):
self.relation.append(RelationUnit(appearance_feature_dim, key_feature_dim=key_feature_dim, geo_feature_dim=geo_feature_dim))
##### Adding concat to f_a
self.feature_transform1 = nn.Sequential(nn.Conv1d(128,128,1), \
nn.BatchNorm1d(128), \
nn.ReLU(), \
nn.Conv1d(128, appearance_feature_dim, 1))
self.feature_transform2 = nn.Sequential(nn.Conv1d(appearance_feature_dim, 128, 1), \
nn.BatchNorm1d(128), \
nn.ReLU(), \
nn.Conv1d(128, 128, 1))
self.proposal_generation = nn.Sequential(nn.Conv1d(128,128,1), \
nn.BatchNorm1d(128), \
nn.ReLU(), \
nn.Conv1d(128,128,1), \
nn.BatchNorm1d(128), \
nn.ReLU(), \
nn.Conv1d(128,5 + self.num_heading_bin*2 + self.num_size_cluster*4 + self.num_class,1))
##### Concatenate concat to f_a
#self.feature_transform2 = nn.Sequential(nn.Conv1d(appearance_feature_dim + self.dim_g*self.Nr, 128, 1), \
# nn.BatchNorm1d(128), \
# nn.ReLU(), \
# nn.Conv1d(128, 128, 1))
#self.proposal_generation = nn.Sequential(nn.Conv1d(128,128,1), \
# nn.BatchNorm1d(128), \
# nn.ReLU(), \
# nn.Conv1d(128,128,1), \
# nn.BatchNorm1d(128), \
# nn.ReLU(), \
# nn.Conv1d(128, 5 + self.num_heading_bin*2 + self.num_size_cluster*4 + self.num_class, 1))
#self.init_weights()
#self.bn_momentum = cfg.config['bnscheduler']['bn_momentum_init']
#self.init_bn_momentum()
#self.relation.apply(init_weights)
#self.feature_transform1.apply(init_weights)
#self.feature_transform2.apply(init_weights)
#self.proposal_generation.apply(init_weights)
def forward(self, proposal_features, end_points, data, mode='train'):
if self.cfg.config['model']['relation_module']['compute_two_losses']:
prefix = 'proposal_'
else:
prefix = ''
center = end_points[f'{prefix}center'] # (B, K, 3)
if not self.cfg.config['model']['relation_module']['use_gt_boxsize'] or mode == 'test':
### Compute predicted box size
config_dict = self.cfg.eval_config
pred_size_class = torch.argmax(end_points[f'{prefix}size_scores'], -1) # B,num_proposal
size_residuals = end_points[f'{prefix}size_residuals_normalized'] * torch.from_numpy(
config_dict['dataset_config'].mean_size_arr.astype(np.float32)).cuda().unsqueeze(0).unsqueeze(0)
pred_size_residual = torch.gather(size_residuals, 2,
pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, 1,
3)) # B,num_proposal,1,3
pred_size_residual.squeeze_(2)
mean_size_arr = torch.from_numpy(config_dict['dataset_config'].mean_size_arr.astype(np.float32)).cuda()
pred_size_class = torch.squeeze(pred_size_class.type(torch.cuda.LongTensor)) ## Problem if batch_size==1 -> change where to squeeze
temp = mean_size_arr[pred_size_class, :]
box_size = temp + pred_size_residual
else:
### Compute GT box size
# choose the cluster for each proposal based on GT class of that proposal. GT class of each proposal is the closest GT box to each predicted proposal
aggregated_vote_xyz = end_points['aggregated_vote_xyz'] #(B,K,3)
gt_center = data['center_label'] #(B,K2,3)
_, ind1, _, _ = nn_distance(aggregated_vote_xyz, gt_center)
object_assignment = ind1 # (B,K) with values in 0,1,...,K2-1
size_class_label = torch.gather(data['size_class_label'], 1, object_assignment) # select (B,K) from (B,K2), object_assignment: (B,K) with values in 0,1,...,K2-1
size_residual_label = torch.gather(data['size_residual_label'], 1, object_assignment.unsqueeze(-1).repeat(1,1,3)) # select (B,K,3) from (B,K2,3)
mean_size_label = torch.from_numpy(self.cfg.dataset_config.mean_size_arr.astype(np.float32)).to('cuda')[size_class_label] # (B,K,3)
box_size = size_residual_label + mean_size_label # (B,K,3)
# get geometric feature and feed it into PositionalEmbedding
geometric_feature = torch.cat([center, box_size], dim=-1) # (B, K, 6)
#if not self.cfg.config['model']['relation_module']['use_learned_pos_embed']:
# position_embedding = PositionalEmbedding(geometric_feature, dim_g=self.dim_g) # (B,K,K, dim_g)
#else:
# position_embedding = self.pos_embedding(geometric_feature) #
#position_embedding = self.feature_transform_pos(proposal_features) #
# position_embedding = position_embedding.transpose(1, 2).contiguous() #
position_embedding = PositionalEmbedding(geometric_feature, dim_g=self.dim_g) # (B,K,K, dim_g)
#transform proposal_features from 128-dim to appearance_feature_dim
proposal_features = self.feature_transform1(proposal_features) #(B,appearance_feature_dim, K)
proposal_features = proposal_features.transpose(1, 2).contiguous() # (B, K, appearance_feature_dim)
# proposal_features: (B,K,appearance_feature_dim)
# positional_embedding: (B,K,K,dim_g)
        if self.isDuplication:
            # The original unpacked a 2-tuple into three names, which raises ValueError;
            # we assume the duplication path feeds a copy of the appearance features
            f_a = embedding_f_a = proposal_features
        else:
            f_a = proposal_features  # f_a: (B,K,appearance_feature_dim), position_embedding: (B,K,K,dim_g)
        # concatenate the outputs of the Nr relation heads along the feature dimension
        concat = None
        for N in range(self.Nr):
            features = embedding_f_a if self.isDuplication else f_a
            head_out = self.relation[N](features, position_embedding)  # (B,K,dim_k)
            concat = head_out if concat is None else torch.cat((concat, head_out), -1)
proposal_features = self.gamma * concat + f_a # proposal_features: (B,K, appearance_feature_dim)
#proposal_features = concat
#proposal_features = f_a + concat
#proposal_features = torch.cat((f_a, concat), -1)
proposal_features = proposal_features.transpose(1,2).contiguous() #(B,appearance_feature_dim, K)
proposal_features = self.feature_transform2(proposal_features) # (B,128,K)
net = self.proposal_generation(proposal_features) # # (B, 2+3+num_heading_bin*2+num_size_cluster*4 + num_class, K)
if self.cfg.config['model']['relation_module']['compute_two_losses']:
prefix = 'last_'
else:
prefix = ''
end_points = decode_scores(net, end_points, self.num_heading_bin, self.num_size_cluster, prefix=prefix)
return end_points, proposal_features
def init_weights(self):
# initialize transformer
#for m in self.relation.parameters():
# if m.dim() > 1:
# nn.init.xavier_uniform_(m)
for m in self.feature_transform1.parameters():
if m.dim() > 1:
nn.init.xavier_uniform_(m)
for m in self.feature_transform2.parameters():
if m.dim() > 1:
nn.init.xavier_uniform_(m)
for m in self.proposal_generation.parameters():
if m.dim() > 1:
nn.init.xavier_uniform_(m)
#for m in self.prediction_heads.parameters():
# if m.dim() > 1:
# nn.init.xavier_uniform_(m)
def init_bn_momentum(self):
for m in self.modules():
if isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
m.momentum = self.bn_momentum
class RelationUnit(nn.Module):
def __init__(self, appearance_feature_dim=768,key_feature_dim = 96, geo_feature_dim = 96):
super(RelationUnit, self).__init__()
self.dim_g = geo_feature_dim
self.dim_k = key_feature_dim
self.WG = nn.Linear(geo_feature_dim, 1, bias=True)
self.WK = nn.Linear(appearance_feature_dim, key_feature_dim, bias=True)
self.WQ = nn.Linear(appearance_feature_dim, key_feature_dim, bias=True)
self.WV = nn.Linear(appearance_feature_dim, key_feature_dim, bias=True)
self.relu = nn.ReLU(inplace=True)
def forward(self, f_a, position_embedding):#f_a: (B,K,appearance_feature_dim), position_embedding: (B,K,K,dim_g)
B,K,_ = f_a.size()
w_g = self.relu(self.WG(position_embedding)) # (B,K,K,1)
w_k = self.WK(f_a) # (B,K,dim_k)
w_k = w_k.view(B,K,1,self.dim_k)
w_q = self.WQ(f_a) # (B,K,dim_k)
w_q = w_q.view(B,1,K,self.dim_k)
scaled_dot = torch.sum((w_k*w_q),-1 ) # (B,K,K). Note that 1st K is key, 2nd K is query
scaled_dot = scaled_dot / np.sqrt(self.dim_k)
w_g = w_g.view(B,K,K) # Note that 1st K is key, 2nd K is query
w_a = scaled_dot.view(B,K,K)
w_mn = torch.log(torch.clamp(w_g, min = 1e-6)) + w_a # (B,K,K)
w_mn = torch.nn.Softmax(dim=1)(w_mn) # compute softmax along key dimension
w_v = self.WV(f_a) # (B,K,dim_k)
w_mn = w_mn.view(B,K,K,1) # (B,K,K,1)
w_v = w_v.view(B,K,1,-1) # (B,K,1,dim_k)
output = w_mn*w_v # (B,K,K, dim_k)
output = torch.sum(output,1) # (B,K,dim_k)
return output
class PositionEmbeddingLearned(nn.Module):
"""
Absolute pos embedding, learned.
"""
def __init__(self, input_channel, num_pos_feats=128):
super().__init__()
self.position_embedding_head = nn.Sequential(
nn.Conv1d(input_channel, num_pos_feats, kernel_size=1),
nn.BatchNorm1d(num_pos_feats),
nn.ReLU(inplace=True),
nn.Conv1d(num_pos_feats, num_pos_feats, kernel_size=1))
def forward(self, xyz):
xyz = xyz.transpose(1, 2).contiguous()
position_embedding = self.position_embedding_head(xyz)
return position_embedding
#def init_weights(m):
# if type(m) == nn.Linear or type(m) == nn.Conv1d:
# gain = nn.init.calculate_gain('relu')
# nn.init.xavier_uniform_(m.weight, gain=gain)
# m.bias.data.fill_(0.01)
#gain = nn.init.calculate_gain('relu')
#nn.init.xavier_uniform_(m.weight, gain=gain)
#m.bias.data.fill_(0.01)
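# A minimal shape-check sketch for RelationUnit, assuming the torch/numpy
# imports at the top of this file; batch size and proposal count K below are
# illustrative values, not taken from any config.
if __name__ == '__main__':
    unit = RelationUnit(appearance_feature_dim=768, key_feature_dim=96, geo_feature_dim=96)
    f_a = torch.randn(2, 16, 768)     # (B, K, appearance_feature_dim)
    pos = torch.randn(2, 16, 16, 96)  # (B, K, K, dim_g)
    print(unit(f_a, pos).shape)       # expected: torch.Size([2, 16, 96])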
| 47.51773
| 172
| 0.592313
| 1,702
| 13,400
| 4.39953
| 0.143361
| 0.050748
| 0.048077
| 0.02938
| 0.461939
| 0.409322
| 0.363114
| 0.309028
| 0.279781
| 0.246795
| 0
| 0.025814
| 0.291716
| 13,400
| 282
| 173
| 47.517731
| 0.763144
| 0.278209
| 0
| 0.21519
| 0
| 0
| 0.050111
| 0.005827
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050633
| false
| 0
| 0.056962
| 0
| 0.14557
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e3154ae1d10762e4681a612915a4720d50696c7
| 1,760
|
py
|
Python
|
ipware/descriptor.py
|
phi1010/django-ipware
|
9d4e5f3b17e8669757ea9590e3e02580bd310634
|
[
"MIT"
] | null | null | null |
ipware/descriptor.py
|
phi1010/django-ipware
|
9d4e5f3b17e8669757ea9590e3e02580bd310634
|
[
"MIT"
] | null | null | null |
ipware/descriptor.py
|
phi1010/django-ipware
|
9d4e5f3b17e8669757ea9590e3e02580bd310634
|
[
"MIT"
] | null | null | null |
from enum import Enum, auto
from typing import List, Union, Callable
from ipaddress import IPv4Address, IPv4Network, IPv6Address, IPv6Network, ip_network, ip_address
from warnings import warn
class Order(Enum):
HEADER_APPENDED = auto()
HEADER_PREPENDED = auto()
class Header:
def __init__(self,
name: str,
                 order: Order = Order.HEADER_APPENDED,
custom_parser: Callable[[str], Union[IPv4Address, IPv6Address]] = None
):
self.custom_parser = custom_parser
self.order = order
# header field names are case insensitive
# https://datatracker.ietf.org/doc/html/rfc7230#section-3.2
        # we convert them to uppercase now to avoid different parts of the code matching differently;
        # if this ever breaks anything, the rest of the code was already broken.
self.uppercase_name = name.upper()
class ReverseProxy:
def __init__(self,
header_added: Header,
*ip_addresses: Union[str, IPv4Address, IPv4Network, IPv6Address, IPv6Network],
):
"""
:param ip_addresses: You can use anything that ipaddress.ip_network accepts, e.g. `127.0.0.0/8` or `::1`
        :param header_added: The header this host adds. Only one header per reverse proxy is supported.
"""
if not ip_addresses:
warn("A reverse proxy configuration without IP addresses will be ignored.")
self.header_added = header_added
ip_networks = []
for address in ip_addresses:
# Addresses will be converted to /32 resp. /128 networks, matching exactly one IP
ip_networks.append(ip_network(address))
self.ip_networks = ip_networks
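# A minimal usage sketch; the header name and proxy addresses below are
# illustrative assumptions, not values prescribed by this module.
if __name__ == '__main__':
    balancer = ReverseProxy(Header('X-Forwarded-For'), '10.0.0.0/8', '::1')
    print(balancer.header_added.uppercase_name)  # X-FORWARDED-FOR
    print(balancer.ip_networks)  # [IPv4Network('10.0.0.0/8'), IPv6Network('::1/128')]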
| 38.26087
| 116
| 0.651136
| 215
| 1,760
| 5.186047
| 0.52093
| 0.049327
| 0.059193
| 0.078924
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0.275
| 1,760
| 45
| 117
| 39.111111
| 0.851097
| 0.306818
| 0
| 0.142857
| 0
| 0
| 0.056636
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0
| 0.392857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e31ecc86ddefaf67265db380dc7eba40617c43e
| 2,333
|
py
|
Python
|
locs/models/anisotropic_filter.py
|
mkofinas/locs
|
4cb0ab9e989ebfee42d1d2850bdf3360336b5c1c
|
[
"MIT"
] | 16
|
2021-11-04T07:57:58.000Z
|
2022-03-01T17:45:32.000Z
|
locs/models/anisotropic_filter.py
|
mkofinas/locs
|
4cb0ab9e989ebfee42d1d2850bdf3360336b5c1c
|
[
"MIT"
] | null | null | null |
locs/models/anisotropic_filter.py
|
mkofinas/locs
|
4cb0ab9e989ebfee42d1d2850bdf3360336b5c1c
|
[
"MIT"
] | null | null | null |
from torch import nn
import torch.nn.functional as F
from locs.models.activations import ACTIVATIONS
class AnisotropicEdgeFilter(nn.Module):
def __init__(self, in_size, pos_size, hidden_size, dummy_size, out_size,
act='elu', **kwargs):
super().__init__()
self.num_relative_features = in_size
self.out_size = out_size
self._act = act
self.edge_filter = nn.Sequential(
nn.Linear(pos_size, hidden_size),
ACTIVATIONS[act](),
nn.Linear(hidden_size, self.num_relative_features * out_size),
)
self.init_weights()
def init_weights(self):
if self._act == 'elu':
gain = nn.init.calculate_gain('relu')
else:
gain = nn.init.calculate_gain(self._act)
nn.init.orthogonal_(self.edge_filter[0].weight, gain=gain)
nn.init.orthogonal_(self.edge_filter[2].weight)
def forward(self, edge_attr, edge_pos):
edge_weight = self.edge_filter(edge_pos)
edge_weight = edge_weight.reshape(
edge_weight.shape[:-1] + tuple([self.num_relative_features, -1]))
edge_attr = (edge_attr.unsqueeze(-2) @ edge_weight).squeeze(-2)
return edge_attr
class MLPEdgeFilter(nn.Module):
"""2-layer MLP, follows same template as AnisotropicEdgeFilter"""
def __init__(self, in_size, pos_size, hidden_size, bottleneck_size,
out_size, do_prob=0.0):
super().__init__()
self.num_relative_features = in_size
self.out_size = out_size
self.hidden_size = bottleneck_size
self.lin1 = nn.Linear(self.num_relative_features, bottleneck_size)
self.drop1 = nn.Dropout(p=do_prob)
self.lin2 = nn.Linear(bottleneck_size, out_size)
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight.data)
m.bias.data.fill_(0.1)
elif isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, edge_attr, edge_pos):
edge_attr = F.relu(self.lin1(edge_attr))
edge_attr = self.drop1(edge_attr)
edge_attr = F.relu(self.lin2(edge_attr))
return edge_attr
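# A minimal shape-check sketch, assuming ACTIVATIONS contains an 'elu' entry
# as the default argument suggests; all sizes below are illustrative.
if __name__ == '__main__':
    import torch
    filt = AnisotropicEdgeFilter(in_size=4, pos_size=3, hidden_size=32,
                                 dummy_size=0, out_size=8)
    edge_attr = torch.randn(5, 20, 4)  # (batch, num_edges, in_size)
    edge_pos = torch.randn(5, 20, 3)   # (batch, num_edges, pos_size)
    print(filt(edge_attr, edge_pos).shape)  # expected: torch.Size([5, 20, 8])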
| 35.348485
| 77
| 0.629233
| 308
| 2,333
| 4.464286
| 0.253247
| 0.069818
| 0.04
| 0.083636
| 0.341818
| 0.286545
| 0.242909
| 0.242909
| 0.194909
| 0.087273
| 0
| 0.011021
| 0.261037
| 2,333
| 65
| 78
| 35.892308
| 0.786543
| 0.025289
| 0
| 0.264151
| 0
| 0
| 0.004409
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113208
| false
| 0
| 0.056604
| 0
| 0.245283
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e32180523c62ff4dfee0a5445151998ee1a7804
| 1,798
|
py
|
Python
|
src/data_files/sample_data.py
|
gorried/hexgraph
|
b179e2fe0f8afc465ce92eac02f3cc2c4d1ac38e
|
[
"MIT"
] | null | null | null |
src/data_files/sample_data.py
|
gorried/hexgraph
|
b179e2fe0f8afc465ce92eac02f3cc2c4d1ac38e
|
[
"MIT"
] | null | null | null |
src/data_files/sample_data.py
|
gorried/hexgraph
|
b179e2fe0f8afc465ce92eac02f3cc2c4d1ac38e
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
"""
Daniel Gorrie
Large dataset sampler
"""
import random
import os
from os import listdir
from os.path import isfile, join
# Constants
INPUT_FILE = 'train.features'
INPUT_FILE_SIZE = 8352136
OUTPUT_FILE = 'train_small.features'
SAMPLE_SIZE = 110000
INPUT_LABEL_DIR = 'labels/'
OUTPUT_LABEL_DIR = 'labels_small/'
def main():
random.seed()
  # Generate SAMPLE_SIZE distinct random line indices in range [0, INPUT_FILE_SIZE)
  # and iterate over the input file, grabbing the lines at those indices.
  indices = dict.fromkeys([random.randint(0, INPUT_FILE_SIZE - 1) for _ in range(SAMPLE_SIZE)])
  while len(indices) < SAMPLE_SIZE:
    indices[random.randint(0, INPUT_FILE_SIZE - 1)] = 0
# Grab the proper training data
with open(OUTPUT_FILE, 'w') as out:
with open(INPUT_FILE, 'r') as f:
line_count = 0
for line in f:
if line_count in indices:
# append the line to the output file
out.write(line)
line_count += 1
# Grab the label files
label_files = [ f for f in listdir(INPUT_LABEL_DIR) if isfile(join(INPUT_LABEL_DIR,f)) ]
# make a new directory
d = os.path.dirname(OUTPUT_LABEL_DIR)
if not os.path.exists(d):
os.makedirs(d)
# put versions of all the label files in the output directory
for label_file in label_files:
with open(INPUT_LABEL_DIR + label_file, 'r') as f:
      with open(OUTPUT_LABEL_DIR + label_file, 'w') as out:
line_count = 0
for line in f:
if line_count in indices:
# append the line to the output file
out.write(line)
line_count += 1
if __name__ == '__main__':
main()
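# Note: on Python 3 the randint-and-top-up loop above can be replaced by a
# single draw of SAMPLE_SIZE distinct indices, e.g.:
#
#   indices = set(random.sample(range(INPUT_FILE_SIZE), SAMPLE_SIZE))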
| 24.630137
| 94
| 0.613459
| 251
| 1,798
| 4.191235
| 0.342629
| 0.059886
| 0.04943
| 0.036122
| 0.222433
| 0.222433
| 0.171103
| 0.171103
| 0.171103
| 0.171103
| 0
| 0.016813
| 0.305339
| 1,798
| 72
| 95
| 24.972222
| 0.82546
| 0.215795
| 0
| 0.277778
| 0
| 0
| 0.047482
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.111111
| 0
| 0.138889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e323ee929773b5d99e66e15ebdc6631d0480bf5
| 1,581
|
py
|
Python
|
utils/uniprot.py
|
glycosciences/covid-19-Annotations-on-Structures
|
3337bc5aec0ba79287ab0fd8c4763b15a4783378
|
[
"MIT"
] | 2
|
2020-04-06T18:12:47.000Z
|
2021-08-01T20:17:59.000Z
|
utils/uniprot.py
|
glycosciences/covid-19-Annotations-on-Structures
|
3337bc5aec0ba79287ab0fd8c4763b15a4783378
|
[
"MIT"
] | 20
|
2020-04-02T18:02:14.000Z
|
2020-08-10T12:29:46.000Z
|
utils/uniprot.py
|
glycosciences/covid-19-Annotations-on-Structures
|
3337bc5aec0ba79287ab0fd8c4763b15a4783378
|
[
"MIT"
] | 9
|
2020-04-06T12:39:02.000Z
|
2021-08-01T20:18:00.000Z
|
import re
import urllib.request
"""
Collection of handy functions related to uniprot. Potential reimplementations
of code that would be available in various packages with the goal of keeping
dependencies at a minimum.
"""
def valid_uniprot_ac_pattern(uniprot_ac):
"""
Checks whether Uniprot AC is formally correct according to
https://www.uniprot.org/help/accession_numbers
This is no check whether it actually exists.
:param uniprot_ac: Accession code to be checked
"""
    ac_pat = "[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2}"
    # fullmatch anchors the pattern, so e.g. "P12345XYZ" is rejected
    return re.fullmatch(ac_pat, uniprot_ac) is not None
def seq_from_ac(uniprot_ac):
"""
Fetches raw sequence string for given uniprot accession code
:param uniprot_ac: Accession code for which you want the sequence
"""
if not valid_uniprot_ac_pattern(uniprot_ac):
raise RuntimeError("Uniprot AC does not look valid")
data = None
try:
# that's the default uniprot access
url = "https://www.uniprot.org/uniprot/%s.fasta" % uniprot_ac
with urllib.request.urlopen(url) as response:
data = response.readlines()
    except Exception:
# this is only temporary, as SARS-CoV2 is not yet in uniprot
url = (
"https://www.ebi.ac.uk/uniprot/api/covid-19/uniprotkb/accession/%s.fasta"
% (uniprot_ac)
)
with urllib.request.urlopen(url) as response:
data = response.readlines()
return "".join(line.decode().strip() for line in data[1:])
| 29.830189
| 85
| 0.655281
| 229
| 1,581
| 4.441048
| 0.49345
| 0.106195
| 0.00885
| 0.041298
| 0.255654
| 0.202557
| 0.143559
| 0.143559
| 0.143559
| 0.143559
| 0
| 0.016584
| 0.237192
| 1,581
| 52
| 86
| 30.403846
| 0.8267
| 0.268185
| 0
| 0.166667
| 0
| 0.083333
| 0.227421
| 0.073993
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e3b40be7c29c65a9fd22f72903754a1e504955c
| 5,643
|
py
|
Python
|
structures/solution/bar.py
|
EladSharony/Mechanics
|
078f97bea84114fc1db6fe9700b92b96b18a0d5e
|
[
"MIT"
] | 24
|
2021-02-23T13:53:14.000Z
|
2022-03-29T16:40:56.000Z
|
structures/solution/bar.py
|
EladSharony/Mechanics
|
078f97bea84114fc1db6fe9700b92b96b18a0d5e
|
[
"MIT"
] | 2
|
2021-04-23T12:30:32.000Z
|
2022-03-31T10:51:12.000Z
|
structures/solution/bar.py
|
EladSharony/Mechanics
|
078f97bea84114fc1db6fe9700b92b96b18a0d5e
|
[
"MIT"
] | 12
|
2021-04-11T20:44:03.000Z
|
2022-03-30T19:23:58.000Z
|
from geom2d import Segment, make_vector_between
from structures.model.bar import StrBar
from .node import StrNodeSolution
class StrBarSolution:
"""
A truss structure bar with the solution values included.
This class is a decorator of the original `StrBar` class that's
linked to the solution nodes, that include their displacement
vectors. It's thanks to the solution displaced nodes that we
can obtain the stress and strain values for the bar.
"""
def __init__(
self,
original_bar: StrBar,
start_node: StrNodeSolution,
end_node: StrNodeSolution
):
if original_bar.start_node.id != start_node.id:
raise ValueError('Wrong start node')
if original_bar.end_node.id != end_node.id:
raise ValueError('Wrong end node')
self.__original_bar = original_bar
self.start_node = start_node
self.end_node = end_node
@property
def id(self):
"""
The original bar's identifier.
:return: id for the bar
"""
return self.__original_bar.id
@property
def cross_section(self):
"""
The original bar's cross section area value.
:return: the cross section
"""
return self.__original_bar.cross_section
@property
def young_mod(self):
"""
The original bar's Young modulus (or elasticity modulus).
:return: the Young modulus
"""
return self.__original_bar.young_mod
@property
def original_geometry(self):
"""
The original bar's geometry described by a line segment.
:return: the bar's geometry
"""
return self.__original_bar.geometry
@property
def final_geometry(self):
"""
The bar's geometry, described by a line segment, after the
computed displacements are applied.
:return: the solution bar's geometry
"""
return Segment(
self.start_node.displaced_pos,
self.end_node.displaced_pos
)
@property
def original_length(self):
"""
The original bar's length. This is, the distance between
its nodes.
:return: the bar's length
"""
return self.original_geometry.length
@property
def final_length(self):
"""
The bar's length after the computed displacements are
applied. This is the distance between the solution nodes.
:return: the solution bar's length
"""
return self.final_geometry.length
@property
def elongation(self):
"""
The difference between the solution bar's length and the
original bar's length.
A positive elongation means the bar has elongated (due to
a tensile stress) and a negative elongation means the bar
has shortened (due to a compressive stress).
:return: the bar's elongation
"""
return self.final_length - self.original_length
@property
def strain(self):
"""
The bar's elongation per unit of length. This is a
unit-less quantity.
:return: the bar's strain
"""
return self.elongation / self.original_length
@property
def stress(self):
"""
The bar's axial force per unit of cross section area.
Using Hooke's law, the stress can be computed as the
product of the bar's strain and Young modulus.
        :return: the bar's stress
"""
return self.young_mod * self.strain
@property
def internal_force_value(self):
"""
The bar's internal force.
:return: the bar's internal force
"""
return self.stress * self.cross_section
def force_in_node(self, node: StrNodeSolution):
"""
        Returns the force this bar exerts on one of its two nodes.
        The passed in node needs to be one of the bar's end nodes,
        otherwise, this method will throw a `ValueError`.
:param node: one of the bar's end nodes
:return: force exerted by the bar on the given node
"""
if node is self.start_node:
return make_vector_between(
self.end_node.displaced_pos,
self.start_node.displaced_pos
).with_length(
self.internal_force_value
)
elif node is self.end_node:
return make_vector_between(
self.start_node.displaced_pos,
self.end_node.displaced_pos
).with_length(
self.internal_force_value
)
raise ValueError(
f'Bar {self.id} does not know about node {node.id}'
)
def has_node(self, node: StrNodeSolution):
"""
Tests whether the given `node` is one of this bar's end
nodes.
:param node: structure node
:return: is the node connected with this bar?
"""
return node is self.start_node or node is self.end_node
def final_geometry_scaling_displacement(self, scale: float):
"""
Computes the geometry of the bar after the displacements
of its nodes have been applied with a given scale factor.
This scaled geometry can be used for drawing the solution
diagram.
:param scale: used to scale the displacements
:return: the solution bar's final geometry scaled
"""
return Segment(
self.start_node.displaced_pos_scaled(scale),
self.end_node.displaced_pos_scaled(scale)
)
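# A numeric sketch of the elongation/strain/stress chain above, reduced to
# plain Hooke's-law arithmetic; all figures are illustrative, not from the
# library.
if __name__ == '__main__':
    original_length, final_length = 2.0, 2.002   # m
    cross_section, young_mod = 4e-4, 2.1e11      # m^2, Pa (steel-like bar)
    elongation = final_length - original_length  # 0.002 m (positive: tension)
    strain = elongation / original_length        # 1e-3, unit-less
    stress = young_mod * strain                  # 2.1e8 Pa
    force = stress * cross_section               # 8.4e4 N
    print(elongation, strain, stress, force)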
| 28.356784
| 67
| 0.608187
| 703
| 5,643
| 4.753912
| 0.200569
| 0.028725
| 0.027229
| 0.02693
| 0.327947
| 0.145123
| 0.088869
| 0.073609
| 0.052663
| 0.028725
| 0
| 0.000263
| 0.325359
| 5,643
| 198
| 68
| 28.5
| 0.877594
| 0.39837
| 0
| 0.294872
| 0
| 0
| 0.028088
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.192308
| false
| 0
| 0.038462
| 0
| 0.435897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e3df3a417c99ed4ce96f722ac39d7ce01ef8e82
| 219
|
py
|
Python
|
baekjoon/1436/nth_666.py
|
ucyang/AlgoEx
|
465c88f04b9449c06ee5c9a684ded5aba8ccf399
|
[
"MIT"
] | null | null | null |
baekjoon/1436/nth_666.py
|
ucyang/AlgoEx
|
465c88f04b9449c06ee5c9a684ded5aba8ccf399
|
[
"MIT"
] | null | null | null |
baekjoon/1436/nth_666.py
|
ucyang/AlgoEx
|
465c88f04b9449c06ee5c9a684ded5aba8ccf399
|
[
"MIT"
] | null | null | null |
import sys
input = lambda: sys.stdin.readline().rstrip()
n = int(input())
i = 666
c = 0
while True:
    if "666" in str(i):
c += 1
if c == n:
print(i)
break
i += 1
| 14.6
| 45
| 0.452055
| 33
| 219
| 3
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073529
| 0.378995
| 219
| 14
| 46
| 15.642857
| 0.654412
| 0
| 0
| 0
| 0
| 0
| 0.013699
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d62368843928d090cd812f1e7a939bf13155d3f
| 988
|
py
|
Python
|
tests/mock_urllib.py
|
cedricduriau/PackagerBuddy
|
3eda40cd1b72f030e4f02e38af452e6377b20148
|
[
"MIT"
] | 1
|
2019-01-10T11:15:40.000Z
|
2019-01-10T11:15:40.000Z
|
tests/mock_urllib.py
|
cedricduriau/PackagerBuddy
|
3eda40cd1b72f030e4f02e38af452e6377b20148
|
[
"MIT"
] | 6
|
2019-01-06T16:56:22.000Z
|
2019-01-07T01:43:54.000Z
|
tests/mock_urllib.py
|
cedricduriau/PackagerBuddy
|
3eda40cd1b72f030e4f02e38af452e6377b20148
|
[
"MIT"
] | null | null | null |
# stdlib modules
try:
from urllib.response import addinfourl
from urllib.error import HTTPError
from urllib.request import HTTPHandler
from io import StringIO
except ImportError:
from urllib2 import addinfourl, HTTPError, HTTPHandler
from StringIO import StringIO
def mock_response(req):
url = req.get_full_url()
if url.startswith("http://valid"):
resp = addinfourl(StringIO("valid"), "valid", url)
resp.code = 200
resp.msg = "OK"
resp.headers = {"content-disposition": "filename=valid.tar"}
return resp
elif url.startswith("http://filename"):
resp = addinfourl(StringIO("filename"), "filename", url)
resp.code = 200
resp.msg = "OK"
resp.headers = {}
return resp
elif url.startswith("http://invalid"):
raise HTTPError(url, 404, "invalid", "", StringIO())
class MockHTTPHandler(HTTPHandler):
def http_open(self, req):
return mock_response(req)
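# A minimal usage sketch: installing the mock handler into an opener so test
# code never touches the network; build_opener is the standard library API.
if __name__ == '__main__':
    try:
        from urllib.request import build_opener
    except ImportError:
        from urllib2 import build_opener
    opener = build_opener(MockHTTPHandler)
    resp = opener.open("http://valid")
    print(resp.code, resp.headers)  # 200 {'content-disposition': 'filename=valid.tar'}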
| 29.058824
| 68
| 0.648785
| 113
| 988
| 5.628319
| 0.40708
| 0.04717
| 0.080189
| 0.044025
| 0.204403
| 0.204403
| 0.106918
| 0.106918
| 0.106918
| 0
| 0
| 0.013263
| 0.236842
| 988
| 33
| 69
| 29.939394
| 0.830239
| 0.01417
| 0
| 0.222222
| 0
| 0
| 0.118313
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.259259
| 0.037037
| 0.481481
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d6381be8993257224fb80b97034c3a236987a13
| 2,192
|
py
|
Python
|
slickbird/web/hcollection.py
|
lpenz/slickbird
|
1ad6c615be7edbc0c8c5abd97373058abea3d794
|
[
"Apache-2.0"
] | null | null | null |
slickbird/web/hcollection.py
|
lpenz/slickbird
|
1ad6c615be7edbc0c8c5abd97373058abea3d794
|
[
"Apache-2.0"
] | null | null | null |
slickbird/web/hcollection.py
|
lpenz/slickbird
|
1ad6c615be7edbc0c8c5abd97373058abea3d794
|
[
"Apache-2.0"
] | null | null | null |
'''Slickbird collection handler'''
import logging
import json
from tornado.web import URLSpec
import tornado.web
import tornado.gen
import tornado.ioloop
from slickbird import datparse
import slickbird.orm as orm
import slickbird
from slickbird.web import hbase
def _log():
if not _log.logger:
_log.logger = logging.getLogger(__name__)
return _log.logger
_log.logger = None
# Add handler: ###############################################################
class CollectionAddHandler(hbase.PageHandler):
name = 'collection_add'
@tornado.gen.coroutine
def collectionadd(self, cadder, dat):
for gn, gd in dat['games'].items():
cadder.game_add(gn, gd)
yield tornado.gen.moment
cadder.done()
self.settings['session'].commit()
@tornado.gen.coroutine
def post(self):
name = self.get_argument('name')
directory = self.get_argument('directory')
filename = self.request.files['datfile'][0]['filename']
dat = datparse.parse(
datstr=self.request.files['datfile'][0]['body'].decode('utf-8'))
cadder = slickbird.CollectionAdder(
self.settings['session'], self.settings['home'],
name, directory, filename, dat)
self.redirect(self.reverse_url('game_lst', cadder.name))
tornado.ioloop.IOLoop.current() \
.spawn_callback(self.collectionadd, cadder, dat)
# API: #######################################################################
class CollectionListDataHandler(tornado.web.RequestHandler):
def get(self):
self.write(json.dumps([c.as_dict()
for c in self.settings['session'].query(orm.Collection)]))
# Install: ###################################################################
def install(app):
app.add_handlers('.*', [
URLSpec(r'/collection/add',
CollectionAddHandler,
name='collection_add'),
URLSpec(r'/collection/list',
hbase.genPageHandler('collection_lst'),
name='collection_lst'),
URLSpec(r'/api/collection_lst.json',
CollectionListDataHandler,
name='api_collection_lst'),
])
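# A hedged wiring sketch: how install() would typically be applied to a
# Tornado application. The 'session' and 'home' settings are assumptions
# inferred from the handlers above, not documented requirements.
#
# app = tornado.web.Application(session=orm_session, home=home_dir)
# install(app)
# app.listen(8888)
# tornado.ioloop.IOLoop.current().start()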
| 28.842105
| 78
| 0.570255
| 217
| 2,192
| 5.64977
| 0.391705
| 0.029364
| 0.046493
| 0.029364
| 0.039152
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001766
| 0.224909
| 2,192
| 75
| 79
| 29.226667
| 0.719835
| 0.02646
| 0
| 0.039216
| 0
| 0
| 0.110707
| 0.012474
| 0
| 0
| 0
| 0
| 0
| 1
| 0.098039
| false
| 0
| 0.156863
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d664e109ebe34ba1e2952a24047d4157da5bc86
| 715
|
py
|
Python
|
connected_devices.py
|
savlakaran/bluetooth-profile-manager
|
a485560cecd6668241539d7d7fa96756a1a8dc9f
|
[
"MIT"
] | null | null | null |
connected_devices.py
|
savlakaran/bluetooth-profile-manager
|
a485560cecd6668241539d7d7fa96756a1a8dc9f
|
[
"MIT"
] | null | null | null |
connected_devices.py
|
savlakaran/bluetooth-profile-manager
|
a485560cecd6668241539d7d7fa96756a1a8dc9f
|
[
"MIT"
] | null | null | null |
import pydbus
bus = pydbus.SystemBus()
adapter = bus.get('org.bluez', '/org/bluez/hci0')
mngr = bus.get('org.bluez', '/')
def list_connected_devices():
connected = []
mngd_objs = mngr.GetManagedObjects()
for path in mngd_objs:
con_state = mngd_objs[path].get('org.bluez.Device1', {}).get('Connected', False)
if con_state:
addr = mngd_objs[path].get('org.bluez.Device1', {}).get('Address')
name = mngd_objs[path].get('org.bluez.Device1', {}).get('Name')
connected.append({'name': name, 'address': addr})
return connected
if __name__ == '__main__':
connected = list_connected_devices()
for item in connected:
print(item['name'])
| 31.086957
| 88
| 0.625175
| 89
| 715
| 4.808989
| 0.370787
| 0.11215
| 0.128505
| 0.10514
| 0.231308
| 0.231308
| 0.231308
| 0.231308
| 0
| 0
| 0
| 0.00703
| 0.204196
| 715
| 23
| 89
| 31.086957
| 0.745167
| 0
| 0
| 0
| 0
| 0
| 0.178771
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.166667
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d66606d079a0a649bc4ef6dda1629c7be67e773
| 5,079
|
py
|
Python
|
etl_base/etl_base/dags/acme/operators/file_operators.py
|
buckylee2019/sqlg-airflow
|
37610a23b99bea8d9fdc8b066a01736ff2ff0c9d
|
[
"Apache-2.0"
] | null | null | null |
etl_base/etl_base/dags/acme/operators/file_operators.py
|
buckylee2019/sqlg-airflow
|
37610a23b99bea8d9fdc8b066a01736ff2ff0c9d
|
[
"Apache-2.0"
] | null | null | null |
etl_base/etl_base/dags/acme/operators/file_operators.py
|
buckylee2019/sqlg-airflow
|
37610a23b99bea8d9fdc8b066a01736ff2ff0c9d
|
[
"Apache-2.0"
] | 1
|
2022-03-10T03:47:35.000Z
|
2022-03-10T03:47:35.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, fnmatch
import logging
from shutil import copyfile
from airflow.contrib.hooks.fs_hook import FSHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from datetime import datetime
# You can also make this format a parameter in the Operator, for example
# if you expect that you work with different intervals than "@daily".
# Then you can introduce time components to have a finer grain for file storage.
DATE_FORMAT = '%Y%m%d'
class FileToPredictableLocationOperator(BaseOperator):
"""
Picks up a file from somewhere and lands this in a predictable location elsewhere
"""
template_fields = ('file_mask',)
@apply_defaults
def __init__(self,
src_conn_id,
dst_conn_id,
file_mask,
*args,
**kwargs):
"""
:param src_conn_id: Hook with a conn id that points to the source directory.
:type src_conn_id: string
:param dst_conn_id: Hook with a conn id that points to the destination directory.
:type dst_conn_id: string
"""
super(FileToPredictableLocationOperator, self).__init__(*args, **kwargs)
self.src_conn_id = src_conn_id
self.dst_conn_id = dst_conn_id
self.file_mask = file_mask
def execute(self, context):
"""
Picks up all files from a source directory and dumps them into a root directory system,
organized by dagid, taskid and execution_date
"""
execution_date = context['execution_date'].strftime(DATE_FORMAT)
src_hook = FSHook(conn_id=self.src_conn_id)
source_dir = src_hook.get_path()
dest_hook = FSHook(conn_id=self.dst_conn_id)
dest_root_dir = dest_hook.get_path()
dag_id = self.dag.dag_id
task_id = self.task_id
logging.info("Now searching for files like {0} in {1}".format(self.file_mask, source_dir))
file_names = fnmatch.filter(os.listdir(source_dir), self.file_mask)
for file_name in file_names:
full_path = os.path.join(source_dir, file_name)
dest_dir = os.path.join(dest_root_dir, dag_id, task_id, execution_date)
logging.info("Now creating path structure {0}".format(dest_dir))
            if not os.path.isdir(dest_dir):
                os.makedirs(dest_dir)
dest_file_name = os.path.join(dest_dir, os.path.basename(file_name))
logging.info("Now moving {0} to {1}".format(full_path, dest_file_name))
copyfile(full_path, dest_file_name)
class PredictableLocationToFinalLocationOperator(BaseOperator):
"""
Picks up a file from predictable location storage and loads/transfers the results to
a target system (in this case another directory, but it could be anywhere).
"""
@apply_defaults
def __init__(self,
src_conn_id,
dst_conn_id,
src_task_id,
*args,
**kwargs):
"""
:param src_conn_id: Hook with a conn id that points to the source directory.
:type src_conn_id: string
:param dst_conn_id: Hook with a conn id that points to the destination directory.
:type dst_conn_id: string
:param src_task_id: Source task that produced the file of interest
:type src_task_id: string
"""
super(PredictableLocationToFinalLocationOperator, self).__init__(*args, **kwargs)
self.src_conn_id = src_conn_id
self.dst_conn_id = dst_conn_id
self.src_task_id = src_task_id
def execute(self, context):
"""
Picks up all files from a source directory and dumps them into a root directory system,
organized by dagid, taskid and execution_date
"""
execution_date = context['execution_date'].strftime(DATE_FORMAT)
src_hook = FSHook(conn_id=self.src_conn_id)
dest_hook = FSHook(conn_id=self.dst_conn_id)
dest_dir = dest_hook.get_path()
dag_id = self.dag.dag_id
source_dir = os.path.join(src_hook.get_path(), dag_id, self.src_task_id, execution_date)
if os.path.exists(source_dir):
for file_name in os.listdir(source_dir):
full_path = os.path.join(source_dir, file_name)
dest_file_name = os.path.join(dest_hook.get_path(), file_name)
logging.info("Now moving {0} to final destination {1}".format(full_path, dest_file_name))
copyfile(full_path, dest_file_name)
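# A hedged wiring sketch for the two operators above; the DAG id, connection
# ids and file mask are illustrative assumptions.
#
# from airflow import DAG
# from datetime import datetime
#
# dag = DAG('acme_file_flow', start_date=datetime(2018, 1, 1),
#           schedule_interval='@daily')
# pickup = FileToPredictableLocationOperator(
#     task_id='pickup', src_conn_id='fs_incoming', dst_conn_id='fs_staging',
#     file_mask='*.csv', dag=dag)
# deliver = PredictableLocationToFinalLocationOperator(
#     task_id='deliver', src_conn_id='fs_staging', dst_conn_id='fs_final',
#     src_task_id='pickup', dag=dag)
# pickup >> deliver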
| 40.632
| 105
| 0.669817
| 713
| 5,079
| 4.535764
| 0.26648
| 0.059369
| 0.033395
| 0.024119
| 0.470006
| 0.46444
| 0.44094
| 0.426098
| 0.406926
| 0.406926
| 0
| 0.003159
| 0.252018
| 5,079
| 124
| 106
| 40.959677
| 0.848118
| 0.351644
| 0
| 0.46875
| 0
| 0
| 0.056333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.109375
| 0
| 0.21875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d675985cd1e3fa2d6a896298711a9c21776ae26
| 7,052
|
py
|
Python
|
pyllusion/image/utilities.py
|
RebeccaHirst/Pyllusion
|
9944076e38bced0eabb49c607482b71809150bdb
|
[
"MIT"
] | null | null | null |
pyllusion/image/utilities.py
|
RebeccaHirst/Pyllusion
|
9944076e38bced0eabb49c607482b71809150bdb
|
[
"MIT"
] | null | null | null |
pyllusion/image/utilities.py
|
RebeccaHirst/Pyllusion
|
9944076e38bced0eabb49c607482b71809150bdb
|
[
"MIT"
] | null | null | null |
import numpy as np
import PIL.ImageColor, PIL.ImageFont
from .rescale import rescale
def _rgb(x):
"""Convert 0-1 values to RGB 0-255 values.
"""
return rescale(x, to=[0, 255], scale=[0, 1])
def _color(color="black", alpha=1, mode="RGB"):
"""Sanitize color to RGB(A) format.
"""
if isinstance(color, str):
if color == "transparent":
return (0, 0, 0, 0)
color = PIL.ImageColor.getrgb(color)
elif isinstance(color, (int, np.integer)):
color = tuple([color] * 3)
elif isinstance(color, (list, np.ndarray)):
color = tuple(color)
# Add transparency
if mode == "RGBA":
if len(color) == 3:
            color = color + tuple([int(_rgb(alpha))])
return color
def _coord_circle(image, diameter=0.1, x=0, y=0, unit="grid", method="pil"):
"""Get circle coordinates
Examples
--------
>>> import pyllusion as ill
>>> import PIL.Image, PIL.ImageDraw
>>>
>>> image = PIL.Image.new('RGB', (500, 400), color = "white")
>>> draw = PIL.ImageDraw.Draw(image, 'RGBA')
>>>
>>> coord = _coord_circle(image, diameter=1, x=0, y=0)
>>> draw.ellipse(coord, fill="red", width=0)
>>> draw.ellipse(_coord_circle(image, diameter=1.5, x=0, y=0), outline="blue")
>>> image #doctest: +ELLIPSIS
<PIL.Image.Image ...>
"""
if unit == "grid":
# Get coordinates in pixels
width, height = image.size
        x = int(rescale(x, to=[0, width], scale=[-1, 1]))
        if method == "pil":
            y = int(rescale(-y, to=[0, height], scale=[-1, 1]))
        elif method == "psychopy":
            y = int(rescale(y, to=[0, height], scale=[-1, 1]))
        # Convert diameter based on height
        diameter = int(rescale(diameter, to=[0, height], scale=[0, 2]))
diameter = 2 if diameter < 2 else diameter
radius = diameter / 2
# Choose diameter and centre
coord = [(x - radius, y - radius), (x + radius, y + radius)]
if method == "pil":
return coord
elif method == "psychopy":
return radius, x, y
def _coord_text(
image, text="hello", size="auto", x=0, y=0, font="arial.ttf", unit="grid",
method="pil"
):
"""Get text coordinates
Examples
--------
>>> import pyllusion as ill
>>> import PIL.Image, PIL.ImageDraw
>>>
>>> image = PIL.Image.new('RGB', (500, 500), color = "white")
>>> draw = PIL.ImageDraw.Draw(image, 'RGB')
>>>
>>> coord, font = _coord_text(image, size="auto", x=-0.5, y=0.5) #doctest: +SKIP
>>> draw.text(coord, text="hello", fill="black", font=font) #doctest: +SKIP
>>> image #doctest: +SKIP
"""
if unit == "grid":
# Get coordinates in pixels
width, height = image.size
        x = int(rescale(x, to=[0, width], scale=[-1, 1]))
        if method == "pil":
            y = int(rescale(-y, to=[0, height], scale=[-1, 1]))
        elif method == "psychopy":
            y = int(rescale(y, to=[0, height], scale=[-1, 1]))
if size == "auto":
# Initialize values
size, top_left_x, top_left_y, right_x, bottom_y = 0, width, height, 0, 0
# Loop until max size is reached
while (
top_left_x > 0.01 * width
and right_x < 0.99 * width
and top_left_y > 0.01 * height
and bottom_y < 0.99 * height
):
loaded_font = PIL.ImageFont.truetype(font, size)
text_width, text_height = loaded_font.getsize(text)
top_left_x = x - (text_width / 2)
top_left_y = y - (text_height / 2)
right_x = top_left_x + text_width
bottom_y = top_left_y + text_height
size += 1 # Increment text size
else:
loaded_font = PIL.ImageFont.truetype(font, size)
text_width, text_height = loaded_font.getsize(text)
top_left_x = x - (text_width / 2)
top_left_y = y - (text_height / 2)
coord = top_left_x, top_left_y
return coord, loaded_font, x, y
def _coord_line(
image=None,
x=0,
y=0,
x1=None,
y1=None,
x2=None,
y2=None,
length=None,
angle=None,
adjust_width=False,
adjust_height=False,
method="pil",
):
"""
"""
# Center to None if x1 entered
x = None if x1 is not None else x
y = None if y1 is not None else y
# Get missing parameters
if x is None and y is None:
if x2 is None and y2 is None:
x2, y2 = _coord_line_x2y2(x1, y1, length, angle)
if length is None and angle is None:
length, angle = _coord_line_lengthangle(x1, y1, x2, y2)
else:
if x2 is None and y2 is None:
x2, y2 = _coord_line_x2y2(x, y, length / 2, angle)
if length is None and angle is None:
length, angle = _coord_line_lengthangle(x, y, x2, y2)
length = length * 2
x1, y1 = _coord_line_x2y2(x2, y2, length, 180 + angle)
# Get coordinates in pixels
if image is not None:
width, height = image.size
if adjust_width is True:
x1, x2 = x1 * (height / width), x2 * (height / width)
if adjust_height is True:
y1, y2 = y1 * (width / height), y2 * (width / height)
        x1 = int(rescale(x1, to=[0, width], scale=[-1, 1]))
        x2 = int(rescale(x2, to=[0, width], scale=[-1, 1]))
        if method == "pil":
            y1 = int(rescale(-y1, to=[0, height], scale=[-1, 1]))
            y2 = int(rescale(-y2, to=[0, height], scale=[-1, 1]))
        elif method == "psychopy":
            y1 = int(rescale(y1, to=[0, height], scale=[-1, 1]))
            y2 = int(rescale(y2, to=[0, height], scale=[-1, 1]))
        length = int(rescale(length, to=[0, height], scale=[0, 2]))
return (x1, y1, x2, y2), length, angle
def _coord_line_x2y2(x1=None, y1=None, length=None, angle=None):
x2 = x1 + np.sin(np.deg2rad(angle)) * length
y2 = y1 + np.cos(np.deg2rad(angle)) * length
return x2, y2
def _coord_line_lengthangle(x1=None, y1=None, x2=None, y2=None):
length = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
angle = np.rad2deg(np.arccos(np.abs(x1 - x2) / length))
return length, angle
def _coord_rectangle(image=None, x=0, y=0, size_width=1, size_height=1, method="pil"):
"""
"""
x1 = x - (size_width / 2)
y1 = y + (size_height / 2)
x2 = x + (size_width / 2)
y2 = y - (size_height / 2)
# Get coordinates in pixels
if image is not None:
width, height = image.size
        x1 = int(rescale(x1, to=[0, width], scale=[-1, 1]))
        x2 = int(rescale(x2, to=[0, width], scale=[-1, 1]))
        if method == "pil":
            y1 = int(rescale(-y1, to=[0, height], scale=[-1, 1]))
            y2 = int(rescale(-y2, to=[0, height], scale=[-1, 1]))
        elif method == "psychopy":
            y1 = int(rescale(y1, to=[0, height], scale=[-1, 1]))
            y2 = int(rescale(y2, to=[0, height], scale=[-1, 1]))
return (x1, y1, x2, y2)
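# A minimal sketch of the grid-to-pixel mapping: on a 500x400 canvas a centred
# unit square maps to the pixel corners below. The canvas size is illustrative.
if __name__ == '__main__':
    import PIL.Image
    image = PIL.Image.new('RGB', (500, 400), color='white')
    print(_coord_rectangle(image, x=0, y=0, size_width=1, size_height=1))
    # expected: (125, 100, 375, 300)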
| 32.648148
| 86
| 0.548497
| 1,021
| 7,052
| 3.697356
| 0.132223
| 0.016689
| 0.063576
| 0.051921
| 0.500397
| 0.457219
| 0.433377
| 0.414834
| 0.414834
| 0.39894
| 0
| 0.04974
| 0.29013
| 7,052
| 215
| 87
| 32.8
| 0.704355
| 0.177964
| 0
| 0.392593
| 0
| 0
| 0.022667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059259
| false
| 0
| 0.022222
| 0
| 0.155556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d67bc8055c64e00d851f4955360ca97f28db935
| 6,971
|
py
|
Python
|
pyfluka/pyfluka_merge.py
|
morgenst/pyfluka
|
6dd3aa8cc29cfce0b2f084fb6b08bdebd2233298
|
[
"MIT"
] | null | null | null |
pyfluka/pyfluka_merge.py
|
morgenst/pyfluka
|
6dd3aa8cc29cfce0b2f084fb6b08bdebd2233298
|
[
"MIT"
] | null | null | null |
pyfluka/pyfluka_merge.py
|
morgenst/pyfluka
|
6dd3aa8cc29cfce0b2f084fb6b08bdebd2233298
|
[
"MIT"
] | null | null | null |
import sys
import argparse
import fnmatch
import os
import re
import shutil
import glob
import logging
import multiprocessing
from copy_reg import pickle
from types import MethodType
_logger = logging.getLogger('default')
_logger.addHandler(logging.StreamHandler())
_logger.setLevel(logging.CRITICAL)
def _pickle_method(method):
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
class InputParser:
def __init__(self, path):
self.path = path
self.parsedInfo = {'resnuc': [], 'usrbin': []}
def _get_bins(self):
r = re.compile("([0-9]{2})$")
self.bins = set([int(re.search(r, fN).group(1)) for fN in glob.glob(self.path + '/*fort*')])
def _drop_bin(self, bin):
try:
self.bins.remove(bin)
return True
        except KeyError:
return False
def __parse_scoring_cards(self):
re_resnuc = re.compile("^RESNUC")
re_usrbin = re.compile("^(USRBIN)\s+\d+.?\d?\s+\w+.*")
try:
input_file = glob.glob(self.path + '/*.inp')[0]
except IndexError:
_logger.critical("Unable to locate .inp file required for parsing scoring card information. Either provide "
"it in the input directory or specify card and bins explicitly.")
sys.exit(1)
for line in open(input_file).readlines():
if len(self.bins) == 0:
return
if re.match(re_resnuc, line):
index = abs(int(line.split()[2].rstrip('.')))
add_bin = self._drop_bin(index)
if add_bin:
self.parsedInfo['resnuc'].append(index)
elif re.match(re_usrbin, line):
index = abs(int(line.split()[3].rstrip('.')))
add_bin = self._drop_bin(index)
if add_bin:
self.parsedInfo['usrbin'].append(index)
def parse(self):
self._get_bins()
self.__parse_scoring_cards()
return self.parsedInfo
class Merger(object):
def __init__(self, path, out_path):
self.curdir = os.getcwd()
self.path = path
self.bins = []
self.filelist = []
self.cycle = []
self.parse_dir()
self.mergingCodeLookup = {'resnuc': 'usrsuw',
'usrbin': 'usbsuw'}
self.out_path = out_path
self.__class__.check_fluka_loaded()
self.check_out_path()
@staticmethod
def check_fluka_loaded():
try:
os.environ['FLUPRO']
except KeyError:
_logger.critical('FLUPRO environment not setup. Please export FLUPRO pointing to your FLUKA \
installation directory.')
sys.exit(1)
def check_out_path(self):
if self.out_path is not None:
self.out_path = os.path.abspath(self.out_path)
if not os.path.exists(self.out_path):
os.makedirs(self.out_path)
def parse_dir(self):
for file_name in os.listdir(self.path):
if fnmatch.fnmatch(file_name, '*???_fort.??*'):
self.geom = file_name[:-11]
c = int(file_name[-11:-8])
b = int(file_name[-2:])
self.filelist.append(file_name)
if b not in self.bins:
self.bins.append(b)
if c not in self.cycle:
self.cycle.append(c)
def merge(self, cards):
pickle(MethodType, _pickle_method, _unpickle_method)
jobs = [(k,v) for k, values in cards.items() for v in values]
pool = multiprocessing.Pool(processes=min(len(jobs), multiprocessing.cpu_count()))
pool.map(self._merge_impl, jobs)
def _merge_impl(self, *args):
card = args[0][0]
b = args[0][1]
_logger.debug("Merge " + card + " for bin " + str(b))
os.chdir(self.path)
list_name = 'list_' + str(b) + '_' + card
os.system('ls -1 *_fort.'+str(b)+'* > ' + list_name)
os.system('echo "" >> ' + list_name)
os.system('echo "' + self.geom + '_' + card + '_'+str(b)+'" >> ' + list_name)
os.system('%s/flutil/%s < %s ' % (os.environ['FLUPRO'], self.mergingCodeLookup[card], list_name))
if self.out_path is not None:
self.move(card, b)
if card == 'usrbin':
self.convert_to_ascii(card, b)
def move(self, card, index):
for fName in glob.glob(r'%s/%s_%s_%s*' % (self.path,
self.geom,
card,
index)):
shutil.move(fName, os.path.join(self.out_path, fName.split('/')[-1]))
def convert_to_ascii(self, card, bin):
os.chdir(self.out_path)
tmp_file_name = 'asciiconversion_%s_%i.txt' % (card, bin)
for file_name in glob.glob(r'%s/%s_%s_%s*' % (self.out_path,
self.geom,
card,
bin)):
if file_name.endswith('.ascii'):
continue
file_name = os.path.split(file_name)[1]
tmp_file = open(os.path.join(self.curdir, tmp_file_name), 'w+')
print >> tmp_file, file_name
print >> tmp_file, file_name + '.ascii'
tmp_file.close()
os.system('%s/flutil/usbrea < %s > /dev/null' % (os.environ['FLUPRO'], os.path.join(self.curdir, tmp_file_name)))
os.remove(os.path.join(self.curdir, tmp_file_name))
os.chdir(self.curdir)
def main(argv):
parser = argparse.ArgumentParser(description='Script for merging fluka bin data')
parser.add_argument('path', help='input path')
parser.add_argument('--card', '-c', required=False, default=None, help='card')
parser.add_argument('--bins', '-b', required=False, default=None, type=int, nargs='+', help='bins')
parser.add_argument('--output', '-o', default=None, help='output directory')
parser.add_argument('--debug', '-d', action='store_true', default=False, help='Switch on debug messages')
args = parser.parse_args()
if args.debug:
_logger.setLevel(logging.DEBUG)
path = os.path.abspath(args.path)
if not args.card and not args.bins:
parser = InputParser(path)
scoring_cards = parser.parse()
else:
scoring_cards = {args.card : args.bins}
merger = Merger(path, args.output)
merger.merge(scoring_cards)
if __name__ == '__main__':
main(sys.argv[1:])
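# Hedged invocation examples matching the argparse definitions above; the
# paths, card and bin numbers are illustrative.
#
#   python pyfluka_merge.py /path/to/fluka/run
#   python pyfluka_merge.py /path/to/fluka/run -c usrbin -b 40 41 -o merged/ -d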
| 36.307292
| 125
| 0.551427
| 853
| 6,971
| 4.310668
| 0.237984
| 0.034811
| 0.029916
| 0.01523
| 0.134621
| 0.117215
| 0.078053
| 0.078053
| 0.05548
| 0.027196
| 0
| 0.005038
| 0.316597
| 6,971
| 191
| 126
| 36.497382
| 0.766793
| 0
| 0
| 0.13253
| 0
| 0
| 0.090374
| 0.007603
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096386
| false
| 0.006024
| 0.066265
| 0
| 0.210843
| 0.012048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d6864036da06d6197930101a35bf7b6e92aebea
| 1,325
|
py
|
Python
|
calculation.py
|
n-a-iliev/NBA-PER-Calculator
|
590c617cc8c47009224a33f60fc4cba75f4b26bd
|
[
"MIT"
] | null | null | null |
calculation.py
|
n-a-iliev/NBA-PER-Calculator
|
590c617cc8c47009224a33f60fc4cba75f4b26bd
|
[
"MIT"
] | null | null | null |
calculation.py
|
n-a-iliev/NBA-PER-Calculator
|
590c617cc8c47009224a33f60fc4cba75f4b26bd
|
[
"MIT"
] | null | null | null |
from balldontlie import balldontlie, player, stats
from matplotlib import pyplot as plt
'''This function gets more information about the player by inputting
their name and dataset to search'''
def getplayer(firstname, lastname, datalist):
for players in datalist:
for info in players.data:
if info['first_name'] == firstname and info['last_name'] == lastname:
return player(info['first_name'], info['last_name'], info['id'])
def main():
totalpages = range(1, 34)
  kobeyears = range(1996, 2016)
kobestatlist = []
kobeperlist = []
datalist = []
for page in totalpages:
datalist.append(balldontlie('https://www.balldontlie.io/api/v1/players?page=' + str(page)))
kobe = getplayer('Kobe', 'Bryant', datalist)
for year in kobeyears:
kobestatlist.append(kobe.getstats(kobe,year))
for stat in kobestatlist:
kobeperlist.append(stat.calculate_PER(stat))
  plt.plot(kobeyears, kobeperlist, label="Kobe Bryant's Player Efficiency Rating",
           color='yellow')
plt.xlabel('Season')
plt.xticks(kobeyears)
plt.ylabel('Player Efficiency Rating')
plt.title('Change in PER Over Time')
ax = plt.gca()
ax.set_facecolor('purple')
plt.legend()
plt.show()
if __name__ == "__main__":
main()
| 33.974359
| 99
| 0.659623
| 163
| 1,325
| 5.276074
| 0.527607
| 0.051163
| 0.030233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011572
| 0.217358
| 1,325
| 38
| 100
| 34.868421
| 0.817743
| 0
| 0
| 0
| 0
| 0
| 0.170632
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.0625
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d6b7d2817a9a11d4f368ca09bd16da81be04b5f
| 1,496
|
py
|
Python
|
rides/forms.py
|
andrenbrandao/pirauber
|
d7c5647ec6df698fa3d7397907ff629c74cc76b9
|
[
"MIT"
] | null | null | null |
rides/forms.py
|
andrenbrandao/pirauber
|
d7c5647ec6df698fa3d7397907ff629c74cc76b9
|
[
"MIT"
] | 6
|
2020-06-05T23:27:38.000Z
|
2022-02-10T08:14:16.000Z
|
rides/forms.py
|
andrenbrandao/pirauber
|
d7c5647ec6df698fa3d7397907ff629c74cc76b9
|
[
"MIT"
] | null | null | null |
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django.utils.translation import ugettext_lazy as _
from .models import Ride
class RideForm(forms.ModelForm):
date = forms.DateField(
label=_('Date'),
        widget=forms.DateInput(format=('%Y-%m-%d'), attrs={
'class': 'form-control input-group-alternative',
'type': 'date'
})
)
time = forms.TimeField(
label=_('Time'),
required=False,
input_formats=['%H:%M'],
widget=forms.TimeInput(format=('%H:%M'), attrs={
'class': 'form-control input-group-alternative',
'type': 'time'
})
)
description = forms.CharField(
label=_('Description'),
required=False,
help_text=_('Write here any additional information.'),
widget=forms.Textarea(attrs={
'class': 'form-control input-group-alternative',
})
)
class Meta:
model = Ride
fields = ('date', 'time', 'origin', 'destination', 'seats', 'price', 'description')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.add_input(
Submit('submit', _('Save Ride'), css_class='btn-block'))
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'input-group-alternative'
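# A hedged usage sketch: typical view-side handling of RideForm. The view and
# template names are illustrative assumptions, not part of this app.
#
# from django.shortcuts import render
#
# def create_ride(request):
#     form = RideForm(request.POST or None)
#     if request.method == 'POST' and form.is_valid():
#         form.save()
#     return render(request, 'rides/ride_form.html', {'form': form})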
| 31.166667
| 91
| 0.592914
| 158
| 1,496
| 5.468354
| 0.481013
| 0.046296
| 0.097222
| 0.072917
| 0.155093
| 0.155093
| 0.155093
| 0.106481
| 0
| 0
| 0
| 0
| 0.25869
| 1,496
| 47
| 92
| 31.829787
| 0.77908
| 0
| 0
| 0.195122
| 0
| 0
| 0.21123
| 0.061497
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.121951
| 0
| 0.268293
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d6dfe9a0fb4cf150a1dbedc9b781a51974ddeed
| 843
|
py
|
Python
|
tests/testdata/models.py
|
dtpryce/MLServer
|
02744b3c770141b0b1d9dad2a0256d243051de61
|
[
"Apache-2.0"
] | null | null | null |
tests/testdata/models.py
|
dtpryce/MLServer
|
02744b3c770141b0b1d9dad2a0256d243051de61
|
[
"Apache-2.0"
] | null | null | null |
tests/testdata/models.py
|
dtpryce/MLServer
|
02744b3c770141b0b1d9dad2a0256d243051de61
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
from mlserver import MLModel
from mlserver.codecs import NumpyCodec
from mlserver.types import InferenceRequest, InferenceResponse
class SumModel(MLModel):
async def predict(self, payload: InferenceRequest) -> InferenceResponse:
decoded = self.decode(payload.inputs[0])
total = decoded.sum(axis=1, keepdims=True)
output = NumpyCodec().encode(name="total", payload=total)
return InferenceResponse(id=payload.id, model_name=self.name, outputs=[output])
class SlowModel(MLModel):
async def load(self) -> bool:
await asyncio.sleep(10)
self.ready = True
return self.ready
async def infer(self, payload: InferenceRequest) -> InferenceResponse:
await asyncio.sleep(10)
return InferenceResponse(id=payload.id, model_name=self.name, outputs=[])
| 31.222222
| 87
| 0.71293
| 97
| 843
| 6.175258
| 0.43299
| 0.0601
| 0.050083
| 0.146912
| 0.193656
| 0.193656
| 0.193656
| 0.193656
| 0.193656
| 0.193656
| 0
| 0.008759
| 0.187426
| 843
| 26
| 88
| 32.423077
| 0.865693
| 0
| 0
| 0.111111
| 0
| 0
| 0.005931
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d6f477bb8496ccbe8298b0d502cfaf9b42c5d1c
| 10,459
|
py
|
Python
|
PERFORMER.py
|
ShivamRajSharma/Transformer-Architecure_From_Scratch
|
f7f24cb5146c09e6cf38a41e5e5ef721389803c1
|
[
"MIT"
] | 17
|
2020-09-13T07:53:41.000Z
|
2022-03-17T09:58:23.000Z
|
PERFORMER.py
|
ShivamRajSharma/Transformer-Architecure_From_Scratch
|
f7f24cb5146c09e6cf38a41e5e5ef721389803c1
|
[
"MIT"
] | null | null | null |
PERFORMER.py
|
ShivamRajSharma/Transformer-Architecure_From_Scratch
|
f7f24cb5146c09e6cf38a41e5e5ef721389803c1
|
[
"MIT"
] | 3
|
2020-12-15T14:20:47.000Z
|
2022-01-24T02:26:04.000Z
|
from time import time
import torch
import torch.nn as nn
class FastAttention(nn.Module):
def __init__(self, input_shape, head, n_features):
super(FastAttention, self).__init__()
self.head = head
self.input_shape = input_shape
self.depth = int(input_shape // head)
self.n_features = n_features
self.key_ORF = self.OrthogonalRandomFeature()
self.query_ORF = self.OrthogonalRandomFeature()
self.query = nn.Linear(self.depth, self.depth)
self.key = nn.Linear(self.depth, self.depth)
self.value = nn.Linear(self.depth, self.depth)
self.fc = nn.Linear(self.depth*head, input_shape)
def kernel_function(self, x, flag):
ORF = self.query_ORF if flag == 'query' else self.key_ORF
normalization_factor = 1/ORF.shape[-1]**0.25
x *= normalization_factor
out = torch.einsum('nhsd, fd -> nhsf', x, ORF)
kernel_fn = nn.ReLU()(out) + 1e-3
return kernel_fn
def OrthogonalRandomFeature(self):
n = self.n_features//self.depth
remainder = self.n_features%self.depth
orthogonal_features = []
for _ in range(n):
normal_feature = torch.rand(self.depth, self.depth)
orthogonal_feature, _ = torch.qr(normal_feature)
orthogonal_features.append(orthogonal_feature)
if remainder > 0 :
normal_feature = torch.rand(self.depth, self.depth)
orthogonal_feature, _ = torch.qr(normal_feature)
orthogonal_features.append(orthogonal_feature[0: remainder])
orthogonal_features = torch.cat(orthogonal_features)
        multiplier = torch.randn(self.n_features, self.depth).norm(dim=1)
        final_features = torch.matmul(torch.diag(multiplier), orthogonal_features)
return final_features
def causal_attention(self, q, k, v):
denominator = 1/torch.einsum('nhqf, nhkf -> nhqf', q, k.cumsum(dim=-2))
x = torch.einsum('nhkf, nhkd -> nhkfd', k, v)
x = x.cumsum(dim=-3)
out = torch.einsum('nhqfd, nhqf, nhqf -> nhqd', x, q, denominator)
return out
def bidirectional_attention(self, q, k, v):
kt_i = torch.einsum('nhkf -> nhf', k)
normalization_factor = 1/(torch.einsum('nhqf, nhf -> nhq', q, kt_i))
k_v = torch.einsum('nhkf, nhkd -> nhfd', k, v)
attention = torch.einsum('nhfd, nhqf, nhq-> nhqd', k_v, q, normalization_factor)
return attention
    def forward(self, query, key, value, mask=None, causal_mask=False):
batch = query.shape[0]
query_len, key_len, value_len = query.shape[1], key.shape[1], value.shape[1]
query = query.reshape(batch, query_len, self.head, self.depth)
key = key.reshape(batch, key_len, self.head, self.depth)
value = value.reshape(batch, value_len, self.head, self.depth)
query = query.permute(0, 2, 1, 3)
key = key.permute(0, 2, 1, 3)
value = value.permute(0, 2, 1, 3)
query = self.query(query)
key = self.key(key)
value = self.value(value)
if mask is not None:
            key = key.masked_fill(mask == 0, float("-1e20"))  # masked_fill is out-of-place; keep the result
query = self.kernel_function(query, 'query')
key = self.kernel_function(key, 'key')
        if causal_mask:
out = self.causal_attention(query, key, value)
else:
out = self.bidirectional_attention(query, key, value)
out = out.permute(0, 2, 1, 3)
out = out.reshape(batch, query_len, self.head*self.depth)
out = self.fc(out)
return out
class PerformerBlock(nn.Module):
def __init__(self, input_shape, head, n_features, dropout, forward_expansion):
super(PerformerBlock, self).__init__()
self.attention = FastAttention(input_shape, head, n_features)
self.feed_forward = nn.Sequential(
nn.Linear(input_shape, input_shape*forward_expansion),
nn.GELU(),
nn.Linear(input_shape*forward_expansion, input_shape)
)
self.layernorm1 = nn.LayerNorm(input_shape)
self.layernorm2 = nn.LayerNorm(input_shape)
self.dropout = nn.Dropout(dropout)
def forward(self, query, key, value, mask):
attention = self.attention(query, key, value, mask)
add = attention + query
        regularization = self.dropout(self.layernorm1(add))
        forward = self.feed_forward(regularization)
        out = self.dropout(self.layernorm2(forward + regularization))
return out
class Encoder(nn.Module):
def __init__(
self,
vocab_size,
embedding_out,
num_layers,
heads,
n_features,
forward_expansion,
dropout,
max_len
):
super(Encoder, self).__init__()
self.word_embedding = nn.Embedding(vocab_size, embedding_out)
self.postional_embedding = nn.Parameter(torch.zeros(1, max_len, embedding_out))
self.dropout = nn.Dropout(dropout)
self.layers = nn.Sequential(
*[
PerformerBlock(
embedding_out,
heads,
n_features,
dropout,
forward_expansion
)
for _ in range(num_layers)
]
)
def forward(self, x, mask):
word_embedding = self.word_embedding(x)
postional_embedding = self.postional_embedding[:, :x.shape[1], :]
out = self.dropout(word_embedding + postional_embedding)
for layer in self.layers:
out = layer(out, out, out, mask)
return out
class DecoderBlock(nn.Module):
def __init__(
self,
embedding_out,
head,
n_features,
forward_expansion,
dropout
):
super(DecoderBlock, self).__init__()
self.attention = FastAttention(embedding_out, head, n_features)
self.Performer_block = PerformerBlock(
embedding_out,
head,
n_features,
dropout,
forward_expansion
)
self.dropout = nn.Dropout(dropout)
self.norm = nn.LayerNorm(embedding_out)
def forward(self, query, key, value, src_mask):
attention = self.attention(query, query, query, src_mask, True)
query = self.dropout(self.norm(attention + query))
out = self.Performer_block(query, key, value, src_mask)
return out
class Decoder(nn.Module):
def __init__(
self,
vocab_size,
embedding_out,
num_layers,
head,
n_features,
forward_expansion,
dropout,
max_len
):
super(Decoder, self).__init__()
self.word_embedding = nn.Embedding(vocab_size, embedding_out)
self.positional_embedding = nn.Parameter(torch.zeros(1, max_len, embedding_out))
self.layers = nn.Sequential(
*[
DecoderBlock(
embedding_out,
head,
n_features,
forward_expansion,
dropout
)
for _ in range(num_layers)
]
)
self.fc = nn.Linear(embedding_out, vocab_size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, encoder_output, src_mask):
x = self.dropout(self.word_embedding(x) + self.positional_embedding[:, :x.shape[1], :])
for layer in self.layers:
x = layer(
x,
encoder_output,
encoder_output,
src_mask
)
out = self.fc(x)
return out
class Performers(nn.Module):
def __init__(
self,
input_vocab_size,
output_vocab_size,
pad_idx,
embedding_out,
num_layers,
forward_expansion,
head,
n_features,
dropout,
max_len
):
super(Performers, self).__init__()
self.encoder = Encoder(
input_vocab_size,
embedding_out,
num_layers,
head,
n_features,
forward_expansion,
dropout,
max_len
)
self.decoder = Decoder(
output_vocab_size,
embedding_out,
num_layers,
head,
n_features,
forward_expansion,
dropout,
max_len
)
self.pad_idx = pad_idx
self.apply(self._init_weights)
#From @HuggingFace
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def input_pad_mask(self, inputs):
pad_mask = (inputs != self.pad_idx).unsqueeze(1).unsqueeze(3)
return pad_mask
    def output_pad_mask(self, targets):
        pad_mask = (targets != self.pad_idx).unsqueeze(1).unsqueeze(3)
        return pad_mask
def forward(self, inputs, target):
input_pad_mask = self.input_pad_mask(inputs)
        output_pad_mask = self.output_pad_mask(target)
encoder_output = self.encoder(inputs, input_pad_mask)
decoder_out = self.decoder(target, encoder_output, output_pad_mask)
return decoder_out
if __name__ == "__main__":
#Depends on the Tokenizer
input_vocab_size = 100
output_vocab_size = 200
    # Default Performer parameters:
pad_idx = 0
embedding_out = 512
num_layers = 6
forward_expansion = 4
head = 8
n_features = 256
dropout = 0.1
max_len = 512
inputs = torch.randint(0, 100, (32, 200))
targets = torch.randint(0, 100, (32,100))
model = Performers(
input_vocab_size,
output_vocab_size,
pad_idx,
embedding_out,
num_layers,
forward_expansion,
head,
n_features,
dropout,
max_len
)
start = time()
y = model(inputs, targets)
print(f'INFERENCE TIME = {time() - start} sec')
x = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'NUMBER OF PARAMETERS = {x}')
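# A minimal sanity check (a sketch; uses only names defined above): the decoder
# head should emit one logit per output-vocabulary entry at every target position.
assert y.shape == (32, 100, output_vocab_size)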
| 30.852507
| 95
| 0.581222
| 1,207
| 10,459
| 4.809445
| 0.153273
| 0.031008
| 0.026873
| 0.015504
| 0.418432
| 0.292334
| 0.264255
| 0.230146
| 0.178467
| 0.178467
| 0
| 0.013485
| 0.319342
| 10,459
| 339
| 96
| 30.852507
| 0.801938
| 0.006884
| 0
| 0.394265
| 0
| 0
| 0.022918
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0681
| false
| 0
| 0.010753
| 0
| 0.139785
| 0.007168
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d6fa2ce7adb3f0d8fb6ff64a2befb7535e72eca
| 28,970
|
py
|
Python
|
nogo/gtp_connection.py
|
douglasrebstock/alpha-zero-general
|
2237522be5a1bbfebbc2fc1b2a8e8a6bcb6d5aab
|
[
"MIT"
] | null | null | null |
nogo/gtp_connection.py
|
douglasrebstock/alpha-zero-general
|
2237522be5a1bbfebbc2fc1b2a8e8a6bcb6d5aab
|
[
"MIT"
] | null | null | null |
nogo/gtp_connection.py
|
douglasrebstock/alpha-zero-general
|
2237522be5a1bbfebbc2fc1b2a8e8a6bcb6d5aab
|
[
"MIT"
] | null | null | null |
"""
gtp_connection.py
Module for playing games of Go using GoTextProtocol
Parts of this code were originally based on the gtp module
in the Deep-Go project by Isaac Henrion and Amos Storkey
at the University of Edinburgh.
"""
import signal, os
import traceback
from sys import stdin, stdout, stderr
from board_util import GoBoardUtil, BLACK, WHITE, EMPTY, BORDER, PASS, \
MAXSIZE, coord_to_point
import numpy as np
import re
import time
import random
class GtpConnection():
def __init__(self, go_engine, board, debug_mode = False):
"""
Manage a GTP connection for a Go-playing engine
Parameters
----------
go_engine:
a program that can reply to the set of GTP commands below
board:
Represents the current board state.
"""
self.totalTime = 0
self.count = 0
self.nodeExp = 0
self.timeLimit = 1
self.to_play = BLACK
#H_table is a dictionary that stores (state, value) pairs
#value: Black win -> 1, White win -> -1
self.H_table = {}
self._winner = ''
self._optimal_move = ''
self._debug_mode = debug_mode
self.go_engine = go_engine
self.board = board
self.commands = {
"protocol_version": self.protocol_version_cmd,
"quit": self.quit_cmd,
"name": self.name_cmd,
"boardsize": self.boardsize_cmd,
"showboard": self.showboard_cmd,
"clear_board": self.clear_board_cmd,
"komi": self.komi_cmd,
"version": self.version_cmd,
"known_command": self.known_command_cmd,
"genmove": self.genmove_cmd,
"list_commands": self.list_commands_cmd,
"play": self.play_cmd,
"legal_moves": self.legal_moves_cmd,
"gogui-rules_game_id": self.gogui_rules_game_id_cmd,
"gogui-rules_board_size": self.gogui_rules_board_size_cmd,
"gogui-rules_legal_moves": self.gogui_rules_legal_moves_cmd,
"gogui-rules_side_to_move": self.gogui_rules_side_to_move_cmd,
"gogui-rules_board": self.gogui_rules_board_cmd,
"gogui-rules_final_result": self.gogui_rules_final_result_cmd,
"gogui-analyze_commands": self.gogui_analyze_cmd,
"timelimit": self.timelimit_cmd,
"solve":self.solve_cmd
}
# used for argument checking
# values: (required number of arguments,
# error message on argnum failure)
self.argmap = {
"boardsize": (1, 'Usage: boardsize INT'),
"komi": (1, 'Usage: komi FLOAT'),
"known_command": (1, 'Usage: known_command CMD_NAME'),
"genmove": (1, 'Usage: genmove {w,b}'),
"play": (2, 'Usage: play {b,w} MOVE'),
"legal_moves": (1, 'Usage: legal_moves {w,b}'),
"timelimit": (1, 'Usage: timelimit INT, 1 <= INT <= 100'),
}
def write(self, data):
stdout.write(data)
def flush(self):
stdout.flush()
def start_connection(self):
"""
Start a GTP connection.
This function continuously monitors standard input for commands.
"""
line = stdin.readline()
while line:
self.get_cmd(line)
line = stdin.readline()
def get_cmd(self, command):
"""
Parse command string and execute it
"""
if len(command.strip(' \r\t')) == 0:
return
if command[0] == '#':
return
# Strip leading numbers from regression tests
if command[0].isdigit():
command = re.sub(r"^\d+", "", command).lstrip()
elements = command.split()
if not elements:
return
command_name = elements[0]
args = elements[1:]
if self.has_arg_error(command_name, len(args)):
return
if command_name in self.commands:
try:
self.commands[command_name](args)
except Exception as e:
self.debug_msg("Error executing command {}\n".format(str(e)))
self.debug_msg("Stack Trace:\n{}\n".
format(traceback.format_exc()))
raise e
else:
self.debug_msg("Unknown command: {}\n".format(command_name))
self.error('Unknown command')
stdout.flush()
def has_arg_error(self, cmd, argnum):
"""
Verify the number of arguments of cmd.
argnum is the number of parsed arguments
"""
if cmd in self.argmap and self.argmap[cmd][0] != argnum:
self.error(self.argmap[cmd][1])
return True
return False
def debug_msg(self, msg):
""" Write msg to the debug stream """
if self._debug_mode:
stderr.write(msg)
stderr.flush()
def error(self, error_msg):
""" Send error msg to stdout """
stdout.write('? {}\n\n'.format(error_msg))
stdout.flush()
def respond(self, response=''):
""" Send response to stdout """
stdout.write('= {}\n\n'.format(response))
stdout.flush()
def reset(self, size):
"""
Reset the board to empty board of given size
"""
self.board.reset(size)
def board2d(self):
return str(GoBoardUtil.get_twoD_board(self.board))
def protocol_version_cmd(self, args):
""" Return the GTP protocol version being used (always 2) """
self.respond('2')
def quit_cmd(self, args):
""" Quit game and exit the GTP interface """
self.respond()
exit()
def name_cmd(self, args):
""" Return the name of the Go engine """
self.respond(self.go_engine.name)
def version_cmd(self, args):
""" Return the version of the Go engine """
self.respond(self.go_engine.version)
def clear_board_cmd(self, args):
""" clear the board """
self.reset(self.board.size)
self.respond()
def boardsize_cmd(self, args):
"""
Reset the game with new boardsize args[0]
"""
self.reset(int(args[0]))
self.respond()
#newly added
def timelimit_cmd(self, args):
"""
Reset the game with new timelimit args[0]
"""
self.timeLimit = int(args[0])
self.respond()
def showboard_cmd(self, args):
self.respond('\n' + self.board2d())
def komi_cmd(self, args):
"""
Set the engine's komi to args[0]
"""
self.go_engine.komi = float(args[0])
self.respond()
def known_command_cmd(self, args):
"""
Check if command args[0] is known to the GTP interface
"""
if args[0] in self.commands:
self.respond("true")
else:
self.respond("false")
def list_commands_cmd(self, args):
""" list all supported GTP commands """
self.respond(' '.join(list(self.commands.keys())))
def legal_moves_cmd(self, args):
"""
List legal moves for color args[0] in {'b','w'}
"""
board_color = args[0].lower()
color = color_to_int(board_color)
moves = GoBoardUtil.generate_legal_moves(self.board, color)
gtp_moves = []
for move in moves:
coords = point_to_coord(move, self.board.size)
gtp_moves.append(format_point(coords))
sorted_moves = ' '.join(sorted(gtp_moves))
self.respond(sorted_moves)
def play_cmd(self, args):
"""
play a move args[1] for given color args[0] in {'b','w'}
"""
try:
board_color = args[0].lower()
board_move = args[1]
if board_color != "b" and board_color !="w":
self.respond("illegal move: \"{}\" wrong color".format(board_color))
return
color = color_to_int(board_color)
#change turn to the other player
self.to_play = GoBoardUtil.opponent(color)
if args[1].lower() == 'pass':
self.respond("illegal move: \"{} {}\" wrong coordinate".format(args[0], args[1]))
return
coord = move_to_coord(args[1], self.board.size)
if coord:
move = coord_to_point(coord[0],coord[1], self.board.size)
else:
self.error("Error executing move {} converted from {}"
.format(move, args[1]))
return
if not self.board.play_move(move, color):
self.respond("illegal move: \"{} {}\" ".format(args[0], board_move))
return
else:
self.debug_msg("Move: {}\nBoard:\n{}\n".
format(board_move, self.board2d()))
self.respond()
except Exception as e:
self.respond('illegal move: \"{} {}\" {}'.format(args[0], args[1], str(e)))
def solve_helper(self):
winner = 'unknown'
#the copy of board can be viewed as a state
cp_board = self.board.copy()
start = time.time()
signal.signal(signal.SIGALRM, handler)
signal.alarm(self.timeLimit)
try:
value,move = self.advanced_search(cp_board,81,-1,1)
except Exception as e:
value,move = 0,None
#print("nodeExp",self.nodeExp)
#print("count",self.count)
signal.alarm(0)
end = time.time()
print("time: ",end - start)
#print("partial time: ",self.totalTime)
if value == 1:
winner = 'b'
elif value == -1:
winner = 'w'
if (winner == 'b' and self.to_play !=BLACK) or (winner == 'w' and self.to_play !=WHITE):
move = None
return winner,move
#newly added
def solve_cmd(self,args):
moveStr = ''
winner,move = self.solve_helper()
if move:
moveStr = ' '+ coord_to_move(move,self.board.size)
self.respond(winner+moveStr)
#alpha beta pruning, referencing from wikipedia: https://en.wikipedia.org/wiki/Alpha%E2%80%93beta_pruning
#color is the player. black is max player, white is min player
def ab_search(self, color, copy_of_board, depth, alpha, beta):
_alpha = alpha
_beta = beta
bestMove = None
#base case, no more legal move
#print(GoBoardUtil.generate_legal_moves(copy_of_board, color))
if depth == 0 or (GoBoardUtil.generate_legal_moves(copy_of_board, color) == []):
#depth should always be >0
#since NoGo allows neither capture nor suicide, if the last move was by WHITE/BLACK, it must be a BLACK/WHITE win.
if color == WHITE:
return 1,None
#color == BLACK
else:
return -1,None
#color is black; max player
if color == BLACK:
value = -1000000
#make a copy of current state
allmoves = GoBoardUtil.generate_legal_moves(copy_of_board, color)
#print("allmoves:")
#print(allmoves)
for move in allmoves:
child = copy_of_board.copy()
child.play_move(move, color)
childValue,_ = self.ab_search(WHITE,child,depth-1,_alpha,_beta)
value = max(value,childValue)
_alpha = max(_alpha,value)
bestMove = move
#beta cut-off
if _alpha >= _beta:
break
return value,bestMove
#color is white; min player
else:
value = 1000000
allmoves = GoBoardUtil.generate_legal_moves(copy_of_board, color)
#print("allmoves:")
#print(allmoves)
for move in allmoves:
child = copy_of_board.copy()
child.play_move(move, color)
childValue,_ = self.ab_search(BLACK,child,depth-1,_alpha,_beta)
value = min(value,childValue)
_beta = min(_beta,value)
bestMove = move
#alpha cut-off
if _alpha >= _beta:
break
return value,bestMove
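# Illustrative trace (hypothetical position with a single legal BLACK move):
# BLACK plays it, the recursive call sees WHITE with no legal replies and
# returns (1, None), so ab_search propagates value 1 and that move upward;
# the alpha/beta cut-offs never fire on a one-branch tree.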
def advanced_search(self,copy_of_board,depth,alpha,beta):
_alpha = alpha
_beta = beta
bestMove = None
self.nodeExp += 1
#base case, depth 0
if depth == 0:
return 0,None
#Start = time.time()
allmoves = GoBoardUtil.generate_legal_moves(copy_of_board, copy_of_board.current_player)
#End =time.time()
#self.totalTime += End-Start
#base case, no more legal move
if allmoves == []:
#since NoGo allows neither capture nor suicide, if the last move was by WHITE/BLACK, it must be a BLACK/WHITE win.
if copy_of_board.current_player == WHITE:
self.H_table[self.tuple_to_str(self.matrix_to_tuple(GoBoardUtil.get_twoD_board(copy_of_board),copy_of_board.size))] = 1
return 1,None
#color == BLACK
else:
self.H_table[self.tuple_to_str(self.matrix_to_tuple(GoBoardUtil.get_twoD_board(copy_of_board),copy_of_board.size))] = -1
return -1,None
searchedMoves = []
unsearchedMoves = []
unsearched = {}
searchedValue = {}
isoSet = set()
singleMoveIsoSet = set()
for move in allmoves:
singleMoveIsoSet.clear()
child = copy_of_board.copy()
child.play_move(move, copy_of_board.current_player)
#get all isomorphic forms of the board, in order to prune as many redundant states as possible
isomorphics = self.get_all_isomorphic(GoBoardUtil.get_twoD_board(child),child.size)
found = False
for iso in isomorphics:
if self.tuple_to_str(iso) in self.H_table:
found = True
searchedMoves.append(move)
searchedValue[move] = self.H_table[self.tuple_to_str(iso)]
break
if iso in isoSet:
found = True
break
else:
isoSet.add(iso)
singleMoveIsoSet.add(iso)
if not found:
'''
the following is the heuristic I created for ordering the moves:
(1) eye-filling is the last thing we want to do;
(2) the fewer the player's stones at Manhattan distance (MD) 1, the better;
(3) the more of the opponent's stones at MD 1, the better;
(4) the more of the player's stones at MD 2, the better;
'''
num_same = 49
dis1 = [move+1,move-1,move+child.size+1,move-child.size-1]
dis2 = [move+2,move-2,move+2*(child.size+1),move-2*(child.size+1),move+child.size+2,move-child.size-2,move+child.size,move-child.size]
valid1 = []
for point in dis1:
x = point%(child.size+1)
y = point//(child.size+1)
if 1<=x<=child.size and 1<=y<=child.size:
valid1.append(point)
valid2 = []
for point in dis2:
x = point%(child.size+1)
y = point//(child.size+1)
if 1<=x<=child.size and 1<=y<=child.size:
valid2.append(point)
if copy_of_board.is_eye(move,copy_of_board.current_player):
num_same += 1000
for point in valid1:
if child.get_color(point)==copy_of_board.current_player:
num_same += 100
if child.get_color(point)== BLACK+WHITE-copy_of_board.current_player:
num_same -= 10
for point in valid2:
if child.get_color(point)==copy_of_board.current_player:
num_same -= 1
unsearched[move] = num_same
#print("dic:",unsearched)
#print("searched:",searchedMoves)
#sorting unsearched moves by the heuristic value
sorted_x = sorted(unsearched.items(), key=lambda kv: kv[1])
for item in sorted_x:
unsearchedMoves.append(item[0])
orderedMoves = searchedMoves + unsearchedMoves
self.count += len(allmoves) - len(orderedMoves)
state = self.tuple_to_str(self.matrix_to_tuple(GoBoardUtil.get_twoD_board(copy_of_board),copy_of_board.size))
#below is normal alpha-beta search
#color is black; max player
if copy_of_board.current_player == BLACK:
value = -1000000
#make a copy of current state
for move in orderedMoves:
if move in searchedMoves:
childValue = searchedValue[move]
else:
child = copy_of_board.copy()
child.play_move(move, copy_of_board.current_player)
childValue,_ = self.advanced_search(child,depth-1,_alpha,_beta)
#childValue,_ = self.advanced_search(copy_of_board,depth-1,_alpha,_beta)
value = max(value,childValue)
_alpha = max(_alpha,value)
bestMove = move
#beta cut-off
if _alpha >= _beta:
break
self.H_table[state] = value
return value,bestMove
#color is white; min player
else:
value = 1000000
for move in orderedMoves:
if move in searchedMoves:
childValue = searchedValue[move]
else:
child = copy_of_board.copy()
child.play_move(move, copy_of_board.current_player)
#childValue,_ = self.advanced_search(copy_of_board,depth-1,_alpha,_beta)
childValue,_ = self.advanced_search(child,depth-1,_alpha,_beta)
value = min(value,childValue)
_beta = min(_beta,value)
bestMove = move
#alpha cut-off
if _alpha >= _beta:
break
self.H_table[state] = value
return value,bestMove
def get_all_isomorphic(self, board_2d,size):
"""
input: matrix of a board
output: a set of tuples
"""
isomorphics = set()
#original
#print("mat to tuple:")
#print(self.matrix_to_tuple(board_2d,size))
isomorphics.add(self.matrix_to_tuple(board_2d,size))
#return isomorphics
tmp_board = []
#reflectional sym, 2 cases
#swap rows
cp_board_2dx = board_2d.copy()
for i in range(size//2):
tmp = cp_board_2dx[i,:].copy()
cp_board_2dx[i,:] = cp_board_2dx[size-1-i,:]
cp_board_2dx[size-1-i,:]=tmp
isomorphics.add(self.matrix_to_tuple(cp_board_2dx,size))
#swap columns
cp_board_2dy = board_2d.copy()
for j in range(size//2):
for i in range(size):
tmp = cp_board_2dy[i,j]
cp_board_2dy[i,j] = cp_board_2dy[i,size-1-j]
cp_board_2dy[i,size-1-j] = tmp
isomorphics.add(self.matrix_to_tuple(cp_board_2dy,size))
#rotational sym, 3 cases
board_90 = np.rot90(board_2d)
#board_90 = self.rotateMatrix(board_2d,size)
isomorphics.add(self.matrix_to_tuple(board_90,size))
#reflectional sym of 90 degree, 2 cases
#swap rows
cp_board_90x = board_90.copy()
for i in range(size//2):
tmp = cp_board_90x[i,:].copy()
cp_board_90x[i,:] = cp_board_90x[size-1-i,:]
cp_board_90x[size-1-i,:] = tmp
isomorphics.add(self.matrix_to_tuple(cp_board_90x,size))
#swap columns
cp_board_90y = board_90.copy()
for j in range(size//2):
for i in range(size):
tmp = cp_board_90y[i,j]
cp_board_90y[i,j] = cp_board_90y[i,size-1-j]
cp_board_90y[i,size-1-j] = tmp
isomorphics.add(self.matrix_to_tuple(cp_board_90y,size))
#print("90",board_90)
board_180 = np.rot90(board_90)
#print("180",board_180)
isomorphics.add(self.matrix_to_tuple(board_180,size))
board_270 = np.rot90(board_180)
#print("270",board_270)
isomorphics.add(self.matrix_to_tuple(board_270,size))
#board_180 = self.rotateMatrix(board_90,size)
#isomorphics.add(self.matrix_to_tuple(board_180,size))
#board_270 = self.rotateMatrix(board_180,size)
#isomorphics.add(self.matrix_to_tuple(board_270,size))
return isomorphics
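# Illustrative count (hypothetical 2x2 board with a single stone in one corner):
# the 8 generated transforms (identity, two reflections, the rotations and the
# reflections of the 90-degree rotation) collapse to 4 distinct tuples in the
# returned set, since a corner stone has only 4 images under the square's symmetries.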
def matrix_to_tuple(self,matrix,dim):
board1d = np.zeros((dim* dim), dtype = np.int32)
for i in range(dim):
board1d[i*dim:i*dim+dim] = matrix[i,:]
return tuple(board1d)
def get_oneD_board(self,goboard):
"""
Return: numpy array
a 1-d numpy array with the stones as the goboard.
Does not pad with BORDER
Rows 1..size of goboard are copied into rows 0..size - 1 of board2d
"""
size = goboard.size
board1d = np.zeros((size* size), dtype = np.int32)
for row in range(size):
start = goboard.row_start(row + 1)
board1d[row*size:row*size+size] = goboard.board[start : start + size]
return board1d
def tuple_to_str(self,tup):
res = ''
for i in tup:
res += str(int(i))
return res
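# e.g. tuple_to_str((1, 0, 2)) == '102', a compact string key for H_table.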
#genmove overridden
def genmove_cmd(self, args):
"""
Generate a move for the color args[0] in {'b', 'w'}, for the game of NoGo.
"""
board_color = args[0].lower()
color = color_to_int(board_color)
self.to_play = color
winnerStr,optMove = self.solve_helper()
winner = EMPTY
if winnerStr=='b':
winner = BLACK
elif winnerStr =='w':
winner = WHITE
#if the current player is the winner, take the best move; otherwise take a random move
if color == winner:
move = optMove
else:
move = GoBoardUtil.generate_random_move(self.board, color,False)
move_coord = point_to_coord(move, self.board.size)
move_as_string = format_point(move_coord)
if self.board.is_legal(move, color):
self.board.play_move(move, color)
self.respond(move_as_string)
else:
self.respond("resign")
def gogui_rules_game_id_cmd(self, args):
self.respond("NoGo")
def gogui_rules_board_size_cmd(self, args):
self.respond(str(self.board.size))
def legal_moves_cmd(self, args):
"""
List legal moves for color args[0] in {'b','w'}
"""
board_color = args[0].lower()
color = color_to_int(board_color)
moves = GoBoardUtil.generate_legal_moves(self.board, color)
gtp_moves = []
for move in moves:
coords = point_to_coord(move, self.board.size)
gtp_moves.append(format_point(coords))
sorted_moves = ' '.join(sorted(gtp_moves))
self.respond(sorted_moves)
def gogui_rules_legal_moves_cmd(self, args):
empties = self.board.get_empty_points()
color = self.board.current_player
legal_moves = []
for move in empties:
if self.board.is_legal(move, color):
legal_moves.append(move)
gtp_moves = []
for move in legal_moves:
coords = point_to_coord(move, self.board.size)
gtp_moves.append(format_point(coords))
sorted_moves = ' '.join(sorted(gtp_moves))
self.respond(sorted_moves)
def gogui_rules_side_to_move_cmd(self, args):
color = "black" if self.board.current_player == BLACK else "white"
self.respond(color)
def gogui_rules_board_cmd(self, args):
size = self.board.size
board_str = ''
for row in range(size-1, -1, -1):
start = self.board.row_start(row + 1)
for i in range(size):
point = self.board.board[start + i]
if point == BLACK:
board_str += 'X'
elif point == WHITE:
board_str += 'O'
elif point == EMPTY:
board_str += '.'
else:
assert False
board_str += '\n'
self.respond(board_str)
def gogui_rules_final_result_cmd(self, args):
empties = self.board.get_empty_points()
color = self.board.current_player
legal_moves = []
for move in empties:
if self.board.is_legal(move, color):
legal_moves.append(move)
if not legal_moves:
result = "black" if self.board.current_player == WHITE else "white"
else:
result = "unknown"
self.respond(result)
def gogui_analyze_cmd(self, args):
self.respond("pstring/Legal Moves For ToPlay/gogui-rules_legal_moves\n"
"pstring/Side to Play/gogui-rules_side_to_move\n"
"pstring/Final Result/gogui-rules_final_result\n"
"pstring/Board Size/gogui-rules_board_size\n"
"pstring/Rules GameID/gogui-rules_game_id\n"
"pstring/Show Board/gogui-rules_board\n"
)
def point_to_coord(point, boardsize):
"""
Transform point given as board array index
to (row, col) coordinate representation.
Special case: PASS is not transformed
"""
if point == PASS:
return PASS
else:
NS = boardsize + 1
return divmod(point, NS)
def format_point(move):
"""
Return move coordinates as a string such as 'a1', or 'pass'.
"""
column_letters = "ABCDEFGHJKLMNOPQRSTUVWXYZ"
#column_letters = "abcdefghjklmnopqrstuvwxyz"
if move == PASS:
return "pass"
row, col = move
if not 0 <= row < MAXSIZE or not 0 <= col < MAXSIZE:
raise ValueError
return column_letters[col - 1] + str(row)
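# e.g. format_point((3, 1)) == 'A3'; note the column letter 'I' is skipped,
# following the usual Go/GTP board-labelling convention.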
def move_to_coord(point_str, board_size):
"""
Convert a string point_str representing a point, as specified by GTP,
to a pair of coordinates (row, col) in range 1 .. board_size.
Raises ValueError if point_str is invalid
"""
if not 2 <= board_size <= MAXSIZE:
raise ValueError("board_size out of range")
s = point_str.lower()
if s == "pass":
return PASS
try:
col_c = s[0]
if (not "a" <= col_c <= "z") or col_c == "i":
raise ValueError
col = ord(col_c) - ord("a")
if col_c < "i":
col += 1
row = int(s[1:])
if row < 1:
raise ValueError
except (IndexError, ValueError):
# e.g. "a0"
raise ValueError("wrong coordinate")
if not (col <= board_size and row <= board_size):
# e.g. "a20"
raise ValueError("wrong coordinate")
return row, col
def coord_to_move(move, board_size):
"""
Convert a point, given as a board array index, to a GTP coordinate
string such as 'a2' (the inverse of move_to_coord).
Raises ValueError if board_size is out of range
"""
if not 2 <= board_size <= MAXSIZE:
raise ValueError("board_size out of range")
#s = point_str.lower()
x = move%(board_size+1)
y = move//(board_size+1)
col = chr(x-1 + ord("a"))
#col = col.upper()
return col+str(y)
def color_to_int(c):
"""convert character to the appropriate integer code"""
color_to_int = {"b": BLACK , "w": WHITE, "e": EMPTY,
"BORDER": BORDER}
return color_to_int[c]
def handler(signum, frame):
print('Signal handler called with signal', signum)
raise Exception("Timeout!")
| 34.736211
| 150
| 0.542975
| 3,482
| 28,970
| 4.345204
| 0.1278
| 0.013483
| 0.023265
| 0.01573
| 0.429941
| 0.379577
| 0.346861
| 0.320291
| 0.295704
| 0.271844
| 0
| 0.017755
| 0.354539
| 28,970
| 833
| 151
| 34.777911
| 0.791379
| 0.15875
| 0
| 0.334586
| 0
| 0
| 0.055794
| 0.013659
| 0
| 0
| 0
| 0
| 0.00188
| 1
| 0.088346
| false
| 0.015038
| 0.015038
| 0.00188
| 0.167293
| 0.003759
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d71192a0442b7eef7acad0763b92e91ecac841f
| 965
|
py
|
Python
|
plugins/help.py
|
A0vanc01/Frisky
|
d4d7f9892858b5412755c9dee594e5b60b6d2b94
|
[
"MIT"
] | 5
|
2020-01-22T18:16:59.000Z
|
2021-06-14T13:23:57.000Z
|
plugins/help.py
|
A0vanc01/Frisky
|
d4d7f9892858b5412755c9dee594e5b60b6d2b94
|
[
"MIT"
] | 104
|
2020-02-12T00:36:14.000Z
|
2022-02-10T08:18:28.000Z
|
plugins/help.py
|
A0vanc01/Frisky
|
d4d7f9892858b5412755c9dee594e5b60b6d2b94
|
[
"MIT"
] | 4
|
2020-01-30T15:44:04.000Z
|
2020-08-27T19:22:57.000Z
|
from frisky.events import MessageEvent
from frisky.plugin import FriskyPlugin, PluginRepositoryMixin
from frisky.responses import FriskyResponse
class HelpPlugin(FriskyPlugin, PluginRepositoryMixin):
commands = ['help']
def command_help(self, message: MessageEvent) -> FriskyResponse:
if len(message.args) == 1:
plugin_name = message.args[0]
if plugin_name == 'help':
return 'Usage: `?help` or `?help <plugin_name>`'
plugin = self.get_plugin_by_name(plugin_name)
if plugin is None:
return f'No such plugin: `{plugin_name}`, try `?help` to list installed plugins'
if (help_text := plugin.help_text()) is None:
return f'Plugin `{plugin_name}` does not provide help text.'
return help_text
plugins = self.get_plugin_names()
joined_string = ', '.join(plugins)
return f'Available plugins: {joined_string}'
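# Example interaction (plugin names are hypothetical):
#   ?help        -> 'Available plugins: help, ping, ...'
#   ?help ping   -> the ping plugin's help_text(), if it provides one
#   ?help bogus  -> 'No such plugin: `bogus`, try `?help` to list installed plugins'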
| 40.208333
| 96
| 0.643523
| 111
| 965
| 5.441441
| 0.432432
| 0.099338
| 0.043046
| 0.043046
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002801
| 0.260104
| 965
| 23
| 97
| 41.956522
| 0.843137
| 0
| 0
| 0
| 0
| 0
| 0.210363
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.157895
| 0
| 0.578947
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d71751143901cbe72d8513a42c3b74da3d29bf0
| 998
|
py
|
Python
|
composer/models/ssd/ssd_hparams.py
|
anisehsani/composer
|
42599682d50409b4a4eb7c91fad85d67418cee13
|
[
"Apache-2.0"
] | null | null | null |
composer/models/ssd/ssd_hparams.py
|
anisehsani/composer
|
42599682d50409b4a4eb7c91fad85d67418cee13
|
[
"Apache-2.0"
] | null | null | null |
composer/models/ssd/ssd_hparams.py
|
anisehsani/composer
|
42599682d50409b4a4eb7c91fad85d67418cee13
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 MosaicML. All Rights Reserved.
from dataclasses import dataclass
import yahp as hp
from composer.models.model_hparams import ModelHparams
@dataclass
class SSDHparams(ModelHparams):
input_size: int = hp.optional(
doc="input size",
default=300,
)
num_classes: int = hp.optional(
doc="num_classes",
default=80,
)
overlap_threshold: float = hp.optional(
doc="threshold",
default=0.5,
)
nms_max_detections: int = hp.optional(
doc="nms max dets",
default=200,
)
data: str = hp.optional(
doc="data",
default="/localdisk/coco",
)
def initialize_object(self):
from composer.models.ssd.ssd import SSD
return SSD(
input_size=self.input_size,
overlap_threshold=self.overlap_threshold,
nms_max_detections=self.nms_max_detections,
num_classes=self.num_classes,
data=self.data,
)
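# A minimal usage sketch (values are illustrative; assumes any fields required
# by the base ModelHparams are optional or supplied elsewhere):
#   hparams = SSDHparams(input_size=300, num_classes=80)
#   model = hparams.initialize_object()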
| 22.681818
| 55
| 0.617234
| 114
| 998
| 5.245614
| 0.438596
| 0.083612
| 0.108696
| 0.080268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019774
| 0.290581
| 998
| 43
| 56
| 23.209302
| 0.824859
| 0.04509
| 0
| 0
| 0
| 0
| 0.064143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.117647
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d73808fab2e4c633d3b7d43187bc4821f1bfb77
| 1,303
|
py
|
Python
|
src/lib/base_dataset.py
|
CvHadesSun/Camera-Calibration
|
5c054672749aa0b3be1bdff8b8f4f3d2fcf3ee85
|
[
"MIT"
] | null | null | null |
src/lib/base_dataset.py
|
CvHadesSun/Camera-Calibration
|
5c054672749aa0b3be1bdff8b8f4f3d2fcf3ee85
|
[
"MIT"
] | null | null | null |
src/lib/base_dataset.py
|
CvHadesSun/Camera-Calibration
|
5c054672749aa0b3be1bdff8b8f4f3d2fcf3ee85
|
[
"MIT"
] | null | null | null |
from os.path import join
from utils import getFileList
class ImageFolder:
def __init__(self, path, sub=None, annot='annot') -> None:
self.root = path
self.image = 'images'
self.annot = annot
self.image_root = join(path, self.image)
self.annot_root = join(path, self.annot)
self.annot_root_tmp = join(path, self.annot + '_tmp')
if sub is None:
self.imgnames = getFileList(self.image_root, ext='.jpg')
self.annnames = getFileList(self.annot_root, ext='.json')
else:
self.imgnames = getFileList(join(self.image_root, sub), ext='.jpg')
self.annnames = getFileList(join(self.annot_root, sub), ext='.json')
self.imgnames = [join(sub, name) for name in self.imgnames]
self.annnames = [join(sub, name) for name in self.annnames]
self.isTmp = True
assert len(self.imgnames) == len(self.annnames)
def __getitem__(self, index):
imgname = join(self.image_root, self.imgnames[index])
if self.isTmp:
annname = join(self.annot_root_tmp, self.annnames[index])
else:
annname = join(self.annot_root, self.annnames[index])
return imgname, annname
def __len__(self):
return len(self.imgnames)
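# A minimal usage sketch (paths and the sub-folder name are hypothetical):
#   dataset = ImageFolder('/data/calib', sub='cam0')
#   img_path, ann_path = dataset[0]  # annotations come from annot_tmp while isTmp is True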
| 40.71875
| 80
| 0.61934
| 166
| 1,303
| 4.710843
| 0.23494
| 0.103581
| 0.099744
| 0.065217
| 0.196931
| 0.061381
| 0.061381
| 0
| 0
| 0
| 0
| 0
| 0.261704
| 1,303
| 32
| 81
| 40.71875
| 0.81289
| 0
| 0
| 0.068966
| 0
| 0
| 0.025307
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 1
| 0.103448
| false
| 0
| 0.068966
| 0.034483
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d73d6f049758b5497d67b41cd027577eaf0250d
| 1,704
|
py
|
Python
|
main.py
|
sunkr1995/genetic-drawing
|
6e5cc755a55c1994770c3f18fb14f1cc651bb700
|
[
"MIT"
] | null | null | null |
main.py
|
sunkr1995/genetic-drawing
|
6e5cc755a55c1994770c3f18fb14f1cc651bb700
|
[
"MIT"
] | null | null | null |
main.py
|
sunkr1995/genetic-drawing
|
6e5cc755a55c1994770c3f18fb14f1cc651bb700
|
[
"MIT"
] | null | null | null |
'''
Author: your name
Date: 2021-06-18 10:13:00
LastEditTime: 2021-07-08 14:13:07
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: /genetic-drawing/main.py
'''
import cv2
import os
import time
from IPython.display import clear_output
from genetic_drawing import *
gen = GeneticDrawing('03.jpg', seed=time.time())
out = gen.generate(400, 50)
brushesRange = np.array([[0.1, 0.3], [0.3, 0.7]])
for i in range(len(gen.imgBuffer)):
cv2.imwrite(os.path.join("out", f"{i:06d}.png"), gen.imgBuffer[i])
try:
for i in range(5):
brushesRange_tmp = brushesRange/(2**(i+1))
gen.brushesRange = brushesRange_tmp.tolist()
maskname = "masks-03/mask-{}.jpg".format(i)
gen.sampling_mask = cv2.cvtColor(cv2.imread(maskname), cv2.COLOR_BGR2GRAY)
#keep drawing on top of our previous result
out = gen.generate(100, 30)
for i in range(len(gen.imgBuffer)):
cv2.imwrite(os.path.join("out", f"{i:06d}.png"), gen.imgBuffer[i])
except Exception:
if not os.path.exists('out'):
os.mkdir("out")
for i in range(len(gen.imgBuffer)):
cv2.imwrite(os.path.join("out", f"{i:06d}.png"), gen.imgBuffer[i])
#brushesRange_tmp = brushesRange/100
#gen.brushesRange = brushesRange_tmp.tolist()
##gen.brushesRange = [[0.005, 0.015],[0.015, 0.035]]
#gen.sampling_mask = cv2.cvtColor(cv2.imread("masks/mask-end.jpg"), cv2.COLOR_BGR2GRAY)
#
##keep drawing on top of our previous result
#out = gen.generate(50, 30)
#save all the images from the image buffer
if not os.path.exists('out'):
os.mkdir("out")
for i in range(len(gen.imgBuffer)):
cv2.imwrite(os.path.join("out", f"{i:06d}.png"), gen.imgBuffer[i])
| 34.08
| 87
| 0.669601
| 271
| 1,704
| 4.173432
| 0.372694
| 0.084881
| 0.026525
| 0.04863
| 0.541114
| 0.477454
| 0.477454
| 0.415561
| 0.415561
| 0.415561
| 0
| 0.067975
| 0.162559
| 1,704
| 50
| 88
| 34.08
| 0.724597
| 0.32277
| 0
| 0.428571
| 0
| 0
| 0.082674
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.178571
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d740fa3ec721433e495424e2743d9af67d910eb
| 10,991
|
py
|
Python
|
flair/models/sandbox/simple_sequence_tagger_model.py
|
bratao/flair
|
67b53cc2a615a2e2a4e552d6f787c2efa708a939
|
[
"MIT"
] | null | null | null |
flair/models/sandbox/simple_sequence_tagger_model.py
|
bratao/flair
|
67b53cc2a615a2e2a4e552d6f787c2efa708a939
|
[
"MIT"
] | null | null | null |
flair/models/sandbox/simple_sequence_tagger_model.py
|
bratao/flair
|
67b53cc2a615a2e2a4e552d6f787c2efa708a939
|
[
"MIT"
] | null | null | null |
import logging
from typing import List, Union, Optional
import torch
import torch.nn
import torch.nn.functional as F
from tqdm import tqdm
import flair.nn
from flair.data import Dictionary, Sentence, Label
from flair.datasets import SentenceDataset, DataLoader
from flair.embeddings import TokenEmbeddings
from flair.training_utils import store_embeddings
log = logging.getLogger("flair")
class SimpleSequenceTagger(flair.nn.Classifier):
"""
This class is a simple version of the SequenceTagger class.
The purpose of this class is to demonstrate the basic hierarchy of a
sequence tagger (this could be helpful for new developers).
It only uses the given embeddings and maps them with a linear layer to
the tag_dictionary dimension.
Thus, this class misses following functionalities from the SequenceTagger:
- CRF,
- RNN,
- Reprojection.
As a result, only poor results can be expected.
"""
def __init__(
self,
embeddings: TokenEmbeddings,
tag_dictionary: Dictionary,
tag_type: str,
):
"""
Initializes a SimpleSequenceTagger
:param embeddings: word embeddings used in tagger
:param tag_dictionary: dictionary of tags you want to predict
:param tag_type: string identifier for tag type
"""
super(SimpleSequenceTagger, self).__init__()
# embeddings
self.embeddings = embeddings
# dictionaries
self.tag_dictionary: Dictionary = tag_dictionary
self.tag_type: str = tag_type
self.tagset_size: int = len(tag_dictionary)
# linear layer
self.linear = torch.nn.Linear(self.embeddings.embedding_length, len(tag_dictionary))
# all parameters will be pushed internally to the specified device
self.to(flair.device)
def forward_loss(
self, data_points: Union[List[Sentence], Sentence], sort=True
) -> torch.tensor:
features = self.forward(data_points)
return self._calculate_loss(features, data_points)
def _get_state_dict(self):
model_state = {
"state_dict": self.state_dict(),
"embeddings": self.embeddings,
"tag_dictionary": self.tag_dictionary,
"tag_type": self.tag_type,
}
return model_state
@staticmethod
def _init_model_with_state_dict(state):
model = SimpleSequenceTagger(
embeddings=state["embeddings"],
tag_dictionary=state["tag_dictionary"],
tag_type=state["tag_type"],
)
model.load_state_dict(state["state_dict"])
return model
def predict(
self,
sentences: Union[List[Sentence], Sentence],
mini_batch_size=32,
all_tag_prob: bool = False,
verbose: bool = False,
label_name: Optional[str] = None,
return_loss=False,
embedding_storage_mode="none",
):
"""
Predict sequence tags for Named Entity Recognition task
:param sentences: a Sentence or a List of Sentence
:param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory,
up to a point when it has no more effect.
:param all_tag_prob: True to compute the score for each tag on each token,
otherwise only the score of the best tag is returned
:param verbose: set to True to display a progress bar
:param return_loss: set to True to return loss
:param label_name: set this to change the name of the label type that is predicted
:param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if
you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
'gpu' to store embeddings in GPU memory.
"""
if label_name is None:
label_name = self.tag_type
with torch.no_grad():
if not sentences:
return sentences
if isinstance(sentences, Sentence):
sentences = [sentences]
# reverse sort all sequences by their length
rev_order_len_index = sorted(
range(len(sentences)), key=lambda k: len(sentences[k]), reverse=True
)
reordered_sentences: List[Union[Sentence, str]] = [
sentences[index] for index in rev_order_len_index
]
dataloader = DataLoader(
dataset=SentenceDataset(reordered_sentences), batch_size=mini_batch_size
)
# progress bar for verbosity
if verbose:
dataloader = tqdm(dataloader)
overall_loss = 0
batch_no = 0
for batch in dataloader:
batch_no += 1
if verbose:
dataloader.set_description(f"Inferencing on batch {batch_no}")
batch = self._filter_empty_sentences(batch)
# stop if all sentences are empty
if not batch:
continue
feature = self.forward(batch)
if return_loss:
overall_loss += self._calculate_loss(feature, batch)
tags, all_tags = self._obtain_labels(
feature=feature,
batch_sentences=batch,
get_all_tags=all_tag_prob,
)
for (sentence, sent_tags) in zip(batch, tags):
for (token, tag) in zip(sentence.tokens, sent_tags):
token.add_tag_label(label_name, tag)
# all_tags will be empty if all_tag_prob is set to False, so the for loop will be avoided
for (sentence, sent_all_tags) in zip(batch, all_tags):
for (token, token_all_tags) in zip(sentence.tokens, sent_all_tags):
token.add_tags_proba_dist(label_name, token_all_tags)
# clearing token embeddings to save memory
store_embeddings(batch, storage_mode=embedding_storage_mode)
if return_loss:
return overall_loss / batch_no
def forward(self, sentences: List[Sentence]):
self.embeddings.embed(sentences)
names = self.embeddings.get_names()
lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
longest_token_sequence_in_batch: int = max(lengths)
pre_allocated_zero_tensor = torch.zeros(
self.embeddings.embedding_length * longest_token_sequence_in_batch,
dtype=torch.float,
device=flair.device,
)
all_embs = list()
for sentence in sentences:
all_embs += [
emb for token in sentence for emb in token.get_each_embedding(names)
]
nb_padding_tokens = longest_token_sequence_in_batch - len(sentence)
if nb_padding_tokens > 0:
t = pre_allocated_zero_tensor[
: self.embeddings.embedding_length * nb_padding_tokens
]
all_embs.append(t)
sentence_tensor = torch.cat(all_embs).view(
[
len(sentences),
longest_token_sequence_in_batch,
self.embeddings.embedding_length,
]
)
features = self.linear(sentence_tensor)
return features
def _calculate_loss(
self, features: torch.tensor, sentences: List[Sentence]
) -> float:
lengths: List[int] = [len(sentence.tokens) for sentence in sentences]
tag_list: List = []
for s_id, sentence in enumerate(sentences):
# get the tags in this sentence
tag_idx: List[int] = [
self.tag_dictionary.get_idx_for_item(token.get_tag(self.tag_type).value)
for token in sentence
]
# add tags as tensor
tag = torch.tensor(tag_idx, device=flair.device)
tag_list.append(tag)
score = 0
for sentence_feats, sentence_tags, sentence_length in zip(
features, tag_list, lengths
):
sentence_feats = sentence_feats[:sentence_length]
score += torch.nn.functional.cross_entropy(
sentence_feats, sentence_tags
)
score /= len(features)
return score
def _obtain_labels(
self,
feature: torch.Tensor,
batch_sentences: List[Sentence],
get_all_tags: bool,
) -> (List[List[Label]], List[List[List[Label]]]):
"""
Returns a tuple of two lists:
- The first list corresponds to the most likely `Label` per token in each sentence.
- The second list contains a probability distribution over all `Labels` for each token
in a sentence for all sentences.
"""
lengths: List[int] = [len(sentence.tokens) for sentence in batch_sentences]
tags = []
all_tags = []
feature = feature.cpu()
for index, length in enumerate(lengths):
feature[index, length:] = 0
softmax_batch = F.softmax(feature, dim=2).cpu()
scores_batch, prediction_batch = torch.max(softmax_batch, dim=2)
feature = zip(softmax_batch, scores_batch, prediction_batch)
for feats, length in zip(feature, lengths):
softmax, score, prediction = feats
confidences = score[:length].tolist()
tag_seq = prediction[:length].tolist()
scores = softmax[:length].tolist()
tags.append(
[
Label(self.tag_dictionary.get_item_for_index(tag), conf)
for conf, tag in zip(confidences, tag_seq)
]
)
if get_all_tags:
all_tags.append(
[
[
Label(
self.tag_dictionary.get_item_for_index(score_id), score
)
for score_id, score in enumerate(score_dist)
]
for score_dist in scores
]
)
return tags, all_tags
@staticmethod
def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:
filtered_sentences = [sentence for sentence in sentences if sentence.tokens]
if len(sentences) != len(filtered_sentences):
log.warning(
f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens."
)
return filtered_sentences
@property
def label_type(self):
return self.tag_type
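# A minimal usage sketch (embeddings, tag_dictionary and the corpus are assumed
# to exist; 'ner' is an illustrative tag type):
#   tagger = SimpleSequenceTagger(embeddings, tag_dictionary, tag_type='ner')
#   sentence = Sentence('George Washington went to Washington')
#   tagger.predict(sentence)
#   print(sentence.to_tagged_string())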
| 35.569579
| 111
| 0.592849
| 1,243
| 10,991
| 5.053097
| 0.225261
| 0.028976
| 0.013533
| 0.018468
| 0.074988
| 0.048878
| 0.038847
| 0.038847
| 0.038847
| 0.031842
| 0
| 0.001373
| 0.337185
| 10,991
| 309
| 112
| 35.569579
| 0.86081
| 0.19516
| 0
| 0.068627
| 0
| 0
| 0.023448
| 0.0028
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04902
| false
| 0
| 0.053922
| 0.004902
| 0.156863
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d7508b796c963b53ae0eb9f9680e4518db45e86
| 1,708
|
py
|
Python
|
exercise/xiaohuar/spider-xiaohuar.com.py
|
PorYoung/bigData-camp-8d
|
8fa31b48065da27fd1c4f8432232342cede6f56c
|
[
"MIT"
] | 1
|
2019-12-27T06:34:06.000Z
|
2019-12-27T06:34:06.000Z
|
exercise/xiaohuar/spider-xiaohuar.com.py
|
PorYoung/bigData-camp-8d
|
8fa31b48065da27fd1c4f8432232342cede6f56c
|
[
"MIT"
] | 1
|
2021-12-14T20:40:06.000Z
|
2021-12-14T20:40:06.000Z
|
exercise/xiaohuar/spider-xiaohuar.com.py
|
PorYoung/bigData-camp-8d
|
8fa31b48065da27fd1c4f8432232342cede6f56c
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
def spider_xiaohuar_content(url, headers):
response = requests.get(url=url, headers=headers)
print(response.status_code)
if response.status_code == 200:
response.encoding = 'utf-8'
html = response.content
# Arguments: the page content and the parser to use
soup = BeautifulSoup(html, 'html5lib')
div_list = soup.find_all('div', attrs={'class': 'all_lanmu'})
text = ''
file = open('爬虫校花.md', 'w', encoding='utf-8')
for div in div_list:
title_div = div.find('div', attrs={'class': 'title1000'})
title = title_div.find('a').string
text += '<style>img[src*="headimg-style"]{width:100px;height:100px}</style>\n\n## 标题:'+title+'\n\n'
ul = div.find('ul')
li_list = ul.find_all('li')
for li in li_list:
img_src = li.find('img').attrs['lazysrc']
a_href = li.find('a').attrs['href']
img_title = li.find('span').string
school = li.find('b', attrs={'class': 'b1'}).string
fav = li.find('b', attrs={'class': 'b2'}).string
if url not in img_src:
img_src = url+img_src
text += '> ' + img_title+'\n\n'
text += ''+'\n\n'
text += '- 学校:'+school+'\n\n'
text += '- 点赞人数:'+fav+'\n\n'
file.write(text)
file.close()
url = 'http://xiaohuar.com/'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'}
spider_xiaohuar_content(url, headers)
| 38.818182
| 136
| 0.538056
| 223
| 1,708
| 4.013453
| 0.41704
| 0.040223
| 0.020112
| 0.053631
| 0.107263
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039409
| 0.286885
| 1,708
| 44
| 137
| 38.818182
| 0.695402
| 0.00644
| 0
| 0
| 0
| 0.055556
| 0.220519
| 0.042453
| 0.027778
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.055556
| 0
| 0.083333
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d75c627939ebcaa3bf24644789f819936e04c59
| 749
|
py
|
Python
|
v1.1/auc_csv_merge.py
|
lz-pku-1997/so-many-tricks-for-Image-classification
|
3df7a0672f88219f893b0fa23c31ae6b30d01264
|
[
"MIT"
] | 2
|
2020-04-21T06:06:28.000Z
|
2020-12-27T12:35:57.000Z
|
v1.1/auc_csv_merge.py
|
lz-pku-1997/so-many-tricks-for-Image-classification
|
3df7a0672f88219f893b0fa23c31ae6b30d01264
|
[
"MIT"
] | null | null | null |
v1.1/auc_csv_merge.py
|
lz-pku-1997/so-many-tricks-for-Image-classification
|
3df7a0672f88219f893b0fa23c31ae6b30d01264
|
[
"MIT"
] | null | null | null |
#Try to read all csv files in the folder directly; remember to check the list to make sure the right files were read
import glob
import pandas as pd
import numpy as np
io = glob.glob(r"*.csv")
len_io = len(io)
print('Total number of input tables:', len_io)
prob_list=[]
for i in range(len_io):
sub_1 = pd.read_csv(io[i])
denominator=len(sub_1)
for my_classes in ['healthy','multiple_diseases','rust','scab']:
sub_label_1 = sub_1.loc[:, my_classes].values
sort_1=np.argsort(sub_label_1)
for rank, temp_sort in enumerate(sort_1):
sub_label_1[temp_sort] = rank/denominator
sub_1.loc[:,my_classes]=sub_label_1
prob_list.append(sub_1.loc[:,'healthy':].values)
sub_1.loc[:,'healthy':] = np.mean(prob_list,axis =0)
sub_1.to_csv('out/submission.csv', index=False)
print(sub_1.head())
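# What the loop above computes, in brief: for every input CSV, each class
# column is replaced by its within-column rank divided by the row count, so
# the np.mean over prob_list is a rank-average ensemble of the submissions
# rather than a mean of raw probabilities.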
| 31.208333
| 69
| 0.663551
| 124
| 749
| 3.75
| 0.403226
| 0.068817
| 0.077419
| 0.03871
| 0.068817
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02455
| 0.184246
| 749
| 24
| 70
| 31.208333
| 0.736498
| 0.038718
| 0
| 0
| 0
| 0
| 0.113343
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.15
| 0
| 0.15
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d76b727796967801234a59f7efe009b01c9e636
| 10,468
|
py
|
Python
|
masakari-7.0.0/masakari/objects/base.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | null | null | null |
masakari-7.0.0/masakari/objects/base.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
masakari-7.0.0/masakari/objects/base.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Copyright 2016 NTT Data.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Masakari common internal object model"""
import datetime
from oslo_utils import versionutils
from oslo_versionedobjects import base as ovoo_base
from oslo_versionedobjects import fields as obj_fields
from masakari import objects
def get_attrname(name):
"""Return the mangled name of the attribute's underlying storage."""
return '_obj_' + name
class MasakariObjectRegistry(ovoo_base.VersionedObjectRegistry):
notification_classes = []
def registration_hook(self, cls, index):
# NOTE(Dinesh_Bhor): This is called when an object is registered,
# and is responsible for maintaining masakari.objects.$OBJECT
# as the highest-versioned implementation of a given object.
version = versionutils.convert_version_to_tuple(cls.VERSION)
if not hasattr(objects, cls.obj_name()):
setattr(objects, cls.obj_name(), cls)
else:
cur_version = versionutils.convert_version_to_tuple(
getattr(objects, cls.obj_name()).VERSION)
if version >= cur_version:
setattr(objects, cls.obj_name(), cls)
@classmethod
def register_notification(cls, notification_cls):
"""Register a class as notification.
Use only to register concrete notification or payload classes,
do not register base classes intended for inheritance only.
"""
cls.register_if(False)(notification_cls)
cls.notification_classes.append(notification_cls)
return notification_cls
@classmethod
def register_notification_objects(cls):
"""Register previously decorated notification as normal ovos.
This is not intended for production use but only for testing and
document generation purposes.
"""
for notification_cls in cls.notification_classes:
cls.register(notification_cls)
remotable_classmethod = ovoo_base.remotable_classmethod
remotable = ovoo_base.remotable
class MasakariObject(ovoo_base.VersionedObject):
"""Base class and object factory.
This forms the base of all objects that can be remoted or instantiated
via RPC. Simply defining a class that inherits from this base class
will make it remotely instantiatable. Objects should implement the
necessary "get" classmethod routines as well as "save" object methods
as appropriate.
"""
OBJ_SERIAL_NAMESPACE = 'masakari_object'
OBJ_PROJECT_NAMESPACE = 'masakari'
def masakari_obj_get_changes(self):
"""Returns a dict of changed fields with tz unaware datetimes.
Any timezone aware datetime field will be converted to UTC timezone
and returned as timezone unaware datetime.
This will allow us to pass these fields directly to a db update
method as they can't have timezone information.
"""
# Get dirtied/changed fields
changes = self.obj_get_changes()
# Look for datetime objects that contain timezone information
for k, v in changes.items():
if isinstance(v, datetime.datetime) and v.tzinfo:
# Remove timezone information and adjust the time according to
# the timezone information's offset.
changes[k] = v.replace(tzinfo=None) - v.utcoffset()
# Return modified dict
return changes
def obj_reset_changes(self, fields=None, recursive=False):
"""Reset the list of fields that have been changed.
.. note::
- This is NOT "revert to previous values"
- Specifying fields on recursive resets will only be honored at the
top level. Everything below the top will reset all.
:param fields: List of fields to reset, or "all" if None.
:param recursive: Call obj_reset_changes(recursive=True) on
any sub-objects within the list of fields
being reset.
"""
if recursive:
for field in self.obj_get_changes():
# Ignore fields not in requested set (if applicable)
if fields and field not in fields:
continue
# Skip any fields that are unset
if not self.obj_attr_is_set(field):
continue
value = getattr(self, field)
# Don't reset nulled fields
if value is None:
continue
# Reset straight Object and ListOfObjects fields
if isinstance(self.fields[field], obj_fields.ObjectField):
value.obj_reset_changes(recursive=True)
elif isinstance(self.fields[field],
obj_fields.ListOfObjectsField):
for thing in value:
thing.obj_reset_changes(recursive=True)
if fields:
self._changed_fields -= set(fields)
else:
self._changed_fields.clear()
class MasakariObjectDictCompat(ovoo_base.VersionedObjectDictCompat):
def __iter__(self):
for name in self.obj_fields:
if (self.obj_attr_is_set(name) or
name in self.obj_extra_fields):
yield name
def keys(self):
return list(self)
class MasakariTimestampObject(object):
"""Mixin class for db backed objects with timestamp fields.
Sqlalchemy models that inherit from the oslo_db TimestampMixin will include
these fields and the corresponding objects will benefit from this mixin.
"""
fields = {
'created_at': obj_fields.DateTimeField(nullable=True),
'updated_at': obj_fields.DateTimeField(nullable=True),
}
class MasakariPersistentObject(object):
"""Mixin class for Persistent objects.
This adds the fields that we use in common for most persistent objects.
"""
fields = {
'created_at': obj_fields.DateTimeField(nullable=True),
'updated_at': obj_fields.DateTimeField(nullable=True),
'deleted_at': obj_fields.DateTimeField(nullable=True),
'deleted': obj_fields.BooleanField(default=False),
}
class ObjectListBase(ovoo_base.ObjectListBase):
@classmethod
def _obj_primitive_key(cls, field):
return 'masakari_object.%s' % field
@classmethod
def _obj_primitive_field(cls, primitive, field,
default=obj_fields.UnspecifiedDefault):
key = cls._obj_primitive_key(field)
if default == obj_fields.UnspecifiedDefault:
return primitive[key]
else:
return primitive.get(key, default)
class MasakariObjectSerializer(ovoo_base.VersionedObjectSerializer):
"""A Masakari Object Serializer.
This implements the Oslo Serializer interface and provides
the ability to serialize and deserialize MasakariObject entities.
Any service that needs to accept or return MasakariObjects
as arguments or result values should pass this to its RPCClient
and RPCServer objects.
"""
OBJ_BASE_CLASS = MasakariObject
def __init__(self):
super(MasakariObjectSerializer, self).__init__()
def obj_make_list(context, list_obj, item_cls, db_list, **extra_args):
"""Construct an object list from a list of primitives.
This calls item_cls._from_db_object() on each item of db_list, and
adds the resulting object to list_obj.
:param:context: Request context
:param:list_obj: An ObjectListBase object
:param:item_cls: The MasakariObject class of the objects within the list
:param:db_list: The list of primitives to convert to objects
:param:extra_args: Extra arguments to pass to _from_db_object()
:returns: list_obj
"""
list_obj.objects = []
for db_item in db_list:
item = item_cls._from_db_object(context, item_cls(), db_item,
**extra_args)
list_obj.objects.append(item)
list_obj._context = context
list_obj.obj_reset_changes()
return list_obj
def obj_to_primitive(obj):
"""Recursively turn an object into a python primitive.
A MasakariObject becomes a dict, and anything that implements
ObjectListBase becomes a list.
"""
if isinstance(obj, ObjectListBase):
return [obj_to_primitive(x) for x in obj]
elif isinstance(obj, MasakariObject):
result = {}
for key in obj.obj_fields:
if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields:
result[key] = obj_to_primitive(getattr(obj, key))
return result
else:
return obj
def obj_equal_prims(obj_1, obj_2, ignore=None):
"""Compare two primitives for equivalence ignoring some keys.
This operation tests the primitives of two objects for equivalence.
Object primitives may contain a list identifying fields that have been
changed - this is ignored in the comparison. The ignore parameter lists
any other keys to be ignored.
:param:obj_1: The first object in the comparison
:param:obj_2: The second object in the comparison
:param:ignore: A list of fields to ignore
:returns: True if the primitives are equal ignoring changes
and specified fields, otherwise False.
"""
def _strip(prim, keys):
if isinstance(prim, dict):
for k in keys:
prim.pop(k, None)
for v in prim.values():
_strip(v, keys)
if isinstance(prim, list):
for v in prim:
_strip(v, keys)
return prim
if ignore is not None:
keys = ['masakari_object.changes'] + ignore
else:
keys = ['masakari_object.changes']
prim_1 = _strip(obj_1.obj_to_primitive(), keys)
prim_2 = _strip(obj_2.obj_to_primitive(), keys)
return prim_1 == prim_2
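# A minimal illustration (hypothetical object): for a MasakariObject whose only
# set field is name='seg1', obj_to_primitive(obj) returns {'name': 'seg1'}, and
# an ObjectListBase of such objects becomes a plain list of those dicts.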
| 35.364865
| 79
| 0.664215
| 1,296
| 10,468
| 5.220679
| 0.26929
| 0.017292
| 0.011085
| 0.017736
| 0.11277
| 0.06444
| 0.034585
| 0.027195
| 0.027195
| 0.027195
| 0
| 0.002355
| 0.26987
| 10,468
| 295
| 80
| 35.484746
| 0.882899
| 0.416221
| 0
| 0.166667
| 0
| 0
| 0.026246
| 0.008103
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113636
| false
| 0
| 0.037879
| 0.015152
| 0.348485
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d7ad5477f4bf8f12192323e1ee2103954aa57db
| 3,925
|
py
|
Python
|
twitter_bot/MyBot.py
|
diem-ai/datascience-projects
|
deef93217bd3b0cfc2ca7802933142d1dad7fcba
|
[
"MIT"
] | null | null | null |
twitter_bot/MyBot.py
|
diem-ai/datascience-projects
|
deef93217bd3b0cfc2ca7802933142d1dad7fcba
|
[
"MIT"
] | null | null | null |
twitter_bot/MyBot.py
|
diem-ai/datascience-projects
|
deef93217bd3b0cfc2ca7802933142d1dad7fcba
|
[
"MIT"
] | null | null | null |
"""
Class SaleBot
It is initialised by nlp model (bag-of-word, tf-idf, word2vec)
It returns response with a question as the input
"""
from gensim.corpora import Dictionary
#from gensim.models import FastText
from gensim.models import Word2Vec , WordEmbeddingSimilarityIndex
from gensim.similarities import SoftCosineSimilarity, SparseTermSimilarityMatrix
from gensim.models import TfidfModel
from multiprocessing import cpu_count
from nlp_helper import preprocessing
class AskeBayBot:
"""
- Using tf-idf and word2vec to build vector matrix from the corpus
- Using soft-cosine similarity to calculate the similarity between query and matrix
"""
"""
References:
- https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/soft_cosine_tutorial.ipynb
"""
def __init__(self, questions, responses, model_type="word2vec"):
self.questions = questions
self.responses = responses
self.model_type = model_type
self.docsim_index = []
self.dictionary = []
self.tfidf = []
self.compute_sim_matrix()
def compute_sim_matrix(self):
'''
if(self.model_type.lower() == "fasttext"):
model = FastText(self.questions)
else:
model = Word2Vec(self.questions)
'''
self.dictionary = Dictionary(self.questions)
self.tfidf = TfidfModel(dictionary = self.dictionary)
word2vec_model = Word2Vec(self.questions
, workers=cpu_count()
, min_count=5
, size=300
, seed=12345)
sim_index = WordEmbeddingSimilarityIndex(word2vec_model.wv)
sim_matrix = SparseTermSimilarityMatrix(sim_index
, self.dictionary
, self.tfidf
, nonzero_limit=100)
bow_corpus = [self.dictionary.doc2bow(document) for document in self.questions]
tfidf_corpus = [self.tfidf[bow] for bow in bow_corpus]
self.docsim_index = SoftCosineSimilarity(tfidf_corpus, sim_matrix, num_best=10)
def get_similarities(self, question):
'''
@return indices of anwsers whose questions are similar to the input question
'''
vectorizer = self.dictionary.doc2bow(preprocessing(question))
tfidf_vectorizer = self.tfidf[vectorizer]
similarities = self.docsim_index[tfidf_vectorizer]
return similarities
def get_response(self, question):
similarities = self.get_similarities(question)
return self.get_sim(similarities, 1)
def get_all_responses(self, question):
similarities = self.get_similarities(question)
return self.get_sim(similarities, 10)
def get_sim(self, similarities, n_top=1):
"""
@return a tuple of similar question and best response in similarity matrix
"""
sim_questions = []
sim_responses = []
sim_scores = []
if (len(similarities) > 0):
for (idx, score) in similarities:
if (idx < len(self.responses)):
sim_questions.append(self.questions[idx])
sim_responses.append(self.responses[idx])
sim_scores.append(score)
# return self.questions[idx], self.responses[idx], score
else:
return "Just a moment, someone will contact you"
if (n_top == 1):
return sim_questions[0], sim_responses[0], sim_scores[0]
else:
return sim_questions, sim_responses, sim_scores
if __name__ == "__main__":
print("I'm a bot")
| 37.380952
| 105
| 0.592866
| 399
| 3,925
| 5.666667
| 0.323308
| 0.051747
| 0.02123
| 0.029191
| 0.125608
| 0.10084
| 0.10084
| 0.069881
| 0.069881
| 0.069881
| 0
| 0.012495
| 0.327134
| 3,925
| 105
| 106
| 37.380952
| 0.84362
| 0.163822
| 0
| 0.066667
| 0
| 0
| 0.02191
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.316667
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d8165f8ce202fddd44b2d3bc70e29ad7d9245a2
| 1,482
|
py
|
Python
|
hail_scripts/v01/convert_tsv_to_vds.py
|
NLSVTN/hail-elasticsearch-pipelines
|
8b895a2e46a33d347dd2a1024101a6d515027a03
|
[
"MIT"
] | null | null | null |
hail_scripts/v01/convert_tsv_to_vds.py
|
NLSVTN/hail-elasticsearch-pipelines
|
8b895a2e46a33d347dd2a1024101a6d515027a03
|
[
"MIT"
] | null | null | null |
hail_scripts/v01/convert_tsv_to_vds.py
|
NLSVTN/hail-elasticsearch-pipelines
|
8b895a2e46a33d347dd2a1024101a6d515027a03
|
[
"MIT"
] | null | null | null |
import argparse as ap
import hail
from pprint import pprint
import time
from hail_scripts.v01.utils.vds_utils import write_vds
p = ap.ArgumentParser(description="Convert a tsv table to a .vds")
p.add_argument("-c", "--chrom-column", required=True)
p.add_argument("-p", "--pos-column", required=True)
p.add_argument("-r", "--ref-column", required=True)
p.add_argument("-a", "--alt-column", required=True)
p.add_argument("table_path", nargs="+")
args = p.parse_args()
print(", ".join(args.vcf_path))
hc = hail.HailContext(log="./hail_{}.log".format(time.strftime("%y%m%d_%H%M%S")))
for table_path in args.table_path:
print("\n")
print("==> import_table: %s" % table_path)
output_path = table_path.replace(".tsv", "").replace(".gz", "").replace(".bgz", "") + ".vds"
print("==> output: %s" % output_path)
kt = hc.import_table(table_path, impute=True, no_header=args.no_header, delimiter=args.delimiter, missing=args.missing_value, min_partitions=1000)
#kt = kt.drop(columns_to_drop)
#kt = kt.rename(rename_columns)
kt = kt.filter("%(ref_column)s == %(alt_column)s" % args.__dict__, keep=False)
kt = kt.annotate("variant=Variant(%(chrom_column)s, %(pos_column)s, %(ref_column)s, %(alt_column)s)" % args.__dict__)
kt = kt.key_by('variant')
kt = kt.drop([args.chrom_column, args.pos_column, args.ref_column, args.alt_column])
vds = hail.VariantDataset.from_table(kt)
pprint(vds.variant_schema)
write_vds(vds, output_path)
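For orientation, a hypothetical invocation; the column names and file path are placeholders, not values from the repository:
# python convert_tsv_to_vds.py -c chrom -p pos -r ref -a alt my_variants.tsv.gz
# -> writes my_variants.vds alongside the input table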
| 36.146341
| 150
| 0.690958
| 227
| 1,482
| 4.281938
| 0.348018
| 0.055556
| 0.061728
| 0.078189
| 0.18107
| 0.18107
| 0.057613
| 0.057613
| 0
| 0
| 0
| 0.004615
| 0.122807
| 1,482
| 40
| 151
| 37.05
| 0.743077
| 0.039811
| 0
| 0
| 0
| 0.037037
| 0.209008
| 0.023223
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.259259
| 0
| 0.259259
| 0.222222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d81808e7a83247fd981f349fc73abe0b9de1e1e
| 4,649
|
py
|
Python
|
scripts/Old/fixSequenceIDs.py
|
paepcke/json_to_relation
|
acfa58d540f8f51d1d913d0c173ee3ded1b6c2a9
|
[
"BSD-3-Clause"
] | 4
|
2015-10-10T19:09:49.000Z
|
2021-09-02T00:58:06.000Z
|
scripts/Old/fixSequenceIDs.py
|
paepcke/json_to_relation
|
acfa58d540f8f51d1d913d0c173ee3ded1b6c2a9
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/Old/fixSequenceIDs.py
|
paepcke/json_to_relation
|
acfa58d540f8f51d1d913d0c173ee3ded1b6c2a9
|
[
"BSD-3-Clause"
] | 8
|
2015-05-16T14:33:33.000Z
|
2019-10-24T08:56:25.000Z
|
#!/usr/bin/env python
# Copyright (c) 2014, Stanford University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on Dec 22, 2013
@author: paepcke
'''
import os
import re
import sys
from edxTrackLogJSONParser import EdXTrackLogJSONParser
from modulestoreImporter import ModulestoreImporter
from unidecode import unidecode
idExtractPat = re.compile(r'^"([^"]*)')
seqIDExtractPat = re.compile(r'","([^"]*)')
hashLookup = ModulestoreImporter(os.path.join(os.path.dirname(__file__),'data/modulestore_latest.json'),
useCache=True)
def makeInsertSafe(unsafeStr):
'''
Makes the given string safe for use as a value in a MySQL INSERT
statement. Looks for embedded CR or LFs, and turns them into
semicolons. Escapes commas and single quotes. Backslash is
replaced by double backslash. This is needed for unicode, like
\0245 (invented example)
@param unsafeStr: string that possibly contains unsafe chars
@type unsafeStr: String
@return: same string, with unsafe chars properly replaced or escaped
@rtype: String
'''
#return unsafeStr.replace("'", "\\'").replace('\n', "; ").replace('\r', "; ").replace(',', "\\,").replace('\\', '\\\\')
if unsafeStr is None or not isinstance(unsafeStr, basestring) or len(unsafeStr) == 0:
return ''
# Check for chars > 128 (illegal for standard ASCII):
for oneChar in unsafeStr:
if ord(oneChar) > 128:
# unidecode() replaces unicode with approximations.
# I tried all sorts of escapes, and nothing worked
# for all cases, except this:
unsafeStr = unidecode(unicode(unsafeStr))
break
return unsafeStr.replace('\n', "; ").replace('\r', "; ").replace('\\', '').replace("'", r"\'")
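# Illustrative behaviour of makeInsertSafe (hand-derived from the replacements
# above, not from a real run):
#   makeInsertSafe("O'Brien,\nnext line")  returns  O\'Brien,; next line
#   i.e. the newline becomes '; ' and the quote is escaped for MySQL.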
def fixSequenceIDs():
counter = 0
with open('/home/paepcke/tmp/sequenceIDs.sql','w') as outfd:
outfd.write("USE Edx;\nINSERT INTO EdxTrackEvent(_id,resource_display_name)\n")
with open('/home/paepcke/tmp/sequenceIDs.csv','r') as fd:
for idSeqID in fd:
sqlid = idExtractPat.search(idSeqID).group(1)
seqID = seqIDExtractPat.search(idSeqID).group(1)
resourceNameMatch = EdXTrackLogJSONParser.findHashPattern.search(seqID)
if resourceNameMatch is not None:
resourceName = makeInsertSafe(hashLookup.getDisplayName(resourceNameMatch.group(1)))
if counter == 0:
outfd.write('("%s","%s")' % (sqlid,resourceName))
else:
outfd.write(',\n("%s","%s")' % (sqlid,resourceName))
else:
continue
counter += 1
#if counter > 10:
# break
outfd.write("\nON DUPLICATE KEY UPDATE resource_display_name = VALUES(resource_display_name);\n")
print("Created %d corrections." % counter)
if __name__ == '__main__':
fixSequenceIDs()
#INSERT INTO EdxTrackEvent (_id,long_answer) VALUES ('fbcefe06_fb7c_48aa_a12e_d85e6988dbda','first answer'),('bbd3ddf3_8ed0_4eee_8ff7_f5791b9e4a7e','second answer') ON DUPLICATE KEY UPDATE long_answer=VALUES(long_answer);
| 54.05814
| 757
| 0.687245
| 568
| 4,649
| 5.568662
| 0.489437
| 0.012646
| 0.018021
| 0.014543
| 0.112551
| 0.082833
| 0.042997
| 0.042997
| 0.042997
| 0.042997
| 0
| 0.016218
| 0.217466
| 4,649
| 85
| 758
| 54.694118
| 0.853216
| 0.544203
| 0
| 0.05
| 0
| 0
| 0.161527
| 0.092511
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.175
| 0
| 0.275
| 0.025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d818b86a7daa5558c49d73a26208235e0d52b89
| 8,433
|
py
|
Python
|
tests/test_logger_device.py
|
ska-telescope/lmc-base-classes
|
e3ac46a731aca4d49d53747b4352ec4be089ff5d
|
[
"BSD-3-Clause"
] | 3
|
2019-04-18T20:46:02.000Z
|
2019-07-30T17:47:40.000Z
|
tests/test_logger_device.py
|
ska-telescope/lmc-base-classes
|
e3ac46a731aca4d49d53747b4352ec4be089ff5d
|
[
"BSD-3-Clause"
] | 26
|
2018-10-30T07:50:50.000Z
|
2020-07-13T12:50:36.000Z
|
tests/test_logger_device.py
|
ska-telescope/lmc-base-classes
|
e3ac46a731aca4d49d53747b4352ec4be089ff5d
|
[
"BSD-3-Clause"
] | 4
|
2019-01-16T07:47:59.000Z
|
2021-06-01T11:17:32.000Z
|
#########################################################################################
# -*- coding: utf-8 -*-
#
# This file is part of the SKALogger project
#
#
#
#########################################################################################
"""Contain the tests for the SKALogger."""
import re
import pytest
from tango import DevState
from tango.test_context import MultiDeviceTestContext
from ska_tango_base.base import ReferenceBaseComponentManager
from ska_tango_base.logger_device import SKALogger
from ska_tango_base.subarray import SKASubarray
import tango
# PROTECTED REGION ID(SKALogger.test_additional_imports) ENABLED START #
from ska_tango_base.control_model import (
AdminMode,
ControlMode,
HealthState,
LoggingLevel,
SimulationMode,
TestMode,
)
# PROTECTED REGION END # // SKALogger.test_additional_imports
# PROTECTED REGION ID(SKALogger.test_SKALogger_decorators) ENABLED START #
@pytest.mark.usefixtures("tango_context", "initialize_device")
# PROTECTED REGION END # // SKALogger.test_SKALogger_decorators
class TestSKALogger(object):
"""
Test class for tests of the SKALogger device class.
"""
@pytest.fixture(scope="class")
def device_test_config(self, device_properties):
"""
Fixture that specifies the device to be tested, along with its
properties and memorized attributes.
"""
return {
"device": SKALogger,
"component_manager_patch": lambda self: ReferenceBaseComponentManager(
self.op_state_model, logger=self.logger
),
"properties": device_properties,
"memorized": {"adminMode": str(AdminMode.ONLINE.value)},
}
@pytest.mark.skip("Not implemented")
def test_properties(self, tango_context):
# test the properties
# PROTECTED REGION ID(SKALogger.test_properties) ENABLED START #
# PROTECTED REGION END # // SKALogger.test_properties
pass
# PROTECTED REGION ID(SKALogger.test_State_decorators) ENABLED START #
# PROTECTED REGION END # // SKALogger.test_State_decorators
def test_State(self, tango_context):
"""Test for State"""
# PROTECTED REGION ID(SKALogger.test_State) ENABLED START #
assert tango_context.device.State() == DevState.OFF
# PROTECTED REGION END # // SKALogger.test_State
# PROTECTED REGION ID(SKALogger.test_Status_decorators) ENABLED START #
# PROTECTED REGION END # // SKALogger.test_Status_decorators
def test_Status(self, tango_context):
"""Test for Status"""
# PROTECTED REGION ID(SKALogger.test_Status) ENABLED START #
assert tango_context.device.Status() == "The device is in OFF state."
# PROTECTED REGION END # // SKALogger.test_Status
# PROTECTED REGION ID(SKALogger.test_GetVersionInfo_decorators) ENABLED START #
# PROTECTED REGION END # // SKALogger.test_GetVersionInfo_decorators
def test_GetVersionInfo(self, tango_context):
"""Test for GetVersionInfo"""
# PROTECTED REGION ID(SKALogger.test_GetVersionInfo) ENABLED START #
versionPattern = re.compile(
f"{tango_context.device.info().dev_class}, ska_tango_base, [0-9]+.[0-9]+.[0-9]+, "
"A set of generic base devices for SKA Telescope."
)
versionInfo = tango_context.device.GetVersionInfo()
assert (re.match(versionPattern, versionInfo[0])) is not None
# PROTECTED REGION END # // SKALogger.test_GetVersionInfo
# PROTECTED REGION ID(SKALogger.test_buildState_decorators) ENABLED START #
# PROTECTED REGION END # // SKALogger.test_buildState_decorators
def test_buildState(self, tango_context):
"""Test for buildState"""
# PROTECTED REGION ID(SKALogger.test_buildState) ENABLED START #
buildPattern = re.compile(
r"ska_tango_base, [0-9]+.[0-9]+.[0-9]+, "
r"A set of generic base devices for SKA Telescope"
)
assert (re.match(buildPattern, tango_context.device.buildState)) is not None
# PROTECTED REGION END # // SKALogger.test_buildState
# PROTECTED REGION ID(SKALogger.test_versionId_decorators) ENABLED START #
# PROTECTED REGION END # // SKALogger.test_versionId_decorators
def test_versionId(self, tango_context):
"""Test for versionId"""
# PROTECTED REGION ID(SKALogger.test_versionId) ENABLED START #
versionIdPattern = re.compile(r"[0-9]+.[0-9]+.[0-9]+")
assert (re.match(versionIdPattern, tango_context.device.versionId)) is not None
# PROTECTED REGION END # // SKALogger.test_versionId
# PROTECTED REGION ID(SKALogger.test_loggingLevel_decorators) ENABLED START #
# PROTECTED REGION END # // SKALogger.test_loggingLevel_decorators
def test_loggingLevel(self, tango_context):
"""Test for loggingLevel"""
# PROTECTED REGION ID(SKALogger.test_loggingLevel) ENABLED START #
assert tango_context.device.loggingLevel == LoggingLevel.INFO
# PROTECTED REGION END # // SKALogger.test_loggingLevel
# PROTECTED REGION ID(SKALogger.test_healthState_decorators) ENABLED START #
# PROTECTED REGION END # // SKALogger.test_healthState_decorators
def test_healthState(self, tango_context):
"""Test for healthState"""
# PROTECTED REGION ID(SKALogger.test_healthState) ENABLED START #
assert tango_context.device.healthState == HealthState.OK
# PROTECTED REGION END # // SKALogger.test_healthState
# PROTECTED REGION ID(SKALogger.test_adminMode_decorators) ENABLED START #
# PROTECTED REGION END # // SKALogger.test_adminMode_decorators
def test_adminMode(self, tango_context):
"""Test for adminMode"""
# PROTECTED REGION ID(SKALogger.test_adminMode) ENABLED START #
assert tango_context.device.adminMode == AdminMode.ONLINE
# PROTECTED REGION END # // SKALogger.test_adminMode
# PROTECTED REGION ID(SKALogger.test_controlMode_decorators) ENABLED START #
# PROTECTED REGION END # // SKALogger.test_controlMode_decorators
def test_controlMode(self, tango_context):
"""Test for controlMode"""
# PROTECTED REGION ID(SKALogger.test_controlMode) ENABLED START #
assert tango_context.device.controlMode == ControlMode.REMOTE
# PROTECTED REGION END # // SKALogger.test_controlMode
# PROTECTED REGION ID(SKALogger.test_simulationMode_decorators) ENABLED START #
# PROTECTED REGION END # // SKALogger.test_simulationMode_decorators
def test_simulationMode(self, tango_context):
"""Test for simulationMode"""
# PROTECTED REGION ID(SKALogger.test_simulationMode) ENABLED START #
assert tango_context.device.simulationMode == SimulationMode.FALSE
# PROTECTED REGION END # // SKALogger.test_simulationMode
# PROTECTED REGION ID(SKALogger.test_testMode_decorators) ENABLED START #
# PROTECTED REGION END # // SKALogger.test_testMode_decorators
def test_testMode(self, tango_context):
"""Test for testMode"""
# PROTECTED REGION ID(SKALogger.test_testMode) ENABLED START #
assert tango_context.device.testMode == TestMode.NONE
# PROTECTED REGION END # // SKALogger.test_testMode
@pytest.mark.forked
def test_SetLoggingLevel():
"""Test for SetLoggingLevel"""
logging_level = int(tango.LogLevel.LOG_ERROR)
logging_target = "logger/target/1"
logger_device = "logger/device/1"
devices_info = (
{"class": SKALogger, "devices": [{"name": logger_device}]},
{"class": SKASubarray, "devices": [{"name": logging_target}]},
)
with MultiDeviceTestContext(devices_info, process=False) as multi_context:
dev_proxy = multi_context.get_device(logging_target)
dev_proxy.Init()
dev_proxy.loggingLevel = int(tango.LogLevel.LOG_FATAL)
assert dev_proxy.loggingLevel != logging_level
levels = []
levels.append(logging_level)
targets = []
targets.append(multi_context.get_device_access(logging_target))
device_details = []
device_details.append(levels)
device_details.append(targets)
multi_context.get_device(logger_device).SetLoggingLevel(device_details)
assert dev_proxy.loggingLevel == logging_level
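A typical way to run just the forked logging test, assuming a standard pytest setup for this package:
# pytest tests/test_logger_device.py -k test_SetLoggingLevel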
| 44.856383
| 94
| 0.681727
| 899
| 8,433
| 6.204672
| 0.150167
| 0.134457
| 0.076192
| 0.116529
| 0.552886
| 0.48028
| 0.154177
| 0.146468
| 0.020437
| 0
| 0
| 0.003295
| 0.208348
| 8,433
| 187
| 95
| 45.096257
| 0.832235
| 0.440768
| 0
| 0
| 0
| 0.011628
| 0.099074
| 0.024306
| 0
| 0
| 0
| 0
| 0.151163
| 1
| 0.162791
| false
| 0.011628
| 0.104651
| 0
| 0.290698
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d83b4f58893d59845ef72aeb0870f92b39fa121
| 2,053
|
py
|
Python
|
baseline/find_pairs.py
|
parallelcrawl/DataCollection
|
4308473e6b53779159a15c1416bff3f2291dd1f2
|
[
"Apache-2.0"
] | 8
|
2018-02-08T16:03:00.000Z
|
2022-01-19T11:41:38.000Z
|
baseline/find_pairs.py
|
christianbuck/CorpusMining
|
f9248c3528a415a1e5af2c5a54a60c16cd79ff1d
|
[
"Apache-2.0"
] | 3
|
2017-08-08T10:53:29.000Z
|
2017-08-08T10:58:51.000Z
|
baseline/find_pairs.py
|
parallelcrawl/DataCollection
|
4308473e6b53779159a15c1416bff3f2291dd1f2
|
[
"Apache-2.0"
] | 4
|
2018-06-09T21:53:09.000Z
|
2022-01-19T11:41:48.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
import urlparse
def process_buffer(buffer):
if not buffer or len(buffer) < 2:
return
buffer = [line.decode('utf-8', 'ignore') for line in buffer]
split_buffer = [line.strip().lower().split("\t")
for line in buffer]
if list(set(map(len, split_buffer))) != [4]:
for line in buffer:
sys.stderr.write(line.encode('utf-8'))
return
original_urls = []
stripped_languages = []
detected_languages = []
for stripped_url, \
original_url, \
stripped_language, \
detected_language in split_buffer:
original_urls.append(original_url)
stripped_languages.append(stripped_language)
detected_languages.append(detected_language)
if len(set(original_urls)) < 2:
# print "not enough urls"
return
if len(set(stripped_languages)) < 2:
# print "not enough stripped languages", languages_stripped
return
if len(set(detected_languages)) < 2:
# print "not enough detected_languages", detected_languages
return
for language in stripped_languages:
for detected_language in detected_languages:
# print "looking for ", language, " in ", detected_languages
if language in detected_language.replace("chineset", "chinese") \
.split('/'):
for line in buffer:
sys.stdout.write(line.encode("utf-8"))
return
if __name__ == "__main__":
buffer = []
buffer_url = None
for line in sys.stdin:
# line = line.decode("utf-8", "ignore")
url = line.split("\t", 1)[0]
if url != buffer_url:
process_buffer(buffer)
buffer = [line]
buffer_url = url
else:
buffer.append(line)
# print url != buffer_url
process_buffer(buffer)
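To make the grouping logic concrete, an illustrative input; the column semantics are inferred from the unpacking in process_buffer and the values are invented:
# stdin, tab-separated: stripped_url, original_url, stripped_language, detected_language
#   example.com/page    http://example.com/en/page    english    english
#   example.com/page    http://example.com/fr/page    french     french/english
# A group is echoed to stdout only when it has >= 2 distinct original urls,
# >= 2 stripped languages, >= 2 detected languages, and some stripped language
# appears in a '/'-separated detected-language field.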
| 31.106061
| 77
| 0.580614
| 229
| 2,053
| 5.021834
| 0.266376
| 0.103478
| 0.03913
| 0.052174
| 0.205217
| 0.097391
| 0
| 0
| 0
| 0
| 0
| 0.008523
| 0.314174
| 2,053
| 65
| 78
| 31.584615
| 0.808239
| 0.147589
| 0
| 0.2
| 0
| 0
| 0.028129
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.08
| 0
| 0.22
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d87c99f7edc4a51975ce4aad83b2a68eca0165b
| 4,931
|
py
|
Python
|
utils.py
|
nea23/greek_alphabets_tf-idf
|
94094dd6d7383400e0f0a9d4a1b05744dd2f3ba9
|
[
"MIT"
] | null | null | null |
utils.py
|
nea23/greek_alphabets_tf-idf
|
94094dd6d7383400e0f0a9d4a1b05744dd2f3ba9
|
[
"MIT"
] | null | null | null |
utils.py
|
nea23/greek_alphabets_tf-idf
|
94094dd6d7383400e0f0a9d4a1b05744dd2f3ba9
|
[
"MIT"
] | null | null | null |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
"""
The following functions are used to create an annotated heatmap and they were copied from:
https://matplotlib.org/stable/gallery/images_contours_and_fields/image_annotated_heatmap.html#using-the-helper-function-code-style
"""
def heatmap(data, row_labels, col_labels, ax=None,
**kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Parameters
----------
data
A 2D numpy array of shape (N, M).
row_labels
A list or array of length N with the labels for the rows.
col_labels
A list or array of length M with the labels for the columns.
ax
A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If
not provided, use current axes or create a new one. Optional.
**kwargs
All other arguments are forwarded to `imshow`.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=True, bottom=False,
labeltop=True, labelbottom=False)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
rotation_mode="anchor")
# Turn spines off and create white grid.
# ax.spines[:].set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax.tick_params(which="minor", bottom=False, left=False)
return im
def annotate_heatmap(im, data=None, valfmt="{x:.2f}",
textcolors=("black", "white"),
threshold=None, **textkw):
"""
A function to annotate a heatmap.
Parameters
----------
im
The AxesImage to be labeled.
data
Data used to annotate. If None, the image's data is used. Optional.
valfmt
The format of the annotations inside the heatmap. This should either
use the string format method, e.g. "$ {x:.2f}", or be a
`matplotlib.ticker.Formatter`. Optional.
textcolors
A pair of colors. The first is used for values below a threshold,
the second for those above. Optional.
threshold
Value in data units according to which the colors from textcolors are
applied. If None (the default) uses the middle of the colormap as
separation. Optional.
**kwargs
All other arguments are forwarded to each call to `text` used to create
the text labels.
"""
if not isinstance(data, (list, np.ndarray)):
data = im.get_array()
# Normalize the threshold to the images color range.
if threshold is not None:
threshold = im.norm(threshold)
else:
threshold = im.norm(data.max())/2.
# Set default alignment to center, but allow it to be
# overwritten by textkw.
kw = dict(horizontalalignment="center",
verticalalignment="center")
kw.update(textkw)
# Get the formatter in case a string is supplied
if isinstance(valfmt, str):
valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)
# Loop over the data and create a `Text` for each "pixel".
# Change the text's color depending on the data.
texts = []
for i in range(data.shape[0]):
for j in range(data.shape[1]):
kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])
text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
texts.append(text)
return texts
"""
The following functions are used to get the top pairs from a correlation matrix and they were copied from:
https://stackoverflow.com/a/41453817
"""
def get_redundant_pairs(df):
'''Get diagonal and lower triangular pairs of correlation matrix'''
pairs_to_drop = set()
cols = df.columns
for i in range(0, df.shape[1]):
for j in range(0, i+1):
pairs_to_drop.add((cols[i], cols[j]))
return pairs_to_drop
def get_top_abs_correlations(df, min_val=0.6):
au_corr = df.corr().abs().unstack()
labels_to_drop = get_redundant_pairs(df)
au_corr = au_corr.drop(labels=labels_to_drop).sort_values(ascending=False)
au_corr_df = pd.DataFrame(au_corr, columns=['Score'])
return au_corr_df.where(au_corr_df['Score'] >= min_val, np.nan).dropna()
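A small usage sketch tying the helpers together; the random data, labels, and threshold are illustrative only (it relies on the module's existing imports):
if __name__ == "__main__":
    # Annotated heatmap from random data.
    data = np.random.rand(4, 5)
    fig, ax = plt.subplots()
    im = heatmap(data, ["row %d" % i for i in range(4)],
                 ["col %d" % j for j in range(5)], ax=ax, cmap="viridis")
    annotate_heatmap(im, valfmt="{x:.2f}")
    fig.tight_layout()
    # Top absolute correlations above an (illustrative) threshold.
    df = pd.DataFrame(np.random.rand(100, 4), columns=list("abcd"))
    print(get_top_abs_correlations(df, min_val=0.1))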
| 34.725352
| 131
| 0.651592
| 713
| 4,931
| 4.429173
| 0.352034
| 0.0133
| 0.015199
| 0.021533
| 0.136795
| 0.117163
| 0.081697
| 0.065231
| 0
| 0
| 0
| 0.008313
| 0.243764
| 4,931
| 142
| 132
| 34.725352
| 0.838563
| 0.404583
| 0
| 0
| 0
| 0
| 0.026282
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d87fe4b4c7aa76322c36b84c9220f5fee728c3d
| 6,675
|
py
|
Python
|
built-in/MindSpore/Official/cv/detection/CenterFace_for_MindSpore/src/launch.py
|
Huawei-Ascend/modelzoo
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
[
"Apache-2.0"
] | 12
|
2020-12-13T08:34:24.000Z
|
2022-03-20T15:17:17.000Z
|
built-in/MindSpore/Official/cv/detection/CenterFace_for_MindSpore/src/launch.py
|
Huawei-Ascend/modelzoo
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
[
"Apache-2.0"
] | 3
|
2021-03-31T20:15:40.000Z
|
2022-02-09T23:50:46.000Z
|
built-in/MindSpore/Official/cv/detection/CenterFace_for_MindSpore/src/launch.py
|
Huawei-Ascend/modelzoo
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
[
"Apache-2.0"
] | 2
|
2021-07-10T12:40:46.000Z
|
2021-12-17T07:55:15.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""auto generate rank table and export envs"""
import sys
import subprocess
import os
import socket
import json
from argparse import ArgumentParser, REMAINDER
def parse_args():
parser = ArgumentParser(description="mindspore distributed training launch "
"helper utilty that will spawn up "
"multiple distributed processes")
parser.add_argument("--nproc_per_node", type=int, default=1,
help="The number of processes to launch on each node, "
"for D training, this is recommended to be set "
"to the number of D in your system so that "
"each process can be bound to a single D.")
parser.add_argument("--visible_devices", type=str, default="0,1,2,3,4,5,6,7",
help="will use the visible devices sequentially")
parser.add_argument("--env_sh", type=str, default="",
help="env for 1p")
parser.add_argument("--server_id", type=str, default="",
help="server ip")
# positional
parser.add_argument("training_script", type=str,
help="The full path to the single D training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script")
# device mode
parser.add_argument("--device", type=str, default="A+K")
# task_set, to improve cpu utilization for multi-npu (e.g., 8P) training
parser.add_argument("--task_set", type=bool, default=False)
parser.add_argument("--task_set_core", type=int, default=24)
# ranktable file
parser.add_argument("--table_fn", type=str, default="",
help="The ranktable file path, if not set, "
"we will auto-generate a ranktable for user")
# rest from the training program
parser.add_argument('training_script_args', nargs=REMAINDER)
return parser.parse_args()
def main():
args = parse_args()
print('args:{}'.format(args))
visible_devices = args.visible_devices.split(',')
assert len(visible_devices) >= args.nproc_per_node
print('visible_devices:{}'.format(visible_devices))
if args.server_id == '':
print('please input a server ip!')
exit(0)
print('server_id:{}'.format(args.server_id))
hccn_configs = open('/etc/hccn.conf', 'r').readlines()
device_ips = {}
for hccn_item in hccn_configs:
hccn_item = hccn_item.strip()
if hccn_item.startswith('address_'):
device_id, device_ip = hccn_item.split('=')
device_id = device_id.split('_')[1]
device_ips[device_id] = device_ip
print('device_id:{}, device_ip:{}'.format(device_id, device_ip))
hccn_table = {}
if args.device == 'A+K':
hccn_table['board_id'] = '0x002f'
else:
hccn_table['board_id'] = '0x0000'
hccn_table['chip_info'] = '910'
hccn_table['deploy_mode'] = 'lab'
hccn_table['group_count'] = '1'
hccn_table['group_list'] = []
instance_list = []
usable_dev = ''
for instance_id in range(args.nproc_per_node):
instance = {}
instance['devices'] = []
device_id = visible_devices[instance_id]
device_ip = device_ips[device_id]
usable_dev += str(device_id)
instance['devices'].append({
'device_id': device_id,
'device_ip': device_ip,
})
instance['rank_id'] = str(instance_id)
instance['server_id'] = args.server_id
instance_list.append(instance)
hccn_table['group_list'].append({
'device_num': str(args.nproc_per_node),
'server_num': '1',
'group_name': '',
'instance_count': str(args.nproc_per_node),
'instance_list': instance_list,
})
hccn_table['para_plane_nic_location'] = 'device'
hccn_table['para_plane_nic_name'] = []
for instance_id in range(args.nproc_per_node):
eth_id = visible_devices[instance_id]
hccn_table['para_plane_nic_name'].append('eth{}'.format(eth_id))
hccn_table['para_plane_nic_num'] = str(args.nproc_per_node)
hccn_table['status'] = 'completed'
if args.table_fn is "":
table_fn = os.path.join(os.getcwd(), 'rank_table_{}p_{}_{}.json'.format(args.nproc_per_node, usable_dev, args.server_id))
with open(table_fn, 'w') as table_fp:
json.dump(hccn_table, table_fp, indent=4)
else:
table_fn = args.table_fn
# world size in terms of number of processes
dist_group_size = args.nproc_per_node
for rank in range(0, args.nproc_per_node):
rank_id = rank
device_id = visible_devices[rank]
device_root_fn = os.path.join(os.getcwd(), 'device{}'.format(device_id)) #format(rank_id))
rank_process = ''
if args.nproc_per_node > 1:
rank_process += 'export RANK_TABLE_FILE={} && '.format(table_fn)
if args.task_set:
left = int(device_id) * args.task_set_core
right = left + args.task_set_core - 1
rank_process += 'export RANK_SIZE={} && source {} && export RANK_ID={} && export DEVICE_ID={} && rm -rf {} && mkdir {} && cd {} && taskset -c {}-{} python {} '.format(args.nproc_per_node, args.env_sh, rank_id, device_id, device_root_fn, device_root_fn, device_root_fn, left, right, args.training_script)
else:
rank_process += 'export RANK_SIZE={} && source {} && export RANK_ID={} && export DEVICE_ID={} && rm -rf {} && mkdir {} && cd {} && python {} '.format(args.nproc_per_node, args.env_sh, rank_id, device_id, device_root_fn, device_root_fn, device_root_fn, args.training_script)
rank_process += ' '.join(args.training_script_args) + ' >log{}.log 2>&1 &'.format(rank_id)
os.system(rank_process)
if __name__ == "__main__":
main()
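A hypothetical launch command; the device list, server IP, and training script are placeholders:
# python launch.py --nproc_per_node=8 --visible_devices=0,1,2,3,4,5,6,7 \
#     --env_sh=./env.sh --server_id=10.155.111.140 train.py --data_dir=./data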
| 43.914474
| 315
| 0.61588
| 867
| 6,675
| 4.491349
| 0.275663
| 0.03698
| 0.040062
| 0.049307
| 0.21623
| 0.138932
| 0.0981
| 0.0981
| 0.0981
| 0.07961
| 0
| 0.008585
| 0.249588
| 6,675
| 151
| 316
| 44.205298
| 0.768816
| 0.131386
| 0
| 0.06087
| 0
| 0.017391
| 0.254505
| 0.008316
| 0
| 0
| 0.002079
| 0
| 0.008696
| 1
| 0.017391
| false
| 0
| 0.052174
| 0
| 0.078261
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d886ff7c8fb1d674ed9db521c7c448a657e5fe1
| 3,799
|
py
|
Python
|
Incident-Response/Tools/cyphon/cyphon/responder/actions/tests/test_models.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 1
|
2021-07-24T17:22:50.000Z
|
2021-07-24T17:22:50.000Z
|
Incident-Response/Tools/cyphon/cyphon/responder/actions/tests/test_models.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-28T03:40:31.000Z
|
2022-02-28T03:40:52.000Z
|
Incident-Response/Tools/cyphon/cyphon/responder/actions/tests/test_models.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-25T08:34:51.000Z
|
2022-03-16T17:29:44.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
"""
# standard library
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
# third party
from django.test import TestCase
# local
import platforms.jira.handlers as jira_module
from responder.actions.models import Action
from tests.fixture_manager import get_fixtures
class ActionsBaseTestCase(TestCase):
"""
Base class for testing Actions.
"""
fixtures = get_fixtures(['actions', 'dispatches'])
def setUp(self):
self.action = Action.objects.get(pk=1)
class ActionTestCase(ActionsBaseTestCase):
"""
Tests the Action class.
"""
def test_str(self):
"""
Tests the string representation of a Pipe.
"""
self.assertEqual(str(self.action), 'Jira IssueAPI')
def test_get_module(self):
"""
Tests the _get_module method for getting the module for an
Action's Destination.
"""
self.assertEqual(self.action._get_module(), jira_module)
def test_create_request_handler(self):
"""
Tests the create_request_handler method for getting a request
handler for an Action.
"""
mock_user = Mock()
mock_handler = Mock()
with patch('platforms.jira.handlers.IssueAPI',
return_value=mock_handler) as mock_api:
kwargs = {
'user': mock_user,
}
result = self.action.create_request_handler(**kwargs)
mock_api.assert_called_once_with(endpoint=self.action,
user=mock_user)
self.assertEqual(result, mock_handler)
def test_save_w_no_descr(self):
"""
Test the save method of an Action when the Action has no
description.
"""
self.assertEqual(self.action.description, None)
self.action.save()
self.assertEqual(self.action.description, 'Jira IssueAPI')
def test_save_w_descr(self):
"""
Test the save method of an Action when the Action has a
description.
"""
self.action.description = 'Create a JIRA Issue'
self.action.save()
self.assertEqual(self.action.description, 'Create a JIRA Issue')
def test_get_dispatch(self):
"""
Test the get_dispatch method of an Action.
"""
mock_alert = Mock()
mock_user = Mock()
mock_record = Mock()
mock_handler = Mock()
mock_handler.run = Mock(return_value=mock_record)
mock_handler.record = mock_record
with patch('platforms.jira.handlers.IssueAPI',
return_value=mock_handler) as mock_api:
kwargs = {
'alert': mock_alert,
'user': mock_user,
}
result = self.action.get_dispatch(**kwargs)
mock_api.assert_called_once_with(endpoint=self.action,
user=mock_user)
mock_handler.run.assert_called_once_with(mock_alert)
self.assertEqual(result, mock_record)
| 30.886179
| 72
| 0.630692
| 461
| 3,799
| 5.052061
| 0.321041
| 0.055818
| 0.032632
| 0.042937
| 0.303564
| 0.276943
| 0.228854
| 0.206097
| 0.16316
| 0.16316
| 0
| 0.004044
| 0.284022
| 3,799
| 122
| 73
| 31.139344
| 0.852206
| 0.300869
| 0
| 0.327273
| 0
| 0
| 0.065047
| 0.026348
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.127273
| false
| 0
| 0.127273
| 0
| 0.309091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d8881a2641e3115485a61059c62987f2d27bf5d
| 4,805
|
py
|
Python
|
predictions/lambda/handler.py
|
aaronshim/alexa-github-today
|
4f3e7adffa9bb9f3d63cfc1f4a79f396078c787c
|
[
"MIT"
] | null | null | null |
predictions/lambda/handler.py
|
aaronshim/alexa-github-today
|
4f3e7adffa9bb9f3d63cfc1f4a79f396078c787c
|
[
"MIT"
] | null | null | null |
predictions/lambda/handler.py
|
aaronshim/alexa-github-today
|
4f3e7adffa9bb9f3d63cfc1f4a79f396078c787c
|
[
"MIT"
] | null | null | null |
import json
import requests
from collections import defaultdict
from fuzzywuzzy import process
from random import sample
# Constants
"""
Constants for default responses that do not need any further computation.
"""
DEFAULT_STOP_RESPONSE = 'All right. See you next time!'
DEFAULT_ERROR_MESSAGE = "I'm sorry. I don't know how to do that yet."
DEFAULT_HELP_MESSAGE = "Try asking me about prediction markets. Ask me to look up midterm elections."
PREDEFINED_RESPONSES = {
'AMAZON.FallbackIntent': "I couldn't understand what you were asking. Why don't you ask me about elections?",
'AMAZON.CancelIntent': DEFAULT_STOP_RESPONSE,
'AMAZON.HelpIntent': DEFAULT_HELP_MESSAGE,
'AMAZON.StopIntent': DEFAULT_STOP_RESPONSE,
'AMAZON.NavigateHomeIntent': DEFAULT_STOP_RESPONSE,
}
"""
To be considered as a match, any other title would have to be within this percentage of the score of the best match.
"""
PERCENTAGE_THRESHOLD = 0.1
# API Helpers
def get_all_markets():
"""
Query the PredictIt API to get all available markets in a dictionary that maps from the name of the market to its ID.
"""
all_markets = requests.request(
'GET', 'https://www.predictit.org/api/marketdata/all/')
all_markets = json.loads(all_markets.content)
return dict((market['name'], market['id']) for market in all_markets['markets'])
def get_market(id):
"""
Query the PredictIt API to get the details of a particular market given the market's ID.
"""
market = requests.request(
'GET', "https://www.predictit.org/api/marketdata/markets/%d" % id)
return json.loads(market.content)
# "UI" Helpers
def market_message(market):
"""
Given the response from `get_market`, generates a message that conveys the relevant information of the particular market.
"""
if len(market['contracts']) > 1:
return "%s is too complicated." % market['name']
return "%s is trading at %d percent." % \
(market['name'], market['contracts'][0]['lastTradePrice'] * 100)
def response_from_message(message):
"""
Helper to wrap a message string into the minimum acceptable Alexa response JSON.
"""
return {
'version': '1.0',
'response': {
'outputSpeech': {
'type': 'PlainText',
'text': message,
}
}
}
def can_fulfill(intent):
if intent['name'] == 'Query' and intent['slots'] and \
intent['slots']['Market'] and intent['slots']['Market']['value']:
return {
'version': '1.0',
'response': {
'canFulfillIntent': {
'canFulfill': 'YES',
'slots': {
'Market': {
'canUnderstand': 'YES',
'canFulfill': 'YES'
},
}
}
}
}
return {
'version': '1.0',
'response': {
'canFulfillIntent': {
'canFulfill': 'NO',
}
}
}
# Main function
def main(event, context):
"""
Entry point for the Alexa action.
"""
request_type = event['request']['type']
if request_type != 'IntentRequest':
if request_type == 'LaunchRequest':
return response_from_message(DEFAULT_HELP_MESSAGE)
elif request_type == 'CanFulfillIntentRequest':
return can_fulfill(event['request']['intent'])
elif request_type == 'SessionEndedRequest':
return
intent = event['request']['intent']
intent_type = intent['name']
# Get the canned responses out of the way before we do any heavy lifting
# with external API calls.
if intent_type in PREDEFINED_RESPONSES:
return response_from_message(PREDEFINED_RESPONSES[intent_type])
# Sanity check.
if intent_type != 'Query' or 'Market' not in intent['slots']:
return response_from_message(DEFAULT_ERROR_MESSAGE)
keyword = intent['slots']['Market']['value']
markets = get_all_markets()
# Only take the ones that are within percentage threshold of the first
# result. Bucket them by score.
likely_markets = process.extract(keyword, markets.keys(), limit=100)
(_, best_score) = likely_markets[0]
result_markets = defaultdict(list) # Multimap score -> id's
for (name, score) in likely_markets:
if best_score - score <= PERCENTAGE_THRESHOLD * best_score:
result_markets[score].append(markets[name])
# List of market JSON response's.
result_markets = [get_market(id) for id in sum(
[sample(ids, 1) for (_, ids) in result_markets.items()], [])]
return response_from_message(' '.join(market_message(market) for market in result_markets))
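For orientation, a minimal Alexa-style event that exercises the fuzzy-matching path; the field layout mirrors the checks in main() and the slot value is invented (note that main() also calls the live PredictIt API):
# Minimal illustrative event (slot value invented).
sample_event = {
    "request": {
        "type": "IntentRequest",
        "intent": {
            "name": "Query",
            "slots": {"Market": {"value": "midterm elections"}},
        },
    }
}
# main(sample_event, None) -> {'version': '1.0', 'response': {... PlainText ...}}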
| 33.838028
| 125
| 0.624766
| 565
| 4,805
| 5.189381
| 0.346903
| 0.010232
| 0.032401
| 0.034106
| 0.114939
| 0.085266
| 0.068213
| 0.034789
| 0.034789
| 0
| 0
| 0.005078
| 0.262227
| 4,805
| 141
| 126
| 34.078014
| 0.822003
| 0.157544
| 0
| 0.120879
| 0
| 0
| 0.238488
| 0.018366
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065934
| false
| 0
| 0.054945
| 0
| 0.263736
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d88973447a6fc9a97038839f4db33428c51196b
| 12,649
|
py
|
Python
|
Train.py
|
prattcmp/speakerembedding
|
5ed051261e69aaf7a1306c390b36cedb8da3f095
|
[
"MIT"
] | null | null | null |
Train.py
|
prattcmp/speakerembedding
|
5ed051261e69aaf7a1306c390b36cedb8da3f095
|
[
"MIT"
] | null | null | null |
Train.py
|
prattcmp/speakerembedding
|
5ed051261e69aaf7a1306c390b36cedb8da3f095
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
import logging, yaml, os, sys, argparse, time
from tqdm import tqdm
from collections import defaultdict
from Logger import Logger
import matplotlib
matplotlib.use('agg')
matplotlib.rcParams['agg.path.chunksize'] = 10000
import matplotlib.pyplot as plt
from scipy.io import wavfile
from random import sample
from sklearn.manifold import TSNE
from Modules import GE2E, GE2E_Loss
from Datasets import Dataset, Collater, Inference_Collater
from Noam_Scheduler import Modified_Noam_Scheduler
from Radam import RAdam
from Arg_Parser import Recursive_Parse
hp = Recursive_Parse(yaml.load(
open('Hyper_Parameters.yaml', encoding='utf-8'),
Loader=yaml.Loader
))
if hp.Device is not None:
os.environ['CUDA_VISIBLE_DEVICES']= str(hp.Device)
if not torch.cuda.is_available():
device = torch.device('cpu')
else:
device = torch.device('cuda:0')
torch.backends.cudnn.benchmark = True
torch.cuda.set_device(0)
logging.basicConfig(
level=logging.INFO, stream=sys.stdout,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
)
if hp.Use_Mixed_Precision:
try:
from apex import amp
except ImportError:
logging.warn('The apex module is not available in the environment. Mixed precision will not work.')
hp.Use_Mixed_Precision = False
class Trainer:
def __init__(self, steps= 0):
self.steps = steps
self.epochs = 0
self.Dataset_Generate()
self.Model_Generate()
self.scalar_Dict = {
'Train': defaultdict(float),
'Evaluation': defaultdict(float),
}
self.writer_Dict = {
'Train': Logger(os.path.join(hp.Log_Path, 'Train')),
'Evaluation': Logger(os.path.join(hp.Log_Path, 'Evaluation')),
}
self.Load_Checkpoint()
def Dataset_Generate(self):
train_Dataset = Dataset(
pattern_path= hp.Train.Train_Pattern.Path,
metadata_file= hp.Train.Train_Pattern.Metadata_File,
pattern_per_speaker= hp.Train.Batch.Train.Pattern_per_Speaker,
use_cache= hp.Train.Use_Pattern_Cache
)
dev_Dataset = Dataset(
pattern_path= hp.Train.Eval_Pattern.Path,
metadata_file= hp.Train.Eval_Pattern.Metadata_File,
pattern_per_speaker= hp.Train.Batch.Eval.Pattern_per_Speaker,
use_cache= hp.Train.Use_Pattern_Cache
)
inference_Dataset = Dataset(
pattern_path= hp.Train.Eval_Pattern.Path,
metadata_file= hp.Train.Eval_Pattern.Metadata_File,
pattern_per_speaker= hp.Train.Batch.Eval.Pattern_per_Speaker,
num_speakers= 50, # maximum number of speakers tensorboard can display
use_cache= hp.Train.Use_Pattern_Cache
)
logging.info('The number of train speakers = {}.'.format(len(train_Dataset)))
logging.info('The number of development speakers = {}.'.format(len(dev_Dataset)))
collater = Collater(
min_frame_length= hp.Train.Frame_Length.Min,
max_frame_length= hp.Train.Frame_Length.Max
)
inference_Collater = Inference_Collater(
samples= hp.Train.Inference.Samples,
frame_length= hp.Train.Inference.Frame_Length,
overlap_length= hp.Train.Inference.Overlap_Length
)
self.dataLoader_Dict = {}
self.dataLoader_Dict['Train'] = torch.utils.data.DataLoader(
dataset= train_Dataset,
shuffle= True,
collate_fn= collater,
batch_size= hp.Train.Batch.Train.Speaker,
num_workers= hp.Train.Num_Workers,
pin_memory= True
)
self.dataLoader_Dict['Dev'] = torch.utils.data.DataLoader(
dataset= dev_Dataset,
shuffle= True,
collate_fn= collater,
batch_size= hp.Train.Batch.Eval.Speaker,
num_workers= hp.Train.Num_Workers,
pin_memory= True
)
self.dataLoader_Dict['Inference'] = torch.utils.data.DataLoader(
dataset= inference_Dataset,
shuffle= True,
collate_fn= inference_Collater,
batch_size= hp.Train.Batch.Eval.Speaker,
num_workers= hp.Train.Num_Workers,
pin_memory= True
)
def Model_Generate(self):
self.model = GE2E(
mel_dims= hp.Sound.Mel_Dim,
lstm_size= hp.GE2E.LSTM.Sizes,
lstm_stacks= hp.GE2E.LSTM.Stacks,
embedding_size= hp.GE2E.Embedding_Size,
).to(device)
self.criterion = GE2E_Loss().to(device)
self.optimizer = RAdam(
params= self.model.parameters(),
lr= hp.Train.Learning_Rate.Initial,
betas= (hp.Train.ADAM.Beta1, hp.Train.ADAM.Beta2),
eps= hp.Train.ADAM.Epsilon,
weight_decay= hp.Train.Weight_Decay
)
self.scheduler = Modified_Noam_Scheduler(
optimizer= self.optimizer,
base= hp.Train.Learning_Rate.Base,
)
if hp.Use_Mixed_Precision:
self.model, self.optimizer = amp.initialize(
models= self.model,
optimizers=self.optimizer
)
logging.info(self.model)
def Train_Step(self, mels):
loss_Dict = {}
mels = mels.to(device, non_blocking=True)
embeddings = self.model(mels)
loss_Dict['Embedding'] = self.criterion(embeddings, hp.Train.Batch.Train.Pattern_per_Speaker)
self.optimizer.zero_grad()
if hp.Use_Mixed_Precision:
with amp.scale_loss(loss_Dict['Embedding'], self.optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(
parameters= amp.master_params(self.optimizer),
max_norm= hp.Train.Gradient_Norm
)
else:
loss_Dict['Embedding'].backward()
torch.nn.utils.clip_grad_norm_(
parameters= self.model.parameters(),
max_norm= hp.Train.Gradient_Norm
)
self.optimizer.step()
self.scheduler.step()
self.steps += 1
self.tqdm.update(1)
for tag, loss in loss_Dict.items():
self.scalar_Dict['Train']['Loss/{}'.format(tag)] += loss  # accumulate the loop's own loss, not a fixed key
def Train_Epoch(self):
for mels in self.dataLoader_Dict['Train']:
self.Train_Step(mels)
if self.steps % hp.Train.Checkpoint_Save_Interval == 0:
self.Save_Checkpoint()
if self.steps % hp.Train.Logging_Interval == 0:
self.scalar_Dict['Train'] = {
tag: loss / hp.Train.Logging_Interval
for tag, loss in self.scalar_Dict['Train'].items()
}
self.scalar_Dict['Train']['Learning_Rate'] = self.scheduler.get_last_lr()
self.writer_Dict['Train'].add_scalar_dict(self.scalar_Dict['Train'], self.steps)
self.scalar_Dict['Train'] = defaultdict(float)
if self.steps % hp.Train.Evaluation_Interval == 0:
self.Evaluation_Epoch()
if self.steps % hp.Train.Inference_Interval == 0:
self.Inference_Epoch()
if self.steps >= hp.Train.Max_Step:
return
self.epochs += 1
@torch.no_grad()
def Evaluation_Step(self, mels):
loss_Dict = {}
mels = mels.to(device, non_blocking=True)
embeddings = self.model(mels)
loss_Dict['Embedding'] = self.criterion(embeddings, hp.Train.Batch.Eval.Pattern_per_Speaker)
for tag, loss in loss_Dict.items():
self.scalar_Dict['Evaluation']['Loss/{}'.format(tag)] += loss
def Evaluation_Epoch(self):
logging.info('(Steps: {}) Start evaluation.'.format(self.steps))
self.model.eval()
for step, mels in tqdm(enumerate(self.dataLoader_Dict['Dev'], 1), desc='[Evaluation]'):
self.Evaluation_Step(mels)
self.scalar_Dict['Evaluation'] = {
tag: loss / step
for tag, loss in self.scalar_Dict['Evaluation'].items()
}
self.writer_Dict['Evaluation'].add_scalar_dict(self.scalar_Dict['Evaluation'], self.steps)
self.writer_Dict['Evaluation'].add_histogram_model(self.model, self.steps, delete_keywords=['layer_Dict', 'layer'])
self.scalar_Dict['Evaluation'] = defaultdict(float)
self.model.train()
@torch.no_grad()
def Inference_Step(self, mels):
return self.model(
mels= mels.to(device, non_blocking=True),
samples= hp.Train.Inference.Samples
)
def Inference_Epoch(self):
logging.info('(Steps: {}) Start inference.'.format(self.steps))
self.model.eval()
embeddings, speakers = zip(*[
(self.Inference_Step(mels), speakers)
for mels, speakers in tqdm(self.dataLoader_Dict['Inference'], desc='[Inference]')
])
embeddings = torch.cat(embeddings, dim= 0).cpu().numpy()
speakers = [speaker for speaker_List in speakers for speaker in speaker_List]
self.writer_Dict['Evaluation'].add_embedding(
embeddings,
metadata= speakers,
global_step= self.steps,
tag= 'Embeddings'
)
self.model.train()
def Load_Checkpoint(self):
if self.steps == 0:
paths = [
os.path.join(root, file).replace('\\', '/')
for root, _, files in os.walk(hp.Checkpoint_Path)
for file in files
if os.path.splitext(file)[1] == '.pt'
]
if len(paths) > 0:
path = max(paths, key = os.path.getctime)
else:
return # Initial training
else:
path = os.path.join(hp.Checkpoint_Path, 'S_{}.pt'.format(self.steps)).replace('\\', '/')  # 'path' was undefined in this branch
state_Dict = torch.load(os.path.join(path), map_location= 'cpu')
self.model.load_state_dict(state_Dict['Model'])
self.optimizer.load_state_dict(state_Dict['Optimizer'])
self.scheduler.load_state_dict(state_Dict['Scheduler'])
self.steps = state_Dict['Steps']
self.epochs = state_Dict['Epochs']
if hp.Use_Mixed_Precision:
if not 'AMP' in state_Dict.keys():
logging.warn('No AMP state dict is in the checkpoint. The model assumes this checkpoint was trained without mixed precision.')
else:
amp.load_state_dict(state_Dict['AMP'])
logging.info('Checkpoint loaded at {} steps.'.format(self.steps))
def Save_Checkpoint(self):
os.makedirs(hp.Checkpoint_Path, exist_ok= True)
state_Dict = {
'Model': self.model.state_dict(),
'Optimizer': self.optimizer.state_dict(),
'Scheduler': self.scheduler.state_dict(),
'Steps': self.steps,
'Epochs': self.epochs,
}
if hp.Use_Mixed_Precision:
state_Dict['AMP'] = amp.state_dict()
torch.save(
state_Dict,
os.path.join(hp.Checkpoint_Path, 'S_{}.pt'.format(self.steps).replace('\\', '/'))
)
logging.info('Checkpoint saved at {} steps.'.format(self.steps))
def Train(self):
hp_Path = os.path.join(hp.Checkpoint_Path, 'Hyper_Parameters.yaml').replace('\\', '/')
if not os.path.exists(hp_Path):
from shutil import copyfile
os.makedirs(hp.Checkpoint_Path, exist_ok= True)
copyfile('Hyper_Parameters.yaml', hp_Path)
if self.steps == 0:
self.Evaluation_Epoch()
if hp.Train.Initial_Inference:
self.Inference_Epoch()
self.tqdm = tqdm(
initial= self.steps,
total= hp.Train.Max_Step,
desc='[Training]'
)
while self.steps < hp.Train.Max_Step:
try:
self.Train_Epoch()
except KeyboardInterrupt:
self.Save_Checkpoint()
exit(1)
self.tqdm.close()
logging.info('Finished training.')
if __name__ == '__main__':
argParser = argparse.ArgumentParser()
argParser.add_argument('-s', '--steps', default= 0, type= int)
args = argParser.parse_args()
new_Trainer = Trainer(steps= args.steps)
new_Trainer.Train()
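The script's CLI is minimal; a hedged example of starting fresh versus resuming (the step count is illustrative):
# python Train.py            # start training from step 0
# python Train.py -s 100000  # resume from the checkpoint saved at step 100000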
| 35.233983
| 137
| 0.591035
| 1,446
| 12,649
| 4.981328
| 0.185339
| 0.041788
| 0.023324
| 0.018465
| 0.374705
| 0.295155
| 0.206303
| 0.168957
| 0.147022
| 0.140358
| 0
| 0.004048
| 0.296861
| 12,649
| 359
| 138
| 35.233983
| 0.805824
| 0.003716
| 0
| 0.216949
| 0
| 0.00339
| 0.080787
| 0.006825
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040678
| false
| 0
| 0.061017
| 0.00339
| 0.115254
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d8c97671a23367d026ea52b147ffe064cc2939a
| 881
|
py
|
Python
|
ga/gen_graph.py
|
k4t0mono/exercicios-ia
|
06f76db20f519b8d7e9b5ee2cf5c7a72b21e188c
|
[
"BSD-3-Clause"
] | 1
|
2018-09-23T15:38:04.000Z
|
2018-09-23T15:38:04.000Z
|
ga/gen_graph.py
|
k4t0mono/exercicios-ia
|
06f76db20f519b8d7e9b5ee2cf5c7a72b21e188c
|
[
"BSD-3-Clause"
] | null | null | null |
ga/gen_graph.py
|
k4t0mono/exercicios-ia
|
06f76db20f519b8d7e9b5ee2cf5c7a72b21e188c
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import numpy as np
import matplotlib.pyplot as plt
f = open(sys.argv[1], 'r')
lines = f.readlines()
f.close()
pop_size = int(lines.pop(0))
pops = []
for l in lines:
if l[0] == '[':
pops.append(l.strip())
for j in range(len(pops)):
p = []
for n in pops[j][1:-1].split(','):
p.append(int(n))
d = {}
for i in range(-16, 16):
d[i] = 0
for i in p:
d[i] += 1
x = []
y = []
for k in d:
x.append(k)
y.append(d[k])
axes = plt.gca()
axes.set_xlim([-17, 16])
axes.set_ylim([0, pop_size+1])
# plt.scatter(x, y, s=5, c=[(0,0,0)], alpha=0.5)
plt.bar(x, y, 1, color='blue')
plt.title('Population {:03d}'.format(j))
plt.xlabel('x')
plt.ylabel('qnt')
name = 'pop{:03d}.png'.format(j)
plt.savefig(name)
print('saving {}'.format(name))
plt.clf()
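A hypothetical run; the input format is inferred from the parsing above (the first line holds the population size, and lines beginning with '[' hold populations):
# python gen_graph.py ga_run.log
# ga_run.log (illustrative):
#   20
#   [3, -5, 7, 0]
#   [4, -5, 7, 1]
# -> writes pop000.png, pop001.png, ...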
| 17.979592
| 52
| 0.506243
| 151
| 881
| 2.927152
| 0.437086
| 0.013575
| 0.027149
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044094
| 0.279228
| 881
| 48
| 53
| 18.354167
| 0.651969
| 0.052213
| 0
| 0
| 0
| 0
| 0.060024
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d8f0a7d44e8c877c0f58c7e9fe5bd054fd5c40a
| 7,486
|
py
|
Python
|
src/analyses/analyses.py
|
zahariaa/disentangled-dynamics
|
2dbdf9884f6f90ff67073f571191227e7abce81d
|
[
"MIT"
] | null | null | null |
src/analyses/analyses.py
|
zahariaa/disentangled-dynamics
|
2dbdf9884f6f90ff67073f571191227e7abce81d
|
[
"MIT"
] | null | null | null |
src/analyses/analyses.py
|
zahariaa/disentangled-dynamics
|
2dbdf9884f6f90ff67073f571191227e7abce81d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
analyses for bVAE entanglement, etc
"""
import torch
import sys
sys.path.append("..") # Adds higher directory to python modules path.
import matplotlib.pyplot as plt
import numpy as np
from data.dspritesb import dSpriteBackgroundDataset
from torchvision import transforms
ds = dSpriteBackgroundDataset(transform=transforms.Resize((32,32)),shapetype = 'circle')
# Build sweeps through model ...
def sweepCircleLatents(model,latents=np.linspace(0,1,16),def_latents=None):
"""sweepCircleLatents(model,latents,def_latents):
generates input images that sweep through each latent variable,
and evaluates them on given model
model = loaded model, e.g., vae = staticVAE32(n_latent = 4)
latents = latents to sweep through. defaults to
np.linspace(0,1,16)
def_latents = 'default latents': defines the non-swept latents.
defaults to [0.5,0.5,0.5,0.5] if None
---e.g.,---
yhat, x = sweepCircleLatents(vae)
"""
# Initialization
nsweep = len(latents)
if type(model).__name__ == 'encoderBVAE_like':
n_latent = model.fc.out_features
encoder = model
else:
n_latent = model.n_latent
encoder = model.encode
if def_latents is None:
def_latents = 0.5*np.ones(n_latent)
# Generate stimulus sweeps
x = torch.zeros((n_latent,nsweep,1,32,32))
for i in np.arange(0,nsweep):
x[0,i,:,:,:] = ds.arbitraryCircle(latents[i],def_latents[1],def_latents[2],def_latents[3])
x[1,i,:,:,:] = ds.arbitraryCircle(def_latents[0],latents[i],def_latents[2],def_latents[3])
x[2,i,:,:,:] = ds.arbitraryCircle(def_latents[0],def_latents[1],latents[i],def_latents[3])
x[3,i,:,:,:] = ds.arbitraryCircle(def_latents[0],def_latents[1],def_latents[2],latents[i])
# ... and evaluate them all at once
yhat = encoder(x)
if not (type(model).__name__ == 'encoderBVAE_like' or type(model).__name__ == 'dynamicAE32'):
yhat = yhat[0]
return yhat,x
# Plot sweeps through model
def plotCircleSweep(x=None,nimgs=5):
"""plotCircleSweep(yhat,x):
plots a subset of stimuli,
generated from sweepCircleLatents()
---e.g.,---
yhat, x = sweepCircleLatents(vae)
plotCircleSweep(x)
alternatively,
plotCircleSweep(sweepCircleLatents(vae))
"""
# Initialization
if isinstance(x, tuple):
x = x[1]  # accept the (yhat, x) tuple returned by sweepCircleLatents()
# Start a-plottin'
fig, ax = plt.subplots(nimgs,4,figsize=(9, 15), dpi= 80, facecolor='w', edgecolor='k')
for latentdim in range(4):
cnt = -1
for img in np.linspace(0,15,nimgs).astype(int):
cnt+=1
plt.sca(ax[cnt,latentdim])
plt.set_cmap('gray')
ax[cnt,latentdim].imshow(
x[latentdim*16+img,:,:,:].squeeze(), vmin=0, vmax=1)
plt.axis('off')
return fig, ax
def plotLatentsSweep(yhat,nmodels=1):
"""plotLatentsSweep(yhat):
plots model latents and a subset of the corresponding stimuli,
generated from sweepCircleLatents()
---e.g.,---
yhat, x = sweepCircleLatents(vae)
    plotLatentsSweep(yhat)
alternatively,
plotLatentsSweep(sweepCircleLatents(vae))
"""
# Initialization
if type(yhat) is tuple:
yhat = yhat[0]
# Start a-plottin'
fig, ax = plt.subplots(nmodels,4,figsize=(9, 15), dpi= 80, facecolor='w', edgecolor='k', sharey='row',sharex='col')
for latentdim in range(4):
if nmodels > 1:
for imodel in range(nmodels):
plt.sca(ax[imodel,latentdim])
plt.plot(yhat[imodel][latentdim*16+np.arange(0,16),:].detach().numpy())
# ax[imodel,latentdim].set_aspect(1./ax[imodel,latentdim].get_data_ratio())
ax[imodel,latentdim].spines['top'].set_visible(False)
ax[imodel,latentdim].spines['right'].set_visible(False)
if latentdim>0:
ax[imodel,latentdim].spines['left'].set_visible(False)
# ax[imodel,latentdim].set_yticklabels([])
ax[imodel,latentdim].tick_params(axis='y', length=0)
# if imodel<nmodels-1 or latentdim>0:
ax[imodel,latentdim].spines['bottom'].set_visible(False)
ax[imodel,latentdim].set_xticklabels([])
ax[imodel,latentdim].tick_params(axis='x', length=0)
else:
imodel=0
plt.sca(ax[latentdim])
plt.plot(yhat[latentdim*16+np.arange(0,16),:].detach().numpy())
ax[latentdim].set_aspect(1./ax[latentdim].get_data_ratio())
ax[latentdim].spines['top'].set_visible(False)
ax[latentdim].spines['right'].set_visible(False)
if latentdim>0:
ax[latentdim].spines['left'].set_visible(False)
ax[latentdim].tick_params(axis='y', length=0)
# if imodel<nmodels-1 or latentdim>0:
ax[latentdim].spines['bottom'].set_visible(False)
ax[latentdim].set_xticklabels([])
ax[latentdim].tick_params(axis='x', length=0)
return fig, ax
def colorAxisNormalize(colorbar):
"""colorAxisNormalize(colorbar):
normalizes a color axis so it is centered on zero.
useful for diverging colormaps
(e.g., cmap='bwr': blue=negative, red=positive, white=0)
input is already initialized colorbar object from a plot
---e.g.,---
corr_vae = np.corrcoef(yhat_vae.detach().numpy().T)
plt.set_cmap('bwr')
plt.imshow(corr_vae)
cb = plt.colorbar()
colorAxisNormalize(cb)
---or---
colorAxisNormalize(plt.colorbar())
"""
cm = np.max(np.abs(colorbar.get_clim()))
colorbar.set_clim(-cm,cm)
def showReconstructionsAndErrors(model):
"""showReconstructionsAndErrors(model):
generates random inputs, runs them through a specified model
to generate their reconstructions. plots the inputs,
reconstructions, and their difference
---e.g.---
from staticvae.models import staticVAE32
vae = staticVAE32(n_latent = 4)
vae.eval()
checkpoint = torch.load('../staticvae/trained/staticvae32_dsprites_circle_last_500K',map_location='cpu')
vae.load_state_dict(checkpoint['model_states']['net'])
    showReconstructionsAndErrors(vae)
"""
fig=plt.figure(figsize=(18, 16), dpi= 80, facecolor='w',
edgecolor='k')
cnt = 0
for ii in range(12):
x,label = ds[np.random.randint(1000)]
x = x[np.newaxis, :, :]
mu,logvar = model.encode(x.float())
recon = model.decode(mu).detach()
diff = x - recon
cnt += 1
ax = plt.subplot(6,6,cnt)
plt.set_cmap('gray')
ax.imshow(x.squeeze(), vmin=0, vmax=1)
plt.title('true')
plt.axis('off')
cnt += 1
ax = plt.subplot(6,6,cnt)
ax.imshow(recon.squeeze(), vmin=0, vmax=1)
plt.title('recon')
plt.axis('off')
cnt += 1
ax = plt.subplot(6,6,cnt)
plt.set_cmap('bwr')
img = ax.imshow(diff.numpy().squeeze())
colorAxisNormalize(fig.colorbar(img))
plt.title('diff')
plt.axis('off')
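# --- usage sketch (not part of the original module) ---
# a minimal end-to-end run of the functions above; assumes the trained
# checkpoint from the showReconstructionsAndErrors docstring is available
def _demo_sweep():
    from staticvae.models import staticVAE32  # import path taken from the docstring above
    vae = staticVAE32(n_latent=4)
    vae.eval()
    checkpoint = torch.load('../staticvae/trained/staticvae32_dsprites_circle_last_500K',
                            map_location='cpu')
    vae.load_state_dict(checkpoint['model_states']['net'])
    yhat, x = sweepCircleLatents(vae)  # sweep each of the 4 latents over [0, 1]
    plotCircleSweep(x)                 # stimuli for each swept latent
    plotLatentsSweep(yhat)             # inferred latents for each sweep
    plt.show()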
| 36.339806
| 119
| 0.593909
| 925
| 7,486
| 4.723243
| 0.271351
| 0.038911
| 0.042802
| 0.023346
| 0.378119
| 0.318379
| 0.280385
| 0.161822
| 0.161822
| 0.127489
| 0
| 0.025496
| 0.266497
| 7,486
| 205
| 120
| 36.517073
| 0.770169
| 0.320064
| 0
| 0.221154
| 0
| 0
| 0.029848
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048077
| false
| 0
| 0.057692
| 0
| 0.134615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d9030a3ab27bda98f5076efe7e1d4f4d61c1b31
| 2,684
|
py
|
Python
|
Chapter_BestPractices/Centering_Scaling.py
|
ML-PSE/Machine_Learning_for_PSE
|
b53578d7cc0e0eca4907527b188a60de06d6710e
|
[
"Apache-2.0"
] | 2
|
2022-02-20T18:57:46.000Z
|
2022-03-03T07:07:12.000Z
|
Chapter_BestPractices/Centering_Scaling.py
|
ML-PSE/Machine_Learning_for_PSE
|
b53578d7cc0e0eca4907527b188a60de06d6710e
|
[
"Apache-2.0"
] | null | null | null |
Chapter_BestPractices/Centering_Scaling.py
|
ML-PSE/Machine_Learning_for_PSE
|
b53578d7cc0e0eca4907527b188a60de06d6710e
|
[
"Apache-2.0"
] | null | null | null |
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Centering & Scaling
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%% Standard scaling
import numpy as np
from sklearn.preprocessing import StandardScaler
X = np.array([[ 1000, 0.01, 300],
[ 1200, 0.06, 350],
[ 1500, 0.1, 320]])
scaler = StandardScaler().fit(X) # computes mean & std column-wise
X_scaled = scaler.transform(X) # transform using computed mean and std
# check mean = 0 and variance = 1 for every variable/column after scaling
print(X_scaled.mean(axis=0)) # returns 1D array of shape (3,)
print(X_scaled.std(axis=0))  # returns 1D array of shape (3,)
# access mean and variance via object properties
print(scaler.mean_)          # returns 1D array of shape (3,)
print(scaler.var_)           # returns 1D array of shape (3,)
#%% Normalization
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler() # create object
X_scaled = scaler.fit_transform(X) # fit & transform
# check min = 0 and max = 1 for every variable/column after scaling
print(X_scaled.min(axis=0))
print(X_scaled.max(axis=0))
# access min and max via object properties
print(scaler.data_min_)
print(scaler.data_max_)
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Robust Centering & Scaling
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%% Generate outlier-infested data
X = np.random.normal(40, 1, (1500,1))
X[200:300] = X[200:300] +8; X[1000:1150] = X[1000:1150] + 8
# plot
import matplotlib.pyplot as plt
plt.plot(X, '.-')
plt.xlabel('sample #'), plt.ylabel('variable measurement')
plt.title('Raw measurements')
#%% Transform via standard scaling
scaler = StandardScaler().fit(X)
X_scaled = scaler.transform(X)
# mean and std
print('Estimated mean = ', scaler.mean_[0])
print('Estimated standard deviation = ', np.sqrt(scaler.var_[0]))
# plot
plt.figure()
plt.plot(X_scaled, '.-')
plt.xlabel('sample #'), plt.ylabel('scaled variable measurement')
plt.xlim((0,1500))
plt.title('Standard scaling')
#%% Transform via robust MAD scaling
# compute median and MAD
from scipy import stats
median = np.median(X)
# median_absolute_deviation was removed in SciPy 1.9;
# scale='normal' keeps the old 1.4826 consistency factor
MAD = stats.median_abs_deviation(X, scale='normal')
# scale
X_scaled = (X - median)/MAD[0]
# median and MAD
print('Estimated robust location = ', median)
print('Estimated robust spread = ', MAD)
# plot
plt.figure()
plt.plot(X_scaled, '.-')
plt.xlabel('sample #'), plt.ylabel('scaled variable measurement')
plt.xlim((0,1500))
plt.title('Robust MAD scaling')
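#%% Reusing a fitted scaler on new data
# a minimal sketch (not in the original script): fit on training data only,
# then apply the same transform to unseen data to avoid information leakage
X_train, X_test = X[:1000], X[1000:]
scaler = StandardScaler().fit(X_train)    # mean & std estimated from training data only
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)  # reuse the training mean & std
X_recovered = scaler.inverse_transform(X_test_scaled)  # back to original units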
| 31.209302
| 80
| 0.592399
| 340
| 2,684
| 4.614706
| 0.276471
| 0.044614
| 0.030593
| 0.038241
| 0.331421
| 0.248566
| 0.248566
| 0.235182
| 0.215424
| 0.182282
| 0
| 0.044984
| 0.171759
| 2,684
| 85
| 81
| 31.576471
| 0.660819
| 0.389344
| 0
| 0.272727
| 0
| 0
| 0.168754
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.113636
| 0
| 0.113636
| 0.272727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d91be2759fba448a3db8257c92c32db569fc6fc
| 2,244
|
py
|
Python
|
web/addons/mass_mailing/models/mass_mailing_report.py
|
diogocs1/comps
|
63df07f6cf21c41e4527c06e2d0499f23f4322e7
|
[
"Apache-2.0"
] | 1
|
2019-12-29T11:53:56.000Z
|
2019-12-29T11:53:56.000Z
|
odoo/addons/mass_mailing/models/mass_mailing_report.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | null | null | null |
odoo/addons/mass_mailing/models/mass_mailing_report.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | 3
|
2020-10-08T14:42:10.000Z
|
2022-01-28T14:12:29.000Z
|
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
from openerp import tools
class MassMailingReport(osv.Model):
_name = 'mail.statistics.report'
_auto = False
_description = 'Mass Mailing Statistics'
_columns = {
'scheduled_date': fields.datetime('Scheduled Date', readonly=True),
'name': fields.char('Mass Mail', readonly=True),
'campaign': fields.char('Mass Mail Campaign', readonly=True),
'sent': fields.integer('Sent', readonly=True),
'delivered': fields.integer('Delivered', readonly=True),
'opened': fields.integer('Opened', readonly=True),
'bounced': fields.integer('Bounced', readonly=True),
'replied': fields.integer('Replied', readonly=True),
'state': fields.selection(
[('draft', 'Draft'), ('test', 'Tested'), ('done', 'Sent')],
string='Status', readonly=True,
),
'email_from': fields.char('From', readonly=True),
}
def init(self, cr):
"""Mass Mail Statistical Report: based on mail.mail.statistics that models the various
statistics collected for each mailing, and mail.mass_mailing model that models the
various mailing performed. """
tools.drop_view_if_exists(cr, 'mail_statistics_report')
cr.execute("""
CREATE OR REPLACE VIEW mail_statistics_report AS (
SELECT
min(ms.id) as id,
ms.scheduled as scheduled_date,
mm.name as name,
mc.name as campaign,
count(ms.bounced) as bounced,
count(ms.sent) as sent,
(count(ms.sent) - count(ms.bounced)) as delivered,
count(ms.opened) as opened,
count(ms.replied) as replied,
mm.state,
mm.email_from
FROM
mail_mail_statistics as ms
left join mail_mass_mailing as mm ON (ms.mass_mailing_id=mm.id)
left join mail_mass_mailing_campaign as mc ON (ms.mass_mailing_campaign_id=mc.id)
GROUP BY ms.scheduled, mm.name, mc.name, mm.state, mm.email_from
)""")
| 42.339623
| 101
| 0.572638
| 254
| 2,244
| 4.944882
| 0.314961
| 0.095541
| 0.047771
| 0.028662
| 0.065287
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000649
| 0.31328
| 2,244
| 52
| 102
| 43.153846
| 0.814406
| 0.096702
| 0
| 0
| 0
| 0.023256
| 0.590025
| 0.076309
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0.046512
| 0
| 0.186047
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d934505c9a5de277afc3e1a3c4cc83a509daf62
| 2,750
|
py
|
Python
|
modules/springerlink.py
|
Christoph-D/paperget
|
9887936039ecc9fafe4dcce7988e75e964a05bcd
|
[
"MIT"
] | 3
|
2016-06-17T15:52:02.000Z
|
2017-12-21T02:44:49.000Z
|
modules/springerlink.py
|
Christoph-D/paperget
|
9887936039ecc9fafe4dcce7988e75e964a05bcd
|
[
"MIT"
] | null | null | null |
modules/springerlink.py
|
Christoph-D/paperget
|
9887936039ecc9fafe4dcce7988e75e964a05bcd
|
[
"MIT"
] | 1
|
2021-02-16T21:10:33.000Z
|
2021-02-16T21:10:33.000Z
|
import urllib, re
class FakeUseragentURLopener(urllib.FancyURLopener):
version = "Mozilla/5.0 (Ubuntu; X11; Linux i686; rv:9.0.1) Gecko/20100101 Firefox/9.0.1"
urllib._urlopener = FakeUseragentURLopener()
download_pdf_regex = re.compile('.*<li class="pdf"><a class="sprite pdf-resource-sprite" href="([^"]*)" title="Download PDF.*')
viewstate_regex = re.compile('.*<input type="hidden" name="__VIEWSTATE" id="__VIEWSTATE" value="([^"]*)" />.*')
eventvalidation_regex = re.compile('.*<input type="hidden" name="__EVENTVALIDATION" id="__EVENTVALIDATION" value="([^"]*)" />.*')
def download_pdf(url, filename):
page = urllib.urlopen(url).read()
result = download_pdf_regex.search(page)
if result is None:
return False
fulltext_url = "http://www.springerlink.com" + result.group(1)
return urllib.urlretrieve(fulltext_url, filename) is not None
def download_bib(url, filename):
url += 'export-citation/'
form = urllib.urlopen(url).read()
viewstate = viewstate_regex.search(form)
eventvalidation = eventvalidation_regex.search(form)
if viewstate is None or eventvalidation is None:
return False
viewstate = viewstate.group(1)
eventvalidation = eventvalidation.group(1)
data = urllib.urlencode([
('__VIEWSTATE', viewstate),
('ctl00$ctl14$cultureList', 'en-us'),
('ctl00$ctl14$SearchControl$BasicSearchForTextBox', ''),
('ctl00$ctl14$SearchControl$BasicAuthorOrEditorTextBox', ''),
('ctl00$ctl14$SearchControl$BasicPublicationTextBox', ''),
('ctl00$ctl14$SearchControl$BasicVolumeTextBox', ''),
('ctl00$ctl14$SearchControl$BasicIssueTextBox', ''),
('ctl00$ctl14$SearchControl$BasicPageTextBox', ''),
('ctl00$ContentPrimary$ctl00$ctl00$Export', 'CitationOnlyRadioButton'),
('ctl00$ContentPrimary$ctl00$ctl00$CitationManagerDropDownList', 'BibTex'),
('ctl00$ContentPrimary$ctl00$ctl00$ExportCitationButton', 'Export+Citation'),
('__EVENTVALIDATION', eventvalidation)])
return urllib.urlretrieve(url, filename, data=data) is not None
def download_pdf_chapter(url, filename):
return urllib.urlretrieve(url.replace('/chapter/', '/content/pdf/', 1) + '.pdf', filename) is not None
import base
base.register_module('http://www\.springerlink\.com/content/.*',
{'name': 'springerlink',
'download_pdf': download_pdf,
'download_bib': download_bib,
})
base.register_module('http://link\.springer\.com/chapter/.*',
{'name': 'springerlink_chapter',
'download_pdf': download_pdf_chapter,
})
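# usage sketch (assumption, not part of the original module): the registered
# handlers can also be called directly, matching the Python 2 urllib API used
# above ('<id>' is a placeholder document id):
#
#   download_pdf('http://www.springerlink.com/content/<id>/', 'paper.pdf')
#   download_bib('http://www.springerlink.com/content/<id>/', 'paper.bib')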
| 49.107143
| 129
| 0.651273
| 272
| 2,750
| 6.448529
| 0.338235
| 0.056442
| 0.078677
| 0.049601
| 0.060433
| 0.037628
| 0.037628
| 0
| 0
| 0
| 0
| 0.0322
| 0.198182
| 2,750
| 55
| 130
| 50
| 0.763265
| 0
| 0
| 0.081633
| 0
| 0.040816
| 0.396
| 0.189455
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0
| 0.040816
| 0.020408
| 0.244898
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d9caa03a4ae2fbdbadf5bfc3fd2600ade753a1b
| 3,460
|
py
|
Python
|
modules/colors.py
|
trybefore/discordbot
|
1ffce8149cde586e8c5883e8200b02937c5a15f6
|
[
"MIT"
] | 3
|
2020-09-15T23:19:18.000Z
|
2021-02-17T10:24:54.000Z
|
modules/colors.py
|
trybefore/discordbot
|
1ffce8149cde586e8c5883e8200b02937c5a15f6
|
[
"MIT"
] | 3
|
2021-06-22T10:57:14.000Z
|
2021-06-22T10:57:15.000Z
|
modules/colors.py
|
trybefore/discordbot
|
1ffce8149cde586e8c5883e8200b02937c5a15f6
|
[
"MIT"
] | 2
|
2020-05-03T20:54:57.000Z
|
2020-09-12T18:49:13.000Z
|
from threading import Lock
import discord
from discord.ext import commands
from loguru import logger
from local_types import Snowflake
from modules import is_bot_admin
class Colors(commands.Cog):
bot: discord.ext.commands.Bot
colorRoles = {}
mutex = Lock()
def __init__(self, bot):
self.bot = bot
self.reload()
def reload(self):
self.mutex.acquire()
for g in self.bot.guilds:
try:
self.colorRoles[g.id].clear()
            except KeyError:
                pass  # no cached color roles for this guild yet
d = {}
for r in g.roles:
if r.name.lower().startswith("color- "):
color_name = r.name.lower().split("color- ")[1]
d[color_name] = Snowflake(r.id)
# logger.debug(f"color roles: {d}")
self.colorRoles[g.id] = d
self.mutex.release()
@commands.command(name='reload_colors', hidden=True)
@commands.check_any(is_bot_admin(), commands.has_permissions(manage_roles=True), commands.is_owner())
@commands.max_concurrency(1, wait=True)
@commands.guild_only()
async def reload_colors(self, ctx):
        self.reload()  # reload() is synchronous, so it must not be awaited
async def print_colors(self, ctx: discord.ext.commands.Context):
g: discord.Guild = ctx.guild
d: dict = self.colorRoles[g.id]
roles = []
for r in d.keys():
roles.append(r)
await ctx.send(f"```{', '.join(roles)}```")
# do not use outside of color command function
async def remove_roles(self, ctx: discord.ext.commands.Context):
g: discord.Guild = ctx.guild
member: discord.member.Member = g.get_member(ctx.author.id)
d: dict = self.colorRoles[g.id]
to_remove = []
for r in d.values():
for mr in member.roles:
if r.id == mr.id:
to_remove.append(r)
await member.remove_roles(*to_remove, reason="Color Command", atomic=True)
@commands.command(name='color', help="Choose your name color")
@commands.cooldown(type=commands.BucketType.user, rate=1, per=3)
@commands.guild_only()
async def color(self, ctx: discord.ext.commands.Context, color: str):
self.mutex.acquire()
g: discord.Guild = ctx.guild
member: discord.member.Member = g.get_member(ctx.author.id)
color = color.lower()
if color == "list":
await self.print_colors(ctx)
else:
d: dict = self.colorRoles[g.id]
if d is None:
await ctx.send(f"{ctx.author.mention} could not find any color roles in this server!")
else:
try:
r = d[color]
await self.remove_roles(ctx)
await member.add_roles(r)
await ctx.send(f"{ctx.author.mention} successfully changed your color to {color}")
except KeyError:
await ctx.send(
f"{ctx.author.mention} could not find any such color!\n ```{self.bot.command_prefix}{ctx.command.name} list``` to view available colors")
self.mutex.release()
@color.error
async def color_error(self, ctx, error):
if isinstance(error, discord.ext.commands.errors.CommandOnCooldown):
await ctx.send(f"{ctx.author.mention} {error}")
else:
logger.error(f"color error: {error}")
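# extension entry point sketch (assumption: this module is loaded with
# bot.load_extension("modules.colors"); uses discord.py's pre-2.0 sync API)
def setup(bot):
    bot.add_cog(Colors(bot))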
| 31.454545
| 161
| 0.57948
| 437
| 3,460
| 4.512586
| 0.276888
| 0.030426
| 0.045639
| 0.043103
| 0.267748
| 0.234787
| 0.185091
| 0.15568
| 0.15568
| 0.15568
| 0
| 0.001656
| 0.302023
| 3,460
| 109
| 162
| 31.743119
| 0.814907
| 0.026301
| 0
| 0.2375
| 0
| 0.0125
| 0.120654
| 0.01367
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0.0125
| 0.075
| 0
| 0.15
| 0.025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9d9e064b6bf0f12b09cc360b5115a0ae4d5fbeff
| 1,645
|
py
|
Python
|
examples/basic_dsp_example.py
|
Camotubi/basic_dsp
|
38a380439cc8936c64febbc12227df78d95fce7f
|
[
"Apache-2.0",
"MIT"
] | 40
|
2015-11-23T02:23:35.000Z
|
2022-03-18T11:19:11.000Z
|
examples/basic_dsp_example.py
|
Camotubi/basic_dsp
|
38a380439cc8936c64febbc12227df78d95fce7f
|
[
"Apache-2.0",
"MIT"
] | 47
|
2015-11-23T01:58:38.000Z
|
2021-01-11T07:53:37.000Z
|
examples/basic_dsp_example.py
|
Camotubi/basic_dsp
|
38a380439cc8936c64febbc12227df78d95fce7f
|
[
"Apache-2.0",
"MIT"
] | 9
|
2018-05-19T07:25:26.000Z
|
2022-01-09T20:51:40.000Z
|
import ctypes
import struct
import time
#
# A small example how to use basic_dsp in a different language.
#
class VecResult(ctypes.Structure):
_fields_ = [("resultCode", ctypes.c_int),
("result", ctypes.c_void_p)]
lib = ctypes.WinDLL('basic_dsp.dll')
new64Proto = ctypes.WINFUNCTYPE (
ctypes.c_void_p, # Return type.
ctypes.c_int,
ctypes.c_int,
ctypes.c_double,
ctypes.c_ulong,
ctypes.c_double)
new64 = new64Proto (("new64", lib))
getValue64Proto = ctypes.WINFUNCTYPE (
ctypes.c_double, # Return type.
ctypes.c_void_p,
ctypes.c_ulong)
getValue64 = getValue64Proto (("get_value64", lib))
offset64Proto = ctypes.WINFUNCTYPE (
VecResult, # Return type.
ctypes.c_void_p,
ctypes.c_double)
offset64 = offset64Proto (("real_offset64", lib))
vec = new64(
ctypes.c_int(0),
ctypes.c_int(0),
ctypes.c_double(0.0),
ctypes.c_ulong(100000),
ctypes.c_double(1.0))
val = getValue64(vec, ctypes.c_ulong(0))
print('At the start: vec[0] = {}'.format(val))
start = time.perf_counter()  # time.clock() was removed in Python 3.8
iterations = 100000
toNs = 1e9 / iterations
increment = 5.0
for x in range(0, iterations):
vecRes = offset64(vec, ctypes.c_double(increment))
vec = vecRes.result
end = time.perf_counter()
print('{} ns per iteration, each iteration has {} samples'.format((end - start) * toNs, iterations))
print('Result code: {} (0 means no error)'.format(vecRes.resultCode))
vecRes = offset64(vec, ctypes.c_double(5.0))
vec = vecRes.result
val = getValue64(vec, ctypes.c_ulong(0))
print('After {} iterations of increment by {}: vec[0] = {}'.format(iterations + 1, increment, val))
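# portability sketch (assumption, not part of the original example): on
# non-Windows systems the library would be loaded with the cdecl convention
# instead of WinDLL/WINFUNCTYPE ('libbasic_dsp.so' is a hypothetical name):
#
#   lib = ctypes.CDLL('./libbasic_dsp.so')
#   new64Proto = ctypes.CFUNCTYPE(
#       ctypes.c_void_p,  # return type
#       ctypes.c_int, ctypes.c_int, ctypes.c_double,
#       ctypes.c_ulong, ctypes.c_double)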
| 26.967213
| 100
| 0.677204
| 227
| 1,645
| 4.76652
| 0.334802
| 0.142329
| 0.096118
| 0.044362
| 0.22366
| 0.198706
| 0.116451
| 0.116451
| 0
| 0
| 0
| 0.048363
| 0.182979
| 1,645
| 60
| 101
| 27.416667
| 0.756696
| 0.06079
| 0
| 0.319149
| 0
| 0
| 0.141835
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.06383
| 0
| 0.106383
| 0.085106
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9da1a92cdcf88a9e292d7bdc3fb0eeb027139777
| 2,305
|
py
|
Python
|
chemex/experiments/cpmg/fast/liouvillian.py
|
marcuscangussu/chemex_bouvignies
|
ce9ec20a42604eb5995abb0f8a84094b29747651
|
[
"BSD-3-Clause"
] | null | null | null |
chemex/experiments/cpmg/fast/liouvillian.py
|
marcuscangussu/chemex_bouvignies
|
ce9ec20a42604eb5995abb0f8a84094b29747651
|
[
"BSD-3-Clause"
] | null | null | null |
chemex/experiments/cpmg/fast/liouvillian.py
|
marcuscangussu/chemex_bouvignies
|
ce9ec20a42604eb5995abb0f8a84094b29747651
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Created on Sep 1, 2011
@author: guillaume
"""
from scipy import zeros
from chemex.bases.two_states.fast import R_IXY, DR_IXY, DW, KAB, KBA
def compute_liouvillians(pb=0.0, kex=0.0, dw=0.0,
r_ixy=5.0, dr_ixy=0.0):
"""
Compute the exchange matrix (Liouvillian)
The function assumes a 2-site (A <-> B) exchanging system.
    The matrix is written in the 6x6 Cartesian basis, that is, {Nx, Ny, Nz}{a,b}.
Here the thermal equilibrium is assumed to be 0. This is justified because of
the +/- phase cycling of the first 90 degree pulse at the beginning of the
cpmg block.
Parameters
----------
pb : float
Fractional population of state B.
0.0 for 0%, 1.0 for 100%.
kex : float
Exchange rate between state A and B in /s.
dw : float
Chemical shift difference between states A and B in rad/s.
    r_ixy : float
        Transverse relaxation rate of state a in /s.
    dr_ixy : float
        Transverse relaxation rate difference between states a and b in /s.
Returns
-------
out: numpy.matrix
Liouvillian describing free precession of one
isolated spin in presence of two-site exchange.
"""
kab = kex * pb
kba = kex - kab
l_free = R_IXY * r_ixy
l_free += DR_IXY * dr_ixy
l_free += DW * dw
l_free += KAB * kab
l_free += KBA * kba
return l_free
def compute_iy_eq(pb):
"""
Returns the equilibrium magnetization vector.
Parameters
----------
pb : float
Fractional population of state B.
0.0 for 0%, 1.0 for 100%.
Returns
-------
out: numpy.matrix
Magnetization vector at equilibrium.
"""
mag_eq = zeros((4, 1))
mag_eq[1, 0] += (1.0 - pb)
mag_eq[3, 0] += pb
return mag_eq
def get_iy(mag):
"""
    Returns the amount of magnetization along y.
Parameters
----------
mag : ndarray
Magnetization vector.
Returns
-------
    magy_a, magy_b : float
        Amount of magnetization in states a and b along y.
"""
magy_a = mag[1, 0]
magy_b = mag[3, 0]
return magy_a, magy_b
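# usage sketch (not part of the original module; parameter values are
# illustrative only): build the Liouvillian for a 5% minor state exchanging
# at 300 /s with an 800 rad/s shift difference, plus its equilibrium vector
if __name__ == '__main__':
    l_free = compute_liouvillians(pb=0.05, kex=300.0, dw=800.0)
    mag_eq = compute_iy_eq(0.05)
    print(l_free)          # free evolution follows expm(l_free * t) @ mag_eq
    print(get_iy(mag_eq))  # (0.95, 0.05) at equilibrium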
| 21.745283
| 81
| 0.59436
| 344
| 2,305
| 3.886628
| 0.343023
| 0.008975
| 0.014959
| 0.015707
| 0.210172
| 0.133134
| 0.133134
| 0.088257
| 0.088257
| 0.088257
| 0
| 0.030644
| 0.306291
| 2,305
| 105
| 82
| 21.952381
| 0.805503
| 0.630803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.095238
| 0
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9da26db5109dcd203a39bfcab1fbaa5c755f0368
| 33,787
|
py
|
Python
|
Software/python/config_dialog.py
|
edavalosanaya/SKORE
|
72e742611ba96b0df542781ded0685f525bea82b
|
[
"MIT"
] | 1
|
2020-09-20T19:00:17.000Z
|
2020-09-20T19:00:17.000Z
|
Software/python/config_dialog.py
|
MrCodingRobot/SKORE
|
72e742611ba96b0df542781ded0685f525bea82b
|
[
"MIT"
] | null | null | null |
Software/python/config_dialog.py
|
MrCodingRobot/SKORE
|
72e742611ba96b0df542781ded0685f525bea82b
|
[
"MIT"
] | null | null | null |
# General Utility Libraries
import sys
import os
import warnings
# PyQt5, GUI Library
from PyQt5 import QtCore, QtGui, QtWidgets
# Serial and Midi Port Library
import rtmidi
import serial
import serial.tools.list_ports
# SKORE Library
from lib_skore import read_config, update_config
import globals
#-------------------------------------------------------------------------------
# Classes
class ArduinoComboBox(QtWidgets.QComboBox):
"""
This class allows the combobox to recognize arduinos connected as soon as
the user clicks the combobox.
"""
def avaliable_arduino_com(self):
"""
        This function returns all the available COM ports in a list of strings.
"""
ports = serial.tools.list_ports.comports(include_links=False)
results = []
for port in ports:
results.append(str(port.device))
return results
def showPopup(self):
"""
This function appends to the original showPopup function from the
        QComboBox by adding the available Arduino COM ports.
"""
avaliable_arduino_ports = self.avaliable_arduino_com()
self.clear()
for avaliable_port in avaliable_arduino_ports:
self.addItem(avaliable_port)
super(ArduinoComboBox, self).showPopup()
return None
class PianoComboBox(QtWidgets.QComboBox):
"""
This class allows the combobox to recognize piano connected as soon as the
user clicks the combobox.
"""
def avaliable_piano_port(self):
"""
This function returns all the available MIDI ports in a list of string.
"""
temp_midi_in = []
temp_midi_in = rtmidi.MidiIn()
avaliable_ports = temp_midi_in.get_ports()
results = []
for port_name in avaliable_ports:
results.append(str(port_name))
return results
def showPopup(self):
"""
This function appends to the showPopup function of the QComboBox by
        adding the available MIDI ports to the listed items in the QComboBox.
"""
avaliable_piano_ports = self.avaliable_piano_port()
self.clear()
for avaliable_piano_port_connected in avaliable_piano_ports:
self.addItem(avaliable_piano_port_connected)
super(PianoComboBox, self).showPopup()
return None
class ConfigDialog(QtWidgets.QDialog):
"""
This class is the settings dialog that provides the user the capability
of changing the settings of the SKORE application.
"""
finish_apply_signal = QtCore.pyqtSignal()
def __init__(self):
"""
This function sets the settings dialog by changing the title, size, icon,
and placing the widgets.
"""
super(QtWidgets.QDialog, self).__init__()
self.setObjectName("Dialog")
self.resize(530 * globals.S_W_R, 679 * globals.S_H_R)
self.setWindowTitle("SKORE - General Configuration")
        self.setWindowIcon(QtGui.QIcon(r'.\images\skore_icon.png'))  # raw string avoids invalid escape sequences
self.setup_ui()
self.setup_func()
self.read_all_settings()
self.update_settings()
return None
def setup_ui(self):
"""
This function places all the widgets in the settings dialog.
"""
self.apply_close_buttonBox = QtWidgets.QDialogButtonBox(self)
self.apply_close_buttonBox.setGeometry(QtCore.QRect(310 * globals.S_W_R, 640 * globals.S_H_R, 201 * globals.S_W_R, 32 * globals.S_H_R))
self.apply_close_buttonBox.setLayoutDirection(QtCore.Qt.RightToLeft)
self.apply_close_buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.apply_close_buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Apply|QtWidgets.QDialogButtonBox.Close)
self.apply_close_buttonBox.setObjectName("apply_cancel_buttonBox")
#-----------------------------------------------------------------------
# Tab Widget
self.tabWidget = QtWidgets.QTabWidget(self)
self.tabWidget.setGeometry(QtCore.QRect(10 * globals.S_W_R, 10 * globals.S_H_R, 511 * globals.S_W_R, 621 * globals.S_H_R))
self.tabWidget.setLayoutDirection(QtCore.Qt.LeftToRight)
self.tabWidget.setObjectName("tabWidget")
#-----------------------------------------------------------------------#
# Tab Widget -> path_and_comm_tab
self.path_and_comm_tab = QtWidgets.QWidget()
self.path_and_comm_tab.setObjectName("path_and_comm_tab")
#-----------------------------------------------------------------------
# Tab Widget -> path_and_comm_tab -> path section
self.configure_path_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.configure_path_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 5 * globals.S_H_R, 231 * globals.S_W_R, 16 * globals.S_H_R))
self.configure_path_label.setObjectName("configure_path_label")
self.path_line = QtWidgets.QFrame(self.path_and_comm_tab)
self.path_line.setGeometry(QtCore.QRect(10 * globals.S_W_R, 20 * globals.S_H_R, 481 * globals.S_W_R, 20 * globals.S_H_R))
self.path_line.setFrameShape(QtWidgets.QFrame.HLine)
self.path_line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.path_line.setObjectName("path_line")
self.audiveris_pushButton = QtWidgets.QPushButton(self.path_and_comm_tab)
self.audiveris_pushButton.setGeometry(QtCore.QRect(400 * globals.S_W_R, 60 * globals.S_H_R, 93 * globals.S_W_R, 31 * globals.S_H_R))
self.audiveris_pushButton.setObjectName("audiveris_pushButton")
self.audiveris_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.audiveris_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 40 * globals.S_H_R, 101 * globals.S_W_R, 16 * globals.S_H_R))
self.audiveris_label.setObjectName("audiveris_label")
self.audiveris_lineEdit = QtWidgets.QLineEdit(self.path_and_comm_tab)
self.audiveris_lineEdit.setGeometry(QtCore.QRect(10 * globals.S_W_R, 60 * globals.S_H_R, 381 * globals.S_W_R, 31 * globals.S_H_R))
self.audiveris_lineEdit.setObjectName("audiveris_lineEdit")
self.amazingmidi_lineEdit = QtWidgets.QLineEdit(self.path_and_comm_tab)
self.amazingmidi_lineEdit.setGeometry(QtCore.QRect(10 * globals.S_W_R, 120 * globals.S_H_R, 381 * globals.S_W_R, 31 * globals.S_H_R))
self.amazingmidi_lineEdit.setObjectName("amazingmidi_lineEdit")
self.amazingmidi_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.amazingmidi_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 100 * globals.S_H_R, 121 * globals.S_W_R, 16 * globals.S_H_R))
self.amazingmidi_label.setObjectName("amazingmidi_label")
self.amazingmidi_pushButton = QtWidgets.QPushButton(self.path_and_comm_tab)
self.amazingmidi_pushButton.setGeometry(QtCore.QRect(400 * globals.S_W_R, 120 * globals.S_H_R, 93 * globals.S_W_R, 31 * globals.S_H_R))
self.amazingmidi_pushButton.setObjectName("amazingmidi_pushButton")
self.anthemscore_pushButton = QtWidgets.QPushButton(self.path_and_comm_tab)
self.anthemscore_pushButton.setGeometry(QtCore.QRect(400 * globals.S_W_R, 180 * globals.S_H_R, 93 * globals.S_W_R, 31 * globals.S_H_R))
self.anthemscore_pushButton.setObjectName("anthemscore_pushButton")
self.anthemscore_lineEdit = QtWidgets.QLineEdit(self.path_and_comm_tab)
self.anthemscore_lineEdit.setGeometry(QtCore.QRect(10 * globals.S_W_R, 180 * globals.S_H_R, 381 * globals.S_W_R, 31 * globals.S_H_R))
self.anthemscore_lineEdit.setObjectName("anthemscore_lineEdit")
self.anthemscore_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.anthemscore_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 160 * globals.S_H_R, 191 * globals.S_W_R, 16 * globals.S_H_R))
self.anthemscore_label.setObjectName("anthemscore_label")
self.muse_score_pushButton = QtWidgets.QPushButton(self.path_and_comm_tab)
self.muse_score_pushButton.setGeometry(QtCore.QRect(400 * globals.S_W_R, 240 * globals.S_H_R, 93 * globals.S_W_R, 31 * globals.S_H_R))
self.muse_score_pushButton.setObjectName("muse_score_pushButton")
self.muse_score_lineEdit = QtWidgets.QLineEdit(self.path_and_comm_tab)
self.muse_score_lineEdit.setGeometry(QtCore.QRect(10 * globals.S_W_R, 240 * globals.S_H_R, 381 * globals.S_W_R, 31 * globals.S_H_R))
self.muse_score_lineEdit.setObjectName("muse_score_linedEdit")
self.muse_score_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.muse_score_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 220 * globals.S_H_R, 191 * globals.S_W_R, 16 * globals.S_H_R))
self.muse_score_label.setObjectName("muse_score_label")
self.mp3_to_midi_converter_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.mp3_to_midi_converter_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 280 * globals.S_H_R, 141 * globals.S_W_R, 16 * globals.S_H_R))
self.mp3_to_midi_converter_label.setObjectName("mp3_to_midi_converter_label")
self.open_source_radioButton = QtWidgets.QRadioButton(self.path_and_comm_tab)
self.open_source_radioButton.setGeometry(QtCore.QRect(240 * globals.S_W_R, 280 * globals.S_H_R, 111 * globals.S_W_R, 20 * globals.S_H_R))
self.open_source_radioButton.setObjectName("open_source_radioButton")
self.close_source_radioButton = QtWidgets.QRadioButton(self.path_and_comm_tab)
self.close_source_radioButton.setGeometry(QtCore.QRect(380 * globals.S_W_R, 280 * globals.S_H_R, 111 * globals.S_W_R, 20 * globals.S_H_R))
self.close_source_radioButton.setObjectName("close_source_radioButton")
#-----------------------------------------------------------------------
# Tab Widget -> path_and_comm_tab -> comm section
self.comm_line = QtWidgets.QFrame(self.path_and_comm_tab)
self.comm_line.setGeometry(QtCore.QRect(10 * globals.S_W_R, 300 * globals.S_H_R, 481 * globals.S_W_R, 20 * globals.S_H_R))
self.comm_line.setFrameShape(QtWidgets.QFrame.HLine)
self.comm_line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.comm_line.setObjectName("comm_line")
self.portsettings_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.portsettings_label.setGeometry(QtCore.QRect(210 * globals.S_W_R, 320 * globals.S_H_R, 81* globals.S_W_R, 20 * globals.S_H_R))
self.portsettings_label.setObjectName("portsettings_label")
self.piano_port_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.piano_port_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 340 * globals.S_H_R, 71 * globals.S_W_R, 16 * globals.S_H_R))
self.piano_port_label.setObjectName("pianoport_label")
self.piano_port_comboBox = PianoComboBox(self.path_and_comm_tab)
self.piano_port_comboBox.setGeometry(QtCore.QRect(10 * globals.S_W_R, 360 * globals.S_H_R, 481 * globals.S_W_R, 31 * globals.S_H_R))
self.piano_port_comboBox.setObjectName("pianoport_comboBox")
self.piano_size_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.piano_size_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 400 * globals.S_H_R, 71* globals.S_W_R, 16* globals.S_H_R))
self.piano_size_label.setObjectName("pianosize_label")
self.piano_size_comboBox = QtWidgets.QComboBox(self.path_and_comm_tab)
self.piano_size_comboBox.setGeometry(QtCore.QRect(10 * globals.S_W_R, 420 * globals.S_H_R, 481 * globals.S_W_R, 31 * globals.S_H_R))
self.piano_size_comboBox.setObjectName("pianosize_comboBox")
self.arduinoport_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.arduinoport_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 460 * globals.S_H_R, 81 * globals.S_W_R, 16* globals.S_H_R))
self.arduinoport_label.setObjectName("arduinoport_label")
self.arduino_port_comboBox = ArduinoComboBox(self.path_and_comm_tab)
self.arduino_port_comboBox.setGeometry(QtCore.QRect(10 * globals.S_W_R, 480 * globals.S_H_R, 481 * globals.S_W_R, 31 * globals.S_H_R))
self.arduino_port_comboBox.setObjectName("arduinoport_comboBox")
self.arduino_baud_rate_label = QtWidgets.QLabel(self.path_and_comm_tab)
self.arduino_baud_rate_label.setGeometry(QtCore.QRect(10 * globals.S_W_R, 520 * globals.S_H_R, 200 * globals.S_W_R, 20* globals.S_H_R))
self.arduino_baud_rate_label.setText("Arduino Baud Rate")
self.arduino_baud_rate_comboBox = QtWidgets.QComboBox(self.path_and_comm_tab)
self.arduino_baud_rate_comboBox.setGeometry(QtCore.QRect(10 * globals.S_W_R, 540 * globals.S_H_R, 481* globals.S_W_R, 31 * globals.S_H_R))
self.tabWidget.addTab(self.path_and_comm_tab, "")
#-----------------------------------------------------------------------
# Tab Widget -> Lighting and Color Tab
self.color_tab = QtWidgets.QWidget()
self.color_tab.setObjectName("color_tab")
#-----------------------------------------------------------------------
# Tab Widget -> Tutoring Tab -> Timing Section
self.timingsettings_label = QtWidgets.QLabel(self.color_tab)
self.timingsettings_label.setGeometry(QtCore.QRect(200 * globals.S_W_R, 10 * globals.S_H_R, 151 * globals.S_W_R, 20 * globals.S_H_R))
self.timingsettings_label.setObjectName("timingsettings_label")
self.chord_tick_tolerance_label = QtWidgets.QLabel(self.color_tab)
self.chord_tick_tolerance_label.setGeometry(QtCore.QRect(20 * globals.S_W_R, 40* globals.S_H_R, 200 * globals.S_W_R, 20 * globals.S_H_R))
self.chord_tick_tolerance_label.setText("Chord Tick Tolerance:")
self.chord_tick_tolerance_lineEdit = QtWidgets.QLineEdit(self.color_tab)
self.chord_tick_tolerance_lineEdit.setGeometry(QtCore.QRect(200 * globals.S_W_R, 40 * globals.S_H_R, 280 * globals.S_W_R, 20 * globals.S_H_R))
self.chord_sum_tolerance_label = QtWidgets.QLabel(self.color_tab)
self.chord_sum_tolerance_label.setGeometry(QtCore.QRect(20 * globals.S_W_R, 80 * globals.S_H_R, 200 * globals.S_W_R, 20 * globals.S_H_R))
self.chord_sum_tolerance_label.setText("Chord Sum Tolerance:")
self.chord_sum_tolerance_lineEdit = QtWidgets.QLineEdit(self.color_tab)
self.chord_sum_tolerance_lineEdit.setGeometry(QtCore.QRect(200 * globals.S_W_R, 80 * globals.S_H_R, 280 * globals.S_W_R, 20 * globals.S_H_R))
self.record_chord_tolerance_label = QtWidgets.QLabel(self.color_tab)
self.record_chord_tolerance_label.setGeometry(QtCore.QRect(20* globals.S_W_R, 120 * globals.S_H_R, 200* globals.S_W_R, 20 * globals.S_H_R))
self.record_chord_tolerance_label.setText("Record Chord Tolerance:")
self.record_chord_tolerance_lineEdit = QtWidgets.QLineEdit(self.color_tab)
self.record_chord_tolerance_lineEdit.setGeometry(QtCore.QRect(200* globals.S_W_R, 120 * globals.S_H_R, 280 * globals.S_W_R, 20 * globals.S_H_R))
self.arduino_handshake_timeout_label = QtWidgets.QLabel(self.color_tab)
self.arduino_handshake_timeout_label.setGeometry(QtCore.QRect(20 * globals.S_W_R, 160* globals.S_H_R, 200 * globals.S_W_R, 20 * globals.S_H_R))
self.arduino_handshake_timeout_label.setText("Arduino Handshake Timeout:")
self.arduino_handshake_timeout_lineEdit = QtWidgets.QLineEdit(self.color_tab)
self.arduino_handshake_timeout_lineEdit.setGeometry(QtCore.QRect(200 * globals.S_W_R, 160 * globals.S_H_R, 280 * globals.S_W_R, 20 * globals.S_H_R))
self.line = QtWidgets.QFrame(self.color_tab)
self.line.setGeometry(QtCore.QRect(10 * globals.S_W_R, 230 * globals.S_H_R, 481 * globals.S_W_R, 16 * globals.S_H_R))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
#-----------------------------------------------------------------------
# Tab Widget -> Tutoring Tab -> Color Section
self.colorsettings_label = QtWidgets.QLabel(self.color_tab)
self.colorsettings_label.setGeometry(QtCore.QRect(210 * globals.S_W_R, 250 * globals.S_H_R, 81 * globals.S_W_R, 20 * globals.S_H_R))
self.colorsettings_label.setObjectName("colorsettings_label_2")
bw_y = ( 250 + 40 ) * globals.S_H_R
space = 20 * globals.S_H_R
self.black_key_label = QtWidgets.QLabel(self.color_tab)
self.black_key_label.setGeometry(QtCore.QRect(80 * globals.S_W_R, bw_y, 61 * globals.S_W_R, 16 * globals.S_H_R))
self.black_key_label.setObjectName("black_key_label")
self.black_key_pushButton = QtWidgets.QPushButton(self.color_tab)
self.black_key_pushButton.setGeometry(QtCore.QRect(40 * globals.S_W_R, bw_y + space, 141 * globals.S_W_R, 61 * globals.S_H_R))
self.black_key_pushButton.setText("")
self.black_key_pushButton.setObjectName("black_key_pushButton")
self.white_key_label = QtWidgets.QLabel(self.color_tab)
self.white_key_label.setGeometry(QtCore.QRect(360 * globals.S_W_R, bw_y, 71 * globals.S_W_R, 16 * globals.S_H_R))
self.white_key_label.setObjectName("white_key_label")
self.white_key_pushButton = QtWidgets.QPushButton(self.color_tab)
self.white_key_pushButton.setGeometry(QtCore.QRect(320 * globals.S_W_R, bw_y + space, 141 * globals.S_W_R, 61 * globals.S_W_R))
self.white_key_pushButton.setText("")
self.white_key_pushButton.setObjectName("white_key_pushButton")
wu_y = ( 390 + 40 ) * globals.S_H_R
self.wrong_label = QtWidgets.QLabel(self.color_tab)
self.wrong_label.setGeometry(QtCore.QRect(75 * globals.S_W_R, wu_y, 71 * globals.S_W_R, 16 * globals.S_H_R))
self.wrong_label.setObjectName("wrong_label")
self.wrong_pushButton = QtWidgets.QPushButton(self.color_tab)
self.wrong_pushButton.setGeometry(QtCore.QRect(40 * globals.S_W_R, wu_y + space, 141 * globals.S_W_R, 61 * globals.S_H_R))
self.wrong_pushButton.setText("")
self.wrong_pushButton.setObjectName("wrong_pushButton")
self.upcoming_label = QtWidgets.QLabel(self.color_tab)
self.upcoming_label.setGeometry(QtCore.QRect(350 * globals.S_W_R, wu_y, 91 * globals.S_W_R, 16 * globals.S_H_R))
self.upcoming_label.setObjectName("upcoming_label")
self.upcoming_pushButton = QtWidgets.QPushButton(self.color_tab)
self.upcoming_pushButton.setGeometry(QtCore.QRect(320 * globals.S_W_R, wu_y + space, 141 * globals.S_W_R, 61 * globals.S_H_R))
self.upcoming_pushButton.setText("")
self.upcoming_pushButton.setObjectName("upcoming_pushButton")
self.tabWidget.addTab(self.color_tab, "")
self.retranslate_ui()
self.tabWidget.setCurrentIndex(0)
self.apply_close_buttonBox.accepted.connect(self.accept)
self.apply_close_buttonBox.rejected.connect(self.close)
QtCore.QMetaObject.connectSlotsByName(self)
def setup_func(self):
"""
This function places all the slot and signals for the widgets of the
settings dialog.
"""
self.browse_button_group = QtWidgets.QButtonGroup()
self.browse_button_group.addButton(self.audiveris_pushButton)
self.browse_button_group.addButton(self.amazingmidi_pushButton)
self.browse_button_group.addButton(self.anthemscore_pushButton)
self.browse_button_group.addButton(self.muse_score_pushButton)
self.browse_button_group.buttonClicked.connect(self.upload_exe_file)
self.browse_button_dict = {self.audiveris_pushButton: ['', self.audiveris_lineEdit, 'audiveris'], self.amazingmidi_pushButton: ['',self.amazingmidi_lineEdit, 'amazing_midi'],
self.anthemscore_pushButton: ['', self.anthemscore_lineEdit,'anthemscore'], self.muse_score_pushButton: ['', self.muse_score_lineEdit, 'muse_score']}
self.port_dict = {self.piano_port_comboBox: ['','piano'], self.piano_size_comboBox: ['','piano_size'],
self.arduino_port_comboBox: ['','arduino'], self.arduino_baud_rate_comboBox: ['', 'arduino baud rate']}
self.piano_size_comboBox.addItem('76 Key Piano')
self.piano_size_comboBox.addItem('88 Key Piano')
self.arduino_baud_rate_comboBox.addItem('300')
self.arduino_baud_rate_comboBox.addItem('600')
self.arduino_baud_rate_comboBox.addItem('1200')
self.arduino_baud_rate_comboBox.addItem('4800')
self.arduino_baud_rate_comboBox.addItem('9600')
self.arduino_baud_rate_comboBox.addItem('14400')
self.arduino_baud_rate_comboBox.addItem('19200')
self.arduino_baud_rate_comboBox.addItem('28800')
self.arduino_baud_rate_comboBox.addItem('38400')
self.arduino_baud_rate_comboBox.addItem('57600')
self.arduino_baud_rate_comboBox.addItem('115200')
self.arduino_baud_rate_comboBox.addItem('230400')
self.timing_button_dict = {self.chord_tick_tolerance_lineEdit: ['', 'chord tick tolerance'], self.chord_sum_tolerance_lineEdit: ['','chord sum tolerance'],
self.record_chord_tolerance_lineEdit: ['', 'record chord tolerance'], self.arduino_handshake_timeout_lineEdit: ['', 'count timeout']
}
self.color_button_group = QtWidgets.QButtonGroup()
self.color_button_group.addButton(self.black_key_pushButton)
self.color_button_group.addButton(self.white_key_pushButton)
self.color_button_group.addButton(self.wrong_pushButton)
self.color_button_group.addButton(self.upcoming_pushButton)
self.color_button_group.buttonClicked.connect(self.color_picker)
self.color_button_dict = {self.black_key_pushButton: ['','black'], self.white_key_pushButton: ['','white'],
self.wrong_pushButton: ['','wrong'], self.upcoming_pushButton: ['','upcoming']
}
self.apply_close_buttonBox.button(QtWidgets.QDialogButtonBox.Apply).clicked.connect(self.apply_changes)
return None
#---------------------------------------------------------------------------
# Path Section Functions
def open_file_name_dialog_exe_file(self):
"""
This file dialog is used to obtain the file location of the .exe file.
"""
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
fileName, _ = QtWidgets.QFileDialog.getOpenFileName(self, "Select .exe/.bat File", "", "Executiable Files (*.exe);; Batch Files (*.bat)", options=options)
if fileName:
file_dialog_output = str(fileName)
else:
return ""
file_dialog_output = file_dialog_output.replace('/' , '\\' )
return file_dialog_output
def open_directory_name_dialog_exe_path(self):
"""
This file dialog is used to obtain the folder directory of the desired
exe folder location.
"""
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.ShowDirsOnly
options |= QtWidgets.QFileDialog.DontUseNativeDialog
directory = QtWidgets.QFileDialog.getExistingDirectory(self, caption = 'Select a folder', options = options)
if directory:
file_dialog_output = str(directory)
else:
return ""
file_dialog_output = file_dialog_output.replace('/' , '\\' )
return file_dialog_output
def upload_exe_file(self, button):
"""
        This function opens a file dialog to select an executable and, if a
        path was chosen, stores it for the clicked button and refreshes the
        displayed settings.
"""
upload_exe_path = self.open_file_name_dialog_exe_file()
if upload_exe_path != '':
self.browse_button_dict[button][0] = upload_exe_path
self.update_settings()
return None
#---------------------------------------------------------------------------
# Color
def color_picker(self, button):
"""
This function creates a QColorDialog when the user clicks the color
wheel color. Once the user selects a color, it will display the RGB
colors in the lineedits.
"""
color = QtWidgets.QColorDialog.getColor()
if color.isValid():
# Converting Hexadecimal to RGB values
value = color.name()
value = value.lstrip('#')
rgb = tuple(int(value[i:i+2], 16) for i in (0, 2, 4))
rgb = str(rgb)[1:-1].replace(" ","")
self.color_button_dict[button][0] = rgb
button.setStyleSheet('background-color:rgb({})'.format(rgb))
return None
#---------------------------------------------------------------------------
# Reading Settings
def read_all_settings(self):
"""
This function reads all the settings in the config.yml and stores them
in dictionaries that correlate the settings to the widgets.
"""
cfg = read_config()
# Path Settings
for key in self.browse_button_dict.keys():
self.browse_button_dict[key][0] = cfg['app_path'][self.browse_button_dict[key][2]]
# Mp3 to midi Settings
self.mp3_to_midi_setting = cfg['app_path']['open_close_source']
# Port Settings
for key in self.port_dict.keys():
self.port_dict[key][0] = cfg['port'][self.port_dict[key][1]]
# Timing Settings
for key in self.timing_button_dict.keys():
self.timing_button_dict[key][0] = cfg['timing'][self.timing_button_dict[key][1]]
# Color Settings
for key in self.color_button_dict.keys():
self.color_button_dict[key][0] = cfg['color'][self.color_button_dict[key][1]]
return None
def update_settings(self):
"""
This function places the information of the settings into the widgets,
such as placing the value or color to the widget.
"""
# Path Settings
for button in self.browse_button_dict:
self.browse_button_dict[button][1].setText(self.browse_button_dict[button][0])
# Mp3 to midi Settings
if self.mp3_to_midi_setting == 'open_source':
self.open_source_radioButton.setChecked(True)
self.close_source_radioButton.setChecked(False)
elif self.mp3_to_midi_setting == 'close_source':
self.close_source_radioButton.setChecked(True)
self.open_source_radioButton.setChecked(False)
# Port Settings
for key in self.port_dict.keys():
if self.port_dict[key][1] == 'piano_size':
key.setCurrentText(str(self.port_dict[key][0]) + ' Key Piano')
elif key == self.arduino_baud_rate_comboBox:
key.setCurrentText(str(self.port_dict[key][0]))
else:
key.addItem(str(self.port_dict[key][0]))
key.setCurrentText(str(self.port_dict[key][0]))
# Timing Settings
for key in self.timing_button_dict.keys():
key.setText(str(self.timing_button_dict[key][0]))
# Color Settings
for key in self.color_button_dict.keys():
rgb = self.color_button_dict[key][0]
key.setStyleSheet('background-color:rgb({})'.format(rgb))
return None
def apply_changes(self):
"""
        This function applies any changes made by the user to the settings.
        These changes are recorded in the config.yml file.
"""
cfg = read_config()
# Apply Path
for button in self.browse_button_dict:
text = self.browse_button_dict[button][1].text()
cfg['app_path'][self.browse_button_dict[button][2]] = text
# Mp3 to midi Settings
if self.open_source_radioButton.isChecked():
cfg['app_path']['open_close_source'] = 'open_source'
elif self.close_source_radioButton.isChecked():
cfg['app_path']['open_close_source'] = 'close_source'
# Color Settings
for key in self.color_button_dict.keys():
rgb = self.color_button_dict[key][0]
cfg['color'][self.color_button_dict[key][1]] = rgb
for key in self.timing_button_dict.keys():
cfg['timing'][self.timing_button_dict[key][1]] = int(key.text())
# Port Settings
for key in self.port_dict.keys():
index = key.currentIndex()
if index == -1:
continue
if key == self.piano_port_comboBox or key == self.arduino_port_comboBox:
cfg['port'][self.port_dict[key][1]] = key.currentText()
elif key == self.piano_size_comboBox:
cfg['port'][self.port_dict[key][1]] = key.currentText()[:2]
elif key == self.arduino_baud_rate_comboBox:
cfg['port'][self.port_dict[key][1]] = int(key.currentText())
update_config(cfg)
print("Applied Changes")
self.finish_apply_signal.emit()
return None
#---------------------------------------------------------------------------
# Misc Functions
def retranslate_ui(self):
"""
This function places all the text content in the configuration dialog
widgets.
"""
_translate = QtCore.QCoreApplication.translate
self.anthemscore_pushButton.setText(_translate("Dialog", "Browse"))
self.anthemscore_label.setText(_translate("Dialog", "AnthemScore [.exe] (Optional)"))
self.audiveris_pushButton.setText(_translate("Dialog", "Browse"))
self.audiveris_label.setText(_translate("Dialog", "Audiveris [folder]"))
self.amazingmidi_pushButton.setText(_translate("Dialog", "Browse"))
self.amazingmidi_label.setText(_translate("Dialog", "AmazingMIDI [.exe]"))
self.muse_score_label.setText(_translate("Dialog", "MuseScore [.exe]"))
self.muse_score_pushButton.setText(_translate("Dialog", "Browse"))
self.configure_path_label.setText(_translate("Dialog", "Configure the path for each program."))
self.mp3_to_midi_converter_label.setText(_translate("Dialog", "MP3 to MIDI Converter:"))
self.open_source_radioButton.setText(_translate("Dialog", "Open-Source"))
self.close_source_radioButton.setText(_translate("Dialog", "Close-Source"))
self.piano_port_label.setText(_translate("Dialog", "Piano Port"))
self.piano_size_label.setText(_translate("Dialog", "Piano Size"))
self.portsettings_label.setText(_translate("Dialog", "Port Settings"))
self.arduinoport_label.setText(_translate("Dialog", "Arduino Port"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.path_and_comm_tab), _translate("Dialog", "Path and Communication Settings"))
self.timingsettings_label.setText(_translate("Dialog", "Timing Settings"))
self.colorsettings_label.setText(_translate("Dialog", "Color Settings"))
self.black_key_label.setText(_translate("Dialog", "Black Keys"))
self.white_key_label.setText(_translate("Dialog", "White Keys"))
self.wrong_label.setText(_translate("Dialog", "Wrong Note"))
self.upcoming_label.setText(_translate("Dialog", "Upcoming Note"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.color_tab), _translate("Dialog", "Tutoring Settings"))
#-----------------------------------------------------------------------
# Text Scaling
font = self.anthemscore_label.font()
font.setPixelSize(13)
print("Prescaling Font Pixel Size: ", font.pixelSize())
font.setPixelSize(font.pixelSize() * globals.S_W_R)
print("Postscaling Font Pixel Size: ", font.pixelSize())
text_group = [self.anthemscore_pushButton, self.anthemscore_label, self.anthemscore_lineEdit,
self.audiveris_pushButton, self.audiveris_label, self.audiveris_lineEdit,
self.amazingmidi_pushButton, self.amazingmidi_label, self.amazingmidi_lineEdit,
self.muse_score_pushButton, self.muse_score_label, self.muse_score_lineEdit,
                      self.configure_path_label, self.mp3_to_midi_converter_label,
self.piano_port_label, self.piano_size_label, self.piano_size_comboBox,
self.portsettings_label, self.arduinoport_label, self.piano_port_comboBox,
self.arduino_port_comboBox, self.timingsettings_label, self.colorsettings_label,
self.black_key_label, self.white_key_label, self.wrong_label, self.upcoming_label,
self.arduino_baud_rate_comboBox, self.open_source_radioButton,
self.close_source_radioButton, self.chord_tick_tolerance_label,
self.chord_tick_tolerance_lineEdit, self.chord_sum_tolerance_label,
self.chord_sum_tolerance_lineEdit, self.record_chord_tolerance_label,
self.record_chord_tolerance_lineEdit, self.arduino_handshake_timeout_label,
self.arduino_handshake_timeout_lineEdit, self.apply_close_buttonBox,
self.tabWidget]
for element in text_group:
element.setFont(font)
#-------------------------------------------------------------------------------
# Main Code
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
config_dialog = ConfigDialog()
config_dialog.show()
sys.exit(app.exec_())
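# config.yml sketch (assumption: structure inferred from the keys read above
# by read_all_settings/apply_changes; all values are placeholders):
#
#   app_path:
#     audiveris: ''
#     amazing_midi: ''
#     anthemscore: ''
#     muse_score: ''
#     open_close_source: open_source
#   port:
#     piano: ''
#     piano_size: 88
#     arduino: ''
#     arduino baud rate: 230400
#   timing:
#     chord tick tolerance: 0
#     chord sum tolerance: 0
#     record chord tolerance: 0
#     count timeout: 0
#   color:
#     black: 0,0,0
#     white: 255,255,255
#     wrong: 255,0,0
#     upcoming: 0,0,255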
| 48.336195
| 184
| 0.671797
| 4,302
| 33,787
| 4.96676
| 0.095769
| 0.071138
| 0.0417
| 0.046333
| 0.600178
| 0.489774
| 0.405813
| 0.335564
| 0.296298
| 0.158188
| 0
| 0.021526
| 0.19839
| 33,787
| 698
| 185
| 48.405444
| 0.76739
| 0.108089
| 0
| 0.123487
| 0
| 0
| 0.068422
| 0.008553
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03632
| false
| 0
| 0.021792
| 0
| 0.104116
| 0.007264
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9da270a879210ead826c86bdc8c185c7e2c0effa
| 1,814
|
py
|
Python
|
valorant/caller.py
|
frissyn/valorant.py
|
49abceab5cc1f3af016ce0b1d253d10089aeb0b4
|
[
"MIT"
] | 56
|
2021-01-22T01:48:23.000Z
|
2022-03-31T20:44:23.000Z
|
valorant/caller.py
|
Tominous/valorant.py
|
b462441ab4ab403123ad245cab30f3abbd891a66
|
[
"MIT"
] | 20
|
2021-02-03T10:40:37.000Z
|
2022-03-24T11:23:57.000Z
|
valorant/caller.py
|
Tominous/valorant.py
|
b462441ab4ab403123ad245cab30f3abbd891a66
|
[
"MIT"
] | 15
|
2021-03-24T01:17:58.000Z
|
2022-02-01T02:10:27.000Z
|
import requests
from .values import ROUTES
from .values import LOCALES
from .values import REGIONS
from .values import ENDPOINTS
def value_check(*args):
    KEYS = ROUTES + LOCALES + REGIONS
    for arg in args:
        if arg not in KEYS:
            raise ValueError(arg)
    # only return once every argument has been validated
    return True
class WebCaller(object):
def __init__(self, token: str, locale: str, region: str, route: str):
self.base = "https://{root}.api.riotgames.com/"
self.eps = ENDPOINTS["web"]
self.sess = requests.Session()
self.sess.params.update({"locale": locale})
self.sess.headers.update(
{
"Accept-Charset": "application/x-www-form-urlencoded; charset=UTF-8",
"User-Agent": "Mozilla/5.0",
"X-Riot-Token": token,
}
)
if value_check(locale, region, route):
self.locale = locale
self.region = region
self.route = route
def call(self, m: str, ep: str, params=None, route=False, **kw):
        if ep not in self.eps:
            raise ValueError(ep)
prefix = self.base.format(root=self.route if route else self.region)
url = prefix + self.eps[ep].format(**kw)
r = self.sess.request(m, url, params=params)
r.raise_for_status()
return r.json()
class ClientCaller(object):
def __init__(self, token: str):
self.base = "https://pd.{code}.a.pvp.net/"
self.token = token
self.sess = requests.Session()
self.sess.headers.update(
{
"Authorization": f"Bearer {token}",
"Content-Type": "application/json",
"X-Riot-Entitlements-JWT": "riot_entitlement",
}
)
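# usage sketch (assumption, not part of the original module): valid locale,
# region, route and endpoint names come from valorant/values.py, so the
# values below are placeholders:
#
#   caller = WebCaller(token="RGAPI-...", locale="en-US",
#                      region="na", route="americas")
#   data = caller.call("GET", "content")   # returns the parsed JSON body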
| 27.484848
| 85
| 0.555678
| 212
| 1,814
| 4.693396
| 0.424528
| 0.048241
| 0.064322
| 0.046231
| 0.112563
| 0.112563
| 0
| 0
| 0
| 0
| 0
| 0.002435
| 0.320838
| 1,814
| 65
| 86
| 27.907692
| 0.805195
| 0
| 0
| 0.156863
| 0
| 0
| 0.142778
| 0.031422
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078431
| false
| 0.019608
| 0.098039
| 0
| 0.254902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9da846794dabe811239a290251111e03ccfb593a
| 1,256
|
py
|
Python
|
test_LearnSubtitles.py
|
heitor31415/LearnSubtitles
|
153178ea11d700a49a1f3692de39e8fc81e3cc4e
|
[
"MIT"
] | 8
|
2020-02-13T03:08:25.000Z
|
2021-01-11T20:28:39.000Z
|
test_LearnSubtitles.py
|
heitor31415/LearnSubtitles
|
153178ea11d700a49a1f3692de39e8fc81e3cc4e
|
[
"MIT"
] | 1
|
2020-04-28T19:48:16.000Z
|
2020-04-29T12:28:15.000Z
|
test_LearnSubtitles.py
|
heitor31415/LearnSubtitles
|
153178ea11d700a49a1f3692de39e8fc81e3cc4e
|
[
"MIT"
] | 1
|
2020-03-14T00:46:36.000Z
|
2020-03-14T00:46:36.000Z
|
import os
import pytest
from typing import List  # only List is used below
import LearnSubtitles as ls
def prepare(language: str) -> List:
""" Create LearnSubtitles objects for every subtitle in folder 'language' """
test_dir = "testfiles/" + language
subs = [
ls.LearnSubtitles(os.path.abspath(os.path.join(test_dir, x)), language)
for x in os.listdir(test_dir)
]
return subs
languages = ["de", "en", "pt"] # supported languages
def test_LearnSubtitles_parsing():
for language in languages:
subs = prepare(language)
for sub in subs:
assert len(sub.text) != 0
def test_LearnSubtitles_bad_file():
with pytest.raises(FileNotFoundError):
ls.LearnSubtitles(os.path.abspath("testfiles/fail/fail.srt"), "en")
with pytest.raises(ls.LearnSubtitlesError):
ls.LearnSubtitles(os.path.abspath("testfiles/fail/bad_file.srt"), "en")
def test_LearnSubtitles_level():
levels = ["A1", "A2", "B1"]
subs = [
ls.LearnSubtitles(
"testfiles/de/Nicos Weg – " + level + " – Ganzer Film - German.srt", "de"
)
for level in levels
]
assert subs[0].film_level > subs[1].film_level
assert subs[1].film_level > subs[2].film_level
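The suite is meant to be run with pytest; a minimal driver (illustrative, assuming the testfiles/ tree described above exists):

if __name__ == "__main__":
    # Run only this module, quietly; exit code is nonzero on failure.
    raise SystemExit(pytest.main(["-q", __file__]))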
| 26.723404
| 85
| 0.648089
| 161
| 1,256
| 4.975155
| 0.403727
| 0.0799
| 0.067416
| 0.082397
| 0.141074
| 0.104869
| 0.104869
| 0
| 0
| 0
| 0
| 0.008239
| 0.226911
| 1,256
| 46
| 86
| 27.304348
| 0.814624
| 0.072452
| 0
| 0.0625
| 0
| 0
| 0.112263
| 0.043178
| 0
| 0
| 0
| 0
| 0.09375
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.28125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dacec32c244293fcf0c09720725cd6c562e10da
| 4,888
|
py
|
Python
|
fast_downloader_mt/main.py
|
Kirozen/fast-downloader
|
febdcc8b6a6ad3b8d263a8923b8f24e8402df618
|
[
"MIT"
] | null | null | null |
fast_downloader_mt/main.py
|
Kirozen/fast-downloader
|
febdcc8b6a6ad3b8d263a8923b8f24e8402df618
|
[
"MIT"
] | null | null | null |
fast_downloader_mt/main.py
|
Kirozen/fast-downloader
|
febdcc8b6a6ad3b8d263a8923b8f24e8402df618
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import multiprocessing
import os
import re
import sys
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from itertools import chain
from pathlib import Path
from urllib.parse import urlparse
import click
import requests
from requests.models import HTTPError
from rich.progress import (
BarColumn,
DownloadColumn,
Progress,
TextColumn,
TimeRemainingColumn,
TransferSpeedColumn,
)
@dataclass
class DownloadFile:
urls: list[str]
dest: Path = Path.cwd()
filename: str = field(init=False)
def __post_init__(self):
self.filename = Path(self.urls[0]).name
@property
def filepath(self):
return self.dest / self.filename
BUFFER_SIZE = 32768
progress = Progress(
TextColumn("[bold blue]{task.fields[filename]}", justify="right"),
BarColumn(bar_width=None),
"[progress.percentage]{task.percentage:>3.1f}%",
"•",
DownloadColumn(),
"•",
TransferSpeedColumn(),
"•",
TimeRemainingColumn(),
)
def parse_aria2(data: list[str], destination: Path):
files = []
out_re = re.compile(r"^\s+out=(?P<out>.*)$")
for line in data:
if line.startswith("#") or not line:
continue
if line.startswith("http"):
files.append(DownloadFile(line.split("\t"), destination))
else:
match_out = out_re.match(line)
if match_out:
files[-1].filename = match_out.groupdict()["out"]
return files
def get_inputs(inputs: list[str], destination: Path, aria2_compatibility: bool):
paths = []
    for input_file in inputs:  # avoid shadowing the built-in input()
        lines = Path(input_file).read_text().splitlines(keepends=False)
if aria2_compatibility:
paths.extend(parse_aria2(lines, destination))
else:
paths.extend(
DownloadFile([url], destination)
for url in lines
if url.startswith("http")
)
return paths
def downloader(downloadfile: DownloadFile, buffer_size: int, quiet: bool):
if not quiet:
task_id = progress.add_task(
"download",
filename=downloadfile.filename,
)
iterator = iter(downloadfile.urls)
response = None
try:
while not response:
url = next(iterator)
try:
response = requests.get(url, allow_redirects=True, stream=True)
response.raise_for_status()
except HTTPError:
response = None
if not quiet:
            size = int(response.headers.get("content-length", 0))
progress.update(task_id, total=size)
with open(downloadfile.filepath, "wb") as handler:
if not quiet:
progress.start_task(task_id)
for data in response.iter_content(chunk_size=buffer_size):
handler.write(data)
if not quiet:
progress.update(task_id, advance=len(data))
except StopIteration:
print("Urls are not available")
def executor(threads, downloadfiles, buffer_size, quiet):
with ThreadPoolExecutor(max_workers=threads) as pool:
for downloadfile in sorted(
downloadfiles, key=lambda df: len(df.filename), reverse=True
):
try:
for url in downloadfile.urls:
urlparse(url)
except ValueError:
print(f"An url in {downloadfile.urls} is not valid!", file=sys.stderr)
continue
pool.submit(downloader, downloadfile, buffer_size, quiet)
@click.command()
@click.option(
"-t",
"--threads",
default=lambda: multiprocessing.cpu_count(),
type=click.IntRange(min=1, max=1000, clamp=True),
help="thread number",
)
@click.option(
"-i",
"--input",
"inputs",
multiple=True,
type=click.Path(exists=True, file_okay=True),
help="input file",
)
@click.option("-q", "--quiet", is_flag=True)
@click.option(
"-d",
"--destination",
type=click.Path(dir_okay=True, allow_dash=True),
default=Path(os.getcwd()),
)
@click.option("--aria2-compatibility", is_flag=True)
@click.option(
"--buffer-size", type=click.IntRange(min=1, clamp=True), default=BUFFER_SIZE
)
@click.argument("urls", nargs=-1, type=click.Path())
def fast_downloader(
threads, inputs, quiet, destination, buffer_size, aria2_compatibility, urls
):
download_urls = (DownloadFile([url], Path(destination)) for url in urls)
download_files = list(
chain(download_urls, get_inputs(inputs, Path(destination), aria2_compatibility))
)
if quiet:
executor(threads, download_files, buffer_size, quiet)
else:
with progress:
executor(threads, download_files, buffer_size, quiet)
if __name__ == "__main__":
fast_downloader()
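A quick invocation sketch (illustrative; the module path and URL are placeholders):

# From a shell:
#   python -m fast_downloader_mt.main -t 4 -d ./downloads https://example.com/file.bin
# Or programmatically, via click's test runner:
#   from click.testing import CliRunner
#   result = CliRunner().invoke(fast_downloader, ["-q", "https://example.com/file.bin"])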
| 28.091954
| 88
| 0.625818
| 550
| 4,888
| 5.447273
| 0.341818
| 0.033378
| 0.013351
| 0.014686
| 0.056742
| 0.028705
| 0.028705
| 0
| 0
| 0
| 0
| 0.006354
| 0.259411
| 4,888
| 173
| 89
| 28.254335
| 0.820442
| 0
| 0
| 0.165563
| 0
| 0
| 0.067308
| 0.019231
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046358
| false
| 0
| 0.092715
| 0.006623
| 0.18543
| 0.013245
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dad8057a50b53867020fcecaeb0676d2cfff102
| 4,362
|
py
|
Python
|
sitch/sitchlib/geo_correlator.py
|
codecuisine/sensor
|
06fb0908178af1ab673b95e7f435b873cc62e61b
|
[
"ECL-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | 68
|
2016-08-08T17:28:59.000Z
|
2021-11-26T09:31:52.000Z
|
sitch/sitchlib/geo_correlator.py
|
codecuisine/sensor
|
06fb0908178af1ab673b95e7f435b873cc62e61b
|
[
"ECL-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | 61
|
2016-08-20T21:01:01.000Z
|
2020-07-22T06:10:45.000Z
|
sitch/sitchlib/geo_correlator.py
|
codecuisine/sensor
|
06fb0908178af1ab673b95e7f435b873cc62e61b
|
[
"ECL-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | 40
|
2017-01-28T23:06:22.000Z
|
2021-08-13T15:09:43.000Z
|
"""Correlate based on geograpgic information."""
from alert_manager import AlertManager
from utility import Utility
class GeoCorrelator(object):
"""Geographic correlator."""
def __init__(self, device_id):
"""Initialize the Geographic Correlator."""
self.geo_anchor = {}
self.threshold = 100
self.time_threshold = 10
self.device_id = device_id
def correlate(self, scan_bolus):
"""Correlate one geo event.
The first time we get a geo event, we set the state and print a message
to stdout to that effect. Every subsequent message is compared
against the geo_anchor. Once the anchor is set, it does not
change for the life of the instance. Correlation of subsequent
        events causes the distance between the anchor and the current event
        to be determined, and if the distance threshold is exceeded, an
        alert is returned.
Args:
scan_bolus (tuple): Two-item tuple. Position 0 contains the scan
type, which is not checked. We should only ever have geo
events coming through this method. Position 1 is expected to
contain geo json.
Returns:
list: List of alerts. If no alerts are fired, the list returned is
zero-length.
"""
scan_body = scan_bolus[1]
if self.geo_anchor == {}:
self.geo_anchor = scan_body
print("GeoCorrelator: Setting anchor to %s" % str(scan_body))
alerts = []
else:
alerts = GeoCorrelator.geo_drift_check(self.geo_anchor, scan_body,
self.threshold,
self.device_id)
for alert in GeoCorrelator.time_drift_check(scan_body,
self.time_threshold,
self.device_id):
alerts.append(alert)
for alert in alerts:
alert[1]["site_name"] = scan_body["site_name"]
alert[1]["sensor_name"] = scan_body["sensor_name"]
alert[1]["sensor_id"] = scan_body["sensor_id"]
return alerts
@classmethod
def geo_drift_check(cls, geo_anchor, gps_scan, threshold, device_id):
"""Fire alarm if distance between points exceeds threshold.
Args:
geo_anchor (dict): Geographic anchor point, usually stored in an
instance variable and passed in via the `correlate()` method.
            gps_scan (dict): Current GPS scan; expects the same format
                as `geo_anchor`.
threshold (int): Alerting threshold in km.
Returns:
            list: list of alerts (usually just one) or an empty list if there
                are no alerts.
"""
lat_1 = geo_anchor["location"]["coordinates"][1]
lon_1 = geo_anchor["location"]["coordinates"][0]
lat_2 = gps_scan["location"]["coordinates"][1]
lon_2 = gps_scan["location"]["coordinates"][0]
current_distance = Utility.calculate_distance(lon_1, lat_1,
lon_2, lat_2)
if current_distance < threshold:
return []
else:
message = "Possible GPS spoofing attack! %d delta from anchor at %s / %s %s !" % (current_distance, gps_scan["site_name"], gps_scan["sensor_name"], Utility.create_gmaps_link(lat_1, lon_1)) # NOQA
alert = AlertManager(device_id).build_alert(300, message,
gps_scan["location"])
            return [alert]
@classmethod
def time_drift_check(cls, gps_scan, threshold_mins, device_id):
"""Checks drift value, alarms if beyond threshold."""
current_delta = gps_scan["time_drift"]
if current_delta < threshold_mins:
return []
else:
message = "Possible GPS time spoofing attack! %d delta from system at %s / %s" % (current_delta, gps_scan["site_name"], gps_scan["sensor_name"]) # NOQA
alert = AlertManager(device_id).build_alert(310, message,
gps_scan["location"])
            return [alert]
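A minimal usage sketch (illustrative; the coordinates and names are made up, and the sitchlib modules must be importable). The first event only sets the anchor, so an empty alert list comes back:

if __name__ == "__main__":
    scan = {"location": {"coordinates": [-122.33, 47.61]},
            "site_name": "site-1", "sensor_name": "sensor-1",
            "sensor_id": "s1", "time_drift": 0}
    correlator = GeoCorrelator("device-1")
    print(correlator.correlate(("geo", scan)))  # []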
| 44.969072
| 208
| 0.570381
| 505
| 4,362
| 4.744554
| 0.330693
| 0.035058
| 0.020033
| 0.01419
| 0.231219
| 0.086811
| 0.059265
| 0.026711
| 0
| 0
| 0
| 0.01162
| 0.348923
| 4,362
| 96
| 209
| 45.4375
| 0.832042
| 0.323017
| 0
| 0.211538
| 0
| 0
| 0.135374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.038462
| 0
| 0.192308
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dadf1bb28dc34ec81f4c906780d3dcd3137e862
| 1,697
|
py
|
Python
|
grid_search_results_v1/get_vals_heatmap.py
|
malfarasplux/pnet2019
|
ae34d5c84fb4d3985634b237a14dfb69e98b8339
|
[
"BSD-3-Clause"
] | 1
|
2020-11-29T12:42:30.000Z
|
2020-11-29T12:42:30.000Z
|
grid_search_results_v1/get_vals_heatmap.py
|
malfarasplux/pnet2019
|
ae34d5c84fb4d3985634b237a14dfb69e98b8339
|
[
"BSD-3-Clause"
] | null | null | null |
grid_search_results_v1/get_vals_heatmap.py
|
malfarasplux/pnet2019
|
ae34d5c84fb4d3985634b237a14dfb69e98b8339
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
N =[20,40,50,75,100,150,200]
scale = [0.0001, 0.001, 0.005, 0.01, 0.1, 1, 10]
mem = [0.001, 0.01, 0.1, 0.13, 0.25, 0.5, 1]
sigexp = [0.01, 0.1, 0.5, 1, 2, 5, 10]
val_key = {}
with open("./grid_search_results_v1/F1_report.txt") as f:
for i, line in enumerate(f):
lineval = line.split()[0]
print ("line {0} = {1}".format(i, lineval))
val_key[lineval.split(".txt:")[0][7:]] = float(lineval.split(".txt:")[1])
F1_matrix = np.zeros((len(scale),len(mem)),dtype=np.float)
N_i = str(200)
sigexp_i = str(0.1)
for i in range(len(scale)):
scale_i = str(scale[i])
for j in range(len(mem)):
mem_i = str(mem[j])
key_i = N_i + "_" + scale_i + "_" + mem_i + "_" + sigexp_i
F1_matrix[i,j] = val_key[key_i]
fig, ax = plt.subplots()
im = ax.imshow(F1_matrix)
ax.set_title("Grid search F1 opt")
ax.set_xticks(np.arange(len(mem)))
ax.set_yticks(np.arange(len(scale)))
ax.set_xticklabels(mem)
ax.set_yticklabels(scale)
ax.set_xlabel('mem')
ax.set_ylabel('scale')
cbar = ax.figure.colorbar(im, ax=ax)
# Loop over data dimensions and create text annotations.
for i in range(len(scale)):
for j in range(len(mem)):
text = ax.text(j, i, F1_matrix[i, j],
ha="center", va="center", color="w")
| 38.568182
| 160
| 0.476134
| 245
| 1,697
| 3.171429
| 0.367347
| 0.045045
| 0.05148
| 0.019305
| 0.136422
| 0.092664
| 0
| 0
| 0
| 0
| 0
| 0.080675
| 0.371833
| 1,697
| 43
| 161
| 39.465116
| 0.648218
| 0.031821
| 0
| 0.114286
| 0
| 0
| 0.063376
| 0.023157
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.057143
| 0
| 0.057143
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dafa0a196d3c478e9ef8c55c4f9dd2dd56b60ad
| 1,457
|
py
|
Python
|
_snippets/scrape_RAND_pdfs.py
|
vashu1/data_snippets
|
b0ae5230d60c2054c7b9278093533b7f71f3758b
|
[
"MIT"
] | 1
|
2021-02-10T20:33:43.000Z
|
2021-02-10T20:33:43.000Z
|
_snippets/scrape_RAND_pdfs.py
|
vashu1/data_snippets
|
b0ae5230d60c2054c7b9278093533b7f71f3758b
|
[
"MIT"
] | null | null | null |
_snippets/scrape_RAND_pdfs.py
|
vashu1/data_snippets
|
b0ae5230d60c2054c7b9278093533b7f71f3758b
|
[
"MIT"
] | null | null | null |
# scrape articles from RAND site, see https://vashu11.livejournal.com/20523.html
import re
import requests
from bs4 import BeautifulSoup
import os
content = ['https://www.rand.org/pubs/papers.html'] + ['https://www.rand.org/pubs/papers.{}.html'.format(i) for i in range(2, 108)]
def get_articles(page):
page = requests.get(page)
soup = BeautifulSoup(page.content, 'html.parser')
return [('https://www.rand.org' + link.get('href')) for link in soup.findAll('a', attrs={'href': re.compile("/pubs/papers/.*")})]
def get_pdfs(link):
page = requests.get(link)
soup = BeautifulSoup(page.content, 'html.parser')
name = soup.findAll('h1', attrs={'id': 'RANDTitleHeadingId'})[0].text
    return set([(name, ('https://www.rand.org' if 'http' not in link.get('href') else '') + link.get('href')) for link in soup.findAll('a', attrs={'href': re.compile(r".*\.pdf")})])
os.makedirs('pdfs', exist_ok=True)  # do not fail if the directory already exists
for page in content[11:]:
print('PAGE', page)
articles = get_articles(page)
for article in articles:
print('ARTICLE', article)
c = 0
for d in get_pdfs(article):
name, link = d
if c > 0:
name += '_{}'.format(c)
print('NAME', name)
r = requests.get(link)
l = len(r.content)
print('LEN', l)
            with open('./pdfs/' + re.sub(r'[^\w\-_\. ]', '_', name) + '.pdf', 'wb') as f:
f.write(r.content)
c += 1
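A dry-run sketch (illustrative): list a few article links from the first index page without downloading anything.

#   for url in get_articles(content[0])[:5]:
#       print(url)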
| 38.342105
| 180
| 0.577213
| 201
| 1,457
| 4.149254
| 0.363184
| 0.038369
| 0.057554
| 0.071942
| 0.280576
| 0.280576
| 0.189448
| 0.119904
| 0.119904
| 0.119904
| 0
| 0.016949
| 0.230611
| 1,457
| 37
| 181
| 39.378378
| 0.727029
| 0.053535
| 0
| 0.0625
| 0
| 0
| 0.18809
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.125
| 0
| 0.25
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9db66809b3f7cfe04fff2e0d4fd9725d23130f54
| 2,422
|
py
|
Python
|
inputs/fino2_dats.py
|
a2edap/WE-Validate
|
6e4be8228c9b4f66fb1a056f7566030b79441f2e
|
[
"BSD-3-Clause"
] | 1
|
2022-01-21T08:09:03.000Z
|
2022-01-21T08:09:03.000Z
|
inputs/fino2_dats.py
|
a2edap/WE-Validate
|
6e4be8228c9b4f66fb1a056f7566030b79441f2e
|
[
"BSD-3-Clause"
] | null | null | null |
inputs/fino2_dats.py
|
a2edap/WE-Validate
|
6e4be8228c9b4f66fb1a056f7566030b79441f2e
|
[
"BSD-3-Clause"
] | 1
|
2021-06-14T09:32:36.000Z
|
2021-06-14T09:32:36.000Z
|
# A parser for multiple FINO2 .dat files in a directory.
import os
import pathlib
import pandas as pd
import numpy as np
import glob
import sys
class fino2_dats:
"""FINO2 data class
"""
def __init__(self, info, conf):
self.path = os.path.join(
(pathlib.Path(os.getcwd()).parent), str(info['path'])
)
self.var = info['var']
# self.lev = conf['levels']['height_agl']
self.target_var = info['target_var']
def get_ts(self, lev):
"""The directory can contain multiple FINO2 files, and each file
contains data at one height level.
        The function only reads in one data file at one height level.
"""
file_list = glob.glob(os.path.join(self.path, '*.dat'))
for file in file_list:
if str(lev)+'m' in file:
df_all = pd.read_csv(file)
# Get variable name and column names
var_name = df_all.iloc[0][0].split(': ', 1)[1]
col_names = df_all.iloc[3][0].split('\t')[1:]
                df = pd.read_csv(file, skiprows=6, sep=r'\s+')
# Turn column names into 1st row
df = pd.DataFrame(np.vstack([df.columns, df]))
# Combine 2 time columns, hard coded
df['t'] = df[0].map(str)+' '+df[1]
# Drop duplicating columns
df.pop(0)
df.pop(1)
# Reassign column names
for i in range(len(col_names)):
df[col_names[i]] = df[i+2]
df.pop(i+2)
df = df.set_index('t').sort_index()
df.index = pd.to_datetime(df.index)
# FINO data are averages centered at each 10-minute period
# Data between 10:30 and 10:40 are averaged and labelled as
# 10:35
# Apply correction to label data at the end of each period
# Hence data between 10:30 and 10:40 are averaged and labelled
# as 10:40
df.index = df.index+pd.Timedelta('5minutes')
# Extract only 1 column of data
out_df = df.loc[:, [self.var]]
out_df.rename(
columns={self.var: self.target_var}, inplace=True
)
out_df = out_df.astype(float)
return out_df
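A construction sketch (illustrative; the dictionary keys mirror what the class reads, but the path and variable names are placeholders):

# info = {"path": "data/fino2", "var": "ws_62", "target_var": "ws"}
# conf = {"levels": {"height_agl": 62}}  # only used by the commented-out line in __init__
# reader = fino2_dats(info, conf)
# ts = reader.get_ts(62)  # reads the one .dat file whose name contains "62m"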
| 31.051282
| 78
| 0.514038
| 322
| 2,422
| 3.776398
| 0.400621
| 0.020559
| 0.016447
| 0.026316
| 0.078947
| 0.078947
| 0.078947
| 0.078947
| 0.078947
| 0.078947
| 0
| 0.031936
| 0.379438
| 2,422
| 77
| 79
| 31.454545
| 0.777112
| 0.28943
| 0
| 0
| 0
| 0
| 0.024522
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.162162
| 0
| 0.27027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9db67e536e2a5337dee11670942d6aa03db5b908
| 2,481
|
py
|
Python
|
bin/ess/dependencies.py
|
clu3bot/cora
|
de4d1af983c135184ebaf557271fa14c7c0e1849
|
[
"MIT"
] | null | null | null |
bin/ess/dependencies.py
|
clu3bot/cora
|
de4d1af983c135184ebaf557271fa14c7c0e1849
|
[
"MIT"
] | null | null | null |
bin/ess/dependencies.py
|
clu3bot/cora
|
de4d1af983c135184ebaf557271fa14c7c0e1849
|
[
"MIT"
] | null | null | null |
import subprocess as sp
import os
import time
import platform
from os.path import exists
# color vars
class color:
lightblue='\033[1;34m' #light blue
lightred='\033[1;31m' #light red
lightgreen='\033[1;32m' #lightgreen
red='\033[0;31m' #red
yellow='\033[1;33m' #yellow
none='\033[0m' #no color
purple='\033[1;35m' #purple
cyan='\033[0;36m' #cyan
green='\033[0;32m' #green
def permissions(): #checks for root permissions
if not os.environ.get("SUDO_UID") and os.geteuid() != 0:
print(color.lightred + "You need to run this script with sudo or as root.")
time.sleep(0.3)
quit()
permissions()
def getos():
osys=platform.system()
if osys != "Linux":
print(color.lightred + "This program only runs on Linux operating systems.")
time.sleep(2)
quit()
getos()
def check_file():
    # exists() returns a bool, so test it directly rather than against 'True'
    if exists("tmp/flag.txt"):
        os.system("rm -rf tmp/flag.txt")
    else:
        time.sleep(0.5)
check_file()
#dependencies
class dependencies:
dependencie1 = 'mdk3'
dependencie2 = 'aircrack-ng'
dependencie3 = 'xterm'
dependencie4 = 'macchanger'
def check_mdk3():
check_d1 = sp.getoutput("bash etc/dpkg-check/dpkg-check-mdk3.sh")
if check_d1 == '0':
mdk3 = 'null'
else:
mdk3 = 'inst'
return mdk3
def check_aircrack():
check_d2 = sp.getoutput("bash etc/dpkg-check/dpkg-check-aircrack-ng.sh")
if check_d2 == '0':
aircrack = 'null'
else:
aircrack = 'inst'
return aircrack
def check_xterm():
check_d3 = sp.getoutput("bash etc/dpkg-check/dpkg-check-xterm.sh")
if check_d3 == '0':
xterm = 'null'
else:
xterm = 'inst'
return xterm
def check_macchanger():
check_d4 = sp.getoutput("bash etc/dpkg-check/dpkg-check-macchanger.sh")
if check_d4 == '0':
macchanger = 'null'
else:
macchanger = 'inst'
return macchanger
def export():
mdk3 = check_mdk3()
aircrack = check_aircrack()
xterm = check_xterm()
macchanger = check_macchanger()
if mdk3 == 'null':
flag = "null"
elif aircrack == 'null':
flag = "null"
elif xterm == 'null':
flag = "null"
elif macchanger == "null":
flag = "null"
    else:
        flag = "inst"  # keep flag defined for the check below
        time.sleep(1)
if flag == 'null':
os.system("echo "+flag+" > tmp/flag.txt")
else:
check_file()
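Usage sketch (illustrative): export() probes the four dependencies through the bundled dpkg-check shell scripts and, when any is missing, writes tmp/flag.txt for the caller to pick up.

#   export()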
| 20.675
| 84
| 0.584442
| 320
| 2,481
| 4.46875
| 0.334375
| 0.05035
| 0.041958
| 0.05035
| 0.100699
| 0.100699
| 0.100699
| 0.100699
| 0
| 0
| 0
| 0.046745
| 0.275695
| 2,481
| 119
| 85
| 20.84874
| 0.749026
| 0.044337
| 0
| 0.172414
| 0
| 0
| 0.221374
| 0.061917
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091954
| false
| 0
| 0.057471
| 0
| 0.367816
| 0.022989
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9db72ff4ce32323ddaf8107b708ab0ac40987bfc
| 2,748
|
py
|
Python
|
src/bfh.py
|
Pella86/Snake4d
|
cdf3773b42efc888affa33dd22ebe56a48f6d979
|
[
"MIT"
] | 79
|
2018-05-23T09:39:00.000Z
|
2021-11-29T02:26:07.000Z
|
src/bfh.py
|
Pella86/Snake4d
|
cdf3773b42efc888affa33dd22ebe56a48f6d979
|
[
"MIT"
] | 1
|
2020-06-13T17:57:14.000Z
|
2020-06-16T15:53:40.000Z
|
src/bfh.py
|
Pella86/Snake4d
|
cdf3773b42efc888affa33dd22ebe56a48f6d979
|
[
"MIT"
] | 6
|
2018-06-28T13:03:38.000Z
|
2021-03-06T14:24:32.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 27 17:24:58 2018
@author: Mauro
"""
#==============================================================================
# Imports
#==============================================================================
import struct
#==============================================================================
# Helpers
#==============================================================================
def as_bytes(dtype, data):
return struct.pack(dtype, data)
#==============================================================================
# Constants
#==============================================================================
# little conversion table for the supported files
type_to_size = {}
type_to_size['I'] = 4
type_to_size['d'] = 8
type_to_size['c'] = 1
#==============================================================================
# Binary file class
#==============================================================================
class BinaryFile:
''' reads the bytes from a file object with custom cumulative offset'''
def __init__(self, fobj, co = 0):
'''
self.file is a file object, self.co is the cumulative offset where
to start the procedure
'''
self.file = fobj
self.co = co
def write(self, dtype, data):
''' writes a data packet and moves the offset'''
self.file.seek(self.co)
b = as_bytes(dtype, data)
self.file.write(b)
self.co += len(b)
def read(self, dtype):
'''
reads a data packet and moves the offset, returns the data packet
in the specified format
'''
self.file.seek(self.co)
size_read = type_to_size[dtype]
b = self.file.read(size_read)
self.co += size_read
return struct.unpack(dtype, b)[0]
def write_string(self, string):
'''
        Writes a string, saving the length first and then the characters
        encoded with UTF-8
'''
self.file.seek(self.co)
strlen = len(string)
#write str len
self.write("I", strlen)
fmt = 'c'*strlen
data = []
for c in string:
data.append(bytes(c, "utf-8"))
b = struct.pack(fmt, *data)
self.file.write(b)
self.co += len(b)
def read_string(self):
        ''' reads the string back from the binary file. Note: write_string
        encodes UTF-8, but this decodes ASCII, so non-ASCII input will raise.
        '''
self.file.seek(self.co)
# read the length
strlen = self.read("I")
b = self.file.read(strlen)
s = str(b, "ascii")
self.co += strlen
return s
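A self-contained round-trip sketch (illustrative), using an in-memory buffer:

if __name__ == "__main__":
    import io
    buf = io.BytesIO()
    writer = BinaryFile(buf)
    writer.write("I", 42)
    writer.write_string("hello")
    reader = BinaryFile(buf)  # fresh cumulative offset at 0
    assert reader.read("I") == 42
    assert reader.read_string() == "hello"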
| 26.941176
| 79
| 0.409025
| 279
| 2,748
| 3.953405
| 0.329749
| 0.072529
| 0.045331
| 0.058024
| 0.17951
| 0.114234
| 0.114234
| 0.063463
| 0.063463
| 0.063463
| 0
| 0.01003
| 0.274381
| 2,748
| 101
| 80
| 27.207921
| 0.543129
| 0.456696
| 0
| 0.2
| 0
| 0
| 0.011791
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.025
| 0.025
| 0.275
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9db736834f35ad283117ff978c76815cc0ba771c
| 8,726
|
py
|
Python
|
bin/read_analysis.py
|
louperelo/longmetarg
|
026b66c3621a4bcc71f5bc8a73955faf57978985
|
[
"MIT"
] | null | null | null |
bin/read_analysis.py
|
louperelo/longmetarg
|
026b66c3621a4bcc71f5bc8a73955faf57978985
|
[
"MIT"
] | null | null | null |
bin/read_analysis.py
|
louperelo/longmetarg
|
026b66c3621a4bcc71f5bc8a73955faf57978985
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import pandas as pd
from scipy import stats
import numpy as np
#import seaborn as sns
#import matplotlib.pyplot as plt
import math
from Bio import SeqIO
import io
import re
import pysam
from functools import reduce
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--bam_file", metavar="<BAM>", dest="bam", help="enter the path to the alignment.bam file. By default 'aln_F4.bam' will be used",
type=str, default="aln_F4.bam")
parser.add_argument("--reads_fasta", metavar="<FASTA>", dest="fasta", help="enter the path to the original fasta file being analysed. By default 'reads.fasta' will be used",
type=str, default="reads.fasta")
parser.add_argument("--ident", metavar="<IDENT>", dest="ident", help="enter the int value for minimum identity. By default 80 will be used",
type=int, default= 80)
parser.add_argument("--cov_length", metavar="<COV>", dest="cov", help="enter the int value for minimum coverage length. By default 95 will be used",
type=int, default= 95)
parser.add_argument("--folder_out", metavar="<OUT>", dest="out", help="enter name for output files. By default 'arg_results' will be used",
type=str, default="../out_dir/")
parser.add_argument("--aro_idx", metavar="<IDX>", dest="idx", help="enter the path to the aro_index.csv file. By default 'aro_index.tsv' will be used",
type=str, default="aro_index.tsv")
# print help message for user
parser.print_help()
# get command line arguments
args = parser.parse_args()
# read files from path
bam = args.bam
fasta = args.fasta
ident = args.ident
covlen = args.cov
folder = args.out
idx = args.idx
#read list of cigar tuples and get number of matches (0), insertions (1) or deletions (2)
#auxiliary function in parse_bam()
def read_cigar(lof_tup, idnum):
x = 0
for t in lof_tup:
if(t[0]==idnum):
x += t[1]
return x
#Joins information from BAM file in pandas dataframe
#query sequence: query_name, query_length
#reference sequence: reference_name (gives one string, is split into ARO, ID, gene name and NCBI reference id), reference_start, reference_length
#alignment: query_alignment_length, number of mismatches and gaps (tag 'NM')
#calculates sequence identity % (identity(A,B)=100*(identical nucleotides / min(length(A),length(B)))), with identical nucleotides = query_alignment_length - NM
#calculates cover length % (query_alignment_length*100 / reference_length)
pd.options.mode.chained_assignment = None
def parse_bam(bam_path):
aln_file = pysam.AlignmentFile(bam_path, "rb")
lst = []
# loop over alignments, get values per contig and store in list of lists (lst)
for index, aln in enumerate(aln_file.fetch(until_eof = True)): #index = int(0 ... n), aln = all information on read
substr = [aln.query_name, aln.query_length, aln.query_alignment_length, aln.get_tag('NM'), aln.reference_length, aln.reference_start, aln.cigartuples]
#divide information in reference_name
string = str(aln.reference_name)
start=[]
stop=[]
for i, c in enumerate(string):
if ((c==':')):
start.append(i+1)
elif (c=='|'):
stop.append(i)
else:
continue
stop.append(len(string))
for i in range(0, len(start)):
#substr = []
substr.append(string[start[i]:stop[i]])
lst.append(substr)
#print(lst[0:10])
df = pd.DataFrame(lst, columns=('contig_name', 'contig_length', 'aln_length', 'aln_nm', 'ref_length', 'ref_start', 'c_tuples', 'ref_ARO', 'ref_ID', 'ref_genename', 'ref_NCBI'))
#get number of matches from cigar tuples
df['matches'] = df['c_tuples'].apply(lambda x: read_cigar(x, 0))
df['insertions'] = df['c_tuples'].apply(lambda x: read_cigar(x, 1))
df['deletions'] = df['c_tuples'].apply(lambda x: read_cigar(x, 2))
#infer contig_length in repetitions of same contig_name (otherwise the value is 0)
for i in range(1, df.shape[0]-1):
if (df['contig_name'].iloc[i+1]==df['contig_name'].iloc[i]):
df['contig_length'].iloc[i+1] = df['contig_length'].iloc[i]
#calculate coverage length
df['cov_length'] = df['aln_length']*100/df['ref_length']
#Sequence identity is the amount of characters which match exactly between two different sequences.
#identity(A,B)=100% (num identical nucleotides / min(length(A),length(B)))
df['cov_identity'] = 100*df['matches']/(df.loc[:,['aln_length','ref_length']].min(axis=1))
return df
#Filter df for highest identity and coverlength rates
def filter_best(df, ident, cov_l):
return df[(df['cov_identity']>=ident) & (df['cov_length']>=cov_l)]
#Filter assembly fasta for contigs of interest (data) and save to out_name.fasta
#for taxonomic analysis
def arg_contigs(data, fasta, out_name):
#filter contigs with antibiotic resistance genes
arg_contigs = data['contig_name'].drop_duplicates().to_list()
# filter contig sequence information from original fasta file
#filter fasta for contigs with antibiotic resistance genes (arg) for taxonomic analysis
fasta_sequences = SeqIO.parse(open(fasta),'fasta')
with open(out_name, 'w') as out_file:
for fasta in fasta_sequences:
#name, sequence = fasta.id, fasta.seq.tostring() #tostring() should be replaced by str(fasta.seq), but is not working on my computer
name, sequence = fasta.id, str(fasta.seq)
for c in arg_contigs:
if (name==c):
out_file.write('>'+ name + '\n' + sequence + '\n')
#check for and eliminate less significant (lower cover identity) overlaps
#generate list of index numbers of non-overlapping hits from df sorted by coverage identity (highest first)
#in case of overlaps, keep the hit with the highest coverage identity
def overlaps(df_in):
df = df_in.reset_index()
#list of contig_names
reads = df['contig_name'].unique()
#list of indices to keep
keep = []
#check overlaps for one contig_name at a time
for read in reads:
#create dataframe for each contig_name, sorted by cov_identity, highest value first
readdf = df[df['contig_name']==read].sort_values(by='cov_identity', ascending=False)
#list of indices to keep for each read
k=[]
        #iterate over each entry for one read
for i in range(0, readdf.shape[0]-1):
#append first entry of sorted readdf (highest cov_identity) to list of indices to keep for this contig_name
k.append(readdf['index'].iloc[0])
#list for indices of contigs not overlapping with first entry
lst=[]
#compare first entry with all other entries
for j in range (i+1, readdf.shape[0]):
#get start s and end e position of two resistance gene hits
s1, e1 = readdf['ref_start'].iloc[i], readdf['ref_start'].iloc[i] + readdf['ref_length'].iloc[i]
s2, e2 = readdf['ref_start'].iloc[j], readdf['ref_start'].iloc[j] + readdf['ref_length'].iloc[j]
#if there is no overlap, add the entry index to lst
if (e1<s2 or e2<s1):
lst.append(readdf['index'].iloc[j])
#update readdf, only keep entries with index in lst
readdf = readdf[readdf['index'].isin(lst)]
#if updated readdf only contains one entry, add index to k and pass on to next read
if (readdf.shape[0]==1):
k.append(readdf['index'].iloc[0])
break
#if updated readdf is empty, pass on to next read
if(readdf.shape[0]==0):
break
#append indices for each read to lst keep
keep.append(k)
#flatten list of lists (keep)
keep = reduce(lambda x,y: x+y,keep)
return(df[df['index'].isin(keep)])
if __name__ == "__main__":
#extract data of interest from bam file, filter best hits and eliminate overlaps
result_df = overlaps(filter_best(parse_bam(bam), ident, covlen))
#add corresponding drug class from CARD aro_index.tsv to result_df
rgdrug_dict = pd.read_csv(idx, sep='\t').set_index('ARO Name').to_dict()['Drug Class']
result_df['drug_class'] = result_df['ref_genename'].map(rgdrug_dict)
#save result_df as tsv
result_df.to_csv("argHitsDf.tsv", sep='\t')
#save reads/contigs of hits in result_df in 'result.fasta' for further analysis with PlasFlow or Blast/Diamond
arg_contigs(result_df, fasta, "argHits.fasta")
| 47.68306
| 180
| 0.655168
| 1,280
| 8,726
| 4.352344
| 0.244531
| 0.01795
| 0.018309
| 0.015078
| 0.156704
| 0.135703
| 0.06839
| 0.027464
| 0.027464
| 0
| 0
| 0.009771
| 0.225877
| 8,726
| 182
| 181
| 47.945055
| 0.814952
| 0.358584
| 0
| 0.055046
| 0
| 0.009174
| 0.205235
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045872
| false
| 0
| 0.100917
| 0.009174
| 0.174312
| 0.009174
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9db737d0aa2bbc9904ff5f6209cdc235a2493a9c
| 6,315
|
py
|
Python
|
parkinglot/admin.py
|
YangWanjun/areaparking
|
b08bc9b8f8d5f602d823115263b9d040edb9f245
|
[
"Apache-2.0"
] | 1
|
2018-08-02T04:00:44.000Z
|
2018-08-02T04:00:44.000Z
|
parkinglot/admin.py
|
YangWanjun/areaparking
|
b08bc9b8f8d5f602d823115263b9d040edb9f245
|
[
"Apache-2.0"
] | null | null | null |
parkinglot/admin.py
|
YangWanjun/areaparking
|
b08bc9b8f8d5f602d823115263b9d040edb9f245
|
[
"Apache-2.0"
] | null | null | null |
import datetime
from django.contrib import admin
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Max
from . import models, forms
from address.biz import geocode
from utils import common
from utils.django_base import BaseAdmin
# Register your models here.
class ParkingPositionInline(admin.TabularInline):
model = models.ParkingPosition
extra = 0
class ParkingLotDocInline(admin.TabularInline):
model = models.ParkingLotDoc
form = forms.ParkingLotDocForm
extra = 0
class ParkingLotImageInline(admin.TabularInline):
model = models.ParkingLotImage
extra = 0
class ParkingLotCommentInline(admin.TabularInline):
model = models.ParkingLotComment
extra = 0
class ParkingLotKeyInline(admin.TabularInline):
model = models.ParkingLotKey
extra = 0
class ParkingLotStaffHistoryInline(admin.TabularInline):
model = models.ParkingLotStaffHistory
extra = 0
def has_add_permission(self, request):
return False
# def has_delete_permission(self, request, obj=None):
# return False
class ParkingPositionKeyInline(admin.TabularInline):
model = models.ParkingPositionKey
extra = 0
class ManagementCompanyStaffInline(admin.TabularInline):
model = models.ManagementCompanyStaff
extra = 0
@admin.register(models.ParkingLotType)
class ParkingLotTypeAdmin(BaseAdmin):
list_display = ('code', 'name')
list_display_links = ('code', 'name')
# @admin.register(models.LeaseManagementCompany)
# class LeaseManagementCompanyAdmin(BaseAdmin):
# list_display = ('name', 'department', 'position', 'staff', 'address', 'tel', 'email')
#
#
# @admin.register(models.BuildingManagementCompany)
# class BuildingManagementCompanyAdmin(BaseAdmin):
# list_display = ('name', 'department', 'position', 'staff', 'address', 'tel', 'email')
@admin.register(models.ManagementCompany)
class ManagementCompanyAdmin(BaseAdmin):
list_display = ('name', 'address', 'tel', 'email')
inlines = (ManagementCompanyStaffInline,)
@admin.register(models.TryPuttingOperator)
class TryPuttingOperatorAdmin(BaseAdmin):
pass
@admin.register(models.ParkingLot)
class ParkingLotAdmin(BaseAdmin):
form = forms.ParkingLotForm
icon = '<i class="material-icons">local_parking</i>'
list_display = ('code', 'name', 'category', 'address', 'subscription_list_send_type')
search_fields = ('code', 'name',)
inlines = (ParkingLotCommentInline, ParkingLotStaffHistoryInline, ParkingLotDocInline, ParkingLotImageInline,
ParkingLotKeyInline)
def save_model(self, request, obj, form, change):
if change is False or (
'pref_name' in form.changed_data or
'city_name' in form.changed_data or
'town_name' in form.changed_data or
'aza_name' in form.changed_data or
'other_name' in form.changed_data
):
            # If this is a new record, or the address changed, re-fetch the coordinates.
coordinate = geocode(obj.address)
if coordinate.get('lng', None):
obj.lng = coordinate.get('lng', None)
if coordinate.get('lat', None):
obj.lat = coordinate.get('lat', None)
if coordinate.get('post_code', None):
obj.post_code = coordinate.get('post_code', None)
        # When the staff member changes, append a parking-lot staff history record.
if change and 'staff' in form.changed_data:
queryset = models.ParkingLotStaffHistory.objects.public_filter(parking_lot=obj)
try:
last_staff = models.ParkingLot.objects.get(pk=obj.pk).staff
last_start_date = models.ParkingLot.objects.get(pk=obj.pk).staff_start_date
history_end_date = queryset.aggregate(Max('end_date')).get('end_date__max', None)
if (history_end_date is None or history_end_date < obj.staff_start_date) and last_start_date != obj.staff_start_date:
models.ParkingLotStaffHistory.objects.create(
parking_lot=obj,
member=last_staff,
start_date=last_start_date,
end_date=(obj.staff_start_date + datetime.timedelta(days=-1))
)
except ObjectDoesNotExist:
pass
super(ParkingLotAdmin, self).save_model(request, obj, form, change)
@admin.register(models.ParkingPosition)
class ParkingPosition(BaseAdmin):
form = forms.ParkingPositionForm
list_display = ('parking_lot', 'name', 'length', 'width', 'height', 'weight')
list_display_links = ('parking_lot', 'name',)
search_fields = ('parking_lot__code', 'parking_lot__name')
fieldsets = (
(None, {
'fields': (
'parking_lot',
'name', 'category', 'cost',
)
}),
("賃料", {
'classes': ('collapse',),
'fields': (
('price_recruitment_no_tax', 'price_recruitment'),
('price_homepage_no_tax', 'price_homepage'),
('price_handbill_no_tax', 'price_handbill'),
)
}),
("サイズ", {
'classes': ('collapse',),
'fields': (
('length', 'width', 'height', 'weight'),
('tyre_width', 'tyre_width_ap', 'min_height', 'min_height_ap'),
('f_value', 'r_value',),
)
}),
('備考', {
'fields': (
'comment',
)
}),
)
inlines = (ParkingPositionKeyInline,)
save_as = True
def save_model(self, request, obj, form, change):
continued_positions = common.get_continued_positions(obj.name)
if continued_positions:
split_positions = []
else:
split_positions = [s for s in obj.name.split(',') if s]
continued_positions.extend(split_positions)
if not change and continued_positions:
            # When adding multiple parking positions at once
for name in continued_positions:
if models.ParkingPosition.objects.public_filter(parking_lot=obj.parking_lot, name=name).count() == 0:
obj.pk = None
obj.name = name
obj.save()
else:
super(ParkingPosition, self).save_model(request, obj, form, change)
| 33.951613
| 133
| 0.62977
| 625
| 6,315
| 6.1824
| 0.28
| 0.023292
| 0.047619
| 0.060041
| 0.173913
| 0.150104
| 0.097308
| 0.080228
| 0.041925
| 0.041925
| 0
| 0.002142
| 0.260808
| 6,315
| 185
| 134
| 34.135135
| 0.825621
| 0.08361
| 0
| 0.175182
| 0
| 0
| 0.106376
| 0.023042
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021898
| false
| 0.014599
| 0.058394
| 0.007299
| 0.423358
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9db76eb5840b9b7ac5d4ffae358c55f69c7c5da4
| 965
|
py
|
Python
|
graficas.py
|
dianuchitop/el26
|
e84bb35ca9d6a603d515a624a85dae27cd4d10f2
|
[
"MIT"
] | null | null | null |
graficas.py
|
dianuchitop/el26
|
e84bb35ca9d6a603d515a624a85dae27cd4d10f2
|
[
"MIT"
] | null | null | null |
graficas.py
|
dianuchitop/el26
|
e84bb35ca9d6a603d515a624a85dae27cd4d10f2
|
[
"MIT"
] | null | null | null |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
filenames=["euler.dat","rk4.dat","leapfrog.dat"]
fig, axs = plt.subplots(nrows=3, ncols=3)
ax=axs[0][0]
ax.set_title('Euler')
ax=axs[0][1]
ax.set_title('RK4')
ax=axs[0][2]
ax.set_title('Leap_frog')
for i in range(3):
f=open(filenames[i],"r")
s=list(map(float,f.readline().split()))
s1=list(map(float,f.readline().split()))
time=list(map(float,f.readline().split()))
ax=axs[0][i]
ax.set_xlabel("time")
ax.set_ylabel("posistion")
ax.plot(time,s )
ax.set_ylim(-1.5,1.5)
ax.set_xlim(0,15)
ax=axs[1][i]
ax.plot(time, s1)
ax.set_ylim(-1.5,1.5)
ax.set_xlim(0,15)
ax.set_xlabel("time")
ax.set_ylabel("velocity")
ax=axs[2][i]
ax.plot(s, s1)
ax.set_ylim(-2.0,2.0)
ax.set_xlim(-2.0,2.0)
ax.set_xlabel("position")
ax.set_ylabel("velocity")
fig.subplots_adjust(hspace=1, wspace=1)
plt.savefig('graficas.png')
plt.show()
| 24.74359
| 48
| 0.635233
| 181
| 965
| 3.292818
| 0.320442
| 0.125839
| 0.040268
| 0.065436
| 0.33557
| 0.33557
| 0.174497
| 0.090604
| 0.090604
| 0.090604
| 0
| 0.049939
| 0.149223
| 965
| 38
| 49
| 25.394737
| 0.676005
| 0
| 0
| 0.216216
| 0
| 0
| 0.102697
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.081081
| 0
| 0.081081
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dbc6591cdea251b119f8bcead36767b18ac8b75
| 4,654
|
py
|
Python
|
mailpile/plugins/contacts.py
|
k0nsl/Mailpile
|
556f5f9040c4e01b005b4d633f3213668a474936
|
[
"Apache-2.0"
] | null | null | null |
mailpile/plugins/contacts.py
|
k0nsl/Mailpile
|
556f5f9040c4e01b005b4d633f3213668a474936
|
[
"Apache-2.0"
] | null | null | null |
mailpile/plugins/contacts.py
|
k0nsl/Mailpile
|
556f5f9040c4e01b005b4d633f3213668a474936
|
[
"Apache-2.0"
] | null | null | null |
import mailpile.plugins
from mailpile.commands import Command
from mailpile.mailutils import Email, ExtractEmails
from mailpile.util import *
class VCard(Command):
"""Add/remove/list/edit vcards"""
ORDER = ('Internals', 6)
KIND = ''
SYNOPSIS = '<nickname>'
def command(self, save=True):
session, config = self.session, self.session.config
vcards = []
for email in self.args:
vcard = config.get_vcard(email)
if vcard:
vcards.append(vcard)
else:
session.ui.warning('No such contact: %s' % email)
return vcards
def _fparse(self, fromdata):
email = ExtractEmails(fromdata)[0]
name = fromdata.replace(email, '').replace('<>', '').strip()
return email, (name or email)
def _prepare_new_vcard(self, vcard):
pass
def _valid_vcard_handle(self, vc_handle):
return (vc_handle and '@' in vc_handle[1:])
def _add_from_messages(self):
pairs, idx = [], self._idx()
for email in [Email(idx, i) for i in self._choose_messages(self.args)]:
pairs.append(self._fparse(email.get_msg_info(idx.MSG_FROM)))
return pairs
def _pre_delete_vcard(self, vcard):
pass
def add_vcards(self):
session, config, idx = self.session, self.session.config, self._idx()
if (len(self.args) > 2
and self.args[1] == '='
and self._valid_vcard_handle(self.args[0])):
pairs = [(self.args[0], ' '.join(self.args[2:]))]
elif self.data:
if self.data.has_key("@contactname") and self.data.has_key("@contactemail"):
pairs = [(self.data["@contactemail"], self.data["@contactname"])]
elif self.data.has_key("contactnames") and self.data.has_key("contactemails"):
pairs = zip(self.data["contactemails"], self.data["contactnames"])
else:
pairs = self._add_from_messages()
if pairs:
vcards = []
for handle, name in pairs:
if handle.lower() not in config.vcards:
vcard = config.add_vcard(handle, name, self.KIND)
self._prepare_new_vcard(vcard)
vcards.append(vcard)
else:
session.ui.warning('Already exists: %s' % handle)
else:
return self._error('Nothing to do!')
return {"contacts": [x.as_mpCard() for x in vcards]}
def _format_values(self, key, vals):
if key.upper() in ('MEMBER', ):
return [['mailto:%s' % e, []] for e in vals]
else:
return [[e, []] for e in vals]
def set_vcard(self):
session, config = self.session, self.session.config
handle, var = self.args[0], self.args[1]
if self.args[2] == '=':
val = ' '.join(self.args[3:])
else:
val = ' '.join(self.args[2:])
try:
vcard = config.get_vcard(handle)
if not vcard:
return self._error('Contact not found')
config.deindex_vcard(vcard)
if val:
if ',' in val:
vcard[var] = self._format_values(var, val.split(','))
else:
vcard[var] = val
else:
del vcard[var]
vcard.save()
config.index_vcard(vcard)
session.ui.display_vcard(vcard, compact=False)
return True
except:
self._ignore_exception()
return self._error('Error setting %s = %s' % (var, val))
def rm_vcards(self):
session, config = self.session, self.session.config
for handle in self.args:
vcard = config.get_vcard(handle)
if vcard:
self._pre_delete_vcard(vcard)
config.del_vcard(handle)
else:
session.ui.error('No such contact: %s' % handle)
return True
def find_vcards(self):
session, config = self.session, self.session.config
if self.args and self.args[0] == '--full':
self.args.pop(0)
compact = False
else:
compact = True
kinds = self.KIND and [self.KIND] or []
vcards = config.find_vcards(self.args, kinds=kinds)
#for vcard in vcards:
# session.ui.display_vcard(vcard, compact=compact)
ctx = {}
ctx["contacts"] = [x.as_mpCard() for x in vcards]
ctx["query"] = " ".join(self.args)
ctx["total"] = len(vcards)
ctx["start"] = 1
ctx["end"] = len(vcards)
ctx["count"] = len(vcards)
return ctx
SUBCOMMANDS = {
'add': (add_vcards, '<msgs>|<email> = <name>'),
'set': (set_vcard, '<email> <attr> <value>'),
'list': (find_vcards, '[--full] [<terms>]'),
'delete': (rm_vcards, '<email>'),
}
class Contact(VCard):
"""Add/remove/list/edit contacts"""
KIND = 'individual'
ORDER = ('Tagging', 3)
SYNOPSIS = '<email>'
TEMPLATE_IDS = ['contact']
mailpile.plugins.register_command('C:', 'contact=', Contact)
mailpile.plugins.register_command('_vcard', 'vcard=', VCard)
| 30.220779
| 84
| 0.613666
| 608
| 4,654
| 4.577303
| 0.225329
| 0.051743
| 0.054977
| 0.039526
| 0.247575
| 0.17571
| 0.139418
| 0.103845
| 0.036651
| 0
| 0
| 0.004757
| 0.232058
| 4,654
| 153
| 85
| 30.418301
| 0.773923
| 0.027503
| 0
| 0.20155
| 0
| 0
| 0.096831
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085271
| false
| 0.015504
| 0.031008
| 0.007752
| 0.294574
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dbe26545533c7c7d397d2847ba2a1eeca8ad8ef
| 1,663
|
py
|
Python
|
hw2/codes/plot.py
|
Trinkle23897/Artificial-Neural-Network-THU-2018
|
3326ed131298caaaf3fd0b6af80de37fd1ff9526
|
[
"MIT"
] | 38
|
2019-01-23T07:14:19.000Z
|
2022-03-07T06:03:21.000Z
|
hw2/codes/plot.py
|
ywythu/Artificial-Neural-Network-THU-2018
|
3326ed131298caaaf3fd0b6af80de37fd1ff9526
|
[
"MIT"
] | null | null | null |
hw2/codes/plot.py
|
ywythu/Artificial-Neural-Network-THU-2018
|
3326ed131298caaaf3fd0b6af80de37fd1ff9526
|
[
"MIT"
] | 17
|
2019-03-30T06:33:06.000Z
|
2021-12-24T10:42:39.000Z
|
import numpy as np
from pylab import *
D = 10
acc1 = np.load('res/small/acc.npy').reshape(D, -1).mean(axis=0)
loss1 = np.load('res/small/loss.npy').reshape(D, -1).mean(axis=0)
acc2 = np.load('res/large/acc.npy').reshape(D, -1).mean(axis=0)
loss2 = np.load('res/large/loss.npy').reshape(D, -1).mean(axis=0)
cut = int(acc1.shape[0] / 10 * 4)
print(' 1: %.2f %.6f'%(100*acc1[:cut].max(), loss1[:cut].min()))
print(' 2: %.2f %.6f'%(100*acc2[:cut].max(), loss2[:cut].min()))
iter_ = np.arange(acc1.shape[0]) * D
print(acc1.shape, iter_.shape[0])
figure()
p = subplot(111)
p.plot(iter_[:cut], loss1[:cut], '-', label='Original CNN')
p.plot(iter_[:cut], loss2[:cut], '-', label='Designed CNN')
p.set_ylim((0, .4))
p.set_xlabel(r'# of Iterations')
p.set_ylabel(r'Loss')
p.legend(loc='upper right')
tight_layout()
savefig("loss.pdf")
figure()
p = subplot(111)
p.plot(iter_[:cut], acc1[:cut], '-', label='Original CNN')
p.plot(iter_[:cut], acc2[:cut], '-', label='Designed CNN')
p.set_ylim((.9, 1))
p.set_xlabel(r'# of Iterations')
p.set_ylabel(r'Accuracy')
p.legend(loc='lower right')
tight_layout()
savefig("acc.pdf")
# 1: 23:24:44.414 Testing, total mean loss 0.019417, total acc 0.863300 - 23:24:33.131
# 2s: 20:20:39.807 Testing, total mean loss 0.003224, total acc 0.967700 - 20:18:21.597
# 2r: 20:48:01.448 Testing, total mean loss 0.002306, total acc 0.981300 - 20:45:16.709
#-2r: 20:38:47.940 Testing, total mean loss 0.002271, total acc 0.981500 - 20:35:59.910
# 3s: 00:38:10.865 Testing, total mean loss 0.001759, total acc 0.980098 - 00:33:01.622
# 3r: 21:24:04.253 Testing, total mean loss 0.001675, total acc 0.980588 - 21:19:28.262
| 41.575
| 91
| 0.654841
| 313
| 1,663
| 3.434505
| 0.364217
| 0.022326
| 0.089302
| 0.111628
| 0.433488
| 0.316279
| 0.316279
| 0.266047
| 0.063256
| 0.063256
| 0
| 0.1718
| 0.121467
| 1,663
| 40
| 92
| 41.575
| 0.563997
| 0.323512
| 0
| 0.25
| 0
| 0
| 0.203041
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0.09375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dbe2a0458905fed950a4384ff34ad0dc77f394d
| 696
|
py
|
Python
|
app/helpers/__init__.py
|
jaywonder20/Flask_Api_Starter
|
d3cf69f4742923737e826261f5e737f00d1c6270
|
[
"MIT"
] | 1
|
2020-07-28T13:28:42.000Z
|
2020-07-28T13:28:42.000Z
|
app/helpers/__init__.py
|
jaywonder20/Flask_Api_Starter
|
d3cf69f4742923737e826261f5e737f00d1c6270
|
[
"MIT"
] | null | null | null |
app/helpers/__init__.py
|
jaywonder20/Flask_Api_Starter
|
d3cf69f4742923737e826261f5e737f00d1c6270
|
[
"MIT"
] | null | null | null |
from flask_restful import reqparse
def send_api_response(response_code, response_message, http_status, response_data=None):
    # Use None instead of a mutable default argument.
    if response_data is None:
        response_data = {}
    if http_status not in [200, 201]:
return {'responseCode': response_code,
'responseMessage': response_message
}, int(http_status), \
{"Access-Control-Allow-Origin": "*"}
else:
return {'responseCode': response_code,
'responseMessage': response_message,
'data': response_data
}, int(http_status), \
{"Access-Control-Allow-Origin": "*"}
parser = reqparse.RequestParser()
parser.add_argument('email_address', help='field cannot be blank.')
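A quick usage sketch (illustrative values; flask_restful must be installed for the module import):

if __name__ == "__main__":
    body, status, headers = send_api_response("00", "success", 200, {"id": 1})
    print(status, body, headers)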
| 33.142857
| 86
| 0.616379
| 69
| 696
| 5.971014
| 0.565217
| 0.097087
| 0.126214
| 0.145631
| 0.470874
| 0.470874
| 0.470874
| 0
| 0
| 0
| 0
| 0.011742
| 0.265805
| 696
| 20
| 87
| 34.8
| 0.794521
| 0
| 0
| 0.4
| 0
| 0
| 0.21408
| 0.077586
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dc60e93e26c2a9f12204a366a70cced0bf9b339
| 4,081
|
py
|
Python
|
chapter_3_featurization/text_features.py
|
fancyerii/voicebook
|
def82da8577086d0361643a05fec2463006533a9
|
[
"Apache-2.0"
] | 1
|
2020-03-05T01:19:17.000Z
|
2020-03-05T01:19:17.000Z
|
chapter_3_featurization/text_features.py
|
fancyerii/voicebook
|
def82da8577086d0361643a05fec2463006533a9
|
[
"Apache-2.0"
] | null | null | null |
chapter_3_featurization/text_features.py
|
fancyerii/voicebook
|
def82da8577086d0361643a05fec2463006533a9
|
[
"Apache-2.0"
] | null | null | null |
'''
================================================
## VOICEBOOK REPOSITORY ##
================================================
repository name: voicebook
repository version: 1.0
repository link: https://github.com/jim-schwoebel/voicebook
author: Jim Schwoebel
author contact: js@neurolex.co
description: a book and repo to get you started programming voice applications in Python - 10 chapters and 200+ scripts.
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-09-28
This code (voicebook) is hereby released under a Apache 2.0 license license.
For more information, check out the license terms below.
================================================
## LICENSE TERMS ##
================================================
Copyright 2018 NeuroLex Laboratories, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
## SERVICE STATEMENT ##
================================================
If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in Kafka distributed architectures, microservices
built on top of Node.js / Python / Docker, and applying machine learning to
model speech and text data.
We have helped a wide variety of enterprises - small businesses,
researchers, enterprises, and/or independent developers.
If you would like to work with us let us know @ js@neurolex.co.
================================================
## TEXT_FEATURES.PY ##
================================================
extract all text features:
nltk_features()
spacy_features()
gensim_features()
'''
import transcribe as ts
import sounddevice as sd
import soundfile as sf
import nltk_features as nf
import spacy_features as spf
import gensim_features as gf
import numpy as np
import os, json
def sync_record(filename, duration, fs, channels):
print('recording')
myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=channels)
sd.wait()
sf.write(filename, myrecording, fs)
print('done recording')
def text_featurize(filename,jsondump):
# transcribe with sphinx
    transcript = ts.transcribe_sphinx(filename)  # use the passed-in file rather than a hard-coded name
# now put transcript through various feature engines
nltk_featureset, nltk_labels=nf.nltk_featurize(transcript)
spacy_featureset, spacy_labels=spf.spacy_featurize(transcript)
# make gensim embedding on alice and wonderland text
# (or any text corpus you'd like)
modelname='alice.pickle'
if modelname not in os.listdir():
text=open('alice.txt').read()
gf.w2v_train(text,100,modelname)
gensim_featureset=gf.sentence_embedding(transcript,100,modelname)
data={
'transcript':transcript,
'transcript type':'sphinx',
'nltk':np.array(nltk_featureset).tolist(),
'spacy':np.array(spacy_featureset).tolist(),
'gensim':np.array(gensim_featureset).tolist(),
}
if jsondump == True:
jsonfilename=filename[0:-4]+'.json'
jsonfile=open(jsonfilename,'w')
json.dump(data,jsonfile)
jsonfile.close()
return data
# # record and get transcript
# if 'test.wav' not in os.listdir():
# sync_record('test.wav', 10, 44100, 2)
# # now extract all text features
# data=text_featurize('test.wav', True)
| 34.584746
| 121
| 0.639304
| 497
| 4,081
| 5.201207
| 0.480885
| 0.027079
| 0.009284
| 0.011605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013956
| 0.192355
| 4,081
| 117
| 122
| 34.880342
| 0.770328
| 0.669199
| 0
| 0
| 0
| 0
| 0.078137
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.222222
| 0
| 0.305556
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dcad228e81ec6b0f9a3bb86c1710900d1f1972c
| 1,755
|
py
|
Python
|
3. Python Advanced (September 2021)/3.2 Python OOP (October 2021)/24. Exam Preparation/22.08.2020/project/everland.py
|
kzborisov/SoftUni
|
ccb2b8850adc79bfb2652a45124c3ff11183412e
|
[
"MIT"
] | 1
|
2021-02-07T07:51:12.000Z
|
2021-02-07T07:51:12.000Z
|
3. Python Advanced (September 2021)/3.2 Python OOP (October 2021)/24. Exam Preparation/22.08.2020/project/everland.py
|
kzborisov/softuni
|
9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751
|
[
"MIT"
] | null | null | null |
3. Python Advanced (September 2021)/3.2 Python OOP (October 2021)/24. Exam Preparation/22.08.2020/project/everland.py
|
kzborisov/softuni
|
9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751
|
[
"MIT"
] | null | null | null |
class Everland:
def __init__(self):
self.rooms = []
def add_room(self, room):
self.rooms.append(room)
def get_monthly_consumptions(self):
total_consumption = 0
for room in self.rooms:
total_consumption += room.expenses + room.room_cost
return f"Monthly consumption: {total_consumption:.2f}$."
def pay(self):
result = []
        for room in list(self.rooms):  # iterate over a copy; rooms may be removed below
total_cost = room.expenses + room.room_cost
if room.budget >= total_cost:
room.budget -= total_cost
result.append(f"{room.family_name} paid {total_cost:.2f}$ and"
f" have {room.budget:.2f}$ left.")
else:
self.rooms.remove(room)
result.append(f"{room.family_name} does not have enough"
f" budget and must leave the hotel.")
return "\n".join(result)
def status(self):
result = ""
result += f"Total population: {sum([r.members_count for r in self.rooms])}\n"
for r in self.rooms:
result += f"{r.family_name} with {r.members_count} members. Budget: {r.budget:.2f}$, " \
f"Expenses: {r.expenses:.2f}$\n"
if r.children:
counter = 0
for c in r.children:
counter += 1
result += f"--- Child {counter} monthly cost: {c.cost * 30:.2f}$\n"
if hasattr(r, "appliances"):
total_expenses = 0
for a in r.appliances:
total_expenses += a.get_monthly_expense()
result += f"--- Appliances monthly cost: {total_expenses:.2f}$\n"
return result
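# Usage sketch (not part of the original solution): the _Room class below is a
# hypothetical stand-in; the exam task defines its own room types exposing
# family_name, members_count, budget, expenses, room_cost and children.
if __name__ == '__main__':
    class _Room:
        def __init__(self, family_name, members_count, budget, expenses, room_cost):
            self.family_name = family_name
            self.members_count = members_count
            self.budget = budget
            self.expenses = expenses
            self.room_cost = room_cost
            self.children = []

    hotel = Everland()
    hotel.add_room(_Room('Smith', 4, 1200.0, 250.0, 300.0))
    hotel.add_room(_Room('Jones', 2, 100.0, 250.0, 300.0))
    print(hotel.get_monthly_consumptions())  # Monthly consumption: 1100.00$.
    print(hotel.pay())                       # Smith pays; Jones must leave
    print(hotel.status())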
| 39
| 100
| 0.520228
| 206
| 1,755
| 4.305825
| 0.291262
| 0.071026
| 0.049605
| 0.029312
| 0.200676
| 0.11274
| 0
| 0
| 0
| 0
| 0
| 0.011638
| 0.363533
| 1,755
| 44
| 101
| 39.886364
| 0.782453
| 0
| 0
| 0.05
| 0
| 0.025
| 0.271795
| 0.039316
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.225
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dcd01c7a81f81cad912ec87f997c4e5ba58f9bb
| 2,448
|
py
|
Python
|
minifold/log.py
|
nokia/minifold
|
3687d32ab6119dc8293ae370c8c4ba9bbbb47deb
|
[
"BSD-3-Clause"
] | 15
|
2018-09-03T09:40:59.000Z
|
2021-07-16T16:14:46.000Z
|
src/log.py
|
Infinite-Blue-1042/minifold
|
cd0aa9207f9e1819ed2ecbb24373cdcfe27abd16
|
[
"BSD-3-Clause"
] | null | null | null |
src/log.py
|
Infinite-Blue-1042/minifold
|
cd0aa9207f9e1819ed2ecbb24373cdcfe27abd16
|
[
"BSD-3-Clause"
] | 8
|
2019-01-25T07:18:59.000Z
|
2021-04-07T17:54:54.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the minifold project.
# https://github.com/nokia/minifold
__author__ = "Marc-Olivier Buob"
__maintainer__ = "Marc-Olivier Buob"
__email__ = "marc-olivier.buob@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2018, Nokia"
__license__ = "BSD-3"
import sys
from pprint import pformat
DEBUG = 0
INFO = 1
WARNING = 2
ERROR = 3
# Shell colors
DEFAULT = 0
RED = 1
GREEN = 2
YELLOW = 3
BLUE = 4
PINK = 5
CYAN = 6
GRAY = 7
# Shell style
DEFAULT = 0
BOLD = 1
UNDERLINED = 4
BLINKING = 5
HIGHLIGHTED = 7
class Log:
enable_print = False
    # TODO: The following static parameters should be loaded from ~/.minifoldrc
# TODO: dark / light colors
with_color = True
log_level = 0
message_header = {
DEBUG : "DEBUG",
INFO : "INFO",
WARNING : "WARNING",
ERROR : "ERROR",
}
message_color = {
DEBUG : CYAN,
INFO : GREEN,
WARNING : YELLOW,
ERROR : RED,
}
@staticmethod
def start_style(
fg_color :int = None,
bg_color :int = None,
styles :list = list()
) -> str:
styling = list()
        if fg_color is not None: styling.append("3%d" % fg_color)
        if bg_color is not None: styling.append("4%d" % bg_color)
if styles: styling += styles
return "\033[%sm" % ";".join(styling) if styling else ""
@staticmethod
def default_style() -> str:
return "\033[0m"
@classmethod
def print(cls, message_type :int, message :str, file = sys.stderr):
if cls.enable_print and message_type >= cls.log_level:
color = cls.message_color[message_type]
header = cls.message_header[message_type]
print(
"%(start_style)s%(message)s%(end_style)s" % {
"start_style" : cls.start_style(fg_color = color),
"message" : " ".join([header, message if isinstance(message, str) else pformat(message)]),
"end_style" : cls.default_style()
},
file = file
)
@classmethod
def debug(cls, s): cls.print(DEBUG, s)
@classmethod
def info(cls, s): cls.print(INFO, s)
@classmethod
def warning(cls, s): cls.print(WARNING, s)
@classmethod
def error(cls, s): cls.print(ERROR, s)
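# Usage sketch (appended for illustration; not part of the original module):
if __name__ == '__main__':
    Log.enable_print = True
    Log.log_level = DEBUG
    Log.debug({'answer': 42})          # dict is pformat-ed, printed in cyan
    Log.error('something went wrong')  # printed in red with an ERROR header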
| 24
| 114
| 0.562908
| 296
| 2,448
| 4.493243
| 0.381757
| 0.052632
| 0.021053
| 0.03609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02042
| 0.319853
| 2,448
| 101
| 115
| 24.237624
| 0.778378
| 0.099265
| 0
| 0.12
| 0
| 0
| 0.096128
| 0.034624
| 0
| 0
| 0
| 0.009901
| 0
| 1
| 0.093333
| false
| 0
| 0.026667
| 0.013333
| 0.226667
| 0.12
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dce2d32fa35d3b007796ab403b5019d5baeeffb
| 2,820
|
py
|
Python
|
data_collection/omscs_website/omscs_cleaner.py
|
yashchitalia/jack-holmes
|
1ce3c65c1477390fb15d99a14f608f62745548b1
|
[
"Apache-2.0"
] | 1
|
2017-03-30T02:25:18.000Z
|
2017-03-30T02:25:18.000Z
|
data_collection/omscs_website/omscs_cleaner.py
|
yashchitalia/jack-holmes
|
1ce3c65c1477390fb15d99a14f608f62745548b1
|
[
"Apache-2.0"
] | null | null | null |
data_collection/omscs_website/omscs_cleaner.py
|
yashchitalia/jack-holmes
|
1ce3c65c1477390fb15d99a14f608f62745548b1
|
[
"Apache-2.0"
] | null | null | null |
from bs4 import BeautifulSoup
import re
import urllib
import pickle as pkl
def cleanhtml(raw_html):
    # strip HTML tags
    text = re.sub('<.*?>', '', raw_html)
    # drop escaped-unicode artifacts left over from encoding
    text = re.sub('\\xa0', '', text)
    text = re.sub('\\u2019s', '', text)
    text = re.sub('\\u2019ll', ' ', text)
    # replace literal escape sequences with plain spaces
    for escape in ('\\xa0', '\\u2014', '\\u201c', '\\u201d', '\\u2013'):
        text = text.replace(escape, ' ')
    return text
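# Illustrative check (not in the original script): HTML tags are stripped and
# literal escape sequences such as '\xa0' are replaced by spaces.
# >>> cleanhtml('<p>OMSCS\\xa0courses</p>')
# 'OMSCS courses'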
unclean_dat = pkl.load(open('omscs_website_data.p', 'rb'))
clean_dat = {}
for course_number in unclean_dat.keys():
curr_unclean_dat = unclean_dat[course_number]
curr_clean_dat = {}
for attribute in curr_unclean_dat.keys():
if attribute == 'Instructor':
try:
instructor_name = str(curr_unclean_dat[attribute][0])
            except Exception:
continue
curr_clean_dat[attribute] = instructor_name
elif attribute == 'Name':
try:
class_name = str(curr_unclean_dat[attribute])
            except Exception:
continue
curr_clean_dat[attribute] = class_name
elif attribute in ['Overview', 'Prerequisites', 'Grading', 'Technical', 'Reading']:
final_string= ''
unclean_list = curr_unclean_dat[attribute]
unclean_list.pop(0)
for item in unclean_list:
try:
if str(type(item)) == "<class 'bs4.element.NavigableString'>":
item = item.encode('ascii', errors='backslashreplace')
if str(item) == '\n':
continue
final_string = final_string+ ' ' + str(item)
elif str(type(item)) == "<class 'bs4.element.Tag'>":
if item.next == '\n':
continue
final_string = final_string+ ' '+ str(item.next)
except UnicodeEncodeError:
item = item.encode('ascii', errors='backslashreplace')
if str(item) == '\n':
continue
final_string = final_string+ ' ' + str(item)
html_cleaned_string = cleanhtml(final_string)
curr_clean_dat[attribute] = html_cleaned_string
continue
clean_dat[course_number] = curr_clean_dat
pkl.dump(clean_dat, open('omscs_cleaned_data.p', 'wb'))
| 40.285714
| 91
| 0.575887
| 298
| 2,820
| 5.174497
| 0.278523
| 0.093385
| 0.103761
| 0.077821
| 0.411154
| 0.392996
| 0.239948
| 0.138781
| 0.114137
| 0.114137
| 0
| 0.014834
| 0.306738
| 2,820
| 69
| 92
| 40.869565
| 0.773913
| 0
| 0
| 0.274194
| 0
| 0
| 0.099681
| 0.010642
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016129
| false
| 0
| 0.064516
| 0
| 0.096774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dce34cc1f5685467f230a6aaddab0a3ca10dd09
| 1,116
|
py
|
Python
|
testinfra/test_hypervisor-runc.py
|
devbox-tools/sfc
|
0a5a9c3db165b35506f84d4c2dbfc1dace3fcea1
|
[
"Apache-2.0"
] | 1
|
2019-02-26T13:25:17.000Z
|
2019-02-26T13:25:17.000Z
|
testinfra/test_hypervisor-runc.py
|
devbox-tools/sfc
|
0a5a9c3db165b35506f84d4c2dbfc1dace3fcea1
|
[
"Apache-2.0"
] | null | null | null |
testinfra/test_hypervisor-runc.py
|
devbox-tools/sfc
|
0a5a9c3db165b35506f84d4c2dbfc1dace3fcea1
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import utils
import yaml
class TestHypervisorRunC(utils.Base):
def test_slaves_are_running(self, host):
assert host.check_output("runc list -q")
def test_slaves_are_isolated(self, host):
group_vars = yaml.safe_load(open(
"/var/lib/software-factory/ansible/group_vars/all.yaml"))
if group_vars.get("enable_insecure_slaves") is not True:
# Make sure managesf internal url access fails
assert host.run("curl --connect-timeout 3 %s" % group_vars[
"managesf_internal_url"]).rc in (7, 28)
| 39.857143
| 75
| 0.713262
| 164
| 1,116
| 4.756098
| 0.670732
| 0.076923
| 0.033333
| 0.041026
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008989
| 0.202509
| 1,116
| 27
| 76
| 41.333333
| 0.867416
| 0.508065
| 0
| 0
| 0
| 0
| 0.251866
| 0.179104
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.181818
| false
| 0
| 0.181818
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dcee3a8fc687322519c4ee6dd19ea787ec8d273
| 280
|
py
|
Python
|
Frameworks/urls.py
|
MiniJez/TP_Django
|
e7540f3178d44efeab69a8c8bea14a70fdaa9b4e
|
[
"MIT"
] | null | null | null |
Frameworks/urls.py
|
MiniJez/TP_Django
|
e7540f3178d44efeab69a8c8bea14a70fdaa9b4e
|
[
"MIT"
] | null | null | null |
Frameworks/urls.py
|
MiniJez/TP_Django
|
e7540f3178d44efeab69a8c8bea14a70fdaa9b4e
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import index, create, delete, update
urlpatterns = [
path('', index, name='index'),
path('create/', create, name='create'),
path('delete/<int:pk>', delete, name='delete'),
path('update/<int:pk>', update, name='update'),
]
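# For context, a minimal sketch of the views module these routes import from
# (hypothetical; the real TP_Django project defines its own views):
#
# from django.shortcuts import render, redirect
#
# def index(request):
#     return render(request, 'index.html')
#
# def create(request):
#     ...  # handle the form POST, then redirect('index')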
| 28
| 51
| 0.639286
| 36
| 280
| 4.972222
| 0.388889
| 0.055866
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157143
| 280
| 10
| 52
| 28
| 0.758475
| 0
| 0
| 0
| 0
| 0
| 0.213523
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dd02fb84f2d21edf2c3f482fb528f7ff864783d
| 1,831
|
py
|
Python
|
scrape.py
|
valvoda/holjplus
|
6a214911b477adf1253b43e46f7f5afc3076a86a
|
[
"MIT"
] | null | null | null |
scrape.py
|
valvoda/holjplus
|
6a214911b477adf1253b43e46f7f5afc3076a86a
|
[
"MIT"
] | null | null | null |
scrape.py
|
valvoda/holjplus
|
6a214911b477adf1253b43e46f7f5afc3076a86a
|
[
"MIT"
] | null | null | null |
"""
Adapted from https://realpython.com/python-web-scraping-practical-introduction/
for the purpose of scraping https://publications.parliament.uk/pa/ld/ldjudgmt.HTML
to create an expanded HOLJ+ corpus
"""
import requests
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
class Scrape:
def simple_get(self, url):
"""
Attempts to get the content at `url` by making an HTTP GET request.
If the content-type of response is some kind of HTML/XML, return the
text content, otherwise return None
"""
try:
with closing(get(url, stream=True)) as resp:
if self.is_good_response(resp):
return resp.content
else:
return None
except RequestException as e:
self.log_error('Error during requests to {0} : {1}'.format(url, str(e)))
return None
def is_good_response(self, resp):
"""
Returns true if the response seems to be HTML, false otherwise
"""
content_type = resp.headers['Content-Type'].lower()
return (resp.status_code == 200
and content_type is not None
and content_type.find('html') > -1)
def log_error(self, e):
"""
It is always a good idea to log errors.
This function just prints them, but you can
make it do anything.
"""
print(e)
if __name__ == "__main__":
sc = Scrape()
print("Testing the scaper:")
raw_html = sc.simple_get('https://realpython.com/blog/')
assert (len(raw_html) > 0), "Error, does not get"
no_html = sc.simple_get("https://doesnotexist.com/thereshouldbenothing/")
    assert no_html is None, "Error, does get"
print("Working")
| 30.516667
| 84
| 0.616057
| 237
| 1,831
| 4.654008
| 0.49789
| 0.049864
| 0.032638
| 0.027199
| 0.036265
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005344
| 0.284544
| 1,831
| 59
| 85
| 31.033898
| 0.836641
| 0.293829
| 0
| 0.066667
| 0
| 0
| 0.162299
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.1
| false
| 0
| 0.133333
| 0
| 0.4
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dd06c5c9ed12f49b25dc9756a8a419ae3530b18
| 1,881
|
py
|
Python
|
emotional_ai/model.py
|
fuluny/Emotional-AI
|
1372933ec410f72cd500513ea560f43167382e34
|
[
"MIT"
] | null | null | null |
emotional_ai/model.py
|
fuluny/Emotional-AI
|
1372933ec410f72cd500513ea560f43167382e34
|
[
"MIT"
] | null | null | null |
emotional_ai/model.py
|
fuluny/Emotional-AI
|
1372933ec410f72cd500513ea560f43167382e34
|
[
"MIT"
] | null | null | null |
# #!/usr/bin/python
import os
import numpy as np
import pandas as pd
from keras.models import load_model
from keras.models import Sequential
from keras.utils import np_utils
from keras.layers.core import Dense, Activation, Dropout
from keras import optimizers
from matplotlib import pyplot as plt
print('Loading data...')
data = pd.read_csv('fer2013.csv')
#data = pd.read_csv('testdata.csv')
im = data['pixels']
im_list = []
print('Pre-processing data...')
for i in range(len(im)):
im_list.append(list(map(int,im[i].split())))
X_train = np.asarray(im_list).astype('float32')
y_train = np_utils.to_categorical(np.asarray(data['emotion']))
X_train *= 2.0/255
X_train -= 1
input_dim = X_train.shape[1]
nb_classes = y_train.shape[1]
# Parameters were chosen from most commonly used and sometimes at random
# Further development of the model may be needed
print('Making model')
model = Sequential()
# Dense define number of nodes
model.add(Dense(1000, input_dim=input_dim))
# Activation defines the output
model.add(Activation('relu'))
# Dropout to avoid overfitting.
model.add(Dropout(0.15))
model.add(Dense(500))
model.add(Activation('relu'))
model.add(Dropout(0.15))
model.add(Dense(100))
model.add(Activation('relu'))
model.add(Dropout(0.15))
model.add(Dense(50))
model.add(Activation('relu'))
model.add(Dropout(0.15))
model.add(Dense(10))
model.add(Activation('relu'))
model.add(Dropout(0.15))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
print(model.summary())
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',optimizer=sgd,metrics=['accuracy'])
print("Training...")
model.fit(X_train, y_train, epochs=100, validation_split=0.1, verbose=2)
scores = model.evaluate(X_train, y_train, verbose=0)
print(scores)
# save model to HDF5
model.save('model.h5')
print("Saved model to disk")
| 25.767123
| 81
| 0.747475
| 306
| 1,881
| 4.509804
| 0.418301
| 0.098551
| 0.056522
| 0.07971
| 0.176087
| 0.176087
| 0.176087
| 0.176087
| 0.153623
| 0.153623
| 0
| 0.034767
| 0.09782
| 1,881
| 72
| 82
| 26.125
| 0.778433
| 0.14673
| 0
| 0.2
| 0
| 0
| 0.111041
| 0.015056
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.18
| 0
| 0.18
| 0.14
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dd27bec72ba1ef4b5afcb916eaaa9109718bd5c
| 2,487
|
py
|
Python
|
detect_port_services.py
|
amir78729/penetration-test-project
|
c85376303ce0451e2e3a3150617484d5e6837168
|
[
"MIT"
] | 1
|
2022-02-04T19:29:18.000Z
|
2022-02-04T19:29:18.000Z
|
detect_port_services.py
|
amir78729/penetration-test-project
|
c85376303ce0451e2e3a3150617484d5e6837168
|
[
"MIT"
] | null | null | null |
detect_port_services.py
|
amir78729/penetration-test-project
|
c85376303ce0451e2e3a3150617484d5e6837168
|
[
"MIT"
] | null | null | null |
from socket import socket, gaierror, getservbyport, AF_INET, SOCK_STREAM, setdefaulttimeout
from tqdm import tqdm
from datetime import datetime
def detect_port_services(ip, range_start, range_end):
port_services = {}
port_detecting_progress = tqdm(range(range_start, range_end + 1))
try:
for port in port_detecting_progress:
port_detecting_progress.set_description('checking port {}'.upper().format(port))
setdefaulttimeout(2)
s = socket(AF_INET, SOCK_STREAM)
result = s.connect_ex((ip, port))
# trying to get more information about port service
try:
message = b'WhoAreYou'
s.send(message)
banner = s.recv(100)
s.close()
except IOError:
banner = b''
if result == 0:
service_name = getservbyport(port)
port_services.update({port: (service_name, banner.replace(b'\r\n', b'').decode('utf-8'))})
s.close()
log_port_services(ip, range_start, range_end, port_services)
except KeyboardInterrupt:
print("\ncanceled...".upper())
except gaierror:
print("\nHostname Could Not Be Resolved".upper())
return port_services
def log_port_services(ip, range_start, range_end, port_services):
try:
with open("results/result_port_services.txt", "a") as file:
file.write('@ {}'.upper().format(datetime.now()))
file.write('\nhost {} open ports\' services from {} to {}:'.upper().format(ip, range_start, range_end))
            for port in port_services:
                file.write('\n {}:\t{} {}'
                           .format(port,
                                   port_services[port][0].upper(),
                                   '' if not port_services[port][1] else '\n\t\t({})\n'
                                   .format(port_services[port][1])))
            if not port_services:
                file.write('\n× no open ports were found!'.upper())
file.write('\n----------------------------------------------------\n')
except FileNotFoundError:
print('PLEASE CREATE \"/results/result_detect_open_ports.txt\" AND TRY AGAIN.')
if __name__ == '__main__':
detect_port_services(
ip=input('TARGET IP ADDRESS: '),
range_start=int(input('START OF RANGE : ')),
range_end=int(input('END OF RANGE : ')),
)
| 38.859375
| 115
| 0.556494
| 281
| 2,487
| 4.725979
| 0.362989
| 0.135542
| 0.056476
| 0.067771
| 0.118976
| 0.103916
| 0.103916
| 0.103916
| 0.103916
| 0.070783
| 0
| 0.005744
| 0.29996
| 2,487
| 63
| 116
| 39.47619
| 0.756462
| 0.019702
| 0
| 0.098039
| 0
| 0
| 0.157225
| 0.052956
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039216
| false
| 0
| 0.058824
| 0
| 0.117647
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dd2a344fe4c04f0564d9da26c93b7f70200954e
| 14,829
|
py
|
Python
|
zvdata/apps/data_app.py
|
freedom6xiaobai/zvt
|
f4ba510a30f1014cc0e48b85370b0d3936bd851a
|
[
"MIT"
] | 1
|
2019-10-28T08:03:26.000Z
|
2019-10-28T08:03:26.000Z
|
zvdata/apps/data_app.py
|
freedom6xiaobai/zvt
|
f4ba510a30f1014cc0e48b85370b0d3936bd851a
|
[
"MIT"
] | null | null | null |
zvdata/apps/data_app.py
|
freedom6xiaobai/zvt
|
f4ba510a30f1014cc0e48b85370b0d3936bd851a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
from collections import OrderedDict
from typing import List
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import pandas as pd
from dash import dash
from dash.dependencies import Input, Output, State
from zvdata import IntervalLevel
from zvdata.app import app
from zvdata.chart import Drawer
from zvdata.domain import global_providers, get_schemas, get_schema_by_name, get_schema_columns
from zvdata.normal_data import NormalData, IntentType
from zvdata.reader import DataReader
from zvdata.utils.pd_utils import df_is_not_null
from zvdata.utils.time_utils import now_pd_timestamp, TIME_FORMAT_DAY
current_df = None
layout = html.Div(
[
html.Div(
[
# provider selector
dcc.Dropdown(
id='provider-selector',
placeholder='select provider',
options=[{'label': provider, 'value': provider} for provider in
global_providers]),
# schema selector
dcc.Dropdown(id='schema-selector', placeholder='select schema'),
# level selector
dcc.Dropdown(id='level-selector', placeholder='select level',
options=[{'label': level.value, 'value': level.value} for level in
IntervalLevel],
value=IntervalLevel.LEVEL_1DAY.value),
# column selector
html.Div(id='schema-column-selector-container', children=None),
dcc.Dropdown(
id='properties-selector',
options=[
{'label': 'undefined', 'value': 'undefined'}
],
value='undefined',
multi=True
),
# codes filter
dcc.Input(id='input-code-filter', type='text', placeholder='input codes',
style={'width': '400px'}),
# time range filter
dcc.DatePickerRange(
id='date-picker-range',
start_date='2009-01-01',
end_date=now_pd_timestamp(),
display_format=TIME_FORMAT_DAY
),
# load data for table
html.Button('load data', id='btn-load-data', n_clicks_timestamp=0),
# table container
html.Div(id='data-table-container', children=None),
# selected properties
html.Label('setting y_axis and chart type for the columns:'),
# col setting container
html.Div(id='col-setting-container', children=dash_table.DataTable(
id='col-setting-table',
columns=[
{'id': 'property', 'name': 'property', 'editable': False},
{'id': 'y_axis', 'name': 'y_axis', 'presentation': 'dropdown'},
{'id': 'chart', 'name': 'chart', 'presentation': 'dropdown'}
],
dropdown={
'y_axis': {
'options': [
{'label': i, 'value': i}
for i in ['y1', 'y2', 'y3', 'y4', 'y5']
]
},
'chart': {
'options': [
{'label': chart_type.value, 'value': chart_type.value}
for chart_type in NormalData.get_charts_by_intent(IntentType.compare_self)
]
}
},
editable=True
), ),
html.Div(id='table-type-label', children=None),
html.Div(
[
html.Div([dcc.Dropdown(id='intent-selector')],
style={'width': '50%', 'display': 'inline-block'}),
html.Div([dcc.Dropdown(id='chart-selector')],
style={'width': '50%', 'display': 'inline-block'})
]
),
html.Div(id='chart-container', children=None)
])
]
)
@app.callback(
Output('schema-selector', 'options'),
[Input('provider-selector', 'value')])
def update_schema_selector(provider):
if provider:
return [{'label': schema.__name__, 'value': schema.__name__} for schema in
get_schemas(provider=provider)]
raise dash.exceptions.PreventUpdate()
@app.callback(
Output('schema-column-selector-container', 'children'),
[Input('schema-selector', 'value')],
state=[State('provider-selector', 'value')])
def update_column_selector(schema_name, provider):
if provider and schema_name:
schema = get_schema_by_name(name=schema_name)
cols = get_schema_columns(schema=schema)
return dcc.Dropdown(
id='schema-column-selector',
options=[
{'label': col, 'value': col} for col in cols
],
value=get_schema_by_name(name=schema_name).important_cols(),
multi=True
)
raise dash.exceptions.PreventUpdate()
@app.callback(
[Output('properties-selector', 'options'),
Output('properties-selector', 'value')],
[Input('schema-column-selector', 'value')],
state=[State('provider-selector', 'value'),
State('schema-selector', 'value'),
State('properties-selector', 'options'),
State('properties-selector', 'value')])
def update_selected_properties(selected_cols, provider, schema_name, options, value):
if selected_cols and provider and schema_name:
current_options = options
current_value = value
added_labels = []
added_values = []
for col in selected_cols:
added_labels.append(col)
added_values.append(
json.dumps({
'provider': provider,
'schema': schema_name,
'column': col
}))
added_options = [{'label': col, 'value': added_values[i]} for i, col in enumerate(added_labels)]
if 'undefined' in value:
current_options = []
current_value = []
current_options += added_options
current_value += added_values
return current_options, current_value
raise dash.exceptions.PreventUpdate()
def properties_to_readers(properties, level, codes, start_date, end_date) -> List[DataReader]:
provider_schema_map_cols = {}
for prop in properties:
provider = prop['provider']
schema_name = prop['schema']
key = (provider, schema_name)
if key not in provider_schema_map_cols:
provider_schema_map_cols[key] = []
provider_schema_map_cols[key].append(prop['column'])
readers = []
for item, columns in provider_schema_map_cols.items():
provider = item[0]
schema_name = item[1]
schema = get_schema_by_name(schema_name)
readers.append(DataReader(data_schema=schema, provider=provider, codes=codes, level=level,
columns=columns, start_timestamp=start_date, end_timestamp=end_date,
time_field=schema.time_field()))
return readers
@app.callback(
[Output('data-table-container', 'children'),
Output('col-setting-table', 'data'),
Output('table-type-label', 'children'),
Output('intent-selector', 'options'),
Output('intent-selector', 'value')],
[Input('btn-load-data', 'n_clicks')],
state=[State('properties-selector', 'value'),
State('level-selector', 'value'),
State('input-code-filter', 'value'),
State('date-picker-range', 'start_date'),
State('date-picker-range', 'end_date')])
def update_data_table(n_clicks, properties, level, codes: str, start_date, end_date):
if n_clicks and properties:
props = []
for prop in properties:
props.append(json.loads(prop))
readers = properties_to_readers(properties=props, level=level, codes=codes, start_date=start_date,
end_date=end_date)
if readers:
data_df = readers[0].data_df
for reader in readers[1:]:
if df_is_not_null(reader.data_df):
data_df = data_df.join(reader.data_df, how='outer')
global current_df
current_df = data_df
if not df_is_not_null(current_df):
                return 'no data, please reselect!', [], '', [
                    {'label': 'compare_self', 'value': 'compare_self'}], 'compare_self'
normal_data = NormalData(current_df)
data_table = Drawer(data=normal_data).draw_data_table(id='data-table-content')
# generate col setting table
properties = normal_data.data_df.columns.to_list()
df = pd.DataFrame(OrderedDict([
('property', properties),
('y_axis', ['y1'] * len(properties)),
('chart', ['line'] * len(properties))
]))
# generate intents
intents = normal_data.get_intents()
intent_options = [
{'label': intent.value, 'value': intent.value} for intent in intents
]
intent_value = intents[0].value
return data_table, df.to_dict('records'), normal_data.get_table_type(), intent_options, intent_value
else:
            return 'no data, please reselect!', [], '', [
                {'label': 'compare_self', 'value': 'compare_self'}], 'compare_self'
raise dash.exceptions.PreventUpdate()
@app.callback(
[Output('chart-selector', 'options'),
Output('chart-selector', 'value')],
[Input('intent-selector', 'value')])
def update_chart_selector(intent):
if intent:
charts = NormalData.get_charts_by_intent(intent=intent)
options = [
{'label': chart.value, 'value': chart.value} for chart in charts
]
value = charts[0].value
return options, value
raise dash.exceptions.PreventUpdate()
operators_df = [['ge ', '>='],
['le ', '<='],
['lt ', '<'],
['gt ', '>'],
['ne ', '!='],
['eq ', '='],
['contains '],
['datestartswith ']]
operators_sql = [['>= ', '>='],
['<= ', '<='],
['< ', '<'],
['> ', '>'],
['!= ', '!='],
['== ', '='],
['contains '],
['datestartswith ']]
def split_filter_part(filter_part, operators=operators_df):
for operator_type in operators:
for operator in operator_type:
if operator in filter_part:
name_part, value_part = filter_part.split(operator, 1)
name = name_part[name_part.find('{') + 1: name_part.rfind('}')]
value_part = value_part.strip()
v0 = value_part[0]
if (v0 == value_part[-1] and v0 in ("'", '"', '`')):
value = value_part[1: -1].replace('\\' + v0, v0)
else:
try:
value = float(value_part)
except ValueError:
value = value_part
# word operators need spaces after them in the filter string,
# but we don't want these later
return name, operator_type[0].strip(), value
return [None] * 3
@app.callback(
[Output('data-table-content', "data"),
Output('chart-container', "children")],
[Input('data-table-content', "page_current"),
Input('data-table-content', "page_size"),
Input('data-table-content', "sort_by"),
Input('data-table-content', "filter_query"),
Input('intent-selector', "value"),
Input('chart-selector', "value"),
Input('col-setting-table', 'data'),
Input('col-setting-table', 'columns')])
def update_table_and_graph(page_current, page_size, sort_by, filter, intent, chart, rows, columns):
if chart:
property_map = {}
for row in rows:
property_map[row['property']] = {
'y_axis': row['y_axis'],
'chart': row['chart']
}
dff = current_df
if filter:
filtering_expressions = filter.split(' && ')
for filter_part in filtering_expressions:
col_name, operator, filter_value = split_filter_part(filter_part)
if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
# these operators match pandas series operator method names
dff = dff.loc[getattr(dff[col_name], operator)(filter_value)]
elif operator == 'contains':
dff = dff.loc[dff[col_name].str.contains(filter_value)]
elif operator == 'datestartswith':
# this is a simplification of the front-end filtering logic,
# only works with complete fields in standard format
dff = dff.loc[dff[col_name].str.startswith(filter_value)]
# if sort_by:
# dff = dff.sort_values(
# [col['entity_id'] for col in sort_by],
# ascending=[
# col['direction'] == 'asc'
# for col in sort_by
# ],
# inplace=False
# )
if intent in (IntentType.compare_self.value, IntentType.compare_to_other.value):
graph_data, graph_layout = Drawer(NormalData(dff)).draw_compare(chart=chart, property_map=property_map,
render=None, keep_ui_state=False)
else:
graph_data, graph_layout = Drawer(NormalData(dff)).draw(chart=chart, property_map=property_map, render=None,
keep_ui_state=False)
table_data = dff.iloc[page_current * page_size: (page_current + 1) * page_size
].to_dict('records')
return table_data, \
dcc.Graph(
id='chart-content',
figure={
'data': graph_data,
'layout': graph_layout
}
)
raise dash.exceptions.PreventUpdate()
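# Running the app (sketch; assumes zvdata.app exposes the Dash instance used
# by the callbacks above):
# if __name__ == '__main__':
#     app.layout = layout
#     app.run_server(debug=True)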
| 36.796526
| 120
| 0.52458
| 1,470
| 14,829
| 5.104762
| 0.167347
| 0.025986
| 0.012127
| 0.025586
| 0.196962
| 0.102212
| 0.102212
| 0.057303
| 0.045842
| 0.033849
| 0
| 0.004586
| 0.353024
| 14,829
| 402
| 121
| 36.88806
| 0.777569
| 0.047947
| 0
| 0.139535
| 0
| 0
| 0.144216
| 0.009155
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026578
| false
| 0
| 0.059801
| 0
| 0.122924
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dd3506fa61a6efdbedcfd729d5128ff929686bf
| 4,333
|
py
|
Python
|
src/hmmmr/non_batched_functions.py
|
carojasq/HMMMR
|
f94846d8f02fe8993a0e5fb55e936dd1c1596187
|
[
"MIT"
] | null | null | null |
src/hmmmr/non_batched_functions.py
|
carojasq/HMMMR
|
f94846d8f02fe8993a0e5fb55e936dd1c1596187
|
[
"MIT"
] | 1
|
2019-11-01T08:32:04.000Z
|
2019-11-01T08:32:04.000Z
|
src/hmmmr/non_batched_functions.py
|
carojasq/HMMMR
|
f94846d8f02fe8993a0e5fb55e936dd1c1596187
|
[
"MIT"
] | 1
|
2019-04-05T00:06:31.000Z
|
2019-04-05T00:06:31.000Z
|
from common_libs import *
from cublas_functions import *
linalg.init()
def cublas_calculate_transpose_non_batched(h, a_gpu):
cublas_transpose = get_single_transpose_function(a_gpu)
m, k = a_gpu.shape
at_gpu = gpuarray.empty((k, m), a_gpu.dtype)
k, n = at_gpu.shape
# Calculate transpose
transa = transb = 't'
cublas_transpose(h, transa, transb, m, k, 1.0, a_gpu.gpudata, k, 0.0, a_gpu.gpudata, k, at_gpu.gpudata, m)
return at_gpu
# Matrix product, there is a batch equivalent for this function too
# Make sure it has 2 dimensions (use reshape in the case is 1d)
def cublas_matrix_product_gemm_non_batched(handle, a_gpu, b_gpu):
"""
:param handle:
    :param a_gpu: Be careful to pass X here
:param b_gpu: Xt should be here
:return:
"""
cublas_dot = get_single_dot_function(b_gpu)
    if len(a_gpu.shape) != 2 or len(b_gpu.shape) != 2:
raise ValueError('Make sure the arrays are 2 dimensional')
n, l = a_gpu.shape
k, m = b_gpu.shape
c_gpu = gpuarray.empty((n, m), b_gpu.dtype)
lda = max(1, a_gpu.strides[0] // a_gpu.dtype.itemsize)
ldb = max(1, b_gpu.strides[0] // b_gpu.dtype.itemsize)
ldc = max(1, c_gpu.strides[0] // c_gpu.dtype.itemsize)
alpha = np.float32(1.0)
beta = np.float32(0.0)
transa = transb = 'n'
cublas_dot(handle, transb, transa, m, n, k, alpha, b_gpu.gpudata, ldb, a_gpu.gpudata, lda, beta, c_gpu.gpudata, ldc)
return c_gpu
def cublas_matrix_product_gemm_batched(handle, as_gpu, bs_gpu):
    cublas_dot = get_batched_dot_function(as_gpu)
    if len(as_gpu.shape) != 2 or len(bs_gpu.shape) != 2:
        raise ValueError('Make sure the arrays are 2 dimensional')
    # n, z, l
    n, l = as_gpu.shape
    k, m = bs_gpu.shape
    c_gpu = gpuarray.empty((n, m), bs_gpu.dtype)
    lda = max(1, as_gpu.strides[0] // as_gpu.dtype.itemsize)
    ldb = max(1, bs_gpu.strides[0] // bs_gpu.dtype.itemsize)
    ldc = max(1, c_gpu.strides[0] // c_gpu.dtype.itemsize)
    alpha = np.float32(1.0)
    beta = np.float32(0.0)
    transa = transb = 'n'
    cublas_dot(handle, transb, transa, m, n, k, alpha, bs_gpu.gpudata, ldb, as_gpu.gpudata, lda, beta, c_gpu.gpudata, ldc)
    return c_gpu
"TODO: Fix this function, like linalg.inv"
def cublas_single_matrix_inversion_non_batched(h, a_gpu, overwrite=False, ipiv_gpu=None):
(cublas_getrf, bufsize, cublas_getrs) = get_single_inverse_function(a_gpu)
data_type = a_gpu.dtype
n = a_gpu.shape[0]
if ipiv_gpu is None:
ipiv_gpu = gpuarray.empty((n, 1), np.int32)
try:
in_gpu = a_gpu if overwrite else a_gpu.copy()
Lwork = bufsize(h, n, n, in_gpu.gpudata, n)
Work = gpuarray.empty(Lwork, data_type)
devInfo = gpuarray.empty(1, np.int32)
cublas_getrf(h, n, n, in_gpu.gpudata, n, Work.gpudata, ipiv_gpu.gpudata, devInfo.gpudata)
except cusolver.CUSOLVER_ERROR as e:
raise ValueError("Error while generating inverse of the matrix")
d = devInfo.get()[0]
if d != 0:
raise ValueError("Singular matrix or wrong params")
try:
b_gpu = linalg.eye(n, data_type)
cublas_getrs(h, cublas._CUBLAS_OP['n'], n, n,
in_gpu.gpudata, n, ipiv_gpu.gpudata, b_gpu.gpudata, n,
devInfo.gpudata)
# Since CUSOLVER's getrs functions save their output in b_gpu, we
# need to copy it back to the input matrix if overwrite is requested:
if overwrite:
a_gpu.set(b_gpu)
return a_gpu
else:
return b_gpu
except cusolver.CUSOLVER_ERROR as e:
raise "Error with cusolver {}".format(e.message)
return h
def calculate_regression_coeffs_non_batched(handle, x_gpu, y_gpu):
xt_gpu = cublas_calculate_transpose_non_batched(handle, x_gpu)
xtx_gpu = cublas_matrix_product_gemm_non_batched(handle, xt_gpu, x_gpu)
xty_gpu = cublas_matrix_product_gemm_non_batched(handle, xt_gpu, y_gpu)
# xtx_inv_gpu = cublas_single_matrix_inversion(handle, xtx_gpu)
xtx_inv_gpu = linalg.inv(xtx_gpu, lib="cusolver")
b_coefficients = cublas_matrix_product_gemm_non_batched(handle, xtx_inv_gpu, xty_gpu)
return b_coefficients
def calculate_predictions_from_model_non_batched(handle, x_gpu, b_coefficients_gpu):
return cublas_matrix_product_gemm_non_batched(handle, x_gpu, b_coefficients_gpu)
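# Usage sketch (illustrative; assumes common_libs re-exports pycuda.gpuarray
# as `gpuarray`, numpy as `np` and scikit-cuda's cublas module as `cublas`):
# handle = cublas.cublasCreate()
# x_gpu = gpuarray.to_gpu(np.random.rand(100, 3).astype(np.float32))
# y_gpu = gpuarray.to_gpu(np.random.rand(100, 1).astype(np.float32))
# coeffs = calculate_regression_coeffs_non_batched(handle, x_gpu, y_gpu)
# cublas.cublasDestroy(handle)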
| 41.663462
| 120
| 0.686591
| 710
| 4,333
| 3.930986
| 0.209859
| 0.038696
| 0.045862
| 0.049445
| 0.460408
| 0.398424
| 0.393049
| 0.331781
| 0.291652
| 0.291652
| 0
| 0.014252
| 0.206554
| 4,333
| 104
| 121
| 41.663462
| 0.797557
| 0.103162
| 0
| 0.320988
| 0
| 0
| 0.058396
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.024691
| 0.012346
| 0.197531
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dd862d583434b6ed73a9e6519551c5f6c54561e
| 1,575
|
py
|
Python
|
examples/run_fieldtrip_IF.py
|
annapasca/ephypype
|
6dbacdd6913234a28b690b401862ff062accecc7
|
[
"BSD-3-Clause"
] | 18
|
2018-04-18T12:14:52.000Z
|
2022-02-25T19:31:44.000Z
|
examples/run_fieldtrip_IF.py
|
annapasca/ephypype
|
6dbacdd6913234a28b690b401862ff062accecc7
|
[
"BSD-3-Clause"
] | 106
|
2017-12-09T13:34:30.000Z
|
2022-03-12T01:02:17.000Z
|
examples/run_fieldtrip_IF.py
|
annapasca/ephypype
|
6dbacdd6913234a28b690b401862ff062accecc7
|
[
"BSD-3-Clause"
] | 13
|
2017-05-28T20:38:56.000Z
|
2022-03-06T15:58:02.000Z
|
"""
.. _ft_seeg_example:
=========================================
Apply bipolar montage to depth electrodes
=========================================
This script shows a very simple example of how to create an Interface wrapping
a desired function of a Matlab toolbox (|FieldTrip|).
.. |FieldTrip| raw:: html
<a href="http://www.fieldtriptoolbox.org/" target="_blank">FieldTrip</a>
The **input** data should be a **.mat** file containing a FieldTrip data struct
"""
# Authors: Annalisa Pascarella <a.pascarella@iac.cnr.it>
# License: BSD (3-clause)
import os.path as op
import ephypype
from ephypype.nodes.FT_tools import Reference
from ephypype.datasets import fetch_ieeg_dataset
###############################################################################
# Let us fetch the data first. It is around 675 MB download.
base_path = op.join(op.dirname(ephypype.__file__), '..', 'examples')
data_path = fetch_ieeg_dataset(base_path)
ft_path = '/usr/local/MATLAB/R2018a/toolbox/MEEG/fieldtrip-20200327/'
refmethod = 'bipolar'
channels_name = '{\'RAM*\', \'RHH*\', \'RTH*\', \'ROC*\', \'LAM*\',\'LHH*\', \'LTH*\'}' # noqa
# Now we call the interface Reference to apply a bipolar montage to sEEG data
reference_if = Reference()
reference_if.inputs.data_file = op.join(data_path, 'SubjectUCI29_data.mat')
reference_if.inputs.channels = channels_name
reference_if.inputs.ft_path = ft_path
reference_if.inputs.refmethod = refmethod
reference_if.inputs.script = ''
out = reference_if.run()
print('Rereferenced data saved at {}'.format(out.outputs.data_output))
| 32.8125
| 95
| 0.665397
| 205
| 1,575
| 4.956098
| 0.55122
| 0.075787
| 0.083661
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012866
| 0.111746
| 1,575
| 47
| 96
| 33.510638
| 0.713367
| 0.43619
| 0
| 0
| 0
| 0
| 0.180678
| 0.097867
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.235294
| 0
| 0.235294
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9dda3faed30d9ee945694fcad8f057ec177bc507
| 6,568
|
py
|
Python
|
rak_net/protocol/handler.py
|
L0RD-ZER0/aio-rak-net
|
0ec0b6ac4daf6a4b146ac94ac2d0313c13975363
|
[
"MIT"
] | 1
|
2021-12-02T04:37:08.000Z
|
2021-12-02T04:37:08.000Z
|
rak_net/protocol/handler.py
|
L0RD-ZER0/aio-rak-net
|
0ec0b6ac4daf6a4b146ac94ac2d0313c13975363
|
[
"MIT"
] | null | null | null |
rak_net/protocol/handler.py
|
L0RD-ZER0/aio-rak-net
|
0ec0b6ac4daf6a4b146ac94ac2d0313c13975363
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import TYPE_CHECKING
from .packet import (
ConnectionRequest,
ConnectionRequestAccepted,
NewIncomingConnection,
OfflinePing,
OfflinePong,
OnlinePing,
OnlinePong,
OpenConnectionRequest1,
OpenConnectionReply1,
OpenConnectionRequest2,
OpenConnectionReply2,
IncompatibleProtocolVersion,
)
from .protocol_info import ProtocolInfo
from ..utils import InternetAddress
if TYPE_CHECKING:
from ..server import Server
__all__ = 'Handler',
class Handler:
"""
Class containing various handler methods to handle packets
:param server: Server for which handler is intended
"""
__slots__ = 'server',
def __init__(self, server: Server):
self.server = server
async def handle_connection_request(self, data: bytes, address: InternetAddress, *, server: Server = None) -> bytes:
"""
Handler to handle `Connection-Request`
:param data: data of the packet
:param address: :class:`InternetAddress` of the packet
        :param server: Optional server to use the handler with, defaults to ``self.server``
:return: returns the processed data
"""
server = server or self.server
packet: ConnectionRequest = ConnectionRequest(data)
packet.decode()
new_packet: ConnectionRequestAccepted = ConnectionRequestAccepted()
new_packet.client_address = address
new_packet.system_index = 0
new_packet.server_guid = server.guid
new_packet.system_addresses = [InternetAddress("255.255.255.255", 19132)] * 20
new_packet.request_timestamp = server.get_time_ms()
new_packet.encode()
return new_packet.data
async def handle_connection_request_accepted(self, data: bytes, address: InternetAddress, *, server: Server = None) -> bytes:
"""
Handler to handle `Connection-Request-Accepted`
:param data: data of the packet
:param address: :class:`InternetAddress` of the packet
        :param server: Optional server to use the handler with, defaults to ``self.server``
:return: returns the processed data
"""
server = server or self.server
packet: ConnectionRequestAccepted = ConnectionRequestAccepted(data)
packet.decode()
new_packet: NewIncomingConnection = NewIncomingConnection()
new_packet.server_address = address
new_packet.system_addresses = packet.system_addresses
new_packet.request_timestamp = packet.accepted_timestamp
new_packet.accepted_timestamp = server.get_time_ms()
new_packet.encode()
return new_packet.data
async def handle_offline_ping(self, data: bytes, address: InternetAddress = None, *, server: Server = None) -> bytes:
"""
Handler to handle `Offline-Ping`
:param data: data of the packet
:param address: :class:`InternetAddress` of the packet
        :param server: Optional server to use the handler with, defaults to ``self.server``
:return: returns the processed data
"""
server = server or self.server
packet: OfflinePing = OfflinePing(data)
packet.decode()
new_packet: OfflinePong = OfflinePong()
new_packet.client_timestamp = packet.client_timestamp
new_packet.server_guid = server.guid
new_packet.magic = ProtocolInfo.MAGIC
new_packet.server_name = server.name if hasattr(server, "name") else ""
new_packet.encode()
return new_packet.data
async def handle_online_ping(self, data: bytes, address: InternetAddress = None, *, server: Server = None) -> bytes:
"""
Handler to handle `Online-Ping`
:param data: data of the packet
:param address: :class:`InternetAddress` of the packet
        :param server: Optional server to use the handler with, defaults to ``self.server``
:return: returns the processed data
"""
server = server or self.server
packet: OnlinePing = OnlinePing(data)
packet.decode()
new_packet: OnlinePong = OnlinePong()
new_packet.client_timestamp = packet.client_timestamp
new_packet.server_timestamp = server.get_time_ms()
new_packet.encode()
return new_packet.data
async def handle_open_connection_request_1(self, data: bytes, address: InternetAddress = None, *, server: Server = None) -> bytes:
"""
Handler to handle `Open-Connection-Request-1`
:param data: data of the packet
:param address: :class:`InternetAddress` of the packet
        :param server: Optional server to use the handler with, defaults to ``self.server``
:return: returns the processed data
"""
server = server or self.server
packet: OpenConnectionRequest1 = OpenConnectionRequest1(data)
packet.decode()
if packet.protocol_version == server.protocol_version:
new_packet: OpenConnectionReply1 = OpenConnectionReply1()
new_packet.magic = ProtocolInfo.MAGIC
new_packet.server_guid = server.guid
new_packet.use_security = False
new_packet.mtu_size = packet.mtu_size
else:
new_packet: IncompatibleProtocolVersion = IncompatibleProtocolVersion()
new_packet.protocol_version = server.protocol_version
new_packet.magic = ProtocolInfo.MAGIC
new_packet.server_guid = server.guid
new_packet.encode()
return new_packet.data
async def handle_open_connection_request_2(self, data: bytes, address: InternetAddress = None, *, server: Server = None) -> bytes:
"""
Handler to handle `Open-Connection-Request-2`
:param data: data of the packet
:param address: :class:`InternetAddress` of the packet
        :param server: Optional server to use the handler with, defaults to ``self.server``
:return: returns the processed data
"""
server = server or self.server
packet: OpenConnectionRequest2 = OpenConnectionRequest2(data)
packet.decode()
new_packet: OpenConnectionReply2 = OpenConnectionReply2()
new_packet.magic = ProtocolInfo.MAGIC
new_packet.server_guid = server.guid
new_packet.client_address = address
new_packet.mtu_size = packet.mtu_size
new_packet.use_encryption = False
new_packet.encode()
await server.add_connection(address, packet.mtu_size)
return new_packet.data
| 40.294479
| 134
| 0.676309
| 711
| 6,568
| 6.075949
| 0.136428
| 0.095833
| 0.030556
| 0.044444
| 0.664583
| 0.615509
| 0.615509
| 0.588657
| 0.544444
| 0.544444
| 0
| 0.007262
| 0.24528
| 6,568
| 162
| 135
| 40.54321
| 0.864232
| 0.0169
| 0
| 0.397959
| 0
| 0
| 0.006943
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010204
| false
| 0
| 0.061224
| 0
| 0.153061
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9ddc3d1e0254e6926c024e8ba5ff8037971f9673
| 5,434
|
py
|
Python
|
software/pynguin/pynguin/testcase/execution/monkeytypeexecutor.py
|
se2p/artifact-pynguin-ssbse2020
|
32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6
|
[
"CC-BY-4.0"
] | 3
|
2020-08-20T10:27:13.000Z
|
2021-11-02T20:28:16.000Z
|
software/pynguin/pynguin/testcase/execution/monkeytypeexecutor.py
|
se2p/artifact-pynguin-ssbse2020
|
32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6
|
[
"CC-BY-4.0"
] | null | null | null |
software/pynguin/pynguin/testcase/execution/monkeytypeexecutor.py
|
se2p/artifact-pynguin-ssbse2020
|
32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6
|
[
"CC-BY-4.0"
] | null | null | null |
# This file is part of Pynguin.
#
# Pynguin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pynguin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pynguin. If not, see <https://www.gnu.org/licenses/>.
"""An executor that executes a test under the inspection of the MonkeyType tool."""
import contextlib
import logging
import os
import sys
from typing import Any, Dict, Iterable, List, Optional
import astor
from monkeytype.config import DefaultConfig
from monkeytype.db.base import CallTraceStore, CallTraceThunk
from monkeytype.encoding import CallTraceRow, serialize_traces
from monkeytype.tracing import CallTrace, CallTraceLogger, CallTracer
import pynguin.configuration as config
import pynguin.testcase.execution.executioncontext as ctx
import pynguin.testcase.testcase as tc
class _MonkeyTypeCallTraceStore(CallTraceStore):
def __init__(self):
self._values: Dict[str, Any] = {}
def add(self, traces: Iterable[CallTrace]) -> None:
for row in serialize_traces(traces):
self._values[row.module] = (
row.qualname,
row.arg_types,
row.return_type,
row.yield_type,
)
def filter(
self, module: str, qualname_prefix: Optional[str] = None, limit: int = 2000
) -> List[CallTraceThunk]:
result: List[CallTraceThunk] = []
for stored_module, row in self._values.items():
is_qualname = qualname_prefix is not None and qualname_prefix in row[0]
if stored_module == module or is_qualname:
result.append(
CallTraceRow(
module=module,
qualname=row[0],
arg_types=row[1],
return_type=row[2],
yield_type=row[3],
)
)
return result if len(result) < limit else result[:limit]
@classmethod
def make_store(cls, connection_string: str) -> "CallTraceStore":
return cls()
def list_modules(self) -> List[str]:
return [k for k, _ in self._values.items()]
class _MonkeyTypeCallTraceLogger(CallTraceLogger):
def __init__(self) -> None:
self._traces: List[CallTrace] = []
def log(self, trace: CallTrace) -> None:
self._traces.append(trace)
@property
def traces(self) -> List[CallTrace]:
"""Provides the collected traces"""
return self._traces
class _MonkeyTypeConfig(DefaultConfig):
def trace_store(self) -> CallTraceStore:
return _MonkeyTypeCallTraceStore()
def trace_logger(self) -> CallTraceLogger:
return _MonkeyTypeCallTraceLogger()
# pylint:disable=too-few-public-methods
class MonkeyTypeExecutor:
"""An executor that executes a test under the inspection of the MonkeyType tool."""
_logger = logging.getLogger(__name__)
def __init__(self):
""""""
self._config = _MonkeyTypeConfig()
self._tracer = CallTracer(
logger=self._config.trace_logger(),
code_filter=self._config.code_filter(),
sample_rate=self._config.sample_rate(),
)
self._call_traces: List[CallTrace] = []
def execute(self, test_cases: List[tc.TestCase]) -> List[CallTrace]:
"""Execute the given test cases."""
with open(os.devnull, mode="w") as null_file:
with contextlib.redirect_stdout(null_file):
for test_case in test_cases:
exec_ctx = ctx.ExecutionContext(test_case)
self._execute_ast_nodes(exec_ctx)
self._filter_and_append_call_traces()
return self._call_traces
def _execute_ast_nodes(self, exec_ctx: ctx.ExecutionContext):
for node in exec_ctx.executable_nodes():
try:
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug("Executing %s", astor.to_source(node))
code = compile(node, "<ast>", "exec")
sys.setprofile(self._tracer)
# pylint: disable=exec-used
exec(code, exec_ctx.global_namespace, exec_ctx.local_namespace) # nosec
except BaseException as err: # pylint: disable=broad-except
failed_stmt = astor.to_source(node)
self._logger.info(
"Fatal! Failed to execute statement with MonkeyType\n%s%s",
failed_stmt,
err.args,
)
break
finally:
sys.setprofile(None)
def _filter_and_append_call_traces(self) -> None:
assert isinstance(self._tracer.logger, _MonkeyTypeCallTraceLogger)
module_name = config.INSTANCE.module_name
for trace in self._tracer.logger.traces:
func_name = trace.funcname
if func_name.startswith(module_name):
self._call_traces.append(trace)
| 37.219178
| 88
| 0.636916
| 623
| 5,434
| 5.370787
| 0.346709
| 0.012552
| 0.010759
| 0.017035
| 0.083084
| 0.068141
| 0.058577
| 0.038255
| 0.038255
| 0.038255
| 0
| 0.002548
| 0.277696
| 5,434
| 145
| 89
| 37.475862
| 0.849936
| 0.177033
| 0
| 0.019608
| 0
| 0
| 0.020777
| 0
| 0
| 0
| 0
| 0
| 0.009804
| 1
| 0.137255
| false
| 0
| 0.127451
| 0.039216
| 0.382353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9ddca262545e263f1aa26d015f1d96948d664c84
| 7,778
|
py
|
Python
|
testproject/testapp/tests/test_history_entries.py
|
innovationinit/django-wicked-historian
|
bef0011639791e2275c6bf2272b57542174b4cf0
|
[
"BSD-2-Clause"
] | null | null | null |
testproject/testapp/tests/test_history_entries.py
|
innovationinit/django-wicked-historian
|
bef0011639791e2275c6bf2272b57542174b4cf0
|
[
"BSD-2-Clause"
] | null | null | null |
testproject/testapp/tests/test_history_entries.py
|
innovationinit/django-wicked-historian
|
bef0011639791e2275c6bf2272b57542174b4cf0
|
[
"BSD-2-Clause"
] | 1
|
2022-03-15T07:29:58.000Z
|
2022-03-15T07:29:58.000Z
|
"Test history entries for migrated, obsolete fields"
from datetime import (
time,
timedelta,
)
from decimal import Decimal
from typing import (
Any,
Dict,
)
from django.contrib.auth.models import User
from django.db import models
from wicked_historian.usersmuggler import usersmuggler
from wicked_historian.utils import FieldDescription
from testapp.factories import BookFactory
from testapp.models import (
Author,
Book,
BookEditHistory,
Language,
OBSOLETE_BOOK_FIELD_CHOICES,
)
from .base import FreezeTimeTestCase
class GettingHistoryEntriesForChangedFieldsTestCase(FreezeTimeTestCase):
UNKNOWN_FIELD_ID = 'unknown_field_id'
def setUp(self):
super().setUp()
# test languages
self.languages = {
'english': Language.objects.create(name='english'),
'polish': Language.objects.create(name='polish'),
}
# test authors
self.authors = {
'william_shakespeare': Author.objects.create(name='William Shakespeare'),
'john_paul_ii': Author.objects.create(name='John Paul II'),
'nostradamus': Author.objects.create(name='Nostradamus'),
}
self.user = User.objects.create(username='john.smith')
with usersmuggler.set_user(self.user):
self.book = BookFactory( # type: Book
title='Macbeth',
issue_year=1603,
language=self.languages['english'],
has_pictures=False,
literary_period=2,
date_of_publication=(self.frozen_time + timedelta(days=1)).date(),
moment_of_appearance_on_torrents=self.frozen_time + timedelta(hours=1),
ebook_length=timedelta(days=1, hours=3, minutes=12, seconds=7),
number_of_downloads_on_torrents=1223372036854775808,
encrypted_book=b'some_data',
cash_lost_because_of_piracy=Decimal('666666666.66'),
plain_text='foo',
first_download_hour=time(hour=1),
)
self.book.authors.set([self.authors['william_shakespeare']])
self.book = Book.objects.get(pk=self.book.pk) # just to reset any instance attributes used for creating history
self.field_choices_by_name = {description.name: description for description in BookEditHistory.FIELDS_DESCRIPTIONS}
self.obsolete_field_by_name = {description.name: description for description in OBSOLETE_BOOK_FIELD_CHOICES}
BookEditHistory.objects.all().delete()
def test_unknown_field(self):
self.create_fake_history_entry(
self.UNKNOWN_FIELD_ID,
old_value=1603,
new_value=2018,
)
with self.assertRaises(BookEditHistory.UnknownFieldException):
BookEditHistory.get_for(self.book)
def test_deleted_field_with_choices(self):
self.create_fake_history_entry(
self.obsolete_field_by_name['age'].id,
old_value=1,
new_value=2,
)
history_entry = self.get_last_history_entry(self.book)
self.assertDictEqual(history_entry, {
'change_date': self.frozen_time,
'user': self.user,
'field_verbose_name': 'age',
'old_value': 'XV',
'new_value': 'XIX',
})
def test_deleted_char_field(self):
self.create_fake_history_entry(
self.obsolete_field_by_name['description'].id,
old_value='abc',
new_value='xyz',
)
history_entry = self.get_last_history_entry(self.book)
self.assertDictEqual(history_entry, {
'change_date': self.frozen_time,
'user': self.user,
'field_verbose_name': 'description',
'old_value': 'abc',
'new_value': 'xyz',
})
def test_deleted_foreign_key_field(self):
william_shakespeare = {'pk': self.authors['william_shakespeare'].pk, 'str': str(self.authors['william_shakespeare'])}
john_paul_ii = {'pk': self.authors['john_paul_ii'].pk, 'str': str(self.authors['john_paul_ii'])}
self.create_fake_history_entry(
self.obsolete_field_by_name['author'].id,
old_value=william_shakespeare,
new_value=john_paul_ii,
)
history_entry = self.get_last_history_entry(self.book)
self.assertDictEqual(history_entry, {
'change_date': self.frozen_time,
'user': self.user,
'field_verbose_name': 'author',
'old_value': william_shakespeare,
'new_value': john_paul_ii,
})
def test_deleted_many_to_many_field(self):
english = {'pk': self.languages['english'].pk, 'str': str(self.languages['english'])}
polish = {'pk': self.languages['polish'].pk, 'str': str(self.languages['polish'])}
self.create_fake_history_entry(
self.obsolete_field_by_name['languages'].id,
old_value=[english],
new_value=[english, polish]
)
history_entry = self.get_last_history_entry(self.book)
self.assertDictEqual(history_entry, {
'change_date': self.frozen_time,
'user': self.user,
'field_verbose_name': 'languages',
'old_value': [english],
'new_value': [english, polish]
})
def test_different_id_for_different_type_with_the_same_name(self):
first = FieldDescription('description', models.TextField())
second = FieldDescription('description', models.CharField())
third = FieldDescription('description', models.CharField(max_length=50))
self.assertNotEqual(first.id, second.id)
self.assertEqual(second.id, third.id)
def test_changed_from_string_to_int(self):
self.create_fake_history_entry(
self.field_choices_by_name['issue_year'].id,
old_value='MDCIII',
new_value='MMXVIII'
)
history_entry = self.get_last_history_entry(self.book)
self.assertDictEqual(history_entry, {
'change_date': self.frozen_time,
'user': self.user,
'field_verbose_name': 'issue year',
'old_value': 'MDCIII',
'new_value': 'MMXVIII'
})
def test_presence_of_field_names_on_fields_descriptions_list(self):
field_names = {description.name for description in BookEditHistory.FIELDS_DESCRIPTIONS}
self.assertEqual(field_names, {
'age',
'author',
'authors',
'book_shelf_slot',
'cash_lost_because_of_piracy',
'date_of_publication',
'description',
'ebook_length',
'encrypted_book',
'first_download_hour',
'has_pictures',
'id',
'issue_number',
'issue_year',
'language',
'languages',
'literary_period',
'moment_of_appearance_on_torrents',
'number_of_downloads_on_torrents',
'number_of_pages',
'plain_text',
'text_as_pdf',
'title',
'pirates',
'printers',
'chapter_set',
})
@staticmethod
def get_last_history_entry(book: Book) -> Dict[str, Any]:
return BookEditHistory.get_for(book)[0]
def create_fake_history_entry(self, field: str, old_value: Any, new_value: Any) -> BookEditHistory:
return BookEditHistory.objects.create(**{
'model': self.book,
'user': self.user,
'change_date': self.frozen_time,
'field': field,
'old_value': old_value,
'new_value': new_value
})
| 36.862559
| 125
| 0.613525
| 832
| 7,778
| 5.426683
| 0.217548
| 0.06113
| 0.060244
| 0.034109
| 0.397785
| 0.314507
| 0.278627
| 0.235216
| 0.204208
| 0.184718
| 0
| 0.009993
| 0.279506
| 7,778
| 210
| 126
| 37.038095
| 0.795682
| 0.019799
| 0
| 0.18617
| 0
| 0
| 0.147829
| 0.011733
| 0
| 0
| 0
| 0
| 0.047872
| 1
| 0.058511
| false
| 0
| 0.053191
| 0.010638
| 0.132979
| 0.005319
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9ddcebb4e8c7a0186684b52cc9c2d36af16dce87 | 12,639 | py | Python | mmdetection/third_party/text_perceptron/mmdet/models/seg_heads/tp_head.py | chengzhanzhan/DAVAR-Lab-OCR | 79776915c616731698d452d935e7b599b1ce46f0 | ["Apache-2.0"] | 4 | 2021-07-08T03:08:16.000Z | 2022-03-20T02:53:29.000Z | (the max_issues and max_forks columns repeat the same path, repo, and hexsha, with null counts and dates) |
"""
####################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : tp_head.py
# Abstract : Text Perceptron head structure, mainly including losses for segmentation part and regression part
# Current Version: 1.0.0
# Author : Liang Qiao
# Date : 2020-05-31
# Modified Date : 2020-11-26
# Modified by : inusheng
# Comments : Code and comment standardized
######################################################################################################
"""
import numpy as np
import torch
import torch.nn as nn
from mmdet.models.builder import build_loss
from mmdet.models.registry import HEADS
from mmdet.ops import ConvModule
from mmdet.core import force_fp32, auto_fp16


def make_one_hot(input_tensor, num_classes):
"""
Description:
convert a feature map of shape [N, 1, H, W] into its one-hot encoding version of shape [N, C, H, W],
where C is the number of classes.
Arguments:
input_tensor: input tensor, [N, 1, *]
num_classes : the number of classes of feature maps
Returns:
one-hot encoding of input tensor, [N, num_classes, *]
"""
input_tensor = input_tensor[:, np.newaxis, ::]
shape = np.array(input_tensor.shape)
shape[1] = num_classes
shape = tuple(shape)
result = torch.zeros(shape)
result = result.scatter_(1, input_tensor.cpu(), 1).to(input_tensor.device)
return result
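# Worked example (a sketch, not part of the original file):
#     labels = torch.tensor([[[0, 1], [2, 4]]])        # shape [1, 2, 2], int64
#     one_hot = make_one_hot(labels, num_classes=5)    # shape [1, 5, 2, 2]
# channel c of `one_hot` is 1 exactly where `labels == c`, e.g.
#     one_hot[0, 1] -> [[0., 1.], [0., 0.]]
#     one_hot[0, 4] -> [[0., 0.], [0., 1.]]
# note: the index tensor must have an integer (long) dtype for scatter_.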


@HEADS.register_module
class TPHead(nn.Module):
"""
Description:
Text Perceptron head structure,
this head is used for further feature extraction and generate loss wrt ground-truth labels.
Arguments:
in_channels : the number of channels of input feature maps
conv_out_channels: the number of channels of output feature maps
conv_cfg : configuration of conv filters
norm_cfg : configuration of normalization
loss_seg : segmentation loss
loss_reg_head : regression loss of head area
loss_reg_tail : regression loss of tail area
        loss_reg_bond    : regression loss of top and bottom boundary area
"""

    def __init__(self,
in_channels=256,
conv_out_channels=256,
conv_cfg=None,
norm_cfg=None,
loss_seg=None,
loss_reg_head=None,
loss_reg_bond=None,
loss_reg_tail=None,
):
super().__init__()
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
assert loss_seg is not None
self.loss_seg = build_loss(loss_seg)
self.loss_reg_head = loss_reg_head
self.loss_reg_bond = loss_reg_bond
self.loss_reg_tail = loss_reg_tail
if loss_reg_head is not None:
self.loss_reg_head = build_loss(loss_reg_head)
if loss_reg_tail is not None:
self.loss_reg_tail = build_loss(loss_reg_tail)
if loss_reg_bond is not None:
self.loss_reg_bond = build_loss(loss_reg_bond)
# define extra conv filters for long text feature extraction
self.P4_conv = ConvModule(self.in_channels, self.conv_out_channels,
kernel_size=3, stride=1, padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.P4_1x7_conv = ConvModule(self.conv_out_channels,
self.conv_out_channels,
kernel_size=(1, 7), stride=(1, 1),
padding=(0, 3), conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.channel4_1x7_conv = ConvModule(self.in_channels,
self.conv_out_channels,
kernel_size=(1, 7), stride=(1, 1),
padding=(0, 3),
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.rpn4 = ConvModule(self.conv_out_channels, self.conv_out_channels,
3, padding=1, conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.seg_branch_conv = ConvModule(self.conv_out_channels,
self.conv_out_channels, 3, padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.reg_branch_conv = ConvModule(self.conv_out_channels,
self.conv_out_channels, 3, padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits_text = nn.Conv2d(self.conv_out_channels, 1, 1)
self.conv_logits_head = nn.Conv2d(self.conv_out_channels, 1, 1)
self.conv_logits_tail = nn.Conv2d(self.conv_out_channels, 1, 1)
self.conv_logits_bond = nn.Conv2d(self.conv_out_channels, 1, 1)
self.conv_regress_head = nn.Conv2d(self.conv_out_channels, 4, 1)
self.conv_regress_tail = nn.Conv2d(self.conv_out_channels, 4, 1)
self.conv_regress_bond = nn.Conv2d(self.conv_out_channels, 4, 1)
self.relu = nn.ReLU(inplace=True)
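    # Design note (inferred from the comments in this file, not authoritative):
    # the two (1, 7) kernels enlarge the receptive field horizontally, which
    # suits long, roughly horizontal text instances; forward() sums their
    # outputs with the plain 3x3 path before the shared rpn4 conv.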

    def init_weights(self):
"""
Description:
network parameters initialization
"""
for module in [self.conv_logits_text, self.conv_logits_head,
self.conv_logits_tail, self.conv_logits_bond,
                       self.conv_regress_bond, self.conv_regress_tail,
self.conv_regress_head]:
if module is None:
continue
nn.init.xavier_normal_(module.weight)
nn.init.constant_(module.bias, 0)

    @auto_fp16()
def forward(self, x):
"""
Description:
network forward pass
"""
# compute loss from 4x feature maps only
# you can add other supervisions on feature maps in terms of your compute resources
x_4 = x[0]
# extract long text feature
x_p4 = self.P4_conv(x_4)
x_4_1x7 = self.channel4_1x7_conv(x_4)
x_p4_1x7 = self.P4_1x7_conv(x_p4)
x_4 = x_p4_1x7 + x_p4 + x_4_1x7
x_4 = self.rpn4(x_4)
# generate predicted segmentation map
x_4_seg = self.seg_branch_conv(x_4)
score_text_pred = self.conv_logits_text(x_4_seg) # segmentation map for center area [N, 1, H, W]
score_head_pred = self.conv_logits_head(x_4_seg) # segmentation map for head area [N, 1, H, W]
score_tail_pred = self.conv_logits_tail(x_4_seg) # segmentation map for tail area [N, 1, H, W]
score_bond_pred = self.conv_logits_bond(x_4_seg) # segmentation map for top and bottom boundaries area [N, 1, H, W]
# generate predicted regression map
        x4_reg = self.reg_branch_conv(x_4)
reg_head_pred = self.conv_regress_head(x4_reg) # predicted regression map for head corner points [N, 4, H, W]
reg_tail_pred = self.conv_regress_tail(x4_reg) # predicted regression map for tail corner points [N, 4, H, W]
        reg_bond_pred = self.conv_regress_bond(x4_reg)  # predicted regression map for top and bottom boundaries [N, 4, H, W]
return score_text_pred, score_head_pred, score_tail_pred, score_bond_pred, reg_head_pred, reg_tail_pred, reg_bond_pred
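    # Shape sketch (hypothetical sizes; `head` is an already-built TPHead):
    #     feats = [torch.randn(2, 256, 64, 64)]   # 4x FPN level, [N, C, H, W]
    #     outs = head(feats)
    #     # outs[0:4] -> four [2, 1, 64, 64] score maps (text/head/tail/bond)
    #     # outs[4:7] -> three [2, 4, 64, 64] regression maps (head/tail/bond)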

    def get_target(self, gt_masks):
"""
Description:
generate ground-truth labels
Arguments:
gt_masks : input ground-truth labels
                gt_masks[:, 0]     : gt_score_map
                gt_masks[:, 1]     : gt_score_map_mask, 1 Care / 0 Not Care
                gt_masks[:, 2:6]   : gt_geo_map_head
                gt_masks[:, 6:10]  : gt_geo_map_head_weight
                gt_masks[:, 10:14] : gt_geo_map_tail
                gt_masks[:, 14:18] : gt_geo_map_tail_weight
                gt_masks[:, 18:22] : gt_geo_map_bond
                gt_masks[:, 22:26] : gt_geo_map_bond_weight
Returns:
score_text_target : one-hot encoding of segmentation map ground-truth of center area of shape [N, 1, H, W]
score_head_target : one-hot encoding of segmentation map ground-truth of head area of shape [N, 1, H, W]
score_tail_target : one-hot encoding of segmentation map ground-truth of tail area of shape [N, 1, H, W]
score_bond_target : one-hot encoding of segmentation map ground-truth of top and bottom boundaries, [N, 1, H, W]
score_map_masks_target : mask of segmentation map ground-truth, [N, 1, H, W]
geo_head_target : ground-truth of head corner points regression, [N, 4, H, W]
geo_head_weights_target: weights of ground-truth of head regression, [N, 4, H, W]
            geo_tail_target        : ground-truth of tail corner points regression, [N, 4, H, W]
geo_tail_weights_target: weights of ground-truth of tail regression, [N, 4, H, W]
geo_bond_target : ground-truth of top and bottom boundaries regression, [N, 4, H, W]
geo_bond_weights_target: weights of ground-truth of top and bottom boundaries regression, [N, 4, H, W]
"""
assert len(gt_masks[0]) == 26
score_map_target = gt_masks[:, 0, :, :].long()
score_map_masks_target = gt_masks[:, 1, :, :].float()
geo_head_target = gt_masks[:, 2:6, :, :]
geo_head_weights_target = gt_masks[:, 6:10, :, :]
geo_tail_target = gt_masks[:, 10:14, :, :]
geo_tail_weights_target = gt_masks[:, 14:18, :, :]
geo_bond_target = gt_masks[:, 18:22, :, :]
geo_bond_weights_target = gt_masks[:, 22:, :, :]
# convert into one-hot encodings
score_map_one_hot = make_one_hot(score_map_target, 5).float()
score_text_target = score_map_one_hot[:, 1: 2, :, :]
score_head_target = score_map_one_hot[:, 2: 3, :, :]
score_tail_target = score_map_one_hot[:, 3: 4, :, :]
score_bond_target = score_map_one_hot[:, 4: 5, :, :]
return score_text_target, score_head_target, score_tail_target, score_bond_target, score_map_masks_target,\
geo_head_target, geo_head_weights_target, geo_tail_target, geo_tail_weights_target, geo_bond_target,\
geo_bond_weights_target
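    # Index sketch (mirrors the tuple order returned above; `head` and
    # `gt_masks` are hypothetical):
    #     targets = head.get_target(gt_masks)      # gt_masks: [N, 26, H, W]
    #     score_text_target = targets[0]           # [N, 1, H, W]
    #     geo_bond_weights_target = targets[10]    # [N, 4, H, W]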

    @force_fp32(apply_to=('mask_pred',))
    def loss(self, mask_pred, mask_targets):
        """
        Description:
            compute segmentation losses and (optionally) regression losses
            from the forward predictions and the ground-truth targets.
        """
score_text_pred, score_head_pred, score_tail_pred, score_bond_pred, reg_head_pred, reg_tail_pred, reg_bond_pred = mask_pred
score_text_target, score_head_target, score_tail_target, score_bond_target, score_map_masks_target, \
geo_head_target, geo_head_weights_target, geo_tail_target, geo_tail_weights_target, geo_bond_target, \
geo_bond_weights_target = mask_targets
loss = dict()
# compute segmentation loss
loss["loss_seg_text"] = self.loss_seg(score_text_pred, score_text_target, weight=score_map_masks_target)
loss["loss_seg_head"] = self.loss_seg(score_head_pred, score_head_target, weight=score_map_masks_target)
loss["loss_seg_tail"] = self.loss_seg(score_tail_pred, score_tail_target, weight=score_map_masks_target)
loss["loss_seg_bond"] = self.loss_seg(score_bond_pred, score_bond_target, weight=score_map_masks_target)
# compute regression loss
if self.loss_reg_head is not None:
loss_reg_head = self.loss_reg_head(reg_head_pred, geo_head_target,
weight=geo_head_weights_target)
loss["loss_reg_head"] = loss_reg_head
if self.loss_reg_tail is not None:
loss_reg_tail = self.loss_reg_tail(reg_tail_pred, geo_tail_target,
weight=geo_tail_weights_target)
loss["loss_reg_tail"] = loss_reg_tail
if self.loss_reg_bond is not None:
loss_reg_bond = self.loss_reg_bond(reg_bond_pred, geo_bond_target,
weight=geo_bond_weights_target)
loss["loss_reg_bond"] = loss_reg_bond
return loss
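# Training-loop sketch (not from this file; mmdet-style heads are typically
# driven like this, with the loss dict reduced to one scalar):
#     losses = head.loss(head(feats), head.get_target(gt_masks))
#     total = sum(loss_value for loss_value in losses.values())
#     total.backward()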
| avg_line_length 48.240458 | max_line_length 131 | alphanum_fraction 0.600047 | num_words 1,692 | num_chars 12,639 | mean_word_length 4.14539 | num_lines 261 | remaining qsc_* quality-signal columns: small fractions, zeros, or null |