hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
89d9e48927ec828fd9208dc357f86ea67a28a09c | 793 | py | Python | scripts/add_come.py | belamu/kanthaus.online | de84010a77e60156cbefb8e014ac6290540ded69 | [
"CC0-1.0",
"MIT"
] | 6 | 2018-09-03T15:48:19.000Z | 2021-09-27T12:04:04.000Z | scripts/add_come.py | belamu/kanthaus.online | de84010a77e60156cbefb8e014ac6290540ded69 | [
"CC0-1.0",
"MIT"
] | 13 | 2017-12-25T20:44:37.000Z | 2020-10-30T09:37:10.000Z | scripts/add_come.py | belamu/kanthaus.online | de84010a77e60156cbefb8e014ac6290540ded69 | [
"CC0-1.0",
"MIT"
] | 14 | 2018-01-05T19:54:40.000Z | 2021-03-24T10:16:31.000Z | #!/usr/bin/env python3
# Usage: ./scripts/add_come.py
url = "https://codi.kanthaus.online/come/download"
import urllib.request
with urllib.request.urlopen(url) as response:
    markdown = response.read().decode()
import yaml
parts = markdown.split('---')
frontmatter = parts[1]
frontmatter = yaml.safe_load(frontmatter)
date = frontmatter['date']
destination_directory = 'user/pages/40.governance/90.minutes/{}_CoMe'.format(date)
import os
import sys
if os.path.isdir(destination_directory):
    print(destination_directory, 'already exists! Exiting...')
    sys.exit(1)
os.mkdir(destination_directory)
destination_file = os.path.join(destination_directory, 'item.md')
with open(destination_file, 'w+') as f:
    f.write(markdown)
print('Done! Type `git status` to see the changes!')
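# Worked example of the frontmatter split above (values illustrative, not from
# a real pad): a pad beginning "---\ndate: 2019-01-07\n---\n# Minutes..." makes
# parts[1] == "\ndate: 2019-01-07\n", so yaml.safe_load() yields a mapping whose
# 'date' entry formats to 2019-01-07 and the minutes land in
# user/pages/40.governance/90.minutes/2019-01-07_CoMe/item.md.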
| 27.344828 | 82 | 0.741488 | 108 | 793 | 5.351852 | 0.638889 | 0.17301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009943 | 0.112232 | 793 | 28 | 83 | 28.321429 | 0.81108 | 0.063052 | 0 | 0 | 0 | 0 | 0.22942 | 0.05803 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89da59d4dd7ad791f2d39f92732c1becceee0d21 | 918 | py | Python | classifiers/VGGFace2/helper_scripts/vggface2_80_dataset_script.py | johannaSommer/adversarial_relighting | 6749a8b4a60e83b0907550d771643921bcfdea9e | [
"MIT"
] | 1 | 2021-08-28T15:27:09.000Z | 2021-08-28T15:27:09.000Z | classifiers/VGGFace2/helper_scripts/vggface2_80_dataset_script.py | johannaSommer/adversarial_relighting | 6749a8b4a60e83b0907550d771643921bcfdea9e | [
"MIT"
] | 1 | 2022-03-03T03:24:00.000Z | 2022-03-03T03:24:00.000Z | classifiers/VGGFace2/helper_scripts/vggface2_80_dataset_script.py | johannaSommer/adversarial_relighting | 6749a8b4a60e83b0907550d771643921bcfdea9e | [
"MIT"
] | 1 | 2021-08-28T15:27:11.000Z | 2021-08-28T15:27:11.000Z | import os
from shutil import copy

path_new = "../../../data/vggface2-80"
path_old = "../../../datasets/vggface2/train"

# Create destination folder
if not os.path.exists(path_new):
    os.makedirs(path_new)

subfolders = [entry for entry in os.scandir(path_old) if entry.is_dir()][0:80]
for dir in subfolders:
    folder_name = dir.name
    copy_count = 0
    # Create new folder for identity
    identity_folder = os.path.join(path_new, folder_name)
    if not os.path.exists(identity_folder):
        os.makedirs(identity_folder)
    for entry in (os.scandir(dir)):
        if entry.is_file():
            copy(entry.path, os.path.join(path_new, folder_name, entry.name))
            copy_count += 1
        if copy_count >= 100:
            break
    if copy_count < 100:
        print(f"Identity '{folder_name}' does not have 100 images. It has {copy_count}")
print("Successfully finished copying!")
| 27.818182 | 88 | 0.655773 | 133 | 918 | 4.368421 | 0.345865 | 0.060241 | 0.024096 | 0.037866 | 0.216867 | 0.092943 | 0.092943 | 0 | 0 | 0 | 0 | 0.02521 | 0.222222 | 918 | 32 | 89 | 28.6875 | 0.788515 | 0.061002 | 0 | 0 | 0 | 0 | 0.182771 | 0.066356 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89dbb17620026a1abd4d449a3a6748a5cdc88ef7 | 3,935 | py | Python | Linked Lists/Python/doubly_linked_list/main.py | tayfunkscu/data-structures | 374bb24f543cb729e4d4a0acc62260c9c4330c0e | [
"MIT"
] | null | null | null | Linked Lists/Python/doubly_linked_list/main.py | tayfunkscu/data-structures | 374bb24f543cb729e4d4a0acc62260c9c4330c0e | [
"MIT"
] | null | null | null | Linked Lists/Python/doubly_linked_list/main.py | tayfunkscu/data-structures | 374bb24f543cb729e4d4a0acc62260c9c4330c0e | [
"MIT"
] | null | null | null | import time


class DoublyLinkedList:
    def __init__(self):
        self.head = None
        self.current = None

    def display(self):
        it = self.head
        while it is not None:
            print(it.value, end=" ")
            it = it.next
        print("")

    def displayReverse(self):
        it = self.current
        while it is not self.head:
            print(it.value, end=" ")
            it = it.prev
        print(it.value, end=" ")
        print("")

    def insert(self, value):  # standard insertion; appends at the end of the list
        if self.head is None:
            self.head = Node(value)
            self.head.next = None
            self.head.prev = None
            self.current = self.head
            return
        else:
            temp = self.current
            self.current.next = Node(value)
            self.current = self.current.next
            self.current.prev = temp

    def insertTo(self, index, value):
        counter = 0
        it = self.head
        if index == 0:
            temp = Node(self.head.value)
            temp.next = self.head.next
            self.head.next.prev = temp
            self.head.next = temp
            temp.prev = self.head
            self.head.value = value
            return
        while counter != index - 1:
            it = it.next
            counter += 1
        temp = Node(value)
        temp.next = it.next
        it.next.prev = temp
        it.next = temp
        temp.prev = it

    def insertInOrder(self, value):  # sorted insertion
        if self.head is None:
            self.head = Node(value)
            return
        if value < self.head.value:
            # insert before the head by copying the old head value forward
            temp = Node(self.head.value)
            temp.next = self.head.next
            if temp.next is not None:
                temp.next.prev = temp
            temp.prev = self.head
            self.head.next = temp
            self.head.value = value
            return
        it = self.head
        while it.next is not None and it.next.value < value:
            it = it.next
        temp = Node(value)
        temp.next = it.next
        it.next = temp
        temp.prev = it
        if temp.next is not None:
            temp.next.prev = temp

    def deleteNode(self, value):
        it = self.head
        if self.head.value == value:  # case where the head itself is deleted
            self.head = self.head.next
            if self.head is not None:
                self.head.prev = None
            return
        while it.next is not None and it.next.value != value:  # stops at the node just before the value to delete
            it = it.next
        if it.next is None:
            print("Value not found")
            return
        it.next = it.next.next
        if it.next is not None:
            it.next.prev = it

    def deleteNodeAt(self, index):  # delete the element at the given index
        counter = 0
        it = self.head
        if index == 0:
            self.head = self.head.next
            if self.head is not None:
                self.head.prev = None
            return
        while counter != index - 1:
            it = it.next
            counter += 1
        it.next = it.next.next
        if it.next is not None:
            it.next.prev = it


class Node:
    def __init__(self, value):
        self.value = value
        self.next = None
        self.prev = None


def test1():
    begin = time.time()
    dList = DoublyLinkedList()
    for i in range(10000000):
        dList.insert(i)
    t1 = time.time()
    insertion = t1 - begin
    for i in range(10000000):
        dList.deleteNode(i)
    end = time.time()
    deletion = end - t1
    elapsed = end - begin
    print("Insertion : {:.4f} s".format(insertion))
    print("Deletion : {:.4f} s".format(deletion))
    print("Time taken: {:.4f} s ".format(elapsed))


def test2():
    dList = DoublyLinkedList()
    num = [5, 6, 7, 10, -5, 20, 3, 2, -10]
    for i in range(len(num)):
        dList.insertInOrder(num[i])
    dList.display()


def test3():
    dList = DoublyLinkedList()
    num = [5, 6, 7, 10, -5, 20, 3]
    for i in range(len(num)):
        dList.insert(num[i])
    dList.display()
    dList.insertTo(3, 50)
    dList.display()
    dList.deleteNodeAt(3)
    dList.insertTo(0, 22)
    dList.display()


if __name__ == '__main__':
    test1()
    # test2()
    # test3()
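# With the head-insertion fix in insertInOrder above, test2() should print the
# inputs in ascending order: -10 -5 2 3 5 6 7 10 20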
| 23.011696 | 95 | 0.520712 | 486 | 3,935 | 4.183128 | 0.176955 | 0.118052 | 0.047221 | 0.031481 | 0.413674 | 0.315789 | 0.256763 | 0.235121 | 0.209543 | 0.111166 | 0 | 0.025765 | 0.368742 | 3,935 | 170 | 96 | 23.147059 | 0.792673 | 0.040915 | 0 | 0.488189 | 0 | 0 | 0.022836 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.094488 | false | 0 | 0.007874 | 0 | 0.165354 | 0.070866 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89dc83bef82356a8e78989258d5bfb95205eaf7c | 2,075 | py | Python | cw_persistent_bugger.py | yiyinghsieh/python-algorithms-data-structures | 26879343d3d83ca431946f8a92def2bc8672f807 | [
"BSD-2-Clause"
] | null | null | null | cw_persistent_bugger.py | yiyinghsieh/python-algorithms-data-structures | 26879343d3d83ca431946f8a92def2bc8672f807 | [
"BSD-2-Clause"
] | null | null | null | cw_persistent_bugger.py | yiyinghsieh/python-algorithms-data-structures | 26879343d3d83ca431946f8a92def2bc8672f807 | [
"BSD-2-Clause"
] | null | null | null | """Codewars: Persistent Bugger
6 kyu
URL: https://www.codewars.com/kata/55bf01e5a717a0d57e0000ec/train/python
Write a function, persistence, that takes in a positive parameter num
and returns its multiplicative persistence, which is the number of
times you must multiply the digits in num until you reach a single
digit.
For example:
persistence(39) # returns 3, because 3*9=27, 2*7=14, 1*4=4
# and 4 has only one digit
persistence(999) # returns 4, because 9*9*9=729, 7*2*9=126,
# 1*2*6=12, and finally 1*2=2
persistence(4) # returns 0, because 4 is already a one-dig
"""
def persistence1(n):
multi_num = 1
count = 1
len_n = len(str(n)) # length, size
if len_n < 2:
return count - 1
elif len_n >= 2:
for len_num in range(len_n):
multi_num = multi_num * int(str(n)[len_num])
while len(str(multi_num)) >= 2:
new_multi_num = multi_num
multi_num = 1
count += 1
for len_num in range(len(str(new_multi_num))):
multi_num = multi_num * int(str(new_multi_num)[len_num])
return count
def _multiply(n_str):
# Helper of persistence2(): Compute multiplication of n.
multiply = 1
for c in n_str:
multiply *= int(c)
return multiply
def persistence2(n):
n_str = str(n)
length = len(n_str)
# Edge case for n's lenght is 1.
if length == 1:
return 0
counter = 0
while length >= 2:
# Multiply n and increment counter.
n_persist = _multiply(n_str)
counter += 1
# Update n and its length.
n_str = str(n_persist)
length = len(n_str)
return counter
def main():
# # Output: 3
# n = 39
# print(persistence(n))
assert persistence1(39) == 3
assert persistence1(4) == 0
assert persistence1(25) == 2
assert persistence1(999) == 4
assert persistence2(39) == 3
assert persistence2(4) == 0
assert persistence2(25) == 2
assert persistence2(999) == 4
if __name__ == '__main__':
main()
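# Worked example: persistence of 25 is 2, because 2*5 = 10 (one step) and then
# 1*0 = 0 (second step) reaches a single digit -- matching the asserts above.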
| 22.802198 | 72 | 0.612048 | 310 | 2,075 | 3.964516 | 0.319355 | 0.071603 | 0.052889 | 0.065094 | 0.115541 | 0.09764 | 0.043938 | 0 | 0 | 0 | 0 | 0.074798 | 0.284819 | 2,075 | 90 | 73 | 23.055556 | 0.753369 | 0.389398 | 0 | 0.090909 | 0 | 0 | 0.006395 | 0 | 0 | 0 | 0 | 0 | 0.181818 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.204545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89dd2be94e22bee24353b0eddb7d7b32ae0dc12a | 5,953 | py | Python | tests/test_vocab.py | kmkurn/text2array | 910c349fcbd85299b137c132f218a26860211a6b | [
"Apache-2.0"
] | null | null | null | tests/test_vocab.py | kmkurn/text2array | 910c349fcbd85299b137c132f218a26860211a6b | [
"Apache-2.0"
] | null | null | null | tests/test_vocab.py | kmkurn/text2array | 910c349fcbd85299b137c132f218a26860211a6b | [
"Apache-2.0"
] | 1 | 2021-02-27T08:53:59.000Z | 2021-02-27T08:53:59.000Z | from typing import Iterable, MutableMapping

from tqdm import tqdm  # type: ignore
import pytest

from text2array import StringStore, Vocab


class TestFromSamples:
    @staticmethod
    def from_samples(ss, **kwargs):
        return Vocab.from_samples(ss, pbar=tqdm(disable=True), **kwargs)

    def test_ok(self):
        ss = [{"w": "c"}, {"w": "b"}, {"w": "a"}, {"w": "b"}, {"w": "c"}, {"w": "c"}]
        vocab = self.from_samples(ss)
        assert isinstance(vocab, MutableMapping)
        assert len(vocab) == 1
        assert list(vocab) == ["w"]
        assert isinstance(vocab["w"], StringStore)
        assert vocab["w"].default == "<unk>"
        assert list(vocab["w"]) == "<unk> c b a".split()
        vocab["ws"] = StringStore()
        assert set(vocab) == {"w", "ws"}
        del vocab["ws"]
        assert list(vocab) == ["w"]

    def test_has_vocab_for_all_str_fields(self):
        ss = [{"w": "b", "t": "b"}, {"w": "b", "t": "b"}]
        vocab = self.from_samples(ss)
        assert vocab.get("w") is not None
        assert vocab.get("t") is not None

    def test_no_vocab_for_non_str(self):
        vocab = self.from_samples([{"i": 10}, {"i": 20}])
        with pytest.raises(KeyError) as exc:
            vocab["i"]
        assert "no vocabulary found for field name 'i'" in str(exc.value)

    def test_seq(self):
        ss = [{"ws": ["a", "c", "c"]}, {"ws": ["b", "c"]}, {"ws": ["b"]}]
        vocab = self.from_samples(ss)
        assert list(vocab["ws"]) == "<pad> <unk> c b a".split()

    def test_seq_of_seq(self):
        ss = [
            {"cs": [["c", "d"], ["a", "d"]]},
            {"cs": [["c"], ["b"], ["b", "d"]]},
            {"cs": [["d", "c"]]},
        ]
        vocab = self.from_samples(ss)
        assert list(vocab["cs"]) == "<pad> <unk> d c b a".split()

    def test_empty_field_values(self):
        vocab = self.from_samples([{"w": []}])
        with pytest.raises(KeyError):
            vocab["w"]

    def test_min_count(self):
        ss = [
            {"w": "c", "t": "c"},
            {"w": "b", "t": "b"},
            {"w": "a", "t": "a"},
            {"w": "b", "t": "b"},
            {"w": "c", "t": "c"},
            {"w": "c", "t": "c"},
        ]
        vocab = self.from_samples(ss, options={"w": dict(min_count=3)})
        assert "a" not in vocab["w"]
        assert "b" not in vocab["w"]
        assert "c" in vocab["w"]
        assert "a" in vocab["t"]
        assert "b" in vocab["t"]
        assert "c" in vocab["t"]

    def test_no_unk(self):
        vocab = self.from_samples([{"w": "a", "t": "a"}], options={"w": dict(unk=None)})
        assert vocab["w"].default is None
        assert "<unk>" not in vocab["w"]
        assert "<unk>" in vocab["t"]

    def test_no_pad(self):
        vocab = self.from_samples([{"w": ["a"], "t": ["a"]}], options={"w": dict(pad=None)})
        assert "<pad>" not in vocab["w"]
        assert "<pad>" in vocab["t"]

    def test_max_size(self):
        ss = [
            {"w": "c", "t": "c"},
            {"w": "b", "t": "b"},
            {"w": "a", "t": "a"},
            {"w": "b", "t": "b"},
            {"w": "c", "t": "c"},
            {"w": "c", "t": "c"},
        ]
        vocab = self.from_samples(ss, options={"w": dict(max_size=1)})
        assert "a" not in vocab["w"]
        assert "b" not in vocab["w"]
        assert "c" in vocab["w"]
        assert "a" in vocab["t"]
        assert "b" in vocab["t"]
        assert "c" in vocab["t"]

    def test_iterator_is_passed(self):
        ss = [
            {"ws": ["b", "c"], "w": "c"},
            {"ws": ["c", "b"], "w": "c"},
            {"ws": ["c"], "w": "c"},
        ]
        vocab = self.from_samples(iter(ss))
        assert "b" in vocab["ws"]
        assert "c" in vocab["ws"]
        assert "c" in vocab["w"]


class TestStoi:
    def test_samples_to_indices(self):
        ss = [
            {"ws": ["a", "c", "c"], "i": 1},
            {"ws": ["b", "c"], "i": 2},
            {"ws": ["b"], "i": 3},
        ]
        vocab = Vocab({"ws": StringStore("abc")})
        ss_ = vocab.stoi(ss)
        assert isinstance(ss_, Iterable)
        assert list(ss_) == [
            {"ws": [0, 2, 2], "i": 1},
            {"ws": [1, 2], "i": 2},
            {"ws": [1], "i": 3},
        ]

    def test_value_is_not_str(self):
        ss = [{"ws": [0, 1, 2]}]
        vocab = Vocab({"ws": StringStore("abc")})
        assert list(vocab.stoi(ss)) == ss


class TestItos:
    def test_samples_to_strings(self):
        ss = [
            {"ws": ["a", "c", "c"], "i": 1},
            {"ws": ["b", "c"], "i": 2},
            {"ws": ["b"], "i": 3},
        ]
        vocab = Vocab({"ws": StringStore("abc")})
        assert list(vocab.itos(vocab.stoi(ss))) == ss

    def test_value_is_str(self):
        ss = [{"ws": ["a", "b", "c"]}]
        vocab = Vocab({"ws": StringStore("abc")})
        assert list(vocab.itos(ss)) == ss


class TestExtend:
    def test_ok(self):
        vocab = Vocab(
            {"ws": StringStore("abc"), "ds": StringStore("123"), "l": StringStore("p")}
        )
        vocab.extend(
            [
                {"ws": list("cbd"), "ds": list("221"), "l": "p"},
                {"ws": list("abe"), "ds": list("33"), "l": "n", "cs": list("XYZ")},
            ]
        )
        assert "cs" not in vocab
        assert list(vocab["ws"]) == list("abcde")
        assert list(vocab["ds"]) == list("123")
        assert list(vocab["l"]) == list("pn")

    def test_selected_field_names(self):
        vocab = Vocab(
            {"ws": StringStore("abc"), "ds": StringStore("123"), "l": StringStore("p")}
        )
        vocab.extend(
            [
                {"ws": list("cbd"), "ds": list("221"), "l": "p"},
                {"ws": list("abe"), "ds": list("33"), "l": "n", "cs": list("XYZ")},
            ],
            fields=["l"],
        )
        assert list(vocab["ws"]) == list("abc")
        assert list(vocab["l"]) == list("pn")
| 31.664894 | 92 | 0.436923 | 761 | 5,953 | 3.332457 | 0.144547 | 0.055205 | 0.076893 | 0.086751 | 0.519322 | 0.466088 | 0.415221 | 0.366325 | 0.320978 | 0.295741 | 0 | 0.011191 | 0.324542 | 5,953 | 187 | 93 | 31.834225 | 0.619498 | 0.002016 | 0 | 0.386076 | 0 | 0 | 0.078633 | 0 | 0 | 0 | 0 | 0 | 0.278481 | 1 | 0.113924 | false | 0.006329 | 0.025316 | 0.006329 | 0.170886 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89de287bde4163591292e22935587e9251683f36 | 558 | py | Python | ai_guesses_number.py | dedeogluhu/python-beginner-projects | 612c0e9fa983d1915bebc1fbc0152aee0976d692 | [
"MIT"
] | null | null | null | ai_guesses_number.py | dedeogluhu/python-beginner-projects | 612c0e9fa983d1915bebc1fbc0152aee0976d692 | [
"MIT"
] | 1 | 2020-09-27T10:12:39.000Z | 2020-09-27T10:12:39.000Z | ai_guesses_number.py | dedeogluhu/python-beginner-projects | 612c0e9fa983d1915bebc1fbc0152aee0976d692 | [
"MIT"
] | null | null | null | import random

x = 0
y = 100
counter = 0
print("choose a number between 0-100 and let me guess your number")
while True:
    a = random.randint(int(x), int(y))
    counter += 1
    print("your number is", a, end=" ")
    guess = int(input("1.too low\n2.too high\n3.congratulations"))
    if guess == 3:
        print("thank you for playing the game\nit took", counter, "guesses to find your number")
        break
    elif guess == 2:
        # guess was too high, so the number lies strictly below it
        y = a - 1
    elif guess == 1:
        # guess was too low, so the number lies strictly above it
        x = a + 1
print("Game's Over")
| 17.4375 | 98 | 0.541219 | 82 | 558 | 3.682927 | 0.597561 | 0.099338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 0.340502 | 558 | 32 | 99 | 17.4375 | 0.777174 | 0 | 0 | 0 | 0 | 0 | 0.345259 | 0.041145 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89de4a6f60e2e4cdcdc3747c6ac5f4201051823c | 811 | py | Python | input_parameters.py | danush-95/1D-axisymmetric-element | 8db6e1f23eb5453da6cc9dba91dfc476ca87bfeb | [
"MIT"
] | null | null | null | input_parameters.py | danush-95/1D-axisymmetric-element | 8db6e1f23eb5453da6cc9dba91dfc476ca87bfeb | [
"MIT"
] | 2 | 2021-06-17T19:17:08.000Z | 2021-07-29T19:22:02.000Z | input_parameters.py | danush-95/1D-axisymmetric-element | 8db6e1f23eb5453da6cc9dba91dfc476ca87bfeb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: Danush

This file contains all the necessary input parameters for the infinite element
problem.
Any changes here reflect in the main.py file
"""
import numpy as np


def parameter_list():
    """This function returns all the input parameters
    n_e - no. of elements to be considered
    zetta - value for the time integration scheme"""
    # Input parameters
    E = 200000     # Young's Modulus in MPa
    mu = 0.20      # Poisson's ratio
    Q = 100000     # MPa
    T = 1          # sec
    a = 50         # mm
    b = 100        # mm
    p_max = 140    # MPa
    t_l = 2        # sec
    t_f = 10       # sec
    n_e = 10       # no. of elements
    zetta = 1/2    # Euler modified method
    del_t = 0.1    # delta time/time step in s
    return E, mu, Q, T, a, b, p_max, t_l, t_f, n_e, zetta, del_t
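# Typical call site in main.py (assumed from the docstring; main.py itself is
# not shown here):
# E, mu, Q, T, a, b, p_max, t_l, t_f, n_e, zetta, del_t = parameter_list()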
| 26.16129 | 81 | 0.601726 | 135 | 811 | 3.525926 | 0.592593 | 0.094538 | 0.05042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060498 | 0.307028 | 811 | 31 | 82 | 26.16129 | 0.786477 | 0.553637 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89e44893dd1ed37d0cacd918cd6953876b8082e3 | 5,401 | py | Python | sympy/core/compatibility.py | cielavenir/sympy | ada04faf48a4eb6c1529e8a5d49a6f2f9ce2616e | [
"BSD-3-Clause"
] | 2 | 2015-11-13T16:40:57.000Z | 2017-09-15T15:37:19.000Z | openrave/sympy/core/compatibility.py | jdsika/holy | a2ac55fa1751a3a8038cf61d29b95005f36d6264 | [
"MIT"
] | 1 | 2016-06-13T01:29:51.000Z | 2016-06-14T00:38:27.000Z | openrave/sympy/core/compatibility.py | jdsika/holy | a2ac55fa1751a3a8038cf61d29b95005f36d6264 | [
"MIT"
] | null | null | null | """
Reimplementations of constructs introduced in later versions of Python than we
support.
"""
# These are in here because telling if something is an iterable just by calling
# hasattr(obj, "__iter__") behaves differently in Python 2 and Python 3. In
# particular, hasattr(str, "__iter__") is False in Python 2 and True in Python 3.
# I think putting them here also makes it easier to use them in the core.
def iterable(i, exclude=(basestring, dict)):
"""
Return a boolean indicating whether i is an iterable in the sympy sense.
When sympy is working with iterables, it is almost always assuming
that the iterable is not a string or a mapping, so those are excluded
by default. If you want a pure python definition, make exclude=None. To
exclude multiple items, pass them as a tuple.
See also: is_sequence
Examples:
>>> from sympy.utilities.iterables import iterable
>>> from sympy import Tuple
>>> things = [[1], (1,), set([1]), Tuple(1), (j for j in [1, 2]), {1:2}, '1', 1]
>>> for i in things:
... print iterable(i), type(i)
True <type 'list'>
True <type 'tuple'>
True <type 'set'>
True <class 'sympy.core.containers.Tuple'>
True <type 'generator'>
False <type 'dict'>
False <type 'str'>
False <type 'int'>
>>> iterable({}, exclude=None)
True
>>> iterable({}, exclude=str)
True
>>> iterable("no", exclude=str)
False
"""
try:
iter(i)
except TypeError:
return False
if exclude:
return not isinstance(i, exclude)
return True
def is_sequence(i, include=None):
"""
Return a boolean indicating whether i is a sequence in the sympy
sense. If anything that fails the test below should be included as
being a sequence for your application, set 'include' to that object's
type; multiple types should be passed as a tuple of types.
Note: although generators can generate a sequence, they often need special
handling to make sure their elements are captured before the generator is
exhausted, so these are not included by default in the definition of a
sequence.
See also: iterable
Examples:
>>> from sympy.utilities.iterables import is_sequence
>>> from types import GeneratorType
>>> is_sequence([])
True
>>> is_sequence(set())
False
>>> is_sequence('abc')
False
>>> is_sequence('abc', include=str)
True
>>> generator = (c for c in 'abc')
>>> is_sequence(generator)
False
>>> is_sequence(generator, include=(str, GeneratorType))
True
"""
return (hasattr(i, '__getitem__') and
iterable(i) or
bool(include) and
isinstance(i, include))
"""
Wrapping some imports in try/except statements to allow the same code to
be used in Python 3+ as well.
"""
try:
callable = callable
except NameError:
import collections
def callable(obj):
return isinstance(obj, collections.Callable)
try:
from functools import reduce
except ImportError:
reduce = reduce
def cmp_to_key(mycmp):
"""
Convert a cmp= function into a key= function
This code is included in Python 2.7 and 3.2 in functools.
"""
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
try:
import __builtin__
cmp = __builtin__.cmp
except AttributeError:
def cmp(a,b):
return (a > b) - (a < b)
try:
from itertools import product
except ImportError: # Python 2.5
def product(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
try:
from itertools import permutations
except ImportError: # Python 2.5
def permutations(iterable, r=None):
# permutations('ABCD', 2) --> AB AC AD BA BC BD CA CB CD DA DB DC
# permutations(range(3)) --> 012 021 102 120 201 210
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = range(n)
cycles = range(n, n-r, -1)
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
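# Example use of cmp_to_key above (semantics match functools.cmp_to_key):
# sorted(['bb', 'a', 'ccc'], key=cmp_to_key(lambda x, y: cmp(len(x), len(y))))
# returns ['a', 'bb', 'ccc'].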
| 30.173184 | 84 | 0.595816 | 744 | 5,401 | 4.245968 | 0.326613 | 0.02849 | 0.02849 | 0.037987 | 0.156695 | 0.156695 | 0.11301 | 0.091485 | 0.091485 | 0.061728 | 0 | 0.021406 | 0.299389 | 5,401 | 178 | 85 | 30.342697 | 0.813425 | 0.460655 | 0 | 0.185185 | 0 | 0 | 0.006696 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17284 | false | 0 | 0.098765 | 0.098765 | 0.469136 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89e4ba7fcaf844dc87baaae6e1b45c22cb420a82 | 7,289 | py | Python | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/caffe/cf_SPnet_aichallenger_224_128_0.54G_1.3/code/test/test_single_person.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | 1 | 2020-12-18T14:49:19.000Z | 2020-12-18T14:49:19.000Z | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/caffe/cf_SPnet_aichallenger_224_128_0.54G_1.3/code/test/test_single_person.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | null | null | null | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/caffe/cf_SPnet_aichallenger_224_128_0.54G_1.3/code/test/test_single_person.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# PART OF THIS FILE AT ALL TIMES.

"""Test a regression network on ai challenger."""

import time
import math
import os.path as osp
import sys
import argparse
import numpy as np
import cv2


def add_path(path):
    if path not in sys.path:
        sys.path.insert(0, path)

this_dir = osp.dirname(__file__)

parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
parser.add_argument('--data', help='anno image path')
parser.add_argument('--caffe', help='load a model for training or evaluation')
parser.add_argument('--cpu', action='store_true', help='CPU ONLY')
parser.add_argument('--weights', help='weights path')
parser.add_argument('--model', help='model path')
parser.add_argument('--anno', help='anno file path')
parser.add_argument('--output', default='result/', help='output_path')
parser.add_argument('--name', help='output name, default eval', default='eval')
parser.add_argument('--input', help='input name in the first layer', default='image')
parser.add_argument('--width', default=128, type=int, help='width of input image')
parser.add_argument('--height', default=224, type=int, help='height of input image')
args = parser.parse_args()

# Add caffe to PYTHONPATH
caffe_path = osp.join(args.caffe, 'python')
add_path(caffe_path)
import caffe


class Config:
    def __init__(self):
        self.use_gpu = not args.cpu
        self.gpuID = args.gpu
        self.caffemodel = args.weights
        self.deployFile = args.model
        self.description_short = 'googlenet_regression'
        self.width = args.width    # 128
        self.height = args.height  # 224
        self.npoints = 14
        self.mean = [104, 117, 123]
        self.result_dir = args.output
        self.test_image_dir = args.data
        self.test_anno_file = args.anno


# 1: R_shoulder, 2: R_elbow, 3: R_wrist, 4: L_shoulder, 5: L_elbow, 6: L_wrist, 7: R_hip,
# 8: R_knee, 9: R_ankle, 10: L_hip, 11: L_knee, 12: L_ankle, 13: head, 14: neck
class Rect:
    def __init__(self, x, y, w, h):
        self.x = x
        self.y = y
        self.w = w
        self.h = h


def preprocess(img, param):
    img_out = cv2.resize(img, (param.width, param.height))
    img_out = np.float32(img_out)
    img_out[:, :, 0] = img_out[:, :, 0] - param.mean[0]
    img_out[:, :, 1] = img_out[:, :, 1] - param.mean[1]
    img_out[:, :, 2] = img_out[:, :, 2] - param.mean[2]
    # change H*W*C -> C*H*W
    return np.transpose(img_out, (2, 0, 1))


def applymodel(net, image, param):
    # Select parameters from param
    width = param.width
    height = param.height
    npoints = param.npoints
    imageToTest = preprocess(image, param)
    net.blobs['data'].data[...] = imageToTest.reshape((1, 3, height, width))
    net.forward()
    prediction = net.blobs['pred_coordinate'].data[0]
    pred_cooridnate = np.zeros((param.npoints, 2), dtype=np.float)
    for j in range(param.npoints):
        pred_cooridnate[j, 0] = prediction[j * 2]
        pred_cooridnate[j, 1] = prediction[j * 2 + 1]
    #pred_visible = net.blobs['pred_visible'].data[0]
    pred_visible = None
    return pred_cooridnate, pred_visible


def draw_joints_16(test_image, pred_coordinate, save_image, param):
    image = cv2.imread(test_image)
    joints = np.zeros(pred_coordinate.shape, dtype=np.int)
    for j in range(pred_coordinate.shape[0]):
        joints[j, 0] = int(round(pred_coordinate[j, 0] * image.shape[1] / param.width))
        joints[j, 1] = int(round(pred_coordinate[j, 1] * image.shape[0] / param.height))
    # draw joints in green spots
    for j in range(len(joints)):
        cv2.circle(image, (joints[j, 0], joints[j, 1]), 5, (0, 255, 0), 2)
    # draw torso in yellow lines
    torso = [[0, 6], [6, 9], [0, 13], [3, 13], [3, 9], [12, 13]]
    for item in torso:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]), (joints[item[1], 0], joints[item[1], 1]), (0, 0, 0), 2)
    # draw left part in pink lines
    lpart = [[3, 4], [4, 5], [9, 10], [10, 11]]
    for item in lpart:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]), (joints[item[1], 0], joints[item[1], 1]), (255, 0, 0), 2)
    # draw right part in blue lines
    rpart = [[0, 1], [1, 2], [6, 7], [7, 8]]
    for item in rpart:
        cv2.line(image, (joints[item[0], 0], joints[item[0], 1]), (joints[item[1], 0], joints[item[1], 1]), (0, 0, 255), 2)
    cv2.imwrite(save_image, image)


if __name__ == '__main__':
    param = Config()
    if param.use_gpu:
        caffe.set_mode_gpu()
        caffe.set_device(param.gpuID)
    net = caffe.Net(param.deployFile, param.caffemodel, caffe.TEST)
    net.name = param.description_short
    # test a folder
    test_image_dir = param.test_image_dir
    test_anno_file = open(param.test_anno_file, 'r')
    result_dir = param.result_dir
    lines = test_anno_file.readlines()
    precision = np.zeros(param.npoints)
    number = 0
    for i in range(len(lines)):
        print("image number: " + "%d" % i)
        info = lines[i].split("\n")[0].split(" ")
        test_image = test_image_dir + info[0]
        save_image = result_dir + info[0]
        image = cv2.imread(test_image)
        #print(test_image)
        if image is None:
            continue
        if len(image.shape) != 3:
            continue
        joints = [int(round(float(item))) for item in info[1:29]]
        weights = [int(round(float(item))) for item in info[29:57]]
        visible = [int(round(float(item))) for item in info[57:]]
        print(weights[1::2])
        # # draw gt
        # gt_cooridnate = np.zeros((param.npoints, 2), dtype=np.float)
        # for j in range(param.npoints):
        #     gt_cooridnate[j, 0] = joints[j * 2]
        #     gt_cooridnate[j, 1] = joints[j * 2 + 1]
        # draw_joints_16(test_image, gt_cooridnate, 'gt/' + info[0], param)
        pred_coordinate, pred_visible = applymodel(net, image, param)
        #draw_joints_16(test_image, pred_coordinate, save_image, param)
        #if param.npoints == 16:
        #    draw_joints_16(test_image, pred_coordinate, save_image, param)
        px = joints[::2]
        py = joints[1::2]
        threshold = math.sqrt((px[13]-px[12]) ** 2 + (py[13]-py[12]) ** 2)
        temp_precision = np.zeros(param.npoints)
        number += 1
        for j in range(len(px)):
            temp_precision[j] = math.sqrt((px[j]-pred_coordinate[j, 0]) ** 2 + (py[j]-pred_coordinate[j, 1]) ** 2) < 0.5 * threshold
            #if visible[j] == 2:
            if weights[j*2] == 0:
                temp_precision[j] = 1
        precision += temp_precision
        print(temp_precision)
    if number > 0:
        precision /= number
        print(precision)
        print(np.mean(precision))
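# Metric note: the threshold above is the head-neck distance (keypoints 13 and
# 14), and a joint is scored correct when its prediction falls within 0.5x that
# distance of the ground truth (a PCK-style accuracy); joints with zero weight
# are counted as correct so they do not penalize the score.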
| 35.906404 | 132 | 0.625463 | 1,098 | 7,289 | 4.02459 | 0.225865 | 0.02444 | 0.046164 | 0.023761 | 0.171985 | 0.140077 | 0.124689 | 0.124689 | 0.104322 | 0.104322 | 0 | 0.041379 | 0.224173 | 7,289 | 202 | 133 | 36.084158 | 0.740053 | 0.203732 | 0 | 0.030769 | 0 | 0 | 0.067767 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046154 | false | 0 | 0.061538 | 0 | 0.138462 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89eb41ae8485cac765ccc73868fbd5656b22ac73 | 8,045 | py | Python | riding_forecast.py | GooseDad/Test | 544555a2684a50b11c1de9aeb849872bd7502501 | [
"Apache-2.0"
] | null | null | null | riding_forecast.py | GooseDad/Test | 544555a2684a50b11c1de9aeb849872bd7502501 | [
"Apache-2.0"
] | null | null | null | riding_forecast.py | GooseDad/Test | 544555a2684a50b11c1de9aeb849872bd7502501 | [
"Apache-2.0"
] | null | null | null | import csv
import datetime
from scipy.stats import norm

from regional_poll_interpolator import RegionalPollInterpolator
import riding_poll_model

party_long_names = {
    'cpc': 'Conservative/Conservateur',
    'lpc': 'Liberal/Lib',
    'ndp': 'NDP-New Democratic Party/NPD-Nouveau Parti d',
    'gpc': 'Green Party/Parti Vert',
    'bq': 'Bloc Qu',
    'oth': 'Independent',
}

province_to_region = {
    'Newfoundland and Labrador': 'ATL',
    'Prince Edward Island': 'ATL',
    'Nova Scotia': 'ATL',
    'New Brunswick': 'ATL',
    'Quebec': 'QC',
    'Ontario': 'ON',
    'Manitoba': 'SK_MB',
    'Saskatchewan': 'SK_MB',
    'Alberta': 'AB',
    'British Columbia': 'BC',
    'Yukon': 'Canada',
    'Northwest Territories': 'Canada',
    'Nunavut': 'Canada',
}

province_abbreviations = {
    'Newfoundland and Labrador': 'NL',
    'Prince Edward Island': 'PE',
    'Nova Scotia': 'NS',
    'New Brunswick': 'NB',
    'Quebec': 'QC',
    'Ontario': 'ON',
    'Manitoba': 'MB',
    'Saskatchewan': 'SK',
    'Alberta': 'AB',
    'British Columbia': 'BC',
    'Yukon': 'YT',
    'Northwest Territories': 'NT',
    'Nunavut': 'NU',
}

provinces_by_numeric_code = {
    '10': 'NL',
    '11': 'PE',
    '12': 'NS',
    '13': 'NB',
    '24': 'QC',
    '35': 'ON',
    '46': 'MB',
    '47': 'SK',
    '48': 'AB',
    '59': 'BC',
    '60': 'YT',
    '61': 'NT',
    '62': 'NU',
}


def WhichParty(s):
    """If the given string contains a party name, return its abbreviation."""
    for abbreviation, long_name in party_long_names.items():
        if long_name in s:
            return abbreviation
    return None


def WhichRegion(s):
    """If the given string contains a province name, return its region code."""
    for province, region in province_to_region.items():
        if province in s:
            return region
    return None


def WhichProvince(s):
    """If the given string contains a province name, return its short form."""
    for province, abbr in province_abbreviations.items():
        if province in s:
            return abbr
    return None


def NormalizeDictVector(d):
    """Adjusts numerical values so they add up to 1."""
    normalized = {}
    divisor = sum(d.values())
    for key in d:
        normalized[key] = d[key] / divisor
    return normalized


def KeyWithHighestValue(d, forbidden_keys=[]):
    """Return the key with the highest value.

    Optionally, a list of forbidden keys can be provided. If so, the function
    will return the key with the next-highest value, but which is not
    forbidden.
    """
    mv = -1
    mk = None
    for k, v in d.items():
        if k in forbidden_keys:
            continue
        if v > mv:
            mk = k
            mv = v
    return mk


# Load regional polling data.
interpolator = RegionalPollInterpolator()
interpolator.LoadFromCsv('regional_poll_averages.csv')
interpolator.LoadFromCsv('regional_baseline.csv')
baseline_date = datetime.datetime(2011, 5, 2)

# Load and process per-riding election results from 2011.
old_ridings = {}
with open('table_tableau12.csv') as csv_file:
    reader = csv.DictReader(csv_file)
    for row in reader:
        riding_name = row['Electoral District Name/Nom de circonscription']
        riding_number = row['Electoral District Number']
        popular_vote = float(row['Percentage of Votes Obtained'])
        party = WhichParty(row['Candidate/Candidat'])
        if not party:
            continue
        province = WhichProvince(row['Province'])
        region = WhichRegion(row['Province'])
        assert region
        before = interpolator.Interpolate(region, party, baseline_date)
        after = interpolator.GetMostRecent(region, party)
        if before > 2:  # As in 2% not 200%
            projected_gain = after / before
        else:
            projected_gain = 1
        projection = popular_vote * projected_gain
        if riding_number not in old_ridings:
            old_ridings[riding_number] = {
                '2011': {}, 'projections': {},
                'name': riding_name,
                'number': riding_number,
                'province': province}
        r = old_ridings[riding_number]
        r['2011'][party] = popular_vote
        r['projections'][party] = projection

# Calculate the transposition from old ridings (2003) to new ridings (2013).
new_ridings = {}
with open('TRANSPOSITION_338FED.csv') as csv_file:
    # Skip the first few lines of the file, to get to the data part.
    for i in range(4):
        next(csv_file)
    reader = csv.DictReader(csv_file)
    for row in reader:
        new_riding_number = row['2013 FED Number']
        if not new_riding_number:
            continue
        new_riding_name = row['2013 FED Name']
        old_riding_number = row['2003 FED Number from which the 2013 ' +
                                'FED Number is constituted']
        prov_num_code = row['Province and territory numeric code']
        province = provinces_by_numeric_code[prov_num_code]
        assert province
        population_2013 = float(row['2013 FED - Population'])
        population_transferred = float(
            row['Population transferred to 2013 FED'])
        population_percent = population_transferred / population_2013
        all_votes = row['All votes']
        electors = row['Electors on lists']
        if new_riding_number not in new_ridings:
            new_ridings[new_riding_number] = {
                'name': new_riding_name,
                'number': new_riding_number,
                'province': province,
                'feeders': {},
                'total_votes_2011': 0,
                'total_electors_2011': 0,
                'population': int(population_2013)}
        r = new_ridings[new_riding_number]
        r['feeders'][old_riding_number] = population_percent
        r['total_votes_2011'] += int(all_votes)
        r['total_electors_2011'] += int(electors)

# Output final stats for each riding.
party_order = ['cpc', 'ndp', 'lpc', 'gpc', 'bq', 'oth']
readable_party_names = {
    'cpc': 'CON',
    'lpc': 'LIB',
    'ndp': 'NDP',
    'gpc': 'GRN',
    'bq': 'BQ',
    'oth': 'OTH',
}
with open('riding_forecasts.csv', 'wb') as csv_file:
    csv_writer = csv.writer(csv_file)
    csv_writer.writerow(
        ['province', 'name', 'number'] +
        [readable_party_names[p].lower() for p in party_order] +
        ['projected_winner', 'strategic_vote', 'confidence', 'turnout_2011'])
    for r in new_ridings.values():
        projections = {}
        riding_name = r['name']
        riding_number = str(r['number'])
        province = r['province']
        # Project this riding by mixing old-riding projections.
        for feeder_number, weight in r['feeders'].items():
            feeder = old_ridings[feeder_number]
            normalized = NormalizeDictVector(feeder['projections'])
            for party, support in normalized.items():
                if party not in projections:
                    projections[party] = 0
                projections[party] += support * weight
        # Upgrade the projections for ridings that have local polling data.
        projections = riding_poll_model.projections_by_riding_number.get(
            riding_number, projections)
        ordered_projections = [projections.get(p, 0) for p in party_order]
        projected_winner = KeyWithHighestValue(projections)
        runner_up = KeyWithHighestValue(projections, [projected_winner])
        strategic_vote = KeyWithHighestValue(projections, ['cpc'])
        gap = projections[projected_winner] - projections[runner_up]
        projected_winner = readable_party_names[projected_winner]
        strategic_vote = readable_party_names[strategic_vote]
        confidence = norm.cdf(gap / 0.25)
        turnout = float(r['total_votes_2011']) / r['total_electors_2011']
        csv_writer.writerow([province, riding_name, riding_number] +
                            ordered_projections +
                            [projected_winner, strategic_vote, confidence,
                             turnout])
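# Worked examples of the helpers above:
# NormalizeDictVector({'a': 2.0, 'b': 3.0}) -> {'a': 0.4, 'b': 0.6}
# KeyWithHighestValue({'a': 0.4, 'b': 0.6}) -> 'b'
# KeyWithHighestValue({'a': 0.4, 'b': 0.6}, forbidden_keys=['b']) -> 'a'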
| 34.82684 | 79 | 0.612679 | 929 | 8,045 | 5.142088 | 0.284177 | 0.042705 | 0.01884 | 0.023446 | 0.135859 | 0.096504 | 0.057777 | 0.039355 | 0.039355 | 0.039355 | 0 | 0.023347 | 0.270603 | 8,045 | 230 | 80 | 34.978261 | 0.790729 | 0.104413 | 0 | 0.10101 | 0 | 0 | 0.193571 | 0.013417 | 0 | 0 | 0 | 0 | 0.010101 | 1 | 0.025253 | false | 0 | 0.025253 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89ecab6bc7c96ef4ec3c90aee3bd1aeff3579ec5 | 9,090 | py | Python | engine.py | benzhang13/schedule-comparer | 31607883a1907a50adee78574377c53a4c531a86 | [
"MIT"
] | null | null | null | engine.py | benzhang13/schedule-comparer | 31607883a1907a50adee78574377c53a4c531a86 | [
"MIT"
] | null | null | null | engine.py | benzhang13/schedule-comparer | 31607883a1907a50adee78574377c53a4c531a86 | [
"MIT"
] | null | null | null | import discord
from app.process_excel import receive_file, read_all_files, delete_file
from app.process_ical import receive_file as ical_receive_file
from datetime import datetime
from dotenv import load_dotenv
import os

dir_path = os.path.dirname(os.path.realpath(__file__))
client = discord.Client()
load_dotenv()


def run_bot():
    token = os.environ['BOT_TOKEN']
    client.run(token)


@client.event
async def on_ready():
    print('{0.user} about to compare timetables'.format(client))


@client.event
async def on_message(message):
    if message.author == client.user:
        return
    if message.content.startswith('$hello'):
        await message.channel.send(message.author.mention + ', hey!')
    elif message.content.startswith('$submit'):
        if len(message.attachments) == 0:
            await message.channel.send(message.author.mention + ', you have to attach your timetable.'
                                       ' You can find the template with $template')
        else:
            result = receive_file(message.attachments[0].url, str(message.author.id))
            if result.get('already_exists'):
                await message.channel.send(message.author.mention +
                                           ', you already have a submitted timetable.'
                                           ' Please use $delete if you would like to replace your timetable.')
            elif result.get('not_xlsx'):
                await message.channel.send(message.author.mention +
                                           ', that file is not a .xlsx file.'
                                           ' Please use the excel template provided with the $template command')
            else:
                await message.channel.send(message.author.mention + '\'s timetable submitted')
    elif message.content.startswith('$free'):
        now = datetime.today().timetuple()
        semester = 0
        if now[1] > 9 and now[2] >= 5:
            semester = 1
        elif now[1] <= 4 and now[2] >= 6:
            semester = 2
        if now[3] < 9 or now[3] > 21:
            await message.channel.send('No one has classes right now')
        else:
            if now[4] < 30:
                minute = 0
            else:
                minute = 30
            available = read_all_files(minute=minute, hour=now[3], day=datetime.today().weekday(), semester=semester)
            available_in_guild = []
            for user_id in available:
                guild = message.guild
                u = guild.get_member(user_id)
                if u is not None:
                    available_in_guild.append(u)
            if len(available_in_guild) == 0:
                await message.channel.send(message.author.mention + ', no one is currently available')
            else:
                for user in available_in_guild:
                    await message.channel.send(user.mention + ' ')
                await message.channel.send('have no classes at the moment')
    elif message.content.startswith('$delete'):
        already_exists = delete_file(str(message.author.id))
        if not already_exists:
            await message.channel.send(message.author.mention +
                                       ', you have no timetable to delete. Use $submit to submit one.')
        else:
            await message.channel.send(message.author.mention + '\'s timetable deleted')
    elif message.content.startswith('$get'):
        file_path = dir_path + '/app/spreadsheets/' + str(message.author.id) + 'schedule.xlsx'
        if os.path.isfile(file_path):
            file = discord.File(fp=file_path, filename=message.author.name + '_schedule.xlsx')
            await message.channel.send(content=message.author.mention + ', here you go!', file=file)
        else:
            await message.channel.send('It appears that you do not currently have a submitted schedule.'
                                       ' Please use $help for commands to make one')
    elif message.content.startswith('$template'):
        user = message.author
        file = discord.File(fp=dir_path + '/app/generic_sheet/generic_sheet.xlsx')
        await message.channel.send(content=user.mention + ', here you go!', file=file)
    elif message.content.startswith('$subcal summer'):
        if len(message.attachments) == 0:
            await message.channel.send(message.author.mention + ', you must include the ICalendar file downloaded'
                                       ' from the ACORN website as an attachment.')
        else:
            results = ical_receive_file(message.attachments[0].url, message.author.id, 0)
            if results.get('not_ics'):
                await message.channel.send(message.author.mention + ', that is not a .ics file. Make sure you are'
                                           ' submitting the file downloaded from the ACORN'
                                           ' website.')
            if results.get('submitted'):
                await message.channel.send(message.author.mention + ', your timetable has successfully been edited.')
    elif message.content.startswith('$subcal fall'):
        if len(message.attachments) == 0:
            await message.channel.send(message.author.mention + ', you must include the ICalendar file downloaded'
                                       ' from the ACORN website as an attachment.')
        else:
            results = ical_receive_file(message.attachments[0].url, message.author.id, 1)
            if results.get('not_ics'):
                await message.channel.send(message.author.mention + ', that is not a .ics file. Make sure you are'
                                           ' submitting the file downloaded from the ACORN'
                                           ' website.')
            if results.get('submitted'):
                await message.channel.send(message.author.mention + ', your timetable has successfully been edited.')
    elif message.content.startswith('$subcal winter'):
        if len(message.attachments) == 0:
            await message.channel.send(message.author.mention + ', you must include the ICalendar file downloaded'
                                       ' from the ACORN website as an attachment.')
        else:
            results = ical_receive_file(message.attachments[0].url, message.author.id, 2)
            if results.get('not_ics'):
                await message.channel.send(message.author.mention + ', that is not a .ics file. Make sure you are'
                                           ' submitting the file downloaded from the ACORN'
                                           ' website.')
            if results.get('submitted'):
                await message.channel.send(message.author.mention + ', your timetable has successfully been edited.')
    elif message.content.startswith('$help'):
        await message.channel.send(message.author.mention +
                                   """
Commands include:
$template: gets generic excel template
$submit: submits the attached spreadsheet as a timetable
$subcal: edits timetable using ical file from ACORN. Use <$subcal help> for more details about this
$delete: deletes your current timetable
$get: gets your current submitted schedule as an excel file
$free: mention everyone who has no classes at the moment
""")
    elif message.content.startswith('$subcal help'):
        await message.channel.send(message.author.mention +
                                   """
The $subcal command is used for importing ical(.ics) schedules downloaded from ACORN.
To find how to retrieve your ical schedule from the ACORN site, use <$subcal retrieve help>.
Once you have your ical schedule for the semester, use the command:
"$subcal <semester>", where <semester> is the semester for your ical schedule, i.e. summer, fall, winter.
An example command for editing your fall timetable would be "$subcal fall"
Make sure the ical file is attached to your command message.
If you do not currently have a timetable, this command will create one for you.
""")
    elif message.content.startswith('$subcal retrieve help'):
        await message.channel.send(message.author.mention +
                                   """
Steps to retrieve your ical schedule from ACORN:
1. Log in to ACORN
2. Click the "View Timetable" button on the home page
3. Click "Download Calendar Export"
4. Your ical file for the current semester should begin downloading
""")
    elif message.content.startswith('$'):
        await message.channel.send('I don\'t recognize that command. Try $help for a list of commands.')


if __name__ == '__main__':
    run_bot()
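# The three $subcal <semester> branches above are identical except for the
# semester index passed to ical_receive_file (summer=0, fall=1, winter=2); a
# table-driven sketch (not part of the original bot) could collapse them:
# for name, idx in {'summer': 0, 'fall': 1, 'winter': 2}.items():
#     if message.content.startswith('$subcal ' + name):
#         ...  # shared attachment check, then ical_receive_file(url, author_id, idx)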
| 53.470588 | 117 | 0.577558 | 1,036 | 9,090 | 5.004826 | 0.201737 | 0.075217 | 0.098939 | 0.119769 | 0.491032 | 0.454966 | 0.406172 | 0.397879 | 0.353134 | 0.333076 | 0 | 0.006594 | 0.332673 | 9,090 | 169 | 118 | 53.786982 | 0.84817 | 0 | 0 | 0.348485 | 0 | 0.007576 | 0.209582 | 0.004844 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007576 | false | 0 | 0.045455 | 0 | 0.060606 | 0.007576 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89ef27909812cb68db4e32120922fdf1198b7341 | 1,579 | py | Python | debug/fixed_width_rnn.py | JunLi-Galios/unsup_temp_embed_alternating | 1b054fd82aadcfe1aa219be17beb77c89efd974e | [
"MIT"
] | null | null | null | debug/fixed_width_rnn.py | JunLi-Galios/unsup_temp_embed_alternating | 1b054fd82aadcfe1aa219be17beb77c89efd974e | [
"MIT"
] | null | null | null | debug/fixed_width_rnn.py | JunLi-Galios/unsup_temp_embed_alternating | 1b054fd82aadcfe1aa219be17beb77c89efd974e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Baseline for relative time embedding: learn regression model in terms of
relative time.
"""

__author__ = 'Jun Li'
__date__ = 'February 2021'

import torch
import torch.nn as nn
from torch.nn import functional as F

from ute.utils.arg_pars import opt
from ute.utils.logging_setup import logger


class Fixed_Width_RNN(nn.Module):
    def __init__(self, embedding, input_size, hidden_size, width=15):
        super(Fixed_Width_RNN, self).__init__()
        self._embedding = embedding
        self._input_size = input_size
        self._hidden_size = hidden_size
        self.i2h = nn.Linear(input_size, hidden_size)
        self.h2h = nn.Linear(hidden_size, hidden_size)
        self.cls = nn.Linear(hidden_size, 2)
        self._init_weights()

    def forward(self, x):
        output = F.relu(self.i2h(x))
        output = F.relu(self.h2h(output))
        output = self.cls(output)
        output = nn.functional.log_softmax(output, dim=1)
        return output

    def _init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)


def create_model(K):
    torch.manual_seed(opt.seed)
    model = CLS(K).to(opt.device)
    loss = nn.NLLLoss()
    # loss = nn.MSELoss().cuda()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=opt.lr * 0.1,
                                 weight_decay=opt.weight_decay)
    logger.debug(str(model))
    logger.debug(str(loss))
    logger.debug(str(optimizer))
    return model, loss, optimizer
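# Minimal shape check for the repaired forward pass (a sketch; the embedding
# argument is stored but unused by forward, so None suffices here):
# net = Fixed_Width_RNN(embedding=None, input_size=64, hidden_size=32)
# out = net(torch.randn(8, 64))  # log-probabilities of shape (8, 2)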
| 27.224138 | 75 | 0.631412 | 217 | 1,579 | 4.37788 | 0.43318 | 0.063158 | 0.044211 | 0.04 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015345 | 0.257125 | 1,579 | 57 | 76 | 27.701754 | 0.794544 | 0.085497 | 0 | 0 | 0 | 0 | 0.01324 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.131579 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89f3cebe6712e73f788766b163b60831e545280b | 364 | py | Python | spec/construct/test_debug_switch_user.py | DarkShadow44/kaitai_struct_tests | 4bb13cef82965cca66dda2eb2b77cd64e9f70a12 | [
"MIT"
] | 11 | 2018-04-01T03:58:15.000Z | 2021-08-14T09:04:55.000Z | spec/construct/test_debug_switch_user.py | DarkShadow44/kaitai_struct_tests | 4bb13cef82965cca66dda2eb2b77cd64e9f70a12 | [
"MIT"
] | 73 | 2016-07-20T10:27:15.000Z | 2020-12-17T18:56:46.000Z | spec/construct/test_debug_switch_user.py | DarkShadow44/kaitai_struct_tests | 4bb13cef82965cca66dda2eb2b77cd64e9f70a12 | [
"MIT"
] | 37 | 2016-08-15T08:25:56.000Z | 2021-08-28T14:48:46.000Z | # runs in debug mode, so the _read() needs to be called manually

import unittest

from debug_switch_user import _schema


class TestDebugSwitchUser(unittest.TestCase):
    def test_debug_switch_user(self):
        r = _schema.parse_file('src/nav_parent_switch.bin')
        r._read()
        self.assertEqual(r.code, 1)
        self.assertEqual(r.data.val, -190)
| 26 | 64 | 0.711538 | 52 | 364 | 4.75 | 0.711538 | 0.089069 | 0.121457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013652 | 0.195055 | 364 | 13 | 65 | 28 | 0.829352 | 0.17033 | 0 | 0 | 0 | 0 | 0.083333 | 0.083333 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89f6226b3375969de1abcdae2bec38f52419b391 | 4,438 | py | Python | train_vae_on_convs.py | tencia/video_predict | fbd84769d595b6518d8174024dd2d862cd48518f | [
"MIT"
] | 84 | 2016-01-08T23:35:22.000Z | 2021-06-01T06:52:26.000Z | train_vae_on_convs.py | tencia/video_predict | fbd84769d595b6518d8174024dd2d862cd48518f | [
"MIT"
] | 2 | 2016-05-26T10:32:22.000Z | 2018-03-30T11:51:18.000Z | train_vae_on_convs.py | tencia/video_predict | fbd84769d595b6518d8174024dd2d862cd48518f | [
"MIT"
] | 34 | 2016-03-31T21:13:33.000Z | 2021-12-11T19:49:38.000Z | import numpy as np
import sys
import os
import theano
import theano.tensor as T
import lasagne as nn
import utils as u
import models as m
import config as c
#
# Trains a VAE on convolutional features (using a separately trained network
# for reversing the convolutional feature extraction).
def main(data_file = '', num_epochs=10, batch_size = 128, L=2, z_dim=256,
n_hid=1500, binary='false', img_size = 64, init_from = '', save_to='params',
split_layer='conv7', pxsh = 0.5, specstr = c.pf_cae_specstr,
cae_weights=c.pf_cae_params, deconv_weights = c.pf_deconv_params):
binary = binary.lower() == 'true'
# pre-trained function for extracting convolutional features from images
cae = m.build_cae(input_var=None, specstr=specstr, shape=(img_size,img_size))
laydict = dict((l.name, l) for l in nn.layers.get_all_layers(cae))
convshape = nn.layers.get_output_shape(laydict[split_layer])
convs_from_img, _ = m.encoder_decoder(cae_weights, specstr=specstr, layersplit=split_layer,
shape=(img_size, img_size))
# pre-trained function for returning to images from convolutional features
img_from_convs = m.deconvoluter(deconv_weights, specstr=specstr, shape=convshape)
# Create VAE model
print("Building model and compiling functions...")
print("L = {}, z_dim = {}, n_hid = {}, binary={}".format(L, z_dim, n_hid, binary))
input_var = T.tensor4('inputs')
    c,w,h = convshape[1], convshape[2], convshape[3]  # note: `c` shadows the `config as c` import within this function
l_tup = l_z_mu, l_z_ls, l_x_mu_list, l_x_ls_list, l_x_list, l_x = \
m.build_vae(input_var, L=L, binary=binary, z_dim=z_dim, n_hid=n_hid,
shape=(w,h), channels=c)
if len(init_from) > 0:
print("loading from {}".format(init_from))
u.load_params(l_x, init_from)
# build loss, updates, training, prediction functions
loss,_ = u.build_vae_loss(input_var, *l_tup, deterministic=False, binary=binary, L=L)
test_loss, test_prediction = u.build_vae_loss(input_var, *l_tup, deterministic=True,
binary=binary, L=L)
lr = theano.shared(nn.utils.floatX(1e-5))
params = nn.layers.get_all_params(l_x, trainable=True)
updates = nn.updates.adam(loss, params, learning_rate=lr)
train_fn = theano.function([input_var], loss, updates=updates)
val_fn = theano.function([input_var], test_loss)
ae_fn = theano.function([input_var], test_prediction)
# run training loop
def data_transform(x, do_center):
floatx_ims = u.raw_to_floatX(x, pixel_shift=pxsh, square=True, center=do_center)
return convs_from_img(floatx_ims)
print("training for {} epochs".format(num_epochs))
data = u.DataH5PyStreamer(data_file, batch_size=batch_size)
hist = u.train_with_hdf5(data, num_epochs=num_epochs, train_fn=train_fn, test_fn=val_fn,
tr_transform=lambda x: data_transform(x[0], do_center=False),
te_transform=lambda x: data_transform(x[0], do_center=True))
# generate examples, save training history
te_stream = data.streamer(shuffled=True)
imb, = next(te_stream.get_epoch_iterator())
orig_feats = data_transform(imb, do_center=True)
reconstructed_feats = ae_fn(orig_feats).reshape(orig_feats.shape)
orig_feats_deconv = img_from_convs(orig_feats)
reconstructed_feats_deconv = img_from_convs(reconstructed_feats)
for i in range(reconstructed_feats_deconv.shape[0]):
u.get_image_pair(orig_feats_deconv, reconstructed_feats_deconv, index=i, shift=pxsh)\
.save('output_{}.jpg'.format(i))
hist = np.asarray(hist)
np.savetxt('vae_convs_train_hist.csv', np.asarray(hist), delimiter=',', fmt='%.5f')
u.save_params(l_x, os.path.join(save_to, 'vae_convs_{}.npz'.format(hist[-1,-1])))
if __name__ == '__main__':
# make all arguments of main(...) command line arguments (with type inferred from
# the default value) - this doesn't work on bools so those are strings when
# passed into main.
import argparse, inspect
parser = argparse.ArgumentParser(description='Command line options')
ma = inspect.getargspec(main)
for arg_name,arg_type in zip(ma.args[-len(ma.defaults):],[type(de) for de in ma.defaults]):
parser.add_argument('--{}'.format(arg_name), type=arg_type, dest=arg_name)
args = parser.parse_args(sys.argv[1:])
main(**{k:v for (k,v) in vars(args).items() if v is not None})
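# Example invocation (hypothetical paths/values): every keyword argument of
# main() becomes a --flag whose type is inferred from its default value, and
# bools are passed as the strings 'true'/'false':
#   python train_vae_on_convs.py --data_file frames.h5 --num_epochs 20 \
#       --batch_size 64 --binary false --save_to params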
| 48.23913 | 95 | 0.698288 | 676 | 4,438 | 4.331361 | 0.338757 | 0.021858 | 0.01127 | 0.008197 | 0.118852 | 0.081967 | 0.052596 | 0.052596 | 0.052596 | 0 | 0 | 0.009348 | 0.180487 | 4,438 | 91 | 96 | 48.769231 | 0.795711 | 0.128662 | 0 | 0 | 0 | 0 | 0.060976 | 0.006227 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029851 | false | 0 | 0.149254 | 0 | 0.19403 | 0.059701 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89fb095e61e1c1df946fce48cf100b184e5599dc | 424 | py | Python | tests/test_hand.py | kuwv/spades | 9c36eff2c8fe2e4acc69a317d0c58bb8e1c2373f | [
"Apache-2.0"
] | null | null | null | tests/test_hand.py | kuwv/spades | 9c36eff2c8fe2e4acc69a317d0c58bb8e1c2373f | [
"Apache-2.0"
] | null | null | null | tests/test_hand.py | kuwv/spades | 9c36eff2c8fe2e4acc69a317d0c58bb8e1c2373f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# type: ignore
'''Provide player hand tests.'''
from spades import db
from spades.game.models.card import Card
from spades.game.models.player import Hand
def test_hand(app) -> None:
'''Test hand.'''
hand = Hand()
hand.add_card(Card('A', 'S'))
db.session.add(hand)
db.session.commit()
card = hand.pull_card('A', 'S')
assert card.rank == 'A'
assert card.suit == 'S'
| 21.2 | 42 | 0.620283 | 63 | 424 | 4.126984 | 0.460317 | 0.115385 | 0.107692 | 0.153846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00295 | 0.200472 | 424 | 19 | 43 | 22.315789 | 0.764012 | 0.17217 | 0 | 0 | 0 | 0 | 0.017699 | 0 | 0 | 0 | 0 | 0 | 0.181818 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89fbeff5e102c8ed93a9dcb609eeb07e8969b34f | 1,191 | py | Python | Setup.py | jmoore001/JM_Custom_Maya_Tools | a042a497f22fe21b9c70ea56581682731100e259 | [
"MIT"
] | 1 | 2021-10-03T18:03:36.000Z | 2021-10-03T18:03:36.000Z | Setup.py | jmoore001/JM_Custom_Maya_Tools | a042a497f22fe21b9c70ea56581682731100e259 | [
"MIT"
] | null | null | null | Setup.py | jmoore001/JM_Custom_Maya_Tools | a042a497f22fe21b9c70ea56581682731100e259 | [
"MIT"
] | null | null | null | import maya.cmds as cmds
import os
import sys
user = os.environ.get('USER')
path = 'C:/Users/' + user + '/Documents/maya/JM_Custom_Maya_Tools/Scripts'
if path not in sys.path:
sys.path.append(path)
import InitilizeTools
import JMCustomMarkingMenu
version = cmds.about(version = True)
destWindows = 'C:/Users/' + user + '/Documents/maya/' + version + '/scripts/userSetup.mel'
srcWindows = path + '/userSetup.mel'
cmds.sysFile( srcWindows, copy=destWindows )
customToolsDirect = 'C:/Users/{}/Documents/maya/JM_Custom_Maya_Tools'.format(user)
scriptsFolder = customToolsDirect + '/Scripts'
iconsFolder = customToolsDirect + '/Icons'
shelfLevel = mel.eval("$tmpVar=$gShelfTopLevel")
icon = iconsFolder + '/CustomToolsIcon.png'
command = "import os\nimport sys\nuser = os.environ.get('USER')\npath = 'C:/Users/' + user + '/Documents/maya/JM_Custom_Maya_Tools/Scripts'\nif path not in sys.path:\n sys.path.append(path)\nimport InitilizeTools\nInitilizeTools.CustomToolsJM()"
shelf = cmds.tabLayout(shelfLevel, query=1, ca=1, selectTab = True)
cmds.shelfButton(p = shelf, image1 = icon, command = command)
JMCustomMarkingMenu.JMCustomToolsMarkingMenu()
InitilizeTools.CustomToolsJM() | 44.111111 | 248 | 0.753988 | 147 | 1,191 | 6.047619 | 0.421769 | 0.026997 | 0.033746 | 0.064117 | 0.20135 | 0.139483 | 0.105737 | 0.105737 | 0.105737 | 0.105737 | 0 | 0.002817 | 0.105793 | 1,191 | 27 | 249 | 44.111111 | 0.831925 | 0 | 0 | 0 | 0 | 0.043478 | 0.384228 | 0.24245 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.26087 | 0 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89fcae0b5887d34e67c9e20f4c13b08e578b22b7 | 10,072 | py | Python | pints/_mcmc/_differential_evolution.py | lisaplag/pints | 3de6617e57ba5b395edaca48961bfc5a4b7209b3 | [
"RSA-MD"
] | null | null | null | pints/_mcmc/_differential_evolution.py | lisaplag/pints | 3de6617e57ba5b395edaca48961bfc5a4b7209b3 | [
"RSA-MD"
] | null | null | null | pints/_mcmc/_differential_evolution.py | lisaplag/pints | 3de6617e57ba5b395edaca48961bfc5a4b7209b3 | [
"RSA-MD"
] | null | null | null | #
# Differential evolution MCMC
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import pints
import numpy as np
import logging
class DifferentialEvolutionMCMC(pints.MultiChainMCMC):
r"""
Uses differential evolution MCMC as described in [1]_ to perform posterior
sampling from the posterior.
In each step of the algorithm ``n`` chains are evolved using the evolution
equation::
x_proposed = x[i,r] + gamma * (X[i,r1] - x[i,r2]) + epsilon
where ``r1`` and ``r2`` are random chain indices chosen (without
replacement) from the ``n`` available chains, which must not equal ``i`` or
each other, where ``i`` indicates the current time step, and
    ``epsilon ~ N(0,b)`` in ``d`` dimensions, where ``d`` is the
    dimensionality of the parameter vector.
If ``x_proposed / x[i,r] > u ~ U(0,1)``, then
``x[i+1,r] = x_proposed``; otherwise, ``x[i+1,r] = x[i]``.
Extends :class:`MultiChainMCMC`.
.. note::
This sampler requires a number of chains :math:`n \ge 3`, and
recommends :math:`n \ge 1.5 d`.
References
----------
.. [1] "A Markov Chain Monte Carlo version of the genetic algorithm
Differential Evolution: easy Bayesian computing for real parameter
spaces". Cajo J. F. Ter Braak (2006) Statistical Computing
https://doi.org/10.1007/s11222-006-8769-1
"""
def __init__(self, chains, x0, sigma0=None):
super(DifferentialEvolutionMCMC, self).__init__(chains, x0, sigma0)
# Need at least 3 chains
if self._chains < 3:
raise ValueError('Need at least 3 chains.')
# Warn user against using too few chains
if self._chains < 1.5 * self._n_parameters:
log = logging.getLogger(__name__)
log.warning('This method should be run with n_chains >= ' +
'1.5 * n_parameters')
# Set initial state
self._running = False
# Current points and proposed points
self._current = None
self._current_log_pdfs = None
self._proposed = None
#
# Default settings
#
# Gamma
self._gamma = 2.38 / np.sqrt(2 * self._n_parameters)
        # Gamma is switched to 1 every `_gamma_switch_rate` steps to help
        # the chains find modes
self._gamma_switch_rate = 10
# Error scale width
self._b = 0.001
# Mean used for scaling error process
self._mu = np.mean(self._x0, axis=0)
# Gaussian error vs uniform
self._gaussian_error = True
# Relative scaling
self._relative_scaling = True
def ask(self):
""" See :meth:`pints.MultiChainMCMC.ask()`. """
# Initialise on first call
if not self._running:
self._initialise()
# Propose new points
if self._proposed is None:
# set gamma to 1
if self._iter_count % self._gamma_switch_rate == 0:
self._gamma = 1
self._iter_count += 1
self._proposed = np.zeros(self._current.shape)
for j in range(self._chains):
if self._gaussian_error:
error = np.random.normal(0, self._b_star, self._mu.shape)
else:
error = np.random.uniform(-self._b_star, self._b_star,
self._mu.shape)
r1, r2 = r_draw(j, self._chains)
self._proposed[j] = (
self._current[j]
+ self._gamma * (self._current[r1] - self._current[r2])
+ error
)
# reset gamma
self._gamma = 2.38 / np.sqrt(2 * self._n_parameters)
# Set as read only
self._proposed.setflags(write=False)
# Return proposed points
return self._proposed
def current_log_pdfs(self):
""" See :meth:`MultiChainMCMC.current_log_pdfs()`. """
return self._current_log_pdfs
def set_gaussian_error(self, gaussian_error):
"""
If ``True`` sets the error process to be a gaussian error,
``N(0, b*)``; if ``False``, it uses a uniform error ``U(-b*, b*)``;
where ``b* = b`` if absolute scaling used and ``b* = mu * b`` if
relative scaling is used instead.
"""
gaussian_error = bool(gaussian_error)
self._gaussian_error = gaussian_error
def _initialise(self):
"""
Initialises the routine before the first iteration.
"""
if self._running:
raise RuntimeError('Already initialised.')
# Propose x0 as first points
self._current = None
self._current_log_pdfs = None
self._proposed = self._x0
# Set mu
# TODO: Should this be a user setting?
self._mu = np.mean(self._x0, axis=0)
# Use relative or absolute scaling of error process
if self._relative_scaling:
self._b_star = np.abs(self._mu * self._b)
else:
self._b_star = np.repeat(self._b, self._n_parameters)
# Gamma set to 1 counter
self._iter_count = 0
# Update sampler state
self._running = True
def set_gamma_switch_rate(self, gamma_switch_rate):
"""
Sets the number of steps between iterations where gamma is set to 1
(then reset immediately afterwards)
"""
if gamma_switch_rate < 1:
raise ValueError('The interval number of steps between ' +
' gamma=1 iterations must equal or exceed 1.')
if not isinstance(gamma_switch_rate, int):
raise ValueError('The interval number of steps between ' +
' gamma=1 iterations must be an integer.')
self._gamma_switch_rate = gamma_switch_rate
def set_relative_scaling(self, relative_scaling):
"""
Sets whether to use an error process whose standard deviation scales
relatively (``scale = self._mu * self_b``) or absolutely
(``scale = self._b`` in all dimensions).
"""
relative_scaling = bool(relative_scaling)
self._relative_scaling = relative_scaling
if self._relative_scaling:
self._b_star = self._mu * self._b
else:
self._b_star = np.repeat(self._b, self._n_parameters)
def name(self):
""" See :meth:`pints.MCMCSampler.name()`. """
return 'Differential Evolution MCMC'
def tell(self, proposed_log_pdfs):
""" See :meth:`pints.MultiChainMCMC.tell()`. """
# Check if we had a proposal
if self._proposed is None:
raise RuntimeError('Tell called before proposal was set.')
# Ensure proposed_log_pdfs are numpy array
proposed_log_pdfs = np.array(proposed_log_pdfs)
# First points?
if self._current is None:
if not np.all(np.isfinite(proposed_log_pdfs)):
raise ValueError(
'Initial points for MCMC must have finite logpdf.')
# Accept
self._current = self._proposed
self._current_log_pdfs = proposed_log_pdfs
self._current_log_pdfs.setflags(write=False)
# Clear proposal
self._proposed = None
# Return first samples for chains
return self._current
# Perform iteration
next = np.array(self._current, copy=True)
next_log_pdfs = np.array(self._current_log_pdfs, copy=True)
# Sample uniform numbers
u = np.log(np.random.uniform(size=self._chains))
# Get chains to be updated
i = u < (proposed_log_pdfs - self._current_log_pdfs)
# Update
next[i] = self._proposed[i]
next_log_pdfs[i] = proposed_log_pdfs[i]
self._current = next
self._current_log_pdfs = next_log_pdfs
self._current_log_pdfs.setflags(write=False)
# Clear proposal
self._proposed = None
# Return samples to add to chains
self._current.setflags(write=False)
return self._current
def set_scale_coefficient(self, b):
"""
Sets the scale coefficient ``b`` of the error process used in updating
the position of each chain.
"""
b = float(b)
if b < 0:
raise ValueError('Scale coefficient must be non-negative.')
self._b = b
def set_gamma(self, gamma):
"""
Sets the coefficient ``gamma`` used in updating the position of each
chain.
"""
gamma = float(gamma)
if gamma < 0:
raise ValueError('Gamma must be non-negative.')
self._gamma = gamma
def n_hyper_parameters(self):
""" See :meth:`TunableMethod.n_hyper_parameters()`. """
return 5
def set_hyper_parameters(self, x):
"""
The hyper-parameter vector is ``[gamma, gaussian_scale_coefficient,
gamma_switch_rate, gaussian_error, relative_scaling]``.
See :meth:`TunableMethod.set_hyper_parameters()`.
"""
self.set_gamma(x[0])
self.set_scale_coefficient(x[1])
try:
int_x2 = int(x[2])
except (ValueError, TypeError):
raise ValueError('The interval number of steps between ' +
'gamma=1 iterations must be convertable ' +
'to an integer.')
self.set_gamma_switch_rate(int_x2)
self.set_gaussian_error(x[3])
self.set_relative_scaling(x[4])
def r_draw(i, num_chains):
# TODO: Needs a docstring!
r1, r2 = np.random.choice(num_chains, 2, replace=False)
while(r1 == i or r2 == i or r1 == r2):
r1, r2 = np.random.choice(num_chains, 2, replace=False)
return r1, r2
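# Minimal usage sketch (a hedged illustration, not part of this module itself;
# assumes a pints.LogPDF instance `log_pdf` is available):
# import numpy as np
# import pints
# x0 = np.random.uniform(0, 1, size=(4, 2))  # 4 chains, 2 parameters
# mcmc = pints.MCMCController(log_pdf, 4, x0, method=DifferentialEvolutionMCMC)
# mcmc.set_max_iterations(1000)
# chains = mcmc.run()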
| 33.461794 | 79 | 0.590052 | 1,254 | 10,072 | 4.539075 | 0.238437 | 0.042516 | 0.027056 | 0.028461 | 0.228918 | 0.173753 | 0.166725 | 0.148981 | 0.12825 | 0.12825 | 0 | 0.015933 | 0.314535 | 10,072 | 300 | 80 | 33.573333 | 0.808517 | 0.32357 | 0 | 0.211679 | 0 | 0 | 0.082382 | 0 | 0 | 0 | 0 | 0.006667 | 0 | 1 | 0.10219 | false | 0 | 0.036496 | 0 | 0.19708 | 0.007299 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89ffd0729487f13d0cbc63a922ded256cede7242 | 23,621 | py | Python | kms-samples-python/asymmetric.py | QPC-WORLDWIDE67c7x/aliyunu | 4ebf00e1b196ecca94c5dcef6da4f32af9ac9578 | [
"Apache-2.0"
] | 17 | 2020-01-15T08:46:07.000Z | 2022-02-24T15:53:48.000Z | kms-samples-python/asymmetric.py | QPC-WORLDWIDE67c7x/aliyunu | 4ebf00e1b196ecca94c5dcef6da4f32af9ac9578 | [
"Apache-2.0"
] | 3 | 2020-10-13T20:38:58.000Z | 2021-04-30T20:25:43.000Z | kms-samples-python/asymmetric.py | QPC-WORLDWIDE67c7x/aliyunu | 4ebf00e1b196ecca94c5dcef6da4f32af9ac9578 | [
"Apache-2.0"
] | 5 | 2020-11-24T06:55:57.000Z | 2022-02-21T01:15:50.000Z | import argparse
import base64
import hashlib
import json
import ecdsa
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import pss
from Crypto.Signature import pkcs1_15
from ecdsa.util import sigdecode_der
from aliyunsdkcore.client import AcsClient
from aliyunsdkkms.request.v20160120 import ListKeysRequest
from aliyunsdkkms.request.v20160120 import DescribeKeyRequest
from aliyunsdkkms.request.v20160120 import CreateKeyRequest
from aliyunsdkkms.request.v20160120 import ListKeyVersionsRequest
from aliyunsdkkms.request.v20160120 import DescribeKeyVersionRequest
from aliyunsdkkms.request.v20160120 import CreateKeyVersionRequest
from aliyunsdkkms.request.v20160120 import GetPublicKeyRequest
from aliyunsdkkms.request.v20160120 import AsymmetricEncryptRequest
from aliyunsdkkms.request.v20160120 import AsymmetricDecryptRequest
from aliyunsdkkms.request.v20160120 import AsymmetricSignRequest
from aliyunsdkkms.request.v20160120 import AsymmetricVerifyRequest
class KeyMetadata(object):
"""密钥信息"""
def __init__(self, value):
self.creation_date = "" if "CreationDate" not in value else value.get("CreationDate")
self.description = "" if "Description" not in value else value.get("Description")
self.key_id = "" if "KeyId" not in value else value.get("KeyId")
self.key_state = "" if "KeyState" not in value else value.get("KeyState")
self.key_usage = "" if "KeyUsage" not in value else value.get("KeyUsage")
self.key_spec = "" if "KeySpec" not in value else value.get("KeySpec")
self.primary_key_version = "" if "PrimaryKeyVersion" not in value else value.get("PrimaryKeyVersion")
self.delete_date = "" if "DeleteDate" not in value else value.get("DeleteDate")
self.creator = "" if "Creator" not in value else value.get("Creator")
self.arn = "" if "Arn" not in value else value.get("Arn")
self.origin = "" if "Origin" not in value else value.get("Origin")
self.material_expire_time = "" if "MaterialExpireTime" not in value else value.get("MaterialExpireTime")
self.protection_level = "" if "ProtectionLevel" not in value else value.get("ProtectionLevel")
self.last_rotation_date = "" if "LastRotationDate" not in value else value.get("LastRotationDate")
self.automatic_rotation = "" if "AutomaticRotation" not in value else value.get("AutomaticRotation")
def get_creation_date(self):
return self.creation_date
def set_creation_date(self, create_date):
self.creation_date = create_date
def get_description(self):
return self.description
def set_description(self, description):
self.description = description
def get_key_id(self):
return self.key_id
def set_key_id(self, key_id):
self.key_id = key_id
def get_key_state(self):
return self.key_state
def set_key_state(self, key_state):
self.key_state = key_state
def get_key_usage(self):
return self.key_usage
def set_key_usage(self, key_usage):
self.key_usage = key_usage
def get_key_spec(self):
return self.key_spec
def set_key_spec(self, key_spec):
self.key_spec = key_spec
def get_primary_key_version(self):
return self.primary_key_version
def set_primary_key_version(self, primary_key_version):
self.primary_key_version = primary_key_version
def get_delete_date(self):
return self.delete_date
def set_delete_date(self, delete_date):
self.delete_date = delete_date
def get_creator(self):
return self.creator
def set_creator(self, creator):
self.creator = creator
def get_arn(self):
return self.arn
def set_arn(self, arn):
self.arn = arn
def get_origin(self):
return self.origin
def set_origin(self, origin):
self.origin = origin
def get_material_expire_time(self):
return self.material_expire_time
def set_material_expire_time(self, material_expire_time):
self.material_expire_time = material_expire_time
def get_protection_level(self):
return self.protection_level
def set_protection_level(self, protection_level):
self.protection_level = protection_level
def get_last_rotation_date(self):
return self.last_rotation_date
def set_last_rotation_date(self, last_rotation_date):
self.last_rotation_date = last_rotation_date
def get_automatic_rotation(self):
return self.automatic_rotation
def set_automatic_rotation(self, automatic_rotation):
self.automatic_rotation = automatic_rotation
class ListKeysResponse(object):
"""查询密钥返回值"""
def __init__(self, value):
self.page_number = 0
self.total_count = 0
self.key_ids = []
self.request_id = ""
self.parse(value)
def parse(self, value):
response = json.loads(value)
if ("Keys" in response) and ("Key" in response["Keys"]):
for key in response["Keys"]["Key"]:
if "KeyId" in key:
self.key_ids.append(key.get("KeyId"))
if "PageNumber" in response:
self.page_number = response["PageNumber"]
if "TotalCount" in response:
self.total_count = response["TotalCount"]
if "RequestId" in response:
self.request_id = response["RequestId"]
def get_key_ids(self):
return self.key_ids[:]
def get_page_number(self):
return self.page_number
def get_total_count(self):
return self.total_count
def get_request_id(self):
return self.request_id
class DescribeKeyResponse(object):
"""获取指定密钥相关信息返回值"""
def __init__(self, value):
self.key_metadata = None
self.request_id = ""
self.parse(value)
def parse(self, value):
response = json.loads(value)
if "KeyMetadata" in response:
self.key_metadata = KeyMetadata(response["KeyMetadata"])
if "RequestId" in response:
self.request_id = response["RequestId"]
def get_key_metadata(self):
return self.key_metadata
def get_request_id(self):
return self.request_id
class CreateKeyResponse(object):
"""创建密钥返回值"""
def __init__(self, value):
self.key_metadata = None
self.key_id = ""
self.request_id = ""
self.parse(value)
def parse(self, value):
response = json.loads(value)
if "KeyMetadata" in response:
self.key_metadata = KeyMetadata(response["KeyMetadata"])
if "RequestId" in response:
self.request_id = response["RequestId"]
def get_key_metadata(self):
return self.key_metadata
def get_key_id(self):
if self.key_metadata is not None:
self.key_id = self.key_metadata.get_key_id()
return self.key_id
def get_request_id(self):
return self.request_id
class ListKeyVersionsResponse(object):
"""查询密钥版本返回值"""
def __init__(self, value):
self.key_version_ids = []
self.page_number = 0
self.total_count = 0
self.request_id = ""
self.parse(value)
def parse(self, value):
response = json.loads(value)
if ("KeyVersions" in response) and ("KeyVersion" in response["KeyVersions"]):
for key_version in response["KeyVersions"]["KeyVersion"]:
if "KeyVersionId" in key_version:
self.key_version_ids.append(key_version.get("KeyVersionId"))
if "TotalCount" in response:
self.page_number = response["TotalCount"]
if "PageNumber" in response:
self.total_count = response["PageNumber"]
if "RequestId" in response:
self.request_id = response["RequestId"]
def get_key_version_ids(self):
return self.key_version_ids[:]
def get_page_number(self):
return self.page_number
def get_total_count(self):
return self.total_count
def get_request_id(self):
return self.request_id
class DescribeKeyVersionResponse(object):
"""获取密钥版本信息返回值"""
def __init__(self, value):
self.request_id = ""
self.key_id = ""
self.key_version_id = ""
self.creation_date = ""
self.parse(value)
def parse(self, value):
response = json.loads(value)
if "KeyVersion" in response:
if "KeyVersionId" in response["KeyVersion"]:
self.key_version_id = response["KeyVersion"]["KeyVersionId"]
if "KeyId" in response["KeyVersion"]:
self.key_id = response["KeyVersion"]["KeyId"]
if "CreationDate" in response["KeyVersion"]:
self.creation_date = response["KeyVersion"]["CreationDate"]
if "RequestId" in response:
self.request_id = response["RequestId"]
def get_key_id(self):
return self.key_id
def get_key_version_id(self):
return self.key_version_id
def get_creation_date(self):
return self.creation_date
def get_request_id(self):
return self.request_id
class CreateKeyVersionResponse(object):
"""创建密钥版本返回值"""
def __init__(self, value):
self.request_id = ""
self.key_id = ""
self.key_version_id = ""
self.creation_date = ""
self.parse(value)
def parse(self, value):
response = json.loads(value)
if "RequestId" in response:
self.request_id = response["RequestId"]
if "KeyVersion" in response:
if "KeyVersionId" in response["KeyVersion"]:
self.key_version_id = response["KeyVersion"]["KeyVersionId"]
if "KeyId" in response["KeyVersion"]:
self.key_id = response["KeyVersion"]["KeyId"]
if "CreationDate" in response["KeyVersion"]:
self.creation_date = response["KeyVersion"]["CreationDate"]
def get_request_id(self):
return self.request_id
def get_key_id(self):
return self.key_id
def get_key_version_id(self):
return self.key_version_id
def get_creation_date(self):
return self.creation_date
class GetPublicKeyResponse(object):
"""获取公钥信息返回值"""
def __init__(self, value):
self.request_id = ""
self.public_key = ""
self.parse(value)
def parse(self, value):
response = json.loads(value)
if "RequestId" in response:
self.request_id = response["RequestId"]
if "PublicKey" in response:
self.public_key = response["PublicKey"]
def get_request_id(self):
return self.request_id
def get_public_key(self):
return self.public_key
class AsymmetricEncryptResponse(object):
"""非对称密钥加密返回值"""
def __init__(self, value):
self.request_id = ""
self.key_id = ""
self.key_version_id = ""
self.cipher_text_blob = ""
self.parse(value)
def parse(self, value):
response = json.loads(value)
if "RequestId" in response:
self.request_id = response["RequestId"]
if "KeyId" in response:
self.key_id = response["KeyId"]
if "KeyVersionId" in response:
self.key_version_id = response["KeyVersionId"]
if "CiphertextBlob" in response:
self.cipher_text_blob = response["CiphertextBlob"]
def get_request_id(self):
return self.request_id
def get_key_id(self):
return self.key_id
def get_key_version_id(self):
return self.key_version_id
def get_cipher_text_blob(self):
return self.cipher_text_blob
class AsymmetricDecryptResponse(object):
"""非对称密钥解密返回值"""
def __init__(self, value):
self.request_id = ""
self.key_id = ""
self.key_version_id = ""
self.plain_text = ""
self.parse(value)
def parse(self, value):
response = json.loads(value)
if "RequestId" in response:
self.request_id = response["RequestId"]
if "KeyId" in response:
self.key_id = response["KeyId"]
if "KeyVersionId" in response:
self.key_version_id = response["KeyVersionId"]
if "Plaintext" in response:
self.plain_text = response["Plaintext"]
def get_request_id(self):
return self.request_id
def get_key_id(self):
return self.key_id
def get_key_version_id(self):
return self.key_version_id
def get_plain_text(self):
return self.plain_text
class AsymmetricSignResponse(object):
"""非对称密钥签名返回值"""
def __init__(self, value):
self.request_id = ""
self.key_id = ""
self.key_version_id = ""
self.value = ""
self.parse(value)
def parse(self, value):
response = json.loads(value)
if "RequestId" in response:
self.request_id = response["RequestId"]
if "KeyId" in response:
self.key_id = response["KeyId"]
if "KeyVersionId" in response:
self.key_version_id = response["KeyVersionId"]
if "Value" in response:
self.value = response["Value"]
def get_request_id(self):
return self.request_id
def get_key_id(self):
return self.key_id
def get_key_version_id(self):
return self.key_version_id
def get_value(self):
return self.value
class AsymmetricVerifyResponse(object):
"""非对称密钥验签返回值"""
def __init__(self, value):
self.request_id = ""
self.key_id = ""
self.key_version_id = ""
self.value = ""
self.parse(value)
def parse(self, value):
response = json.loads(value)
if "RequestId" in response:
self.request_id = response["RequestId"]
if "KeyId" in response:
self.key_id = response["KeyId"]
if "KeyVersionId" in response:
self.key_version_id = response["KeyVersionId"]
if "Value" in response:
self.value = response["Value"]
def get_request_id(self):
return self.request_id
def get_key_id(self):
return self.key_id
def get_key_version_id(self):
return self.key_version_id
def get_value(self):
return self.value
def list_keys(acs_client):
key_ids = []
page_number = "1"
page_size = "10"
while True:
request = ListKeysRequest.ListKeysRequest()
request.set_accept_format('JSON')
request.set_PageNumber(page_number)
request.set_PageSize(page_size)
response = ListKeysResponse(acs_client.do_action_with_exception(request))
key_ids[len(key_ids):len(key_ids)] = response.get_key_ids()
if response.get_page_number() * 10 >= response.get_total_count():
break
page_number = str(response.get_page_number() + 1)
return key_ids
def describe_key(acs_client, key_id):
request = DescribeKeyRequest.DescribeKeyRequest()
request.set_accept_format('JSON')
request.set_KeyId(key_id)
return DescribeKeyResponse(acs_client.do_action_with_exception(request))
def create_key(acs_client, key_spec, key_usage):
request = CreateKeyRequest.CreateKeyRequest()
request.set_accept_format('JSON')
request.set_KeyUsage(key_usage)
request.set_KeySpec(key_spec)
response = CreateKeyResponse(acs_client.do_action_with_exception(request))
return response.get_key_id()
def list_key_versions(acs_client, key_id):
key_version_ids = []
page_number = "1"
page_size = "10"
while True:
request = ListKeyVersionsRequest.ListKeyVersionsRequest()
request.set_accept_format('JSON')
request.set_KeyId(key_id)
request.set_PageNumber(page_number)
request.set_PageSize(page_size)
response = ListKeyVersionsResponse(acs_client.do_action_with_exception(request))
key_version_ids[len(key_version_ids):] = response.get_key_version_ids()
if response.get_page_number() * 10 >= response.get_total_count():
break
page_number = str(response.get_page_number() + 1)
return key_version_ids
def describe_key_version(acs_client, key_id, key_version_id):
request = DescribeKeyVersionRequest.DescribeKeyVersionRequest()
request.set_accept_format('JSON')
request.set_KeyId(key_id)
request.set_KeyVersionId(key_version_id)
return DescribeKeyVersionResponse(acs_client.do_action_with_exception(request))
def create_key_version(acs_client, key_id):
request = CreateKeyVersionRequest.CreateKeyVersionRequest()
request.set_accept_format('JSON')
request.set_KeyId(key_id)
return CreateKeyVersionResponse(acs_client.do_action_with_exception(request))
def get_public_key(acs_client, key_id, key_version_id):
request = GetPublicKeyRequest.GetPublicKeyRequest()
request.set_accept_format('JSON')
request.set_KeyId(key_id)
request.set_KeyVersionId(key_version_id)
response = GetPublicKeyResponse(acs_client.do_action_with_exception(request))
return response.get_public_key()
def asymmetric_encrypt(acs_client, key_id, key_version_id, message, algorithm):
request = AsymmetricEncryptRequest.AsymmetricEncryptRequest()
request.set_accept_format('JSON')
    # the message must be base64-encoded
plain_text = base64.b64encode(message.encode('utf-8'))
request.set_KeyId(key_id)
request.set_KeyVersionId(key_version_id)
request.set_Plaintext(plain_text)
request.set_Algorithm(algorithm)
response = AsymmetricEncryptResponse(acs_client.do_action_with_exception(request))
    # the ciphertext must be base64-decoded
return base64.b64decode(response.get_cipher_text_blob())
def asymmetric_decrypt(acs_client, key_id, key_version_id, cipher_blob, algorithm):
request = AsymmetricDecryptRequest.AsymmetricDecryptRequest()
request.set_accept_format('JSON')
    # cipher_blob must be base64-encoded
cipher_text = base64.b64encode(cipher_blob)
request.set_KeyId(key_id)
request.set_KeyVersionId(key_version_id)
request.set_CiphertextBlob(cipher_text)
request.set_Algorithm(algorithm)
response = AsymmetricDecryptResponse(acs_client.do_action_with_exception(request))
    # the plaintext must be base64-decoded
return base64.b64decode(response.get_plain_text())
def asymmetric_sign(acs_client, key_id, key_version_id, message, algorithm):
request = AsymmetricSignRequest.AsymmetricSignRequest()
request.set_accept_format('JSON')
    # compute the message digest (SHA-256)
h = SHA256.new()
h.update(message.encode('utf-8'))
digest = base64.b64encode(h.digest())
request.set_KeyId(key_id)
request.set_KeyVersionId(key_version_id)
request.set_Digest(digest)
request.set_Algorithm(algorithm)
response = AsymmetricSignResponse(acs_client.do_action_with_exception(request))
    # the signature must be base64-decoded
return base64.b64decode(response.get_value())
def asymmetric_verify(acs_client, key_id, key_version_id, message, signature, algorithm):
request = AsymmetricVerifyRequest.AsymmetricVerifyRequest()
request.set_accept_format('JSON')
    # compute the message digest (SHA-256)
h = SHA256.new()
h.update(message.encode('utf-8'))
digest = base64.b64encode(h.digest())
value = base64.b64encode(signature)
request.set_KeyId(key_id)
request.set_KeyVersionId(key_version_id)
request.set_Digest(digest)
request.set_Value(value)
request.set_Algorithm(algorithm)
response = AsymmetricVerifyResponse(acs_client.do_action_with_exception(request))
return response.get_value()
def rsa_encrypt(acs_client, key_id, key_version_id, message, algorithm):
pub_key_pem = get_public_key(acs_client, key_id, key_version_id)
rsa_pub = RSA.importKey(pub_key_pem)
if algorithm == 'RSAES_OAEP_SHA_1':
cipher = PKCS1_OAEP.new(rsa_pub)
return cipher.encrypt(message.encode('utf-8'))
elif algorithm == 'RSAES_OAEP_SHA_256':
cipher = PKCS1_OAEP.new(key=rsa_pub, hashAlgo=SHA256)
return cipher.encrypt(message.encode('utf-8'))
else:
return ''
def rsa_verify(acs_client, key_id, key_version_id, message, signature, algorithm):
pub_key_pem = get_public_key(acs_client, key_id, key_version_id)
rsa_pub = RSA.importKey(pub_key_pem)
if algorithm == 'RSA_PSS_SHA_256':
try:
verifier = pss.new(rsa_pub)
verifier.verify(SHA256.new(message.encode('utf-8')), signature)
except (ValueError, TypeError):
return False
elif algorithm == 'RSA_PKCS1_SHA_256':
try:
verifier = pkcs1_15.new(rsa_pub)
verifier.verify(SHA256.new(message.encode('utf-8')), signature)
except (ValueError, TypeError):
return False
else:
return False
return True
def ecdsa_verify(acs_client, key_id, key_version_id, message, signature):
pub_key_pem = get_public_key(acs_client, key_id, key_version_id)
verifier = ecdsa.VerifyingKey.from_pem(pub_key_pem)
return verifier.verify(signature, message.encode('utf-8'), hashfunc=hashlib.sha256, sigdecode=sigdecode_der)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--ak', help='the access key id')
parser.add_argument('--as', help='the access key secret')
parser.add_argument('--region', default='cn-hangzhou', help='the region id')
args = vars(parser.parse_args())
client = AcsClient(args["ak"], args["as"], args["region"])
key_ids = list_keys(client)
for key_id in key_ids:
res = describe_key(client, key_id)
key_metadata = res.get_key_metadata()
key_id = key_metadata.get_key_id()
key_version_ids = list_key_versions(client, key_id)
if not key_metadata.get_key_spec() == 'Aliyun_AES_256':
public_key = get_public_key(client, key_id, key_version_ids[0])
print(public_key)
key_id = 'a8c6eb76-278c-4f88-801b-8fb56e4c3019'
key_version_id = '6f050e56-9b71-41db-8d48-5275855f1041'
    message = 'test message'  # translated from the original Chinese '测试消息'
cipher_blob = asymmetric_encrypt(client, key_id, key_version_id, message, 'RSAES_OAEP_SHA_256')
print(cipher_blob)
plain_text = asymmetric_decrypt(client, key_id, key_version_id, cipher_blob, 'RSAES_OAEP_SHA_256')
print(plain_text.decode())
cipher_text = rsa_encrypt(client, key_id, key_version_id, message, 'RSAES_OAEP_SHA_256')
print(cipher_text)
plain_text = asymmetric_decrypt(client, key_id, key_version_id, cipher_blob, 'RSAES_OAEP_SHA_256')
print(plain_text.decode())
key_id = 'bb974925-d7d2-48c3-b896-cb2a3f3f33bd'
key_version_id = 'd4229c1f-17ec-40df-bfe0-51667c6c78b6'
sign = asymmetric_sign(client, key_id, key_version_id, message, 'RSA_PKCS1_SHA_256')
print(sign)
value = asymmetric_verify(client, key_id, key_version_id, message, sign, 'RSA_PKCS1_SHA_256')
if value:
print('verify success.')
else:
print('verify failed.')
value = rsa_verify(client, key_id, key_version_id, message, sign, 'RSA_PKCS1_SHA_256')
if value:
print('verify success.')
else:
print('verify failed.')
key_id = '71032ff8-1803-426f-b5be-c57bdeee1080'
key_version_id = '529eb3e1-6ef5-4a47-bce4-4c86494ebc1c'
sign = asymmetric_sign(client, key_id, key_version_id, message, 'ECDSA_SHA_256')
print(sign)
value = ecdsa_verify(client, key_id, key_version_id, message, sign)
if value:
print('verify success.')
else:
print('verify failed.')
if __name__ == '__main__':
main()
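# Example invocation (hypothetical credentials; --ak/--as/--region are the
# flags defined in main() above):
#   python asymmetric.py --ak <ACCESS_KEY_ID> --as <ACCESS_KEY_SECRET> --region cn-hangzhou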
| 32.225102 | 112 | 0.678718 | 2,912 | 23,621 | 5.224245 | 0.085852 | 0.026293 | 0.042595 | 0.025242 | 0.637152 | 0.557287 | 0.513048 | 0.485111 | 0.470256 | 0.455531 | 0 | 0.019396 | 0.22298 | 23,621 | 732 | 113 | 32.269126 | 0.809469 | 0.010033 | 0 | 0.556569 | 0 | 0 | 0.082858 | 0.009264 | 0 | 0 | 0 | 0 | 0 | 1 | 0.195255 | false | 0 | 0.04562 | 0.096715 | 0.395985 | 0.023723 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d6017026544f420a111eccfab9031abf79be64d6 | 2,057 | py | Python | dataset_initial_exploration.py | diarmuidwhelan/python_project | 6964d72ab110a807a59093d4200177e0bf225597 | [
"Apache-2.0"
] | null | null | null | dataset_initial_exploration.py | diarmuidwhelan/python_project | 6964d72ab110a807a59093d4200177e0bf225597 | [
"Apache-2.0"
] | null | null | null | dataset_initial_exploration.py | diarmuidwhelan/python_project | 6964d72ab110a807a59093d4200177e0bf225597 | [
"Apache-2.0"
] | null | null | null | ###################################################################################################################
#GMIT Higher Diploma in Data Analytics
#Scripting and Programming
#Python Project
#Diarmuid Whelan, 2018-04-10
#Iris Dataset initial exploration
#Will provide summary stats and plots for the Iris Dataset
######################################################################################################################
import pandas
import numpy
import matplotlib.pyplot as pl
#Open the IRIS.CSV FILE
iris=numpy.genfromtxt('C:/Users/DELL PC/Documents/GMIT DATA ANALYTICS/Programming and Scripting/iris.csv',delimiter=',')
print(iris)
#Print the shape of the data
print(iris.data.shape)
#Iris data has 150 rows and 5 columns
#loop through each of first four columns using for loop
#Print mean,max,min and median of first 4 columns
for i in range (0,4):
column =iris[:,i]
meancolumn=numpy.mean(column)
print("Mean of column ", i+1," is:",meancolumn)
mediancolumn=numpy.median(column)
print("Median of column ", i+1," is:",mediancolumn)
maxcolumn=numpy.max(column)
print("Max of column ", i+1," is:",maxcolumn)
mincolumn=numpy.min(column)
print("Min of column ", i+1," is:",mincolumn)
#Plot each columns data
pl.hist(column)
pl.show()
#Use pandas to capture the categorical variables in column 5 and avoid the NaNs that genfromtxt produced for them
#Iris_data.csv includes column headers
dataset = pandas.read_csv('C:/Users/DELL PC/Documents/GMIT DATA ANALYTICS/Programming and Scripting/iris_data.csv')
print(dataset)
#Dataset formatted neater and includes the categorical variables
#Number of each species in each sample
print(dataset.groupby('species').size())
#Print a statistical summary about the dataset
print(dataset.describe())
#The means match those calculated above; the describe function also provides percentiles and standard deviations to summarise the distribution of the data
# box and whisker plots
dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)
pl.show()
| 40.333333 | 156 | 0.671852 | 280 | 2,057 | 4.925 | 0.435714 | 0.023205 | 0.026106 | 0.029007 | 0.129079 | 0.094271 | 0.094271 | 0.094271 | 0.094271 | 0.094271 | 0 | 0.012263 | 0.127856 | 2,057 | 50 | 157 | 41.14 | 0.75641 | 0.414195 | 0 | 0.083333 | 0 | 0 | 0.266527 | 0.068206 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d60174b1c29d306cf33605f0ff3e650f9be71eb2 | 784 | py | Python | example_usage/model_wrapping.py | micheltokic/stable_baselines_model_based_rl | 75bac906aeba69072878ceb15d9be459b1f436c3 | [
"Apache-2.0"
] | 1 | 2022-01-08T17:08:13.000Z | 2022-01-08T17:08:13.000Z | example_usage/model_wrapping.py | micheltokic/stable_baselines_model_based_rl | 75bac906aeba69072878ceb15d9be459b1f436c3 | [
"Apache-2.0"
] | 5 | 2021-09-15T18:14:48.000Z | 2021-09-19T16:17:51.000Z | example_usage/model_wrapping.py | micheltokic/stable_baselines_model_based_rl | 75bac906aeba69072878ceb15d9be459b1f436c3 | [
"Apache-2.0"
] | null | null | null | from stable_baselines_model_based_rl.utils.configuration import Configuration
from stable_baselines_model_based_rl.wrapper.wrapped_model_env import WrappedModelEnv
cfg_path = './sample_output/CartPole-v1/loss=0.0002335651806788519-lag=4.00-2021-08-14-13-15-36/config.yaml'
model_path = './sample_output/CartPole-v1/loss=0.0002335651806788519-lag=4.00-2021-08-14-13-15-36/model.h5'
cfg = Configuration(cfg_path)
env = WrappedModelEnv(model_path, config=cfg)
episodes = 100
for e in range(episodes):
print(f'NEXT EPISODE {e}:')
env.reset()
step = 0
while True:
step += 1
state, reward, done, info = env.step(env.action_space.sample())
print('Step', step, '---', state, reward, done, info)
if done:
break
print('Done')
| 29.037037 | 108 | 0.709184 | 114 | 784 | 4.72807 | 0.5 | 0.037106 | 0.070501 | 0.089054 | 0.374768 | 0.374768 | 0.25974 | 0.25974 | 0.25974 | 0.25974 | 0 | 0.124431 | 0.159439 | 784 | 26 | 109 | 30.153846 | 0.693475 | 0 | 0 | 0 | 0 | 0.111111 | 0.274235 | 0.23852 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d601de97a6ffd9f68c9eb7d019863b8a412710c4 | 1,447 | py | Python | tests/opytimizer/optimizers/test_ihs.py | macoldibelli/opytimizer | ca0574d520ecc17b1ac875bc6271d466c88d18ac | [
"MIT"
] | null | null | null | tests/opytimizer/optimizers/test_ihs.py | macoldibelli/opytimizer | ca0574d520ecc17b1ac875bc6271d466c88d18ac | [
"MIT"
] | null | null | null | tests/opytimizer/optimizers/test_ihs.py | macoldibelli/opytimizer | ca0574d520ecc17b1ac875bc6271d466c88d18ac | [
"MIT"
] | null | null | null | import sys
import numpy as np
import pytest
from opytimizer.core import function
from opytimizer.optimizers import ihs
from opytimizer.spaces import search
def test_ihs_hyperparams():
hyperparams = {
'HMCR': 0.5,
'PAR_min': 0.5,
'PAR_max': 1,
'bw_min': 2,
'bw_max': 5
}
new_ihs = ihs.IHS(hyperparams=hyperparams)
assert new_ihs.HMCR == 0.5
assert new_ihs.PAR_min == 0.5
assert new_ihs.PAR_max == 1
assert new_ihs.bw_min == 2
assert new_ihs.bw_max == 5
def test_ihs_hyperparams_setter():
new_ihs = ihs.IHS()
new_ihs.HMCR = 0.7
assert new_ihs.HMCR == 0.7
new_ihs.PAR_min = 0.1
assert new_ihs.PAR_min == 0.1
new_ihs.PAR_max = 0.5
assert new_ihs.PAR_max == 0.5
new_ihs.bw_min = 1
assert new_ihs.bw_min == 1
new_ihs.bw_max = 10
assert new_ihs.bw_max == 10
def test_ihs_rebuild():
new_ihs = ihs.IHS()
assert new_ihs.built == True
def test_ihs_run():
def square(x):
return np.sum(x**2)
new_function = function.Function(pointer=square)
new_ihs = ihs.IHS()
search_space = search.SearchSpace(n_agents=2, n_iterations=10,
n_variables=2, lower_bound=[0, 0],
upper_bound=[10, 10])
history = new_ihs.run(search_space, new_function)
assert len(history.agents) > 0
assert len(history.best_agent) > 0
| 19.554054 | 72 | 0.615757 | 224 | 1,447 | 3.727679 | 0.236607 | 0.150898 | 0.158084 | 0.057485 | 0.300599 | 0.172455 | 0.047904 | 0 | 0 | 0 | 0 | 0.043145 | 0.279198 | 1,447 | 73 | 73 | 19.821918 | 0.75743 | 0 | 0 | 0.065217 | 0 | 0 | 0.020733 | 0 | 0 | 0 | 0 | 0 | 0.282609 | 1 | 0.108696 | false | 0 | 0.130435 | 0.021739 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d602616d1834b85919025d38d89d7171948156d1 | 2,067 | py | Python | images/pipelines.py | heranly/imageSpider | 2bf802e6401b737a24874a740d50c1f75fe985ce | [
"MIT"
] | 1 | 2020-04-23T11:52:14.000Z | 2020-04-23T11:52:14.000Z | images/pipelines.py | heranly/imageSpider | 2bf802e6401b737a24874a740d50c1f75fe985ce | [
"MIT"
] | null | null | null | images/pipelines.py | heranly/imageSpider | 2bf802e6401b737a24874a740d50c1f75fe985ce | [
"MIT"
] | 1 | 2020-06-09T03:32:48.000Z | 2020-06-09T03:32:48.000Z | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import hashlib
# from bitarray.test_bitarray import to_bytes
from scrapy.pipelines.images import ImagesPipeline
from scrapy.exceptions import DropItem
from scrapy import Request
class MyImagesPipeline(ImagesPipeline):
def get_media_requests(self, item, info):
for image_url in item['image_urls']:
            # refer = item['refer']  # some sites use hotlink protection, so a Referer header indicating the origin is needed
yield Request(image_url,meta={'keyword':item['keyword']})
def file_path(self, request, response=None, info=None):
        # override this method to change the download directory
def _warn():
from scrapy.exceptions import ScrapyDeprecationWarning
import warnings
warnings.warn('ImagesPipeline.image_key(url) and file_key(url) methods are deprecated, '
'please use file_path(request, response=None, info=None) instead',
category=ScrapyDeprecationWarning, stacklevel=1)
# check if called from image_key or file_key with url as first argument
if not isinstance(request, Request):
_warn()
url = request
else:
url = request.url
# detect if file_key() or image_key() methods have been overridden
if not hasattr(self.file_key, '_base'):
_warn()
return self.file_key(url)
elif not hasattr(self.image_key, '_base'):
_warn()
return self.image_key(url)
## end of deprecation warning block
keyword = request.meta['keyword']
image_guid = hashlib.sha1(url.encode()).hexdigest()
return '%s/%s.jpg' % (keyword,image_guid)
def item_completed(self, results, item, info):
image_paths = [x['path'] for ok, x in results if ok]
if not image_paths:
raise DropItem("Item contains no images")
item['image_paths'] = image_paths
return item
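# To enable this pipeline (a sketch; the module path below assumes the default
# project layout and should be adjusted in your Scrapy settings.py):
# ITEM_PIPELINES = {'images.pipelines.MyImagesPipeline': 300}
# IMAGES_STORE = '/path/to/downloaded/images'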
| 39 | 100 | 0.641993 | 252 | 2,067 | 5.134921 | 0.444444 | 0.030912 | 0.030912 | 0.040185 | 0.074189 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001975 | 0.265119 | 2,067 | 52 | 101 | 39.75 | 0.849901 | 0.22061 | 0 | 0.085714 | 0 | 0 | 0.139549 | 0.018148 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.171429 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d6029c98ea0c42de189e1a9444390288eb1679e6 | 1,432 | py | Python | tests/fixtures/api.py | siilats/meltano | 404605c83f441c3fc2b729e26416c6caa8b0ed0b | [
"MIT"
] | 122 | 2021-06-21T17:30:29.000Z | 2022-03-25T06:21:38.000Z | tests/fixtures/api.py | siilats/meltano | 404605c83f441c3fc2b729e26416c6caa8b0ed0b | [
"MIT"
] | null | null | null | tests/fixtures/api.py | siilats/meltano | 404605c83f441c3fc2b729e26416c6caa8b0ed0b | [
"MIT"
] | 21 | 2021-06-22T10:08:15.000Z | 2022-03-18T08:57:02.000Z | from contextlib import contextmanager
from unittest import mock
import meltano.api.app
import pytest
from flask import request_started
from flask_security.utils import login_user, logout_user
from meltano.api.models import db as _db
from meltano.api.security.identity import create_dev_user
from meltano.core.migration_service import MigrationService
from sqlalchemy import MetaData
@pytest.fixture
def impersonate(app):
@contextmanager
def factory(user):
def push(sender):
if user:
login_user(user)
else:
logout_user()
with request_started.connected_to(push):
yield
return factory
@pytest.fixture(scope="class")
def app(create_app):
return create_app()
@pytest.fixture(scope="class")
def create_app(request, project, vacuum_db):
def _factory(**kwargs):
config = {"TESTING": True, "LOGIN_DISABLED": False, "ENV": "test", **kwargs}
app = meltano.api.app.create_app(config)
# let's push an application context so the
# `current_app` is ready in each test
ctx = app.app_context()
ctx.push()
# let's make sure to pop the context at the end
request.addfinalizer(lambda: ctx.pop())
return app
return _factory
@pytest.fixture()
def api(app):
return app.test_client()
@pytest.fixture()
def seed_users(app, session):
create_dev_user()
| 22.730159 | 84 | 0.678073 | 186 | 1,432 | 5.080645 | 0.424731 | 0.068783 | 0.050794 | 0.055026 | 0.055026 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.232542 | 1,432 | 62 | 85 | 23.096774 | 0.859873 | 0.085196 | 0 | 0.097561 | 0 | 0 | 0.029096 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.195122 | false | 0 | 0.243902 | 0.04878 | 0.560976 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d606af08fdc496e4fe99e4d2b023dc88e875f64d | 4,432 | py | Python | Web/digital/app/views.py | inishchith/offgrid | 90c0100bf06ab025184e60357dd77f83c2edcb45 | [
"MIT"
] | 3 | 2018-10-17T11:12:28.000Z | 2018-11-29T05:17:25.000Z | Web/digital/app/views.py | inishchith/offgrid | 90c0100bf06ab025184e60357dd77f83c2edcb45 | [
"MIT"
] | null | null | null | Web/digital/app/views.py | inishchith/offgrid | 90c0100bf06ab025184e60357dd77f83c2edcb45 | [
"MIT"
] | 5 | 2018-10-16T15:40:40.000Z | 2019-01-12T22:14:11.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render,redirect
from django.http import HttpResponse, HttpResponseRedirect
from . import queries
import requests
import googlemaps
from .config import *
# Create your views here.
def index(request):
return render(request,'app/map.html')
def appindex(request):
return render(request,'app/map.html')
def map(request):
    # FSWMCH = food, shelter, water, medicine, cloth, hygiene (the six checkboxes handled below)
if(request.method == 'POST'):
# food = request.POST.get('cbx1')
# shelter = request.POST.get('cbx2')
# water = request.POST.get('cbx3')
# medicine = request.POST.get('cbx4')
# cloth = request.POST.get('cbx5')
# hygenie = request.POST.get('cbx6')
fin = []
for i in range(1,7):
temp = request.POST.get('cbx'+str(i))
if temp == 'on':
fin.append('1')
else:
fin.append('0')
print(fin)
count=1
result = queries.find_specific_donors(fin)
print(result)
# result = [1,2,3,4]
return render(request,'app/map.html',{"result": result, "fin":fin})
else:
donors = queries.find_donors()
# print(donors['id'])
list = []
for i in donors:
list.append(donors[i])
return render(request,'app/map.html',{'list':list})
def retreive_area(lat,lon):
print("Retreiving data")
gmaps = googlemaps.Client(key="AIzaSyAwAdfRMQoKv8Tmc4iD2KsDCXQfWoxVJkk")
reverse_geocode_result = gmaps.reverse_geocode((lat, lon))
print(reverse_geocode_result)
return 1
#key = key.upper()
# print(loc,key)
# query_result = google_places.nearby_search(location=loc, keyword=key, radius=2000, types=[type_map[key]])
# places_data = [] # name,number,addr
# for place in query_result.places:
# # print(place.place_id)
# x = place.get_details()
# places_data.append([place.name,place.local_phone_number,place.vicinity])
# if len(places_data):
# return places_data[0]
def calamity(request):
sos = queries.get_sos()
# r = requests.get("https://api.ipdata.co?api-key="+GLapikey).json()
#lon,lat = r["longitude"],r["latitude"]
#details = retreive_area(lat,lon)
#print(details)
return render(request,'app/calamity.html',{'list':sos})
def provisions(request):
temp = queries.get_issue()
print("//////////////")
print(temp)
return render(request,'app/provisions.html',{'src': temp})
def need(request):
if(request.method == 'POST'):
fin = []
for i in range(1,7):
temp = request.POST.get('cb'+str(i))
if temp == 'on':
fin.append('1')
else:
fin.append('0')
print(fin)
index = queries.number_of_req("donor")
rd = 0
fswmch = "".join(fin)
desc = request.POST.get('descr')
address = request.POST.get('addr')
name = request.POST.get('name')
number = request.POST.get('phn')
queries.add_donors(index,rd,fswmch,desc,address,name,number)
return render(request,'app/map.html')
def issue(request):
if(request.method == "POST"):
index = queries.number_of_req("issues")
name = request.POST.get("name")
descr = request.POST.get("descr")
date = request.POST.get("date")
dept = request.POST.get("dept")
print(name,descr,date,dept)
queries.add_issue(descr,name,dept,date)
return render(request, 'app/map.html')
# def adddonors(request):
# if(request.method == 'POST'):
# name = request.POST.get('name')
# dob = request.POST.get('dob')
# state = request.POST.get('state')
# district = request.POST.get('district')
# block = request.POST.get('block')
# pincode = request.POST.get('pincode')
# phone = request.POST.get('phone')
# temp = request.POST.getlist('checks[]')
# #data = Crop.objects.filter(region=pincode)[0]
# #x = Farmer(name=name,dob=dob,state=state,district=district,block=block,pincode=pincode,phone=phone,crops=crops,data=data)
# #x.save()
# #u = reverse('app:dashboard',kwargs={'username':str(request.user)})
# #print(str(request.user))
# #return HttpResponseRedirect(u)
# else:
# #return render(request,'app/farmer.html',{})
| 34.092308 | 132 | 0.593637 | 540 | 4,432 | 4.805556 | 0.290741 | 0.101734 | 0.124085 | 0.076301 | 0.228131 | 0.137187 | 0.114836 | 0.090173 | 0.060116 | 0.060116 | 0 | 0.009281 | 0.24639 | 4,432 | 129 | 133 | 34.356589 | 0.767665 | 0.372969 | 0 | 0.283784 | 0 | 0 | 0.097212 | 0.014307 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0.094595 | 0.027027 | 0.324324 | 0.108108 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d607865fe1f92b56986d3ae6847a22b22564a535 | 13,683 | py | Python | zeenode/zeenode/cogs/main.py | flowitoo/selfbot | 8de0c84391bbc08546a7a8b6c657438405da675c | [
"MIT"
] | 1 | 2021-04-26T21:32:14.000Z | 2021-04-26T21:32:14.000Z | zeenode/zeenode/cogs/main.py | flowitoo/selfbot | 8de0c84391bbc08546a7a8b6c657438405da675c | [
"MIT"
] | null | null | null | zeenode/zeenode/cogs/main.py | flowitoo/selfbot | 8de0c84391bbc08546a7a8b6c657438405da675c | [
"MIT"
] | null | null | null | import discord, requests, pyfiglet, datetime, aiohttp, urllib3, asyncio
import io
from discord.ext import commands as zeenode
from zeenode.load import token
from zeenode.config import prefix
bot = zeenode.Bot(command_prefix=prefix, self_bot=True)
bot.remove_command('help')
Output = "[ERROR] - "
class Main(zeenode.Cog):
def __init__(self, bot):
self.bot = bot
@zeenode.command()
async def ascii(self, ctx, args):
await ctx.message.delete()
text = pyfiglet.figlet_format(args)
await ctx.send(f'```{text}```')
@zeenode.command()
async def hypesquad(self, ctx, house):
await ctx.message.delete()
request = requests.session()
headers = {
'Authorization': token,
'Content-Type': 'application/json'
}
        payload = None  # replaces the original `global payload`, which could leave payload undefined or stale for an unknown house
        if house == "bravery":
            payload = {'house_id': 1}
        elif house == "brilliance":
            payload = {'house_id': 2}
        elif house == "balance":
            payload = {'house_id': 3}
try:
requests.post('https://discordapp.com/api/v6/hypesquad/online', headers=headers, json=payload)
print(f"{Fore.GREEN} Succesfully set your HypeSquad house to {house}!")
except:
print(f"{Fore.RED}{Output} {Fore.YELLOW}Failed to set your HypeSquad house to {house}.")
@zeenode.command()
async def embed(self, ctx, *, description):
await ctx.message.delete()
embed = discord.Embed(description=description, color=0x0000)
await ctx.send(embed=embed)
@zeenode.command()
async def help(self, ctx, category=None):
await ctx.message.delete()
if category is None:
embed = discord.Embed(color=0x0000, timestamp=ctx.message.created_at)
embed.set_author(name="Zeenode Self-Bot | Prefix: " + str(bot.command_prefix),
icon_url="https://cdn.discordapp.com/attachments/796868392095186976/812453623309008927/zeenode_logo.png")
embed.set_image(url="https://cdn.discordapp.com/attachments/796868392095186976/813534281405825075/zeenode.gif")
embed.add_field(name="`\uD83D\uDCF1 - Activity`", value="Shows all **activity** commands.", inline=False)
embed.add_field(name="`\uD83D\uDCB0 - Currency`", value="Shows all **currency** commands.", inline=False)
embed.add_field(name="`\uD83D\uDC40 - Emoticons`", value="Shows all **emoticons** commands.", inline=False)
embed.add_field(name="`\uD83D\uDE02 - Fun`", value="Shows all **fun** commands.", inline=False)
embed.add_field(name="`\uD83D\uDD25 - Main`", value="Shows all **main** commands.", inline=False)
embed.add_field(name="`\uD83D\uDEE1\uFE0F - Mass`", value="Shows all **mass** commands.", inline=False)
embed.add_field(name="`\uD83D\uDD1E - Nsfw`", value="Shows all **nsfw** commands.", inline=False)
embed.add_field(name="`\uD83D\uDCC3 - TextEncoding`", value="Shows all **text encoding** commands.", inline=False)
await ctx.send(embed=embed)
elif str(category).lower() == "activity":
embed = discord.Embed(color=0x0000, timestamp=ctx.message.created_at)
embed.set_author(name="Zeenode Self-Bot | Prefix: " + str(bot.command_prefix),
icon_url="https://cdn.discordapp.com/attachments/796868392095186976/812453623309008927/zeenode_logo.png")
embed.set_image(url="https://raw.githubusercontent.com/zeenode/selfbot-site/master/img/banner.gif")
embed.description = f"`\uD83D\uDCF1 - Activity Commands`\n`> listening <text>` - Shows listening status.\n`> playing <text>` - Shows playing status.\n`> watching <text>` - Shows watching status.\n`> streaming <text>` - Shows streaming status.\n`> stopactivity` - Stops activity."
await ctx.send(embed=embed)
elif str(category).lower() == "currency":
embed = discord.Embed(color=0x0000, timestamp=ctx.message.created_at)
embed.set_author(name="Zeenode Self-Bot | Prefix: " + str(bot.command_prefix),
icon_url="https://cdn.discordapp.com/attachments/796868392095186976/812453623309008927/zeenode_logo.png")
embed.set_image(url="https://cdn.discordapp.com/attachments/796868392095186976/813534281405825075/zeenode.gif")
embed.description = f"`\uD83D\uDCB0 - Currency Commands`\n`> btc` - Shows Bitcoin price. \n`> doge` - Shows Doge price.\n`> eth` - Shows Ethereum price.\n`> xmr` - Shows Monero price.\n`> xrp` - Shows Ripple price."
await ctx.send(embed=embed)
elif str(category).lower() == "emoticons":
embed = discord.Embed(color=0x0000, timestamp=ctx.message.created_at)
embed.set_author(name="Zeenode Self-Bot | Prefix: " + str(bot.command_prefix),
icon_url="https://cdn.discordapp.com/attachments/796868392095186976/812453623309008927/zeenode_logo.png")
embed.set_image(url="https://cdn.discordapp.com/attachments/796868392095186976/813534281405825075/zeenode.gif")
embed.description = f"`\uD83D\uDC40 - Emoticons Commands`\n`> fuckyou` - Sends fuckyou emoticon. \n`> lenny` - Sends lenny emoticon.\n`> what` - Sends what emoticon.\n`> bear` - Sends bear emoticon.\n`> worried` - Sends worried emoticon.\n`> ak47` - Sends ak47 emoticon.\n`> awp` - Sends awp emoticon.\n`> lmg` - Sends lmg emoticon.\n`> sword` - Sends sword emoticon.\n`> love` - Sends love emoticon.\n`> goodnight` - Sends goodnight emoticon.\n`> smile` - Sends smile emoticon."
await ctx.send(embed=embed)
elif str(category).lower() == "fun":
embed = discord.Embed(color=0x0000, timestamp=ctx.message.created_at)
embed.set_author(name="Zeenode Self-Bot | Prefix: " + str(bot.command_prefix),
icon_url="https://cdn.discordapp.com/attachments/796868392095186976/812453623309008927/zeenode_logo.png")
embed.set_image(url="https://cdn.discordapp.com/attachments/796868392095186976/813534281405825075/zeenode.gif")
embed.description = f"`\uD83D\uDE02 - Fun Commands`\n`> cat` - Sends a random cat image.\n`> dog` - Sends a random dog image.\n`> panda` - Sends a random panda image.\n`> dick <@user>` - Shows user dick size.\n`> hug <@user>` - Sends a hug to user.\n`> kiss <@user>` - Sends a kiss to user.\n`> slap <@user>` - Sends a slap to user.\n`> meme` - Sends a random meme.\n`> nitro` - Sends a nitro."
await ctx.send(embed=embed)
elif str(category).lower() == "main":
embed = discord.Embed(color=0x0000, timestamp=ctx.message.created_at)
embed.set_author(name="Zeenode Self-Bot | Prefix: " + str(bot.command_prefix),
icon_url="https://cdn.discordapp.com/attachments/796868392095186976/812453623309008927/zeenode_logo.png")
embed.set_image(url="https://cdn.discordapp.com/attachments/796868392095186976/812744896439910450/zeenode_banner.gif")
embed.description = f"`\uD83D\uDD25 - Main Commands`\n`> ascii <message>` - Sends message as ascii art. \n`> embed <message>` - Sends embed message.\n`> av <@user>` - Sends your avatar in the chat.\n`> guildicon` - Shows server (guild) icon.\n`> serverinfo` - Shows server info.\n`> whois <@user>` - Sends info about user.\n`> hypesquad <house>` - Allows you to change your hypesquad house/badge.\n`> purge <number of messages>` - Deletes messages.\n`> suggest <question>` - Sends question with embed leaving thumbsup & thumbsdown react."
await ctx.send(embed=embed)
elif str(category).lower() == "mass":
embed = discord.Embed(color=0x0000, timestamp=ctx.message.created_at)
embed.set_author(name="Zeenode Self-Bot | Prefix: " + str(bot.command_prefix),
icon_url="https://cdn.discordapp.com/attachments/796868392095186976/812453623309008927/zeenode_logo.png")
embed.set_image(url="https://cdn.discordapp.com/attachments/796868392095186976/813534281405825075/zeenode.gif")
embed.description = f"`\uD83D\uDEE1\uFE0F - Mass Commands`\n`> massreact <emoji>` - Reacts to last 20 messages with emojis.\n`> spam <number of messages> <message> ` - Spams messages."
await ctx.send(embed=embed)
elif str(category).lower() == "nsfw":
embed = discord.Embed(color=0x0000, timestamp=ctx.message.created_at)
embed.set_author(name="Zeenode Self-Bot | Prefix: " + str(bot.command_prefix),
icon_url="https://cdn.discordapp.com/attachments/796868392095186976/812453623309008927/zeenode_logo.png")
embed.set_image(url="https://cdn.discordapp.com/attachments/796868392095186976/813534281405825075/zeenode.gif")
embed.description = f"`\uD83D\uDD1E - Nsfw Commands`\n`> anal <user>` - Sends nsfw anime content.\n`> blowjob <user>` - Sends nsfw anime content.\n`> boobs <user>` - Sends nsfw anime content.\n`> hentai <user>` - Sends hentai (anime porn)."
await ctx.send(embed=embed)
elif str(category).lower() == "textencoding":
embed = discord.Embed(color=0x0000, timestamp=ctx.message.created_at)
embed.set_author(name="Zeenode Self-Bot | Prefix: " + str(bot.command_prefix),
icon_url="https://cdn.discordapp.com/attachments/796868392095186976/812453623309008927/zeenode_logo.png")
embed.set_image(url="https://cdn.discordapp.com/attachments/796868392095186976/813534281405825075/zeenode.gif")
embed.description = f"`\uD83D\uDCC3 - Text Encoding Commands`\n`> encode_base64 <word/message>` - Encodes text with Base64.\n`> encode_leet <word/message>` - Encodes text with leet speak.\n`> encode_md5 <word/message>` - Encodes text with MD5 hash.\n`> encode_sha1 <word/message>` - Encodes text with Sha1.\n`> encode_sha224 <word/message>` - Encodes text wish SHA224.\n`> encode_sha384 <word/message>` - Encodes text with Sha384.\n`> encode_sha251 <word/message>` - Encodes text with Sha512."
await ctx.send(embed=embed)
@zeenode.command(aliases=["suggestion"])
async def suggest(self, ctx, *, suggestion):
await ctx.message.delete()
embed = discord.Embed(title="Suggestion:", color=0x0000, description=suggestion)
embed.set_thumbnail(url="")
msg = await ctx.send(embed=embed)
await msg.add_reaction('\U0001F44D')
await msg.add_reaction('\U0001F44E')
@zeenode.command(aliases=['pfp', 'avatar'])
async def av(self, ctx, *, user: discord.User = None):
await ctx.message.delete()
format = "gif"
user = user or ctx.author
if user.is_avatar_animated() != True:
format = "png"
avatar = user.avatar_url_as(format=format if format != "gif" else None)
async with aiohttp.ClientSession() as session:
async with session.get(str(avatar)) as resp:
image = await resp.read()
with io.BytesIO(image) as file:
await ctx.send(file=discord.File(file, f"Avatar.{format}"))
@zeenode.command(aliases=["guildinfo"])
async def serverinfo(self,ctx):
await ctx.message.delete()
date_format = "%a, %d %b %Y %I:%M %p"
embed = discord.Embed(title=f"Server Info of {ctx.guild.name}:",
description=f"{len(ctx.guild.members)} Members\n {len(ctx.guild.roles)} Roles\n {len(ctx.guild.text_channels)} Text-Channels\n {len(ctx.guild.voice_channels)} Voice-Channels\n {len(ctx.guild.categories)} Categories",
timestamp=datetime.datetime.utcnow(), color=0x0000)
embed.add_field(name="Server created at", value=f"{ctx.guild.created_at.strftime(date_format)}")
embed.add_field(name="Server Owner", value=f"{ctx.guild.owner}")
embed.add_field(name="Server Region", value=f"{ctx.guild.region}")
embed.add_field(name="Server ID", value=f"{ctx.guild.id}")
embed.set_thumbnail(url=f"{ctx.guild.icon_url}")
await ctx.send(embed=embed)
@zeenode.command()
async def guildicon(self, ctx):
await ctx.message.delete()
embed = discord.Embed(color=0x0000)
embed.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)
embed.set_image(url=ctx.guild.icon_url)
await ctx.send(embed=embed)
@zeenode.command()
async def whois(self, ctx, *, user: discord.User = None):
await ctx.message.delete()
if user is None:
user = ctx.author
date_format = "%a, %d %b %Y %I:%M %p"
em = discord.Embed(description=user.mention)
em.set_author(name=str(user), icon_url=user.avatar_url)
em.set_thumbnail(url=user.avatar_url)
em.add_field(name="Registered", value=user.created_at.strftime(date_format))
return await ctx.send(embed=em)
@zeenode.command()
async def purge(self, ctx, amount: int):
await ctx.message.delete()
async for message in ctx.message.channel.history(limit=amount).filter(lambda m: m.author == self.bot.user).map(lambda m: m):
try:
await message.delete()
except:
pass
def setup(bot):
bot.add_cog(Main(bot))
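# --- Illustrative only: how this cog would be wired into a running self-bot.
# The extension path and the pre-2.0 discord.py `bot=False` login flag are
# assumptions, not code from this repository.
# if __name__ == '__main__':
#     bot.load_extension('zeenode.cogs.main')
#     bot.run(token, bot=False)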
| 66.746341 | 551 | 0.636337 | 1,665 | 13,683 | 5.160961 | 0.176577 | 0.024206 | 0.021762 | 0.041545 | 0.520424 | 0.468172 | 0.437915 | 0.420459 | 0.387059 | 0.33888 | 0 | 0.073746 | 0.225024 | 13,683 | 204 | 552 | 67.073529 | 0.736609 | 0 | 0 | 0.392045 | 0 | 0.0625 | 0.432302 | 0.013206 | 0 | 0 | 0.005787 | 0 | 0 | 1 | 0.011364 | false | 0.005682 | 0.028409 | 0 | 0.051136 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d608eeced9ea4fb133349f54be3a4852652eadaf | 828 | py | Python | Selenium/tests_homepage_information.py | wonjoonSeol/ScienceScape | 8d8a3cb76193b6f85b7a2a6c7219e249237d64c8 | [
"BSD-3-Clause"
] | 5 | 2018-02-14T21:11:06.000Z | 2020-02-23T14:53:11.000Z | Selenium/tests_homepage_information.py | wonjoonSeol/ScienceScape | 8d8a3cb76193b6f85b7a2a6c7219e249237d64c8 | [
"BSD-3-Clause"
] | 106 | 2018-02-09T00:31:05.000Z | 2018-03-29T07:28:34.000Z | Selenium/tests_homepage_information.py | wonjoonSeol/ScienceScape | 8d8a3cb76193b6f85b7a2a6c7219e249237d64c8 | [
"BSD-3-Clause"
] | 6 | 2018-02-23T17:48:03.000Z | 2020-05-14T13:39:36.000Z | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from django.test import TestCase


class TestHomepageInformation(TestCase):
    def test_homepage_shows_correct_information(self):
        browser_driver = webdriver.Chrome()
        browser_driver.get("http://127.0.0.1:8000/")
        self.assertIn("ScienceScape", browser_driver.page_source)
        self.assertIn("ScienceScapeS - Scientometrics, made easy", browser_driver.title)
        self.assertIn("Example Graph", browser_driver.page_source)
        browser_driver.quit()

    def test_user_sees_login_button(self):
        browser_driver2 = webdriver.Chrome()
        browser_driver2.get("http://127.0.0.1:8000/")
        page_source = browser_driver2.page_source
        self.assertIn("Log In", page_source)
        browser_driver2.quit()
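# Assumptions these tests rely on (not stated in the file): a dev server is
# already answering at http://127.0.0.1:8000/ and chromedriver is on PATH.
# A typical way to run them once that holds:
#   python manage.py test  # exact test target depends on the project layout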
| 39.428571 | 88 | 0.724638 | 101 | 828 | 5.712871 | 0.445545 | 0.135182 | 0.088388 | 0.038128 | 0.058925 | 0.058925 | 0.058925 | 0 | 0 | 0 | 0 | 0.035088 | 0.173913 | 828 | 20 | 89 | 41.4 | 0.80848 | 0 | 0 | 0 | 0 | 0 | 0.140097 | 0 | 0 | 0 | 0 | 0 | 0.235294 | 1 | 0.117647 | false | 0 | 0.176471 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d6098e88a068d8b50569303641093b4d642d476a | 1,240 | py | Python | tests/test_default.py | krlsdu/ansible-sdkman | 31473737d0e46858932b1386556cbda85478dbc6 | [
"Apache-2.0"
] | null | null | null | tests/test_default.py | krlsdu/ansible-sdkman | 31473737d0e46858932b1386556cbda85478dbc6 | [
"Apache-2.0"
] | null | null | null | tests/test_default.py | krlsdu/ansible-sdkman | 31473737d0e46858932b1386556cbda85478dbc6 | [
"Apache-2.0"
] | null | null | null | sdkman_dir = '/usr/local/sdkman'
def script_wrap(cmds):
    # Source sdkman-init.sh first so `sdk` and the SDK-managed tools are
    # available inside the non-interactive shell.
    sdk_init_tmpl = 'export SDKMAN_DIR={0} && source {0}/bin/sdkman-init.sh'
    sdk_init = sdk_init_tmpl.format(sdkman_dir)
    result_cmds = [sdk_init] + cmds
    return "/bin/bash -c '{0}'".format('; '.join(result_cmds))


def check_run_for_rc_and_result(cmds, expected, host, check_stderr=False):
    result = host.run(script_wrap(cmds))
    assert result.rc == 0
    if check_stderr:
        assert result.stderr.find(expected) != -1
    else:
        assert result.stdout.find(expected) != -1


def test_config_file(host):
    f = host.file(sdkman_dir + '/etc/config')
    setup = host.ansible("setup")
    assert f.exists
    assert f.is_file
    assert f.mode == 0o644
    assert f.user == setup['ansible_facts']['ansible_user_id']
    assert f.gid == setup['ansible_facts']['ansible_user_gid']
    assert f.contains('sdkman_auto_answer=true')


def test_gradle_installed(host):
    cmds = ['gradle --version']
    expected = 'Gradle 4.6'
    check_run_for_rc_and_result(cmds, expected, host)


def test_other_gradle_installed(host):
    cmds = ['sdk use gradle 3.5.1', 'gradle --version']
    expected = 'Gradle 3.5.1'
    check_run_for_rc_and_result(cmds, expected, host)
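# The `host` argument used above is the pytest-testinfra fixture. A typical
# invocation (the target host is an assumption, not part of the role):
#   pytest --connection=ssh --hosts=user@target tests/test_default.py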
| 30.243902 | 76 | 0.682258 | 185 | 1,240 | 4.318919 | 0.351351 | 0.052566 | 0.041302 | 0.048811 | 0.212766 | 0.142678 | 0.142678 | 0.142678 | 0.142678 | 0 | 0 | 0.017647 | 0.177419 | 1,240 | 40 | 77 | 31 | 0.765686 | 0 | 0 | 0.066667 | 0 | 0 | 0.210484 | 0.03629 | 0 | 0 | 0 | 0 | 0.3 | 1 | 0.166667 | false | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d609e197017d18ff638787a76403b554be370344 | 2,102 | py | Python | research_scripts/models/metrics.py | mikeyo89/job-scraper | 2cbebc377d34afe9efd740a94ef6c7b2460f9598 | [
"MIT"
] | 1 | 2021-12-27T23:53:28.000Z | 2021-12-27T23:53:28.000Z | research_scripts/models/metrics.py | mikeyo89/job-scraper | 2cbebc377d34afe9efd740a94ef6c7b2460f9598 | [
"MIT"
] | null | null | null | research_scripts/models/metrics.py | mikeyo89/job-scraper | 2cbebc377d34afe9efd740a94ef6c7b2460f9598 | [
"MIT"
] | null | null | null | # Metrics dataset consisting of vectors that will be searched for in job postings.
# Programming languages: each entry is [keyword (or list of aliases), matched flag].
pl = [
    ['python', False],
    ['c#', False],
    ['c', False],
    ['c++', False],
    ['ruby', False],
    ['java', False],
    [['javascript', 'js'], False],
    [['html', 'html5'], False],
    ['css', False],
    ['sql', False],
    ['r', False],
    ['assembly', False],
    ['swift', False],
    ['pascal', False],
    [['objective-c', 'objectivec'], False],
    ['php', False],
    [['go', 'golang'], False],
    ['perl', False],
    ['f#', False],
    ['scala', False],
    ['apex', False],
    ['kotlin', False],
    [['typescript', 'ts'], False]
]
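# --- Illustrative only: one way these [pattern, flag] vectors could be matched
# against a posting. The whitespace tokenization is an assumption; the real
# scraper's matching logic is not part of this file.
def match_metrics(metrics, posting_text):
    words = set(posting_text.lower().split())
    for entry in metrics:
        aliases = entry[0] if isinstance(entry[0], list) else [entry[0]]
        # Flip the flag to True when any alias appears in the posting.
        entry[1] = any(alias in words for alias in aliases)
    return metrics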
# Frameworks, libraries, and tools, in the same [keyword(s), matched flag] layout.
f = [
    [['dotnet', '.net', 'asp.net', 'aspnet', 'net'], False],
    [['react', 'reactjs', 'react.js'], False],
    [['angular', 'angular.js', 'angularjs'], False],
    ['django', False],
    ['splunk', False],
    ['spring', False],
    ['rails', False],
    ['redux', False],
    [['express', 'expressjs', 'express.js'], False],
    [['vue', 'vuejs', 'vue.js'], False],
    ['flask', False],
    ['laravel', False],
    ['symfony', False],
    [['gatsby', 'gatsbyjs', 'gatsby.js'], False],
    ['sinatra', False],
    ['materialize', False],
    ['bootstrap', False],
    ['tailwind', False],
    ['ionic', False],
    ['xamarin', False],
    ['phonegap', False],
    ['native', False],
    ['corona', False],
    ['jquery', False],
    ['flutter', False],
    ['pytorch', False],
    ['pandas', False],
    [['sci-kit', 'scikit'], False],
    [['ml.net', 'mlnet'], False],
    ['chainer', False],
    ['pytest', False],
    ['jest', False],
    ['mocha', False],
    ['jasmine', False],
    ['cypress', False],
    ['scrapy', False],
    [['node', 'nodejs', 'npm'], False],
    [['git', 'github'], False],
    [['api', 'apis'], False],
    [['sdk', 'sdks'], False],
    [['postgres', 'postgresql', 'psql'], False],
    ['mysql', False],
    ['docker', False],
    ['jenkins', False],
    ['jira', False],
    [['rally', 'rallydev'], False],
    ['azure', False],
    ['kubernetes', False],
    ['swagger', False],
    [['scrum', 'agile'], False]
] | 26.275 | 87 | 0.486679 | 201 | 2,102 | 5.089552 | 0.557214 | 0.034213 | 0.032258 | 0.02346 | 0.022483 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000621 | 0.233587 | 2,102 | 80 | 88 | 26.275 | 0.634389 | 0.040438 | 0 | 0 | 0 | 0 | 0.296627 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d60a7fac3a95dca71de1420dd67565ca4da447a2 | 3,850 | py | Python | image-segmentation/config/config.py | swcho84/image-segmentation | ef9b9b3d832e9efe6f43522cc5ca0e17279d6608 | [
"MIT"
] | 64 | 2019-03-09T08:55:11.000Z | 2022-01-27T07:08:02.000Z | image-segmentation/config/config.py | swcho84/image-segmentation | ef9b9b3d832e9efe6f43522cc5ca0e17279d6608 | [
"MIT"
] | 2 | 2019-11-07T11:49:13.000Z | 2020-01-16T14:39:03.000Z | image-segmentation/config/config.py | swcho84/image-segmentation | ef9b9b3d832e9efe6f43522cc5ca0e17279d6608 | [
"MIT"
] | 21 | 2019-03-09T08:56:35.000Z | 2022-03-02T12:24:43.000Z | from configparser import ConfigParser
import json
import os
from ast import literal_eval
type_caster = {
    'int': int,
    'str': lambda x: x if x.lower() != 'none' else None,
    'float': float,
    # exec() lets the lambda raise on anything other than 'True'/'False'.
    'bool': lambda x: True if x == 'True' else False if x == 'False' else exec("raise Exception('invalid bool value: {}'.format(x))"),
    'dict': json.loads,
    'list': json.loads,
    'tuple': literal_eval
}


class Config:
    __non_const_vars = ['NAME', 'GPU_IDS', 'IMAGES_PER_GPU', 'BATCH_SIZE', 'MODE', 'BACKBONE_WEIGHTS']

    def __init__(self, name='Configuration'):
        self.NAME = name

    def __str__(self, depth=0, show_type=True):
        d = self.__dict__
        s = ''
        for key in sorted(d.keys()):
            if d[key].__class__.__name__ == 'Config':
                s += '[{}]\n'.format(key)
                s += d[key].__str__(depth + 1, show_type)
            elif key == 'NAME':
                continue
            else:
                if show_type:
                    typename = d[key].__class__.__name__
                    if typename == 'NoneType':
                        typename = 'str'
                    s += depth * '\t' + '{}-{} = {}\n'.format(typename, key, str(d[key]).replace('\'', '\"'))
                else:
                    s += depth * '\t' + '{} = {}\n'.format(key, str(d[key]).replace('\'', '\"'))
        return s

    class ConstError(TypeError):
        pass

    def __setattr__(self, key, value):
        if key in self.__dict__ and key not in Config.__non_const_vars:
            raise self.ConstError('Cannot change the value of the constant {} in Config object'.format(key))
        else:
            self.__dict__[key] = value

    def flatten(self):
        d = self.__dict__
        conf = Config()
        conf.NAME = self.NAME
        for key in d:
            if d[key].__class__.__name__ == 'Config':
                _d = d[key].__dict__
                for _key in _d:
                    vars(conf)[_key] = _d[_key]
            else:
                vars(conf)[key] = d[key]

        # Derived values, recomputed from the flattened variables.
        if hasattr(conf, 'IMAGES_PER_GPU') and hasattr(conf, 'GPU_IDS'):
            conf.BATCH_SIZE = conf.IMAGES_PER_GPU * len(conf.GPU_IDS)
        if hasattr(conf, 'NUM_CLASSES'):
            conf.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + conf.NUM_CLASSES
        if hasattr(conf, 'IMAGE_WIDTH') and hasattr(conf, 'IMAGE_HEIGHT'):
            conf.IMAGE_SHAPE = (conf.IMAGE_HEIGHT, conf.IMAGE_WIDTH, 3)
        return conf

    def display(self):
        print('\n-----------------{}-----------------\n'.format(self.NAME))
        print(self.__str__(show_type=False))
        print('\n-----------------{}-----------------\n'.format(len(self.NAME) * '-'))

    def save(self, path):
        f = open(path, 'w')
        f.write(self.__str__())
        f.close()


def load_config(filename):
    parser = ConfigParser()
    assert parser.read(filename), 'Could not read the file {}'.format(filename)

    config = Config()
    filename = os.path.basename(filename)
    config.NAME = filename[:-4] if filename.endswith('.cfg') else filename

    for section_name in parser.sections():
        vars(config)[section_name] = Config()
        section = parser.items(section_name)
        for item in section:
            _type_name = item[0]
            _type_name = _type_name.split('-')
            assert len(_type_name) == 2, 'invalid variable syntax: {}. Variables in .cfg file should be declared in the form type-VARIABLE_NAME'.format(item[0].upper())
            _type = _type_name[0].strip(' ')
            _name = _type_name[1].strip(' ').upper()
            comment_p = item[1].find('#')
            _value = item[1].strip(' ') if comment_p == -1 else item[1][:comment_p].strip(' ')
            vars(getattr(config, section_name))[_name] = type_caster[_type](_value)

    return config
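# --- Illustrative usage only (not part of the original module). The section
# and variable names below are invented; the on-disk syntax (type-NAME = value)
# follows the assertion in load_config above.
if __name__ == '__main__':
    with open('example.cfg', 'w') as f:
        f.write('[TRAINING]\n'
                'int-IMAGES_PER_GPU = 2\n'
                'list-GPU_IDS = [0, 1]\n')
    cfg = load_config('example.cfg').flatten()
    cfg.display()  # BATCH_SIZE is derived: IMAGES_PER_GPU * len(GPU_IDS) == 4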
| 36.666667 | 168 | 0.543896 | 472 | 3,850 | 4.144068 | 0.28178 | 0.018405 | 0.018405 | 0.019939 | 0.093047 | 0.021472 | 0 | 0 | 0 | 0 | 0 | 0.00662 | 0.293766 | 3,850 | 104 | 169 | 37.019231 | 0.712762 | 0 | 0 | 0.091954 | 0 | 0.011494 | 0.144675 | 0.020779 | 0 | 0 | 0 | 0 | 0.022989 | 1 | 0.08046 | false | 0.022989 | 0.045977 | 0 | 0.195402 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d60b68696ab0e13ca68be8ff221db4648d047fae | 2,676 | py | Python | examples/benchmarks/plot_label_shift.py | ltiao/gp-dre | 5997a74826636a58662f5fa8c41a81d32ba8baa2 | [
"MIT"
] | null | null | null | examples/benchmarks/plot_label_shift.py | ltiao/gp-dre | 5997a74826636a58662f5fa8c41a81d32ba8baa2 | [
"MIT"
] | null | null | null | examples/benchmarks/plot_label_shift.py | ltiao/gp-dre | 5997a74826636a58662f5fa8c41a81d32ba8baa2 | [
"MIT"
] | 1 | 2021-11-10T00:52:11.000Z | 2021-11-10T00:52:11.000Z | # -*- coding: utf-8 -*-
"""
Label Shift
===========
"""
# sphinx_gallery_thumbnail_number = 2
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from gpdre.benchmarks import LabelShift
from gpdre.applications.covariate_shift.benchmarks import regression_metric
from gpdre.datasets.liacc_regression import DATASET_LOADER
from gpdre.plotting import continuous_pairplot, line
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_diabetes, load_boston
# %%
# constants
test_rate = 1 / 3
dataset_seed = 8888
num_seeds = 10
DATASET_LOADER_SKLEARN = {
    "boston": load_boston,
    "diabetes": load_diabetes
}
dataset_names = []
dataset_names.extend(DATASET_LOADER_SKLEARN.keys())
dataset_names.extend(DATASET_LOADER.keys())
# %%
def make_data(dataset_name, exact, uniform):
    data = pd.DataFrame(dict(dataset_name=dataset_name,
                             exact=exact, uniform=uniform))
    data.index.name = "seed"
    data.reset_index(inplace=True)
    return data

# %%
dataset = load_diabetes()
r = LabelShift()
g = continuous_pairplot(features=dataset.data,
                        target=r.prob(dataset.data, dataset.target).numpy(),
                        columns=dataset.feature_names)
# %%
dfs = []
for dataset_name in dataset_names:
    if dataset_name in DATASET_LOADER_SKLEARN:
        dataset = DATASET_LOADER_SKLEARN[dataset_name]()
        X_all = dataset.data
        y_all = dataset.target
    else:
        (X_all, y_all), test_data = DATASET_LOADER[dataset_name](data_home="../../datasets/")

    X, X_test, y, y_test = train_test_split(X_all, y_all,
                                            test_size=test_rate,
                                            random_state=dataset_seed)

    r = LabelShift()

    metrics_uniform_seeds = []
    metrics_exact_seeds = []
    for seed in range(num_seeds):
        (X_train, y_train), (X_val, y_val) = r.train_test_split(X, y, seed=seed)
        metrics_uniform_seeds.append(regression_metric(X_train, y_train, X_test, y_test))
        metrics_exact_seeds.append(regression_metric(X_train, y_train, X_test, y_test,
                                                     sample_weight=np.maximum(1e-6, r.ratio(X_train, y_train))))

    dfs.append(make_data(dataset_name, metrics_exact_seeds, metrics_uniform_seeds))
# %%
data = pd.concat(dfs, axis="index", ignore_index=True, sort=True)
# %%
g = sns.relplot(x="uniform", y="exact", hue="dataset_name",
kind="scatter", data=data, alpha=0.7, palette="tab10",
facet_kws=dict(sharex="row", sharey="row"))
g.map(line, "uniform", "exact")
| 27.306122 | 112 | 0.662182 | 347 | 2,676 | 4.827089 | 0.342939 | 0.059104 | 0.047761 | 0.028657 | 0.117612 | 0.058507 | 0.058507 | 0.058507 | 0.058507 | 0.058507 | 0 | 0.007711 | 0.224589 | 2,676 | 97 | 113 | 27.587629 | 0.799518 | 0.041106 | 0 | 0.035714 | 0 | 0 | 0.036078 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017857 | false | 0 | 0.178571 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d61213ab7de50c9a60bac067952659cf38ee277c | 373 | py | Python | decode_can.py | awharkrider/CPSC_3320_HACK_A_CAR | 531bd170a9701808e1e348ff1de50a1c7a586414 | [
"MIT"
] | null | null | null | decode_can.py | awharkrider/CPSC_3320_HACK_A_CAR | 531bd170a9701808e1e348ff1de50a1c7a586414 | [
"MIT"
] | null | null | null | decode_can.py | awharkrider/CPSC_3320_HACK_A_CAR | 531bd170a9701808e1e348ff1de50a1c7a586414 | [
"MIT"
] | null | null | null | from pyvit.file.db import jsondb
from pyvit.hw import socketcan
parser = jsondb.JsonDbParser()
b = parser.parse('examples/example_db.json')
dev = socketcan.SocketCanDev('vcan0')
dev.start()
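# Assumed setup (not part of the original script): the virtual CAN interface
# must already exist before dev.start() can succeed, e.g.:
#   sudo ip link add dev vcan0 type vcan
#   sudo ip link set up vcan0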
# Receive frames from the CAN interface, decode each one against the
# JSON signal database loaded above, and print the decoded signals.
while True:
    frame = dev.recv()
    signals = b.parse_frame(frame)
    if signals:
        for s in signals:
            print(s) | 17.761905 | 44 | 0.678284 | 53 | 373 | 4.735849 | 0.603774 | 0.071713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003425 | 0.217158 | 373 | 21 | 45 | 17.761905 | 0.856164 | 0.104558 | 0 | 0 | 0 | 0 | 0.087349 | 0.072289 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d6125c1539b6dd4f0fdf3d0cb8cf22747f85dd3d | 17,224 | py | Python | Graficos.py | Dashboard-Plotly/Dashboard-CO2 | c96ae541cdc68dde6afe67a20dbb40ac86a27128 | [
"MIT"
] | null | null | null | Graficos.py | Dashboard-Plotly/Dashboard-CO2 | c96ae541cdc68dde6afe67a20dbb40ac86a27128 | [
"MIT"
] | null | null | null | Graficos.py | Dashboard-Plotly/Dashboard-CO2 | c96ae541cdc68dde6afe67a20dbb40ac86a27128 | [
"MIT"
] | null | null | null | # Library imports
from pandas import read_csv
from dash import Dash, html, dcc, Input, Output
import plotly.express as px

app = Dash(__name__, title='Emissão de CO2')

df = read_csv("annual-co2-emissions-per-country.csv")
df_array = df.values  # Turn everything in the df into a plain array so the rest of the code no longer depends on pandas.
# ---------------------------------------------------------------------------------------------------------
# Chart 1
world = []
anos = []

# Put the worldwide data into the empty list
for linha in df_array:
    anos.append(linha[2])
    if linha[0] == "World":
        world.append(linha[3])

anos = sorted(set(anos))

fig1 = px.line(
    x=anos,
    y=world,
    color_discrete_sequence=px.colors.sequential.Aggrnyl,
    template='gridon',
    title="Emissão no mundo ao longo do tempo",
    labels={
        "y": "Emissão ",
        "x": "Ano "
    }
)
fig1.update_layout(
    title="Emissão no mundo ao longo do tempo",
    xaxis_title="Anos",
    yaxis_title="Emissão por tonelada",
    paper_bgcolor="rgba(255,255,255,0)",
    plot_bgcolor="#DCEEF3",
    title_font_family="Archive",
    title_font_color="#031225",
    font_color="#031225",
    font_family="Archive"
)
# ---------------------------------------------------------------------------------------------------------
# Chart 2
# Arranging the data to feed the map chart
anos_ordenados = []
paises_ordenados = []
nivel_ordenado = []
codigo_ordenado = []

for ano in anos:
    for linha in df_array:
        if linha[2] == ano:
            # Skip aggregate rows (continents and the world total); keep countries only.
            if (linha[0] != 'Africa') and (linha[0] != 'Asia') and (linha[0] != 'Europe') and (linha[0] != 'North America') and (linha[0] != 'Oceania') and (linha[0] != 'South America') and (linha[0] != 'World'):
                paises_ordenados.append(linha[0])
                codigo_ordenado.append(linha[1])
                anos_ordenados.append(linha[2])
                nivel_ordenado.append(linha[3])

fig2 = px.choropleth(
    locations=codigo_ordenado,       # country position on the map (ISO code)
    color=nivel_ordenado,            # CO2 level
    hover_name=paises_ordenados,     # country name shown on hover
    animation_frame=anos_ordenados,  # year slider
    range_color=[0, 2000000000],     # CO2 range
    color_continuous_scale=px.colors.sequential.Mint,  # color scale
    labels={
        "animation_frame": "Ano ",
        "color": "Emissão ",
        "locations": "Código "
    }
)
fig2.update_layout(
    title_font_family="Archive",
    title_font_color="#031225",
    font_color="#031225",
    font_family="Archive",
    paper_bgcolor="#DCEEF3"
)
# ---------------------------------------------------------------------------------------------------------
# Data for charts 3 and 4.

# Dictionary keyed by continent
emissoes = {
    'Africa': [],  # continent/key : value = (empty list)
    'Asia': [],
    'Europe': [],
    'North America': [],
    'Oceania': [],
    'South America': [],
}

anos = []
# Add the emission value of every row since 1987 to the matching continent's list.
# The year list is rebuilt as the rows stream in, so the `linha[2] == ano` test is
# always reachable for the row currently being processed.
for linha in df_array:
    if linha[2] >= 1987:
        anos.append(linha[2])
        anos = sorted(set(anos))
        for ano in anos:
            for emissao in emissoes:
                if linha[0] == emissao:  # continent currently being read in the df == dictionary key
                    if linha[2] == ano:  # we do not want every year; filter to the current one
                        emissoes[emissao].append(linha[3])

continents = ['Africa', 'Asia', 'Europe', 'North America', 'Oceania', 'South America']  # as spelled in the CSV
continentes = ['África', 'Ásia', 'Europa', 'América do Norte', 'Oceania', 'América do Sul']  # translated labels shown in the UI

# ----------------------------
# Chart 3: placeholder figure; the dropdown callback below rebuilds it
fig3 = px.bar(
    x=anos,
    y=anos)

# ----------------------------
# Chart 4: placeholder figure; the dropdown callback below rebuilds it
fig4 = px.pie(
    names=continentes,
    values=continentes)
# ---------------------------------------------------------------------------------------------------------
# HTML - skeleton of the Dash page
app.layout = html.Main(id='graphs', className='container',
    children=[
        html.Div(className="menu",
            children=[
                html.Img(id='logo',
                    src='assets\Logo.jpg'),
                html.Div(className="ancoras",
                    children=[
                        html.A(className="line", children=[
                            html.Img(src='./assets/grafico-de-linha.png', id='linhaPng'),
                            html.Img(src='./assets/grafico-de-linha.gif', id='linhaGif')
                        ], href="#grafic1"),
                        html.A(className="map", children=[
                            html.Img(src='./assets/grafico-mapa.png', id='mapaPng'),
                            html.Img(src='./assets/grafico-mapa.gif', id='mapaGif')
                        ], href="#grafico_mapa_CO2"),
                        html.A(className="bar", children=[
                            html.Img(src='./assets/grafico-de-barras.png', id='barraPng'),
                            html.Img(src='./assets/grafico-de-barras.gif', id='barraGif')
                        ], href="#grafico_Barras_CO2"),
                        html.A(className="pie", children=[
                            html.Img(src='./assets/grafico-de-pizza.png', id='pizzaPng'),
                            html.Img(src='./assets/grafico-de-pizza.gif', id='pizzaGif')
                        ], href="#grafico_Pizza_CO2")
                    ]
                )
            ]
        ),
        html.Div(className='graficos',
            children=[
                html.Div(className='capa',
                    children=[
                        html.H1(id='texto1',
                            children=[
                                "Emissão", html.Br(), "global de", html.Br(), "CO2"
                            ]
                        ),
                        html.Img(src='assets\mundo.png', id='mundo'),
                        html.Div(id='grafic1')
                    ]),
                html.Div(className='grafico_1',
                    children=[
                        html.Div(className='T1', children=[
                            html.H1('Gráfico de Linha', id="T_Grafico1"),
                            html.H2(
                                children=[
                                    html.P('Como as emissões de CO2 mudaram ao longo do tempo?'), html.P('Pode-se notar o crescimento global da emissão por tonelada de CO2 em relação ao tempo, tendo início em 1750 e possuindo dados até 2020.'), html.P('Por meio deste gráfico, é possível observar uma emissão quase nula nos primeiros cem anos, seguida de uma alta a partir do século XX, momento no qual a humanidade começou processos maiores de industrialização.'), html.P('Ao final do século XX, já havia atingido 22 bilhões de toneladas, com o gráfico ainda tendendo a crescer.'), html.P('Nos últimos anos, a emissão de CO2 está próxima das 35 bilhões de toneladas. Vê-se que o crescimento se estabilizou no final do gráfico, porém ainda não atingiu o nível máximo.')
                                ], id='T1'
                            )
                        ]),
                        html.Div(className='g1',
                            children=[
                                dcc.Graph(id='Grafico_Linhas_CO2',
                                    figure=fig1)
                            ]
                        ),
                        html.Div(id='grafico_mapa_CO2')
                    ]
                ),
                html.Div(className='grafico_2',
                    children=[
                        html.Div(className='T2', children=[
                            html.H1('Gráfico de Mapa', id="T_Grafico2"),
                            html.H2(
                                children=[
                                    html.P('A relação da emissão de CO2 em cada país do mundo de acordo com o tempo é o foco deste gráfico.'), html.P('Países que emitem em maior escala costumam ser produtores de petróleo e gás natural ou serem ocupados por uma grande população. Há também um fator de desenvolvimento, visto que países menos desenvolvidos se apresentam com emissões muito baixas em comparação com grandes potências mundiais.'), html.P('Utilizando o recurso do gráfico para comparar datas, em 1750 o Reino Unido era o maior emissor, enquanto atualmente o norte da América e a Ásia assumem o topo das emissões.')
                                ], id='T2'
                            )
                        ]),
                        html.Div(className='g2',
                            children=[
                                dcc.Graph(id="Grafico-Mapa", figure=fig2)
                            ]
                        ),
                        html.Div(id='grafico_Barras_CO2')
                    ]
                ),
                html.Div(className='grafico_3',
                    children=[
                        html.Div(className='T3', children=[
                            html.H1('Gráfico de Barras', id="T_Grafico3"),
                            html.H2(
                                children=[
                                    html.P('São apresentados os dados de emissão de CO2 ao longo do tempo, especificados por cada continente, sendo apresentada a África primeiramente.'), html.P('É evidente o crescimento das emissões em todos os continentes. Tendo como partida o ano de 1987, as emissões desde o começo do gráfico já se apresentam altas, e em alguns continentes houve uma pequena queda entre os anos de 2019 e 2020.')
                                ], id='T3'
                            )
                        ]),
                        html.Div(className='g3',
                            children=[
                                dcc.Dropdown(continents, value='Africa', id='continentes', style={
                                    'border-radius': '10px'
                                }),
                                dcc.Graph(
                                    id='Grafico_Barras_CO2',
                                    figure=fig3
                                )
                            ]
                        ),
                        html.Div(id='grafico_Pizza_CO2')
                    ]
                ),
                html.Div(id="grafico4", className='grafico_4',
                    children=[
                        html.Div(className='T4',
                            children=[
                                html.H1('Gráfico de Pizza', id="T_Grafico4"),
                                html.H2(
                                    children=[
                                        html.P('Este gráfico apresenta os dados da emissão de CO2 por continentes em relação ao tempo, tendo início em 1987, ano no qual a Europa liderava as emissões no mundo, enquanto no ano final, 2020, o continente a liderar é a Ásia, sendo responsável por mais de um quarto da emissão global.'), html.P('A Ásia, entretanto, possui uma população altíssima, e balanceando sua emissão em relação a população, possui números baixos quando comparada com outros continentes.'), html.P('A América do Sul e a África apresentam um percentual pequeno, ambos têm emissão correspondente a voos e envio de mercadoria, mas esses dados não são considerados globalmente.')
                                    ], id='T4'
                                )
                            ]
                        ),
                        html.Div(className='g1',
                            children=[
                                dcc.Dropdown(anos, value=1987, id='Anos', style={
                                    'border-radius': '10px'
                                }),
                                dcc.Graph(
                                    id='Grafico_Pizza_CO2',
                                    figure=fig4
                                )
                            ]
                        )
                    ]
                ),
                html.Div(className='baseboard',
                    children=[
                        html.Div(className='participantes',
                            children=[
                                html.H1('Participantes - Grupo 02', id='participantes'),
                                html.H2(id='conteudo_participantes',
                                    children=[
                                        ('Alana Gabriele Amorim Silva - 211061331'), html.Br(),
                                        ('Danielle Rodrigues Silva - 211061574'), html.Br(),
                                        ('Dara Cristina Fernandes - 211061609'), html.Br(),
                                        ('Davi Rodrigues da Rocha - 211061618'), html.Br(),
                                        ('Harleny Angéllica Araújo - 211061832'), html.Br(),
                                        ('Helena Emery Silvano - 211061841'), html.Br(),
                                        ('Leandro Almeida Rocha - 211062080'), html.Br(),
                                        ('Rafaela de Melo Lopes - 211062400'), html.Br(),
                                        ('Thaiza R da Silva - 211062508')
                                    ]
                                )
                            ]
                        ),
                        html.Div(className='fonte',
                            children=[
                                html.H1('Dados Usados', id='referencia'),
                                html.H2(children=[
                                    ('Dados fornecidos pela '), html.A('Our World in Data', href='https://ourworldindata.org/co2-emissions', id='link'), (' sobre a emissão anual de CO2 por país.')], id='fonte')
                            ]
                        ),
                        html.Div(className='imagens',
                            children=[
                                html.Img(src='./assets/logo-unb.png', id='unb'),
                                html.Img(src='./assets/logo-fga.png', id='fga')
                            ]
                        )
                    ]
                )
            ]
        )
    ]
)
# -------------------------------------------------------------------------------------------------------------------
# Callbacks

# Bar chart callback (the bridge between the function and the dropdown)
@app.callback(
    # The output is a new figure/chart filtered to the chosen continent.
    Output('Grafico_Barras_CO2', 'figure'),
    Input('continentes', 'value')  # The input is the value of the 'continentes' id, i.e., the dcc.Dropdown.
)
# Bar chart function that builds the new filtered figure
def atualizar_output(value):  # `value` is the continent received from the callback Input
    for emissao in emissoes:
        if value == emissao:
            fig3 = px.bar(
                x=anos,
                y=emissoes[emissao],
                color_discrete_sequence=px.colors.sequential.Aggrnyl,
                title='Emissão por continente',
                template='gridon',
                labels={
                    "x": "Ano ",
                    "y": "Emissão ",
                }
            )
            fig3.update_layout(
                title="Emissão por continente",
                xaxis_title="Anos",
                yaxis_title="Emissão por tonelada",
                paper_bgcolor="rgba(0,0,0,0)",
                plot_bgcolor="#DCEEF3",
                title_font_family="Archive",
                title_font_color="#031225",
                font_color="#031225",
                font_family="Archive"
            )
    return fig3


# Pie chart callback (the bridge between the function and the dropdown)
@app.callback(
    Output('Grafico_Pizza_CO2', 'figure'),
    Input('Anos', 'value')
)
# Pie chart function that builds the new filtered figure
def update_output(value):
    ano_especifico = []
    i = 0
    while i < 34:  # 34 yearly entries from 1987 through 2020
        if value == anos[i]:
            posição = i  # remember the position of the year the user chose
            break  # leave the loop
        i += 1  # i = i + 1
    for continente in emissoes:
        # Append, for each continent, only the emission value at the chosen year's index.
        ano_especifico.append(emissoes[continente][posição])
    fig4 = px.pie(
        names=continentes,
        values=ano_especifico,
        template='simple_white',
        title="Emissão por continente",
        color_discrete_sequence=px.colors.sequential.Darkmint,
        labels={
            "names": "Continente ",
            "values": "Emissão "
        }
    )
    fig4.update_layout(
        paper_bgcolor="rgba(0,0,0,0)",
        plot_bgcolor="rgba(0,0,0,0)",
        title_font_family="Archive",
        title_font_color="#031225",
        font_color="#031225",
        font_family="Archive",
    )
    return fig4


# ---------------------------------------------------------------------------------------------------------
# Run the app with Dash
if __name__ == '__main__':
    app.run_server(debug=True) | 43.38539 | 769 | 0.474106 | 1,699 | 17,224 | 4.739847 | 0.281931 | 0.037253 | 0.03775 | 0.021855 | 0.250466 | 0.191854 | 0.159692 | 0.096362 | 0.061344 | 0.057122 | 0 | 0.031469 | 0.385625 | 17,224 | 397 | 770 | 43.38539 | 0.728974 | 0.123258 | 0 | 0.35061 | 0 | 0.036585 | 0.297669 | 0.021651 | 0 | 0 | 0 | 0.002519 | 0 | 1 | 0.006098 | false | 0 | 0.009146 | 0 | 0.021341 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d6128aaf87e5dbb02bad2f67476de3832d5e901a | 1,257 | py | Python | fastapi-master-api/app/service/metrics.py | SionAbes/fullstack-porfolio | 6ca74da425a0f6e2d9b65b2aeb8d5452ff1565a9 | [
"MIT"
] | 1 | 2021-12-25T09:19:25.000Z | 2021-12-25T09:19:25.000Z | fastapi-master-api/app/service/metrics.py | SionAbes/fullstack-porfolio | 6ca74da425a0f6e2d9b65b2aeb8d5452ff1565a9 | [
"MIT"
] | null | null | null | fastapi-master-api/app/service/metrics.py | SionAbes/fullstack-porfolio | 6ca74da425a0f6e2d9b65b2aeb8d5452ff1565a9 | [
"MIT"
] | null | null | null | from typing import List
from app.domain import metric as domain
from app.domain.user_token import LoggedUser
from app.repository.database.metrics import MetricsRepo, metrics_repo
from app.service.authorization import authorize
from sqlalchemy.orm import Session
class CreateMetric:
def __init__(
self,
db: Session,
token: LoggedUser,
create_metric: domain.CreateMetric,
metric: domain.Metric = domain.Metric,
metric_repo: MetricsRepo = metrics_repo,
):
self.db = db
self.token = token
self.create_metric = create_metric
self.metric = metric
self.metric_repo = metric_repo
def create(self) -> domain.Metric:
self._authorize()
return self._create()
def _create(self) -> domain.Metric:
return self.metric_repo.create(
db=self.db,
obj_in=self.create_metric,
)
def _authorize(self):
self.options = self._build_options()
authorize(
self.token,
self.metric,
"create",
self.db,
self.options,
)
def _build_options(self) -> dict:
return {
"user_id": self.create_metric.user_id,
}
| 25.653061 | 69 | 0.611774 | 139 | 1,257 | 5.33813 | 0.258993 | 0.080863 | 0.06469 | 0.051213 | 0.067385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.30708 | 1,257 | 48 | 70 | 26.1875 | 0.851894 | 0 | 0 | 0 | 0 | 0 | 0.010342 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121951 | false | 0 | 0.146341 | 0.04878 | 0.365854 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d619e59499d4ab2065147196c6ae2bffbc2f56cd | 4,310 | py | Python | traiders/backend/api/serializers/comment.py | rdilruba/bounswe2019group2 | b373908a4a8e92481f359297aba07245f0a23c1c | [
"Apache-2.0"
] | 11 | 2019-02-15T12:08:32.000Z | 2019-11-14T19:25:09.000Z | traiders/backend/api/serializers/comment.py | bounswe/bounswe2019group2 | 05d41cf7b6bc1b3f994e82495d2a885a6eaa7cf3 | [
"Apache-2.0"
] | 279 | 2019-02-13T14:57:39.000Z | 2022-03-12T00:02:30.000Z | traiders/backend/api/serializers/comment.py | rdilruba/bounswe2019group2 | b373908a4a8e92481f359297aba07245f0a23c1c | [
"Apache-2.0"
] | 13 | 2019-03-20T08:30:55.000Z | 2021-01-31T16:49:14.000Z | from rest_framework import serializers, fields
from rest_framework.exceptions import PermissionDenied
from ..models import ArticleComment, EquipmentComment, Equipment
from . import UserSerializer
class IsLiked(serializers.BooleanField):
def get_attribute(self, instance):
if self.context['request'].user.is_anonymous:
return False
return instance.liked_by.filter(pk=self.context['request'].user.pk).exists()
class CommentSerializerBase(serializers.HyperlinkedModelSerializer):
user = UserSerializer(read_only=True)
is_liked = IsLiked(required=False)
num_likes = serializers.SerializerMethodField(read_only=True)
def get_num_likes(self, comment):
return comment.liked_by.count()
def validate(self, data):
# If only liking, user does not have to provide any content or image
if self.context['request'].method != 'PATCH':
if not data.get('image') and not data.get('content'):
raise serializers.ValidationError("Please either provide an image or a text as the comment.")
return data
def create(self, validated_data: dict):
if 'is_liked' in validated_data:
validated_data.pop('is_liked')
validated_data['user'] = self.context['request'].user
return super().create(validated_data)
def update(self, instance, validated_data: dict):
is_liked = validated_data.pop('is_liked', None)
likers = instance.liked_by.all()
if validated_data and self.context['request'].user != instance.user:
raise PermissionDenied
if is_liked is True:
if self.context['request'].user.is_anonymous:
raise serializers.ValidationError('A guest user cannot like a comment. '
'Please login to perform this action.')
if self.context['request'].user in likers:
raise serializers.ValidationError('You already like this comment.')
instance.liked_by.add(self.context['request'].user)
elif is_liked is False:
if self.context['request'].user.is_anonymous:
raise serializers.ValidationError('A guest user cannot unlike a comment '
'Please login to perform this action.')
if self.context['request'].user not in likers:
raise serializers.ValidationError('You already do not like this comment.')
instance.liked_by.remove(self.context['request'].user)
return super().update(instance, validated_data)
class ArticleCommentSerializer(CommentSerializerBase):
def get_fields(self):
fields = super().get_fields()
view = self.context.get('view')
if view and getattr(view, 'action') != 'create':
fields['article'].read_only = True
return fields
class Meta:
model = ArticleComment
fields = ["id", "url", "created_at", "content", "image", "user",
"article", "is_liked", "liked_by", "num_likes"]
read_only_fields = ['id', 'url', 'created_at', 'user',
'liked_by', 'is_liked', 'num_likes']
class EquipmentCommentSerializer(CommentSerializerBase):
class EquipmentField(serializers.CharField):
def get_attribute(self, instance):
return instance.equipment.symbol
def run_validation(self, symbol=fields.empty):
super().run_validation(symbol)
equipment = Equipment.objects.filter(symbol=symbol).first()
if equipment is None:
raise serializers.ValidationError('No such equipment')
return equipment
equipment = EquipmentField()
def get_fields(self):
fields = super().get_fields()
view = self.context.get('view')
if view and getattr(view, 'action') != 'create':
fields['equipment'].read_only = True
return fields
class Meta:
model = EquipmentComment
fields = ["id", "url", "created_at", "content",
"image", "user", "equipment", "is_liked", "liked_by", "num_likes"]
read_only_fields = ['id', 'url', 'created_at', 'user',
'liked_by', 'is_liked', 'num_likes']
| 38.828829 | 109 | 0.628306 | 475 | 4,310 | 5.572632 | 0.24 | 0.054023 | 0.074802 | 0.083113 | 0.430298 | 0.39252 | 0.344919 | 0.294673 | 0.238761 | 0.238761 | 0 | 0 | 0.264037 | 4,310 | 110 | 110 | 39.181818 | 0.834489 | 0.015313 | 0 | 0.283951 | 0 | 0 | 0.15818 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.049383 | 0.024691 | 0.419753 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d619e65c055a22f7471ea471a7d73c6dedb9a000 | 2,614 | py | Python | contrastive_dataloader.py | rajahaseeb147/Alzheimers_final | dd9b8607ec47b4317cb139e8c78c3c0b3c8ca1b8 | [
"MIT"
] | null | null | null | contrastive_dataloader.py | rajahaseeb147/Alzheimers_final | dd9b8607ec47b4317cb139e8c78c3c0b3c8ca1b8 | [
"MIT"
] | null | null | null | contrastive_dataloader.py | rajahaseeb147/Alzheimers_final | dd9b8607ec47b4317cb139e8c78c3c0b3c8ca1b8 | [
"MIT"
] | null | null | null | import os
import torch
import torch.utils.data as data
import torchvision.transforms as transform
import numpy as np
from PIL import Image
import cv2
class AlzhDataset(data.Dataset):
def __init__(self, type, root='/data/tm/alzh/New_dataset/train', transform=None):
self.transform = transform
if type == 'AD_CN':
## AD -> 0, CN -> 1
image_file = os.path.join(root, 'images_AD_CN.txt')
class_file = os.path.join(root, 'image_class_labels_AD_CN.txt')
elif type == 'AD_MCI':
## AD -> 0, MCI -> 1
image_file = os.path.join(root, 'images_AD_CN.txt')
class_file = os.path.join(root, 'image_class_labels_AD_CN.txt')
elif type == 'MCI_CN':
## MCI -> 0, CN -> 1
image_file = os.path.join(root, 'images_AD_CN.txt')
class_file = os.path.join(root, 'image_class_labels_AD_CN.txt')
elif type == '3class':
## AD -> 0 MCI -> 1, CN -> 2
image_file = os.path.join(root, 'images.txt')
class_file = os.path.join(root, 'image_class_labels.txt')
id2image = self.list2dict(self.text_read(image_file))
id2class = self.list2dict(self.text_read(class_file))
self.images = []
self.labels = []
for k in id2image.keys():
image_path = os.path.join(root, id2image[k])
self.images.append(image_path)
self.labels.append(int(id2class[k]))
def text_read(self, file):
with open(file, 'r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
lines[i] = line.strip('\n')
return lines
def list2dict(self, list):
dict = {}
for l in list:
s = l.split(' ')
id = int(s[0])
cls = s[1]
if id not in dict.keys():
dict[id] = cls
else:
raise EOFError('The same ID can only appear once')
return dict
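    # Expected on-disk format (inferred from list2dict above; the example paths
    # are assumptions): one "id value" pair per line, e.g.
    #   images.txt              -> "1 AD/img_0001.png"
    #   image_class_labels.txt  -> "1 0"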
    def __len__(self):
        return len(self.labels)

    def __getitem__(self, item):
        img = Image.open(self.images[item]).convert('RGB')
        label = self.labels[item]
        # Two independently augmented views of the same image, for contrastive training.
        if self.transform is not None:
            img1 = self.transform(img)
            img2 = self.transform(img)
        else:
            # Fix: without a transform the original raised NameError here;
            # return the same PIL image twice instead.
            img1 = img2 = img
        return img1, img2, label


if __name__ == '__main__':
    dataset = AlzhDataset()
    train_len = int(dataset.__len__() * 0.8)
    valid_len = dataset.__len__() - train_len
    train, valid = torch.utils.data.random_split(dataset, [train_len, valid_len])
    print(train.__len__(), valid.__len__()) | 35.324324 | 85 | 0.571155 | 349 | 2,614 | 4.045845 | 0.283668 | 0.038244 | 0.063739 | 0.089235 | 0.283286 | 0.247875 | 0.247875 | 0.227337 | 0.227337 | 0.227337 | 0 | 0.014795 | 0.301836 | 2,614 | 74 | 86 | 35.324324 | 0.758904 | 0.029839 | 0 | 0.096774 | 0 | 0 | 0.104826 | 0.054193 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080645 | false | 0 | 0.112903 | 0.016129 | 0.274194 | 0.016129 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d61b02e3c2cbe185d9b13c07deb6296c38aee33e | 3,346 | py | Python | resources/ganttChartDrawer.py | judgyknowitall/ethogram_maker | 605001243717bc6f10e32b24f09de2652b115676 | [
"MIT"
] | null | null | null | resources/ganttChartDrawer.py | judgyknowitall/ethogram_maker | 605001243717bc6f10e32b24f09de2652b115676 | [
"MIT"
] | null | null | null | resources/ganttChartDrawer.py | judgyknowitall/ethogram_maker | 605001243717bc6f10e32b24f09de2652b115676 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 27 12:05:33 2021
@author: Zahra Ghavasieh
Draws a Gantt chart given params
"""
# Importing the matplotlib.pyplot
import matplotlib.pyplot as plt
# Colours used for bars in chart
clrs = [
    # 'black',      # Groom
    'tab:orange',   # In Place Activity
    'tab:green',    # Locomotion
    'tab:red',      # No Movement
    'tab:purple',   # Rear
    'tab:brown',
    'tab:pink',
    'tab:gray',
    'tab:olive',
    'tab:cyan'
]
# Draw a single trial with each event on its own axis
# xmax set to 9 minutes (540 seconds)
# x = (x_start, x_len)
# y = Event name
def draw_trial(filename, df, x="Bar", y="Event", xmax=540, sort=True):
    print("Drawing Gantt Chart...")

    # Declaring figure and its aspect ratio
    plt.figure(figsize=(20, 8))

    # Find and sort unique Events alphabetically
    events = list(set(df[y]))
    if sort:
        events.sort()

    # Set limits
    plt.ylim(0, len(events) * 10)
    plt.xlim(0, xmax)

    # Setting axis labels
    plt.xlabel('Time (s)')

    # Setting ticks on y-axis
    plt.yticks([(i * 10) + 5 for i in range(len(events))], events)

    # Setting ticks on x-axis
    plt.xticks(range(0, xmax + 1, 60))

    # Setting graph attribute
    # plt.grid(True, axis='x')

    # Declare bars in chart
    for i in range(len(events)):
        # Get all occurrences of the event and draw its bars
        occurrences = df[df[y] == events[i]]
        print("\t- Found " + str(len(occurrences)) + " occurrences of " + events[i])
        plt.broken_barh(occurrences[x], (i * 10, 10), facecolors=(clrs[i % len(clrs)]))

    plt.savefig("out/" + filename + "_ganttchart.svg", bbox_inches='tight')
# Draw multiple trials with each trial on its own axis
# xmax set to 9 minutes (540 seconds)
# x = (x_start, x_len)
# y = Event name
def draw_group(dfs, trials, groupname=None, x="Bar", y="Event", xmax=540, sort=True):
    print("Drawing Gantt Chart...")

    # Declaring figure and its aspect ratio
    plt.figure(figsize=(20, 8))

    # Find and sort unique Events alphabetically
    events = list(set(dfs[0][y]))
    if sort:
        events.sort()

    # Set limits
    plt.ylim(0, len(trials) * 10)
    plt.xlim(0, xmax)

    # Setting axis labels
    plt.xlabel('Time (s)')

    # Setting ticks on y-axis
    plt.yticks([(i * 10) + 5 for i in range(len(trials))], trials)

    # Setting ticks on x-axis
    plt.xticks(range(0, xmax + 1, 60))

    # Declare bars in chart
    for t in range(len(trials)):
        print('\n\t Trial ' + str(trials[t]) + '...')
        for e in range(len(events)):
            # Get all occurrences of the event and draw its bars
            occurrences = dfs[t][dfs[t][y] == events[e]]
            print("\t- Found " + str(len(occurrences)) + " occurrences of " + events[e])
            plt.broken_barh(occurrences[x], (t * 10, 10), facecolors=(clrs[e % len(clrs)]))
    print()

    # Save plot
    name = "out/"
    if groupname is None:
        # str() added: trial ids may not be strings (they are str()-ed above too).
        name = name + "trials" + str(trials[0]) + "-" + str(trials[-1])
    else:
        name = name + groupname
    plt.savefig(name + "_ganttchart.svg", bbox_inches='tight') | 25.937984 | 89 | 0.559474 | 457 | 3,346 | 4.070022 | 0.315098 | 0.018817 | 0.026882 | 0.017742 | 0.619892 | 0.541935 | 0.536559 | 0.536559 | 0.536559 | 0.486022 | 0 | 0.028217 | 0.300956 | 3,346 | 129 | 90 | 25.937985 | 0.766994 | 0.306037 | 0 | 0.269231 | 0 | 0 | 0.120614 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.019231 | 0 | 0.057692 | 0.115385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d61bab398771bbaa9398eaa8f2cd33b77ed4ebdf | 5,834 | py | Python | data-prep/Landsat/landsat-calculate-yearly-stats.py | jjmcnelis/VegMapper | 9513e121fa6495cf8f61d20908017412a73b1c2e | [
"Apache-2.0"
] | null | null | null | data-prep/Landsat/landsat-calculate-yearly-stats.py | jjmcnelis/VegMapper | 9513e121fa6495cf8f61d20908017412a73b1c2e | [
"Apache-2.0"
] | null | null | null | data-prep/Landsat/landsat-calculate-yearly-stats.py | jjmcnelis/VegMapper | 9513e121fa6495cf8f61d20908017412a73b1c2e | [
"Apache-2.0"
] | null | null | null | import os
import sys
import numpy as np
import rasterio
import dask
import dask.array as da
import certifi
import boto3
from botocore.exceptions import ClientError
"""
This file calculates yearly NDVI statistics (min, mean, max, standard deviation)
per pixel for Landsat tiles in an S3 bucket and outputs a .tif file. This
file is expected to be used after data is downloaded via
landsat-download-ndvi.py.
This file takes as input a listfile and yearfile specifying the tile and year
that statistics should be calculated for. It also takes the name of the bucket
and the particular folder within the bucket where images are stored.
If a full year's worth of images for a particular tile is not available,
the program will crash, so please ensure the images are available.
This can be fixed in the future.
Example run:
python3 landsat-calculate-yearly-stats.py listfile yearfile bucket folder
python3 landsat-calculate-yearly-stats.py ucayali_tile_list.txt ucayali_year_list.txt sar-optical-images GLAD/
ucayali_tile_list.txt
----------------------
070W_07S
070W_08S
070W_09S
070W_10S
070W_11S
071W_07S
ucayali_year_list.txt
----------------------
2018
2019
"""
listfile = sys.argv[1]
yearfile = sys.argv[2]
bucket = sys.argv[3]
folder = sys.argv[4]
with open(listfile, 'r') as f:
    tiles = [tile.strip() for tile in f.readlines()]

with open(yearfile, 'r') as f:
    years = [int(year.strip()) for year in f.readlines()]
# From the 16d_intervals.xlsx file, these are the intervals that define a year.
years_intervals = []  # list of (year, start, end) tuples, e.g. [(2015, 806, 828), (2016, 829, 851), ...]
for year in years:
    start = ((year - 1980) * 23) + 1  # from GLAD documentation: 23 16-day intervals per year, starting in 1980
    end = start + 23 - 1              # 23 intervals per year
    years_intervals.append((year, start, end))
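# Quick sanity check of the interval arithmetic above (example values computed
# here, not taken from the GLAD docs): the year 2018 spans intervals 875..897.
assert ((2018 - 1980) * 23 + 1, (2018 - 1980) * 23 + 23) == (875, 897)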
# the functions to compute the min, mean, max, std. dev composites we are making
functions = [
    ("min", np.nanmin),
    ("mean", np.nanmean),
    ("max", np.nanmax),
    ("std_dev", np.nanstd)
]
# establish connection to S3
s3 = boto3.client('s3')
os.environ["AWS_REQUEST_PAYER"] = "requester"
#os.environ["CURL_CA_BUNDLE"] = "/etc/ssl/certs/ca-certificates.crt"
rasterio.Env(CURL_CA_BUNDLE=certifi.where()) # for rasterio.open() to work with S3
folder_path = os.path.join('s3://', bucket, folder)
for year, start, end in years_intervals:
for tile in tiles:
print("Starting composite calculation for tile {} year {}".format(tile, year))
# find which functions/composites actually need to be calculated by checking if the composites are already in S3
functions_to_calculate = []
for func_name, func in functions:
filename = "ndvi_{}_composite_{}.tif".format(func_name, year)
key = os.path.join(folder, tile, filename)
try:
s3.head_object(Bucket=bucket, Key=key)
print("{} already present. Skipping to next function...".format(filename))
continue
except:
functions_to_calculate.append((func_name, func))
if len(functions_to_calculate) == 0:
print("No composites to calculate for tile {} year {}. Skipping to next tile...\n".format(tile, year))
continue
ndvi_list = []
kwargs = None
for i in range(start, end+1):
path_to_file_s3 = os.path.join(folder_path, tile, '{}.tif'.format(i))
# Check if file exists before opening to prevent crash
            try:
                path_to_file = os.path.join(folder, tile, '{}.tif'.format(i))  # key without the s3:// prefix
                # if head_object succeeds the file is present, so we can open it with rasterio
                s3.head_object(Bucket=bucket, Key=path_to_file)
            except ClientError:
                print("Missing file: {}.".format(path_to_file))
                continue
with rasterio.open(path_to_file_s3) as src:
nodata_value = src.profile['nodata']
# read data into dask array with 500x500 chunks in memory
                ndvi = da.from_array(src.read(1), chunks=(500, 500))
                # da.where avoids item assignment (not reliably supported on dask
                # arrays) and promotes to float so nodata cells can hold NaN
                ndvi = da.where(ndvi == nodata_value, np.nan, ndvi)
ndvi_list.append(ndvi)
print("Downloaded {} {} for year {}".format(tile, i, year))
                # grab the profile from the first successfully opened image;
                # use it when saving the composite image.
                # TODO: is this the right way to do this?
                if kwargs is None:
                    kwargs = src.profile
# stack many existing Dask arrays into a new array along new axis
ndvi_array = da.stack(ndvi_list, axis=0)
for func_name, func in functions_to_calculate:
ndvi_composite = func(ndvi_array, axis=0)
print("Done calculating {} composite for year {}".format(func_name, year))
            # if there are still NaN values, replace with 9999 (no data), then
            # compute the lazy dask result into a NumPy array before writing
            nodata_value = 9999
            ndvi_composite = da.where(da.isnan(ndvi_composite), nodata_value, ndvi_composite).compute()
kwargs.update(nodata = nodata_value)
filename = "ndvi_{}_composite_{}.tif".format(func_name, year)
with rasterio.open(filename, 'w', **kwargs) as dst:
dst.write_band(1, ndvi_composite.astype(rasterio.float32))
dst.set_band_description(1, "NDVI {} composite for year {}".format(func_name, year))
key = os.path.join(folder, tile, filename)
try:
response = s3.upload_file(filename, bucket, key)
except ClientError as e:
print("S3 Upload Error:", e)
print("Done uploading {} composite for year {}".format(func_name, year))
os.remove(filename)
print("Done calculating all composites for {}\n".format(year))
| 37.63871 | 120 | 0.631471 | 792 | 5,834 | 4.550505 | 0.334596 | 0.017758 | 0.013873 | 0.024972 | 0.125416 | 0.119867 | 0.070477 | 0.04717 | 0.028857 | 0.028857 | 0 | 0.028651 | 0.264141 | 5,834 | 154 | 121 | 37.883117 | 0.810855 | 0.165924 | 0 | 0.141176 | 0 | 0 | 0.130299 | 0.012484 | 0 | 0 | 0 | 0.006494 | 0 | 1 | 0 | false | 0 | 0.105882 | 0 | 0.105882 | 0.105882 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d61c1827e9bee48f783ccf9d425e7b3db2637bbc | 815 | py | Python | conans/test/unittests/tools/microsoft/test_msbuild.py | thombet/conan | eee0d568ee631fffe1856a5da1c23f47dcdf5c1c | [
"MIT"
] | 1 | 2021-05-29T16:44:56.000Z | 2021-05-29T16:44:56.000Z | conans/test/unittests/tools/microsoft/test_msbuild.py | thombet/conan | eee0d568ee631fffe1856a5da1c23f47dcdf5c1c | [
"MIT"
] | 1 | 2019-06-07T03:02:02.000Z | 2019-06-07T03:02:02.000Z | conans/test/unittests/tools/microsoft/test_msbuild.py | thombet/conan | eee0d568ee631fffe1856a5da1c23f47dcdf5c1c | [
"MIT"
] | 1 | 2021-08-20T19:47:51.000Z | 2021-08-20T19:47:51.000Z | import textwrap
from conan.tools.microsoft import MSBuild
from conans.model.conf import ConfDefinition
from conans.test.utils.mocks import ConanFileMock, MockSettings
def test_msbuild_cpu_count():
c = ConfDefinition()
c.loads(textwrap.dedent("""\
tools.microsoft.msbuild:max_cpu_count=23
tools.build:processes=10
"""))
settings = MockSettings({"build_type": "Release",
"compiler": "gcc",
"compiler.version": "7",
"os": "Linux",
"arch": "x86_64"})
conanfile = ConanFileMock()
conanfile.settings = settings
conanfile.conf = c.get_conanfile_conf(None)
msbuild = MSBuild(conanfile)
cmd = msbuild.command('project.sln')
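    # the msbuild-specific max_cpu_count (23) should take precedence over the
    # generic tools.build:processes (10)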
assert '/m:23' in cmd
| 29.107143 | 63 | 0.6 | 84 | 815 | 5.714286 | 0.583333 | 0.058333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0189 | 0.28589 | 815 | 27 | 64 | 30.185185 | 0.805842 | 0 | 0 | 0 | 0 | 0 | 0.203681 | 0.078528 | 0 | 0 | 0 | 0 | 0.047619 | 1 | 0.047619 | false | 0 | 0.190476 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d61c9be7e1ffc0dbe58eeae48ca58bb9133715d1 | 1,695 | py | Python | capturemelody.py | eashwar/super-midio-bros | e8572d257efa071b57b4c6107fab03427fa1b64e | [
"MIT"
] | null | null | null | capturemelody.py | eashwar/super-midio-bros | e8572d257efa071b57b4c6107fab03427fa1b64e | [
"MIT"
] | null | null | null | capturemelody.py | eashwar/super-midio-bros | e8572d257efa071b57b4c6107fab03427fa1b64e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Contains an example of midi input, and a separate example of midi output.
By default it runs the output example.
python midi.py --output
python midi.py --input
"""
import sys
import os
import pygame
import pygame.midi
from pygame.locals import *
try: # Ensure set available for output example
set
except NameError:
from sets import Set as set
def input_main(device_id = None):
pygame.init()
pygame.fastevent.init()
event_get = pygame.fastevent.get
event_post = pygame.fastevent.post
pygame.midi.init()
if device_id is None:
input_id = pygame.midi.get_default_input_id()
else:
input_id = device_id
print ("using input_id :%s:" % input_id)
i = pygame.midi.Input( input_id )
pygame.display.set_mode((1,1))
going = True
while going:
events = event_get()
for e in events:
if e.type in [QUIT]:
going = False
if e.type in [KEYDOWN]:
going = False
if e.type in [pygame.midi.MIDIIN]:
print (e)
if i.poll():
midi_events = i.read(10)
# convert them into pygame events.
midi_evs = pygame.midi.midis2events(midi_events, i.device_id)
for m_e in midi_evs:
                if m_e.data2 == 100:
                    # data1 is the note number, data2 the velocity; log full-velocity presses
                    with open("notes.txt", "a") as notes:
                        print(m_e.data1, file=notes)
del i
pygame.midi.quit()
def main(mode='input', device_id=None):
input_main(device_id)
if __name__ == '__main__':
try:
device_id = int( sys.argv[-1] )
    except (ValueError, IndexError):
device_id = None
input_main(device_id) | 21.455696 | 76 | 0.585251 | 233 | 1,695 | 4.090129 | 0.369099 | 0.075551 | 0.047219 | 0.053515 | 0.100735 | 0.100735 | 0.06086 | 0 | 0 | 0 | 0 | 0.009507 | 0.317404 | 1,695 | 79 | 77 | 21.455696 | 0.814175 | 0.149853 | 0 | 0.125 | 0 | 0 | 0.02933 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.125 | 0 | 0.166667 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d61ecb31d1af9c59bf9f73777d714e9801f16ffd | 837 | py | Python | decide-tree/decide_prune.py | ubear/MachineLearn | c24947f7078269f51bf7ee0d2167ca6b71152d1e | [
"MIT"
] | null | null | null | decide-tree/decide_prune.py | ubear/MachineLearn | c24947f7078269f51bf7ee0d2167ca6b71152d1e | [
"MIT"
] | null | null | null | decide-tree/decide_prune.py | ubear/MachineLearn | c24947f7078269f51bf7ee0d2167ca6b71152d1e | [
"MIT"
] | null | null | null | #coding:utf-8
from decision_tree import *
def prune(tree, mingain):
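    # Work bottom-up: recurse into non-leaf branches first, then merge a pair of
    # leaves whenever the entropy gained by keeping them separate is below mingain.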
    if tree.tb.results is None:
        prune(tree.tb, mingain)
    if tree.fb.results is None:
        prune(tree.fb, mingain)
    if tree.tb.results is not None and tree.fb.results is not None:
tb, fb = [], []
for v, c in tree.tb.results.items():
tb += [[v]] * c
for v, c in tree.fb.results.items():
fb += [[v]] * c
delta = entropy(tb + fb) - (entropy(tb) + entropy(fb)) / 2
if delta < mingain:
tree.tb, tree.fb = None, None
tree.results = unique_counts(tb + fb)
if __name__ == '__main__':
from data import my_data
from build_dt_img import draw_tree
tree = build_tree(my_data)
draw_tree(tree, 'previous.jpg')
prune(tree, 1.0)
draw_tree(tree, 'next.jpg')
| 24.617647 | 66 | 0.561529 | 121 | 837 | 3.735537 | 0.322314 | 0.066372 | 0.086283 | 0.066372 | 0.163717 | 0.115044 | 0 | 0 | 0 | 0 | 0 | 0.006791 | 0.296296 | 837 | 33 | 67 | 25.363636 | 0.760611 | 0.014337 | 0 | 0 | 0 | 0 | 0.033981 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.130435 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d628384096a682a0c64ae1ad0b4d5405798588dd | 4,781 | py | Python | code/bert_utils/bert_utils_simplify_v2/data_loader.py | InscribeDeeper/Text-Classification | 9cd3def58b5bd4b722a5b8fdff60a07d977234aa | [
"MIT"
] | null | null | null | code/bert_utils/bert_utils_simplify_v2/data_loader.py | InscribeDeeper/Text-Classification | 9cd3def58b5bd4b722a5b8fdff60a07d977234aa | [
"MIT"
] | null | null | null | code/bert_utils/bert_utils_simplify_v2/data_loader.py | InscribeDeeper/Text-Classification | 9cd3def58b5bd4b722a5b8fdff60a07d977234aa | [
"MIT"
] | null | null | null | import re
from tensorflow.keras.utils import to_categorical
from collections import Counter
import pandas as pd
import torch
from sklearn.model_selection import train_test_split, StratifiedKFold
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
import numpy as np
##############################################################################################################
# Training option
##############################################################################################################
def data_loader_BERT(input_ids, attention_masks, one_hot_labels, batch_size=None, random_state=1234, test_size=0.2, testing=False):
"""generate dataloader for batch input
Args:
input_ids ([type]): [description]
attention_masks ([type]): [description]
one_hot_labels ([type]): [description]
batch_size ([type], optional): [description]. Defaults to None.
random_state (int, optional): [description]. Defaults to 1234.
test_size (float, optional): [description]. Defaults to 0.2.
testing (bool, optional): [description]. Defaults to False.
Returns:
[type]: [description]
"""
assert isinstance(one_hot_labels, np.ndarray)
stratify_y = np.argmax(one_hot_labels, axis=1)
one_hot_labels = torch.tensor(one_hot_labels, dtype=torch.int32)
input_ids = torch.tensor(input_ids, dtype=torch.int32)
attention_masks = torch.tensor(attention_masks, dtype=torch.int32)
if not testing:
train_ids, validation_ids, train_attention_masks, val_attention_masks, train_labels, validation_labels = train_test_split(input_ids, attention_masks, one_hot_labels, random_state=random_state, test_size=test_size, stratify=stratify_y)
# Create the DataLoader for our training set. with shuffle
train_data = TensorDataset(train_ids, train_attention_masks, train_labels)
train_dataloader = DataLoader(train_data, sampler=RandomSampler(train_data), batch_size=batch_size)
# Create the DataLoader for our validation set. without shuffle
validation_data = TensorDataset(validation_ids, val_attention_masks, validation_labels)
validation_dataloader = DataLoader(validation_data, sampler=SequentialSampler(validation_data), batch_size=batch_size)
return train_dataloader, validation_dataloader
else:
batch_size = len(one_hot_labels) if batch_size is None else batch_size
test_data = TensorDataset(input_ids, attention_masks, one_hot_labels)
test_dataloader = DataLoader(test_data, sampler=None, batch_size=batch_size)
return test_dataloader, None
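# Minimal usage sketch (illustrative; inputs would come from a BERT tokenizer):
# train_loader, val_loader = data_loader_BERT(input_ids, attention_masks, y_onehot, batch_size=32)
# for batch_ids, batch_masks, batch_labels in train_loader:
#     ...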
def k_fold_data_loader_BERT(input_ids, attention_masks, one_hot_labels, batch_size=None, random_state=1234, k_fold=3):
"""generate k-fold dataloader with generator
Args:
input_ids ([type]): [description]
attention_masks ([type]): [description]
one_hot_labels ([type]): [description]
batch_size ([type], optional): [description]. Defaults to None.
random_state (int, optional): [description]. Defaults to 1234.
test_size (float, optional): [description]. Defaults to 0.2.
"""
assert isinstance(one_hot_labels, np.ndarray)
one_hot_labels = torch.tensor(one_hot_labels, dtype=torch.int32)
input_ids = torch.tensor(input_ids, dtype=torch.int32)
attention_masks = torch.tensor(attention_masks, dtype=torch.int32)
stratify_y = np.argmax(one_hot_labels, axis=1)
skf = StratifiedKFold(n_splits=k_fold, random_state=random_state, shuffle=True)
for train_index, test_index in skf.split(input_ids, stratify_y):
X_train_ids, X_test_ids = input_ids[train_index], input_ids[test_index]
X_train_attention_masks, X_test_attention_masks = attention_masks[train_index], attention_masks[test_index]
Y_train, Y_test = one_hot_labels[train_index], one_hot_labels[test_index]
train_data = TensorDataset(X_train_ids, X_train_attention_masks, Y_train)
train_dataloader = DataLoader(train_data, sampler=RandomSampler(train_data), batch_size=batch_size)
validation_data = TensorDataset(X_test_ids, X_test_attention_masks, Y_test)
validation_dataloader = DataLoader(validation_data, sampler=SequentialSampler(validation_data), batch_size=batch_size)
yield train_dataloader, validation_dataloader
def one_hot_encoded_y(labels):
    """
    Labels have to be contiguous integers starting at 0;
    e.g. with labels 0..6 the function outputs 7 dimensions.
    """
    labels = np.asarray(labels)
    label_size = int(labels.max())
    assert label_size > 0
    # label_size + 1 classes, since labels are 0-indexed
    return np.eye(label_size + 1)[labels]
| 45.971154 | 242 | 0.708011 | 607 | 4,781 | 5.260297 | 0.192751 | 0.039461 | 0.075164 | 0.063577 | 0.491701 | 0.455997 | 0.455997 | 0.411525 | 0.411525 | 0.390229 | 0 | 0.011017 | 0.16461 | 4,781 | 103 | 243 | 46.417476 | 0.788433 | 0.22192 | 0 | 0.352941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.058824 | 1 | 0.058824 | false | 0 | 0.176471 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d628eb2618c4fa8efe97f0a1814ba041871e5417 | 9,733 | py | Python | prometheus_metrics.py | lesykm/healthstats-collector | fb81a6938ab9ae0ff3408642d0d06e2869454a6f | [
"MIT"
] | null | null | null | prometheus_metrics.py | lesykm/healthstats-collector | fb81a6938ab9ae0ff3408642d0d06e2869454a6f | [
"MIT"
] | null | null | null | prometheus_metrics.py | lesykm/healthstats-collector | fb81a6938ab9ae0ff3408642d0d06e2869454a6f | [
"MIT"
] | 1 | 2021-12-23T19:10:39.000Z | 2021-12-23T19:10:39.000Z | import datetime
import os
import pytz
from prometheus_client import CollectorRegistry, Gauge, pushadd_to_gateway
class PrometheusMetrics:
def __init__(self, logger):
self.logger = logger
self.registry = CollectorRegistry()
self.pushgateway = os.environ.get("PUSHGATEWAY", "localhost:9091")
self.job_name = os.environ.get("JOB_NAME", "healthstats")
self.timezone = os.environ.get("TIMEZONE", "UTC")
def summary(self, data):
self._heart_rate(data)
self._intensity_minutes(data)
self._steps(data)
self._floors(data)
self._calories(data)
self._stress(data)
def weight(self, data):
if (
data.get("weight") is None
or data.get("weight") == 0
or data.get("bodyFat") is None
or data.get("boneMass") is None
or data.get("bmi") is None
or data.get("bodyWater") is None
or data.get("muscleMass") is None
):
return
# create metrics
weight_total = self.gauge("weight_total", "Body weight in KG")
weight_body_fat = self.gauge("weight_body_fat", "Body fat in %")
weight_bone_mass = self.gauge("weight_bone_mass", "Bone mass in KG")
weight_bmi = self.gauge("weight_bmi", "BMI")
weight_body_water = self.gauge("weight_body_water", "Body water in %")
weight_muscle_mass = self.gauge("weight_muscle_mass", "Muscle mass in KG")
# set metrics values
weight_total.set(data["weight"] / 1000.0)
weight_body_fat.set(data["bodyFat"])
weight_bone_mass.set(data["boneMass"] / 1000.0)
weight_bmi.set(data["bmi"])
weight_body_water.set(data["bodyWater"])
weight_muscle_mass.set(data["muscleMass"] / 1000.0)
def _heart_rate(self, data):
if (
data.get("restingHeartRate") is None
or data.get("minHeartRate") is None
or data.get("maxHeartRate") is None
):
return
# create metrics
resting_heart_rate = self.gauge("resting_heart_rate", "Resting heart rate")
heart_rate_resting = self.gauge("heart_rate_resting", "Resting heart rate")
heart_rate_min = self.gauge("heart_rate_min", "Minimum heart rate")
heart_rate_max = self.gauge("heart_rate_max", "Maximum heart rate")
# set metrics values
resting_heart_rate.set(data["restingHeartRate"])
heart_rate_resting.set(data["restingHeartRate"])
heart_rate_min.set(data["minHeartRate"])
heart_rate_max.set(data["maxHeartRate"])
def _steps(self, data):
if (
data.get("totalSteps") is None
or data.get("dailyStepGoal") is None
or data.get("totalDistanceMeters") is None
or data.get("activeSeconds") is None
or data.get("sedentarySeconds") is None
):
return
# create metrics
steps = self.gauge("steps", "Total steps")
steps_daily_goal = self.gauge("steps_daily_goal", "Daily step goal")
distance_meters = self.gauge("distance_meters", "Total distance in meters")
active_seconds = self.gauge("active_seconds", "Seconds in movement")
sedentary_seconds = self.gauge("sedentary_seconds", "Seconds in sedentary position")
# set metrics values
steps.set(data["totalSteps"])
steps_daily_goal.set(data["dailyStepGoal"])
distance_meters.set(data["totalDistanceMeters"])
active_seconds.set(data["activeSeconds"])
sedentary_seconds.set(data["sedentarySeconds"])
def _calories(self, data):
if (
data.get("totalKilocalories") is None
or data.get("activeKilocalories") is None
or data.get("bmrKilocalories") is None
):
return
# create metrics
calories = self.gauge("calories", "Total calories")
calories_active = self.gauge("calories_active", "Active calories")
calories_resting = self.gauge("calories_resting", "Resting calories")
# set metrics values
calories.set(data["totalKilocalories"])
calories_active.set(data["activeKilocalories"])
calories_resting.set(data["bmrKilocalories"])
def _intensity_minutes(self, data):
if (
data.get("moderateIntensityMinutes") is None
or data.get("vigorousIntensityMinutes") is None
):
return
# create metrics
intensity_minutes_moderate = self.gauge(
"intensity_minutes_moderate", "Minutes in moderate intensity activities"
)
intensity_minutes_vigorous = self.gauge(
"intensity_minutes_vigorous", "Minutes in vigorous intensity activities"
)
# set metrics values
intensity_minutes_moderate.set(data["moderateIntensityMinutes"])
intensity_minutes_vigorous.set(data["vigorousIntensityMinutes"])
def _floors(self, data):
if (
data.get("floorsAscended") is None
or data.get("floorsAscendedInMeters") is None
or data.get("floorsDescended") is None
or data.get("floorsDescendedInMeters") is None
):
return
# create metrics
floors_ascended = self.gauge("floors_ascended", "Floors ascended")
floors_ascended_meters = self.gauge("floors_ascended_meters", "Floors ascended in meters")
floors_descended = self.gauge("floors_descended", "Floors descended")
floors_descended_meters = self.gauge(
"floors_descended_meters", "Floors descended in meters"
)
# set metrics values
floors_ascended.set(data["floorsAscended"])
floors_ascended_meters.set(data["floorsAscendedInMeters"])
floors_descended.set(data["floorsDescended"])
floors_descended_meters.set(data["floorsDescendedInMeters"])
def _stress(self, data):
"""
'averageStressLevel': 21,
'maxStressLevel': 83,
'stressDuration': 8400,
'stressPercentage': 18.82,
'stressQualifier': 'UNKNOWN',
'totalStressDuration': 44640,
'highStressDuration': 360,
'highStressPercentage': 0.81,
'lowStressDuration': 7080,
'lowStressPercentage': 15.86,
'mediumStressDuration': 960,
'mediumStressPercentage': 2.15,
'restStressDuration': 28920,
'restStressPercentage': 64.78,
'activityStressDuration': 4020,
'activityStressPercentage': 9.01,
'uncategorizedStressDuration': 3300,
'uncategorizedStressPercentage': 7.39,
high + low + medium = stressDuration
high + low + medium + activity + uncategorized = totalStressDuration
The stress level range is from 0 to 100, where 0 to 25 is a resting state,
26 to 50 is low stress, 51 to 75 is medium stress, and 76 to 100 is a high stress state.
"""
if (
data.get("averageStressLevel") is None
or data.get("maxStressLevel") is None
or data.get("stressDuration") is None
or data.get("totalStressDuration") is None
):
return
# create metrics
average_stress_level = self.gauge("average_stress_level", "Average stress level")
max_stress_level = self.gauge("max_stress_level", "Maximum stress level")
high_stress_percentage = self.gauge("high_stress_percentage", "High stress percentage")
medium_stress_percentage = self.gauge(
"medium_stress_percentage", "Medium stress percentage"
)
low_stress_percentage = self.gauge("low_stress_percentage", "Low stress percentage")
resting_stress_percentage = self.gauge(
"resting_stress_percentage", "Resting stress percentage"
)
# set metrics values
average_stress_level.set(data["averageStressLevel"])
max_stress_level.set(data["maxStressLevel"])
high_stress_percentage.set(data["highStressPercentage"])
medium_stress_percentage.set(data["mediumStressPercentage"])
low_stress_percentage.set(data["lowStressPercentage"])
resting_stress_percentage.set(data["restStressPercentage"])
def sleep(self, data):
        utc = datetime.datetime.utcnow()
now = pytz.timezone(self.timezone).fromutc(utc)
midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
duration = (now - midnight).seconds
if duration < 43200:
# only sync when the day past 12 hours
# 43200 = 3600 * 12
return
if (
data.get("sleepTimeSeconds") is None
or data.get("deepSleepSeconds") is None
or data.get("lightSleepSeconds") is None
or data.get("awakeSleepSeconds") is None
):
return
# create metrics
sleep_time_sec = self.gauge("sleep_time_sec", "Total sleep time in seconds")
sleep_deep_sec = self.gauge("sleep_deep_sec", "Deep sleep time in seconds")
sleep_light_sec = self.gauge("sleep_light_sec", "Light sleep time in seconds")
sleep_awake_sec = self.gauge("sleep_awake_sec", "Sleep awake time in seconds")
# set metrics values
sleep_time_sec.set(data["sleepTimeSeconds"])
sleep_deep_sec.set(data["deepSleepSeconds"])
sleep_light_sec.set(data["lightSleepSeconds"])
sleep_awake_sec.set(data["awakeSleepSeconds"])
def gauge(self, name, documentation):
return Gauge(name, documentation, registry=self.registry)
def publish(self):
pushadd_to_gateway(self.pushgateway, job=self.job_name, registry=self.registry)
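# Minimal usage sketch (illustrative; assumes a Garmin-Connect-style summary dict):
# import logging
# metrics = PrometheusMetrics(logging.getLogger(__name__))
# metrics.summary(daily_summary)  # dict with keys like 'totalSteps', 'restingHeartRate'
# metrics.publish()               # pushes all gauges to $PUSHGATEWAY in one job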
| 39.245968 | 98 | 0.636803 | 1,063 | 9,733 | 5.649106 | 0.182502 | 0.050958 | 0.03597 | 0.045962 | 0.170192 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016919 | 0.259118 | 9,733 | 247 | 99 | 39.404858 | 0.815837 | 0.116716 | 0 | 0.147059 | 0 | 0 | 0.275036 | 0.04733 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070588 | false | 0 | 0.023529 | 0.005882 | 0.158824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d62a7b5bab3d8a5d3f5a1aff360bc9d1b2ac9814 | 2,305 | py | Python | tests/pymcell4_positive/2660_get_current_rxn_count_on_demand_only/observables.py | mcellteam/mcell-tests | 34d2d967b75d56edbae999bf0090641850f4f4fe | [
"MIT"
] | 1 | 2021-08-13T20:40:54.000Z | 2021-08-13T20:40:54.000Z | tests/pymcell4_positive/2660_get_current_rxn_count_on_demand_only/observables.py | mcellteam/mcell_tests | 34d2d967b75d56edbae999bf0090641850f4f4fe | [
"MIT"
] | null | null | null | tests/pymcell4_positive/2660_get_current_rxn_count_on_demand_only/observables.py | mcellteam/mcell_tests | 34d2d967b75d56edbae999bf0090641850f4f4fe | [
"MIT"
] | null | null | null | import mcell as m
from parameters import *
from subsystem import *
from geometry import *
# ---- observables ----
viz_output = m.VizOutput(
mode = m.VizMode.ASCII,
output_files_prefix = './viz_data/seed_' + str(get_seed()).zfill(5) + '/Scene',
every_n_timesteps = 1
)
count_a_Cube1 = m.Count(
name = 'a_Cube1',
expression = m.CountTerm(
molecules_pattern = m.Complex('a'),
region = Cube1
)
)
count_a_Cube2 = m.Count(
name = 'a_Cube2',
expression = m.CountTerm(
molecules_pattern = m.Complex('a'),
region = Cube2 - Cube1
)
)
count_a_Cube3 = m.Count(
name = 'a_Cube3',
expression = m.CountTerm(
molecules_pattern = m.Complex('a'),
region = Cube3 - Cube2
)
)
count_a_World = m.Count(
name = 'a_World',
expression = m.CountTerm(
molecules_pattern = m.Complex('a')
)
)
rxn_a_plus_b = subsystem.find_reaction_rule('rxn_a_plus_b')
assert rxn_a_plus_b
count_rxn_a_plus_b_World = m.Count(
name = 'rxn_a_plus_b_World',
expression = m.CountTerm(reaction_rule = rxn_a_plus_b),
every_n_timesteps = 0
)
count_rxn_a_plus_b_Cube1 = m.Count(
name = 'rxn_a_plus_b_Cube1',
expression = m.CountTerm(
reaction_rule = rxn_a_plus_b,
region = Cube1
),
every_n_timesteps = 0
)
count_rxn_a_plus_b_Cube2 = m.Count(
name = 'rxn_a_plus_b_Cube2',
expression = m.CountTerm(
reaction_rule = rxn_a_plus_b,
region = Cube2 - Cube1
),
every_n_timesteps = 0
)
count_rxn_a_plus_b_Cube3 = m.Count(
name = 'rxn_a_plus_b_Cube3',
expression = m.CountTerm(
reaction_rule = rxn_a_plus_b,
region = Cube3 - Cube2
),
every_n_timesteps = 0
)
observables = m.Observables()
observables.add_viz_output(viz_output)
observables.add_count(count_a_Cube1)
observables.add_count(count_a_Cube2)
observables.add_count(count_a_Cube3)
observables.add_count(count_a_World)
observables.add_count(count_rxn_a_plus_b_World)
observables.add_count(count_rxn_a_plus_b_Cube1)
observables.add_count(count_rxn_a_plus_b_Cube2)
observables.add_count(count_rxn_a_plus_b_Cube3)
# load observables information from bngl file
observables.load_bngl_observables('model.bngl', './react_data/seed_' + str(get_seed()).zfill(5) + '/')
| 22.598039 | 102 | 0.688503 | 332 | 2,305 | 4.361446 | 0.168675 | 0.052486 | 0.104972 | 0.118094 | 0.653315 | 0.563536 | 0.515193 | 0.429558 | 0.352901 | 0.145718 | 0 | 0.019053 | 0.203037 | 2,305 | 101 | 103 | 22.821782 | 0.769189 | 0.0282 | 0 | 0.333333 | 0 | 0 | 0.074654 | 0 | 0 | 0 | 0 | 0 | 0.012821 | 1 | 0 | false | 0 | 0.051282 | 0 | 0.051282 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d62a8b24a611540b7442ff5bbdf17e7e9ceae229 | 1,372 | py | Python | custom_logger.py | LC-Duarte/pytil | 271b4e8dee408b586fbd741511116726e10d1fc0 | [
"MIT"
] | null | null | null | custom_logger.py | LC-Duarte/pytil | 271b4e8dee408b586fbd741511116726e10d1fc0 | [
"MIT"
] | null | null | null | custom_logger.py | LC-Duarte/pytil | 271b4e8dee408b586fbd741511116726e10d1fc0 | [
"MIT"
] | null | null | null | #custom_logger.py
#Leonardo Duarte
import logging
#Logging levels
INFO = logging.INFO
DEBUG = logging.DEBUG
WARNING = logging.WARNING
ERROR = logging.ERROR
def setup(name, **kwargs):
log_level=kwargs.get('log_level',INFO)
formatter = logging.Formatter(fmt='%(asctime)s templatetool %(levelname)s: %(message)s', datefmt='%Y-%m-%d %I:%M:%S')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(log_level)
logger.addHandler(handler)
return logger
def dashed_line():
log = logging.getLogger('main')
line = "----------------------------------------"
log.debug(line)
def print_objects(**kwargs):
log = logging.getLogger('main')
for k, v in kwargs.items():
log.debug(f'{k}:')
log.debug(f'{v}')
dashed_line()
def print_list(items, **kwargs):
    log = logging.getLogger('main')
    bullet = kwargs.get('bullet', True)
    tab = kwargs.get('tab', True)
    limit = kwargs.get('limit', -1)
    name = kwargs.get('name', None)
    c = 0
    if name is not None:
        log.debug(f'{name}:')
    for x in items:
        out = ''
        if tab:
            out += '\t'
        if bullet:
            out += '* '
        out += x
        log.debug(out)
        c += 1
        if c == limit:
            break
    dashed_line()
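# Minimal usage sketch (illustrative only):
# log = setup('main', log_level=DEBUG)
# log.debug('hello')
# print_list(['alpha', 'beta'], name='items')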
| 26.384615 | 121 | 0.560496 | 167 | 1,372 | 4.550898 | 0.377246 | 0.059211 | 0.075 | 0.090789 | 0.076316 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002967 | 0.26312 | 1,372 | 51 | 122 | 26.901961 | 0.748764 | 0.032799 | 0 | 0.111111 | 0 | 0 | 0.124622 | 0.030211 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088889 | false | 0 | 0.022222 | 0 | 0.133333 | 0.044444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d62b77751af6dbe89d65c59649c018081395e8a3 | 4,403 | py | Python | CPAC/utils/ga.py | gkiar/C-PAC | 0926b451dd8622b25eb68c7bcc770f0156238b23 | [
"BSD-3-Clause"
] | 125 | 2015-03-04T09:14:46.000Z | 2022-03-29T07:46:12.000Z | CPAC/utils/ga.py | gkiar/C-PAC | 0926b451dd8622b25eb68c7bcc770f0156238b23 | [
"BSD-3-Clause"
] | 1,018 | 2015-01-04T16:01:29.000Z | 2022-03-31T19:23:09.000Z | CPAC/utils/ga.py | gkiar/C-PAC | 0926b451dd8622b25eb68c7bcc770f0156238b23 | [
"BSD-3-Clause"
] | 117 | 2015-01-10T08:05:52.000Z | 2022-01-18T05:16:51.000Z | import configparser
import os
import os.path as op
import requests
import tempfile
import threading
import traceback
import uuid
from CPAC.info import __version__, ga_tracker
udir = op.expanduser('~')
temp_dir = False
if udir == '/':
    udir = tempfile.mkdtemp()
    temp_dir = True
tracking_path = op.join(udir, '.cpac')
def get_or_create_config():
if not op.exists(tracking_path):
parser = configparser.ConfigParser()
parser.read_dict(dict(user=dict(uid=uuid.uuid1().hex,
track=True)))
with open(tracking_path, 'w+') as fhandle:
parser.write(fhandle)
else:
parser = configparser.ConfigParser()
parser.read(tracking_path)
return parser
def get_uid():
if os.environ.get('CPAC_TRACKING', '').lower() not in [
'',
'0',
'false',
'off'
]:
return os.environ.get('CPAC_TRACKING')
parser = get_or_create_config()
if parser['user'].getboolean('track'):
return parser['user']['uid']
return None
def do_it(data, timeout):
try:
headers = {
'User-Agent': 'C-PAC/{} (https://fcp-indi.github.io)'.format(
__version__
)
}
response = requests.post(
'https://www.google-analytics.com/collect',
data=data,
timeout=timeout,
headers=headers
)
return response
    except requests.RequestException:
        return False
if temp_dir:
try:
os.remove(tracking_path)
os.rmdir(udir)
temp_dir = False
    except OSError:
        print("Unable to delete temporary tracking path.")
def track_event(category, action, uid=None, label=None, value=0,
software_version=None, timeout=2, thread=True):
"""
Record an event with Google Analytics
Parameters
----------
category : str
Event category.
action : str
Event action.
uid : str
        User unique ID, read from the local tracking config when not supplied.
label : str
Event label.
value : int
Event value.
software_version : str
Records a version of the software.
timeout : float
Maximal duration (in seconds) for the network connection to track the
event. After this duration has elapsed with no response (e.g., on a
slow network connection), the tracking is dropped.
"""
if os.environ.get('CPAC_TRACKING', '').lower() in ['0', 'false', 'off']:
return
if uid is None:
uid = get_uid()
if not uid:
return
this = "/CPAC/utils/ga.py"
exec_stack = list(reversed(traceback.extract_stack()))
assert exec_stack[0][0].endswith(this)
package_path = exec_stack[0][0][:-len(this)]
# only CPAC paths are going to be recorded
file_path = ""
for s in exec_stack:
if s[0].endswith(this):
continue
if not s[0].startswith(package_path):
break
file_path = s[0][len(package_path):]
data = {
'v': '1', # API version.
'tid': ga_tracker, # GA tracking ID
'dp': file_path,
'cid': uid, # User unique ID, stored in `tracking_path`
't': 'event', # Event hit type.
'ec': category, # Event category.
'ea': action, # Event action.
'el': label, # Event label.
'ev': value, # Event value, must be an integer
'aid': "CPAC",
'an': "CPAC",
'av': __version__,
'aip': 1, # anonymize IP by removing last octet, slightly worse
# geolocation
}
if thread:
t = threading.Thread(target=do_it, args=(data, timeout))
t.start()
else:
do_it(data, timeout)
def track_config(cpac_interface):
track_event(
'config',
cpac_interface,
label=None,
value=None,
thread=False
)
def track_run(level='participant', participants=0):
if level in ['participant', 'group']:
track_event(
'run',
level,
label='participants',
value=participants,
thread=False
)
else:
track_event(
'config',
'test',
label='participants',
value=participants,
thread=False
)
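# Minimal usage sketch (illustrative; sends a hit only when tracking is enabled):
# track_run(level='participant', participants=2)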
| 25.16 | 77 | 0.556893 | 504 | 4,403 | 4.746032 | 0.373016 | 0.035117 | 0.01505 | 0.020067 | 0.137124 | 0.063545 | 0.02592 | 0 | 0 | 0 | 0 | 0.005086 | 0.330229 | 4,403 | 174 | 78 | 25.304598 | 0.806036 | 0.192823 | 0 | 0.185484 | 0 | 0 | 0.098318 | 0 | 0 | 0 | 0 | 0 | 0.008065 | 1 | 0.048387 | false | 0 | 0.072581 | 0 | 0.185484 | 0.008065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d631ffddb2895fc51dd1aaa5d9e97af2b59d8562 | 3,763 | py | Python | src/modeling/metrics.py | zahrahosseini99/fever2-columbia | 0859c005a960c144de903a095d480fe8c131754a | [
"Apache-2.0"
] | 8 | 2020-04-29T04:59:45.000Z | 2022-03-20T01:26:21.000Z | src/modeling/metrics.py | zahrahosseini99/fever2-columbia | 0859c005a960c144de903a095d480fe8c131754a | [
"Apache-2.0"
] | 2 | 2021-04-30T21:12:58.000Z | 2022-03-21T22:18:50.000Z | src/modeling/metrics.py | zahrahosseini99/fever2-columbia | 0859c005a960c144de903a095d480fe8c131754a | [
"Apache-2.0"
] | 4 | 2020-05-05T01:18:34.000Z | 2022-03-22T07:20:12.000Z | from typing import Optional
from overrides import overrides
import numpy as np
import torch
from allennlp.training.metrics.metric import Metric
class FeverScore(Metric):
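    """FEVER-style score: a claim counts as correct only when its predicted label
    is right and, for non-NEI claims, some complete gold evidence set is contained
    in the top ``max_select`` predicted evidence items."""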
def __init__(self, nei_label=0, max_select=5) -> None:
self.correct_count = 0.
self.total_count = 0.
self.correct_evidence_count = 0.
self.total_evidence_count = 0.
self.nei_label = nei_label
self.max_select = max_select
def __call__(self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
evidence_predictions: torch.Tensor,
evidence: torch.Tensor,
indices=False,
metadata=None,
pad_idx=-1):
top_k = predictions.max(-1)[1].unsqueeze(-1)
correct = top_k.eq(gold_labels.long().unsqueeze(-1)).view(-1)
evidence_predictions = evidence_predictions.data.cpu().numpy()
total_evidence_count = 0
correct_evidence_count = 0
total_correct = 0
fever_recall = []
for idx,(is_correct, evidence_prediction) in enumerate(zip(correct,
evidence_predictions)):
#print(predictions[idx], gold_labels[idx].item(), is_correct.item(),
# evidence_prediction, metadata[idx])
try:
gold_label = gold_labels[idx]
except IndexError:
gold_label = gold_labels.item()
if gold_label != self.nei_label:
total_evidence_count += 1
#TODO: subset evidence
evidence_metadata = {tuple(metadata[idx]['evidence'][i]) for i in evidence_prediction[:self.max_select] if i < len(metadata[idx]['evidence'])}
found_evidence = False
for evidence_set in metadata[idx]['gold']:
#print(evidence_set, evidence_set.issubset(evidence_metadata))
if evidence_set.issubset(evidence_metadata):
found_evidence = True
break
correct_evidence_count += found_evidence
if is_correct and (gold_label == self.nei_label or found_evidence):
total_correct += 1
fever_recall.append(1)
else:
fever_recall.append(0)
#print(total_evidence_count, correct_evidence_count, total_correct)
self.total_evidence_count += total_evidence_count
self.correct_evidence_count += correct_evidence_count
self.total_count += (idx+1)#int(gold_labels.size(0))
self.correct_count += total_correct
fever_recall = torch.autograd.Variable(torch.FloatTensor(fever_recall))
if torch.cuda.is_available() and evidence.is_cuda:
idx = evidence.get_device()
fever_recall = fever_recall.cuda(idx)
return fever_recall
def get_metric(self, reset: bool = False):
"""
Returns
-------
The accumulated accuracy.
"""
accuracy = 0.0
if float(self.total_count) > 0:
accuracy = float(self.correct_count) / float(self.total_count)
recall = 0.0
if float(self.total_evidence_count) > 0:
recall = float(self.correct_evidence_count) / float(self.total_evidence_count)
if reset:
self.reset()
return accuracy, recall
@overrides
def reset(self):
self.correct_count = 0.0
self.total_count = 0.0
self.correct_evidence_count = 0.
self.total_evidence_count = 0.
| 37.257426 | 158 | 0.570821 | 405 | 3,763 | 5.032099 | 0.22963 | 0.10844 | 0.07949 | 0.053974 | 0.18842 | 0.065751 | 0.048086 | 0.048086 | 0.048086 | 0.048086 | 0 | 0.01336 | 0.343609 | 3,763 | 100 | 159 | 37.63 | 0.811741 | 0.08557 | 0 | 0.054795 | 0 | 0 | 0.005889 | 0 | 0 | 0 | 0 | 0.01 | 0 | 1 | 0.054795 | false | 0 | 0.068493 | 0 | 0.164384 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d633c093faac664c8098b15521bd8dae8ec5b536 | 565 | py | Python | ci/fetch_pysteps_data.py | pySTEPS/pysteps_tutorials | aeb7cf92603c9b72bf6e80d0870a10daac3a4923 | [
"BSD-3-Clause"
] | null | null | null | ci/fetch_pysteps_data.py | pySTEPS/pysteps_tutorials | aeb7cf92603c9b72bf6e80d0870a10daac3a4923 | [
"BSD-3-Clause"
] | null | null | null | ci/fetch_pysteps_data.py | pySTEPS/pysteps_tutorials | aeb7cf92603c9b72bf6e80d0870a10daac3a4923 | [
"BSD-3-Clause"
] | 1 | 2022-03-08T08:10:11.000Z | 2022-03-08T08:10:11.000Z | """
Install the pysteps data in a test environment and create a pystepsrc
configuration file pointing to that data.
"""
if __name__ == "__main__":
import argparse
from pysteps.datasets import create_default_pystepsrc, download_pysteps_data
parser = argparse.ArgumentParser(description="Install pysteps data")
parser.add_argument(
"dest_dir", type=str, help="Directory where to install the Pysteps data"
)
args = parser.parse_args()
download_pysteps_data(args.dest_dir, force=True)
create_default_pystepsrc(args.dest_dir)
| 29.736842 | 80 | 0.750442 | 73 | 565 | 5.520548 | 0.561644 | 0.136476 | 0.084367 | 0.104218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.171681 | 565 | 18 | 81 | 31.388889 | 0.861111 | 0.19646 | 0 | 0 | 0 | 0 | 0.17713 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d6353235caf92485faaed3962ae13a7db0c3c200 | 4,723 | py | Python | core/commander.py | brant-ruan/IDF4APEV | b5ca7be867a8720ae8eccefb6c9f49c3d53e6888 | [
"MIT"
] | 2 | 2019-12-24T13:56:44.000Z | 2020-05-21T23:23:16.000Z | core/commander.py | brant-ruan/IDF4APEV | b5ca7be867a8720ae8eccefb6c9f49c3d53e6888 | [
"MIT"
] | 1 | 2020-01-05T16:30:07.000Z | 2020-01-19T14:19:55.000Z | core/commander.py | brant-ruan/IDF4APEV | b5ca7be867a8720ae8eccefb6c9f49c3d53e6888 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@File : commander.py
@Time : 2019-04-20 11:04
@Author : Bonan Ruan
@Desc    : Orchestrate device loading, PoC checking and vulnerability diagnosis
"""
from core.executer import Executer
from core.builder import Builder
from core.poc_manager import PoCManager
import utils.utils as utils
import utils.consts as consts
import time
class Commander:
def __init__(self):
self.executer = Executer()
self.builder = Builder()
self.poc_manager = PoCManager()
def load_devices(self, only_number=False):
devices = self.executer.load_devices(only_number=only_number)
return devices
def check_devices(self, devices, pocs, result):
for device in devices:
for poc in pocs:
time.sleep(1)
utils.debug(
"[*] Checking device <%s> with poc <%s>" %
(device.name, poc.name))
status = self._check_device(device=device, poc=poc)
if status == consts.VULNERABLE:
utils.debug(
"[!] Device <%s> is VULNERABLE to vulnerability <%s>" %
(device.name, poc.cve), mode=consts.DEBUG_RED)
else:
utils.debug(
"[√] Device <%s> is NOT VULNERABLE to vulnerability <%s>" %
(device.name, poc.cve), mode=consts.DEBUG_GREEN)
print("")
result.add_check_result(device=device, poc=poc, status=status)
def _check_device(self, device, poc):
utils.debug(
"[*] \tBuilding\tpoc: %s\tsdk: android-%s\tabi: %s" %
(poc.file, device.sdk, device.abi))
file_path = self.builder.build_poc(
poc_file=poc.file, device_name=device.name, abi=device.abi, sdk=device.sdk)
utils.debug(
"[*] \tExecuting\tpoc: %s\tdevice: %s" %
(poc.file, device.name))
status = self.executer.exec_poc(
device_name=device.name, binary=file_path)
return status
def diagnose_devices(self, devices, vulns, result):
for device in devices:
for vuln in vulns:
utils.debug(
"[*] Diagnosing device <%s> with vulnerability <%s>" %
(device.name, vuln.cve))
status = self._diagnose_device(device, vuln)
if status == consts.VULNERABLE:
utils.debug(
"[!] Device <%s> MAY BE VULNERABLE to vulnerability <%s>" %
(device.name, vuln.cve), mode=consts.DEBUG_YELLOW)
else:
utils.debug(
"[√] Device <%s> MAY BE NOT VULNERABLE to vulnerability <%s>" %
(device.name, vuln.cve), mode=consts.DEBUG_GREEN)
print("")
result.add_diagnose_result(
device=device, vuln=vuln, status=status)
def _diagnose_device(self, device, vuln):
# if security patch date is not earlier than the patch date of vuln
# then this device may be not vulnerable
if not _date_is_earlier(device_date=device.sec_patch_date,
patch_date=vuln.patch_date):
return consts.NOT_VULNERABLE
# if kernel version of device is not in the range of vulnerable kernel version
# then it may be not vulnerable
if not _kernel_is_in_range(
device_ver=device.kernel_version, vuln_ver_list=vuln.vuln_kernel_ver):
return consts.NOT_VULNERABLE
return consts.VULNERABLE
def export_result(self, result=None):
result.export()
def _date_is_earlier(device_date, patch_date):
    # some old devices do not even have a security-patch date
    if not device_date:
        return True
    return device_date < patch_date
def _kernel_is_in_range(device_ver, vuln_ver_list):
    # vuln_ver_list = [min_version_or_None, max_version]; the range is [min, max)
    dev_ver_num = _version_to_int(device_ver)
    vuln_ver_min = 0
    if vuln_ver_list[0]:
        vuln_ver_min = _version_to_int(vuln_ver_list[0])
    vuln_ver_max = _version_to_int(vuln_ver_list[1])
    return vuln_ver_min <= dev_ver_num < vuln_ver_max
def _version_to_int(version):
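    # base-1000 positional encoding, e.g. '4.14.2' -> 2 + 14*1000 + 4*1000000 = 4014002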
ver_num = 0
i = 1
ver_list = version.split('.')
ver_list.reverse()
for sub_ver in ver_list:
ver_num += int(sub_ver) * i
i *= 1000
return ver_num
if __name__ == "__main__":
a = Commander()
| 32.798611 | 87 | 0.56913 | 569 | 4,723 | 4.511424 | 0.224956 | 0.042852 | 0.025711 | 0.046747 | 0.323335 | 0.305415 | 0.176081 | 0.173744 | 0.089599 | 0.089599 | 0 | 0.007651 | 0.335804 | 4,723 | 143 | 88 | 33.027972 | 0.81001 | 0.083422 | 0 | 0.27619 | 0 | 0 | 0.093163 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.057143 | 0 | 0.266667 | 0.019048 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d6357aa58a908129b11e43500c1352a82061c155 | 221 | py | Python | simple_fun_#8_kill_kth_bit.py | Kunalpod/codewars | 8dc1af2f3c70e209471045118fd88b3ea1e627e5 | [
"MIT"
] | null | null | null | simple_fun_#8_kill_kth_bit.py | Kunalpod/codewars | 8dc1af2f3c70e209471045118fd88b3ea1e627e5 | [
"MIT"
] | null | null | null | simple_fun_#8_kill_kth_bit.py | Kunalpod/codewars | 8dc1af2f3c70e209471045118fd88b3ea1e627e5 | [
"MIT"
] | null | null | null | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Simple Fun #8: Kill K-th Bit
#Problem level: 7 kyu
def kill_kth_bit(n, k):
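    # clear the k-th bit (1-indexed from the least significant end) of n;
    # an equivalent bitwise form: n & ~(1 << (k - 1))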
    b = list(bin(n))[2:]
    if len(b) >= k:
        b[-k] = '0'
    return int(''.join(b), 2)
| 20.090909 | 43 | 0.574661 | 41 | 221 | 3.04878 | 0.731707 | 0.032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034884 | 0.221719 | 221 | 10 | 44 | 22.1 | 0.69186 | 0.420814 | 0 | 0 | 0 | 0 | 0.00813 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d63812b8fe6e16b469f6f47a426cfd5513175e5a | 1,642 | py | Python | filter_plugins/filter_for_dict_list.py | link-u/ansible-roles-v2_php7 | f0572912d80f5660401878c69bb2836e960d1819 | [
"MIT"
] | null | null | null | filter_plugins/filter_for_dict_list.py | link-u/ansible-roles-v2_php7 | f0572912d80f5660401878c69bb2836e960d1819 | [
"MIT"
] | 9 | 2021-04-01T17:10:50.000Z | 2022-03-24T04:58:44.000Z | filter_plugins/filter_for_dict_list.py | link-u/ansible-roles-v2_php7 | f0572912d80f5660401878c69bb2836e960d1819 | [
"MIT"
] | null | null | null | from ansible.errors import AnsibleError
class FilterModule(object):
def filters(self):
return {
'combine_dict_list': self.combine_dict_list,
'search_from_dict_list': self.search_from_dict_list,
'search_item_from_dict_list': self.search_item_from_dict_list,
}
def combine_dict_list(self, dict_list: list, key: str = 'key') -> list:
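        # Combine dicts that share the same `key` value: a later item replaces an
        # earlier one, unless it carries merge_mode == 'append' (then both are kept).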
result_list = []
for item in dict_list:
i = self.search(result_list, item[key], key)
merge_mode = self.get(item, 'merge_mode')
if merge_mode == 'append' or i == -1:
result_list.append(item)
else:
result_list[i] = item
return result_list
def search_from_dict_list(self, dict_list: list, search_key: str, key: str = 'key', value: str = 'value') -> list:
result_list = []
for item in dict_list:
if item[key] == search_key:
result_list.append(item[value])
return result_list
def search_item_from_dict_list(self, dict_list: list, search_key: str, key: str = 'key') -> list:
result_list = []
for item in dict_list:
if item[key] == search_key:
result_list.append(item)
return result_list
def search(self, dict_list: list, search_key: str, key: str = 'key') -> int:
for i, item in enumerate(dict_list):
if item[key] == search_key:
return i
return -1
def get(self, item: dict, key: str):
if key in item.keys():
return item[key]
else:
return None
| 34.93617 | 118 | 0.576736 | 214 | 1,642 | 4.168224 | 0.168224 | 0.152466 | 0.070628 | 0.071749 | 0.585202 | 0.507848 | 0.380045 | 0.350897 | 0.350897 | 0.350897 | 0 | 0.001795 | 0.321559 | 1,642 | 46 | 119 | 35.695652 | 0.798923 | 0 | 0 | 0.4 | 0 | 0 | 0.059074 | 0.028624 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.025 | 0.025 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d63de35a7cbc93490b36f2a69557bc91a57c22d4 | 633 | py | Python | python/merge_2_sorted_lists.py | anishLearnsToCode/leetcode-algorithms | d5a84e9179a4c3427890313a933fe1950fbcdabc | [
"MIT"
] | 17 | 2020-06-13T15:05:55.000Z | 2022-03-16T18:28:53.000Z | python/merge_2_sorted_lists.py | anishLearnsToCode/leetcode-algorithms | d5a84e9179a4c3427890313a933fe1950fbcdabc | [
"MIT"
] | null | null | null | python/merge_2_sorted_lists.py | anishLearnsToCode/leetcode-algorithms | d5a84e9179a4c3427890313a933fe1950fbcdabc | [
"MIT"
] | 9 | 2020-12-27T19:17:58.000Z | 2022-03-22T07:02:19.000Z | # Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
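        # iterative merge with a dummy head node; O(m+n) time, O(1) extra space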
result = ListNode(val=-1)
previous = result
while l1 is not None and l2 is not None:
if l1.val <= l2.val:
previous.next = l1
l1 = l1.next
else:
previous.next = l2
l2 = l2.next
previous = previous.next
previous.next = l2 if l1 is None else l1
return result.next
| 26.375 | 68 | 0.537125 | 79 | 633 | 4.253165 | 0.35443 | 0.142857 | 0.053571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 0.382306 | 633 | 23 | 69 | 27.521739 | 0.815857 | 0.053712 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d64364d5d663ab2f6e187dde65f5c3f8303a30c3 | 954 | py | Python | paperscraper/6-get_unique_authors.py | ahmed-shariff/scraper | 52bed967db7e08e438daaa8dfa8d9338567ad7c2 | [
"MIT"
] | 1 | 2021-11-19T02:56:22.000Z | 2021-11-19T02:56:22.000Z | paperscraper/6-get_unique_authors.py | ahmed-shariff/scraper | 52bed967db7e08e438daaa8dfa8d9338567ad7c2 | [
"MIT"
] | 1 | 2021-11-19T03:42:58.000Z | 2022-03-29T16:32:16.000Z | paperscraper/6-get_unique_authors.py | ahmed-shariff/scraper | 52bed967db7e08e438daaa8dfa8d9338567ad7c2 | [
"MIT"
] | 1 | 2021-11-19T02:56:28.000Z | 2021-11-19T02:56:28.000Z | # External packages
import sys
import pandas as pd
import ast
import os
# Internal modules
import paperscraper.config as config
def main():
# Read input file
df_scraped_input = pd.read_csv(config.path_postprocessing_output, sep='\t', index_col=0)
unique_authors = set()
for index, row in df_scraped_input.iterrows():
authors_list = list()
try:
authors_list = ast.literal_eval(row["author_processed"])
        except (ValueError, SyntaxError):
            # These are mostly 'ERROR' and 'No Url' strings that are actually Error Codes defined in the scraper.
            pass
if isinstance(authors_list, list):
for au in authors_list:
unique_authors.add(au)
# Save list to disk
pd.DataFrame(list(unique_authors), columns=["author"]).to_csv(config.path_unique_authors, sep='\t', header=True)
if __name__ == "__main__":
main()
sys.exit(os.EX_OK) # code 0, all ok
| 26.5 | 116 | 0.659329 | 132 | 954 | 4.545455 | 0.583333 | 0.086667 | 0.046667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002782 | 0.246331 | 954 | 35 | 117 | 27.257143 | 0.831711 | 0.191824 | 0 | 0 | 0 | 0 | 0.044503 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0.047619 | 0.238095 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d64587d60e9a4f706954810829075388a9ad602e | 7,453 | bzl | Python | package_managers/download_pkgs.bzl | mikekamornikov/base-images-docker | 10df42be69f0c4afc87f0df9a0b35fcfb43b5854 | [
"Apache-2.0"
] | null | null | null | package_managers/download_pkgs.bzl | mikekamornikov/base-images-docker | 10df42be69f0c4afc87f0df9a0b35fcfb43b5854 | [
"Apache-2.0"
] | null | null | null | package_managers/download_pkgs.bzl | mikekamornikov/base-images-docker | 10df42be69f0c4afc87f0df9a0b35fcfb43b5854 | [
"Apache-2.0"
] | null | null | null | #Copyright 2017 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rule for downloading apt packages and tar them in a .tar file."""
def _generate_add_additional_repo_commands(ctx, additional_repos):
return """printf "{repos}" >> /etc/apt/sources.list.d/{name}_repos.list""".format(
name = ctx.attr.name,
repos = "\n".join(additional_repos),
)
def _generate_download_commands(ctx, packages, additional_repos):
return """#!/bin/bash
set -ex
{add_additional_repo_commands}
# Remove /var/lib/apt/lists/* in the base image. apt-get update -y command will create them.
rm -rf /var/lib/apt/lists/*
# Fetch Index
apt-get update -y
# Make partial dir
mkdir -p /tmp/install/./partial
# Install command
apt-get install --no-install-recommends -y -q -o Dir::Cache="/tmp/install" -o Dir::Cache::archives="." {packages} --download-only
items=$(ls /tmp/install/*.deb)
if [ -z "$items" ]; then
echo "Did not find the .deb files for debian packages {packages} in /tmp/install. Did apt-get actually succeed?" && false
fi
# Generate csv listing the name & versions of the debian packages.
# Example contents of a metadata CSV with debian packages gcc 8.1 & clang 9.1:
# Name,Version
# gcc,7.1
# clang,9.1
echo "Generating metadata CSV file {installables}_metadata.csv"
echo Name,Version > {installables}_metadata.csv
dpkg_deb_path=$(which dpkg-deb)
for item in $items; do
echo "Adding information about $item to metadata CSV"
pkg_name=$($dpkg_deb_path -f $item Package)
if [ -z "$pkg_name" ]; then
echo "Failed to get name of the package for $item" && false
fi
pkg_version=$($dpkg_deb_path -f $item Version)
if [ -z "$pkg_version" ]; then
echo "Failed to get the version of the package for $item" && false
fi
echo "Package $pkg_name, Version $pkg_version"
echo -n "$pkg_name," >> {installables}_metadata.csv
echo $pkg_version >> {installables}_metadata.csv
done;
# Tar command to only include all the *.deb files and ignore other directories placed in the cache dir.
tar -cpf {installables}_packages.tar --mtime='1970-01-01' --directory /tmp/install/. `cd /tmp/install/. && ls *.deb`""".format(
installables = ctx.attr.name,
packages = " ".join(packages),
add_additional_repo_commands = _generate_add_additional_repo_commands(ctx, additional_repos),
)
def _run_download_script(
ctx,
build_contents,
image_tar,
output_tar,
output_script,
output_metadata,
image_id_extractor):
contents = build_contents.replace(image_tar.short_path, image_tar.path)
contents = contents.replace(output_tar.short_path, output_tar.path)
    # Paths differ when running inside the bazel build sandbox, hence replace
    # each short_path with the full path above.
ctx.actions.write(
output = output_script,
content = contents,
)
ctx.actions.run(
outputs = [output_tar, output_metadata],
executable = output_script,
inputs = [image_tar, image_id_extractor],
)
def _impl(ctx, image_tar = None, packages = None, additional_repos = None, output_executable = None, output_tar = None, output_script = None, output_metadata = None):
"""Implementation for the download_pkgs rule.
Args:
ctx: The bazel rule context
image_tar: File, overrides ctx.file.image_tar
packages: str List, overrides ctx.attr.packages
additional_repos: str List, overrides ctx.attr.additional_repos
output_executable: File, overrides ctx.outputs.executable
output_tar: File, overrides ctx.outputs.pkg_tar
output_script: File, overrides ctx.outputs.build_script
output_metadata: File, overrides ctx.outputs.metadata_csv
"""
image_tar = image_tar or ctx.file.image_tar
packages = packages or ctx.attr.packages
additional_repos = additional_repos or ctx.attr.additional_repos
output_executable = output_executable or ctx.outputs.executable
output_tar = output_tar or ctx.outputs.pkg_tar
output_script = output_script or ctx.outputs.build_script
output_metadata = output_metadata or ctx.outputs.metadata_csv
if len(packages) == 0:
fail("attribute 'packages' given to download_pkgs rule by {} was empty.".format(attr.label))
# Generate a shell script to run apt_get inside this docker image.
# TODO(tejaldesai): Replace this by docker_run rule
build_contents = """\
#!/bin/bash
set -ex
# Load the image and remember its name
image_id=$(python {image_id_extractor_path} {image_tar})
docker load -i {image_tar}
# Run the builder image.
cid=$(docker run -w="/" -d --privileged $image_id sh -c $'{download_commands}')
docker attach $cid
docker cp $cid:{installables}_packages.tar {output}
docker cp $cid:{installables}_metadata.csv {output_metadata}
# Cleanup
docker rm $cid
""".format(
image_tar = image_tar.short_path,
installables = ctx.attr.name,
download_commands = _generate_download_commands(ctx, packages, additional_repos),
output = output_tar.short_path,
output_metadata = output_metadata.path,
image_id_extractor_path = ctx.file._image_id_extractor.path,
)
_run_download_script(
ctx,
build_contents,
image_tar,
output_tar,
output_script,
output_metadata,
ctx.file._image_id_extractor,
)
ctx.actions.write(
output = output_executable,
content = build_contents,
)
return struct(
runfiles = ctx.runfiles(files = [image_tar, output_script, output_metadata, ctx.file._image_id_extractor]),
files = depset([output_executable]),
)
_attrs = {
"image_tar": attr.label(
default = Label("//ubuntu:ubuntu_16_0_4_vanilla.tar"),
allow_single_file = True,
),
"packages": attr.string_list(
mandatory = True,
),
"additional_repos": attr.string_list(),
"_image_id_extractor": attr.label(
default = "@io_bazel_rules_docker//contrib:extract_image_id.py",
allow_single_file = True,
),
}
_outputs = {
"pkg_tar": "%{name}.tar",
"build_script": "%{name}.sh",
"metadata_csv": "%{name}_metadata.csv",
}
# Export download_pkgs rule for other bazel rules to depend on.
download = struct(
attrs = _attrs,
outputs = _outputs,
implementation = _impl,
)
"""Downloads packages within a container.
This rule creates a script to download packages within a container.
The script bunldes all the packages in a tarball.
Args:
name: A unique name for this rule.
image_tar: The image tar for the container used to download packages.
packages: list of packages to download. e.g. ['curl', 'netbase']
additional_repos: list of additional debian package repos to use, in sources.list format
"""
download_pkgs = rule(
attrs = _attrs,
executable = True,
outputs = _outputs,
implementation = _impl,
)
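# Minimal BUILD usage sketch (target name is illustrative; image_tar falls back
# to the default declared in _attrs above):
# download_pkgs(
#     name = "netbase_pkgs",
#     packages = ["curl", "netbase"],
# )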
| 36.179612 | 166 | 0.700255 | 1,014 | 7,453 | 4.956608 | 0.262327 | 0.030243 | 0.025468 | 0.019897 | 0.213689 | 0.135893 | 0.095702 | 0.06546 | 0.045165 | 0.045165 | 0 | 0.004858 | 0.19898 | 7,453 | 205 | 167 | 36.356098 | 0.837018 | 0.187039 | 0 | 0.219178 | 0 | 0.041096 | 0.441224 | 0.10297 | 0 | 0 | 0 | 0.004878 | 0 | 1 | 0.027397 | false | 0 | 0 | 0.013699 | 0.047945 | 0.006849 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d646dec8e4a9b46e4c87b603db2467ff487bbb88 | 765 | py | Python | src/app.py | Gscienty/openfaas_wechat | 9c959228e3a87d6857876ce1c0f33f527dd0f67e | [
"MIT"
] | 1 | 2019-11-14T06:12:43.000Z | 2019-11-14T06:12:43.000Z | src/app.py | Gscienty/openfaas_wechat | 9c959228e3a87d6857876ce1c0f33f527dd0f67e | [
"MIT"
] | null | null | null | src/app.py | Gscienty/openfaas_wechat | 9c959228e3a87d6857876ce1c0f33f527dd0f67e | [
"MIT"
] | null | null | null | from flask import Flask, request, abort, jsonify
import wechat_sec, http_util
import sys
import os
import logging
app = Flask(__name__)
app.logger.setLevel(logging.NOTSET)
def process(uri, sec=False):
run_env = os.getenv('RUN_ENV')
req = wechat_sec.req_build(request.json)
if run_env in { 'develop', 'mock' }:
print('client request: uri: {uri};sec:{sec}; req body: {req}'.format(uri=uri, sec=sec, req=str(req)))
if sec:
res_content = http_util.security_call(uri, req)
else:
res_content = http_util.normal_call(uri, req)
if 'sign' not in res_content:
return jsonify(res_content), 400
if not wechat_sec.response_sign_check(res_content):
return {}, 510
return jsonify(res_content), 200
| 28.333333 | 109 | 0.682353 | 114 | 765 | 4.368421 | 0.447368 | 0.120482 | 0.036145 | 0.048193 | 0.060241 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014658 | 0.197386 | 765 | 26 | 110 | 29.423077 | 0.796417 | 0 | 0 | 0 | 0 | 0 | 0.098168 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.238095 | 0 | 0.428571 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d6483a58a24b696782a730062dce4d1c9a9cbd47 | 23,517 | py | Python | archivebot.py | Botomatik/JackBot | 58651d8b5a5bcead2a2eb79849019cb4f972b7cd | [
"MIT"
] | null | null | null | archivebot.py | Botomatik/JackBot | 58651d8b5a5bcead2a2eb79849019cb4f972b7cd | [
"MIT"
] | null | null | null | archivebot.py | Botomatik/JackBot | 58651d8b5a5bcead2a2eb79849019cb4f972b7cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
archivebot.py - discussion page archiving bot.
usage:
python archivebot.py [OPTIONS] TEMPLATE_PAGE
Bot examines backlinks (Special:Whatlinkshere) to TEMPLATE_PAGE.
Then goes through all pages (unless a specific page specified using options)
and archives old discussions. This is done by breaking a page into threads,
then scanning each thread for timestamps. Threads older than a specified
threshold are then moved to another page (the archive), which can be named
either based on the thread's name, or the name can contain a counter which
will be incremented when the archive reaches a certain size.
Transcluded template may contain the following parameters:
{{TEMPLATE_PAGE
|archive =
|algo =
|counter =
|maxarchivesize =
|minthreadsleft =
|minthreadstoarchive =
|archiveheader =
|key =
}}
Meanings of parameters are:
archive Name of the page to which archived threads will be put.
Must be a subpage of the current page. Variables are
supported.
algo specifies the maximum age of a thread. Must be in the form
old(<delay>) where <delay> specifies the age in hours or
days like 24h or 5d.
Default is old(24h)
counter The current value of a counter which could be assigned as
variable. Will be actualized by bot. Initial value is 1.
maxarchivesize The maximum archive size before incrementing the counter.
Value can be given with appending letter like K or M which
indicates KByte or MByte. Default value is 1000M.
minthreadsleft Minimum number of threads that should be left on a page.
Default value is 5.
minthreadstoarchive The minimum number of threads to archive at once. Default
value is 2.
archiveheader Content that will be put on new archive pages as the
header. This parameter supports the use of variables.
Default value is {{talkarchive}}
key A secret key that (if valid) allows archives to not be
subpages of the page being archived.
Options (may be omitted):
-h, --help show this help message and exit
-c PAGE, --calc=PAGE calculate key for PAGE and exit
-f FILE, --file=FILE load list of pages from FILE
-F, --force override security options
-l LOCALE, --locale=LOCALE
switch to locale LOCALE
-L LANG, --lang=LANG set the language code to work on
-n NAMESPACE, --namespace=NAMESPACE
only archive pages from a given namespace
-p PAGE, --page=PAGE archive a single PAGE, default ns is a user talk page
-s SALT, --salt=SALT specify salt
-S --simulate Do not change pages, just simulate
"""
#
# (C) Misza13, 2006-2010
# (C) xqt, 2009-2012
# (C) Pywikipedia bot team, 2007-2012
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id$'
#
import wikipedia as pywikibot
from pywikibot import i18n
import pagegenerators, query
Site = pywikibot.getSite()
import os, re, time, locale, traceback, string, urllib
try: #Get a constructor for the MD5 hash object
import hashlib
new_hash = hashlib.md5
except ImportError: #Old python?
import md5
new_hash = md5.md5
language = Site.language()
def message(key, lang=Site.language()):
return i18n.twtranslate(lang, key)
class MalformedConfigError(pywikibot.Error):
"""There is an error in the configuration template."""
class MissingConfigError(pywikibot.Error):
"""The config is missing in the header (either it's in one of the threads
or transcluded from another page)."""
class AlgorithmError(MalformedConfigError):
"""Invalid specification of archiving algorithm."""
class ArchiveSecurityError(pywikibot.Error):
"""Archive is not a subpage of page being archived and key not specified
(or incorrect)."""
def str2time(str):
"""Accepts a string defining a time period:
7d - 7 days
36h - 36 hours
Returns the corresponding time, measured in seconds."""
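# Worked examples (computed directly from the branches below):
# str2time('7d') -> 604800, str2time('36h') -> 129600, str2time('90') -> 90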
if str[-1] == 'd':
return int(str[:-1])*24*3600
elif str[-1] == 'h':
return int(str[:-1])*3600
else:
return int(str)
def str2size(str):
"""Accepts a string defining a size:
1337 - 1337 bytes
150K - 150 kilobytes
2M - 2 megabytes
Returns a tuple (size,unit), where size is an integer and unit is
'B' (bytes) or 'T' (threads)."""
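# Worked examples (per the branches below):
# str2size('1337') -> (1337, 'B'), str2size('150K') -> (153600, 'B'),
# str2size('2M') -> (2097152, 'B'), str2size('5T') -> (5, 'T')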
if str[-1] in string.digits: #TODO: de-uglify
return (int(str),'B')
elif str[-1] in ['K', 'k']:
return (int(str[:-1])*1024,'B')
elif str[-1] == 'M':
return (int(str[:-1])*1024*1024,'B')
elif str[-1] == 'T':
return (int(str[:-1]),'T')
else:
return (int(str[:-1])*1024,'B')
def int2month(num):
"""Returns the locale's full name of month 'num' (1-12)."""
if hasattr(locale, 'nl_langinfo'):
return locale.nl_langinfo(locale.MON_1+num-1).decode('utf-8')
Months = ['january', 'february', 'march', 'april', 'may_long', 'june',
'july', 'august', 'september', 'october', 'november', 'december']
return Site.mediawiki_message(Months[num-1])
def int2month_short(num):
"""Returns the locale's abbreviated name of month 'num' (1-12)."""
if hasattr(locale, 'nl_langinfo'):
#filter out non-alpha characters
return ''.join([c for c in locale.nl_langinfo(locale.ABMON_1+num-1).decode('utf-8') if c.isalpha()])
Months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
return Site.mediawiki_message(Months[num-1])
def txt2timestamp(txt, format):
"""Attempts to convert the timestamp 'txt' according to given 'format'.
On success, returns the time tuple; on failure, returns None."""
## print txt, format
try:
return time.strptime(txt,format)
except ValueError:
try:
return time.strptime(txt.encode('utf8'),format)
except:
pass
return None
def generateTransclusions(Site, template, namespaces=[]):
pywikibot.output(u'Fetching template transclusions...')
transclusionPage = pywikibot.Page(Site, template, defaultNamespace=10)
gen = pagegenerators.ReferringPageGenerator(transclusionPage,
onlyTemplateInclusion=True)
if namespaces:
gen = pagegenerators.NamespaceFilterPageGenerator(gen, namespaces, Site)
for page in gen:
yield page
class DiscussionThread(object):
"""An object representing a discussion thread on a page, that is something of the form:
== Title of thread ==
Thread content here. ~~~~
:Reply, etc. ~~~~
"""
def __init__(self, title):
self.title = title
self.content = ""
self.timestamp = None
def __repr__(self):
return '%s("%s",%d bytes)' \
% (self.__class__.__name__,self.title,len(self.content))
def feedLine(self, line):
if not self.content and not line:
return
self.content += line + '\n'
#Update timestamp
# nnwiki:
# 19:42, 25 mars 2008 (CET)
# enwiki
# 16:36, 30 March 2008 (UTC)
# huwiki
# 2007. december 8., 13:42 (CET)
TM = re.search(r'(\d\d):(\d\d), (\d\d?) (\S+) (\d\d\d\d) \(.*?\)', line)
if not TM:
TM = re.search(r'(\d\d):(\d\d), (\S+) (\d\d?), (\d\d\d\d) \(.*?\)', line)
if not TM:
TM = re.search(r'(\d{4})\. (\S+) (\d\d?)\., (\d\d:\d\d) \(.*?\)', line)
# 18. apr 2006 kl.18:39 (UTC)
# 4. nov 2006 kl. 20:46 (CET)
if not TM:
TM = re.search(r'(\d\d?)\. (\S+) (\d\d\d\d) kl\.\W*(\d\d):(\d\d) \(.*?\)', line)
#3. joulukuuta 2008 kello 16.26 (EET)
if not TM:
TM = re.search(r'(\d\d?)\. (\S+) (\d\d\d\d) kello \W*(\d\d).(\d\d) \(.*?\)', line)
if not TM:
# 14:23, 12. Jan. 2009 (UTC)
pat = re.compile(r'(\d\d):(\d\d), (\d\d?)\. (\S+)\.? (\d\d\d\d) \((?:UTC|CES?T)\)')
TM = pat.search(line)
# ro.wiki: 4 august 2012 13:01 (EEST)
if not TM:
TM = re.search(r'(\d\d?) (\S+) (\d\d\d\d) (\d\d):(\d\d) \(.*?\)', line)
if TM:
TIME = txt2timestamp(TM.group(0),"%d. %b %Y kl. %H:%M (%Z)")
if not TIME:
TIME = txt2timestamp(TM.group(0), "%Y. %B %d., %H:%M (%Z)")
if not TIME:
TIME = txt2timestamp(TM.group(0), "%d. %b %Y kl.%H:%M (%Z)")
if not TIME:
TIME = txt2timestamp(re.sub(r' *\([^ ]+\) *', '', TM.group(0)),
"%H:%M, %d %B %Y")
if not TIME:
TIME = txt2timestamp(TM.group(0), "%H:%M, %d %b %Y (%Z)")
if not TIME:
TIME = txt2timestamp(re.sub(r' *\([^ ]+\) *', '', TM.group(0)),
"%H:%M, %d %b %Y")
if not TIME:
TIME = txt2timestamp(TM.group(0), "%H:%M, %b %d %Y (%Z)")
if not TIME:
TIME = txt2timestamp(TM.group(0), "%H:%M, %B %d %Y (%Z)")
if not TIME:
TIME = txt2timestamp(TM.group(0), "%H:%M, %b %d, %Y (%Z)")
if not TIME:
TIME = txt2timestamp(TM.group(0), "%H:%M, %B %d, %Y (%Z)")
if not TIME:
TIME = txt2timestamp(TM.group(0),"%d. %Bta %Y kello %H.%M (%Z)")
if not TIME:
TIME = txt2timestamp(TM.group(0), "%d %B %Y %H:%M (%Z)")
if not TIME:
TIME = txt2timestamp(re.sub(r' *\([^ ]+\) *', '', TM.group(0)),
"%H:%M, %d. %b. %Y")
if TIME:
self.timestamp = max(self.timestamp, time.mktime(TIME))
## pywikibot.output(u'Time to be parsed: %s' % TM.group(0))
## pywikibot.output(u'Parsed time: %s' % TIME)
## pywikibot.output(u'Newest timestamp in thread: %s' % TIME)
def size(self):
return len(self.title) + len(self.content) + 12
def toText(self):
return "== " + self.title + ' ==\n\n' + self.content
def shouldBeArchived(self,Archiver):
algo = Archiver.get('algo')
reT = re.search(r'^old\((.*)\)$',algo)
if reT:
if not self.timestamp:
return ''
#TODO: handle this:
#return 'unsigned'
maxage = str2time(reT.group(1))
if self.timestamp + maxage < time.time():
return message('archivebot-older-than') + ' ' + reT.group(1)
return ''
class DiscussionPage(pywikibot.Page):
"""A class that represents a single discussion page as well as an archive
page. Feed threads to it and run an update() afterwards."""
def __init__(self, title, archiver, vars=None):
pywikibot.Page.__init__(self, Site, title)
self.threads = []
self.full = False
self.archiver = archiver
self.vars = vars
try:
self.loadPage()
except pywikibot.NoPage:
self.header = archiver.get('archiveheader',
message('archivebot-archiveheader'))
if self.vars:
self.header = self.header % self.vars
def loadPage(self):
"""Loads the page to be archived and breaks it up into threads."""
self.header = ''
self.threads = []
self.archives = {}
self.archivedThreads = 0
lines = self.get().split('\n')
found = False #Reading header
curThread = None
for line in lines:
threadHeader = re.search('^== *([^=].*?) *== *$',line)
if threadHeader:
found = True #Reading threads now
if curThread:
self.threads.append(curThread)
curThread = DiscussionThread(threadHeader.group(1))
else:
if found:
curThread.feedLine(line)
else:
self.header += line + '\n'
if curThread:
self.threads.append(curThread)
pywikibot.output(u'%d Threads found on %s' % (len(self.threads), self))
def feedThread(self, thread, maxArchiveSize=(250*1024,'B')):
self.threads.append(thread)
self.archivedThreads += 1
if maxArchiveSize[1] == 'B':
if self.size() >= maxArchiveSize[0]:
self.full = True
elif maxArchiveSize[1] == 'T':
if len(self.threads) >= maxArchiveSize[0]:
self.full = True
return self.full
def size(self):
return len(self.header) + sum([t.size() for t in self.threads])
def update(self, summary, sortThreads = False):
if sortThreads:
pywikibot.output(u'Sorting threads...')
self.threads.sort(key = lambda t: t.timestamp)
newtext = re.sub('\n*$', '\n\n', self.header) #Fix trailing newlines
for t in self.threads:
newtext += t.toText()
if self.full:
summary += ' ' + message('archivebot-archive-full')
self.put(newtext, comment=summary)
class PageArchiver(object):
"""A class that encapsulates all archiving methods.
__init__ expects a pywikibot.Page object.
Execute by running the .run() method."""
algo = 'none'
def __init__(self, Page, tpl, salt, force=False):
self.attributes = {
'algo' : ['old(24h)',False],
'archive' : ['',False],
'maxarchivesize' : ['1000M',False],
'counter' : ['1',False],
'key' : ['',False],
}
self.tpl = tpl
self.salt = salt
self.force = force
self.Page = DiscussionPage(Page.title(), self)
self.loadConfig()
self.commentParams = {
'from' : self.Page.title(),
}
self.archives = {}
self.archivedThreads = 0
def get(self, attr, default=''):
return self.attributes.get(attr,[default])[0]
def set(self, attr, value, out=True):
if attr == 'archive':
value = value.replace('_', ' ')
self.attributes[attr] = [value, out]
def saveables(self):
return [a for a in self.attributes if self.attributes[a][1]
and a != 'maxage']
def attr2text(self):
return '{{%s\n%s\n}}' \
% (self.tpl,
'\n'.join(['|%s = %s'%(a,self.get(a))
for a in self.saveables()]))
def key_ok(self):
s = new_hash()
s.update(self.salt+'\n')
s.update(self.Page.title().encode('utf8')+'\n')
return self.get('key') == s.hexdigest()
def loadConfig(self):
pywikibot.output(u'Looking for: {{%s}} in %s' % (self.tpl, self.Page))
found = False
for tpl in self.Page.templatesWithParams(thistxt=self.Page.header):
if tpl[0] == self.tpl:
for param in tpl[1]:
item, value = param.split('=', 1)
self.set(item.strip(), value.strip())
found = True
break
if not found:
raise MissingConfigError(u'Missing or malformed template')
if not self.get('algo', ''):
raise MissingConfigError(u'Missing algo')
def feedArchive(self, archive, thread, maxArchiveSize, vars=None):
"""Feed the thread to one of the archives.
If it doesn't exist yet, create it.
If archive name is an empty string (or None),
discard the thread (/dev/null).
Also checks for security violations."""
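# e.g. for page "User talk:Foo", archive "User talk:Foo/Archive %(counter)d" is a
# subpage and passes; any other target (unless --force) requires a valid 'key'.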
if not archive:
return
if not self.force \
and not self.Page.title()+'/' == archive[:len(self.Page.title())+1] \
and not self.key_ok():
raise ArchiveSecurityError
if not archive in self.archives:
self.archives[archive] = DiscussionPage(archive, self, vars)
return self.archives[archive].feedThread(thread,maxArchiveSize)
def analyzePage(self):
maxArchSize = str2size(self.get('maxarchivesize'))
archCounter = int(self.get('counter', '1'))
oldthreads = self.Page.threads
self.Page.threads = []
T = time.mktime(time.gmtime())
whys = []
pywikibot.output(u'Processing %d threads' % len(oldthreads))
for t in oldthreads:
if len(oldthreads) - self.archivedThreads \
<= int(self.get('minthreadsleft',5)):
self.Page.threads.append(t)
continue # Because there are too few threads left.
# TODO: Make an option so that unstamped (unsigned) posts get
# archived.
why = t.shouldBeArchived(self)
if why:
archive = self.get('archive')
TStuple = time.gmtime(t.timestamp)
vars = {
'counter' : archCounter,
'year' : TStuple[0],
'month' : TStuple[1],
'monthname' : int2month(TStuple[1]),
'monthnameshort' : int2month_short(TStuple[1]),
'week' : int(time.strftime('%W',TStuple)),
}
archive = archive % vars
if self.feedArchive(archive,t,maxArchSize,vars):
archCounter += 1
self.set('counter',str(archCounter))
whys.append(why)
self.archivedThreads += 1
else:
self.Page.threads.append(t)
return set(whys)
def run(self):
if not self.Page.botMayEdit(Site.username):
return
whys = self.analyzePage()
if self.archivedThreads < int(self.get('minthreadstoarchive',2)):
# We might not want to archive a measly few threads
# (lowers edit frequency)
pywikibot.output(u'There are only %d Threads. Skipping'
% self.archivedThreads)
return
if whys:
pywikibot.output(u'Archiving %d thread(s).' % self.archivedThreads)
#Save the archives first (so that bugs don't cause a loss of data)
for a in sorted(self.archives.keys()):
self.commentParams['count'] = self.archives[a].archivedThreads
comment = i18n.twntranslate(language,
'archivebot-archive-summary',
self.commentParams)
self.archives[a].update(comment)
#Save the page itself
rx = re.compile('{{'+self.tpl+'\n.*?\n}}',re.DOTALL)
self.Page.header = rx.sub(self.attr2text(),self.Page.header)
self.commentParams['count'] = self.archivedThreads
self.commentParams['archives'] \
= ', '.join(['[['+a.title()+']]' for a in self.archives.values()])
if not self.commentParams['archives']:
self.commentParams['archives'] = '/dev/null'
self.commentParams['why'] = ', '.join(whys)
comment = i18n.twntranslate(language,
'archivebot-page-summary',
self.commentParams)
self.Page.update(comment)
def main():
global Site, language
from optparse import OptionParser
parser = OptionParser(usage='usage: %prog [options] [LINKPAGE(s)]')
parser.add_option('-f', '--file', dest='filename',
help='load list of pages from FILE', metavar='FILE')
parser.add_option('-p', '--page', dest='pagename',
help='archive a single PAGE', metavar='PAGE')
parser.add_option('-n', '--namespace', dest='namespace', type='int',
help='only archive pages from a given namespace')
parser.add_option('-s', '--salt', dest='salt',
help='specify salt')
parser.add_option('-F', '--force', action='store_true', dest='force',
help='override security options')
parser.add_option('-c', '--calc', dest='calc',
help='calculate key for PAGE and exit', metavar='PAGE')
parser.add_option('-l', '--locale', dest='locale',
help='switch to locale LOCALE', metavar='LOCALE')
parser.add_option('-L', '--lang', dest='lang',
help='current language code', metavar='lang')
parser.add_option('-T', '--timezone', dest='timezone',
help='switch timezone to TIMEZONE', metavar='TIMEZONE')
parser.add_option('-S', '--simulate', action='store_true', dest='simulate',
help='Do not change pages, just simulate')
(options, args) = parser.parse_args()
if options.locale:
#Required for english month names
locale.setlocale(locale.LC_TIME,options.locale)
if options.timezone:
os.environ['TZ'] = options.timezone
#Or use the preset value
if hasattr(time, 'tzset'):
time.tzset()
if options.calc:
if not options.salt:
parser.error('Note: you must specify a salt to calculate a key')
s = new_hash()
s.update(options.salt+'\n')
s.update(options.calc+'\n')
pywikibot.output(u'key = ' + s.hexdigest())
return
if options.salt:
salt = options.salt
else:
salt = ''
if options.force:
force = True
else:
force = False
if options.lang:
Site = pywikibot.getSite(options.lang)
language = Site.language()
if options.simulate:
pywikibot.simulate = True
if not args:
pywikibot.output(u'NOTE: you must specify a template to run the bot')
pywikibot.showHelp('archivebot')
return
for a in args:
pagelist = []
if not options.filename and not options.pagename:
#for pg in pywikibot.Page(Site,a).getReferences(follow_redirects=False,onlyTemplateInclusion=True):
if options.namespace is not None:
ns = [str(options.namespace)]
else:
ns = []
for pg in generateTransclusions(Site, a, ns):
pagelist.append(pg)
if options.filename:
for pg in file(options.filename,'r').readlines():
pagelist.append(pywikibot.Page(Site,pg))
if options.pagename:
pagelist.append(pywikibot.Page(Site, options.pagename,
defaultNamespace=3))
pagelist = sorted(pagelist)
#if not options.namespace == None:
# pagelist = [pg for pg in pagelist if pg.namespace()==options.namespace]
for pg in iter(pagelist):
pywikibot.output(u'Processing %s' % pg)
# Catching exceptions, so that errors in one page do not bail out
# the entire process
try:
Archiver = PageArchiver(pg, a, salt, force)
Archiver.run()
time.sleep(10)
except:
pywikibot.output(u'Error occurred while processing page %s' % pg)
traceback.print_exc()
if __name__ == '__main__':
try:
main()
finally:
pywikibot.stopme()
| 38.115073 | 111 | 0.551346 | 2,806 | 23,517 | 4.591946 | 0.213828 | 0.007916 | 0.008382 | 0.007451 | 0.164455 | 0.105627 | 0.074272 | 0.06884 | 0.059139 | 0.058518 | 0 | 0.017949 | 0.315346 | 23,517 | 616 | 112 | 38.176948 | 0.782312 | 0.251265 | 0 | 0.203431 | 0 | 0.017157 | 0.129454 | 0.006735 | 0 | 0 | 0 | 0.00487 | 0 | 1 | 0.071078 | false | 0.002451 | 0.019608 | 0.019608 | 0.198529 | 0.002451 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d64d34e571746e4e8b7826386ea1af2de18078b2 | 9,914 | py | Python | scripts/cbparse.py | maspadaru/starbench | 7230ddcaa5e202f63978d96baa71c31e111ba4f8 | [
"Apache-2.0"
] | null | null | null | scripts/cbparse.py | maspadaru/starbench | 7230ddcaa5e202f63978d96baa71c31e111ba4f8 | [
"Apache-2.0"
] | null | null | null | scripts/cbparse.py | maspadaru/starbench | 7230ddcaa5e202f63978d96baa71c31e111ba4f8 | [
"Apache-2.0"
] | null | null | null | import string
import sys
import os
OPERATOR_BOX = "B"
OPERATOR_DIAMOND = "D"
def print_help():
print ('Usage: python cbparse.py Option Arguments DirectoryPath')
print ('Options: ')
print (' S: parse chasebench data dir - Source Instance')
print (' Arguments: timeline_size')
print (' T: parse chasebench dependencies dir - TGDs')
print (' Arguments: window_size percent_box percent_diamond '\
'percent_event')
print (' ')
print ('Example: python cbparse.py D 1000 chasebench/scenarios/deep/100/data > out.stream')
print ('Example: python cbparse.py T 3 20 60 50 '\
' chasebench/scenarios/deep/100/dependencies > out.laser')
print (' ')
def validate_dir(dir_path):
if not dir_path.endswith("/"):
return "%s/" % dir_path
return dir_path
class Fact:
def __init__(self, predicate, values):
self._predicate = predicate
self._values = values
def __str__(self):
var = ' '.join(self._values)
return "%s %s" % (self._predicate, var)
def parse_fact_csv(filename, dir_path):
# each file is a fact: <predicate>.csv
result = []
predicate = filename.replace(".csv","")
predicate = predicate.replace('_','')
full_path = "%s%s" % (dir_path, filename)
with open(full_path) as openfileobject:
for line in openfileobject:
line = line.rstrip()
if line:
line = line.replace('_','').replace('"','')
values = line.split(',')
# Eliminating trailing newline. Removing "" around string values.
fact = Fact(predicate, values)
result.append(fact)
return result
def print_source(timeline_size, source_instance):
# on the first line, Star expects the first and last timepoint
print("%d %d" % (1, timeline_size))
for time_point in range(timeline_size):
for fact in source_instance:
# one fact per line; each line contains values separated by space
# first value is the predicate followed by constants
print(fact)
# empty line marks the end of imput for the current time point
print("")
def parse_source(timeline_size, dir_path):
source_instance = []
for filename in os.listdir(dir_path):
if filename.endswith(".csv"):
fact_list = parse_fact_csv(filename, dir_path)
source_instance.extend(fact_list)
return source_instance
class Dependency:
def __init__(self, line):
self._parse_line(line)
self._existential_variables = self._get_existential_variables()
self.is_existential = len(self._existential_variables) > 0
self._events = []
def __str__(self):
head_atoms = self._head[:]
for event in self._events:
event_atom = "[I, %s]" % (event)
head_atoms.append(event_atom)
head = ' && '.join(head_atoms)
body = ' && '.join(self._body)
rule = head + ' := ' + body
return rule
def _parse_line(self, line):
line = line.replace('.','').replace('?','').replace(' ','').replace('_','')
line_list = line.split('->')
body = line_list[0]
head = line_list[1]
self._body = self._parse_conjunction(body)
self._head = self._parse_conjunction(head)
def _parse_conjunction(self, conjunction):
'''
parse atoms (starts with alphanumeric, ends with ')')
'''
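# e.g. "p(X,Y),q(Y)" -> ["p(X,Y)", "q(Y)"] (only a comma following ')' splits atoms)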
result = []
atom_chars = []
prev_char = ""
for char in conjunction:
if char == ',' and prev_char == ')':
atom = ''.join(atom_chars)
atom_chars = []
result.append(atom)
else:
atom_chars.append(char)
prev_char = char
if atom_chars:
atom = ''.join(atom_chars)
result.append(atom)
return result
def _get_variable_list(self, atom):
atom = atom.replace(')', '')
variables = atom.split('(')[1]
var_list = variables.split(',')
return var_list
def _get_existential_variables(self):
body_variables = set()
head_variables = set()
for atom in self._body:
variables = self._get_variable_list(atom)
body_variables.update(variables)
for atom in self._head:
variables = self._get_variable_list(atom)
head_variables.update(variables)
existential_variables = head_variables.difference(body_variables)
return list(existential_variables)
def add_operator(self, operator, window_size):
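# e.g. with operator 'B' and window_size 3: body atom "p(X)" becomes "[$, 3] [B] p(X)"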
new_body = []
for atom in self._body:
new_atom = "[$, %d] [%s] %s" % (window_size, operator, atom)
new_body.append(new_atom)
self._body = new_body
def add_event(self):
if self.is_existential:
self._events.append(self._existential_variables[0])
def add_all_events(self):
if self.is_existential:
self._events = self._existential_variables[:]
class DependencyParser:
def __init__(self, window_size, percent_box, percent_diamond, \
percent_event, dir_path):
self._window_size = window_size
self._percent_box = percent_box
self._percent_diamond = percent_diamond
self._percent_event = percent_event
self._dir_path = dir_path
self._st_dependencies = []
self._t_dependencies = []
def _parse_dependency_file(self, file_name):
dependencies = []
full_path = "%s%s" % (self._dir_path, file_name)
with open(full_path) as openfileobject:
for line in openfileobject:
line = line.rstrip()
if line:
dependency = Dependency(line)
dependencies.append(dependency)
return dependencies
def _parse_tgds(self):
for file_name in os.listdir(self._dir_path):
if file_name.endswith(".st-tgds.txt"):
new_deps = self._parse_dependency_file(file_name)
self._st_dependencies.extend(new_deps)
if file_name.endswith(".t-tgds.txt"):
new_deps = self._parse_dependency_file(file_name)
self._t_dependencies.extend(new_deps)
def _count_existential_tgds(self, dependencies):
result = 0
for dependency in dependencies:
if dependency.is_existential:
result += 1
return result
def _add_windows(self, dependencies):
number_tgds = len(dependencies)
number_box = (number_tgds * self._percent_box) // 100  # integer count of rules
number_diamond = (number_tgds * self._percent_diamond) // 100  # integer count of rules
for dependency in dependencies:
if number_box > 0:
dependency.add_operator(OPERATOR_BOX, self._window_size)
number_box -= 1
elif number_diamond > 0:
dependency.add_operator(OPERATOR_DIAMOND, self._window_size)
number_diamond -= 1
def _add_events(self, dependencies):
if self._percent_event == 50:
counter = 0
for dependency in dependencies:
counter += 1
if counter % 2 == 0 and dependency.is_existential:
dependency.add_all_events()
else:
number_existential_tgds = self._count_existential_tgds(dependencies)
number_event = (number_existential_tgds * self._percent_event) // 100
for dependency in dependencies:
if number_event > 0 and dependency.is_existential:
# dependency.add_event()
dependency.add_all_events()
number_event -= 1
def _mutate_list(self, dependencies):
self._add_windows(dependencies)
self._add_events(dependencies)
def read(self):
self._parse_tgds()
def mutate(self):
self._mutate_list(self._st_dependencies)
self._mutate_list(self._t_dependencies)
def write(self):
for dependency in self._st_dependencies:
print(dependency)
for dependency in self._t_dependencies:
print(dependency)
def validate_percentages(percent_box, percent_diamond, percent_event):
if percent_box > 100 or percent_diamond > 100 or percent_event > 100:
print("Percentages should not be larger than 100")
return False
if percent_box + percent_diamond > 100:
print("Percentages for box and diamond should not add up to more "\
"than 100")
return False
return True
def main():
if (len(sys.argv) < 2):
print_help()
else:
option = sys.argv[1]
if option == 'S':
if (len(sys.argv) == 4):
timeline_size = int(sys.argv[2])
dir_path = validate_dir(sys.argv[3])
source_instance = parse_source(timeline_size, dir_path)
print_source(timeline_size, source_instance)
elif option == 'T':
if (len(sys.argv) == 7):
window_size = int(sys.argv[2])
percent_box = int(sys.argv[3])
percent_diamond = int(sys.argv[4])
percent_event = int(sys.argv[5])
if validate_percentages(percent_box, percent_diamond, \
percent_event):
dir_path = validate_dir(sys.argv[6])
parser = DependencyParser(window_size, percent_box, \
percent_diamond, percent_event, dir_path)
parser.read()
parser.mutate()
parser.write()
else:
print_help()
if __name__ == '__main__':
main()
| 33.268456 | 95 | 0.587452 | 1,109 | 9,914 | 4.962128 | 0.161407 | 0.022897 | 0.021625 | 0.026168 | 0.248228 | 0.187534 | 0.133927 | 0.093585 | 0.063965 | 0.063965 | 0 | 0.011055 | 0.315715 | 9,914 | 297 | 96 | 33.380471 | 0.800118 | 0.04186 | 0 | 0.202586 | 0 | 0.00431 | 0.070124 | 0.008026 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12069 | false | 0 | 0.012931 | 0 | 0.206897 | 0.094828 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d64dbb66dc28a89025281d71ffc0283bc9c1aa67 | 1,279 | py | Python | 21.py | oliver-johnston/advent-of-code-2019 | c8774ac98fa0ea861887c35fcae1da3d0435dda4 | [
"MIT"
] | null | null | null | 21.py | oliver-johnston/advent-of-code-2019 | c8774ac98fa0ea861887c35fcae1da3d0435dda4 | [
"MIT"
] | null | null | null | 21.py | oliver-johnston/advent-of-code-2019 | c8774ac98fa0ea861887c35fcae1da3d0435dda4 | [
"MIT"
] | null | null | null | import intcode
class Springdroid:
def __init__(self, instructions):
self.instruction_chars = list("\n".join(instructions)) + ["\n"]
def run(self):
intcode.execute_program(self.get_program(), self.get_input, self.write_output)
def get_input(self):
ch = self.instruction_chars.pop(0)
return ord(ch)
@staticmethod
def get_program():
fp = open("21.txt")
return [int(x) for x in fp.read().split(",")]
@staticmethod
def write_output(o):
if o > 1000:
print(o)
else:
print(chr(o), end="")
Springdroid(["OR A J", # J = land at A
"AND C J", # J = land at A and C
"NOT J J", # J = hole at A or C
"AND D J", # J = (hole at A or C) and land at D
"WALK"]).run()
Springdroid(["OR A J", # J = land at A
"AND B J", # J = land at A and B
"AND C J", # J = land at A and B and C
"NOT J J", # J = hole at A or B or C
"AND D J", # J = (hole at A or B or C) and land at D
"OR E T", # T = land at E
"OR H T", # T = land at E or H
"AND T J", # J = (hole at A or B or C) and land at D and (land at E or H)
"RUN"]).run()
| 29.744186 | 86 | 0.487099 | 210 | 1,279 | 2.904762 | 0.27619 | 0.039344 | 0.04918 | 0.065574 | 0.409836 | 0.393443 | 0.381967 | 0.340984 | 0.318033 | 0.183607 | 0 | 0.008827 | 0.379984 | 1,279 | 42 | 87 | 30.452381 | 0.760404 | 0.238468 | 0 | 0.30303 | 0 | 0 | 0.101977 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151515 | false | 0 | 0.030303 | 0 | 0.272727 | 0.060606 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d64f495619cee232c074671c5f339c49c18f602c | 1,290 | py | Python | download_images.py | lastcoolnameleft/whisky-training | 1a8eb24942f65ab3055357bfbfca9eca6d2a93be | [
"MIT"
] | null | null | null | download_images.py | lastcoolnameleft/whisky-training | 1a8eb24942f65ab3055357bfbfca9eca6d2a93be | [
"MIT"
] | null | null | null | download_images.py | lastcoolnameleft/whisky-training | 1a8eb24942f65ab3055357bfbfca9eca6d2a93be | [
"MIT"
] | null | null | null | import requests
import pprint
import os
import csv
import sys
import urllib.request
scotch_csv = sys.argv[1]
subscription_key = sys.argv[2]
search_url = "https://api.cognitive.microsoft.com/bing/v7.0/images/search"
scotch_list = []
with open(scotch_csv, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in reader:
print(row[1])
scotch_list.append(row[1])
print(scotch_list)
for scotch in scotch_list:
print(scotch)
img_dir = "content/images/" + scotch
if not os.path.exists(img_dir):
os.makedirs(img_dir)
search_term = scotch + " Bottle"
headers = {"Ocp-Apim-Subscription-Key" : subscription_key}
params = {"q": search_term, "textDecorations":False, "count": 10}
params["offset"] = 50
response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
for value in search_results["value"]:
url = value["contentUrl"]
print(url)
filename = url.split("/")[-1]
file_name = img_dir + "/" + filename
try:
urllib.request.urlretrieve(url, file_name)
except Exception:
print("Got error for (" + scotch + ") :" + url)
#pprint.pprint(search_results)
| 30 | 74 | 0.651163 | 165 | 1,290 | 4.951515 | 0.466667 | 0.04896 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010848 | 0.213953 | 1,290 | 42 | 75 | 30.714286 | 0.794872 | 0.022481 | 0 | 0 | 0 | 0 | 0.134921 | 0.019841 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.162162 | 0 | 0.162162 | 0.162162 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d65139b8e3aa8a1bba1c0eb4a22dccde013dd9a9 | 2,007 | py | Python | src/vuabl/parsing/data_from_other_asset.py | CncealDVRflectN/vuabl | 1c1f3270d4ad4b730ec9558b65b2f6ea84a5bd50 | [
"MIT"
] | null | null | null | src/vuabl/parsing/data_from_other_asset.py | CncealDVRflectN/vuabl | 1c1f3270d4ad4b730ec9558b65b2f6ea84a5bd50 | [
"MIT"
] | null | null | null | src/vuabl/parsing/data_from_other_asset.py | CncealDVRflectN/vuabl | 1c1f3270d4ad4b730ec9558b65b2f6ea84a5bd50 | [
"MIT"
] | null | null | null | from vuabl.data.data_from_other_asset import DataFromOtherAsset
from vuabl.utils.layout_reader import LayoutReader
import vuabl.parsing.parameters as pparamrs
import vuabl.parsing.asset_type as pasttp
import re
def is_data_from_other_assets_header(line: str) -> bool:
return re.match(r"^\s*Data From Other Assets.*", line) is not None
def parse_file_data_from_other_asset(reader: LayoutReader) -> DataFromOtherAsset:
data: DataFromOtherAsset = DataFromOtherAsset()
intent: int = pparamrs.get_intent(reader.currentLine())
data.path = re.search(r"^\s*([^(]+)\(", reader.currentLine()).group(1).rstrip()
data.assetType = pasttp.get_asset_type(data.path)
data.size = pparamrs.get_size(reader.currentLine())
data.sizeFromObjects = pparamrs.get_size_from_objects(reader.currentLine())
data.sizeFromStreamedData = pparamrs.get_size_from_streamed_data(reader.currentLine())
data.objectCount = pparamrs.get_header_integer_param(reader.currentLine(), "Object Count")
try:
while True:
line: str = reader.nextLine()
if pparamrs.get_intent(line) <= intent:
break
elif pparamrs.is_param(line, "Referencing Assets"):
data.referencingAssets = pparamrs.get_assets_list_param(line, "Referencing Assets")
except StopIteration:
pass
return data
def parse_file_data_from_other_assets(reader: LayoutReader) -> list:
data: list[DataFromOtherAsset] = []
intent: int = pparamrs.get_intent(reader.currentLine())
try:
isNext: bool = True
while True:
line: str = reader.currentLine()
if isNext:
line = reader.nextLine()
curIntent = pparamrs.get_intent(line)
if curIntent <= intent:
break
elif curIntent == intent + 1:
data.append(parse_file_data_from_other_asset(reader))
isNext = False
except StopIteration:
pass
return data
| 30.876923 | 99 | 0.673144 | 227 | 2,007 | 5.748899 | 0.299559 | 0.075862 | 0.05977 | 0.041379 | 0.249808 | 0.165517 | 0.144061 | 0.093487 | 0 | 0 | 0 | 0.001295 | 0.230194 | 2,007 | 64 | 100 | 31.359375 | 0.843366 | 0 | 0 | 0.318182 | 0 | 0 | 0.044345 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0.045455 | 0.113636 | 0.022727 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d655a42e44e0f08cda3777efc3dd6716b3147a5f | 7,132 | py | Python | server/main.py | leemengtaiwan/exbert | 63f1d4503f491c87392626e4c0f1f91cae698228 | [
"Apache-2.0"
] | 1 | 2020-10-26T11:53:57.000Z | 2020-10-26T11:53:57.000Z | server/main.py | leemengtaiwan/exbert | 63f1d4503f491c87392626e4c0f1f91cae698228 | [
"Apache-2.0"
] | null | null | null | server/main.py | leemengtaiwan/exbert | 63f1d4503f491c87392626e4c0f1f91cae698228 | [
"Apache-2.0"
] | 1 | 2020-05-10T15:07:27.000Z | 2020-05-10T15:07:27.000Z | import argparse
from attention_details import (
AttentionDetailsData,
get_token_info,
add_token_info,
)
from pytorch_pretrained_bert import BertModel, BertTokenizer
from flask import render_template, redirect, send_from_directory
from flask_cors import CORS
from utils.mask_att import strip_attention
import connexion
import os
import pickle
import utils.path_fixes as pf
import numpy as np
from data.processing.create_faiss import Indexes, ContextIndexes
from data.processing.woz_embeddings import CorpusEmbeddings, AttentionCorpusEmbeddings
from copy import deepcopy
app = connexion.FlaskApp(__name__, static_folder='client/dist', specification_dir='.')
flask_app = app.app
CORS(flask_app)
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--debug", action='store_true', help=' Debug mode')
parser.add_argument("--port", default=5555, help="Port to run the app. ")
# NOTE: Connexion runs all global code twice. We need to load the info on the second pass of the app instantiating, not the first. 'main' code statement.
# This may not work in deploy
class WozFaissWrapper:
def __init__(self):
self.embedding_faiss = None
self.context_faiss = None
self.embedding_corpus = None
self.context_corpus = None
def load_info(self):
"""Allow values to have default NONE, load all at once after first load of flask"""
self.embedding_faiss = Indexes(pf.WOZ_EMBEDDINGS)
self.context_faiss = ContextIndexes(pf.WOZ_CONTEXT)
self.embedding_corpus = AttentionCorpusEmbeddings(pf.WOZ_HDF5)
self.context_corpus = AttentionCorpusEmbeddings(pf.WOZ_CONTEXT_HDF5)
woz = WozFaissWrapper()
# Flask main routes
@app.route('/')
def hello_world():
return redirect('client/exBERT.html')
# send everything from client as static content
@app.route('/client/<path:path>')
def send_static_client(path):
""" serves all files from ./client/ to ``/client/<path:path>``
:param path: path from api call
"""
return send_from_directory(str(pf.CLIENT_DIST), path)
#======================================================================
## INITIALIZATION OF MODEL ##
#======================================================================
bert_version = 'bert-base-uncased'
model = BertModel.from_pretrained(bert_version)
tokenizer = BertTokenizer.from_pretrained(bert_version)
details_data = AttentionDetailsData(model, tokenizer)
p_file = "_store/simple.pckl"
def keep_aa(attentions):
""" Last minute change: transfer over the network is very slow. Need to drop keys from the JSON to make rendering faster """
aa = attentions['aa']
out = {'aa': aa}
return out
def masking_reformat(st, layer):
"""
'st' = SimpleTokensInfo
"""
format_matrix = lambda mat: np.array(mat)[:,layer,:].tolist()
out = {}
out['text'] = st['text']
out['embeddings'] = format_matrix(st['embeddings'])
out['contexts'] = format_matrix(st['contexts'])
return out
def in_side_select_layer(fsti, layer):
"""
fst = "FullSingleTokenInfo[]"
"""
new_side = []
for f in fsti:
new_side_obj = {}
for k, v in f.items():
if k == 'embeddings' or k == 'contexts':
v = f[k][layer]
new_side_obj[k] = v
new_side.append(new_side_obj)
return new_side
def minimize_aa(attentions, layer, text_info_formatter):
""" Last minute change: Need to additionally only return the attentions for a particular layer
Assume "AttentionMetaResult". Also drops the key and query.
attentions: The attentions returned by the model
layer: The layer to analyze
text_info_formatter: How to parse the 'left' and 'right' sides by layer
"""
aa = attentions['aa']
# When updating the masked attentions, we don't have to modify the contexts and the embeddings
new_left = text_info_formatter(aa['left'], layer)
new_right = text_info_formatter(aa['right'], layer)
new_aa = {
'att': aa['att'][layer],
'left': new_left,
'right': new_right
}
out = {
'aa': new_aa
}
return out
#======================================================================
## CONNEXION API ##
#======================================================================
def get_attention_and_meta(**request):
sent_a = request['sentenceA']
sent_b = request['sentenceB']
layer = int(request['layer'])
deets = details_data.get_data(sent_a, sent_b)
attentions_and_meta = add_token_info(deets.to_json(), sent_a, sent_b)
return minimize_aa(keep_aa(attentions_and_meta), layer, in_side_select_layer)
def update_masked_meta_attention(**request):
"""
Return attention information from tokens and mask indices.
Object: {"a" : {"sentence":__, "mask_inds"}, "b" : {...}}
"""
payload = request['payload']
a = payload['tokensA'] # NAME OF VARIABLE IS IMPORTANT. See below.
b = payload['tokensB'] # NAME OF VARIABLE IS IMPORTANT. See below.
mask_a = payload['maskA']
mask_b = payload['maskB']
layer = int(payload['layer'])
MASK = '[MASK]'
tokens_a = [t if i not in mask_a else MASK for (i, t) in enumerate(a)]
tokens_b = [t if i not in mask_b else MASK for (i, t) in enumerate(b)]
print("tokens_a: ", tokens_a)
print("tokens_b: ", tokens_b)
deets = details_data.get_data_from_tokens(tokens_a, tokens_b)
attentions = deets.to_json()
print(f"a: {a}")
print(f"b: {b}")
for k in attentions:
if k != 'all':
attentions[k]['left']['text'] = eval(k[0]) # Calls the 'a' or 'b' from above
attentions[k]['right']['text'] = eval(k[1])
out = minimize_aa(keep_aa(attentions), layer, masking_reformat)
return out
def woz_nearest_embedding_search(**request):
"""Return the token text and the metadata in JSON"""
q = np.array(request['embedding']).reshape((1, -1)).astype(np.float32)
layer = int(request['layer'])
heads = list(map(int, list(set(request['heads']))))
k = int(request['k'])
nearest_dists, nearest_idxs = woz.embedding_faiss.search(layer, q, k)
out = woz.embedding_corpus.find2d(nearest_idxs)[0]
return_obj = [o.to_json(layer, heads) for o in out]
return return_obj
def woz_nearest_context_search(**request):
"""Return the token text and the metadata in JSON"""
q = np.array(request['context']).reshape((1, -1)).astype(np.float32)
layer = int(request['layer'])
heads = list(map(int, list(set(request['heads']))))
k = int(request['k'])
nearest_dists, nearest_idxs = woz.context_faiss.search(layer, heads, q, k)
out = woz.context_corpus.find2d(nearest_idxs)[0]
return_obj = [o.to_json(layer, heads) for o in out]
return return_obj
app.add_api('swagger.yaml')
# Setup code
if __name__ != '__main__':
print("SETTING UP")
woz.load_info()
print("AFTER SETUP")
# Then deploy app
else:
args, _ = parser.parse_known_args()
print("Initiating app")
app.run(port=args.port, use_reloader=False, debug=args.debug) | 31.839286 | 153 | 0.654655 | 946 | 7,132 | 4.748414 | 0.285412 | 0.016029 | 0.015138 | 0.013357 | 0.166963 | 0.145147 | 0.139359 | 0.11398 | 0.11398 | 0.11398 | 0 | 0.003454 | 0.188166 | 7,132 | 224 | 154 | 31.839286 | 0.772366 | 0.232193 | 0 | 0.123188 | 0 | 0 | 0.083115 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.101449 | 0.007246 | 0.268116 | 0.050725 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d657cea0bec4ccf577d4498c272dbcb1c60618ac | 1,948 | py | Python | tronos/manual-tracker.py | danilexn/tronos | 64852cc41fba51271470f0c424c60c7587ff9f0f | [
"MIT"
] | 1 | 2021-03-19T03:29:06.000Z | 2021-03-19T03:29:06.000Z | tronos/manual-tracker.py | danilexn/tronos | 64852cc41fba51271470f0c424c60c7587ff9f0f | [
"MIT"
] | 3 | 2020-10-21T13:54:32.000Z | 2020-10-26T19:05:41.000Z | tronos/manual-tracker.py | danilexn/tronos | 64852cc41fba51271470f0c424c60c7587ff9f0f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# This is a VERY simple and naive manual tracker.
# If you need something more powerful, Fiji is
# absolutely recommended.
# Generic
import argparse
import csv
import numpy as np
import sys
import os
# Imaging and visualization
from PIL import Image
import matplotlib.pyplot as plt
# Functions
def open_image(froute):
try:
img = Image.open(froute)
img.load()
print(img.n_frames)
print(img)
return img
except Exception:
print("Unable to load image")
def on_press(event):
trajs.append([event.x, event.y])
def build_trajs(img):
for i in range(img.n_frames):
try:
# Seek first so frame i is the one displayed (the original seeked after
# showing, so frame 0 appeared twice and the last frame never did).
img.seek(i)
plt.imshow(img, cmap="gray", vmin=0, vmax=255)
plt.connect("button_press_event", on_press)
plt.show()
except EOFError:
# Not enough frames in img
break
def csv_save(trajs, csvname):
with open(csvname, "a+") as f:
for i, pos in enumerate(trajs):
f.write("{},{},{}\n".format(i, pos[0], pos[1]))
# Argument parsing
def cmdline_args():
p = argparse.ArgumentParser(
description="""
Tronos VERY simple manual tracker
""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
p.add_argument(
"--source",
nargs="+",
help="Source TIFF file to manually process. CSV results will be saved to selected directory",
)
p.add_argument(
"-p",
"--plot",
action="store_true",
help="Plot routes when manual tracking completed for each trajectory",
)
return p.parse_args()
if __name__ == "__main__":
args = cmdline_args()
trajs = []
for fname in args.source:
trajs = []
img = open_image(fname)
build_trajs(img)
csvname = os.path.basename(os.path.splitext(fname)[0]) + "_trajs.csv"
csv_save(trajs, csvname)
pass
| 22.136364 | 101 | 0.602669 | 246 | 1,948 | 4.658537 | 0.536585 | 0.020942 | 0.017452 | 0.033159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005036 | 0.286448 | 1,948 | 87 | 102 | 22.390805 | 0.819424 | 0.114476 | 0 | 0.1 | 0 | 0 | 0.171911 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0.016667 | 0.116667 | 0 | 0.233333 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d658c086729548623c04cfdeea90310116c48dcd | 5,568 | py | Python | data_preparation/align_caricature_data.py | wendychen0521/DualStyleGAN | d04c665d800a0fae933c234ad9da17a96cb57c07 | [
"MIT"
] | 488 | 2022-03-20T08:36:40.000Z | 2022-03-31T22:58:54.000Z | data_preparation/align_caricature_data.py | Guttappa1238/DualStyleGAN | 67751d90d1a23358f6cbe5d23e3475cbe2cebff2 | [
"MIT"
] | 10 | 2022-03-20T13:04:10.000Z | 2022-03-31T14:39:47.000Z | data_preparation/align_caricature_data.py | Guttappa1238/DualStyleGAN | 67751d90d1a23358f6cbe5d23e3475cbe2cebff2 | [
"MIT"
] | 70 | 2022-03-20T08:53:27.000Z | 2022-03-31T05:15:58.000Z | import numpy as np
import scipy.ndimage
import os
import PIL.Image
txtpath = './caricature.txt'
lmpath = './WebCaricature/FacialPoints/'
impath = './WebCaricature/OriginalImages/'
outpath = './Caricature/'
def image_align(src_file, dst_file, face_landmarks, output_size=1024, transform_size=4096, enable_padding=True, x_scale=1, y_scale=1, em_scale=0.1, alpha=False):
# Align function from FFHQ dataset pre-processing step
# https://github.com/NVlabs/ffhq-dataset/blob/master/download_ffhq.py
lm = np.array(face_landmarks)
'''
lm_chin = lm[0 : 17] # left-right
lm_eyebrow_left = lm[17 : 22] # left-right
lm_eyebrow_right = lm[22 : 27] # left-right
lm_nose = lm[27 : 31] # top-down
lm_nostrils = lm[31 : 36] # top-down
lm_eye_left = lm[36 : 42] # left-clockwise
lm_eye_right = lm[42 : 48] # left-clockwise
lm_mouth_outer = lm[48 : 60] # left-clockwise
lm_mouth_inner = lm[60 : 68] # left-clockwise
'''
lm_eye_left = lm[8:10]
lm_eye_right = lm[10:12]
lm_mouth_outer = lm[13:17]
# Calculate auxiliary vectors.
eye_left = np.mean(lm_eye_left, axis=0)
eye_right = np.mean(lm_eye_right, axis=0)
eye_avg = (eye_left + eye_right) * 0.5
eye_to_eye = eye_right - eye_left
'''
mouth_left = lm_mouth_outer[0]
mouth_right = lm_mouth_outer[6]
'''
mouth_left = lm_mouth_outer[1]
mouth_right = lm_mouth_outer[3]
mouth_avg = (mouth_left + mouth_right) * 0.5
eye_to_mouth = mouth_avg - eye_avg
# Choose oriented crop rectangle.
x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
x /= np.hypot(*x)
x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
x *= x_scale
y = np.flipud(x) * [-y_scale, y_scale]
c = eye_avg + eye_to_mouth * em_scale
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
qsize = np.hypot(*x) * 2
# Load in-the-wild image.
if not os.path.isfile(src_file):
print('\nCannot find source image. Please run "--wilds" before "--align".')
return
img = PIL.Image.open(src_file).convert('RGBA').convert('RGB')
# Shrink.
shrink = int(np.floor(qsize / output_size * 0.5))
if shrink > 1:
rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
img = img.resize(rsize, PIL.Image.ANTIALIAS)
quad /= shrink
qsize /= shrink
# Crop.
#border = max(int(np.rint(qsize * 0.7)), 1)
border = max(int(np.rint(qsize * 0.01)), 0.5)
crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
img = img.crop(crop)
quad -= crop[0:2]
# Pad.
pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
if enable_padding and max(pad) > border - 2:
pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
#pad = np.maximum(pad, int(np.rint(qsize * 0.05)))
img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'edge')
h, w, _ = img.shape
y, x, _ = np.ogrid[:h, :w, :1]
mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))
#initial was *0.2
blur = qsize * 0.002
img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
img = np.uint8(np.clip(np.rint(img), 0, 255))
if alpha:
mask = 1-np.clip(3.0 * mask, 0.0, 1.0)
mask = np.uint8(np.clip(np.rint(mask*255), 0, 255))
img = np.concatenate((img, mask), axis=2)
img = PIL.Image.fromarray(img, 'RGBA')
else:
img = PIL.Image.fromarray(img, 'RGB')
quad += pad[:2]
# Transform.
img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
if output_size < transform_size:
img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)
# Save aligned image.
img.save(dst_file, 'PNG')
if __name__ == "__main__":
if not os.path.exists(outpath):
os.makedirs(outpath)
f = open(txtpath,'r')
names = f.readlines()
f.close()
for name in names:
fname = name[:-8]
num = name[-7:-1]
lm = np.loadtxt(os.path.join(lmpath, fname.replace('_', ' '), num + '.txt'))
imgname = os.path.join(impath, fname.replace('_', ' '), num + '.jpg')
savename = os.path.join(outpath, fname + '_' + num + '.jpg')
image_align(imgname, savename, lm, output_size=256, transform_size=1024) | 44.544 | 169 | 0.547055 | 838 | 5,568 | 3.505967 | 0.227924 | 0.025528 | 0.024506 | 0.019061 | 0.201498 | 0.134786 | 0.088496 | 0.072158 | 0.072158 | 0.047651 | 0 | 0.052343 | 0.279454 | 5,568 | 125 | 170 | 44.544 | 0.67996 | 0.065014 | 0 | 0 | 0 | 0 | 0.04399 | 0.013066 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012195 | false | 0 | 0.04878 | 0 | 0.073171 | 0.012195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d65a40564489076d2504ca869d13f4f429ac3159 | 5,153 | py | Python | src/rcpicar/util/argument.py | franzmandl/rcpicar | f847f918862edd3ad1ca9b00b845e61b52500ca7 | [
"MIT"
] | null | null | null | src/rcpicar/util/argument.py | franzmandl/rcpicar | f847f918862edd3ad1ca9b00b845e61b52500ca7 | [
"MIT"
] | null | null | null | src/rcpicar/util/argument.py | franzmandl/rcpicar | f847f918862edd3ad1ca9b00b845e61b52500ca7 | [
"MIT"
] | null | null | null | from __future__ import annotations
from abc import ABC, abstractmethod
from argparse import ArgumentParser, MetavarTypeHelpFormatter
from sys import version_info
from typing import Any, Callable, Dict, Generic, Iterable, Mapping, Optional, Sequence, Tuple, Type, TypeVar
if version_info >= (3, 8):
from typing import TypedDict
else:
from typing_extensions import TypedDict
from .checking import check_type
from .Lazy import Lazy
K = TypeVar('K')
T = TypeVar('T')
ParsedType = TypeVar('ParsedType')
def get_key_of_value(haystack: Mapping[K, T], needle: T) -> K:
for key, value in haystack.items():
if needle == value:
return key
raise RuntimeError(f'Value "{needle}" not found in mapping.')
class AddArgumentKwargs(TypedDict, total=False):
dest: str
help: str
type: Type[Any]
AddArgumentArgs = Tuple[str]
class IArgument(Generic[ParsedType], ABC):
@abstractmethod
def add_argument(self, parser: ArgumentParser) -> None:
""""""
@abstractmethod
def get_name(self) -> str:
""""""
@abstractmethod
def set_value(self, value: ParsedType) -> None:
""""""
AnyArgument = IArgument[Any]
AnyArguments = Iterable[AnyArgument]
class IArguments(ABC):
def get_arguments(self) -> AnyArguments:
return []
class Argument(Generic[T, ParsedType], IArgument[ParsedType], IArguments):
def __init__(
self,
parser_add_argument: Callable[[ArgumentParser, AddArgumentArgs, AddArgumentKwargs], None],
default: Optional[str],
help_: str,
lazy: Lazy[T],
name: str,
parse_value: Callable[[ParsedType], T],
type_: Type[ParsedType],
) -> None:
self.parser_add_argument = parser_add_argument
self.help = help_ if default is None else f'{help_} (default: {default or lazy.get_default()})'
self.lazy = lazy
self.name = name
self.parse_value = parse_value
self.type = type_
def add_argument(self, parser: ArgumentParser) -> None:
kwargs = AddArgumentKwargs()
if self.name[0] in parser.prefix_chars:
kwargs['dest'] = self.name
kwargs['help'] = self.help
kwargs['type'] = self.type
self.parser_add_argument(parser, (self.name,), kwargs)
def get_arguments(self) -> AnyArguments:
return [self]
def get_name(self) -> str:
return self.name
def set_value(self, value: ParsedType) -> None:
self.lazy.set(self.parse_value(check_type(value, self.type)))
def create_choice_argument(
lazy: Lazy[T],
name: str,
choices: Mapping[str, T],
help_: str = '',
default: str = ''
) -> Argument[T, str]:
def add_argument(parser: ArgumentParser, args: AddArgumentArgs, kwargs: AddArgumentKwargs) -> None:
parser.add_argument(*args, **kwargs, choices=choices)
return Argument(
add_argument, default or get_key_of_value(choices, lazy.get_default()),
help_, lazy, name, lambda value: choices[value], str,
)
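# Hypothetical usage sketch (the Lazy instance 'lazy_speed' and the choice values
# are illustrative; only the signature above is taken from this file):
#   speed_arg = create_choice_argument(lazy_speed, '--speed',
#                                      {'slow': 1, 'fast': 10}, help_='drive speed')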
def create_value_argument(
lazy: Lazy[T],
name: str,
type_: Type[T],
help_: str = '',
default: Optional[str] = '',
) -> Argument[T, T]:
def add_argument(parser: ArgumentParser, args: AddArgumentArgs, kwargs: AddArgumentKwargs) -> None:
parser.add_argument(*args, **kwargs)
return Argument(add_argument, default, help_, lazy, name, lambda value: value, type_)
class ArgumentRegistry:
def __init__(self) -> None:
self.parser = ArgumentParser(formatter_class=MetavarTypeHelpFormatter)
self.arguments: Dict[str, AnyArgument] = dict()
def add_arguments(self, arguments_list: Iterable[IArguments]) -> None:
for arguments in arguments_list:
for argument in arguments.get_arguments():
self.add_argument(argument)
def add_argument(self, argument: AnyArgument) -> None:
name = argument.get_name()
if name in self.arguments:
raise RuntimeError(f'Argument {name} already used.')
self.arguments[name] = argument
argument.add_argument(self.parser)
def parse_arguments(self, argument_vector: Optional[Sequence[str]] = None) -> None:
parsed = vars(self.parser.parse_args(argument_vector))
parsed_keys = set(parsed.keys())
for name, value in parsed.items():
if name not in self.arguments:
raise RuntimeError(f'Unknown argument {name}.')
if value is not None:
self.arguments[name].set_value(value)
parsed_keys.remove(name)
if len(parsed_keys) != 0:
unknown_arguments_string = ', '.join((f'{key}: {parsed[key]}' for key in parsed_keys))
raise RuntimeError(f'Unknown arguments: {unknown_arguments_string}')
def process_arguments(arguments_list: Iterable[IArguments], argument_vector: Optional[Sequence[str]] = None) -> None:
argument_registry = ArgumentRegistry()
argument_registry.add_arguments(arguments_list)
argument_registry.parse_arguments(argument_vector)
| 32.821656 | 117 | 0.653406 | 592 | 5,153 | 5.52027 | 0.172297 | 0.05049 | 0.031212 | 0.016524 | 0.26224 | 0.212974 | 0.137087 | 0.065483 | 0.065483 | 0.065483 | 0 | 0.001015 | 0.235591 | 5,153 | 156 | 118 | 33.032051 | 0.828637 | 0 | 0 | 0.176471 | 0 | 0 | 0.04518 | 0.005063 | 0 | 0 | 0 | 0 | 0 | 1 | 0.159664 | false | 0 | 0.07563 | 0.02521 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
# ==== file: Classifier/data/process_data.py | repo: vigilancetrent/chatbot-advanced | license: Apache-2.0 ====
import pickle
import sys
sys.path.append("../../") # nopep8
from Sentence_Encoder.meta_query_encoder import encode
import tensorflow.compat.v1 as tf
import tensorflow_text
import tensorflow_hub as hub
import numpy as np
tf.disable_eager_execution()
sess = tf.InteractiveSession(graph=tf.Graph())
ConvRT_model = hub.Module("../../Sentence_Encoder/Embeddings/ConvRT/")
USE_QA_model = hub.load('../../Sentence_Encoder/Embeddings/USE_QA/')
sess.run(tf.tables_initializer())
sess.run(tf.global_variables_initializer())
train_dir = "train.txt"
dev_dir = "dev.txt"
filename = train_dir
all_targets = []
def extract_data(filename):
contexts = []
queries = []
acts = []
with open(filename) as file:
global all_targets
lines = file.readlines()
for line in lines:
line = line.strip()
split_line = line.split(" : ")
line = split_line[1]
context1 = split_line[0]
if "what" in context1.lower() \
or "why" in context1.lower() \
or "where" in context1.lower() \
or "how" in context1.lower() \
or "who" in context1.lower():
punc = "?"
else:
punc = "."
split_line = line.split(" > ")
context2 = split_line[0].strip()
if context2 == "EMPTY":
context = context1+punc
else:
context = context1+punc+" "+context2
line = split_line[1]
split_line = line.split(" ## ")
current_uttr = split_line[0]
targets = split_line[1]
targets = targets.split(";")
targets = [target for target in targets if target != '']
if len(targets) < 2:
targets.append("NULL")
all_targets += targets
contexts.append(context)
queries.append(current_uttr)
acts.append(targets)
return contexts, queries, acts
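# Input line format reconstructed from the parsing above (the example line is
# hypothetical, not taken from the dataset):
#   "<context1> : <context2 or EMPTY> > <utterance> ## <act1>;<act2>;"
# e.g. "how are you : EMPTY > i am fine ## inform;" yields
#   context = "how are you?", query = "i am fine", acts = ["inform", "NULL"]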
train_contexts, train_queries, train_acts = extract_data(train_dir)
test_contexts, test_queries, test_acts = extract_data(dev_dir)
all_targets = list(set(all_targets))
labels2idx = {v: i for i, v in enumerate(all_targets)}
train_queries_vec = []
i = 0
batch_size = 2000
while i < len(train_queries):
print(i)
if i+batch_size > len(train_queries):
batch_size = len(train_queries)-i
train_query_vec = encode(sess, train_queries[i:i+batch_size], train_contexts[i:i +
batch_size], USE_QA_model, ConvRT_model)
train_queries_vec.append(train_query_vec)
i += batch_size
train_queries_vec = np.concatenate(train_queries_vec, axis=0)
test_queries_vec = []
i = 0
batch_size = 2000  # reset: the final train batch above may have shrunk batch_size
while i < len(test_queries):
if i+batch_size > len(test_queries):
batch_size = len(test_queries)-i
test_query_vec = encode(sess, test_queries[i:i+batch_size], test_contexts[i:i +
batch_size], USE_QA_model, ConvRT_model)
test_queries_vec.append(test_query_vec)
i += batch_size
test_queries_vec = np.concatenate(test_queries_vec, axis=0)
print(train_queries_vec.shape)
print(test_queries_vec.shape)
train_acts_vec = []
for acts in train_acts:
train_acts_vec.append([labels2idx[act] for act in acts])
test_acts_vec = []
for acts in test_acts:
test_acts_vec.append([labels2idx[act] for act in acts])
train_acts_vec = np.asarray(train_acts_vec, np.int64)
test_acts_vec = np.asarray(test_acts_vec, np.int64)
print(train_acts_vec.shape)
print(test_acts_vec.shape)
data = {}
data["labels2idx"] = labels2idx
data["train_contexts"] = train_contexts
data["test_contexts"] = test_contexts
data["train_queries"] = train_queries
data["train_acts"] = train_acts
data["test_queries"] = test_queries
data["test_acts"] = test_acts
data["test_queries_vec"] = test_queries_vec
data["test_acts_vec"] = test_acts_vec
data["train_queries_vec"] = train_queries_vec
data["train_acts_vec"] = train_acts_vec
with open("processed_data.pkl", 'wb') as fp:
pickle.dump(data, fp)
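# Round-trip sanity check (sketch): reload the pickle just written and confirm
# the encoded matrix lines up with the raw query list.
with open("processed_data.pkl", 'rb') as fp:
    reloaded = pickle.load(fp)
assert reloaded["train_queries_vec"].shape[0] == len(reloaded["train_queries"])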
# ==== file: topojoin/cli.py | repo: SimmonsRitchie/topojoin | license: MIT ====
"""Console script for topojoin."""
import os
import sys
from pathlib import Path
import click
from topojoin.topojoin import TopoJoin
from typing import Union, Dict, Any
@click.command()
@click.argument("topo_path", type=click.Path(exists=True))
@click.argument("csv_path", type=click.Path(exists=True))
@click.option(
"topo_key",
"--topokey",
"-tk",
default="id",
type=click.STRING,
help="Key in topojson file that will be used to join with CSV file",
show_default=True,
)
@click.option(
"csv_key",
"--csvkey",
"-ck",
default="id",
type=click.STRING,
help="Key in CSV file that will be used to join with topojson file",
show_default=True,
)
@click.option(
"csv_props",
"--csv_props",
"-cp",
type=click.STRING,
help="Comma separated list of fields in CSV file to merge to each topojson feature "
"(eg: name,population,net_income). Defaults to including all fields in CSV file.",
)
@click.option(
"output_path",
"--output_path",
"-o",
default=Path(os.getcwd()) / "joined.json",
type=click.Path(resolve_path=True),
help="Output path of joined topojson file. Defaults to current working directory.",
)
@click.option(
"quiet",
"--quiet",
"-q",
is_flag=True,
default=False,
help="Disables stdout during program run",
)
@click.version_option()
def main(
quiet: bool,
csv_path: Union[str, Path],
topo_path: Union[str, Path],
csv_props: str,
output_path: Union[str, Path],
**kwargs,
) -> Dict[str, Any]:
"""
A CLI utility that joins CSV data to a topojson file.
Returns:
Dict[str, Any]: topojson data merged with CSV values.
"""
if quiet:
f = open(os.devnull, "w")
sys.stdout = f
tj = TopoJoin(topo_path, csv_path, **kwargs)
click.echo(
f"Joining {tj.csv_filename} to {tj.topo_filename}..."
)
click.echo(
f"CSV key '{tj.csv_key}' will be joined with topojson key '{tj.topo_key}'"
)
clean_output_path = Path(output_path)
if csv_props:
csv_props = [x.strip() for x in csv_props.split(",")]
if not set(csv_props).issubset(set(tj.all_csv_props)):
click.echo(f"Error: One or more fields in csv_props is not among available CSV properties: "
f"{', '.join(tj.all_csv_props)}. Please enter valid fields.")
exit(code=1)
topo_data = tj.join(clean_output_path, csv_props)
click.echo(f"Joined data saved to: {clean_output_path}")
return topo_data
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
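# Example invocation (sketch; the file and field names are hypothetical):
#   python cli.py counties.topojson stats.csv -tk GEOID -ck geoid \
#       -cp name,population -o merged.json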
# ==== file: empowermentexploration/models/visualization.py | repo: franziskabraendle/alchemy_empowerment | license: MIT ====
import empowermentexploration.utils.data_handle as data_handle
import empowermentexploration.utils.helpers as helpers
import matplotlib as mpl
import matplotlib.colors as c
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.stats import sem
mpl.use('Agg')
class Visualization():
"""Visualization model.
"""
def __init__(self, game_version, time, model_type, temperatures, runs, steps, memory_type, empowerment_calculation=None):
"""Initializes visualization.
Args:
game_version (str): Name of the game dataset the simulations are run on (e.g. 'alchemy2').
time (str): Time that will be used as folder name for saving pictures.
model_type (str or list): Model(s) that the plots are for.
temperatures (list): List of temperatures for which simulations are run.
runs (int): Number of simulations.
steps (int): Number of steps for each simulation.
memory_type (int, optional): States whether it should be memorized what combinations have been used before. There are different options
(1) 0 = no memory
(2) 1 = memory
(3) 2 = fading memory (delete random previous combination every 10 steps)
empowerment_calculation (tuple, optional): Tuple made of three entries. Defaults to None.
- dynamic (bool): Whether calculation of empowerment is done dynamically or static.
- local (bool): Whether calculation of empowerment is done locally or globally.
- outgoing_combinations (bool): Whether calculation of empowerment is done on outgoing combinations
or length of set of resulting elements.
"""
# set attributes
self.game_version = game_version
self.time = time
self.model_type = model_type
self.temperatures = temperatures
self.runs = runs
self.steps = steps
self.memory_type = memory_type
self.empowerment_calculation = empowerment_calculation
# set general settings for plotting
# TODO: change font to Open Sans
sns.set_theme(context='paper', style='ticks', font='Arial', font_scale=2, rc={'lines.linewidth': 2, 'grid.linewidth':0.6, 'grid.color': '#9d9d9d',
'axes.linewidth':0.6, 'axes.edgecolor': '#9d9d9d'})
# set color
color = '#828282'
if self.model_type == 'base':
color = '#828282'
elif self.model_type == 'emp' or self.model_type == 'trueemp':
color = '#0c2e8a'
#color = '#ffc640' '#0c2e8a'
elif self.model_type == 'bin' or self.model_type == 'truebin':
color = '#ffc640'
#color = '#ff796c' '#ffc640'
elif self.model_type == 'cbu':
color = '#ff796c'
#color = '#0c2e8a' '#ff796c'
elif self.model_type == 'cbv':
color = '#6ab8d9'
elif self.model_type == 'sim':
color = '#bf3409'
color = c.to_rgba(color)
self.colors = [color]
if temperatures is not None and len(temperatures) > 1:
j = 1
for _ in range(len(self.temperatures)-1):
j += 0.2
self.colors.append(helpers.adjust_lightness(color,j))
sns.set_palette(sns.color_palette(self.colors))
def plot_gameprogress(self, inventory):
"""Plots game progress and saves file as PNG.
Args:
inventory (Inventory): Inventory info.
"""
# print info
print('\nPlot game progress.')
# plot data for each temperature
for t, temperature in enumerate(self.temperatures):
# get average inventory over time, means and stds
steps = range(0,self.steps+1)
inventory_over_time = np.squeeze(inventory.inventory_size_over_time[t,:,:])
inventory_over_time_mean = np.mean(inventory_over_time, axis=0)
inventory_over_time_sem = sem(inventory_over_time, axis=0)
# plot line
if self.model_type == 'base':
plt.plot(inventory_over_time_mean)
else:
plt.plot(inventory_over_time_mean, label='T={}'.format(temperature))
# plot std
plt.fill_between(steps, inventory_over_time_mean - inventory_over_time_sem,
inventory_over_time_mean + inventory_over_time_sem, alpha=0.1)
# set titles, labels, legends
plt.xlabel('Trial')
plt.ylabel('Inventory size')
plt.xlim(left=0, right=self.steps)
plt.ylim(bottom=0)
if self.model_type != 'base':
plt.legend(loc=0, frameon=False)
model = helpers.translate_model(self.model_type)
#plt.title('Game progress averaged over {} runs, model={}'.format(self.runs, model), loc='center', wrap=True)
plt.tight_layout()
if self.model_type in ['emp', 'trueemp', 'bin', 'truebin']:
plt.savefig('empowermentexploration/data/models/{}/{}-{}-{}-memory{}-averageGameProgress.svg'.format(self.time, self.game_version, self.model_type, self.empowerment_calculation, self.memory_type))
plt.savefig('empowermentexploration/data/models/{}/{}-{}-{}-memory{}-averageGameProgress.png'.format(self.time, self.game_version, self.model_type, self.empowerment_calculation, self.memory_type))
else:
plt.savefig('empowermentexploration/data/models/{}/{}-{}-memory{}-averageGameProgress.svg'.format(self.time, self.game_version, self.model_type, self.memory_type))
plt.savefig('empowermentexploration/data/models/{}/{}-{}-memory{}-averageGameProgress.png'.format(self.time, self.game_version, self.model_type, self.memory_type))
plt.close()
def plot_inventory_sizes(self, inventory, temperature_idx):
"""Plots density of inventory sizes.
Args:
inventory: (Inventory): Inventory info.
temperature_idx (int): Index of given temperature.
"""
# print info
print('\nPlot inventory sizes.')
# get inventory sizes
inventory_sizes = np.squeeze(inventory.inventory_size_over_time[temperature_idx,:,-1])
# plot data as histogram (density)
ax = sns.histplot(data=inventory_sizes, kde=True)
# plot mean
if len(ax.lines) != 0:
kdeline = ax.lines[0]
mean = inventory_sizes.mean()
height = np.interp(mean, kdeline.get_xdata(), kdeline.get_ydata())
ax.vlines(mean, 0, height, ls='dashed', color='#444444', linewidth=1)
# set titles, labels
plt.xlabel('Inventory size')
plt.ylabel('Count')
model = helpers.translate_model(self.model_type)
if self.model_type == 'base':
#plt.title('Density of inventory sizes at {} runs, model={}'.format(self.runs, model), loc='center', wrap=True)
plt.tight_layout()
plt.savefig('empowermentexploration/data/models/{}/{}-memory{}-inventorySizes.svg'.format(self.time, self.model_type, self.memory_type))
plt.savefig('empowermentexploration/data/models/{}/{}-memory{}-inventorySizes.png'.format(self.time, self.model_type, self.memory_type))
elif self.model_type in ['emp', 'trueemp', 'bin', 'truebin']:
#plt.title('Density of inventory sizes at {} runs, model={}, T={}'.format(self.runs, model, self.temperatures[temperature_idx]), loc='center', wrap=True)
plt.tight_layout()
plt.savefig('empowermentexploration/data/models/{}/{}-{}-memory{}-temperature{}-inventorySizes.svg'.format(self.time, self.model_type, self.empowerment_calculation, self.memory_type, self.temperatures[temperature_idx]))
plt.savefig('empowermentexploration/data/models/{}/{}-{}-memory{}-temperature{}-inventorySizes.png'.format(self.time, self.model_type, self.empowerment_calculation, self.memory_type, self.temperatures[temperature_idx]))
else:
#plt.title('Density of inventory sizes at {} runs, model={}, T={}'.format(self.runs, model, self.temperatures[temperature_idx]), loc='center', wrap=True)
plt.tight_layout()
plt.savefig('empowermentexploration/data/models/{}/{}-memory{}-temperature{}-inventorySizes.svg'.format(self.time, self.model_type, self.memory_type, self.temperatures[temperature_idx]))
plt.savefig('empowermentexploration/data/models/{}/{}-memory{}-temperature{}-inventorySizes.png'.format(self.time, self.model_type, self.memory_type, self.temperatures[temperature_idx]))
plt.close()
def plot_all(self, value_calculation, human=True):
"""Plots gameprogress curves for all run models and humans for comparison.
Args:
value_calculation (tuple): Tuple containing first calculation info on model trueemp, then emp, then truebin and bin.
human (boolean, optional): True if plot for human should be added as well, False if not. Defaults to True.
"""
# print info
print('\nPlot game progress.')
# set figure size
plt.figure(figsize=(6.2,5))
# initialize variable storing maximum
max = -1
# read in model data
for model_type in self.model_type:
# plot human line earlier in case of special model arrangement
if human is True and np.all(self.model_type == ['base','cbu', 'truebin', 'trueemp']) and model_type == 'truebin':
# set color
colors = ['#82cafc', '#d9effe']
# read in human data
memory = False
if self.memory_type == 1:
memory = True
data = data_handle.get_player_data('alchemy2', memory=memory)
grouped_data = data.groupby('id')
# get player subset
trial_sizes = grouped_data['trial'].max()
trial_sizes = trial_sizes.loc[trial_sizes > self.steps-2]
idx = trial_sizes.index
player_subset = data.query('id in @idx & trial < @self.steps')
# get human average inventory over time
inventory_over_time_mean = player_subset.groupby('trial')['inventory'].mean()
inventory_over_time_sem = player_subset.groupby('trial')['inventory'].sem()
# plot line
plt.plot(inventory_over_time_mean, label='human', color=colors[0])
# plot std
plt.fill_between(range(0,self.steps), inventory_over_time_mean - inventory_over_time_sem,
inventory_over_time_mean + inventory_over_time_sem, alpha=0.1, color=colors[1])
# set color
if model_type == 'base':
colors = ['#828282', '#a9a9a9']
elif model_type == 'emp' or model_type == 'trueemp':
colors = ['#0c2e8a', '#ae8782']
#colors = ['#ffc640', '#ffdd83'] ['#0c2e8a', '#ae8782']
elif model_type == 'bin' or model_type == 'truebin':
colors = ['#ffc640', '#ffdd83']
#colors = ['#ff796c', '#ffc1ba'] ['#ffc640', '#ffdd83']
elif model_type == 'cbu':
colors = ['#ff796c', '#ffc1ba']
#colors = ['#0c2e8a', '#ae8782'] ['#ff796c', '#ffc1ba']
elif model_type == 'cbv':
colors = ['#6ab8d9', '#a9d6e9']
elif model_type == 'sim':
colors = ['#bf3409', '#f55422']
# set empowerment_calculation
if model_type == 'trueemp':
empowerment_calculation = value_calculation[0]
elif model_type == 'emp':
empowerment_calculation = value_calculation[1]
elif model_type in ['truebin']:
empowerment_calculation = value_calculation[2]
elif model_type in ['bin']:
empowerment_calculation = value_calculation[3]
else:
empowerment_calculation = ['placeholder']
for e_c in empowerment_calculation:
if model_type in ['trueemp', 'emp', 'bin', 'truebin']:
data = data_handle.get_gameprogress_data(self.time, self.game_version, '{}-{}'.format(model_type, e_c), self.memory_type)
else:
data = data_handle.get_gameprogress_data(self.time, self.game_version, model_type, self.memory_type)
# get temperature value resulting in highest average inventory size
t_max = -1
t = 0
if model_type != 'base':
for temperature in range(len(self.temperatures)):
inventory_over_time = np.squeeze(data['out'][temperature,:,self.steps])
if np.mean(inventory_over_time, axis=0) > t_max:
t_max = np.mean(inventory_over_time, axis=0)
t = temperature
# plot gameprogress curve
inventory_over_time = np.squeeze(data['out'][t,:,:self.steps])
inventory_over_time_mean = np.mean(inventory_over_time, axis=0)
inventory_over_time_sem = sem(inventory_over_time, axis=0)
# check if maximum
if np.max(inventory_over_time_mean) > max:
max = np.max(inventory_over_time_mean)
# plot line
plt.plot(inventory_over_time_mean, label=helpers.translate_model(model_type), color=colors[0])
# plot std
plt.fill_between(range(0,self.steps), inventory_over_time_mean - inventory_over_time_sem,
inventory_over_time_mean + inventory_over_time_sem, alpha=0.1, color=colors[1])
if human is True and not np.all(self.model_type == ['base','cbu', 'truebin', 'trueemp']):
# set color
colors = ['#82cafc', '#cfeafe']
# read in human data
memory = False
if self.memory_type == 1:
memory = True
data = data_handle.get_player_data('alchemy2', memory=memory)
grouped_data = data.groupby('id')
# get player subset
trial_sizes = grouped_data['trial'].max()
trial_sizes = trial_sizes.loc[trial_sizes > self.steps-2]
idx = trial_sizes.index
player_subset = data.query('id in @idx & trial < @self.steps')
# get human average inventory over time
inventory_over_time_mean = player_subset.groupby('trial')['inventory'].mean()
inventory_over_time_sem = player_subset.groupby('trial')['inventory'].sem()
# plot line
plt.plot(inventory_over_time_mean, label='human', color=colors[0])
# plot std
plt.fill_between(range(0,self.steps), inventory_over_time_mean - inventory_over_time_sem,
inventory_over_time_mean + inventory_over_time_sem, alpha=0.1, color=colors[1])
# set titles, labels, legends
plt.xlabel('Trial')
plt.ylabel('Inventory size')
plt.xlim(left=0, right=self.steps)
plt.ylim(bottom=0, top=max+max/4)
#plt.yticks(np.arange(0, max+5, 20))
plt.legend(loc='lower left', bbox_to_anchor=(0, 0.66, 1.02, 0.2), frameon=False, mode='expand', ncol=2)
# plt.title('Game progress comparison, averaged over {} runs, memory={}'.format(self.runs, self.memory_type), loc='center', wrap=True)
plt.tight_layout()
plt.savefig('empowermentexploration/data/models/{}/{}-averageGameProgressAll.pdf'.format(self.time, self.game_version))
plt.savefig('empowermentexploration/data/models/{}/{}-averageGameProgressAll.png'.format(self.time, self.game_version))
plt.savefig('empowermentexploration/data/models/{}/{}-averageGameProgressAll.svg'.format(self.time, self.game_version))
plt.close()
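# Usage sketch (hypothetical values; `inventory` must expose an
# inventory_size_over_time array of shape (n_temperatures, runs, steps + 1),
# which is what plot_gameprogress() indexes above):
# vis = Visualization(game_version='alchemy2', time='20210101-000000',
#                     model_type='emp', temperatures=[0.1, 1.0], runs=100,
#                     steps=500, memory_type=1,
#                     empowerment_calculation=(True, False, True))
# vis.plot_gameprogress(inventory)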
# ==== file: pyzoo/zoo/chronos/autots/model/auto_prophet.py | repo: cabuliwallah/analytics-zoo | license: Apache-2.0 ====
# +
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.orca.automl.auto_estimator import AutoEstimator
from zoo.chronos.model.prophet import ProphetBuilder
# -
class AutoProphet:
def __init__(self,
changepoint_prior_scale=0.05,
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
seasonality_mode='additive',
changepoint_range=0.8,
metric='mse',
logs_dir="/tmp/auto_prophet_logs",
cpus_per_trial=1,
name="auto_prophet",
**prophet_config
):
"""
Create an automated Prophet Model.
User need to specify either the exact value or the search space of the
Prophet model hyperparameters. For details of the Prophet model hyperparameters, refer to
https://facebook.github.io/prophet/docs/diagnostics.html#hyperparameter-tuning.
:param changepoint_prior_scale: Int or hp sampling function from an integer space
for hyperparameter changepoint_prior_scale for the Prophet model.
For hp sampling, see zoo.chronos.orca.automl.hp for more details.
e.g. hp.loguniform(0.001, 0.5).
:param seasonality_prior_scale: hyperparameter seasonality_prior_scale for the
Prophet model.
e.g. hp.loguniform(0.01, 10).
:param holidays_prior_scale: hyperparameter holidays_prior_scale for the
Prophet model.
e.g. hp.loguniform(0.01, 10).
:param seasonality_mode: hyperparameter seasonality_mode for the
Prophet model.
e.g. hp.choice(['additive', 'multiplicative']).
:param changepoint_range: hyperparameter changepoint_range for the
Prophet model.
e.g. hp.uniform(0.8, 0.95).
:param metric: String. The evaluation metric name to optimize. e.g. "mse"
:param logs_dir: Local directory to save logs and results. It defaults to
"/tmp/auto_prophet_logs"
:param cpus_per_trial: Int. Number of cpus for each trial. It defaults to 1.
:param name: name of the AutoProphet. It defaults to "auto_prophet"
:param prophet_config: Other Prophet hyperparameters.
"""
self.search_space = {
"changepoint_prior_scale": changepoint_prior_scale,
"seasonality_prior_scale": seasonality_prior_scale,
"holidays_prior_scale": holidays_prior_scale,
"seasonality_mode": seasonality_mode,
"changepoint_range": changepoint_range,
}
self.metric = metric
model_builder = ProphetBuilder()
self.auto_est = AutoEstimator(model_builder=model_builder,
logs_dir=logs_dir,
resources_per_trial={"cpu": cpus_per_trial},
name=name)
def fit(self,
data,
epochs=1,
validation_data=None,
metric_threshold=None,
n_sampling=1,
search_alg=None,
search_alg_params=None,
scheduler=None,
scheduler_params=None,
):
"""
Automatically fit the model and search for the best hyperparameters.
:param data: Training data, A 1-D numpy array.
:param epochs: Max number of epochs to train in each trial. Defaults to 1.
If you have also set metric_threshold, a trial will stop if either it has been
optimized to the metric_threshold or it has been trained for {epochs} epochs.
:param validation_data: Validation data. A 1-D numpy array.
:param metric_threshold: a trial will be terminated when metric threshold is met
:param n_sampling: Number of times to sample from the search_space. Defaults to 1.
If hp.grid_search is in search_space, the grid will be repeated n_sampling of times.
If this is -1, (virtually) infinite samples are generated
until a stopping condition is met.
:param search_alg: str, all supported searcher provided by ray tune
(i.e."variant_generator", "random", "ax", "dragonfly", "skopt",
"hyperopt", "bayesopt", "bohb", "nevergrad", "optuna", "zoopt" and
"sigopt")
:param search_alg_params: extra parameters for searcher algorithm besides search_space,
metric and searcher mode
:param scheduler: str, all supported scheduler provided by ray tune
:param scheduler_params: parameters for scheduler
"""
self.auto_est.fit(data=data,
validation_data=validation_data,
metric=self.metric,
metric_threshold=metric_threshold,
n_sampling=n_sampling,
search_space=self.search_space,
search_alg=search_alg,
search_alg_params=search_alg_params,
scheduler=scheduler,
scheduler_params=scheduler_params
)
def get_best_model(self):
"""
Get the best Prophet model.
"""
return self.auto_est.get_best_model()
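# Usage sketch (search spaces follow the hp examples from the __init__
# docstring; the exact import path for `hp` is an assumption):
# from zoo.orca.automl import hp
# auto_prophet = AutoProphet(
#     changepoint_prior_scale=hp.loguniform(0.001, 0.5),
#     seasonality_prior_scale=hp.loguniform(0.01, 10),
#     seasonality_mode=hp.choice(['additive', 'multiplicative']),
#     metric='mse')
# auto_prophet.fit(data=train_data, validation_data=val_data, n_sampling=16)
# best_model = auto_prophet.get_best_model()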
# ==== file: process-data.py | repo: RE-Lab-Projects/TRY_DE_2015_2045 | license: MIT ====
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import pandas as pd
import glob
import os
# %%
# Read and format raw data and save to csv
# Pattern for the dataset names: TRY_[1-15]_[a,w,s]_[2015,2045] with
# 1-15 -> test reference region number
# a,w,s -> average, extreme winter, extreme summer
root=os.getcwd()
year=['2015','2045']
station = ['1','2','3','4','5','6','7','8','9','10','11','12','13','14','15']
mode = ['Jahr','Somm','Wint']
for y in year:
for s in station:
for m in mode:
fn1=glob.glob(root+'/1_raw-data/'+s+'/*'+y+'*'+m+'.dat')
fn2=glob.glob(root+'/2_synthetic-radiation/'+s+'/*'+y+'*'+m+'*'+'.dat')
for f1, f2 in zip(fn1, fn2):
# change notation
if m=='Jahr':
m='a'
elif m=='Somm':
m='s'
else:
m='w'
# skiprows 34 / 36 if year is 2015 / 2045
if y=='2015':
sr=34
y1='2014'
y2='2015'
else:
sr=36
y1='2044'
y2='2045'
# load DWD data file
vars()['TRY'+'_'+s+'_'+m+'_'+y] = pd.read_csv(f1, sep=r'\s+', skiprows=sr, header=None, usecols=[5,6,7,8,9,11,12,13])
# rename columns
vars()['TRY'+'_'+s+'_'+m+'_'+y].rename(columns={5:'temperature [degC]',
6:'pressure [hPa]',
7:'wind direction [deg]',
8:'wind speed [m/s]',
9:'cloud coverage [1/8]',
11:'humidity [%]',
12:'direct irradiance [W/m^2]',
13:'diffuse irradiance [W/m^2]'}, inplace=True)
# load synthetic data file
data_synth = pd.read_csv(f2, sep=',', skiprows=1, header=None)
# rename columns
data_synth.rename(columns={0:'synthetic global irradiance [W/m^2]',
1:'synthetic diffuse irradiance [W/m^2]',
2:'clear sky irradiance [W/m^2]'},
inplace=True)
# generate datetime column
vars()['TRY'+'_'+s+'_'+m+'_'+y]['datetime'] = pd.date_range(start=y+'-01-01 00:00:00', end=y+'-12-31 23:00:00', freq='h', tz='CET')
# set datetime as index
vars()['TRY'+'_'+s+'_'+m+'_'+y].set_index(keys='datetime',inplace=True)
# save to csv
vars()['TRY'+'_'+s+'_'+m+'_'+y].to_csv(root+'/3_processed-data/TRY'+'_'+s+'_'+m+'_'+y+'_60min.csv')
# upsampling to 1min temporal resolution and linear interpolation
vars()['TRY'+'_'+s+'_'+m+'_'+y].index = pd.to_datetime(vars()['TRY'+'_'+s+'_'+m+'_'+y].index, utc=True)
vars()['TRY'+'_'+s+'_'+m+'_'+y].index = vars()['TRY'+'_'+s+'_'+m+'_'+y].index.shift(1800, freq='s')
vars()['TRY'+'_'+s+'_'+m+'_'+y].loc[pd.to_datetime(y1+'-12-31 22:30:00+00:00')] = vars()['TRY'+'_'+s+'_'+m+'_'+y].iloc[0].values
vars()['TRY'+'_'+s+'_'+m+'_'+y] = vars()['TRY'+'_'+s+'_'+m+'_'+y].sort_index()
vars()['TRY'+'_'+s+'_'+m+'_'+y].loc[pd.to_datetime(y2+'-12-31 23:30:00+00:00')] = vars()['TRY'+'_'+s+'_'+m+'_'+y].iloc[8760].values
data = vars()['TRY'+'_'+s+'_'+m+'_'+y].tz_convert('CET')
vars()['TRY'+'_'+s+'_'+m+'_'+y+'_1min'] = data.resample('1min').interpolate().round(1)
vars()['TRY'+'_'+s+'_'+m+'_'+y+'_1min'] = vars()['TRY'+'_'+s+'_'+m+'_'+y+'_1min'].iloc[30:-31]
# add columns with synthetic radiation from the Hofmann et al. model
data_synth['datetime'] = vars()['TRY'+'_'+s+'_'+m+'_'+y+'_1min'].index
data_synth.set_index(keys='datetime',inplace=True)
vars()['TRY'+'_'+s+'_'+m+'_'+y+'_1min'] = pd.concat([vars()['TRY'+'_'+s+'_'+m+'_'+y+'_1min'], data_synth], axis=1)
# save to csv
vars()['TRY'+'_'+s+'_'+m+'_'+y+'_1min'].to_csv(root+'/3_processed-data/TRY'+'_'+s+'_'+m+'_'+y+'_1min.csv')
# downsampling to 15min temporal resolution
vars()['TRY'+'_'+s+'_'+m+'_'+y+'_15min'] = vars()['TRY'+'_'+s+'_'+m+'_'+y+'_1min'].resample('15T').mean().round(1)
# save to csv
vars()['TRY'+'_'+s+'_'+m+'_'+y+'_15min'].to_csv(root+'/3_processed-data/TRY'+'_'+s+'_'+m+'_'+y+'_15min.csv')
# delete from workspace
del vars()['TRY'+'_'+s+'_'+m+'_'+y], vars()['TRY'+'_'+s+'_'+m+'_'+y+'_1min'], vars()['TRY'+'_'+s+'_'+m+'_'+y+'_15min'], data, data_synth
# %%
# ==== file: twelve-days/twelve_days.py | repo: Isaac-Tolu/exercism-python | license: MIT ====
structure = [
('first', 'a Partridge in a Pear Tree.'),
('second', 'two Turtle Doves, '),
('third', 'three French Hens, '),
('fourth', 'four Calling Birds, '),
('fifth', 'five Gold Rings, '),
('sixth', 'six Geese-a-Laying, '),
('seventh', 'seven Swans-a-Swimming, '),
('eighth', 'eight Maids-a-Milking, '),
('ninth', 'nine Ladies Dancing, '),
('tenth', 'ten Lords-a-Leaping, '),
('eleventh', 'eleven Pipers Piping, '),
('twelfth', 'twelve Drummers Drumming, '),
]
def recite(start_verse, end_verse):
song_list = []
for verse in range(start_verse, end_verse + 1):
each_verse = first_part(verse) + second_part(verse)
song_list.append(each_verse)
return song_list
def first_part(verse):
verse_segment = f'On the {structure[verse-1][0]} day of Christmas ' \
f'my true love gave to me: '
return verse_segment
def second_part(verse):
verse_segment = ''
for i in range(verse, 0, -1):
verse_segment += structure[i-1][1]
if i == 2: verse_segment += 'and '
return verse_segment
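# Example (follows from the cumulative-verse logic above): recite(2, 2) returns
# ["On the second day of Christmas my true love gave to me: "
#  "two Turtle Doves, and a Partridge in a Pear Tree."]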
# ==== file: get_max_audio.py | repo: ykvch/QABox | license: BSD-2-Clause ====
"""
Test obtaining peak audio values with PyAV:
time python pyav_audio_vol.py ~/Videos/sample.mp4
Reference:
https://ffmpeg.org/doxygen/trunk/group__lavu__sampfmts.html
"""
import sys
import av
from numpy import abs, sqrt, vdot, fromiter, float64, uint8, average, frombuffer, mean
import matplotlib.pyplot as plt
video_file = sys.argv[1]
container = av.open(video_file)
audio_stream = container.streams.audio[0]
# audio_stream.thread_type = "AUTO"
video_stream = container.streams.video[0]
video_stream.thread_type = "AUTO"
def rms(x): # https://stackoverflow.com/a/28398092 o_O noice!
return sqrt(vdot(x, x) / x.size)
# np.diff, np.gradient
# np.where(np.convolve(all_frames_array, [1,1,-1,-1])>.7)
# audio_max = fromiter(((frame.pts, rms(frame.to_ndarray())) for frame in
# audio_max = fromiter((rms(frame.to_ndarray()) for frame in
# container.decode(video_stream, audio_stream)), float64)
# zero for luma
# video_max = fromiter((average(frame.to_ndarray()[0].size) for frame in
video_max = fromiter((mean(frombuffer(frame.planes[0], uint8).reshape(1920, 1080)[1200:,:500]) for frame in
container.decode(video_stream)), uint8)
# videos = [f for f in container.decode(video=0)]
# import pdb; pdb.set_trace()
container.close()
print(video_max.max())
# print(audio_max.max())
# with open("out.bin", "bw") as out_f:
# save(out_f,audio_max)
# plt.plot(video_max)
# plt.show()
| 30.276596 | 107 | 0.718904 | 224 | 1,423 | 4.415179 | 0.455357 | 0.044489 | 0.040445 | 0.066734 | 0.11729 | 0.11729 | 0.11729 | 0 | 0 | 0 | 0 | 0.035455 | 0.127899 | 1,423 | 46 | 108 | 30.934783 | 0.761483 | 0.56149 | 0 | 0 | 0 | 0 | 0.006656 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.266667 | 0.066667 | 0.4 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
# ==== file: ml_app/src/api/serve.py | repo: marcostx/mlops-playground | license: MIT ====
from flask import Blueprint, request, jsonify
from flasgger import Swagger, LazyJSONEncoder
from flasgger.utils import swag_from
from src.model.test import make_prediction
from src.api.config import get_logger
_logger = get_logger(logger_name=__name__)
prediction_app = Blueprint('prediction_app', __name__)
@prediction_app.route('/health', methods=['GET'])
def health():
"""
Health check endpoint
---
description: Health check endpoint
responses:
200:
description: the system is ok
"""
if request.method == 'GET':
_logger.info('health status OK')
return 'ok'
@prediction_app.route('/predict/regression', methods=['POST'])
@swag_from("/app/ml_app/src/api/swagger_conf.yml")
def predict():
if request.method == 'POST':
json_data = request.get_json()
_logger.info(f'Inputs: {json_data}')
result = make_prediction(input_data=json_data)
_logger.info(f'Outputs: {result}')
predictions = result.get('predictions')[0]
version = result.get('version')
return jsonify({'predictions': predictions,
'version': version})
| 28.390244 | 62 | 0.665808 | 137 | 1,164 | 5.437956 | 0.408759 | 0.069799 | 0.040268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004357 | 0.21134 | 1,164 | 40 | 63 | 29.1 | 0.80719 | 0.101375 | 0 | 0 | 0 | 0 | 0.178218 | 0.035644 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.208333 | 0 | 0.375 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
# ==== file: visualization/colorbar.py | repo: CAMI-DKFZ/simpa_paper_experiments | license: MIT ====
# SPDX-FileCopyrightText: 2021 Computer Assisted Medical Interventions Group, DKFZ
# SPDX-FileCopyrightText: 2021 Janek Groehl
# SPDX-License-Identifier: MIT
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def col_bar(image, fontsize=None, fontname=None, ticks=None):
ax = plt.gca()
# ax.axes.xaxis.set_visible(False)
ax.axes.xaxis.set_ticks([])
ax.axes.yaxis.set_ticks([])
# ax.axes.yaxis.set_visible(False)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
# plt.clim(0, 1)
if fontsize is None or fontname is None:
plt.colorbar(image, cax=cax, orientation="vertical")
else:
cbar = plt.colorbar(image, cax=cax, orientation="vertical", ticks=ticks)
for l in cbar.ax.yaxis.get_ticklabels():
l.set_family(fontname)
cbar.ax.tick_params(labelsize=fontsize)
| 35.423077 | 82 | 0.703583 | 130 | 921 | 4.869231 | 0.530769 | 0.037915 | 0.078989 | 0.044234 | 0.194313 | 0.194313 | 0.129542 | 0 | 0 | 0 | 0 | 0.019763 | 0.175896 | 921 | 25 | 83 | 36.84 | 0.814229 | 0.2519 | 0 | 0 | 0 | 0 | 0.033724 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
# ==== file: PEARSON_CORRELATION/global.py | repo: madgik/mip-algorithms | license: MIT ====
import sys
import math
import json
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))) + '/utils/')
import numpy as np
import scipy.special as special
from algorithm_utils import get_parameters, set_algorithms_output_data
from pearsonc_lib import PearsonCorrelationLocalDT
def pearsonc_global(global_in):
nn, sx, sy, sxx, sxy, syy, schema_X, schema_Y = global_in.get_data()
n_cols = len(nn)
schema_out = [None] * n_cols
result_list = []
for i in range(n_cols):
schema_out[i] = schema_X[i] + '_' + schema_Y[i]
# compute pearson correlation coefficient and p-value
if nn[i] == 0:
r = None
prob = None
else:
d = (math.sqrt(nn[i] * sxx[i] - sx[i] * sx[i]) * math.sqrt(nn[i] * syy[i] - sy[i] * sy[i]))
if d == 0:
r = 0
else:
r = float((nn[i] * sxy[i] - sx[i] * sy[i]) / d)
r = max(min(r, 1.0), -1.0) # if abs(r) > 1 correct: artifact of floating point arithmetic.
df = nn[i] - 2
if abs(r) == 1.0:
prob = 0.0
else:
t_squared = r ** 2 * (df / ((1.0 - r) * (1.0 + r)))
prob = special.betainc(
0.5 * df, 0.5, np.fmin(np.asarray(df / (df + t_squared)), 1.0)
)
result_list.append({
'Variable pair' : schema_out[i],
'Pearson correlation coefficient': r,
'p-value' : prob if prob >= 2.2e-16 else 0.0
})
global_out = json.dumps({'result': result_list})
return global_out
def main():
# read parameters
parameters = get_parameters(sys.argv[1:])
if not parameters or len(parameters) < 1:
raise ValueError("There should be 1 parameter")
# get data from local db
localdbs = parameters.get("-local_step_dbs")
if localdbs is None:
raise ValueError("local_step_dbs not provided as parameter.")
local_out = PearsonCorrelationLocalDT.load(localdbs)
# run algorithm global step
global_out = pearsonc_global(global_in=local_out)
# return the algorithm's output
set_algorithms_output_data(global_out)
if __name__ == '__main__':
main()
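# Cross-check of the closed-form coefficient used above against scipy (sketch;
# the sample values are made up):
# import scipy.stats as stats
# x = [1.0, 2.0, 3.0, 4.0]; y = [2.0, 4.0, 5.0, 4.0]
# n = len(x); sx = sum(x); sy = sum(y)
# sxx = sum(v * v for v in x); syy = sum(v * v for v in y)
# sxy = sum(a * b for a, b in zip(x, y))
# r = (n * sxy - sx * sy) / (math.sqrt(n * sxx - sx * sx) * math.sqrt(n * syy - sy * sy))
# assert abs(r - stats.pearsonr(x, y)[0]) < 1e-12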
# ==== file: varats-core/varats/table/tables.py | repo: J03D03/VaRA-Tool-Suite | license: BSD-2-Clause ====
"""General tables module."""
import logging
import re
import typing as tp
from pathlib import Path
from varats.mapping.commit_map import create_lazy_commit_map_loader
from varats.plot.plot_utils import check_required_args
from varats.utils.settings import vara_cfg
if tp.TYPE_CHECKING:
import varats.table.table as table # pylint: disable=unused-import
LOG = logging.getLogger(__name__)
class TableRegistry(type):
"""Registry for all supported tables."""
TO_SNAKE_CASE_PATTERN = re.compile(r'(?<!^)(?=[A-Z])')
tables: tp.Dict[str, tp.Type[tp.Any]] = {}
tables_discovered = False
def __init__(
cls, name: str, bases: tp.Tuple[tp.Any], attrs: tp.Dict[tp.Any, tp.Any]
):
super(TableRegistry, cls).__init__(name, bases, attrs)
if hasattr(cls, 'NAME'):
key = getattr(cls, 'NAME')
else:
key = TableRegistry.TO_SNAKE_CASE_PATTERN.sub('_', name).lower()
TableRegistry.tables[key] = cls
@staticmethod
def get_table_types_help_string() -> str:
"""
Generates help string for visualizing all available tables.
Returns:
a help string that contains all available table names.
"""
return "The following tables are available:\n " + "\n ".join([
key for key in TableRegistry.tables if key != "table"
])
@staticmethod
def get_class_for_table_type(table_type: str) -> tp.Type['table.Table']:
"""
Get the class for ``table`` from the table registry.
Args:
table_type: the name of the table
Returns:
the class implementing the table
"""
from varats.table.table import Table # pylint: disable=C0415
if table_type not in TableRegistry.tables:
raise LookupError(
f"Unknown table '{table_type}'.\n" +
TableRegistry.get_table_types_help_string()
)
table_cls = TableRegistry.tables[table_type]
if not issubclass(table_cls, Table):
raise AssertionError()
return table_cls
def build_tables(**args: tp.Any) -> None:
"""
Build the specified table(s).
Args:
**args: the arguments for the table(s)
"""
for p_table in prepare_tables(**args):
build_table(p_table)
def build_table(table_to_build: 'table.Table') -> None:
"""
Builds the given table.
Args:
table_to_build: the table to build
"""
table_to_build.format = table_to_build.table_kwargs["output-format"]
if table_to_build.table_kwargs["view"]:
print(table_to_build.tabulate())
else:
table_to_build.save(
wrap_document=table_to_build.table_kwargs.
get("wrap_document", False)
)
@check_required_args(['table_type'])
def prepare_table(**kwargs: tp.Any) -> 'table.Table':
"""
Instantiate a table with the given args.
Args:
**kwargs: the arguments for the table
Returns:
the instantiated table
"""
table_type = TableRegistry.get_class_for_table_type(kwargs['table_type'])
return table_type(**kwargs)
def prepare_tables(**args: tp.Any) -> tp.Iterable['table.Table']:
"""
Instantiate the specified table(s).
First, compute missing arguments that are needed by most tables.
Args:
**args: the arguments for the table(s)
Returns:
an iterable of instantiated table
"""
# pylint: disable=C0415
from varats.paper.case_study import load_case_study_from_file
from varats.paper_mgmt.paper_config import get_paper_config
# pylint: enable=C0415
# Setup default result folder
if 'result_output' not in args:
args['table_dir'] = str(vara_cfg()['tables']['table_dir'])
else:
args['table_dir'] = args.pop('result_output')
if not Path(args['table_dir']).exists():
LOG.error(f"Could not find output dir {args['table_dir']}")
return []
if 'view' not in args:
args['view'] = False
if 'output-format' not in args:
from varats.table.table import TableFormat # pylint: disable=C0415
if args['view']:
args['output-format'] = TableFormat.fancy_grid
else:
args['output-format'] = TableFormat.latex_booktabs
if 'paper_config' not in args:
args['paper_config'] = False
LOG.info(f"Writing tables to: {args['table_dir']}")
if args['paper_config']:
tables: tp.List['table.Table'] = []
paper_config = get_paper_config()
for case_study in paper_config.get_all_case_studies():
project_name = case_study.project_name
args['project'] = project_name
args['get_cmap'] = create_lazy_commit_map_loader(project_name)
args['table_case_study'] = case_study
tables.append(prepare_table(**args))
return tables
if 'project' in args:
args['get_cmap'] = create_lazy_commit_map_loader(args['project'])
if 'cs_path' in args:
case_study_path = Path(args['cs_path'])
args['table_case_study'] = load_case_study_from_file(case_study_path)
else:
args['table_case_study'] = None
return [prepare_table(**args)]
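# Registration sketch: any class created with TableRegistry as its metaclass is
# keyed by its NAME attribute or, failing that, by its snake_cased class name
# (MyCoolTable below is hypothetical):
# class MyCoolTable(metaclass=TableRegistry):
#     pass
# assert TableRegistry.tables['my_cool_table'] is MyCoolTable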
# ==== file: sogs/model/user.py | repo: Bilb/session-pysogs | license: MIT ====
from __future__ import annotations
from .. import crypto, db, config
from ..db import query
from ..web import app
from .exc import NoSuchUser, BadPermission
from typing import Optional
import time
class User:
"""
Class representing a user stored in the database.
Properties:
id - the database primary key for this user row
session_id - the session_id of the user, in hex
created - unix timestamp when the user was created
last_active - unix timestamp when the user was last active
banned - True if the user is (globally) banned
global_admin - True if the user is a global admin
global_moderator - True if the user is a global moderator
visible_mod - True if the user's admin/moderator status should be visible in rooms
"""
def __init__(
self,
row=None,
*,
id: Optional[int] = None,
session_id: Optional[str] = None,
autovivify: bool = True,
touch: bool = False,
try_blinding: bool = False,
):
"""
Constructs a user from a pre-retrieved row *or* a session id or user primary key value.
autovivify - if True and we are given a session_id that doesn't exist, either consider
importing from a pre-blinding user (if needed) or create a default user row and use it to
populate the object. This is the default behaviour. If False and the session_id doesn't
exist then a NoSuchUser is raised if the session id doesn't exist.
try_blinding - if True and blinding is required, and a given `session_id` is given that is
*not* blinded then attempt to look up the possible blinded versions of the session id and
use one of those (if they exist) rather than the given unblinded id. If no blinded version
exists then the unblinded id will be used (check `.is_blinded` after construction to see if
we found and switched to the blinded id).
touch - if True (default is False) then update the last_activity time of this user before
returning it.
"""
self._touched = False
self._refresh(
row=row, id=id, session_id=session_id, autovivify=autovivify, try_blinding=try_blinding
)
if touch:
self._touch()
def _refresh(
self,
*,
row=None,
id: Optional[int] = None,
session_id: Optional[str] = None,
autovivify: bool = True,
try_blinding: bool = False,
):
"""
Internal method to (re-)fetch details from the database; this is used during construction
but also in the test suite to forcibly re-fetch details.
"""
n_args = sum(x is not None for x in (row, session_id, id))
if n_args == 0 and hasattr(self, 'id'):
id = self.id
elif n_args != 1:
raise ValueError("User() error: exactly one of row/session_id/id is required")
self._tried_blinding = False
if session_id is not None:
if try_blinding and config.REQUIRE_BLIND_KEYS and session_id.startswith('05'):
b_pos = crypto.compute_blinded_abs_id(session_id)
b_neg = crypto.blinded_neg(b_pos)
row = query(
"SELECT * FROM users WHERE session_id IN (:pos, :neg) LIMIT 1",
pos=b_pos,
neg=b_neg,
).first()
self._tried_blinding = True
if not row:
row = query("SELECT * FROM users WHERE session_id = :s", s=session_id).first()
if not row and autovivify:
if config.REQUIRE_BLIND_KEYS:
row = self._import_blinded(session_id)
if not row:
row = db.insert_and_get_row(
"INSERT INTO users (session_id) VALUES (:s)", "users", "id", s=session_id
)
# No need to re-touch this user since we just created them:
self._touched = True
elif id is not None:
row = query("SELECT * FROM users WHERE id = :u", u=id).fetchone()
if row is None:
raise NoSuchUser(session_id if session_id is not None else id)
self.id, self.session_id, self.created, self.last_active = (
row[c] for c in ('id', 'session_id', 'created', 'last_active')
)
self.banned, self.global_moderator, self.global_admin, self.visible_mod = (
bool(row[c]) for c in ('banned', 'moderator', 'admin', 'visible_mod')
)
def _import_blinded(self, session_id):
"""
Attempts to import the user and permission rows from an unblinded session_id to a new,
blinded session_id row.
Any permissions/bans are *moved* from the old, unblinded id to the new blinded user record.
"""
if not session_id.startswith('15'):
return
blind_abs = crypto.blinded_abs(session_id.lower())
with db.transaction():
to_import = query(
"""
SELECT * FROM users WHERE id = (
SELECT "user" FROM needs_blinding WHERE blinded_abs = :ba
)
""",
ba=blind_abs,
).fetchone()
if to_import is None:
return False
row = db.insert_and_get_row(
"""
INSERT INTO users
(session_id, created, last_active, banned, moderator, admin, visible_mod)
VALUES (:sid, :cr, :la, :ban, :mod, :admin, :vis)
""",
"users",
"id",
sid=session_id,
cr=to_import["created"],
la=to_import["last_active"],
ban=to_import["banned"],
mod=to_import["moderator"],
admin=to_import["admin"],
vis=to_import["visible_mod"],
)
# If we have any global ban/admin/mod then clear them (because we've just set up the
# global ban/mod/admin permissions for the blinded id in the query above).
query(
"UPDATE users SET banned = FALSE, admin = FALSE, moderator = FALSE WHERE id = :u",
u=to_import["id"],
)
for t in ("user_permission_overrides", "user_permission_futures", "user_ban_futures"):
query(
f'UPDATE {t} SET "user" = :new WHERE "user" = :old',
new=row["id"],
old=to_import["id"],
)
query('DELETE FROM needs_blinding WHERE "user" = :u', u=to_import["id"])
return row
def __str__(self):
"""Returns string representation of a user: U[050123…cdef], the id prefixed with @ or % if
the user is a global admin or moderator, respectively."""
if len(self.session_id) != 66:
# Something weird (e.g. the "deleted" id from an old sogs import), just print directly
return f"U[{self.session_id}]"
return "U[{}{}…{}]".format(
'@' if self.global_admin else '%' if self.global_moderator else '',
self.session_id[:6],
self.session_id[-4:],
)
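# Example renderings (sketch): a plain user prints as "U[051234…cdef]", a
# global admin as "U[@051234…cdef]", and a global moderator as "U[%051234…cdef]".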
def _touch(self):
query(
"""
UPDATE users SET last_active = :now
WHERE id = :u
""",
u=self.id,
now=time.time(),
)
self._touched = True
def touch(self, force=False):
"""
Updates the last activity time of this user. This method only updates the first time it is
called (and possibly not even then, if we auto-vivified the user row), unless `force` is set
to True.
"""
if not self._touched or force:
self._touch()
def update_room_activity(self, room):
query(
"""
INSERT INTO room_users ("user", room) VALUES (:u, :r)
ON CONFLICT("user", room) DO UPDATE
SET last_active = :now
""",
u=self.id,
r=room.id,
now=time.time(),
)
def set_moderator(self, *, added_by: User, admin=False, visible=False):
"""
Make this user a global moderator or admin. If the user is already a global mod/admin then
their status is updated according to the given arguments (that is, this can promote/demote).
If `admin` is None then the current admin status is left unchanged.
"""
if not added_by.global_admin:
app.logger.warning(
f"Cannot set {self} as global {'admin' if admin else 'moderator'}: "
f"{added_by} is not a global admin"
)
raise BadPermission()
with db.transaction():
u = self
need_blinding = False
if config.REQUIRE_BLIND_KEYS:
blinded = self.find_blinded()
if blinded is not None:
u = blinded
else:
need_blinding = True
query(
f"""
UPDATE users
SET moderator = TRUE, visible_mod = :visible
{', admin = :admin' if admin is not None else ''}
WHERE id = :u
""",
admin=bool(admin),
visible=visible,
u=u.id,
)
if need_blinding:
u.record_needs_blinding()
u.global_admin = admin
u.global_moderator = True
u.visible_mod = visible
def remove_moderator(self, *, removed_by: User, remove_admin_only: bool = False):
"""Removes this user's global moderator/admin status, if set."""
if not removed_by.global_admin:
app.logger.warning(
f"Cannot remove {self} as global mod/admin: {removed_by} is not an admin"
)
raise BadPermission()
query(
f"""
UPDATE users
SET admin = FALSE {', moderator = FALSE' if not remove_admin_only else ''}
WHERE id = :u
""",
u=self.id,
)
self.global_admin = False
self.global_moderator = False
def ban(self, *, banned_by: User, timeout: Optional[float] = None):
"""
Globally bans this user from the server; can only be applied by a global moderator or global
admin, and cannot be applied to another global moderator or admin (to prevent accidental
mod/admin banning; to ban them, first explicitly remove them as moderator/admin and then
ban).
timeout should be None for a non-expiring ban and otherwise should be the duration of the
ban, in seconds; an unban will be scheduled to occur after the interval. In either case,
any existing scheduled global unbans for this user will be deleted (and replaced, if a new
timeout is provided).
"""
if not banned_by.global_moderator:
app.logger.warning(f"Cannot ban {self}: {banned_by} is not a global mod/admin")
raise BadPermission()
with db.transaction():
u = self
need_blinding = False
if config.REQUIRE_BLIND_KEYS:
blinded = self.find_blinded()
if blinded is not None:
u = blinded
else:
need_blinding = True
if u.global_moderator:
app.logger.warning(f"Cannot ban {u}: user is a global moderator/admin")
raise BadPermission()
query("UPDATE users SET banned = TRUE WHERE id = :u", u=u.id)
query(
'DELETE FROM user_ban_futures WHERE room IS NULL AND "user" = :u AND NOT banned',
u=u.id,
)
if timeout:
query(
"""
INSERT INTO user_ban_futures
("user", room, banned, at) VALUES (:u, NULL, FALSE, :at)
""",
u=u.id,
at=time.time() + timeout,
)
if need_blinding:
u.record_needs_blinding()
app.logger.debug(f"{banned_by} globally banned {u}{f' for {timeout}s' if timeout else ''}")
u.banned = True
def unban(self, *, unbanned_by: User):
"""
Undoes a global ban. `unbanned_by` must be a global mod/admin.
Any currently scheduled global ban futures for this user will be removed as well.
"""
if not unbanned_by.global_moderator:
app.logger.warning(f"Cannot unban {self}: {unbanned_by} is not a global mod/admin")
raise BadPermission()
query("UPDATE users SET banned = FALSE WHERE id = :u", u=self.id)
query(
'DELETE FROM user_ban_futures WHERE room IS NULL AND "user" = :u AND banned', u=self.id
)
app.logger.debug(f"{unbanned_by} removed global ban on {self}")
self.banned = False
def verify(self, *, message: bytes, sig: bytes):
"""verify signature signed by this session id
return True if the signature is valid otherwise return False
"""
pk = crypto.xed25519_pubkey(bytes.fromhex(self.session_id[2:]))
return crypto.verify_sig_from_pk(message, sig, pk)
def find_blinded(self):
"""
Attempts to look up the blinded User associated with this (unblinded) session id.
If this User is already a blinded id, this simply returns `self`.
Otherwise, if we find a blinded id in the users table that corresponds to this (unblinded)
id we return a new User object for the blinded user.
Otherwise returns None.
"""
if self.is_blinded:
return self
if not self.session_id.startswith('05'): # Mainly here to catch the SystemUser
return None
if self._tried_blinding:
# We already tried (and failed) to get the blinded id during construction
return None
b_pos = crypto.compute_blinded_abs_id(self.session_id)
b_neg = crypto.blinded_neg(b_pos)
row = query(
"SELECT * FROM users WHERE session_id IN (:pos, :neg) LIMIT 1", pos=b_pos, neg=b_neg
).first()
if not row:
self._tried_blinding = True
return None
return User(row)
def record_needs_blinding(self):
"""
Inserts a database record into the `needs_blinding` table indicating that this user's
permissions/bans need to be moved to a blinded id. This should only be called for an unblinded
user for which find_blinded did not find an existing blinded user row.
"""
query(
"""
INSERT INTO needs_blinding (blinded_abs, "user") VALUES (:b_abs, :u)
ON CONFLICT DO NOTHING
""",
b_abs=crypto.compute_blinded_abs_id(self.session_id),
u=self.id,
)
@property
def is_blinded(self):
"""True if the user's session id is a derived key"""
return self.session_id.startswith('15')
@property
def system_user(self):
"""True if (and only if) this is the special SOGS system user
created for internal database tasks"""
return self.session_id[0:2] == "ff" and self.session_id[2:] == crypto.server_pubkey_hex
class SystemUser(User):
"""
User subclass representing the local system for performing local operations, e.g. from the
command line.
"""
def __init__(self):
super().__init__(session_id="ff" + crypto.server_pubkey_hex)
def get_all_global_moderators():
"""
Returns all global moderators; for internal use only, as this doesn't filter out hidden
mods/admins.
Returns a 4-tuple of lists of:
- visible mods
- visible admins
- hidden mods
- hidden admins
"""
m, hm, a, ha = [], [], [], []
for row in query("SELECT * FROM users WHERE moderator"):
u = User(row=row)
if u.system_user:
continue
lst = (a if u.global_admin else m) if u.visible_mod else (ha if u.global_admin else hm)
lst.append(u)
return (m, a, hm, ha)
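# Hedged usage sketch (the unpacking below is illustrative, not from the
# original source); the tuple order matches the docstring above:
#   mods, admins, hidden_mods, hidden_admins = get_all_global_moderators()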
| 36.123077 | 100 | 0.563945 | 2,090 | 16,436 | 4.317225 | 0.169856 | 0.049873 | 0.01873 | 0.013299 | 0.2436 | 0.198604 | 0.168126 | 0.142525 | 0.101186 | 0.094869 | 0 | 0.003002 | 0.351363 | 16,436 | 454 | 101 | 36.202643 | 0.842791 | 0.286079 | 0 | 0.367816 | 0 | 0 | 0.175521 | 0.004723 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.076628 | 0 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c39c355b6d0a784ab7ced2706035abd7d88c4b92 | 871 | py | Python | avconv/avconv_stdout.py | suchkultur/sidomotest | 5fa813aeb77b856c90b96fbfe0b9a7eff669f304 | [
"MIT"
] | null | null | null | avconv/avconv_stdout.py | suchkultur/sidomotest | 5fa813aeb77b856c90b96fbfe0b9a7eff669f304 | [
"MIT"
] | null | null | null | avconv/avconv_stdout.py | suchkultur/sidomotest | 5fa813aeb77b856c90b96fbfe0b9a7eff669f304 | [
"MIT"
] | null | null | null | """A python FFMPEG module built from sdpm."""
from __future__ import unicode_literals
from sidomo import Container
def transcode_file(url):
"""Any format --> 20000 Hz mono wav audio."""
with Container(
'suchkultur/avconv',
memory_limit_gb=2,
stderr=False
) as c:
for line in c.run(
'bash -c \"\
wget -nv -O tmp.unconverted %s;\
avconv -i tmp.unconverted -f wav -acodec pcm_s16le -ac 1 -ar 20000 tmp.wav;\
cat tmp.wav\
\"\
' % url
):
yield line
if __name__ == '__main__':
print("I'm gonna transcode an audio file and print the result to stdout.")
url = 'http://www2.warwick.ac.uk/fac/soc/sociology/staff/sfuller/media/audio/9_minutes_on_epistemology.mp3'
for line in transcode_file(url):
print(line)
| 30.034483 | 111 | 0.590126 | 116 | 871 | 4.25 | 0.715517 | 0.052738 | 0.064909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027915 | 0.300804 | 871 | 28 | 112 | 31.107143 | 0.781609 | 0.0907 | 0 | 0 | 0 | 0.090909 | 0.241997 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.136364 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c39ee19e15e090d8880bdf7df0d8e984b7c43a6a | 3,401 | py | Python | optimizers/reg_params.py | Coricos/Challenger | b5f8ec49f72836779943109efa8eb88695bfd7d1 | [
"Apache-2.0"
] | 15 | 2019-09-06T15:49:12.000Z | 2021-12-15T08:10:25.000Z | optimizers/reg_params.py | Coricos/Challenger | b5f8ec49f72836779943109efa8eb88695bfd7d1 | [
"Apache-2.0"
] | null | null | null | optimizers/reg_params.py | Coricos/Challenger | b5f8ec49f72836779943109efa8eb88695bfd7d1 | [
"Apache-2.0"
] | 2 | 2020-01-05T03:40:09.000Z | 2021-12-15T08:10:29.000Z | # Author: DINDIN Meryll
# Date: 02/03/2019
# Project: optimizers
SPACE = dict()
SPACE['SGD'] = {
'loss': ('choice', ['squared_loss', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive']),
'penalty': ('choice', ['none', 'l1', 'l2', 'elasticnet']),
'alpha': ('uniform_log', (1e-10, 1.0)),
'l1_ratio': ('uniform_float', (0.0, 1.0)),
'fit_intercept': ('choice', [True, False]),
'max_iter': ('uniform_int', (100, 1000)),
'tol': ('uniform_float', (1e-5, 0.1)),
'epsilon': ('uniform_log', (1e-5, 0.1)),
'learning_rate': ('choice', ['constant', 'optimal', 'invscaling', 'adaptive']),
'eta0': ('uniform_log', (1e-5, 1.0)),
'power_t': ('uniform_float', (0.1, 1.0))
}
SPACE['SVM'] = {
'C': ('uniform_log', (1e-3, 10)),
'kernel': ('choice', ['linear', 'poly', 'rbf', 'sigmoid']),
'degree': ('uniform_int', (2, 5)),
'gamma': ('uniform_log', (1e-8, 1.0)),
'shrinking': ('choice', [True, False]),
'tol': ('uniform_float', (1e-5, 0.1)),
'epsilon': ('uniform_log', (1e-3, 1.0))
}
SPACE['ETS'] = {
'n_estimators': ('uniform_int', (10, 1000)),
'max_depth': ('uniform_int', (2, 16)),
'min_samples_split': ('uniform_int', (2, 20)),
'min_samples_leaf': ('uniform_int', (1, 10)),
'min_weight_fraction_leaf': ('uniform_float', (0, 0.5)),
'criterion': ('choice', ['mse', 'mae']),
'bootstrap': ('choice', [True, False]),
'max_features': ('uniform_float', (0, 1))
}
SPACE['RFS'] = {
'n_estimators': ('uniform_int', (10, 1000)),
'max_depth': ('uniform_int', (2, 16)),
'min_samples_split': ('uniform_int', (2, 20)),
'min_samples_leaf': ('uniform_int', (1, 10)),
'min_weight_fraction_leaf': ('uniform_float', (0, 0.5)),
'criterion': ('choice', ['mse', 'mae']),
'bootstrap': ('choice', [True, False]),
'max_features': ('uniform_float', (0, 1))
}
SPACE['LGB'] = {
'n_estimators': ('uniform_int', (10, 1000)),
'max_depth': ('uniform_int', (2, 16)),
'learning_rate': ('uniform_log', (0.001, 0.5)),
'num_leaves': ('uniform_int', (2, 50)),
'min_child_weight': ('uniform_log', (1e-4, 0.1)),
'min_child_samples': ('uniform_int', (10, 30)),
'colsample_bytree': ('uniform_float', (0.5, 1.0)),
'reg_alpha': ('uniform_log', (1e-10, 1)),
'reg_lambda': ('uniform_log', (1e-3, 10)),
'objective': ('choice', ('regression_l1', 'regression_l2', 'mape'))
}
SPACE['XGB'] = {
'n_estimators': ('uniform_int', (10, 1000)),
'max_depth': ('uniform_int', (2, 16)),
'learning_rate': ('uniform_log', (0.001, 0.5)),
'num_leaves': ('uniform_int', (2, 50)),
'min_child_weight': ('uniform_log', (1e-4, 0.1)),
'min_child_samples': ('uniform_int', (10, 30)),
'colsample_bytree': ('uniform_float', (0.5, 1.0)),
'colsample_bylevel': ('uniform_float', (0.1, 1.0)),
'reg_alpha': ('uniform_log', (1e-10, 1)),
'reg_lambda': ('uniform_log', (1e-3, 10)),
'gamma': ('uniform_float', (0.0, 1.0)),
'subsample': ('uniform_float', (0.0, 1.0)),
}
SPACE['CAT'] = {
'learning_rate': ('uniform_log', (0.001, 0.5)),
'n_estimators': ('uniform_int', (10, 200)),
'reg_lambda': ('uniform_log', (1e-3, 10)),
'max_depth': ('uniform_int', (2, 12)),
'colsample_bylevel': ('uniform_float', (0.1, 1.0)),
}
| 21.801282 | 104 | 0.541017 | 435 | 3,401 | 3.97931 | 0.23908 | 0.11554 | 0.090121 | 0.040439 | 0.71115 | 0.678221 | 0.629694 | 0.615829 | 0.563836 | 0.563836 | 0 | 0.078388 | 0.197295 | 3,401 | 155 | 105 | 21.941935 | 0.555678 | 0.01823 | 0 | 0.526316 | 0 | 0 | 0.449175 | 0.022489 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c39fc66cbbaf2d7063e82766c69335a2ff9ae3b8 | 14,626 | py | Python | src/resipy/compress_utils.py | sgreene8/resipy | 297533fd1a2f0443f4a7a062b36c8ab7df5e5e63 | [
"MIT"
] | 2 | 2019-05-06T02:55:40.000Z | 2019-05-22T09:53:19.000Z | src/resipy/compress_utils.py | sgreene8/resipy | 297533fd1a2f0443f4a7a062b36c8ab7df5e5e63 | [
"MIT"
] | null | null | null | src/resipy/compress_utils.py | sgreene8/resipy | 297533fd1a2f0443f4a7a062b36c8ab7df5e5e63 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
"""
Utilities for performing stochastic matrix and vector compressions.
"""
import numpy
import misc_c_utils
import near_uniform
def fri_subd(vec, num_div, sub_weights, n_samp):
""" Perform FRI compression on a vector whose first elements,
vec[i] are each subdivided into equal segments, and
whose last elements are each divided into unequal segments.
Parameters
----------
vec : (numpy.ndarray, float)
vector on which to perform compression. Elements can
be negative, and it need not be normalized. vec.shape[0]
must equal num_div.shape[0] + sub_weights.shape[0]
num_div : (numpy.ndarray, unsigned int)
the first num_div.shape[0] elements of vec are subdivided
into equal segments, the number of which for each element
is specified in this array
sub_weights : (numpy.ndarray, float)
the weights of the unequal subdivisions of the last
sub_weights.shape[0] elements of vec. Must be row-
normalized.
n_samp : (unsigned int)
    target number of nonzero entries in the compressed vector
Returns
-------
(numpy.ndarray, uint32)
2-D array of indices of nonzero elements in the vector. The
0th column specifies the index in the vec array, while
the 1st specifies the index within the subdivision. Not
necessarily sorted, although indices in the uniform part
of the array are grouped first, followed by indices in the
nonuniform part.
(numpy.ndarray, float64)
values of nonzero elements in the compressed vector
"""
new_idx = numpy.zeros([n_samp, 2], dtype=numpy.uint32)
new_vals = numpy.zeros(n_samp)
weights = numpy.abs(vec)
sub_cp = numpy.copy(sub_weights)
preserve_uni, preserve_nonuni = _keep_idx(weights, num_div, sub_cp, n_samp)
preserve_counts = numpy.zeros_like(num_div, dtype=numpy.uint32)
preserve_counts[preserve_uni] = num_div[preserve_uni]
uni_rpt = misc_c_utils.ind_from_count(preserve_counts)
n_exact_uni = uni_rpt.shape[0]
new_idx[:n_exact_uni, 0] = uni_rpt
uni_seq = misc_c_utils.seq_from_count(preserve_counts)
new_idx[:n_exact_uni, 1] = uni_seq
new_vals[:n_exact_uni] = vec[uni_rpt] / num_div[uni_rpt]
nonuni_exact_idx = numpy.nonzero(preserve_nonuni)
n_exact_nonuni = nonuni_exact_idx[0].shape[0]
n_samp -= (n_exact_uni + n_exact_nonuni)
num_uni_wt = num_div.shape[0]
sub_renorm = numpy.sum(sub_cp, axis=1)
weights[num_uni_wt:] *= sub_renorm
sub_renorm.shape = (-1, 1)
sub_renorm = 1. / sub_renorm
sub_cp *= sub_renorm
one_norm = weights.sum()
if abs(one_norm) > 1e-10:
sampl_idx = sys_subd(weights, num_div, sub_cp, n_samp)
end_idx = n_exact_uni + n_samp
new_idx[n_exact_uni:end_idx] = sampl_idx
new_vals[n_exact_uni:end_idx] = numpy.sign(vec[sampl_idx[:, 0]]) * one_norm / n_samp
else:
end_idx = n_exact_uni
end_idx2 = end_idx + n_exact_nonuni
new_idx[end_idx:end_idx2, 0] = nonuni_exact_idx[0] + num_uni_wt
new_idx[end_idx:end_idx2, 1] = nonuni_exact_idx[1]
new_vals[end_idx:end_idx2] = vec[num_uni_wt + nonuni_exact_idx[0]] * sub_weights[nonuni_exact_idx]
return new_idx[:end_idx2], new_vals[:end_idx2]
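# A minimal sketch of calling fri_subd (not from the original source; assumes
# the misc_c_utils extension is built). The first two weights are split into
# num_div equal pieces; the last two are split by the row-normalized rows of
# sub_weights; roughly n_samp nonzeros come back.
def _demo_fri_subd():
    vec = numpy.array([0.4, 0.2, 0.3, 0.1])
    num_div = numpy.array([2, 3], dtype=numpy.uint32)
    sub_weights = numpy.array([[0.5, 0.5], [0.25, 0.75]])
    return fri_subd(vec, num_div, sub_weights, n_samp=4)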
def deterministic(vec, n_nonz):
"""Calculate the indices of the n_nonz largest-magnitude elementss in vec
Parameters
----------
vec : (numpy.ndarray)
vector to compress
n_nonz : (unsigned int)
desired number of nonzero entries
Returns
-------
(numpy.ndarray, unsigned int)
indices of elements to preserve in compression
"""
weights = numpy.abs(vec)
srt_idx = weights.argsort()[::-1]
cmp_idx = srt_idx[:n_nonz]
return cmp_idx
def fri_1D(vec, n_samp):
"""Compress a vector in full (non-sparse format) using the
FRI scheme, potentially preserving some elements exactly.
Parameters
----------
vec : (numpy.ndarray)
vector to compress
n_samp : (unsigned int)
desired number of nonzero entries in the output vector
Returns
-------
(numpy.ndarray, unsigned int)
indices of nonzero elements in compressed vector, in order
(numpy.ndarray, float)
values of nonzero elements in compressed vector
"""
weights = numpy.abs(vec)
new_vec = numpy.zeros(weights.shape[0])
counts = numpy.ones_like(vec, dtype=numpy.uint32)
sub_wts = numpy.empty((0, 2))
preserve_idx, empty_ret = _keep_idx(weights, counts, sub_wts, n_samp)
preserve_vals = vec[preserve_idx]
new_vec[preserve_idx] = preserve_vals
n_samp -= preserve_vals.shape[0]
one_norm = weights.sum()
if abs(one_norm) > 1e-10:
sampl_idx = sys_resample(weights, n_samp, ret_idx=True)
new_vec[sampl_idx] = one_norm / n_samp * numpy.sign(vec[sampl_idx])
nonz_idx = numpy.nonzero(new_vec)[0]
return nonz_idx, new_vec[nonz_idx]
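# A minimal sketch of fri_1D (illustrative only): compress a dense 5-vector to
# about 3 nonzeros; the largest-magnitude entries may be preserved exactly by
# _keep_idx and the remainder is sampled systematically.
def _demo_fri_1D():
    vec = numpy.array([0.5, -0.3, 0.1, 0.05, 0.05])
    return fri_1D(vec, n_samp=3)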
def _keep_idx(weights, num_div, sub_weights, n_samp):
# Calculate indices of elements in weights that are preserved exactly
# Elements in weights are subdivided according to num_div and sub_weights
num_uni = num_div.shape[0]
uni_keep = numpy.zeros(num_uni, dtype=numpy.bool_)
nonuni_keep = numpy.zeros_like(sub_weights, dtype=numpy.bool_)
one_norm = weights.sum()
any_kept = True
uni_weights = weights[:num_uni] / num_div
nonuni_weights = weights[num_uni:]
nonuni_weights.shape = (-1, 1)
nonuni_weights = nonuni_weights * sub_weights
while any_kept and one_norm > 1e-9:
add_uni = one_norm / n_samp <= uni_weights
uni_weights[add_uni] = 0
uni_keep[add_uni] = True
num_add_uni = num_div[add_uni].sum()
n_samp -= num_add_uni
one_norm -= weights[:num_uni][add_uni].sum()
if one_norm > 0:
add_nonuni = one_norm / n_samp <= nonuni_weights
chosen_weights = nonuni_weights[add_nonuni]
nonuni_weights[add_nonuni] = 0
nonuni_keep[add_nonuni] = True
num_add_nonuni = chosen_weights.shape[0]
n_samp -= num_add_nonuni
one_norm -= chosen_weights.sum()
else:
num_add_nonuni = 0
any_kept = num_add_uni > 0 or num_add_nonuni > 0
sub_weights[nonuni_keep] = 0
weights[:num_uni][uni_keep] = 0
return uni_keep, nonuni_keep
def sys_resample(vec, nsample, ret_idx=False, ret_counts=False):
"""Choose nsample elements of vector vec according to systematic resampling
algorithm (eq. 44-46 in SIAM Rev. 59 (2017), 547-587)
Parameters
----------
vec : (numpy.ndarray, float)
the weights for each index
nsample : (unsigned int)
the number of samples to draw
ret_idx : (bool, optional)
If True, return a vector containing the indices (possibly repeated) of
chosen indices
ret_counts : (bool, optional)
If True, return a 1-D vector of the same shape as vec with the number of
chosen samples at each position
Returns
-------
(tuple)
Contains 0, 1, or 2 numpy vectors depending on the values of input parameters
ret_idx and ret_counts
"""
if nsample == 0:
return numpy.array([], dtype=int)
rand_points = (numpy.linspace(0, 1, num=nsample, endpoint=False) +
numpy.random.uniform(high=1. / nsample))
intervals = numpy.cumsum(vec)
# normalize if necessary
if intervals[-1] != 1.:
intervals /= intervals[-1]
ret_tuple = ()
if ret_idx:
ret_tuple = ret_tuple + (misc_c_utils.linsearch_1D(intervals, rand_points),)
if ret_counts:
ret_tuple = ret_tuple + (misc_c_utils.linsearch_1D_cts(intervals, rand_points),)
return ret_tuple
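# A minimal sketch of sys_resample (illustrative; assumes misc_c_utils is
# built): draw 100 systematic samples from three weights. The returned counts
# track the weights closely, e.g. roughly array([50, 30, 20]).
def _demo_sys_resample():
    weights = numpy.array([0.5, 0.3, 0.2])
    (counts,) = sys_resample(weights, 100, ret_counts=True)
    return counts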
def sys_subd(weights, counts, sub_weights, nsample):
"""Performs systematic resampling on a vector of weights subdivided
according to counts and sub_weights
Parameters
----------
weights : (numpy.ndarray, float)
vector of weights to be subdivided. weights.shape[0] must equal
counts.shape[0] + sub_weights.shape[0]
counts : (numpy.ndarray, unsigned int)
the first counts.shape[0] elements of weights are subdivided into
equal subintervals
sub_weights : (numpy.ndarray, float)
sub_weights[i] corresponds to the subdivisions of weights[i].
Must be row-normalized
nsample : (unsigned int)
number of samples to draw
Returns
-------
(numpy.ndarray, unsigned int)
2-D array of chosen indices. The 0th column is the index in
the weights vector, and the 1st is the index within the
subdivision.
"""
if nsample == 0:
return numpy.empty((0, 2), dtype=numpy.uint32)
rand_points = (numpy.arange(0, 1, 1. / nsample) +
numpy.random.uniform(high=1. / nsample))
rand_points = rand_points[:nsample]
big_intervals = numpy.cumsum(weights)
one_norm = big_intervals[-1]
# normalize if necessary
if abs(one_norm - 1.) > 1e-10:
big_intervals /= one_norm
ret_idx = numpy.zeros([nsample, 2], dtype=numpy.uint32)
weight_idx = misc_c_utils.linsearch_1D(big_intervals, rand_points)
ret_idx[:, 0] = weight_idx
rand_points[weight_idx > 0] -= big_intervals[weight_idx[weight_idx > 0] - 1]
rand_points *= one_norm / weights[weight_idx]
rand_points[rand_points >= 1.] = 0.999999
n_uni_wts = counts.shape[0]
num_uni = numpy.searchsorted(weight_idx, n_uni_wts)
ret_idx[:num_uni, 1] = rand_points[:num_uni] * counts[weight_idx[:num_uni]]
subweight_idx = misc_c_utils.linsearch_2D(sub_weights, weight_idx[num_uni:] - n_uni_wts,
rand_points[num_uni:])
ret_idx[num_uni:, 1] = subweight_idx
return ret_idx
def round_binomially(vec, num_round):
"""Round non-integer entries in vec to integer entries in b such that
b[i] ~ binomial(num_round[i], vec[i] - floor(vec[i])) + floor(vec[i]) * num_round[i]
Parameters
----------
vec : (numpy.ndarray, float)
non-integer numbers to be rounded
num_round : (numpy.ndarray, unsigned int)
parameter of the binomial distribution for each entry in vec, must
have same shape as vec
Returns
-------
(numpy.ndarray, int)
integer array of results
"""
flr_vec = numpy.floor(vec)
flr_vec = flr_vec.astype(numpy.int32)
n = num_round.astype(numpy.uint32)
b = flr_vec * num_round + numpy.random.binomial(n, vec - flr_vec).astype(numpy.int32)
return b
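# A minimal sketch of round_binomially (illustrative only): rounding 2.3 with
# num_round = 10 yields 10*2 + Binomial(10, 0.3), i.e. an integer in [20, 30]
# with mean 23.
def _demo_round_binomially():
    vec = numpy.array([2.3])
    num_round = numpy.array([10], dtype=numpy.uint32)
    return round_binomially(vec, num_round)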
def round_bernoulli(vec, mt_ptrs):
"""Round non-integer entries in vec to integer entries in b such that
b[i] ~ binomial(1, vec[i] - floor(vec[i])) + floor(vec[i])
Parameters
----------
vec : (numpy.ndarray, float)
non-integer numbers to be rounded
mt_ptrs : (numpy.ndarray, uint64)
List of addresses to MT state objects to use for RN generation
Returns
-------
(numpy.ndarray, int)
integer array of results
"""
flr_vec = numpy.floor(vec)
flr_vec = flr_vec.astype(numpy.int32)
b = flr_vec + near_uniform.par_bernoulli(vec - flr_vec, mt_ptrs)
return b
def sample_alias(alias, Q, row_idx, mt_ptrs):
"""Perform multinomial sampling using the alias method for an array of
probability distributions.
Parameters
----------
alias : (numpy.ndarray, unsigned int)
alias indices as calculated in cyth_helpers2.setup_alias
Q : (numpy.ndarray, float)
alias probabilities as calculated in cyth_helpers2.setup_alias
row_idx : (numpy.ndarray, unsigned int)
Row index in alias/Q of each value to sample. Can be obtained from
desired numbers of samples using cyth_helpers2.ind_from_count()
mt_ptrs : (numpy.ndarray, uint64)
List of addresses to MT state objects to use for RN generation
Returns
-------
(numpy.ndarray, unsigned char)
1-D array of chosen column indices of each sample
"""
n_states = alias.shape[1]
tot_samp = row_idx.shape[0]
r_ints = numpy.random.randint(n_states, size=tot_samp)
orig_success = near_uniform.par_bernoulli(Q[row_idx, r_ints], mt_ptrs)
orig_idx = orig_success == 1
alias_idx = numpy.logical_not(orig_idx)
choices = numpy.zeros(tot_samp, dtype=numpy.uint)
choices[orig_idx] = r_ints[orig_idx]
choices[alias_idx] = alias[row_idx[alias_idx], r_ints[alias_idx]]
choices = choices.astype(numpy.uint8)
return choices
def proc_fri_sd_choices(sampl_idx, n_sing, all_arrs, sing_arrs, doub_arrs):
"""Given the results from an FRI compression in which the weights for single excitations
are uniformly subdivided and those for double excitations are subdivided nonuniformly,
rearrange arrays of observables such that they are indexed by the sample index from the compression,
instead of the index in the uncompressed array.
Parameters
----------
sampl_idx : (numpy.ndarray, unsigned int)
Array of chosen top-level indices from the FRI compression
n_sing : (unsigned int)
Number of single excitation elements in the uncompressed array
all_arrs : (python list of numpy.ndarrays)
Each array contains numbers corresponding to each element in the original uncompressed array
sing_arrs : (python list of numpy.ndarrays)
Each array contains numbers corresponding only to single excitation elements in the original
uncompressed array
doub_arrs : (python list of numpy.ndarrays)
Each array contains numbers corresponding only to double excitation elements in the original
uncompressed array
Returns
-------
(python list of numpy.ndarrays), (python list of numpy.ndarrays), (python list of numpy.ndarrays)
Each array contains numbers corresponding to the same parameters as in the inputted arrays,
except they are now indexed according to the sampled indices in the compressed array.
(unsigned int)
Number of single excitation elements in the compressed array
"""
# sampl_idx isn't necessarily sorted, but that's ok because single excitation elements come first
new_n_sing = numpy.searchsorted(sampl_idx, n_sing)
new_all = []
for arr in all_arrs:
new_all.append(arr[sampl_idx])
new_sing = []
for arr in sing_arrs:
new_sing.append(arr[sampl_idx[:new_n_sing]])
doub_idx = sampl_idx[new_n_sing:] - n_sing
new_doub = []
for arr in doub_arrs:
new_doub.append(arr[doub_idx])
return new_all, new_sing, new_doub, new_n_sing
| 36.84131 | 104 | 0.673185 | 2,120 | 14,626 | 4.429245 | 0.15566 | 0.034505 | 0.021299 | 0.022045 | 0.30607 | 0.221832 | 0.178168 | 0.138978 | 0.125346 | 0.107242 | 0 | 0.014306 | 0.235334 | 14,626 | 396 | 105 | 36.934343 | 0.825286 | 0.457541 | 0 | 0.122699 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06135 | false | 0 | 0.018405 | 0 | 0.153374 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3a1abb9b65cf75cfb9749d348aca024d63998e3 | 5,590 | py | Python | venv/lib/python3.8/site-packages/seaborn_qqplot/plots.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 6 | 2019-06-16T15:01:47.000Z | 2021-11-01T00:05:19.000Z | venv/lib/python3.8/site-packages/seaborn_qqplot/plots.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 11 | 2019-06-16T23:10:12.000Z | 2021-07-06T14:24:07.000Z | venv/lib/python3.8/site-packages/seaborn_qqplot/plots.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 2 | 2020-04-09T15:35:25.000Z | 2020-10-11T20:44:25.000Z | # BSD 3-Clause License
#
# Copyright (c) 2019, Rene Jean Corneille
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import rv_continuous, probplot, linregress, t
from seaborn_qqplot.transforms import (
RegressionFit,
ConfidenceInterval,
Scale
)
from seaborn_qqplot.empirical_functions import (
EmpiricalCDF,
EmpiricalQF
)
class _Plot:
def __init__(self, **kwargs):
display_kws = kwargs.pop("display_kws", {})
self.identity = display_kws.get('identity', False)
self.fit = display_kws.get('fit', False)
self.reg = display_kws.get('reg', False)
self.ci = display_kws.get('ci', 0.05)
self.plot_kws = kwargs.pop("plot_kws", {})
def _get_axis_data(self, x, y):
raise NotImplementedError("Method should be overriden in child class.")
def __call__(self, x, y, **kwargs):
""" Draw a probability plot of data contained in x against data in y.
Parameters
----------
x : array_like
Data
y : array_like
Data
kwargs : key, value pairings
Additional keyword arguments are passed to the function.
"""
#TODO: function not tested - write test for this
xr, yr = self._get_axis_data(x,y)
path = plt.scatter(xr, yr, **self.plot_kws)
# display regression
if self.fit:
regression = RegressionFit(xr,yr)
xr_, yr_ = regression(xr,yr)
plt.plot(xr_, yr_, color=path.get_facecolors()[0], **self.plot_kws)
# confidence intervals
if self.reg:
confidence_interval = ConfidenceInterval(x, y, ci=self.ci)
x2, y2_plus, y2_minus = confidence_interval(xr,yr)
plt.gca().fill_between(x2, y2_plus, y2_minus, color=path.get_facecolors()[0], alpha=0.1, **self.plot_kws)
if self.identity:
plt.plot(yr,yr, color='black')
return plt.axes
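# A hedged usage sketch (not part of the package): draw a P-P plot of two
# samples with the identity line. PPPlot is defined below; EmpiricalCDF is
# assumed to accept an array of evaluation points.
def _demo_pp_plot():
    x = np.random.normal(size=200)
    y = np.random.normal(loc=0.2, size=200)
    PPPlot(display_kws={'identity': True})(x, y)
    plt.show()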
class ProbabilityPlot(_Plot):
def _get_axis_data(self, x, y=None):
ecdf = EmpiricalCDF(x)
values = np.arange(np.min(x), np.max(x), 1/len(x))
yr = ecdf(values)
return x, yr
class QuantilePlot(_Plot):
def _get_axis_data(self, x, y=None):
ecdf_x = EmpiricalQF(x)
quantiles = np.arange(0,1, 1/len(x))
yr = ecdf_x(quantiles)
return x, yr
class QQPlot(_Plot):
def _get_axis_data(self, x, y=None):
ecdf_x = EmpiricalQF(x)
ecdf_y = EmpiricalQF(y)
quantiles = np.arange(0,1,1/len(x))
xr = ecdf_x(quantiles)
yr = ecdf_y(quantiles)
return xr, yr
class PPPlot(_Plot):
def _get_axis_data(self, x, y=None):
ecdf_x = EmpiricalCDF(x)
ecdf_y = EmpiricalCDF(y)
values = np.arange(np.min(np.hstack((x,y))),np.max(np.hstack((x,y))+1), 1/len(x))
xr = ecdf_x(values)
yr = ecdf_y(values)
return xr, yr | 33.473054 | 140 | 0.64025 | 798 | 5,590 | 4.358396 | 0.290727 | 0.010351 | 0.031627 | 0.036228 | 0.389592 | 0.351351 | 0.347614 | 0.337263 | 0.337263 | 0.337263 | 0 | 0.00959 | 0.253846 | 5,590 | 167 | 141 | 33.473054 | 0.824263 | 0.328265 | 0 | 0.505376 | 0 | 0 | 0.022497 | 0 | 0 | 0 | 0 | 0.005988 | 0 | 1 | 0.11828 | false | 0 | 0.053763 | 0 | 0.365591 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3a4bac81af5bc3661970906bcc01a696388f5ca | 2,131 | py | Python | src/main/resources/google/cloud/compute/instance/compute_create_running.py | s-lal/xld-google-cloud-compute-plugin | 28389184a6df8bdee33893da6030499384a2b565 | [
"MIT"
] | null | null | null | src/main/resources/google/cloud/compute/instance/compute_create_running.py | s-lal/xld-google-cloud-compute-plugin | 28389184a6df8bdee33893da6030499384a2b565 | [
"MIT"
] | null | null | null | src/main/resources/google/cloud/compute/instance/compute_create_running.py | s-lal/xld-google-cloud-compute-plugin | 28389184a6df8bdee33893da6030499384a2b565 | [
"MIT"
] | null | null | null | #
# Copyright 2020 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from org.xebialabs.community.googlecloud import GoogleCloudCompute
googleCompute = GoogleCloudCompute(deployed.container.clientEmail, deployed.container.privateKey, deployed.container.projectId)
instanceName = deployed.instanceName if deployed.instanceName else deployed.name
zone = deployed.zone
print("Wait for a new instance {} in {} ...".format(instanceName, zone))
if not googleCompute.isOperationDone(deployed.operationSelfLink, zone):
result = "RETRY"
else:
instance = googleCompute.getInstanceByName(instanceName, zone)
print("instance is {0}".format(instance))
deployed.instanceId = str(instance.getSelfLink())
deployed.privateIp = instance.getNetworkInterfaces().get(0).getNetworkIP()
if instance.getNetworkInterfaces().get(0).getAccessConfigs():
deployed.publicIp = instance.getNetworkInterfaces().get(0).getAccessConfigs().get(0).getNatIP()
print("instance ID is {0}".format(deployed.instanceId))
print("private IP is {0}".format(deployed.privateIp))
print("public IP is {0}".format(deployed.publicIp))
| 68.741935 | 462 | 0.779916 | 279 | 2,131 | 5.956989 | 0.480287 | 0.052948 | 0.021661 | 0.057762 | 0.080626 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006529 | 0.137494 | 2,131 | 30 | 463 | 71.033333 | 0.897715 | 0.49038 | 0 | 0 | 0 | 0 | 0.10223 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.058824 | 0 | 0.058824 | 0.294118 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3a6dfb6d53c3397fca6347f04d68006bd544731 | 2,468 | py | Python | figures/seal_delays.py | aditya-sengupta/sealrtc | cadc01d63bd07590b30decbe5fb57c79b3e68503 | [
"CC0-1.0"
] | null | null | null | figures/seal_delays.py | aditya-sengupta/sealrtc | cadc01d63bd07590b30decbe5fb57c79b3e68503 | [
"CC0-1.0"
] | null | null | null | figures/seal_delays.py | aditya-sengupta/sealrtc | cadc01d63bd07590b30decbe5fb57c79b3e68503 | [
"CC0-1.0"
] | null | null | null | import numpy as np
from matplotlib import pyplot as plt
import sys
import re
from os.path import join
fs = 100
def stamp_to_seconds(t):
h, m, s, ms = [int(x) for x in re.search("(\d+):(\d+):(\d+),(\d+)", t).groups()]
return 3600 * h + 60 * m + s + 0.001 * ms
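# e.g. stamp_to_seconds("01:02:03,450") == 3600 + 120 + 3 + 0.45 == 3723.45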
good_runs = ["07_02_14", "07_04_45", "08_11_19", "08_11_46"]
exposures = []
measures = []
dmcs = []
total_nframes = 0
for fname in good_runs:
with open(join("..", "data", "log", f"log_13_11_2021_{fname}.log")) as file:
final_frame = np.inf
for line in file:
time = re.search("\d+:\d+:\d+,\d+", line)[0]
seconds = stamp_to_seconds(time)
event = re.search("INFO \| (.+)", line)[1]
if not any([event.startswith(x) for x in ["Exposure", "Measurement", "DMC"]]):
continue
frame_num = re.search("\d+", event)
if frame_num:
frame_num = int(frame_num[0])
if event.startswith("Exposure"):
exposures.append(seconds)
elif event.startswith("Measurement"):
measures.append(seconds)
elif event.startswith("DMC"):
dmcs.append(seconds)
final_frame = frame_num
total_nframes += final_frame
exposures = exposures[:total_nframes]
measures = measures[:total_nframes]
dmcs = dmcs[:total_nframes]
t0 = exposures[0]
exposures = np.array(exposures) - t0
measures = np.array(measures) - t0
dmcs = np.array(dmcs) - t0
get_meanstd = lambda data: f"{round(np.mean(data), 3)} $\pm$ {round(np.std(data), 3)}"
measure_delays = (measures - exposures) * fs
control_delays = (dmcs - measures) * fs
total_delays = (dmcs - exposures) * fs
fig, axs = plt.subplots(1,3, figsize=(12,8))
def plot_delay_hist(data, i, xlabel, title):
bins = int(max(data) * 1000 / fs) + 1
axs[i].hist(data, bins=min(100, bins), range = (0, min(2, max(total_delays))))
axs[i].set_xlabel(xlabel)
axs[i].set_ylabel("Count")
axs[i].set_title(f"{title}: {get_meanstd(data)} frames")
for (i, (data, xlabel, title)) in enumerate([
    (total_delays, "Exposure to DM command", "Overall AO loop"),
    (measure_delays, "Exposure to measurement", "Measurement"),
    (control_delays, "Measurement to DM command", "Controller")
]):
plot_delay_hist(data, i, xlabel, title)
fig.suptitle("Delays in the AO loop on SEAL, in #frames")
plt.savefig("seal_delays.pdf", bbox_inches='tight') | 35.768116 | 90 | 0.608185 | 351 | 2,468 | 4.136752 | 0.373219 | 0.008264 | 0.008264 | 0.009642 | 0.100551 | 0.056474 | 0.039945 | 0 | 0 | 0 | 0 | 0.037408 | 0.230956 | 2,468 | 69 | 91 | 35.768116 | 0.727608 | 0 | 0 | 0 | 0 | 0.016667 | 0.172945 | 0.028352 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.083333 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3a78423413c586b43d5c6f2ba43237b6c4989d3 | 514 | py | Python | Competive Programming/CodeVitaV7/MockVita/problem_d.py | RitamDey/My-Simple-Programs | 147b455a6a40c371ec894ce979e8a61d242e03bd | [
"Unlicense"
] | 2 | 2016-10-14T16:58:05.000Z | 2017-05-04T04:59:18.000Z | Competive Programming/CodeVitaV7/MockVita/problem_d.py | GreenJoey/My-Simple-Programs | 147b455a6a40c371ec894ce979e8a61d242e03bd | [
"Unlicense"
] | null | null | null | Competive Programming/CodeVitaV7/MockVita/problem_d.py | GreenJoey/My-Simple-Programs | 147b455a6a40c371ec894ce979e8a61d242e03bd | [
"Unlicense"
] | null | null | null | def mul(arr):
res = arr[0]
for i in arr[1:]:
res *= i
return res
def gen_subarray(arr, n):
for i in range(1 << n):
y = 1
for j in range(n):
if i & (1 << j):
y *= arr[j]
yield y
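# e.g. list(gen_subarray([2, 3], 2)) == [1, 2, 3, 6]: bitmask i selects a
# subset of arr and y is the product of the chosen elements, with the empty
# subset contributing 1.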
def main():
n, p1, p2 = map(int, input().split(","))
arr = list(map(int, input().split(",")))
obj = gen_subarray(arr, n)
count = 0
for i in obj:
if i % (p1 * p2) == 0:
count += 1
print(count % 1009)
main()
| 15.117647 | 44 | 0.424125 | 79 | 514 | 2.734177 | 0.379747 | 0.055556 | 0.083333 | 0.064815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 0.40856 | 514 | 33 | 45 | 15.575758 | 0.657895 | 0 | 0 | 0 | 0 | 0 | 0.003891 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0 | 0 | 0.181818 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3ab8d6929c69624d061fbf0aa9d0caa473c282b | 9,205 | py | Python | prototypes/python_prototype/empiricalEstimation.py | Elbercaio/adaptive-engine | c51661ad64d95c7ed36508c90787521261038525 | [
"Apache-2.0"
] | null | null | null | prototypes/python_prototype/empiricalEstimation.py | Elbercaio/adaptive-engine | c51661ad64d95c7ed36508c90787521261038525 | [
"Apache-2.0"
] | null | null | null | prototypes/python_prototype/empiricalEstimation.py | Elbercaio/adaptive-engine | c51661ad64d95c7ed36508c90787521261038525 | [
"Apache-2.0"
] | null | null | null | ##Author: Ilia Rushkin, VPAL Research, Harvard University, Cambridge, MA, USA
######################################
####Estimation functions are below####
######################################
import numpy as np
def knowledge(self, problems, correctness):
"""
Find the empirical knowledge of a single user given a
chronologically ordered sequence of items submitted.
"""
# global m_slip_neg_log, m_guess_neg_log, n_los
n_los = len(self.los)
m_slip_u = self.m_slip_neg_log[
problems,
]
m_guess_u = self.m_guess_neg_log[
problems,
]
N = len(problems)
#### In case there is only one problem, make sure the result of subsetting is a 1-row matrix, not a 1D array, so the dot products below work out.
m_slip_u = m_slip_u.reshape(N, n_los)
m_guess_u = m_guess_u.reshape(N, n_los)
####
z = np.zeros((N + 1, n_los))
x = np.repeat(0.0, N)
z[
0,
] = np.dot((1.0 - correctness), m_slip_u)
z[
N,
] = np.dot(correctness, m_guess_u)
if N > 1:
for n in range(1, N):
x[range(n)] = correctness[range(n)]
x[range(n, N)] = 1.0 - correctness[range(n, N)]
temp = np.vstack(
(
m_guess_u[
range(n),
],
m_slip_u[
n:,
],
)
)
z[
n,
] = np.dot(x, temp)
knowl = np.zeros((N, n_los))
for j in range(n_los):
ind = np.where(z[:, j] == min(z[:, j]))[0]
for i in ind:
temp = np.repeat(0.0, N)
if i == 0:
temp = np.repeat(1.0, N)
elif i < N:
temp[i:N] = 1.0
knowl[:, j] = knowl[:, j] + temp
knowl[:, j] = knowl[:, j] / len(ind)  # average the knowledge when there are multiple candidates (len(ind) > 1)
return knowl
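# A minimal sketch of calling knowledge() outside the engine (illustrative
# only; `eng` is a stand-in for `self` with just the attributes used here):
# one learning objective, answered incorrectly then correctly, so the
# inferred knowledge switches from 0 to 1 at the transition.
def _demo_knowledge():
    from types import SimpleNamespace
    eng = SimpleNamespace(
        los=[0],
        m_slip_neg_log=-np.log(np.array([[0.1], [0.1]])),
        m_guess_neg_log=-np.log(np.array([[0.2], [0.2]])),
    )
    return knowledge(eng, np.array([0, 1]), np.array([0.0, 1.0]))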
# This function estimates the BKT model using empirical probabilities
##To account for the fact that NaN and Inf elements of the estimated matrices should not be used as updates, this function replaces such elements with the corresponding elements of the current BKT parameter matrices.
##Thus, the outputs of this function do not contain any non-numeric values and should be used to simply replace the current BKT parameter matrices.
def estimate(
self, relevance_threshold=0.01, information_threshold=20, remove_degeneracy=True
):
# global n_items,n_los, m_k, transactions, L_i, m_trans, m_guess, m_slip, self.epsilon, useForTraining, n_users
n_items = len(self.items)
n_los = len(self.los)
n_users = len(self.users)
trans = np.zeros((n_items, n_los))
trans_denom = trans.copy()
guess = trans.copy()
guess_denom = trans.copy()
slip = trans.copy()
slip_denom = trans.copy()
p_i = np.repeat(0.0, n_los)
p_i_denom = p_i.copy()
# if ~('training_set' in globals()):
training_set = range(n_users)
for u in training_set:
##List problems that the user tried, in chronological order
# n_of_na=np.count_nonzero(np.isnan(m_timestamp[u,]))
# problems=m_timestamp[u,].argsort() ##It is important here that argsort() puts NaNs at the end, so we remove them from there
# if n_of_na>0:
# problems=problems[:-n_of_na] ##These are indices of items submitted by user u, in chronological order.
#
# problems=np.intersect1d(problems, useForTraining)
temp = self.transactions.loc[
(self.transactions.user_id == u)
& (self.transactions.problem_id.isin(self.useForTraining))
]
temp = temp.sort_values("time")
temp.index = range(np.shape(temp)[0])
## Now temp is the data frame of submits of a particular user u, arranged in chronological order. In particular, temp.problem_id is the list of problems in chronological order.
J = np.shape(temp)[0]
if J > 0:
m_k_u = self.m_k[
temp.problem_id,
]
##Reshape for the case J=1, we still want m_k_u to be a 2D array, not 1D.
m_k_u = m_k_u.reshape(J, n_los)
# Calculate the sum of relevances of user's experience for a each learning objective
if J == 1:
u_R = m_k_u[0]
else:
u_R = np.sum(m_k_u, axis=0)
##Implement the relevance threshold: zero-out what is not above it, set the rest to 1
# u_R=u_R*(u_R>relevance_threshold)
# u_R[u_R>0]=1
u_R = u_R > relevance_threshold
# m_k_u=m_k_u*(m_k_u>relevance_threshold)
# m_k_u[m_k_u>0]=1
m_k_u = m_k_u > relevance_threshold
# u_correctness=m_correctness[u,problems]
# u_knowledge=knowledge(problems, u_correctness)
u_knowledge = knowledge(self, temp.problem_id, temp.score)
# Now prepare the matrix by replicating the correctness column for each LO.
# u_correctness=np.tile(u_correctness,(n_los,1)).transpose()
##Contribute to the averaged initial knowledge.
p_i += (
u_knowledge[
0,
]
* u_R
)
p_i_denom += u_R
##Contribute to the trans, guess and slip probabilities (numerators and denominators separately).
for pr in range(J):
prob_id = temp.problem_id[pr]
shorthand = m_k_u[pr,] * (
1.0
- u_knowledge[
pr,
]
)
guess[prob_id,] += (
shorthand * temp.score[pr]
)
guess_denom[
prob_id,
] += shorthand
shorthand = (
m_k_u[
pr,
]
- shorthand
) ##equals m_k_u*u_knowledge
slip[
prob_id,
] += shorthand * (1.0 - temp.score[pr])
slip_denom[
prob_id,
] += shorthand
if pr < (J - 1):
shorthand = m_k_u[pr,] * (
1.0
- u_knowledge[
pr,
]
)
trans[prob_id,] += (
shorthand
* u_knowledge[
pr + 1,
]
)
trans_denom[
prob_id,
] += shorthand
##Normalize the results over users.
ind = np.where(p_i_denom != 0)
p_i[ind] /= p_i_denom[ind]
ind = np.where(trans_denom != 0)
trans[ind] /= trans_denom[ind]
ind = np.where(guess_denom != 0)
guess[ind] /= guess_denom[ind]
ind = np.where(slip_denom != 0)
slip[ind] /= slip_denom[ind]
##Replace with nans where denominators are below information cutoff
p_i[(p_i_denom < information_threshold) | (p_i_denom == 0)] = np.nan
trans[(trans_denom < information_threshold) | (trans_denom == 0)] = np.nan
guess[(guess_denom < information_threshold) | (guess_denom == 0)] = np.nan
slip[(slip_denom < information_threshold) | (slip_denom == 0)] = np.nan
##Remove guess and slip probabilities of 0.5 and above (degeneracy):
if remove_degeneracy:
# these two lines will throw warnings for comparisons to np.nan's
ind_g = np.where((guess >= 0.5) | (guess + slip >= 1))
ind_s = np.where((slip >= 0.5) | (guess + slip >= 1))
guess[ind_g] = np.nan
slip[ind_s] = np.nan
# Convert probabilities to odds (the log-odds version is left commented out below):
p_i = np.minimum(np.maximum(p_i, self.epsilon), 1.0 - self.epsilon)
trans = np.minimum(np.maximum(trans, self.epsilon), 1.0 - self.epsilon)
guess = np.minimum(np.maximum(guess, self.epsilon), 1.0 - self.epsilon)
slip = np.minimum(np.maximum(slip, self.epsilon), 1.0 - self.epsilon)
# L=np.log(p_i/(1-p_i))
L = p_i / (1.0 - p_i)
trans = trans / (1.0 - trans)
guess = guess / (1.0 - guess)
slip = slip / (1.0 - slip)
##Keep the versions with NAs in them:
L_i_nan = L.copy()
trans_nan = trans.copy()
guess_nan = guess.copy()
slip_nan = slip.copy()
ind = np.where(np.isnan(L) | np.isinf(L))
L[ind] = self.L_i[ind]
ind = np.where(np.isnan(trans) | np.isinf(trans))
trans[ind] = self.m_trans[ind]
ind = np.where(np.isnan(guess) | np.isinf(guess))
guess[ind] = self.m_guess[ind]
ind = np.where(np.isnan(slip) | np.isinf(slip))
slip[ind] = self.m_slip[ind]
return {
"L_i": L,
"trans": trans,
"guess": guess,
"slip": slip,
"L_i_nan": L_i_nan,
"trans_nan": trans_nan,
"guess_nan": guess_nan,
"slip_nan": slip_nan,
}
| 33.841912 | 216 | 0.5283 | 1,237 | 9,205 | 3.755861 | 0.199677 | 0.008179 | 0.010977 | 0.016789 | 0.124623 | 0.068661 | 0.028196 | 0.028196 | 0.012053 | 0.012053 | 0 | 0.014644 | 0.35459 | 9,205 | 271 | 217 | 33.96679 | 0.767379 | 0.29723 | 0 | 0.148352 | 0 | 0 | 0.008569 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010989 | false | 0 | 0.005495 | 0 | 0.027473 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3ad330362e01dc7f15821884e5a6bbe50194e31 | 953 | py | Python | scrap2rst/cmd.py | shimizukawa/scrap2rst | d6cab254d03f8e161f210e793083d8b26b44abd7 | [
"MIT"
] | 5 | 2019-06-29T03:17:09.000Z | 2021-06-19T20:41:04.000Z | scrap2rst/cmd.py | shimizukawa/scrap2rst | d6cab254d03f8e161f210e793083d8b26b44abd7 | [
"MIT"
] | 24 | 2019-08-14T23:07:43.000Z | 2019-09-30T04:59:42.000Z | scrap2rst/cmd.py | shimizukawa/scrap2rst | d6cab254d03f8e161f210e793083d8b26b44abd7 | [
"MIT"
] | null | null | null | """
command-line interface for scrap2rst
"""
import sys
import logging
import argparse
from scrap2rst import __version__
from scrap2rst.logging import setup_logger
from scrap2rst.converter import convert
logger = logging.getLogger(__name__)
def get_argparser():
p = argparse.ArgumentParser()
p.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__)
p.add_argument('-d', '--debug', action='store_true', default=False, help='debug mode')
p.add_argument('url', metavar='URL', help='url to convert')
p.add_argument('-o', '--output', type=argparse.FileType(mode='w', encoding='utf-8'),
default=sys.stdout, help='output filename')
return p
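# Hedged command-line example (the URL is hypothetical):
#   scrap2rst https://scrapbox.io/someproject/SomePage -o some_page.rst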
def main():
args = get_argparser().parse_args()
setup_logger(args.debug)
output = convert(args.url)
if args.output:
args.output.write(output)
else:
print(output)
if __name__ == '__main__':
main()
| 24.435897 | 90 | 0.673662 | 120 | 953 | 5.1 | 0.475 | 0.026144 | 0.078431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006394 | 0.179433 | 953 | 38 | 91 | 25.078947 | 0.776215 | 0.037775 | 0 | 0 | 0 | 0 | 0.126513 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.24 | 0 | 0.36 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3af3f55df4b117b6e91dedf512a6d86c7573fca | 8,605 | py | Python | pricing/__init__.py | troylar/aws-pricing-importer | 6fabbea533e07e1fad1f3840a28e2a0fb435c888 | [
"MIT"
] | 3 | 2019-04-10T13:46:51.000Z | 2019-04-16T14:32:55.000Z | pricing/__init__.py | troylar/aws-pricing-to-athena | 6fabbea533e07e1fad1f3840a28e2a0fb435c888 | [
"MIT"
] | 1 | 2021-06-01T23:23:24.000Z | 2021-06-01T23:23:24.000Z | pricing/__init__.py | troylar/aws-pricing-importer | 6fabbea533e07e1fad1f3840a28e2a0fb435c888 | [
"MIT"
] | null | null | null | import json
import os
import os.path
import requests
import csv
import tempfile
from io import StringIO
import boto3
import time
import urllib3
create_table = """
CREATE EXTERNAL TABLE `[OFFER]`(
[COLUMNS]
[PARTITION]
ROW FORMAT SERDE
'org.apache.hadoop.hive.serde2.OpenCSVSerde'
STORED AS INPUTFORMAT
'org.apache.hadoop.mapred.TextInputFormat'
OUTPUTFORMAT
'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
LOCATION
's3://aws-price-magician-data/[OFFER]/'
TBLPROPERTIES (
'classification'='csv',
'columnsOrdered'='true',
'compressionType'='none',
'delimiter'=',',
'skip.header.line.count'='1',
'typeOfData'='file')
"""
class PriceManager:
def __init__(self, **kwargs):
urllib3.disable_warnings()
self.prefix = kwargs.get("Prefix", "https://pricing.us-east-1.amazonaws.com")
self.price_folder = kwargs.get("PriceFolder", "./price_files")
self.ddl_folder = kwargs.get("DDLFolder", "./ddl")
self.athena_database = kwargs.get("AthenaDatabase", "awspricedatabase")
self.bucket_name = kwargs.get("BucketName", "aws-price-magician-data")
self.price_url = kwargs.get('PriceUrl', 'https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/index.json')
self.client = boto3.client('athena')
self.glue_client = boto3.client('glue')
def load_partitions(self):
queries = []
files = os.listdir(self.ddl_folder)
while len(files) > 0:
for ddl_file in files[:3]:
offer = ddl_file.split('_')[3].split('.')[0]
r = self.client.start_query_execution(
QueryString = 'MSCK REPAIR TABLE {};'.format(offer.lower()),
QueryExecutionContext = {
'Database': self.athena_database
},
ResultConfiguration = {
'OutputLocation': 's3://{}/query_results'.format(self.bucket_name)
})
queries.append(r['QueryExecutionId'])
files.remove(ddl_file)
while len(queries) > 0:
r = self.client.batch_get_query_execution(QueryExecutionIds=queries[:50])
for query_exec in r['QueryExecutions']:
# Athena nests the execution state under Status -> State
if query_exec['Status']['State'] in ('QUEUED', 'RUNNING'):
    continue
else:
    print(query_exec['Status']['State'])
queries.remove(query_exec['QueryExecutionId'])
print('Queries left: {}'.format(len(queries)))
time.sleep(2)
def create_tables(self):
queries = []
files = []
self.glue_client.create_database(
DatabaseInput={
'Name': self.athena_database
})
for ddl_file in os.listdir(self.ddl_folder):
files.append(ddl_file)
with open('{}/{}'.format(self.ddl_folder, ddl_file), 'r') as ddl:
query = ddl.read()
print('Starting execution for ' + ddl_file)
r = self.client.start_query_execution(
QueryString = query,
QueryExecutionContext = {
'Database': self.athena_database
},
ResultConfiguration = {
'OutputLocation': 's3://{}/query_results'.format(self.bucket_name)
})
queries.append(r['QueryExecutionId'])
while len(queries) > 0:
r = self.client.batch_get_query_execution(QueryExecutionIds=queries[:50])
for query_exec in r['QueryExecutions']:
# Athena nests the execution state under Status -> State
if query_exec['Status']['State'] in ('QUEUED', 'RUNNING'):
    continue
else:
    print(query_exec['Status']['State'])
queries.remove(query_exec['QueryExecutionId'])
print('Queries left: {}'.format(len(queries)))
time.sleep(2)
def upload_files(self):
session = boto3.Session()
s3 = session.resource('s3')
bucket = s3.Bucket(self.bucket_name)
for subdir, dirs, files in os.walk(self.price_folder):
for file in files:
full_path = os.path.join(subdir, file)
with open(full_path, 'rb') as data:
print('Uploading {}'.format(full_path))
bucket.put_object(Key=full_path[len(self.price_folder)+1:], Body=data)
def get_master_price_list(self):
print('Downloading master price list')
data = requests.get(self.price_url, verify=False)
return json.loads(data.text)
def download_offer_file(self, path):
print('Downloading {}'.format(path))
r = requests.get(path, verify=False)
tmp = tempfile.NamedTemporaryFile(delete=False)
with open(tmp.name, 'w') as tmp_file:
tmp_file.write('\n'.join(r.content.decode().split('\n')[5:]))
s = StringIO(r.content.decode())
rows = csv.reader(s)
version = ""
for row in rows:
if row and row[0] == 'Version':  # guard against blank rows
version = row[1]
break
return tmp.name, version
def get_row_location(self, row):
has_location = 'Location' in dict(row).keys()
if has_location and row['Location']:
loc = row['Location']
else:
loc = "location-agnostic"
return has_location, loc
def make_folders(self, folders):
for f in folders:
if not os.path.exists(f):
os.makedirs(f)
def download_prices(self):
values = self.get_master_price_list()
writers = {}
files = {}
region_services = {}
for offer in list(values["offers"].keys()):
o = values["offers"][offer]
path = "{0}{1}".format(self.prefix,
o["currentVersionUrl"].replace('json',
'csv'))
offer_file, version = self.download_offer_file(path)
with open(offer_file, 'rt') as csvfile:
rows = csv.DictReader(csvfile)
columns = []
ddl_column_text = ""
for row in rows:
has_location, loc = self.get_row_location(row)
if loc not in region_services:
region_services[loc] = []
if offer not in region_services[loc]:
region_services[loc].append(offer)
loc_folder = "{}/{}/location={}".format(self.price_folder, offer, loc)
self.make_folders([self.ddl_folder, loc_folder])
offer_loc = offer + loc
offer_file = "{}/{}.csv".format(loc_folder, offer)
if offer_loc not in files.keys():
files[offer_loc] = open(offer_file, 'w') # noqa: E501
writers[offer_loc] = csv.writer(files[offer_loc],
                                quotechar='"',
                                quoting=csv.QUOTE_ALL)
# write the header row once, when the file is first created
writers[offer_loc].writerow(list(row.keys()))
if not columns:
columns = row.keys()
for col in columns:
# Athena will throw a 'duplicate column' error if we add the partition column
if has_location and col == 'Location':
continue
ddl_column_text = '{} `{}` string,\n'.format(ddl_column_text, col)
# strip off the trailing comma
ddl_column_text = ddl_column_text[:-2] + ")"
# write the data row
writers[offer_loc].writerow(list(dict(row).values()))
create_ddl = create_table.replace('[COLUMNS]', ddl_column_text).replace('[OFFER]', offer)
if has_location:
create_ddl = create_ddl.replace('[PARTITION]', 'PARTITIONED BY (\n `location` string)\n')
else:
create_ddl = create_ddl.replace('[PARTITION]', '\n')
with open('{}/create_table_ddl_{}.sql'.format(self.ddl_folder, offer),'w') as f:
f.write(create_ddl)
print('Offer {} processed'.format(offer))
for key in files.keys():
    files[key].close()  # flush buffered rows to disk
    del writers[key]
files = {}
| 40.399061 | 117 | 0.525276 | 879 | 8,605 | 4.990899 | 0.25711 | 0.018236 | 0.01778 | 0.008206 | 0.253248 | 0.243219 | 0.227718 | 0.180533 | 0.180533 | 0.180533 | 0 | 0.007223 | 0.356421 | 8,605 | 212 | 118 | 40.589623 | 0.78494 | 0.015805 | 0 | 0.235294 | 0 | 0.005348 | 0.16198 | 0.044542 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048128 | false | 0 | 0.053476 | 0 | 0.122995 | 0.048128 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3b008c8303bfbb30f36a958105a103791dbc6b7 | 1,920 | py | Python | ensembling.py | Masdevallia/3rd-place-Kaggle-SIIM-ISIC-Melanoma-Classification | 69c9481c4f00961c52c1e4d580666b286deba6cf | [
"MIT"
] | 14 | 2020-09-08T08:10:56.000Z | 2022-03-16T05:47:46.000Z | ensembling.py | Masdevallia/3rd-place-Kaggle-SIIM-ISIC-Melanoma-Classification | 69c9481c4f00961c52c1e4d580666b286deba6cf | [
"MIT"
] | null | null | null | ensembling.py | Masdevallia/3rd-place-Kaggle-SIIM-ISIC-Melanoma-Classification | 69c9481c4f00961c52c1e4d580666b286deba6cf | [
"MIT"
] | 8 | 2020-09-09T14:12:50.000Z | 2021-09-11T16:27:39.000Z |
import sys
import argparse
import pandas as pd
import numpy as np
import os
def get_settings():
parser = argparse.ArgumentParser(description='Script to ensemble submissions.')
parser.add_argument('--include_external', help='whether to include external data (1) or not (0).',
default=0, type = int)
parser.add_argument('--metadata_weight', help='Weight assigned to metadata (0-1).',
default=0.2, type = float)
parser.add_argument('--ensemble_filename', help='The desired name for the ensemble file.',
default='ensemble', type = str)
args = parser.parse_args()
return args
def main():
config = get_settings()
include_external = config.include_external
metadata_weight = config.metadata_weight
ensemble_filename = config.ensemble_filename
# Image submissions
image_sub_files = os.listdir('./submissions/image_data')
image_sub_list = [pd.read_csv(f'./submissions/image_data/{e}') for e in image_sub_files]
if include_external == 1:
ext_image_sub_files = os.listdir('./submissions/external_image_data')
ext_image_sub_list = [pd.read_csv(f'./submissions/external_image_data/{e}') for e in ext_image_sub_files]
image_sub_list = image_sub_list + ext_image_sub_list
target_images = np.mean([e.target for e in image_sub_list], axis=0)
# Metadata submission
metadata_sub_files = os.listdir('./submissions/metadata')
sub_metadata = pd.read_csv(f'./submissions/metadata/{metadata_sub_files[0]}')
# Image + Metadata
target = ((1-metadata_weight) * target_images) + (metadata_weight * sub_metadata.target)
# Save ensemble
sub_ensemble = sub_metadata.copy()
sub_ensemble.target = target
sub_ensemble.to_csv(f'./ensemble/{ensemble_filename}.csv', index=False)
print(f'File {ensemble_filename}.csv saved in ./ensemble.')
if __name__ == '__main__':
main()
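
# Example invocation (hypothetical paths; assumes ./submissions/image_data,
# ./submissions/metadata and ./ensemble directories exist):
#   python ensembling.py --include_external 1 --metadata_weight 0.3 --ensemble_filename blend_v1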
| 35.555556 | 113 | 0.70625 | 258 | 1,920 | 4.968992 | 0.282946 | 0.062403 | 0.056162 | 0.039782 | 0.183307 | 0.127925 | 0.051482 | 0.051482 | 0 | 0 | 0 | 0.006975 | 0.178646 | 1,920 | 53 | 114 | 36.226415 | 0.805961 | 0.035417 | 0 | 0 | 0 | 0 | 0.268147 | 0.133803 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.138889 | 0 | 0.222222 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3b57eaa438b0e0ff2f40a515ff88ec6d01bf26c | 5,202 | py | Python | roots/format.py | TomRegan/roots_legacy | cf145d80b526b7142f869eb5d869a27c3d7bb1eb | [
"Apache-2.0"
] | null | null | null | roots/format.py | TomRegan/roots_legacy | cf145d80b526b7142f869eb5d869a27c3d7bb1eb | [
"Apache-2.0"
] | null | null | null | roots/format.py | TomRegan/roots_legacy | cf145d80b526b7142f869eb5d869a27c3d7bb1eb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 Tom Regan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""e-book formats
"""
import zipfile
import xml.etree.ElementTree as ET
import re
from urlparse import urljoin
from urllib import pathname2url as to_url
from hashlib import sha1
from HTMLParser import HTMLParser
class BaseFormat(object):
def load(self, srcpath):
"""Return a dict representing an e-book.
"""
return {}
def _author(self, string):
"""Return the normalised author name.
"""
if string is None or len(string) == 0:
return None
names = sorted({x.strip() for x in string.split(';')})
authors = self._reverse_csv_list(names)
init, last = authors[:-1], authors[-1]
if len(init) != 0:
return ', '.join(init) + ' and ' + last
return last
    def _reverse_csv_list(self, names):
        """If an element of the list contains a comma, its two parts are
        reversed and the comma removed; otherwise it is unchanged.

        ['b, a', 'c d'] -> ['a b', 'c d']
        """
        return [' '.join([first.strip(), last.strip()])
                if first is not None else last for (last, first) in
                [name.split(',') if len(name.split(',')) == 2 else (name, None)
                 for name in names]]
def _isbn(self, number):
"""Return an ISBN given a (possibly malformed) string.
"""
if number is None or len(number) == 0:
return
number = number.replace('-', '')
        expr = (r'^[^\d]*('
                r'(97[89])?'  # ean prefix, absent for ISBN-10
                r'\d{2}'      # group
                r'\d{4}'      # registrant
                r'\d{3}'      # publication
                r'[\dxX]'     # check digit
                r')[^\d]*$')
matches = re.search(expr, number)
return matches is not None and matches.group(1) or None
def _search(self, element, tag_name, attribute=None):
if element is None or tag_name is None:
return
elements = [e for e in element.iter() if e.tag.endswith(tag_name)]
if elements is None or len(elements) < 1:
return
elif attribute is not None:
for element in elements:
for attr in element.items():
if attr[1].lower() == attribute.lower():
return self._unescape(element.text)
        if elements[0].text:
            return self._unescape(elements[0].text)
        # no text content: return the element itself so callers can read
        # its attributes (see _load_metadata)
        return elements[0]
def _unescape(self, string):
return HTMLParser().unescape(string)
class EpubFormat(BaseFormat):
def __init__(self, configuration):
self._configuration = configuration
def load(self, srcpath):
"""Reads the metadata from an ebook file.
"""
content_xml = self._load_metadata(srcpath)
if content_xml is not None:
book = self._load_ops_data(content_xml)
            if self._configuration['import']['hash']:
                with open(srcpath, 'rb') as book_file:  # don't shadow the zipfile module
                    book['_sha_hash'] = sha1(book_file.read()).hexdigest()
return book
def _load_metadata(self, epub_filename):
"""Reads an epub file and returns its OPS / OEBPS blob.
"""
        if not zipfile.is_zipfile(epub_filename):
            raise Exception("Not importing %s because it is not a .epub file."
                            % epub_filename.replace("./", ""))
with zipfile.ZipFile(epub_filename, 'r') as epub_file:
meta_data = None
try:
meta_data = epub_file.read("META-INF/container.xml")
except Exception:
                raise Exception("Could not locate a container file in %s."
                                % epub_filename)
meta_xml = ET.fromstring(meta_data)
full_path = self._search(meta_xml, "rootfile")
if full_path is None:
                raise Exception("Could not locate a metadata file in %s."
                                % epub_filename)
return ET.fromstring(epub_file.read(full_path.attrib["full-path"]))
def _load_ops_data(self, xml_data):
"""Constructs a dictionary from OPS XML data.
"""
title = self._search(xml_data, 'title')
author = self._author(self._search(xml_data, 'creator'))
isbn = self._isbn(self._search(xml_data, 'identifier', 'isbn'))
if isbn is None:
isbn = ''
if author is None or title is None:
raise Exception("Required metadata is missing.")
return {
'title': title,
'author': author,
'isbn': isbn
}
| 36.377622 | 79 | 0.572472 | 650 | 5,202 | 4.481538 | 0.321538 | 0.018538 | 0.013732 | 0.011329 | 0.032956 | 0.019911 | 0 | 0 | 0 | 0 | 0 | 0.009014 | 0.31757 | 5,202 | 142 | 80 | 36.633803 | 0.811549 | 0.217032 | 0 | 0.076087 | 0 | 0 | 0.080261 | 0.005518 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.097826 | 0.01087 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3b58ba311536941e44b47fb58812f09838e1ac8 | 11,567 | py | Python | frost/client.py | Sigvesor/frost-client | 3c0cf2af27d1ab471ce06331b7b0e6cc87f421e6 | [
"MIT"
] | 7 | 2019-02-17T05:00:33.000Z | 2020-11-10T13:13:26.000Z | frost/client.py | Sigvesor/frost-client | 3c0cf2af27d1ab471ce06331b7b0e6cc87f421e6 | [
"MIT"
] | 2 | 2020-03-24T18:23:18.000Z | 2020-03-31T10:22:02.000Z | frost/client.py | Sigvesor/frost-client | 3c0cf2af27d1ab471ce06331b7b0e6cc87f421e6 | [
"MIT"
] | 1 | 2020-04-03T23:53:30.000Z | 2020-04-03T23:53:30.000Z | import os
from urllib.parse import urljoin
import requests
from requests.auth import HTTPBasicAuth
from .models import SourcesResponse
from .models import AvailableTimeSeriesResponse
from .models import ObservationsResponse
FROST_API_KEY = os.environ.get('FROST_API_KEY', None)
class APIError(Exception):
""" Raised when the API responds with a 400 og 404 """
def __init__(self, e):
self.code = e['code']
class Frost(object):
"""Interface to frost.met.no API
    The Frost API key should be exposed as an environment variable called
    `FROST_API_KEY`
    or passed as a username parameter when creating an instance of the class.
>>> frost = Frost(username="myapikey")
"""
def __init__(self, username=None):
"""
:param str username: your own frost.met.no username/key.
"""
self.base_url = 'https://frost.met.no/'
self.api_version = 'v0'
self.session = requests.Session()
self.username = username or FROST_API_KEY
if not self.username:
raise Exception(
"""
You must provide a username parameter
or set the FROST_API_KEY environment variable to
use the Frost class
""")
self.session.auth = (self.username, '')
def let_it_go(self):
return """
Let it go, let it go
Can't hold it back anymore
Let it go, let it go
Turn away and slam the door
I don't care what they're going to say
Let the storm rage on
The cold never bothered me anyway
"""
    def stringify_kwargs(self, kwargs):
        for key, value in kwargs.items():
            if isinstance(value, list):
                kwargs[key] = ",".join(str(v) for v in value)
        return kwargs
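    # e.g. stringify_kwargs({'ids': ['SN18700', 'SN50540']}) returns
    # {'ids': 'SN18700,SN50540'} (illustrative station IDs)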
def make_request(self, method, **kwargs):
"""
Make an API request, with all kwargs passed through as URL params
"""
url = urljoin(self.base_url, method + '/' +
self.api_version + '.jsonld')
response = self.session.get(
url,
params=kwargs, timeout=60)
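        # Frost returns a JSON 'error' body for most 4xx responses, which is
        # raised as APIError below; only raise an HTTP error for status codes
        # outside the 200-500 range.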
if response.status_code < 200 or response.status_code > 500:
response.raise_for_status()
json = response.json()
if 'data' in json:
return json['data']
if 'error' in json:
raise APIError(json['error'])
return json
def get_sources(self, **kwargs):
"""Get metadata for the source entitites defined in the Frost API.
Use the query parameters to filter the set of sources returned.
        :param str ids: The Frost API source ID(s) that you want metadata for.
            Enter a Python list or a comma-separated string to select
            multiple sources.
:param str types: The type of Frost API source that
you want metadata for.
:param str geometry: Get Frost API sources defined by a
specified geometry.
Geometries are specified as either nearest(POINT(...)) or
POLYGON(...)
:param str nearestmaxcount: The maximum number of sources returned when
using nearest(POINT(...)) for geometry. The default value is 1.
        :param str validtime: If specified, only sources that have been,
            or still are, valid/applicable during some part of this interval
            may be included in the result. Specify date/date, date/now,
            date or now, where date is of the form YYYY-MM-DD,
            e.g. 2017-03-06.
            The default is 'now', i.e. only currently valid/applicable
            sources are included.
:param str name: If specified, only sources whose 'name' attribute
matches this
:param str country: If specified, only sources whose 'country'
or 'countryCode' attribute matches this
:param str county: If specified, only sources whose 'county'
or 'countyId' attribute matches this .
:param str municipality: If specified, only sources whose
'municipality' or 'municipalityId' attribute matches this
:param str wmoid: If specified, only sources whose 'wmoId'
attribute matches this
:param str stationholder: If specified, only sources whose
'stationHolders' attribute contains at least one name that
matches this
:param str externalid: If specified, only sources whose 'externalIds'
attribute contains at least one name that matches this
:param str fields: A list of the fields that should be
present in the response.
:returns: :meth:`SourcesResponse`
:raises APIError: raises exception if error in the returned data or
not found.
:examples:
>>> f = Frost()
>>> f.get_sources(county='12')
"""
kwargs = self.stringify_kwargs(kwargs)
res = self.make_request('sources',
**kwargs
)
return SourcesResponse(res)
def get_available_timeseries(self, include_sourcemeta=False, **kwargs):
"""Find timeseries metadata by source and/or element
        :param bool include_sourcemeta: If True, return a tuple with time
            series and source meta.
:param list/str sources: The ID(s) of the data sources to get time
series for
:param str referencetime: The time range to get time series for as
extended ISO-8601 format.
:param list/str elements: The elements to get time series for as a
list of Element ids.
:param list/str timeoffsets: The time offsets to get time series for
as a list of
ISO-8601 periods, e.g. 'PT6H,PT18H'. If left out,
the output is not filtered on time offset.
        :param list/str timeresolutions: The time resolutions to get time
            series for as a list of
            ISO-8601 periods, e.g. 'PT6H,PT18H'. If left out,
            the output is not filtered on time resolution.
        :param str timeseriesids: The internal time series IDs to get time
            series for as a
            list of integers, e.g. '0,1'. If left out,
            the output is not filtered on internal time series ID.
:param str performancecategories: The performance categories to get
time series for as a
list of letters, e.g. 'A,C'. If left out,
the output is not filtered on performance category.
:param str exposurecategories: The exposure categories to get time
series for as a
list of integers, e.g. '1,2'.
If left out, the output is not filtered on exposure category.
:param str levels: The sensor levels to get observations for as a
list of
numbers, e.g. '0.1,2,10,20'. If left out, the output is not
filtered on sensor level.
:param str level_types: The sensor level types to get records for as a
list of search filters
:param str level_units: The sensor level units to get records for as a
list of search filters
:param str fields: Fields to include in the output as a list.
If specified, only these fields are included in the output.
If left out, all fields are included.
:returns: :meth:`AvailableTimeSeriesResponse`
:raises APIError: raises exception if error in the returned data or
not found.
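        :examples: (illustrative; SN18700 is the Blindern station referenced
            in :meth:`get_observations`)
        >>> f = Frost()
        >>> f.get_available_timeseries(sources=['SN18700'])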
"""
kwargs = self.stringify_kwargs(kwargs)
res = self.make_request('observations/availableTimeSeries',
**kwargs
)
sources = None
if include_sourcemeta:
source_ids = list(set([s["sourceId"].split(':')[0] for s in res]))
sources = self.get_sources(ids=source_ids)
return AvailableTimeSeriesResponse(res, sources=sources)
def get_observations(self, include_sourcemeta=False, **kwargs):
"""Get observation data from the Frost API.
        :param bool include_sourcemeta: If True, return a tuple
            with time series and source meta.
:param list/str sources: The ID(s) of the data sources to get
observations for as a list of Frost API station
IDs, e.g. _SN18700_ for Blindern.
:param str referencetime: The time range to get observations
for in either
extended ISO-8601 format or the single word 'latest'.
:param list/str elements: The elements to get observations for as a
list of names that follow the Frost API naming convention.
:param str format: The output format of the result. (required)
:param str maxage: The maximum observation age as an ISO-8601 period,
like 'P1D'. Applicable only when referencetime=latest. In general,
the lower the value of maxage, the shorter the request will take
to complete. The default value is 'PT3H'.
:param str limit: The maximum number of observation times to be
returned for each source/element combination, counting from the
most recent time. Applicable only when referencetime=latest.
Specify either 'all' to get all available times, or a positive
integer. The default value is 1.
:param list/str timeoffsets: The time offsets to get observations
for as a list of ISO-8601 periods, e.g. 'PT6H,PT18H'.
If left out, the output is not filtered on time offset.
:param list/str timeresolutions: The time resolutions to
get observations for as a list of ISO-8601 periods, e.g.
'PT6H,PT18H'. If left out, the output is not filtered on time
resolution.
:param str timeseriesids: The internal time series IDs to get
observations for as a list of integers, e.g. '0,1'.
If left out, the output is not filtered on internal time series ID.
:param str performancecategories: The performance categories to
get observations for as a list of letters, e.g. 'A,C'.
Enter a list to specify multiple performance categories.
If left out, the output is not filtered on performance category.
:param str exposurecategories: The exposure categories to
get observations for as a list of integers, e.g. '1,2'.
If left out, the output is not filtered on exposure category.
:param str levels: The sensor levels to get observations for as a
list of numbers, e.g. '0.1,2,10,20'.
If left out, the output is not filtered on sensor level.
:param str fields: Fields to include in the output as a
list. If specified, only these fields are included in the output.
If left out, all fields are included.
:returns: :meth:`ObservationsResponse`
:raises APIError: raises exception if error in the returned data or
not found.
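        :examples: (illustrative; uses the Blindern station ID and the
            'latest' reference time described above)
        >>> f = Frost()
        >>> f.get_observations(sources=['SN18700'], referencetime='latest')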
"""
kwargs = self.stringify_kwargs(kwargs)
res = self.make_request('observations',
**kwargs
)
sources = None
if include_sourcemeta:
source_ids = list(set([s["sourceId"].split(':')[0] for s in res]))
sources = self.get_sources(ids=source_ids)
return ObservationsResponse(res, sources=sources)
| 42.369963 | 79 | 0.61736 | 1,517 | 11,567 | 4.669743 | 0.208965 | 0.035008 | 0.018775 | 0.023998 | 0.529927 | 0.455957 | 0.444099 | 0.444099 | 0.417137 | 0.394269 | 0 | 0.012416 | 0.317628 | 11,567 | 272 | 80 | 42.525735 | 0.885088 | 0.619348 | 0 | 0.210526 | 0 | 0 | 0.128055 | 0.010427 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.092105 | 0.013158 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3b5c82cda0e55aa0b6ba4cf5db98b3355aacc13 | 3,411 | py | Python | llvd/downloader.py | rahil-sh/llvd | e0e55db66b0a7a828f6040cbac8679c304e1c5db | [
"MIT"
] | 4 | 2022-01-03T08:19:08.000Z | 2022-02-04T16:51:39.000Z | llvd/downloader.py | rahil-sh/llvd | e0e55db66b0a7a828f6040cbac8679c304e1c5db | [
"MIT"
] | null | null | null | llvd/downloader.py | rahil-sh/llvd | e0e55db66b0a7a828f6040cbac8679c304e1c5db | [
"MIT"
] | 1 | 2022-01-03T08:19:09.000Z | 2022-01-03T08:19:09.000Z |
from tqdm import tqdm
from itertools import starmap
import requests
import click
import re
from llvd.utils import clean_name, subtitles_time_format, throttle
def download_video(url, index, filename, path, delay=None):
"""
Downloads a video and saves it by its name plus index for easy sorting
"""
    if delay:
        throttle(delay)
maximum_retries = 5
with open(f'{path}/{index:0=2d}-{clean_name(filename)}.mp4', 'wb') as f:
download_size = 0
while maximum_retries > 0:
            # NOTE: constructing an HTTPAdapter without mounting it on a
            # Session has no effect; retries are handled by this loop instead.
response = requests.get(
url, stream=True, headers={'Accept-Encoding': None, 'Content-Encoding': 'gzip'})
download_size = response.headers.get('content-length')
if download_size is None and maximum_retries > 0:
maximum_retries -= 1
else:
break
        pbar = tqdm(
            total=int(download_size) if download_size else None,
            initial=0,
            unit='B',
            unit_scale=True,
            position=0,
            leave=True)
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar.set_description("Downloading video... ")
pbar.update(len(chunk))
pbar.close()
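
# Example call (hypothetical URL and paths):
#   download_video('https://example.com/clips/intro.mp4', 1, 'Welcome', './course', delay=2)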
def download_subtitles(index, subs, video_name, path, video_duration):
"""
    Write an SRT subtitle file, matching each caption to its time range.
"""
def subs_to_lines(idx, sub):
starts_at = sub['transcriptStartAt']
ends_at = subs[idx]['transcriptStartAt'] if idx < len(
subs) else video_duration
caption = sub['caption']
return f"{idx}\n" \
f"{subtitles_time_format(starts_at)} --> {subtitles_time_format(ends_at)}\n" \
f"{caption}\n\n"
with open(f"{path}/{index:0=2d}-{clean_name(video_name).strip()}.srt", 'wb') as f:
        click.echo("Downloading subtitles...")
for line in starmap(subs_to_lines, enumerate(subs, start=1)):
f.write(line.encode('utf8'))
def download_exercises(links, path):
"""
Downloads exercises
"""
for link in links:
maximum_retries = 3
filename = re.split("exercises/(.+).zip", link)[1]
with open(f"{path}/{clean_name(filename)}.zip", 'wb') as f:
download_size = 0
while maximum_retries > 0:
                # NOTE: constructing an HTTPAdapter without mounting it on a
                # Session has no effect; retries are handled by this loop instead.
response = requests.get(
link, stream=True, headers={'Accept-Encoding': None, 'Content-Encoding': 'gzip'})
download_size = response.headers.get('content-length')
if download_size is None and maximum_retries > 0:
maximum_retries -= 1
else:
break
            pbar = tqdm(
                total=int(download_size) if download_size else None,
                initial=0,
                unit='B',
                unit_scale=True,
                position=0,
                leave=True)
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar.set_description("Downloading exercise files... ")
pbar.update(len(chunk))
pbar.close()
print("\n")
| 35.164948 | 101 | 0.562005 | 390 | 3,411 | 4.779487 | 0.317949 | 0.075107 | 0.032189 | 0.020923 | 0.520386 | 0.520386 | 0.491416 | 0.491416 | 0.491416 | 0.459227 | 0 | 0.013095 | 0.328349 | 3,411 | 96 | 102 | 35.53125 | 0.800524 | 0.045148 | 0 | 0.538462 | 0 | 0 | 0.14772 | 0.063398 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.102564 | 0 | 0.166667 | 0.012821 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3b8ce5fd606e50e10f8ca74fe1352d18e7c8eec | 3,938 | py | Python | drivers/plot_Porb_Prot.py | lgbouma/earhart | 1d2b65d58655725f43c1bf9705b897bf767d4ca1 | [
"MIT"
] | null | null | null | drivers/plot_Porb_Prot.py | lgbouma/earhart | 1d2b65d58655725f43c1bf9705b897bf767d4ca1 | [
"MIT"
] | null | null | null | drivers/plot_Porb_Prot.py | lgbouma/earhart | 1d2b65d58655725f43c1bf9705b897bf767d4ca1 | [
"MIT"
] | null | null | null | """
The stellar spin periods and planetary orbital periods originally collected by
Penev et al (2018) are shown in Figure~\ref{fig:Pspin_vs_Porb}. We only show
hot Jupiter systems with spin period S/N ratios of at least 5, and have colored
the hot Jupiters by whether their stellar radius is below or above
$1.2R_\odot$. For a dwarf star, this radius corresponds to effective
temperatures above and below roughly 6000$\,$K, roughly at the F9V-G0V
boundary, and slightly below the Kraft break where the stellar spindown becomes
particularly inefficient. This control between early and late spectral types
was apparently not needed, as four of the five other hot Jupiter host stars
with the shortest rotation periods have radii below $1.2R_\odot$ (CoRoT-18,
CoRoT-2, HAT-P-20, and HAT-P-23). Similarly, the other hot Jupiters with
orbital periods below 1 day (HATS-18, WASP-19, and WASP-43) all also orbit G or
K dwarf hosts.
"""
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from aesthetic.plot import set_style, savefig
from scipy.stats import linregress
hdulist = fits.open('../data/Penev_2018_table1_vizier.fits')
d = hdulist[1].data
hdulist.close()
# 'ID', 'Per', 'E_Per', 'e_per_lc', 'SPer', 'E_SPer', 'e_sper_lc', 'M_', 'E_M_',
# 'e_m__lc', 'R_', 'E_R_', 'e_r__lc', 'Mp', 'E_Mp', 'e_mp_lc', 'Rp', 'E_Rp',
# 'e_rp_lc', 'l_logQ', 'logQ', 'E_logQ', 'e_logq_lc',
def plot_Porb_Prot(snr_cutoff=2, includefit=False):
sel = (
((d['SPer'] / d['E_SPer']) > snr_cutoff)
&
(d['SPer'] < 30)
)
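    # NOTE: 'sel' (all hosts regardless of radius) is computed but never used
    # below; it is kept for reference.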
sel0 = (
((d['SPer'] / d['E_SPer']) > snr_cutoff)
&
(d['R_'] < 1.2)
&
(d['SPer'] < 30)
)
sel1 = (
((d['SPer'] / d['E_SPer']) > snr_cutoff)
&
(d['R_'] >= 1.2)
&
(d['SPer'] < 30)
)
sel2 = (
((d['SPer'] / d['E_SPer']) > snr_cutoff)
&
(d['R_'] < 1.2)
&
(d['SPer'] < 8)
)
sel3 = (
((d['SPer'] / d['E_SPer']) > snr_cutoff)
&
(d['R_'] < 1.2)
&
(d['Per'] < 1)
)
print(d['ID'][sel2])
print(d['ID'][sel3])
#
# make the plot!
#
set_style()
f, ax = plt.subplots(figsize=(4,3))
Porb_1937 = 0.947
Prot_1937 = 6.5
ax.scatter(
d['Per'][sel0], d['SPer'][sel0], zorder=2, c='k', alpha=0.9, s=9,
linewidths=0,
label='HJs ($\mathrm{R}_{\star} < 1.2 \mathrm{R}_{\odot}$)'
)
ax.scatter(
d['Per'][sel1], d['SPer'][sel1], zorder=2, c='gray', alpha=0.9, s=9,
linewidths=0,
label='HJs ($\mathrm{R}_{\star} \geq 1.2 \mathrm{R}_{\odot}$)'
)
ax.plot(
Porb_1937, Prot_1937, mew=0.5, zorder=3,
markerfacecolor='yellow', markersize=18, marker='*',
color='k', lw=0, label='TOI 1937b (1.1$\mathrm{R}_{\odot}$)'
)
if includefit:
x = np.hstack([d['Per'][sel0], Porb_1937])
y = np.hstack([d['SPer'][sel0], Prot_1937])
slope, intercept, rvalue, pvalue, stderr = linregress(x, y)
label = f'Slope={slope:.1f}$\pm${stderr:.1f}, p={pvalue:.1e}'
ax.plot(x, intercept + slope*x, label=label)
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), handletextpad=0.1)
ax.set_xlabel('Planet orbital period [days]')
ax.set_ylabel('Stellar spin period [days]')
ax.set_xlim([0.5, 4.2])
ax.set_ylim([2, 28])
extrastr = '_withfit' if includefit else '_nofit'
figpath = (
f'../results/Porb_Prot/Porb_Prot_snrcut{snr_cutoff}{extrastr}.png'
)
savefig(f, figpath)
if __name__ == "__main__":
for includefit in [0, 1]:
for snr_cutoff in [1,2,3,4,5]:
plot_Porb_Prot(snr_cutoff=snr_cutoff, includefit=includefit)
| 30.061069 | 80 | 0.588624 | 607 | 3,938 | 3.667216 | 0.395387 | 0.026954 | 0.013477 | 0.015723 | 0.128931 | 0.110063 | 0.096586 | 0.096586 | 0.087152 | 0.087152 | 0 | 0.050201 | 0.241239 | 3,938 | 130 | 81 | 30.292308 | 0.694779 | 0.308532 | 0 | 0.178571 | 0 | 0.02381 | 0.181751 | 0.059106 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011905 | false | 0 | 0.047619 | 0 | 0.059524 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3b8e9007add6621a63ae003963d1b0398741980 | 4,918 | py | Python | conjugate_gradient_method.py | Kedow132/extremum_search_methods | 16f83a3979a9f1b3f738627f466f97ec72b2031d | [
"BSD-3-Clause"
] | null | null | null | conjugate_gradient_method.py | Kedow132/extremum_search_methods | 16f83a3979a9f1b3f738627f466f97ec72b2031d | [
"BSD-3-Clause"
] | null | null | null | conjugate_gradient_method.py | Kedow132/extremum_search_methods | 16f83a3979a9f1b3f738627f466f97ec72b2031d | [
"BSD-3-Clause"
] | null | null | null | from math import sqrt
import matplotlib.pyplot as plt
import numpy as np
from prettytable import PrettyTable
headers = PrettyTable(
    ['No.', 'x1_1', 'x2_1', 'x1_2', 'x2_2', 'function(x1,x2)', 'df'])
def table(count, old_function, x1_1, x2_1, x1_2, x2_2, new_function):
Tablelist = {
        'No.': count,
'x1_1': round(x1_1, 7),
'x2_1': round(x2_1, 7),
'x1_2': round(x1_2, 7),
'x2_2': round(x2_2, 7),
'function(x1,x2)': round(new_function, 8),
'df': abs(round(new_function - old_function, 8)),
}
headers.add_row(Tablelist.values())
def output(x1, x2, count, eps_y):
    return (f'Number of steps = {count}\nx1 = {x1}, x2 = {x2}\n'
            f'function(x1,x2) = {function(x1, x2)}\neps_y = {eps_y}')
def function(x1, x2):
#return 10 * x1 * x1 + 2 * x2 * x2 - 2 * x1 - 2 * x2 + 1 - 4 * x1 * x2
return 22 * x1 + 0.1 * x2 + np.exp(4.84 * x1 * x1 + 1.2 * x2 * x2)
def grad_function(x1, x2, delta):
def derivative(x1, x2, delta_x1, delta_x2):
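        # Central-difference approximation of the partial derivative along
        # the perturbed coordinate (one of delta_x1/delta_x2 is zero).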
der = ((function(x1 + delta_x1, x2 + delta_x2) - function(
x1 - delta_x1, x2 - delta_x2)) / (
2 * delta))
return der
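    # Negate both partials so the returned vector is the steepest-descent
    # direction (pointing towards decreasing function values).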
gradient = (
[-1 * derivative(x1, x2, delta, 0), -1 * derivative(x1, x2, 0, delta)])
return gradient
def gss_1(a, b, gradient, x1, x2, eps, s):
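    # Golden-section line search: shrink [a, b] by the golden-ratio factor s
    # until its width is below eps, and return the step length L that
    # minimizes function(x1 + L*gradient[0], x2 + L*gradient[1]).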
interval = (b - a)
a1 = a + interval * (1 - s)
b1 = a + interval * s
fa1 = function(x1 + a1 * gradient[0], x2 + a1 * gradient[1])
fb1 = function(x1 + b1 * gradient[0], x2 + b1 * gradient[1])
while abs(interval) >= eps:
        if fa1 <= fb1:  # '<=' searches for a minimum, '>=' for a maximum
b = b1
b1 = a1
fb1 = fa1
interval = interval * s
a1 = a + interval * (1 - s)
fa1 = function(x1 + a1 * gradient[0], x2 + a1 * gradient[1])
else:
a = a1
a1 = b1
fa1 = fb1
interval = interval * s
b1 = a + interval * s
fb1 = function(x1 + b1 * gradient[0], x2 + b1 * gradient[1])
L = (a + b) / 2
return L
def grad_move(old_x1, old_x2, lam, gradient):
x1 = old_x1 + lam * gradient[0]
x2 = old_x2 + lam * gradient[1]
old_gradient = gradient
gradient = grad_function(x1, x2, delta)
new_function = function(x1, x2)
return [new_function, x1, x2, gradient, old_gradient]
def CH(grad0, grad1):
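    # Conjugacy coefficient chi computed with the Polak-Ribiere formula:
    #   chi = g1^T (g1 - g0) / (g0^T g0)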
new_grad = np.array(grad1)
old_grad = np.array(grad0)
khi = (np.transpose(new_grad).dot(new_grad - old_grad)) / (
np.transpose(old_grad).dot(old_grad))
return khi
def s_1(old_gradient, new_gradient, chi):
sx1 = new_gradient[0] + chi * old_gradient[0]
sx2 = new_gradient[1] + chi * old_gradient[1]
s = [sx1, sx2]
return s
def conj_grad(x1, x2, delta):
points_x = [x1]
points_y = [x2]
func = [function(x1, x2)]
count = 0
new_function = function(x1, x2)
old_function = new_function + 100
eps_y = 0.000001
a, b = 0, 1
eps = (1 - a) / 100000
x1_0, x2_0 = x1, x2
gradient = grad_function(x1_0, x2_0, delta)
while abs(new_function - old_function) > eps_y:
count += 1
lam = gss_1(a, b, gradient, x1_0, x2_0, eps, s)
func_value, x1_1, x2_1, gradient, old_gradient = grad_move(x1_0, x2_0,
lam,
gradient)
points_x.append(x1_1)
points_y.append(x2_1)
func.append(func_value)
chi = CH(old_gradient, gradient)
s1 = s_1(old_gradient, gradient, chi)
lam = gss_1(a, b, s1, x1_1, x2_1, eps, s)
old_function = new_function
new_function, x1_2, x2_2, gradient = grad_move(x1_1, x2_1, lam, s1)[
:-1]
x1_0, x2_0 = x1_2, x2_2
table(count, old_function, x1_1, x2_1, x1_0, x2_0, new_function)
points_x.append(x1_2)
points_y.append(x2_2)
func.append(new_function)
return output(x1_0, x2_0, count, eps_y), [points_x, points_y], func
if __name__ == '__main__':
s = ((sqrt(5) - 1) / 2)
x1 = 1
x2 = 1
delta = 0.000001
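    # NOTE: 's' (the golden-ratio factor, ~0.618) and 'delta' are read as
    # module-level globals by conj_grad() and grad_move().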
info, points_coord, coord_func = conj_grad(x1, x2, delta)
print(headers)
print(info)
x_axis = y_axis = np.arange(0, 2, 0.001)
X, Y = np.meshgrid(x_axis, y_axis)
Zs = np.array(function(np.ravel(X), np.ravel(Y)))
Z = Zs.reshape(X.shape)
sorted_coord_func = sorted(coord_func)
cs = plt.contour(X, Y, Z, levels=sorted_coord_func)
plt.clabel(cs)
plt.xlabel('x1')
plt.ylabel('x2')
plt.plot(points_coord[0], points_coord[1])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z, zorder=2)
ax.plot(points_coord[0], points_coord[1], coord_func, color='red',
zorder=1)
plt.show()
| 30.930818 | 79 | 0.547377 | 745 | 4,918 | 3.420134 | 0.167785 | 0.036107 | 0.051805 | 0.016484 | 0.22292 | 0.142857 | 0.130298 | 0.087912 | 0.081633 | 0.05887 | 0 | 0.093491 | 0.312729 | 4,918 | 158 | 80 | 31.126582 | 0.659172 | 0.019723 | 0 | 0.093023 | 0 | 0 | 0.037975 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.077519 | false | 0 | 0.031008 | 0.015504 | 0.178295 | 0.015504 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3b9c03b0b6999a6a3e2dacc735a7de64457c8b0 | 784 | py | Python | tcga_encoder/analyses/old/dna_functions.py | tedmeeds/tcga_encoder | 805f9a5bcc422a43faea45baa0996c88d346e3b4 | [
"MIT"
] | 2 | 2017-12-19T15:32:46.000Z | 2018-01-12T11:24:24.000Z | tcga_encoder/analyses/old/dna_functions.py | tedmeeds/tcga_encoder | 805f9a5bcc422a43faea45baa0996c88d346e3b4 | [
"MIT"
] | null | null | null | tcga_encoder/analyses/old/dna_functions.py | tedmeeds/tcga_encoder | 805f9a5bcc422a43faea45baa0996c88d346e3b4 | [
"MIT"
] | null | null | null | from tcga_encoder.utils.helpers import *
from scipy import stats
def auc_standard_error( theta, nA, nN ):
# from: Hanley and McNeil (1982), The Meaning and Use of the Area under the ROC Curve
# theta: estimated AUC, can be 0.5 for a random test
# nA size of population A
# nN size of population N
  Q1 = theta / (2.0 - theta)
  Q2 = 2 * theta * theta / (1 + theta)
SE = np.sqrt( (theta*(1-theta)+(nA-1)*(Q1-theta*theta) + (nN-1)*(Q2-theta*theta) )/(nA*nN) )
return SE
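# e.g. (illustrative numbers): auc_standard_error(0.75, 50, 50) returns the
# Hanley-McNeil standard error for an AUC of 0.75 with 50 samples per group.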
def auc_p_value( auc1, auc2, std_error1, std_error2 ):
se_combined = np.sqrt( std_error1**2 + std_error2**2 )
difference = auc1 - auc2
z_values = difference / se_combined
sign_difference = np.sign(difference)
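  # NOTE: sign_difference is currently unused; the p-value below is one-sided
  # in the magnitude of the difference.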
p_values = 1.0 - stats.norm.cdf( np.abs(z_values) )
return p_values
| 28 | 94 | 0.670918 | 132 | 784 | 3.863636 | 0.469697 | 0.041176 | 0.035294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046474 | 0.204082 | 784 | 28 | 95 | 28 | 0.770833 | 0.232143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.153846 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3badbbe2ba65c13681fed044b28fd4aa43440f3 | 646 | py | Python | notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/_Another-One/Project Euler/Problem 9/sol2.py | side-projects-42/INTERVIEW-PREP-COMPLETE | 627a3315cee4bbc38a0e81c256f27f928eac2d63 | [
"MIT"
] | 13 | 2021-03-11T00:25:22.000Z | 2022-03-19T00:19:23.000Z | notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/_Another-One/Project Euler/Problem 9/sol2.py | side-projects-42/INTERVIEW-PREP-COMPLETE | 627a3315cee4bbc38a0e81c256f27f928eac2d63 | [
"MIT"
] | 160 | 2021-04-26T19:04:15.000Z | 2022-03-26T20:18:37.000Z | notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/_Another-One/Project Euler/Problem 9/sol2.py | side-projects-42/INTERVIEW-PREP-COMPLETE | 627a3315cee4bbc38a0e81c256f27f928eac2d63 | [
"MIT"
] | 12 | 2021-04-26T19:43:01.000Z | 2022-01-31T08:36:29.000Z | """A Pythagorean triplet is a set of three natural numbers, for which,
a^2+b^2=c^2
Given N, Check if there exists any Pythagorean triplet for which a+b+c=N
Find maximum possible value of product of a,b,c among all such Pythagorean triplets, If there is no such Pythagorean triplet print -1."""
#!/bin/python3
import sys
product = -1
d = 0
N = int(input())
for a in range(1, N // 3):
"""Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c """
b = (N * N - 2 * a * N) // (2 * N - 2 * a)
c = N - a - b
if c * c == (a * a + b * b):
d = a * b * c
if d >= product:
product = d
print(product)
| 32.3 | 137 | 0.588235 | 124 | 646 | 3.064516 | 0.403226 | 0.031579 | 0.031579 | 0.021053 | 0.031579 | 0.031579 | 0 | 0 | 0 | 0 | 0 | 0.03178 | 0.26935 | 646 | 19 | 138 | 34 | 0.773305 | 0.465944 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3bf686e9d3b64fb3a2f04a67c481cb724c6b8f5 | 1,894 | py | Python | pushnetbox.py | M35a2/cisco-interfaces-to-netbox | 0d8df402ed85571bdb6e8b627c080fbea670a7f8 | [
"BSD-3-Clause"
] | null | null | null | pushnetbox.py | M35a2/cisco-interfaces-to-netbox | 0d8df402ed85571bdb6e8b627c080fbea670a7f8 | [
"BSD-3-Clause"
] | null | null | null | pushnetbox.py | M35a2/cisco-interfaces-to-netbox | 0d8df402ed85571bdb6e8b627c080fbea670a7f8 | [
"BSD-3-Clause"
] | null | null | null | from netbox import NetBox
import requests
from re import search
requests.packages.urllib3.disable_warnings()
def createAddress(address, description, vrf, network, role):
    # NetBox connection settings
    netbox = NetBox(host='10.255.X.X', port=443, use_ssl=True, auth_token='5d6dfe9f6f39785eb86f')
    # set VRF ID
    vrf_id = 0
    # attempt to create the prefix if it is not already there
    if vrf == "GRT":
        vrf_id = 1
        try:
            netboxPrefix = netbox.ipam.create_ip_prefix(prefix=network, vrf=vrf_id)
        except Exception:
            print("prefix already exists")
    elif not vrf:
        try:
            netboxPrefix = netbox.ipam.create_ip_prefix(prefix=network)
        except Exception:
            print("prefix already exists")
    # set VRF ID
    vrf_id2 = 0
    # attempt to create the address
    if vrf == "GRT":
        vrf_id2 = 1
        loopback = "Loopback"
        if search(loopback, role):
            try:
                netboxAPIcall = netbox.ipam.create_ip_address(address=address, vrf=vrf_id2, description=description, role="10")
            except Exception:
                print("ip address already exists")
        else:
            try:
                netboxAPIcall = netbox.ipam.create_ip_address(address=address, vrf=vrf_id2, description=description)
            except Exception:
                print("ip address already exists")
    elif vrf == "":
        loopback = "Loopback"
        if search(loopback, role):
            try:
                netboxAPIcall = netbox.ipam.create_ip_address(address=address, description=description, role="10")
            except Exception:
                print("ip address already exists")
        else:
            try:
                netboxAPIcall = netbox.ipam.create_ip_address(address=address, description=description)
            except Exception:
                print("ip address already exists")
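
# Example call (hypothetical values):
#   createAddress('10.0.0.5/24', 'core uplink', 'GRT', '10.0.0.0/24', 'Loopback0')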
| 30.548387 | 131 | 0.578669 | 201 | 1,894 | 5.378109 | 0.288557 | 0.066605 | 0.088807 | 0.099907 | 0.654949 | 0.599445 | 0.599445 | 0.599445 | 0.53839 | 0.442183 | 0 | 0.025397 | 0.334741 | 1,894 | 61 | 132 | 31.04918 | 0.83254 | 0.058078 | 0 | 0.565217 | 0 | 0 | 0.111675 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021739 | false | 0 | 0.108696 | 0 | 0.130435 | 0.130435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3c245e82abe59f7ffe1e78f2b4cafcd335e9622 | 2,837 | py | Python | scikits/pulsefit/correct_addpulses.py | johnnylee/scikits.pulsefit | d07571d524d974f52a863cd96a823fce6f1fed1e | [
"MIT"
] | 2 | 2015-08-25T15:41:26.000Z | 2016-05-23T01:42:37.000Z | scikits/pulsefit/correct_addpulses.py | johnnylee/scikits.pulsefit | d07571d524d974f52a863cd96a823fce6f1fed1e | [
"MIT"
] | 1 | 2015-03-28T00:32:16.000Z | 2017-04-04T10:48:49.000Z | scikits/pulsefit/correct_addpulses.py | johnnylee/scikits.pulsefit | d07571d524d974f52a863cd96a823fce6f1fed1e | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
class CorrectAddPulses(object):
def __init__(self, ampfit, optfit, flagger, pulse_add_len, th_min,
min_dist=0, debug=False):
"""Arguments:
ampfit -- Ampitude fitter.
optfit -- Fit optimizer.
flagger -- Block flagger.
pulse_add_len -- The max number of pulses to add is the block
length divided by pulse_add_len.
th_min -- Minimum allowed pulse height.
min_dist -- If pulses are closer than min_dist, they are merged
into a single pulse.
"""
self.ampfit = ampfit
self.optfit = optfit
self.flagger = flagger
self.pulse_add_len = pulse_add_len
self.th_min = th_min
self.min_dist = min_dist
self.debug = debug
def sanitize(self, block):
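        """Remove pulses with amplitude below th_min and merge pulses closer
        than min_dist, refitting the block if anything changed."""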
refit = False
# Remove low-amplitude pulses.
mask = block.amps > self.th_min
if mask.sum() != block.inds.size:
refit = True
block.inds = block.inds[mask]
block.amps = block.amps[mask]
if self.debug:
print("Correct: Removed low-amplitude pulses.")
# Merge pulses that are too close together.
if self.min_dist != 0 and len(block.inds) > 1:
dinds = block.inds[1:] - block.inds[:-1]
mask = dinds > self.min_dist
if mask.sum() != block.inds.size - 1:
refit = True
new_inds = np.empty(mask.sum() + 1, dtype=np.float64)
new_inds[0] = block.inds[0]
                if mask.sum() != 0:
                    new_inds[1:] = block.inds[1:][mask]
                block.inds = new_inds  # store the merged pulse indices on the block
                if self.debug:
                    print("Correct: Merged pulses.")
if refit:
self.refit(block)
def refit(self, block):
self.ampfit.fit(block)
self.optfit.optimize(block)
self.flagger.flag(block)
def correct(self, block):
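        # Repeatedly add a pulse at the index of the largest residual
        # (offset by the pulse-template peak) until all flags clear or the
        # add_max budget is exhausted.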
if self.debug:
print("\nCorrecting...")
add_max = max(1, int((block.i1 - block.i0) / self.pulse_add_len))
        for i in range(add_max):  # range works on both Python 2 and 3
if np.all(block.flags == 0):
if self.debug:
print("Correct: All OK.")
return
# Add a new pulse.
idx_new = max(block.res.argmax() - block.p.argmax(), 0)
inds = np.concatenate((block.inds, (idx_new,)))
inds.sort()
block.inds = inds
if self.debug:
print(" Adding pulse at:", idx_new)
print(" Inds:", block.inds)
self.refit(block)
self.sanitize(block)
| 32.238636 | 73 | 0.505111 | 331 | 2,837 | 4.205438 | 0.305136 | 0.077586 | 0.047414 | 0.057471 | 0.134339 | 0.094828 | 0 | 0 | 0 | 0 | 0 | 0.011047 | 0.393726 | 2,837 | 87 | 74 | 32.609195 | 0.798256 | 0.153331 | 0 | 0.160714 | 0 | 0 | 0.052155 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.035714 | 0 | 0.142857 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3c27fde8746702040364868f95806b209ac0997 | 2,549 | py | Python | server/accession/fixtures/actiontypes.py | coll-gate/collgate | 8c2ff1c59adda2bf318040f588c05263317a2812 | [
"MIT"
] | 2 | 2017-07-04T16:19:09.000Z | 2019-08-16T04:54:47.000Z | server/accession/fixtures/actiontypes.py | coll-gate/collgate | 8c2ff1c59adda2bf318040f588c05263317a2812 | [
"MIT"
] | null | null | null | server/accession/fixtures/actiontypes.py | coll-gate/collgate | 8c2ff1c59adda2bf318040f588c05263317a2812 | [
"MIT"
] | 1 | 2018-04-13T08:28:09.000Z | 2018-04-13T08:28:09.000Z | # -*- coding: utf-8; -*-
#
# @file actiontypes.py
# @brief collgate
# @author Frédéric SCHERMA (INRA UMR1095)
# @date 2017-11-30
# @copyright Copyright (c) 2017 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details
ACTION_TYPES = {
'introduction': {
'id': None,
'name': 'introduction',
'label': {
'en': 'Introduction',
'fr': 'Introduction'
},
'format': {'steps': []}
},
'multiplication': {
'id': None,
'name': 'multiplication',
'label': {
'en': 'Multiplication',
'fr': 'Multiplication'
},
'format': {'steps': []}
},
'regeneration': {
'id': None,
'name': 'regeneration',
'label': {
'en': 'Regeneration',
'fr': 'Regeneration'
},
'format': {'steps': []}
},
'test': {
'id': None,
'name': 'conformity_test',
'label': {
'en': 'Conformity test',
'fr': 'Test de conformité'
},
'format': {'steps': []}
},
'cleanup': {
'id': None,
'name': 'cleanup',
'label': {
            'en': 'Clean-up',
            'fr': 'Nettoyage'
},
'format': {'steps': []}
},
'sample': {
'id': None,
'name': 'sample',
'label': {
'en': 'Sample',
'fr': 'Echantillon'
},
'format': {'steps': []}
},
'dispatch': {
'id': None,
'name': 'dispatch',
'label': {
'en': 'Dispatch',
'fr': 'Dispatch'
},
'format': {'steps': []}
},
'elimination': {
'id': None,
'name': 'elimination',
'label': {
'en': 'Elimination',
'fr': 'Elimination'
},
'format': {'steps': []}
},
'complement': {
'id': None,
'name': 'complement',
'label': {
'en': 'Complement',
'fr': 'Complément'
},
'format': {'steps': []}
},
'characterization': {
'id': None,
'name': 'characterization',
'label': {
'en': 'Characterization',
'fr': 'Caractérisation'
},
'format': {'steps': []}
},
}
def fixture(fixture_manager, factory_manager):
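    """Register an ActionTypeFactory and create or update the ACTION_TYPES
    entries (a sketch of intent inferred from the calls below)."""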
from accession.api.actiontype import ActionTypeFactory
factory = ActionTypeFactory()
factory_manager.register(factory)
factory.create_or_update(factory_manager, ACTION_TYPES, False)
| 22.758929 | 66 | 0.429973 | 183 | 2,549 | 5.939891 | 0.371585 | 0.055198 | 0.091996 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010746 | 0.379364 | 2,549 | 111 | 67 | 22.963964 | 0.676359 | 0.077678 | 0 | 0.309278 | 0 | 0 | 0.300299 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010309 | false | 0 | 0.010309 | 0 | 0.020619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3c4355aa628e0a4b790fc108449eb7a3f5be2e3 | 2,128 | py | Python | ops_omgx/aws-secrets-importer.py | jet86/optimism | ed0d1ba349c5aa6f3e57c5b5886b34a167177ffa | [
"MIT"
] | 13 | 2021-06-04T20:01:01.000Z | 2021-11-25T07:43:53.000Z | ops_omgx/aws-secrets-importer.py | jet86/optimism | ed0d1ba349c5aa6f3e57c5b5886b34a167177ffa | [
"MIT"
] | 361 | 2021-05-24T16:36:12.000Z | 2021-12-04T18:03:26.000Z | ops_omgx/aws-secrets-importer.py | jet86/optimism | ed0d1ba349c5aa6f3e57c5b5886b34a167177ffa | [
"MIT"
] | 9 | 2021-05-29T00:13:15.000Z | 2021-11-19T18:11:01.000Z | #!/usr/bin/python3
import json
import re
import subprocess
import sys
import getopt
def main(argv):
inputfile = ''
description = "Create Secret Manager"
secret_name = ''
region = 'us-east-1'
profile = 'default'
    if len(sys.argv) <= 1 or len(sys.argv) > 11:
print('aws-secrets-importer.py -i <inputfile> -d <description> -n <external secret name> -r <region> -p <profile> ')
sys.exit()
try:
opts, args = getopt.getopt(argv,"hi:d:n:r:p:",["ifile=","name=","description=","region=","profile="])
print(opts)
except getopt.GetoptError:
print('aws-secrets-importer.py -i <inputfile> -d <description> -n <external secret name> -r <region> -p <profile> ')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('aws-secrets-importer.py -i <inputfile> -d <description> -n <external secret name> -r <region> -p <profile> ')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-d", "--description"):
description = arg
elif opt in ("-n", "--name"):
secret_name = arg
elif opt in ("-r", "--region"):
region = arg
elif opt in ("-p", "--profile"):
profile = arg
        else:
            print('aws-secrets-importer.py -i <inputfile> -d <description> -n <external secret name> -r <region> -p <profile> ')
            sys.exit()
with open(inputfile) as vars_file:
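        # Input is expected to contain KEY=VALUE lines, optionally prefixed
        # with '- ' (YAML-style list items), which is stripped below.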
pattern = "="
secrets = {}
for line in vars_file:
if re.search(pattern, line):
variables = line.strip()
clean = re.sub(r"^- ", "", variables)
                key, value = clean.split('=', 1)  # split on the first '=' only, so values may contain '='
secrets[key] = value
cmd = ['aws', 'secretsmanager', 'create-secret', '--region', region, '--profile', profile, '--description', description, '--name', secret_name, '--secret-string', json.dumps(secrets)]
result = subprocess.run(cmd)
print(result)
if __name__ == "__main__":
main(sys.argv[1:])
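
# Example (hypothetical input file of KEY=VALUE lines):
#   python3 aws-secrets-importer.py -i vars.env -n my-app-secrets -r us-east-1 -p default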
| 36.689655 | 187 | 0.547932 | 253 | 2,128 | 4.557312 | 0.312253 | 0.060711 | 0.039029 | 0.079792 | 0.305291 | 0.305291 | 0.305291 | 0.305291 | 0.305291 | 0.305291 | 0 | 0.004587 | 0.282895 | 2,128 | 57 | 188 | 37.333333 | 0.750983 | 0.007989 | 0 | 0.134615 | 0 | 0.076923 | 0.316114 | 0.043602 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019231 | false | 0 | 0.173077 | 0 | 0.192308 | 0.134615 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3c7d9360b54f39f3fe5652ae31ef0dfc4bf8638 | 9,331 | py | Python | tethys_compute/models/dask/dask_job.py | msouff/tethys | 45795d1e6561d5db8fddd838f4d1ae1d91dbb837 | [
"BSD-2-Clause"
] | null | null | null | tethys_compute/models/dask/dask_job.py | msouff/tethys | 45795d1e6561d5db8fddd838f4d1ae1d91dbb837 | [
"BSD-2-Clause"
] | 1 | 2018-09-20T21:27:14.000Z | 2018-09-20T21:27:14.000Z | tethys_compute/models/dask/dask_job.py | msouff/tethys | 45795d1e6561d5db8fddd838f4d1ae1d91dbb837 | [
"BSD-2-Clause"
] | null | null | null | """
********************************************************************************
* Name: basic_job
* Author: nswain, teva, tran
* Created On: September 19, 2018
* Copyright: (c) Aquaveo 2018
********************************************************************************
"""
import logging
import datetime
from django.utils import timezone
from django.db import models
from dask.delayed import Delayed
from dask.distributed import Client, Future, fire_and_forget
from tethys_compute.models.tethys_job import TethysJob
from tethys_compute.models.dask.dask_scheduler import DaskScheduler
from tethys_compute.models.dask.dask_field import DaskSerializedField
log = logging.getLogger('tethys.' + __name__)
client_fire_forget = None
class DaskJob(TethysJob):
"""
Dask job type.
"""
key = models.CharField(max_length=1024, null=True)
scheduler = models.ForeignKey(DaskScheduler, on_delete=models.SET_NULL, blank=True, null=True)
forget = models.BooleanField(default=False)
result = DaskSerializedField(blank=True, null=True)
DASK_TO_STATUS_TYPES = {
# States returned from Scheduler transition event.
'new-released': 'SUB',
'released-waiting': 'SUB',
'waiting-no-worker': 'SUB',
'no-worker-waiting': 'SUB',
'no-worker-processing': 'RUN',
'waiting-processing': 'RES', # Should technically be RUN, but needs to be RES to get results
'processing-memory': 'RES',
'memory-released': 'RES',
# 'memory-forgotten': 'RES', # Will freeze processing results if enabled.
# 'processing-forgotten': 'RES', # Will freeze processing results if enabled.
'processing-erred': 'ERR',
'erred-forgotten': 'ERR',
# States returned by Future objects
'pending': 'SUB',
'processing': 'RUN',
'finished': 'COM'
}
@property
def client(self):
if self.scheduler:
return self.scheduler.client
else:
return Client()
@property
def update_status_interval(self):
"""
Override default update status interval.
Returns:
datetime.timedelta: update status interval.
"""
if not hasattr(self, '_update_status_interval'):
self._update_status_interval = datetime.timedelta(seconds=0)
return self._update_status_interval
@property
def future(self):
"""
Get Future instance associated with this job. The Future can be used to query status, get results, and manage the dask task.
Returns:
dask.distributed.Future: a future bound to the key associated with the Dask Job.
""" # noqa: #E501
if not getattr(self, '_future', None):
if self.key and self.client:
try:
future = Future(key=self.key,
client=self.client)
except Exception:
log.exception('Dask Future Init Error')
return None
else:
return None
self._future = future
return self._future
def _execute(self, future_or_delayed, *args, **kwargs):
"""
Execute Delayed jobs using the distributed Client to get a future object. Save the key of the future object for later use.
Args:
future_or_delayed (dask.Delayed or dask.distributed.Future): dask task object to track using TethysJobs.
""" # noqa: E501
if not isinstance(future_or_delayed, Future) and not isinstance(future_or_delayed, Delayed):
raise ValueError('Must pass a valid instance of Delayed or Future.')
if isinstance(future_or_delayed, Delayed):
future = self.client.compute(future_or_delayed)
else:
future = future_or_delayed
self.key = future.key
# NOTE: Job may not actually be running at this point, but we don't have another
# way to know when the first node in the workflow starts running.
self._status = 'RUN'
self.start_time = timezone.now()
# Send key to the dask scheduler so the scheduler knows which jobs to send status updates to Tethys.
self.client.set_metadata(self.key, True)
# Save updated attributes
self.save()
# Must use fire and forget to ensure job runs after the future goes out of scope.
fire_and_forget(future)
# Save this client to close it after obtaining the result.
global client_fire_forget
client_fire_forget = self.client
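    # Typical flow (sketch; 'name'/'user' are fields assumed to come from the
    # TethysJob base model and 'my_scheduler' is a hypothetical DaskScheduler
    # instance, so treat this as an assumption rather than documented API):
    #
    #   import dask
    #   job = DaskJob(name='add', user=some_user, scheduler=my_scheduler)
    #   delayed = dask.delayed(lambda a, b: a + b)(1, 2)
    #   job._execute(delayed)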
def _update_status(self, *args, **kwargs):
"""
Check status using a Future, translate to Tethys Jobs status and save.
"""
# Get Future
future = self.future
# Do nothing if no Future
if not future:
return
# Get the status
dask_status = future.status.lower()
try:
# Translate to TethysJob status
self._status = self.DASK_TO_STATUS_TYPES[dask_status]
self.save()
# Clean up client
self.client.close()
except KeyError:
log.error('Unknown Dask Status: "{}"'.format(dask_status))
def _process_results(self, *args, **kwargs):
"""
Process results callback. If process_results_function is specified, we call it with the results as an argument, otherwise get the result, serialize, and save to database. Also update job status accordingly.
""" # noqa: E501
# Lock before processing results to prevent conflicts
if not self._acquire_pr_lock():
return
# Get the future instance
future = self.future
# Skip if no Future
if not future:
return
# Skip processing results if forget
if self.forget:
# Clean up client
self.client.close()
return
try:
# Get results using the client
result = self.client.gather(future)
except Exception as e:
# Tell scheduler to stop sending updates about this key
self.client.set_metadata(self.key, False)
# Clean up client
self.client.close()
result = e
log.warning('Exception encountered when retrieving results: "{}"'.format(str(e)))
# Tell scheduler to stop sending updates about this key
self.client.set_metadata(self.key, False)
# Handle custom process results function
if self.process_results_function:
# Get the process_results_function in TethysJob and call it with the result retrived
try:
result = self.process_results_function(result)
except Exception as e:
log.exception('Process Results Function Error')
self._status = 'ERR'
result = str(e)
# Serialize the result
try:
self.result = result
except Exception:
log.exception('Results Serialization Error')
self._status = 'ERR'
else:
self._status = 'COM' if self._status != 'ERR' else 'ERR'
# Erase the key to avoid problem with dask recycle key
self.key = ''
# save the results or status in the database
self.save()
# Clean up client
self.client.close()
if client_fire_forget:
client_fire_forget.close()
self._release_pr_lock()
def _acquire_pr_lock(self):
"""
Processing results lock to prevent collisions between multiple processes.
Returns:
bool: True if lock acquired successfully, else False.
"""
ep = self.extended_properties
is_processing_results = ep.get('processing_results', False)
if not is_processing_results:
ep['processing_results'] = True
self.extended_properties = ep
self.save()
return True
else:
log.warning('Unable to aquire lock. Processing results already occurring. Skipping...')
return False
def _release_pr_lock(self):
"""
Release processing results lock.
"""
ep = self.extended_properties
ep['processing_results'] = False
self.extended_properties = ep
self.save()
def stop(self):
"""
Stops job from executing.
"""
# Get the current future instance
future = self.future
# Cancel the job
if future:
future.cancel()
def pause(self):
"""
Pauses job during execution.
"""
raise NotImplementedError()
def resume(self):
"""
Resumes a job that has been paused.
"""
raise NotImplementedError()
def done(self):
"""
Check if job is finished running.
Returns:
bool: True if the job has finished running.
"""
future = self.future
if future:
result = future.done()
return result
def retry(self):
"""
Retry this job.
"""
future = self.future
if future:
future.retry()
| 31.630508 | 214 | 0.58729 | 1,044 | 9,331 | 5.14272 | 0.260536 | 0.037996 | 0.019557 | 0.012665 | 0.163345 | 0.125535 | 0.074502 | 0.064444 | 0.051034 | 0.029428 | 0 | 0.003767 | 0.317222 | 9,331 | 294 | 215 | 31.738095 | 0.838958 | 0.31315 | 0 | 0.342105 | 0 | 0 | 0.102106 | 0.003844 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085526 | false | 0.006579 | 0.059211 | 0 | 0.269737 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3c9636997a7889d7b0b583458839ea611f4ceb5 | 7,566 | py | Python | public_21CMvFAST_MC/Programs/CosmoHammer_21CMMC/emcee/utils.py | NNSSA/21cmvFAST | 8f015427f3609a3051b4fa185bdbe55b379c930f | [
"MIT"
] | 5 | 2019-04-18T11:31:34.000Z | 2020-11-15T03:29:14.000Z | emcee/utils.py | lauralwatkins/emcee | 051770bcfb6ed85bbc08459850deae8561ea7110 | [
"MIT"
] | 1 | 2019-12-17T05:27:25.000Z | 2019-12-18T19:59:55.000Z | emcee/utils.py | lauralwatkins/emcee | 051770bcfb6ed85bbc08459850deae8561ea7110 | [
"MIT"
] | 2 | 2019-11-14T13:54:46.000Z | 2019-12-08T17:16:53.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["sample_ball", "MH_proposal_axisaligned"]
import numpy as np
# If mpi4py is installed, import it.
try:
from mpi4py import MPI
MPI = MPI
except ImportError:
MPI = None
def sample_ball(p0, std, size=1):
"""
Produce a ball of walkers around an initial parameter value.
:param p0: The initial parameter value.
:param std: The axis-aligned standard deviation.
:param size: The number of samples to produce.
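
    Example (illustrative)::

        p0 = sample_ball([1.0, 2.0], [0.1, 0.1], size=100)  # shape (100, 2)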
"""
assert(len(p0) == len(std))
return np.vstack([p0 + std * np.random.normal(size=len(p0))
for i in range(size)])
class MH_proposal_axisaligned(object):
"""
A Metropolis-Hastings proposal, with axis-aligned Gaussian steps,
for convenient use as the ``mh_proposal`` option to
:func:`EnsembleSampler.sample` .
"""
def __init__(self, stdev):
self.stdev = stdev
def __call__(self, X):
(nw, npar) = X.shape
assert(len(self.stdev) == npar)
return X + self.stdev * np.random.normal(size=X.shape)
if MPI is not None:
class _close_pool_message(object):
def __repr__(self):
return "<Close pool message>"
class _function_wrapper(object):
def __init__(self, function):
self.function = function
def _error_function(task):
raise RuntimeError("Pool was sent tasks before being told what "
"function to apply.")
class MPIPool(object):
"""
A pool that distributes tasks over a set of MPI processes. MPI is an
API for distributed memory parallelism. This pool will let you run
emcee without shared memory, letting you use much larger machines
with emcee.
    The pool only supports the :func:`map` method at the moment because
this is the only functionality that emcee needs. That being said,
this pool is fairly general and it could be used for other purposes.
Contributed by `Joe Zuntz <https://github.com/joezuntz>`_.
:param comm: (optional)
The ``mpi4py`` communicator.
:param debug: (optional)
If ``True``, print out a lot of status updates at each step.
"""
def __init__(self, comm=MPI.COMM_WORLD, debug=False):
self.comm = comm
self.rank = comm.Get_rank()
self.size = comm.Get_size() - 1
self.debug = debug
self.function = _error_function
if self.size == 0:
raise ValueError("Tried to create an MPI pool, but there "
"was only one MPI process available. "
"Need at least two.")
        def is_master(self):
            """
            Is the current process the master?

            """
            return self.rank == 0

        def wait(self):
            """
            If this isn't the master process, wait for instructions.

            """
            if self.is_master():
                raise RuntimeError("Master node told to await jobs.")

            status = MPI.Status()

            while True:
                # Event loop: sit here and await instructions.
                if self.debug:
                    print("Worker {0} waiting for task.".format(self.rank))

                # Blocking receive to wait for instructions.
                task = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
                if self.debug:
                    print("Worker {0} got task {1} with tag {2}."
                          .format(self.rank, task, status.tag))

                # Check if the message is the special sentinel signaling the
                # end. If so, stop.
                if isinstance(task, _close_pool_message):
                    if self.debug:
                        print("Worker {0} told to quit.".format(self.rank))
                    break

                # Check if the message is the special type containing a new
                # function to be applied.
                if isinstance(task, _function_wrapper):
                    self.function = task.function
                    if self.debug:
                        print("Worker {0} replaced its task function: {1}."
                              .format(self.rank, self.function))
                    continue

                # If not a special message, just run the known function on
                # the input and return it asynchronously.
                result = self.function(task)
                if self.debug:
                    print("Worker {0} sending answer {1} with tag {2}."
                          .format(self.rank, result, status.tag))
                self.comm.isend(result, dest=0, tag=status.tag)
        def map(self, function, tasks):
            """
            Like the built-in :func:`map` function, apply a function to all
            of the values in a list and return the list of results.

            :param function:
                The function to apply to the list.

            :param tasks:
                The list of elements.

            """
            ntask = len(tasks)

            # If this isn't the master, just wait for instructions.
            if not self.is_master():
                self.wait()
                return

            if function is not self.function:
                if self.debug:
                    print("Master replacing pool function with {0}."
                          .format(function))

                self.function = function

                F = _function_wrapper(function)

                # Tell all the workers what function to use.
                requests = []
                for i in range(self.size):
                    r = self.comm.isend(F, dest=i + 1)
                    requests.append(r)

                # Wait until all of the workers have responded. See:
                # https://gist.github.com/4176241
                MPI.Request.waitall(requests)

            # Send all the tasks off and wait for them to be received.
            # Again, see the bug in the above gist.
            requests = []
            for i, task in enumerate(tasks):
                worker = i % self.size + 1
                if self.debug:
                    print("Sent task {0} to worker {1} with tag {2}."
                          .format(task, worker, i))
                r = self.comm.isend(task, dest=worker, tag=i)
                requests.append(r)

            MPI.Request.waitall(requests)

            # Now wait for the answers.
            results = []
            for i in range(ntask):
                worker = i % self.size + 1
                if self.debug:
                    print("Master waiting for worker {0} with tag {1}"
                          .format(worker, i))
                result = self.comm.recv(source=worker, tag=i)
                results.append(result)

            return results
        def close(self):
            """
            Just send a message off to all the pool members which contains
            the special :class:`_close_pool_message` sentinel.

            """
            if self.is_master():
                for i in range(self.size):
                    self.comm.isend(_close_pool_message(), dest=i + 1)

        def __enter__(self):
            return self

        def __exit__(self, *args):
            self.close()
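
# A minimal usage sketch (an assumed driver, not part of the original module;
# ``_square`` is a hypothetical placeholder task). Launch it under MPI, e.g.
# ``mpiexec -n 4 python thisfile.py``: the workers block in ``wait()`` while
# the master farms the tasks out with ``map`` and then closes the pool.
if __name__ == "__main__" and MPI is not None:
    import sys

    def _square(x):
        return x * x

    pool = MPIPool()
    if not pool.is_master():
        # Workers sit in the event loop until the master sends the
        # close-pool sentinel.
        pool.wait()
        sys.exit(0)

    print(pool.map(_square, list(range(10))))
    pool.close()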
| 34.081081 | 79 | 0.524055 | 871 | 7,566 | 4.463835 | 0.311137 | 0.016975 | 0.022634 | 0.032922 | 0.09465 | 0.067644 | 0.028292 | 0.016461 | 0.016461 | 0 | 0 | 0.009107 | 0.390431 | 7,566 | 221 | 80 | 34.235294 | 0.833912 | 0.277161 | 0 | 0.198198 | 0 | 0 | 0.105584 | 0.004522 | 0 | 0 | 0 | 0 | 0.018018 | 1 | 0.117117 | false | 0 | 0.036036 | 0.018018 | 0.252252 | 0.081081 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3c9da5766ad3a8e6c556571dd37b7fb321d959a | 1,522 | py | Python | utils/lru_cache.py | glongh/visual-run | 252e160cec8906319baf0394fc3feb133df3947a | [
"MIT"
] | null | null | null | utils/lru_cache.py | glongh/visual-run | 252e160cec8906319baf0394fc3feb133df3947a | [
"MIT"
] | null | null | null | utils/lru_cache.py | glongh/visual-run | 252e160cec8906319baf0394fc3feb133df3947a | [
"MIT"
] | null | null | null | from collections import OrderedDict
from typing import Union
'''
Cache eviction flow:

    # get(i)
    if i in cache:
        return cache[i]
    else:
        # set(i, m)
        x = the least recently used key in cache
        evict x, add i to cache
        return cache[i]
'''
class CacheLRU:
    def __init__(self, size: int) -> None:
        self.size = size
        self.cache = OrderedDict()

    def get(self, key: str) -> Union[str, bool]:
        if key not in self.cache:
            return False
        else:
            # Move the key to the end to mark it as the most recently used.
            self.cache.move_to_end(key)
            return self.cache[key]
    def set(self, key: str, item: str) -> None:
        # Oa (Parts): the cache holds parts of a solution; the eviction
        # manager builds it up incrementally.
        self.cache[key] = item
        # Ob (Greed used): all changes to 'cache' are "additive": when a key
        # is evicted there is no backtracking, and each decision optimizes
        # only on the information available in the cache at the time of the
        # request.
        # Greedy step: evict an item only when needed, and evict the element
        # that is Least Recently Used.
        # 1: Move the key to the end to mark it as the most recently used.
        self.cache.move_to_end(key)
        # 2: If the size limit has been exceeded, remove the first key: the
        # least recently used one.
        if len(self.cache) > self.size:
            self.cache.popitem(last=False)
        # Oc (Complete): the cache is now fully built.
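
# A minimal usage sketch (keys and values are illustrative assumptions): with
# capacity 2, touching "a" and then inserting "c" evicts "b", the least
# recently used key.
if __name__ == "__main__":
    cache = CacheLRU(size=2)
    cache.set("a", "1")
    cache.set("b", "2")
    cache.get("a")                  # "a" becomes the most recently used key
    cache.set("c", "3")             # capacity exceeded: "b" is evicted
    assert cache.get("b") is False  # miss: "b" is gone
    assert cache.get("a") == "1"    # hit: "a" survived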
| 33.822222 | 120 | 0.622208 | 229 | 1,522 | 4.100437 | 0.441048 | 0.076677 | 0.034079 | 0.036209 | 0.115016 | 0.115016 | 0.080937 | 0.080937 | 0.080937 | 0.080937 | 0 | 0.001854 | 0.291064 | 1,522 | 45 | 121 | 33.822222 | 0.868397 | 0.449409 | 0 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.0625 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0