hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7982421d07b8b666fc8fc840123a2f47aa6edf14 | 860 | py | Python | client-hints/resources/echo-ua-client-hints-received.py | BasixKOR/wpt | aa27d567c10dcdb2aea6884d5155dfaaa177a800 | [
"BSD-3-Clause"
] | null | null | null | client-hints/resources/echo-ua-client-hints-received.py | BasixKOR/wpt | aa27d567c10dcdb2aea6884d5155dfaaa177a800 | [
"BSD-3-Clause"
] | 59 | 2022-01-19T21:35:57.000Z | 2022-03-30T21:35:27.000Z | client-hints/resources/echo-ua-client-hints-received.py | BasixKOR/wpt | aa27d567c10dcdb2aea6884d5155dfaaa177a800 | [
"BSD-3-Clause"
] | null | null | null | import importlib
client_hints_ua_list = importlib.import_module("client-hints.resources.clienthintslist").client_hints_ua_list
def main(request, response):
"""
Simple handler that sets a response header based on which client hint
request headers were received.
"""
response.headers.append(b"Access-Control-Allow-Origin", b"*")
response.headers.append(b"Access-Control-Allow-Headers", b"*")
response.headers.append(b"Access-Control-Expose-Headers", b"*")
client_hint_headers = client_hints_ua_list()
request_client_hints = {i: request.headers.get(i) for i in client_hint_headers}
for header in client_hint_headers:
if request_client_hints[header] is not None:
response.headers.set(header + b"-received", request_client_hints[header])
headers = []
content = u""
return 200, headers, content
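# Editorial note (not part of the original file): this is a wptserve Python
# handler; the web-platform-tests server calls main(request, response) when
# this resource path is requested, so no explicit usage example is needed.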
| 35.833333 | 109 | 0.731395 | 116 | 860 | 5.232759 | 0.396552 | 0.126853 | 0.06425 | 0.08402 | 0.192751 | 0.192751 | 0.192751 | 0 | 0 | 0 | 0 | 0.004149 | 0.159302 | 860 | 23 | 110 | 37.391304 | 0.835408 | 0.116279 | 0 | 0 | 0 | 0 | 0.179625 | 0.163539 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79836d938d077f9ed2ab86fd140347063a4e8fc0 | 8,892 | py | Python | base/vocab.py | thu-spmi/semi-EBM | 393e3ea3566dd60c48872a5c573a335e8e802707 | [
"Apache-2.0"
] | 2 | 2021-09-18T14:21:24.000Z | 2021-12-20T03:39:13.000Z | base/vocab.py | thu-spmi/semi-EBM | 393e3ea3566dd60c48872a5c573a335e8e802707 | [
"Apache-2.0"
] | null | null | null | base/vocab.py | thu-spmi/semi-EBM | 393e3ea3566dd60c48872a5c573a335e8e802707 | [
"Apache-2.0"
] | 1 | 2021-09-12T07:02:23.000Z | 2021-09-12T07:02:23.000Z | import os
import json
import numpy as np
class Vocab(object):
def __init__(self):
self.word_to_id = dict()
self.count = list()
self.words = list()
self.to_lower = False
# add character information
self.chars = list() # ['a', 'b', 'c', 'd', ...]
self.char_to_id = dict() # {'a': 0, 'b': 1, ...}
self.word_to_chars = list() # [ ['a', 'b', 'c'], ... ]
self.word_max_len = 0
self.char_beg_id = 0
self.char_end_id = 0
def load_data(self, file_list):
v_count = dict()
total_line = 0
total_word = 0
for file in file_list:
print('[%s.%s] generate_vocab: ' % (__name__, self.__class__.__name__), file)
with open(file, 'rt') as f:
for line in f:
# to lower
if self.to_lower:
line = line.lower()
for w in line.split():
v_count.setdefault(w, 0)
v_count[w] += 1
total_word += 1
total_line += 1
return v_count, total_line, total_word
def generate_vocab(self, file_list, cutoff=0, max_size=None,
add_beg_token='<s>', add_end_token='</s>', add_unk_token='<unk>',
to_lower=False):
self.to_lower = to_lower
v_count, total_line, total_word = self.load_data(file_list)
if add_beg_token is not None:
v_count[add_beg_token] = total_line
if add_end_token is not None:
v_count[add_end_token] = total_line
if add_unk_token is not None:
v_count[add_unk_token] = 1
print('[%s.%s] vocab_size=' % (__name__, self.__class__.__name__), len(v_count))
print('[%s.%s] total_line=' % (__name__, self.__class__.__name__), total_line)
print('[%s.%s] total_word=' % (__name__, self.__class__.__name__), total_word)
# cutoff
v_list = []
ignore_list = [add_beg_token, add_end_token, add_unk_token]
for w, count in v_count.items():
if w in ignore_list:
continue
if count > cutoff:
v_list.append((w, count))
# to handle the words with the same counts
        v_list = sorted(v_list, key=lambda x: x[0])   # first sort by word
        v_list = sorted(v_list, key=lambda x: -x[1])  # then by descending count (stable sort keeps word order for ties)
ignore_dict = dict()
for ignore_token in reversed(ignore_list):
if ignore_token is not None and ignore_token not in ignore_dict:
v_list.insert(0, (ignore_token, v_count[ignore_token]))
ignore_dict[ignore_token] = 0
print('[%s.%s] vocab_size(after_cutoff)=' % (__name__, self.__class__.__name__), len(v_list))
if max_size is not None:
            print('[%s.%s] vocab max_size=' % (__name__, self.__class__.__name__), max_size)
unk_count = sum(x[1] for x in v_list[max_size:])
v_list = v_list[0: max_size]
            # revise the unk count
if add_unk_token is not None:
for i in range(len(v_list)):
if v_list[i][0] == add_unk_token:
v_list[i] = (add_unk_token, v_list[i][1] + unk_count)
break
# create vocab
self.count = list()
self.words = list()
self.word_to_id = dict()
for i, (w, count) in enumerate(v_list):
self.words.append(w)
self.count.append(count)
self.word_to_id[w] = i
return self
def write(self, fname):
with open(fname, 'wt') as f:
f.write('to_lower = %d\n' % int(self.to_lower))
for i in range(len(self.words)):
f.write('{}\t{}\t{}'.format(i, self.words[i], self.count[i]))
if self.word_to_chars:
s = ' '.join('{}'.format(k) for k in self.word_to_chars[i])
f.write('\t{}'.format(s))
f.write('\n')
        # write an extra char vocabulary
if self.chars:
with open(fname + '.chr', 'wt') as f:
f.write('char_beg_id = %d\n' % self.char_beg_id)
f.write('char_end_id = %d\n' % self.char_end_id)
f.write('word_max_len = %d\n' % self.word_max_len)
f.write('id \t char\n')
for i in range(len(self.chars)):
f.write('{}\t{}\n'.format(i, self.chars[i]))
def read(self, fname):
self.words = list()
self.count = list()
self.word_to_id = dict()
self.word_to_chars = list()
with open(fname, 'rt') as f:
self.to_lower = bool(int(f.readline().split()[-1]))
for line in f:
a = line.split()
i = int(a[0])
w = a[1]
n = int(a[2])
self.words.append(w)
self.count.append(n)
self.word_to_id[w] = i
# read word_to_chars
if len(a) > 3:
self.word_to_chars.append([int(k) for k in a[3:]])
if self.word_to_chars:
# read char vocab
self.chars = list()
self.char_to_id = dict()
with open(fname + '.chr', 'rt') as f:
self.char_beg_id = int(f.readline().split()[-1])
self.char_end_id = int(f.readline().split()[-1])
self.word_max_len = int(f.readline().split()[-1])
f.readline()
for line in f:
a = line.split()
i = int(a[0])
c = a[1]
self.chars.append(c)
self.char_to_id[c] = i
return self
def create_chars(self, add_char_beg='<s>', add_char_end='</s>'):
if self.chars:
return
# process the word and split to chars
c_dict = dict()
for w in self.words:
for c in list(w):
c_dict.setdefault(c, 0)
if add_char_beg is not None:
c_dict.setdefault(add_char_beg)
if add_char_end is not None:
c_dict.setdefault(add_char_end)
self.chars = sorted(c_dict.keys())
self.char_to_id = dict([(c, i) for i, c in enumerate(self.chars)])
self.char_beg_id = self.char_to_id[add_char_beg]
self.char_end_id = self.char_to_id[add_char_end]
self.word_to_chars = []
for w in self.words:
chr_ids = [self.char_to_id[c] for c in w]
chr_ids.insert(0, self.char_beg_id)
chr_ids.append(self.char_end_id)
self.word_to_chars.append(chr_ids)
self.word_max_len = max([len(x) for x in self.word_to_chars])
def words_to_ids(self, word_list, unk_token='<unk>'):
id_list = []
for w in word_list:
if self.to_lower:
w = w.lower()
if w in self.word_to_id:
id_list.append(self.word_to_id[w])
elif unk_token is not None and unk_token in self.word_to_id:
id_list.append(self.word_to_id[unk_token])
else:
raise KeyError('[%s.%s] cannot find the word = %s' % (__name__, self.__class__.__name__, w))
return id_list
def ids_to_words(self, id_list):
return [self.words[i] for i in id_list]
def get_size(self):
return len(self.words)
def get_char_size(self):
if not self.chars:
raise TypeError('[Vocab] no char information!!')
return len(self.chars)
def __contains__(self, item):
return item in self.word_to_id
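# Illustrative usage (editorial sketch; the file name is an assumption, not
# taken from this repository):
#   v = Vocab().generate_vocab(['train.txt'], cutoff=0, max_size=10000)
#   ids = v.words_to_ids('the quick brown fox'.split())
#   words = v.ids_to_words(ids)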
class VocabX(Vocab):
def __init__(self, total_level=2, read_level=0):
super().__init__()
self.total_level = total_level
self.read_level = read_level
def load_data(self, file_list):
v_count = dict()
total_line = 0
total_word = 0
for file in file_list:
print('[%s.%s] generate_vocab: ' % (__name__, self.__class__.__name__), file)
cur_line = 0
with open(file, 'rt') as f:
for line in f:
if cur_line % self.total_level == self.read_level:
for w in line.split():
v_count.setdefault(w, 0)
v_count[w] += 1
total_word += 1
total_line += 1
cur_line += 1
return v_count, total_line, total_word
| 36.743802 | 109 | 0.501012 | 1,203 | 8,892 | 3.386534 | 0.102244 | 0.047128 | 0.046637 | 0.029455 | 0.459745 | 0.302896 | 0.249877 | 0.17354 | 0.158321 | 0.129111 | 0 | 0.008026 | 0.383491 | 8,892 | 241 | 110 | 36.896266 | 0.735133 | 0.036887 | 0 | 0.338542 | 0 | 0 | 0.045504 | 0.00301 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067708 | false | 0 | 0.015625 | 0.015625 | 0.145833 | 0.036458 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7983823b3cdf770a3c86d666eba52cc7de43379b | 2,749 | py | Python | dpfinder/searcher/statistics/ratio/ratio_cdf.py | barryZZJ/dp-finder | ddf8e3589110b4b35920b437d605b45dd56291da | [
"MIT"
] | 15 | 2018-10-19T05:48:17.000Z | 2022-02-14T20:34:16.000Z | dpfinder/searcher/statistics/ratio/ratio_cdf.py | barryZZJ/dp-finder | ddf8e3589110b4b35920b437d605b45dd56291da | [
"MIT"
] | 1 | 2020-04-22T22:55:39.000Z | 2020-04-22T22:55:39.000Z | dpfinder/searcher/statistics/ratio/ratio_cdf.py | barryZZJ/dp-finder | ddf8e3589110b4b35920b437d605b45dd56291da | [
"MIT"
] | 9 | 2018-11-13T12:37:55.000Z | 2021-11-22T11:11:52.000Z | # ==BEGIN LICENSE==
#
# MIT License
#
# Copyright (c) 2018 SRI Lab, ETH Zurich
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ==END LICENSE==
import ctypes
import os
from dpfinder.logging import logger
from dpfinder.utils.redirect import redirect_stdout
path = os.path.dirname(__file__)
lib = ctypes.cdll.LoadLibrary(path + '/libratio.so')
joint_gauss_fraction = getattr(lib, "ratio_cdf_extern", None)
joint_gauss_fraction.restype = ctypes.c_double
ratio_pdf_extern = getattr(lib, "ratio_pdf_extern", None)
ratio_pdf_extern.restype = ctypes.c_double
def cdf(lower, upper, mx, my, sx, sy, rho):
lower = ctypes.c_double(lower)
upper = ctypes.c_double(upper)
mx = ctypes.c_double(mx)
my = ctypes.c_double(my)
sx = ctypes.c_double(sx)
sy = ctypes.c_double(sy)
rho = ctypes.c_double(rho)
return joint_gauss_fraction(lower, upper, mx, my, sx, sy, rho)
def pdf(w, mx, my, sx, sy, rho):
w = ctypes.c_double(w)
mx = ctypes.c_double(mx)
my = ctypes.c_double(my)
sx = ctypes.c_double(sx)
sy = ctypes.c_double(sy)
rho = ctypes.c_double(rho)
return ratio_pdf_extern(w, mx, my, sx, sy, rho)
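# Illustrative usage (editorial sketch; the parameter values are assumptions,
# not from the original file): probability that the ratio X/Y of two
# correlated Gaussians falls in [0, 1]:
#   p = cdf(0.0, 1.0, mx=1.0, my=2.0, sx=0.5, sy=0.5, rho=0.1)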
ratio_confidence_interval_C = getattr(lib, "ratio_confidence_interval_extern", None)
ratio_confidence_interval_C.restype = ctypes.c_double
def ratio_confidence_interval(p1, p2, d1, d2, corr, center, confidence, err_goal):
p1 = ctypes.c_double(p1)
p2 = ctypes.c_double(p2)
d1 = ctypes.c_double(d1)
d2 = ctypes.c_double(d2)
corr = ctypes.c_double(corr)
center = ctypes.c_double(center)
confidence = ctypes.c_double(confidence)
err_goal = ctypes.c_double(err_goal)
with redirect_stdout.redirect(output=logger.debug):
ret = ratio_confidence_interval_C(p1, p2, d1, d2, corr, center, confidence, err_goal)
return ret
| 33.938272 | 87 | 0.75773 | 436 | 2,749 | 4.630734 | 0.34633 | 0.08321 | 0.154532 | 0.015849 | 0.182268 | 0.159485 | 0.147598 | 0.126795 | 0.126795 | 0.092125 | 0 | 0.00854 | 0.148054 | 2,749 | 80 | 88 | 34.3625 | 0.853544 | 0.404147 | 0 | 0.243902 | 0 | 0 | 0.047146 | 0.019851 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073171 | false | 0 | 0.097561 | 0 | 0.243902 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
798d2621ee7b1e2db86f8b2f4ca02a4a32de49fa | 398 | py | Python | binary_counting.py | Lioheart/python-samples | de5f82b20fa216178e0084b7693e42df4fcaf883 | [
"Unlicense"
] | null | null | null | binary_counting.py | Lioheart/python-samples | de5f82b20fa216178e0084b7693e42df4fcaf883 | [
"Unlicense"
] | null | null | null | binary_counting.py | Lioheart/python-samples | de5f82b20fa216178e0084b7693e42df4fcaf883 | [
"Unlicense"
] | null | null | null | """
Converts a number to its binary representation and returns the number of ones occurring in it
Example: The binary representation of 1234 is 10011010010, so the function should return 5 in this case
"""
def countBits(n):
    # faster method: use the built-in bin() and count the ones
# return bin(n).count("1")
final = 0
for x in str(bin(n)):
if x == '1':
final += 1
return final
print(countBits(1234)) | 26.533333 | 103 | 0.660804 | 58 | 398 | 4.534483 | 0.758621 | 0.030418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.08 | 0.246231 | 398 | 15 | 104 | 26.533333 | 0.796667 | 0.590452 | 0 | 0 | 0 | 0 | 0.006452 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.285714 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7990913dfb319b60a3a689bb4ec8e33cb489297d | 10,799 | py | Python | mednickdb_pyapi/test_mednickdb_usecases.py | MednickLab/python_module | 818763a70d1058e72ddecfea7e07b88e42b39f3b | [
"MIT"
] | null | null | null | mednickdb_pyapi/test_mednickdb_usecases.py | MednickLab/python_module | 818763a70d1058e72ddecfea7e07b88e42b39f3b | [
"MIT"
] | null | null | null | mednickdb_pyapi/test_mednickdb_usecases.py | MednickLab/python_module | 818763a70d1058e72ddecfea7e07b88e42b39f3b | [
"MIT"
] | 1 | 2018-12-06T21:51:22.000Z | 2018-12-06T21:51:22.000Z | from mednickdb_pyapi.mednickdb_pyapi import MednickAPI
import pytest
import time
import pandas as pd
import pprint
pp = pprint.PrettyPrinter(indent=4)
server_address = 'http://saclab.ss.uci.edu:8000'
file_update_time = 2
data_update_time = 10
data_upload_working = False
def dict_issubset(superset, subset, show_diffs=False):
if show_diffs:
return [item for item in subset.items() if item not in superset.items()]
return all(item in superset.items() for item in subset.items())
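# Illustrative check of dict_issubset (editorial example, not original code):
#   dict_issubset({'a': 1, 'b': 2}, {'a': 1})           # -> True
#   dict_issubset({'a': 1}, {'a': 2}, show_diffs=True)  # -> [('a', 2)]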
def pytest_namespace():
return {'usecase_1_filedata': None}
def test_clear_test_study():
"""
Clear all data and files with the studyid of "TEST". This esentually refreshes the database for new testing.
"""
med_api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234')
fids = med_api.extract_var(med_api.get_files(studyid='TEST'), '_id')
if fids:
for fid in fids:
med_api.delete_file(fid, delete_all_versions=True)
med_api.delete_data_from_single_file(fid)
fids2 = med_api.extract_var(med_api.get_files(studyid='TEST'),'_id')
assert fid not in fids2
assert (fids2 == [])
deleted_fids = med_api.extract_var(med_api.get_deleted_files(),'_id')
assert all([dfid in deleted_fids for dfid in fids])
med_api.delete_data(studyid='TEST')
assert len(med_api.get_data(studyid='TEST', format='nested_dict')) == 0 #TODO after clearing up sourceid bug
@pytest.mark.dependency(['test_clear_test_study'])
def test_usecase_1():
"""runs usecase one from the mednickdb_usecase document (fid=)"""
#a)
med_api = MednickAPI(server_address, 'test_ra_account@uci.edu', 'pass1234')
file_info_post = {
'fileformat':'eeg',
'studyid':'TEST',
'versionid':1,
'subjectid':1,
'visitid':1,
'sessionid':1,
'filetype':'sleep_eeg',
}
file_data_real = file_info_post.copy()
with open('testfiles/sleepfile1.edf','rb') as sleepfile:
file_info_returned = med_api.upload_file(fileobject=sleepfile, **file_info_post)
with open('testfiles/sleepfile1.edf', 'rb') as sleepfile:
downloaded_sleepfile = med_api.download_file(file_info_returned['_id'])
assert (downloaded_sleepfile == sleepfile.read())
# b)
    time.sleep(file_update_time)  # give the db time to update
file_info_get = med_api.get_file_by_fid(file_info_returned['_id'])
file_info_post.update({'filename': 'sleepfile1.edf', 'filedir': 'uploads/TEST/1/1/1/1/sleep_eeg/'})
assert dict_issubset(file_info_get, file_info_post)
    time.sleep(data_update_time-file_update_time)  # give the data db time to update
file_datas = med_api.get_data_from_single_file(filetype='sleep_eeg', fid=file_info_returned['_id'], format='flat_dict')
file_data_real.pop('fileformat')
file_data_real.pop('filetype')
file_data_real.update({'sleep_eeg.eeg_nchan': 3, 'sleep_eeg.eeg_sfreq':128, 'sleep_eeg.eeg_meas_date':1041380737000, 'sleep_eeg.eeg_ch_names': ['C3A2', 'C4A1', 'ECG']}) # add actual data in file. # TODO add all
pytest.usecase_1_filedata = file_data_real
pytest.usecase_1_filename_version = file_info_get['filename_version']
assert(any([dict_issubset(file_data, file_data_real) for file_data in file_datas])), "Is pyparse running? (and working)"
@pytest.mark.dependency(['test_usecase_1'])
def test_usecase_2():
# a)
file_info_post = {'filetype':'demographics',
'fileformat':'tabular',
'studyid':'TEST',
'versionid':1}
med_api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234')
with open('testfiles/TEST_Demographics.xlsx', 'rb') as demofile:
# b)
file_info = med_api.upload_file(fileobject=demofile, **file_info_post)
fid = file_info['_id']
downloaded_demo = med_api.download_file(fid)
with open('testfiles/TEST_Demographics.xlsx', 'rb') as demofile:
assert downloaded_demo == demofile.read()
# c)
    time.sleep(file_update_time)  # give the file db time to update
file_info_post.update({'filename': 'TEST_Demographics.xlsx', 'filedir': 'uploads/TEST/1/demographics/'})
file_info_get = med_api.get_file_by_fid(fid)
assert dict_issubset(file_info_get, file_info_post)
# d)
    time.sleep(data_update_time-file_update_time)  # give the data db time to update
data_rows = med_api.get_data(studyid='TEST', versionid=1, format='flat_dict')
correct_row1 = {'studyid': 'TEST', 'versionid': 1, 'subjectid': 1,
'demographics.age': 23, 'demographics.sex': 'F', 'demographics.bmi': 23}
correct_row1.update(pytest.usecase_1_filedata)
correct_row2 = {'studyid': 'TEST', 'versionid': 1, 'subjectid': 2,
'demographics.age': 19, 'demographics.sex': 'M', 'demographics.bmi': 20}
correct_rows = [correct_row1, correct_row2]
pytest.usecase_2_row1 = correct_row1
pytest.usecase_2_row2 = correct_row2
pytest.usecase_2_filename_version = file_info_get['filename_version']
for correct_row in correct_rows:
assert any([dict_issubset(data_row, correct_row) for data_row in data_rows]), "demographics data downloaded does not match expected"
# e)
data_sleep_eeg = med_api.get_data(studyid='TEST', versionid=1, filetype='sleep_eeg')[0] #FIXME will fail here until filetype is query-able
assert dict_issubset(data_sleep_eeg, pytest.usecase_1_filedata), "sleep data downloaded does not match what was uploaded in usecase 1"
@pytest.mark.dependency(['test_usecase_2'])
def test_usecase_3():
# a)
med_api = MednickAPI(server_address, 'test_ra_account@uci.edu', 'Pass1234')
fid_for_manual_upload = med_api.extract_var(med_api.get_files(studyid='TEST'), '_id')[0] # get a random fid
data_post = {'studyid': 'TEST',
'filetype': 'memtesta',
'data': {'accuracy': 0.9},
'versionid': 1,
'subjectid': 2,
'visitid': 1,
'sessionid': 1}
med_api.upload_data(**data_post, fid=fid_for_manual_upload)
# b)
time.sleep(5) # Give db 5 seconds to update
correct_filename_versions = [pytest.usecase_1_filename_version, pytest.usecase_2_filename_version]
filename_versions = med_api.extract_var(med_api.get_files(studyid='TEST', versionid=1), 'filename_version')
assert all([fid in correct_filename_versions for fid in filename_versions]), "Missing expected filename versions from two previous usecases"
# c)
time.sleep(5) # Give db 5 seconds to update
data_rows = med_api.get_data(studyid='TEST', versionid=1, format='flat_dict')
correct_row_2 = pytest.usecase_2_row2.copy()
correct_row_2.update({'memtesta.accuracy': 0.9, 'visitid': 1})
pytest.usecase_3_row2 = correct_row_2
correct_rows = [pytest.usecase_2_row1, correct_row_2]
for correct_row in correct_rows:
assert any([dict_issubset(data_row, correct_row) for data_row in data_rows])
@pytest.mark.dependency(['test_usecase_3'])
def test_usecase_4():
# a)
med_api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234')
# b) uploading some scorefiles
file_info1_post = {
'fileformat':'sleep_scoring',
'studyid':'TEST',
'versionid':1,
'subjectid':2,
'visitid':1,
'sessionid':1,
'filetype':'sleep_scoring'
}
with open('testfiles/scorefile1.mat', 'rb') as scorefile1:
fid1 = med_api.upload_file(scorefile1,
**file_info1_post)
file_info2_post = file_info1_post.copy()
file_info2_post.update({'visitid':2})
with open('testfiles/scorefile2.mat', 'rb') as scorefile2:
fid2 = med_api.upload_file(scorefile2,
**file_info2_post)
scorefile1_data = {'sleep_scoring.epochstage': [-1, -1, -1, 0, 0, 0, 0, 0, 0, 0],
'sleep_scoring.epochoffset': [0, 30, 60, 90, 120, 150, 180, 210, 240, 270],
'sleep_scoring.starttime': 1451635302000, 'sleep_scoring.mins_in_0': 3.5, 'sleep_scoring.mins_in_1': 0,
'sleep_scoring.mins_in_2': 0, 'sleep_scoring.mins_in_3': 0, 'sleep_scoring.mins_in_4': 0,
'sleep_scoring.sleep_efficiency': 0, 'sleep_scoring.total_sleep_time': 0}
scorefile2_data = {'sleep_scoring.epochstage': [0, 0, 1, 1, 2, 2, 3, 3, 2, 2],
'sleep_scoring.epochoffset': [0, 30, 60, 90, 120, 150, 180, 210, 240, 270],
'sleep_scoring.starttime': 1451635302000, 'sleep_scoring.mins_in_0': 1, 'sleep_scoring.mins_in_1': 1,
'sleep_scoring.mins_in_2': 2, 'sleep_scoring.mins_in_3': 1, 'sleep_scoring.mins_in_4': 0,
'sleep_scoring.sleep_efficiency': 0.8, 'sleep_scoring.total_sleep_time': 4}
# c)
    time.sleep(data_update_time)  # give the data db time to update
data_rows = med_api.get_data(studyid='TEST', versionid=1, format='flat_dict')
correct_row_1 = pytest.usecase_2_row1.copy()
scorefile1_data.update(pytest.usecase_3_row2)
correct_row_2 = scorefile1_data
scorefile2_data.update(pytest.usecase_2_row2)
correct_row_3 = scorefile2_data
correct_rows = [correct_row_1, correct_row_2, correct_row_3]
for correct_row in correct_rows:
assert any([dict_issubset(data_row, correct_row) for data_row in data_rows])
pytest.usecase_4_row1 = correct_row_1
pytest.usecase_4_row2 = correct_row_2
pytest.usecase_4_row3 = correct_row_3
@pytest.mark.dependency(['test_usecase_4'])
def test_usecase_5():
# a)
med_api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234')
data_rows = med_api.get_data(query='studyid=TEST and data.memtesta.accuracy>=0.9', format='flat_dict')
assert any([dict_issubset(data_row, pytest.usecase_3_row2) for data_row in data_rows])
def test_get_specifiers():
med_api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234')
sids = med_api.get_unique_var_values('studyid', store='data')
assert 'TEST' in sids
vids = med_api.get_unique_var_values('versionid', studyid='TEST', store='data')
assert vids == [1]
sids = med_api.get_unique_var_values('subjectid', studyid='TEST', store='data')
assert sids == [1, 2]
vids = med_api.get_unique_var_values('visitid', studyid='TEST', store='data')
assert vids == [1, 2]
sids = med_api.get_unique_var_values('sessionid', studyid='TEST', store='data')
assert sids == [1]
filetypes = med_api.get_unique_var_values('filetype', studyid='TEST', store='data')
assert set(filetypes) == {'sleep_eeg', 'sleep_scoring', 'demographics', 'memtesta'}
| 43.720648 | 215 | 0.682471 | 1,506 | 10,799 | 4.580345 | 0.158035 | 0.036532 | 0.026095 | 0.030444 | 0.530444 | 0.403015 | 0.358365 | 0.296028 | 0.26283 | 0.212525 | 0 | 0.037883 | 0.193351 | 10,799 | 246 | 216 | 43.898374 | 0.753989 | 0.054079 | 0 | 0.224719 | 0 | 0 | 0.227849 | 0.09824 | 0 | 0 | 0 | 0.004065 | 0.117978 | 1 | 0.050562 | false | 0.039326 | 0.02809 | 0.005618 | 0.095506 | 0.011236 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79941e8098ddb85ae7adb492cd1a81eb3262f856 | 4,078 | py | Python | sslcommerz_sdk/store.py | monim67/sslcommerz-sdk | 77219fc90ab12222df2c03abc95c8d2b19768eeb | [
"MIT"
] | 6 | 2021-01-15T13:31:37.000Z | 2021-12-06T13:44:39.000Z | sslcommerz_sdk/store.py | monim67/sslcommerz-sdk | 77219fc90ab12222df2c03abc95c8d2b19768eeb | [
"MIT"
] | null | null | null | sslcommerz_sdk/store.py | monim67/sslcommerz-sdk | 77219fc90ab12222df2c03abc95c8d2b19768eeb | [
"MIT"
] | null | null | null | import requests
from .enums import TransactionStatus
from .exceptions import InvalidPaymentException, SslcommerzAPIException
from .services import PayloadSchema, is_verify_sign_valid
DEFAULT_CONFIG = {
"base_url": "https://sandbox.sslcommerz.com",
"session_url": "/gwprocess/v4/api.php",
"validation_url": "/validator/api/validationserverAPI.php",
"transaction_url": "/validator/api/merchantTransIDvalidationAPI.php",
}
class SslcommerzStore:
def __init__(self, store_id, store_passwd, **kwargs):
self.id = store_id
self.credentials = dict(store_id=store_id, store_passwd=store_passwd)
self.config = {**DEFAULT_CONFIG, **kwargs}
def request(self, method, url, **kwargs):
url = self.config["base_url"] + url
return requests.request(method, url, **kwargs)
def create_session(self, **kwargs):
response = self.request(
method="POST",
url=self.config["session_url"],
data={**self.credentials, **kwargs},
)
if response.status_code != 200:
raise SslcommerzAPIException(
f"Unexpected status code: {response.status_code}"
)
response_json = response.json()
if response_json["status"] != "SUCCESS":
raise SslcommerzAPIException(f"Error: {response_json['failedreason']}")
return response_json
def validate_ipn_payload(self, payload):
try:
if not is_verify_sign_valid(
store_passwd=self.credentials["store_passwd"],
payload=payload["original"],
):
raise InvalidPaymentException("verify_sign mismatch")
if payload["status"] == TransactionStatus.VALID:
validation_response = self.validate_transaction(payload["val_id"])
if validation_response["status"] not in (
TransactionStatus.VALID,
TransactionStatus.VALIDATED,
):
raise InvalidPaymentException(
f"Payment status: {validation_response['status']}"
)
return PayloadSchema().load(validation_response)
except KeyError as key:
raise InvalidPaymentException(f"{key} is missing in payload") from key
def validate_transaction(self, val_id):
response = self.request(
method="GET",
url=self.config["validation_url"],
params=dict(**self.credentials, val_id=val_id, format="json"),
)
if response.status_code != 200:
raise SslcommerzAPIException(
f"Unexpected status code: {response.status_code}"
)
return response.json()
def query_transaction_by_sessionkey(self, sessionkey):
response = self.request(
method="GET",
url=self.config["transaction_url"],
params=dict(**self.credentials, sessionkey=sessionkey, format="json"),
)
return response.json()
def query_transaction_by_tran_id(self, tran_id):
response = self.request(
method="GET",
url=self.config["transaction_url"],
params=dict(**self.credentials, tran_id=tran_id, format="json"),
)
return response.json()
def init_refund(self, bank_tran_id, refund_amount, refund_remarks):
response = self.request(
method="GET",
url=self.config["transaction_url"],
params=dict(
**self.credentials,
bank_tran_id=bank_tran_id,
refund_amount=refund_amount,
refund_remarks=refund_remarks,
format="json",
),
)
return response.json()
def query_refund_status(self, refund_ref_id):
response = self.request(
method="GET",
url=self.config["transaction_url"],
params=dict(**self.credentials, refund_ref_id=refund_ref_id, format="json"),
)
return response.json()
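# Illustrative usage (a minimal sketch; the session parameters shown follow
# the SSLCommerz session API and are assumptions, not taken from this file):
#   store = SslcommerzStore('my_store_id', 'my_store_passwd')
#   session = store.create_session(total_amount=100, currency='BDT',
#                                  tran_id='order-0001', success_url='...',
#                                  fail_url='...', cancel_url='...')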
| 37.072727 | 88 | 0.601766 | 402 | 4,078 | 5.893035 | 0.223881 | 0.050654 | 0.038413 | 0.063318 | 0.344449 | 0.33263 | 0.282398 | 0.230055 | 0.230055 | 0.211904 | 0 | 0.002424 | 0.29181 | 4,078 | 109 | 89 | 37.412844 | 0.817867 | 0 | 0 | 0.291667 | 0 | 0 | 0.144924 | 0.051986 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0.03125 | 0.041667 | 0 | 0.229167 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7994462d7302f56ae3248adc4074cec0dff871a2 | 1,636 | py | Python | nobrace/converter.py | iblis17/nobrace | 7333029c6cd5f2a885614b5fe64f6c85ee5f296d | [
"MIT"
] | 2 | 2015-07-13T09:08:53.000Z | 2017-05-22T07:56:29.000Z | nobrace/converter.py | iblis17/nobrace | 7333029c6cd5f2a885614b5fe64f6c85ee5f296d | [
"MIT"
] | null | null | null | nobrace/converter.py | iblis17/nobrace | 7333029c6cd5f2a885614b5fe64f6c85ee5f296d | [
"MIT"
] | null | null | null | import abc
import re
from .exceptions import FileSuffixError
from .stack import LineCounter, IndentStack
class ConverterBase(metaclass=abc.ABCMeta):
def __init__(self, src: str):
self.src = src
        self.code_blocks  # accessing the property runs the brace conversion
@property
def code_blocks(self):
'''
Aggregate code block into tuple.
        A code block could be determined by indentation.
'''
indent_stack = IndentStack([''])
blankline_stack = LineCounter()
def complete_brace(indent, cur_indent):
if indent == cur_indent:
print('\n' * blankline_stack.pop(cur_indent, 0))
return
if len(indent) > len(cur_indent):
print(indent_stack.push(indent))
elif len(indent) < len(cur_indent):
print(indent_stack.pop())
else:
print('\n' * blankline_stack.pop(cur_indent, 0))
try:
complete_brace(indent, indent_stack.top)
except IndexError:
return
for line in self.src.split('\n'):
indent_match = re.match('^([ \t]+)[\S]+', line)
cur_indent = indent_stack[-1]
if indent_match:
indent = indent_match.group(1)
complete_brace(indent, cur_indent)
print(line, sep=', ')
else:
indent = None
if cur_indent:
blankline_stack.push(cur_indent)
else:
print(line)
del line
# handle eol
print('{}}}'.format(indent_stack[-2]))
| 27.728814 | 64 | 0.525672 | 170 | 1,636 | 4.870588 | 0.388235 | 0.108696 | 0.067633 | 0.05314 | 0.236715 | 0.169082 | 0.169082 | 0.169082 | 0 | 0 | 0 | 0.004888 | 0.374694 | 1,636 | 58 | 65 | 28.206897 | 0.804497 | 0.056846 | 0 | 0.170732 | 0 | 0 | 0.017207 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073171 | false | 0 | 0.097561 | 0 | 0.243902 | 0.170732 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7994c8744e38290defb02afb422aa332b6ee9f48 | 541 | py | Python | test/cv2countours.py | taiwc/raspwww | 835befaa9255fe53b7ce97b50f9d825191979eae | [
"Apache-2.0"
] | null | null | null | test/cv2countours.py | taiwc/raspwww | 835befaa9255fe53b7ce97b50f9d825191979eae | [
"Apache-2.0"
] | null | null | null | test/cv2countours.py | taiwc/raspwww | 835befaa9255fe53b7ce97b50f9d825191979eae | [
"Apache-2.0"
] | null | null | null | # import the necessary packages
import numpy as np
import argparse
import cv2
im = cv2.imread('/var/www/test/test.jpg')
cv2.imshow("im", im)
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(imgray,127,255,0)
cv2.imshow("Thresh", thresh)
(cnts, _) = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
#(cnts, _) = cv2.findContours(im.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(im,cnts,-1,(0,255,0),3)
cv2.drawContours(im,cnts,-1,(0,255,0),-1)
cv2.imshow("Image",im)
cv2.waitKey(0)
| 31.823529 | 83 | 0.744917 | 91 | 541 | 4.32967 | 0.43956 | 0.038071 | 0.096447 | 0.101523 | 0.137056 | 0.137056 | 0.137056 | 0.137056 | 0 | 0 | 0 | 0.07984 | 0.073937 | 541 | 16 | 84 | 33.8125 | 0.706587 | 0.205176 | 0 | 0 | 0 | 0 | 0.081967 | 0.051522 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79954f94e96d2ea0202820bdc5e93050e74cbb64 | 810 | py | Python | migrations/versions/schema/783682226c9b_.py | Georgi2704/pricelist-fastapi-boilerplate | 24b88e1f5c28b7eaff50745cd4464caac6de01e6 | [
"Apache-2.0"
] | null | null | null | migrations/versions/schema/783682226c9b_.py | Georgi2704/pricelist-fastapi-boilerplate | 24b88e1f5c28b7eaff50745cd4464caac6de01e6 | [
"Apache-2.0"
] | 2 | 2021-11-11T15:19:30.000Z | 2022-02-07T22:52:07.000Z | migrations/versions/schema/783682226c9b_.py | Georgi2704/pricelist-fastapi | 24b88e1f5c28b7eaff50745cd4464caac6de01e6 | [
"Apache-2.0"
] | null | null | null | """empty message
Revision ID: 783682226c9b
Revises: b882b9ab026c
Create Date: 2019-10-19 10:07:14.923441
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "783682226c9b"
down_revision = "b882b9ab026c"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"prices", "internal_product_id", existing_type=sa.INTEGER(), type_=sa.String(), existing_nullable=True
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"prices", "internal_product_id", existing_type=sa.String(), type_=sa.INTEGER(), existing_nullable=True
)
# ### end Alembic commands ###
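# Editorial note (not part of the original migration): migrations like this
# are applied with the Alembic CLI, e.g. `alembic upgrade head`.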
| 25.3125 | 110 | 0.693827 | 98 | 810 | 5.581633 | 0.520408 | 0.043876 | 0.076782 | 0.084095 | 0.47532 | 0.47532 | 0.33638 | 0.33638 | 0.33638 | 0.33638 | 0 | 0.081203 | 0.179012 | 810 | 31 | 111 | 26.129032 | 0.741353 | 0.364198 | 0 | 0.142857 | 0 | 0 | 0.154812 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
799b0a8db84df97948487d6c7aed90ab475e0d53 | 1,212 | py | Python | simulations/yml_to_df.py | danibachar/Kube-Load-Balancing | 8b9ea68ddbb46cc730a02ffe30cc68b3d65ca491 | [
"MIT"
] | null | null | null | simulations/yml_to_df.py | danibachar/Kube-Load-Balancing | 8b9ea68ddbb46cc730a02ffe30cc68b3d65ca491 | [
"MIT"
] | null | null | null | simulations/yml_to_df.py | danibachar/Kube-Load-Balancing | 8b9ea68ddbb46cc730a02ffe30cc68b3d65ca491 | [
"MIT"
] | null | null | null | import argparse
import pandas as pd
from config_builder import build_config
from utils.helpers import load_ymal
def app_dep_graph(yml):
nodes = []
source = []
target = []
print(yml)
for svc_name, service in yml["services"].items():
print(service)
nodes.append(svc_name)
for dep in service["dependencies"].values():
source.append(svc_name)
target.append(dep["name"])
edges = pd.DataFrame({'source': source,
'target': target, })
return edges
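# Illustrative usage (editorial sketch; the YAML path is an assumption):
#   yml = load_ymal('yamls/apps/example.yml')
#   edges = app_dep_graph(yml)  # DataFrame of (source, target) dependency edges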
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run Kuberentes simulation')
parser.add_argument(
'--config_file_name',
type=str,
default="yamls/configurations/simple_run.yml",
help='A configuration file that describe the test'
)
args = parser.parse_args()
config_file_name = args.config_file_name
config = build_config(config_file_name)
apps = config["simulation_ymals"]["apps"]
for app_file in apps:
app_name = app_file.split("/")[-1].split(".")[0]
yml = load_ymal(app_file)
graph = app_dep_graph(yml)
graph.to_csv("{}.csv".format(app_name))
| 28.857143 | 77 | 0.632013 | 149 | 1,212 | 4.885906 | 0.463087 | 0.054945 | 0.076923 | 0.038462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002191 | 0.2467 | 1,212 | 41 | 78 | 29.560976 | 0.795181 | 0 | 0 | 0 | 0 | 0 | 0.159241 | 0.028878 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.114286 | 0 | 0.171429 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
799c75fdf25b724115e60255f1f09d6eec4d851d | 4,187 | py | Python | ckanext/ckanext-sixodp_scheming/ckanext/sixodp_scheming/helpers.py | Tampere/sixodp-tampere | 975105a5e20e97c54dd7c84c761f91cca1316842 | [
"MIT"
] | 8 | 2016-10-14T14:32:00.000Z | 2022-01-14T16:04:07.000Z | ckanext/ckanext-sixodp_scheming/ckanext/sixodp_scheming/helpers.py | Tampere/sixodp-tampere | 975105a5e20e97c54dd7c84c761f91cca1316842 | [
"MIT"
] | 42 | 2016-11-11T12:24:54.000Z | 2021-07-12T03:29:18.000Z | ckanext/ckanext-sixodp_scheming/ckanext/sixodp_scheming/helpers.py | Tampere/sixodp-tampere | 975105a5e20e97c54dd7c84c761f91cca1316842 | [
"MIT"
] | 7 | 2017-03-13T09:21:08.000Z | 2018-01-08T06:40:22.000Z | from ckan.plugins import toolkit
from ckan.lib.i18n import get_lang
import ckan.lib.i18n as i18n
from ckan.common import config, c
import ckan.logic as logic
import ckan.lib.base as base
import ckan.model as model
from ckan.model.package import Package
from ckan.lib.dictization.model_dictize import group_list_dictize
import logging
get_action = toolkit.get_action
NotFound = logic.NotFound
abort = base.abort
log = logging.getLogger(__name__)
def call_toolkit_function(fn, args, kwargs):
return getattr(toolkit,fn)(*args, **kwargs)
def add_locale_to_source(kwargs, locale):
    copy = kwargs.copy()
    source = copy.get('data-module-source', None)
    if source:
        copy.update({'data-module-source': source + '_' + locale})
    return copy
def get_current_lang():
return get_lang()
def scheming_field_only_default_required(field, lang):
if field and field.get('only_default_lang_required') and lang == config.get('ckan.locale_default', 'en'):
return True
return False
def get_current_date():
import datetime
return datetime.date.today().strftime("%d.%m.%Y")
def get_package_groups_by_type(package_id, group_type):
context = {'model': model, 'session': model.Session,
'for_view': True, 'use_cache': False}
group_list = []
data_dict = {
'all_fields': True,
'include_extras': True,
'type': group_type
}
groups = logic.get_action('group_list')(context, data_dict)
try:
pkg_obj = Package.get(package_id)
pkg_group_ids = set(group['id'] for group in group_list_dictize(pkg_obj.get_groups(group_type, None), context))
group_list = [group
for group in groups if
group['id'] in pkg_group_ids]
    except NotFound:
        abort(404, toolkit._('Dataset not found'))
return group_list
_LOCALE_ALIASES = {'en_GB': 'en'}
def get_lang_prefix():
language = i18n.get_lang()
if language in _LOCALE_ALIASES:
language = _LOCALE_ALIASES[language]
return language
def get_translated_or_default_locale(data_dict, field):
language = i18n.get_lang()
if language in _LOCALE_ALIASES:
language = _LOCALE_ALIASES[language]
try:
value = data_dict[field+'_translated'][language]
if value:
return value
else:
return data_dict[field+'_translated'][config.get('ckan.locale_default', 'en')]
except KeyError:
return data_dict.get(field, '')
def show_qa():
from ckan.plugins import plugin_loaded
if plugin_loaded('qa'):
return True
return False
def scheming_category_list(args):
from ckan.logic import NotFound
# FIXME: sometimes this might return 0 categories if in development
try:
context = {'model': model, 'session': model.Session, 'ignore_auth': True}
group_ids = get_action('group_list')(context, {})
except NotFound:
return None
else:
category_list = []
# filter groups to those user is allowed to edit
group_authz = get_action('group_list_authz')({
'model': model, 'session': model.Session, 'user': c.user
}, {})
user_group_ids = set(group[u'name'] for group in group_authz)
group_ids = [group for group in group_ids if group in user_group_ids]
for group in group_ids:
try:
context = {'model': model, 'session': model.Session, 'ignore_auth': True}
group_details = get_action('group_show')(context, {'id': group})
except Exception as e:
log.error(e)
return None
category_list.append({
"value": group,
"label": group_details.get('title')
})
return category_list
def check_group_selected(val, data):
log.info(val)
log.info(data)
    # filter() returns an always-truthy iterator in Python 3, so use any() instead
    if any(group['name'] == val for group in data):
        return True
    return False
def get_field_from_schema(schema, field_name):
field = next(field for field in schema.get('dataset_fields', []) if field.get('field_name') == field_name)
return field
| 27.366013 | 119 | 0.64557 | 545 | 4,187 | 4.733945 | 0.247706 | 0.027907 | 0.01938 | 0.034109 | 0.212016 | 0.15814 | 0.10155 | 0.10155 | 0.10155 | 0.10155 | 0 | 0.004453 | 0.249104 | 4,187 | 152 | 120 | 27.546053 | 0.816158 | 0.026749 | 0 | 0.220183 | 0 | 0 | 0.093075 | 0.006385 | 0 | 0 | 0 | 0.006579 | 0 | 1 | 0.110092 | false | 0 | 0.119266 | 0.018349 | 0.412844 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79a1df3d282563da1e1751edcf4f307e4b4ca364 | 8,794 | py | Python | notebooks/experiments.py | yashasvi-ranawat/viabel | 99245b7f3db8ea9dc55d6130bd5672e1adc62b63 | [
"MIT"
] | 1 | 2022-02-15T23:43:22.000Z | 2022-02-15T23:43:22.000Z | notebooks/experiments.py | yashasvi-ranawat/viabel | 99245b7f3db8ea9dc55d6130bd5672e1adc62b63 | [
"MIT"
] | null | null | null | notebooks/experiments.py | yashasvi-ranawat/viabel | 99245b7f3db8ea9dc55d6130bd5672e1adc62b63 | [
"MIT"
] | 3 | 2020-03-21T12:45:22.000Z | 2020-10-06T18:30:47.000Z | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from viabel import all_bounds
from viabel.vb import black_box_klvi, black_box_chivi, adagrad_optimize
from utils import Timer
from psis import psislw
## Display bounds information ##
def print_bounds(results):
print('Bounds on...')
print(' 2-Wasserstein {:.3g}'.format(results['W2']))
print(' 2-divergence {:.3g}'.format(results['d2']))
print(' mean error {:.3g}'.format(results['mean_error']))
print(' stdev error {:.3g}'.format(results['std_error']))
print(' sqrt cov error {:.3g}'.format(np.sqrt(results['cov_error'])))
print(' cov error {:.3g}'.format(results['cov_error']))
## Check approximation accuracy ##
def check_accuracy(true_mean, true_cov, approx_mean, approx_cov, verbose=False,
method=None):
true_std = np.sqrt(np.diag(true_cov))
approx_std = np.sqrt(np.diag(approx_cov))
results = dict(mean_error=np.linalg.norm(true_mean - approx_mean),
cov_error_2=np.linalg.norm(true_cov - approx_cov, ord=2),
cov_norm_2=np.linalg.norm(true_cov, ord=2),
cov_error_nuc=np.linalg.norm(true_cov - approx_cov, ord='nuc'),
cov_norm_nuc=np.linalg.norm(true_cov, ord='nuc'),
std_error=np.linalg.norm(true_std - approx_std),
rel_std_error=np.linalg.norm(approx_std/true_std - 1),
)
if method is not None:
results['method'] = method
if verbose:
print('mean =', approx_mean)
print('stdevs =', approx_std)
print()
print('mean error = {:.3g}'.format(results['mean_error']))
print('stdev error = {:.3g}'.format(results['std_error']))
print('||cov error||_2^{{1/2}} = {:.3g}'.format(np.sqrt(results['cov_error_2'])))
print('||true cov||_2^{{1/2}} = {:.3g}'.format(np.sqrt(results['cov_norm_2'])))
return results
def check_approx_accuracy(var_family, var_param, true_mean, true_cov,
verbose=False, name=None):
return check_accuracy(true_mean, true_cov,
*var_family.mean_and_cov(var_param),
verbose, name)
## Convenience functions and PSIS ##
def get_samples_and_log_weights(logdensity, var_family, var_param, n_samples):
samples = var_family.sample(var_param, n_samples)
log_weights = logdensity(samples) - var_family.logdensity(samples, var_param)
return samples, log_weights
def psis_correction(logdensity, var_family, var_param, n_samples):
samples, log_weights = get_samples_and_log_weights(logdensity, var_family,
var_param, n_samples)
smoothed_log_weights, khat = psislw(log_weights)
return samples.T, smoothed_log_weights, khat
def improve_with_psis(logdensity, var_family, var_param, n_samples,
true_mean, true_cov, transform=None, verbose=False):
samples, slw, khat = psis_correction(logdensity, var_family,
var_param, n_samples)
if verbose:
print('khat = {:.3g}'.format(khat))
print()
if transform is not None:
samples = transform(samples)
slw -= np.max(slw)
wts = np.exp(slw)
wts /= np.sum(wts)
approx_mean = np.sum(wts[np.newaxis,:]*samples, axis=1)
approx_cov = np.cov(samples, aweights=wts, ddof=0)
res = check_accuracy(true_mean, true_cov, approx_mean, approx_cov, verbose)
res['khat'] = khat
return res, approx_mean, approx_cov
## Plotting ##
def plot_approx_and_exact_contours(logdensity, var_family, var_param,
xlim=[-10,10], ylim=[-3, 3],
cmap2='Reds', savepath=None):
xlist = np.linspace(*xlim, 100)
ylist = np.linspace(*ylim, 100)
X, Y = np.meshgrid(xlist, ylist)
XY = np.concatenate([np.atleast_2d(X.ravel()), np.atleast_2d(Y.ravel())]).T
zs = np.exp(logdensity(XY))
Z = zs.reshape(X.shape)
zsapprox = np.exp(var_family.logdensity(XY, var_param))
Zapprox = zsapprox.reshape(X.shape)
plt.contour(X, Y, Z, cmap='Greys', linestyles='solid')
plt.contour(X, Y, Zapprox, cmap=cmap2, linestyles='solid')
if savepath is not None:
plt.savefig(savepath, bbox_inches='tight')
plt.show()
def plot_history(history, B=None, ylabel=None):
if B is None:
B = min(500, history.size//10)
window = np.ones(B)/B
smoothed_history = np.convolve(history, window, 'valid')
plt.plot(smoothed_history)
yscale = 'log' if np.all(smoothed_history > 0) else 'linear'
plt.yscale(yscale)
if ylabel is not None:
plt.ylabel(ylabel)
plt.xlabel('iteration')
plt.show()
def plot_dist_to_opt_param(var_param_history, opt_param):
plt.plot(np.linalg.norm(var_param_history - opt_param[np.newaxis,:], axis=1))
plt.title('iteration vs distance to optimal parameter')
plt.xlabel('iteration')
plt.ylabel('distance')
sns.despine()
plt.show()
## Run experiment with both KLVI and CHIVI ##
def _optimize_and_check_results(logdensity, var_family, objective_and_grad,
init_var_param, true_mean, true_cov,
plot_contours, ylabel, contour_kws=dict(),
elbo=None, n_iters=5000,
bound_w2=True, verbose=False, use_psis=True,
n_psis_samples=1000000, **kwargs):
opt_param, var_param_history, value_history, _ = \
adagrad_optimize(n_iters, objective_and_grad, init_var_param, **kwargs)
plot_dist_to_opt_param(var_param_history, opt_param)
accuracy_results = check_approx_accuracy(var_family, opt_param,
                                             true_mean, true_cov, verbose)
other_results = dict(opt_param=opt_param,
var_param_history=var_param_history,
value_history=value_history)
if bound_w2 not in [False, None]:
if bound_w2 is True:
n_samples = 1000000
else:
n_samples = bound_w2
print()
with Timer('Computing CUBO and ELBO with {} samples'.format(n_samples)):
_, log_weights = get_samples_and_log_weights(
logdensity, var_family, opt_param, n_samples)
var_dist_cov = var_family.mean_and_cov(opt_param)[1]
moment_bound_fn = lambda p: var_family.pth_moment(p, opt_param)
other_results.update(all_bounds(log_weights,
q_var=var_dist_cov,
moment_bound_fn=moment_bound_fn,
log_norm_bound=elbo))
if verbose:
print()
print_bounds(other_results)
if plot_contours:
plot_approx_and_exact_contours(logdensity, var_family, opt_param,
**contour_kws)
if use_psis:
print()
print('Results with PSIS correction')
print('----------------------------')
other_results['psis_results'], _, _ = \
improve_with_psis(logdensity, var_family, opt_param, n_psis_samples,
true_mean, true_cov, verbose=verbose)
return accuracy_results, other_results
def run_experiment(logdensity, var_family, init_param, true_mean, true_cov,
kl_n_samples=100, chivi_n_samples=500,
alpha=2, **kwargs):
klvi = black_box_klvi(var_family, logdensity, kl_n_samples)
chivi = black_box_chivi(alpha, var_family, logdensity, chivi_n_samples)
dim = true_mean.size
plot_contours = dim == 2
if plot_contours:
plot_approx_and_exact_contours(logdensity, var_family, init_param,
**kwargs.get('contour_kws', dict()))
print('|--------------|')
print('| KLVI |')
print('|--------------|', flush=True)
kl_results, other_kl_results = _optimize_and_check_results(
logdensity, var_family, klvi, init_param,
true_mean, true_cov, plot_contours, '-ELBO', **kwargs)
kl_results['method'] = 'KLVI'
print()
print('|---------------|')
print('| CHIVI |')
print('|---------------|', flush=True)
elbo = other_kl_results['log_norm_bound']
chivi_results, other_chivi_results = _optimize_and_check_results(
logdensity, var_family, chivi, init_param, true_mean, true_cov,
plot_contours, 'CUBO', elbo=elbo, **kwargs)
chivi_results['method'] = 'CHIVI'
return klvi, chivi, kl_results, chivi_results, other_kl_results, other_chivi_results
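# Illustrative call (editorial sketch; `var_family` must be a viabel
# variational family and `logdensity` a vectorized log-density -- the names
# here are assumptions):
#   results = run_experiment(logdensity, var_family, init_param,
#                            true_mean, true_cov, n_iters=5000)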
| 41.677725 | 90 | 0.61053 | 1,107 | 8,794 | 4.558266 | 0.171635 | 0.042806 | 0.052715 | 0.032699 | 0.378121 | 0.309354 | 0.245937 | 0.217598 | 0.161316 | 0.131193 | 0 | 0.013033 | 0.267114 | 8,794 | 210 | 91 | 41.87619 | 0.769899 | 0.015806 | 0 | 0.102857 | 0 | 0 | 0.090003 | 0.003243 | 0 | 0 | 0 | 0 | 0 | 1 | 0.062857 | false | 0 | 0.04 | 0.005714 | 0.142857 | 0.171429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79a4a7145c7782f4c24c78c13d10feef5db3850e | 2,456 | py | Python | data_capture/jobs.py | connectthefuture/calc | 7f0dc01d9265f26a36c2b9b5ee779fe876e4a494 | [
"CC0-1.0"
] | null | null | null | data_capture/jobs.py | connectthefuture/calc | 7f0dc01d9265f26a36c2b9b5ee779fe876e4a494 | [
"CC0-1.0"
] | 1 | 2021-06-10T23:13:04.000Z | 2021-06-10T23:13:04.000Z | data_capture/jobs.py | connectthefuture/calc | 7f0dc01d9265f26a36c2b9b5ee779fe876e4a494 | [
"CC0-1.0"
] | null | null | null | import logging
import traceback
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.db import transaction
from django_rq import job
from . import email
from .r10_spreadsheet_converter import Region10SpreadsheetConverter
from contracts.loaders.region_10 import Region10Loader
from contracts.models import Contract, BulkUploadContractSource
contracts_logger = logging.getLogger('contracts')
@transaction.atomic
def _process_bulk_upload(upload_source):
file = ContentFile(upload_source.original_file)
converter = Region10SpreadsheetConverter(file)
contracts_logger.info("Deleting contract objects related to region 10.")
# Delete existing contracts identified by the same
# procurement_center
Contract.objects.filter(
upload_source__procurement_center=BulkUploadContractSource.REGION_10
).delete()
contracts = []
bad_rows = []
contracts_logger.info("Generating new contract objects.")
for row in converter.convert_next():
try:
c = Region10Loader.make_contract(row, upload_source=upload_source)
contracts.append(c)
except (ValueError, ValidationError) as e:
bad_rows.append(row)
contracts_logger.info("Saving new contract objects.")
# Save new contracts
Contract.objects.bulk_create(contracts)
contracts_logger.info("Updating full-text search indexes.")
# Update search field on Contract models
Contract._fts_manager.update_search_field()
# Update the upload_source
upload_source.has_been_loaded = True
upload_source.save()
return len(contracts), len(bad_rows)
@job
def process_bulk_upload_and_send_email(upload_source_id):
contracts_logger.info(
"Starting bulk upload processing (pk=%d)." % upload_source_id
)
upload_source = BulkUploadContractSource.objects.get(
pk=upload_source_id
)
try:
num_contracts, num_bad_rows = _process_bulk_upload(upload_source)
email.bulk_upload_succeeded(upload_source, num_contracts, num_bad_rows)
    except Exception:
contracts_logger.exception(
'An exception occurred during bulk upload processing '
'(pk=%d).' % upload_source_id
)
tb = traceback.format_exc()
email.bulk_upload_failed(upload_source, tb)
contracts_logger.info(
"Ending bulk upload processing (pk=%d)." % upload_source_id
)
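# Example (hedged sketch): enqueue the job after saving an upload source;
# assumes django_rq's default queue is configured.
#
#     process_bulk_upload_and_send_email.delay(upload_source.pk)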
| 29.95122 | 79 | 0.735342 | 286 | 2,456 | 6.055944 | 0.374126 | 0.117783 | 0.06582 | 0.038106 | 0.122979 | 0.064088 | 0.064088 | 0.064088 | 0 | 0 | 0 | 0.008065 | 0.192182 | 2,456 | 81 | 80 | 30.320988 | 0.864919 | 0.061075 | 0 | 0.071429 | 0 | 0 | 0.125217 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.178571 | 0 | 0.232143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79a5279be723e7987fc33c8f7184b26de97782d2 | 1,525 | py | Python | rss_temple/api/archived_feed_entry_util.py | murrple-1/rss_temple | 289197923b1e7d1213f1673d164337df17d7269b | [
"MIT"
] | null | null | null | rss_temple/api/archived_feed_entry_util.py | murrple-1/rss_temple | 289197923b1e7d1213f1673d164337df17d7269b | [
"MIT"
] | 8 | 2019-12-04T21:58:35.000Z | 2021-12-15T02:29:49.000Z | rss_temple/api/archived_feed_entry_util.py | murrple-1/rss_temple | 289197923b1e7d1213f1673d164337df17d7269b | [
"MIT"
] | null | null | null | import itertools
from django.conf import settings
from django.dispatch import receiver
from django.core.signals import setting_changed
from api import models
_USER_UNREAD_GRACE_INTERVAL = None
_USER_UNREAD_GRACE_MIN_COUNT = None
@receiver(setting_changed)
def _load_global_settings(*args, **kwargs):
global _USER_UNREAD_GRACE_INTERVAL
global _USER_UNREAD_GRACE_MIN_COUNT
_USER_UNREAD_GRACE_INTERVAL = settings.USER_UNREAD_GRACE_INTERVAL
_USER_UNREAD_GRACE_MIN_COUNT = settings.USER_UNREAD_GRACE_MIN_COUNT
_load_global_settings()
def mark_archived_entries(read_mappings_generator, batch_size=768):
while True:
batch = list(itertools.islice(read_mappings_generator, batch_size))
if len(batch) < 1:
break
models.ReadFeedEntryUserMapping.objects.bulk_create(
batch, batch_size=batch_size, ignore_conflicts=True)
def read_mapping_generator_fn(feed, user):
grace_start = user.created_at + _USER_UNREAD_GRACE_INTERVAL
feed_entries = None
if models.FeedEntry.objects.filter(feed=feed, published_at__gte=grace_start).count() > _USER_UNREAD_GRACE_MIN_COUNT:
feed_entries = models.FeedEntry.objects.filter(
feed=feed, published_at__lt=grace_start)
else:
        # Order newest-first so the slice marks everything except the
        # _USER_UNREAD_GRACE_MIN_COUNT most recent entries as read, matching
        # the branch above, which leaves only recent entries unread.
        feed_entries = models.FeedEntry.objects.filter(feed=feed).order_by(
            '-published_at')[_USER_UNREAD_GRACE_MIN_COUNT:]
for feed_entry in feed_entries.iterator():
yield models.ReadFeedEntryUserMapping(feed_entry=feed_entry, user=user)
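# Example (hedged sketch): bulk-mark a new subscriber's backlog as read.
#
#     mark_archived_entries(read_mapping_generator_fn(feed, user))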
| 31.122449 | 120 | 0.773115 | 200 | 1,525 | 5.445 | 0.34 | 0.10101 | 0.151515 | 0.099174 | 0.321396 | 0.139578 | 0.139578 | 0.139578 | 0 | 0 | 0 | 0.003118 | 0.158689 | 1,525 | 48 | 121 | 31.770833 | 0.845674 | 0 | 0 | 0 | 0 | 0 | 0.007869 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.15625 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79a565e7d7928d619d4922162412c3aac164285d | 3,016 | py | Python | nonebot_plugin_bam/database/helper.py | 7sDream/nonebot_plugin_bam | 9d19856661a75484440efff8d77094390230f4c9 | [
"MIT"
] | 4 | 2021-02-08T16:18:12.000Z | 2021-12-28T07:13:51.000Z | nonebot_plugin_bam/database/helper.py | 7sDream/nonebot_plugin_bam | 9d19856661a75484440efff8d77094390230f4c9 | [
"MIT"
] | null | null | null | nonebot_plugin_bam/database/helper.py | 7sDream/nonebot_plugin_bam | 9d19856661a75484440efff8d77094390230f4c9 | [
"MIT"
] | null | null | null | from collections import defaultdict
from typing import Dict
from nonebot.log import logger
from peewee import JOIN
from .db import DB
from .tables import BilibiliUser, BilibiliUserStatus, FollowLink, Group
def log_sql(s):
# logger.debug(f"[DB:SQL] {s.sql()}")
return s
def get_all_groups():
yield from log_sql(Group.select())
def get_group(gid: int) -> Group:
for group in log_sql(Group.select().where(Group.gid == gid)):
return group
return None
def add_group(gid: int, group_suid: int):
return log_sql(
Group.insert(gid=gid, super_user=group_suid).on_conflict_replace()
).execute()
def remove_group(group: Group):
group.delete_instance(recursive=True, delete_nullable=True)
def get_users_with_linked_groups_and_status() -> Dict[int, BilibiliUser]:
users = {}
for user in log_sql(
BilibiliUser.select(BilibiliUser, FollowLink, BilibiliUserStatus)
.join(FollowLink, JOIN.LEFT_OUTER)
.switch(BilibiliUser)
.join(BilibiliUserStatus, JOIN.LEFT_OUTER, attr="status")
):
users[user.uid] = user
return users
def clean_users_live_status():
log_sql(BilibiliUserStatus.update(live_status=False)).execute(None)
def clean_user_live_status_in(users):
if len(users) > 0:
log_sql(
BilibiliUserStatus.update(live_status=False).where(
BilibiliUserStatus.bilibili_user.in_(users)
)
).execute()
def set_user_live_status_in(users):
if len(users) > 0:
log_sql(
BilibiliUserStatus.update(live_status=True).where(
BilibiliUserStatus.bilibili_user.in_(users)
)
).execute()
def get_group_with_following_users(gid):
for group in log_sql(
Group.select()
.where(Group.gid == gid)
.join(FollowLink, JOIN.LEFT_OUTER)
.join(BilibiliUser, JOIN.LEFT_OUTER)
):
return group
return None
def get_user(uid):
for user in log_sql(BilibiliUser.select().where(BilibiliUser.uid == uid)):
return user
return None
def add_user(uid, nickname, rid):
user, created = BilibiliUser.get_or_create(
uid=uid, defaults={"nickname": nickname, "rid": rid}
)
if created:
BilibiliUserStatus.create(
bilibili_user=user, newest_activity_id=0, live_status=False
)
else:
user.nickname = nickname
user.rid = rid
user.save()
return user
def add_link(group, user):
FollowLink.create(group=group, bilibili_user=user)
def remove_link(gid, uid):
log_sql(
FollowLink.delete().where(
(FollowLink.group == gid) & (FollowLink.bilibili_user == uid)
)
).execute()
def update_user_newest_activity_id(data: Dict[int, int]):
with DB.atomic():
for user, act_id in data.items():
BilibiliUserStatus.update(newest_activity_id=act_id).where(
BilibiliUserStatus.bilibili_user == user
).execute()
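# Example (hedged; the group and uid values are hypothetical):
#
#     group = get_group(123456)
#     user = get_user(654321)
#     if group and user:
#         add_link(group, user)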
| 24.92562 | 78 | 0.655172 | 372 | 3,016 | 5.107527 | 0.228495 | 0.034737 | 0.023158 | 0.026842 | 0.290526 | 0.236842 | 0.236842 | 0.175789 | 0.121053 | 0.121053 | 0 | 0.001306 | 0.238395 | 3,016 | 120 | 79 | 25.133333 | 0.82586 | 0.011605 | 0 | 0.267442 | 0 | 0 | 0.005707 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.174419 | false | 0 | 0.069767 | 0.023256 | 0.360465 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79a69b808745f05349b2ede483ce4782883293d0 | 1,536 | py | Python | monitor.py | hletrd/Facebook-Autopoker | 18735eebd4a34992a43a0987d390bbcfc0050d96 | [
"MIT"
] | 5 | 2015-07-14T17:11:24.000Z | 2016-07-28T11:52:03.000Z | monitor.py | hletrd/Facebook-Autopoker | 18735eebd4a34992a43a0987d390bbcfc0050d96 | [
"MIT"
] | null | null | null | monitor.py | hletrd/Facebook-Autopoker | 18735eebd4a34992a43a0987d390bbcfc0050d96 | [
"MIT"
] | null | null | null | db = 'log.db'
import sqlite3
import time
dbc = sqlite3.connect(db, check_same_thread=False)
dbc.text_factory = str
c = dbc.cursor()
def key(obj):
return obj[2]
while True:
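    # Print per-user daily poke counts, then poke-per-minute (ppm) rates over
    # several trailing windows.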
c.execute('SELECT userid, name, COUNT(`date`) FROM log WHERE `date` > \'' + time.strftime('%Y-%m-%d 00:00:00') + '\' AND result=1 GROUP BY userid;')
result = c.fetchall()
result.sort(key=key, reverse=True)
total = 0
for i in result:
total += i[2]
print('Poked ' + str(i[1]) + '(' + str(i[0]) + ') ' + str(i[2]) + ' time' + ('s' if (i[2] > 1) else '') + ' today')
print('Total: ' + str(total) + ' poke' + ('s' if (total > 1) else ''))
    c.execute('SELECT COUNT(`date`) FROM log WHERE `date` > datetime(\'' + time.strftime('%Y-%m-%d %H:%M:%S') + '\', \'-24 hours\') AND result=1;')
    # The old `* 100 / N / 100.0` pattern only truncated to two decimals under
    # Python 2 integer division; round() keeps that intent on Python 3.
    print(str(round(c.fetchone()[0] / 1440, 2)) + ' ppm for last 24 hours')
    c.execute('SELECT COUNT(`date`) FROM log WHERE `date` > datetime(\'' + time.strftime('%Y-%m-%d %H:%M:%S') + '\', \'-6 hours\') AND result=1;')
    print(str(round(c.fetchone()[0] / 360, 2)) + ' ppm for last 6 hours')
    c.execute('SELECT COUNT(`date`) FROM log WHERE `date` > datetime(\'' + time.strftime('%Y-%m-%d %H:%M:%S') + '\', \'-1 hours\') AND result=1;')
    print(str(round(c.fetchone()[0] / 60, 2)) + ' ppm for last 1 hour')
    c.execute('SELECT COUNT(`date`) FROM log WHERE `date` > datetime(\'' + time.strftime('%Y-%m-%d %H:%M:%S') + '\', \'-5 minutes\') AND result=1;')
    print(str(round(c.fetchone()[0] / 5, 2)) + ' ppm for last 5 minutes')
print('')
time.sleep(5) | 45.176471 | 149 | 0.575521 | 254 | 1,536 | 3.468504 | 0.283465 | 0.022701 | 0.079455 | 0.090806 | 0.573212 | 0.492622 | 0.464245 | 0.464245 | 0.464245 | 0.429058 | 0 | 0.058457 | 0.164714 | 1,536 | 34 | 150 | 45.176471 | 0.628215 | 0 | 0 | 0 | 0 | 0 | 0.398829 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.074074 | 0.037037 | 0.148148 | 0.259259 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79a7ef7c11468985df93521d767d16d3db7e7f54 | 1,949 | py | Python | annoTree/subs/parsSyms.py | jvfNontools/jvfNontools | 60b3c2643f6cabbcad342b5f6b3e5490e89f31f5 | [
"Apache-2.0"
] | null | null | null | annoTree/subs/parsSyms.py | jvfNontools/jvfNontools | 60b3c2643f6cabbcad342b5f6b3e5490e89f31f5 | [
"Apache-2.0"
] | null | null | null | annoTree/subs/parsSyms.py | jvfNontools/jvfNontools | 60b3c2643f6cabbcad342b5f6b3e5490e89f31f5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
#Copyright 2018 Jim Van Fleet
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
class SearchFileForSyms:
def doSearch(self, openFile, searchItem):
symb = "sym="
startb = "start-address=0x"
commb = ","
spacb = " "
parab = "("
brackb = "["
allSyms = []
symIndex = 0
with open(openFile) as symFile:
for line in symFile:
if (line.find(searchItem) == -1):
continue
# want exception if search items not found
si = line.index(symb)
ei = line.index(commb, (si+1))
                li = line.rfind(parab, (si+1), ei)
                if li == -1:
                    li = line.rfind(brackb, (si+1), ei)
                # 4 == len('sym='); trim a trailing '(' or '[' if one precedes the comma.
                sy0 = line[(si+4): ei] if li == -1 else line[(si+4): li]
line = symFile.readline()
line = symFile.readline()
si = line.index(startb)
ei = line.index(spacb, si)
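                # 16 == len('start-address=0x'); take the hex digits up to the
                # following space, then strip the leading zeros.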
star10 = line[(si+16): ei]
star1 = star10.lstrip("0")
allSyms.append(sy0)
allSyms.append(star1)
return allSyms
def __init__(self, openFile, searchItem):
self.symData = self.doSearch(openFile, searchItem)
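# Example (hedged; file name and search term are hypothetical): collect
# alternating symbol/start-address entries from a debugger symbol listing
# containing 'sym=' and 'start-address=0x' fields.
#
#     syms = SearchFileForSyms('symbols.txt', 'my_func').symData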
| 34.192982 | 73 | 0.531042 | 226 | 1,949 | 4.561947 | 0.50885 | 0.058196 | 0.026188 | 0.029098 | 0.050436 | 0.050436 | 0 | 0 | 0 | 0 | 0 | 0.026742 | 0.366855 | 1,949 | 56 | 74 | 34.803571 | 0.808752 | 0.307337 | 0 | 0.166667 | 0 | 0 | 0.018685 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79aa3668bb043f0729ae0d753b69ad0de26cb30d | 2,270 | py | Python | models/networkgcn.py | Byomyyt/GnTCN | b4cc9e97fc0b0438deb0a7e118817a7ab73ae93c | [
"MIT"
] | 91 | 2021-04-06T15:33:11.000Z | 2022-03-31T05:16:27.000Z | models/networkgcn.py | ddddwee1/GnTCN | e1abb8c526b2a9904d6f964b0084b54f123b82c9 | [
"MIT"
] | 17 | 2021-01-04T09:08:20.000Z | 2022-03-17T11:45:27.000Z | models/networkgcn.py | ddddwee1/GnTCN | e1abb8c526b2a9904d6f964b0084b54f123b82c9 | [
"MIT"
] | 15 | 2021-01-18T01:54:23.000Z | 2021-09-24T01:29:32.000Z | import numpy as np
import torch
import torch.nn.functional as F
from TorchSUL import Model as M
from torch.nn.parameter import Parameter
import torch.nn.init as init
class PropLayer(M.Model):
def initialize(self, outdim, usebias=True):
self.outdim = outdim
self.act = torch.nn.ReLU()
self.act2 = torch.nn.ReLU()
self.usebias = usebias
def build(self, *inp):
# inp: [Bsize, num_pts, 2]
num_pts = inp[0].shape[1]
indim = inp[0].shape[2]
self.weight = Parameter(torch.Tensor(num_pts, indim, self.outdim))
self.weight2 = Parameter(torch.Tensor(num_pts, self.outdim, self.outdim))
init.kaiming_uniform_(self.weight, a=np.sqrt(5))
init.kaiming_uniform_(self.weight2, a=np.sqrt(5))
if self.usebias:
print('initialize bias')
self.bias = Parameter(torch.Tensor(num_pts, self.outdim))
self.bias2 = Parameter(torch.Tensor(num_pts, self.outdim))
init.uniform_(self.bias, -0.1, 0.1)
init.uniform_(self.bias2, -0.1, 0.1)
def forward(self, inp, aff=None, act=True):
if aff is not None:
# propagate the keypoints
x = torch.einsum('ikl,ijk->ijl', inp, aff)
else:
x = inp
x = torch.einsum('ijk,jkl->ijl', x, self.weight)
if self.usebias:
x = x + self.bias
if act:
x = self.act(x)
# x = F.dropout(x, 0.25, self.training, False)
x = torch.einsum('ijk,jkl->ijl', x, self.weight2)
if self.usebias:
x = x + self.bias2
if act:
x = self.act2(x)
#x = F.dropout(x, 0.25, self.training, False)
if aff is not None:
x = torch.cat([inp, x], dim=-1)
return x
class TransNet(M.Model):
def initialize(self, outdim, num_pts):
self.num_pts = num_pts
self.c1 = PropLayer(outdim)
self.c2 = PropLayer(outdim)
self.c3 = PropLayer(outdim)
self.b2 = PropLayer(outdim)
self.b3 = PropLayer(outdim)
self.c8 = PropLayer(outdim)
self.c9 = PropLayer(3)
def forward(self, x, aff, aff_bone, inc, inc_inv):
x = feat = self.c1(x)
x = self.c2(x, aff)
x = self.c3(x, aff)
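        # Pool per-joint features onto bones with the incidence matrix inc
        # (assumed [num_bones, num_pts]), propagate on the bone graph, then
        # scatter back to joints with inc_inv.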
feat = torch.einsum('ijk,lj->ilk', feat, inc)
feat = self.b2(feat, aff_bone)
feat = self.b3(feat, aff_bone)
feat = torch.einsum('ijk,lj->ilk', feat, inc_inv)
x = torch.cat([x, feat], dim=-1)
x = self.c8(x)
x = self.c9(x, act=False)
# print(x.shape)
x = x.reshape(-1, self.num_pts, 3)
return x
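# Example (hedged sketch; assumes TorchSUL passes constructor args to
# initialize() and that the adjacency/incidence tensors are precomputed):
#
#     net = TransNet(outdim=64, num_pts=17)
#     pred3d = net(x2d, aff, aff_bone, inc, inc_inv)  # [B, 17, 2] -> [B, 17, 3]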
| 27.02381 | 75 | 0.657269 | 385 | 2,270 | 3.820779 | 0.220779 | 0.067981 | 0.077498 | 0.062542 | 0.299116 | 0.262407 | 0.197145 | 0.172672 | 0.042148 | 0.042148 | 0 | 0.025918 | 0.184141 | 2,270 | 83 | 76 | 27.349398 | 0.768359 | 0.067401 | 0 | 0.136364 | 0 | 0 | 0.034581 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075758 | false | 0 | 0.090909 | 0 | 0.227273 | 0.015152 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79ac7f2c83fe009a0c2f95dafc3599ebde6411df | 307 | py | Python | tests/backends/test_init.py | benkrikler/fast-carpenter-github-test | b6f7e1b218d3a1f39fcbe739c8bab19af63aabb8 | [
"Apache-2.0"
] | 12 | 2019-05-17T13:02:20.000Z | 2020-08-31T08:16:47.000Z | tests/backends/test_init.py | FAST-HEP/fast-carpenter | b6f7e1b218d3a1f39fcbe739c8bab19af63aabb8 | [
"Apache-2.0"
] | 104 | 2019-05-17T16:25:35.000Z | 2022-03-28T16:11:10.000Z | tests/backends/test_init.py | benkrikler/fast-carpenter-github-test | b6f7e1b218d3a1f39fcbe739c8bab19af63aabb8 | [
"Apache-2.0"
] | 16 | 2019-05-20T16:57:48.000Z | 2020-09-28T16:36:21.000Z | import pytest
import fast_carpenter.backends as backends
def test_get_backend():
coffea_back = backends.get_backend("coffea:dask")
assert hasattr(coffea_back, "execute")
with pytest.raises(ValueError) as e:
backends.get_backend("doesn't exist")
assert "Unknown backend" in str(e)
| 25.583333 | 53 | 0.732899 | 42 | 307 | 5.190476 | 0.619048 | 0.137615 | 0.146789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.169381 | 307 | 11 | 54 | 27.909091 | 0.854902 | 0 | 0 | 0 | 0 | 0 | 0.149837 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79ae5d267641860a50ba60429c85299cdeeef14d | 1,534 | py | Python | serving_patterns/src/api_composition_proxy/helpers.py | shibuiwilliam/ml-system-in-action | 0aa9d6bc4a4346236b9c971ec90afad04bcf5cca | [
"MIT"
] | 10 | 2020-08-30T03:19:10.000Z | 2021-08-08T17:38:06.000Z | serving_patterns/src/api_composition_proxy/helpers.py | shibuiwilliam/ml-system-in-action | 0aa9d6bc4a4346236b9c971ec90afad04bcf5cca | [
"MIT"
] | null | null | null | serving_patterns/src/api_composition_proxy/helpers.py | shibuiwilliam/ml-system-in-action | 0aa9d6bc4a4346236b9c971ec90afad04bcf5cca | [
"MIT"
] | 6 | 2020-08-30T03:19:13.000Z | 2021-11-26T23:32:42.000Z | from typing import Dict
import logging
logger = logging.getLogger(__name__)
def path_builder(url: str, path: str) -> str:
if path == "" or path is None:
return url
if path.startswith("/"):
path = path[1:]
if url.endswith("/"):
url = f"{url}{path}"
else:
url = f"{url}/{path}"
return url
def url_builder(hostname: str, https: bool = False) -> str:
if not (hostname.startswith("http://") or hostname.startswith("https://")):
hostname = f"https://{hostname}" if https else f"http://{hostname}"
return hostname
def url_path_builder(hostname: str, path: str, https: bool = False) -> str:
hostname = url_builder(hostname, https)
url = path_builder(hostname, path)
return url
def customized_redirect_builder(alias: str, url: str, redirect_path: str, customized_redirect_map: Dict[str, Dict[str, str]] = None) -> str:
"""
customized_redirect_map
{
ALIAS_0:
{
REDIRECT_PATH_0: redirect_path_0,
REDIRECT_PATH_1: redirect_path_1,
},
ALIAS_1:
{
REDIRECT_PATH_0: redirect_path_0,
REDIRECT_PATH_2: redirect_path_2,
}
}
"""
path = path_builder(url, redirect_path)
if customized_redirect_map is None:
return path
if alias in customized_redirect_map.keys():
if redirect_path in customized_redirect_map[alias].keys():
path = path_builder(url, customized_redirect_map[alias][redirect_path])
return path
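# Example (hedged; the alias, host, and paths are hypothetical):
#
#     customized_redirect_builder(
#         "svc_a", "http://svc-a:8000", "/predict",
#         {"svc_a": {"/predict": "/v1/predict"}})
#     # -> "http://svc-a:8000/v1/predict"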
| 27.890909 | 140 | 0.627119 | 194 | 1,534 | 4.71134 | 0.190722 | 0.157549 | 0.137856 | 0.091904 | 0.128009 | 0.084245 | 0.083151 | 0.083151 | 0 | 0 | 0 | 0.009649 | 0.256845 | 1,534 | 54 | 141 | 28.407407 | 0.792105 | 0.160365 | 0 | 0.172414 | 0 | 0 | 0.061275 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0 | 0.068966 | 0 | 0.413793 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79ae9cb10f166f65649baf95240f5d262fca4fa9 | 1,856 | py | Python | rsa/rsa/common.py | andrew-kulikov/crypto | c81cf7965d58da23ce234435676c8516daf3c649 | [
"MIT"
] | null | null | null | rsa/rsa/common.py | andrew-kulikov/crypto | c81cf7965d58da23ce234435676c8516daf3c649 | [
"MIT"
] | null | null | null | rsa/rsa/common.py | andrew-kulikov/crypto | c81cf7965d58da23ce234435676c8516daf3c649 | [
"MIT"
] | null | null | null | import typing
class NotRelativePrimeError(ValueError):
def __init__(self, a, b, d, msg=''):
super().__init__(msg or "%d and %d are not relatively prime, divider=%i" % (a, b, d))
self.a = a
self.b = b
self.d = d
def bit_size(num: int) -> int:
try:
return num.bit_length()
except AttributeError:
raise TypeError('bit_size(num) only supports integers, not %r' % type(num))
def byte_size(number: int) -> int:
if number == 0: return 1
return ceil_div(bit_size(number), 8)
def ceil_div(num: int, div: int) -> int:
quanta, mod = divmod(num, div)
if mod:
quanta += 1
return quanta
def extended_gcd(a: int, b: int) -> typing.Tuple[int, int, int]:
"""Returns a tuple (r, i, j) such that r = gcd(a, b) = ia + jb
"""
# r = gcd(a,b) i = multiplicitive inverse of a mod b
# or j = multiplicitive inverse of b mod a
# Neg return values for i or j are made positive mod b or a respectively
# Iterateive Version is faster and uses much less stack space
x = 0
y = 1
lx = 1
ly = 0
oa = a # Remember original a/b to remove
ob = b # negative values from return results
while b != 0:
q = a // b
(a, b) = (b, a % b)
(x, lx) = ((lx - (q * x)), x)
(y, ly) = ((ly - (q * y)), y)
    if lx < 0:
        lx += ob  # If negative, wrap modulo the original b
    if ly < 0:
        ly += oa  # If negative, wrap modulo the original a
return a, lx, ly # Return only positive values
def inverse(x: int, n: int) -> int:
"""Returns the inverse of x % n under multiplication, a.k.a x^-1 (mod n)
>>> inverse(7, 4)
3
>>> (inverse(143, 4) * 143) % 4
1
"""
(divider, inv, _) = extended_gcd(x, n)
if divider != 1:
raise NotRelativePrimeError(x, n, divider)
return inv
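# Example (hedged): deriving an RSA private exponent, where 65537 is the public
# exponent and phi_n (hypothetical, assumed precomputed) is Euler's totient of
# the modulus:
#
#     d = inverse(65537, phi_n)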
| 25.424658 | 93 | 0.553341 | 293 | 1,856 | 3.443686 | 0.351536 | 0.015857 | 0.005946 | 0.011893 | 0.043608 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019701 | 0.316272 | 1,856 | 72 | 94 | 25.777778 | 0.775414 | 0.310884 | 0 | 0 | 0 | 0 | 0.072698 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.02381 | 0 | 0.309524 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79af2c5e0250f4d13af181fe19d4ed482ecdc804 | 12,217 | py | Python | tests/unit/core/test_datasetprofile.py | bernease/whylogs-python | cfd2a2f71280537aae584cbd40a752fbe7da647b | [
"Apache-2.0"
] | null | null | null | tests/unit/core/test_datasetprofile.py | bernease/whylogs-python | cfd2a2f71280537aae584cbd40a752fbe7da647b | [
"Apache-2.0"
] | null | null | null | tests/unit/core/test_datasetprofile.py | bernease/whylogs-python | cfd2a2f71280537aae584cbd40a752fbe7da647b | [
"Apache-2.0"
] | null | null | null | import datetime
import json
import os
from uuid import uuid4
import pytest
import numpy as np
from pandas import util
from whylogs.core.datasetprofile import DatasetProfile, array_profile, dataframe_profile
from whylogs.core.model_profile import ModelProfile
from whylogs.util import time
from whylogs.util.protobuf import message_to_dict, message_to_json
from whylogs.util.time import to_utc_ms
def test_all_zeros_returns_summary_with_stats():
stats = ("min", "max", "stddev", "mean")
array = np.zeros([100, 1])
prof = array_profile(array)
msg = prof.to_summary()
d = message_to_dict(msg)
d1 = json.loads(message_to_json(msg))
number_summary = d["columns"]["0"]["numberSummary"]
missing_stats = [k for k in stats if k not in number_summary]
if len(missing_stats) > 0:
raise RuntimeError(f"Stats missing from number summary: {missing_stats}")
assert d == d1
def test_empty_valid_datasetprofiles_empty():
now = datetime.datetime.utcnow()
shared_session_id = uuid4().hex
x1 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
x2 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
merged = x1.merge(x2)
assert merged.name == "test"
assert merged.session_id == shared_session_id
assert merged.session_timestamp == now
assert merged.columns == {}
def test_merge_different_columns():
now = datetime.datetime.utcnow()
shared_session_id = uuid4().hex
x1 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "x1"}, )
x1.track("col1", "value")
x2 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "x2"}, )
x2.track("col2", "value")
merged = x1.merge(x2)
assert merged.name == "test"
assert merged.session_id == shared_session_id
assert merged.session_timestamp == now
assert set(list(merged.columns.keys())) == {"col1", "col2"}
assert merged.columns["col1"].counters.count == 1
assert merged.columns["col2"].counters.count == 1
assert merged.tags == dict({"name": "test", "key": "value"})
assert merged.metadata == dict({"key": "x1"})
def test_merge_lhs_no_profile():
now = datetime.datetime.utcnow()
shared_session_id = uuid4().hex
x1 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
x2 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, model_profile=ModelProfile())
merged = x1.merge(x2)
assert merged.name == "test"
assert merged.session_id == shared_session_id
assert merged.session_timestamp == now
assert merged.columns == {}
assert merged.model_profile is not None
def test_merge_rhs_no_profile():
now = datetime.datetime.utcnow()
shared_session_id = uuid4().hex
x1 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, model_profile=ModelProfile())
x2 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
merged = x1.merge(x2)
assert merged.name == "test"
assert merged.session_id == shared_session_id
assert merged.session_timestamp == now
assert merged.columns == {}
assert merged.model_profile is not None
def test_merge_same_columns():
now = datetime.datetime.utcnow()
shared_session_id = uuid4().hex
x1 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
x1.track("col1", "value1")
x2 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
x2.track("col1", "value1")
x2.track("col2", "value")
merged = x1.merge(x2)
assert merged.name == "test"
assert merged.session_id == shared_session_id
assert merged.session_timestamp == now
assert set(list(merged.columns.keys())) == {"col1", "col2"}
assert merged.columns["col1"].counters.count == 2
assert merged.columns["col2"].counters.count == 1
def test_protobuf_round_trip():
now = datetime.datetime.utcnow()
tags = {"k1": "rock", "k2": "scissors", "k3": "paper"}
original = DatasetProfile(name="test", dataset_timestamp=now, tags=tags, )
original.track("col1", "value")
original.track("col2", "value")
msg = original.to_protobuf()
roundtrip = DatasetProfile.from_protobuf(msg)
assert roundtrip.name == "test"
assert roundtrip.session_id == original.session_id
assert to_utc_ms(roundtrip.session_timestamp) == to_utc_ms(
original.session_timestamp)
assert set(list(roundtrip.columns.keys())) == {"col1", "col2"}
assert roundtrip.columns["col1"].counters.count == 1
assert roundtrip.columns["col2"].counters.count == 1
tags["name"] = "test"
assert set(roundtrip.tags) == set(tags)
assert roundtrip.metadata == original.metadata
def test_non_string_tag_raises_assert_error():
now = datetime.datetime.utcnow()
tags = {"key": "value"}
x = DatasetProfile("test", now, tags=tags)
x.validate()
# Include a non-string tag
x._tags["number"] = 1
    with pytest.raises(AssertionError):
        x.validate()
def test_mismatched_tags_raises_assertion_error():
now = datetime.datetime.utcnow()
x1 = DatasetProfile("test", now, tags={"key": "foo"})
x2 = DatasetProfile("test", now, tags={"key": "bar"})
    with pytest.raises(AssertionError):
        x1.merge_strict(x2)
def test_mismatched_tags_merge_succeeds():
now = datetime.datetime.utcnow()
x1 = DatasetProfile("test", now, tags={"key": "foo"})
x2 = DatasetProfile("test2", now, tags={"key": "bar"})
result = x1.merge(x2)
assert result.tags.get("key") == "foo"
def test_name_always_appear_in_tags():
x1 = DatasetProfile(name="test")
assert x1.tags["name"] == "test"
def test_parse_delimited_from_java_single():
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, "output_from_java_08242020.bin"), "rb") as f:
data = f.read()
assert DatasetProfile.parse_delimited_single(data) is not None
with open(os.path.join(dir_path, "output_from_java_01212021.bin"), "rb") as f:
data = f.read()
assert DatasetProfile.parse_delimited_single(data) is not None
def test_parse_from_protobuf():
dir_path = os.path.dirname(os.path.realpath(__file__))
DatasetProfile.read_protobuf(os.path.join(
dir_path, "output_from_java_08242020.bin"))
def test_parse_delimited_from_java_multiple():
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, "output_from_java_08242020.bin"), "rb") as f:
data = f.read()
multiple = data + data
result = DatasetProfile.parse_delimited(multiple)
assert len(result) == 2
def test_write_delimited_single():
now = datetime.datetime.utcnow()
original = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
original.track("col1", "value")
output_bytes = original.serialize_delimited()
pos, roundtrip = DatasetProfile.parse_delimited_single(output_bytes)
assert roundtrip.session_id == original.session_id
    # Python datetime precision is microseconds, so compare at millisecond resolution
assert time.to_utc_ms(roundtrip.session_timestamp) == time.to_utc_ms(
original.session_timestamp)
assert roundtrip.tags == original.tags
assert roundtrip.metadata == original.metadata
def test_write_delimited_multiple():
now = datetime.datetime.utcnow()
original = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
original.track("col1", "value")
output_bytes = original.serialize_delimited()
multiple_entries = output_bytes
for i in range(1, 5):
multiple_entries += output_bytes
entries = DatasetProfile.parse_delimited(multiple_entries)
assert len(entries) == 5
for entry in entries:
assert entry.session_id == original.session_id
        # Compare at millisecond resolution; finer precision can differ across the round trip
assert time.to_utc_ms(entry.session_timestamp) == time.to_utc_ms(
original.session_timestamp)
assert entry.tags == original.tags
assert entry.metadata == original.metadata
def test_verify_schema_version():
dp = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=datetime.datetime.now(
), tags={"key": "value"}, metadata={"key": "value"}, )
props = dp.to_properties()
assert props.schema_major_version == 1
assert props.schema_minor_version == 1
def test_timestamp():
    # Renamed from tests_timestamp so pytest's default "test_*" pattern collects
    # it; 'now' avoids shadowing the imported whylogs.util.time module, and
    # passing the same value to the profile keeps the assertion deterministic.
    now = datetime.datetime.now()
    dp = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=now,
                        tags={"key": "value"}, metadata={"key": "value"}, )
    time_2 = dp.session_timestamp_ms
    assert time_2 == int(now.replace(
        tzinfo=datetime.timezone.utc).timestamp() * 1000.0)
def test_dataframe_profile():
    # 'now' avoids shadowing the imported whylogs.util.time module.
    now = datetime.datetime.now()
    df = util.testing.makeDataFrame()
    profile = DatasetProfile("test", now)
    profile.track_dataframe(df)
    profile_factory = dataframe_profile(df, name="test", timestamp=now)
assert profile_factory.columns["A"].number_tracker.variance.mean == profile.columns[
"A"].number_tracker.variance.mean
profile_factory_2 = dataframe_profile(df)
assert profile_factory_2.columns["A"].number_tracker.variance.mean == profile.columns[
"A"].number_tracker.variance.mean
profile_factory_3 = dataframe_profile(df, timestamp=103433)
assert profile_factory_3.columns["A"].number_tracker.variance.mean == profile.columns[
"A"].number_tracker.variance.mean
def test_track():
now = datetime.datetime.utcnow()
original = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
data = {
"rows": 1,
"names": "roger roger",
}
original.track(columns=data)
def test_errors():
now = datetime.datetime.utcnow()
original = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
with pytest.raises(TypeError):
original.track(columns=1, data=34)
def test_flat_summary():
now = datetime.datetime.utcnow()
original = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
flat_summary = original.flat_summary()
assert flat_summary is not None
assert len(original.flat_summary()) == 4
def test_chunk_iterator():
now = datetime.datetime.utcnow()
original = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
data = {
"rows": 1,
"names": "roger roger",
}
original.track(columns=data)
for each_chuck in original.chunk_iterator():
assert each_chuck is not None
def test_array():
now = datetime.datetime.utcnow()
original = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
with pytest.raises(ValueError):
original.track_array(np.random.rand(3))
| 35.106322 | 107 | 0.681919 | 1,535 | 12,217 | 5.227362 | 0.134853 | 0.066176 | 0.045364 | 0.068669 | 0.645937 | 0.617896 | 0.600698 | 0.559696 | 0.547233 | 0.542498 | 0 | 0.014706 | 0.17623 | 12,217 | 347 | 108 | 35.207493 | 0.782591 | 0.008513 | 0 | 0.515504 | 0 | 0 | 0.092906 | 0.00958 | 0 | 0 | 0 | 0 | 0.255814 | 1 | 0.093023 | false | 0.007752 | 0.046512 | 0 | 0.139535 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79af4b15407f4473ba60b0d4c07074b41824263f | 2,645 | py | Python | canvas/cli/api.py | robinsax/canvas | 6e8b9b260fdda868d687b562926a2038736ec56c | [
"Apache-2.0"
] | 4 | 2018-01-24T01:34:39.000Z | 2021-01-14T21:29:47.000Z | canvas/cli/api.py | robinsax/canvas | 6e8b9b260fdda868d687b562926a2038736ec56c | [
"Apache-2.0"
] | 2 | 2018-06-09T22:28:56.000Z | 2018-06-12T01:40:10.000Z | canvas/cli/api.py | robinsax/canvas | 6e8b9b260fdda868d687b562926a2038736ec56c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
'''
The CLI API definition, available to both the core and plugins.
'''
import sys
from ..exceptions import NotInstalled
from .. import __installed__
# Define the global name to launcher function map.
_launchers = dict()
# Define a single character to launcher function map.
_shortforms = dict()
def launcher(name, **info):
'''
Register a launcher function to be referenced from the CLI as `name`. An
abbreviation will be automatically assigned if one is available. The `info`
keyword arguments can contain one or more of:
* `description` - A textual description of the launch mode.
* `argspec` - A CLI argument specification.
* `init` - A flag indicating a full initialization is required before the
handler is invoked.
'''
def launcher_wrap(func):
ref_name, char = name, name[0]
func.__info__ = info
if char not in _shortforms:
# Assign a short form alias.
ref_name = ''.join(('(', char, ')', name[1:]))
_shortforms[char] = func
info['ref_name'] = ref_name
_launchers[name] = func
return func
return launcher_wrap
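# Example (hedged sketch) of registering a launch mode; the name 'serve' and
# its handler are hypothetical:
#
#     @launcher('serve', description='Run a development server.', init=True)
#     def serve(args):
#         ...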
def launch_cli(args):
'''Launch the CLI given the commandline arguments `args`.'''
# Define the incorrect usage handler.
def print_usage():
        # Define the argument representation generator.
def write_one(name, launcher):
ref_name = launcher.__info__['ref_name']
string = ' '.join(
(''.join(('--', ref_name)), launcher.__info__.get('argspec', ''))
)
string = ''.join((
string, ' '*(35 - len(string)),
launcher.__info__.get('description', '')
))
return string
# Sort launch options alphabetically.
alpha_order = sorted(_launchers.keys())
print(' '.join((
'Usage:',
'python3 canvas [',
'\n\t' + '\n\t'.join(
write_one(name, _launchers[name]) for name in alpha_order
),
'\n]'
)))
# Exit.
sys.exit(1)
# Define an asserted initializer.
def safe_initialize():
if not __installed__:
raise NotInstalled('Run python3 canvas --init')
from ..core import initialize
initialize()
if args and args[0] == '-!':
        # The -! switch causes eager initialization.
safe_initialize()
args = args[1:]
# Nothing supplied, show usage.
if not args:
print_usage()
# Look up the launcher.
launcher = None
if args[0].startswith('--'):
launcher = _launchers.get(args[0][2:])
elif args[0].startswith('-'):
launcher = _shortforms.get(args[0][1:])
if not launcher:
print_usage()
if launcher.__info__.get('init', False):
# This launcher requires initialization.
safe_initialize()
if launcher(args[1:]) is False:
# The launch function reported incorrect usage.
print_usage()
| 25.432692 | 76 | 0.677127 | 345 | 2,645 | 5.023188 | 0.37971 | 0.028275 | 0.025967 | 0.024235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007955 | 0.19206 | 2,645 | 103 | 77 | 25.679612 | 0.802995 | 0.377694 | 0 | 0.084746 | 0 | 0 | 0.066998 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101695 | false | 0 | 0.067797 | 0 | 0.220339 | 0.084746 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79b154f9526abf942504a9812110e0bdc124d139 | 1,898 | py | Python | tests/run/test_config_file.py | vincent99/rio | 018dac19be47ee20ae47bcd8eea71c8c4f07a1af | [
"Apache-2.0"
] | 1 | 2019-05-28T11:32:11.000Z | 2019-05-28T11:32:11.000Z | tests/run/test_config_file.py | vincent99/rio | 018dac19be47ee20ae47bcd8eea71c8c4f07a1af | [
"Apache-2.0"
] | null | null | null | tests/run/test_config_file.py | vincent99/rio | 018dac19be47ee20ae47bcd8eea71c8c4f07a1af | [
"Apache-2.0"
] | null | null | null | from os import unlink
from random import randint
import util
import tempfile
def config_setup(stack, *configs):
config_name = "tconfig" + str(randint(1000, 5000))
fp = tempfile.NamedTemporaryFile(delete=False)
for c in configs:
fp.write(bytes(c+"\n", 'utf8'))
fp.close()
util.run(f"rio config create {stack}/{config_name} {fp.name}")
unlink(fp.name)
return config_name
def run_config(stack, config_names):
name = "tsrv" + str(randint(1000, 5000))
fullName = "%s/%s" % (stack, name)
cmd = (f'rio run -n {fullName}')
for c in config_names:
tempdir = ":/temp" + str(randint(100, 999))
cmd += " --config " + c + tempdir
cmd += " nginx"
print(cmd)
util.run(cmd)
util.run(f"rio wait {fullName}")
print(name)
return name
def rio_chk(stack, sname):
fullName = "%s/%s" % (stack, sname)
inspect = util.rioInspect(fullName)
out = []
for item in inspect["configs"]:
out.append(item["source"])
out.sort()
return out
def kube_chk(stack, service_name):
fullName = "%s/%s" % (stack, service_name)
    # Avoid shadowing the id() builtin.
    full_id = util.rioInspect(fullName, "id")
    namespace = full_id.split(":")[0]
obj = util.kubectl(namespace, "deployment", service_name)
out = []
for item in obj['spec']['template']['spec']['volumes']:
if 'configMap' in item:
out.append(str(item['configMap']['name']).split("-")[0])
out.sort()
print(out)
return out
def test_content(stack):
config_name1 = config_setup(stack, "1foo=1bar", "1foo2=1bar2")
config_setup(stack, "2foo=2bar", "2foo1=2bar2")
expect = [config_name1]
expect.sort()
servicename = run_config(stack, expect)
print(stack, servicename)
gotrio = rio_chk(stack, servicename)
assert expect == gotrio
gotk8s = kube_chk(stack, servicename)
assert expect == gotk8s
| 21.325843 | 68 | 0.615385 | 245 | 1,898 | 4.685714 | 0.35102 | 0.027875 | 0.041812 | 0.039199 | 0.054007 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028082 | 0.230769 | 1,898 | 88 | 69 | 21.568182 | 0.758219 | 0 | 0 | 0.105263 | 0 | 0 | 0.134352 | 0.011064 | 0 | 0 | 0 | 0 | 0.035088 | 1 | 0.087719 | false | 0 | 0.070175 | 0 | 0.22807 | 0.070175 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79b4dd93658058c4c08b578922c3ee4d84c4d4e5 | 5,548 | py | Python | vivareal.py | erlancassiano/portal_crawler | bcbda7871d74080b926b0f59c05d813385286173 | [
"MIT"
] | null | null | null | vivareal.py | erlancassiano/portal_crawler | bcbda7871d74080b926b0f59c05d813385286173 | [
"MIT"
] | null | null | null | vivareal.py | erlancassiano/portal_crawler | bcbda7871d74080b926b0f59c05d813385286173 | [
"MIT"
] | null | null | null | import os
import datetime
import csv
import time
import random
from time import sleep
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
import undetected_chromedriver as uc
class Vivareal:
timestamp = str(datetime.datetime.now()).replace(".","").replace("-","").replace(":","")
filename = "results_{}".format(timestamp)+".csv"
chromeOptions = uc.ChromeOptions()
#chromeOptions.add_argument('--headless')
driver = uc.Chrome(options=chromeOptions)
def __init__(self):
self.csvCreater()
url = "https://www.vivareal.com.br/aluguel"
self.driver.get(url)
while True:
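            # Each pass scrapes the visible result cards and clicks "Próxima
            # página"; when no next-page link exists, find_element raises
            # NoSuchElementException and the run ends.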
# driver.implicitly_wait(10)
WebDriverWait(self.driver, 60).until(EC.presence_of_element_located((By.CSS_SELECTOR, 'span.property-card__title.js-cardLink.js-card-title')))
self.ScrollPage()
result_div = self.driver.find_element_by_css_selector(".results-list.js-results-list")
result_cards_list = result_div.find_elements_by_css_selector("article.property-card__container.js-property-card")
for item in result_cards_list:
try:
title = item.find_element_by_css_selector("span.property-card__title.js-cardLink.js-card-title").text
except NoSuchElementException:
title = "-"
try:
address = item.find_element_by_css_selector("span.property-card__address").text
except NoSuchElementException:
address = "-"
try:
price = item.find_element_by_css_selector(".property-card__price.js-property-card-prices.js-property-card__price-small").text
                except (NoSuchElementException, StaleElementReferenceException):
price = "-"
try:
price_details = item.find_element_by_css_selector(".property-card__price-details--condo")
price_details = str(price_details.text).replace("Condomínio:","").strip()
except NoSuchElementException:
price_details = "-"
try:
area = item.find_element_by_css_selector("li.property-card__detail-item.property-card__detail-area")
area = str(area.text).replace(" ","").strip()
except NoSuchElementException:
area = "-"
try:
rooms = item.find_element_by_css_selector("li.property-card__detail-item.property-card__detail-room.js-property-detail-rooms")
rooms = str(rooms.text).replace(" Quarto","").replace("s","").strip()
except NoSuchElementException:
rooms = "-"
try:
garages = item.find_element_by_css_selector("li.property-card__detail-item.property-card__detail-garage.js-property-detail-garages")
garages = str(garages.text).replace("Vaga","").replace("s","").strip()
except NoSuchElementException:
garages = "-"
try:
bathrooms = item.find_element_by_css_selector("li.property-card__detail-item.property-card__detail-bathroom.js-property-detail-bathroom")
bathrooms = str(bathrooms.text).replace(" Banheiro","").replace("s","").strip()
except NoSuchElementException:
bathrooms = "-"
self.csvupdate(title,address,price,price_details,area,rooms,bathrooms,garages)
print(title,"\n",address,"\n",price,"\n",price_details,"\n",area,"\n",rooms,"\n",bathrooms,"\n",garages,"\n\n")
self.driver.find_element_by_xpath("//a[@class='js-change-page' and contains(text(), 'Próxima página')]").click()
time.sleep(5)
def csvCreater(self):
with open(self.filename,'w' ,newline='') as file:
fieldNames = ['Title','Address','Rent','Admin Fee','Area','Rooms','Bathrooms','Parking']
thewriter = csv.DictWriter(file, fieldnames=fieldNames)
thewriter.writeheader()
def csvupdate(self,title,address,price,price_details,area,rooms,bathrooms,garages):
with open(self.filename,'a' ,newline='') as file:
fieldNames = ['Title','Address','Rent','Admin Fee','Area','Rooms','Bathrooms','Parking']
thewriter = csv.DictWriter(file, fieldnames=fieldNames)
thewriter.writerow({'Title': str(title),'Address': str(address),'Rent': price,'Admin Fee': price_details,'Area': area,'Rooms':rooms,'Bathrooms': bathrooms,'Parking': garages})
def ScrollPage(self):
lenOfPage = self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
        match = False
        while not match:
lastCount = lenOfPage
sleep(3)
lenOfPage = self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
if lastCount==lenOfPage:
match=True
bot = Vivareal() | 53.864078 | 188 | 0.605443 | 568 | 5,548 | 5.734155 | 0.265845 | 0.062634 | 0.043905 | 0.044212 | 0.427694 | 0.351551 | 0.351551 | 0.351551 | 0.351551 | 0.272644 | 0 | 0.001975 | 0.270007 | 5,548 | 103 | 189 | 53.864078 | 0.802222 | 0.012076 | 0 | 0.238636 | 0 | 0.056818 | 0.217739 | 0.149312 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.136364 | 0 | 0.238636 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79b52191dc7ea7de4c6237513c7af4e22ce1b28f | 3,339 | py | Python | seahub/base/profile.py | gzy403999903/seahub | 992e5852579a6d9e0cfdaf18c77ce0191cb64449 | [
"Apache-2.0"
] | null | null | null | seahub/base/profile.py | gzy403999903/seahub | 992e5852579a6d9e0cfdaf18c77ce0191cb64449 | [
"Apache-2.0"
] | 6 | 2019-12-13T09:55:45.000Z | 2022-03-11T23:47:29.000Z | seahub/base/profile.py | gzy403999903/seahub | 992e5852579a6d9e0cfdaf18c77ce0191cb64449 | [
"Apache-2.0"
] | 1 | 2019-05-16T06:58:16.000Z | 2019-05-16T06:58:16.000Z | # Copyright (c) 2012-2016 Seafile Ltd.
"""
The MIT License (MIT)
Copyright (c) 2013 Omar Bohsali
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
try:
import cProfile as profile
except ImportError:
import profile
import pstats
try:
    from cStringIO import StringIO  # Python 2
except ImportError:  # Python 3 fallback
    from io import StringIO
from django.conf import settings
class ProfilerMiddleware(object):
"""
    Simple profile middleware to profile django views. To run it, add
    ?__prof__=true to the URL like this:
http://localhost:8000/view/?__prof__=true
Optionally pass the following to modify the output:
?sort => Sort the output by a given metric. Default is time.
See http://docs.python.org/2/library/profile.html#pstats.Stats.sort_stats
for all sort options.
quick reference:
- time: sort by function execution time
- cum: the cumulative time spent in this and all subfunctions (from invocation till exit). This figure is accurate even for recursive functions.
?count => The number of rows to display. Default is 100.
?fullpath=<true|false> default false. True to show full path of the source file of each function
?callee=<true|false> default false. True to show the time of a function spent on its sub function.
This is adapted from an example found here:
http://www.slideshare.net/zeeg/django-con-high-performance-django-presentation.
"""
def can(self, request):
return settings.DEBUG and request.GET.get('__prof__', False) == 'true'
def process_view(self, request, callback, callback_args, callback_kwargs):
if self.can(request):
self.profiler = profile.Profile()
args = (request,) + callback_args
return self.profiler.runcall(callback, *args, **callback_kwargs)
def process_response(self, request, response):
if self.can(request):
self.profiler.create_stats()
io = StringIO()
stats = pstats.Stats(self.profiler, stream=io)
if not request.GET.get('fullpath', False):
stats.strip_dirs()
stats.sort_stats(request.GET.get('sort', 'time'))
if request.GET.get('callee', False):
stats.print_callees()
stats.print_stats(int(request.GET.get('count', 100)))
response.content = '<pre>%s</pre>' % io.getvalue()
return response
| 38.825581 | 152 | 0.7059 | 466 | 3,339 | 5.015021 | 0.472103 | 0.037655 | 0.027813 | 0.017972 | 0.050492 | 0.050492 | 0.02653 | 0 | 0 | 0 | 0 | 0.008785 | 0.215933 | 3,339 | 85 | 153 | 39.282353 | 0.883881 | 0.620545 | 0 | 0.071429 | 0 | 0 | 0.043771 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.214286 | 0.035714 | 0.464286 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79b7a93216b116c4fe2b33e6f3183397b498a763 | 1,047 | py | Python | year2019/day21/code.py | romainvigneres/advent_of_code | 2ae38617706cb1041ab3950cdec3713176dc3633 | [
"MIT"
] | null | null | null | year2019/day21/code.py | romainvigneres/advent_of_code | 2ae38617706cb1041ab3950cdec3713176dc3633 | [
"MIT"
] | null | null | null | year2019/day21/code.py | romainvigneres/advent_of_code | 2ae38617706cb1041ab3950cdec3713176dc3633 | [
"MIT"
] | null | null | null | from year2019.intcode_v2 import Intcode
from common import input_integer_sep
def part_one(inp_list):
program1 = (
"NOT A J\n"
"NOT B T\n"
"OR T J\n"
"NOT C T\n"
"OR T J\n"
"AND D J\n"
"WALK\n"
)
p = Intcode(
inp_list,
[ord(char) for char in program1]
)
while not p.done:
out = p.run_until_output()
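        # Values above the ASCII range are the hull-damage answer, not prompt text.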
if out > 999:
return out
def part_two(inp_list):
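    # Same idea with RUN's extended sensors: additionally require ground at E
    # (the step after landing) or H (the landing tile of a second jump).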
program2 = (
"NOT C J\n"
"NOT B T\n"
"OR T J\n"
"NOT A T\n"
"OR T J\n"
"AND D J\n"
"NOT E T\n"
"NOT T T\n"
"OR H T\n"
"AND T J\n"
"RUN\n"
)
p = Intcode(
inp_list,
[ord(char) for char in program2]
)
while not p.done:
out = p.run_until_output()
if out > 257:
return out
def get_result():
inp = input_integer_sep("2019", "21")
print("Part one", part_one(inp.copy()))
print("Part two", part_two(inp.copy()))
| 20.529412 | 43 | 0.472779 | 163 | 1,047 | 2.92638 | 0.312883 | 0.037736 | 0.052411 | 0.041929 | 0.406709 | 0.406709 | 0.406709 | 0.406709 | 0.406709 | 0.406709 | 0 | 0.0336 | 0.403056 | 1,047 | 50 | 44 | 20.94 | 0.7296 | 0 | 0 | 0.391304 | 0 | 0 | 0.164279 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065217 | false | 0 | 0.043478 | 0 | 0.152174 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79b7cbd53300df46238acfe16835276ec2f45c5e | 2,252 | py | Python | july/management/commands/fix_locations.py | kanika-art/julython.org | 557b29e5d69a772b684fb6073a616f06b97d0a48 | [
"MIT"
] | 7 | 2015-07-01T18:01:40.000Z | 2019-12-27T02:04:07.000Z | july/management/commands/fix_locations.py | kanika-art/julython.org | 557b29e5d69a772b684fb6073a616f06b97d0a48 | [
"MIT"
] | 6 | 2015-07-01T11:32:34.000Z | 2021-06-10T20:35:32.000Z | july/management/commands/fix_locations.py | kanika-art/julython.org | 557b29e5d69a772b684fb6073a616f06b97d0a48 | [
"MIT"
] | 10 | 2015-07-01T11:20:35.000Z | 2020-10-02T18:58:07.000Z |
import logging
from django.core.management.base import BaseCommand
from django.template.defaultfilters import slugify
from july.models import User
from july.people.models import Location
from july.utils import check_location
from optparse import make_option
class Command(BaseCommand):
help = 'fix locations'
option_list = BaseCommand.option_list + (
make_option(
'--commit',
action='store_true',
dest='commit',
default=False,
help='Actually move the items.'),
)
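    # Example (hedged): preview changes with `python manage.py fix_locations`,
    # then apply them with `python manage.py fix_locations --commit`.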
def handle(self, *args, **options):
commit = options['commit']
empty = 0
fine = 0
fixable = 0
bad = []
for location in Location.objects.all():
user_count = User.objects.filter(location=location).count()
if not user_count:
logging.info("Empty location: %s", location)
if commit:
location.delete()
logging.info('Deleted')
empty += 1
continue
l = check_location(location.name)
if l == location.name:
logging.info('Location fine: %s', location)
fine += 1
continue
if not commit:
if l:
fixable += 1
else:
bad.append((location, user_count))
continue
elif l is not None:
new_loc = Location.create(l)
User.objects.filter(location=location).update(location=new_loc)
user_count = User.objects.filter(location=location).count()
if not user_count:
logging.error("missed users!")
else:
location.delete()
elif l is None:
logging.info('Bad location: %s', location)
location.approved = False
location.save()
if not commit:
            for loc, count in bad:
                logging.error('Bad Loc: %s, count: %s', loc, count)
logging.info('Empty: %s, Fine: %s, fixable: %s',
empty, fine, fixable)
logging.info('Add --commit to fix locations')
| 32.637681 | 79 | 0.517318 | 232 | 2,252 | 4.961207 | 0.349138 | 0.057341 | 0.044309 | 0.065161 | 0.146829 | 0.118158 | 0.118158 | 0.118158 | 0.118158 | 0.118158 | 0 | 0.00436 | 0.388988 | 2,252 | 68 | 80 | 33.117647 | 0.832122 | 0 | 0 | 0.216667 | 0 | 0 | 0.098179 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016667 | false | 0 | 0.116667 | 0 | 0.183333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79b9db085b3980a703000d7ded2a0b497ec1fcdd | 6,338 | py | Python | core/feature/gps_location_daywise/gps_location_daywise.py | MD2Korg/CerebralCortex-DataAnalysis | 73f5ea2430bc7c23de422dccb7b65ef9f8917595 | [
"BSD-2-Clause"
] | 1 | 2018-04-24T18:11:24.000Z | 2018-04-24T18:11:24.000Z | core/feature/gps_location_daywise/gps_location_daywise.py | Boris69bg/CerebralCortex-DataAnalysis | 49565bdff348d69153bd5d3a37e73f1645f82b32 | [
"BSD-2-Clause"
] | 10 | 2018-03-13T19:04:09.000Z | 2018-05-12T01:40:03.000Z | core/feature/gps_location_daywise/gps_location_daywise.py | Boris69bg/CerebralCortex-DataAnalysis | 49565bdff348d69153bd5d3a37e73f1645f82b32 | [
"BSD-2-Clause"
] | 42 | 2017-12-07T17:08:14.000Z | 2019-06-02T08:25:12.000Z | # Copyright (c) 2018, MD2K Center of Excellence
# - Alina Zaman <azaman@memphis.edu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from cerebralcortex.core.datatypes.datastream import DataPoint
from datetime import datetime, timedelta, time
from core.computefeature import ComputeFeatureBase
from typing import List
import traceback
feature_class_name = 'GpsLocationDaywise'
GPS_EPISODES_AND_SEMANTIC_LOCATION_STREAM = "org.md2k.data_analysis.gps_episodes_and_semantic_location_from_model"
class GpsLocationDaywise(ComputeFeatureBase):
"""
    Produce a day-wise semantic-location feature from the
    "org.md2k.data_analysis.gps_episodes_and_semantic_location_from_model"
    stream. A data point that starts on one day and ends on another is split
    at midnight into one point per covered day, giving daily semantic locations.
"""
def listing_all_gps_location_daywise(self, user_id: str, all_days: List[str]):
"""
Produce and save the gps location of participant's in day basis
:param str user_id: UUID of the stream owner
:param List(str) all_days: All days of the user in the format 'YYYYMMDD'
"""
self.CC.logging.log('%s started processing for user_id %s' %
(self.__class__.__name__, str(user_id)))
gps_data = []
stream_ids = self.get_latest_stream_id(user_id,
                                               GPS_EPISODES_AND_SEMANTIC_LOCATION_STREAM)
for stream_id in stream_ids:
for day in all_days:
location_data_stream = \
self.CC.get_stream(stream_id["identifier"], user_id, day, localtime=False)
for data in set(location_data_stream.data):
if data.start_time.date() != data.end_time.date():
temp = DataPoint(data.start_time, data.end_time, data.offset, data.sample)
start_day = data.start_time.date()
end_time = datetime.combine(start_day, time.max)
end_time = end_time.replace(tzinfo=data.start_time.tzinfo)
temp.end_time = end_time
gps_data.append(temp)
end_day = data.end_time.date()
start_day += timedelta(days=1)
while start_day != end_day:
temp = DataPoint(data.start_time, data.end_time, data.offset, data.sample)
start_time = datetime.combine(start_day, time.min)
start_time = start_time.replace(tzinfo=data.start_time.tzinfo)
temp.start_time = start_time
end_time = datetime.combine(start_day, time.max)
end_time = end_time.replace(tzinfo=data.start_time.tzinfo)
temp.end_time = end_time
gps_data.append(temp)
start_day += timedelta(days=1)
temp = DataPoint(data.start_time, data.end_time, data.offset, data.sample)
start_time = datetime.combine(start_day, time.min)
start_time = start_time.replace(tzinfo=data.start_time.tzinfo)
temp.start_time = start_time
gps_data.append(temp)
else:
gps_data.append(data)
try:
if len(gps_data):
streams = self.CC.get_user_streams(user_id)
for stream_name, stream_metadata in streams.items():
                    if stream_name == GPS_EPISODES_AND_SEMANTIC_LOCATION_STREAM:
self.store_stream(filepath="gps_location_daywise.json",
input_streams=[stream_metadata],
user_id=user_id,
data=gps_data)
break
except Exception as e:
self.CC.logging.log("Exception:", str(e))
self.CC.logging.log(traceback.format_exc())
self.CC.logging.log('%s finished processing for user_id %s saved %d '
'data points' %
(self.__class__.__name__, str(user_id),
len(gps_data)))
def process(self, user_id: str, all_days: List[str]):
"""
Main processing function inherited from ComputeFeatureBase
:param str user_id: UUID of the user
:param List(str) all_days: List of days with format 'YYYYMMDD'
"""
if self.CC is not None:
self.CC.logging.log("Processing Working Days")
self.listing_all_gps_location_daywise(user_id, all_days)
| 48.015152 | 114 | 0.631745 | 779 | 6,338 | 4.947368 | 0.297818 | 0.044369 | 0.030358 | 0.028542 | 0.395693 | 0.314997 | 0.251168 | 0.239232 | 0.225221 | 0.199792 | 0 | 0.002032 | 0.301041 | 6,338 | 131 | 115 | 48.381679 | 0.867946 | 0.304986 | 0 | 0.266667 | 0 | 0 | 0.057998 | 0.021749 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026667 | false | 0 | 0.186667 | 0 | 0.226667 | 0.013333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79bc48970d40d17cad24372399c2613c8f57a896 | 2,939 | py | Python | benchmark/invocation.py | zanderhavgaard/thesis-code | d9f193e622b8b98ec88c33006f8e0e1dbb3d17fc | [
"MIT"
] | null | null | null | benchmark/invocation.py | zanderhavgaard/thesis-code | d9f193e622b8b98ec88c33006f8e0e1dbb3d17fc | [
"MIT"
] | 2 | 2020-04-28T07:59:30.000Z | 2020-05-17T15:36:04.000Z | benchmark/invocation.py | zanderhavgaard/thesis-code | d9f193e622b8b98ec88c33006f8e0e1dbb3d17fc | [
"MIT"
] | null | null | null | import sys
import uuid
import psutil
import time
from datetime import datetime
# remove for production
from pprint import pprint
from functools import reduce
import function_lib as lib
class Invocation:
def __init__(self, exp_uuid: str, root: str, data: dict):
self.exp_id = exp_uuid
self.root_identifier = root
# parse data to self
self.parse_data(data)
def get_data(self):
return self.__dict__
def dev_print(self):
pprint(self.get_data())
def parse_data(self, data: dict):
        for key in data:
            setattr(self, key, data[key])
# invocation can be either a success or an error, this will be marked
        if 'error' in data:
self.is_error = True
self.type = lib.str_replace(self.error['type'],[('\'',''),('\"','')])
self.trace = lib.str_replace(self.error['trace'],[('\'',''),('\"','')])
self.message = lib.str_replace( self.error['message'], [('\'',''),('\"','')])
delattr(self, 'error')
else:
self.is_error = False
self.execution_total = self.execution_end - self.execution_start
self.invocation_total = self.invocation_end - self.invocation_start
def create_monolith_query(self, invo_dict:dict):
keys = 'exp_id,invo_id,seed,function_argument,function_called,monolith_result'
values = """'{0}','{1}',{2},{3},'{4}','{5}'""".format(
self.exp_id,
invo_dict['identifier'],
invo_dict.pop('seed'),
invo_dict.pop('function_argument'),
invo_dict.pop('function_called'),
invo_dict.pop('monolith_result'))
if 'process_time_matrix' in invo_dict:
keys += ',process_time_matrix,running_time_matrix'
values += """,{0},{1}""".format(invo_dict.pop('process_time_matrix'),invo_dict.pop('running_time_matrix'))
return [f'INSERT INTO Monolith ({keys}) VALUES ({values});']
def get_query_string(self):
key_values = self.__dict__.copy()
monolith = [] if 'monolith_result' not in key_values else self.create_monolith_query(key_values)
is_error = key_values.pop('is_error')
        for attr, value in key_values.copy().items():
            if value is None:
                key_values.pop(attr)
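        # fold the remaining items into comma-separated column/value strings,
        # quoting string values; the [('','')] element seeds the accumulators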
(keys,vals) = reduce(lambda x,y: ( f'{x[0]}{y[0]},', f'{x[1]}{y[1]},') if not isinstance(y[1],str)
else ( f'{x[0]}{y[0]},', f"""{x[1]}'{y[1]}',""") ,[('','')] + list(key_values.items()))
return ['INSERT INTO {0} ({1}) VALUES ({2});'.format('Error' if is_error else 'Invocation', keys[:-1], vals[:-1])]+monolith
| 40.819444 | 131 | 0.543042 | 361 | 2,939 | 4.204986 | 0.277008 | 0.047431 | 0.043478 | 0.033597 | 0.056653 | 0.013175 | 0.013175 | 0.013175 | 0.013175 | 0.013175 | 0 | 0.011667 | 0.300102 | 2,939 | 71 | 132 | 41.394366 | 0.7263 | 0.036747 | 0 | 0 | 0 | 0 | 0.171621 | 0.04954 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0.019608 | 0.156863 | 0.019608 | 0.352941 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79bcabef33a714ea5bd9e55eb07ea14a99365d51 | 4,123 | py | Python | src/pymor/tools/io/vtk.py | kinnala/pymor | 9d2a8ee5f7a71482e62952257332d269d50678e9 | [
"Unlicense"
] | 2 | 2022-03-22T11:47:12.000Z | 2022-03-22T11:48:23.000Z | src/pymor/tools/io/vtk.py | kinnala/pymor | 9d2a8ee5f7a71482e62952257332d269d50678e9 | [
"Unlicense"
] | 14 | 2022-01-05T09:25:11.000Z | 2022-03-31T17:07:10.000Z | src/pymor/tools/io/vtk.py | moro1111/pymor | aa03f2521ee3c7b8a9e7da4cb109caea4c788b29 | [
"Unlicense"
] | 1 | 2022-03-28T10:58:18.000Z | 2022-03-28T10:58:18.000Z | # This file is part of the pyMOR project (https://www.pymor.org).
# Copyright pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
from pymor.core.config import config
config.require('VTKIO')
from pathlib import Path
import meshio
from xml.etree.ElementTree import fromstring
from collections import OrderedDict
from xmljson import BadgerFish
from lxml import etree
from pymor.core.exceptions import IOLibsMissing
def _read_collection(xml, metadata_key):
collection = xml['VTKFile']['Collection']
files = collection['DataSet']
data = [(fl[f'@{metadata_key}'], _read_single(fl['@file'])) for fl in files]
data.sort(key=lambda t: t[0])
return data
def _read_single(filename):
mesh = meshio.read(filename)
assert len(mesh.points)
return mesh
def _get_collection_data(filename):
path = Path(filename)
assert path.is_file()
bf = BadgerFish(dict_type=OrderedDict)
    with open(path, 'rb') as xml_file:
        return path, bf.data(fromstring(xml_file.read()))
def _get_vtk_type(path):
"""Parse given file until a VTKFile element is found.
We use the incremental event emitting parser here since we can expect to encounter appended
binary data in the xml which lxml cannot parse.
Parameters
----------
path
vtk file to peek into
Returns
-------
None if no VTKFile element found, else the type attribute of the VTKFile element
"""
parser = etree.XMLPullParser(events=('start',))
with open(path, 'rb') as xml:
for lines in xml.readlines():
parser.feed(lines)
for action, element in parser.read_events():
if element.tag == 'VTKFile':
return element.get('type')
return None
def read_vtkfile(filename, metadata_key='timestep'):
"""Try to read a given file into a Sequence of meshio.Mesh instances
Parameters
----------
    filename
        Path of the vtk file (vtk/vtu/vtp, or a pvd collection) to read.
    metadata_key
Which metadata to extract and return alongside the meshio.Mesh instances.
Returns
-------
A list of (metadata_value, meshio.Mesh) tuples. The length of the list is either 1 for
a singular vtk/vtu/vtp input file (None is returned as metadata),
or however many members are in the collection file (pvd).
"""
from pymor.tools.io import change_to_directory
vtk_type = _get_vtk_type(filename)
if vtk_type == 'Collection':
path, xml = _get_collection_data(filename)
with change_to_directory(path.parent):
return _read_collection(xml, metadata_key=metadata_key)
    return [(None, _read_single(filename))]
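# Usage sketch ('solution.pvd' is a hypothetical collection file name):
#   for timestep, mesh in read_vtkfile('solution.pvd'):
#       print(timestep, len(mesh.points))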
def write_vtk_collection(filename_base, meshes, metadata=None):
"""Output grid-associated data in vtk format
filename_base
common component for output files in collection
meshes
Sequence of meshio.Mesh objects
metadata
dict of {key1: sequence1, key2: sequence2} where sequence must be of len(meshes) or len == 1
currently supported keys are "timestep", "name", "group" and "part"
used to describe datapoints in Vtk collection file
defaults to { 'timestep': list(range(len(meshes))) }
Returns
-------
full filename of saved file
"""
if not config.HAVE_VTKIO:
raise IOLibsMissing()
from pyevtk.vtk import VtkGroup
fn_tpl = '{}_{:08d}.vtu'
metadata = metadata or {'timestep': list(range(len(meshes)))}
def _meta(key, i):
if key in metadata.keys():
return metadata[key][0] if len(metadata[key]) == 1 else metadata[key][i]
# carry over defaults from pyevtk to not break backwards compat
return {'timestep': 0, 'group': '', 'name': '', 'part': '0'}[key]
group = VtkGroup(filename_base)
for i, mesh in enumerate(meshes):
fn = fn_tpl.format(filename_base, i)
mesh.write(fn)
group.addFile(filepath=fn, sim_time=_meta('timestep', i), group=_meta('group', i), name=_meta('name', i),
part=_meta('part', i))
group.save()
return f'{filename_base}.pvd'
| 32.464567 | 113 | 0.66408 | 551 | 4,123 | 4.865699 | 0.352087 | 0.036927 | 0.00746 | 0.01865 | 0.040283 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004717 | 0.228717 | 4,123 | 126 | 114 | 32.722222 | 0.838365 | 0.369634 | 0 | 0 | 0 | 0 | 0.06953 | 0 | 0 | 0 | 0 | 0 | 0.033898 | 1 | 0.118644 | false | 0 | 0.169492 | 0 | 0.457627 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79c3715dd99e77bde511c274c350ef404bb7cf2e | 1,887 | py | Python | models/vanilla_cnn.py | mamaheux/pytorch-exemple-calcul-canada | 41bd1769aaf30bd3786589bd3e3252bb115fdd69 | [
"MIT"
] | null | null | null | models/vanilla_cnn.py | mamaheux/pytorch-exemple-calcul-canada | 41bd1769aaf30bd3786589bd3e3252bb115fdd69 | [
"MIT"
] | null | null | null | models/vanilla_cnn.py | mamaheux/pytorch-exemple-calcul-canada | 41bd1769aaf30bd3786589bd3e3252bb115fdd69 | [
"MIT"
] | null | null | null | import torch.nn as nn
from models.blocks import GlobalAvgPool2d
class _VanillaConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size):
super(_VanillaConvBlock, self).__init__()
self._block = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=kernel_size // 2, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self._block(x)
class VanillaCnn(nn.Module):
def __init__(self, class_count=10, use_softmax=True):
super(VanillaCnn, self).__init__()
self._features = nn.Sequential(_VanillaConvBlock(in_channels=3, out_channels=8, kernel_size=3),
_VanillaConvBlock(in_channels=8, out_channels=16, kernel_size=3),
nn.MaxPool2d(kernel_size=2, stride=2),
_VanillaConvBlock(in_channels=16, out_channels=32, kernel_size=3),
_VanillaConvBlock(in_channels=32, out_channels=64, kernel_size=3),
nn.MaxPool2d(kernel_size=2, stride=2),
_VanillaConvBlock(in_channels=64, out_channels=128, kernel_size=3),
_VanillaConvBlock(in_channels=128, out_channels=256, kernel_size=3),
nn.MaxPool2d(kernel_size=2, stride=2))
classifier_layers = [
GlobalAvgPool2d(),
nn.Conv2d(256, class_count, kernel_size=1)
]
if use_softmax:
classifier_layers.append(nn.Softmax(dim=1))
self._classifier = nn.Sequential(*classifier_layers)
def forward(self, x):
y = self._features(x)
return self._classifier(y)[:, :, 0, 0]
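# Minimal smoke test (input shape assumed, e.g. CIFAR-10-sized 3x32x32 images):
#   import torch
#   model = VanillaCnn(class_count=10)
#   probs = model(torch.randn(1, 3, 32, 32))  # -> shape (1, 10); with the
#   default use_softmax=True each row sums to 1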
| 39.3125 | 110 | 0.581346 | 206 | 1,887 | 5 | 0.276699 | 0.126214 | 0.151456 | 0.078641 | 0.371845 | 0.334951 | 0.16699 | 0.16699 | 0.16699 | 0.16699 | 0 | 0.042868 | 0.320085 | 1,887 | 47 | 111 | 40.148936 | 0.759938 | 0 | 0 | 0.117647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.058824 | 0.029412 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79c4bc7411a8ceae834135ef0832c81c48a8f427 | 5,528 | py | Python | transforms.py | amitkumarj441/TGS_Kaggle | a4f613046cc36f3f6dbec28adb35f97a63c2a994 | [
"MIT"
] | 1 | 2019-03-20T07:10:08.000Z | 2019-03-20T07:10:08.000Z | transforms.py | amitkumarj441/TGS_Kaggle | a4f613046cc36f3f6dbec28adb35f97a63c2a994 | [
"MIT"
] | null | null | null | transforms.py | amitkumarj441/TGS_Kaggle | a4f613046cc36f3f6dbec28adb35f97a63c2a994 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
def upsample(image, image_size_target):
padding0 = (image_size_target - image.shape[0]) / 2
padding1 = (image_size_target - image.shape[1]) / 2
padding_start0 = int(np.ceil(padding0))
padding_end0 = int(np.floor(padding0))
padding_start1 = int(np.ceil(padding1))
padding_end1 = int(np.floor(padding1))
return cv2.copyMakeBorder(image, padding_start0, padding_end0, padding_start1, padding_end1, cv2.BORDER_REFLECT_101)
def downsample(image, image_size_original):
padding = (image.shape[0] - image_size_original) / 2
padding_start = int(np.ceil(padding))
return image[padding_start:padding_start + image_size_original, padding_start:padding_start + image_size_original]
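# Note: downsample inverts upsample under this padding scheme, e.g.
# downsample(upsample(img, 128), 101) recovers the original 101x101 image,
# since both use ceil(padding) on the leading edge.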
def augment(image, mask):
if np.random.rand() < 0.5:
image = np.fliplr(image)
mask = np.fliplr(mask)
if np.random.rand() < 0.5:
c = np.random.choice(2)
if c == 0:
image = multiply_brightness(image, np.random.uniform(1 - 0.1, 1 + 0.1))
elif c == 1:
image = adjust_gamma(image, np.random.uniform(1 - 0.1, 1 + 0.1))
if np.random.rand() < 0.5:
c = np.random.choice(3)
if c == 0:
image, mask = apply_elastic_transform(image, mask, alpha=150, sigma=8, alpha_affine=0)
elif c == 1:
image, mask = apply_elastic_transform(image, mask, alpha=0, sigma=0, alpha_affine=8)
elif c == 2:
image, mask = apply_elastic_transform(image, mask, alpha=150, sigma=10, alpha_affine=5)
if np.random.rand() < 0.5:
image, mask = random_crop_and_pad(image, mask)
return image, mask
def multiply_brightness(image, coefficient):
image_HLS = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
image_HLS = np.array(image_HLS, dtype=np.float64)
image_HLS[:, :, 1] = image_HLS[:, :, 1] * coefficient
image_HLS[:, :, 1][image_HLS[:, :, 1] > 255] = 255
image_HLS = np.array(image_HLS, dtype=np.uint8)
return cv2.cvtColor(image_HLS, cv2.COLOR_HLS2RGB)
def adjust_gamma(image, gamma):
# build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255 for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
return cv2.LUT(image, table)
# Function to distort image
def elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):
"""Elastic deformation of images as described in [Simard2003]_ (with modifications).
.. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
"""
if random_state is None:
random_state = np.random.RandomState(None)
shape = image.shape
shape_size = shape[:2]
# Random affine
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32([center_square + square_size, [center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size])
pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
M = cv2.getAffineTransform(pts1, pts2)
image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
dz = np.zeros_like(dx)
x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))
return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)
def apply_elastic_transform(image, mask, alpha, sigma, alpha_affine):
channels = np.concatenate((image, mask[..., None]), axis=2)
result = elastic_transform(channels, alpha, sigma, alpha_affine, random_state=np.random.RandomState(None))
image_result = result[..., 0:3]
mask_result = result[..., 3]
mask_result = (mask_result > 0.5).astype(mask.dtype)
return image_result, mask_result
def random_crop_and_pad(image, mask):
max_crop = 40
crop_x_total = np.random.randint(max_crop)
crop_x0 = np.random.randint(crop_x_total + 1)
crop_x1 = crop_x_total - crop_x0
crop_y_total = np.random.randint(max_crop)
crop_y0 = np.random.randint(crop_y_total + 1)
crop_y1 = crop_y_total - crop_y0
cropped_image = image[crop_x0:image.shape[0] - crop_x1, crop_y0:image.shape[1] - crop_y1, :]
cropped_mask = mask[crop_x0:mask.shape[0] - crop_x1, crop_y0:mask.shape[1] - crop_y1]
cropped_padded_image = upsample(cropped_image, image.shape[0])
cropped_padded_mask = upsample(cropped_mask, mask.shape[0])
return cropped_padded_image, cropped_padded_mask
def random_crop_to_size(image, mask, size):
dmax = image.shape[0] - size
dx = np.random.randint(dmax + 1)
dy = np.random.randint(dmax + 1)
cropped_image = image[dx:dx + size, dy:dy + size, :]
cropped_mask = mask[dx:dx + size, dy:dy + size]
return cropped_image, cropped_mask
| 38.929577 | 120 | 0.680174 | 806 | 5,528 | 4.475186 | 0.219603 | 0.035487 | 0.024951 | 0.015525 | 0.289992 | 0.254505 | 0.168561 | 0.114777 | 0.085944 | 0.085944 | 0 | 0.045098 | 0.193741 | 5,528 | 141 | 121 | 39.205674 | 0.764191 | 0.100398 | 0 | 0.086022 | 0 | 0 | 0.00243 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.043011 | 0 | 0.236559 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79c67d96a3b58ae9b3f7d1e5efc7a2527f181276 | 1,663 | py | Python | taskobra/monitor/system_info.py | Vipyr/taskobra | d9884f006ef9c735852075912d5a945543de52f5 | [
"MIT"
] | null | null | null | taskobra/monitor/system_info.py | Vipyr/taskobra | d9884f006ef9c735852075912d5a945543de52f5 | [
"MIT"
] | 43 | 2020-02-06T22:23:42.000Z | 2020-04-29T23:56:43.000Z | taskobra/monitor/system_info.py | Vipyr/taskobra | d9884f006ef9c735852075912d5a945543de52f5 | [
"MIT"
] | 2 | 2020-02-06T21:01:42.000Z | 2020-02-06T23:43:11.000Z | from taskobra.orm import *
import platform
import cpuinfo
import subprocess
def create_system(args, database_engine):
system = System(name=platform.node())
cpu_info = cpuinfo.get_cpu_info()
system.add_component(OperatingSystem(
name=platform.system(),
version=platform.platform(),
))
system.add_component(CPU(
manufacturer=cpu_info.get('vendor_id', ''),
model=cpu_info.get('brand', ''),
isa=cpu_info.get('arch', ''),
core_count=cpu_info.get('count', 1),
threads_per_core=1,
nominal_frequency=(cpu_info.get('hz_actual_raw')[0] / 1000000000),
))
with get_session(bind=database_engine) as session:
current_system = session.query(System).filter(
System.name == platform.node(),
).first()
if current_system is None:
session.add(system)
session.commit()
#gpu = GPU(
# manufacturer="NVIDIA",
# model="1070",
# architecture="CUDA",
# tdp=105,
# core_count=1920,
# memory=8.0,
#)
#memory = Memory(
# manufacturer="G-Skill",
# model="Trident",
# standard="DDR4",
# capacity=16.0,
# frequency=3600,
# cas_latency=16,
# t_rcd=19,
# t_rp=19,
# t_ras=39,
#)
#storage = Storage(
# manufacturer="Sabrent",
# model="Rocket",
# standard="NVMe PCIe 4.0",
# capacity=500.0,
# max_read=5000,
# max_write=2500,
#)
#system.add_component(gpu)
#system.add_component(memory)
#system.add_component(memory)
#system.add_component(storage)
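# Hypothetical call site (engine creation depends on taskobra's configuration;
# create_engine is the usual SQLAlchemy entry point and is an assumption here):
#   from sqlalchemy import create_engine
#   create_system(args=None, database_engine=create_engine("sqlite://"))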
| 25.984375 | 74 | 0.578473 | 185 | 1,663 | 5.016216 | 0.491892 | 0.052802 | 0.116379 | 0.047414 | 0.071121 | 0.071121 | 0.071121 | 0 | 0 | 0 | 0 | 0.046823 | 0.280818 | 1,663 | 63 | 75 | 26.396825 | 0.729097 | 0.338545 | 0 | 0.076923 | 0 | 0 | 0.033676 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.153846 | 0 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79cc5960935ea7fbba4fb0eb6555e1ecb03c2fbf | 1,562 | py | Python | mints/args/typed.py | candy-kingdom/mints | e68a2351cf3ff6823e978bc6a4b740bd2a974ca3 | [
"MIT"
] | 4 | 2020-05-09T11:01:32.000Z | 2020-06-03T14:44:06.000Z | mints/args/typed.py | candy-kingdom/cli | e68a2351cf3ff6823e978bc6a4b740bd2a974ca3 | [
"MIT"
] | 43 | 2020-01-27T21:14:16.000Z | 2020-06-18T17:57:20.000Z | mints/args/typed.py | candy-kingdom/mints | e68a2351cf3ff6823e978bc6a4b740bd2a974ca3 | [
"MIT"
] | null | null | null | from typing import Type, Any
class Typed:
"""A typed command line argument.
A typed command line argument is used for specifying the type
to convert the value to. For example, consider the following code:
@cli
def double(number: Opt[int]('A number to double.')):
print(number * 2)
When this CLI is called as
$ example.py double --number 5
the value '5' of the argument '--number' is converted to `int`
and passed to the function.
Note:
The default type of arguments is `str`. Thus, if an argument
is annotated as `Opt('A number to double.')`, a string value
will be passed to the function.
Attributes:
kind: A kind of an argument
(for example, `Arg`, `Opt` or `Flag`).
type: A type of an argument
(for example, `int`, `List[double]`, etc.).
"""
def __init__(self, kind: Type, type: Type):
self.kind = kind
self.type = type
def __call__(self, *args: Any, **kwargs: Any) -> 'Typed':
# Instantiate the parameter being wrapped. For example,
# `Arg[int]` will return `Typed(Arg, int)`, and
# `Typed(Arg, int)('Description.')` will instantiate
# `self.kind = Arg('Description.')`.
if isinstance(self.kind, type):
self.kind = self.kind(*args, **kwargs)
else:
            raise ValueError(f"Cannot instantiate {type(self.kind)} twice: it "
                             f"is already instantiated as {repr(self.kind)}.")
return self
| 31.877551 | 78 | 0.587068 | 206 | 1,562 | 4.412621 | 0.398058 | 0.070407 | 0.039604 | 0.037404 | 0.10341 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002747 | 0.300896 | 1,562 | 48 | 79 | 32.541667 | 0.82967 | 0.604994 | 0 | 0 | 0 | 0 | 0.180791 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.083333 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79cd97ed3020f125684d084c92be22793583d226 | 5,994 | py | Python | lab3/es2/webservice.py | haraldmeister/Programming_for_IoT_applications | 04ec13689caee1fca28bf4fb6a261c318ebd374d | [
"Apache-2.0"
] | null | null | null | lab3/es2/webservice.py | haraldmeister/Programming_for_IoT_applications | 04ec13689caee1fca28bf4fb6a261c318ebd374d | [
"Apache-2.0"
] | null | null | null | lab3/es2/webservice.py | haraldmeister/Programming_for_IoT_applications | 04ec13689caee1fca28bf4fb6a261c318ebd374d | [
"Apache-2.0"
] | null | null | null | import json
import time
import cherrypy
class albums:
def __init__(self,artist,year,title,num):
self.artist=artist
self.year=year
self.title=title
self.N=num
class owner(albums):
def __init__(self,nome,date):
self.album_list=[]
self.nome=nome
self.last_upd=date
self.result={"Artist":[],"Year":[],"Title":[],"Total songs":[]}
self.discography={"Owner":self.nome,"Last update":self.last_upd,"Album List":self.album_list}
def search_artist(self,key_artist):
for i in range(len(self.album_list)):
if(str(self.album_list[i].artist)==key_artist):
self.result["Artist"]=self.album_list[i].artist
self.result["Year"]=self.album_list[i].year
self.result["Title"]=self.album_list[i].title
self.result["Total songs"]=self.album_list[i].N
return json.loads(json.dumps(self.result,default=lambda x: x.__dict__))
return json.loads(json.dumps(self.result,default=lambda x: x.__dict__))
def search_title(self,key_title):
for i in range(len(self.album_list)):
if(str(self.album_list[i].title)==key_title):
self.result["Artist"]=self.album_list[i].artist
self.result["Year"]=self.album_list[i].year
self.result["Title"]=self.album_list[i].title
self.result["Total songs"]=self.album_list[i].N
return json.loads(json.dumps(self.result,default=lambda x: x.__dict__))
return json.loads(json.dumps(self.result,default=lambda x: x.__dict__))
def search_year(self,key_year):
for i in range(len(self.album_list)):
if(str(self.album_list[i].year)==key_year):
self.result["Artist"]=self.album_list[i].artist
self.result["Year"]=self.album_list[i].year
self.result["Title"]=self.album_list[i].title
self.result["Total songs"]=self.album_list[i].N
return json.loads(json.dumps(self.result,default=lambda x: x.__dict__))
return json.loads(json.dumps(self.result,default=lambda x: x.__dict__))
def search_totalsong(self,key_nsong):
for i in range(len(self.album_list)):
if(str(self.album_list[i].N)==key_nsong):
self.result["Artist"]=self.album_list[i].artist
self.result["Year"]=self.album_list[i].year
self.result["Title"]=self.album_list[i].title
self.result["Total songs"]=self.album_list[i].N
                return json.loads(json.dumps(self.result, default=lambda x: x.__dict__))
return json.loads(json.dumps(self.result,default=lambda x: x.__dict__))
def insert_album(self,artist,year,title,num):
for i in range(len(self.album_list)):
if(str(self.album_list[i].artist)==artist and str(self.album_list[i].title)==title ):
self.album_list[i].N=num
self.album_list[i].year=year
self.last_upd=time.strftime('%d/%m/%Y')+' '+time.strftime('%H:%M:%S')
return
self.album_list.append(albums(artist,year,title,num))
self.last_upd=time.strftime('%d/%m/%Y')+' '+time.strftime('%H:%M:%S')
def delete_album(self,artist,year,title,num):
for i in range(len(self.album_list)):
if(str(self.album_list[i].artist)==artist and str(self.album_list[i].title)==title ):
self.album_list.remove(self.album_list[i])
self.last_upd=time.strftime('%d/%m/%Y')+' '+time.strftime('%H:%M:%S')
def print_all(self):
return json.loads(json.dumps(self.discography,default=lambda x: x.__dict__))
class Discography(owner):
exposed=True
def __init__(self):
        with open("discography.txt") as json_data:
            data = json.load(json_data)
self.discogr=owner(data['discography_owner'],data['last_update'])
for j in range(len(data['album_list'])):
self.discogr.album_list.append(albums(data['album_list'][j]['artist'],
data['album_list'][j]['publication_year'],
data['album_list'][j]['title'],
data['album_list'][j]['total_tracks']))
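    # Expected shape of discography.txt (field values illustrative, keys taken
    # from the reads above):
    # {"discography_owner": "...", "last_update": "...",
    #  "album_list": [{"artist": "...", "publication_year": 1999,
    #                  "title": "...", "total_tracks": 10}]}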
@cherrypy.tools.json_out()
def GET(self,*uri,**params):
if (len(uri)==0):
return self.discogr.print_all()
else:
if uri[0]=="search_artist":
return self.discogr.search_artist(uri[1])
elif uri[0]=="search_title":
return self.discogr.search_title(uri[1])
elif uri[0]=="search_year":
return self.discogr.search_year(uri[1])
elif uri[0]=="search_totalsong":
return self.discogr.search_totalsong(uri[1])
elif uri[0]=="print":
return self.discogr.print_all()
@cherrypy.tools.json_in()
def POST(self,*uri,**params):
if uri[0]=="insert_album":
input_json = cherrypy.request.json
artist=input_json["artist"]
year=int(input_json["year"])
title=input_json["title"]
N=int(input_json["N"])
self.discogr.insert_album(artist,year,title,N)
return
if uri[0]=="delete_album":
input_json = cherrypy.request.json
artist=input_json["artist"]
year=int(input_json["year"])
title=input_json["title"]
N=int(input_json["N"])
self.discogr.delete_album(artist,year,title,N)
return
if __name__ == '__main__':
conf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.sessions.on': True
}
}
cherrypy.tree.mount(Discography(), '/', conf)
cherrypy.config.update({'server.socket_host': '0.0.0.0'})
cherrypy.config.update({'server.socket_port': 9090})
cherrypy.engine.start()
cherrypy.engine.block()
| 45.067669 | 101 | 0.584918 | 790 | 5,994 | 4.249367 | 0.124051 | 0.117962 | 0.143283 | 0.112601 | 0.633006 | 0.569258 | 0.543342 | 0.526065 | 0.526065 | 0.526065 | 0 | 0.004514 | 0.260761 | 5,994 | 132 | 102 | 45.409091 | 0.753103 | 0 | 0 | 0.395161 | 0 | 0 | 0.089756 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.024194 | 0.008065 | 0.298387 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79d29ea8f56cec3596c251c94d5aca0bfd3a1026 | 1,653 | pyde | Python | examples/01_game_of_life/sketch_gameoflife.pyde | underwit/pyprocessing-examples | c6e84fded23dcdd5bf32d499aa91900d68ec213d | [
"MIT"
] | null | null | null | examples/01_game_of_life/sketch_gameoflife.pyde | underwit/pyprocessing-examples | c6e84fded23dcdd5bf32d499aa91900d68ec213d | [
"MIT"
] | null | null | null | examples/01_game_of_life/sketch_gameoflife.pyde | underwit/pyprocessing-examples | c6e84fded23dcdd5bf32d499aa91900d68ec213d | [
"MIT"
] | null | null | null | import random
from itertools import product
CS = 10 # cell size
W = 600 # width
H = 600 # height
COLS = W // CS
ROWS = H // CS
DENSITY = 0.35
dirs = list(product((-1, 0, 1), repeat=2))
dirs.remove((0, 0))
points = []
new_points = []
run = False
def xy2flat(x, y):
x = (x + COLS) % COLS
y = (y + ROWS) % ROWS
return x + COLS * y
def flat2xy(index):
return index % COLS, index // COLS
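# The modulo arithmetic makes the board toroidal, e.g. xy2flat(-1, 0) == COLS - 1,
# so cells on one edge treat cells on the opposite edge as neighbours.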
def setup():
frameRate(20)
size(600, 600)
    for _ in range(COLS * ROWS):
points.append(random.random() < DENSITY)
new_points.append(False)
def mouseClicked():
x = mouseX // CS
y = mouseY // CS
index = xy2flat(x, y)
points[index] = not points[index]
def keyPressed():
global run
if key == ' ':
run = not run
elif key == 'r': # randomly fill the board
for i, _ in enumerate(points):
points[i] = random.random() < DENSITY
elif key == 'c': # clear the board
for i, _ in enumerate(points):
points[i] = False
def calc_cell(index):
x, y = flat2xy(index)
nb = sum([points[xy2flat(x + _x, y + _y)] for _x, _y in dirs])
new_points[index] = points[index]
if points[index] and (nb < 2 or nb > 3):
new_points[index] = False
elif nb == 3:
new_points[index] = True
def draw():
global points, new_points
background(52, 63, 62)
fill(220, 237, 255)
for index, is_alive in enumerate(points):
if is_alive:
x, y = flat2xy(index)
rect(x * CS, y * CS, CS, CS)
if run:
calc_cell(index)
if run:
points, new_points = new_points, points
| 21.467532 | 66 | 0.557774 | 241 | 1,653 | 3.751037 | 0.327801 | 0.079646 | 0.066372 | 0.026549 | 0.117257 | 0.079646 | 0.079646 | 0.079646 | 0.079646 | 0 | 0 | 0.043821 | 0.30974 | 1,653 | 76 | 67 | 21.75 | 0.748466 | 0.037508 | 0 | 0.1 | 0 | 0 | 0.001893 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116667 | false | 0 | 0.033333 | 0.016667 | 0.183333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79d585582066d6853246fd14d5eec7b556e67b85 | 5,064 | py | Python | conanfile.py | madebr/conan-repo-actions-conan-libwebp | 2f2eaad6e8de2cbec611f19de5205fc0b3267492 | [
"MIT"
] | null | null | null | conanfile.py | madebr/conan-repo-actions-conan-libwebp | 2f2eaad6e8de2cbec611f19de5205fc0b3267492 | [
"MIT"
] | null | null | null | conanfile.py | madebr/conan-repo-actions-conan-libwebp | 2f2eaad6e8de2cbec611f19de5205fc0b3267492 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
from conans import ConanFile, CMake, tools
class LibwebpConan(ConanFile):
name = "libwebp"
version = "1.0.0"
description = "library to encode and decode images in WebP format"
url = "http://github.com/bincrafters/conan-libwebp"
homepage = "https://github.com/webmproject/libwebp"
author = "Bincrafters <bincrafters@gmail.com>"
license = "BSD 3-Clause"
exports = ["LICENSE.md"]
exports_sources = ['CMakeLists.txt',
'0001-install-pkg-config-files-during-the-CMake-build.patch']
generators = 'cmake'
_source_subfolder = "source_subfolder"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "fPIC": [True, False],
"with_simd": [True, False], "near_lossless": [True, False],
"swap_16bit_csp": [True, False]}
default_options = {'shared': False, 'fPIC': True, 'with_simd': True, 'near_lossless': True, 'swap_16bit_csp': False}
def source(self):
source_url = "https://github.com/webmproject/libwebp"
tools.get("{0}/archive/v{1}.tar.gz".format(source_url, self.version))
extracted_dir = self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
tools.patch(base_path=self._source_subfolder,
patch_file='0001-install-pkg-config-files-during-the-CMake-build.patch')
os.rename(os.path.join(self._source_subfolder, "CMakeLists.txt"),
os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"))
shutil.copy("CMakeLists.txt",
os.path.join(self._source_subfolder, "CMakeLists.txt"))
def configure(self):
del self.settings.compiler.libcxx
def config_options(self):
if self.settings.os == 'Windows':
del self.options.fPIC
@property
def _version_components(self):
return [int(x) for x in self.version.split('.')]
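        # e.g. version '1.0.0' yields [1, 0, 0]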
def _configure_cmake(self):
cmake = CMake(self)
# should be an option but it doesn't work yet
cmake.definitions["WEBP_ENABLE_SIMD"] = self.options.with_simd
if self._version_components[0] >= 1:
cmake.definitions["WEBP_NEAR_LOSSLESS"] = self.options.near_lossless
else:
cmake.definitions["WEBP_ENABLE_NEAR_LOSSLESS"] = self.options.near_lossless
cmake.definitions['WEBP_ENABLE_SWAP_16BIT_CSP'] = self.options.swap_16bit_csp
# avoid finding system libs
cmake.definitions['CMAKE_DISABLE_FIND_PACKAGE_GIF'] = True
cmake.definitions['CMAKE_DISABLE_FIND_PACKAGE_PNG'] = True
cmake.definitions['CMAKE_DISABLE_FIND_PACKAGE_TIFF'] = True
cmake.definitions['CMAKE_DISABLE_FIND_PACKAGE_JPEG'] = True
if self.settings.os == "Android":
if 'CMAKE_ANDROID_ARCH_ABI' in cmake.definitions:
cmake.definitions['ANDROID_ABI'] = cmake.definitions['CMAKE_ANDROID_ARCH_ABI']
if 'ANDROID_NDK_HOME' in os.environ:
cmake.definitions['ANDROID_NDK'] = os.environ.get('ANDROID_NDK_HOME')
cmake.configure(source_folder=self._source_subfolder)
return cmake
def build(self):
# WEBP_EXTERN is not specified on Windows
# Set it to dllexport for building (see CMakeLists.txt) and to dllimport otherwise
if self.options.shared and self.settings.compiler == "Visual Studio":
tools.replace_in_file(os.path.join(self._source_subfolder, 'src', 'webp', 'types.h'),
'#ifndef WEBP_EXTERN',
"""#ifndef WEBP_EXTERN
#ifdef _MSC_VER
#ifdef WEBP_DLL
#define WEBP_EXTERN __declspec(dllexport)
#else
#define WEBP_EXTERN __declspec(dllimport)
#endif
#endif /* _MSC_VER */
#endif
#ifndef WEBP_EXTERN""")
# cmake misses dll (RUNTIME) copy
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"),
"LIBRARY DESTINATION lib",
"RUNTIME DESTINATION bin\nLIBRARY DESTINATION lib")
if self._version_components[0] >= 1:
# allow to build webpmux
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"),
"if(WEBP_BUILD_GIF2WEBP OR WEBP_BUILD_IMG2WEBP)",
"if(TRUE)")
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy("COPYING", dst="licenses", src=self._source_subfolder)
self.copy("FindWEBP.cmake", dst=".", src=".")
def package_info(self):
self.cpp_info.libs = ['webpmux', 'webpdemux', 'webpdecoder', 'webp']
if self.options.shared and self.settings.os == "Windows" and self.settings.compiler != 'Visual Studio':
self.cpp_info.libs = [lib + '.dll' for lib in self.cpp_info.libs]
| 42.2 | 120 | 0.634874 | 598 | 5,064 | 5.16388 | 0.304348 | 0.05829 | 0.061529 | 0.027202 | 0.310557 | 0.271697 | 0.202396 | 0.138601 | 0.090997 | 0.090997 | 0 | 0.007576 | 0.244076 | 5,064 | 119 | 121 | 42.554622 | 0.799112 | 0.056872 | 0 | 0.073171 | 0 | 0 | 0.267637 | 0.102954 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.036585 | 0.012195 | 0.341463 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79d5867444343ac92dd71c753e06968277e1c875 | 5,184 | py | Python | sjtwo-c/site_scons/site_tools/codegen/site_packages/can/broadcastmanager.py | seanlinc/Playmate | 077877d172dd6b7beab910c52ec95ee300bc6480 | [
"Apache-2.0"
] | 2 | 2020-04-04T21:09:56.000Z | 2020-04-08T17:00:58.000Z | sjtwo-c/site_scons/site_tools/codegen/site_packages/can/broadcastmanager.py | seanlinc/Playmate | 077877d172dd6b7beab910c52ec95ee300bc6480 | [
"Apache-2.0"
] | 13 | 2020-04-11T21:50:57.000Z | 2020-04-19T03:19:48.000Z | sjtwo-c/site_scons/site_tools/codegen/site_packages/can/broadcastmanager.py | seanlinc/Playmate | 077877d172dd6b7beab910c52ec95ee300bc6480 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Exposes several methods for transmitting cyclic messages.
The main entry point to these classes should be through
:meth:`can.BusABC.send_periodic`.
"""
import abc
import logging
import threading
import time
import warnings
log = logging.getLogger('can.bcm')
class CyclicTask(object):
"""
Abstract Base for all cyclic tasks.
"""
@abc.abstractmethod
def stop(self):
"""Cancel this periodic task.
:raises can.CanError:
If stop is called on an already stopped task.
"""
class CyclicSendTaskABC(CyclicTask):
"""
Message send task with defined period
"""
def __init__(self, message, period):
"""
:param can.Message message: The message to be sent periodically.
:param float period: The rate in seconds at which to send the message.
"""
self.message = message
self.can_id = message.arbitration_id
self.arbitration_id = message.arbitration_id
self.period = period
super(CyclicSendTaskABC, self).__init__()
class LimitedDurationCyclicSendTaskABC(CyclicSendTaskABC):
def __init__(self, message, period, duration):
"""Message send task with a defined duration and period.
:param can.Message message: The message to be sent periodically.
:param float period: The rate in seconds at which to send the message.
:param float duration:
The duration to keep sending this message at given rate.
"""
super(LimitedDurationCyclicSendTaskABC, self).__init__(message, period)
self.duration = duration
class RestartableCyclicTaskABC(CyclicSendTaskABC):
"""Adds support for restarting a stopped cyclic task"""
@abc.abstractmethod
def start(self):
"""Restart a stopped periodic task.
"""
class ModifiableCyclicTaskABC(CyclicSendTaskABC):
"""Adds support for modifying a periodic message"""
def modify_data(self, message):
"""Update the contents of this periodically sent message without altering
the timing.
:param can.Message message:
The message with the new :attr:`can.Message.data`.
Note: The arbitration ID cannot be changed.
"""
self.message = message
class MultiRateCyclicSendTaskABC(CyclicSendTaskABC):
"""A Cyclic send task that supports switches send frequency after a set time.
"""
def __init__(self, channel, message, count, initial_period, subsequent_period):
"""
Transmits a message `count` times at `initial_period` then continues to
transmit message at `subsequent_period`.
:param channel: See interface specific documentation.
:param can.Message message:
:param int count:
:param float initial_period:
:param float subsequent_period:
"""
super(MultiRateCyclicSendTaskABC, self).__init__(channel, message, subsequent_period)
class ThreadBasedCyclicSendTask(ModifiableCyclicTaskABC,
LimitedDurationCyclicSendTaskABC,
RestartableCyclicTaskABC):
"""Fallback cyclic send task using thread."""
def __init__(self, bus, lock, message, period, duration=None):
super(ThreadBasedCyclicSendTask, self).__init__(message, period, duration)
self.bus = bus
self.lock = lock
self.stopped = True
self.thread = None
self.end_time = time.time() + duration if duration else None
self.start()
def stop(self):
self.stopped = True
def start(self):
self.stopped = False
if self.thread is None or not self.thread.is_alive():
name = "Cyclic send task for 0x%X" % (self.message.arbitration_id)
self.thread = threading.Thread(target=self._run, name=name)
self.thread.daemon = True
self.thread.start()
def _run(self):
while not self.stopped:
# Prevent calling bus.send from multiple threads
with self.lock:
started = time.time()
try:
self.bus.send(self.message)
except Exception as exc:
log.exception(exc)
break
if self.end_time is not None and time.time() >= self.end_time:
break
# Compensate for the time it takes to send the message
delay = self.period - (time.time() - started)
time.sleep(max(0.0, delay))
def send_periodic(bus, message, period, *args, **kwargs):
"""
Send a :class:`~can.Message` every `period` seconds on the given bus.
:param can.BusABC bus: A CAN bus which supports sending.
:param can.Message message: Message to send periodically.
:param float period: The minimum time between sending messages.
:return: A started task instance
"""
warnings.warn("The function `can.send_periodic` is deprecated and will " +
"be removed in an upcoming version. Please use `can.Bus.send_periodic` instead.", DeprecationWarning)
return bus.send_periodic(message, period, *args, **kwargs)
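# Usage sketch (`bus` is a placeholder for a configured can.BusABC instance,
# `msg` for a can.Message; both hypothetical). Per the deprecation notice,
# prefer the bus method:
#   task = bus.send_periodic(msg, period=0.5)
#   ...
#   task.stop()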
| 32.603774 | 119 | 0.643711 | 590 | 5,184 | 5.561017 | 0.308475 | 0.034136 | 0.022859 | 0.033526 | 0.116733 | 0.078635 | 0.068881 | 0.068881 | 0.068881 | 0.068881 | 0 | 0.001061 | 0.272569 | 5,184 | 158 | 120 | 32.810127 | 0.869 | 0.36169 | 0 | 0.181818 | 0 | 0 | 0.055537 | 0.007695 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.075758 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79d996f9c7b739201903f7162ae39e85d80aae38 | 513 | py | Python | accessible_output/braille/outputs/virgo.py | Timtam/cards-against-humanity | 89ea61b5c9915198b845bbf8a93c3f7827323ceb | [
"MIT"
] | 5 | 2017-04-11T00:18:42.000Z | 2021-08-01T04:27:20.000Z | accessible_output/braille/outputs/virgo.py | Timtam/cards-against-humanity | 89ea61b5c9915198b845bbf8a93c3f7827323ceb | [
"MIT"
] | 47 | 2017-04-27T18:57:27.000Z | 2017-07-16T21:18:28.000Z | accessible_output/braille/outputs/virgo.py | Timtam/cards-against-humanity | 89ea61b5c9915198b845bbf8a93c3f7827323ceb | [
"MIT"
] | 4 | 2018-05-17T12:33:59.000Z | 2022-02-20T16:08:51.000Z | from pywintypes import com_error
import win32com.client
from main import OutputError, BrailleOutput
class Virgo(BrailleOutput):
    """Braille output supporting the Virgo screen reader."""
    name = 'Virgo'
    def __init__(self, *args, **kwargs):
        super(Virgo, self).__init__(*args, **kwargs)
        try:
            self.object = win32com.client.Dispatch("phoenix.BrailleSysClass")
        except com_error:
            raise OutputError
    def braille(self, text):
        self.object.sayonbraille(True, text)
    def canBraille(self):
return True | 23.318182 | 68 | 0.744639 | 63 | 513 | 5.904762 | 0.587302 | 0.043011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009132 | 0.146199 | 513 | 22 | 69 | 23.318182 | 0.840183 | 0.097466 | 0 | 0 | 0 | 0 | 0.061135 | 0.050218 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0.066667 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79da26b04e69fcee30b862a1e5dd200b98e09556 | 3,050 | py | Python | data_ai/comp3006/src/test.py | lonelyhentai/workspace | 2a996af58d6b9be5d608ed040267398bcf72403b | [
"MIT"
] | 2 | 2021-04-26T16:37:38.000Z | 2022-03-15T01:26:19.000Z | data_ai/comp3006/src/test.py | lonelyhentai/workspace | 2a996af58d6b9be5d608ed040267398bcf72403b | [
"MIT"
] | null | null | null | data_ai/comp3006/src/test.py | lonelyhentai/workspace | 2a996af58d6b9be5d608ed040267398bcf72403b | [
"MIT"
] | 1 | 2022-03-15T01:26:23.000Z | 2022-03-15T01:26:23.000Z | import pandas as pd
import numpy as np
from os import path
from path_service import LOG_DIR, DATA_DIR
from sklearn.metrics import log_loss
import re
prob_columns = list(map(lambda x: f"prob{x}", range(8)))
prob_columns_without_end = list(map(lambda x: f"prob{x}", range(7)))
def row_check(df: pd.DataFrame):
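    # Renormalize each row to sum to 1, then round and recompute prob7 so the
    # rounded probabilities still sum exactly to 1 (keeps log_loss well defined).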
df.loc[:,prob_columns]=df.loc[:,prob_columns].apply(lambda x: x/np.sum(x),axis=1,result_type='expand')
df = df.round(5)
sum7 = np.sum(df.loc[:,prob_columns_without_end],axis=1)
df.loc[:,'prob7'] = 1.0 - sum7
return df
def get_prob_res(file_name: str):
df: pd.DataFrame = pd.DataFrame([])
with open(path.join(LOG_DIR, file_name), 'r') as prob_file:
prob_lines = prob_file.readlines()
probs = {}
for i in range(8):
probs[i] = []
for line in prob_lines:
words = re.split(r"\s", line)
for i in range(8):
pos = i * 2
prob_index = int(words[pos][-1])
probs[prob_index].append(float(words[pos + 1]))
df.loc[:, "file_id"] = pd.Series(list(range(1, len(probs[0]) + 1)), dtype=np.int)
for i in range(8):
df.loc[:, f"prob{i}"] = pd.Series(probs[i], dtype=np.float)
return row_check(df)
def get_single_res(file_name: str, true_mode: bool = True):
df: pd.DataFrame = pd.DataFrame([])
with open(path.join(LOG_DIR if not true_mode else DATA_DIR, file_name), 'r') as prob_file:
prob_lines = prob_file.readlines()
probs = {}
for i in range(8):
probs[i] = []
for line in prob_lines:
label = int(str.strip(re.split(r"\s", line)[0])[-1])
for i in range(8):
if i == label:
probs[i].append(1.0)
else:
probs[i].append(0.0)
df.loc[:, "file_id"] = pd.Series(list(range(1, len(probs[0]) + 1)), dtype=np.int)
for i in range(8):
df.loc[:, f"prob{i}"] = pd.Series(probs[i], dtype=np.float)
return df
def get_probs(df: pd.DataFrame) -> pd.DataFrame:
return df.loc[:, list(map(lambda x: f"prob{x}", range(8)))]
def check_valid_log_loss():
valid_prob_df = get_prob_res('valid_prob.log')
labels = get_single_res('security.valid', True)
print("prob mode: ", log_loss(get_probs(labels), get_probs(valid_prob_df)))
def check_train_log_loss():
valid_prob_df = get_prob_res('train_prob.log')
labels = get_single_res('new_train', True)
print("prob mode: ", log_loss(get_probs(labels), get_probs(valid_prob_df)))
def save_train_res(df: pd.DataFrame):
df.to_csv(path.join(DATA_DIR, "test_submit.csv"), sep=",", index=False, float_format='%.5f')
if __name__ == "__main__":
check_valid_log_loss()
check_train_log_loss()
test_prob_df = get_prob_res("test_prob.log")
save_train_res(test_prob_df)
df = pd.read_csv(path.join(DATA_DIR, "test_submit.csv"), sep=",")
for index, row in df.iterrows():
if np.abs(np.sum(row[list(map(lambda x: f"prob{x}", range(8)))]) - 1.0) > 1e-6:
raise Exception(f"sum prob not equal 1.0 in {index}")
| 34.659091 | 106 | 0.627213 | 513 | 3,050 | 3.524366 | 0.216374 | 0.029867 | 0.019912 | 0.036504 | 0.53208 | 0.462389 | 0.434735 | 0.434735 | 0.389934 | 0.309181 | 0 | 0.01697 | 0.207869 | 3,050 | 87 | 107 | 35.057471 | 0.731374 | 0 | 0 | 0.338028 | 0 | 0 | 0.077377 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098592 | false | 0 | 0.084507 | 0.014085 | 0.239437 | 0.028169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79dd7101e6c2dbca64177e87d238cd48079dd45d | 6,466 | py | Python | resources/lib/auth_routes.py | t43pasdf/plugin.video.espn_3 | f111edf14f0344d248f0a62de3da4f15afc7d354 | [
"MIT"
] | 4 | 2019-10-18T01:27:48.000Z | 2020-02-14T05:45:29.000Z | resources/lib/auth_routes.py | t43pasdf/plugin.video.espn_3 | f111edf14f0344d248f0a62de3da4f15afc7d354 | [
"MIT"
] | 3 | 2020-02-10T05:58:30.000Z | 2020-09-28T22:42:04.000Z | resources/lib/auth_routes.py | t43pasdf/plugin.video.espn_3 | f111edf14f0344d248f0a62de3da4f15afc7d354 | [
"MIT"
] | null | null | null | # Copyright 2019 https://github.com/kodi-addons
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
try:
from urllib2 import HTTPError
except ImportError:
from urllib.error import HTTPError
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
import logging
import threading
import time
import xbmcgui
from resources.lib import adobe_activate_api, espnplus, player_config, util
from resources.lib.plugin_routing import plugin
from resources.lib.kodiutils import get_string, set_setting
@plugin.route('/login-tv-provider')
def login_tv_provider():
logging.debug('Authenticate Device')
if adobe_activate_api.is_authenticated():
logging.debug('Device already authenticated, skipping authentication')
dialog = xbmcgui.Dialog()
dialog.ok(get_string(30037), get_string(30301))
set_setting('LoggedInToTvProvider', True)
return True
else:
regcode = adobe_activate_api.get_regcode()
dialog = xbmcgui.Dialog()
ok = dialog.yesno(get_string(30310),
get_string(30320),
get_string(30330) % regcode,
get_string(30340),
get_string(30360),
get_string(30350))
if ok:
try:
adobe_activate_api.authenticate(regcode)
dialog.ok(get_string(30310), get_string(30370))
set_setting('LoggedInToTvProvider', True)
return True
except HTTPError as e:
dialog.ok(get_string(30037), get_string(30420) % e)
set_setting('LoggedInToTvProvider', False)
return False
@plugin.route('/view-tv-provider-details')
def view_tv_provider_details():
dialog = xbmcgui.Dialog()
dialog.ok(get_string(30380),
get_string(30390) % adobe_activate_api.get_authentication_expires(),
get_string(30700) % (player_config.get_dma(), player_config.get_timezone()))
@plugin.route('/logout-tv-provider')
def logout_tv_provider():
dialog = xbmcgui.Dialog()
ok = dialog.yesno(get_string(30381),
get_string(30382))
if ok:
adobe_activate_api.deauthorize()
set_setting('LoggedInToTvProvider', False)
@plugin.route('/login-espn-plus')
def login_espn_plus():
if not espnplus.have_valid_login_id_token():
logging.debug('Requesting login id token')
semaphore = threading.Semaphore(0)
result_queue = Queue()
license_plate, ws = espnplus.perform_license_plate_auth_flow(semaphore, result_queue)
progress_dialog = xbmcgui.DialogProgress()
progress_dialog.create(get_string(40100), get_string(40110), license_plate)
espnplus.start_websocket_thread(ws)
times = 0
sleep_time = 1
max_time = 180
max_times = max_time / sleep_time
# wait a maximum of 3 minutes
while times < max_times:
time.sleep(sleep_time)
canceled = progress_dialog.iscanceled()
acquired = semaphore.acquire(blocking=False)
logging.debug('Canceled: %s Acquired: %s' % (canceled, acquired))
seconds_left = max_time - times * sleep_time
minutes, seconds = divmod(seconds_left, 60)
            percent = int(times * 100 / max_times)
progress_dialog.update(percent, get_string(40110), license_plate,
get_string(40120) % (minutes, seconds))
if canceled or acquired:
break
times = times + 1
ws.close()
progress_dialog.close()
token = None
try:
token = result_queue.get(block=True, timeout=1)
except Empty as e:
logging.error('No result from websocket %s', e)
if token is not None and 'id_token' in token:
espnplus.handle_license_plate_token(token)
else:
dialog = xbmcgui.Dialog()
dialog.ok(get_string(30037), get_string(40130))
set_setting('LoggedInToEspnPlus', False)
return False
if not espnplus.has_valid_bam_account_access_token():
espnplus.request_bam_account_access_token()
logging.debug('Bam token %s' % espnplus.get_bam_account_access_token())
dialog = xbmcgui.Dialog()
dialog.ok(get_string(40000), get_string(40101))
set_setting('LoggedInToEspnPlus', True)
return True
@plugin.route('/view-espn-plus-details')
def view_espn_plus_details():
account_details = espnplus.get_bam_account_details()
email = util.get_nested_value(account_details, ['attributes', 'email'], 'Unknown Email')
profile_name = util.get_nested_value(account_details, ['activeProfile', 'profileName'], 'Unknown Profile Name')
product_details = email + ' - ' + profile_name + '\n'
sub_details = espnplus.get_bam_sub_details()
for sub in sub_details:
if sub['isActive']:
product_name = ''
for product in sub['products']:
product_name = product_name + ' ' + product['name']
product_details = product_details + product_name + ' ' + sub['expirationDate'] + '\n'
dialog = xbmcgui.Dialog()
dialog.ok(get_string(40260), product_details)
@plugin.route('/logout-espn-plus')
def logout_espn_plus():
set_setting('LoggedInToEspnPlus', False)
espnplus.config.reset_settings()
| 38.951807 | 115 | 0.665636 | 780 | 6,466 | 5.329487 | 0.324359 | 0.058456 | 0.031994 | 0.028626 | 0.156844 | 0.113784 | 0.077219 | 0.043782 | 0.024056 | 0.024056 | 0 | 0.0299 | 0.244819 | 6,466 | 165 | 116 | 39.187879 | 0.821421 | 0.169193 | 0 | 0.225806 | 0 | 0 | 0.100149 | 0.008969 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048387 | false | 0 | 0.104839 | 0 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79df34a92c6aa109d6fb09a1fbe24d44b829d071 | 3,440 | py | Python | app.py | rbSparky/umit-hack-backend | a9402d35d07693b78498a2ba2d4ff08fcb6cab44 | [
"MIT"
] | null | null | null | app.py | rbSparky/umit-hack-backend | a9402d35d07693b78498a2ba2d4ff08fcb6cab44 | [
"MIT"
] | null | null | null | app.py | rbSparky/umit-hack-backend | a9402d35d07693b78498a2ba2d4ff08fcb6cab44 | [
"MIT"
] | null | null | null | import pickle
from flask import Flask, request, jsonify, session
from flask_cors import CORS, cross_origin
import sklearn
from sklearn.decomposition import TruncatedSVD
import pandas as pd
import numpy as np
ranks = []
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
class Model:
i = '0'
lrank = 1
hrank = 2
r_names = None
r_ID = None
corr_ID = None
recc = None
cmat = []
fir = []
sec = []
final = []
def predict(self):
self.sec = []
self.fir = []
flf = []
SVD = TruncatedSVD(n_components = 10)
decompm = SVD.fit_transform(self.cmat)
df = pd.DataFrame(decompm)
corrm = np.corrcoef(decompm)
p_names = list(self.cmat.index)
p_ID = p_names.index(str(self.i))
c_ID = corrm[p_ID]
Recommend = list(self.cmat.index[c_ID > 0.95])
fl = []
for i in range(len(c_ID)):
if(c_ID[i] > 0.95):
fl.append([c_ID[i], self.cmat.index[i]])
fl.sort(reverse=True)
flf = []
for i in range(len(fl)):
if (fl[i][0] > 0.95):
flf.append(fl[i])
clgdf, clgds = {}, {}
for i in flf:
for j in self.cmat.loc[i[1]].items():
if ((j[1] == 5) and (j[0] not in self.fir) and (ranks[j[0]][1] >= self.lrank)):# and (self.hrank >= ranks[j[0]][0])):
if(j[0] in clgdf):
clgdf[j[0]] += 1
else:
clgdf[j[0]] = 1
elif ((j[1] == 2) and (j[0] not in self.sec) and (ranks[j[0]][1] >= self.lrank)):# and (self.hrank >= ranks[j[0]][0])):
if(j[0] in clgds):
clgds[j[0]] += 1
else:
clgds[j[0]] = 1
tf, ts = [], []
for k in clgdf:
tf.append([clgdf[k], k])
for k in clgds:
ts.append([clgds[k], k])
tf.sort(reverse=True)
ts.sort(reverse=True)
for i in tf:
j = i[1]
self.fir.append([(j.split())[0], j[len((j.split())[0]):], ranks[j][0], ranks[j][1]])
for i in ts:
j = i[1]
self.sec.append([(j.split())[0], j[len((j.split())[0]):], ranks[j][0], ranks[j][1]])
#print(self.fir, self.sec, sep = '\n\n\n')
self.final = []
for i in self.fir:
self.final.append(i)
for i in self.sec:
self.final.append(i)
rfinal = []
        for x in self.final:
            if x not in rfinal:
                rfinal.append(x)
return jsonify(rfinal)
@app.route('/')
def hello():
return 'hi main'
@app.route('/predict', methods=['POST', 'GET'])  # accepts both POST and GET
@cross_origin()
def predict():
#take all these as input from args
global ranks
#session.clear()
req_dat = request.get_json()
lrank = req_dat['lrank']#5000
hrank = req_dat['hrank']#7000
stream1 = req_dat['stream1']#'Computer Science'
stream2 = req_dat['stream2']#'Electronics'
'''
lrank = int(request.args.get("lrank"))
hrank = int(request.args.get("hrank"))
stream1 = request.args.get("p1")
stream2 = request.args.get("p2")
'''
f = open('essentials.pckl', 'rb')
f1 = pickle.load(f)
f.close()
#print(f1)
f = open('Model2.pckl', 'rb')
f2 = pickle.load(f)
f.close()
wsc = f1[0]
ranks = f1[1]
f2.cmat = f1[2]
f2.lrank = lrank
f2.hrank = hrank
f2.stream1 = stream1
f2.stream2 = stream2
f2.final = []
f2.i = wsc[(stream1, stream2)]
return f2.predict()
if __name__ == '__main__':
#app.secret_key = 'super secret key'
#app.config['SESSION_TYPE'] = 'filesystem'
#session.init_app(app)
app.run(debug=True)
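# Hedged client sketch (added for illustration; the host, port and example
# values are assumptions, only the field names come from the /predict handler
# above):
#
#   import requests
#   resp = requests.post('http://127.0.0.1:5000/predict',
#                        json={'lrank': 5000, 'hrank': 7000,
#                              'stream1': 'Computer Science',
#                              'stream2': 'Electronics'})
#   print(resp.json())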
| 23.888889 | 127 | 0.563081 | 540 | 3,440 | 3.514815 | 0.264815 | 0.014752 | 0.022129 | 0.017914 | 0.135933 | 0.103267 | 0.088514 | 0.088514 | 0.088514 | 0.088514 | 0 | 0.034924 | 0.250872 | 3,440 | 143 | 128 | 24.055944 | 0.701591 | 0.094477 | 0 | 0.092593 | 0 | 0 | 0.037428 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.064815 | 0.009259 | 0.231481 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dab135854cbf1898ed8a1808f3a10f5e2425b1b | 235 | py | Python | ccr/urls.py | nikhil96sher/coding_companion | bb5d9596dff74e342ca07b6d95c37fb491877224 | [
"MIT"
] | 12 | 2015-12-30T06:31:57.000Z | 2017-12-26T01:42:18.000Z | ccr/urls.py | nikhilsheoran96/coding_companion | bb5d9596dff74e342ca07b6d95c37fb491877224 | [
"MIT"
] | null | null | null | ccr/urls.py | nikhilsheoran96/coding_companion | bb5d9596dff74e342ca07b6d95c37fb491877224 | [
"MIT"
] | 5 | 2015-12-30T07:06:22.000Z | 2019-04-24T05:46:01.000Z | from django.conf.urls import patterns,url
from ccr import views
urlpatterns=patterns(
'',
url(r'^$',views.main),
url(r'^save/',views.save),
url(r'^template/',views.template),
url(r'^compile/',views.compile),
url(r'^run/',views.run),
) | 21.363636 | 41 | 0.702128 | 37 | 235 | 4.459459 | 0.432432 | 0.121212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.068085 | 235 | 11 | 42 | 21.363636 | 0.753425 | 0 | 0 | 0 | 0 | 0 | 0.135593 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db06f6b303411c51b7e7ee7f461a4a3b7ef48b6 | 3,941 | py | Python | spowtd/transmissivity.py | alex-cobb/python-spowtd | b841ce63a4ed168a6e1b4e17b689d8be9dc11318 | [
"BSD-2-Clause"
] | null | null | null | spowtd/transmissivity.py | alex-cobb/python-spowtd | b841ce63a4ed168a6e1b4e17b689d8be9dc11318 | [
"BSD-2-Clause"
] | null | null | null | spowtd/transmissivity.py | alex-cobb/python-spowtd | b841ce63a4ed168a6e1b4e17b689d8be9dc11318 | [
"BSD-2-Clause"
] | 2 | 2021-10-14T14:38:43.000Z | 2022-03-21T16:21:06.000Z | """Transmissivity classes
"""
import numpy as np
import scipy.integrate as integrate_mod
import spowtd.spline as spline_mod
def create_transmissivity_function(parameters):
"""Create a transmissivity function
Returns a callable object that returns transmissivity at a given
water level. The class of the object depends on the "type" field
in the parameters provided, and must be either "peatclsm" or
"spline".
"""
if 'type' not in parameters:
raise ValueError(
'"type" field is required in parameters; got {}'
.format(parameters))
sy_type = parameters.pop('type', None)
return {
'peatclsm': PeatclsmTransmissivity,
'spline': SplineTransmissivity
}[sy_type](**parameters)
class SplineTransmissivity:
"""Transmissivity parameterized as a spline of log conductivity
    zeta_knots_mm: Sequence of water levels in mm
    K_knots_km_d: Conductivity values (km/d) at those water levels
Stores a set of knots representing hydraulic conductivity at water
table heights (relative to surface) zeta. When called, takes a
water table height and returns a transmissivity obtained by linear
interpolation of log-conductivity.
    This is an extended value function that returns
    minimum_transmissivity below min(zeta).  Extrapolation above
    max(zeta) (exponential or linear, according to whether the last
    two knots share a conductivity value) is intended but not yet
    implemented; conductivity() currently raises NotImplementedError
    above the highest knot.
"""
__slots__ = ['zeta_knots_mm', 'K_knots_km_d',
'minimum_transmissivity_m2_d', '_spline']
def __init__(self, zeta_knots_mm, K_knots_km_d,
minimum_transmissivity_m2_d):
self.zeta_knots_mm = np.asarray(zeta_knots_mm, dtype='float64')
self.K_knots_km_d = np.asarray(K_knots_km_d, dtype='float64')
self.minimum_transmissivity_m2_d = minimum_transmissivity_m2_d
log_K_knots = np.log(K_knots_km_d)
self._spline = spline_mod.Spline.from_points(
zip(zeta_knots_mm, log_K_knots),
order=1)
def conductivity(self, water_level_mm):
assert water_level_mm >= self.zeta_knots_mm.min()
if water_level_mm >= self.zeta_knots_mm.max():
raise NotImplementedError('Extrapolation above highest knot')
return np.exp(self._spline(water_level_mm))
def __call__(self, water_level_mm):
if np.isscalar(water_level_mm):
return self.call_scalar(water_level_mm)
return np.array(
[self.call_scalar(value) for value in water_level_mm],
dtype='float64')
def call_scalar(self, water_level_mm):
"""Compute transmissivity for a scalar argument
"""
if water_level_mm <= self.zeta_knots_mm.min():
return self.minimum_transmissivity_m2_d
return (
self.minimum_transmissivity_m2_d +
integrate_mod.quad(
self.conductivity,
self.zeta_knots_mm.min(),
water_level_mm)[0])
class PeatclsmTransmissivity:
"""Transmissivity function used in PEATCLSM
Computes transmissivity in m^2 / s from water level in mm.
See equation 3 in Apers et al. 2022, JAMES.
"""
__slots__ = ['Ksmacz0', 'alpha', 'zeta_max_cm']
def __init__(self, Ksmacz0, alpha, zeta_max_cm):
self.Ksmacz0 = Ksmacz0
self.alpha = alpha
self.zeta_max_cm = zeta_max_cm
def __call__(self, water_level_mm):
Ksmacz0 = self.Ksmacz0
alpha = self.alpha
zeta_max_cm = self.zeta_max_cm
water_level_mm = np.asarray(water_level_mm)
if (water_level_mm / 10 > zeta_max_cm).any():
raise ValueError('T undefined at water level > {} cm in {}'
.format(zeta_max_cm, water_level_mm / 10))
return (
Ksmacz0 * (zeta_max_cm - water_level_mm / 10) ** (1 - alpha)
) / (100 * (alpha - 1))
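# Hedged demo (added for illustration; the parameter values below are made-up
# assumptions, not values from the original project):
if __name__ == '__main__':
    transmissivity = create_transmissivity_function({
        'type': 'peatclsm',
        'Ksmacz0': 7.9,      # hypothetical conductivity scale
        'alpha': 1.5,        # hypothetical decay exponent
        'zeta_max_cm': 0.0,  # water table at the peat surface
    })
    # transmissivity at a water level 100 mm below the surface
    print(transmissivity(-100.0))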
| 34.269565 | 73 | 0.664298 | 511 | 3,941 | 4.829746 | 0.295499 | 0.081037 | 0.082658 | 0.058347 | 0.200162 | 0.145867 | 0.091167 | 0.072528 | 0.035656 | 0.035656 | 0 | 0.01296 | 0.256026 | 3,941 | 114 | 74 | 34.570175 | 0.828786 | 0.274042 | 0 | 0.064516 | 0 | 0 | 0.088332 | 0.009815 | 0 | 0 | 0 | 0 | 0.016129 | 1 | 0.112903 | false | 0 | 0.048387 | 0 | 0.33871 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db29e40510fc64c7655a39c604ac0d49c03c44b | 795 | py | Python | setup.py | Flowerowl/ici | 7c3209ee0ddfae27bda76f586ac02545364a0c73 | [
"MIT"
] | 204 | 2015-01-03T14:29:43.000Z | 2021-12-15T16:21:28.000Z | setup.py | QQ83076130/ici | 7c3209ee0ddfae27bda76f586ac02545364a0c73 | [
"MIT"
] | 5 | 2015-05-14T10:34:24.000Z | 2017-10-09T15:53:47.000Z | setup.py | QQ83076130/ici | 7c3209ee0ddfae27bda76f586ac02545364a0c73 | [
"MIT"
] | 77 | 2015-01-13T01:44:16.000Z | 2021-12-15T16:21:39.000Z | #encoding:utf-8
from setuptools import setup, find_packages
import sys, os
version = '0.4.3'
setup(name='ici',
version=version,
description="方便程序员在terminal查询生词的小工具",
long_description="""方便程序员在terminal查询生词的小工具""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='python iciba dictionary terminal',
author='yuzhe',
author_email='lazynightz@gmail.com',
url='https://github.com/Flowerowl/ici',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'termcolor',
],
entry_points={
'console_scripts':[
'ici = ici.ici:main'
]
},
)
| 27.413793 | 95 | 0.632704 | 83 | 795 | 5.915663 | 0.759036 | 0.04888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00813 | 0.226415 | 795 | 28 | 96 | 28.392857 | 0.790244 | 0.108176 | 0 | 0 | 0 | 0 | 0.288543 | 0.062235 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.08 | 0 | 0.08 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db3a905d27b52ca5f7ab31fe8496b3bc345779b | 24,074 | py | Python | src/son/monitor/son_sp.py | dang03/son-cli | 3e29322d4556f3e02f7b15c43c5e66a1e7e07bd3 | [
"Apache-2.0"
] | 4 | 2017-02-08T22:50:28.000Z | 2018-05-29T07:29:47.000Z | src/son/monitor/son_sp.py | dang03/son-cli | 3e29322d4556f3e02f7b15c43c5e66a1e7e07bd3 | [
"Apache-2.0"
] | 81 | 2016-07-19T13:55:12.000Z | 2021-05-07T15:03:05.000Z | src/son/monitor/son_sp.py | dang03/son-cli | 3e29322d4556f3e02f7b15c43c5e66a1e7e07bd3 | [
"Apache-2.0"
] | 13 | 2016-07-19T13:33:19.000Z | 2019-04-25T08:04:15.000Z | """
Copyright (c) 2015 SONATA-NFV
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
import logging
from requests import Session, post, get
import websocket
import threading
from subprocess import call, check_output
import json
from son.profile.helper import read_yaml, write_yaml
from prometheus_client import start_http_server, Gauge
import os
import docker
from time import gmtime, strftime
import datetime
"""
This class implements the son-sp commands.
These commands translate to the APIs of the SONATA SP
"""
LOG = logging.getLogger('SP_monitor')
LOG.setLevel(level=logging.INFO)
prometheus_stream_port = 8082
prometheus_server_api = 'http://127.0.0.1:9090'
prometheus_config_path = '/tmp/son-monitor/prometheus/prometheus_sdk.yml'
GK_api = 'http://sp.int3.sonata-nfv.eu:32001/api/v2/'
monitor_api = 'http://sp.int3.sonata-nfv.eu:8000/api/v1/'
son_access_config_path = "/home/steven/.son-workspace"
platform_id = 'sp1'
class Service_Platform():
def __init__(self, export_port=8082, GK_api=None, **kwargs):
self.monitor_api = kwargs.get('monitor_api', monitor_api)
self.GK_api = kwargs.get('GK_api', GK_api)
self.son_access_config_path = kwargs.get('son_access_config_path', son_access_config_path)
self.platform_id = kwargs.get('platform_id', platform_id)
# Build up our session
self.session = Session()
self.session.headers = {
"Accept": "application/json; charset=UTF-8"
}
# global parameters needed for the SP_websocket Class
global prometheus_stream_port
prometheus_stream_port = export_port
global prometheus_server_api
prometheus_server_api = kwargs.get('prometheus_server_api', prometheus_server_api)
global prometheus_config_path
prometheus_config_path = kwargs.get('prometheus_config_path', prometheus_config_path)
self.ws_thread = None
# websocket in the SP
self.ws = None
# access token to auth the SDK user
self.access_token = None
def list(self, **kwargs):
# if metric is specified, show the list of VNFs that export ths metric
metric = kwargs.get('metric')
if metric :
url = self.monitor_api + 'prometheus/metrics/name/' + metric
ret = self.session.get(url).json().get("metrics").get("result")
else:
url = self.monitor_api + 'prometheus/metrics/list'
resp = self.session.get(url)
ret = resp.json().get('metrics')
return ret
def query(self, **kwargs):
verbose = kwargs.get("verbose", False)
LOG.setLevel(level=logging.INFO)
if verbose:
LOG.setLevel(level=logging.DEBUG)
# periodically refresh token
self._get_token()
service_name = kwargs.get("service")
vnf_name = kwargs.get("vnf_name")
vdu_id = kwargs.get("vdu_id")
vnfc_id = kwargs.get("vnfc_id")
metric = kwargs.get("metric")
since = kwargs.get("since")
until = kwargs.get("until")
metric_list = []
service_desc_uuid = self._get_service_descriptor_uuid(service_name)
vnf_instances = self._get_vnf_instances(service_desc_uuid)
if len(vnf_instances) <= 0:
LOG.warning("found no VNF instances for this service descriptor uuid: {0}".format(service_desc_uuid))
else:
vnf_descriptor_uuid = self._get_VNF_descriptor_uuid(vnf_name)
for vnf_instance_uuid in vnf_instances:
vdu_id, vc_id = self._check_VNF_instance(vnf_instance_uuid, vnf_descriptor_uuid, vdu_id, vnfc_id)
if vc_id:
LOG.info("found VNF: {0} with instance uuid: {2}, vdu_id: {3} vnfc_id: {4} in service: {1} ".format(
vnf_name, service_name, vnf_instance_uuid, vdu_id, vc_id))
metric_list = self._get_async_metric(vnf_instance_uuid, vdu_id, vc_id, metric, since, until)
break
return metric_list
def stream_test(self, **kwargs):
metric = kwargs.get('metric')
vnf_name = kwargs.get('vnf_name')
action = kwargs.get('action', 'start')
if action == 'stop':
SP_websocket._config_prometheus(remove=True)
if self.ws:
self.ws.close()
# kill all running websocket streams
call(['pkill', '-f', 'son-monitor stream'])
return 'websocket closed'
        # create the websocket with a filter, e.g. {"metric": "vm_cpu_perc", "filters": ["exported_instance=vtc-vnf"]}
url = self.monitor_api + 'ws/new'
        data = {'metric': str(metric), 'filters': str(["exported_instance={}".format(vnf_name)])}
response = self.session.post(url, json=data)
code = response.status_code
if code == 200:
ws_url = response.json().get('ws_url')
LOG.info('ws_url: {}'.format(ws_url))
self.ws = SP_websocket(ws_url, vnf_name=vnf_name, metric=metric)
self.ws_thread = threading.Thread(target=self.ws.run_forever)
self.ws_thread.daemon = True
self.ws_thread.start()
self.ws_thread.join()
return 'websocket thread started'
def stream_auth(self, **kwargs):
"""
call the SONATA Gatekeeper API to request monitoring metrics
:param kwargs:
:return:
"""
verbose = kwargs.get("verbose", False)
LOG.setLevel(level=logging.INFO)
if verbose:
LOG.setLevel(level=logging.DEBUG)
action = kwargs.get('action', 'start')
if action == 'stop':
SP_websocket._config_prometheus(remove=True)
if self.ws:
self.ws.close()
# kill all running websocket streams
LOG.info('closing websocket')
call(['pkill', '-f', 'son-monitor stream'])
LOG.info('websocket closed')
return 'websocket closed'
# periodically refresh token
self._get_token()
service_name = kwargs.get("service","sonata-demo-12")
vnf_name = kwargs.get("vnf_name","vtc-vnf2")
vdu_id = kwargs.get("vdu_id")
vnfc_id = kwargs.get("vnfc_id")
metric = kwargs.get("metric")
ws_url = None
# first lookup if the service name is instantiated
service_desc_uuid = self._get_service_descriptor_uuid(service_name)
# then check if the service has an instance of this VNF
vnf_instances = self._get_vnf_instances(service_desc_uuid)
if len(vnf_instances) <= 0:
LOG.warning("found no VNF instances for this service descriptor uuid: {0}".format(service_desc_uuid))
else:
# get the descriptor uuid of this vnf
vnf_descriptor_uuid = self._get_VNF_descriptor_uuid(vnf_name)
for vnf_instance_uuid in vnf_instances:
# check if this VNF instance has the correct vdu and vnfc
vdu_id, vnfc_id = self._check_VNF_instance(vnf_instance_uuid, vnf_descriptor_uuid, vdu_id, vnfc_id)
if vnfc_id:
LOG.info("found VNF: {0} with instance uuid: {2}, vdu_id: {3} vnfc_id: {4} in service: {1} ".format(
vnf_name, service_name, vnf_instance_uuid, vdu_id, vnfc_id))
ws_url = self._get_ws_url(vnf_instance_uuid, vdu_id, vnfc_id, metric)
break
if not vnfc_id:
return 'No vnfc_id found in the record'
if not ws_url:
return 'No websocket url received'
#ws_url = 'ws://10.30.0.112:8002/ws/98adab175fd64cc4bbe50ae9505fecf6'
self.ws = SP_websocket(ws_url, vnf_name=vnf_name, metric=metric, vm_id=vnfc_id)
self.ws_thread = threading.Thread(target=self.ws.run_forever)
self.ws_thread.daemon = True
self.ws_thread.start()
self.ws_thread.join()
return 'websocket thread started'
# TODO: start background thread to refresh token
def _get_token(self):
# the credentials and token is fetched via son-access, the son-access config path must be given
token_path = os.path.join(self.son_access_config_path, 'platforms', 'token.txt')
output = check_output(['son-access', '-w', self.son_access_config_path, '-p', self.platform_id, 'auth'])
#token_path = workspace_dir + '/' + token_file
with open(token_path, 'r') as token:
self.access_token = token.read()
def _get_VNF_descriptor_uuid(self, vnf_name):
headers = {'Authorization': "Bearer %s" % self.access_token}
url = self.GK_api + "functions"
resp = get(url, headers=headers)
if resp.status_code >= 400:
return 'error: {}'.format(resp.status_code)
functions_list = resp.json()
found_functions = [function.get("uuid") for function in functions_list if function["vnfd"]["name"] == vnf_name]
if len(found_functions) > 1 or len(found_functions) == 0:
LOG.warning("found {0} functions with name: {1}".format(len(found_functions), vnf_name))
return None
else:
uuid = found_functions[0]
LOG.info("found function descriptor of {0} with uuid: {1}".format(vnf_name, uuid))
return uuid
def _check_VNF_instance(self, vnf_instance_uuid, vnf_descriptor_uuid, vdu_id=None, vnfc_id=None):
headers = {'Authorization': "Bearer %s" % self.access_token}
url = self.GK_api + "records/functions"
resp = get(url, headers=headers)
if resp.status_code >= 400:
return 'error: {}'.format(resp.status_code)
LOG.debug('request VNF record, url:{0} json:{1}'.format(url, json.dumps(resp.json(), indent=2)))
vnf_list = resp.json()
vnf_list = [vnf for vnf in vnf_list if vnf.get("descriptor_reference") == vnf_descriptor_uuid and vnf.get("uuid") == vnf_instance_uuid]
if len(vnf_list) > 1 :
LOG.info("found multiple VNF instances with matching uuid: {0}".format(vnf_list))
return False
elif len(vnf_list) == 0 :
LOG.info("found no VNF instance with matching uuid: {0}".format(vnf_instance_uuid))
return False
# we found 1 matching vnf instance, now check if it has a vdu
LOG.info("found VNF instance with matching uuid: {0}".format(vnf_instance_uuid))
vnf_record = vnf_list[0]
vdu_list = vnf_record["virtual_deployment_units"]
if vdu_id:
vdu_list = [vdu for vdu in vdu_list if vdu.get("id") == vdu_id]
else:
#pick by default first vdu
vdu_list = [vdu_list[0]]
vdu = vdu_list[0]
vdu_id = vdu["id"]
if len(vdu_list) > 1 :
LOG.info("found multiple vdu_ids with matching id: {0} list: {1}".format(vdu_id, vdu_list))
return False
elif len(vdu_list) == 0 :
LOG.info("found no VDUs with matching id: {0}".format(vdu_id))
return False
# we found 1 matching vdu id, now check if it has a vdu instance(vnfc)
LOG.info("found VDU with matching id: {0}".format(vdu_id))
vdu = vdu_list[0]
vnfc_list = vdu["vnfc_instance"]
if vnfc_id:
vnfc_list = [vnfc for vnfc in vnfc_list if vnfc.get("id") == vnfc_id]
else:
#pick by default first vnfc
vnfc_list = [vnfc_list[0]]
vnfc = vnfc_list[0]
vnfc_id = vnfc["id"]
if len(vnfc_list) > 1 :
LOG.info("found multiple vnfc_ids with matching id: {0} list: {1}".format(vnfc_id, vnfc_list))
return False
elif len(vnfc_list) == 0 :
LOG.info("found no VNFCs with matching id: {0}".format(vnfc_id))
return False
vnfc = vnfc_list[0]
vc_id = vnfc["vc_id"]
LOG.info("found VNFC with matching id: {0} and vc_id: {1}".format(vnfc_id, vc_id))
return vdu_id, vc_id
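    # Hedged sketch of the function-record shape that _check_VNF_instance
    # walks (only the key names are taken from the code above; the values are
    # invented examples, not real SP output):
    #   {"uuid": "<vnf_instance_uuid>",
    #    "descriptor_reference": "<vnf_descriptor_uuid>",
    #    "virtual_deployment_units": [
    #        {"id": "vdu01",
    #         "vnfc_instance": [{"id": "vnfc01", "vc_id": "<openstack vm id>"}]}]}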
# Get the list of all the service instances registered
def _get_service_instance_list(self):
headers = {'Authorization': "Bearer %s" % self.access_token}
url = self.GK_api + "records/services"
resp = get(url, headers=headers)
LOG.info('request service instance uuid list, url:{0} json:{1}'.format(url, json.dumps(resp.json(), indent=2)))
return resp.text
# Gets a registered service instance
def _get_vnf_instances(self, service_descriptor_uuid):
headers = {'Authorization': "Bearer %s" % self.access_token}
url = self.GK_api + "records/services"
resp = get(url, headers=headers)
if resp.status_code >= 400:
return 'error: {}'.format(resp.status_code)
LOG.debug('request service instances, url:{0} json:{1}'.format(url, json.dumps(resp.json(), indent=2)))
services_list = resp.json()
found_services = [service for service in services_list if service["descriptor_reference"] == service_descriptor_uuid]
if len(found_services) > 1 or len(found_services) == 0 :
LOG.warning("found {0} service instances with descriptor uuid: {1}". format(len(found_services), service_descriptor_uuid))
return []
else:
service = found_services[0]
service_instance_uuid = service["uuid"]
vnfr_list = [vnf.get("vnfr_id") for vnf in service["network_functions"]]
LOG.info("found VNF descriptors: {}".format(json.dumps(vnfr_list,indent=2)))
return vnfr_list
# Obtain the list of services that can be instantiated
def _get_service_descriptor_uuid(self, service_name):
headers = {'Authorization': "Bearer %s" % self.access_token}
url = self.GK_api + "services"
resp = get(url, headers=headers)
if resp.status_code >= 400:
return 'error: {}'.format(resp.status_code)
LOG.debug('request service descriptor uuid, url:{0} json:{1}'.format(url, json.dumps(resp.json(), indent=2)))
services_list = resp.json()
found_services = [service.get("uuid") for service in services_list if service.get("nsd",{}).get("name") == service_name]
if len(found_services) > 1 or len(found_services) == 0 :
LOG.warning("found {0} services with name: {1}". format(len(found_services), service_name))
return None
else:
uuid = found_services[0]
LOG.info("found service descriptor of service: {0} with uuid: {1}".format(service_name, uuid))
return uuid
# get the websocket url where the metrocs will be streamed
def _get_ws_url(self, vnf_instance_uuid, vdu_id, vc_id, metric):
"""
call Gatekeeper API …/functions/metrics/:inst_id/:vdu_id/:vnfc_id/synch-mon-data
A metric is uniquely identified by vnf_instance + vdu_id + vnfc_id.
A VNF can consist out of multiple VDU's, a VNFC is an instance of a VDU.
the vnfc_id is only unique in the scope of the VNFR/VDU
:param vnf_instance_uuid: vnf instance uuid of the VNF
:param vdu_id: vdu id in the VNFD of the metric we want to monitor
:param vc_id: vc id in the VNFR of the metric we want to monitor
:param metric:
:return:
"""
headers = {'Authorization': "Bearer %s" % self.access_token}
#url = self.GK_api + "functions/" + function_uuid + "/instances/" + instance_uuid + "/synch-mon-data?metrics=" + \
# metric + "&for=10"
url = self.GK_api + "functions/metrics/" + vnf_instance_uuid + "/" + vdu_id + "/" + vc_id +"/synch-mon-data"
params = {"metrics": metric}
response = get(url, headers=headers, params=params)
code = response.status_code
LOG.debug("url: {}".format(response.url))
LOG.debug("websocket request response: {}".format(response.json()))
if code == 200:
ws_url = response.json().get('ws_url')
LOG.info('ws_url: {}'.format(ws_url))
return ws_url
# Do a query to the SP Prometheus DB
def _get_async_metric(self, vnf_instance_uuid, vdu_id, vc_id, metric, since=None, until=None, step='10s'):
"""
call Gatekeeper API …/functions/metrics/:inst_id/:vdu_id/:vnfc_id/asynch-mon-data
:param vnf_instance_uuid: vnf instance uuid of the VNF
:param vdu_id: vdu id in the VNFD of the metric we want to monitor
:param vnfc_id: vnfc id in the VNFR of the metric we want to monitor
:param metric:
:param since:
:param until:
:return:
"""
# pick some default time values (since 1 min ago until now) (notation eg. 2017-05-05T17:10:22Z)
# The SONATA integration env is UTC time
if not until:
#now = datetime.datetime.now()
now = datetime.datetime.utcnow()
until = now.strftime("%Y-%m-%dT%H:%M:%SZ")
#until = '2017-06-19T10:06:00Z'
if not since:
#now = datetime.datetime.now()
now = datetime.datetime.utcnow()
now_minus_1 = now - datetime.timedelta(minutes=1)
since = now_minus_1.strftime("%Y-%m-%dT%H:%M:%SZ")
#since = '2017-06-19T10:05:00Z'
LOG.info("since: {}".format(since))
LOG.info("until: {}".format(until))
LOG.info("step: {}".format(step))
headers = {'Authorization': "Bearer %s" % self.access_token}
url = self.GK_api + "functions/metrics/" + vnf_instance_uuid + "/" + vdu_id + "/" + vc_id + "/asynch-mon-data"
params = {"metrics":metric,
"since":since,
"until":until,
"step":step}
response = get(url, headers=headers, params=params)
code = response.status_code
LOG.debug("url: {}".format(response.url))
LOG.debug("metric request response: {}".format(response.text))
return response.json()
class SP_websocket(websocket.WebSocketApp):
    def __init__(self, url, vnf_name=None, metric=None, vm_id=None,
                 desc='exported metric from SP', print_metrics=True):
        self.vnf_name = vnf_name
        self.metric = metric
        self.vc_id = vm_id  # the unique identifier of the VM, used by OpenStack
        self.desc = desc
        self.print = print_metrics
self.metric_received = False
self.prometheus_metric = None
websocket.WebSocketApp.__init__(self, url,
on_message=self._on_message,
on_error=self._on_error,
on_close=self._on_close,
on_open=self._on_open
)
def _on_message(self, ws, message):
LOG.info('ws message: {}'.format(message))
metric_list = self.find_metric(message)
# set the metric with the correct labels once, when first value is received
if not self.metric_received:
self.set_exported_metric(metric_list)
if self.metric_received:
for metric in metric_list:
self.prometheus_metric.labels(**metric['labels']).set(metric["value"])
# some info printing
if self.metric_received and self.print \
and self.vnf_name is not None and self.metric is not None:
message = self.filter_output(message)
    def _on_error(self, ws, error):
        LOG.error('websocket error: {}'.format(error))
        self._config_prometheus(remove=True)
    def _on_close(self, ws):
        LOG.info('websocket closed: {}'.format(self.url))
        self._config_prometheus(remove=True)
def _on_open(self, ws):
global prometheus_stream_port
# start local http export server
start_http_server(prometheus_stream_port)
# make Prometheus scrape this server
self._config_prometheus()
LOG.info('websocket opened: {}'.format(self.url))
@staticmethod
def _config_prometheus(remove=False):
global prometheus_server_api
global prometheus_config_path
docker_cli = docker.from_env()
# check if containers are already running
c1 = docker_cli.containers.list(filters={'status': 'running', 'name': 'prometheus'})
if len(c1) < 1:
LOG.info('Prometheus is not running')
return "Prometheus DB is not running"
# make Prometheus scrape this server
config_file = read_yaml(prometheus_config_path)
targets = config_file.get('scrape_configs', [])
SP_stream_config = next((target for target in targets if target.get('job_name') == 'SP_stream'), None)
# the SP http server is not yet added to the config file
config_dict = {'job_name': 'SP_stream', 'scrape_interval': '1s',
'static_configs': [{'targets': ['172.17.0.1:{}'.format(prometheus_stream_port)]}]}
if not SP_stream_config and not remove:
config_file['scrape_configs'].append(config_dict)
LOG.info('added SP stream to Prometheus')
elif remove and SP_stream_config:
config_file['scrape_configs'].remove(config_dict)
LOG.info('removed SP stream from Prometheus')
write_yaml(prometheus_config_path, config_file)
post(prometheus_server_api + '/-/reload')
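    # Hedged sketch of the scrape_configs entry this method appends, shown in
    # YAML form (mirrors config_dict above with the default stream port 8082):
    #   - job_name: SP_stream
    #     scrape_interval: 1s
    #     static_configs:
    #       - targets: ['172.17.0.1:8082']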
def set_exported_metric(self, metric_list):
for metric in metric_list:
# metric is found and labels are set
metric_name = self.metric
labels = list(metric['labels'])
self.prometheus_metric = Gauge(metric_name, self.desc, labels)
self.metric_received = True
LOG.info('exporting metric with labels: {}'.format(labels))
break
def filter_output(self, message):
data = json.loads(message)
metric_list = data.get(self.metric, [])
metric = {}
for metric in metric_list:
for label in metric.get('labels', []):
if self.vc_id in label:
LOG.info('label: {}'.format(label))
LOG.info('value: {}'.format(metric.get('value')))
LOG.info('time: {}'.format(metric.get('time')))
break
return metric
def find_metric(self, message):
data = json.loads(message)
metric_list = data.get(self.metric, [])
metric_list_out = []
for metric in metric_list:
metric_found = False
labels = {}
LOG.debug('metric found:{}'.format(metric))
for label in metric.get('labels', []):
key, value = label.split('=')
labels[key] = str(value).replace('"','')
if self.vc_id in value:
metric_found = True
if metric_found:
# metric is found and labels are set
value = metric.get('value')
metric = {'labels': labels, "value": value}
metric_list_out.append(metric)
return metric_list_out | 43.533454 | 143 | 0.620005 | 3,160 | 24,074 | 4.537658 | 0.138608 | 0.012204 | 0.020922 | 0.007671 | 0.432039 | 0.40219 | 0.34577 | 0.316898 | 0.29758 | 0.290048 | 0 | 0.013361 | 0.272493 | 24,074 | 553 | 144 | 43.533454 | 0.805013 | 0.169436 | 0 | 0.378238 | 0 | 0.005181 | 0.157024 | 0.010645 | 0 | 0 | 0 | 0.001808 | 0 | 1 | 0.056995 | false | 0.005181 | 0.031088 | 0 | 0.173575 | 0.007772 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db3db553de8307aa88c5fde47c1bd6250050be2 | 2,246 | py | Python | cogs/miscellaneous/avatar.py | AkshuAgarwal/Aperture-1.7 | c55ffa68d3a4de0daaaad2c918173e5ebca9f006 | [
"MIT"
] | 2 | 2021-09-05T16:42:13.000Z | 2021-09-09T18:41:14.000Z | cogs/miscellaneous/avatar.py | AkshuAgarwal/Aperture-1.7 | c55ffa68d3a4de0daaaad2c918173e5ebca9f006 | [
"MIT"
] | null | null | null | cogs/miscellaneous/avatar.py | AkshuAgarwal/Aperture-1.7 | c55ffa68d3a4de0daaaad2c918173e5ebca9f006 | [
"MIT"
] | null | null | null | from datetime import datetime
from typing import Union
from discord import Member, User, Embed
from discord.ext import commands
from bot.main import NewCommand
class Avatar(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(
name='avatar',
cls=NewCommand,
aliases=['av'],
brief='That Avatar looks cool!',
description='Get the Avatar of a User',
help="""This command is used to get the Avatar of a User/Member.
        The Member should be visible to Me. That means I need to share at least 1 common Server with the user whose Avatar I need to get.""",
usage='[user:name/id/@mention, default:command_invoker]',
explained_usage=["**User:** User whose Avatar you need to get. Can be Name, ID or Mention."],
examples=[
'avatar',
'avatar 764462046032560128',
'avatar @Akshu'
]
)
@commands.cooldown(1, 5, commands.BucketType.member)
async def _avatar(self, ctx, user:Union[User, Member]=None):
if not user:
user = ctx.author
if not user.avatar:
desc = f"> **Download Avatar:**\n> [png]({user.avatar_url})"
elif user.is_avatar_animated() is False:
desc = f"> **Download Avatar:**\n> [webp]({user.avatar_url_as(format='webp')}) | [jpeg]({user.avatar_url_as(format='jpeg')}) | [jpg]({user.avatar_url_as(format='jpg')}) | [png]({user.avatar_url_as(format='png')})"
elif user.is_avatar_animated() is True:
desc = f"> **Download Avatar:**\n> [gif]({user.avatar_url_as(format='gif')}) | [webp]({user.avatar_url_as(format='webp')}) | [jpeg]({user.avatar_url_as(format='jpeg')}) | [jpg]({user.avatar_url_as(format='jpg')}) | [png]({user.avatar_url_as(format='png')})"
embed = Embed(title=f"{user}'s Avatar", description=desc, color=0x00eeff, timestamp=datetime.utcnow())
embed.set_author(name=user, icon_url=user.avatar_url)
embed.set_footer(text=f'Thanks for using {ctx.guild.me.name}', icon_url=ctx.guild.me.avatar_url)
embed.set_image(url=user.avatar_url)
await ctx.reply(embed=embed)
def setup(client):
client.add_cog(Avatar(client)) | 45.836735 | 269 | 0.638023 | 312 | 2,246 | 4.467949 | 0.355769 | 0.093257 | 0.111908 | 0.096844 | 0.283357 | 0.225251 | 0.160689 | 0.160689 | 0.160689 | 0.160689 | 0 | 0.01359 | 0.213713 | 2,246 | 49 | 270 | 45.836735 | 0.775764 | 0 | 0 | 0 | 0 | 0.073171 | 0.427681 | 0.199377 | 0 | 0 | 0.00356 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.121951 | 0 | 0.195122 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db5993b3ba09fcfb72c92ea6f0805e8ba07d24f | 810 | py | Python | Basics II/Lists2.py | marinaoliveira96/python-exercises | 13fc0ec30dec9bb6531cdeb41c80726971975835 | [
"MIT"
] | null | null | null | Basics II/Lists2.py | marinaoliveira96/python-exercises | 13fc0ec30dec9bb6531cdeb41c80726971975835 | [
"MIT"
] | null | null | null | Basics II/Lists2.py | marinaoliveira96/python-exercises | 13fc0ec30dec9bb6531cdeb41c80726971975835 | [
"MIT"
] | null | null | null | print(isinstance(3, int))
lista = ['marina', 2, 'jujuba']
lista2 = []
for i in lista:
if isinstance(i, str):
lista2.append(i)
print(lista2)
myList = ['marina', 123, 9.5]
print(isinstance(9.5, int))
#strings
items = ['marina', 123, 9.5]
print(isinstance(9.5, float))
str_items = ['abc', 'Abc','def', 'BBBB','ghi', 'AAAA']
str_items.sort(key=str.lower, reverse=True)
print(str_items)
new_items = sorted(str_items)
print(new_items)
#numbers
int_numbers = [123, 13.44, 5436, 324.54, 9034]
int_numbers.sort()
print(f'sort.() = {int_numbers}')
int_numbers.sort(reverse=True)
print(f'sort.(reverse=True) = {int_numbers}')
# this sorted is about the list, not the numbers
new_numbers = sorted(int_numbers, reverse=False)
print(f'new numbers = {new_numbers}')
total = sum(int_numbers)
print(total)
| 18.837209 | 54 | 0.691358 | 128 | 810 | 4.257813 | 0.40625 | 0.12844 | 0.036697 | 0.040367 | 0.102752 | 0.102752 | 0.102752 | 0.102752 | 0 | 0 | 0 | 0.055241 | 0.128395 | 810 | 42 | 55 | 19.285714 | 0.716714 | 0.076543 | 0 | 0 | 0 | 0 | 0.173154 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db72819dbae785cf03bf81e31c9e2232cea71f2 | 1,283 | py | Python | webapp/config.py | rustprooflabs/psycopg3-connpool | 5576fd89ed986afb24fa2f229d52925e7a6d845c | [
"MIT"
] | 3 | 2021-03-13T14:07:25.000Z | 2022-03-12T01:51:49.000Z | webapp/config.py | rustprooflabs/psycopg3-connpool | 5576fd89ed986afb24fa2f229d52925e7a6d845c | [
"MIT"
] | 1 | 2021-09-12T15:03:12.000Z | 2021-09-12T15:03:12.000Z | webapp/config.py | rustprooflabs/psycopg3-connpool | 5576fd89ed986afb24fa2f229d52925e7a6d845c | [
"MIT"
] | null | null | null | import os
import sys
import logging
APP_NAME = 'psycopg3-connpool'
# Set to False to force reporting queries to share pool with non-reporting queries
REPORTING_POOL = True
POOL_MIN_SIZE = 1
POOL_MAX_SIZE = 10
POOL_MAX_IDLE = 60
POOL_STAT_SLEEP = 300
if not REPORTING_POOL:
    POOL_MAX_SIZE += 5
CURR_PATH = os.path.abspath(os.path.dirname(__file__))
PROJECT_BASE_PATH = os.path.abspath(os.path.join(CURR_PATH, os.pardir))
try:
LOG_PATH = os.environ['LOG_PATH']
except KeyError:
LOG_PATH = PROJECT_BASE_PATH + '/webapp.log'
# Required for CSRF protection in Flask, please change to something secret!
try:
APP_SECRET_KEY = os.environ['APP_SECRET_KEY']
except KeyError:
ERR_MSG = '\nSECURITY WARNING: To ensure security please set the APP_SECRET_KEY'
ERR_MSG += ' environment variable.\n'
#LOGGER.warning(ERR_MSG)
print(ERR_MSG)
APP_SECRET_KEY = 'S$332sgajg9GHKL14jklsjfkjasglmssajfsdgGADAAJj77j@neHMld'
try:
DATABASE_STRING = os.environ['PG_CONN']
except KeyError:
key_msg = 'Database environment variable not set. Need PG_CONN string'
sys.exit(key_msg)
try:
APP_DEBUG_RAW = os.environ['APP_DEBUG']
if APP_DEBUG_RAW == 'False':
APP_DEBUG = False
else:
APP_DEBUG = True
except KeyError:
APP_DEBUG = False
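# Hedged usage note (illustration only; the values and the libpq-style
# conninfo format for PG_CONN are assumptions):
#   export PG_CONN='host=localhost dbname=app user=app password=secret'
#   export APP_DEBUG='False'
#   export LOG_PATH='/var/log/webapp.log'
#   export APP_SECRET_KEY='pick-something-secret'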
| 23.759259 | 84 | 0.731878 | 189 | 1,283 | 4.698413 | 0.433862 | 0.054054 | 0.054054 | 0.038288 | 0.051802 | 0.051802 | 0 | 0 | 0 | 0 | 0 | 0.01711 | 0.180047 | 1,283 | 53 | 85 | 24.207547 | 0.826996 | 0.137958 | 0 | 0.277778 | 0 | 0 | 0.251589 | 0.049955 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db868f6631d93b30648549794d251ef271627af | 3,695 | py | Python | mnist.py | xiaoxinyi/tfrecord | 6f39e3dbd5b1ffb3df8636b3163dbe2161469075 | [
"Apache-2.0"
] | null | null | null | mnist.py | xiaoxinyi/tfrecord | 6f39e3dbd5b1ffb3df8636b3163dbe2161469075 | [
"Apache-2.0"
] | null | null | null | mnist.py | xiaoxinyi/tfrecord | 6f39e3dbd5b1ffb3df8636b3163dbe2161469075 | [
"Apache-2.0"
] | null | null | null | import os
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.examples.tutorials.mnist import mnist
TRAIN_FILE = 'train.tfrecords'
VALIDATION_FILE = 'validation.tfrecords'
def lenet(images):
net = slim.layers.conv2d(images, 20, [5,5], scope='conv1')
net = slim.layers.max_pool2d(net, [2,2], scope='pool1')
net = slim.layers.conv2d(net, 50, [5,5], scope='conv2')
net = slim.layers.max_pool2d(net, [2,2], scope='pool2')
net = slim.layers.flatten(net, scope='flatten3')
net = slim.layers.fully_connected(net, 500, scope='fully_connected4')
net = slim.layers.fully_connected(net, 10, activation_fn=None, scope='fully_connected5')
return net
def read_and_decode(filename_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since both keys are required.
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
})
# Convert from a scalar string tensor (whose single string has
# length mnist.IMAGE_PIXELS) to a uint8 tensor with shape
# [mnist.IMAGE_PIXELS].
image = tf.decode_raw(features['image_raw'], tf.uint8)
image.set_shape([mnist.IMAGE_PIXELS])
image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
image = tf.reshape(image, [mnist.IMAGE_SIZE, mnist.IMAGE_SIZE, 1])
# OPTIONAL: Could reshape into a 28x28 image and apply distortions
# here. Since we are not applying any distortions in this
# example, and the next step expects the image to be flattened
# into a vector, we don't bother.
# Convert label from a scalar uint8 tensor to an int32 scalar.
label = tf.cast(features['label'], tf.int32)
return image, label
def inputs(train_dir, train, batch_size, num_epochs, one_hot_labels=False):
"""Reads input data num_epochs times.
Args:
train: Selects between the training (True) and validation (False) data.
batch_size: Number of examples per returned batch.
num_epochs: Number of times to read the input data, or 0/None to
train forever.
Returns:
A tuple (images, labels), where:
* images is a float tensor with shape [batch_size, mnist.IMAGE_PIXELS]
in the range [-0.5, 0.5].
* labels is an int32 tensor with shape [batch_size] with the true label,
a number in the range [0, mnist.NUM_CLASSES).
Note that an tf.train.QueueRunner is added to the graph, which
must be run using e.g. tf.train.start_queue_runners().
"""
if not num_epochs: num_epochs = None
filename = os.path.join(train_dir,
TRAIN_FILE if train else VALIDATION_FILE)
with tf.name_scope('input'):
filename_queue = tf.train.string_input_producer(
[filename], num_epochs=num_epochs)
# Even when reading in multiple threads, share the filename
# queue.
image, label = read_and_decode(filename_queue)
if one_hot_labels:
label = tf.one_hot(label, mnist.NUM_CLASSES, dtype=tf.int32)
# Shuffle the examples and collect them into batch_size batches.
# (Internally uses a RandomShuffleQueue.)
# We run this in two threads to avoid being a bottleneck.
images, sparse_labels = tf.train.shuffle_batch(
[image, label], batch_size=batch_size, num_threads=2,
capacity=1000 + 3 * batch_size,
# Ensures a minimum amount of shuffling of examples.
min_after_dequeue=1000)
return images, sparse_labels
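# Hedged training sketch (added for illustration; the data directory, batch
# size and loss/optimizer wiring are assumptions, not part of this repo):
#
#   images, labels = inputs('/tmp/mnist_data', train=True,
#                           batch_size=64, num_epochs=2)
#   logits = lenet(images)
#   loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
#   train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)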
| 39.731183 | 91 | 0.673884 | 518 | 3,695 | 4.673745 | 0.372587 | 0.02974 | 0.037588 | 0.015696 | 0.115655 | 0.074349 | 0.026435 | 0.026435 | 0.026435 | 0 | 0 | 0.024613 | 0.230311 | 3,695 | 92 | 92 | 40.163043 | 0.826653 | 0.381867 | 0 | 0 | 0 | 0 | 0.055732 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.088889 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db9b3ff0897bce11d0fb7fc945e79ab18d1a305 | 2,635 | py | Python | setup.py | BenFrankel/hgf | 78ec6a1e4eaa62005cc3914e8a554d2f1401ac37 | [
"Apache-2.0"
] | null | null | null | setup.py | BenFrankel/hgf | 78ec6a1e4eaa62005cc3914e8a554d2f1401ac37 | [
"Apache-2.0"
] | 2 | 2017-12-27T17:38:18.000Z | 2017-12-27T17:42:10.000Z | setup.py | BenFrankel/hgf | 78ec6a1e4eaa62005cc3914e8a554d2f1401ac37 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
###############################################################################
# #
# Copyright 2017 - Ben Frankel #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
###############################################################################
from setuptools import setup, find_packages
version = '0.2.2'
with open('README.md') as f:
long_description = f.read()
setup(
name='hgf',
version=version,
description='A framework for building hierarchical GUIs',
long_description=long_description,
author='Ben Frankel',
author_email='ben.frankel7@gmail.com',
license='Apache 2.0',
url='https://www.github.com/BenFrankel/hgf',
download_url='https://www.github.com/BenFrankel/hgf/tarball/' + version,
keywords='hgf hierarchical gui framework',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: pygame',
],
packages=find_packages(),
install_requires=[
'pygame (>=1.9.1)',
'pyperclip (>=1.6.0)',
],
provides=['hgf']
)
| 43.196721 | 83 | 0.447059 | 218 | 2,635 | 5.366972 | 0.59633 | 0.051282 | 0.022222 | 0.02735 | 0.05641 | 0.05641 | 0.05641 | 0 | 0 | 0 | 0 | 0.014085 | 0.407211 | 2,635 | 60 | 84 | 43.916667 | 0.734955 | 0.4 | 0 | 0.0625 | 0 | 0 | 0.466562 | 0.017309 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.03125 | 0 | 0.03125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db9c18f5e2082b747c4a03ec17f797125c796c9 | 605 | py | Python | tools/ml/get_email.py | Xowap/Maiznet | bd564d4c93eb28dc87135e9d31dad9a921ea8cf6 | [
"WTFPL"
] | 1 | 2015-05-04T09:28:14.000Z | 2015-05-04T09:28:14.000Z | tools/ml/get_email.py | Xowap/Maiznet | bd564d4c93eb28dc87135e9d31dad9a921ea8cf6 | [
"WTFPL"
] | null | null | null | tools/ml/get_email.py | Xowap/Maiznet | bd564d4c93eb28dc87135e9d31dad9a921ea8cf6 | [
"WTFPL"
] | null | null | null | #!/usr/bin/python
from django.core.management import setup_environ
import sys
sys.path.append('/var/wsgi/maiznet')
sys.path.append('/var/wsgi')
from maiznet import settings
setup_environ(settings)
from maiznet.register.models import Presence
wfile_announces = open("/var/wsgi/maiznet/tools/ml/emails_announces","w")
wfile_talkings = open("/var/wsgi/maiznet/tools/ml/emails_talkings","w")
presence = Presence.objects.all()
for p in presence :
if p.talkings==1 :
wfile_talkings.write(p.user.email + "\n")
wfile_announces.write(p.user.email + "\n")
wfile_announces.close()
wfile_talkings.close()
| 23.269231 | 73 | 0.760331 | 90 | 605 | 5 | 0.444444 | 0.062222 | 0.093333 | 0.071111 | 0.36 | 0.271111 | 0.271111 | 0 | 0 | 0 | 0 | 0.001818 | 0.090909 | 605 | 25 | 74 | 24.2 | 0.816364 | 0.026446 | 0 | 0 | 0 | 0 | 0.19898 | 0.144558 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dbbf348283d5d908174cdee1b01595e478b5b7d | 11,798 | py | Python | src/network/graph_module.py | andrewliao11/env-aware-program-gen | bc50b788c35e8e8545b8af9127c279a7387146d6 | [
"MIT"
] | 5 | 2019-08-17T07:53:02.000Z | 2022-02-26T07:17:37.000Z | src/network/graph_module.py | andrewliao11/env-aware-program-gen | bc50b788c35e8e8545b8af9127c279a7387146d6 | [
"MIT"
] | 9 | 2019-06-28T07:36:10.000Z | 2022-03-11T23:48:39.000Z | src/network/graph_module.py | andrewliao11/env-aware-program-gen | bc50b788c35e8e8545b8af9127c279a7387146d6 | [
"MIT"
] | 1 | 2020-04-14T12:48:40.000Z | 2020-04-14T12:48:40.000Z | import torch
import torch.nn as nn
from program.graph_utils import *
from helper import fc_block, LayerNormGRUCell
# helper class for GraphEncoder
class AttrProxy(object):
"""
Translates index lookups into attribute lookups.
    Implements a trick that lets an nn.Module hold a list of nn.Module children;
see https://discuss.pytorch.org/t/list-of-nn-module-in-a-nn-module/219/2
"""
def __init__(self, module, prefix):
self.module = module
self.prefix = prefix
def __getitem__(self, i):
return getattr(self.module, self.prefix + str(i))
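# Hedged usage sketch (illustration): after self.add_module("hidden2message_in_0", fc),
# AttrProxy(self, "hidden2message_in_")[0] resolves back to fc via getattr;
# this is how the per-edge-type layers registered below are indexed.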
class VanillaGraphEncoder(nn.Module):
def __init__(
self,
n_timesteps,
n_edge_types,
graph_hidden,
embedding_dim,
hidden):
super(VanillaGraphEncoder, self).__init__()
layernorm = True
self.n_timesteps = n_timesteps
self.n_edge_types = n_edge_types
self.embedding_dim = embedding_dim
self.input_dim = n_edge_types + embedding_dim
self.graph_hidden = graph_hidden
node_init2hidden = nn.Sequential()
node_init2hidden.add_module(
'fc1',
fc_block(
3 * embedding_dim,
graph_hidden,
False,
nn.Tanh))
node_init2hidden.add_module(
'fc2',
fc_block(
graph_hidden,
graph_hidden,
False,
nn.Tanh))
for i in range(n_edge_types):
hidden2message_in = fc_block(
graph_hidden, graph_hidden, False, nn.Tanh)
self.add_module(
"hidden2message_in_{}".format(i),
hidden2message_in)
hidden2message_out = fc_block(
graph_hidden, graph_hidden, False, nn.Tanh)
self.add_module(
"hidden2message_out_{}".format(i),
hidden2message_out)
if layernorm:
self.gru_cell = LayerNormGRUCell
else:
self.gru_cell = nn.GRUCell
propagator = self.gru_cell(
input_size=2 * n_edge_types * graph_hidden,
hidden_size=graph_hidden)
self.node_init2hidden = node_init2hidden
self.hidden2message_in = AttrProxy(self, "hidden2message_in_")
self.hidden2message_out = AttrProxy(self, "hidden2message_out_")
self.propagator = propagator
def forward(
self,
edge_adjacency_matrix,
node_state_prev,
related_mask=None):
"""edge_adjacency_matrix: e, b, v, v
        node_state_prev: b, v, h
        related_mask: b, v, optional (1 where a node's state may change)
        returns node_state: b, v, h
"""
B, V, H = node_state_prev.size()
node_state_prev = node_state_prev.view(B * V, -1)
node_state = node_state_prev
edge_adjacency_matrix = edge_adjacency_matrix.float()
edge_adjacency_matrix_out = edge_adjacency_matrix
# convert the outgoing edges to incoming edges
edge_adjacency_matrix_in = edge_adjacency_matrix.permute(0, 1, 3, 2)
for i in range(self.n_timesteps):
message_out = []
for j in range(self.n_edge_types):
node_state_hidden = self.hidden2message_out[j](
node_state) # b*v, h
node_state_hidden = node_state_hidden.view(B, V, -1)
message_out.append(
torch.bmm(
edge_adjacency_matrix_out[j],
node_state_hidden)) # b, v, h
# concatenate the message from each edges
message_out = torch.stack(message_out, 2) # b, v, e, h
message_out = message_out.view(B * V, -1) # b, v, e*h
message_in = []
for j in range(self.n_edge_types):
node_state_hidden = self.hidden2message_in[j](
node_state) # b*v, h
node_state_hidden = node_state_hidden.view(B, V, -1)
message_in.append(
torch.bmm(
edge_adjacency_matrix_in[j],
node_state_hidden))
# concatenate the message from each edges
message_in = torch.stack(message_in, 2) # b, v, e, h
message_in = message_in.view(B * V, -1) # b, v, e*h
message = torch.cat([message_out, message_in], 1)
node_state = self.propagator(message, node_state)
if related_mask is not None:
# mask out un-related changes
related_mask_expand = related_mask.unsqueeze(
2).repeat(1, 1, self.graph_hidden).float()
related_mask_expand = related_mask_expand.view(B * V, -1)
node_state = node_state * related_mask_expand + \
node_state_prev * (-related_mask_expand + 1)
node_state = node_state.view(B, V, -1)
return node_state
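# Hedged shape sketch (illustration only; the sizes are arbitrary assumptions):
#   enc = VanillaGraphEncoder(n_timesteps=2, n_edge_types=3,
#                             graph_hidden=64, embedding_dim=32, hidden=64)
#   adj = torch.zeros(3, 8, 10, 10)  # e, b, v, v adjacency, one slice per edge type
#   state = torch.zeros(8, 10, 64)   # b, v, h previous node states
#   out = enc(adj, state)            # -> (8, 10, 64) updated node states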
class ResidualActionGraphEncoder(VanillaGraphEncoder):
def __init__(
self,
n_edge_types,
n_touch,
graph_hidden,
embedding_dim,
hidden):
super(
ResidualActionGraphEncoder,
self).__init__(
0,
n_edge_types,
graph_hidden,
embedding_dim,
hidden)
self.n_touch = n_touch
action2hidden = nn.Sequential()
action2hidden.add_module(
'fc1',
fc_block(
embedding_dim + n_touch,
graph_hidden,
False,
nn.Tanh))
action2hidden.add_module(
'fc2',
fc_block(
graph_hidden,
graph_hidden,
False,
nn.Tanh))
compute_residual = nn.Sequential()
compute_residual.add_module(
'fc1',
fc_block(
2 * graph_hidden,
graph_hidden,
False,
nn.Tanh))
compute_residual.add_module(
'fc2',
fc_block(
graph_hidden,
graph_hidden,
False,
nn.Tanh))
self.compute_residual = compute_residual
self.action2hidden = action2hidden
def action_applier(
self,
action_embedding,
batch_touch_idx,
batch_node_state_prev,
batch_touch_mask):
"""
action_embedding: b, emb
batch_touch_idx: b, n, touch_type,
batch_node_state_prev: b, n, h
batch_touch_mask: b, n
"""
B, N, _ = batch_touch_idx.size()
action_embedding = action_embedding.unsqueeze(1).repeat(1, N, 1)
graph_input = torch.cat([action_embedding, batch_touch_idx], 2)
graph_input = self.action2hidden(graph_input)
graph_input = graph_input.view(B * N, -1)
batch_node_state_prev = batch_node_state_prev.view(B * N, -1)
residual = self.compute_residual(
torch.cat([graph_input, batch_node_state_prev], 1))
batch_touch_mask = batch_touch_mask.unsqueeze(
2).repeat(1, 1, self.graph_hidden)
batch_touch_mask = batch_touch_mask.view(B * N, -1)
batch_node_state = batch_node_state_prev + residual * batch_touch_mask
batch_node_state = batch_node_state.view(B, N, -1)
return batch_node_state
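# Hedged shape sketch for action_applier (illustration; sizes are assumptions):
#   enc = ResidualActionGraphEncoder(n_edge_types=3, n_touch=4,
#                                    graph_hidden=64, embedding_dim=32, hidden=64)
#   act = torch.zeros(8, 32)       # b, emb action embedding
#   touch = torch.zeros(8, 10, 4)  # b, n, n_touch touch-type indicators
#   prev = torch.zeros(8, 10, 64)  # b, n, h node states before the action
#   mask = torch.zeros(8, 10)      # b, n; 1.0 where the action touches a node
#   out = enc.action_applier(act, touch, prev, mask)  # -> (8, 10, 64)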
class FCActionGraphEncoder(VanillaGraphEncoder):
def __init__(
self,
n_edge_types,
n_touch,
graph_hidden,
embedding_dim,
hidden):
super(FCActionGraphEncoder,
self).__init__(
0,
n_edge_types,
graph_hidden,
embedding_dim,
hidden)
self.n_touch = n_touch
action2hidden = nn.Sequential()
action2hidden.add_module('fc1', fc_block(embedding_dim + n_touch, graph_hidden, False, nn.Tanh))
action2hidden.add_module('fc2', fc_block(graph_hidden, graph_hidden, False, nn.Tanh))
compute_residual = nn.Sequential()
compute_residual.add_module('fc1', fc_block(2*graph_hidden, graph_hidden, False, nn.Tanh))
compute_residual.add_module('fc2', fc_block(graph_hidden, graph_hidden, False, nn.Tanh))
self.compute_residual = compute_residual
self.action2hidden = action2hidden
def action_applier(
self,
action_embedding,
batch_touch_idx,
batch_node_state_prev,
batch_touch_mask):
"""
action_embedding: b, emb
batch_touch_idx: b, n, touch_type,
batch_node_state_prev: b, n, h
batch_touch_mask: b, n
"""
B, N, _ = batch_touch_idx.size()
action_embedding = action_embedding.unsqueeze(1).repeat(1, N, 1)
graph_input = torch.cat([action_embedding, batch_touch_idx], 2)
graph_input = self.action2hidden(graph_input)
graph_input = graph_input.view(B * N, -1)
batch_node_state_prev = batch_node_state_prev.view(B * N, -1)
batch_node_state = self.compute_residual(torch.cat([graph_input, batch_node_state_prev], 1))
batch_touch_mask = batch_touch_mask.unsqueeze(2).repeat(1, 1, self.graph_hidden)
batch_touch_mask = batch_touch_mask.view(B * N, -1)
batch_node_state = batch_node_state * batch_touch_mask + batch_node_state_prev * (-batch_touch_mask + 1)
batch_node_state = batch_node_state.view(B, N, -1)
return batch_node_state
class GRUActionGraphEncoder(VanillaGraphEncoder):
def __init__(
self,
n_edge_types,
n_touch,
graph_hidden,
embedding_dim,
hidden):
super(
GRUActionGraphEncoder,
self).__init__(
0,
n_edge_types,
graph_hidden,
embedding_dim,
hidden)
self.n_touch = n_touch
action2hidden = nn.Sequential()
action2hidden.add_module('fc1', fc_block(embedding_dim + n_touch, graph_hidden, False, nn.Tanh))
action2hidden.add_module('fc2', fc_block(graph_hidden, graph_hidden, False, nn.Tanh))
temporal_propagator = self.gru_cell(input_size=graph_hidden, hidden_size=graph_hidden)
self.temporal_propagator = temporal_propagator
self.action2hidden = action2hidden
def action_applier(
self,
action_embedding,
batch_touch_idx,
batch_node_state_prev,
batch_touch_mask):
"""
action_embedding: b, emb
batch_touch_idx: b, n, touch_type,
batch_node_state_prev: b, n, h
batch_touch_mask: b, n
"""
B, N, _ = batch_touch_idx.size()
action_embedding = action_embedding.unsqueeze(1).repeat(1, N, 1)
graph_input = torch.cat([action_embedding, batch_touch_idx], 2)
graph_input = self.action2hidden(graph_input)
graph_input = graph_input.view(B * N, -1)
batch_node_state_prev = batch_node_state_prev.view(B * N, -1)
batch_node_state = self.temporal_propagator(graph_input, batch_node_state_prev)
batch_touch_mask = batch_touch_mask.unsqueeze(2).repeat(1, 1, self.graph_hidden)
batch_touch_mask = batch_touch_mask.view(B * N, -1)
batch_node_state = batch_node_state * batch_touch_mask + batch_node_state_prev * (-batch_touch_mask + 1)
batch_node_state = batch_node_state.view(B, N, -1)
return batch_node_state
| 32.323288 | 112 | 0.577386 | 1,356 | 11,798 | 4.651917 | 0.100295 | 0.082752 | 0.07546 | 0.051363 | 0.717185 | 0.683893 | 0.654883 | 0.627616 | 0.60558 | 0.594008 | 0 | 0.014564 | 0.342346 | 11,798 | 364 | 113 | 32.412088 | 0.798428 | 0.073233 | 0 | 0.647287 | 0 | 0 | 0.010682 | 0.001968 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03876 | false | 0 | 0.015504 | 0.003876 | 0.093023 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dc3361defa92720863211b21b935ff651f2bf8d | 2,185 | py | Python | tests/test_view.py | takos22/baguette | 36c6cafa793ff4be057ca2f8a5c7129baf8a5ab8 | [
"MIT"
] | 20 | 2021-04-13T06:23:33.000Z | 2021-12-12T13:52:50.000Z | tests/test_view.py | takos22/baguette | 36c6cafa793ff4be057ca2f8a5c7129baf8a5ab8 | [
"MIT"
] | 4 | 2021-04-17T23:17:36.000Z | 2021-05-23T14:20:08.000Z | tests/test_view.py | takos22/baguette | 36c6cafa793ff4be057ca2f8a5c7129baf8a5ab8 | [
"MIT"
] | 3 | 2021-04-23T00:01:45.000Z | 2021-04-29T22:48:33.000Z | import pytest
from baguette.app import Baguette
from baguette.httpexceptions import MethodNotAllowed
from baguette.responses import make_response
from baguette.view import View
@pytest.mark.asyncio
async def test_view_create():
class TestView(View):
async def get(self, request):
return "GET"
async def post(self, request):
return "POST"
async def put(self, request):
return "PUT"
async def delete(self, request):
return "DELETE"
async def nonexistent_method(self, request):
return "NONEXISTENT"
view = TestView(Baguette())
assert view.methods == ["GET", "POST", "PUT", "DELETE"]
assert await view.get(None) == "GET"
assert await view.post(None) == "POST"
assert await view.put(None) == "PUT"
assert await view.delete(None) == "DELETE"
assert await view.nonexistent_method(None) == "NONEXISTENT"
@pytest.fixture(name="view")
def create_view():
class TestView(View):
async def get(self, request):
return "GET"
async def post(self, request):
return "POST"
async def put(self, request):
return "PUT"
async def delete(self, request):
return "DELETE"
return TestView(Baguette())
@pytest.mark.asyncio
async def test_view_call(view, test_request):
result = await view(test_request)
response = make_response(result)
assert response.status_code == 200
assert response.body == "GET"
@pytest.mark.asyncio
@pytest.mark.parametrize(
["method", "method_allowed"],
[
["GET", True],
["POST", True],
["PUT", True],
["DELETE", True],
["PATCH", False],
["NONEXISTENT", False],
],
)
async def test_view_dispatch(view, test_request, method, method_allowed):
test_request.method = method
if method_allowed:
result = await view.dispatch(test_request)
response = make_response(result)
assert response.status_code == 200
assert response.body == method
else:
with pytest.raises(MethodNotAllowed):
await view.dispatch(test_request)
| 25.406977 | 73 | 0.626545 | 249 | 2,185 | 5.405622 | 0.200803 | 0.071322 | 0.11367 | 0.035661 | 0.425706 | 0.392273 | 0.392273 | 0.343239 | 0.343239 | 0.343239 | 0 | 0.003715 | 0.26087 | 2,185 | 85 | 74 | 25.705882 | 0.829721 | 0 | 0 | 0.384615 | 0 | 0 | 0.066362 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.015385 | false | 0 | 0.076923 | 0 | 0.276923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dc61223149b3158489a5e9ccf76ac85256384c3 | 1,278 | py | Python | utils/filter_empty_lines.py | LeCongThuong/deep-text-recognition-benchmark | b9f4e5dab9a991435d9ba9e71a89dd6fce20f468 | [
"Apache-2.0"
] | null | null | null | utils/filter_empty_lines.py | LeCongThuong/deep-text-recognition-benchmark | b9f4e5dab9a991435d9ba9e71a89dd6fce20f468 | [
"Apache-2.0"
] | null | null | null | utils/filter_empty_lines.py | LeCongThuong/deep-text-recognition-benchmark | b9f4e5dab9a991435d9ba9e71a89dd6fce20f468 | [
"Apache-2.0"
] | null | null | null | import re
def read_from_file(file_path):
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read().splitlines()
return content
def write_to_file(corpus, dest_path):
with open(dest_path, 'w', encoding='utf-8') as f:
for item in corpus:
f.write("%s\n" % item)
def filter_empty_lines(content, character_vocab):
out_of_vocab = f'[^{character_vocab}]'
count = 0
filtered_content = []
for line in content:
print(f'\r{line}', end='')
filtered_line = re.sub(out_of_vocab, '', line)
        if len(filtered_line) == 0:
            count += 1
        else:
            # Keep the whole line; write_to_file expects one item per line
            filtered_content.append(line)
print("Done")
print("Num of invalid lines: ", count)
return filtered_content
def main():
file_path = '/home/love_you/ocr-gen/vi.txt'
character_vocab = 'hjbóẺoÝLvÚẼÁÂẩởĨỈtgKứẾmŨÒWsăỷịơIÔỀửãùaXP9ẰẳỉẹỶzầẪâỸỎảệyOựỬẵỘxCỐlỲD6ộỦỒĂƠÌồ1áTFnỆpHẽờếỏẢYẨUắƯẦíÃẤJèýẲ2i4ẬỊÊÓớR7ÙÕàGỨềỳecêSéừqQạòấỮ0ốẫ5õfỗđỡúNũỤợỖỠMằẸôỚặuỌỞụÀEkĐÉBưẮ3ỂễAìỜủỔỢổọwậdZĩẻ8ỄỰểrÈẴÍỪẶẠữỹV '
dest_path = '/home/love_you/ocr-gen/filtered_vi.txt'
content = read_from_file(file_path)
    filtered_content = filter_empty_lines(content, character_vocab)
write_to_file(filtered_content, dest_path)
if __name__ == '__main__':
    main()
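# A minimal sketch of what the out-of-vocabulary regex does to one line before
# the emptiness test (hypothetical vocabulary):
#
#   >>> import re
#   >>> vocab = 'abc '
#   >>> re.sub(f'[^{vocab}]', '', 'abc123xyz')
#   'abc'
#
# A line reduced to '' this way counts as invalid and is dropped.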
| 29.72093 | 219 | 0.692488 | 154 | 1,278 | 5.5 | 0.383117 | 0.088548 | 0.028335 | 0.03778 | 0.219599 | 0.136954 | 0 | 0 | 0 | 0 | 0 | 0.014663 | 0.199531 | 1,278 | 42 | 220 | 30.428571 | 0.813294 | 0 | 0 | 0 | 0 | 0 | 0.259781 | 0.204225 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0 | 0.032258 | 0 | 0.225806 | 0.096774 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dc6bc8eb5fc6294a76cfdda9f4e036dfe3da0de | 3,680 | py | Python | cltk/data.py | fractaledmind/cltk | 78c7259c1845a4ae8bbd33935ffbae34da23234b | [
"MIT"
] | 1 | 2020-08-02T19:35:06.000Z | 2020-08-02T19:35:06.000Z | cltk/data.py | fractaledmind/cltk | 78c7259c1845a4ae8bbd33935ffbae34da23234b | [
"MIT"
] | null | null | null | cltk/data.py | fractaledmind/cltk | 78c7259c1845a4ae8bbd33935ffbae34da23234b | [
"MIT"
] | null | null | null | """Classes to access the `cltk_data/` directory tree"""
__author__ = 'Stephen Margheim <stephen.margheim@gmail.com>'
__license__ = 'MIT License. See LICENSE.'
import os
import site
from cltk.cltk import CLTK_DATA_DIR
from cltk.cltk.corpus.wrappers.logger import logger
class CorpusError(Exception):
pass
class CLTKData(object):
"""This class provides access to the full directory tree of `cltk_data/`.
The basic structure of the `cltk_data/` directory is:
```
cltk_data/
{language}/
text_corpora/
originals/
{corpus}/
structured/
{corpus}/
plain/
{corpus}/
readable/
{corpus}/
treebank/
{corpus}/
training_set/
{corpus}/
```
Users can set the path to `cltk_data/` via the ``data_path`` property.
When dealing with a particular corpus, users will also need to set the
``language_dir`` property properly in order to access the corpus.
"""
def __init__(self):
self._data_path = None
self._language_dir = None
## Base `cltk_data/` directory --------------------------------------------
@property
def data_path(self):
if self._data_path:
return self.resolve_path(self._data_path)
else:
return self.resolve_path(CLTK_DATA_DIR)
@data_path.setter
def data_path(self, value):
self._data_path = value
## 2nd level language directories -----------------------------------------
@property
def language_dir(self):
if self._language_dir:
return self.resolve_path(os.path.join(self.data_path,
self._language_dir))
else:
# TODO: Fix error message
raise CorpusError('Define `language_dir`!')
@language_dir.setter
def language_dir(self, value):
self._language_dir = value
## 3rd level corpus type directories --------------------------------------
@property
def corpora_dir(self):
return self.resolve_path(os.path.join(self.language_dir,
'text_corpora'))
@property
def treebank_dir(self):
return self.resolve_path(os.path.join(self.language_dir,
'treebank'))
@property
def training_dir(self):
return self.resolve_path(os.path.join(self.language_dir,
'training_set'))
## Misc. ------------------------------------------------------------------
# What does this do?
@property
def bin_path(self):
return os.path.join(site.getsitepackages()[0], 'cltk')
def resolve_path(self, path):
# Resolve absolute path
if os.path.isabs(path):
full_path = path
elif path.startswith('~'):
full_path = os.path.expanduser(path)
        else:
            # Any other relative path (including ones starting with '.');
            # without this fallback `full_path` is undefined for bare
            # relative inputs such as 'corpus_dir'
            full_path = os.path.abspath(path)
# Ensure absolute path exists
if not os.path.exists(full_path):
# If directory
if os.path.splitext(full_path)[1] == '':
os.makedirs(full_path)
logger.info('Directory created at : {}'.format(full_path))
# If file
else:
                # Open in append mode so the file is created if missing
                open(full_path, 'a').close()
logger.info('File created at : {}'.format(full_path))
return full_path
# Alias
cltk_data = CLTKData()
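# A minimal usage sketch (hypothetical language name; the directories are
# created on first access by resolve_path):
#
#   cltk_data.language_dir = 'latin'
#   print(cltk_data.corpora_dir)   # .../cltk_data/latin/text_corpora
#   print(cltk_data.treebank_dir)  # .../cltk_data/latin/treebank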
| 30.92437 | 79 | 0.521467 | 381 | 3,680 | 4.834646 | 0.296588 | 0.071661 | 0.057003 | 0.068404 | 0.169381 | 0.144408 | 0.144408 | 0.144408 | 0.086319 | 0.086319 | 0 | 0.001645 | 0.339402 | 3,680 | 118 | 80 | 31.186441 | 0.756067 | 0.325 | 0 | 0.193548 | 0 | 0 | 0.073871 | 0.011819 | 0 | 0 | 0 | 0.008475 | 0 | 1 | 0.16129 | false | 0.016129 | 0.064516 | 0.064516 | 0.387097 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dc7dd2aecae51adb10cd582c54a3498d17a6890 | 3,482 | py | Python | qa/L0_backend_python/model_control/model_control_test.py | galv/server | 071eb2c6c9a8f1bba380c0e69592f50a857c5c42 | [
"BSD-3-Clause"
] | 2,159 | 2020-08-26T06:21:38.000Z | 2022-03-31T16:13:46.000Z | qa/L0_backend_python/model_control/model_control_test.py | galv/server | 071eb2c6c9a8f1bba380c0e69592f50a857c5c42 | [
"BSD-3-Clause"
] | 1,482 | 2020-08-26T08:26:36.000Z | 2022-03-31T23:11:19.000Z | qa/L0_backend_python/model_control/model_control_test.py | galv/server | 071eb2c6c9a8f1bba380c0e69592f50a857c5c42 | [
"BSD-3-Clause"
] | 592 | 2020-08-26T06:09:25.000Z | 2022-03-31T00:37:41.000Z | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../../common")
import test_util as tu
import tritonclient.http as httpclient
from tritonclient.utils import *
import numpy as np
import unittest
class ExplicitModelTest(tu.TestResultCollector):
def send_identity_request(self, client, model_name):
inputs = []
inputs.append(httpclient.InferInput('INPUT0', [1, 16], "FP32"))
input0_data = np.arange(start=0, stop=16, dtype=np.float32)
input0_data = np.expand_dims(input0_data, axis=0)
inputs[0].set_data_from_numpy(input0_data)
result = client.infer(
model_name=model_name,
inputs=inputs,
outputs=[httpclient.InferRequestedOutput('OUTPUT0')])
output_numpy = result.as_numpy('OUTPUT0')
self.assertTrue(np.all(input0_data == output_numpy))
def test_model_reload(self):
model_name = "identity_fp32"
ensemble_model_name = 'simple_' + "identity_fp32"
with httpclient.InferenceServerClient("localhost:8000") as client:
for _ in range(5):
self.assertFalse(client.is_model_ready(model_name))
# Load the model before the ensemble model to make sure reloading the
# model works properly in Python backend.
client.load_model(model_name)
client.load_model(ensemble_model_name)
self.assertTrue(client.is_model_ready(model_name))
self.assertTrue(client.is_model_ready(ensemble_model_name))
self.send_identity_request(client, model_name)
self.send_identity_request(client, ensemble_model_name)
client.unload_model(ensemble_model_name)
client.unload_model(model_name)
self.assertFalse(client.is_model_ready(model_name))
self.assertFalse(client.is_model_ready(ensemble_model_name))
if __name__ == '__main__':
unittest.main()
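# Note: load_model/unload_model only succeed when the server runs with
# explicit model control (e.g. `tritonserver --model-control-mode=explicit`);
# under the default mode the server rejects these client requests.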
| 46.426667 | 85 | 0.716255 | 449 | 3,482 | 5.400891 | 0.427617 | 0.059381 | 0.042062 | 0.037113 | 0.23134 | 0.211546 | 0.183505 | 0.141856 | 0.056082 | 0.056082 | 0 | 0.012044 | 0.213096 | 3,482 | 74 | 86 | 47.054054 | 0.872993 | 0.456634 | 0 | 0.052632 | 0 | 0 | 0.04882 | 0 | 0.026316 | 0 | 0 | 0 | 0.157895 | 1 | 0.052632 | false | 0 | 0.157895 | 0 | 0.236842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dc7f5a5c998df601fc1435d5e14c66275786aee | 19,093 | py | Python | learnable_primitives/primitives.py | ianhuang0630/CSQ | 5f1fe99a8d9da73692643b3911d675dce269a03d | [
"MIT"
] | null | null | null | learnable_primitives/primitives.py | ianhuang0630/CSQ | 5f1fe99a8d9da73692643b3911d675dce269a03d | [
"MIT"
] | null | null | null | learnable_primitives/primitives.py | ianhuang0630/CSQ | 5f1fe99a8d9da73692643b3911d675dce269a03d | [
"MIT"
] | null | null | null |
import numpy as np
import torch
def fexp(x, p):
return torch.sign(x)*(torch.abs(x)**p)
def cuboid_inside_outside_function(X, shape_params, epsilon=0.25):
"""
Arguments:
----------
X: Tensor with size BxNxMx3, containing the 3D points, where B is the
batch size and N is the number of points
shape_params: Tensor with size BxMx3, containing the shape along each
axis for the M primitives
epsilon: int, the shape of the SQ along the latitude and longitude
Returns:
---------
F: Tensor with size BxNxM, containing the values of the
inside-outside function
"""
# Make sure that both tensors have the right shape
assert X.shape[0] == shape_params.shape[0] # batch size
assert X.shape[2] == shape_params.shape[1] # number of primitives
assert X.shape[-1] == 3 # 3D points
# Tensor that holds the values of the inside-outside function
F = shape_params.new_zeros(X.shape[:-1])
shape_params = shape_params.unsqueeze(1)
for i in range(3):
F += (X[:, :, :, i] / shape_params[:, :, :, i])**(2.0/epsilon)
return F**(epsilon)
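# A quick sanity sketch (hypothetical values): for one primitive with
# half-extents (1, 1, 1), a point on a face evaluates to F == 1, a point
# inside to F < 1, and a point outside to F > 1:
#
#   X = torch.tensor([[[[1.0, 0.0, 0.0]], [[0.5, 0.0, 0.0]]]])  # BxNxMx3 = 1x2x1x3
#   shapes = torch.ones(1, 1, 3)
#   cuboid_inside_outside_function(X, shapes)  # ~tensor([[[1.00], [0.25]]])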
def inside_outside_function(X, shape_params, epsilons):
"""
Arguments:
----------
X: Tensor with size BxNxMx3, containing the 3D points, where B is the
batch size and N is the number of points
shape_params: Tensor with size BxMx3, containing the shape along each
axis for the M primitives
epsilons: Tensor with size BxMx2, containing the shape along the
longitude and the latitude for the M primitives
Returns:
---------
F: Tensor with size BxNxM, containing the values of the
inside-outside function
"""
B = X.shape[0] # batch_size
N = X.shape[1] # number of points on target object
M = X.shape[2] # number of primitives
# Make sure that both tensors have the right shape
assert shape_params.shape[0] == B # batch size
assert epsilons.shape[0] == B # batch size
assert shape_params.shape[1] == M # number of primitives
assert shape_params.shape[1] == epsilons.shape[1]
assert shape_params.shape[-1] == 3 # number of shape parameters
assert epsilons.shape[-1] == 2 # number of shape parameters
assert X.shape[-1] == 3 # 3D points
# Declare some variables
a1 = shape_params[:, :, 0].unsqueeze(1) # size Bx1xM
a2 = shape_params[:, :, 1].unsqueeze(1) # size Bx1xM
a3 = shape_params[:, :, 2].unsqueeze(1) # size Bx1xM
e1 = epsilons[:, :, 0].unsqueeze(1) # size Bx1xM
e2 = epsilons[:, :, 1].unsqueeze(1) # size Bx1xM
# Add a small constant to points that are completely dead center to avoid
# numerical issues in computing the gradient
# zeros = X == 0
# X[zeros] = X[zeros] + 1e-6
X = ((X > 0).float() * 2 - 1) * torch.max(torch.abs(X), X.new_tensor(1e-6))
F = ((X[:, :, :, 0] / a1)**2)**(1./e2)
# F += ((X[:, :, :, 1] / a2)**2)**(1./e2)
F = F+((X[:, :, :, 1] / a2)**2)**(1./e2)
F = F**(e2 / e1)
# F += ((X[:, :, :, 2] / a3)**2)**(1./e1)
F = F+((X[:, :, :, 2] / a3)**2)**(1./e1)
# Sanity check to make sure that we have the expected size
assert F.shape == (B, N, M)
return F**e1
# return F
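# Sanity note: with e1 = e2 = 1 the superquadric is an ellipsoid and F reduces
# to the familiar (x/a1)^2 + (y/a2)^2 + (z/a3)^2, so surface points again give
# F == 1 (up to the small clamping applied to X above).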
def points_to_cuboid_distances(X, shape_params):
"""
Arguments:
----------
X: Tensor with size BxNxMx3, containing the 3D points, where B is the
batch size and N is the number of points
shape_params: Tensor with size BxMx3, containing the shape along each
axis for the M primitives
Returns:
---------
F: Tensor with size BxNxM, containing the distances of each point to
every primitive
"""
# Make sure that everything has the right size
assert X.shape[0] == shape_params.shape[0] # batch size
assert X.shape[2] == shape_params.shape[1] # number of primitives
assert X.shape[-1] == 3 # 3D points
# The distance between a point (x, y, z) to a cuboid with dimensions
# (a1, a2, a3) is sqrt(max(0, abs(x) - a1)^2 + max(0, abs(y) - a2)^2 +
# max(0, abs(z) - a3)^2). Technically, F=0 for all points either inside or
# on the surface of the primitive, while we only want F=0 for the points on
# the surface of the cuboid.
F = (torch.max(
X.abs() - shape_params.unsqueeze(1),
torch.zeros_like(X)
)**2).sum(-1)
return F
def euler_angles_to_rotation_matrices(angles):
"""
Arguments:
---------
angles: Tensor with size Kx3, where K is the number of Euler angles we
want to transform to rotation matrices
Returns:
-------
rotation_matrices: Tensor with size Kx3x3, that contains the computed
rotation matrices
"""
K = angles.shape[0]
# Allocate memory for a Tensor of size Kx3x3 that will hold the rotation
# matrix along the x-axis
r_x = angles.new_zeros((K, 3, 3))
r_x[:, 0, 0] = 1.0
c = torch.cos(angles[:, 0])
s = torch.sin(angles[:, 0])
r_x[torch.arange(K), 1, 1] = c
r_x[torch.arange(K), 2, 2] = c
r_x[torch.arange(K), 1, 2] = -s
r_x[torch.arange(K), 2, 1] = s
# Similar for the rotation matrices along the y-axis and z-axis
r_y = angles.new_zeros((K, 3, 3))
r_y[:, 1, 1] = 1.0
c = torch.cos(angles[:, 1])
s = torch.sin(angles[:, 1])
r_y[torch.arange(K), 0, 0] = c
r_y[torch.arange(K), 2, 2] = c
r_y[torch.arange(K), 2, 0] = -s
r_y[torch.arange(K), 0, 2] = s
r_z = angles.new_zeros((K, 3, 3))
r_z[:, 2, 2] = 1.0
c = torch.cos(angles[:, 2])
s = torch.sin(angles[:, 2])
r_z[torch.arange(K), 0, 0] = c
r_z[torch.arange(K), 1, 1] = c
r_z[torch.arange(K), 0, 1] = -s
r_z[torch.arange(K), 1, 0] = s
return r_z.bmm(r_y.bmm(r_x))
def quaternions_to_rotation_matrices(quaternions):
"""
Arguments:
---------
quaternions: Tensor with size Kx4, where K is the number of quaternions
we want to transform to rotation matrices
Returns:
-------
rotation_matrices: Tensor with size Kx3x3, that contains the computed
rotation matrices
"""
K = quaternions.shape[0]
# Allocate memory for a Tensor of size Kx3x3 that will hold the rotation
# matrix along the x-axis
R = quaternions.new_zeros((K, 3, 3))
# A unit quaternion is q = w + xi + yj + zk
xx = quaternions[:, 1]**2
yy = quaternions[:, 2]**2
zz = quaternions[:, 3]**2
ww = quaternions[:, 0]**2
n = (ww + xx + yy + zz).unsqueeze(-1)
s = quaternions.new_zeros((K, 1))
s[n != 0] = 2 / n[n != 0]
xy = s[:, 0] * quaternions[:, 1] * quaternions[:, 2]
xz = s[:, 0] * quaternions[:, 1] * quaternions[:, 3]
yz = s[:, 0] * quaternions[:, 2] * quaternions[:, 3]
xw = s[:, 0] * quaternions[:, 1] * quaternions[:, 0]
yw = s[:, 0] * quaternions[:, 2] * quaternions[:, 0]
zw = s[:, 0] * quaternions[:, 3] * quaternions[:, 0]
xx = s[:, 0] * xx
yy = s[:, 0] * yy
zz = s[:, 0] * zz
idxs = torch.arange(K).to(quaternions.device)
R[idxs, 0, 0] = 1 - yy - zz
R[idxs, 0, 1] = xy - zw
R[idxs, 0, 2] = xz + yw
R[idxs, 1, 0] = xy + zw
R[idxs, 1, 1] = 1 - xx - zz
R[idxs, 1, 2] = yz - xw
R[idxs, 2, 0] = xz - yw
R[idxs, 2, 1] = yz + xw
R[idxs, 2, 2] = 1 - xx - yy
return R
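# Sanity sketch (hypothetical values): the identity quaternion (w=1, x=y=z=0)
# should map to the 3x3 identity matrix:
#
#   q = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
#   quaternions_to_rotation_matrices(q)  # ~torch.eye(3)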
def transform_to_primitives_centric_system(X, translations, rotation_angles):
"""
Arguments:
----------
X: Tensor with size BxNx3, containing the 3D points, where B is the
batch size and N is the number of points
translations: Tensor with size BxMx3, containing the translation
vectors for the M primitives
rotation_angles: Tensor with size BxMx4 containing the 4 quaternion
values for the M primitives
Returns:
--------
X_transformed: Tensor with size BxNxMx3 containing the N points
transformed in the M primitive centric coordinate
systems.
"""
# Make sure that all tensors have the right shape
assert X.shape[0] == translations.shape[0]
assert translations.shape[0] == rotation_angles.shape[0]
assert translations.shape[1] == rotation_angles.shape[1]
assert X.shape[-1] == 3
assert translations.shape[-1] == 3
assert rotation_angles.shape[-1] == 4
# Subtract the translation and get X_transformed with size BxNxMx3
X_transformed = X.unsqueeze(2) - translations.unsqueeze(1)
# R = euler_angles_to_rotation_matrices(rotation_angles.view(-1, 3)).view(
R = quaternions_to_rotation_matrices(rotation_angles.view(-1, 4)).view(
rotation_angles.shape[0], rotation_angles.shape[1], 3, 3
)
# Let as denote a point x_p in the primitive-centric coordinate system and
# its corresponding point in the world coordinate system x_w. We denote the
# transformation from the point in the world coordinate system to a point
# in the primitive-centric coordinate system as x_p = R(x_w - t)
X_transformed = R.unsqueeze(1).matmul(X_transformed.unsqueeze(-1))
X_signs = (X_transformed > 0).float() * 2 - 1
X_abs = X_transformed.abs()
X_transformed = X_signs * torch.max(X_abs, X_abs.new_tensor(1e-5))
return X_transformed.squeeze(-1)
def transform_to_world_coordinates_system(X_SQ, translations, rotation_angles):
"""
Arguments:
----------
X_SQ: Tensor with size BxMxSx3, containing the 3D points, where B is
the batch size, M is the number of primitives and S is the number
of points on each primitive-centric system
translations: Tensor with size BxMx3, containing the translation
vectors for the M primitives
    rotation_angles: Tensor with size BxMx4 containing the 4 quaternion values
for the M primitives
Returns:
--------
    X_SQ_w: Tensor with size BxMxSx3 containing the S points
transformed in the M primitive centric coordinate
systems.
"""
# Make sure that all tensors have the right shape
assert X_SQ.shape[0] == translations.shape[0]
assert translations.shape[0] == rotation_angles.shape[0]
assert translations.shape[1] == rotation_angles.shape[1]
assert X_SQ.shape[1] == translations.shape[1]
assert X_SQ.shape[-1] == 3
assert translations.shape[-1] == 3
assert rotation_angles.shape[-1] == 4
# Compute the rotation matrices to every primitive centric coordinate
# system (R has size BxMx3x3)
R = quaternions_to_rotation_matrices(rotation_angles.view(-1, 4)).view(
rotation_angles.shape[0], rotation_angles.shape[1], 3, 3
)
# We need the R.T to get the rotation matrix from the primitive-centric
# coordinate system to the world coordinate system.
R_t = torch.einsum("...ij->...ji", (R,))
assert R.shape == R_t.shape
    # Use R_t here: the forward transform applies R to (x_w - t), so the
    # inverse mapping (primitive-centric -> world) applies R^T.
    X_SQ_w = R_t.unsqueeze(2).matmul(X_SQ.unsqueeze(-1))
X_SQ_w = X_SQ_w.squeeze(-1) + translations.unsqueeze(2)
return X_SQ_w
def deform(X, shape_params, tapering_params, bending_params=None):
"""
Arguments:
----------
X: Tensor with size BxMxSx3 containing the S points
sampled on the surfaces of each SQ
shape_params: Tensor with size BxMx3, containing the shape along each
axis for the M primitives
tapering_params: Tensor with size BxMx2, containing the tapering_params
for every primitive
bending_params: Tensor with size BxMx2, containing the bending_params
for every primitive
Returns:
--------
    X_deformed: Tensor with size BxMxSx3 containing the S points
transformed in the M primitive centric coordinate
systems after the deformations.
"""
B, M, S, _ = X.shape
# Make sure that all tensors have the right shape
assert X.shape[0] == shape_params.shape[0] # batch size
assert X.shape[0] == tapering_params.shape[0] # batch size
assert shape_params.shape[-1] == 3
assert tapering_params.shape[-1] == 2
assert X.shape[1] == shape_params.shape[1]
assert X.shape[1] == tapering_params.shape[1]
# Compute the two linear tapering functions
K = tapering_params / shape_params[:, :, -1].unsqueeze(-1)
assert tapering_params.shape == K.shape
f = K.unsqueeze(2) * X[:, :, :, -1].unsqueeze(-1) + 1.0
assert f.shape == (B, M, S, 2)
f = torch.cat([
f,
f.new_ones(B, M, S, 1)
], -1)
assert f.shape == X.shape
X_d = X * f
X_d = apply_bending(X_d, bending_params)
return X_d
def apply_bending(X, bending_params):
"""
Arguments:
----------
X: Tensor with size BxMxSx3 containing the S points
sampled on the surfaces of each SQ
bending_params: Tensor with size BxMx2, containing the bending_params
for every primitive
Returns:
--------
    X_d: Tensor with size BxMxSx3 containing the S points
transformed in the M primitive centric coordinate
systems after the deformations.
"""
# If there no bending params specified return the input as is
if bending_params is None:
return X
B, M, S, _ = X.shape
# Make sure that all tensors have the right shape
assert X.shape[0] == bending_params.shape[0] # batch size
assert bending_params.shape[-1] == 2
# Apply the bending operation
bending_params = bending_params.unsqueeze(2) # BXMX2 -> BxMx1x2
k = bending_params[:, :, :, 0].unsqueeze(-1) # BxMx1x1
a = bending_params[:, :, :, 1].unsqueeze(-1) # BxMx1x1
b = torch.atan2(X[:, :, :, 1].unsqueeze(-1), X[:, :, :, 0].unsqueeze(-1))
assert b.shape == (B, M, S, 1)
    # Circular (Barr-style) bending following Jaklic et al.: project (x, y)
    # onto the bending plane at angle a, then bend around a circle of radius
    # 1/k. The original body referenced X_d before it was defined and mixed
    # tensor shapes; using the undeformed points X throughout is an assumption
    # about the intended formula.
    r = torch.sqrt(
        X[:, :, :, 0].unsqueeze(-1)**2 + X[:, :, :, 1].unsqueeze(-1)**2
    ) * torch.cos(a - b)
    assert r.shape == (B, M, S, 1)
    k_inv = 1 / k  # BxMx1x1, the bending radius
    gamma = X[:, :, :, -1].unsqueeze(-1) * k  # bend angle: arc length z times curvature k
    R = k_inv - (k_inv - r) * torch.cos(gamma)
    assert R.shape == (B, M, S, 1)
    X_d = X.new_zeros(X.shape)
    X_d[:, :, :, 0] = X[:, :, :, 0] + ((R - r) * torch.cos(a)).squeeze(-1)
    X_d[:, :, :, 1] = X[:, :, :, 1] + ((R - r) * torch.sin(a)).squeeze(-1)
    X_d[:, :, :, 2] = ((k_inv - r) * torch.sin(gamma)).squeeze(-1)
return X_d
def distance(F, shape_params=None, use_chamfer=False):
"""
Arguments:
----------
F: Tensor of size BxNxM, with the values of the inside-outside function
for the N points w.r.t. the M primitives
shape_params: Tensor with size BxMx3, containing the shape along each
axis for the M primitives
Returns:
--------
C: Tensor of size BxNxM, with the distance between points and
primitives
primitive_idxs: Tensor of size BxNxM, with the indices of the
primitives in the original tensor F
"""
# Minimization of the distances between points and primitives
if use_chamfer:
C = (F-1.0)**2.0
else:
a1a2a3 = torch.sqrt(shape_params.prod(-1)).unsqueeze(1)
# C = torch.max(a1a2a3*(F - 1.0), torch.zeros_like(F))
# C = torch.max(torch.sqrt(F) - 1.0, torch.zeros_like(F))
C = torch.max((F - 1.0), torch.zeros_like(F))
return torch.sort(C, dim=-1)
def ray_plane_intersections(P, V, normals, exp1, exp2):
"""
    Find the intersections between a set of rays and a set of planes. A ray is
    defined by two points and a plane by a point and a normal.
    We want to compute
rs = n (Vo - Po)
-----------
n (P1 - Po)
n and Vo define the plane and Po and P1 the ray
Arguments:
----------
P: Tensor of size BxMx?x3 containing the start of each ray (P1 - Po)
V: Tensor of size BxMxSxNx3 with the differences between the ray_starts
and the points of the planes (Vo - Po)
    normals: Tensor of size BxMx?x3 containing the N normals transformed in the M
primitive-centric coordinate systems
Returns:
--------
r: Tensor of size BxMxSxN with the squared_distances
"""
B, M, S, N, _ = V.shape
t1 = torch.einsum(exp1, [normals, V])
t2 = torch.einsum(exp2, [normals, P])
rs = torch.div(t1, t2)
assert rs.shape == (B, M, S, N)
return torch.pow(rs, 2)
def beta_stirling(x, y):
sqrt2pi = float(np.sqrt(2*np.pi))
return sqrt2pi * (x**(x-0.5) * y**(y-0.5)) / (x+y)**(x+y-0.5)
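# beta_stirling approximates the Beta function via Stirling's formula; it is
# used by sq_volumes below. A quick check against the exact value (a sketch):
#
#   from math import gamma
#   exact = gamma(2.0) * gamma(3.0) / gamma(5.0)  # B(2, 3) = 1/12 ~ 0.0833
#   beta_stirling(2.0, 3.0)                       # ~0.079, within a few percent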
def sq_volumes(parameters):
a1a2a3 = parameters[3].view(-1, 3).prod(-1)
e = parameters[4].view(-1, 2)
e1 = e[:, 0]
e2 = e[:, 1]
e1e2 = e.prod(-1)
b1 = beta_stirling(e1/2 + 1, e1)
b2 = beta_stirling(e2/2, e2/2)
volumes = 2 * a1a2a3 * e1e2 * b1 * b2
return volumes
def sq_areas(shapes, epsilons):
"""Approximate area of the superquadric.
We use Knud Thomsen's formula for ellipsoids.
"""
p = 1.6075
a = shapes[:, :, 0]
b = shapes[:, :, 1]
c = shapes[:, :, 2]
return 4 * np.pi * (((a*b)**p + (a*c)**p + (b*c)**p)/3)**(1/p)
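# For a sphere (a == b == c == r) Knud Thomsen's formula is exact: each pair
# term (a*b)**p equals r**(2*p), so the expression reduces to 4*pi*r**2.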
def sample_points_inside_primitives(X_SQ, N, rotations, translations):
"""Sample points inside the primitives, given S points on their surface
Arguments:
----------
X_SQ: Tensor of size BxMxSx3 containing S points sampled on the surface
of each primitive
rotations: Tensor of size BxMx4 containing the quaternions of the SQs
    translations: Tensor of size BxMx3 containing the translation vectors
of the SQs
N: number of points to be generated internally in each primitive
Returns:
--------
X_world: Tensor of size BxMxNx3 containing N points sampled uniformly
inside and on the surface of the SQs
"""
B, M, S, _ = X_SQ.shape
assert rotations.shape == (B, M, 4)
assert translations.shape == (B, M, 3)
# Create points inside the primitives
device = X_SQ.device
    # Floor division keeps these index tensors integer-typed ('/' on integer
    # tensors yields floats in recent PyTorch, breaking the indexing below)
    batch = (torch.arange(B*M*N) // (M*N)).view(B, M, N).to(device)
    prim = ((torch.arange(B*M*N) // N) % M).view(B, M, N).to(device)
pointsA = torch.randint(0, S, (B, M, N), dtype=torch.long).to(device)
pointsB = torch.randint(0, S, (B, M, N), dtype=torch.long).to(device)
t = torch.rand(B, M, N, 1).to(device)
X_a = X_SQ[batch, prim, pointsA]
X_b = X_SQ[batch, prim, pointsB]
X = X_a + t * (X_b-X_a)
# Transform the points to world coordinates
# R = quaternions_to_rotation_matrices(rotations.view(-1, 4))
# R = R.view(B, M, 3, 3)
# X_world = X.view(B, M, N, 1, 3).matmul(R.view(B, M, 1, 3, 3))
# X_world = X_world.view(B, M, N, 3)
# X_world = X_world + translations.view(B, M, 1, 3)
X_world = transform_to_world_coordinates_system(
X,
translations,
rotations
)
assert X_world.shape == (B, M, N, 3)
return X_world
| 35.423006 | 79 | 0.59514 | 2,851 | 19,093 | 3.899684 | 0.109786 | 0.017809 | 0.039036 | 0.015291 | 0.520777 | 0.450711 | 0.369221 | 0.325418 | 0.320921 | 0.319032 | 0 | 0.035194 | 0.273765 | 19,093 | 538 | 80 | 35.488848 | 0.766623 | 0.467292 | 0 | 0.116883 | 0 | 0 | 0.00128 | 0 | 0 | 0 | 0 | 0 | 0.199134 | 1 | 0.069264 | false | 0 | 0.008658 | 0.004329 | 0.151515 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dc85eed480d432c4899171384c4da5e6df7a236 | 6,206 | py | Python | spring semester 2 course/computer_graphics_labs/3D OpenGL/lab.py | andrwnv/study-progs | 902c4ede0b273d91fd87c93e861b40439847c1a9 | [
"MIT"
] | 4 | 2020-01-02T08:38:55.000Z | 2020-11-12T19:46:22.000Z | spring semester 2 course/computer_graphics_labs/3D OpenGL/lab.py | andrwnv/StudyProgs | 902c4ede0b273d91fd87c93e861b40439847c1a9 | [
"MIT"
] | null | null | null | spring semester 2 course/computer_graphics_labs/3D OpenGL/lab.py | andrwnv/StudyProgs | 902c4ede0b273d91fd87c93e861b40439847c1a9 | [
"MIT"
] | null | null | null | from PyQt5.QtOpenGL import *
from OpenGL.GL import *
from PyQt5 import QtWidgets, QtCore
class FigureWidget(QGLWidget):
""" Main OpenGL widget. """
def __init__(self, parent):
super(FigureWidget, self).__init__()
self.setMinimumSize(1280, 720)
self.__rotate_angle_y = 70
self.__rotate_angle_x = 15
self.__rotate_angle_z = 0
self.__zoom_coefficient = -5
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.__timer = QtCore.QTimer()
self.__timer.setInterval(30)
self.__timer.timeout.connect(lambda: self.idle())
self.__timer.start()
def idle(self):
self.__rotate_angle_y += 0.5
self.update()
def paintGL(self) -> None:
""" Draw scene. """
glClear(GL_COLOR_BUFFER_BIT)
glClearColor(0, 0, 0, 1.0)
glColor3f(1.0, 1.0, 1.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glFrustum(-3, 3, -2, 2, 1.2, 40)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glTranslatef(0, 0, self.__zoom_coefficient)
glRotatef(self.__rotate_angle_x, 1, 0, 0)
glRotatef(self.__rotate_angle_y, 0, 1, 0)
glRotatef(self.__rotate_angle_z, 0, 0, 1)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glColor3f(1.0, 1.0, 1.0)
glBegin(GL_LINES)
i: float = -2.5
# Draw coordinate grid.
while i <= 2.5:
glVertex3f(i, -4, 2.5)
glVertex3f(i, -4, -2.5)
glVertex3f(2.5, -4, i)
glVertex3f(-2.5, -4, i)
i += 0.25
glEnd()
# Draw up pyramid.
glBegin(GL_TRIANGLE_STRIP)
# 1st face.
glColor3f(1, 0, 1)
glVertex3f(0, 3, 0)
glColor3f(0.5, 0, 0.5)
glVertex3f(-1, 1, -1)
glColor3f(0.5, 1, 1)
glVertex3f(-1, 1, 1)
# 2nd face.
glColor3f(0, 0, 1)
glVertex3f(0, 3, 0)
glColor3f(0.5, 0.5, 1)
glVertex3f(-1, 1, 1)
glColor3f(0.5, 0.3, 0.2)
glVertex3f(1, 1, 1)
        # 3rd face.
glColor3f(0, 1, 1)
glVertex3f(0, 3, 0)
glColor3f(0.5, 0, 0.5)
glVertex3f(1, 1, 1)
glColor3f(0, 1, 1)
glVertex3f(1, 1, -1)
        # 4th face.
glColor3f(0, 1, 0)
glVertex3f(0, 3, 0)
glColor3f(0.5, 0.7, 0.3)
glVertex3f(1, 1, -1)
glColor3f(0.1, 0.4, 0.3)
glVertex3f(-1, 1, -1)
glEnd()
# Draw cube.
glBegin(GL_QUAD_STRIP)
glColor3f(1, 1, 0)
glVertex3f(-1, 1, -1)
glColor3f(0.5, 1, 0)
glVertex3f(-1, 1, 1)
glColor3f(1, 0, 1)
glVertex3f(-1, -1, -1)
glVertex3f(-1, -1, 1)
glColor3f(1, 0, 0)
glVertex3f(-1, 1, 1)
glColor3f(1, 1, 0)
glVertex3f(1, 1, 1)
glVertex3f(-1, -1, 1)
glColor3f(0.5, 1, 0)
glVertex3f(1, -1, 1)
glColor3f(0, 0.5, 1)
glVertex3f(1, 1, 1)
glVertex3f(1, 1, -1)
glColor3f(1, 0, 1)
glVertex3f(1, -1, 1)
glColor3f(0, 1, 0)
glVertex3f(1, -1, -1)
glColor3f(1, 0, 1)
glVertex3f(1, 1, -1)
glColor3f(0.5, 1, 0)
glVertex3f(-1, 1, -1)
glColor3f(1, 1, 0)
glVertex3f(1, -1, -1)
glVertex3f(-1, -1, -1)
glEnd()
# Draw down pyramid.
glBegin(GL_TRIANGLE_STRIP)
# 1st face.
glColor3f(1, 0, 1)
glVertex3f(0, -3, 0)
glColor3f(0.2, 0.7, 1)
glVertex3f(-1, -1, -1)
glColor3f(0.1, 0.7, 0.8)
glVertex3f(-1, -1, 1)
# 2nd face.
glColor3f(0, 0, 1)
glVertex3f(0, -3, 0)
glColor3f(0.1, 0, 0.8)
glVertex3f(-1, -1, 1)
glColor3f(0.8, 0, 0.8)
glVertex3f(1, -1, 1)
        # 3rd face.
glColor3f(0, 1, 1)
glVertex3f(0, -3, 0)
glColor3f(0.1, 0.7, 0.8)
glVertex3f(1, -1, 1)
glColor3f(0.2, 0.7, 1)
glVertex3f(1, -1, -1)
        # 4th face.
glColor3f(0, 1, 0)
glVertex3f(0, -3, 0)
glColor3f(0, 0, 1)
glVertex3f(1, -1, -1)
glColor3f(0.8, 0, 0.8)
glVertex3f(-1, -1, -1)
glEnd()
glFlush()
def resizeGL(self, w, h) -> None:
""" Resize event. """
glViewport(50, 50, w - 100, h - 100)
def initializeGL(self) -> None:
""" Init OpenGL. """
# glEnable(GL_CULL_FACE)
# glCullFace(GL_FRONT)
# Enable depth.
glEnable(GL_DEPTH_TEST)
glClearColor(0.1, 0.39, 0.88, 1.0)
glColor3f(1.0, 1.0, 1.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glFrustum(-2, 2, -1.5, 1.5, 1, 40)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glTranslatef(0, 0, -3)
glRotatef(70, 0, 1, 0)
glDisable(GL_BLEND)
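        # With GL_DEPTH_TEST enabled above, the cube and the two pyramids
        # drawn in paintGL occlude each other by depth rather than by
        # submission order, so the strips can be issued in any sequence.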
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_A:
self.__rotate_angle_y -= 0.5
self.update()
elif event.key() == QtCore.Qt.Key_D:
self.__rotate_angle_y += 0.5
self.update()
elif event.key() == QtCore.Qt.Key_W:
self.__rotate_angle_x += 0.5
self.update()
elif event.key() == QtCore.Qt.Key_S:
self.__rotate_angle_x -= 0.5
self.update()
elif event.key() == QtCore.Qt.Key_Q:
self.__rotate_angle_z += 0.5
self.update()
elif event.key() == QtCore.Qt.Key_E:
self.__rotate_angle_z -= 0.5
self.update()
elif event.key() == QtCore.Qt.Key_Plus:
self.__zoom_coefficient += 0.5
self.update()
elif event.key() == QtCore.Qt.Key_Minus:
self.__zoom_coefficient -= 0.5
self.update()
class App(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__glWidget = FigureWidget(self)
self.setCentralWidget(self.__glWidget)
if __name__ == '__main__':
app = QtWidgets.QApplication(['3D OpenGL'])
window = App()
window.show()
app.exec_()
| 24.626984 | 58 | 0.508701 | 824 | 6,206 | 3.667476 | 0.163835 | 0.046989 | 0.127068 | 0.137657 | 0.656188 | 0.603574 | 0.591661 | 0.568829 | 0.487756 | 0.487095 | 0 | 0.12086 | 0.34805 | 6,206 | 251 | 59 | 24.7251 | 0.62605 | 0.045923 | 0 | 0.548571 | 0 | 0 | 0.00289 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.017143 | 0 | 0.068571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dca2da6c8645bf9fdd19dabbdf05a6549cdbc55 | 4,220 | py | Python | mate/environments/environment.py | thomyphan/emergent-cooperation | 2406b8679ddbebba745f1026ca3689f1ba181e28 | [
"MIT"
] | null | null | null | mate/environments/environment.py | thomyphan/emergent-cooperation | 2406b8679ddbebba745f1026ca3689f1ba181e28 | [
"MIT"
] | null | null | null | mate/environments/environment.py | thomyphan/emergent-cooperation | 2406b8679ddbebba745f1026ca3689f1ba181e28 | [
"MIT"
] | null | null | null | import numpy
class Environment:
def __init__(self, params) -> None:
self.domain_value_labels = params["domain_value_labels"]
self.observation_dim = params["observation_dim"]
self.nr_agents = params["nr_agents"]
self.nr_actions = params["nr_actions"]
self.time_limit = params["time_limit"]
self.gamma = params["gamma"]
self.time_step = 0
self.sent_gifts = numpy.zeros(self.nr_agents)
self.discounted_returns = numpy.zeros(self.nr_agents)
self.undiscounted_returns = numpy.zeros(self.nr_agents)
self.domain_counts = numpy.zeros(len(self.domain_value_labels))
        self.last_joint_action = -numpy.ones(self.nr_agents, dtype=int)
"""
Performs the joint action in order to change the environment.
Returns the reward for each agent in a list sorted by agent ID.
"""
def perform_step(self, joint_action):
assert not self.is_done(), "Episode terminated at time step {}. Please, reset before calling 'step'."\
.format(self.time_step)
return numpy.zeros(self.nr_agents), {}
"""
Indicates if an episode is done and the environments needs
to be reset.
"""
def is_done(self):
return self.time_step >= self.time_limit
def action_as_vector(self, action):
if action < self.nr_actions:
vector = numpy.zeros(self.nr_actions)
if action >= 0:
vector[action] = 1
else:
vector = numpy.ones(self.nr_actions)
return vector
"""
Performs a joint action to change the state of the environment.
Returns the joint observation, the joint reward, a done flag,
and other optional information (e.g., logged data).
Note: The joint action must be a list ordered according to the agent ID!.
"""
def step(self, joint_action):
assert len(joint_action) == self.nr_agents, "Length of 'joint_action' is {}, expected {}"\
.format(len(joint_action), self.nr_agents)
assert not self.is_done(), "Episode terminated at time step {}. Please, reset before calling 'step'."\
.format(self.time_step)
rewards, infos = self.perform_step(joint_action)
for i, a in enumerate(joint_action):
self.last_joint_action[i] = a
if a >= self.nr_actions:
self.sent_gifts[i] += 1
assert len(rewards) == self.nr_agents, "Length of 'rewards' is {}, expected {}"\
.format(len(rewards), self.nr_agents)
observations = self.joint_observation()
assert len(observations) == self.nr_agents, "Length of 'observations' is {}, expected {}"\
.format(len(observations), self.nr_agents)
self.time_step += 1
self.domain_counts[0] += 1.0
self.undiscounted_returns += rewards
self.discounted_returns += (self.gamma**self.time_step)*rewards
if "neighbor_agents" not in infos:
infos["neighbor_agents"] = [[j for j in range(self.nr_agents) if j != i] for i in range(self.nr_agents)]
return observations, rewards, self.is_done(), infos
def get_index(self, label):
return self.domain_value_labels.index(label)
"""
The local observation for a specific agent. Only visible for
the corresponding agent and private to others.
"""
def local_observation(self, agent_id):
pass
"""
Returns the observations of all agents in a listed sorted by agent ids.
"""
def joint_observation(self):
return [numpy.array(self.local_observation(i)).reshape(self.observation_dim) for i in range(self.nr_agents)]
"""
Returns a high-level value which is domain-specific.
"""
def domain_values(self):
return self.domain_counts
def domain_value_debugging_indices(self):
return 0,1
"""
Re-Setup of the environment for a new episode.
"""
def reset(self):
self.time_step = 0
self.discounted_returns[:] = 0
self.undiscounted_returns[:] = 0
self.last_joint_action[:] = -1
self.domain_counts[:] = 0
self.sent_gifts[:] = 0
return self.joint_observation()
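# A minimal concrete subclass sketch (hypothetical domain) showing the two
# hooks a new environment has to override; everything else is inherited:
class ConstantRewardEnvironment(Environment):
    """Toy environment: zero observations and reward 1 for every agent."""

    def perform_step(self, joint_action):
        super(ConstantRewardEnvironment, self).perform_step(joint_action)
        return numpy.ones(self.nr_agents), {}

    def local_observation(self, agent_id):
        return numpy.zeros(self.observation_dim)

# Example construction (hypothetical parameter values):
#   env = ConstantRewardEnvironment({
#       "domain_value_labels": ["time_steps"], "observation_dim": 4,
#       "nr_agents": 2, "nr_actions": 3, "time_limit": 100, "gamma": 0.95})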
| 38.363636 | 116 | 0.638389 | 555 | 4,220 | 4.684685 | 0.225225 | 0.046154 | 0.069231 | 0.030769 | 0.250385 | 0.143846 | 0.113846 | 0.070769 | 0.070769 | 0.070769 | 0 | 0.005099 | 0.256398 | 4,220 | 109 | 117 | 38.715596 | 0.823454 | 0 | 0 | 0.086957 | 0 | 0 | 0.109352 | 0 | 0 | 0 | 0 | 0 | 0.072464 | 1 | 0.15942 | false | 0.014493 | 0.014493 | 0.072464 | 0.318841 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dcb21a58f1185a80b68bc21fae747a3384bef5d | 2,409 | py | Python | craynn/viz/imgs.py | maxim-borisyak/craynn | fceabd33f5969033fb3605f894778c42c42f3e08 | [
"MIT"
] | null | null | null | craynn/viz/imgs.py | maxim-borisyak/craynn | fceabd33f5969033fb3605f894778c42c42f3e08 | [
"MIT"
] | null | null | null | craynn/viz/imgs.py | maxim-borisyak/craynn | fceabd33f5969033fb3605f894778c42c42f3e08 | [
"MIT"
] | null | null | null | import os
import os.path as osp
__all__ = [
'pack_images',
'plot_and_pack',
'save_images'
]
def pack_images(output, imgs, vmax=1024.0, archive=None, name="image_{index}.png", **data):
  # NOTE: scipy.misc.toimage exists only in scipy<1.2 (and needs Pillow);
  # later SciPy versions require porting this to PIL.Image.fromarray.
  from scipy.misc import toimage
  # Create the output directory if it does not already exist
  os.makedirs(output, exist_ok=True)
for i in range(imgs.shape[0]):
args = dict([ (k, v[i]) for k, v in data.items()])
args['index'] = i
path = osp.join(output, name.format(**args))
toimage(imgs[i], cmin=0.0, cmax=vmax, channel_axis=0).save(path)
if archive is not None:
import subprocess as sb
if sb.check_call(['tar', '-czvf', archive, output]):
os.removedirs(output)
def save_images(cycle, version, original, transformed, outdir='output', pack=True):
import matplotlib.pyplot as plt
import os
import os.path as osp
path = osp.join(outdir, 'images_%012d_%s' % (cycle, str(version)))
os.system('rm -rf %s' % path)
os.system('mkdir -p %s' % path)
plt.ioff()
for i in range(original.shape[0]):
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(1, 2, 1)
ax.grid('off')
im = ax.imshow(original[i, 0], interpolation='None', cmap=plt.cm.gray)
cb = fig.colorbar(im)
ax = fig.add_subplot(1, 2, 2)
ax.grid('off')
im = ax.imshow(transformed[i, 0], interpolation='None', cmap=plt.cm.gray)
cb = fig.colorbar(im)
plt.savefig(osp.join(path, 'test_%06d.png' % i), dpi=80)
plt.close(fig)
plt.ion()
if pack:
tar_path = osp.join(outdir, 'test_images_%s.tar.gz' % version)
os.system('tar -czf %s %s ' % (tar_path, path))
def plot_and_pack(imgs, outdir='output', pack=True, name="image_{index}.png",
                  figsize=(5, 4), cmap='Greys', **data):
import matplotlib.pyplot as plt
import os
import os.path as osp
os.system('rm -rf %s' % outdir)
os.system('mkdir -p %s' % outdir)
plt.ioff()
for i in range(imgs.shape[0]):
fig = plt.figure(figsize=figsize)
plt.grid('off')
plt.imshow(imgs[i, 0], interpolation='None', cmap=cmap)
plt.colorbar()
args = dict([(k, v[i]) for k, v in data.items()])
args['index'] = i
filename = name.format(**args)
plt.savefig(osp.join(outdir, filename), dpi=80)
plt.close(fig)
plt.ion()
if pack:
basedir, cwd = osp.split(outdir)
tar_path = osp.join(basedir, '%s.tar.gz' % cwd)
print('Archive', tar_path)
return os.system('tar -czf %s %s ' % (tar_path, outdir))
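# A minimal usage sketch (hypothetical data): pack ten random single-channel
# 28x28 "images" into ./demo and a demo.tar.gz archive next to it:
#
#   import numpy as np
#   imgs = np.random.rand(10, 1, 28, 28)
#   plot_and_pack(imgs, outdir='demo', name='image_{index}.png')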
| 26.184783 | 86 | 0.62308 | 384 | 2,409 | 3.835938 | 0.296875 | 0.032587 | 0.029871 | 0.032587 | 0.448744 | 0.395112 | 0.299389 | 0.253904 | 0.222675 | 0.184657 | 0 | 0.017672 | 0.201328 | 2,409 | 91 | 87 | 26.472527 | 0.747921 | 0 | 0 | 0.371429 | 0 | 0 | 0.105438 | 0.008717 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042857 | false | 0.014286 | 0.142857 | 0 | 0.2 | 0.014286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dd01c14c4996542c62dcb370ac32dc38f1f238a | 3,922 | py | Python | Chapter 09/exercise9_10/exercise9_10.py | nescience8/starting-out-with-python-global-4th-edition | c16f93b7cbb4c7ae7b57653a7190bf192fe6b472 | [
"MIT"
] | 35 | 2019-05-03T00:30:31.000Z | 2022-01-20T06:57:25.000Z | Chapter 09/exercise9_10/exercise9_10.py | nescience8/starting-out-with-python-global-4th-edition | c16f93b7cbb4c7ae7b57653a7190bf192fe6b472 | [
"MIT"
] | null | null | null | Chapter 09/exercise9_10/exercise9_10.py | nescience8/starting-out-with-python-global-4th-edition | c16f93b7cbb4c7ae7b57653a7190bf192fe6b472 | [
"MIT"
] | 22 | 2020-05-13T21:20:02.000Z | 2021-12-21T08:35:59.000Z | ##Write a program that reads the contents of a text file. The program should create a diction-
##ary in which the key-value pairs are described as follows:
##• Key. The keys are the individual words found in the file.
##• Values. Each value is a list that contains the line numbers in the file where the word
##(the key) is found.
##For example, suppose the word “robot” is found in lines 7, 18, 94, and 138. The dictionary
##would contain an element in which the key was the string “robot”, and the value was a list
##containing the numbers 7, 18, 94, and 138.
##Once the dictionary is built, the program should create another text file, known as a word
##index, listing the contents of the dictionary. The word index file should contain an
##alphabetical listing of the words that are stored as keys in the dictionary, along with the line
##numbers where the words appear in the original file. Figure 9-1 shows an example of an
##original text file ( Kennedy.txt ) and its index file ( index.txt ).
# Open a text file.
# Read the contents
# Every time a new word appears, create a new key whose value is a list with a
# single element: the line where the word appeared.
# Every time a word that already exists appears, add to the list of that key,
# the line that appeared.
# Create another text file
# Sort the keys alphabetically
# Write each word followed by the line numbers where it appeared, separated by spaces.
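# For example, after reading a file where "robot" occurs on lines 7, 18, 94
# and 138, the dictionary holds:
#     {'robot': ['7', '18', '94', '138'], ...}
# and index.txt gets the line:  robot 7 18 94 138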
def get_textname():
    # Ask the user for the name of the text file to create an index for.
name = input('For which file would you like me to create a word index? ')
return name
def create_dictionary(filename):
infile = open(filename, 'r', encoding='utf8') # Open the file
word_index = dict() # Create an empty dictionary to store the words and line numbers
    counter = 0 # Line counter tracking which line of the file we are on
for line in infile:
wordlinelist = line.rstrip('\n').split() # remove \n and split the line into words.
counter += 1 # advance the counter to reflect the line we are in
for word in wordlinelist: # for every word in the line
if word not in word_index: # If we haven't yet encountered it,
word_index[word] = [str(counter)] # Start a key/value pair with value being a list with the line number.
            else: # Otherwise the word was seen before,
word_index[word].append(str(counter)) # Append the line number to the list in the value
infile.close() # Close the file since we are done reading.
return word_index
def create_index_file(word_index):
    outfile = open('index.txt', 'w', encoding='utf8') # create a file to store the word index.
    a_list = [] # Create a list to store the word index.
    index = 0 # Create a counter to control the index we are checking.
    for key in word_index.keys(): # Every word found in the dictionary
        a_list.append(key) # Append it to the a_list
        for value in word_index[key]: # Every value found for that word/key
            a_list[index] = a_list[index] + ' ' + value # Add it to the a_list, in the same index, separated by a space
index += 1 # Advance the index by one to continue to the next word.
a_list.sort() # Sort the finished list.
for element in a_list:
outfile.write(element + '\n') # Extract the list, element by element to the file index.txt.
outfile.close() # Close the file
def main():
print('This program creates a word index of the file you request.')
print('----------------------------------------------------------')
print()
file = get_textname() # Ask the user for the file to create an index for.
dictionary = create_dictionary(file) # Create the dictionary for the file.
    create_index_file(dictionary) # Write the word index to a file.
print('Word index is created. File name is: index.txt')
# Call the main function.
main() | 56.028571 | 120 | 0.685365 | 647 | 3,922 | 4.123648 | 0.261206 | 0.024363 | 0.011244 | 0.016492 | 0.033733 | 0.016492 | 0 | 0 | 0 | 0 | 0 | 0.007923 | 0.22769 | 3,922 | 70 | 121 | 56.028571 | 0.872235 | 0.631311 | 0 | 0 | 0 | 0 | 0.175451 | 0.041877 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0 | 0 | 0.153846 | 0.102564 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dd45ea0a29eda5d4c2a5b4693e02c4c67831b90 | 4,151 | py | Python | pynextion/hardware.py | cowo78/pynextion | 40215761bc8abbd7cc53fefa68e8b78a67b73aed | [
"Apache-2.0"
] | null | null | null | pynextion/hardware.py | cowo78/pynextion | 40215761bc8abbd7cc53fefa68e8b78a67b73aed | [
"Apache-2.0"
] | null | null | null | pynextion/hardware.py | cowo78/pynextion | 40215761bc8abbd7cc53fefa68e8b78a67b73aed | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import collections
import queue
import threading
import typing
from .constants import S_END_OF_CMD
import serial
class AbstractSerialNex(object):
INCOMING_BUFFER_SIZE = 1024 # Seems the Nextion buffer size, mentioned in official docs
MIN_SIZE_READ = len(S_END_OF_CMD) + 1 # Minimum return data size
TERMINATOR_SIZE = len(S_END_OF_CMD)
def __init__(self):
super().__init__()
self._port_mutex = threading.Lock()
# Queue of event objects
self._events = queue.Queue()
# Incoming serial buffer
self._buffer = bytearray(self.INCOMING_BUFFER_SIZE)
self._events_queue = collections.deque() # type: typing.Sequence[bytearray]
def write(self, data: bytes) -> int:
""" Raw write access to underlying transport. Threadsafe.
:returns: Number of bytes written
"""
with self._port_mutex:
nbytes = self.sp.write(data)
return nbytes
send = write
def read_all(self) -> bytes:
""" Read all buffered data. Threadsafe. """
with self._port_mutex:
data = self.sp.read_all()
return data
def read_next(self) -> bytes:
""" Read next message. Threadsafe. May return an empty array is no event is available. """
# At some point (along with editor 0.58) the Nextion firmware changed and now it returns
# an "instruction successful" everytime, even after a string or numeric data event
if self._events_queue:
return self._events_queue.pop()
buffer_size = 0
with self._port_mutex:
# Reading one byte at a time is of course inefficient, so serial.read_until is not the best option
# We know the minimal read should be 4 chars (i.e. Invalid Instruction) and must be prepared to
# partial command reads since we have no guarantee that we will always have complete commands in the buffer
if self.sp.in_waiting < self.MIN_SIZE_READ:
# Partial event, unlikely at this point
return b''
# Read bulk of data
chunk = self.sp.read(self.sp.in_waiting)
self._buffer[buffer_size:buffer_size+len(chunk)] = chunk
buffer_size = len(chunk)
while self._buffer[buffer_size - self.TERMINATOR_SIZE:buffer_size] != S_END_OF_CMD:
# Trickle until end of event
chunk = self.sp.read(1)
self._buffer[buffer_size:buffer_size+1] = chunk
buffer_size += 1
# Finished reading and we are sure we have complete event(s), now split into single events
start = 0
while True:
pos = self._buffer.find(S_END_OF_CMD, start, buffer_size)
if pos == -1:
break
self._events_queue.appendleft(self._buffer[start:pos+self.TERMINATOR_SIZE])
start = pos + self.TERMINATOR_SIZE
return self._events_queue.pop()
def clear_events_queue(self):
self._events_queue.clear()
def close(self):
return self.sp.close()
class PySerialNex(AbstractSerialNex):
def __init__(self, port_or_url: str, *args, **kwargs):
super().__init__()
self.sp = serial.serial_for_url(port_or_url, *args, **kwargs)
self.sp.reset_input_buffer()
self.sp.reset_output_buffer()
@property
def baudrate(self):
return self.sp.baudrate
@baudrate.setter
def baudrate(self, val):
self.sp.baudrate = val
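# A minimal usage sketch (port name and baud rate are assumptions; adjust for
# your wiring -- Nextion displays commonly ship at 9600 baud):
#
#   nex = PySerialNex('/dev/ttyUSB0', baudrate=9600, timeout=0.5)
#   nex.send(b'page 0' + S_END_OF_CMD)
#   print(nex.read_next())
#   nex.close()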
# TODO: rotten
class NexSerialMock(AbstractSerialNex):
def __init__(self, *args, **kwargs):
super().__init__()
def write(self, cmd):
pass
def read(self):
return None
def close(self):
print("close")
"""
# PyBoard 1.1
# https://docs.micropython.org/en/latest/pyboard/pyboard/quickref.html
# RED: VIN
# BLACK: GND
# YELLOW: X9 (Board TX)
# BLUE: X10 (Board RX)
import machine
import time
class uPyNexSerial(AbstractSerialNex):
def __init__(self, *args, **kwargs):
self.sp = machine.UART(*args, **kwargs)
"""
| 30.07971 | 119 | 0.63286 | 539 | 4,151 | 4.669759 | 0.374768 | 0.051649 | 0.041716 | 0.017878 | 0.116806 | 0.054033 | 0 | 0 | 0 | 0 | 0 | 0.006977 | 0.274874 | 4,151 | 137 | 120 | 30.29927 | 0.829236 | 0.256083 | 0 | 0.138889 | 0 | 0 | 0.001842 | 0 | 0 | 0 | 0 | 0.007299 | 0 | 1 | 0.180556 | false | 0.013889 | 0.083333 | 0.041667 | 0.472222 | 0.013889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dd4c7ac514b07c8936d2d8818ce12d988742cca | 1,762 | py | Python | scripts/compute_stats.py | mikeshuser/TopicWordMap | 7ed9df73d1b7dd8ded03361a662444c31fad70bc | [
"MIT"
] | null | null | null | scripts/compute_stats.py | mikeshuser/TopicWordMap | 7ed9df73d1b7dd8ded03361a662444c31fad70bc | [
"MIT"
] | null | null | null | scripts/compute_stats.py | mikeshuser/TopicWordMap | 7ed9df73d1b7dd8ded03361a662444c31fad70bc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Compute essential stats(freq/tf-idf) on a corpus
Dependencies:
pandas == 0.23
gensim == 3.8
"""
__author__ = "Mike Shuser"
import pickle
import numpy as np
import pandas as pd
from gensim.models import TfidfModel
from gensim.corpora import Dictionary
DATA_SRC = "../processed_corpus"
MODEL_SRC = "../modelling"
if __name__ == '__main__':
files = ["positive_text","negative_text"]
vecs = pd.read_csv(f"{MODEL_SRC}/imdb_wordvectors.csv",
index_col=[0],
na_filter=False)
for filetype in files:
with open(f"{DATA_SRC}/{filetype}.csv.bigrams.pkl", "rb") as handle:
docs = pickle.load(handle)
vocab = pd.DataFrame(index=vecs.index)
dct = Dictionary(docs)
corpus = [dct.doc2bow(line) for line in docs]
tfidf = TfidfModel(corpus)
#corpus statistics
def lookup_mentions(x):
try:
return dct.cfs[dct.token2id[x]]
except KeyError:
return 0
vocab['mentions'] = vocab.index.map(lookup_mentions)
vocab['log2_mentions'] = np.log2(vocab.mentions)
#get tf-idfs for every word in each doc, then get average per word
vocab_tfidf = {k : [] for k in vocab.index}
for i, row in enumerate(docs):
tmp = dict(tfidf[corpus[i]])
for word in row:
                if word in vocab_tfidf:
                    # TfidfModel can drop zero-weight entries, so guard the lookup
                    weight = tmp.get(dct.token2id[word])
                    if weight is not None:
                        vocab_tfidf[word].append(weight)
for k, v in vocab_tfidf.items():
vocab_tfidf[k] = np.mean(v)
vocab['avg_tfidf'] = vocab.index.map(lambda x: vocab_tfidf[x])
vocab.to_csv(f"{MODEL_SRC}/{filetype}_vocab_stats.csv")
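# A minimal sketch of inspecting the resulting stats (run after the loop above):
#
#   stats = pd.read_csv(f"{MODEL_SRC}/positive_text_vocab_stats.csv", index_col=[0])
#   print(stats.sort_values('avg_tfidf', ascending=False).head(10))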
| 28.419355 | 76 | 0.586833 | 228 | 1,762 | 4.372807 | 0.469298 | 0.060181 | 0.018054 | 0.024072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01045 | 0.293984 | 1,762 | 61 | 77 | 28.885246 | 0.790997 | 0.116345 | 0 | 0 | 0 | 0 | 0.138979 | 0.069166 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.135135 | 0 | 0.216216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dd5699562650669f491fdd18d9c1edc25f11acb | 2,937 | py | Python | app/utils/images/linalg/utils.py | vinaykakkad/audio_and_image_compression | b5f7c767429f36805262ae87e8239434569fc372 | [
"MIT"
] | 1 | 2021-11-13T11:08:24.000Z | 2021-11-13T11:08:24.000Z | app/utils/images/linalg/utils.py | neelpopat242/audio_and_image_compression | b5f7c767429f36805262ae87e8239434569fc372 | [
"MIT"
] | null | null | null | app/utils/images/linalg/utils.py | neelpopat242/audio_and_image_compression | b5f7c767429f36805262ae87e8239434569fc372 | [
"MIT"
] | 1 | 2021-11-13T11:07:54.000Z | 2021-11-13T11:07:54.000Z | import math
def print_matrix(matrix):
"""
Function to print a matrix
"""
for row in matrix:
for col in row:
print("%.3f" % col, end=" ")
print()
def rows(matrix):
"""
Returns the no. of rows of a matrix
"""
if type(matrix) != list:
return 1
return len(matrix)
def cols(matrix):
"""
Returns the no. of columns of a matrix
"""
if type(matrix[0]) != list:
return 1
return len(matrix[0])
def eye(size):
"""
Returns an identity matrix
"""
mat = list()
for r in range(size):
row = list()
for c in range(size):
if r == c:
row.append(1)
else:
row.append(0)
mat.append(row)
return mat
def pivot_index(row):
"""
Returns the index of pivot in a row
"""
counter = 0
for element in row:
if element != float(0):
return counter
counter += 1
return counter
def pivot_value(row):
"""
Returns the value of pivot in a row
"""
for element in row:
if element > math.exp(-8):
return element
return 0
def swap(matrix, index_1, index_2):
"""
Function to swap two rows
"""
x = matrix[index_1]
matrix[index_1] = matrix[index_2]
matrix[index_2] = x
def transpose(matrix):
"""
Returns the transpose of a matrix
"""
transpose_matrix = list()
for i in range(cols(matrix)):
row = list()
for j in range(rows(matrix)):
row.append(matrix[j][i])
transpose_matrix.append(row)
return transpose_matrix
def mat_multiply(a, b):
"""
Function to multiply two matrices
"""
c = [[0 for i in range(cols(b))] for j in range(rows(a))]
for i in range(rows(a)):
for j in range(cols(b)):
for k in range(rows(b)):
c[i][j] += a[i][k] * b[k][j]
return c
def mat_splice(matrix, r, c):
"""
Function which returns a matrix with the first r rows and first c
columns of the original matrix
"""
result = list()
for i in range(r):
row = matrix[i]
result.append(row[:c])
return result
def to_int(matrix):
"""
Funciton to convert the eact element of the matrix to int
"""
for row in range(rows(matrix)):
for col in range(cols(matrix)):
for j in range(3):
matrix[row][col][j] = int(matrix[row][col][j])
return matrix
def clip(matrix):
"""
Function to clip each element to the range float[0, 1]
"""
for row in range(rows(matrix)):
for col in range(cols(matrix)):
for j in range(3):
if matrix[row][col][j] > 1:
matrix[row][col][j] = 1
if matrix[row][col][j] < 0:
matrix[row][col][j] = 0
return matrix
| 17.908537 | 69 | 0.520259 | 410 | 2,937 | 3.690244 | 0.173171 | 0.074025 | 0.043622 | 0.051553 | 0.35228 | 0.170522 | 0.076669 | 0.076669 | 0.076669 | 0.076669 | 0 | 0.014316 | 0.357848 | 2,937 | 163 | 70 | 18.018405 | 0.787911 | 0.171604 | 0 | 0.207792 | 0 | 0 | 0.002224 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155844 | false | 0 | 0.012987 | 0 | 0.350649 | 0.038961 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dd7751b946690a5f79059a3575a07f2c9cb06b8 | 1,443 | py | Python | mos_ru_service/file_crypt.py | onlycska/depersonalization-of-data | d11497d0f0708496975d682ae447e97bfd9177d9 | [
"MIT"
] | null | null | null | mos_ru_service/file_crypt.py | onlycska/depersonalization-of-data | d11497d0f0708496975d682ae447e97bfd9177d9 | [
"MIT"
] | null | null | null | mos_ru_service/file_crypt.py | onlycska/depersonalization-of-data | d11497d0f0708496975d682ae447e97bfd9177d9 | [
"MIT"
] | null | null | null | import hashlib
from datetime import datetime
def salsa_20_xor_bytes():
pass
def n_string(string, n):
hash = hashlib.sha512()
hash.update(string.encode('utf-8'))
return hash.digest()[:n]
def encryption(iv: str, key: str, filename: str) -> bool:
try:
iv = n_string(iv, 8)
key = n_string(key, 32)
header_bytes = 50
with open(filename, "rb") as picture:
picture.seek(header_bytes)
picture_content = picture.read()
cipher = salsa_20_xor_bytes(picture_content, key, iv)
            with open(filename + ".encr", "wb") as encrypted_file:  # renamed: shadowed the enclosing function
                picture.seek(0)
                encrypted_file.write(picture.read(header_bytes))
                encrypted_file.write(cipher)
return True
    except Exception:
        return False
def decryption(iv: str, key: str, filename: str) -> bool:
try:
iv = n_string(iv, 8)
key = n_string(key, 32)
header_bytes = 50
with open(filename + ".encr", "rb") as picture:
picture.seek(header_bytes)
            encrypted_content = picture.read()  # renamed: shadowed the encryption() function
            original = salsa_20_xor_bytes(encrypted_content, key, iv)
with open(filename, "wb") as decrypted:
picture.seek(0)
decrypted.write(picture.read(header_bytes))
decrypted.write(original)
return True
    except Exception:
        return False
print()
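
# Example round trip (illustrative; the key, IV, and file name are made up):
#   encryption("my-iv", "my-key", "photo.bmp")   # writes photo.bmp.encr
#   decryption("my-iv", "my-key", "photo.bmp")   # restores photo.bmp from it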
| 26.722222 | 65 | 0.579349 | 175 | 1,443 | 4.651429 | 0.297143 | 0.081081 | 0.078624 | 0.055283 | 0.503686 | 0.385749 | 0.385749 | 0.304668 | 0.208845 | 0.208845 | 0 | 0.022312 | 0.316701 | 1,443 | 53 | 66 | 27.226415 | 0.803245 | 0 | 0 | 0.439024 | 0 | 0 | 0.015939 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0.02439 | 0.04878 | 0 | 0.268293 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dd86a18a4119d3adc2f367d73fae1d910601d27 | 3,747 | py | Python | fitbenchmarking/utils/tests/test_create_dirs.py | fitbenchmarking/fitbenchmarking | ea398efa61f071dc64fe7c3b484d5bb4e1897856 | [
"BSD-3-Clause"
] | 6 | 2019-07-22T01:56:10.000Z | 2021-12-10T05:29:30.000Z | fitbenchmarking/utils/tests/test_create_dirs.py | fitbenchmarking/fitbenchmarking | ea398efa61f071dc64fe7c3b484d5bb4e1897856 | [
"BSD-3-Clause"
] | 677 | 2019-04-29T10:23:49.000Z | 2022-03-22T12:01:30.000Z | fitbenchmarking/utils/tests/test_create_dirs.py | fitbenchmarking/fitbenchmarking | ea398efa61f071dc64fe7c3b484d5bb4e1897856 | [
"BSD-3-Clause"
] | 8 | 2019-06-13T10:32:17.000Z | 2020-12-09T15:08:40.000Z | """
This file contains tests on the creation of directories
"""
from __future__ import absolute_import, division, print_function
import time
import os
import shutil
import unittest
from fitbenchmarking.utils.create_dirs import (figures, group_results, results,
support_pages, css)
class CreateDirsTests(unittest.TestCase):
"""
Tests for the creation of directories
"""
def setUp(self):
"""
Sets a temporary directory in which results are stored
"""
path = 'r{}'.format(int(time.time()))
self.results_dir = os.path.join(os.getcwd(), path)
def tearDown(self):
"""
Deletes the temporary folder
"""
if os.path.exists(self.results_dir):
shutil.rmtree(self.results_dir)
def test_results_throw_correct_error(self):
"""
Check that the correct error is raised
"""
self.assertRaises(TypeError, results, 123)
self.assertRaises(TypeError, results, None)
def test_results_create_correct_dir(self):
"""
Check that the correct directory is created
"""
results_dir = results(self.results_dir)
results_dir_expected = self.results_dir
self.assertEqual(results_dir_expected, results_dir)
self.assertTrue(os.path.exists(results_dir_expected))
shutil.rmtree(results_dir_expected)
def test_groupResults_create_correct_group_results(self):
"""
Check that the Group results directory is as expected
"""
results_dir = results(self.results_dir)
group_results_dir = group_results(results_dir, "test_group")
group_results_dir_expected = os.path.join(results_dir, "test_group")
self.assertEqual(group_results_dir_expected, group_results_dir)
self.assertTrue(os.path.exists(group_results_dir_expected))
shutil.rmtree(results_dir)
def test_support_pages_create_correct_dir(self):
"""
Check that the support pages directory is as expected
"""
results_dir = results(self.results_dir)
group_results_dir = group_results(results_dir, "test_group")
support_pages_dir = support_pages(group_results_dir)
support_pages_dir_expected = os.path.join(group_results_dir,
'support_pages')
self.assertEqual(support_pages_dir_expected, support_pages_dir)
self.assertTrue(os.path.exists(support_pages_dir_expected))
shutil.rmtree(results_dir)
def test_figures_create_correct_dir(self):
"""
Check that the figures directory is as expected
"""
results_dir = results(self.results_dir)
group_results_dir = group_results(results_dir, "test_group")
support_pages_dir = support_pages(group_results_dir)
figures_dir = figures(support_pages_dir)
figures_dir_expected = os.path.join(support_pages_dir, 'figures')
self.assertEqual(figures_dir_expected, figures_dir)
self.assertTrue(os.path.exists(figures_dir_expected))
shutil.rmtree(results_dir)
def test_css_create_correct_dir(self):
"""
Check that the css directory is as expected
"""
results_dir = results(self.results_dir)
group_results_dir = group_results(results_dir, "test_group")
css_dir = css(group_results_dir)
css_dir_expected = os.path.join(group_results_dir,
'css')
self.assertEqual(css_dir_expected, css_dir)
self.assertTrue(os.path.exists(css_dir_expected))
shutil.rmtree(css_dir)
if __name__ == "__main__":
unittest.main()
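
# The suite can also be run from the repository root (illustrative):
#   python -m unittest fitbenchmarking.utils.tests.test_create_dirs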
| 32.868421 | 79 | 0.662397 | 446 | 3,747 | 5.226457 | 0.174888 | 0.1716 | 0.083655 | 0.075504 | 0.484341 | 0.443157 | 0.392535 | 0.286572 | 0.204204 | 0.204204 | 0 | 0.001075 | 0.255137 | 3,747 | 113 | 80 | 33.159292 | 0.83411 | 0.122765 | 0 | 0.237288 | 0 | 0 | 0.027273 | 0 | 0 | 0 | 0 | 0 | 0.20339 | 1 | 0.135593 | false | 0 | 0.101695 | 0 | 0.254237 | 0.016949 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dd963b267b7e2d742f71f63075d5795850791fb | 1,173 | py | Python | router.py | laddge/cardAPI | 770c5f8936f7b699ccaf386c82f7172e84292ecc | [
"MIT"
] | null | null | null | router.py | laddge/cardAPI | 770c5f8936f7b699ccaf386c82f7172e84292ecc | [
"MIT"
] | null | null | null | router.py | laddge/cardAPI | 770c5f8936f7b699ccaf386c82f7172e84292ecc | [
"MIT"
] | null | null | null | import os
from urllib.parse import urlparse
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import RedirectResponse, Response
from fastapi.staticfiles import StaticFiles
from typing import Optional
import api
app = FastAPI()
app.mount("/files", StaticFiles(directory="files"), name="files")
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.middleware("http")
async def middleware(request: Request, call_next):
if request.method == "HEAD":
response = Response()
elif "herokuapp" in urlparse(str(request.url)).netloc:
domain = os.getenv("DOMAIN")
if domain:
url = urlparse(str(request.url))._replace(netloc=domain).geturl()
response = RedirectResponse(url)
else:
response = await call_next(request)
else:
response = await call_next(request)
return response
@app.get('/')
async def getAPI(url: Optional[str] = None):
if url:
return api.main(url)
else:
return {'message': 'hello, world'}
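
# To try the service locally (illustrative; assumes uvicorn is installed):
#   uvicorn router:app --reload
#   GET /                          -> {"message": "hello, world"}
#   GET /?url=https://example.com  -> api.main("https://example.com")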
| 24.957447 | 77 | 0.672634 | 134 | 1,173 | 5.820896 | 0.432836 | 0.05641 | 0.046154 | 0.053846 | 0.082051 | 0.082051 | 0 | 0 | 0 | 0 | 0 | 0 | 0.208014 | 1,173 | 46 | 78 | 25.5 | 0.839612 | 0 | 0 | 0.135135 | 0 | 0 | 0.052856 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.216216 | 0 | 0.297297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ddb9586e5b11a1deeec483163fc0ec9a71544d9 | 791 | py | Python | Table/groupingDeviceMappingTable.py | tuanldchainos/HcPullData | 65f89cfdcae135781aad4b3edf210c0ecd2d6a1c | [
"Apache-2.0"
] | null | null | null | Table/groupingDeviceMappingTable.py | tuanldchainos/HcPullData | 65f89cfdcae135781aad4b3edf210c0ecd2d6a1c | [
"Apache-2.0"
] | null | null | null | Table/groupingDeviceMappingTable.py | tuanldchainos/HcPullData | 65f89cfdcae135781aad4b3edf210c0ecd2d6a1c | [
"Apache-2.0"
] | null | null | null | from sqlalchemy import Column, Integer, String
from sqlalchemy import DateTime
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
class groupingDeviceMappingTable():
def __init__(self, metadata: MetaData):
self.groupingDeviceMappingTable = Table('GroupingDeviceMapping', metadata,
Column('GroupingId', String, primary_key=True, nullable=False),
Column('GroupUnicastId', Integer, nullable=False),
Column('DeviceId', String, primary_key=True, nullable=False),
Column('DeviceUnicastId', Integer, nullable=False),
)
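
# Example usage (illustrative; `engine` is an assumed SQLAlchemy engine):
#   metadata = MetaData()
#   mapping = groupingDeviceMappingTable(metadata)
#   metadata.create_all(engine)  # creates the GroupingDeviceMapping table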
| 56.5 | 111 | 0.548673 | 57 | 791 | 7.508772 | 0.438596 | 0.121495 | 0.140187 | 0.093458 | 0.182243 | 0.182243 | 0.182243 | 0 | 0 | 0 | 0 | 0 | 0.376738 | 791 | 13 | 112 | 60.846154 | 0.868154 | 0 | 0 | 0 | 0 | 0 | 0.085967 | 0.026549 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ddb98d57d427980a09a0ebd40ba75c59e9df8f6 | 802 | py | Python | nutils/__init__.py | wijnandhoitinga/nutils | 7ad6793ca5e3a43f45dcc0a4a795b381d2a0b9d4 | [
"MIT"
] | 25 | 2015-04-29T13:10:22.000Z | 2019-03-18T09:45:29.000Z | nutils/__init__.py | wijnandhoitinga/nutils | 7ad6793ca5e3a43f45dcc0a4a795b381d2a0b9d4 | [
"MIT"
] | 330 | 2015-03-04T09:06:38.000Z | 2019-06-11T10:31:54.000Z | nutils/__init__.py | wijnandhoitinga/nutils | 7ad6793ca5e3a43f45dcc0a4a795b381d2a0b9d4 | [
"MIT"
] | 16 | 2015-03-23T08:00:46.000Z | 2019-02-21T11:14:47.000Z | import sys
import numpy
from distutils.version import LooseVersion
assert sys.version_info >= (3, 5)
assert LooseVersion(numpy.version.version) >= LooseVersion('1.16'), 'nutils requires numpy 1.16 or higher, got {}'.format(numpy.version.version)
version = '8.0a0'
version_name = None
long_version = ('{} "{}"' if version_name else '{}').format(version, version_name)
__all__ = [
'cache',
'cli',
'element',
'elementseq',
'evaluable',
'export',
'expression_v1',
'expression_v2',
'function',
'matrix',
'mesh',
'numeric',
'parallel',
'points',
'pointsseq',
'sample',
'solver',
'sparse',
'testing',
'topology',
'transform',
'transformseq',
'types',
'unit',
'util',
'warnings',
]
# vim:sw=2:sts=2:et
| 19.095238 | 144 | 0.599751 | 86 | 802 | 5.465116 | 0.686047 | 0.119149 | 0.080851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024116 | 0.224439 | 802 | 41 | 145 | 19.560976 | 0.731511 | 0.021197 | 0 | 0 | 0 | 0 | 0.320562 | 0 | 0 | 0 | 0 | 0 | 0.055556 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ddd126c99d8feae03f19cc66e85504d9442c512 | 1,710 | py | Python | ReadCifar.py | timestocome/ReadCifar10 | ea6e70a982bdc923386327db648e038a63b1d55d | [
"MIT"
] | null | null | null | ReadCifar.py | timestocome/ReadCifar10 | ea6e70a982bdc923386327db648e038a63b1d55d | [
"MIT"
] | null | null | null | ReadCifar.py | timestocome/ReadCifar10 | ea6e70a982bdc923386327db648e038a63b1d55d | [
"MIT"
] | null | null | null |
# http://github.com/timestocome
# data
# https://www.cs.toronto.edu/~kriz/cifar.html
import numpy as np
import pickle
import matplotlib.pyplot as plt
###################################################################################
# read in data
##################################################################################
n_classes = 10
image_height = 32
image_width = 32
image_depth = 3
label_bytes = 1
def unpickle(file):
    # encoding='latin1' lets Python 3 read the Python 2 CIFAR pickles,
    # and avoids shadowing the built-in `dict`
    with open(file, 'rb') as fo:
        batch = pickle.load(fo, encoding='latin1')
    return batch
def load_data():
xs = []
ys = []
# read in training files
for i in range(5):
# this is the directory you put the cifar batch files into
filename = 'cifar-10/data_batch_%d' % (i+1)
with open(filename, 'rb') as f:
d = pickle.load(f, encoding='latin1') # needed for python2-python3 pickle
x = d['data']
y = d['labels']
xs.append(x)
ys.append(y)
# read in test files
filename = 'cifar-10/test_batch'
with open(filename, 'rb') as f:
d = pickle.load(f, encoding='latin1')
xs.append(d['data'])
ys.append(d['labels'])
x = np.concatenate(xs) # images
y = np.concatenate(ys) # labels
x = x.reshape((x.shape[0], 3, 32, 32)).transpose(0,2,3,1)
# Visualizing CIFAR 10
fig, axes1 = plt.subplots(5,5,figsize=(10,10))
for j in range(5):
for k in range(5):
i = np.random.choice(range(len(x)))
axes1[j][k].set_axis_off()
axes1[j][k].imshow(x[i:i+1][0])
plt.show()
    # scale images to [0, 1] and return them with the labels
    return x / 255., y
load_data() | 21.375 | 85 | 0.489474 | 225 | 1,710 | 3.666667 | 0.44 | 0.021818 | 0.029091 | 0.043636 | 0.113939 | 0.113939 | 0.113939 | 0.113939 | 0.113939 | 0.113939 | 0 | 0.037798 | 0.288304 | 1,710 | 80 | 86 | 21.375 | 0.640099 | 0.159649 | 0 | 0.097561 | 0 | 0 | 0.062748 | 0.017474 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.073171 | 0 | 0.146341 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ddf6ed4392a1c97342e5972d833a191e47b53d0 | 826 | py | Python | app/api/errors/server.py | maxzhenzhera/my_vocab_backend | 2e9f968374e0bc2fcc0ae40830ca40f3cf5754d1 | [
"MIT"
] | null | null | null | app/api/errors/server.py | maxzhenzhera/my_vocab_backend | 2e9f968374e0bc2fcc0ae40830ca40f3cf5754d1 | [
"MIT"
] | null | null | null | app/api/errors/server.py | maxzhenzhera/my_vocab_backend | 2e9f968374e0bc2fcc0ae40830ca40f3cf5754d1 | [
"MIT"
] | null | null | null | import traceback
from fastapi import Request
from fastapi.responses import PlainTextResponse
from starlette.status import HTTP_500_INTERNAL_SERVER_ERROR
def internal_server_exception_handler(
_: Request,
exception: Exception
) -> PlainTextResponse:
""" Return the traceback of the internal server error. """
exception_traceback = ''.join(
traceback.format_exception(
type(exception),
value=exception,
tb=exception.__traceback__
)
)
message = (
f'{"Internal server error has occurred.":<50}|\n'
f'{"Please, check the traceback.":<50}|\n'
f'{"-" * 50}x\n\n'
)
message += exception_traceback
return PlainTextResponse(
status_code=HTTP_500_INTERNAL_SERVER_ERROR,
content=message
)
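
# Wiring sketch (illustrative): register the handler on a FastAPI app, e.g.
#   app.add_exception_handler(Exception, internal_server_exception_handler)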
| 27.533333 | 62 | 0.6477 | 85 | 826 | 6.058824 | 0.423529 | 0.135922 | 0.147573 | 0.081553 | 0.100971 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01964 | 0.260291 | 826 | 29 | 63 | 28.482759 | 0.823241 | 0.060533 | 0 | 0 | 0 | 0 | 0.130208 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.16 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8de0588b8d216183bd189807faae11d1037b92e8 | 11,025 | py | Python | dynamic_rcnn/utils/misc.py | yyzq1/bigwork | c2247abd2355b0f64ddfcc6e489e77b1eec55147 | [
"MIT"
] | 177 | 2020-04-14T01:16:26.000Z | 2022-03-28T03:29:28.000Z | dynamic_rcnn/utils/misc.py | yyzq1/bigwork | c2247abd2355b0f64ddfcc6e489e77b1eec55147 | [
"MIT"
] | 10 | 2020-05-06T13:42:47.000Z | 2021-02-06T13:35:27.000Z | dynamic_rcnn/utils/misc.py | yyzq1/bigwork | c2247abd2355b0f64ddfcc6e489e77b1eec55147 | [
"MIT"
] | 23 | 2020-04-14T05:41:25.000Z | 2021-12-21T02:43:01.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
helper class that supports empty tensors on some nn functions.
Ideally, add support directly in PyTorch to empty tensors in
those functions.
This can be removed once https://github.com/pytorch/pytorch/issues/12013
is implemented
"""
import math
import torch
from torch import nn
from torch.nn.modules.utils import _ntuple
class _NewEmptyTensorOp(torch.autograd.Function):
@staticmethod
def forward(ctx, x, new_shape):
ctx.shape = x.shape
return x.new_empty(new_shape)
@staticmethod
def backward(ctx, grad):
shape = ctx.shape
return _NewEmptyTensorOp.apply(grad, shape), None
class Conv2d(torch.nn.Conv2d):
def forward(self, x):
if x.numel() > 0:
return super(Conv2d, self).forward(x)
# get output shape
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // d + 1
for i, p, di, k, d in zip(
x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
)
]
output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
return _NewEmptyTensorOp.apply(x, output_shape)
class ConvTranspose2d(torch.nn.ConvTranspose2d):
def forward(self, x):
if x.numel() > 0:
return super(ConvTranspose2d, self).forward(x)
# get output shape
output_shape = [
(i - 1) * d - 2 * p + (di * (k - 1) + 1) + op
for i, p, di, k, d, op in zip(
x.shape[-2:],
self.padding,
self.dilation,
self.kernel_size,
self.stride,
self.output_padding,
)
]
output_shape = [x.shape[0], self.bias.shape[0]] + output_shape
return _NewEmptyTensorOp.apply(x, output_shape)
class BatchNorm2d(torch.nn.BatchNorm2d):
def forward(self, x):
if x.numel() > 0:
return super(BatchNorm2d, self).forward(x)
# get output shape
output_shape = x.shape
return _NewEmptyTensorOp.apply(x, output_shape)
def interpolate(
input, size=None, scale_factor=None, mode="nearest", align_corners=None
):
if input.numel() > 0:
return torch.nn.functional.interpolate(
input, size, scale_factor, mode, align_corners
)
def _check_size_scale_factor(dim):
if size is None and scale_factor is None:
raise ValueError("either size or scale_factor should be defined")
if size is not None and scale_factor is not None:
raise ValueError("only one of size or scale_factor should be defined")
if (
scale_factor is not None
and isinstance(scale_factor, tuple)
and len(scale_factor) != dim
):
raise ValueError(
"scale_factor shape must match input shape. "
"Input is {}D, scale_factor size is {}".format(dim, len(scale_factor))
)
def _output_size(dim):
_check_size_scale_factor(dim)
if size is not None:
return size
scale_factors = _ntuple(dim)(scale_factor)
# math.floor might return float in py2.7
return [
int(math.floor(input.size(i + 2) * scale_factors[i])) for i in range(dim)
]
output_shape = tuple(_output_size(2))
output_shape = input.shape[:-2] + output_shape
return _NewEmptyTensorOp.apply(input, output_shape)
class DFConv2d(nn.Module):
"""Deformable convolutional layer"""
def __init__(
self,
in_channels,
out_channels,
with_modulated_dcn=True,
kernel_size=3,
stride=1,
groups=1,
dilation=1,
deformable_groups=1,
bias=False
):
super(DFConv2d, self).__init__()
if isinstance(kernel_size, (list, tuple)):
assert isinstance(stride, (list, tuple))
assert isinstance(dilation, (list, tuple))
assert len(kernel_size) == 2
assert len(stride) == 2
assert len(dilation) == 2
padding = (
dilation[0] * (kernel_size[0] - 1) // 2,
dilation[1] * (kernel_size[1] - 1) // 2
)
offset_base_channels = kernel_size[0] * kernel_size[1]
else:
padding = dilation * (kernel_size - 1) // 2
offset_base_channels = kernel_size * kernel_size
if with_modulated_dcn:
from dynamic_rcnn.kernels.ops.dcn import ModulatedDeformConv
offset_channels = offset_base_channels * 3 #default: 27
conv_block = ModulatedDeformConv
else:
from dynamic_rcnn.kernels.ops.dcn import DeformConv
offset_channels = offset_base_channels * 2 #default: 18
conv_block = DeformConv
self.offset = Conv2d(
in_channels,
deformable_groups * offset_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=1,
dilation=dilation
)
for l in [self.offset,]:
nn.init.kaiming_uniform_(l.weight, a=1)
torch.nn.init.constant_(l.bias, 0.)
self.conv = conv_block(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
deformable_groups=deformable_groups,
bias=bias
)
self.with_modulated_dcn = with_modulated_dcn
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
def forward(self, x):
if x.numel() > 0:
if not self.with_modulated_dcn:
offset = self.offset(x)
x = self.conv(x, offset)
else:
offset_mask = self.offset(x)
offset = offset_mask[:, :18, :, :]
mask = offset_mask[:, -9:, :, :].sigmoid()
x = self.conv(x, offset, mask)
return x
# get output shape
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // d + 1
for i, p, di, k, d in zip(
x.shape[-2:],
self.padding,
self.dilation,
self.kernel_size,
self.stride
)
]
output_shape = [x.shape[0], self.conv.weight.shape[0]] + output_shape
return _NewEmptyTensorOp.apply(x, output_shape)
class FrozenBatchNorm2d(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters
are fixed
"""
def __init__(self, n):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def forward(self, x):
# Cast all fixed parameters to half() if necessary
if x.dtype == torch.float16:
self.weight = self.weight.half()
self.bias = self.bias.half()
self.running_mean = self.running_mean.half()
self.running_var = self.running_var.half()
scale = self.weight * self.running_var.rsqrt()
bias = self.bias - self.running_mean * scale
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
return x * scale + bias
def get_group_gn(dim, dim_per_gp, num_groups):
"""get number of groups used by GroupNorm, based on number of channels."""
assert dim_per_gp == -1 or num_groups == -1, \
"GroupNorm: can only specify G or C/G."
if dim_per_gp > 0:
assert dim % dim_per_gp == 0, \
"dim: {}, dim_per_gp: {}".format(dim, dim_per_gp)
group_gn = dim // dim_per_gp
else:
assert dim % num_groups == 0, \
"dim: {}, num_groups: {}".format(dim, num_groups)
group_gn = num_groups
return group_gn
# TODO, fix the cfg setting
def group_norm(out_channels, affine=True, divisor=1, cfg=None):
out_channels = out_channels // divisor
if cfg:
dim_per_gp = cfg.MODEL.GROUP_NORM.DIM_PER_GP // divisor
num_groups = cfg.MODEL.GROUP_NORM.NUM_GROUPS // divisor
eps = cfg.MODEL.GROUP_NORM.EPSILON # default: 1e-5
else:
dim_per_gp = -1
num_groups = 32
eps = 1e-5
return torch.nn.GroupNorm(
get_group_gn(out_channels, dim_per_gp, num_groups),
out_channels,
eps,
affine
)
def make_conv3x3(
in_channels,
out_channels,
dilation=1,
stride=1,
use_gn=False,
use_relu=False,
kaiming_init=True
):
conv = Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False if use_gn else True
)
if kaiming_init:
nn.init.kaiming_normal_(
conv.weight, mode="fan_out", nonlinearity="relu"
)
else:
torch.nn.init.normal_(conv.weight, std=0.01)
if not use_gn:
nn.init.constant_(conv.bias, 0)
module = [conv,]
if use_gn:
module.append(group_norm(out_channels))
if use_relu:
module.append(nn.ReLU(inplace=True))
if len(module) > 1:
return nn.Sequential(*module)
return conv
def make_fc(dim_in, hidden_dim, use_gn=False):
'''
Caffe2 implementation uses XavierFill, which in fact
corresponds to kaiming_uniform_ in PyTorch
'''
if use_gn:
fc = nn.Linear(dim_in, hidden_dim, bias=False)
nn.init.kaiming_uniform_(fc.weight, a=1)
return nn.Sequential(fc, group_norm(hidden_dim))
fc = nn.Linear(dim_in, hidden_dim)
nn.init.kaiming_uniform_(fc.weight, a=1)
nn.init.constant_(fc.bias, 0)
return fc
def conv_with_kaiming_uniform(use_gn=False, use_relu=False):
def make_conv(
in_channels, out_channels, kernel_size, stride=1, dilation=1
):
conv = Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=dilation * (kernel_size - 1) // 2,
dilation=dilation,
bias=False if use_gn else True
)
# Caffe2 implementation uses XavierFill, which in fact
# corresponds to kaiming_uniform_ in PyTorch
nn.init.kaiming_uniform_(conv.weight, a=1)
if not use_gn:
nn.init.constant_(conv.bias, 0)
module = [conv,]
if use_gn:
module.append(group_norm(out_channels))
if use_relu:
module.append(nn.ReLU(inplace=True))
if len(module) > 1:
return nn.Sequential(*module)
return conv
return make_conv
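
# Illustrative usage of the layer factories above (channel sizes are made up):
#   conv = make_conv3x3(256, 256, use_gn=True, use_relu=True)
#   fc = make_fc(1024, 512)
#   make_conv = conv_with_kaiming_uniform(use_gn=True, use_relu=True)
#   block = make_conv(256, 256, kernel_size=3)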
| 31.590258 | 88 | 0.581043 | 1,372 | 11,025 | 4.478863 | 0.169825 | 0.039056 | 0.014321 | 0.031245 | 0.403255 | 0.358503 | 0.326932 | 0.279414 | 0.230269 | 0.199349 | 0 | 0.017308 | 0.31873 | 11,025 | 348 | 89 | 31.681034 | 0.800825 | 0.080363 | 0 | 0.355872 | 0 | 0 | 0.030707 | 0 | 0 | 0 | 0 | 0.002874 | 0.02847 | 1 | 0.064057 | false | 0 | 0.021352 | 0 | 0.192171 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8de07753d8112e17484865d9eb71f8e6d5d640f0 | 3,133 | py | Python | pyontutils/googapis.py | dbrnz/pyontutils | 439e7b572a0a8cbe6817b9a294745db8688da666 | [
"MIT"
] | null | null | null | pyontutils/googapis.py | dbrnz/pyontutils | 439e7b572a0a8cbe6817b9a294745db8688da666 | [
"MIT"
] | null | null | null | pyontutils/googapis.py | dbrnz/pyontutils | 439e7b572a0a8cbe6817b9a294745db8688da666 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
""" api access for google sheets (and friends)
Usage:
googapis auth (sheets|docs|drive)... [options] [--drive-scope=<SCOPE>...]
Examples:
googapis auth sheets
Options:
--store-file=<PATH>... write to a specific store file
-n --readonly set the readonly scope
--drive-scope=<SCOPE>... add drive scopes (overrides readonly)
values: appdata
file
metadata
metadata.readonly
photos.readonly
readonly
scripts
-d --debug
"""
import sys
from pathlib import Path
from pyontutils.utils import log
from pyontutils.clifun import Dispatcher, Options as BaseOptions
from pyontutils.sheets import _get_oauth_service
log = log.getChild('googapis')
class Options(BaseOptions):
drive_scopes = (
'appdata',
'file',
'metadata',
'metadata.readonly',
'photos.readonly',
'readonly',
'scripts',)
def __new__(cls, args, defaults):
bads = []
for scope in args['--drive-scope']:
if scope not in cls.drive_scopes:
bads.append(scope)
if bads:
log.error(f'Invalid scopes! {bads}')
sys.exit(1)
return super().__new__(cls, args, defaults)
    @property
    def store_file(self):
        # docopt returns a list for repeatable options; use the first path given
        paths = self._args['--store-file']
        return Path(paths[0]).resolve() if paths else None
class Main(Dispatcher):
@property
def _scopes(self):
base = 'https://www.googleapis.com/auth/'
suffix = '.readonly' if self.options.readonly else ''
if self.options.sheets:
yield base + 'spreadsheets' + suffix
if self.options.docs:
            yield base + 'documents' + suffix
if self.options.drive:
suffixes = []
suffixes += ['.' + s for s in self.options.drive_scope]
if suffix and not suffixes:
suffixes.append(suffix)
if not suffixes:
suffixes = '',
for suffix in suffixes:
yield base + 'drive' + suffix
def auth(self):
newline = '\n'
scopes = list(self._scopes)
if self.options.debug:
log.debug(f'requesting for scopes:\n{newline.join(scopes)}')
service = _get_oauth_service(readonly=self.options.readonly, SCOPES=scopes,
store_file=self.options.store_file)
# FIXME decouple this ...
log.info(f'Auth finished successfully for scopes:\n{newline.join(scopes)}')
def main():
from docopt import docopt, parse_defaults
args = docopt(__doc__, version='googapis 0.0.0')
defaults = {o.name:o.value if o.argcount else None for o in parse_defaults(__doc__)}
options = Options(args, defaults)
main = Main(options)
if main.options.debug:
log.setLevel('DEBUG')
print(main.options)
main()
if __name__ == '__main__':
main()
| 27.973214 | 88 | 0.551867 | 334 | 3,133 | 5.053892 | 0.332335 | 0.052133 | 0.038507 | 0.031991 | 0.10782 | 0.10782 | 0.075829 | 0.075829 | 0.075829 | 0 | 0 | 0.002425 | 0.341845 | 3,133 | 111 | 89 | 28.225225 | 0.816198 | 0.240983 | 0 | 0.061538 | 0 | 0 | 0.138033 | 0.026171 | 0 | 0 | 0 | 0.009009 | 0 | 1 | 0.076923 | false | 0 | 0.092308 | 0.015385 | 0.246154 | 0.015385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8de1b7d242ca78d038847d03a2a25db0ebb7c8e2 | 517 | py | Python | Leetcode/0484. Find Permutation/0484-2.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/0484. Find Permutation/0484-2.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/0484. Find Permutation/0484-2.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | class Solution:
def findPermutation(self, s: str) -> List[int]:
ans = [i for i in range(1, len(s) + 2)]
# for each D* group (s[i..j]), reverse ans[i..j + 1]
i = -1
j = -1
    def getNextIndex(c: str, start: int) -> int:
for i in range(start, len(s)):
if s[i] == c:
return i
return len(s)
while True:
i = getNextIndex('D', j + 1)
if i == len(s):
break
j = getNextIndex('I', i + 1)
ans[i:j + 1] = ans[i:j + 1][::-1]
return ans
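
# Example (illustrative): Solution().findPermutation("DI") -> [2, 1, 3]
# The leading 'D' reverses the prefix [1, 2] of [1, 2, 3].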
| 22.478261 | 56 | 0.477756 | 86 | 517 | 2.872093 | 0.360465 | 0.040486 | 0.060729 | 0.072874 | 0.052632 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029499 | 0.344294 | 517 | 22 | 57 | 23.5 | 0.699115 | 0.096712 | 0 | 0 | 0 | 0 | 0.004301 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8de62c242c479afad43ca797ee2384f022ddfbb5 | 10,112 | py | Python | Gridex.py | sabbatinif/GridEx | 8f1ff5c29081e0556d16f49e56bd5390be87d7b2 | [
"Apache-2.0"
] | null | null | null | Gridex.py | sabbatinif/GridEx | 8f1ff5c29081e0556d16f49e56bd5390be87d7b2 | [
"Apache-2.0"
] | null | null | null | Gridex.py | sabbatinif/GridEx | 8f1ff5c29081e0556d16f49e56bd5390be87d7b2 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from itertools import product
from tensorflow.keras.models import load_model
from sklearn.feature_selection import SelectKBest, f_regression
from joblib import load
import random as rnd
class Gridex:
def __init__(self, target, name, ext, feat, steps, th, adap = None):
print("GridEx -", name, "data set")
self.name = name
self.ext = ext
self.feat = feat
self.target = target
self.steps = steps
self.threshold = th
self.model = load_model("models/{}".format(name))
self.fake = load("datasets/train/x/{}.{}.joblib".format(name, ext))
self.Xtrain = np.array(self.fake)
self.Xtest = np.array(load("datasets/test/x/{}.{}.joblib".format(name, ext)))
self.__adaptiveSplits(adap)
self.__createSurrounding()
self.__iterate()
def __count(self, c, samples, mean = False):
cond = np.ones((len(samples),), dtype = bool)
for i, f in enumerate(self.feat):
[a, b] = c[f]
col = samples[:, i]
cond &= ((a <= col) & (col <= b))
n = len(np.nonzero(cond)[0])
if mean:
if n > 0:
pred = self.model.predict(samples[cond])
return n, samples[cond].tolist(), pred.mean(), pred.std()
else:
return n, samples[cond].tolist(), 0, 0
else:
return n, samples[cond].tolist()
    def __predict(self, samples):
        ret = []
        for s in samples:
            found = False
            for hc in self.hyperCubes:
                found = True
                c = self.hyperCubes[hc]
                for i, f in enumerate(self.feat):
                    [a, b] = c[f]
                    v = s[i]
                    found &= (a <= v <= b)
                    if not found:  # note: `~found` on a Python bool is always truthy
                        break
                if found:
                    ret.append(c[self.target])
                    break
            if not found:
                ret.append(np.nan)
        return ret
def __createSurrounding(self):
self.minmax = { "std" : 2 * self.threshold, self.target : 0 } # surrounding cube
for i, c in enumerate(self.feat):
mi = min(self.Xtrain[:, i].min(), self.Xtest[:, i].min())
ma = max(self.Xtrain[:, i].max(), self.Xtest[:, i].max())
eps = 1e-5
self.minmax[c] = [mi - eps, ma + eps]
self.V = 1.
for f in self.feat:
[a, b] = self.minmax[f]
self.V *= (b - a)
def __iterate(self):
prev = { 0 : self.minmax }
tot = 0
for step in self.steps:
self.hyperCubes = {}
for c in prev:
self.split = {}
if self.__count(prev[c], self.Xtrain)[0] == 0:
continue
if prev[c]["std"] < self.threshold:
self.hyperCubes[len(self.hyperCubes)] = prev[c]
continue
ranges = {}
for (f, imp) in zip(self.feat, self.scores):
r = []
[a, b] = prev[c][f]
                    # avoid shadowing the outer loop variable `step`
                    n_splits = self.adap[f] if self.adap is not None else step
                    s = (b - a) / n_splits
                    for i in range(n_splits):
r.append([a + s * i, a + s * (i + 1)])
ranges[f] = r
prod = list(product(*ranges.values()))
tot += len(prod)
for (pn, p) in enumerate(prod):
print("{:.2f}%".format(pn / len(prod) * 100), end = "\r")
cube = { self.target : 0 }
for i, f in enumerate(self.feat):
cube[f] = p[i]
n, s, m, std = self.__count(cube, self.Xtrain, True)
self.__produceFake(cube, n)
nn, s, m, std = self.__count(cube, np.array(self.fake), True)
if n > 0:
cube[self.target] = m
cube["std"] = std
cube["n"] = n
if std > self.threshold:
self.hyperCubes[len(self.hyperCubes)] = cube
else:
self.split[len(self.split)] = cube
co = 0
to = len(self.split)
self.oldAdj = {}
self.oldMer = {}
self.last = [i for i in self.split]
while(self.__merge()):
co += 1
print("merged", co, "of", to, " " * 20, end = "\r")
for res in self.split:
n, s = self.__count(self.split[res], self.Xtrain, False)
self.hyperCubes[len(self.hyperCubes)] = self.split[res]
print("Useful hyper-cubes:", len(self.hyperCubes), "of", tot)
self.__checkV()
prev = self.hyperCubes.copy()
self.metrics()
print()
def __merge(self):
ret = False
checked = []
self.temp = []
for i in self.split:
checked.append(i)
for j in self.split:
if j not in checked:
if (i in self.last) or (j in self.last):
adj = self.__adjacent(self.split[i], self.split[j])
else:
adj = self.oldAdj[(i, j)]
if adj is not None:
self.temp.append((i, j, adj))
self.oldAdj[(i, j)] = adj
merged = []
for (i, j, adj) in self.temp:
if (i in self.last) or (j in self.last):
t = self.__tempCube(i, j, adj)
self.oldMer[(i, j)] = t
else:
t = self.oldMer[(i, j)]
if t is not None:
merged.append(t)
if(len(merged) > 0):
std, c1, c2, mi = min(merged)
del self.split[c1]
del self.split[c2]
self.last = [c1, c2]
self.split[c1] = mi
ret = True
return ret
def __tempCube(self, i, j, f):
c1 = self.split[i]
c2 = self.split[j]
cube = {}
for k in self.feat:
if k != f:
cube[k] = c1[k]
else:
[a1, b1] = c1[f]
[a2, b2] = c2[f]
cube[f] = [min(a1, a2), max(b1, b2)]
n, s, m, std = self.__count(cube, np.array(self.fake), True)
cube[self.target] = m
cube["std"] = std
cube["n"] = n
if std < self.threshold:
return (std, i, j, cube)
else:
return None
def __adjacent(self, c1, c2):
adj = None
for f in self.feat:
if c1[f] == c2[f]:
continue
if adj is not None:
return None
[a1, b1] = c1[f]
[a2, b2] = c2[f]
if (b1 == a2) or (b2 == a1):
adj = f
else:
return None
return adj
    def __produceFake(self, cube, n):
        # top the cube up to 15 samples drawn inside its own bounds
        # (the original drew from self.minmax, leaving `cube` unused)
        for _ in range(n, 15):
            sample = []
            for f in self.feat:
                [a, b] = cube[f]
                sample.append(rnd.uniform(a, b))
            self.fake.append(sample)
def __adaptiveSplits(self, adap):
fs = SelectKBest(score_func = f_regression, k = "all")
fit = fs.fit(self.Xtrain, self.model.predict(self.Xtrain).flatten())
self.scores = np.array(fit.scores_) / max(fit.scores_)
#print(self.scores)
self.adap = {}
if adap is not None:
for (f, imp) in zip(self.feat, self.scores):
step = 1
for (l, s) in adap:
if imp > l:
step = s
else:
break
self.adap[f] = step
else:
self.adap = None
#print(self.adap)
def __volume(self, hc):
v = 1.
for f in self.feat:
[a, b] = hc[f]
v *= (b - a)
return v
def __checkV(self):
tot = 0.
self.vols = []
for c in self.hyperCubes:
hc = self.hyperCubes[c]
v = self.__volume(hc)
self.vols.append(v / self.V)
tot += v
print("Covered {:.2f}% of the surrounding cube".format(tot / self.V * 100))
def metrics(self, p = True):
ITER = np.array(self.__predict(self.Xtest))
TRUE = load("datasets/test/y/{}.{}.joblib".format(self.name, self.ext)).values
ANN = self.model.predict(self.Xtest).flatten()
nan = np.count_nonzero(np.isnan(ITER))
if nan > 0:
if p:
print(nan, "outliers of", len(self.Xtest), "test samples ({:.2f}%)".format(nan / len(self.Xtest) * 100))
idx = np.argwhere(~np.isnan(ITER))
ITER = ITER[idx]
TRUE = TRUE[idx]
ANN = ANN[idx]
if p:
print("MAE wrt data: {:.2f}, wrt ANN: {:.2f}, ANN MAE: {:.2f}".format(self.__mae(ITER, TRUE), self.__mae(ITER, ANN), self.__mae(ANN, TRUE)))
print("R2 wrt data: {:.2f}, wrt ANN: {:.2f}, ANN MAE: {:.2f}".format(self.__r2(ITER, TRUE), self.__r2(ITER, ANN), self.__r2(ANN, TRUE)))
print()
n = []
for h in self.hyperCubes:
            n.append(self.__count(self.hyperCubes[h], self.Xtrain)[0])
return (n, self.vols)
def __r2(self, pred, true):
u = ((true - pred)**2).sum()
v = ((true - true.mean())**2).sum()
r2 = 1 - u / v
return r2
def __mae(self, pred, true):
return abs(pred - true).mean() | 36.114286 | 153 | 0.426721 | 1,177 | 10,112 | 3.595582 | 0.15633 | 0.02552 | 0.010633 | 0.011815 | 0.221408 | 0.174386 | 0.149811 | 0.139414 | 0.114367 | 0.095463 | 0 | 0.015963 | 0.442445 | 10,112 | 280 | 154 | 36.114286 | 0.734658 | 0.005142 | 0 | 0.222222 | 0 | 0 | 0.034531 | 0.008459 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059524 | false | 0 | 0.02381 | 0.003968 | 0.142857 | 0.039683 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8de91facddbd7d35bd7ba7937ee91b396276006a | 630 | py | Python | sitemap.py | brenopoggiali/Google-internship-alert | eefaa5291f3790379974505d71a3d2c4acfd22f6 | [
"MIT"
] | 6 | 2020-04-07T21:29:42.000Z | 2021-06-08T00:23:53.000Z | sitemap.py | brenopoggiali/Google-internship-alert | eefaa5291f3790379974505d71a3d2c4acfd22f6 | [
"MIT"
] | null | null | null | sitemap.py | brenopoggiali/Google-internship-alert | eefaa5291f3790379974505d71a3d2c4acfd22f6 | [
"MIT"
] | null | null | null | import xmltodict
from browsers import load_firefox
GOOGLE_SITEMAP_URL = "https://careers.google.com/jobs/sitemap"
def get_xml():
browser = load_firefox()
browser.get(GOOGLE_SITEMAP_URL)
jobs_data = xmltodict.parse(browser.page_source)
browser.quit()
return jobs_data
def get_jobs():
jobs_data = get_xml()
jobs = jobs_data['urlset']['url']
return jobs
def get_internships(jobs):
internships = []
for job in jobs:
url = job['loc']
mid = '-intern-' in url
end = '-intern/' in url
if mid or end:
internships.append(job)
return internships
| 21 | 62 | 0.646032 | 82 | 630 | 4.780488 | 0.426829 | 0.081633 | 0.081633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.246032 | 630 | 29 | 63 | 21.724138 | 0.825263 | 0 | 0 | 0 | 0 | 0 | 0.106349 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.090909 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8deb27f0761e2aaaa60c41fe1b1bca80aaa1cfe7 | 9,162 | py | Python | app/recipe/tests/test_recipe_api.py | pouryazk/recipe-app-api | 8845cb2c9a1ddd52f0f2fbf6aeb1f6d998b878ea | [
"MIT"
] | null | null | null | app/recipe/tests/test_recipe_api.py | pouryazk/recipe-app-api | 8845cb2c9a1ddd52f0f2fbf6aeb1f6d998b878ea | [
"MIT"
] | null | null | null | app/recipe/tests/test_recipe_api.py | pouryazk/recipe-app-api | 8845cb2c9a1ddd52f0f2fbf6aeb1f6d998b878ea | [
"MIT"
] | null | null | null | # Testing Upload image feature
import tempfile
import os
from PIL import Image
# ----------------
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list') # generated by viewsets
# /api/recipe/recipes
# /api/recipe/recipes/1/
def detail_url(recipe_id):
""" return recipe detail url """
# this is the way reverse function works
return reverse('recipe:recipe-detail', args=[recipe_id])
# helper function for creating sample recipes, tag, ingredient
def sample_ingredient(user, name='Cinnamon'):
""" create and return a sample ingredient"""
return Ingredient.objects.create(user=user, name=name)
def sample_tag(user, name='MAIN COURSE'):
""" create and Return a simple tag """
return Tag.objects.create(user=user, name=name)
def sample_recipe(user, **params):
""" create and return a sample recipe """
defaults = {
'title':'sample recipe',
'time_minutes':10,
'price': 5.00,
}
defaults.update(params) # default built-in function of python for dics
return Recipe.objects.create(user=user, **defaults)
def image_upload_url(recipe_id):
""" Return Url for recipe image upload """
return reverse('recipe:recipe-upload-image', args=[recipe_id])
class PublicRecipeApiTests(TestCase):
""" Tests unauthenticated recipe api access"""
def setUp(self):
self.client = APIClient()
def test_auth_required(self):
""" tests that authentication is required """
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTest(TestCase):
""" Tests Authenticated Api Access """
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@eniac.com',
'testpass',
)
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
""" test retrieving a list of recipes """
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
""" test retrieving recipes for user """
user2 = get_user_model().objects.create_user(
'test2@eniac.com',
'testpass2',
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.filter(
user=self.user
)
serializer = RecipeSerializer(recipes, many=True) # because its a list
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
""" tests viewing a recipe detail """
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe) # it is a single object
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
""" test creating recipe"""
payload = {
'title': 'chocolate cheesecake',
'time_minutes':30,
'price':5.00,
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe, key))
# get_attr is a built_in python function
def test_create_recipe_with_tags(self):
""" test creating a recipe with tags """
tag1 = sample_tag(user=self.user, name='Vegen')
tag2 = sample_tag(user=self.user, name='Dessert')
payload = {
'title': 'avocado lime cheese cake',
'tags': [tag1.id, tag2.id],
'time_minutes': 60,
'price': 20.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all() # return all tags as a queryset
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
# use assertIn for checking list or checking querysets
def test_create_recipe_with_ingredients(self):
ingredient1 = sample_ingredient(user=self.user, name="Prawns")
ingredient2 = sample_ingredient(user=self.user, name="Jinja")
payload = {
'title': "thai prawn red curry",
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 20,
'price': 7.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
class RecipeImageUploadTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@eniac.com',
'testpass',
)
self.client.force_authenticate(self.user)
self.recipe = sample_recipe(user=self.user)
def tearDown(self): # trigerred after test completes
self.recipe.image.delete()
def test_upload_image_to_recipe(self):
""" test uploading image to recipe """
url = image_upload_url(self.recipe.id)
with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
img = Image.new('RGB', (10,10))
img.save(ntf, format='JPEG')
ntf.seek(0)
res = self.client.post(url, {'image': ntf}, format='multipart')
# multipart is for posting Data instead of json format
self.recipe.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIn('image', res.data)
self.assertTrue(os.path.exists(self.recipe.image.path))
def test_upload_image_bad_request(self):
""" test uploading an invalid image """
url = image_upload_url(self.recipe.id)
res = self.client.post(url, {'image': 'Not Image'}, format='multipart')
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_filter_recipes_by_tags(self):
""" Tests Returning recipes with specific tags """
recipe1 = sample_recipe(user=self.user, title='Thai vegetable curry')
recipe2 = sample_recipe(user=self.user, title='aubergine with tahini')
tag1 = sample_tag(user=self.user, name='vegan')
tag2 = sample_tag(user=self.user, name='vegeterian')
recipe1.tags.add(tag1)
recipe2.tags.add(tag2)
recipe3 = sample_recipe(user=self.user, title='Fish and Chips')
res = self.client.get(
RECIPES_URL,
{'tags': f'{tag1.id}, {tag2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
def test_filter_recipes_by_ingredients(self):
""" Tests returning recipes with specific ingredients """
recipe1 = sample_recipe(user=self.user, title='posh bin on toasts')
recipe2 = sample_recipe(user=self.user, title='Chicken cacciatore')
ingredient1 = sample_ingredient(user=self.user, name='Feta Cheese')
ingredient2 = sample_ingredient(user=self.user, name='Chicken')
recipe1.ingredients.add(ingredient1)
recipe2.ingredients.add(ingredient2)
recipe3 = sample_recipe(user=self.user, title='Steak and mushroom')
res = self.client.get(
RECIPES_URL,
{'ingredients': f'{ingredient1.id}, {ingredient2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
| 35.238462 | 79 | 0.649967 | 1,092 | 9,162 | 5.330586 | 0.211538 | 0.035733 | 0.045353 | 0.037794 | 0.48033 | 0.427074 | 0.377083 | 0.26748 | 0.226422 | 0.226422 | 0 | 0.015703 | 0.235429 | 9,162 | 259 | 80 | 35.374517 | 0.815275 | 0.119625 | 0 | 0.337079 | 0 | 0 | 0.078878 | 0.003271 | 0 | 0 | 0 | 0 | 0.151685 | 1 | 0.11236 | false | 0.016854 | 0.061798 | 0 | 0.219101 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dee03510c0ac2b9c5008f158836e0b55eedc40e | 1,666 | py | Python | src/infoScraper.py | Strassboom/BOMGenerator | 1531d825aa4133c1e58eccfd3c36f76fa596536c | [
"MIT"
] | null | null | null | src/infoScraper.py | Strassboom/BOMGenerator | 1531d825aa4133c1e58eccfd3c36f76fa596536c | [
"MIT"
] | null | null | null | src/infoScraper.py | Strassboom/BOMGenerator | 1531d825aa4133c1e58eccfd3c36f76fa596536c | [
"MIT"
] | null | null | null | import requests
from lxml import html
import os
import sys
sys.path.append("..")
header = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36"}
def adafruitScrape(link):
link = link.strip()
response = requests.get(link,headers=header)
root = html.fromstring(response.content)
name = root.xpath('//h1[@class="products_name"]/text()')[0].strip()
price = root.xpath('//div[@class="product-price"]/span/text()')[0].strip("$")
info = {"Name":name,"Price":price,"URL":link}
info = [name,price,link]
return info
def amazonScrape(link):
link = link.strip()
response = requests.get(link,headers=header)
root = html.fromstring(response.content)
name = root.xpath('//span[@id="productTitle"]/text()')[0].strip()
leftDecPrice = root.xpath('//span[@class="price-large"]/text()')[0].strip()
rightDecPrice = root.xpath('//span[@class="a-size-small price-info-superscript"]/text()')[1].strip()
price = "{}.{}".format(leftDecPrice,rightDecPrice)
info = {"Name":name,"Price":price,"URL":link}
info = [name,price,link]
return info
def scrapePartData(srcFile):
with open(srcFile,"r") as r:
linkList = r.readlines()
for item in range(len(linkList)):
if "https://www.amazon.com/" in linkList[item]:
linkList[item] = amazonScrape(linkList[item])
elif "https://www.adafruit.com/" in linkList[item]:
linkList[item] = adafruitScrape(linkList[item])
return linkList
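
# Example usage (illustrative): scrape every product link in the first data file
#   parts = scrapePartData(defaultFile())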
def defaultFile():
filelist = ["data/"+item for item in os.listdir("data")]
return filelist[0] | 37.863636 | 141 | 0.647659 | 217 | 1,666 | 4.967742 | 0.414747 | 0.06679 | 0.037106 | 0.03154 | 0.346939 | 0.346939 | 0.293135 | 0.293135 | 0.293135 | 0.293135 | 0 | 0.025881 | 0.165066 | 1,666 | 44 | 142 | 37.863636 | 0.749101 | 0 | 0 | 0.315789 | 0 | 0.026316 | 0.25075 | 0.121176 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.105263 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8def1994e3ba6e7b2d4593ea911ee13f3f4389a8 | 5,288 | py | Python | 2020/python/day-06.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | [
"CC0-1.0"
] | 1 | 2021-12-04T18:09:44.000Z | 2021-12-04T18:09:44.000Z | 2020/python/day-06.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | [
"CC0-1.0"
] | null | null | null | 2020/python/day-06.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | [
"CC0-1.0"
] | null | null | null | import pdb
import subprocess
from functools import partial, wraps
from math import prod
from pathlib import Path
from pprint import pprint
from string import (
ascii_lowercase,
digits as ascii_digits,
)
from typing import Any, Callable, List, Iterable, Optional, Union
from toolz import ( # type: ignore
compose_left,
concat,
curry,
do,
excepts,
keyfilter,
pluck,
pipe,
unique,
)
IterableS = Iterable[str]
hexc = ["a", "b", "c", "d", "e", "f"] + list(ascii_digits)
def toolz_pick(keep: IterableS, d: dict) -> dict:
return keyfilter(lambda x: x in keep, d)
def toolz_omit(remove: IterableS, d: dict) -> dict:
return keyfilter(lambda x: x not in remove, d)
def pick(keep: IterableS, d: dict) -> dict:
return {k: d[k] for k in d if k in keep}
def omit(remove: IterableS, d: dict) -> dict:
return {k: d[k] for k in d if k not in remove}
def add_debug(debug_f: Callable, orig_f: Callable) -> Callable:
"""
Transforms the function such that output is passed
to the debug function before being returned as normal.
add_debug(print, str.upper) would return a function equivalent to:
def fn(val: str): -> str
result = str.upper(val)
print(result)
return result
"""
do_f = partial(do, debug_f)
return compose_left(orig_f, do_f)
def add_debug_list(debug_f: Callable, funcs: List[Callable]) -> List[Callable]:
"""
Transforms each of the functions such that the output of each is passed
to the debug function before being returned as normal.
"""
return [add_debug(debug_f, f) for f in funcs]
def run_process(
command: Union[list, str], options: Optional[dict] = None
) -> subprocess.CompletedProcess:
base_opts = {"check": True, "text": True, "capture_output": True}
opts = options if options else {}
# pylint: disable=subprocess-run-check
# return subprocess.run(command, **{**base_opts, **opts}) # type: ignore
return subprocess.run(command, **(base_opts | opts)) # type: ignore
def until_stable(func: Callable) -> Callable:
"""
Repeatedly call the same function on its arguments until the result doesn't
change.
Not sure how to make this work in variadic cases; comparing a single result
to *args doesn't seem to work.
"""
def inner(arg: Any, **kwds: Any) -> Any:
if func(arg, **kwds) == arg:
return arg
return inner(func(arg, **kwds))
return inner
def oxford(lst: List[str]) -> str:
"""
Turns a list into a properly-formatted list phrase.
``["something"]`` becomes "something".
``["thing1", "thing2"]`` becomes "thing1 and thing2".
``["thing1", "thing2", "thing3"]`` becomes "thing1, thing2, and thing3".
``["a", "b", "c", "d"]`` becomes "a, b, c, and d".
"""
if len(lst) <= 2:
return " and ".join(lst)
return f'{", ".join(lst[:-1])}, and {lst[-1]}'
def excepts_wrap(err: Any, err_func: Callable) -> Callable:
"""
This basically means that::
@excepts_wrap(ValueError, lambda _: None)
def get_formatted_time(fmt: str, value: str) -> Optional[datetime]:
return datetime.strptime(value.strip(), fmt)
gft = get_formatted_time
With the decorator, that's broadly equivalent to this without
any decorator::
gft = excepts(
ValueError,
get_formatted_time,
lambda _: None
)
"""
def inner_excepts_wrap(fn: Callable) -> Callable:
return excepts(err, fn, err_func)
return inner_excepts_wrap
lfilter = compose_left(filter, list) # lambda f, l: [*filter(f, l)]
lmap = compose_left(map, list) # lambda f, l: [*map(f, l)]
lpluck = compose_left(pluck, list) # lambda k, l: [*pluck(f, l)]
c_map = curry(map)
c_lmap = curry(lmap)
is_char_az = partial(lambda y, x: x in y, ascii_lowercase)
is_char_hex = partial(lambda y, x: x in y, hexc)
is_char_az09 = partial(lambda y, x: x in y, ascii_lowercase + ascii_digits)
filter_str = partial(lambda f, s: "".join(filter(f, s)))
filter_az = partial(filter_str, is_char_az)
filter_az09 = partial(filter_str, is_char_az09)
filter_hex = partial(filter_str, is_char_hex)
add_pprint = partial(add_debug, pprint)
add_pprinting = partial(lmap, add_pprint)
lcompact = partial(lfilter, None)
def group_to_unique(group):
string = "".join(group)
return list(unique(string))
def group_to_unan(group):
    group = lcompact(group)
    total = list(concat(group))
    # count answers given by every member of the group
    return sum([1 for i in unique(total) if total.count(i) == len(group)])
def unanimous(group):
pass
def process(text):
    groups = lcompact(_.split("\n") for _ in text.split("\n\n"))
    ugr = lmap(group_to_unique, groups)
    count = sum([len(_) for _ in ugr])
    unangr = lmap(group_to_unan, groups)
    # part 1: total unique answers per group; part 2: total unanimous answers
    return count, sum(unangr)
if __name__ == "__main__":
# test = Path("test-input-00.txt").read_text().strip()
# test_answer = whatever
# assert process(test, params) == test_answer
raw = Path("input-06.txt").read_text()
raw = raw.strip() # comment this out if trailing stuff is important!
    result = process(raw)
    print(result)
| 27.398964 | 79 | 0.644667 | 757 | 5,288 | 4.373844 | 0.277411 | 0.010873 | 0.016913 | 0.021746 | 0.179704 | 0.15977 | 0.144669 | 0.128058 | 0.128058 | 0.054364 | 0 | 0.006359 | 0.22674 | 5,288 | 192 | 80 | 27.541667 | 0.803375 | 0.310893 | 0 | 0 | 0 | 0 | 0.027778 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.164948 | false | 0.010309 | 0.092784 | 0.051546 | 0.443299 | 0.030928 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8defd7400006559bffbbff0680b35455d86ade71 | 2,665 | py | Python | Xcode/XcodeVersionEmitter.py | davidschoefberger/Recipes-for-AutoPkg | f68d9a5da4b1778886ec67ed76a591ada00fbfd1 | [
"Apache-2.0"
] | 76 | 2015-08-31T18:34:14.000Z | 2021-10-16T13:33:52.000Z | Xcode/XcodeVersionEmitter.py | davidschoefberger/Recipes-for-AutoPkg | f68d9a5da4b1778886ec67ed76a591ada00fbfd1 | [
"Apache-2.0"
] | 41 | 2015-09-02T01:52:55.000Z | 2022-02-28T21:50:31.000Z | Xcode/XcodeVersionEmitter.py | davidschoefberger/Recipes-for-AutoPkg | f68d9a5da4b1778886ec67ed76a591ada00fbfd1 | [
"Apache-2.0"
] | 43 | 2015-09-02T14:41:37.000Z | 2021-12-01T16:52:08.000Z | #!/usr/bin/python
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Get all Version information from Xcode."""
import os.path
from autopkglib import Processor
try:
# python 2
from urlparse import urlsplit
except ImportError:
from urllib.parse import urlsplit
__all__ = ["XcodeVersionEmitter"]
class XcodeVersionEmitter(Processor):
    """Output a version number based on the URL. Skipped by default."""

    description = __doc__
    input_variables = {
        "dont_skip": {
            "required": False,
            "default": False,
            "description": ("If this evaluates as truthy, do not skip this step."),
        },
        "url": {"required": True, "description": ("URL to parse the version from.")},
        "output_filepath": {
            "required": True,
            "description": ("Path to which xcode version tag is emitted."),
        },
    }
    output_variables = {
        "derived_filename": {"description": "The derived filename to emit."}
    }
    __doc__ = description
    def main(self):
        """Main."""
        if not self.env["dont_skip"]:
            self.output("dont_skip is false, so skipping this Processor.")
            return
        url = self.env["url"]
        url_split_object = urlsplit(url)
        # "https://download.developer.apple.com/Developer_Tools/Xcode_10.2.1/Xcode_10.2.1.xip"  # noqa
        # "https://developer.apple.com//services-account/download?path=/Developer_Tools/Xcode_11_Beta_2/Xcode_11_Beta_2.xip"  # noqa
        filename = os.path.splitext(os.path.basename(url_split_object.path))[0].lower()
        self.output("Derived filename: {}".format(filename))
        self.env["derived_filename"] = filename
        destination = os.path.expandvars(self.env["output_filepath"])
        with open(destination, "w") as f:
            f.write(filename)
        self.output(
            "Derived filename ({}) written to disk at {}".format(
                filename, destination
            )
        )
if __name__ == "__main__":
    PROCESSOR = XcodeVersionEmitter()
    PROCESSOR.execute_shell()
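# Hypothetical recipe snippet (illustrative names, not taken from this repo)
# showing how the processor might be wired into an AutoPkg recipe:
#   Processor: XcodeVersionEmitter
#   Arguments:
#     dont_skip: true
#     url: "%DOWNLOAD_URL%"
#     output_filepath: "%RECIPE_CACHE_DIR%/xcode_version.txt"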
| 32.5 | 132 | 0.641651 | 322 | 2,665 | 5.173913 | 0.465839 | 0.036014 | 0.015606 | 0.019208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009935 | 0.244653 | 2,665 | 81 | 133 | 32.901235 | 0.817685 | 0.34409 | 0 | 0 | 0 | 0 | 0.264019 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.111111 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8df00c7b6abbb17febb26deca50443ccd300a865 | 4,763 | py | Python | note_recognition.py | jemsbhai/battlenotes | 71e2c44aec71505bce204757a10f9ddfb5e5e02e | [
"MIT"
] | null | null | null | note_recognition.py | jemsbhai/battlenotes | 71e2c44aec71505bce204757a10f9ddfb5e5e02e | [
"MIT"
] | null | null | null | note_recognition.py | jemsbhai/battlenotes | 71e2c44aec71505bce204757a10f9ddfb5e5e02e | [
"MIT"
] | null | null | null | # taken largely from https://github.com/ianvonseggern1/note-prediction
from pydub import AudioSegment
import pydub.scipy_effects
import numpy as np
import scipy.signal  # find_peaks lives in the signal submodule
import matplotlib.pyplot as plt
from solo_generation_esac import *
from utils import frequency_spectrum, \
    calculate_distance, \
    classify_note_attempt_1, \
    classify_note_attempt_2, \
    classify_note_attempt_3
def main(file, note_arr=None, plot_starts=False, plot_fft_indices=None):
    actual_notes = []
    if note_arr:
        actual_notes = note_arr
    if plot_fft_indices is None:  # avoid a mutable default argument
        plot_fft_indices = []

    song = AudioSegment.from_file(file)
    # song = song.high_pass_filter(80, order=4)
    starts = predict_note_starts(song, plot_starts)
    predicted_notes = predict_notes(song, starts, plot_fft_indices)

    print("")
    if actual_notes:
        print("Actual Notes")
        print(actual_notes)
    print("Predicted Notes")
    print(predicted_notes)

    if actual_notes:
        lev_distance = calculate_distance(predicted_notes, actual_notes)
        score = abs(len(actual_notes) - lev_distance) / len(actual_notes)
        print("Levenshtein distance: {}/{}".format(lev_distance, len(actual_notes)))
        return score
# Very simple implementation: it just requires a minimum volume and looks for
# left edges by comparing with the prior sample, and also requires a minimum
# distance between starts.
# Future improvements could include smoothing and/or comparing multiple samples.
#
# song: pydub.AudioSegment
# plot: bool, whether to show a plot of start times
#
# Returns predicted starts in ms
def predict_note_starts(song, plot):
    # Size of segments to break song into for volume calculations
    SEGMENT_MS = 50
    # Minimum volume necessary to be considered a note
    VOLUME_THRESHOLD = -27.8
    # The increase from one sample to the next required to be considered a note
    EDGE_THRESHOLD = 0.09
    # Throw out any additional notes found in this window
    MIN_MS_BETWEEN = 100

    # Filter out lower frequencies to reduce noise
    # song = song.high_pass_filter(80, order=4)

    # dBFS is decibels relative to the maximum possible loudness
    volume = [segment.dBFS for segment in song[::SEGMENT_MS]]

    predicted_starts = []
    for i in range(1, len(volume)):
        if volume[i] > VOLUME_THRESHOLD and volume[i] - volume[i - 1] > EDGE_THRESHOLD:
            ms = i * SEGMENT_MS
            # Ignore any too close together
            if len(predicted_starts) == 0 or ms - predicted_starts[-1] >= MIN_MS_BETWEEN:
                predicted_starts.append(ms)
            # predicted_starts.append(ms)
    # for i in range(len(predicted_starts)-2):
    #     if predicted_starts[i+1] - predicted_starts[i] <= MIN_MS_BETWEEN:
    #         predicted_starts.remove(predicted_starts[i])

    # Plot the volume over time (sec)
    if plot:
        x_axis = np.arange(len(volume)) * (SEGMENT_MS / 1000)
        plt.plot(x_axis, volume)
        # Add vertical lines for predicted note starts and actual note starts
        for ms in predicted_starts:
            plt.axvline(x=(ms / 1000), color="g", linewidth=0.5, linestyle=":")
        plt.show()

    return predicted_starts
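# Rough sanity check (illustrative, assuming a short clip with two clearly
# separated notes):
#   starts = predict_note_starts(AudioSegment.from_file("two_notes.wav"), False)
#   assert len(starts) == 2
#   assert starts[1] - starts[0] >= 100  # MIN_MS_BETWEEN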
def predict_notes(song, starts, plot_fft_indices):
    predicted_notes = []
    for i, start in enumerate(starts):
        sample_from = start + 50
        sample_to = start + 200
        if i < len(starts) - 1:
            sample_to = min(starts[i + 1], sample_to)
        segment = song[sample_from:sample_to]
        freqs, freq_magnitudes = frequency_spectrum(segment)

        predicted = classify_note_attempt_2(freqs, freq_magnitudes)
        predicted_notes.append(predicted or "U")

        # Print general info
        print("")
        print("Note: {}".format(i))
        print("Predicted start: {}".format(start))
        length = sample_to - sample_from
        print("Sampled from {} to {} ({} ms)".format(sample_from, sample_to, length))
        print("Frequency sample period: {}hz".format(freqs[1]))

        # Print peak info
        peak_indicies, props = scipy.signal.find_peaks(freq_magnitudes, height=0.015)
        print("Peaks of more than 1.5 percent of total frequency contribution:")
        for j, peak in enumerate(peak_indicies):
            freq = freqs[peak]
            magnitude = props["peak_heights"][j]
            print("{:.1f}hz with magnitude {:.3f}".format(freq, magnitude))

        if i in plot_fft_indices:
            plt.plot(freqs, freq_magnitudes, "b")
            plt.xlabel("Freq (Hz)")
            plt.ylabel("|X(freq)|")
            plt.show()
    return predicted_notes
if __name__ == "__main__":
    main("untitled.wav", note_arr=["C", "D", "E", "F", "G", "A"], plot_starts=True)
| 35.81203 | 89 | 0.664707 | 635 | 4,763 | 4.8 | 0.332283 | 0.059055 | 0.024934 | 0.013123 | 0.122047 | 0.059055 | 0.043307 | 0.019685 | 0 | 0 | 0 | 0.014856 | 0.236826 | 4,763 | 133 | 90 | 35.81203 | 0.823659 | 0.266429 | 0 | 0.075949 | 0 | 0 | 0.084223 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037975 | false | 0 | 0.088608 | 0 | 0.164557 | 0.164557 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8df01847a7b4b8bed8f795d24425994dc853d3c0 | 2,720 | py | Python | tests/unit/models/gpflux/test_config.py | satrialoka/trieste | b58eb924a49ad86e27fa2e082defe2d37afcc14a | [
"Apache-2.0"
] | 119 | 2020-10-06T16:27:05.000Z | 2022-03-28T00:27:18.000Z | tests/unit/models/gpflux/test_config.py | satrialoka/trieste | b58eb924a49ad86e27fa2e082defe2d37afcc14a | [
"Apache-2.0"
] | 275 | 2020-10-07T22:32:53.000Z | 2022-03-31T15:57:44.000Z | tests/unit/models/gpflux/test_config.py | satrialoka/trieste | b58eb924a49ad86e27fa2e082defe2d37afcc14a | [
"Apache-2.0"
] | 30 | 2020-10-08T23:00:01.000Z | 2022-02-25T17:04:22.000Z | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Dict
import gpflow
import numpy as np
import pytest
import tensorflow as tf
from gpflow.models import GPMC
from gpflux.models import DeepGP
from tests.util.models.gpflux.models import two_layer_dgp_model
from tests.util.models.models import fnc_3x_plus_10
from trieste.models import TrainableProbabilisticModel
from trieste.models.gpflux import DeepGaussianProcess, GPfluxModelConfig
def test_gpflux_model_config_raises_not_supported_model_type() -> None:
    x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
    y = fnc_3x_plus_10(x)
    model_specs = {"model": GPMC((x, y), gpflow.kernels.Matern32(), gpflow.likelihoods.Gaussian())}
    with pytest.raises(NotImplementedError):
        GPfluxModelConfig(**model_specs)
def test_gpflux_model_config_has_correct_supported_models() -> None:
    x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
    model_specs = {"model": two_layer_dgp_model(x)}
    model_config = GPfluxModelConfig(**model_specs)
    models_mapping: Dict[
        Any, Callable[[Any, tf.optimizers.Optimizer], TrainableProbabilisticModel]
    ] = {
        DeepGP: DeepGaussianProcess,
    }
    assert model_config.supported_models() == models_mapping
def test_gpflux_model_config_has_correct_default_optimizer() -> None:
    x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
    model_specs = {"model": two_layer_dgp_model(x)}
    model_config = GPfluxModelConfig(**model_specs)
    default_optimizer = tf.optimizers.Adam
    assert isinstance(model_config.optimizer, default_optimizer)
def test_gpflux_model_config_allows_changing_default_optimizer() -> None:
    x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
    model_specs = {
        "model": two_layer_dgp_model(x),
        "optimizer": tf.optimizers.RMSprop(),
    }
    model_config = GPfluxModelConfig(**model_specs)
    expected_optimizer = tf.optimizers.RMSprop
    assert isinstance(model_config.optimizer, expected_optimizer)
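# To run just this module (assuming a standard pytest setup for the repo):
#   pytest tests/unit/models/gpflux/test_config.py -q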
| 34 | 99 | 0.758088 | 364 | 2,720 | 5.456044 | 0.357143 | 0.055388 | 0.022155 | 0.032226 | 0.329305 | 0.249748 | 0.249748 | 0.215509 | 0.215509 | 0.215509 | 0 | 0.012079 | 0.147794 | 2,720 | 79 | 100 | 34.43038 | 0.844694 | 0.20625 | 0 | 0.204545 | 0 | 0 | 0.01352 | 0 | 0 | 0 | 0 | 0 | 0.068182 | 1 | 0.090909 | false | 0 | 0.295455 | 0 | 0.386364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8df1c6f7606169d28d7470f55794f4e8e7d28814 | 493 | py | Python | examples/server_control.py | tkmmark/compas_cloud | e05fe12923868ba37de215a5a4cc3f8638213ae7 | [
"MIT"
] | 4 | 2020-06-19T20:51:36.000Z | 2020-07-05T17:12:56.000Z | examples/server_control.py | tkmmark/compas_cloud | e05fe12923868ba37de215a5a4cc3f8638213ae7 | [
"MIT"
] | 10 | 2020-04-24T09:05:52.000Z | 2020-12-17T14:00:04.000Z | examples/server_control.py | tkmmark/compas_cloud | e05fe12923868ba37de215a5a4cc3f8638213ae7 | [
"MIT"
] | 2 | 2020-10-22T18:12:52.000Z | 2020-10-28T16:25:07.000Z | from compas_cloud import Proxy
import time
print("\n starting a new Proxy and by default starts a server in background")
proxy = Proxy(background=True)
time.sleep(3)
print("\n restarting the background server and open a new one in a prompt console")
proxy.background = False
proxy.restart()
time.sleep(3)
print("\n check if the proxy is healthily connected to server")
print(proxy.check())
time.sleep(3)
print("\n shut the the server and quite the program")
proxy.shutdown()
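# Note: the 3-second sleeps below and above are arbitrary demo pauses that give
# the server time to settle between state changes.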
time.sleep(3) | 24.65 | 83 | 0.760649 | 83 | 493 | 4.506024 | 0.46988 | 0.064171 | 0.106952 | 0.120321 | 0.128342 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009434 | 0.139959 | 493 | 20 | 84 | 24.65 | 0.872642 | 0 | 0 | 0.266667 | 0 | 0 | 0.48583 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.133333 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8df1ebaaebb6455a233f57c92e803cc41ed159be | 3,871 | py | Python | supermarket management.py | shreytec11/Simple_Supermarket_Maanagement | 1579adf7c35e43cd5b539edddf6044c1a7e6602a | [
"MIT"
] | null | null | null | supermarket management.py | shreytec11/Simple_Supermarket_Maanagement | 1579adf7c35e43cd5b539edddf6044c1a7e6602a | [
"MIT"
] | null | null | null | supermarket management.py | shreytec11/Simple_Supermarket_Maanagement | 1579adf7c35e43cd5b539edddf6044c1a7e6602a | [
"MIT"
] | null | null | null | #-----------------SUPERMARKET MANAGEMENT SYSTEM--------------------
items = []
while True:
    print('------------------Welcome to the supermarket------------------')
    print('1. View items\n2. Add items for sale\n3. Purchase items\n4. Search items \n5. Edit items\n6. Exit')
    choice = int(input('Enter the number of your choice : '))
    if choice == 1 :
        print('------------------View Items------------------')
        print('The number of items in the inventory is : %d ' %len(items))
        if len(items) != 0:
            print('Here are all the items available in the supermarket.')
            for item in items:
                for key, value in item.items():
                    print("%s : %s " %(key, value))
    elif choice == 2 :
        print('------------------Add items------------------')
        print('To add an item fill in the form')
        item = {}
        item['name'] = input('Item name : ')
        while True:
            try:
                item['quantity'] = int(input('Item quantity : '))
                break
            except ValueError:
                print('Quantity should only be in digits')
        while True:
            try:
                item['price'] = int(input('Price $ : '))
                break
            except ValueError:
                print('Price should only be in digits')
        print('Item has been successfully added.')
        items.append(item)
    elif choice == 3 :
        print('------------------purchase items------------------')
        print(items)
        purchase_item = input('which item do you want to purchase? Enter name : ')
        purchase_quantity = int(input('Enter the quantity wanted : '))
        for item in items:
            if purchase_item.lower() == item['name'].lower() :
                if item['quantity'] != 0 :
                    if purchase_quantity <= item['quantity']:
                        print('Pay %d at checkout counter.' %(item['price']* purchase_quantity))
                        item['quantity'] -= purchase_quantity
                    else:
                        print("Quantity required is not available")
                else:
                    print('item out of stock.')
    elif choice == 4 :
        print('------------------search items------------------')
        find_item = input('Enter the items name to search in inventory : ')
        for item in items:
            if item['name'].lower() == find_item.lower():
                print('The item named ' + find_item + ' is displayed below with its details')
                print(item)
                break
        else:
            # for-else: runs only when no matching item was found
            print('item not found.')
    elif choice == 5 :
        print('------------------edit items------------------')
        item_name = input('Enter the name of the item that you want to edit : ')
        for item in items:
            if item_name.lower() == item['name'].lower():
                print('Here are the current details of ' + item_name)
                print(item)
                item['name'] = input('Item name : ')
                while True:
                    try:
                        item['quantity'] = int(input('Item quantity : '))
                        break
                    except ValueError:
                        print('Quantity should only be in digits')
                while True:
                    try:
                        item['price'] = int(input('Price $ : '))
                        break
                    except ValueError:
                        print('Price should only be in digits')
                print('Item has been successfully updated.')
                print(item)
                break
        else:
            # for-else: runs only when no matching item was found
            print('Item not found')
    elif choice == 6 :
        print('------------------exited------------------')
        break
    else:
        print('You entered an invalid option')
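# Illustrative session (made-up values): choosing option 2 and entering
# name "rice", quantity 10, price 3 makes option 1 print
#   name : rice
#   quantity : 10
#   price : 3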
| 40.322917 | 110 | 0.453888 | 386 | 3,871 | 4.520725 | 0.261658 | 0.045845 | 0.029799 | 0.032092 | 0.358739 | 0.34957 | 0.34957 | 0.34957 | 0.316332 | 0.270487 | 0 | 0.005773 | 0.373547 | 3,871 | 95 | 111 | 40.747368 | 0.713814 | 0.01705 | 0 | 0.470588 | 0 | 0.011765 | 0.353838 | 0.085699 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.352941 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8df2385af3f40cba7575f1c6c89a8a66c7739db0 | 23,993 | py | Python | vcm/downloader/link.py | sralloza/vcm | 9906bc5ad286754a1e71f69f0f7ca43ad1152a6f | [
"MIT"
] | 1 | 2020-06-18T21:11:57.000Z | 2020-06-18T21:11:57.000Z | vcm/downloader/link.py | sralloza/vcm | 9906bc5ad286754a1e71f69f0f7ca43ad1152a6f | [
"MIT"
] | 87 | 2019-09-30T08:28:00.000Z | 2020-12-03T08:39:57.000Z | vcm/downloader/link.py | sralloza/vcm | 9906bc5ad286754a1e71f69f0f7ca43ad1152a6f | [
"MIT"
] | null | null | null | """Contains the links that can be downloaded."""
from hashlib import sha1
import logging
import os
from pathlib import Path
import re
import warnings
from bs4 import BeautifulSoup
from requests import Response
import unidecode
from vcm.core.exceptions import AlgorithmFailureError, MoodleError, ResponseError
from vcm.core.modules import Modules
from vcm.core.networking import Connection
from vcm.core.results import Results
from vcm.core.utils import Patterns, save_crash_context, secure_filename
from vcm.settings import settings
from .alias import Alias
from .filecache import REAL_FILE_CACHE
class _Notify:
    NOTIFY = False

    @property
    def notify(self):
        return self.NOTIFY
class BaseLink(_Notify):
    """Base class for Links."""

    def __init__(self, name, section, url, icon_url, subject, parent=None):
        """
        Args:
            name (str): name of the url.
            url (str): URL of the url.
            icon_url (str or None): URL of the icon.
            subject (vcm.subject.Subject): subject of the url.
            parent (BaseLink): object that created self.
        """
        self.name = name.strip()
        self.section = section
        self.url = url
        self.icon_url = icon_url
        self.subject = subject
        self.connection = Connection()
        self.parent = parent

        self.response: Response = None
        self.soup: BeautifulSoup = None
        self.filepath: Path = None
        self.redirect_url = None
        self.response_name = None
        self.subfolders = []

        self.logger = logging.getLogger(__name__)
        self.logger.debug(
            "Created %s(name=%r, url=%r, subject=%r)",
            self.__class__.__name__,
            self.name,
            self.url,
            self.subject.name,
        )
    @property
    def content_disposition(self):
        if self.response is None:
            raise RuntimeError("Response not made yet")
        return unidecode.unidecode(self.response.headers["Content-Disposition"])

    def append_subfolder(self, dirname):
        dirname = secure_filename(dirname)
        return self.subfolders.append(dirname)

    def insert_subfolder(self, index, dirname):
        dirname = secure_filename(dirname)
        return self.subfolders.insert(index, dirname)
    def create_subfolder(self):
        """Creates the subfolder, if it is configured."""
        self.create_subject_folder()

        if not self.filepath:
            self.autoset_filepath()

        folder: Path = self.filepath.parent
        if not folder.exists():
            os.makedirs(folder.as_posix(), exist_ok=True)
            self.logger.debug("Created subfolder %r", folder.as_posix())
        else:
            self.logger.debug("Subfolder already exists %r", folder.as_posix())
    @staticmethod
    def _process_filename(filepath: str):
        """Removes some characters from the filename that cannot appear in a filepath.

        Args:
            filepath (str): filepath to process.

        Returns:
            str: filepath processed.
        """
        filepath = filepath.replace(">", " mayor que ")
        filepath = filepath.replace("<", " menor que ")
        return filepath

    @staticmethod
    def _filename_to_ext(filename):
        """Returns the extension given a filename."""
        return Path(filename).suffix[1:]
    def _get_ext_from_response(self):
        """Returns the extension of the filename of the response, obtained from
        the Content-Disposition HTTP header.

        Returns:
            str: the extension.
        """
        if self.response_name is not None:
            return self._filename_to_ext(self.response_name)

        try:
            # unidecode.unidecode is used to remove accents.
            self.response_name = Patterns.FILENAME.search(
                self.content_disposition
            ).group(1)
            extension = self._filename_to_ext(self.response_name)
            if extension:
                return extension
        except KeyError:
            pass

        self.response_name = Path(self.url).name
        extension = self._filename_to_ext(self.response_name)
        if extension:
            return extension

        return self.content_type.split("/")[-1]
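    # Illustrative fallback chain (made-up values): with Content-Disposition
    # 'attachment; filename="tema1.pdf"' the extension is "pdf"; without that
    # header, a URL ending in ".../notes.docx" gives "docx"; failing both, a
    # Content-Type of "application/pdf" yields "pdf".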
    def create_subject_folder(self):
        """Creates the subject's principal folder."""
        return self.subject.create_folder()

    def make_request(self):
        """Makes the request for the Link."""
        self.logger.debug("Making request")

        self.response = self.connection.get(self.redirect_url or self.url)
        self.logger.debug(
            "Response obtained [%d | %s]", self.response.status_code, self.content_type
        )

        if 500 <= self.response.status_code <= 599:
            raise MoodleError(f"Moodle server replied with {self.response.status_code}")

        if self.response.status_code == 408:
            self.logger.warning("Received response with code 408, retrying")
            return self.make_request()

        if not self.response.ok:
            raise ResponseError(f"Got HTTP {self.response.status_code}")
    def close_connection(self):
        warnings.warn(
            "Since streams are not used, this method should not be called",
            DeprecationWarning,
        )
        self.logger.debug("Closing connection")
        self.response.close()

    def process_request_bs4(self):
        """Parses the response with BeautifulSoup with the html parser."""
        self.logger.debug("Parsing response (bs4)")
        self.soup = BeautifulSoup(self.response.text, "html.parser")
        self.logger.debug("Response parsed (bs4)")
    def autoset_filepath(self):
        """Determines the filepath of the Link."""
        if self.filepath is not None:
            self.logger.debug("Filepath is set, skipping (%s)", self.filepath)
            return

        if self.response is None:
            raise RuntimeError("Request not launched")

        filename = secure_filename(
            self._process_filename(self.name) + "." + self._get_ext_from_response()
        )
        self.logger.debug("Initial filename: %s", filename)

        temp_filepath = self.subject.folder
        if self.subfolders:
            # joinpath returns a new Path; the result must be kept (the
            # original discarded it, so subfolders were silently ignored).
            temp_filepath = temp_filepath.joinpath(*self.subfolders)
        if self.section:
            temp_filepath /= self.section.name
        temp_filepath /= filename

        try:
            folder_id = self.id
        except AttributeError:
            folder_id = None

        self.filepath = Path(
            Alias.id_to_alias(
                sha1(self.url.encode()).hexdigest(), temp_filepath.as_posix(), folder_id
            )
        )
        self.logger.debug("Set filepath: %r", self.filepath.as_posix())
    def download(self):
        """Wrapper for self.do_download()."""
        try:
            self.do_download()
        finally:
            self.response = None
            self.soup = None

    def do_download(self):
        """Abstract method to download the Link. Must be overridden by subclasses."""
        self.logger.debug("Called do_download() but it was not implemented")
        raise NotImplementedError

    def get_header_length(self):
        try:
            return int(self.response.headers["Content-Length"])
        except KeyError:
            return len(self.response.content)
    @property
    def content_type(self):
        if "Content-Type" in self.response.headers:
            return self.response.headers["Content-Type"]
        return None
    def save_response_content(self):
        """Saves the response content to the disk."""
        if self.filepath is None:
            self.autoset_filepath()

        if Modules.current() == Modules.notify:
            return

        self.create_subfolder()

        self.logger.debug(
            "filepath in REAL_FILE_CACHE: %s", self.filepath in REAL_FILE_CACHE
        )
        if self.filepath in REAL_FILE_CACHE:
            if REAL_FILE_CACHE[self.filepath] == self.get_header_length():
                self.logger.debug(
                    "File found in cache: Same content (%d)", len(self.response.content)
                )
                return

            self.logger.debug(
                "File found in cache: Different content (%d --> %d)",
                REAL_FILE_CACHE[self.filepath],
                len(self.response.content),
            )
            Results.print_updated(self.filepath)
        else:
            self.logger.debug(
                "File added to cache: %s [%d]",
                self.filepath,
                len(self.response.content),
            )
            REAL_FILE_CACHE[self.filepath] = len(self.response.content)
            Results.print_new(self.filepath)

        try:
            with self.filepath.open("wb") as file_handler:
                file_handler.write(self.response.content)
            self.logger.debug("File downloaded and saved: %s", self.filepath)
        except PermissionError:
            self.logger.warning(
                "File couldn't be downloaded due to permission error: %s",
                self.filepath.name,
            )
            self.logger.warning(
                "Permission error %s -- %s", self.subject.name, self.filepath.name
            )
    @staticmethod
    def ensure_origin(url: str) -> bool:
        """Returns True if the origin is the virtual campus."""
        return "uva.es" in url
class Resource(BaseLink):
    """Representation of a resource."""

    NOTIFY = True

    def __init__(self, name, section, url, icon_url, subject, parent=None):
        super().__init__(name, section, url, icon_url, subject, parent)
        self.resource_type = "unknown"

    def set_resource_type(self, new):
        """Sets a new resource type.

        Args:
            new (str): new resource type.
        """
        self.logger.debug("Set resource type: %r", new)
        self.resource_type = new

        if self.resource_type == "html":
            self.process_request_bs4()
    # Content-Type fragments mapped to resource types, checked in order
    # (more specific fragments before looser ones, preserving the priority of
    # the original if/elif chain this table replaces).
    _CONTENT_TYPE_MAP = [
        ("application/pdf", "pdf"),
        ("officedocument.wordprocessingml.document", "word"),
        ("officedocument.spreadsheetml.sheet", "excel"),
        ("excel", "excel"),
        ("officedocument.presentationml.slideshow", "power-point"),
        ("presentationml.presentation", "power-point"),
        ("powerpoint", "power-point"),
        ("msword", "word"),
        ("application/zip", "zip"),
        ("application/g-zip", "gzip"),
        ("application/x-7z-compressed", "7zip"),
        ("x-rar-compressed", "rar"),
        ("text/plain", "plain"),
        ("application/json", "json"),
        ("application/octet-stream", "octect-stream"),
        ("image/jpeg", "jpeg"),
        ("image/png", "png"),
        ("video/mp4", "mp4"),
        ("video/x-ms-wm", "avi"),
    ]

    def do_download(self):
        """Downloads the resource."""
        self.logger.debug("Downloading resource %r", self.name)

        url = self.redirect_url or self.url
        if not self.ensure_origin(url):
            self.logger.warning(
                "Permission denied: URL is outside of campusvirtual.uva.es"
            )
            return

        self.make_request()

        if self.response.status_code == 404:
            self.logger.error("status code of 404 in url %r [%r]", self.url, self.name)
            return None

        for fragment, resource_type in self._CONTENT_TYPE_MAP:
            if fragment in self.content_type:
                self.set_resource_type(resource_type)
                return self.save_response_content()

        if "text/html" in self.content_type:
            self.set_resource_type("html")
            self.logger.debug(
                "Created forum discussion from forum list: %r, %s",
                self.name,
                self.url,
            )
            self.subject.add_link(
                Html(
                    self.name, self.section, self.url, self.icon_url, self.subject, self
                )
            )
            return

        if self.response.status_code % 300 < 100:
            self.url = self.response.headers["Location"]
            self.logger.warning("Redirecting to %r", self.url)
            return self.download()

        self.logger.error(
            "Content not identified: %r (code=%s, header=%r)",
            self.url,
            self.response.status_code,
            self.response.headers,
        )
        return None
class Folder(BaseLink):
    """Representation of a folder."""

    NOTIFY = True

    def __init__(self, name, section, url, icon_url, subject, id_, parent=None):
        super().__init__(name, section, url, icon_url, subject, parent)
        self.id = id_

    def make_request(self):
        """Makes the request for the Link."""
        self.logger.debug("Making request")

        data = {"id": self.id, "sesskey": self.connection.sesskey}
        self.response = self.connection.post(self.url, data=data)
        self.logger.debug("Response obtained [%d]", self.response.status_code)

    def do_download(self):
        """Downloads the folder."""
        self.logger.debug("Downloading folder %r", self.name)
        self.make_request()
        self.save_response_content()
class BaseForum(BaseLink):
    """Representation of a Forum link."""

    BASE_DIR = "foros"

    def do_download(self):
        """Downloads the resources found in the forum hierarchy."""
        raise NotImplementedError
class ForumList(BaseForum):
    def do_download(self):
        self.logger.debug("Downloading forum list %r", self.name)
        self.make_request()
        self.process_request_bs4()

        themes = self.soup.findAll("td", {"class": "topic starter"})

        for theme in themes:
            forum = ForumDiscussion(
                theme.text,
                self.section,
                theme.a["href"],
                self.icon_url,
                self.subject,
                self,
            )
            self.logger.debug(
                "Created forum discussion from forum list: %r, %s",
                forum.name,
                forum.url,
            )
            self.subject.add_link(forum)
class ForumDiscussion(BaseForum):
    # NOTIFY = True

    def do_download(self):
        self.logger.debug("Downloading forum discussion %r", self.name)
        self.make_request()
        self.process_request_bs4()

        attachments = self.soup.findAll("div", {"class": "attachments"})
        images = self.soup.findAll("div", {"class": "attachedimages"})

        for attachment in attachments:
            try:
                resource = Resource(
                    Path(attachment.text).stem,
                    self.section,
                    attachment.a["href"],
                    attachment.a.img["src"],
                    self.subject,
                    self,
                )
                resource.subfolders = self.subfolders

                self.logger.debug(
                    "Created resource from forum: %r, %s", resource.name, resource.url
                )
                self.subject.add_link(resource)
            except TypeError:
                pass

        for image_container in images:
            real_images = image_container.findAll("img")
            for image in real_images:
                try:
                    url = image["href"]
                except KeyError:
                    url = image["src"]

                resource = Image(
                    Path(url).stem, self.section, url, None, self.subject, self
                )
                resource.subfolders = self.subfolders

                self.logger.debug(
                    "Created resource (image) from forum: %r, %s",
                    resource.name,
                    resource.url,
                )
                self.subject.add_link(resource)
class Delivery(BaseLink):
    """Representation of a delivery link."""

    NOTIFY = True

    def do_download(self):
        """Downloads the resources found in the delivery."""
        self.logger.debug("Downloading delivery %r", self.name)
        self.make_request()
        self.process_request_bs4()

        links = []
        containers = self.soup.findAll("a", {"target": "_blank"})

        for container in containers:
            url = container["href"]
            if self.ensure_origin(url):
                icon_url = container.parent.img["src"]
                valid = True
            else:
                icon_url = self.icon_url
                valid = False  # `valid` is currently unused

            resource = Resource(
                Path(container.text).stem,
                self.section,
                container["href"],
                icon_url,
                self.subject,
                self,
            )
            resource.subfolders = self.subfolders

            self.logger.debug(
                "Created resource from delivery: %r, %s", resource.name, resource.url
            )
            links.append(resource)

        names = [link.name for link in links]
        dupes = {x for x in names if names.count(x) > 1}
        dupes_counters = {x: 1 for x in dupes}

        if dupes:
            for i, _ in enumerate(links):
                if links[i].name in dupes:
                    name = links[i].name
                    links[i].name += "_" + str(dupes_counters[name])
                    dupes_counters[name] += 1
                    self.logger.debug("Changed name %r -> %r", name, links[i].name)

        for link in links:
            self.subject.add_link(link)
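    # Illustrative dedup behaviour: three attachments that all stem to
    # "informe" would be renamed informe_1, informe_2 and informe_3.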
class BaseUndownloableLink(BaseLink):
    """Represents a link which can not be downloaded."""

    def do_download(self):
        """Doesn't do anything, because this is an unparseable link."""
        class_name = type(self).__name__.lower()
        self.logger.debug("Downloading %s %r", class_name, self.name)
        self.logger.info("%s links are unparseable.", class_name.title())
class Chat(BaseUndownloableLink):
    """Representation of a chat link."""

    NOTIFY = True


class Page(BaseUndownloableLink):
    """Representation of a page link."""

    NOTIFY = True


class Url(BaseUndownloableLink):
    """Representation of an url link."""

    NOTIFY = True


class Kalvidres(BaseUndownloableLink):
    """Representation of a kalvidres link.

    A Kalvidres is some kind of video, but it can't be downloaded yet due to lack of I+D.
    """

    NOTIFY = True


class Quiz(BaseUndownloableLink):
    """Representation of a quiz link."""

    NOTIFY = True


class BlackBoard(BaseUndownloableLink):
    """Representation of a blackboard link.

    A blackboard is a link to a VoIP chat.
    """

    NOTIFY = True
class Html(BaseLink):
    def do_download(self):
        """Downloads the resources found in a html web page."""
        self.logger.debug("Downloading html %r", self.name)
        self.make_request()
        self.process_request_bs4()
        self.logger.debug("Parsing HTML (%r)", self.url)

        try:
            name = self.soup.find("div", {"role": "main"}).h2.text
        except AttributeError:
            # Check if it is a weird page
            if self.soup.find("applet"):
                self.logger.debug("Identified as weird page without content, skipping")
                return
            raise

        return self.try_algorithms(name)
    def try_algorithms(self, name):
        # Call the actual algorithms, sorted by method name
        # (check_algorithm_1, check_algorithm_2, ...)
        algorithms = [x for x in dir(self) if x.startswith("check_algorithm")]
        algorithms = [getattr(self, x) for x in algorithms]
        algorithms.sort(key=lambda x: x.__name__)

        for algorithm in algorithms:
            resource = algorithm(name)
            if resource:
                break
        else:
            # If not parsed:
            return self.handle_algorithm_failure()

        # If everything ok:
        self.logger.debug(
            "Created resource from HTML: %r, %s", resource.name, resource.url
        )
        self.subject.add_link(resource)
        return
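    # Note on ordering (derived from try_algorithms above): the fallback chain
    # follows the lexicographic order of the method names, so a hypothetical
    # check_algorithm_0 added later would run before all of the existing ones.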
    def check_algorithm_1(self, name):
        try:
            resource = self.soup.find("object", {"id": "resourceobject"})
            assert resource
            return Resource(
                name, self.section, resource["data"], self.icon_url, self.subject, self
            )
        except AssertionError:
            return None

    def check_algorithm_2(self, name):
        try:
            resource = self.soup.find("iframe", {"id": "resourceobject"})
            assert resource
            return Resource(
                name, self.section, resource["src"], self.icon_url, self.subject, self
            )
        except AssertionError:
            return None

    def check_algorithm_3(self, name):
        try:
            container = self.soup.find("div", {"class": "resourceworkaround"})
            return Resource(
                name,
                self.section,
                container.a["href"],
                self.icon_url,
                self.subject,
                self,
            )
        except AttributeError:
            return None

    def check_algorithm_4(self, name):
        try:
            resource = self.soup.find("div", class_="resourcecontent resourceimg")
            assert resource
            return Resource(
                name,
                self.section,
                resource.img["src"],
                self.icon_url,
                self.subject,
                self,
            )
        except AssertionError:
            return None

    def handle_algorithm_failure(self):
        self.logger.error("HTML ALGORITHM FAILURE")
        save_crash_context(
            self.response,
            "html-algorithm-failure",
            "html algorithm failure",
        )
        raise AlgorithmFailureError
class Image(BaseLink):
    def do_download(self):
        self.make_request()

        match = re.search(r"image/(\w+)", self.content_type)
        if not match:
            raise RuntimeError

        image_type = match.group(1)
        self.logger.debug("Identified image as %r", image_type)
        self.icon_url = "https://campusvirtual.uva.es/invalid/f/" + image_type

        return self.save_response_content()
| 30.681586 | 98 | 0.578919 | 2,654 | 23,993 | 5.100603 | 0.148078 | 0.03472 | 0.040999 | 0.025116 | 0.365812 | 0.3175 | 0.288543 | 0.225678 | 0.182611 | 0.159637 | 0 | 0.003133 | 0.321635 | 23,993 | 781 | 99 | 30.720871 | 0.828582 | 0.08415 | 0 | 0.339518 | 0 | 0 | 0.115536 | 0.012349 | 0 | 0 | 0 | 0 | 0.011132 | 1 | 0.072356 | false | 0.003711 | 0.03154 | 0.001855 | 0.25974 | 0.003711 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8df2655a19e924bcd99a2911fc97761b628649f1 | 7,472 | py | Python | define_simple_tracker.py | Erotemic/misc | 6f8460a690d05e7e0117becc6cae9902cbe2cedd | [
"Apache-2.0"
] | 5 | 2021-04-29T21:07:18.000Z | 2021-09-29T08:46:08.000Z | define_simple_tracker.py | Erotemic/misc | 6f8460a690d05e7e0117becc6cae9902cbe2cedd | [
"Apache-2.0"
] | null | null | null | define_simple_tracker.py | Erotemic/misc | 6f8460a690d05e7e0117becc6cae9902cbe2cedd | [
"Apache-2.0"
] | 1 | 2018-04-07T12:26:21.000Z | 2018-04-07T12:26:21.000Z |
def main():
    import pipedef
    pipe = pipedef.Pipeline()

    # ============================== GLOBAL PROPERTIES =================================
    # global pipeline config
    pipe.config = {
        '_pipeline:_edge': {'capacity': 5},
    }

    # ============================== INPUT FRAME LIST ==================================
    input = pipe.add_process(name='input', type='frame_list_input', config={
        'image_reader:type': 'vxl',
        'image_list_file': 'input_list.txt',
        'frame_time': 0.03333,
    })
    input.iports.define()
    input.oports.define('image', 'timestamp', 'image_file_name')
    # ================================== DETECTOR ======================================
    detector = pipe.add_process(name='detector', type='image_object_detector', config={
        'detector:type': 'darknet',
        # Network config
        ':detector:darknet:net_config': '../detector_pipelines/models/model2.cfg',
        ':detector:darknet:weight_file': '../detector_pipelines/models/model2.weights',
        ':detector:darknet:class_names': '../detector_pipelines/models/scallop_and_fish.lbl',
        # Detector parameters
        ':detector:darknet:thresh': 0.001,
        ':detector:darknet:hier_thresh': 0.001,
        ':detector:darknet:gpu_index': 0,
        # Image scaling parameters
        ':detector:darknet:resize_option': 'maintain_ar',
        ':detector:darknet:resize_ni': 544,
        ':detector:darknet:resize_nj': 544,
        ':detector:darknet:scale': 1.0,
    })
    detector.iports.define('image')
    detector.oports.define('detected_object_set')

    detector_writer = pipe.add_process(name='detector_writer', type='detected_object_output', config={
        # Type of file to output
        ':file_name': 'output/individual_detections.kw18',
        ':writer:type': 'kw18',
        # Write out FSO classifications alongside tracks
        ':writer:kw18:write_tot': True,
        ':writer:kw18:tot_field1_ids': 'fish',
        ':writer:kw18:tot_field2_ids': 'scallop',
    })
    detector_writer.iports.define('detected_object_set', 'image_file_name')
    detector_writer.oports.define()

    input.oports.connect({
        'image': detector.iports['image'],
        'image_file_name': detector_writer.iports['image_file_name'],
    })
    detector.oports.connect({
        'detected_object_set': detector_writer.iports['detected_object_set'],
    })
    # Note these other alternative ways of creating edges
    # input.oports['image'].connect(detector.iports['image'])
    # input.oports['image'].connect(detector_writer.iports['image_file_name'])
    # detector.oports['detected_object_set'].connect(detector_writer.iports['detected_object_set'])
    # input.oports['image'].connect(detector.iports['image'])  # closer to syntax of a .pipe file
    # input.oports.connect({'image': detector.iports['image']})  # closer to syntax of a .pipe file
    # detector.iports.connect(**input.oports)  # can use if input and output ports share names
    # detector.iports.connect({'image': input.oports['image']})  # closer to the syntax of a function call
    # ================================ CORE TRACKER ===================================
    detection_descriptor = pipe.add_process(name='detection_descriptor', type='compute_track_descriptors')
    detection_descriptor.config = {
        ':inject_to_detections': True,
        ':computer:type': 'burnout',
        ':computer:burnout:config_file': 'detection_descriptors.conf',
    }
    detection_descriptor.iports.define('image', 'timestamp', 'detected_object_set')
    detection_descriptor.oports.define('detected_object_set')

    tracker = pipe.add_process(name='tracker', type='compute_association_matrix')
    tracker.config = '''
      :matrix_generator:type                       from_features
      :matrix_generator:from_features:max_distance 40

      block matrix_generator:from_features:filter
        :type                                      class_probablity_filter
        :class_probablity_filter:threshold         0.001
        :class_probablity_filter:keep_all_classes  false
        :class_probablity_filter:keep_classes      fish;scallop
      endblock
    '''
    tracker.iports.define('image', 'timestamp', 'detected_object_set', 'object_track_set')
    tracker.oports.define('matrix_d', 'object_track_set', 'detected_object_set')

    track_associator = pipe.add_process(name='track_associator', type='associate_detections_to_tracks')
    track_associator.config = '''
      :track_associator:type                       threshold
      :track_associator:threshold:threshold        100.0
      :track_associator:threshold:higher_is_better false
    '''
    track_associator.iports.define('image', 'timestamp', 'matrix_d', 'object_track_set', 'detected_object_set')
    track_associator.oports.define('object_track_set', 'unused_detections')

    track_initializer = pipe.add_process(name='track_initializer', type='initialize_object_tracks')
    track_initializer.config = '''
      :track_initializer:type                      threshold

      block track_initializer:threshold:filter
        :type                                      class_probablity_filter
        :class_probablity_filter:threshold         0.001
        :class_probablity_filter:keep_all_classes  false
        :class_probablity_filter:keep_classes      fish;scallop
      endblock
    '''
    track_initializer.iports.define('image', 'timestamp', 'object_track_set', 'detected_object_set')
    track_initializer.oports.define('object_track_set')
    # To use the star notation the input ports and output ports must have the
    # same name. Currently you must also define the ports. Eventually we might
    # read them from sprokit.

    # Connect inputs to detection descriptor
    detection_descriptor.iports.connect(**input.oports, **detector.oports)

    # Connect inputs to tracker
    tracker.iports.connect(**input.oports, **detection_descriptor.oports, **track_initializer.oports)

    # Connect inputs to track_associator
    track_associator.iports.connect(**input.oports, **tracker.oports)

    # Connect inputs to track_initializer
    track_initializer.iports.connect(
        detected_object_set=track_associator.oports['unused_detections'],
        **input.oports, **track_associator.oports)
    # ================================= INDEX DATA ====================================
    track_writer = pipe.add_process(name='track_writer', type='write_object_track')
    track_writer.iports.define('object_track_set')
    track_writer.config = '''
      :file_name   output_tracks.kw18
      :writer:type kw18
    '''

    # Connect inputs to track writer
    track_writer.iports.connect(**track_initializer.oports)
    return pipe
if __name__ == '__main__':
    r"""
    CommandLine:
        source ~/code/VIAME/build/install/setup_viame.sh
        cd /home/joncrall/code/VIAME/examples/tracking_pipelines
        ~/code/VIAME/build/install/bin/pipe_to_dot -p simple_tracker.pipe -o g.dot
        dot -Tpng g.dot > g.png

        python ~/code/VIAME/examples/tracking_pipelines/define_simple_tracker.py
    """
    pipe = main()
    pipe.write('auto_simple_tracker.pipe')
    pipe.draw_graph('pipeline.png')
    import ubelt as ub
    ub.startfile('pipeline.png')
| 43.44186 | 111 | 0.636777 | 801 | 7,472 | 5.682896 | 0.235955 | 0.043058 | 0.04855 | 0.031634 | 0.321837 | 0.213093 | 0.18717 | 0.13181 | 0.11116 | 0.11116 | 0 | 0.009469 | 0.208512 | 7,472 | 171 | 112 | 43.695906 | 0.76023 | 0.207441 | 0 | 0.179245 | 0 | 0 | 0.48917 | 0.233755 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009434 | false | 0 | 0.018868 | 0 | 0.037736 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8df37da869fe9f71df32fa2732b4a908910d89ba | 1,918 | py | Python | Model/app.py | The-SocialLion/Lower-Back-Pain-Analysis-using-Machine-learning | 87b8dd0fb7d58cafe96f9bdef09961750384b098 | [
"Apache-2.0"
] | null | null | null | Model/app.py | The-SocialLion/Lower-Back-Pain-Analysis-using-Machine-learning | 87b8dd0fb7d58cafe96f9bdef09961750384b098 | [
"Apache-2.0"
] | null | null | null | Model/app.py | The-SocialLion/Lower-Back-Pain-Analysis-using-Machine-learning | 87b8dd0fb7d58cafe96f9bdef09961750384b098 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, request, render_template
import pandas as pd
import joblib
# Declare a Flask app
app = Flask(__name__)
def model_predict(i):
if i==1:
return "Normal"
else:
return "Abnormal"
@app.route('/', methods=['GET', 'POST'])
# Main function here
def main():
# If a form is submitted
if request.method == "POST": # Displaying Result based on values retrieved from Get (Front End)
# Unpickle classifier
gbc = joblib.load("gbc.pkl")
# Get values through input bars
pelvic_incidence = request.form.get("Pelvic_incidence")
pelvic_tilt = request.form.get("Pelvic_tilt")
lumbar_lordosis_angle = request.form.get("Lumbar_Lordosis_Angle")
sacral_slope = request.form.get("Sacral_slope")
pelvic_radius = request.form.get("Pelvic_radius")
degree_spondylolisthesis = request.form.get("Degree_spondylolisthesis")
pelvic_slope = request.form.get("Pelvic_slope")
Direct_tilt = request.form.get("Direct_tilt")
thoracic_slope = request.form.get("Thoracic_slope")
cervical_tilt = request.form.get("Cervical_tilt")
sacrum_angle = request.form.get("Sacrum_angle")
scoliosis_slope = request.form.get("Scoliosis_slope")
# Put inputs to dataframe
X = pd.DataFrame([[pelvic_incidence,pelvic_tilt,lumbar_lordosis_angle,sacral_slope,pelvic_radius,degree_spondylolisthesis,pelvic_slope,Direct_tilt,thoracic_slope,cervical_tilt,sacrum_angle,scoliosis_slope]], columns = ["Col1", "Col2","Col3","Col4","Col5","Col6","Col7","Col8","Col9","Col10","Col11","Col12"])
# Get prediction
prediction = model_predict(gbc.predict(X)[0])
else:
prediction = ""
return render_template("website.html", output = prediction)
# Running the app
if __name__ == '__main__':
app.run(debug = True)
| 38.36 | 316 | 0.67049 | 232 | 1,918 | 5.306034 | 0.409483 | 0.10723 | 0.136474 | 0.064988 | 0.080422 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011221 | 0.210115 | 1,918 | 49 | 317 | 39.142857 | 0.80132 | 0.120438 | 0 | 0.0625 | 0 | 0 | 0.165772 | 0.026834 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.09375 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8df499879e2e02643bddc0414a78f011539ed93e | 424 | py | Python | py/tests/leetcode/str/find_and_replace_test.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | 1 | 2020-06-26T13:28:43.000Z | 2020-06-26T13:28:43.000Z | py/tests/leetcode/str/find_and_replace_test.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | 7 | 2021-11-18T19:46:08.000Z | 2022-03-12T01:03:01.000Z | py/tests/leetcode/str/find_and_replace_test.py | bmoretz/Daily-Coding-Problem | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | [
"MIT"
] | null | null | null | import unittest
from dcp.leetcode.str.find_and_replace import findReplaceString
class Test_FindAndReplace(unittest.TestCase):
def test_case1(self):
str_in = "jjievdtjfb"
indexes, sources, targets = [4,6,1], ["md","tjgb","jf"], ["foe","oov","e"]
actual = findReplaceString(str_in, indexes, sources, targets)
expected = "jjievdtjfb"
assert actual == expected | 24.941176 | 82 | 0.639151 | 47 | 424 | 5.638298 | 0.723404 | 0.037736 | 0.158491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012346 | 0.235849 | 424 | 17 | 83 | 24.941176 | 0.805556 | 0 | 0 | 0 | 0 | 0 | 0.082353 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8df4c2e64e992b855a9e6d5e81533c702e3e464a | 5,606 | py | Python | tests/test_sagemaker/cloudformation_test_configs.py | zachurchill-root/moto | f6dda54a6c744938a1863720c71acc192ac73cf9 | [
"Apache-2.0"
] | null | null | null | tests/test_sagemaker/cloudformation_test_configs.py | zachurchill-root/moto | f6dda54a6c744938a1863720c71acc192ac73cf9 | [
"Apache-2.0"
] | null | null | null | tests/test_sagemaker/cloudformation_test_configs.py | zachurchill-root/moto | f6dda54a6c744938a1863720c71acc192ac73cf9 | [
"Apache-2.0"
] | null | null | null | import json
from abc import ABCMeta, abstractmethod
import six
from moto.sts.models import ACCOUNT_ID
@six.add_metaclass(ABCMeta)
class TestConfig:
"""Provides the interface to use for creating test configurations.
This class will provide the interface for what information will be
needed for the SageMaker CloudFormation tests. Ultimately, this will
improve the readability of the tests in `test_sagemaker_cloudformation.py`
because it will reduce the amount of information we pass through the
`pytest.mark.parametrize` decorator.
"""
@property
@abstractmethod
def resource_name(self):
pass
@property
@abstractmethod
def describe_function_name(self):
pass
@property
@abstractmethod
def name_parameter(self):
pass
@property
@abstractmethod
def arn_parameter(self):
pass
@abstractmethod
def get_cloudformation_template(self, include_outputs=True, **kwargs):
pass
class NotebookInstanceTestConfig(TestConfig):
"""Test configuration for SageMaker Notebook Instances."""
@property
def resource_name(self):
return "TestNotebook"
@property
def describe_function_name(self):
return "describe_notebook_instance"
@property
def name_parameter(self):
return "NotebookInstanceName"
@property
def arn_parameter(self):
return "NotebookInstanceArn"
def get_cloudformation_template(self, include_outputs=True, **kwargs):
instance_type = kwargs.get("instance_type", "ml.c4.xlarge")
role_arn = kwargs.get(
"role_arn", "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID)
)
template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
self.resource_name: {
"Type": "AWS::SageMaker::NotebookInstance",
"Properties": {"InstanceType": instance_type, "RoleArn": role_arn},
},
},
}
if include_outputs:
template["Outputs"] = {
"Arn": {"Value": {"Ref": self.resource_name}},
"Name": {
"Value": {
"Fn::GetAtt": [self.resource_name, "NotebookInstanceName"]
}
},
}
return json.dumps(template)
class NotebookInstanceLifecycleConfigTestConfig(TestConfig):
"""Test configuration for SageMaker Notebook Instance Lifecycle Configs."""
@property
def resource_name(self):
return "TestNotebookLifecycleConfig"
@property
def describe_function_name(self):
return "describe_notebook_instance_lifecycle_config"
@property
def name_parameter(self):
return "NotebookInstanceLifecycleConfigName"
@property
def arn_parameter(self):
return "NotebookInstanceLifecycleConfigArn"
def get_cloudformation_template(self, include_outputs=True, **kwargs):
on_create = kwargs.get("on_create")
on_start = kwargs.get("on_start")
template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
self.resource_name: {
"Type": "AWS::SageMaker::NotebookInstanceLifecycleConfig",
"Properties": {},
},
},
}
if on_create is not None:
template["Resources"][self.resource_name]["Properties"]["OnCreate"] = [
{"Content": on_create}
]
if on_start is not None:
template["Resources"][self.resource_name]["Properties"]["OnStart"] = [
{"Content": on_start}
]
if include_outputs:
template["Outputs"] = {
"Arn": {"Value": {"Ref": self.resource_name}},
"Name": {
"Value": {
"Fn::GetAtt": [
self.resource_name,
"NotebookInstanceLifecycleConfigName",
]
}
},
}
return json.dumps(template)
class ModelTestConfig(TestConfig):
"""Test configuration for SageMaker Models."""
@property
def resource_name(self):
return "TestModel"
@property
def describe_function_name(self):
return "describe_model"
@property
def name_parameter(self):
return "ModelName"
@property
def arn_parameter(self):
return "ModelArn"
def get_cloudformation_template(self, include_outputs=True, **kwargs):
execution_role_arn = kwargs.get(
"execution_role_arn", "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID)
)
image = kwargs.get(
"image", "404615174143.dkr.ecr.us-east-2.amazonaws.com/linear-learner:1"
)
template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
self.resource_name: {
"Type": "AWS::SageMaker::Model",
"Properties": {
"ExecutionRoleArn": execution_role_arn,
"PrimaryContainer": {"Image": image,},
},
},
},
}
if include_outputs:
template["Outputs"] = {
"Arn": {"Value": {"Ref": self.resource_name}},
"Name": {"Value": {"Fn::GetAtt": [self.resource_name, "ModelName"],}},
}
return json.dumps(template)
| 29.197917 | 87 | 0.567428 | 489 | 5,606 | 6.343558 | 0.278119 | 0.058027 | 0.056738 | 0.040297 | 0.554803 | 0.508382 | 0.357511 | 0.357511 | 0.341715 | 0.235977 | 0 | 0.010339 | 0.327149 | 5,606 | 191 | 88 | 29.350785 | 0.812036 | 0.097217 | 0 | 0.506849 | 0 | 0.006849 | 0.202832 | 0.097926 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136986 | false | 0.034247 | 0.027397 | 0.082192 | 0.294521 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8df6d2cc3ef1b38f8dcb2b9e634dc513aa241c09 | 5,748 | py | Python | skytour/skytour/apps/dso/plot.py | ursomniac/skytour | 3320d96d7ca41b8f52ede87e7112477902a37a0e | [
"MIT"
] | null | null | null | skytour/skytour/apps/dso/plot.py | ursomniac/skytour | 3320d96d7ca41b8f52ede87e7112477902a37a0e | [
"MIT"
] | 1 | 2022-03-17T01:19:23.000Z | 2022-03-17T01:19:23.000Z | skytour/skytour/apps/dso/plot.py | ursomniac/skytour | 3320d96d7ca41b8f52ede87e7112477902a37a0e | [
"MIT"
] | null | null | null | import base64
import datetime, pytz
from re import X
import io
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from scipy import spatial
from skyfield.api import load, Star
from skyfield.projections import build_stereographic_projection
from ..astro.angdist import chord_length
from ..astro.transform import get_cartesian
from ..dso.models import DSO
from ..plotting.map import *
from ..solar_system.plot import r2d, d2r
from ..utils.format import to_hm, to_dm
from .finder import plot_dso
def plate_list():
ldec = [90, 75, 60, 45, 30, 15, 0, -15, -30, -45, -60, -75, -90]
lsize = [1, 12, 16, 20, 24, 32, 48, 32, 24, 20, 16, 12, 1]
mul = [0., 2.0, 1.5, 1.2, 1.0, 0.75, 0.5, 0.75, 1.0, 1.2, 1.5, 2.0, 0.]
plate = {}
j = 1
for i in range(len(ldec)):
dec = ldec[i]
if abs(dec) == 90:
plate[j] = (0, dec)
j += 1
continue
ras = [x * mul[i] for x in range(lsize[i])]
for ra in ras:
plate[j] = (ra, dec)
j += 1
return plate
def get_fn(ra, dec, shapes=False):
rah = int(ra)
ram = int(60.*(ra - rah) + 0.00005)
decs = 'N' if dec >= 0.0 else 'S'
d = abs(dec)
dd = int(d)
dm = int(60.*(d - dd) + 0.00005)
x = 'X' if shapes else ''
return f"{rah:02d}{ram:02d}{decs}{dd:02d}{dm:02d}.png"
def get_dsos_on_plate(ra, dec, fov=20):
fudge = 120
dsos = DSO.objects.all()
radius = chord_length(fov, degrees=True) * fudge
coords = []
for other in dsos:
coords.append(other.get_xyz)
center = get_cartesian(ra, dec, ra_dec=True)
tree = spatial.KDTree(coords)
neighbor_list = tree.query_ball_point([center], radius)
neighbor_objects = []
for idx in neighbor_list[[0][0]]:
neighbor_objects.append(dsos[idx])
return neighbor_objects
def create_atlas_plot(
center_ra, center_dec,
reversed=False, mag_limit=9.5,
fov=20, save_file=True,
mag_offset = 0, shapes = False,
label_size = 'x-small',
label_weight = 'normal'
):
ts = load.timescale()
# Datetime is arbitrary
t = ts.from_datetime(datetime.datetime(2022, 1, 1, 0, 0).replace(tzinfo=pytz.utc)) # Arbitrary time
eph = load('de421.bsp')
earth = eph['earth']
zenith = earth.at(t).observe(Star(ra_hours=center_ra, dec_degrees=center_dec))
ra = to_hm(center_ra) # String value
dec = to_dm(center_dec) # string value
# Start up a Matplotlib plot
style = 'dark_background' if reversed else 'default'
plt.style.use(style)
fig, ax = plt.subplots(figsize=[9,9])
# center
projection = build_stereographic_projection(zenith)
angle = np.pi - fov / 360. * np.pi
limit = np.sin(angle) / (1.0 - np.cos(angle))
# NOW PLOT THINGS!
# 1. stars and constellation lines
ax, stars = map_hipparcos(ax, earth, t, mag_limit, projection, reversed=reversed, mag_offset=mag_offset)
ax = map_constellation_lines(ax, stars, reversed=reversed)
ax = map_bright_stars(ax, earth, t, projection, points=False, annotations=True, reversed=reversed)
if shapes:
other_dso_records = DSO.objects.order_by('-major_axis_size')
other_dsos = {'x': [], 'y': [], 'label': [], 'marker': []}
for other in other_dso_records:
x, y = projection(earth.at(t).observe(other.skyfield_object))
if abs(x) > limit or abs(y) > limit:
continue # not on the plot
other_dsos['x'].append(x)
other_dsos['y'].append(y)
other_dsos['label'].append(other.shown_name)
other_dsos['marker'].append(other.object_type.marker_type)
ax = plot_dso(ax, x, y, other, alpha=0.6)
xxx = np.array(other_dsos['x'])
yyy = np.array(other_dsos['y'])
for x, y, z in zip(xxx, yyy, other_dsos['label']):
plt.annotate(
z, (x, y),
textcoords='offset points',
xytext=(5, 5),
ha='left'
)
else:
ax, _ = map_dsos(ax, earth, t, projection,
center = (center_ra, center_dec),
reversed=reversed,
label_size=label_size,
label_weight=label_weight,
product = 'atlas'
)
# Set the display
ax.set_xlim(-limit, limit)
ax.set_ylim(-limit, limit)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
secax = ax.secondary_xaxis('bottom', functions=(r2d, d2r))
secax.set_xlabel('Degrees')
secay = ax.secondary_yaxis('left', functions=(r2d, d2r))
title = f"Chart: RA {ra} DEC {dec}"
ax.set_title(title)
on_plate = get_dsos_on_plate(center_ra, center_dec, fov=fov)
if save_file:
fn = get_fn(center_ra, center_dec, shapes=shapes)
fig.savefig('media/atlas_images/{}'.format(fn), bbox_inches='tight')
plt.cla()
plt.close(fig)
return fn, on_plate
plt.tight_layout(pad=2.0)
# Convert to a PNG image
pngImage = io.BytesIO()
FigureCanvas(fig).print_png(pngImage)
pngImageB64String = 'data:image/png;base64,'
pngImageB64String += base64.b64encode(pngImage.getvalue()).decode('utf8')
# close things
plt.cla()
plt.close(fig)
return pngImageB64String, on_plate
"""
258 atlas plates
1: 0, +90 N polar plot
2 - 13: 2.0h, +75
14 - 29: 1.5h, +60
30 - 49: 1.2h, +45
50 - 73: 1.0h, +30
74 - 105: 0.75h, +15
106 - 153: 0.5h, 0
154 - 185: 0.75h, -15
186 - 209: 1.0h, -30
210 - 229: 1.2h, -45
230 - 245: 1.5h, -60
246 - 257: 2.0h, -75
258: 0, -90 S polar plot
""" | 33.034483 | 108 | 0.59499 | 842 | 5,748 | 3.935867 | 0.325416 | 0.021726 | 0.016898 | 0.020519 | 0.028968 | 0.013881 | 0 | 0 | 0 | 0 | 0 | 0.066365 | 0.268615 | 5,748 | 174 | 109 | 33.034483 | 0.721931 | 0.03723 | 0 | 0.06015 | 0 | 0 | 0.050838 | 0.016946 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030075 | false | 0 | 0.120301 | 0 | 0.18797 | 0.007519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8df72be42a3ea83a7f917562f0ba78dd4aaeb72c | 884 | py | Python | 2021/CourseraPython/course1-basics/loops/numcheck.py | Muramatsu2602/python-study | c81eb5d2c343817bc29b2763dcdcabed0f6a42c6 | [
"MIT"
] | 2 | 2021-01-11T16:13:40.000Z | 2022-03-02T02:03:46.000Z | 2021/CourseraPython/course1-basics/loops/numcheck.py | Muramatsu2602/python-study | c81eb5d2c343817bc29b2763dcdcabed0f6a42c6 | [
"MIT"
] | null | null | null | 2021/CourseraPython/course1-basics/loops/numcheck.py | Muramatsu2602/python-study | c81eb5d2c343817bc29b2763dcdcabed0f6a42c6 | [
"MIT"
] | null | null | null | # 5.2 Write a program that repeatedly prompts a user for integer numbers until the user
# enters 'done'. Once 'done' is entered, print out the largest and smallest of the numbers.
# If the user enters anything other than a valid number, catch it with a try/except, put out
# an appropriate message, and ignore the number. Enter 7, 2, bob, 10, and 4 and match the
# output below.
largest = None
smallest = None
while True:
    num = input("Enter a number: ")
    if num == "done":
        break
    try:
        value = int(num)
        # the first valid number seeds both the max and the min
        if largest is None:
            largest = value
            smallest = value
        # finding max/min values
        if value > largest:
            largest = value
        elif value < smallest:
            smallest = value
    except ValueError:
        print("Invalid input")
print("Maximum is", largest)
print("Minimum is", smallest)
| 32.740741 | 371 | 0.618778 | 125 | 884 | 4.376 | 0.544 | 0.025594 | 0.047532 | 0.051188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01634 | 0.307692 | 884 | 26 | 372 | 34 | 0.877451 | 0.469457 | 0 | 0.210526 | 0 | 0 | 0.113978 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.157895 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
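The reference output the exercise asks to match is not reproduced in the source; with the suggested inputs (7, 2, bob, 10, 4) the program above produces a session like this, inferred from the spec rather than copied from the course:

Enter a number: 7
Enter a number: 2
Enter a number: bob
Invalid input
Enter a number: 10
Enter a number: 4
Enter a number: done
Maximum is 10
Minimum is 2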
8dfe8011c2244a0ed87994c9abfc3f5206f12836 | 2,371 | py | Python | fastestimator/op/tensorop/loss/mixup_loss.py | AriChow/fastestimator | d381d9acc1d42c6cf88a4424e083375cf98140bf | [
"Apache-2.0"
] | null | null | null | fastestimator/op/tensorop/loss/mixup_loss.py | AriChow/fastestimator | d381d9acc1d42c6cf88a4424e083375cf98140bf | [
"Apache-2.0"
] | null | null | null | fastestimator/op/tensorop/loss/mixup_loss.py | AriChow/fastestimator | d381d9acc1d42c6cf88a4424e083375cf98140bf | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorflow.python.keras.losses import Loss as tfLoss
from fastestimator.op.tensorop.loss import Loss
class MixUpLoss(Loss):
    """
    This class should be used in conjunction with MixUpBatch to perform mix-up training, which helps to reduce
    over-fitting, stabilize GAN training, and harden against adversarial attacks (https://arxiv.org/abs/1710.09412)
    """
    def __init__(self, loss, lam=None, y_true=None, y_pred=None, inputs=None, outputs="loss", mode=None):
        """
        Args:
            loss (func): A loss object (tf.losses) which can be invoked like "loss(true, pred)". Its reduction
                method will be overridden to 'none'
            lam: The key of the lambda value generated by MixUpBatch
            y_true: ground truth label key
            y_pred: prediction label key
            inputs: A tuple or list like: [<lam>, <y_true>, <y_pred>]
            outputs: Where to store the computed loss value (not required under normal use cases)
            mode: 'train', 'eval', 'test', or None
        """
        assert isinstance(loss, tfLoss), "MixUpLoss requires a TensorFlow loss function"
        # Rebuild the loss with reduction disabled so per-sample losses survive the blend
        loss_config = loss.get_config()
        loss_config['reduction'] = 'none'
        inputs = self.validate_loss_inputs(inputs, lam, y_true, y_pred)
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)
        self.loss_obj = loss.from_config(loss_config)

    def forward(self, data, state):
        lam, true, pred = data
        # Each prediction is scored against its own label and against its mix-up
        # partner's label (the batch rolled by one), weighted by lam
        loss1 = self.loss_obj(true, pred)
        loss2 = self.loss_obj(tf.roll(true, shift=1, axis=0), pred)
        return lam * loss1 + (1.0 - lam) * loss2
| 47.42 | 118 | 0.657107 | 326 | 2,371 | 4.699387 | 0.503067 | 0.039164 | 0.02154 | 0.020888 | 0.016971 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013557 | 0.222269 | 2,371 | 49 | 119 | 48.387755 | 0.817245 | 0.577394 | 0 | 0 | 0 | 0 | 0.070295 | 0 | 0 | 0 | 0 | 0 | 0.0625 | 1 | 0.125 | false | 0 | 0.1875 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
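A minimal usage sketch, not taken from the FastEstimator repository; the key names 'lam', 'y' and 'y_pred' are assumptions standing in for whatever keys MixUpBatch and the network actually emit:

import tensorflow as tf

mixup_loss = MixUpLoss(
    loss=tf.keras.losses.CategoricalCrossentropy(),  # any Keras Loss subclass should do
    lam='lam', y_true='y', y_pred='y_pred')

# forward() then computes, per batch:
#   lam * loss(y, y_pred) + (1 - lam) * loss(roll(y, 1), y_pred)
# i.e. each prediction is scored against both labels it was mixed from.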
5c0031226e3df4483ca669793e8975e7761b6bcd | 1,502 | py | Python | Bithumb.py | luigitabarca/appp | a3c8417cf7deb7f5c413ee879e6192007741a570 | [
"MIT"
] | null | null | null | Bithumb.py | luigitabarca/appp | a3c8417cf7deb7f5c413ee879e6192007741a570 | [
"MIT"
] | null | null | null | Bithumb.py | luigitabarca/appp | a3c8417cf7deb7f5c413ee879e6192007741a570 | [
"MIT"
] | null | null | null |
from pybithumb.core import *
from pandas import DataFrame
import pandas as pd
import datetime
import math
class Bithumb:
    @staticmethod
    def _convert_unit(unit):
        try:
            # truncate (not round) to four decimal places
            unit = math.floor(unit * 10000) / 10000
            return unit
        except Exception:
            return 0

    @staticmethod
    def get_tickers(payment_currency="KRW"):
        """
        List the cryptocurrencies supported by Bithumb
        :param payment_currency : KRW
        :return:
        """
        resp = None
        try:
            resp = PublicApi.ticker("ALL", payment_currency)
            data = resp['data']
            # ticker entries are dicts; scalar entries (e.g. 'date') are metadata
            tickers = [k for k, v in data.items() if isinstance(v, dict)]
            return tickers
        except Exception:
            return resp

    @staticmethod
    def get_current_price(order_currency, payment_currency="KRW"):
        """
        Query the last traded price
        :param order_currency : BTC/ETH/DASH/LTC/ETC/XRP/BCH/XMR/ZEC/QTUM/BTG/EOS/ICX/VEN/TRX/ELF/MITH/MCO/OMG/KNC
        :param payment_currency : KRW
        :return : price
        """
        resp = None
        try:
            resp = PublicApi.ticker(order_currency, payment_currency)
            if order_currency != "ALL":
                return float(resp['data']['closing_price'])
            else:
                del resp["data"]['date']
                return resp["data"]
        except Exception:
            return resp
| 26.350877 | 117 | 0.515313 | 156 | 1,502 | 4.858974 | 0.50641 | 0.118734 | 0.094987 | 0.060686 | 0.155673 | 0.079156 | 0 | 0 | 0 | 0 | 0 | 0.012141 | 0.396804 | 1,502 | 56 | 118 | 26.821429 | 0.824503 | 0.159787 | 0 | 0.342857 | 0 | 0 | 0.040323 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.142857 | 0 | 0.457143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
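A quick usage sketch (hypothetical; it assumes pybithumb is installed and the public API is reachable):

tickers = Bithumb.get_tickers()                 # e.g. ['BTC', 'ETH', ...]
btc_krw = Bithumb.get_current_price("BTC")      # float, or the raw response on error
all_prices = Bithumb.get_current_price("ALL")   # dict keyed by ticker

Note the error-handling convention: on failure both methods return the raw (possibly None) response rather than raising, so callers should type-check the result.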
5c05545589141fb82f106641f89a15eb131c03e9 | 301 | py | Python | python/image_processing/rotation_img.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 16 | 2018-11-26T08:39:42.000Z | 2019-05-08T10:09:52.000Z | python/image_processing/rotation_img.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 8 | 2020-05-04T06:29:26.000Z | 2022-02-12T05:33:16.000Z | python/image_processing/rotation_img.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 5 | 2020-02-11T16:02:21.000Z | 2021-02-05T07:48:30.000Z |
import numpy as np
import cv2
# load the image as grayscale
img = cv2.imread('city2.jpg', 0)
rows, cols = img.shape
# rotate 90 degrees counter-clockwise about the image center, scale 1
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 90, 1)
dst = cv2.warpAffine(img, M, (cols, rows))
cv2.imshow('image cv2', dst)
cv2.waitKey(0)
# to save the image
# cv2.imwrite('image1.png', img)
cv2.destroyAllWindows()
| 15.05 | 49 | 0.704319 | 51 | 301 | 4.156863 | 0.588235 | 0.056604 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.07197 | 0.122924 | 301 | 19 | 50 | 15.842105 | 0.731061 | 0.156146 | 0 | 0 | 0 | 0 | 0.072 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
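warpAffine above keeps the original cols x rows canvas, so parts of a rotated non-square image can fall outside it and be clipped. A common sketch (an addition, not in the source) that grows the canvas to fit the rotated image:

import cv2

img = cv2.imread('city2.jpg', 0)
(h, w) = img.shape[:2]
M = cv2.getRotationMatrix2D((w / 2, h / 2), 45, 1.0)
cos, sin = abs(M[0, 0]), abs(M[0, 1])
# bounding box of the rotated image
nW = int(h * sin + w * cos)
nH = int(h * cos + w * sin)
# shift the rotation so the result stays centred on the new canvas
M[0, 2] += nW / 2 - w / 2
M[1, 2] += nH / 2 - h / 2
rotated = cv2.warpAffine(img, M, (nW, nH))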
5c077ecb8a5382d71c5438b3e59abd26f0bd8969 | 1,160 | py | Python | hardware/testbenches/common/drivers/axi4stream/io.py | Intuity/nexus | 0d1414fa2ea518dae9f031930c40692ebac5d154 | [
"Apache-2.0"
] | 6 | 2021-06-28T05:52:15.000Z | 2022-03-27T20:45:28.000Z | hardware/testbenches/common/drivers/axi4stream/io.py | Intuity/nexus | 0d1414fa2ea518dae9f031930c40692ebac5d154 | [
"Apache-2.0"
] | null | null | null | hardware/testbenches/common/drivers/axi4stream/io.py | Intuity/nexus | 0d1414fa2ea518dae9f031930c40692ebac5d154 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021, Peter Birch, mailto:peter@lightlogic.co.uk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..io_common import BaseIO
class AXI4StreamIO(BaseIO):
    """ AXI4 stream interface """

    def __init__(self, dut, name, role):
        """ Initialise AXI4StreamIO.

        Args:
            dut : Pointer to the DUT boundary
            name: Name of the signal - acts as a prefix
            role: Role of this signal on the DUT boundary
        """
        super().__init__(dut, name, role, [
            # initiator-driven signals
            "tvalid", "tdata", "tstrb", "tkeep", "tlast", "tid", "tdest",
            "tuser", "twakeup",
        ], [
            # responder-driven signals
            "tready",
        ])
| 34.117647 | 74 | 0.649138 | 154 | 1,160 | 4.831169 | 0.655844 | 0.080645 | 0.034946 | 0.043011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012673 | 0.251724 | 1,160 | 33 | 75 | 35.151515 | 0.84447 | 0.662069 | 0 | 0 | 0 | 0 | 0.159509 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
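A hypothetical instantiation sketch; the IORole values and the exact way `name` prefixes the signal names are assumptions about io_common, not confirmed by this file:

# inside a cocotb testbench, with dut as the toplevel handle;
# would resolve signals such as dut.ib_axis_tvalid, dut.ib_axis_tdata, ...
inbound = AXI4StreamIO(dut, "ib_axis", IORole.RESPONDER)   # DUT consumes this stream
outbound = AXI4StreamIO(dut, "ob_axis", IORole.INITIATOR)  # DUT drives this stream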
5c0b1af22a5316e17602db8cfb4f8b44f7bf9111 | 781 | py | Python | fit/messages/workout.py | rembish/f | 7c5913b80cb7cc6ef102ceb168ec8c281233fb65 | [
"BSD-3-Clause"
] | 10 | 2015-04-09T02:18:48.000Z | 2021-02-22T09:27:08.000Z | fit/messages/workout.py | rembish/f | 7c5913b80cb7cc6ef102ceb168ec8c281233fb65 | [
"BSD-3-Clause"
] | 2 | 2017-04-16T14:07:59.000Z | 2020-01-31T02:25:53.000Z | fit/messages/workout.py | rembish/f | 7c5913b80cb7cc6ef102ceb168ec8c281233fb65 | [
"BSD-3-Clause"
] | 5 | 2015-04-09T02:20:45.000Z | 2022-01-17T10:38:10.000Z | from fit.messages import Message
from fit.types.extended import Sport, WorkoutCapabilities, MessageIndex, \
    Intensity, WktStepTarget, WktStepDuration
from fit.types.general import UInt16, String, UInt32


class Workout(Message):
    msg_type = 26

    sport = Sport(4)
    capabilities = WorkoutCapabilities(5)
    num_valid_steps = UInt16(6)
    wkt_name = String(8)


class WorkoutStep(Message):
    msg_type = 27

    message_index = MessageIndex(254)
    wkt_step_name = String(0)
    duration_type = WktStepDuration(1)
    duration_value = UInt32(2)  # variants
    target_type = WktStepTarget(3)
    target_value = UInt32(4)  # variants
    custom_target_value_low = UInt32(5)  # variants
    custom_target_value_high = UInt32(6)  # variants
    intensity = Intensity(7)
| 27.892857 | 74 | 0.724712 | 95 | 781 | 5.768421 | 0.515789 | 0.038321 | 0.043796 | 0.091241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052381 | 0.193342 | 781 | 27 | 75 | 28.925926 | 0.81746 | 0.044814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.952381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
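In these declarations msg_type is the FIT global message number (26 = workout, 27 = workout_step) and the integer handed to each field type is its FIT field-definition number; the "variants" comments flag fields whose units depend on a companion type field (duration_value is read against duration_type, target_value against target_type). A sketch of how a further message would be declared in the same style; the numbers below are placeholders, not checked against the FIT profile:

class WorkoutNote(Message):  # hypothetical message, illustrative only
    msg_type = 999  # placeholder global message number

    message_index = MessageIndex(254)
    note = String(0)  # placeholder field-definition number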
5c1434a7013b9bee370455ab2802787652471f6d | 2,158 | py | Python | hummingbot/connector/exchange/kucoin/kucoin_constants.py | pachares/beatit | 1c92ca68545667498f46db1c262e3bfb98acfcc9 | [
"Apache-2.0"
] | null | null | null | hummingbot/connector/exchange/kucoin/kucoin_constants.py | pachares/beatit | 1c92ca68545667498f46db1c262e3bfb98acfcc9 | [
"Apache-2.0"
] | null | null | null | hummingbot/connector/exchange/kucoin/kucoin_constants.py | pachares/beatit | 1c92ca68545667498f46db1c262e3bfb98acfcc9 | [
"Apache-2.0"
] | 1 | 2022-03-28T09:28:25.000Z | 2022-03-28T09:28:25.000Z | import sys
from hummingbot.core.api_throttler.data_types import RateLimit
# REST endpoints
BASE_PATH_URL = "https://api.kucoin.com"
PUBLIC_WS_DATA_PATH_URL = "/api/v1/bullet-public"
PRIVATE_WS_DATA_PATH_URL = "/api/v1/bullet-private"
TICKER_PRICE_CHANGE_PATH_URL = "/api/v1/market/allTickers"
EXCHANGE_INFO_PATH_URL = "/api/v1/symbols"
SNAPSHOT_PATH_URL = "/api/v3/market/orderbook/level2"
SNAPSHOT_NO_AUTH_PATH_URL = "/api/v1/market/orderbook/level2_100"
ACCOUNTS_PATH_URL = "/api/v1/accounts?type=trade"
SERVER_TIME_PATH_URL = "/api/v1/timestamp"
SYMBOLS_PATH_URL = "/api/v1/symbols"
ORDERS_PATH_URL = "/api/v1/orders"
TRADE_ORDERS_ENDPOINT_NAME = "/spotMarket/tradeOrders"
BALANCE_ENDPOINT_NAME = "/account/balance"
PRIVATE_ENDPOINT_NAMES = [
    TRADE_ORDERS_ENDPOINT_NAME,
    BALANCE_ENDPOINT_NAME,
]
WS_CONNECTION_LIMIT_ID = "WSConnection"
WS_CONNECTION_LIMIT = 30
WS_CONNECTION_TIME_INTERVAL = 60
WS_REQUEST_LIMIT_ID = "WSRequest"
GET_ORDER_LIMIT_ID = "GetOrders"
POST_ORDER_LIMIT_ID = "PostOrder"
DELETE_ORDER_LIMIT_ID = "DeleteOrder"
WS_PING_HEARTBEAT = 10
NO_LIMIT = sys.maxsize
RATE_LIMITS = [
    RateLimit(WS_CONNECTION_LIMIT_ID, limit=WS_CONNECTION_LIMIT, time_interval=WS_CONNECTION_TIME_INTERVAL),
    RateLimit(WS_REQUEST_LIMIT_ID, limit=100, time_interval=10),
    RateLimit(limit_id=PUBLIC_WS_DATA_PATH_URL, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=PRIVATE_WS_DATA_PATH_URL, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=TICKER_PRICE_CHANGE_PATH_URL, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=EXCHANGE_INFO_PATH_URL, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=SNAPSHOT_PATH_URL, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=SNAPSHOT_NO_AUTH_PATH_URL, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=ACCOUNTS_PATH_URL, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=SERVER_TIME_PATH_URL, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=GET_ORDER_LIMIT_ID, limit=NO_LIMIT, time_interval=1),
    RateLimit(limit_id=POST_ORDER_LIMIT_ID, limit=45, time_interval=3),
    RateLimit(limit_id=DELETE_ORDER_LIMIT_ID, limit=60, time_interval=3),
]
| 43.16 | 108 | 0.810009 | 333 | 2,158 | 4.798799 | 0.231231 | 0.09199 | 0.110138 | 0.067584 | 0.409887 | 0.313517 | 0.313517 | 0.283479 | 0.283479 | 0.257822 | 0 | 0.020823 | 0.087581 | 2,158 | 49 | 109 | 44.040816 | 0.790757 | 0.006487 | 0 | 0 | 0 | 0 | 0.155462 | 0.085901 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.046512 | 0 | 0.046512 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
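A sketch of how these constants are typically consumed: hummingbot's AsyncThrottler takes the RATE_LIMITS table and gates each call by its limit_id. The aiohttp-style session and coroutine wiring below are illustrative, not from this file:

from hummingbot.core.api_throttler.async_throttler import AsyncThrottler

throttler = AsyncThrottler(RATE_LIMITS)

async def fetch_symbols(session):
    # waits until the budget for this limit_id allows another call
    async with throttler.execute_task(limit_id=EXCHANGE_INFO_PATH_URL):
        async with session.get(BASE_PATH_URL + EXCHANGE_INFO_PATH_URL) as resp:
            return await resp.json()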
5c147b1581c9e42ed038ee8839eff0846fb1f39f | 3,586 | py | Python | ml4a/utils/histogram.py | KushGabani/ml4a-guides | d71b61a99c417b9ace3404420b37d22f6da06153 | [
"MIT"
] | 1,110 | 2016-06-02T23:58:41.000Z | 2020-11-29T07:24:20.000Z | ml4a/utils/histogram.py | KushGabani/ml4a-guides | d71b61a99c417b9ace3404420b37d22f6da06153 | [
"MIT"
] | 49 | 2016-08-14T22:58:41.000Z | 2020-07-17T17:59:56.000Z | ml4a/utils/histogram.py | KushGabani/ml4a-guides | d71b61a99c417b9ace3404420b37d22f6da06153 | [
"MIT"
] | 300 | 2016-06-13T23:06:55.000Z | 2020-11-18T22:42:55.000Z | from PIL import Image
import numpy as np
from tqdm import tqdm
from os import listdir
from os.path import isfile, join


def histogram_equalization(x):
    # 256 bins over [0, 256) so that pixel value 255 has its own CDF entry
    hist, bins = np.histogram(x.flatten(), 256, [0, 256])
    cdf = hist.cumsum()
    cdf_m = np.ma.masked_equal(cdf, 0)
    cdf_m = (cdf_m - cdf_m.min()) * 255.0 / (cdf_m.max() - cdf_m.min())
    cdf = np.ma.filled(cdf_m, 0).astype('uint8')
    x2 = cdf[x.astype('uint8')]
    return x2, cdf


def get_histogram(pixels, bright=True):
    hist = np.zeros((256, 1 if bright else 3))
    for p in pixels:
        if bright:
            avg = int((p[0] + p[1] + p[2]) / 3.0)
            hist[avg, 0] += 1
        else:
            hist[p[0], 0] += 1
            hist[p[1], 1] += 1
            hist[p[2], 2] += 1
    return np.array(hist)


def match_histogram2(img1, hist):
    colors = img1.getdata()
    red, green, blue = [c[0] for c in colors], [c[1] for c in colors], [c[2] for c in colors]
    # pixel indices sorted by channel intensity
    sr = sorted(range(len(red)), key=lambda k: red[k])
    sg = sorted(range(len(green)), key=lambda k: green[k])
    sb = sorted(range(len(blue)), key=lambda k: blue[k])
    hr, hg, hb = [[hist[i][c] for i in range(256)] for c in range(3)]
    fr, fg, fb = 0, 0, 0
    for c in range(len(hr)):
        nfr, nfg, nfb = int(hr[c]), int(hg[c]), int(hb[c])
        idxr = [sr[k] for k in range(fr, fr + nfr)]
        idxg = [sg[k] for k in range(fg, fg + nfg)]
        idxb = [sb[k] for k in range(fb, fb + nfb)]
        for ir in idxr:
            red[ir] = c
        for ig in idxg:
            green[ig] = c
        for ib in idxb:
            blue[ib] = c
        fr, fg, fb = fr + nfr, fg + nfg, fb + nfb
    adjusted_colors = list(zip(red, green, blue))  # putdata needs a full sequence
    img_adjusted = Image.new(img1.mode, img1.size)
    img_adjusted.putdata(adjusted_colors)
    return img_adjusted


def match_histogram(img1, hist):
    pixels = list(img1.getdata())
    red, green, blue = np.array([c[0] for c in pixels]), np.array([c[1] for c in pixels]), np.array([c[2] for c in pixels])
    sr = sorted(range(len(red)), key=lambda k: red[k])
    sg = sorted(range(len(green)), key=lambda k: green[k])
    sb = sorted(range(len(blue)), key=lambda k: blue[k])
    # rescale the target histogram so its mass matches this image's pixel count
    num_pixel_mult = (3 * len(pixels)) / np.sum(hist)
    hr, hg, hb = [[int(num_pixel_mult * hist[i][c]) for i in range(256)] for c in range(3)]
    fr, fg, fb = 0, 0, 0
    for c in range(len(hr)):
        nfr, nfg, nfb = int(hr[c]), int(hg[c]), int(hb[c])
        red[np.array([sr[k] for k in range(fr, fr + nfr)]).astype('int')] = c
        green[np.array([sg[k] for k in range(fg, fg + nfg)]).astype('int')] = c
        blue[np.array([sb[k] for k in range(fb, fb + nfb)]).astype('int')] = c
        fr, fg, fb = fr + nfr, fg + nfg, fb + nfb
    adjusted_pixels = list(zip(red, green, blue))
    img_adjusted = Image.new(img1.mode, img1.size)
    img_adjusted.putdata(adjusted_pixels)
    return img_adjusted


def adjust_color_range(img, hist, amt, border):
    cdf = hist.cumsum() / np.sum(hist)
    # i1/i2 bracket the values holding the central (1 - 2*border) of the mass
    i1, i2 = min([i for i in range(256) if cdf[i] > border]), max([i for i in range(256) if cdf[i] < 1.0 - border])
    j1, j2 = int((1.0 - amt) * i1), i2 + amt * (255 - i2)
    img2 = np.clip(j1 + (j2 - j1) * (img - i1) / (i2 - i1), 0.0, 255.0)
    return img2


def get_average_histogram(frames_path):
    numframes = len([f for f in listdir(frames_path) if isfile(join(frames_path, f)) and f[-4:] == '.png'])
    img = Image.open('%s/f00001.png' % frames_path)
    histogram = get_histogram(list(img.getdata()))
    # sample every 8th frame to keep the average cheap to compute
    for t in tqdm(range(1, numframes, 8)):
        img = Image.open('%s/f%05d.png' % (frames_path, t + 1))
        histogram += get_histogram(list(img.getdata()))
    histogram /= (1 + len(range(1, numframes, 8)))
    return histogram
| 38.148936 | 123 | 0.583101 | 639 | 3,586 | 3.214398 | 0.189358 | 0.037488 | 0.029211 | 0.020448 | 0.448393 | 0.361246 | 0.327167 | 0.307692 | 0.307692 | 0.287244 | 0 | 0.042259 | 0.234523 | 3,586 | 93 | 124 | 38.55914 | 0.706011 | 0 | 0 | 0.225 | 0 | 0 | 0.013385 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075 | false | 0 | 0.0375 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
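A minimal usage sketch for the equalizer above (file names are placeholders): histogram_equalization builds one CDF over all channel values and remaps every pixel through it.

from PIL import Image
import numpy as np

img = np.array(Image.open('photo.png').convert('RGB'))
eq, cdf = histogram_equalization(img)
Image.fromarray(eq).save('photo_eq.png')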