hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
56c4a181e46ff75702be3d6706e5216784d4e18d | 12,870 | py | Python | umpleonline/chatbot/processresponse.py | YounesB-McGill/Comp550-Project | bbc9cf91e295a26fd1e8f2ba8371f737a449a47a | [
"MIT"
] | null | null | null | umpleonline/chatbot/processresponse.py | YounesB-McGill/Comp550-Project | bbc9cf91e295a26fd1e8f2ba8371f737a449a47a | [
"MIT"
] | 6 | 2020-07-19T01:29:06.000Z | 2021-05-10T21:21:27.000Z | umpleonline/chatbot/processresponse.py | YounesB-McGill/Comp550-Project | bbc9cf91e295a26fd1e8f2ba8371f737a449a47a | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import re
from typing import List
import numpy as np
from keras.models import load_model
from action import (add_class_json, add_attribute, create_association, create_inheritance, create_composition,
return_error_to_user)
from data import ADD_WORDS, CONTAINS_WORDS, HAVE_WORDS, ISA_WORDS
from model import predict, getIntent, keyIntent
from npparser import get_chunks, get_NP_subtrees, get_num_nonnested_NP_subtrees, get_noun_from_np
from utils import (first_letter_lowercase, first_letter_uppercase, contains_one_of, get_DT_for_word, is_attribute,
get_detected_keywords, strip_punctuation)
classes_created = [] # Must keep track of this to avoid errors
def process_response_model(user_input: str) -> str:
message_text = strip_punctuation(user_input.lower())
intent = get_intent(predict(user_input))
if intent == "add_class":
return add_class_action(message_text)
elif intent == "add_attribute":
return add_attribute_action(message_text)
elif intent == "create_composition":
return make_composition(message_text)
elif intent == "create_association":
return make_association(message_text)
elif intent == "create_inheritance":
return make_inheritance(message_text)
else:
return process_response_baseline(user_input)
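# Illustrative call (hypothetical input; the actual reply depends on the trained
# intent model): process_response_model("Create a student") should route to
# add_class_action, which delegates to handle_add_kw below.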
# The following three functions call into the same NP parser as the baseline, once the intent is determined.
def add_class_action(message_text):
return handle_add_kw(message_text)
def make_composition(message_text):
return handle_contain_kw(message_text)
def make_inheritance(message_text):
return handle_isa_kw(message_text)
# Since handle_have_kw tries to guess whether it needs to add an attribute (A student has a name) or an association
# (A student has an address), the logic for the following two functions needs to be specified separately.
def add_attribute_action(message_text):
chunks = get_chunks(message_text)
nps = get_NP_subtrees(chunks)
n_st = get_num_nonnested_NP_subtrees(chunks)
if n_st == 0:
return return_error_to_user("I really don't understand what you meant. Please rephrase.")
elif n_st == 1:
class_name = get_noun_from_np(nps[0])
if class_name in classes_created:
return return_error_to_user(f"What do want to specify about {class_name}?")
else:
dt = get_DT_for_word(class_name)
return return_error_to_user(f"Are trying to add a class? Try saying 'Create {dt} {class_name}.'")
else:
class_name = get_noun_from_np(nps[0])
attribute_name = first_letter_lowercase(get_noun_from_np(nps[1]))
        if class_name not in classes_created:
classes_created.append(class_name)
return add_attribute(class_name, attribute_name)
def make_association(message_text):
chunks = get_chunks(message_text)
nps = get_NP_subtrees(chunks)
n_st = get_num_nonnested_NP_subtrees(chunks)
if n_st == 0:
return return_error_to_user("I really don't understand what you meant. Please rephrase.")
elif n_st == 1:
class_name = get_noun_from_np(nps[0])
if class_name in classes_created:
return return_error_to_user(f"What do want to specify about {class_name}?")
else:
dt = get_DT_for_word(class_name)
return return_error_to_user(f"Are trying to add a class? Try saying 'Create {dt} {class_name}.'")
else:
class1 = get_noun_from_np(nps[0])
class2 = get_noun_from_np(nps[1])
        if class1 not in classes_created:
classes_created.append(class1)
if class2 not in classes_created:
classes_created.append(class2)
return create_association(class1, class2)
def process_response_baseline(user_input: str) -> str:
"""
Function used to reply with a baseline response based on the Socio model.
This function assumes valid input.
"""
print("Processing message in baseline mode.")
message_text = strip_punctuation(user_input.lower())
detected_keywords = get_detected_keywords(message_text)
nk = len(detected_keywords)
if nk == 0:
return handle_no_kw(message_text)
elif nk == 1:
kw = list(detected_keywords.keys())[0]
if kw == "ADD":
return handle_add_kw(message_text)
elif kw == "CONTAIN":
return handle_contain_kw(message_text)
elif kw == "HAVE":
return handle_have_kw(message_text)
elif kw == "ISA":
return handle_isa_kw(message_text)
elif nk == 2:
if "CONTAIN" in detected_keywords.keys() and "ISA" in detected_keywords.keys(): # "can consist of"
return handle_contain_kw(message_text)
else:
print("nk = 2", detected_keywords)
return process_response_fallback(message_text)
else:
# TODO Handle more complex multiple keyword scenarios
print("nk =", nk, detected_keywords)
return process_response_fallback(message_text)
def handle_add_kw(message_text: str) -> str:
chunks = get_chunks(message_text)
nps = get_NP_subtrees(chunks)
n_st = get_num_nonnested_NP_subtrees(chunks)
if n_st == 0:
kw = get_detected_keywords(message_text).get("ADD", "add")
return return_error_to_user(f"Please specify what you want to {kw}.")
elif n_st == 1:
class_name = get_noun_from_np(nps[0])
return add_class(class_name)
elif n_st == 2:
class_name = get_noun_from_np(nps[1])
attribute_name = first_letter_lowercase(get_noun_from_np(nps[0]))
return add_attribute(class_name, attribute_name)
else:
return process_response_fallback(message_text)
def handle_contain_kw(message_text: str) -> str:
chunks = get_chunks(message_text)
nps = get_NP_subtrees(chunks)
n_st = get_num_nonnested_NP_subtrees(chunks)
if n_st < 2:
return return_error_to_user(
"I don't get what you meant. If you want to make a composition, specify the two classes.")
elif n_st == 2:
first_noun = get_noun_from_np(nps[0])
second_noun = get_noun_from_np(nps[1])
if first_noun not in classes_created:
classes_created.append(first_noun)
        if is_attribute(second_noun):
return add_attribute(first_noun, first_letter_lowercase(second_noun))
else:
whole = first_noun
part = second_noun
if part not in classes_created:
classes_created.append(part)
return create_composition(whole, part)
else:
return process_response_fallback(message_text)
def handle_have_kw(message_text: str) -> str:
chunks = get_chunks(message_text)
nps = get_NP_subtrees(chunks)
n_st = get_num_nonnested_NP_subtrees(chunks)
if n_st == 0:
return return_error_to_user("I really don't understand what you meant. Please rephrase.")
elif n_st == 1:
class_name = get_noun_from_np(nps[0])
if class_name in classes_created:
return return_error_to_user(f"What do want to specify about {class_name}?")
else:
dt = get_DT_for_word(class_name)
return return_error_to_user(f"Are trying to add a class? Try saying 'Create {dt} {class_name}.'")
else:
# TODO In the future, also allow multiple attributes ("Student has a name and email").
# This requires updating the website.
class_name = get_noun_from_np(nps[0])
second_noun = get_noun_from_np(nps[1])
        if class_name not in classes_created:
classes_created.append(class_name)
if is_attribute(second_noun):
return add_attribute(class_name, first_letter_lowercase(second_noun))
else:
if second_noun not in classes_created:
classes_created.append(second_noun)
return create_association(class_name, second_noun)
return process_response_fallback(message_text)
def handle_isa_kw(message_text: str) -> str:
chunks = get_chunks(message_text)
nps = get_NP_subtrees(chunks)
n_st = get_num_nonnested_NP_subtrees(chunks)
if n_st < 2:
return return_error_to_user("If you're trying to create an inheritance, clearly specify both classes.")
else:
if ((" serve" in message_text and " as " in message_text) or
(" play" in message_text and " role" in message_text)):
child = get_noun_from_np(nps[1])
parent = get_noun_from_np(nps[0])
else:
child = get_noun_from_np(nps[0])
parent = get_noun_from_np(nps[1])
if child not in classes_created:
classes_created.append(child)
if parent not in classes_created:
classes_created.append(parent)
return create_inheritance(child, parent)
return process_response_fallback(message_text)
def handle_no_kw(message_text: str) -> str:
"""
Add an association if possible, otherwise create a class.
"""
chunks = get_chunks(message_text)
nps = get_NP_subtrees(chunks)
n_st = get_num_nonnested_NP_subtrees(chunks)
if n_st == 0:
return return_error_to_user("I really don't understand what you meant. Please rephrase.")
elif n_st == 1:
class_name = get_noun_from_np(nps[0])
return add_class(class_name)
elif n_st == 2:
class1 = get_noun_from_np(nps[0])
class2 = get_noun_from_np(nps[1])
if class1 not in classes_created:
classes_created.append(class1)
if class2 not in classes_created:
classes_created.append(class2)
return create_association(class1, class2)
return process_response_fallback(message_text)
def process_response_fallback(user_input: str) -> str:
"""
Fallback method from Younes' undergrad project, to be used for the cases not handled by Socio's logic.
"""
print("Processing request in fallback mode")
message_text = user_input.lower()
words = message_text.split(' ')
# This logic is not always correct, eg "Add attribute in class."
if contains_one_of(message_text, ADD_WORDS):
for i in range(len(words) - 2):
if words[i] in ADD_WORDS:
# strip punctuation
class_name = first_letter_uppercase(strip_punctuation(words[i + 2]))
return add_class(class_name)
if "has a" in message_text:
for i in range(len(words) - 2):
if words[i] == 'has':
class_name = first_letter_uppercase(words[i - 1])
attribute_name = strip_punctuation(words[i + 2])
return add_attribute(class_name, attribute_name)
if "is composed of" in message_text:
for i in range(len(words) - 2):
if words[i] == "is":
whole_class_name = first_letter_uppercase(words[i - 1])
part_class_name = first_letter_uppercase(strip_punctuation(words[i + 3]))
# assume the plural when part_class_name ends with s
if part_class_name[-1] == "s":
part_class_name = part_class_name[:-1]
return create_composition(whole_class_name, part_class_name)
# not very useful, but good for testing
if "is associated with" in message_text:
for i in range(len(words) - 3):
if words[i] == "is":
class_name1 = first_letter_uppercase(words[i - 1])
if words[i + 3] in ["a", "an"]:
class_name2 = words[i + 4]
else:
class_name2 = words[i + 3]
class_name2 = first_letter_uppercase(strip_punctuation(class_name2))
return create_association(class_name1, class_name2)
if "is a" in message_text:
for i in range(len(words) - 2):
if words[i] == "is":
child = first_letter_uppercase(words[i - 1])
parent = first_letter_uppercase(strip_punctuation(words[i + 2]))
return create_inheritance(child, parent)
return return_error_to_user("Sorry, I could not process your request :(")
def get_intent(predicts):
    """Return the intent label with the highest predicted probability."""
    prediction = predicts[0]
    intents = np.array(keyIntent)
    # Sort intent labels by descending probability; the first entry is the most likely intent.
    ids = np.argsort(-prediction)
    intents = intents[ids]
    predictions = -np.sort(-prediction)
    return intents[np.argmax(predictions)]
# These functions are kept here since they modify the global state
def add_class(class_name: str) -> str:
global classes_created
if class_name in classes_created:
return return_error_to_user(f"{class_name} is already created, so let's not make it again.")
return add_class_json(class_name)
def reset_classes_created():
global classes_created
classes_created = []
| 37.304348 | 115 | 0.671484 | 1,788 | 12,870 | 4.530201 | 0.134228 | 0.071975 | 0.031235 | 0.036914 | 0.611852 | 0.539012 | 0.475062 | 0.422469 | 0.37037 | 0.333827 | 0 | 0.008782 | 0.247941 | 12,870 | 344 | 116 | 37.412791 | 0.828081 | 0.083683 | 0 | 0.505882 | 0 | 0.003922 | 0.095396 | 0 | 0 | 0 | 0 | 0.002907 | 0 | 1 | 0.062745 | false | 0 | 0.035294 | 0.011765 | 0.313725 | 0.015686 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56c81124f1aaadd1eefd7577b6f9a4ae2b4cf780 | 2,890 | py | Python | quantum_machine_learning/qml_100_GeneratingFourierState_solution.py | zemarchezi/QHack2022 | e388a546027168c3f1d48ad2e7ecf831425bb2dc | [
"CC0-1.0"
] | null | null | null | quantum_machine_learning/qml_100_GeneratingFourierState_solution.py | zemarchezi/QHack2022 | e388a546027168c3f1d48ad2e7ecf831425bb2dc | [
"CC0-1.0"
] | null | null | null | quantum_machine_learning/qml_100_GeneratingFourierState_solution.py | zemarchezi/QHack2022 | e388a546027168c3f1d48ad2e7ecf831425bb2dc | [
"CC0-1.0"
] | 5 | 2022-03-16T00:02:24.000Z | 2022-03-23T20:12:23.000Z | #! /usr/bin/python3
import sys
from pennylane import numpy as np
import pennylane as qml
def generating_fourier_state(n_qubits, m):
"""Function which, given the number of qubits and an integer m, returns the circuit and the angles that generate the state
QFT|m> following the above template.
Args:
- n_qubits (int): number of qubits in the circuit.
- m (int): basis state that we generate. For example, for 'm = 3' and 'n_qubits = 4'
we would generate the state QFT|0011> (3 in binary is 11).
Returns:
- (qml.QNode): circuit used to generate the state.
- (list[float]): angles that generate the state QFT|m>.
"""
dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev)
def circuit(angles):
"""This is the quantum circuit that we will use."""
# QHACK #
        # Apply the template from the problem statement, using the angles passed as an argument.
for w in range(n_qubits):
qml.Hadamard(wires=w)
qml.RZ(angles[w],wires=w)
# QHACK #
# We apply QFT^-1 to return to the computational basis.
# This will help us to see how well we have done.
qml.adjoint(qml.QFT)(wires=range(n_qubits))
# We return the probabilities of seeing each basis state.
return qml.probs(wires=range(n_qubits))
def error(angles):
"""This function will determine, given a set of angles, how well it approximates
the desired state. Here it will be necessary to call the circuit to work with these results.
"""
probs = circuit(angles)
# QHACK #
# The return error should be smaller when the state m is more likely to be obtained.
target=np.zeros(2**n_qubits)
target[m]=1
loss=np.sum((target-probs)**2)
return loss
# QHACK #
# This subroutine will find the angles that minimize the error function.
# Do not modify anything from here.
opt = qml.AdamOptimizer(stepsize=0.8)
epochs = 5000
angles = np.zeros(n_qubits, requires_grad=True)
for epoch in range(epochs):
angles = opt.step(error, angles)
angles = np.clip(opt.step(error, angles), -2 * np.pi, 2 * np.pi)
return circuit, angles
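# Note (analytic sketch, not part of the original solution): QFT|m> is a product
# state, (|0> + e^{i*theta_w}|1>)/sqrt(2) on each wire, so the H + RZ template can
# prepare it exactly; the optimizer should converge to angles equal (mod 2*pi) to
# phases of the form 2*pi*m / 2**k, with the wire-to-k mapping depending on
# PennyLane's QFT ordering convention.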
if __name__ == "__main__":
# DO NOT MODIFY anything in this code block
inputs = sys.stdin.read().split(",")
n_qubits = int(inputs[0])
m = int(inputs[1])
output = generating_fourier_state(n_qubits, m)
output[0](output[1])
dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev)
def check_with_arbitrary_state():
for i in range(n_qubits):
qml.RY(i, wires=i)
for op in output[0].qtape.operations:
qml.apply(op)
return qml.state()
print(",".join([f"{p.real.round(5)},{p.imag.round(5)}" for p in check_with_arbitrary_state()]))
| 32.111111 | 126 | 0.63045 | 428 | 2,890 | 4.182243 | 0.376168 | 0.050838 | 0.035754 | 0.031844 | 0.141899 | 0.122905 | 0.089385 | 0.055866 | 0.055866 | 0.055866 | 0 | 0.013634 | 0.264014 | 2,890 | 89 | 127 | 32.47191 | 0.827927 | 0.420761 | 0 | 0.1 | 0 | 0 | 0.04471 | 0.02204 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.075 | 0 | 0.275 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56cb8fcd45f5672fe2b1eb0a6363664189af573d | 2,004 | py | Python | Class08.py | Kiran1178/Python201901 | 8f387c9ed451a8b0cf5c20e9d8f6ae53cafaf0df | [
"MIT"
] | null | null | null | Class08.py | Kiran1178/Python201901 | 8f387c9ed451a8b0cf5c20e9d8f6ae53cafaf0df | [
"MIT"
] | null | null | null | Class08.py | Kiran1178/Python201901 | 8f387c9ed451a8b0cf5c20e9d8f6ae53cafaf0df | [
"MIT"
] | null | null | null | # # # ####
# import_os.path as os_path
from os import path, makedirs
# #########################
# 1) Python Absoulute path
# ########################
#
# current absolute path
# file_path = r"c:\repos\Library"
# current_file_path = path.abspath(__file__)
# print(current_file_path)
# print(path.dirname(current_file_path))
# print(path.basename(current_file_path))
# Get current directory
current_directory = path.dirname(path.abspath(__file__))
# print(current_directory)
# Concat file path
json_file_path = path.join(
    current_directory, 'test_demo', 'json_file', 'parse_json_data.json'
)
# if path.exists(json_file_path):
#     print("hello JSON")
#
xml_file_path = path.join(
current_directory, 'test_demo', 'xml_file', 'parse_xml_data.xml'
)
#
# if path.exists(xml_file_path):
# print("hello XML")
#
text_file_path = path.join(
    current_directory, 'test_demo', 'text_file', 'parse_text_data.txt'
)
# print("hello text")
#
CSV_file_path = path.join(
    current_directory, 'test_demo', 'csv_file', 'parse_csv_data.csv'
)
# print("hello csv")
#
#
# class_09 = path.join(
# current_directory, 'test_demo', 'class_09', 'test_dr', 'whynot dr'
# )
# print(class_09)
#
if not path.exists(path.dirname(text_file_path)):
makedirs(path.dirname(text_file_path))
file_data = "This is my classo9 file, which is created for test purpose,"
with open(text_file_path, 'w+') as text_file:
text_file.writelines(file_data)
from pprint import pprint
# with open(text_file_path, 'r+') as text_file_read:
# data = text_file_read.readlines()
#     pprint(data, width=120)
# if path.exists(text_file_path):
# print("exists")
# with open(text_file_path, 'r+') as text_file_read:
# for line in text_file_read:
# print(line.replace("\n", ''))
def generator_parse_file(file_path):
with open(file_path, 'r+') as text_file:
for line in text_file:
yield line.replace("\n", '')
for i in generator_parse_file(text_file_path):
print(i)
#
| 21.094737 | 73 | 0.683633 | 290 | 2,004 | 4.403448 | 0.231034 | 0.131558 | 0.075176 | 0.09397 | 0.440094 | 0.287392 | 0.247455 | 0.247455 | 0.216132 | 0.216132 | 0 | 0.006501 | 0.155689 | 2,004 | 94 | 74 | 21.319149 | 0.748227 | 0.428643 | 0 | 0.115385 | 0 | 0 | 0.201149 | 0.020115 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.076923 | 0 | 0.115385 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56ccf9b464e87b0ee37675d5598960af66f6aaee | 996 | py | Python | explicalib/calibration/evaluation/diagrams/binary/binary_calibration_error_curve.py | euranova/estimating_eces | 9bfa81dd7a39ebe069c5b11b8e7a9bf9017e9350 | [
"MIT"
] | 2 | 2021-11-30T18:44:11.000Z | 2021-11-30T18:44:19.000Z | explicalib/calibration/evaluation/diagrams/binary/binary_calibration_error_curve.py | euranova/estimating_eces | 9bfa81dd7a39ebe069c5b11b8e7a9bf9017e9350 | [
"MIT"
] | null | null | null | explicalib/calibration/evaluation/diagrams/binary/binary_calibration_error_curve.py | euranova/estimating_eces | 9bfa81dd7a39ebe069c5b11b8e7a9bf9017e9350 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: nicolas.posocco
"""
from .binary_reliability_curve import binary_reliability_curve
def binary_calibration_error_curve(model=None, X=None, Y=None,
kernel=None, bandwidth=None,
positive_scores=None,
positive_scores_for_positive_gt=None,
positive_class_probability=None):
reliability_curve = binary_reliability_curve(model=model, X=X, Y=Y,
kernel=kernel, bandwidth=bandwidth,
positive_scores=positive_scores,
positive_scores_for_positive_gt=positive_scores_for_positive_gt,
positive_class_probability=positive_class_probability)
result = {"scores": reliability_curve["scores"],
}
return result
| 39.84 | 113 | 0.5251 | 84 | 996 | 5.845238 | 0.333333 | 0.171079 | 0.13442 | 0.152749 | 0.181263 | 0.126273 | 0 | 0 | 0 | 0 | 0 | 0.001698 | 0.408635 | 996 | 24 | 114 | 41.5 | 0.831919 | 0.047189 | 0 | 0 | 0 | 0 | 0.012752 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56cefc1f7836af65100b50a748c1af6718286e94 | 8,714 | py | Python | bayesmark/np_util.py | goncaloperes/bayesmark | 8c420e935718f0d6867153b781e58943ecaf2338 | [
"Apache-2.0"
] | 102 | 2019-09-27T02:38:52.000Z | 2022-03-12T13:31:11.000Z | bayesmark/np_util.py | goncaloperes/bayesmark | 8c420e935718f0d6867153b781e58943ecaf2338 | [
"Apache-2.0"
] | 17 | 2019-10-07T18:20:21.000Z | 2022-01-03T08:19:16.000Z | bayesmark/np_util.py | goncaloperes/bayesmark | 8c420e935718f0d6867153b781e58943ecaf2338 | [
"Apache-2.0"
] | 34 | 2019-09-27T02:38:31.000Z | 2022-02-09T21:32:25.000Z | # Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to that could be included in `numpy` but aren't.
"""
import numpy as np
# np seed must be in [0, 2**32 - 1] = [0, uint32 max]
SEED_MAX_INCL = np.iinfo(np.uint32).max
# Access default numpy rng in way that is short and sphinx friendly
random = np.random.random.__self__
def random_seed(random=random):
"""Draw a random seed compatible with :class:`numpy:numpy.random.RandomState`.
Parameters
----------
random : :class:`numpy:numpy.random.RandomState`
Random stream to use to draw the random seed.
Returns
-------
seed : int
Seed for a new random stream in ``[0, 2**32-1)``.
"""
# np randint is exclusive on the high value, py randint is inclusive. We
# must use inclusive limit here to work with both. We are missing one
# possibility here (2**32-1), but I don't think that matters.
seed = random.randint(0, SEED_MAX_INCL)
return seed
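# Example (illustrative): spawn an independent, reproducible stream from the
# global one with `rng = np.random.RandomState(random_seed())`.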
def shuffle_2d(X, random=random):
"""Generalization of :func:`numpy:numpy.random.shuffle` of 2D array.
Performs in-place shuffling of `X`. So, it has no return value.
Parameters
----------
X : :class:`numpy:numpy.ndarray` of shape (n, m)
Array-like 2D data to shuffle in place. Shuffles order of rows and order of elements within a row.
random : :class:`numpy:numpy.random.RandomState`
Random stream to use to draw the random seed.
"""
random.shuffle(X)
for rr in X:
random.shuffle(rr)
def strat_split(X, n_splits, inplace=False, random=random):
"""Make a stratified random split of items.
Parameters
----------
X : :class:`numpy:numpy.ndarray` of shape (n, m)
Data we would like to split randomly into groups. We should get the same number +/-1 of elements from each row
in each group.
n_splits : int
How many groups we want to split into.
inplace : bool
If true, this function will cause in place modifications to `X`.
random : :class:`numpy:numpy.random.RandomState`
Random stream to use for reproducibility.
Returns
-------
Y : list(:class:`numpy:numpy.ndarray`)
Stratified split of `X` where each row of `Y` contains the same number +/-1 of elements from each row of `X`.
Must be a list of arrays since each row may have a different length.
"""
# Arguably, this function could go in stats
assert np.ndim(X) == 2
assert n_splits > 0
if not inplace:
X = np.array(X, copy=True)
shuffle_2d(X, random=random)
# Note this is like X.T.ravel()
Y = np.array_split(np.ravel(X, order="F"), n_splits)
# Just for good measure make sure this is shuffled too, prob not needed.
shuffle_2d(Y, random=random)
return Y
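# Illustrative property (hypothetical input): for X of shape (2, 4) and
# n_splits=2, each returned group holds 4 elements, exactly 2 drawn from each
# row of X, in randomized order.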
def isclose_lte(x, y):
"""Check that less than or equal to (lte, ``x <= y``) is approximately true between all elements of `x` and `y`.
This is similar to :func:`numpy:numpy.allclose` for equality. Shapes of all input variables must be broadcast
compatible.
Parameters
----------
x : :class:`numpy:numpy.ndarray`
Lower limit in ``<=`` check.
y : :class:`numpy:numpy.ndarray`
Upper limit in ``<=`` check.
Returns
-------
lte : bool
True if ``x <= y`` is approximately true element-wise.
"""
# Use np.less_equal to ensure always np type consistently
lte = np.less_equal(x, y) | np.isclose(x, y)
return lte
def clip_chk(x, lb, ub, allow_nan=False):
"""Clip all element of `x` to be between `lb` and `ub` like :func:`numpy:numpy.clip`, but also check
:func:`numpy:numpy.isclose`.
Shapes of all input variables must be broadcast compatible.
Parameters
----------
x : :class:`numpy:numpy.ndarray`
Array containing elements to clip.
lb : :class:`numpy:numpy.ndarray`
Lower limit in clip.
ub : :class:`numpy:numpy.ndarray`
Upper limit in clip.
allow_nan : bool
If true, we allow ``nan`` to be present in `x` without out raising an error.
Returns
-------
x : :class:`numpy:numpy.ndarray`
An array with the elements of `x`, but where values < `lb` are replaced with `lb`, and those > `ub` with `ub`.
"""
assert np.all(lb <= ub) # np.clip does not do this check
x = np.asarray(x)
# These are asserts not exceptions since clip_chk most used internally.
if allow_nan:
assert np.all(isclose_lte(lb, x) | np.isnan(x))
assert np.all(isclose_lte(x, ub) | np.isnan(x))
else:
assert np.all(isclose_lte(lb, x))
assert np.all(isclose_lte(x, ub))
x = np.clip(x, lb, ub)
return x
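# Example (illustrative): clip_chk(1.0 + 1e-9, 0.0, 1.0) returns 1.0, since the
# tiny overshoot passes the isclose check and is clipped, whereas
# clip_chk(1.1, 0.0, 1.0) raises an AssertionError.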
def snap_to(x, fixed_val=None):
"""Snap input `x` to the `fixed_val` unless `fixed_val` is `None`, where `x` is returned.
Parameters
----------
x : :class:`numpy:numpy.ndarray`
Array containing elements to snap.
fixed_val : :class:`numpy:numpy.ndarray` or None
Values to be returned if `x` is close, otherwise an error is raised. If `fixed_val` is `None`, `x` is returned.
Returns
-------
fixed_val : :class:`numpy:numpy.ndarray`
Snapped to value of `x`.
"""
if fixed_val is None:
return x
# Include == for discrete types where allclose doesn't work
if not (np.all(x == fixed_val) or np.allclose(x, fixed_val)):
raise ValueError("Expected fixed value %s, got %s." % (repr(fixed_val), repr(x)))
assert np.all(x == fixed_val) or np.allclose(x, fixed_val)
fixed_val = np.broadcast_to(fixed_val, np.shape(x))
return fixed_val
def linear_rescale(X, lb0, ub0, lb1, ub1, enforce_bounds=True):
"""Linearly transform all elements of `X`, bounded between `lb0` and `ub0`, to be between `lb1` and `ub1`.
Shapes of all input variables must be broadcast compatible.
Parameters
----------
X : :class:`numpy:numpy.ndarray`
Array containing elements to rescale.
lb0 : :class:`numpy:numpy.ndarray`
Current lower bound of `X`.
ub0 : :class:`numpy:numpy.ndarray`
Current upper bound of `X`.
lb1 : :class:`numpy:numpy.ndarray`
Desired lower bound of `X`.
ub1 : :class:`numpy:numpy.ndarray`
Desired upper bound of `X`.
enforce_bounds : bool
If True, perform input bounds check (and clipping if slight violation) on the input `X` and again on the
output. This argument is not meant to be vectorized like the other input variables.
Returns
-------
X : :class:`numpy:numpy.ndarray`
Elements of input `X` after linear rescaling.
"""
assert np.all(np.isfinite(lb0))
assert np.all(np.isfinite(lb1))
assert np.all(np.isfinite(ub0))
assert np.all(np.isfinite(ub1))
assert np.all(lb0 < ub0)
assert np.all(lb1 <= ub1)
m = np.true_divide(ub1 - lb1, ub0 - lb0)
assert np.all(m >= 0)
if enforce_bounds:
X = clip_chk(X, lb0, ub0) # This will flag any non-finite X input.
X = clip_chk(m * (X - lb0) + lb1, lb1, ub1)
else:
X = m * (X - lb0) + lb1
return X
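# Example (illustrative): linear_rescale(5.0, 0.0, 10.0, 0.0, 1.0) returns 0.5,
# mapping the midpoint of [0, 10] to the midpoint of [0, 1].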
def argmin_2d(X):
"""Take the arg minimum of a 2D array."""
assert X.size > 0, "argmin of empty array not defined"
ii, jj = np.unravel_index(X.argmin(), X.shape)
return ii, jj
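# Example (illustrative): argmin_2d(np.array([[3, 1], [2, 0]])) returns (1, 1),
# the row and column indices of the smallest element.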
def cummin(x_val, x_key):
"""Get the cumulative minimum of `x_val` when ranked according to `x_key`.
Parameters
----------
x_val : :class:`numpy:numpy.ndarray` of shape (n, d)
The array to get the cumulative minimum of along axis 0.
x_key : :class:`numpy:numpy.ndarray` of shape (n, d)
The array for ranking elements as to what is the minimum.
Returns
-------
c_min : :class:`numpy:numpy.ndarray` of shape (n, d)
The cumulative minimum array.
"""
assert x_val.shape == x_key.shape
assert x_val.ndim == 2
assert not np.any(np.isnan(x_key)), "cummin not defined for nan key"
n, _ = x_val.shape
xm = np.minimum.accumulate(x_key, axis=0)
idx = np.maximum.accumulate((x_key <= xm) * np.arange(n)[:, None])
c_min = np.take_along_axis(x_val, idx, axis=0)
return c_min
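# Worked example (illustrative): with x_key = [[3], [1], [2]] and
# x_val = [[30], [10], [20]] (as arrays), cummin returns [[30], [10], [10]]:
# once the key reaches its running minimum at the second entry, the matching
# value is carried forward.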
| 33.13308 | 119 | 0.638398 | 1,334 | 8,714 | 4.113193 | 0.248876 | 0.052852 | 0.068343 | 0.084199 | 0.299617 | 0.223619 | 0.20175 | 0.172408 | 0.163295 | 0.144341 | 0 | 0.011651 | 0.241565 | 8,714 | 262 | 120 | 33.259542 | 0.818581 | 0.64689 | 0 | 0.057971 | 0 | 0 | 0.037442 | 0 | 0 | 0 | 0 | 0 | 0.275362 | 1 | 0.130435 | false | 0 | 0.014493 | 0 | 0.275362 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56d37c682b8fa36e0cc92147b67a5132d916883c | 1,332 | py | Python | Leetcode/74_search-a-2d-matrix.py | diekaltesonne/Contexts | 064f61e84896852d6579675e2423537ee5bf8331 | [
"MIT"
] | null | null | null | Leetcode/74_search-a-2d-matrix.py | diekaltesonne/Contexts | 064f61e84896852d6579675e2423537ee5bf8331 | [
"MIT"
] | null | null | null | Leetcode/74_search-a-2d-matrix.py | diekaltesonne/Contexts | 064f61e84896852d6579675e2423537ee5bf8331 | [
"MIT"
] | null | null | null | from typing import List
class Solution:
def _search(self,l,r,x):
if r >= l:
mid = l + (r - l) // 2
# If element is present at the middle itself
if self.nums[mid][-1] >= x and self.nums[mid][0] <=x:
return self._search_small(0,len(self.nums[mid])-1,x,mid)
elif self.nums[mid][-1] > x:
return self._search(l, mid-1, x)
# Else the element can only be present
# in right subarray
else:
return self._search(mid + 1, r, x)
else:
return False
def _search_small(self,l,r,x,a):
if len(self.nums[a]) == 0 and x == 0:
return False
if r >= l:
mid = l + (r - l) // 2
# If element is present at the middle itself
if self.nums[a][mid] == x:
return True
elif self.nums[a][mid] > x:
return self._search_small(l, mid-1, x,a)
# Else the element can only be present
# in right subarray
else:
return self._search_small(mid + 1, r, x,a)
else:
return False
def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
self.nums = matrix
return self._search(0,len(self.nums)-1,target) | 36 | 73 | 0.482733 | 185 | 1,332 | 3.410811 | 0.227027 | 0.114105 | 0.152139 | 0.057052 | 0.538827 | 0.431062 | 0.383518 | 0.383518 | 0.383518 | 0.383518 | 0 | 0.018797 | 0.400901 | 1,332 | 37 | 74 | 36 | 0.77193 | 0.146396 | 0 | 0.392857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
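# Example (illustrative):
#   Solution().searchMatrix([[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 60]], 3)
# returns True, while searching for 13 in the same matrix returns False.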
56d7e1352e0a41bda99357c7991be824ba742bcd | 6,484 | py | Python | fingerprint/client/util.py | ghoshishan/comp-sec | f1bec8fc68814bc421337069e58a67447baf2a89 | [
"MIT"
] | null | null | null | fingerprint/client/util.py | ghoshishan/comp-sec | f1bec8fc68814bc421337069e58a67447baf2a89 | [
"MIT"
] | null | null | null | fingerprint/client/util.py | ghoshishan/comp-sec | f1bec8fc68814bc421337069e58a67447baf2a89 | [
"MIT"
] | null | null | null | import json
import base64
import random
import logging
from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2
from phe import paillier, EncryptedNumber, PaillierPublicKey
import client.dbhandler as dbhandler
from client.exceptions import WrongPin, UnknownUser
logger = logging.getLogger('client')
# for salting pins of users
SALT = b'=sNmXf\xd6\xefe\xf8\xd0\x10\xe5\xb2\xf3o\x01|\xf3\x99\xbf\xd6\x88\x0c\xb6\x9b\x08\xb3\xac\xf0\xb9g'
def generate_verification_code():
"""
Generates a list of random numbers which is used to transform
the fingerprint vector to protect against malicious users
who have access to the fingerprint data of the user they want to impersonate.
:return: user verification code
"""
user_vcode = random.sample(range(1, 255), 4)
return user_vcode
def generate_shuffle_code():
"""
Returns a random shuffle code.
:return: shuffle code
"""
user_shuffle_code = random.randint(1000, 9999)
return user_shuffle_code
def enrollment_transform(user_fingerprint, user_vcode, user_shuffle_code):
"""
Performs fingerprint transform during enrollment
:param user_fingerprint: fingerprint feature vector
:param user_vcode: verification code of the user
:return: transformed fingerprint vector
"""
transformed_fingerprint = user_fingerprint + user_vcode
sumOfXiSquare = sum(x*x for x in user_fingerprint)
sumOfViSquare = sum(v*v for v in user_vcode)
transformed_fingerprint.extend([1, 1, sumOfXiSquare, sumOfViSquare])
random.Random(user_shuffle_code).shuffle(transformed_fingerprint)
return transformed_fingerprint
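# Worked example (hypothetical values): for user_fingerprint=[3, 4] and
# user_vcode=[1, 2, 5, 7], the vector becomes [3, 4, 1, 2, 5, 7] extended with
# [1, 1, 25, 79] (25 = 3**2 + 4**2, 79 = 1 + 4 + 25 + 49), then deterministically
# shuffled using user_shuffle_code as the seed.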
def string_encrypt(pin, plaintext):
"""
Performs AES encryption based on a pin.
Used for storing paillier key pair and verification code of a user.
:param pin: 4 digit integer string
    :param plaintext: JSON dumps of required data to be encrypted
:return: ciphertext and initialization vector
"""
key = PBKDF2(pin, SALT, dkLen=32)
data = plaintext.encode('utf-8')
# CFB basically doesn't require padding to maintain block size
cipher_encrypt = AES.new(key, AES.MODE_CFB)
ciphered_bytes = cipher_encrypt.encrypt(data)
iv = cipher_encrypt.iv
return ciphered_bytes, iv
def string_decrypt(pin, iv, ciphertext):
"""
Performs AES decryption on a ciphertext given a pin and iv.
:param pin: 4 digit integer string
:param iv: Initialization vector returned during encryption
:param ciphertext: encrypted cipher text
:return: decrypted string data
"""
key = PBKDF2(pin, SALT, dkLen=32)
cipher_decrypt = AES.new(key, AES.MODE_CFB, iv)
deciphered_bytes = cipher_decrypt.decrypt(ciphertext)
try:
decrypted_data = deciphered_bytes.decode('utf-8')
except UnicodeDecodeError as e:
logger.info(f'Incorrect pin')
return None
return decrypted_data
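# Round-trip sketch (hypothetical pin and plaintext):
#   ciphertext, iv = string_encrypt("1234", '{"tid": 7}')
#   string_decrypt("1234", iv, ciphertext)  # -> '{"tid": 7}'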
def paillier_encrypt_vector(pub_key, transformed_fingerprint):
"""
    Performs encryption on the transformed fingerprint
using the paillier cryptosystem.
:param pub_key: public key of the user
:param transformed_fingerprint: a fingerprint feature vector
:return: encrypted feature vector
"""
encrypted_fingerprint = [pub_key.encrypt(
feature) for feature in transformed_fingerprint]
serialized_fingerprint = [] # readable form of the ciphertext
for entry in encrypted_fingerprint:
serialized_fingerprint.append(entry._EncryptedNumber__ciphertext)
logger.debug(json.dumps(serialized_fingerprint, indent=2))
return encrypted_fingerprint
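# Note: phe's EncryptedNumber supports adding ciphertexts and multiplying a
# ciphertext by a plaintext scalar, which is what lets the server combine these
# encrypted features into a match score without ever decrypting them.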
def store_credentials(user_roll_no, user_pin, user_tid, user_pub_key, user_priv_key, user_vcode, user_shuffle_code):
"""
Store credentials of the user in an encrypted format.
:param user_roll_no: user roll no
:param user_pin: user 4 digit integer pin
:param user_tid: user fingerprint id stored on the server
:param user_pub_key: user paillier public key
:param user_priv_key: user paillier private key
:param user_vcode: user verification code
"""
data = dbhandler.read_data('userdata.json')
user_data = {
'tid': user_tid,
'vcode': user_vcode,
'scode': user_shuffle_code,
'n': user_pub_key.n,
'p': user_priv_key.p,
'q': user_priv_key.q
}
user_data_string = json.dumps(user_data)
ciphertext, iv = string_encrypt(user_pin, user_data_string)
store_data = {
'roll_no': user_roll_no,
'ciphertext': base64.b64encode(ciphertext).decode('utf-8'),
'iv': base64.b64encode(iv).decode('utf-8')
}
data.append(store_data)
dbhandler.write_data(data, 'userdata.json')
logger.info(f'User data stored: {user_roll_no}')
def retrieve_credentials(user_roll_no, user_pin):
"""
Fetch and decrypt encrypted user data stored in the database
:param user_roll_no: user roll number
:param user_pin: user pin
:return: decrypted data
"""
data = dbhandler.read_data('userdata.json')
ciphertext = None
iv = None
flag = 0
for user in data:
if user['roll_no'] == user_roll_no:
ciphertext = base64.b64decode(user['ciphertext'].encode('utf-8'))
iv = base64.b64decode(user['iv'].encode('utf-8'))
flag = 1
break
if flag == 0:
print(f'Unknown user: {user_roll_no}')
        raise UnknownUser
user_data = string_decrypt(user_pin, iv, ciphertext)
if not user_data:
print(f'Incorrect pin: {user_roll_no}')
        raise WrongPin
user_data = json.loads(user_data)
return user_data
def verification_transform(user_fingerprint, user_vcode, user_shuffle_code):
"""
Performs transformation on the fingerprint feature vector
required during verification.
:param user_fingerprint: fingerprint feature vector
:param user_vcode: verification code of the user
:return: transformed fingerprint
"""
    # Mirrors enrollment_transform, but scales each term by -2 and swaps the squared sums,
    # so the inner product of the enrolled and verification vectors equals the squared
    # Euclidean distance between the two fingerprints (the verification-code terms cancel).
transformed_fingerprint = user_fingerprint + user_vcode
transformed_fingerprint = [-2*n for n in transformed_fingerprint]
sumOfYiSquare = sum(y*y for y in user_fingerprint)
sumOfViSquare = sum(v*v for v in user_vcode)
transformed_fingerprint.extend([sumOfYiSquare, sumOfViSquare, 1, 1])
random.Random(user_shuffle_code).shuffle(transformed_fingerprint)
return transformed_fingerprint
| 33.947644 | 116 | 0.715299 | 849 | 6,484 | 5.287397 | 0.249706 | 0.073513 | 0.024504 | 0.015594 | 0.258409 | 0.253063 | 0.175095 | 0.146581 | 0.146581 | 0.121631 | 0 | 0.015516 | 0.204812 | 6,484 | 190 | 117 | 34.126316 | 0.85512 | 0.313849 | 0 | 0.151515 | 0 | 0.010101 | 0.07867 | 0.023434 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.292929 | 0.232323 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56dc55fee9c5b749a9b50ac4f9d5e574bceb9dda | 6,622 | py | Python | kemeny.py | cai-michael/kemenyapprox | 802e22c58f8649dcc8ddf888603f8c19ae32621c | [
"Apache-2.0"
] | null | null | null | kemeny.py | cai-michael/kemenyapprox | 802e22c58f8649dcc8ddf888603f8c19ae32621c | [
"Apache-2.0"
] | null | null | null | kemeny.py | cai-michael/kemenyapprox | 802e22c58f8649dcc8ddf888603f8c19ae32621c | [
"Apache-2.0"
] | null | null | null | """
Implements the Kemeny Rule and various heuristics
"""
import time
import datetime
from itertools import combinations, permutations
from multiprocessing import Pool
import functools
from collections import defaultdict
from matrix import generate_zeros_matrix, matrix_multiplication
NUM_WORKERS = 2
STATIONARY_DISTRIBUTION_ITERATIONS = 1000
def kendall_tau_distance(ranking_a, ranking_b):
"""
    Determines the Kendall tau distance between two orderings
"""
distance = 0
num_candidates = len(ranking_a)
pairs = combinations(range(1, num_candidates + 1), 2)
for alt_x, alt_y in pairs:
a_order = ranking_a.index(alt_x) - ranking_a.index(alt_y)
b_order = ranking_b.index(alt_x) - ranking_b.index(alt_y)
if a_order * b_order < 0:
distance += 1
return distance
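# Worked example (hypothetical rankings): kendall_tau_distance((1, 2, 3), (3, 2, 1))
# returns 3, since all three candidate pairs are ordered oppositely in the two rankings.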
def calculate_ranking_score(ranking, profile):
"""
Calculates the ranking score for a particular strict ordering
"""
ranking_score = 0
for profile_ranking in profile:
ranking_score += kendall_tau_distance(ranking, profile_ranking)
return ranking_score
def kemeny_rule(profile, num_workers=1):
"""
    Implements the Kemeny rule by calculating all Kendall tau distances
"""
print('\nApplying the Kemeny Rule to the Profile...')
# Start timer
time_start = time.perf_counter()
num_candidates = len(profile[0])
ranking_scores = []
rank_permutations = list(permutations(range(1, num_candidates + 1)))
calculate_scores = functools.partial(calculate_ranking_score, profile=profile)
with Pool(num_workers) as worker_pool:
ranking_scores = worker_pool.map(calculate_scores, rank_permutations)
min_ranking_score = min(ranking_scores)
win_idx = [index for index, score in enumerate(ranking_scores) if score == min_ranking_score]
print("The winning ranking(s) are as follows: ")
for index in win_idx:
winning_ranking = rank_permutations[index]
winning_ranking_stringified = [str(i) for i in winning_ranking]
print(", ".join(winning_ranking_stringified))
# Calculate time required to finish
time_finish = time.perf_counter()
time_elapsed = datetime.timedelta(seconds = (time_finish - time_start))
print(f"Applying the Kemeny Rule took {time_elapsed}")
def determine_pairwise_victories(profile):
"""
Determines the pairwise victories for candidates
Returns a dictionary indexed by tuples of candidates
"""
pairwise_victories = defaultdict(int)
num_candidates = len(profile[0])
candidiate_pairs = list(permutations(range(1, num_candidates + 1), 2))
for pair in candidiate_pairs:
for vote in profile:
if vote.index(pair[0]) < vote.index(pair[1]):
pairwise_victories[pair] += 1
return pairwise_victories
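# Worked example (hypothetical profile): for [(1, 2, 3), (2, 1, 3)], the result
# maps (1, 3) -> 2 and (2, 3) -> 2 (both voters rank candidates 1 and 2 above 3),
# while (1, 2) -> 1 and (2, 1) -> 1 (the voters split on that pair).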
def create_transition_matrix(pairwise_victories, num_candidates, num_votes, mc_type):
"""
Generates a transition matrix based on the MC heuristic type
Type 1:
The transition probability of a to b is:
1 / # Candidates if b is preferred to a at some point
0 otherwise
The transition probability from a to a is 1 - Sum of all other transitions
Type 2:
The transition probability of a to b is:
1 / # Candidates if the majority of ballots prefer b to a
0 otherwise
The transition probability from a to a is 1 - Sum of all other transitions
Type 3:
The transition probability of a to b is:
Summation of all orderings where
sum(orderings where b is preferred to a) / Orderings * candidates
The transition probability from a to a is 1 - Sum of all other transitions
"""
# Put 0's on transition matrix
transition_matrix = generate_zeros_matrix(num_candidates, num_candidates)
# Populate transition probabilities in the matrix
candidiate_pairs = list(permutations(range(1, num_candidates + 1), 2))
# Based on preferences of a and b assign probability of a -> b
if mc_type == 1:
for first, second in candidiate_pairs:
if pairwise_victories[(second, first)] > 0:
probability = 1 / num_candidates
else:
probability = 0
transition_matrix[first - 1][second - 1] = probability
elif mc_type == 2:
for first, second in candidiate_pairs:
if pairwise_victories[(second, first)] > (num_votes // 2):
probability = 1 / num_candidates
else:
probability = 0
transition_matrix[first - 1][second - 1] = probability
elif mc_type == 3:
for first, second in candidiate_pairs:
probability = pairwise_victories[(second, first)] / (num_votes * num_candidates)
transition_matrix[first - 1][second - 1] = probability
# Determine the probability of a self-transition
for candidate in range(1, num_candidates + 1):
self_transition_probability = 1 - sum(transition_matrix[candidate - 1])
transition_matrix[candidate - 1][candidate - 1] = self_transition_probability
return transition_matrix
def markov_heuristic(profile, mc_type):
"""
Applies the Markov Chain Heuristic to a Profile using a transition function of mc_type
"""
print(f'\nApplying the MC{mc_type} Markov Heuristic to the Profile...')
# Start timer
time_start = time.perf_counter()
num_candidates = len(profile[0])
num_votes = len(profile)
# Determine pairwise victories for each pair of candidates
pairwise_wins = determine_pairwise_victories(profile)
transition_matrix = create_transition_matrix(pairwise_wins, num_candidates, num_votes, mc_type)
# Put the probability matrix to a high power to find the stationary distribution
stationary_distribution = transition_matrix.copy()
for _ in range(STATIONARY_DISTRIBUTION_ITERATIONS):
stationary_distribution = matrix_multiplication(stationary_distribution, transition_matrix)
final_probabilities = stationary_distribution[0]
prob_tuples = [(idx + 1, prob) for idx, prob in enumerate(final_probabilities)]
prob_tuples.sort(key=lambda x: x[1], reverse=True)
final_ranking = [pair[0] for pair in prob_tuples]
print("The winning ranking is as follows: ")
winning_ranking_stringified = [str(i) for i in final_ranking]
print(", ".join(winning_ranking_stringified))
# Calculate time required to finish
time_finish = time.perf_counter()
time_elapsed = datetime.timedelta(seconds = (time_finish - time_start))
print(f"Applying the MC{mc_type} Markov Model took {time_elapsed}")
| 36.384615 | 99 | 0.706131 | 864 | 6,622 | 5.21875 | 0.196759 | 0.04613 | 0.021734 | 0.021069 | 0.357951 | 0.334664 | 0.306055 | 0.283211 | 0.26059 | 0.26059 | 0 | 0.012195 | 0.219873 | 6,622 | 181 | 100 | 36.585635 | 0.860627 | 0.228179 | 0 | 0.257732 | 0 | 0 | 0.05749 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061856 | false | 0 | 0.072165 | 0 | 0.175258 | 0.082474 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56dc67205242f7ff839dde303a1973e4737ed5cb | 1,331 | py | Python | utilities/write_spatial_file.py | markfoleyie/gisp_2021 | 6077b0980d775fefeb46983e70a1f03faa1220ec | [
"MIT"
] | 1 | 2022-01-28T13:39:42.000Z | 2022-01-28T13:39:42.000Z | utilities/write_spatial_file.py | markfoleyie/gisp_2021 | 6077b0980d775fefeb46983e70a1f03faa1220ec | [
"MIT"
] | null | null | null | utilities/write_spatial_file.py | markfoleyie/gisp_2021 | 6077b0980d775fefeb46983e70a1f03faa1220ec | [
"MIT"
] | null | null | null | try:
import fiona
from fiona.crs import from_epsg
import utilities.fiona_supported_drivers as fsd
import os
except Exception as e:
print(f"{e}")
quit(1)
def write_spatial(file=None, directory=None, data=None, **meta):
try:
if not data:
raise ValueError(f"No data to write.")
if not os.path.exists(directory):
raise ValueError(f"Target directory doesn't exist.")
if "driver" not in meta:
raise ValueError(f"Missing driver.")
if "crs" not in meta:
raise ValueError(f"Missing CRS.")
if "schema" not in meta:
raise ValueError(f"Missing schema.")
if meta["driver"] not in fsd.file_extensions:
raise ValueError(f"Invalid driver.")
target = os.path.join(directory, f"{file}.{fsd.file_extensions[meta['driver']]}")
meta["crs"] = from_epsg(meta["crs"])
for k, v in meta["schema"]["properties"].items():
if v == "string":
meta["schema"]["properties"][k] = "str"
elif v == "double":
meta["schema"]["properties"][k] = "float"
with fiona.open(target, "w", **meta) as fh:
for feature in data:
fh.write(feature)
except Exception as e:
print(f"{e}")
quit(1)
| 32.463415 | 89 | 0.561232 | 171 | 1,331 | 4.327485 | 0.356725 | 0.121622 | 0.12973 | 0.056757 | 0.210811 | 0.210811 | 0.210811 | 0.081081 | 0.081081 | 0 | 0 | 0.00216 | 0.304282 | 1,331 | 40 | 90 | 33.275 | 0.796976 | 0 | 0 | 0.228571 | 0 | 0 | 0.18858 | 0.033058 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.114286 | 0 | 0.142857 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56dfe03b101cc2f8e7b14651f15e361abb52dfc4 | 3,536 | py | Python | src/pytest_qatouch/plugin.py | MohamedRaslan/pytest-qatouch | 8d6ddd414d2ee836da1ebb9bee64a7672ed7e04f | [
"MIT"
] | null | null | null | src/pytest_qatouch/plugin.py | MohamedRaslan/pytest-qatouch | 8d6ddd414d2ee836da1ebb9bee64a7672ed7e04f | [
"MIT"
] | 6 | 2021-06-26T20:11:10.000Z | 2022-02-21T19:41:50.000Z | src/pytest_qatouch/plugin.py | MohamedRaslan/pytest-qatouch | 8d6ddd414d2ee836da1ebb9bee64a7672ed7e04f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from .utils import QATOUCH_MARK, MissingQatouchData, ExpectedIntegerValue
from .qatouch import QatouchTestResult
__QATOUCH_TEST_RSESULT = None
___Enable_PLUGIN = None
def pytest_addoption(parser):
group = parser.getgroup("QaTouch")
def add_option(option, dest, help, default=None, type=None, **kwargs):
group.addoption(option, dest=dest, default=default, **kwargs)
parser.addini(dest, default=default, type=type, help=help)
add_option(
option="--qatouch",
action="store",
dest="qatouch",
default="False",
help="Enable the qatouch plugin (Set ['True', 'False'])",
)
add_option(
option="--qatouch-subdomain",
action="store",
dest="qatouch-subdomain",
help="Your qatouch submodule name (i.e <your_subdomain>.qatouch.com)",
)
add_option(
"--qatouch-api-token",
action="store",
dest="qatouch-api-token",
help="Your qatouch API token",
)
add_option(
"--qatouch-project-key",
action="store",
dest="qatouch-project-key",
help="The qatouch project key",
)
add_option(
"--qatouch-testrun-key",
action="store",
dest="qatouch-testrun-key",
help="The testrun key in qatouch project",
)
def pytest_configure(config):
config.addinivalue_line("markers", f"{QATOUCH_MARK}(TR): Mark test")
global ___Enable_PLUGIN
___Enable_PLUGIN = (
str(config.getoption("--qatouch")).lower() == "true"
or str(config.getini("qatouch")).lower() == "true"
)
if ___Enable_PLUGIN:
def get_option(option: str):
value = config.getoption("--" + option) or config.getini(option)
if value is None:
raise MissingQatouchData(
f"The option ['--'{option}] or the ini option[{option}] not set"
)
return value
global __QATOUCH_TEST_RSESULT
__QATOUCH_TEST_RSESULT = QatouchTestResult(
domain=get_option("qatouch-subdomain"),
api_token=get_option("qatouch-api-token"),
project_key=get_option("qatouch-project-key"),
testrun_key=get_option("qatouch-testrun-key"),
)
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
test_result = outcome.get_result()
qa_marker = item.get_closest_marker(QATOUCH_MARK)
if __QATOUCH_TEST_RSESULT and qa_marker:
if test_result.when == "call":
__add_test(qa_marker, test_result)
elif test_result.when in ("setup", "teardown") and test_result.outcome != "passed":
__add_test(qa_marker, test_result)
def pytest_sessionfinish():
global __QATOUCH_TEST_RSESULT
if ___Enable_PLUGIN and __QATOUCH_TEST_RSESULT:
__QATOUCH_TEST_RSESULT.push_results_to_qatouch()
__QATOUCH_TEST_RSESULT = None
def __add_test(qa_marker, test_result):
if "TR" in qa_marker.kwargs:
tr_value = qa_marker.kwargs["TR"]
if not isinstance(tr_value, int):
raise ExpectedIntegerValue(
f"Expected the TR value to be a valid integer value bug insted got {tr_value} of type {type(tr_value)}"
)
else:
raise MissingQatouchData(f"Expected to have a TR and its value, but not found")
__QATOUCH_TEST_RSESULT.push_testcase_to_results(
testcase_id=tr_value, testcase_status=test_result.outcome
)
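# Usage sketch (hypothetical test module; assumes QATOUCH_MARK is the "qatouch"
# marker name registered above):
#
#   @pytest.mark.qatouch(TR=1)
#   def test_login():
#       assert True
#
# Run with:
#   pytest --qatouch=True --qatouch-subdomain=<sub> --qatouch-api-token=<token>
#          --qatouch-project-key=<pkey> --qatouch-testrun-key=<rkey>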
| 30.747826 | 119 | 0.63914 | 414 | 3,536 | 5.169082 | 0.270531 | 0.046262 | 0.075701 | 0.051402 | 0.092056 | 0.068692 | 0 | 0 | 0 | 0 | 0 | 0.000376 | 0.248586 | 3,536 | 114 | 120 | 31.017544 | 0.805043 | 0.005939 | 0 | 0.177778 | 0 | 0.011111 | 0.218332 | 0.020211 | 0 | 0 | 0 | 0 | 0 | 1 | 0.077778 | false | 0.011111 | 0.033333 | 0 | 0.122222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56e136e9f8cd4fb32fc3b35b6dbfa5fc8c91cf9e | 6,596 | py | Python | sheetmaker/language_strings.py | cosme12/cheet-sheet-maker | 7bbb4f4dd310127d9ca57a9365dc2bfb6bce91da | [
"MIT"
] | 112 | 2017-02-08T20:42:14.000Z | 2022-03-04T01:50:32.000Z | sheetmaker/language_strings.py | cosme12/cheet-sheet-maker | 7bbb4f4dd310127d9ca57a9365dc2bfb6bce91da | [
"MIT"
] | 20 | 2017-02-09T11:22:08.000Z | 2018-06-22T19:04:23.000Z | sheetmaker/language_strings.py | cosme12/cheet-sheet-maker | 7bbb4f4dd310127d9ca57a9365dc2bfb6bce91da | [
"MIT"
] | 30 | 2017-02-09T13:05:52.000Z | 2022-01-30T05:59:09.000Z | """Language selector handler
Todo:
    * Use internationalization
* Add more languages
"""
english = {
"INTRO_MESSAGE" : "Welcome to CheatSheet Maker",
"MAIN_MENU_OPTIONS" : { 1: "Create sheet",
2: "Export (NOT CODED YET)",
3: "Help (NOT CODED YET)",
4: "Exit",
},
"MENU_MESSAGE" : "Type the number to choose your option.",
"CONFIG_SHEET_MESSAGE1" : "Building the basic layout... answer the next questions.",
"CONFIG_SHEET_MESSAGE2" : "How many columns your sheet will have?",
"CONFIG_SHEET_MESSAGE3" : "Which color style do you prefer?",
"CONFIG_SHEET_OPTIONS1" : { 1: "What is your sheet title? ('CheatSheet' is added automatically)"
},
"CONFIG_SHEET_OPTIONS2" : { 1: "1 main column",
2: "2 main columns",
3: "3 main columns"
},
"CONFIG_SHEET_OPTIONS3" : { 1: "Orange",
2: "Black and white",
3: "Red",
4: "Yellow",
5: "Green",
6: "Blue",
},
"HEADER_MESSAGE" : "Building the header... answer the next questions.",
"HEADER_OPTIONS" : { 1: "What is the author name?"
},
"FOOTER_MESSAGE" : "Building the footer... answer the next questions.",
"FOOTER_OPTIONS1" : { 1: "What is the author picture url?"
},
"FOOTER_OPTIONS2" : { 1: "What is the author website url? (use http://)"
},
"FOOTER_OPTIONS3" : { 1: "What is the sponsor name?"
},
"FOOTER_OPTIONS4" : { 1: "What is the sponsor webite url? (use http://)"
},
"BLOCK_MESSAGE" : "Building the blocks... answer the next questions.",
"BLOCK_OPTIONS" : { 1: "Create text block",
2: "Create block with rows",
0: "Done"
},
"BLOCK_ROWS_MESSAGE1" : "Building block with rows... answer the next questions.",
"BLOCK_ROWS_MESSAGE2" : "In what main column do you want to build it?",
"BLOCK_ROWS_OPTIONS1" : { 1: "What is the title of the block?"
},
"BLOCK_ROWS_OPTIONS2" : { 1: "How many rows does it have?"
},
"BLOCK_ROWS_OPTIONS3" : { 1: "What is the text of each row? (text row1. # text row2. # text row3)"
},
"TEXT_BLOCK_MESSAGE" : "Building text block... answer the next questions.",
"TEXT_BLOCK_EXTRA" : "main column",
"TEXT_BLOCK_OPTIONS1" : { 1: "What is the title of the block?"
},
"TEXT_BLOCK_OPTIONS2" : { 1: "What is the text for the block (use <br> for new line or any html tag for formatting)"
},
"END_MESSAGE" : "Thanks for using CheatSheet Maker. Feel free to share your ideas at http://github.com/cosme12/cheasheet-maker",
"EXIT_MESSAGE" : "Press any key to exit",
"INVALID_INPUT_MESSAGE" : "Invalid input. Try again.",
}
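# Usage sketch (illustrative, not part of the original module): pick a strings
# table and look up a key.
#     strings = english  # or espanol
#     print(strings["INTRO_MESSAGE"])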
espanol = {
"INTRO_MESSAGE" : "Bienvenido a CheatSheet Maker",
"MAIN_MENU_OPTIONS" : { 1: "Crear hoja",
2: "Exportar (NOT CODED YET)",
3: "Ayuda (NOT CODED YET)",
4: "Salir",
},
"MENU_MESSAGE" : "Escribe el numero para elegir tu opcion",
"CONFIG_SHEET_MESSAGE1" : "Cosntruyendo la estructura basica... responde las siguientes preguntas.",
"CONFIG_SHEET_MESSAGE2" : "Cuantas columnas tiene tu hoja?",
"CONFIG_SHEET_MESSAGE3" : "Que color de estilo prefieres?",
"CONFIG_SHEET_OPTIONS1" : { 1: "Cual es el titulo de tu hoja? ('CheatSheet' se agrega automaticamente)"
},
"CONFIG_SHEET_OPTIONS2" : { 1: "1 columna principal",
2: "2 columnas principales",
3: "3 columnas principales"
},
"CONFIG_SHEET_OPTIONS3" : { 1: "Naranja",
2: "Negro y Blanco",
3: "Rojo",
4: "Amarillo",
5: "Verde",
6: "Azul",
},
"HEADER_MESSAGE" : "Cosntruyendo el encabezado... contesta las siguientes preguntas.",
"HEADER_OPTIONS" : { 1: "Cual es el nombre del autor?"
},
"FOOTER_MESSAGE" : "Construyendo el pie de pagina... contesta las siguientes preguntas.",
"FOOTER_OPTIONS1" : { 1: "Cual es la url de la imagen del autor?"
},
"FOOTER_OPTIONS2" : { 1: "Cual es la url del sitio web del autor? (use http://)"
},
"FOOTER_OPTIONS3" : { 1: "Cual es el nombre del sponsor?"
},
"FOOTER_OPTIONS4" : { 1: "Cual es la url del sitio web del sponsor? (use http://)"
},
"BLOCK_MESSAGE" : "Construyendo los bloques... contesta las siguientes preguntas.",
"BLOCK_OPTIONS" : { 1: "Crear bloque de texto",
2: "Crear bloque con filas",
0: "Fin"
},
"BLOCK_ROWS_MESSAGE1" : "Construyendo bloque con filas... contesta las siguientes preguntas.",
"BLOCK_ROWS_MESSAGE2" : "En que columna principal quieres construilo?",
"BLOCK_ROWS_OPTIONS1" : { 1: "Cual es el titulo del bloque?"
},
"BLOCK_ROWS_OPTIONS2" : { 1: "Cuantas filas tiene?"
},
"BLOCK_ROWS_OPTIONS3" : { 1: "Cual es el texto de cada fila? (texto fila1. # texto fila2. # texto fila3.)"
},
"TEXT_BLOCK_MESSAGE" : "Construyendo bloque de texto... contesta las siguientes preguntas.",
"TEXT_BLOCK_EXTRA" : "columna principal",
"TEXT_BLOCK_OPTIONS1" : { 1: "Cual es el titulo del bloque?"
},
"TEXT_BLOCK_OPTIONS2" : { 1: "Cual es el texto para el bloque? (usa <br> para nueva linea o cualquier html tag para dar formato)"
},
"END_MESSAGE" : "Gracias por utilizar CheatSheet Maker. Comparte tus ideas en http://github.com/cosme12/cheasheet-maker",
"EXIT_MESSAGE" : "Presiona cualquier tecla para salir",
"INVALID_INPUT_MESSAGE" : "Entrada invalida. Pruba otra vez.",
} | 52.349206 | 133 | 0.516828 | 693 | 6,596 | 4.78355 | 0.326118 | 0.039819 | 0.021116 | 0.027149 | 0.238612 | 0.118552 | 0.082051 | 0.082051 | 0.035596 | 0 | 0 | 0.0252 | 0.374318 | 6,596 | 126 | 134 | 52.349206 | 0.778047 | 0.013341 | 0 | 0 | 0 | 0.033898 | 0.586371 | 0.045224 | 0 | 0 | 0 | 0.007937 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56e46bb7818acd0c03702e88afa0e940878c4a01 | 2,989 | py | Python | Hourglass_network/train.py | Ali-Sahili/Background-Subtraction-Unsupervised-Learning | 445b2cf8736a4a28cff2b074a32afe8fe6986d53 | [
"MIT"
] | 5 | 2021-05-17T06:52:28.000Z | 2022-02-20T15:35:51.000Z | Hourglass_network/train.py | WN1695173791/Background-Subtraction-Unsupervised-Learning | 445b2cf8736a4a28cff2b074a32afe8fe6986d53 | [
"MIT"
] | null | null | null | Hourglass_network/train.py | WN1695173791/Background-Subtraction-Unsupervised-Learning | 445b2cf8736a4a28cff2b074a32afe8fe6986d53 | [
"MIT"
] | 1 | 2021-05-17T06:52:33.000Z | 2021-05-17T06:52:33.000Z | import torch
from torch import nn
import torchvision.utils as vutils
import numpy as np
from focal_loss import FocalLoss
from Param import *
from utils import weights_init
from net import PoseNet
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def fit(data, mask, Net, optimizer, criterion, max_norm=0):
    img = data[0].to(device)
    heat_maps, output = Net(img)
    loss = 0
    for i in range(output.shape[1]):
        loss += criterion(output[:,i], mask[0].to(device))
    optimizer.zero_grad()
    loss.backward()
    # Clip the gradients of the trained network before the optimizer step,
    # so the clipping actually takes effect.
    if max_norm > 0:
        torch.nn.utils.clip_grad_norm_(Net.parameters(), max_norm)
    optimizer.step()
    loss.detach_()
    return loss
def train(dataloader, dataloader_mask, print_epoch=batch_size, verbose=False):
assert image_size == 256
model = PoseNet(nstack, image_size, oup_dim, bn, increase).to(device)
#if initialize_weights:
# model.apply(weights_init)
#criterion = nn.MSELoss()
criterion = FocalLoss(gamma=2)
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params ', n_parameters)
print("Starting Training Loop...")
losses = []
img_list = []
heat_maps_list = []
# For each epoch
for epoch in range(num_epochs):
torch.cuda.empty_cache()
model.train()
# For each batch in the dataloader
for i, (data, mask) in enumerate(zip(dataloader, dataloader_mask), 0):
if verbose: print(data[0].shape)
if verbose: print(data[1].shape)
recons_loss = fit(data, mask, model, optimizer, criterion)
# Output training stats
if i % print_epoch == 0:
print('[%d/%d][%d/%d]\tLoss: %.4f'
% (epoch+1, num_epochs, i, len(dataloader), recons_loss.item()))
# Save Losses for plotting later
losses.append(recons_loss.item())
            # Periodically save model outputs (and heat maps at the last epoch) for inspection
if (i % 500 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)):
with torch.no_grad():
heat_maps, img_out = model(data[0].to(device))
img_out = img_out.detach().cpu()
heat_maps = heat_maps.detach().cpu()
img_list.append(vutils.make_grid(img_out[0:10,0], nrow=5, normalize=True))
if epoch == (num_epochs-1):
for qq in range(heat_maps.shape[2]):
heat_maps_list.append(vutils.make_grid(heat_maps[0:5,nstack-1,qq].unsqueeze(1), nrow=5, normalize=True, padding=5, pad_value=1).permute(1,2,0))
heat_map_out = np.vstack(heat_maps_list)
return losses, img_list, heat_map_out, model
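# Minimal usage sketch (assumes two torch DataLoaders yielding matching image
# and mask batches; the loader names below are illustrative):
#     losses, img_list, heat_map_out, model = train(dataloader, dataloader_mask)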
| 27.675926 | 159 | 0.613249 | 408 | 2,989 | 4.330882 | 0.367647 | 0.040747 | 0.01528 | 0.014714 | 0.054329 | 0.027165 | 0 | 0 | 0 | 0 | 0 | 0.01924 | 0.269655 | 2,989 | 107 | 160 | 27.934579 | 0.790197 | 0.082302 | 0 | 0 | 0 | 0 | 0.024908 | 0.007692 | 0 | 0 | 0 | 0 | 0.017544 | 1 | 0.035088 | false | 0 | 0.157895 | 0 | 0.22807 | 0.122807 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56e51968e0b294a8b19d2f549c0b644ea69e8277 | 6,308 | py | Python | main.py | abditag2/DCGAN-tensorflow | 432b0d91bd8252c48869c205b86701993eb37618 | [
"MIT"
] | 4 | 2019-04-30T08:46:13.000Z | 2020-09-08T07:18:23.000Z | main.py | abditag2/DCGAN-tensorflow | 432b0d91bd8252c48869c205b86701993eb37618 | [
"MIT"
] | null | null | null | main.py | abditag2/DCGAN-tensorflow | 432b0d91bd8252c48869c205b86701993eb37618 | [
"MIT"
] | 1 | 2019-10-24T12:24:23.000Z | 2019-10-24T12:24:23.000Z | import io
import os
import os.path
from os import listdir
from os.path import isfile, join
import numpy as np
import tensorflow as tf
from PIL import Image
import horovod.tensorflow as hvd
from model import DCGAN
from utils import pp, visualize, show_all_variables
flags = tf.app.flags
flags.DEFINE_integer("epoch", 25, "Epoch to train [25]")
flags.DEFINE_float("learning_rate", 0.0002, "Learning rate of for adam [0.0002]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_float("train_size", np.inf, "The size of train images [np.inf]")
flags.DEFINE_integer("batch_size", None, "The size of batch images [64]")
flags.DEFINE_integer("grid_height", 8, "Grid Height")
flags.DEFINE_integer("grid_width", 8, "Grid Width")
flags.DEFINE_integer("input_height", None, "The size of image to use (will be center cropped). [108]")
flags.DEFINE_integer("input_width", None, "The size of image to use (will be center cropped). If None, same value as input_height [None]")
flags.DEFINE_integer("output_height", None, "The size of the output images to produce [64]")
flags.DEFINE_integer("output_width", None, "The size of the output images to produce. If None, same value as output_height [None]")
flags.DEFINE_string("dataset", "celebA", "The name of dataset [celebA, mnist, lsun]")
flags.DEFINE_string("input_fname_pattern", "*.jpg", "Glob pattern of filename of input images [*]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("sample_dir", "samples", "Directory name to save the image samples [samples]")
flags.DEFINE_integer("sample_rate", None, "If == 5, it will take a sample image every 5 iterations")
flags.DEFINE_boolean("train", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("crop", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("visualize", False, "True for visualizing, False for nothing [False]")
flags.DEFINE_integer("generate_test_images", 100, "Number of images to generate during test. [100]")
flags.DEFINE_integer("nbr_of_layers_d", 5, "Number of layers in Discriminator")
flags.DEFINE_integer("nbr_of_layers_g", 5, "Number of layers in Generator")
flags.DEFINE_boolean("use_checkpoints", True, "Save and load checkpoints")
FLAGS = flags.FLAGS
# default batch_size
if FLAGS.batch_size is None and FLAGS.grid_height is not None and FLAGS.grid_width is not None:
batch_size = FLAGS.grid_height * FLAGS.grid_width
elif FLAGS.batch_size is not None:
batch_size = FLAGS.batch_size
else:
raise Exception('grid_height/grid_width or batch_size must be provided')
# default size parameters
input_width = FLAGS.input_width
input_height = FLAGS.input_height
output_width = FLAGS.output_width
output_height = FLAGS.output_height
if (input_height is None and input_width is None) or (output_height is None and output_width is None):
data_path = 'data/' + FLAGS.dataset
first_image = [f for f in listdir(data_path) if isfile(join(data_path, f))][0]
image_data = open(data_path + '/' + first_image, "rb").read()
image = Image.open(io.BytesIO(image_data))
rgb_im = image.convert('RGB')
input_width = rgb_im.size[0]
output_width = rgb_im.size[0]
input_height = rgb_im.size[1]
output_height = rgb_im.size[1]
def main(_):
pp.pprint(flags.FLAGS.__flags)
hvd.init()
if FLAGS.input_width is None:
FLAGS.input_width = FLAGS.input_height
if FLAGS.output_width is None:
FLAGS.output_width = FLAGS.output_height
if FLAGS.use_checkpoints and not os.path.exists(FLAGS.checkpoint_dir):
os.makedirs(FLAGS.checkpoint_dir)
sample_dir = FLAGS.sample_dir + "_g" + str(FLAGS.nbr_of_layers_g) + "_d" + str(FLAGS.nbr_of_layers_d)
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
run_config = tf.ConfigProto()
run_config.gpu_options.allow_growth=True
run_config.gpu_options.visible_device_list = str(hvd.local_rank())
with tf.Session(config=run_config) as sess:
if FLAGS.dataset == 'mnist':
dcgan = DCGAN(
sess,
input_width=input_width,
input_height=input_height,
output_width=output_width,
output_height=output_height,
grid_height=FLAGS.grid_height,
grid_width=FLAGS.grid_width,
batch_size=batch_size,
sample_num=batch_size,
y_dim=10,
z_dim=FLAGS.generate_test_images,
dataset_name=FLAGS.dataset,
input_fname_pattern=FLAGS.input_fname_pattern,
crop=FLAGS.crop,
checkpoint_dir=FLAGS.checkpoint_dir,
sample_dir=sample_dir,
nbr_of_layers_d=FLAGS.nbr_of_layers_d,
nbr_of_layers_g=FLAGS.nbr_of_layers_g,
use_checkpoints=FLAGS.use_checkpoints)
else:
dcgan = DCGAN(
sess,
input_width=input_width,
input_height=input_height,
output_width=output_width,
output_height=output_height,
grid_height=FLAGS.grid_height,
grid_width=FLAGS.grid_width,
batch_size=batch_size,
sample_num=batch_size,
z_dim=FLAGS.generate_test_images,
dataset_name=FLAGS.dataset,
input_fname_pattern=FLAGS.input_fname_pattern,
crop=FLAGS.crop,
checkpoint_dir=FLAGS.checkpoint_dir,
sample_dir=sample_dir,
sample_rate=FLAGS.sample_rate,
nbr_of_layers_d=FLAGS.nbr_of_layers_d,
nbr_of_layers_g=FLAGS.nbr_of_layers_g,
use_checkpoints=FLAGS.use_checkpoints)
show_all_variables()
if FLAGS.train:
dcgan.train(FLAGS)
else:
if not dcgan.load(FLAGS.checkpoint_dir)[0]:
raise Exception("[!] Train a model first, then run test mode")
# to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
# [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
# [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
# [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
# [dcgan.h4_w, dcgan.h4_b, None])
# Below is codes for visualization
OPTION = 1
visualize(sess, dcgan, FLAGS, batch_size, OPTION)
if __name__ == '__main__':
tf.app.run()
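# Example invocation (illustrative; the flag values are assumptions):
#     python main.py --dataset celebA --train=True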
| 40.696774 | 138 | 0.708307 | 939 | 6,308 | 4.497338 | 0.200213 | 0.05991 | 0.051148 | 0.017049 | 0.354487 | 0.292209 | 0.267582 | 0.267582 | 0.267582 | 0.250059 | 0 | 0.012668 | 0.186588 | 6,308 | 154 | 139 | 40.961039 | 0.810368 | 0.067058 | 0 | 0.330645 | 0 | 0.008065 | 0.231489 | 0.003745 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008065 | false | 0 | 0.08871 | 0 | 0.096774 | 0.008065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56e55db3074073781e32a309aaad46301011098d | 2,768 | py | Python | Packs/Okta/Integrations/OktaEventCollector/OktaEventCollector_test.py | jrauen/content | 81a92be1cbb053a5f26a6f325eff3afc0ca840e0 | [
"MIT"
] | null | null | null | Packs/Okta/Integrations/OktaEventCollector/OktaEventCollector_test.py | jrauen/content | 81a92be1cbb053a5f26a6f325eff3afc0ca840e0 | [
"MIT"
] | 40 | 2022-03-03T07:34:00.000Z | 2022-03-31T07:38:35.000Z | Packs/Okta/Integrations/OktaEventCollector/OktaEventCollector_test.py | jrauen/content | 81a92be1cbb053a5f26a6f325eff3afc0ca840e0 | [
"MIT"
] | null | null | null | from OktaEventCollector import ReqParams, Client, Request, GetEvents, Method
import pytest
req_params = ReqParams(since='', sortOrder='ASCENDING', limit='5')
request = Request(method=Method.GET, url='https://testurl.com', headers={}, params=req_params)
client = Client(request)
get_events = GetEvents(client)
id1 = {'uuid': 'a5b57ec5febb'}
id2 = {'uuid': 'a5b57ec5fecc'}
id3 = {'uuid': 'a12f3c5d77f3'}
id4 = {'uuid': 'a12f3c5dxxxx'}
class MockResponse:
def __init__(self, data):
self.data = data
def json(self):
return self.data
@pytest.mark.parametrize("events,ids,result", [
([id1, id2, id3], ['a12f3c5d77f3'], [id1, id2]),
([id1, id2, id3], ['a12f3c5dxxxx'], [id1, id2, id3]),
([], ['a12f3c5d77f3'], []),
([{'uuid': 0}, {'uuid': 1}, {'uuid': 2}, {'uuid': 3}, {'uuid': 4}, {'uuid': 5}, {'uuid': 6}, {'uuid': 7},
{'uuid': 8}, {'uuid': 9}], [0, 4, 7, 9],
[{'uuid': 1}, {'uuid': 2}, {'uuid': 3}, {'uuid': 5}, {'uuid': 6}, {'uuid': 8}])])
def test_remove_duplicates(events, ids, result):
assert get_events.remove_duplicates(events, ids) == result
@pytest.mark.parametrize("events,result", [
([{'published': '2022-04-17T12:31:36.667',
'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5faaa'},
{'published': '2022-04-17T12:32:36.667',
'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5fbbb'},
{'published': '2022-04-17T12:33:36.667',
'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5fccc'}],
{'after': '2022-04-17T12:33:36.667000', 'ids': ['1d0844b6-3148-11ec-9027-a5b57ec5fccc']}),
([{'published': '2022-04-17T12:31:36.667',
'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5faaa'},
{'published': '2022-04-17T12:32:36.667',
'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5fbbb'},
{'published': '2022-04-17T12:32:36.667',
'uuid': '1d0844b6-3148-11ec-9027-a5b57ec5fccc'}], {'after': '2022-04-17T12:32:36.667000',
'ids': ['1d0844b6-3148-11ec-9027-a5b57ec5fccc',
'1d0844b6-3148-11ec-9027-a5b57ec5fbbb']})])
def test_get_last_run(events, result):
assert get_events.get_last_run(events) == result
@pytest.mark.parametrize("time", ['2022-04-17T12:32:36.667)'])
def test_set_since_value(time):
req_params.set_since_value(time)
assert req_params.since == time
def test_make_api_call(mocker):
mock_res = MockResponse([{1}, {1}, {1}, {1}, {1}])
mocker.patch.object(client, 'call', return_value=mock_res)
assert get_events.make_api_call() == [{1}, {1}, {1}, {1}, {1}]
mock_res.data = [{1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}]
assert get_events.make_api_call() == [{1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}, {1}]
| 42.584615 | 109 | 0.58526 | 355 | 2,768 | 4.459155 | 0.247887 | 0.032849 | 0.041693 | 0.045483 | 0.50537 | 0.396083 | 0.384713 | 0.360708 | 0.30638 | 0.30638 | 0 | 0.19627 | 0.186416 | 2,768 | 64 | 110 | 43.25 | 0.506661 | 0 | 0 | 0.173077 | 0 | 0 | 0.311777 | 0.194364 | 0 | 0 | 0 | 0 | 0.096154 | 1 | 0.115385 | false | 0 | 0.038462 | 0.019231 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56e5c536074d74d31f4d24ac8e326a346c1ae65e | 2,563 | py | Python | test/models/test_deepset.py | NetKet/netket | 96758e814fc3128e6821564d6cc2852bac40ecf2 | [
"Apache-2.0"
] | null | null | null | test/models/test_deepset.py | NetKet/netket | 96758e814fc3128e6821564d6cc2852bac40ecf2 | [
"Apache-2.0"
] | null | null | null | test/models/test_deepset.py | NetKet/netket | 96758e814fc3128e6821564d6cc2852bac40ecf2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The NetKet Authors - All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import jax
import jax.numpy as jnp
import netket as nk
@pytest.mark.parametrize(
"cusp_exponent", [pytest.param(None, id="cusp=None"), pytest.param(5, id="cusp=5")]
)
@pytest.mark.parametrize(
"L",
[
pytest.param(1.0, id="1D"),
pytest.param((1.0, 1.0), id="2D-Square"),
pytest.param((1.0, 0.5), id="2D-Rectangle"),
],
)
def test_deepsets(cusp_exponent, L):
hilb = nk.hilbert.Particle(N=2, L=L, pbc=True)
sdim = len(hilb.extent)
x = jnp.hstack([jnp.ones(4), -jnp.ones(4)]).reshape(1, -1)
xp = jnp.roll(x, sdim)
ds = nk.models.DeepSetRelDistance(
hilbert=hilb,
cusp_exponent=cusp_exponent,
layers_phi=2,
layers_rho=2,
features_phi=(10, 10),
features_rho=(10, 1),
)
p = ds.init(jax.random.PRNGKey(42), x)
assert jnp.allclose(ds.apply(p, x), ds.apply(p, xp))
def test_deepsets_error():
hilb = nk.hilbert.Particle(N=2, L=1.0, pbc=True)
sdim = len(hilb.extent)
x = jnp.hstack([jnp.ones(4), -jnp.ones(4)]).reshape(1, -1)
xp = jnp.roll(x, sdim)
ds = nk.models.DeepSetRelDistance(
hilbert=hilb,
layers_phi=3,
layers_rho=3,
features_phi=(10, 10),
features_rho=(10, 1),
)
with pytest.raises(ValueError):
p = ds.init(jax.random.PRNGKey(42), x)
with pytest.raises(AssertionError):
ds = nk.models.DeepSetRelDistance(
hilbert=hilb,
layers_phi=2,
layers_rho=2,
features_phi=(10, 10),
features_rho=(10, 2),
)
p = ds.init(jax.random.PRNGKey(42), x)
with pytest.raises(ValueError):
ds = nk.models.DeepSetRelDistance(
hilbert=nk.hilbert.Particle(N=2, L=1.0, pbc=False),
layers_phi=2,
layers_rho=2,
features_phi=(10, 10),
features_rho=(10, 2),
)
p = ds.init(jax.random.PRNGKey(42), x)
| 29.125 | 87 | 0.616075 | 373 | 2,563 | 4.171582 | 0.340483 | 0.03856 | 0.020566 | 0.071979 | 0.446015 | 0.423522 | 0.423522 | 0.405527 | 0.338689 | 0.308483 | 0 | 0.042465 | 0.246586 | 2,563 | 87 | 88 | 29.45977 | 0.763335 | 0.225127 | 0 | 0.564516 | 0 | 0 | 0.026329 | 0 | 0 | 0 | 0 | 0 | 0.032258 | 1 | 0.032258 | false | 0 | 0.064516 | 0 | 0.096774 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56e77e033d14f603000e73fa84271bc6b5607ec9 | 3,987 | py | Python | exp/hou_ximg.py | schaban/crosscore_dev | 1eae118a485cb1de1d54d2da01ff0b32966205ef | [
"MIT"
] | 5 | 2022-01-26T03:25:00.000Z | 2022-03-06T03:27:13.000Z | exp/hou_ximg.py | schaban/crosscore_dev | 1eae118a485cb1de1d54d2da01ff0b32966205ef | [
"MIT"
] | null | null | null | exp/hou_ximg.py | schaban/crosscore_dev | 1eae118a485cb1de1d54d2da01ff0b32966205ef | [
"MIT"
] | null | null | null | # Author: Sergey Chaban <sergey.chaban@gmail.com>
import sys
import hou
import os
import imp
import re
import inspect
from math import *
from array import array
import xcore
import xhou
try: xrange
except NameError: xrange = range
def writeBits(bw, bits, nbits):
nbytes = xcore.ceilDiv(nbits, 8)
wk = bits
for i in xrange(nbytes):
bw.writeU8(wk & 0xFF)
wk >>= 8
class ImgPlane:
    def __init__(self, ximg, name, rawFlg = False):
self.ximg = ximg
self.name = name
self.nameId = ximg.strLst.add(name)
if name == "a":
self.data = ximg.cop.allPixels("A")
else:
self.data = ximg.cop.allPixels("C", xhou.getRGBComponentName(ximg.cop, name))
ref = self.data[0]
self.constFlg = True
for val in self.data:
if val != ref:
self.constFlg = False
break
self.compress(rawFlg)
def compress(self, rawFlg):
self.minVal = min(self.data)
self.maxVal = max(self.data)
self.valOffs = self.minVal
if self.valOffs > 0: self.valOffs = 0
self.bitCnt = 0
self.bits = 0
self.minTZ = 32
if self.constFlg:
self.format = 0
return
if rawFlg:
self.format = -1
return
self.format = 1
for fval in self.data:
fval -= self.valOffs
ival = xcore.getBitsF32(fval) & ((1<<31)-1)
self.minTZ = min(self.minTZ, xcore.ctz32(ival))
tblSize = 1 << 8
tbl = [0 for i in xrange(tblSize)]
pred = 0
hash = 0
nlenBits = 5
w = self.ximg.w
h = self.ximg.h
for y in xrange(h):
for x in xrange(w):
idx = (h-1-y)*w + x
fval = self.data[idx] - self.valOffs
ival = xcore.getBitsF32(fval) & ((1<<31)-1)
ival >>= self.minTZ
xor = ival ^ pred
tbl[hash] = ival
hash = ival >> 21
hash &= tblSize - 1
pred = tbl[hash]
xlen = 0
if xor: xlen = xcore.bitLen32(xor)
dat = xlen
if xlen: dat |= (xor & ((1<<xlen)-1)) << nlenBits
self.bits |= dat << self.bitCnt
self.bitCnt += nlenBits + xlen
def writeInfo(self, bw):
bw.writeU32(0) # +00 -> data
self.ximg.writeStrId16(bw, self.nameId) # +04
bw.writeU8(self.minTZ) # +06
bw.writeI8(self.format) # +07
bw.writeF32(self.minVal) # +08
bw.writeF32(self.maxVal) # +0C
bw.writeF32(self.valOffs) # +10
bw.writeU32(self.bitCnt) # +14
bw.writeU32(0) # +18 reserved0
bw.writeU32(0) # +1C reserved1
def writeData(self, bw):
if self.format == 0:
bw.writeF32(self.data[0])
elif self.format == 1:
writeBits(bw, self.bits, self.bitCnt)
else:
w = self.ximg.w
h = self.ximg.h
for y in xrange(h):
for x in xrange(w):
idx = (h-1-y)*w + x
bw.writeF32(self.data[idx])
class ImgExporter(xcore.BaseExporter):
def __init__(self):
xcore.BaseExporter.__init__(self)
self.sig = "XIMG"
def build(self, copPath, rawFlg = True):
self.copPath = copPath
self.nameId, self.pathId = self.strLst.addNameAndPath(copPath)
self.cop = hou.node(copPath)
self.w = self.cop.xRes()
self.h = self.cop.yRes()
self.planes = {}
self.addPlane("r", rawFlg)
self.addPlane("g", rawFlg)
self.addPlane("b", rawFlg)
self.addPlane("a", rawFlg)
def addPlane(self, name, rawFlg = True):
self.planes[name] = ImgPlane(self, name, rawFlg)
def writeHead(self, bw, top):
npln = len(self.planes)
bw.writeU32(self.w) # +20
bw.writeU32(self.h) # +24
bw.writeU32(npln) # +28
self.patchPos = bw.getPos()
bw.writeI32(0) # +2C -> info
def writeData(self, bw, top):
plnLst = []
for plnName in self.planes: plnLst.append(self.planes[plnName])
npln = len(plnLst)
bw.align(0x10)
infoTop = bw.getPos()
bw.patch(self.patchPos, bw.getPos() - top) # -> info
for i in xrange(npln):
plnLst[i].writeInfo(bw)
for i, pln in enumerate(plnLst):
bw.align(4)
bw.patch(infoTop + (i*0x20), bw.getPos() - top)
xcore.dbgmsg("Saving plane " + pln.name)
pln.writeData(bw)
def save(self, outPath):
xcore.BaseExporter.save(self, outPath)
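# Typical usage inside a Houdini session (a sketch; the COP path and output
# file are assumptions, not from the original module):
#     exporter = ImgExporter()
#     exporter.build("/img/comp1/OUT", rawFlg=False)
#     exporter.save("ximg/out.ximg")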
| 25.234177 | 81 | 0.616002 | 589 | 3,987 | 4.149406 | 0.264856 | 0.032733 | 0.028642 | 0.01473 | 0.094926 | 0.075286 | 0.075286 | 0.075286 | 0.075286 | 0.04419 | 0 | 0.037841 | 0.237773 | 3,987 | 157 | 82 | 25.394904 | 0.766371 | 0.037121 | 0 | 0.135714 | 0 | 0 | 0.006552 | 0 | 0 | 0 | 0.003276 | 0 | 0 | 1 | 0.078571 | false | 0 | 0.071429 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56e9fd79e108a7ca6eae3fd77971936796edbc9e | 9,698 | py | Python | macadam/conf/constant_params.py | yongzhuo/Macadam | 794a29c760ce25264388c3a85a6b118733afb023 | [
"MIT"
] | 290 | 2020-06-04T17:01:30.000Z | 2022-03-29T13:10:18.000Z | macadam/conf/constant_params.py | furtherthanfar/Macadam | 794a29c760ce25264388c3a85a6b118733afb023 | [
"MIT"
] | 7 | 2020-06-05T02:30:51.000Z | 2022-03-17T01:05:42.000Z | macadam/conf/constant_params.py | furtherthanfar/Macadam | 794a29c760ce25264388c3a85a6b118733afb023 | [
"MIT"
] | 35 | 2020-06-11T07:32:17.000Z | 2022-03-09T06:08:03.000Z | # !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2020/4/26 20:25
# @author : Mo
# @function: constant of token-symbol and hyper-parameters-default
from macadam.conf.path_config import path_model_dir
from typing import Dict
import os
EMBEDDING_TYPE = ["ROBERTA","ELECTRA","RANDOM","ALBERT",
"XLNET","NEZHA","GPT2","WORD","BERT", "MIX"]
# symbol of common token
MASK = "[MASK]"
CLS = "[CLS]"
SEP = "[SEP]"
PAD = "[PAD]"
UNK = "[UNK]"
BOS = "[BOS]"
EOS = "[EOS]"
WC = "[WC]"
# task of macadam
SL = "SL" # sequence-labeling(ner, pos, tag)
TC = "TC" # text-classification
RE = "RE" # relation-extraction
# hyper_parameters of deep-learning, include sharing, embed, graph, train, save and data
hyper_parameters_default = {
    "sharing": {"length_max": None,        # max sentence length; if None, the max length covering 95% of the data is used, otherwise this value is enforced; a fixed 20-50 is recommended; longer inputs make BERT slower and use more memory (beware of OOM)
                "embed_size": 768,         # char/word embedding dimension; 768 for BERT, 300 for word, smaller works for char
                "vocab_size": None,        # vocabulary size; may be updated for a specific corpus, can be left unset
                "trainable": True,         # whether the embedding is static or trainable, i.e. whether it can be fine-tuned
                "task": None,              # task type: "SL" (sequence-labeling), "TC" (text-classification), "RE" (relation-extraction)
                "token_type": "CHAR",      # smallest token unit: "CHAR", "WORD" or "NGRAM"; note: in word2vec mode the training corpus must be pre-segmented
                "embed_type": "BERT",      # embedding type: "WORD", "RANDOM", "BERT", "ALBERT", "ROBERTA", "NEZHA", "XLNET", "ELECTRA" or "GPT2"
                "gpu_memory_fraction": 0.6,  # GPU memory fraction, 0-1
                },
    "embed": {"layer_idx": [-2],           # which BERT layer outputs to take, -1 to -12 or 0 to 11, e.g. 0, 1, 11, -1, -2, -12
              "path_embed": None,          # path to an external embedding model such as word2vec or BERT
              "merge_type": "concat",      # how to merge the selected BERT layer outputs: "concat", "add", "pool-max", "pool-avg", "multi"
              "application": "encode",     # bert4keras downstream mode: "encode", "lm", "unilm", etc.
              "length_first": None,        # max length of the first sentence; longer inputs are truncated, shorter ones padded
              "length_second": None,       # max length of the second sentence; longer inputs are truncated, shorter ones padded
              "xlnet_embed": {"attention_type": "bi",
                              "memory_len": 0,
                              "target_len": 5},  # XLNet parameters, used by keras-xlnet
              },
    "graph": {"filters_size": [3, 4, 5],   # convolution kernel sizes, 1-10
              "filters_num": 300,          # number of convolution filters; 300-600 for text-cnn
              "rnn_type": None,            # recurrent network type: "LSTM", "GRU" or "Bidirectional-GRU"
              "rnn_unit": 256,             # RNN hidden size, a multiple of 8; typically 64, 128, 256, 512 or 768
              "dropout": 0.5,              # dropout probability, 0-1
              "activate_mid": "tanh",      # intermediate activation (nonlinearity): "relu", "tanh" or "sigmoid"
              "activate_end": "softmax",   # final activation of the last layer, e.g. for classification or NER
              "use_onehot": True,          # whether labels are one-hot encoded
              "use_crf": False,            # whether to use a CRF (conditional random field) for task="SL" (sequence labeling)
              "loss": None,                # loss function measuring the gap between targets and predictions, e.g. "categorical_crossentropy"
              "metrics": "accuracy",       # metric used to decide whether to save a better model, usually loss, acc or f1
              "optimizer": "Adam",         # optimizer, one of ["Adam", "Radam", "RAdam,Lookahead"]
              "optimizer_extend": [
                  "gradient_accumulation",
                  "piecewise_linear_lr",
                  "layer_adaptation",
                  "lazy_optimization",
                  "weight_decay",
                  "lookahead"],            # optimizer extensions
              },
    "train": {"learning_rate": 1e-3,       # learning rate; a key hyper-parameter, typically 1e-3 for word2vec and 5e-5 or 2e-5 for BERT
              "decay_rate": 0.999,         # multiplicative learning-rate decay factor: lr = lr * rate
              "decay_step": 1000,          # decay the learning rate every N steps
              "batch_size": 32,            # batch size; too small hinders convergence (local minima, oscillation), too large hurts generalization
              "early_stop": 6,             # early stopping: stop after N epochs without improvement of the metric
              "epochs": 20,                # maximum number of training epochs
              "label": None,               # number of classes; inferred automatically if None, forced otherwise
              "is_training": True,         # training vs. prediction flag; determines whether the optimizer is loaded
              },
    "save": {
        # "path_hyper_parameters": None,   # path of the hyper-parameters file
        "path_model_dir": None,            # model directory; saved when the loss decreases, with save_best_only=True, save_weights_only=True
        "path_model_info": None,           # all model hyper-parameters, stored in model_info.json
        "path_fineture": None,             # path of the fine-tuned embedding file, e.g. char/word/BERT vectors
    },
    "data": {"train_data": None,           # training data
             "val_data": None              # validation data
             },
}
class Config:
def __init__(self, hyper_parameters: Dict={}):
"""
Init of hyper_parameters and build_embed.
Args:
hyper_parameters: hyper_parameters of all, which contains "sharing", "embed", "graph", "train", "save" and "data".
Returns:
None
"""
        # start from the full set of default hyper-parameters
self.hyper_parameters = self.get_hyper_parameters_default()
        # only update the key-value pairs that were passed in
for k in hyper_parameters.keys():
self.hyper_parameters[k].update(hyper_parameters.get(k, {}))
self.params_sharing = self.hyper_parameters.get("sharing", {})
self.params_embed = self.hyper_parameters.get("embed", {})
self.params_graph = self.hyper_parameters.get("graph", {})
self.params_train = self.hyper_parameters.get("train", {})
self.params_save = self.hyper_parameters.get("save", {})
self.params_data = self.hyper_parameters.get("data", {})
# params of sharing
self.gpu_memory_fraction = self.params_sharing.get("gpu_memory_fraction", 0.60)
self.embed_type = self.params_sharing.get("embed_type", "RANDOM")
self.token_type = self.params_sharing.get("token_type", "CHAR")
self.task = self.params_sharing.get("task", None)
self.length_max = self.params_sharing.get("length_max", None)
self.vocab_size = self.params_sharing.get("vocab_size", None)
self.embed_size = self.params_sharing.get("embed_size", None)
self.trainable = self.params_sharing.get("trainable", True)
# params of embed
self.layer_idx = self.params_embed.get("layer_idx", [])
self.path_embed = self.params_embed.get("path_embed", None)
self.merge_type = self.params_embed.get("merge_type", "concat")
self.length_first = self.params_embed.get("length_first", None)
self.length_second = self.params_embed.get("length_second", None)
self.xlnet_embed = self.params_embed.get("xlnet_embed", {})
self.attention_type = self.params_embed.get("attention_type", "bi")
self.memory_len = self.params_embed.get("memory_len", 128)
self.target_len = self.params_embed.get("target_len", 128)
# params of graph
self.filters_size = self.params_graph.get("filters_size", [3, 4, 5])
self.filters_num = self.params_graph.get("filters_num", 300)
self.rnn_type = self.params_graph.get("rnn_type", None)
self.rnn_unit = self.params_graph.get("rnn_unit", 256)
self.dropout = self.params_graph.get("dropout", 0.5)
self.activate_mid = self.params_graph.get("activate_mid", "tanh")
self.activate_end = self.params_graph.get("activate_end", "softmax")
self.use_onehot = self.params_graph.get("use_onehot", True)
self.use_crf = self.params_graph.get("use_crf", False)
self.loss = self.params_graph.get("loss", "categorical_crossentropy" if self.use_onehot
else "sparse_categorical_crossentropy")
self.metrics = self.params_graph.get("metrics", "accuracy")
self.optimizer = self.params_graph.get("optimizer", "Adam").upper()
self.optimizer_extend = self.params_graph.get("optimizer_extend", [])
# params of train
self.learning_rate = self.params_train.get("learning_rate", 5e-5)
self.decay_rate = self.params_train.get("decay_rate", 0.999)
self.decay_step = self.params_train.get("decay_step", 32000)
self.early_stop = self.params_train.get("early_stop", 6)
self.batch_size = self.params_train.get("batch_size", 32)
self.epochs = self.params_train.get("epochs", 20)
self.label = self.params_train.get("label", None)
self.is_training = self.params_train.get("is_training", True)
# params of save
self.path_model_dir = self.params_save.get("path_model_dir", path_model_dir)
# self.path_model_info = self.params_save.get("path_model_info", None)
self.path_fineture = self.params_save.get("path_fineture", None)
# params of data
self.train_data = self.params_data.get("train_data", None)
self.val_data = self.params_data.get("val_data", None)
        # special tokens
self.token_dict = {PAD: 0, UNK: 1,
CLS: 2, SEP: 3,
BOS: 4, EOS: 5,
MASK: 6, WC: 7
}
        # recursively create the model save directory
if not self.path_model_dir: self.path_model_dir = path_model_dir
if not os.path.exists(self.path_model_dir):
os.makedirs(self.path_model_dir)
def get_hyper_parameters_default(self) -> Dict:
"""
Get hyper_parameters of default.
Args:
None
Returns:
Dict
"""
return hyper_parameters_default
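# Usage sketch (the overrides below are illustrative):
#     conf = Config({"train": {"batch_size": 64, "epochs": 10}})
#     print(conf.batch_size)  # 64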
| 51.312169 | 137 | 0.576614 | 1,106 | 9,698 | 4.84358 | 0.302893 | 0.091469 | 0.039201 | 0.043681 | 0.198619 | 0.067575 | 0.045175 | 0.032854 | 0.032854 | 0.032854 | 0 | 0.022783 | 0.289441 | 9,698 | 188 | 138 | 51.585106 | 0.754607 | 0.26026 | 0 | 0 | 0 | 0 | 0.181078 | 0.010862 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014493 | false | 0 | 0.021739 | 0 | 0.050725 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56ea4043e94445a1fa0825bf267b5e1fb99e0df2 | 786 | py | Python | tests/test_model/test_backbone/test_mobilenetv3_backbone.py | ZJCV/PyCls | 1ef59301646b6134f2ffcc009b4fd76550fa4089 | [
"Apache-2.0"
] | 110 | 2021-02-04T14:32:57.000Z | 2022-03-30T01:51:56.000Z | tests/test_model/test_backbone/test_mobilenetv3_backbone.py | likyoo/ZCls | 568621aca3a8b090c93345f0858d52c5757f2f0e | [
"Apache-2.0"
] | 8 | 2021-04-11T02:46:57.000Z | 2021-12-14T19:30:58.000Z | tests/test_model/test_backbone/test_mobilenetv3_backbone.py | likyoo/ZCls | 568621aca3a8b090c93345f0858d52c5757f2f0e | [
"Apache-2.0"
] | 20 | 2021-02-07T14:17:07.000Z | 2022-03-22T05:20:40.000Z | # -*- coding: utf-8 -*-
"""
@date: 2020/12/30 下午9:36
@file: test_mobilenetv3_backbone.py
@author: zj
@description:
"""
import torch
from zcls.model.backbones.mobilenet.mobilenetv3_backbone import MobileNetV3Backbone
def test_mobilenet_v3_backbone():
data = torch.randn(1, 3, 224, 224)
model = MobileNetV3Backbone(
in_channels=3,
base_channels=16,
out_channels=960,
width_multiplier=1.,
round_nearest=8,
reduction=4,
attention_type='SqueezeAndExcitationBlock2D',
conv_layer=None,
norm_layer=None,
act_layer=None,
)
print(model)
outputs = model(data)
print(outputs.shape)
assert outputs.shape == (1, 960, 7, 7)
if __name__ == '__main__':
test_mobilenet_v3_backbone()
| 20.684211 | 83 | 0.655216 | 93 | 786 | 5.258065 | 0.645161 | 0.055215 | 0.06135 | 0.09407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.069421 | 0.23028 | 786 | 37 | 84 | 21.243243 | 0.738843 | 0.139949 | 0 | 0 | 0 | 0 | 0.052395 | 0.040419 | 0 | 0 | 0 | 0 | 0.045455 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.136364 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56eb32a92cc867cd71aa0914a66e1907fb58aeae | 4,348 | py | Python | analog_sim/spice/ngspice.py | yrrapt/spice_interface | 2a66bd2672b5154920457676bbaaef8ddd694640 | [
"Apache-2.0"
] | 5 | 2021-04-29T21:38:17.000Z | 2021-07-07T04:03:45.000Z | analog_sim/spice/ngspice.py | yrrapt/spice_interface | 2a66bd2672b5154920457676bbaaef8ddd694640 | [
"Apache-2.0"
] | null | null | null | analog_sim/spice/ngspice.py | yrrapt/spice_interface | 2a66bd2672b5154920457676bbaaef8ddd694640 | [
"Apache-2.0"
] | 1 | 2021-11-30T01:12:21.000Z | 2021-11-30T01:12:21.000Z | import os, re, subprocess
import numpy as np
from spyci import spyci
from PySpice.Spice.NgSpice.Shared import NgSpiceShared
from analog_sim.spice.generic import GenericSpiceInterface
class NgSpiceInterface(GenericSpiceInterface):
'''
'''
def __init__(self, verbose=True, netlist_path=None, pdk_path=None):
'''
Instantiate the object
'''
self.config = {}
self.config['simulator'] = {'executable' : 'ngspice',
# 'shared' : True,
'shared' : False,
'silent' : False}
self.config['verbose'] = verbose
# create an ngspice shared object
self.ngspice = NgSpiceShared.new_instance()
def run_simulation(self, new_instance=True, outputs=None):
'''
Run simulation
'''
# pre-create the file locations
netlist_path = self.run_dir + '/' + self.temp_netlist
raw_path = self.run_dir + '/' + self.temp_result
log_path = self.run_dir + '/' + self.temp_log
# run ngspice
if self.config['simulator']['shared']:
# destroy previous run data
self.ngspice.destroy()
# self.ngspice.exec_command("reset")
# self.ngspice.reset()
# load the netlist into the
if new_instance:
self.ngspice.source(netlist_path)
# run the simulation
if self.config['simulator']['silent']:
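                # NOTE: suppress_stdout_stderr is assumed to be a context
                # manager provided elsewhere in the package; it is not
                # imported in this file.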
with suppress_stdout_stderr():
self.ngspice.run()
else:
self.ngspice.run()
# save the outputs
self.ngspice.exec_command("set filetype=ascii")
self.ngspice.exec_command("write %s" % raw_path)
else:
# set the output format to ascii required by spyci
os.environ["SPICE_ASCIIRAWFILE"] = "1"
self.result_type = 'ascii'
# run the simulation through command line
bash_command = "ngspice -b -r %s -o %s %s" % (raw_path, log_path, netlist_path)
process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
# check if error occured
with open(log_path) as f:
sim_log = f.read()
if 'fatal' in sim_log or 'aborted' in sim_log:
print('\033[91m')
print('-'*150)
print('ERROR IN SIMULATION:')
print(sim_log)
print('-'*150)
print('\033[0m')
# read in the results of the simulation
if outputs:
self.simulation_data = {}
for output in outputs:
self.read_results("rundir/spiceinterface_temp_"+output+".raw", output)
else:
self.read_results(raw_path)
def netlist_voltage_pwl(self, name, voltage, negative='0', dc=0):
'''
Write a netlist line for a DC PWL source
'''
return 'V' + name + ' ' + name + ' ' + negative + ' dc %f ' % dc + 'pwl ( ' + voltage + ' )'
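    # e.g. netlist_voltage_pwl('in', '0 0 1u 1.8') returns (illustrative values):
    #     "Vin in 0 dc 0.000000 pwl ( 0 0 1u 1.8 )"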
def netlist_temperature(self, temperature):
'''
Set the temperature
'''
# form the include line
line = '.option TEMP=%s' % temperature
return line
def netlist_control_block(self, control_block):
'''
Set a control block
'''
# form the include line
line = '.control\n'
line += control_block + '\n'
line += '.endc'
return line
def netlist_sim_tran(self, final_time, initial_step=-1, use_intitial_conditions=False):
'''
Define a transient simulation
TRAN <initial step value> <final time value>
'''
        # if the initial step is not set, default to 1/1000 of the final time
if initial_step < 0:
initial_step = final_time/1000
# form the transient instruction
line = '.tran %s %s' % (self.unit_format(initial_step), self.unit_format(final_time))
if use_intitial_conditions:
line += ' uic'
return line | 30.194444 | 100 | 0.527599 | 458 | 4,348 | 4.862445 | 0.336245 | 0.044454 | 0.025595 | 0.018859 | 0.049394 | 0.029636 | 0 | 0 | 0 | 0 | 0 | 0.009912 | 0.373505 | 4,348 | 144 | 101 | 30.194444 | 0.807636 | 0.169273 | 0 | 0.151515 | 0 | 0 | 0.087709 | 0.00792 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.075758 | 0 | 0.242424 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56ee5b13733521aa2c6d7583b5c0eff94fcf5da5 | 728 | py | Python | producer/kwebmon_producer/json_schemas.py | nicolalamacchia/kwebmon | 13d8720314e9faff99b34dd5cb6c10d1cf45d786 | [
"MIT"
] | null | null | null | producer/kwebmon_producer/json_schemas.py | nicolalamacchia/kwebmon | 13d8720314e9faff99b34dd5cb6c10d1cf45d786 | [
"MIT"
] | 4 | 2021-04-28T03:19:37.000Z | 2021-04-28T13:10:27.000Z | producer/kwebmon_producer/json_schemas.py | nicolalamacchia/kwebmon | 13d8720314e9faff99b34dd5cb6c10d1cf45d786 | [
"MIT"
] | null | null | null | SITES_JSON_SCHEMA = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"sites": {"type": "array"},
"items": {"$ref": "#/$defs/site"}
},
"$defs": {
"site": {
"type": "object",
"required": ["url"],
"properties": {
"url": {
"type": "string",
"description": "Website URL"
},
"pattern": {
"type": "string",
"description": ("Python-compatible RegEx pattern to be "
"used to validate website content")
}
}
}
}
}
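# Validation sketch (assumes the external `jsonschema` package; not part of
# this module):
#
#     from jsonschema import validate
#     validate(
#         instance={"sites": [{"url": "https://example.com", "pattern": "ok"}]},
#         schema=SITES_JSON_SCHEMA,
#     )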
| 28 | 76 | 0.362637 | 49 | 728 | 5.346939 | 0.591837 | 0.076336 | 0.160305 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005141 | 0.465659 | 728 | 25 | 77 | 29.12 | 0.66838 | 0 | 0 | 0.24 | 0 | 0 | 0.376374 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56ee8b1c1d8d6917b939b39a1094ae81309532e0 | 4,404 | py | Python | willie/modules/whois.py | ezoSresyeK/willie | 5782628d15996d0cc901bb4ee27d89e9c7ad94ae | [
"EFL-2.0"
] | null | null | null | willie/modules/whois.py | ezoSresyeK/willie | 5782628d15996d0cc901bb4ee27d89e9c7ad94ae | [
"EFL-2.0"
] | null | null | null | willie/modules/whois.py | ezoSresyeK/willie | 5782628d15996d0cc901bb4ee27d89e9c7ad94ae | [
"EFL-2.0"
] | null | null | null | """
whois.py - Willie Whois module
Copyright 2014, Ellis Percival (Flyte) willie@failcode.co.uk
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net
A module to enable Willie to perform WHOIS lookups on nicknames.
This can either be to have Willie perform lookups on behalf of
other people, or can be imported and used by other modules.
"""
from willie.module import commands, event, rule
from time import sleep
from datetime import datetime, timedelta
AGE_THRESHOLD = timedelta(days=1)
class Whois(object):
def __init__(self, data):
to, self.nick, self.ident, self.host, star, self.name = data
self.datetime = datetime.now()
def __repr__(self):
return '%s(nick=%r, ident=%r, host=%r, name=%r, datetime=%r)' % (
self.__class__.__name__,
self.nick,
self.ident,
self.host,
self.name,
self.datetime
)
def __str__(self):
return '%s!%s@%s * %s' % (
self.nick, self.ident, self.host, self.name)
def set_chans(self, trigger):
self.chans = trigger
class WhoisFailed(Exception):
pass
def setup(bot):
bot.memory['whois'] = {}
def check_setup(bot):
if 'whois' not in bot.memory:
bot.memory['whois'] = {}
def _clear_old_entries(bot):
"""
Removes entries from the bot's memory which are older
than AGE_THRESHOLD.
"""
to_del = []
for nick, whois in bot.memory['whois'].items():
if whois.datetime < datetime.now() - AGE_THRESHOLD:
to_del.append(nick)
for nick in to_del:
try:
del bot.memory['whois'][nick]
except KeyError:
pass
def send_whois(bot, nick):
"""
Sends the WHOIS command to the server for the
specified nick.
"""
bot.write(['WHOIS', nick])
def get_whois(bot, nick):
"""
Waits for the response to be put into the bot's
memory by the receiving thread.
"""
check_setup(bot)
i = 0
while nick.lower() not in bot.memory['whois'] and i < 10:
i += 1
sleep(2)
if nick.lower() not in bot.memory['whois']:
return
#raise WhoisFailed('No reply from server')
elif bot.memory['whois'][nick.lower()] is None:
try:
del bot.memory['whois'][nick.lower()]
except KeyError:
pass
#raise WhoisFailed('No such nickname')
# A little housekeeping
_clear_old_entries(bot)
try:
return bot.memory['whois'][nick.lower()]
except KeyError:
return None
def whois(bot, nick):
"""
Sends the WHOIS command to the server then waits for
the response to be put into the bot's memory by the
receiving thread.
"""
# Remove entry first so that we get the latest
check_setup(bot)
try:
        del bot.memory['whois'][nick.lower()]
except KeyError:
pass
send_whois(bot, nick)
return get_whois(bot, nick)
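# Illustrative use from another Willie module (a sketch; `bot` is the bot
# instance Willie passes to callables, and the nick and bot.say call are
# assumptions):
#
#     from willie.modules.whois import whois
#     w = whois(bot, 'somenick')
#     if w is not None:
#         bot.say('%s connects from %s' % (w.nick, w.host))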
@event('311')
@rule(r'.*')
def whois_found_reply(bot, trigger):
"""
Listens for successful WHOIS responses and saves
them to the bot's memory.
"""
check_setup(bot)
nick = trigger.args[1]
bot.memory['whois'][nick.lower()] = Whois(trigger.args)
@event('319')
@rule(r'.*')
def whois_chan_list(bot, trigger):
nick = trigger.args[1]
    if nick.lower() not in bot.memory['whois']:
sleep(3)
bot.memory['whois'][nick.lower()].set_chans(trigger)
@event('401')
@rule(r'.*')
def whois_not_found_reply(bot, trigger):
"""
Listens for unsuccessful WHOIS responses and saves
None to the bot's memory so that the initial
whois function is aware that the lookup failed.
"""
check_setup(bot)
nick = trigger.args[1]
    bot.memory['whois'][nick.lower()] = None
print("Encountered 401")
# Give the initiating whois function time to see
# that the lookup has failed, then remove the None.
sleep(5)
try:
        del bot.memory['whois'][nick.lower()]
except KeyError:
pass
@commands('whois')
def display_whois(bot, trigger):
"""PM's you the chans the nick is in."""
nick = trigger.group().split()[1]
try:
w = whois(bot, nick)
sleep(3)
bot.msg(trigger.nick,
'%s is on the following chans: %s' % (w.nick, w.chans))
    except AttributeError:
bot.msg(trigger.nick,
'%s could not be found'
% (nick))
| 24.331492 | 73 | 0.60604 | 607 | 4,404 | 4.313015 | 0.285008 | 0.055004 | 0.080214 | 0.061879 | 0.326585 | 0.274637 | 0.239878 | 0.190222 | 0.165011 | 0.116883 | 0 | 0.009381 | 0.273842 | 4,404 | 180 | 74 | 24.466667 | 0.809256 | 0.268847 | 0 | 0.336538 | 0 | 0.009615 | 0.077273 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134615 | false | 0.048077 | 0.028846 | 0.019231 | 0.240385 | 0.009615 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56ef8a75099969f876b3cdd3157b7f50324c1ed5 | 1,188 | py | Python | setup.py | satyrius/cmsplugin-scripts | bffcaefa36377b0baeedc6a0006b2c3ce5a50a98 | [
"MIT"
] | null | null | null | setup.py | satyrius/cmsplugin-scripts | bffcaefa36377b0baeedc6a0006b2c3ce5a50a98 | [
"MIT"
] | null | null | null | setup.py | satyrius/cmsplugin-scripts | bffcaefa36377b0baeedc6a0006b2c3ce5a50a98 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from cmsplugin_scripts import __version__
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Topic :: Internet :: WWW/HTTP :: Site Management',
'Programming Language :: Python :: 2.7',
]
setup(
name='cmsplugin-scripts',
version=__version__,
description='Django CMS plugin for script tag injection',
author='Anton Egorov',
author_email='anton.egoroff@gmail.com',
url='https://github.com/satyrius/cmsplugin-scripts',
license='MIT',
long_description=open('README.rst').read(),
classifiers=CLASSIFIERS,
platforms=['OS Independent'],
packages=find_packages(),
include_package_data=True,
install_requires=[
'django-cms',
],
tests_require=['tox>=1.8'],
zip_safe=False,
)
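# Typical commands (illustrative):
#     python setup.py sdist    # build a source distribution
#     python setup.py test     # run the test suite (tox is pulled in via tests_require)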
| 29.7 | 73 | 0.661616 | 125 | 1,188 | 6.144 | 0.672 | 0.0625 | 0.0625 | 0.078125 | 0.088542 | 0.088542 | 0 | 0 | 0 | 0 | 0 | 0.005236 | 0.196128 | 1,188 | 39 | 74 | 30.461538 | 0.798953 | 0 | 0 | 0 | 0 | 0 | 0.521044 | 0.01936 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.057143 | 0 | 0.057143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56f43352bfe59575a440aa446f6337c18f283182 | 1,747 | py | Python | 03LinkedList/143ReorderList.py | zhaoxinlu/leetcode-algorithms | f5e1c94c99628e7fb04ba158f686a55a8093e933 | [
"MIT"
] | null | null | null | 03LinkedList/143ReorderList.py | zhaoxinlu/leetcode-algorithms | f5e1c94c99628e7fb04ba158f686a55a8093e933 | [
"MIT"
] | null | null | null | 03LinkedList/143ReorderList.py | zhaoxinlu/leetcode-algorithms | f5e1c94c99628e7fb04ba158f686a55a8093e933 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Editor: Zhao Xinlu
School: BUPT
Date: 2018-04-11
Algorithm: reorder a linked list (find the middle, reverse the second half, then merge the two halves)
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def reorderList(self, head):
"""
:type head: ListNode
:rtype: void Do not return anything, modify head in-place instead.
"""
if not head or not head.next:
return
midNode = self.midOfList(head)
behindHead = self.reverseList(midNode.next)
midNode.next = None
head = self.mergeList(head, behindHead)
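    # Quick usage sketch (the build() helper below is illustrative, not part
    # of the original solution):
    #     def build(vals):
    #         dummy = ListNode(0); cur = dummy
    #         for v in vals:
    #             cur.next = ListNode(v); cur = cur.next
    #         return dummy.next
    #     head = build([1, 2, 3, 4, 5])
    #     Solution().reorderList(head)  # head becomes 1 -> 5 -> 2 -> 4 -> 3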
def midOfList(self, head):
if not head:
return head
slow, fast = head, head
while fast.next and fast.next.next:
slow = slow.next
fast = fast.next.next
return slow
def reverseList(self, head):
if not head or not head.next:
return head
pre = None
cur = head
nhead = None
while cur:
nextNode = cur.next
if cur.next == None:
nhead = cur
cur.next = pre
pre = cur
cur = nextNode
return nhead
def mergeList(self, head1, head2):
if not head2:
return head1
if not head1:
return head2
dummy = ListNode(0)
l3 = dummy
while head1 and head2:
l3.next = head1
head1 = head1.next
l3 = l3.next
l3.next = head2
head2 = head2.next
l3 = l3.next
if head1:
l3.next = head1
if head2:
l3.next = head2
return dummy.next | 21.048193 | 74 | 0.507728 | 201 | 1,747 | 4.393035 | 0.313433 | 0.04077 | 0.030578 | 0.024915 | 0.091733 | 0.06342 | 0.06342 | 0.06342 | 0 | 0 | 0 | 0.035957 | 0.41099 | 1,747 | 83 | 75 | 21.048193 | 0.822157 | 0.117344 | 0 | 0.188679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09434 | false | 0 | 0 | 0 | 0.283019 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56f4fe4a463dd38569b76ab12f231c84b957ff91 | 2,409 | py | Python | libraries/colors/colors_example1.py | est/nodebox-gl | f1909a7a4ebc55c8ba254f92e25acb26e8cf1f1d | [
"BSD-3-Clause"
] | 1 | 2015-09-29T14:22:49.000Z | 2015-09-29T14:22:49.000Z | libraries/colors/colors_example1.py | est/nodebox-gl | f1909a7a4ebc55c8ba254f92e25acb26e8cf1f1d | [
"BSD-3-Clause"
] | 1 | 2015-09-29T14:23:35.000Z | 2015-09-30T02:33:13.000Z | libraries/colors/colors_example1.py | est/nodebox-gl | f1909a7a4ebc55c8ba254f92e25acb26e8cf1f1d | [
"BSD-3-Clause"
] | null | null | null | # ANALOG COLORS
# Import the library
try:
# This is the statement you normally use.
colors = ximport("colors")
except ImportError:
# But since these examples are "inside" the library
# we may need to try something different when
# the library is not located in /Application Support
colors = ximport("__init__")
reload(colors)
size(600, 600)
nofill()
stroke(0.4, 0.5, 0)
strokewidth(0.1)
autoclosepath(False)
clr = colors.color(0.6, 0.4, 0)
# Get a very dark variation of the color for the background.
background(colors.dark(clr).darken(0.1))
clr.alpha = 0.5
# Each curve has a shadow and there are a lot of them,
# so we have to use a very subtle shadow:
# very transparent and thin (little blur).
colors.shadow(alpha=0.05, blur=0.2)
for i in range(50):
# Each strand of curves has an analogous color
# (i.e. hues that are next to each other on the color wheel).
# This yields a very natural effect.
stroke(clr.analog(angle=10, d=0.3))
# Start drawing strands of curves from the center.
x0 = WIDTH/2
y0 = HEIGHT/2
# Each strand of curves bends in a certain way.
vx0 = random(-200, 200)
vy0 = random(-200, 200)
vx1 = random(-200, 200)
vy1 = random(-200, 200)
# A strand ends up either left or right outside the screen.
# Each curve in a strand ends up at the same place
# (identical x1 and y1).
x1 = choice((-10, WIDTH))
y1 = random(HEIGHT)
# This code gives interesting effects as well:
#from math import radians, sin, cos
#angle = random(360)
#x1 = x0 + cos(radians(angle)) * 100
#y1 = y0 + sin(radians(angle)) * 100
for j in range(100):
beginpath(x0, y0)
curveto(
# The bend of each curve in a strand differs slightly
# at the start, so the strand looks thicker at the start
# and then all the curves come together at x1 and y1.
x0+vx0+random(80),
y0+vy0+random(80),
x1+vx1,
y1+vy1,
x1,
y1
)
endpath()
"""
# Some type, with a heart symbol!
heart = u"\u2665"
s1 = "strands of analogous curves "+heart
s2 = "gratuitous type always looks cool on these things"
fill(1, 1, 1, 0.85)
fontsize(18)
text(s1, 65, HEIGHT/2)
fontsize(9)
text(s2.upper(), 65, HEIGHT/2+12)
stroke(1)
strokewidth(1)
line(0, HEIGHT/2, 60, HEIGHT/2)
""" | 28.341176 | 68 | 0.628892 | 380 | 2,409 | 3.976316 | 0.473684 | 0.023163 | 0.031767 | 0.023825 | 0.023825 | 0 | 0 | 0 | 0 | 0 | 0 | 0.077445 | 0.26567 | 2,409 | 85 | 69 | 28.341176 | 0.77671 | 0.457866 | 0 | 0 | 0 | 0 | 0.014583 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.085714 | 0 | 0.085714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56f7e6b34768a05254458c5974b6b68155a3ea9f | 4,704 | py | Python | tests/db/ops/test_import_convert_str.py | simonsobs/acondbs | 6ca11c2889d827ecdb2b54d0cf3b94b8cdd281e6 | [
"MIT"
] | null | null | null | tests/db/ops/test_import_convert_str.py | simonsobs/acondbs | 6ca11c2889d827ecdb2b54d0cf3b94b8cdd281e6 | [
"MIT"
] | 24 | 2020-04-02T19:29:07.000Z | 2022-03-08T03:05:43.000Z | tests/db/ops/test_import_convert_str.py | simonsobs/acondbs | 6ca11c2889d827ecdb2b54d0cf3b94b8cdd281e6 | [
"MIT"
] | 1 | 2020-04-08T15:48:28.000Z | 2020-04-08T15:48:28.000Z | import csv
from io import StringIO
import datetime
from sqlalchemy import MetaData
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy_utils import EncryptedType
import pytest
from acondbs import create_app
from acondbs.db.ops import convert_data_type_for_insert
##__________________________________________________________________||
sa = SQLAlchemy()
class SampleTable(sa.Model):
__tablename__ = "sample_table"
id_ = sa.Column(sa.Integer(), primary_key=True)
# https://docs.sqlalchemy.org/en/14/core/type_basics.html#generic-types
text = sa.Column(sa.Text())
unicode_text = sa.Column(sa.UnicodeText())
boolean = sa.Column(sa.Boolean())
integer = sa.Column(sa.Integer())
float = sa.Column(sa.Float())
date = sa.Column(sa.Date())
date_time = sa.Column(sa.DateTime())
time = sa.Column(sa.Time())
encrypted = sa.Column(EncryptedType(sa.Text(), "8b5d3d25b3e5"))
##__________________________________________________________________||
@pytest.fixture
def app_with_empty_db():
database_uri = "sqlite:///:memory:"
app = create_app(SQLALCHEMY_DATABASE_URI=database_uri)
yield app
@pytest.fixture
def app_with_empty_tables(app_with_empty_db):
app = app_with_empty_db
# define tables
with app.app_context():
engine = sa.engine
metadata = MetaData()
metadata.reflect(bind=engine)
metadata.drop_all(bind=engine)
sa.Model.metadata.create_all(engine)
yield app
##__________________________________________________________________||
params = [
pytest.param(
dict(
text="abcde",
unicode_text="絵文字😀 😃 😄 😁 😆",
boolean=False,
integer=512,
float=2.34556234,
date=datetime.date(2021, 10, 7),
date_time=datetime.datetime(2021, 10, 7, 15, 4, 20),
time=datetime.time(15, 4, 20),
encrypted="secret string",
),
id="one",
),
pytest.param(
dict(
boolean=True,
),
id="bool-true",
),
pytest.param(
dict(
text="",
unicode_text="",
boolean=None,
integer=None,
float=None,
date=None,
date_time=None,
time=None,
encrypted=None,
),
id="none",
),
]
@pytest.mark.parametrize("data", params)
def test_convert(app_with_empty_tables, data):
"""test convert_data_type_for_insert()"""
app = app_with_empty_tables
tbl_name = "sample_table"
    expected = list(data.items())  # e.g., [('text', 'abcde'), ...]
    fields = list(data.keys())  # e.g., ['text', 'unicode_text', ...]
    # Delete any existing rows from the table. (Unexpectedly, the table is
    # sometimes not empty at this point; the reason is unclear.)
with app.app_context():
SampleTable.query.delete()
sa.session.commit()
# enter data
with app.app_context():
row = SampleTable(**data)
sa.session.add(row)
sa.session.commit()
# assert the data are committed as they entered
with app.app_context():
row = SampleTable.query.one()
actual = [(f, getattr(row, f)) for f in fields]
assert actual == expected
# export to csv as string
with app.app_context():
csv_str = _export_tbl_to_csv(tbl_name)
# empty the table
SampleTable.query.delete()
sa.session.commit()
# import from the csv
with app.app_context():
# confirm the table is empty
assert SampleTable.query.count() == 0
_import_tbl_from_csv(tbl_name, csv_str)
# assert
with app.app_context():
row = SampleTable.query.one()
actual = [(f, getattr(row, f)) for f in fields]
assert actual == expected
def _export_tbl_to_csv(tbl_name):
result_proxy = sa.session.execute(f"select * from {tbl_name}")
b = StringIO()
csv_writer = csv.writer(b, lineterminator="\n")
csv_writer.writerow(result_proxy.keys())
csv_writer.writerows(result_proxy)
ret = b.getvalue()
b.close()
return ret
def _import_tbl_from_csv(tbl_name, csv_str):
engine = sa.engine
metadata = MetaData()
metadata.reflect(bind=engine)
tbl = metadata.tables[tbl_name]
rows = list(csv.reader(StringIO(csv_str)))
fields = rows[0]
rows = rows[1:]
field_types = [tbl.columns[f].type for f in fields]
data = [
{
f: convert_data_type_for_insert(e, t)
for f, t, e in zip(fields, field_types, r)
}
for r in rows
]
ins = tbl.insert()
sa.session.execute(ins, data)
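
# Illustration only (not executed by the test run): the two helpers above
# round-trip a table through a CSV string, e.g.
#   csv_str = _export_tbl_to_csv("sample_table")
#   _import_tbl_from_csv("sample_table", csv_str)
# convert_data_type_for_insert() is expected to turn each CSV string back into
# the Python value matching the column type (e.g. "512" -> 512 for Integer).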
##__________________________________________________________________||
| 25.427027 | 75 | 0.625638 | 568 | 4,704 | 4.551056 | 0.27993 | 0.030948 | 0.034816 | 0.046035 | 0.240232 | 0.212379 | 0.133849 | 0.133849 | 0.111412 | 0.068859 | 0 | 0.013726 | 0.25659 | 4,704 | 184 | 76 | 25.565217 | 0.724049 | 0.144345 | 0 | 0.3125 | 0 | 0 | 0.032516 | 0 | 0 | 0 | 0 | 0 | 0.023438 | 1 | 0.039063 | false | 0 | 0.085938 | 0 | 0.226563 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56f8f1bd78977f320adb7ac5e330627101781a73 | 3,041 | py | Python | pythonFiles/PCA.py | SANDEEPREDDY56712/OELP_6thSem | 0904c5b47eb57b8399844ca5f3cd9dded6361c5a | [
"MIT"
] | null | null | null | pythonFiles/PCA.py | SANDEEPREDDY56712/OELP_6thSem | 0904c5b47eb57b8399844ca5f3cd9dded6361c5a | [
"MIT"
] | null | null | null | pythonFiles/PCA.py | SANDEEPREDDY56712/OELP_6thSem | 0904c5b47eb57b8399844ca5f3cd9dded6361c5a | [
"MIT"
] | 1 | 2021-07-30T17:24:10.000Z | 2021-07-30T17:24:10.000Z | import pandas as pd
from sklearn.decomposition import PCA
import DataPreprocessing as dp
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from scipy.stats import pearsonr
#################################################################################################
#################################################################################################
#################################################################################################
def implementClustering(principal_df):
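    """Standardise the principal-component scores, cluster them into three
    groups with k-means, and plot the result."""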
X_df = pd.DataFrame(principal_df)
principal_df = StandardScaler().fit_transform(X_df)
kmeans = KMeans(n_clusters=3, init='k-means++')
y_kmeans3 = kmeans.fit_predict(principal_df)
print(y_kmeans3)
cent = kmeans.cluster_centers_
print(cent)
plt.figure(figsize=(10,7))
X = np.array(principal_df)
plt.scatter(X[:,0],X[:,1],c=y_kmeans3,cmap='rainbow')
plt.title('K_means_clustering')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.show()
def loading_matrix(pca_model):
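    """Print the PCA loading matrix (one row per original variable after the transpose)."""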
variables_name=['V60','Vmn','Vsd','Asd','A+mn','A+sd','Br_mn','Br_sd','W']
mat = pd.DataFrame(pca_model.components_,columns=variables_name)
print(np.transpose(mat))
def plot_principalComponents(pca_train):
plt.figure(figsize=(8,6))
plt.title("PCA for Drivability")
plt.scatter(pca_train[:,0],pca_train[:,1],cmap='rainbow')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.show()
def correlation(X,Y):
return pearsonr(X,Y)[0]
if __name__=='__main__':
dataset = pd.DataFrame(dp.X_norm)
#print(dataset)
pca_obd = PCA(n_components=2)
    # fit_transform() both fits the PCA and projects the data, so a separate
    # fit() call beforehand is redundant.
    principal_comp = pca_obd.fit_transform(dp.X_norm)
############# PRINTING THE TYPE ##########################################
print(type(principal_comp))
principal_df = pd.DataFrame(data=principal_comp,columns=['PC1','PC2'])
print(principal_df)
X = dp.X
###################################################################################
    ############### CALCULATING CORRELATION MATRIX ###################################
###################################################################################
corr_matrix = []
for i in range(X.shape[1]):
temp = []
for j in range(principal_comp.shape[1]):
temp.append(correlation(X[:,i],principal_comp[:,j]))
corr_matrix.append(temp)
corr_matrix = np.array(corr_matrix)
print(pd.DataFrame(corr_matrix,index= ['V60','Vmn','Vsd','Asd','A+mn','A+sd','Br_mn','Br_sd','W'],columns=['PC1','PC2']))
###################################################################################
    ############## CALCULATING VARIANCE RETAINED  ####################################
###################################################################################
print("Amount of data held after Dimensionality Reduction")
print(sum(pca_obd.explained_variance_ratio_)*100)
#RCA(principal_comp)
#plot_principalComponents(principal_comp)
    #loading_matrix(pca_obd)
implementClustering(principal_df)
| 32.698925 | 122 | 0.545215 | 340 | 3,041 | 4.670588 | 0.361765 | 0.055416 | 0.013224 | 0.018892 | 0.104534 | 0.076826 | 0.076826 | 0.076826 | 0.076826 | 0.034005 | 0 | 0.011523 | 0.086814 | 3,041 | 92 | 123 | 33.054348 | 0.560317 | 0.059849 | 0 | 0.105263 | 0 | 0 | 0.098266 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070175 | false | 0 | 0.157895 | 0.017544 | 0.245614 | 0.140351 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56ff69de5c0b77597019e7ce269a5c5386a35249 | 1,519 | py | Python | uocsecrets/forum/urls.py | jeff-zqiu/uocweb | bb6e99a7ab01c9634f8b8446127c4bd1c0701388 | [
"MIT"
] | 1 | 2018-09-24T13:32:06.000Z | 2018-09-24T13:32:06.000Z | uocsecrets/forum/urls.py | jeff-zqiu/uocweb | bb6e99a7ab01c9634f8b8446127c4bd1c0701388 | [
"MIT"
] | null | null | null | uocsecrets/forum/urls.py | jeff-zqiu/uocweb | bb6e99a7ab01c9634f8b8446127c4bd1c0701388 | [
"MIT"
] | null | null | null | from django.urls import path, include
from . import views
from django.views.generic import TemplateView
app_name = 'forum'
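# Setting app_name lets callers reverse these routes through the namespace,
# e.g. reverse('forum:content', args=[post_id]).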
urlpatterns = [
# /forum/
    path('about/', TemplateView.as_view(template_name='forum/about.html'), name='about'),
    path('', views.IndexView.as_view(), name='index'),
    path('top/', views.IndexView.as_view(), name='top'),
    path('new/', views.IndexView.as_view(), name='new'),
    path('<str:mode>/<int:page>/', views.PageView.as_view(), name='page'),
    # /forum/edit/
    path('edit/', views.EditView.as_view(), name='new_post'),
    path('<int:post_id>/edit/', views.EditView.as_view(), name='edit'),
    path('<int:post_id>/edit/delete/', views.delete, name='delete'),
    # /forum/<post_id>/
    path('<int:post_id>/', views.ContentView.as_view(), name='content'),
    path('<int:post_id>/clickup/', views.ClickUpView.as_view(), name='clickup'),
    # /forum/<post_id>/comment/
    path('<int:post_id>/comment/', views.CommentView.as_view(), name='new_comment'),
    path('<int:post_id>/comment/<int:comment_id>/', views.CommentView.as_view(), name='comment'),
    path('sign_up/', views.SignUpView.as_view(), name='sign_up'),
    path('login/', views.LoginView.as_view(template_name='forum/login.html',
                                           extra_context={'next': '/forum/'}), name='login'),
    path('logout/', views.LogoutView.as_view(), name='logout'),
    # /forum/user/
    path('user/<str:username>/', views.UserView.as_view(), name='user'),
] | 41.054054 | 97 | 0.631995 | 199 | 1,519 | 4.663317 | 0.256281 | 0.096983 | 0.140086 | 0.084052 | 0.331897 | 0.116379 | 0 | 0 | 0 | 0 | 0 | 0 | 0.150757 | 1,519 | 37 | 98 | 41.054054 | 0.71938 | 0.050691 | 0 | 0 | 0 | 0 | 0.253306 | 0.091162 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
71008fe29a8062c4d781fffdb3dbd9227f9e7c32 | 12,085 | py | Python | leer/core/primitives/block.py | WTRMQDev/leer | c77c6c1d31e6d99996f471bf4c45b8af70f44fa7 | [
"MIT"
] | 5 | 2018-11-10T03:33:37.000Z | 2019-08-23T07:02:32.000Z | leer/core/primitives/block.py | WTRMQDev/leer | c77c6c1d31e6d99996f471bf4c45b8af70f44fa7 | [
"MIT"
] | 2 | 2018-11-22T11:10:49.000Z | 2018-12-15T14:44:03.000Z | leer/core/primitives/block.py | WTRMQDev/leer | c77c6c1d31e6d99996f471bf4c45b8af70f44fa7 | [
"MIT"
] | 2 | 2018-10-30T13:43:54.000Z | 2018-11-13T06:30:56.000Z | from leer.core.primitives.header import Header, PoPoW, VoteData
from leer.core.storage.txos_storage import TXOsStorage
from leer.core.chains.headers_manager import HeadersManager
from leer.core.storage.excesses_storage import ExcessesStorage
from leer.core.storage.headers_storage import HeadersStorage
from leer.core.primitives.transaction_skeleton import TransactionSkeleton
from leer.core.lubbadubdub.transaction import Transaction
from leer.core.lubbadubdub.ioput import IOput
from leer.core.lubbadubdub.offset_utils import sum_offset
from time import time
from leer.core.parameters.dynamic import next_reward, next_target
from leer.core.parameters.constants import initial_target
import functools
class Block():
def __init__(self, storage_space, header=None, transaction_skeleton=None):
self._header = header if header else Header()
self.transaction_skeleton = transaction_skeleton if transaction_skeleton else TransactionSkeleton()
self.tx=None
self.storage_space = storage_space
@property
def header(self):
try:
return self._header
except:
self._header = Header()
return self._header
@property
def hash(self):
return self.header.hash
@property
def partial_hash(self):
return self.header.partial_hash
def serialize(self, rtx, rich_block_format=False, max_size =40000):
serialized=b""
serialized += self.header.serialize()
serialized += self.transaction_skeleton.serialize(rich_format=rich_block_format, max_size=max_size,
full_tx = build_tx_from_skeleton(self.transaction_skeleton,\
self.storage_space.txos_storage,\
self.storage_space.excesses_storage,\
self.header.height, self.header.version, rtx=rtx,\
historical = True) if rich_block_format else None)
return serialized
@classmethod
@functools.lru_cache(maxsize=40)
def from_serialized(cls, serialized_block, storage_space):
b = cls(storage_space=storage_space)
b.deserialize(serialized_block)
return b
def deserialize(self, serialized):
self.deserialize_raw(serialized)
def deserialize_raw(self, serialized):
serialized = self.header.deserialize_raw(serialized)
serialized = self.transaction_skeleton.deserialize_raw(serialized, storage_space=self.storage_space)
return serialized
def non_context_verify(self, rtx):
'''
While this check is called 'non_context', it actually uses context since it needs:
a) fully validated headers chain up to this block
b) downloaded outputs
c) blocks which create inputs spent in checked(self) block should be applied
        Currently, if those conditions are not satisfied, the block is marked
        as not_downloaded and thus cannot be validated.
        To verify a block we need to:
        0) check that the header is known and valid
        1) verify the transaction
        2) check that the transaction can be applied
        3) check the reward size (it could actually be checked at the header level)
'''
# stage 1
assert self.storage_space.headers_storage.has(self.header.hash, rtx=rtx), "Block's header is unknown"
#self.storage_space.headers_storage.context_validation(self.header.hash)
assert not self.storage_space.headers_storage.get(self.header.hash, rtx=rtx).invalid, "Block's header is invalid. Reason: `%s`"%self.storage_space.headers_storage.get(self.header.hash, rtx=rtx).reason
        # Building the tx below also automatically checks that it is valid and can be applied.
self.tx = build_tx_from_skeleton(self.transaction_skeleton, txos_storage=self.storage_space.txos_storage,
excesses_storage=self.storage_space.excesses_storage,
block_height=self.header.height, block_version = self.header.version, rtx=rtx, non_context = True)
# stage 3 => should be moved to blockchain
#commitment_root, txos_root = self.storage_space.txos_storage.apply_block_tx_get_merkles_and_rollback(tx)
#excesses_root = self.storage_space.excesses_storage.apply_block_tx_get_merkles_and_rollback(tx)
#assert [commitment_root, txos_root, excesses_root]==self.header.merkles
# This is context validation too??? TODO
miner_subsidy, dev_reward = next_reward(self.header.prev, self.storage_space.headers_storage, rtx=rtx)
assert self.tx.coinbase.value == (miner_subsidy+self.transaction_skeleton.relay_fee), "Wrong miner subsidy"
if dev_reward:
            assert self.tx.dev_reward.value == dev_reward, "Wrong dev reward"
return True
def __str__(self):
return "Block< hash: %s..., height: %d, inputs: %d, outputs %d>"%(self.header.hash[:6], self.header.height
, len(self.transaction_skeleton.input_indexes),len(self.transaction_skeleton.output_indexes) )
def build_tx_from_skeleton(tx_skeleton, txos_storage, excesses_storage, block_height, block_version, rtx, historical=False, non_context = False):
'''
By given tx_skeleton and txos_storage return transaction.
If transaction is invalid or any input/output isn't available exception will be raised.
Optionally, if `historical` is True we will check output_indexes both in mempool and spent outputs.
'''
tx=Transaction(txos_storage=txos_storage, excesses_storage=excesses_storage)
for _i in tx_skeleton.input_indexes:
if historical or non_context:
tx.inputs.append(txos_storage.confirmed.find(_i, rtx=rtx))
else:
tx.inputs.append(txos_storage.confirmed.get(_i, rtx=rtx))
for _o in tx_skeleton.output_indexes:
if historical or non_context:
# About non_context: if we are on one branch and build block from another one
# and this block contain output which is already commited on our branch (tx is
# confirmed on both branches) we should get txo from confirmed storage
try:
tx.outputs.append(txos_storage.confirmed.find(_o, rtx=rtx))
except:
tx.outputs.append(txos_storage.mempool[_o])
else:
tx.outputs.append(txos_storage.mempool[_o])
tx.additional_excesses = tx_skeleton.additional_excesses.copy()
tx.updated_excesses = tx_skeleton.updated_excesses.copy()
tx.mixer_offset = tx_skeleton.mixer_offset
if historical or non_context:
assert tx.non_context_verify(block_height=block_height)
else:
assert tx.verify(block_height=block_height, block_version = block_version, rtx=rtx)
return tx
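
# Illustration only (variable names are hypothetical): a typical call when
# validating a block at a known height, mirroring the usage in Block.serialize:
#   tx = build_tx_from_skeleton(block.transaction_skeleton,
#                               storage_space.txos_storage,
#                               storage_space.excesses_storage,
#                               block_height=block.header.height,
#                               block_version=block.header.version,
#                               rtx=read_tx, historical=True)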
# Block-construction utilities (genesis generation and block templates).
def generate_genesis(tx, storage_space, wtx):
'''
1. spend inputs and add outputs and excesses from tx to storage
2. calc new mercles
3. generate header
4. rollback outputs
'''
storage = storage_space.txos_storage
excesses = storage_space.excesses_storage
    exc_merkle = excesses.apply_block_tx_get_merkles_and_rollback(tx, wtx=wtx)  # must be computed first, since we need to calculate address_excess_num_index
merkles = storage.apply_block_tx_get_merkles_and_rollback(tx, wtx=wtx) + [exc_merkle]
popow = PoPoW([])
votedata = VoteData()
target = initial_target
full_offset = tx.mixer_offset
header=Header(height = 0, supply=tx.coinbase.value, full_offset=full_offset, merkles=merkles, popow=popow, votedata=votedata, timestamp=int(time()), target=target, version=int(1), nonce=b"\x00"*16)
tx_skeleton = TransactionSkeleton(tx=tx)
new_block = Block(storage_space, header, tx_skeleton)
return new_block
def generate_block_template(tx, storage_space, wtx, get_tx_from_mempool = True, timestamp = None, dev_reward_vote = b"\x00"):
'''
Generate block template: block is correct but nonce (by default) is equal to zero.
Thus difficulty target (almost always) isn't met.
arguments:
tx [mandatory]: transaction which contains coinbase output. It also may contain other inputs and outputs.
storage_space [mandatory] : -
    get_tx_from_mempool [optional, default True]: if True, the transaction from the mempool will be merged into the block transaction. If the merge would produce an invalid tx (for instance, the mempool tx spends the same inputs as the tx with the coinbase), the mempool tx is discarded.
Inner logic:
1. apply block_tx to txos_storage and excesses_storage
2. calc new merkles
3. generate header with new merkles
4. generate block by appending tx_skeleton and new header
5. rollback block_tx
'''
storage = storage_space.txos_storage
excesses = storage_space.excesses_storage
current_block = storage_space.blocks_storage.get(storage_space.blockchain.current_tip(rtx=wtx), rtx=wtx)
if get_tx_from_mempool:
try:
tx = tx.merge(storage_space.mempool_tx.give_tx(), rtx=wtx)
except:
pass
    exc_merkle = excesses.apply_block_tx_get_merkles_and_rollback(tx, wtx=wtx)  # must be computed first, since we need to calculate address_excess_num_index
merkles = storage.apply_block_tx_get_merkles_and_rollback(tx, wtx=wtx) + [exc_merkle]
popow = current_block.header.next_popow()
supply = current_block.header.supply + tx.minted_value - tx.calc_new_outputs_fee()
height = current_block.header.height+1
votedata = VoteData()
target = next_target(current_block.hash, storage_space.headers_storage, rtx=wtx)
full_offset = sum_offset(current_block.header.full_offset,tx.mixer_offset)
if not timestamp:
timestamp = max(int(time()), storage_space.headers_storage.get(storage_space.blockchain.current_tip(rtx=wtx), rtx=wtx).timestamp+1)
header=Header(height = height, supply=supply, full_offset=full_offset, merkles=merkles, popow=popow, votedata=votedata, timestamp=timestamp, target=target, version=int(1), nonce=b"\x00"*16)
tx_skeleton = TransactionSkeleton(tx=tx)
new_block = Block(storage_space, header, tx_skeleton)
return new_block
class ContextBlock(Block):
# TODO consider removing ContextBlock. For now we store all information about validity in ContextHeader
# (it allows headers_manager to provide less useless paths).
'''
Wrapper of Block for inner storage. It contains contextual info about block: for instance is it valid in chain or not.
'''
def __init__(self, storage_space = None, block=None):
if block:
Block.__init__(self, storage_space= block.storage_space, header=block.header, transaction_skeleton=block.transaction_skeleton)
if block.tx:
self.tx=block.tx
else:
if not storage_space:
raise TypeError("ContextBlock initialized without context")
Block.__init__(self, storage_space)
self.invalid = False
self.reason = None
def serialize_with_context(self):
        ser = super(ContextBlock, self).serialize(rtx=None)  # rtx may be None here: it is only needed for rich block serialization
ser += int(self.invalid).to_bytes(1,'big')
reason = self.reason if self.reason else ""
ser += int(len(reason)).to_bytes(2,'big')
ser += reason.encode('utf-8')
return ser
@classmethod
@functools.lru_cache(maxsize=10)
def from_serialized(cls, serialized_block, storage_space):
b = cls(storage_space=storage_space)
b.deserialize(serialized_block)
return b
def deserialize(self, serialized):
self.deserialize_raw(serialized)
def deserialize_raw(self, serialized):
ser = super(ContextBlock, self).deserialize_raw(serialized)
self.invalid, ser = bool(ser[0]), ser[1:]
reason_len, ser = int.from_bytes(ser[:2], 'big'), ser[2:]
self.reason, ser = ser[:reason_len].decode('utf-8'), ser[reason_len:]
return ser
def __str__(self):
return "ContextBlock< hash: %s..., height: %d, inputs: %d, outputs %d, valid: %s, reason %s>"%(self.header.hash[:6], self.header.height
, len(self.transaction_skeleton.input_indexes),len(self.transaction_skeleton.output_indexes),
("-" if self.invalid else '+'), self.reason )
| 46.841085 | 289 | 0.725528 | 1,645 | 12,085 | 5.122796 | 0.179331 | 0.059808 | 0.032277 | 0.021597 | 0.349116 | 0.265456 | 0.237332 | 0.229263 | 0.212412 | 0.202445 | 0 | 0.004999 | 0.188912 | 12,085 | 257 | 290 | 47.023346 | 0.854724 | 0.244435 | 0 | 0.353659 | 0 | 0.012195 | 0.035452 | 0 | 0 | 0 | 0 | 0.007782 | 0.036585 | 1 | 0.115854 | false | 0.006098 | 0.079268 | 0.02439 | 0.304878 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7101adf147765864029e975e6ef3b5f8d4d932f9 | 1,465 | py | Python | Loan-Approval-Analysis/code.py | acharya221b/ga-learner-dsmp-repo | 9b493aff25cf861fa8b757d7f2e926e1dcbe6061 | [
"MIT"
] | null | null | null | Loan-Approval-Analysis/code.py | acharya221b/ga-learner-dsmp-repo | 9b493aff25cf861fa8b757d7f2e926e1dcbe6061 | [
"MIT"
] | null | null | null | Loan-Approval-Analysis/code.py | acharya221b/ga-learner-dsmp-repo | 9b493aff25cf861fa8b757d7f2e926e1dcbe6061 | [
"MIT"
] | null | null | null | # --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode
bank=pd.read_csv(path)
categorical_var=bank.select_dtypes(include='object')
print(categorical_var)
numerical_var=bank.select_dtypes(include='number')
print(numerical_var)
# code starts here
# code ends here
# --------------
# code starts here
banks=bank.drop('Loan_ID',axis=1)
print(banks.isnull().sum())
bank_mode=banks.mode().iloc[0]
print(type(bank_mode))
print(bank_mode)
banks.fillna(bank_mode, inplace=True)
print(banks.isnull().sum())
#code ends here
# --------------
# Code starts here
avg_loan_amount=banks.pivot_table(index=['Gender','Married','Self_Employed'],values='LoanAmount',aggfunc='mean')
print(avg_loan_amount)
# code ends here
# --------------
# code starts here
loan_approved_se=len(banks[(banks['Self_Employed']=='Yes') & (banks['Loan_Status']=='Y')])
loan_approved_nse=len(banks[(banks['Self_Employed']=='No') & (banks['Loan_Status']=='Y')])
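# 614 is the total number of loan applications in this dataset
# (presumably len(banks)); the exercise scaffold hard-codes it.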
percentage_se=loan_approved_se*100/614
percentage_nse=loan_approved_nse*100/614
# code ends here
# --------------
# code starts here
loan_term=banks['Loan_Amount_Term'].apply(lambda x:x/12)
big_loan_term=len(banks[loan_term>=25])
print(big_loan_term)
print(banks[loan_term>=25])
# code ends here
# --------------
# code starts here
loan_groupby=banks.groupby('Loan_Status')[['ApplicantIncome', 'Credit_History']]
mean_values=loan_groupby.mean()
# code ends here
| 18.08642 | 112 | 0.703072 | 211 | 1,465 | 4.663507 | 0.364929 | 0.060976 | 0.085366 | 0.081301 | 0.247967 | 0.144309 | 0.091463 | 0 | 0 | 0 | 0 | 0.015094 | 0.095563 | 1,465 | 80 | 113 | 18.3125 | 0.727547 | 0.202048 | 0 | 0.074074 | 0 | 0 | 0.148213 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7103030bd95a829786ccbdd3fa84915b9d8496a9 | 1,568 | py | Python | test/test_mega.py | adacker10/showdown | 8ceb1ff46d5c33ec3055928d6ad293224446f63c | [
"MIT"
] | 8 | 2019-02-02T01:15:57.000Z | 2021-12-23T04:43:46.000Z | test/test_mega.py | adacker10/showdown | 8ceb1ff46d5c33ec3055928d6ad293224446f63c | [
"MIT"
] | null | null | null | test/test_mega.py | adacker10/showdown | 8ceb1ff46d5c33ec3055928d6ad293224446f63c | [
"MIT"
] | 6 | 2020-09-11T13:15:05.000Z | 2022-03-18T15:46:35.000Z | import unittest
from sim.battle import Battle
from data import dex
class TestMega(unittest.TestCase):
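    """Battle-simulation tests covering mega evolution mechanics."""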
def test_pidgeot(self):
battle = Battle(debug=False, rng=False)
battle.join(0, [{'species': 'pidgeot',
'item': 'pidgeotite',
'moves': ['tackle', 'protect']}])
battle.join(1, [{'species': 'mew', 'moves': ['tackle']}])
battle.choose(0, dex.Decision('move', 0, mega=True))
battle.choose(1, dex.Decision('move', 0, mega=True))
battle.do_turn()
pidgeot = battle.sides[0].pokemon[0]
self.assertEqual(pidgeot.species, 'pidgeotmega')
self.assertEqual(pidgeot.hp, pidgeot.maxhp-23)
def test_mewtwo_x(self):
battle = Battle(debug=False, rng=False)
battle.join(0, [{'species': 'mewtwo',
'item': 'mewtwonitex',
'moves': ['tackle', 'protect']
}])
battle.join(1, [{'species': 'charizard',
'item': 'charizarditex',
'moves': ['tackle']
}])
battle.choose(0, dex.Decision('move', 0, mega=True))
battle.choose(1, dex.Decision('move', 0, mega=False))
battle.do_turn()
mewtwo = battle.sides[0].pokemon[0]
charizard = battle.sides[1].pokemon[0]
self.assertEqual(mewtwo.species, 'mewtwomegax')
self.assertEqual(mewtwo.hp, mewtwo.maxhp-17)
def runTest(self):
self.test_pidgeot()
        self.test_mewtwo_x()
| 34.844444 | 65 | 0.53699 | 166 | 1,568 | 5.024096 | 0.295181 | 0.047962 | 0.071942 | 0.076739 | 0.465228 | 0.417266 | 0.417266 | 0.318945 | 0.318945 | 0.318945 | 0 | 0.020314 | 0.309311 | 1,568 | 44 | 66 | 35.636364 | 0.749769 | 0 | 0 | 0.222222 | 0 | 0 | 0.124362 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.194444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7105dbc3e130b4596cac492082bc99f3266720ff | 3,872 | py | Python | tests/test_pybrain.py | carlosf/rep | 365917a1d582c7d784e26f80808eeed18f655cb3 | [
"Apache-2.0"
] | null | null | null | tests/test_pybrain.py | carlosf/rep | 365917a1d582c7d784e26f80808eeed18f655cb3 | [
"Apache-2.0"
] | null | null | null | tests/test_pybrain.py | carlosf/rep | 365917a1d582c7d784e26f80808eeed18f655cb3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014-2015 Yandex LLC and contributors <https://yandex.com/>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# <http://www.apache.org/licenses/LICENSE-2.0>
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
from rep.test.test_estimators import check_classifier, check_regression, check_params, \
generate_classification_data, check_classification_reproducibility
from rep.estimators.pybrain import PyBrainClassifier, PyBrainRegressor
from sklearn.ensemble import BaggingClassifier
from rep.estimators import SklearnClassifier
__author__ = 'Artem Zhirokhov'
classifier_params = {
'has_staged_pp': False,
'has_importances': False,
'supports_weight': False
}
regressor_params = {
'has_staged_predictions': False,
'has_importances': False,
'supports_weight': False
}
def test_pybrain_params():
check_params(PyBrainClassifier, layers=[1, 2], epochs=5, use_rprop=True, hiddenclass=['LinearLayer'])
check_params(PyBrainRegressor, layers=[1, 2], epochs=5, etaplus=1.3, hiddenclass=['LinearLayer'], learningrate=0.1)
def test_pybrain_classification():
clf = PyBrainClassifier(epochs=2)
check_classifier(clf, **classifier_params)
check_classifier(PyBrainClassifier(epochs=-1, continue_epochs=1, layers=[]), **classifier_params)
check_classifier(PyBrainClassifier(epochs=2, layers=[5, 2]), **classifier_params)
def test_pybrain_reproducibility():
try:
import numpy
X, y, _ = generate_classification_data()
clf1 = PyBrainClassifier(layers=[4], epochs=2).fit(X, y)
clf2 = PyBrainClassifier(layers=[4], epochs=2).fit(X, y)
print(clf1.predict_proba(X)-clf2.predict_proba(X))
        assert numpy.allclose(clf1.predict_proba(X), clf2.predict_proba(X)), 'different predictions'
check_classification_reproducibility(clf1, X, y)
    except AssertionError:
        # PyBrain cannot make training reproducible, so the assertions above
        # are expected to fail; swallow the failure.
        pass
def test_pybrain_Linear_MDLSTM():
check_classifier(PyBrainClassifier(epochs=2, layers=[10, 2], hiddenclass=['LinearLayer', 'MDLSTMLayer']),
**classifier_params)
check_regression(PyBrainRegressor(epochs=3, layers=[10, 2], hiddenclass=['LinearLayer', 'MDLSTMLayer']),
**regressor_params)
def test_pybrain_SoftMax_Tanh():
check_classifier(PyBrainClassifier(epochs=2, layers=[10, 5, 2], hiddenclass=['SoftmaxLayer', 'SoftmaxLayer', 'TanhLayer'], use_rprop=True),
**classifier_params)
check_regression(PyBrainRegressor(epochs=2, layers=[10, 5, 2], hiddenclass=['SoftmaxLayer', 'TanhLayer', 'TanhLayer']),
**regressor_params)
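# Note: the next function's name does not start with ``test_``, so pytest will
# not collect it automatically; call it explicitly if needed.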
def pybrain_test_partial_fit():
clf = PyBrainClassifier(layers=[4], epochs=2)
X, y, _ = generate_classification_data()
clf.partial_fit(X, y)
clf.partial_fit(X[:2], y[:2])
def test_pybrain_multi_classification():
check_classifier(PyBrainClassifier(), n_classes=4, **classifier_params)
def test_pybrain_regression():
check_regression(PyBrainRegressor(), **regressor_params)
def test_pybrain_multi_regression():
check_regression(PyBrainRegressor(), n_targets=4, **regressor_params)
def test_simple_stacking_pybrain():
base_pybrain = PyBrainClassifier()
base_bagging = BaggingClassifier(base_estimator=base_pybrain, n_estimators=3)
check_classifier(SklearnClassifier(clf=base_bagging), **classifier_params)
| 37.592233 | 143 | 0.737345 | 460 | 3,872 | 5.982609 | 0.341304 | 0.022892 | 0.040698 | 0.055233 | 0.331032 | 0.24564 | 0.139898 | 0.079942 | 0 | 0 | 0 | 0.019512 | 0.152893 | 3,872 | 102 | 144 | 37.960784 | 0.819512 | 0.167355 | 0 | 0.163934 | 0 | 0 | 0.081073 | 0.00686 | 0 | 0 | 0 | 0 | 0.016393 | 1 | 0.163934 | false | 0.016393 | 0.131148 | 0 | 0.295082 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7113e4f6820f3436a0a547fe628d3433163dbab4 | 2,756 | py | Python | single_preprocessing.py | OpenVessel/RedTinSaintBernard-for-BraTS2021-challenge | dafe6f33ff6269869377d01a014ab1528b0f1c1d | [
"MIT"
] | null | null | null | single_preprocessing.py | OpenVessel/RedTinSaintBernard-for-BraTS2021-challenge | dafe6f33ff6269869377d01a014ab1528b0f1c1d | [
"MIT"
] | null | null | null | single_preprocessing.py | OpenVessel/RedTinSaintBernard-for-BraTS2021-challenge | dafe6f33ff6269869377d01a014ab1528b0f1c1d | [
"MIT"
] | null | null | null | import os
import pandas as pd
from brats_toolkit.preprocessor import Preprocessor
# instantiate
prep = Preprocessor()
## convert mapping info
## survial
name_mapping = r"E:\Datasets\BraTS challenge\MICCAI_BraTS2020_TrainingData\name_mapping.csv"
survival_info = r"E:\Datasets\BraTS challenge\MICCAI_BraTS2020_TrainingData\survival_info.csv"
df_name_mapping = pd.read_csv(name_mapping)
df_survival_info = pd.read_csv(survival_info)
root_path_train = r"E:\Datasets\BraTS challenge\MICCAI_BraTS2020_TrainingData"
outputDir = r"E:\Datasets\BraTS challenge\Output\Output_training"
list_of_dir = os.listdir(root_path_train)
for name_of_file in list_of_dir:
    # Skip the CSV metadata files (name_mapping.csv, survival_info.csv).
if name_of_file.endswith('.csv'):
continue
    # Build the full path to this case folder and list the files inside it.
readable_path = os.path.join(root_path_train , name_of_file)
list_of_zips = os.listdir(readable_path)
    # Loop over the files in this case folder, collecting their full paths.
list_sort = []
outpath = os.path.join(outputDir, name_of_file)
for zips in list_of_zips:
readable_path_2nd = os.path.join(readable_path, zips)
list_sort.append(readable_path_2nd)
list_sort = sorted(list_sort)
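    # After sorting, the BraTS modality files come out in alphabetical order:
    # [0] flair, [1] seg, [2] t1, [3] t1ce, [4] t2.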
    # Note: the segmentation file (list_sort[1], *_seg.nii.gz) is not passed to the preprocessor.
examName = name_of_file
    flaFile = list_sort[0]  # *_flair.nii.gz
    t1File = list_sort[2]   # *_t1.nii.gz
    t1cFile = list_sort[3]  # *_t1ce.nii.gz
    t2File = list_sort[4]   # *_t2.nii.gz
    # single_preprocess() runs the BraTS Toolkit preprocessing pipeline inside a Docker container.
prep.single_preprocess(t1File=t1File,
t1cFile=t1cFile,
t2File=t2File,
flaFile=flaFile,
outputFolder=outputDir,
mode="cpu",
confirm=True,
skipUpdate=False,
gpuid='0')
# start_docker(exam_import_folder=exam_import_folder, exam_export_folder=exam_export_folder,
# dicom_import_folder=dicom_import_folder, nifti_export_folder=nifti_export_folder, mode=self.mode, gpuid=self.gpuid)
# Expected output folders:
#hdbet_brats-space
#hdbet_native-space
#mask_hdbet_brats-space
#masks_hdbet-space
#niftis_brats-space
#png_slices
#registrations
| 38.816901 | 171 | 0.759071 | 382 | 2,756 | 5.159686 | 0.324607 | 0.076104 | 0.091324 | 0.105023 | 0.307966 | 0.295789 | 0.295789 | 0.295789 | 0.218163 | 0.218163 | 0 | 0.047557 | 0.16074 | 2,756 | 70 | 172 | 39.371429 | 0.804583 | 0.445573 | 0 | 0 | 0 | 0 | 0.176353 | 0.122912 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7116b439a184e893f7256cd540dd3d4a730960fe | 4,263 | py | Python | infer/lib/capture/make.py | vaginessa/infer | 553d39eb7d7663fb8762d368feb3b824416f37a1 | [
"BSD-3-Clause"
] | null | null | null | infer/lib/capture/make.py | vaginessa/infer | 553d39eb7d7663fb8762d368feb3b824416f37a1 | [
"BSD-3-Clause"
] | null | null | null | infer/lib/capture/make.py | vaginessa/infer | 553d39eb7d7663fb8762d368feb3b824416f37a1 | [
"BSD-3-Clause"
] | null | null | null | import argparse
import os
import subprocess
import traceback
MODULE_NAME = 'make/cc/clang/gcc'
MODULE_DESCRIPTION = '''Run analysis of code built with commands like:
make [target]
clang [compiler_options] <filename>
gcc [compiler_options] <filename>
cc [compiler_options] <filename>
Analysis examples:
infer -- make all
infer -- clang -c srcfile.m
infer -- gcc -c srcfile.c'''
def gen_instance(*args):
return MakeCapture(*args)
def mkdir_if_not_exists(path):
if not os.path.exists(path):
os.mkdir(path)
def create_argparser(group_name=MODULE_NAME):
"""This defines the set of arguments that get added by this module to the
set of global args defined in the infer top-level module
Do not use this function directly, it should be invoked by the infer
top-level module"""
parser = argparse.ArgumentParser(add_help=False)
group = parser.add_argument_group(
"{grp} module".format(grp=MODULE_NAME),
description=MODULE_DESCRIPTION,
)
group.add_argument(
'-hd', '--headers',
action='store_true',
help='Analyze code in header files',
)
group.add_argument(
'--models_mode',
action='store_true',
dest='models_mode',
help='Mode for computing the models',
)
group.add_argument(
'--no_failures_allowed',
action='store_true',
dest='no_failures_allowed',
help='Fail if at least one of the translations fails',
)
group.add_argument(
'-tm', '--testing_mode',
dest='testing_mode',
action='store_true',
help='Testing mode for the translation: Do not translate libraries'
' (including enums)')
group.add_argument(
'-fs', '--frontend-stats',
dest='frontend_stats',
action='store_true',
help='Output statistics about the capture phase to *.o.astlog')
group.add_argument(
'-fd', '--frontend-debug',
dest='frontend_debug',
action='store_true',
help='Output debugging information to *.o.astlog during capture')
return parser
class MakeCapture:
def __init__(self, args, cmd):
self.args = args
self.cmd = [os.path.basename(cmd[0])] + cmd[1:]
def create_results_dir(self):
results_dir = self.args.infer_out
mkdir_if_not_exists(results_dir)
mkdir_if_not_exists(os.path.join(results_dir, 'specs'))
mkdir_if_not_exists(os.path.join(results_dir, 'captured'))
mkdir_if_not_exists(os.path.join(results_dir, 'sources'))
def get_envvars(self):
env_vars = dict(os.environ)
env_vars['INFER_RESULTS_DIR'] = self.args.infer_out
wrappers_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '..', 'wrappers')
env_vars['INFER_OLD_PATH'] = env_vars['PATH']
env_vars['PATH'] = '{wrappers}{sep}{path}'.format(
wrappers=wrappers_path,
sep=os.pathsep,
path=env_vars['PATH'],
)
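        # With the wrappers directory first on PATH, compiler invocations
        # (cc/gcc/clang) spawned by make resolve to Infer's capture wrappers.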
return env_vars
def capture(self):
self.create_results_dir()
env_vars = self.get_envvars()
frontend_args = []
if self.args.headers:
frontend_args.append('-headers')
if self.args.models_mode:
frontend_args.append('-models_mode')
if self.args.project_root:
frontend_args += ['-project_root', self.args.project_root]
if self.args.testing_mode:
frontend_args.append('-testing_mode')
if self.args.frontend_debug:
frontend_args += ['-debug']
env_vars['FCP_DEBUG_MODE'] = '1'
if self.args.frontend_stats:
frontend_args += ['-stats']
env_vars['FCP_DEBUG_MODE'] = '1'
if self.args.no_failures_allowed:
env_vars['FCP_REPORT_FRONTEND_FAILURE'] = '1'
# export an env variable with all the arguments to pass to InferClang
env_vars['FCP_INFER_FRONTEND_ARGS'] = ' '.join(frontend_args)
try:
subprocess.check_call(self.cmd, env=env_vars)
return os.EX_OK
except subprocess.CalledProcessError as exc:
if self.args.debug:
traceback.print_exc()
return exc.returncode
| 32.295455 | 77 | 0.630776 | 536 | 4,263 | 4.787313 | 0.311567 | 0.04053 | 0.031177 | 0.031177 | 0.122369 | 0.085737 | 0.065472 | 0.065472 | 0.065472 | 0 | 0 | 0.001575 | 0.255454 | 4,263 | 131 | 78 | 32.541985 | 0.806868 | 0.066151 | 0 | 0.12844 | 0 | 0 | 0.262932 | 0.023215 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06422 | false | 0 | 0.036697 | 0.009174 | 0.155963 | 0.009174 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7116ca2e4f0dcb2bd507fa78836458daf8085478 | 2,315 | py | Python | projects/Doodle/Alexander/code/train/utils.py | liaopeiyuan/ml-arsenal-public | f8938ce3cb58b35fc7cc20d096c39a85ec9780b2 | [
"Apache-2.0"
] | 280 | 2018-10-21T01:07:18.000Z | 2021-12-30T11:29:48.000Z | projects/Doodle/YourVenn_code/code/train/utils.py | liaopeiyuan/ml-arsenal-public | f8938ce3cb58b35fc7cc20d096c39a85ec9780b2 | [
"Apache-2.0"
] | 3 | 2018-11-13T08:04:48.000Z | 2020-04-17T09:20:03.000Z | projects/Doodle/YourVenn_code/code/train/utils.py | liaopeiyuan/ml-arsenal-public | f8938ce3cb58b35fc7cc20d096c39a85ec9780b2 | [
"Apache-2.0"
] | 59 | 2018-10-21T04:38:23.000Z | 2021-03-29T07:58:47.000Z | from common import *
from torch.autograd import Variable
def to_var(x, volatile=False):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x, volatile=volatile)
def softmax_cross_entropy_criterion(logit, truth, is_average=True):
loss = F.cross_entropy(logit, truth, reduce=is_average)
return loss
def metric(logit, truth, is_average=True):
# with torch.no_grad():
prob = F.softmax(logit, 1)
value, top = prob.topk(3, dim=1, largest=True, sorted=True)
correct = top.eq(truth.view(-1, 1).expand_as(top))
if is_average==True:
# top-3 accuracy
correct = correct.float().sum(0, keepdim=False)
correct = correct/len(truth)
top = [correct[0], correct[0]+correct[1], correct[0]+correct[1]+correct[2]]
precision = correct[0]/1 + correct[1]/2 + correct[2]/3
return precision, top
else:
return correct
def do_valid( net, valid_loader, criterion ):
valid_num = 0
probs = []
truths = []
losses = []
corrects = []
for input, truth, _ in valid_loader:
input = input.cuda()
truth = truth.cuda()
input = to_var(input)
truth = to_var(truth)
logit = net(input)
prob = F.softmax(logit,1)
loss = criterion(logit, truth, False)
correct = metric(logit, truth, False)
valid_num += len(input)
probs.append(prob.data.cpu().numpy())
losses.append(loss.data.cpu().numpy())
corrects.append(correct.data.cpu().numpy())
truths.append(truth.data.cpu().numpy())
assert(valid_num == len(valid_loader.sampler))
#------------------------------------------------------
prob = np.concatenate(probs)
correct = np.concatenate(corrects)
truth = np.concatenate(truths).astype(np.int32).reshape(-1,1)
loss = np.concatenate(losses)
#---
#top = np.argsort(-predict,1)[:,:3]
loss = loss.mean()
correct = correct.mean(0)
top = [correct[0], correct[0]+correct[1], correct[0]+correct[1]+correct[2]]
precision = correct[0]/1 + correct[1]/2 + correct[2]/3
#----
valid_loss = np.array([
loss, top[0], top[2], precision
])
return valid_loss | 30.866667 | 84 | 0.570626 | 290 | 2,315 | 4.472414 | 0.282759 | 0.049345 | 0.069391 | 0.049345 | 0.197379 | 0.134156 | 0.134156 | 0.134156 | 0.134156 | 0.134156 | 0 | 0.024662 | 0.264363 | 2,315 | 75 | 85 | 30.866667 | 0.736935 | 0.056587 | 0 | 0.111111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018519 | 1 | 0.074074 | false | 0 | 0.037037 | 0 | 0.203704 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
71184f8e2f9e3b802d08210e84b8fd4a03eb2e43 | 1,281 | py | Python | day14/a.py | Cefqrn/advent-of-code-2021 | 1979f3cff981cfe1a5d59d39ec02f104b0e27abd | [
"MIT"
] | null | null | null | day14/a.py | Cefqrn/advent-of-code-2021 | 1979f3cff981cfe1a5d59d39ec02f104b0e27abd | [
"MIT"
] | null | null | null | day14/a.py | Cefqrn/advent-of-code-2021 | 1979f3cff981cfe1a5d59d39ec02f104b0e27abd | [
"MIT"
] | null | null | null | import os
from collections import defaultdict
with open(os.path.join(os.path.dirname(__file__), "input")) as f:
data = f.read().split('\n\n')
template, rules = data
rules = [x.split(' -> ') for x in rules.splitlines()]
rules = dict(rules)
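# Track counts of adjacent character pairs instead of materialising the
# polymer string: each rule "AB -> C" turns every pair AB into AC and CB.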
pair_counts = defaultdict(int)
for pair in zip(template, template[1:]):
    pair_counts[''.join(pair)] += 1
rules2 = {}
for pair, inserted_char in rules.items():
rules2[pair] = (pair[0] + inserted_char, inserted_char + pair[1])
for x in range(10):
for pair, count in tuple(pair_counts.items()):
if pair in rules2 and count:
for pair2 in rules2[pair]:
pair_counts[pair2] += count
pair_counts[pair] -= count
c = defaultdict(int)
for pair, count in pair_counts.items():
    c[pair[1]] += count
# Counting only the second character of each pair misses the polymer's first
# character (which never changes), so add it back.
c[template[0]] += 1

print(max(c.values()) - min(c.values()))
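# Part 2: run 30 more insertion steps (40 in total) and score again.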
for x in range(30):
for pair, count in tuple(pair_counts.items()):
if pair in rules2 and count:
for pair2 in rules2[pair]:
pair_counts[pair2] += count
pair_counts[pair] -= count
c = defaultdict(int)
for pair, count in pair_counts.items():
    c[pair[1]] += count
c[template[0]] += 1  # re-add the first character, as above

print(max(c.values()) - min(c.values()))
71187322e743030c13b6dd0566757709045bdff7 | 3,793 | py | Python | app/arguments.py | calio/taski | c06346d7e3600f41b1347c6d9f73616f17b226e4 | [
"MIT"
] | null | null | null | app/arguments.py | calio/taski | c06346d7e3600f41b1347c6d9f73616f17b226e4 | [
"MIT"
] | 1 | 2021-06-01T22:24:59.000Z | 2021-06-01T22:24:59.000Z | app/arguments.py | calio/taski | c06346d7e3600f41b1347c6d9f73616f17b226e4 | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import six
import argparse
import app
import app.taski as taski
def check_positive_int(val):
"""Make sure input argument is an positive integer"""
ival = int(val)
if ival <= 0:
raise argparse.ArgumentTypeError("%s is not a positive integer" % val)
return ival
def str2unicode(val):
"""
Python2 will set val to type `bytes` while Python3 will set val to
unicode. So we need to convert bytes to unicode in Python2.
https://stackoverflow.com/questions/22947181/dont-argparse-read-unicode-from-commandline
"""
if six.PY2:
return val.decode(sys.getfilesystemencoding())
return val
def parse(cmd=None):
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', help="config file path")
parser.set_defaults(config=os.path.expanduser("~") + "/.taski.yaml")
parser.add_argument('-d', '--dryrun', help="dryrun", action='store_true')
parser.add_argument('-v', '--verbose',
help="enable debugging", action='store_true')
subparsers = parser.add_subparsers(help='available commands')
plan_parser = subparsers.add_parser('plan', help='plan tasks')
plan_parser.add_argument('-v', '--verbose',
help="enable debugging", action='store_true')
plan_parser.add_argument('-l', '--limit',
help='limit number of tasks to plan',
type=check_positive_int, default=30)
plan_parser.add_argument('-n', '--daily-goal',
help='number of tasks scheduled per day',
type=check_positive_int, default=10)
plan_parser.set_defaults(func=taski.plan)
rank_parser = subparsers.add_parser('rank', help='rank tasks')
rank_parser.add_argument('-v', '--verbose',
help="enable debugging", action='store_true')
rank_parser.add_argument('-p', '--project', help='project name',
type=str2unicode)
rank_parser.add_argument('-t', '--tui', help='Use terminal UI for ranking',
default=False, action='store_true')
rank_parser.set_defaults(func=taski.rank)
show_parser = subparsers.add_parser('show', help='show things')
show_parser.add_argument('show_cmd', help='show things',
choices=["api_token", "stats", "config", "old_tasks", "completed_tasks"])
show_parser.add_argument(
'--since', help='show completed task since this date. Format "2007-4-29T10:13"')
show_parser.add_argument(
'--until', help='show completed task until this date. Format "2007-4-29T10:13"')
show_parser.set_defaults(since=None)
show_parser.set_defaults(until=None)
show_parser.set_defaults(func=taski.show)
dump_parser = subparsers.add_parser('dump', help='dump tasks to csv file: todoist.csv')
dump_parser.add_argument('-f', '--file', help="output file name",
default="taski.csv")
dump_parser.add_argument('-c', '--completed', help="include completed tasks",
action='store_true', default=False)
dump_parser.add_argument('-v', '--verbose',
help="enable debugging", action='store_true')
dump_parser.set_defaults(func=taski.dump)
version_parser = subparsers.add_parser(
'version', help='print version number')
version_parser.set_defaults(
quick_func=lambda args: sys.stdout.write(app.VERSION + "\n"))
    test_parser = subparsers.add_parser('test', help=r"¯\_(ツ)_/¯")
test_parser.set_defaults(func=taski.test)
if cmd:
args = parser.parse_args(cmd)
else:
args = parser.parse_args()
return args
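
# Illustration only: parsing and dispatching a command programmatically:
#   args = parse(["plan", "--limit", "5"])
#   args.func(args)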
| 36.471154 | 102 | 0.631163 | 469 | 3,793 | 4.940299 | 0.315565 | 0.062149 | 0.110056 | 0.064739 | 0.259819 | 0.132931 | 0.132931 | 0.132931 | 0.132931 | 0.101856 | 0 | 0.014458 | 0.234115 | 3,793 | 103 | 103 | 36.825243 | 0.782444 | 0.075402 | 0 | 0.085714 | 0 | 0 | 0.233822 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042857 | false | 0 | 0.085714 | 0 | 0.185714 | 0.014286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
711c0839688b9f5dedcef052e9032977bfdb8fbd | 13,158 | py | Python | nlp_uncertainty_ssl/models/emotion_classifier.py | apmoore1/nlp-uncertainty-ssl | 4531ffce61557b4d4a71b97698479a30f65efaec | [
"Apache-2.0"
] | null | null | null | nlp_uncertainty_ssl/models/emotion_classifier.py | apmoore1/nlp-uncertainty-ssl | 4531ffce61557b4d4a71b97698479a30f65efaec | [
"Apache-2.0"
] | null | null | null | nlp_uncertainty_ssl/models/emotion_classifier.py | apmoore1/nlp-uncertainty-ssl | 4531ffce61557b4d4a71b97698479a30f65efaec | [
"Apache-2.0"
] | null | null | null | from typing import Dict, Optional, List, Any
from allennlp.common.checks import check_dimensions_match, ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules import Seq2VecEncoder, TimeDistributed, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.modules import FeedForward
from allennlp.modules.input_variational_dropout import InputVariationalDropout
from allennlp.modules.attention import DotProductAttention
from allennlp.models.model import Model
from allennlp.modules.token_embedders import Embedding
from allennlp.nn import InitializerApplicator, RegularizerApplicator
import allennlp.nn.util as util
import numpy
from overrides import overrides
import torch
from torch.nn.modules.linear import Linear
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from nlp_uncertainty_ssl.metrics.jaccard_index import JaccardIndex
@Model.register("emotion_classifier")
class EmotionClassifier(Model):
"""
    The ``emotion_classifier`` is a multi-label classifier (it predicts 0-N
    labels per sample).
Parameters
----------
vocab : ``Vocabulary``, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : ``TextFieldEmbedder``, required
Used to embed the tokens ``TextField`` we get as input to the model.
    encoder : ``Seq2VecEncoder``, optional (default=None)
        An optional encoder that pools the (optionally contextualized) token
        representations into a single vector per sample.
    seq_encoder : ``Seq2SeqEncoder``, optional (default=None)
        An optional sequence encoder applied to the embedded tokens; its output
        is pooled with a learned dot-product attention vector.
    label_namespace : ``str``, optional (default=``labels``)
        The vocabulary namespace holding the multi-label classes.
        Unless you did something unusual, the default value should be what you want.
    feedforward : ``FeedForward``, optional, (default = None).
        An optional feedforward layer to apply after the encoder.
    dropout: ``float``, optional (default=``None``). Use `Variational Dropout
        <https://arxiv.org/abs/1512.05287>`_ for sequence representations and
        normal dropout for non-sequence representations.
    incl_neutral : ``bool``, optional (default = False)
        Forwarded to the ``JaccardIndex`` metric; controls whether the neutral
        (no-label) case is included when scoring.
    initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
        Used to initialize the model parameters.
    regularizer : ``RegularizerApplicator``, optional (default=``None``)
        If provided, will be used to calculate the regularization penalty during training.
    """
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
label_namespace: str = "labels",
encoder: Optional[Seq2VecEncoder] = None,
seq_encoder: Optional[Seq2SeqEncoder] = None,
feedforward: Optional[FeedForward] = None,
dropout: Optional[float] = None,
incl_neutral: Optional[bool] = False,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super().__init__(vocab, regularizer)
self.label_namespace = label_namespace
self.text_field_embedder = text_field_embedder
self.num_labels = self.vocab.get_vocab_size(label_namespace)
self.encoder = encoder
self.seq_encoder = seq_encoder
if self.seq_encoder is not None:
self.attention_vector = Parameter(torch.Tensor(self.seq_encoder.get_output_dim()))
self.attention_layer = DotProductAttention(normalize=True)
embedding_output_dim = self.text_field_embedder.get_output_dim()
if dropout is not None:
self.dropout = torch.nn.Dropout(dropout)
self.variational_dropout = InputVariationalDropout(dropout)
else:
self.dropout = None
self._feedforward = feedforward
if feedforward is not None:
output_dim = feedforward.get_output_dim()
elif encoder is not None:
output_dim = self.encoder.get_output_dim()
elif seq_encoder is not None:
output_dim = self.seq_encoder.get_output_dim()
else:
output_dim = embedding_output_dim
# Have to create a tag projection layer for each label in the
# multi label classifier
self._tag_projection_layers: Any = []
for k in range(self.num_labels):
tag_projection_layer = Linear(output_dim, 1)
self.add_module(f'tag_projection_layer_{k}', tag_projection_layer)
self._tag_projection_layers.append(tag_projection_layer)
self.output_activation = torch.nn.Sigmoid()
self.loss_criterion = torch.nn.BCEWithLogitsLoss(reduction='mean')
self.incl_neutral = incl_neutral
self.metrics = {"jaccard_index": JaccardIndex(self.incl_neutral)}
if encoder is not None:
check_dimensions_match(embedding_output_dim, encoder.get_input_dim(),
"text field embedding dim", "encoder input dim")
if feedforward is not None and encoder is not None:
check_dimensions_match(encoder.get_output_dim(), feedforward.get_input_dim(),
"encoder output dim", "feedforward input dim")
elif feedforward is not None and encoder is None:
check_dimensions_match(embedding_output_dim, feedforward.get_input_dim(),
"text field output dim", "feedforward input dim")
if self.seq_encoder is not None:
self.reset_parameters()
initializer(self)
def reset_parameters(self):
        '''
        Initialises the attention vector used for dot-product pooling.
        '''
torch.nn.init.uniform_(self.attention_vector, -0.01, 0.01)
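
    # Forward pass overview: embed tokens -> optional Seq2Seq encoding with
    # dot-product attention pooling (or a Seq2Vec encoder) -> optional
    # feedforward -> one sigmoid logit per label, thresholded at 0.5.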
@overrides
def forward(self, # type: ignore
tokens: Dict[str, torch.LongTensor],
labels: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None
) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
tokens : ``Dict[str, torch.LongTensor]``, required
The output of ``TextField.as_array()``, which should typically be passed directly to a
``TextFieldEmbedder``. This output is a dictionary mapping keys to ``TokenIndexer``
tensors. At its most basic, using a ``SingleIdTokenIndexer`` this is: ``{"tokens":
Tensor(batch_size, num_tokens)}``. This dictionary will have the same keys as were used
for the ``TokenIndexers`` when you created the ``TextField`` representing your
sequence. The dictionary is designed to be passed directly to a ``TextFieldEmbedder``,
which knows how to combine different word representations into a single vector per
token in your input.
labels : ``torch.LongTensor``, optional (default = ``None``)
A torch tensor representing the multiple labels that the sample
can be as a one hot vector where each True label is 1 and the
rest 0.
``(batch_size, num_labels)``.
metadata : ``List[Dict[str, Any]]``, optional, (default = None)
            metadata containing:
1. ``text`` - Original sentence
2. ``words`` - Tokenised words from the sentence
3. ``ID`` - Optionally the ID of the sample
Returns
-------
An output dictionary consisting of:
logits : ``torch.FloatTensor``
The logits that are the output of the ``N`` tag projection layers
where each projection layer represents a different tag.
probs: ``torch.FloatTensor``
A tensor of shape ``(batch_size, num_labels)``
            The probability of each label for the sample; a value > 0.5
            means the label is assigned to the sample.
labels : ``List[List[int]]``
The predicted labels where the inner list represents the multi label
classification.
loss : ``torch.FloatTensor``, optional
A scalar loss to be optimised. Only computed if gold label ``labels`` are provided.
words : ``List[List[str]]``
The tokens that were given as input
text: ``List[str]``
The text that was given to the tokeniser.
ID: ``List[str]``
            The ID associated with the training example. Only returned if IDs are provided.
"""
embedded_text_input = self.text_field_embedder(tokens)
mask = util.get_text_field_mask(tokens)
encoded_text = embedded_text_input
batch_size = embedded_text_input.shape[0]
if self.dropout is not None:
encoded_text = self.variational_dropout(encoded_text)
if self.seq_encoder is not None:
encoded_text = self.seq_encoder(encoded_text, mask)
encoded_text = self.variational_dropout(encoded_text)
attention_vector = self.attention_vector.unsqueeze(0).expand(batch_size, -1)
attention_weights = self.attention_layer(attention_vector,
encoded_text,
mask)
attention_weights = attention_weights.unsqueeze(-1)
weighted_encoded_text_seq = encoded_text * attention_weights
weighted_encoded_text_vec = weighted_encoded_text_seq.sum(1)
encoded_text = self.dropout(weighted_encoded_text_vec)
if self.encoder is not None:
encoded_text = self.encoder(encoded_text, mask)
if self.dropout is not None:
encoded_text = self.dropout(encoded_text)
# Dropout is applied after each layer for feed forward if specified
# in the config.
if self._feedforward is not None:
encoded_text = self._feedforward(encoded_text)
        # Allocate the logits on the same device as the encoded text so the
        # model also runs on GPU.
        all_label_logits = torch.empty(batch_size, self.num_labels,
                                       device=encoded_text.device)
for i in range(len(self._tag_projection_layers)):
tag_projection = getattr(self, f'tag_projection_layer_{i}')
i_tag_predictions = tag_projection(encoded_text).reshape(-1)
all_label_logits[:, i] = i_tag_predictions
probs = self.output_activation(all_label_logits)
predicted_labels = probs > 0.5
output = {'probs': probs, 'logits': all_label_logits,
'labels': predicted_labels}
if labels is not None:
labels = labels.float()  # keep the labels on the same device as the logits
loss = self.loss_criterion(all_label_logits, labels)
output["loss"] = loss
for metric in self.metrics.values():
metric(predicted_labels, labels)
if metadata is not None:
words, texts, ids = [], [], []
for sample in metadata:
words.append(sample['words'])
texts.append(sample['text'])
if 'ID' in sample:
ids.append(sample['ID'])
output["words"] = words
output["text"] = texts
if ids:
output['ID'] = ids
return output
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Converts the predicted label indices to human-readable labels. ``output_dict["readable_labels"]``
is a list of lists which will contain zero or more readable labels.
The type associated to the value of ``output_dict["readable_labels"]`` is
List[List[str]].
"""
readable_labels: List[List[str]] = []
for sample in output_dict['labels']:
sample_labels: List[str] = []
# ``sample`` should be a sequence (List[int]) of 0's and 1's.
for index, multi_label in enumerate(sample):
if multi_label:
word_label = self.vocab.get_token_from_index(index, namespace=self.label_namespace)
sample_labels.append(word_label)
readable_labels.append(sample_labels)
output_dict['readable_labels'] = readable_labels
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metrics_to_return = {metric_name: metric.get_metric(reset) for
metric_name, metric in self.metrics.items()}
return metrics_to_return | 48.197802 | 104 | 0.636039 | 1,521 | 13,158 | 5.340565 | 0.21236 | 0.028438 | 0.018835 | 0.015758 | 0.13234 | 0.0943 | 0.064139 | 0.01625 | 0.00911 | 0 | 0 | 0.004749 | 0.27983 | 13,158 | 273 | 105 | 48.197802 | 0.852469 | 0.351649 | 0 | 0.07947 | 0 | 0 | 0.034865 | 0.006042 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033113 | false | 0 | 0.119205 | 0 | 0.178808 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
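# A minimal, self-contained sketch (hedged illustration, not part of the original file)
# of the attention pooling used in ``forward`` above: a learned attention vector scores
# every token, and the weighted sum collapses (batch, seq, dim) -> (batch, dim).
import torch
batch, seq_len, dim = 2, 5, 8
encoded = torch.randn(batch, seq_len, dim)
attention_vector = torch.empty(dim).uniform_(-0.01, 0.01)
scores = torch.softmax(encoded @ attention_vector, dim=1)  # (batch, seq_len)
pooled = (encoded * scores.unsqueeze(-1)).sum(1)           # (batch, dim)
assert pooled.shape == (batch, dim)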
711d1b4f75a4256d5a0cf38d84457010bf2940ef | 3,225 | py | Python | medcople.py | tks1998/statistical-function-and-algorithm-ML- | 2b287524690e05087da400d879c2f901e148a5e3 | [
"MIT"
] | null | null | null | medcople.py | tks1998/statistical-function-and-algorithm-ML- | 2b287524690e05087da400d879c2f901e148a5e3 | [
"MIT"
] | 1 | 2020-12-07T19:29:21.000Z | 2020-12-28T02:29:19.000Z | medcople.py | tks1998/statistical-function-and-algorithm-ML- | 2b287524690e05087da400d879c2f901e148a5e3 | [
"MIT"
] | null | null | null | import numpy as np
import math
from statistics import median
from scipy.stats import skew
import weightedstats as ws
from statsmodels.stats.stattools import medcouple
class Med_couple:
def __init__(self,data):
self.data = np.sort(data,axis = None)[::-1] # sorted decreasing
self.med = np.median(self.data)
self.scale = 2*np.amax(np.absolute(self.data))
self.Zplus = [(x-self.med)/self.scale for x in self.data if x>=self.med]
self.Zminus = [(x-self.med)/self.scale for x in self.data if x<=self.med]
self.p = len(self.Zplus)
self.q = len(self.Zminus)
def H(self,i,j):
a = self.Zplus[i]
b = self.Zminus[j]
if a==b:
return np.sign(self.p - 1 - i - j)
else:
return (a+b)/(a-b)
def greater_h(self,u):
P = [0]*self.p
j = 0
for i in range(self.p-1,-1,-1):
while j < self.q and self.H(i,j)>u:
j+=1
P[i]=j-1
return P
def less_h(self,u):
Q = [0]*self.p
j = self.q - 1
for i in range(self.p):
while j>=0 and self.H(i,j) < u:
j=j-1
Q[i]=j+1
return Q
#Kth pair algorithm (Johnson & Mizoguchi)
def kth_pair_algorithm(self):
L = [0]*self.p
R = [self.q-1]*self.p
Ltotal = 0
Rtotal = self.p*self.q
medcouple_index = math.floor(Rtotal / 2)
while Rtotal - Ltotal > self.p:
middle_idx = [i for i in range(self.p) if L[i]<=R[i]]
row_medians = [self.H(i,math.floor((L[i]+R[i])/2)) for i in middle_idx]
weight = [R[i]-L[i] + 1 for i in middle_idx]
WM = ws.weighted_median(row_medians,weights = weight)
P = self.greater_h(WM)
Q = self.less_h(WM)
Ptotal = np.sum(P)+len(P)
Qtotal = np.sum(Q)
if medcouple_index <= Ptotal-1:
R = P.copy()
Rtotal = Ptotal
else:
if medcouple_index > Qtotal - 1:
L = Q.copy()
Ltotal = Qtotal
else:
return WM
remaining = np.array([])
for i in range(self.p):
for j in range(L[i],R[i]+1):
remaining = np.append(remaining,self.H(i,j))
find_index = medcouple_index-Ltotal
k_minimum_element = remaining[np.argpartition(remaining,find_index)]
# print(find_index, 'searching in the array', sorted(remaining))
return k_minimum_element[find_index]
def naive_algorithm_testing(self):
result = [self.H(i,j) for i in range(self.p) for j in range(self.q)]
return np.median(result)
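# Hedged sanity check (not part of the original file): for a perfectly symmetric sample
# the medcouple is 0, so the naive O(p*q) kernel above should return exactly 0 here.
_sym = Med_couple(np.array([1., 2., 3., 4., 5.]))
assert _sym.naive_algorithm_testing() == 0.0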
if __name__ == '__main__':
sum=0
for i in range(1000):
data = np.random.randint(low = 0, high = 200000, size = 1000)
A = Med_couple(data)
sum+=abs(medcouple(data)-A.kth_pair_algorithm())
# print(skew(data))
# print("kth",A.kth_pair_algorithm())
# print("naive my code",A.naive_algorithm_testing())
# print("naive",medcouple(data))
print(sum) | 27.801724 | 83 | 0.52124 | 471 | 3,225 | 3.471338 | 0.227176 | 0.039755 | 0.029358 | 0.040367 | 0.185321 | 0.13211 | 0.102752 | 0.088073 | 0.088073 | 0.088073 | 0 | 0.019066 | 0.349457 | 3,225 | 116 | 84 | 27.801724 | 0.760248 | 0.077209 | 0 | 0.063291 | 0 | 0 | 0.002695 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075949 | false | 0 | 0.075949 | 0 | 0.253165 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
711f7d7edce90878e1c7f4456d59b5282f3d8837 | 3,644 | py | Python | shitty_tools/evil.py | njatkinson/shitty_tools | 78c56eba331728d610d12c17fa5b34120fe31f03 | [
"WTFPL"
] | null | null | null | shitty_tools/evil.py | njatkinson/shitty_tools | 78c56eba331728d610d12c17fa5b34120fe31f03 | [
"WTFPL"
] | null | null | null | shitty_tools/evil.py | njatkinson/shitty_tools | 78c56eba331728d610d12c17fa5b34120fe31f03 | [
"WTFPL"
] | null | null | null | from sqlalchemy.orm import relationship
from sqlalchemy import and_
def create_attribute_associator(entity_id_col, eav_cls, eav_entity_id_col, eav_attr_col, eav_value_col):
'''
Returns a class method that allows one to associate attributes in an Entity-Attribute-Value table
with a sqlalchemy class and then access those attributes as properties of the entity class.
Example usage:
>>> from sqlalchemy import Column, ForeignKey, Index, Integer, String
>>> from sqlalchemy.orm import relationship
>>> from sqlalchemy.ext.declarative import declarative_base
>>> Base = declarative_base()
>>> metadata = Base.metadata
>>>
>>> class Eav(Base):
... __tablename__ = 'eav'
... __table_args__ = (
... Index('e_a_uq', 'entity_id', 'attribute', unique=True),
... )
... id = Column(Integer, primary_key=True)
... entity_id = Column(ForeignKey('entity.id', ondelete='CASCADE', onupdate='CASCADE'), nullable=False)
... attribute = Column(String(255), nullable=False)
... value = Column(String(255))
...
>>>
>>> class Entity(Base):
... __tablename__ = 'entity'
... id = Column(Integer, primary_key=True)
... name = Column(String(255), nullable=False)
... _add_attribute = create_attribute_associator(id, Eav, Eav.entity_id, Eav.attribute, Eav.value)
...
>>> Entity._add_attribute('foo')
>>> Entity._add_attribute('bar')
>>>
>>> dir(Entity)
['__class__', '__delattr__', '__dict__', '__doc__', '__format__', '__getattribute__', '__hash__', '__init__',
'__mapper__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__',
'__str__', '__subclasshook__', '__table__', '__tablename__', '__weakref__', '_add_attribute', '_bar_get',
'_bar_obj', '_bar_set', '_decl_class_registry', '_foo_get', '_foo_obj', '_foo_set', '_sa_class_manager',
'bar', 'foo', 'id', 'metadata', 'name']
:param entity_id_col: The id column of your entity
:param eav_cls: The sqlalchemy class of the entity attribute value (EAV) table
:param eav_entity_id_col: The foreign key column from the EAV table to the entity table
:param eav_attr_col: The EAV table column that stores the attribute name
:param eav_value_col: The EAV table column that stores the attribute value
:return: class method with a signature like ``add_attribute(cls, attr_name, lazy='joined')``
'''
attr_col_name = eav_attr_col.key
value_col_name = eav_value_col.key
@classmethod
def add_attribute(cls, attr_name, lazy='joined'):
obj_name = '_%s_obj' % attr_name
getter_name = '_%s_get' % attr_name
setter_name = '_%s_set' % attr_name
rel = relationship(eav_cls,
primaryjoin=and_(entity_id_col == eav_entity_id_col,
eav_attr_col == attr_name),
uselist=False, lazy=lazy)
def getter(self):
obj = getattr(self, obj_name)
return getattr(obj, value_col_name)
def setter(self, value):
obj = getattr(self, obj_name)
if obj is None:
obj = eav_cls(**{attr_col_name: attr_name, value_col_name: value})
setattr(self, obj_name, obj)
else:
setattr(obj, value_col_name, value)
prop = property(getter, setter)
setattr(cls, obj_name, rel)
setattr(cls, getter_name, getter)
setattr(cls, setter_name, setter)
setattr(cls, attr_name, prop)
return add_attribute | 46.126582 | 114 | 0.63831 | 440 | 3,644 | 4.809091 | 0.265909 | 0.041588 | 0.031191 | 0.026465 | 0.213611 | 0.167297 | 0.139887 | 0.039698 | 0.039698 | 0 | 0 | 0.003257 | 0.241767 | 3,644 | 79 | 115 | 46.126582 | 0.762577 | 0.585071 | 0 | 0.066667 | 0 | 0 | 0.020194 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
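# Hedged usage sketch for ``create_attribute_associator`` (assumes the ``Base``, ``Eav``
# and ``Entity`` declarative classes from the docstring above plus an in-memory SQLite
# engine; none of these names exist in the module itself):
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
entity = Entity(name='thing')
entity.foo = 'hello'  # routed through the generated setter into an Eav row
session.add(entity)
session.commit()
print(session.query(Entity).first().foo)  # -> 'hello'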
712033ec7a6e7fd4c8901d3c8d26af890c676809 | 2,168 | py | Python | backend/models/roboschool_fc.py | AroMorin/DNNOP | 271e65811fe7cadcffc8155049e256fa78c0c5c6 | [
"MIT"
] | 6 | 2020-01-14T00:01:34.000Z | 2021-12-28T14:31:05.000Z | backend/models/roboschool_fc.py | AroMorin/DNNOP | 271e65811fe7cadcffc8155049e256fa78c0c5c6 | [
"MIT"
] | null | null | null | backend/models/roboschool_fc.py | AroMorin/DNNOP | 271e65811fe7cadcffc8155049e256fa78c0c5c6 | [
"MIT"
] | 1 | 2020-09-06T10:44:29.000Z | 2020-09-06T10:44:29.000Z | """A script that defines a simple FC model for function solving"""
import torch.nn as nn
import numpy as np
import torch
class Net(nn.Module):
def __init__(self, model_params):
super(Net, self).__init__()
model_params = self.ingest_params_lvl1(model_params)
ins = model_params['in features']
outs = model_params['number of outputs']
self.out_size = outs
self.fc1 = nn.Linear(ins, 512)
self.fc2 = nn.Linear(512, 16)
self.fc3 = nn.Linear(64, 32)  # unused: fc3 is commented out in forward(), and its dims do not match the fc2 -> fc4 path
self.fc4 = nn.Linear(16, outs)
self.drop = nn.Dropout(0.1)
self.act = nn.ReLU()
#self.act = nn.Tanh()
self.reps = 20
self.rep = 0
self.step = 0
self.val = torch.zeros(outs).half().cuda()
def ingest_params_lvl1(self, model_params):
assert type(model_params) is dict
default_params = {
"in features": 128,
"number of outputs": 18
}
default_params.update(model_params) # Update with user selections
return default_params
def generate_noise(self, x):
n = torch.empty_like(x)
n.normal_(mean=0., std=0.3)
return n.cuda()
# Called with either one element to determine the next action, or a batch
# during optimization. Returns a clamped numpy array (see the .numpy() call below).
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
#x = self.drop(x)
x = self.fc2(x)
x = self.act(x)
#x = self.drop(x)
#x = self.fc3(x)
#x = self.act(x)
#x = self.drop(x)
x = self.fc4(x).squeeze().clamp_(-1., 1.)
#self.repeat(x)
return x.cpu().detach().numpy()
def repeat(self, x):
if self.rep > self.reps:
self.reset(x)
self.rep = 0
else:
self.rep += 1
print(self.val, self.rep)
def reset(self, x):
default = torch.zeros(self.out_size).cuda()
choice = np.random.choice([0, 1], p=[0.5, 0.5])
if choice == 0:
self.val = default
else:
self.val = x.clone()
| 30.971429 | 74 | 0.535055 | 297 | 2,168 | 3.808081 | 0.377104 | 0.04863 | 0.05305 | 0.023873 | 0.066313 | 0.066313 | 0.066313 | 0.066313 | 0.066313 | 0.066313 | 0 | 0.035392 | 0.335332 | 2,168 | 69 | 75 | 31.42029 | 0.74948 | 0.153598 | 0 | 0.113208 | 0 | 0 | 0.030769 | 0 | 0 | 0 | 0 | 0 | 0.018868 | 1 | 0.113208 | false | 0 | 0.056604 | 0 | 0.245283 | 0.018868 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
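# Illustrative check (not from the original file) of the default-merging behaviour in
# ``ingest_params_lvl1`` above: user-supplied keys override the defaults, missing keys
# fall back to them.
defaults = {"in features": 128, "number of outputs": 18}
defaults.update({"number of outputs": 4})
print(defaults)  # {'in features': 128, 'number of outputs': 4}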
7120c63dc1de2d2819806215bfba1cf552bbc4da | 666 | py | Python | recipes/Python/576838_Recursivemethod/recipe-576838.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/576838_Recursivemethod/recipe-576838.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/576838_Recursivemethod/recipe-576838.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | def recursive(func):
func.func_globals[func.__name__] = func
return func
class Test:
def method(self, x = False):
if x:
print(x)
else:
self.method("I'm method")
@staticmethod
def smethod(x = False):
if x:
print(x)
else:
method("I'm static method")
@staticmethod
@recursive
def rmethod(x = False):
if x:
print(x)
else:
rmethod("I'm recursive method")
test = Test()
test.method() # I'm method
test.rmethod() # I'm recursive method
test.smethod() # raises NameError: global name 'method' is not defined
| 20.8125 | 70 | 0.540541 | 82 | 666 | 4.329268 | 0.329268 | 0.028169 | 0.067606 | 0.076056 | 0.31831 | 0.31831 | 0.160563 | 0 | 0 | 0 | 0 | 0 | 0.34985 | 666 | 31 | 71 | 21.483871 | 0.819861 | 0.127628 | 0 | 0.423077 | 0 | 0 | 0.081456 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0 | 0 | 0.230769 | 0.115385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
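# Note on ``rmethod`` above: ``@recursive`` is listed closest to the def, so it receives
# the plain function and publishes it into the module globals before ``@staticmethod``
# wraps it; that published global is what makes the bare-name call resolve.
# Illustrative sketch (not part of the original recipe):
class Test2:
    @staticmethod
    @recursive
    def ping(n=3):
        if n:
            print(n)
            ping(n - 1)  # resolves via the global published by @recursive
Test2.ping()  # prints 3, 2, 1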
7125865039e4808ac309b57d84350350e5e69e6d | 4,858 | py | Python | tests/gitlab_test_utils.py | jarda-wien/gitlabber | e3e53b183233be6b08c47a8ce1264415dc7af6e4 | [
"MIT"
] | 344 | 2020-04-28T16:59:02.000Z | 2022-03-30T08:50:58.000Z | tests/gitlab_test_utils.py | jarda-wien/gitlabber | e3e53b183233be6b08c47a8ce1264415dc7af6e4 | [
"MIT"
] | 86 | 2020-04-28T13:21:37.000Z | 2022-03-31T12:51:29.000Z | tests/gitlab_test_utils.py | jarda-wien/gitlabber | e3e53b183233be6b08c47a8ce1264415dc7af6e4 | [
"MIT"
] | 64 | 2020-04-29T11:53:14.000Z | 2022-03-23T09:41:05.000Z | import pytest
import json
from unittest import mock
from gitlabber import gitlab_tree
URL = "http://gitlab.my.com/"
TOKEN = "MOCK_TOKEN"
GROUP_URL = "http://gitlab.my.com/group"
GROUP_NAME = "group"
SUBGROUP_URL = "http://gitlab.my.com/group/subgroup"
SUBGROUP_NAME = "subgroup"
PROJECT_URL = "http://gitlab.my.com/group/subgroup/project/project.git"
PROJECT_NAME = "project"
YAML_TEST_INPUT_FILE = "tests/test-input.yaml"
YAML_TEST_OUTPUT_FILE = "tests/test-output.yaml"
JSON_TEST_OUTPUT_FILE = "tests/test-output.json"
TREE_TEST_OUTPUT_FILE = "tests/test-output.tree"
class MockNode:
def __init__(self, id, name, url, subgroups=mock.MagicMock(), projects=mock.MagicMock(), parent_id=None):
self.id = id
self.name = name
self.path = name
self.url = url
self.web_url = url
self.ssh_url_to_repo = url
self.http_url_to_repo = url
self.subgroups = subgroups
self.projects = projects
self.parent_id = parent_id
class Listable:
def __init__(self, list_result, get_result=None, archive_result=None):
self.list_result = list_result
self.get_result = get_result
self.archive_result = archive_result
def list(self, as_list=False, archived=None):
if archived is None:
return [self.list_result, self.archive_result] if self.archive_result is not None else [self.list_result]
elif archived is True:
return [self.archive_result]
else:
return [self.list_result]
def get(self, id):
if self.get_result is not None:
return self.get_result
else:
return self.list_result
def validate_root(root):
assert root.is_leaf is False
assert root.name == ""
assert root.url == "http://gitlab.my.com/"
assert len(root.children) == 1
assert root.height == 3
def validate_group(group):
assert group.name == GROUP_NAME
assert group.url == GROUP_URL
assert group.is_leaf is False
assert len(group.children) == 1
assert group.height == 2
def validate_subgroup(subgroup):
assert subgroup.name == SUBGROUP_NAME
assert subgroup.url == SUBGROUP_URL
assert subgroup.is_leaf is False
assert len(subgroup.children) == 1
assert subgroup.height == 1
def validate_project(project):
assert project.name == PROJECT_NAME
assert project.url == PROJECT_URL
assert project.is_leaf is True
assert len(project.children) == 0
def validate_tree(root):
validate_root(root)
validate_group(root.children[0])
validate_subgroup(root.children[0].children[0])
validate_project(root.children[0].children[0].children[0])
def create_test_gitlab(monkeypatch, includes=None, excludes=None, in_file=None):
gl = gitlab_tree.GitlabTree(
URL, TOKEN, "ssh", "name", includes=includes, excludes=excludes, in_file=in_file)
projects = Listable(MockNode(2, PROJECT_NAME, PROJECT_URL))
subgroup_node = MockNode(2, SUBGROUP_NAME, SUBGROUP_URL, projects=projects)
subgroups = Listable(subgroup_node)
groups = Listable(MockNode(2, GROUP_NAME, GROUP_URL,
subgroups=subgroups), subgroup_node)
monkeypatch.setattr(gl.gitlab, "groups", groups)
return gl
def create_test_gitlab_with_toplevel_subgroups(monkeypatch):
gl = gitlab_tree.GitlabTree(URL, TOKEN, "ssh", "path")
groups = Listable([MockNode(2, GROUP_NAME, GROUP_URL),
MockNode(2, GROUP_NAME, GROUP_URL, parent_id=1)])
monkeypatch.setattr(gl.gitlab, "groups", groups)
return gl
def create_test_gitlab_with_archived(monkeypatch, includes=None, excludes=None, in_file=None, archived=None):
gl = gitlab_tree.GitlabTree(
URL, TOKEN, "ssh", "name", includes=includes, excludes=excludes, in_file=in_file, archived=archived)
project_node = MockNode(1, PROJECT_NAME, PROJECT_URL)
archived_project_node = MockNode(
2, "_archived_" + PROJECT_NAME, "_archived_" + PROJECT_URL)
projects = Listable(project_node, archive_result=archived_project_node)
subgroup_node = MockNode(2, SUBGROUP_NAME, SUBGROUP_URL, projects=projects)
archived_subgroup_node = MockNode(
2, "_archived_" + SUBGROUP_NAME, "_archived_" + SUBGROUP_URL, projects=projects)
subgroups = Listable(subgroup_node, archive_result=archived_subgroup_node)
archived_subgroups = Listable(archived_subgroup_node, archive_result=archived_subgroup_node)
group_node = MockNode(2, GROUP_NAME, GROUP_URL, subgroups=archived_subgroups)
archived_group_node = MockNode(2, "_archived_" + GROUP_NAME, "_archived_" + GROUP_URL, subgroups=archived_subgroups)
groups = Listable(group_node, get_result=subgroup_node, archive_result=archived_group_node)
monkeypatch.setattr(gl.gitlab, "groups", groups)
# gl.print_tree()
return gl
| 36.253731 | 120 | 0.711404 | 640 | 4,858 | 5.140625 | 0.125 | 0.039514 | 0.025532 | 0.022796 | 0.426444 | 0.361094 | 0.306383 | 0.206383 | 0.130091 | 0.130091 | 0 | 0.006332 | 0.18732 | 4,858 | 133 | 121 | 36.526316 | 0.827001 | 0.003088 | 0 | 0.113208 | 0 | 0 | 0.077257 | 0.017971 | 0 | 0 | 0 | 0 | 0.179245 | 1 | 0.113208 | false | 0 | 0.037736 | 0 | 0.245283 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7126a909c8eb6e0615ba8dbc55706b97b9c85813 | 33,512 | py | Python | mindquantum/simulator/simulator.py | Takishima/mindquantum | e90dfe474b759023d7ae18281b9a87cb8d223d04 | [
"Apache-2.0"
] | null | null | null | mindquantum/simulator/simulator.py | Takishima/mindquantum | e90dfe474b759023d7ae18281b9a87cb8d223d04 | [
"Apache-2.0"
] | null | null | null | mindquantum/simulator/simulator.py | Takishima/mindquantum | e90dfe474b759023d7ae18281b9a87cb8d223d04 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Simulator."""
import numpy as np
from mindquantum import mqbackend as mb
from mindquantum.core.circuit import Circuit
from mindquantum.core.gates import BarrierGate, Measure, MeasureResult
from mindquantum.core.gates.basic import BasicGate
from mindquantum.core.operators import Hamiltonian
from mindquantum.core.operators.hamiltonian import MODE
from mindquantum.core.parameterresolver import ParameterResolver
from mindquantum.utils import ket_string
from mindquantum.utils.type_value_check import (
_check_and_generate_pr_type,
_check_input_type,
_check_int_type,
_check_seed,
_check_value_should_not_less,
)
SUPPORTED_SIMULATOR = ['projectq']
def get_supported_simulator():
"""
Get the simulator names supported by MindQuantum.
Returns:
list, The supported simulator list.
"""
return SUPPORTED_SIMULATOR
class Simulator:
"""
Quantum simulator that simulates quantum circuits.
Args:
backend (str): which backend you want. The supported backends can be found
in SUPPORTED_SIMULATOR.
n_qubits (int): the number of qubits of this quantum simulator.
seed (int): the random seed for this simulator. If None, the seed will be generated
by `numpy.random.randint`. Default: None.
Raises:
TypeError: if `backend` is not str.
TypeError: if `n_qubits` is not int.
TypeError: if `seed` is not int.
ValueError: if `backend` is not supported.
ValueError: if `n_qubits` is negative.
ValueError: if `seed` is less than 0 or greater than 2**23 - 1.
Examples:
>>> from mindquantum import Simulator
>>> from mindquantum import qft
>>> sim = Simulator('projectq', 2)
>>> sim.apply_circuit(qft(range(2)))
>>> sim.get_qs()
array([0.5+0.j, 0.5+0.j, 0.5+0.j, 0.5+0.j])
"""
def __init__(self, backend, n_qubits, seed=None):
"""Initialize a Simulator object."""
_check_input_type('backend', str, backend)
_check_int_type('n_qubits', n_qubits)
_check_value_should_not_less('n_qubits', 0, n_qubits)
if seed is None:
seed = np.random.randint(1, 2**23)
_check_seed(seed)
if backend not in SUPPORTED_SIMULATOR:
raise ValueError(f"backend {backend} not supported, now we support {SUPPORTED_SIMULATOR}!")
self.backend = backend
self.seed = seed
self.n_qubits = n_qubits
if backend == 'projectq':
self.sim = mb.projectq(seed, n_qubits)
def copy(self):
"""
Copy this simulator.
Returns:
Simulator, a copy of this simulator.
Examples:
>>> from mindquantum import RX, Simulator
>>> sim = Simulator('projectq', 1)
>>> sim.apply_gate(RX(1).on(0))
>>> sim.flush()
>>> sim2 = sim.copy()
>>> sim2.apply_gate(RX(-1).on(0))
>>> sim2
projectq simulator with 1 qubit (little endian).
Current quantum state:
1¦0⟩
"""
sim = Simulator(self.backend, self.n_qubits, self.seed)
sim.sim = self.sim.copy()
return sim
def __str__(self):
"""Return a string representation of the object."""
state = self.get_qs()
s = f"{self.backend} simulator with {self.n_qubits} qubit{'s' if self.n_qubits > 1 else ''} (little endian)."
s += "\nCurrent quantum state:\n"
if self.n_qubits < 4:
s += '\n'.join(ket_string(state))
else:
s += state.__str__()
return s
def __repr__(self):
"""Return a string representation of the object."""
return self.__str__()
def reset(self):
"""
Reset simulator to zero state.
Examples:
>>> from mindquantum import Simulator
>>> from mindquantum import qft
>>> sim = Simulator('projectq', 2)
>>> sim.apply_circuit(qft(range(2)))
>>> sim.reset()
>>> sim.get_qs()
array([1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j])
"""
self.sim.reset()
def flush(self):
"""
Flush the cached gates; this works for the projectq simulator.
The projectq simulator caches several gates and fuses them into a bigger gate before
acting on the quantum state. The flush command asks the simulator to fuse the currently
cached gates and apply them to the quantum state.
Examples:
>>> from mindquantum import Simulator
>>> from mindquantum import H
>>> sim = Simulator('projectq', 1)
>>> sim.apply_gate(H.on(0))
>>> sim.flush()
"""
if self.backend == 'projectq':
self.sim.run()
def apply_gate(self, gate, pr=None, diff=False):
"""
Apply a gate on this simulator, can be a quantum gate or a measurement operator.
Args:
gate (BasicGate): The gate you want to apply.
pr (Union[numbers.Number, numpy.ndarray, ParameterResolver, list]): The
parameter for parameterized gate. Default: None.
diff (bool): Whether to apply the derivative gate on this simulator. Default: False.
Returns:
int or None, if the gate is a measure gate, then return the collapsed state; otherwise
return None.
Raises:
TypeError: if `gate` is not a BasicGate.
ValueError: if any qubit of `gate` is higher than simulator qubits.
ValueError: if `gate` is parameterized, but no parameter supplied.
TypeError: if `gate` is parameterized and `pr` is not a ParameterResolver.
Examples:
>>> import numpy as np
>>> from mindquantum import Simulator
>>> from mindquantum import RY, Measure
>>> sim = Simulator('projectq', 1)
>>> sim.apply_gate(RY('a').on(0), np.pi/2)
>>> sim.get_qs()
array([0.70710678+0.j, 0.70710678+0.j])
>>> sim.apply_gate(Measure().on(0))
1
>>> sim.get_qs()
array([0.+0.j, 1.+0.j])
"""
_check_input_type('gate', BasicGate, gate)
if not isinstance(gate, BarrierGate):
gate_max = max(max(gate.obj_qubits, gate.ctrl_qubits))
if self.n_qubits < gate_max:
raise ValueError(f"qubits of gate {gate} is higher than simulator qubits.")
if isinstance(gate, Measure):
return self.sim.apply_measure(gate.get_cpp_obj())
if pr is None:
if gate.parameterized:
raise ValueError("apply a parameterized gate needs a parameter_resolver")
self.sim.apply_gate(gate.get_cpp_obj())
else:
pr = _check_and_generate_pr_type(pr, gate.coeff.params_name)
self.sim.apply_gate(gate.get_cpp_obj(), pr.get_cpp_obj(), diff)
return None
def apply_circuit(self, circuit, pr=None):
"""
Apply a circuit on this simulator.
Args:
circuit (Circuit): The quantum circuit you want to apply on this simulator.
pr (Union[ParameterResolver, dict, numpy.ndarray, list, numbers.Number]): The
parameter resolver for this circuit. If the circuit is not parameterized,
this arg should be None. Default: None.
Returns:
MeasureResult or None, if the circuit has a measure gate, then return a MeasureResult;
otherwise return None.
Examples:
>>> import numpy as np
>>> from mindquantum import Circuit, H
>>> from mindquantum import Simulator
>>> sim = Simulator('projectq', 2)
>>> sim.apply_circuit(Circuit().un(H, 2))
>>> sim.apply_circuit(Circuit().ry('a', 0).ry('b', 1), np.array([1.1, 2.2]))
>>> sim
projectq simulator with 2 qubits (little endian).
Current quantum state:
-0.0721702531972066¦00⟩
-0.30090405886869676¦01⟩
0.22178317006196263¦10⟩
0.9246947752567126¦11⟩
>>> sim.apply_circuit(Circuit().measure(0).measure(1))
shots: 1
Keys: q1 q0│0.00 0.2 0.4 0.6 0.8 1.0
───────────┼───────────┴───────────┴───────────┴───────────┴───────────┴
11│▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
│
{'11': 1}
"""
_check_input_type('circuit', Circuit, circuit)
if self.n_qubits < circuit.n_qubits:
raise ValueError(f"Circuit has {circuit.n_qubits} qubits, which is more than simulator qubits.")
if circuit.has_measure_gate:
res = MeasureResult()
res.add_measure(circuit.all_measures.keys())
if circuit.params_name:
if pr is None:
raise ValueError("Applying a parameterized circuit needs a parameter_resolver")
pr = _check_and_generate_pr_type(pr, circuit.params_name)
else:
pr = ParameterResolver()
if circuit.has_measure_gate:
samples = np.array(
self.sim.apply_circuit_with_measure(circuit.get_cpp_obj(), pr.get_cpp_obj(), res.keys_map)
)
samples = samples.reshape((1, -1))
res.collect_data(samples)
return res
if circuit.params_name:
self.sim.apply_circuit(circuit.get_cpp_obj(), pr.get_cpp_obj())
else:
self.sim.apply_circuit(circuit.get_cpp_obj())
return None
def sampling(self, circuit, pr=None, shots=1, seed=None):
"""
Sample the measured qubits in the circuit. Sampling does not change the original quantum state of this simulator.
Args:
circuit (Circuit): The circuit that you want to evolution and do sampling.
pr (Union[None, dict, ParameterResolver]): The parameter
resolver for this circuit, if this circuit is a parameterized circuit.
Default: None.
shots (int): How many shots you want when sampling this circuit. Default: 1.
seed (int): Random seed for random sampling. If None, seed will be a random
int number. Default: None.
Returns:
MeasureResult, the measure result of sampling.
Examples:
>>> from mindquantum import Circuit, Measure
>>> from mindquantum import Simulator
>>> circ = Circuit().ry('a', 0).ry('b', 1)
>>> circ += Measure('q0_0').on(0)
>>> circ += Measure('q0_1').on(0)
>>> circ += Measure('q1').on(1)
>>> sim = Simulator('projectq', circ.n_qubits)
>>> res = sim.sampling(circ, {'a': 1.1, 'b': 2.2}, shots=100, seed=42)
>>> res
shots: 100
Keys: q1 q0_1 q0_0│0.00 0.122 0.245 0.367 0.49 0.612
──────────────────┼───────────┴───────────┴───────────┴───────────┴───────────┴
000│▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
│
011│▒▒▒▒▒▒▒▒▒
│
100│▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓
│
111│▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
│
{'000': 18, '011': 9, '100': 49, '111': 24}
"""
if not circuit.all_measures.map:
raise ValueError("circuit must have at least one measurement gate.")
_check_input_type("circuit", Circuit, circuit)
if self.n_qubits < circuit.n_qubits:
raise ValueError(f"Circuit has {circuit.n_qubits} qubits, which is more than simulator qubits.")
_check_int_type("sampling shots", shots)
_check_value_should_not_less("sampling shots", 1, shots)
if circuit.parameterized:
if pr is None:
raise ValueError("Sampling a parameterized circuit need a ParameterResolver")
if not isinstance(pr, (dict, ParameterResolver)):
raise TypeError("pr requires a dict or a ParameterResolver, but get {}!".format(type(pr)))
pr = ParameterResolver(pr)
else:
pr = ParameterResolver()
if seed is None:
seed = int(np.random.randint(1, 2 << 20))
else:
_check_seed(seed)
res = MeasureResult()
res.add_measure(circuit.all_measures.keys())
sim = self
if circuit.is_measure_end and not circuit.is_noise_circuit:
sim = Simulator(self.backend, self.n_qubits, self.seed)
sim.set_qs(self.get_qs())
sim.apply_circuit(circuit.remove_measure(), pr)
circuit = Circuit(circuit.all_measures.keys())
samples = np.array(
sim.sim.sampling(circuit.get_cpp_obj(), pr.get_cpp_obj(), shots, res.keys_map, seed)
).reshape((shots, -1))
res.collect_data(samples)
return res
def apply_hamiltonian(self, hamiltonian: Hamiltonian):
"""
Apply a hamiltonian to the simulator; the hamiltonian can be hermitian or non-hermitian.
Note:
The quantum state may no longer be normalized after the hamiltonian is applied.
Args:
hamiltonian (Hamiltonian): the hamiltonian you want to apply.
Examples:
>>> from mindquantum import Simulator
>>> from mindquantum import Circuit, Hamiltonian
>>> from mindquantum.core.operators import QubitOperator
>>> import scipy.sparse as sp
>>> sim = Simulator('projectq', 1)
>>> sim.apply_circuit(Circuit().h(0))
>>> sim.get_qs()
array([0.70710678+0.j, 0.70710678+0.j])
>>> ham1 = Hamiltonian(QubitOperator('Z0'))
>>> sim.apply_hamiltonian(ham1)
>>> sim.get_qs()
array([ 0.70710678+0.j, -0.70710678+0.j])
>>> sim.reset()
>>> ham2 = Hamiltonian(sp.csr_matrix([[1, 2], [3, 4]]))
>>> sim.apply_hamiltonian(ham2)
>>> sim.get_qs()
array([1.+0.j, 3.+0.j])
"""
_check_input_type('hamiltonian', Hamiltonian, hamiltonian)
_check_hamiltonian_qubits_number(hamiltonian, self.n_qubits)
self.sim.apply_hamiltonian(hamiltonian.get_cpp_obj())
def get_expectation(self, hamiltonian):
r"""
Get the expectation of the given hamiltonian. The hamiltonian could be non-hermitian.
.. math::
E = \left<\psi\right|H\left|\psi\right>
Args:
hamiltonian (Hamiltonian): The hamiltonian you want the expectation of.
Returns:
numbers.Number, the expectation value.
Examples:
>>> from mindquantum.core.operators import QubitOperator
>>> from mindquantum import Circuit, Simulator
>>> from mindquantum import Hamiltonian
>>> sim = Simulator('projectq', 1)
>>> sim.apply_circuit(Circuit().ry(1.2, 0))
>>> ham = Hamiltonian(QubitOperator('Z0'))
>>> sim.get_expectation(ham)
(0.36235775447667357+0j)
"""
if not isinstance(hamiltonian, Hamiltonian):
raise TypeError(f"hamiltonian requires a Hamiltonian, but got {type(hamiltonian)}")
_check_hamiltonian_qubits_number(hamiltonian, self.n_qubits)
return self.sim.get_expectation(hamiltonian.get_cpp_obj())
def get_qs(self, ket=False):
"""
Get current quantum state of this simulator.
Args:
ket (bool): Whether to return the quantum state in ket format or not.
Default: False.
Returns:
numpy.ndarray, the current quantum state.
Examples:
>>> from mindquantum import qft, Simulator
>>> sim = Simulator('projectq', 2)
>>> sim.apply_circuit(qft(range(2)))
>>> sim.get_qs()
array([0.5+0.j, 0.5+0.j, 0.5+0.j, 0.5+0.j])
"""
if not isinstance(ket, bool):
raise TypeError(f"ket requires a bool, but get {type(ket)}")
state = np.array(self.sim.get_qs())
if ket:
return '\n'.join(ket_string(state))
return state
def set_qs(self, quantum_state):
"""
Set the quantum state of this simulator.
Args:
quantum_state (numpy.ndarray): the quantum state that you want.
Examples:
>>> from mindquantum import Simulator
>>> import numpy as np
>>> sim = Simulator('projectq', 1)
>>> sim.get_qs()
array([1.+0.j, 0.+0.j])
>>> sim.set_qs(np.array([1, 1]))
>>> sim.get_qs()
array([0.70710678+0.j, 0.70710678+0.j])
"""
if not isinstance(quantum_state, np.ndarray):
raise TypeError(f"quantum state must be a ndarray, but get {type(quantum_state)}")
if len(quantum_state.shape) != 1:
raise ValueError(f"vec requires a 1-dimensional array, but get {quantum_state.shape}")
n_qubits = np.log2(quantum_state.shape[0])
if n_qubits % 1 != 0:
raise ValueError(f"vec size {quantum_state.shape[0]} is not power of 2")
n_qubits = int(n_qubits)
if self.n_qubits != n_qubits:
raise ValueError(f"{n_qubits} qubits vec does not match with simulation qubits ({self.n_qubits})")
self.sim.set_qs(quantum_state / np.sqrt(np.sum(np.abs(quantum_state) ** 2)))
def get_expectation_with_grad(
self,
hams,
circ_right,
circ_left=None,
simulator_left=None,
encoder_params_name=None,
ansatz_params_name=None,
parallel_worker=None,
):
r"""
Get a function that returns the forward value and the gradient w.r.t. circuit parameters.
This method is designed to calculate the expectation and its gradient as shown below.
.. math::
E = \left<\varphi\right|U_l^\dagger H U_r \left|\psi\right>
where :math:`U_l` is circ_left, :math:`U_r` is circ_right, :math:`H` is hams
and :math:`\left|\psi\right>` is the current quantum state of this simulator,
and :math:`\left|\varphi\right>` is the quantum state of `simulator_left`.
Args:
hams (Hamiltonian): The hamiltonian that need to get expectation.
circ_right (Circuit): The :math:`U_r` circuit described above.
circ_left (Circuit): The :math:`U_l` circuit described above. By default, this circuit
will be None, and in this situation, :math:`U_l` will be equal to
:math:`U_r`. Default: None.
simulator_left (Simulator): The simulator that contains :math:`\left|\varphi\right>`. If
None, then :math:`\left|\varphi\right>` is assumed to be equals to :math:`\left|\psi\right>`.
Default: None.
encoder_params_name (list[str]): To specify which parameters belong to the encoder,
which encodes the input data into a quantum state. The encoder data
can be a batch. Default: None.
ansatz_params_name (list[str]): To specify which parameters belong to the ansatz,
which will be trained during training. Default: None.
parallel_worker (int): The number of parallel workers. The parallel workers can handle
batches in parallel threads. Default: None.
Returns:
GradOpsWrapper, a grad ops wrapper that contains the information needed to generate this grad ops.
Examples:
>>> import numpy as np
>>> from mindquantum import Simulator, Hamiltonian
>>> from mindquantum import Circuit
>>> from mindquantum.core.operators import QubitOperator
>>> circ = Circuit().ry('a', 0)
>>> ham = Hamiltonian(QubitOperator('Z0'))
>>> sim = Simulator('projectq', 1)
>>> grad_ops = sim.get_expectation_with_grad(ham, circ)
>>> grad_ops(np.array([1.0]))
(array([[0.54030231+0.j]]), array([[[-0.84147098+0.j]]]))
>>> sim1 = Simulator('projectq', 1)
>>> prep_circ = Circuit().h(0)
>>> ansatz = Circuit().ry('a', 0).rz('b', 0).ry('c', 0)
>>> sim1.apply_circuit(prep_circ)
>>> sim2 = Simulator('projectq', 1)
>>> ham = Hamiltonian(QubitOperator(""))
>>> grad_ops = sim2.get_expectation_with_grad(ham, ansatz, Circuit(), simulator_left=sim1)
>>> f, g = grad_ops(np.array([7.902762e-01, 2.139225e-04, 7.795934e-01]))
>>> f
array([[0.99999989-7.52279618e-05j]])
"""
if isinstance(hams, Hamiltonian):
hams = [hams]
elif not isinstance(hams, list):
raise TypeError(f"hams requires a Hamiltonian or a list of Hamiltonian, but get {type(hams)}")
for h_tmp in hams:
_check_input_type("hams's element", Hamiltonian, h_tmp)
_check_hamiltonian_qubits_number(h_tmp, self.n_qubits)
_check_input_type("circ_right", Circuit, circ_right)
if circ_right.is_noise_circuit:
raise ValueError("noise circuit not support yet.")
non_hermitian = False
if circ_left is not None:
_check_input_type("circ_left", Circuit, circ_left)
if circ_left.is_noise_circuit:
raise ValueError("noise circuit not support yet.")
non_hermitian = True
if simulator_left is not None:
_check_input_type("simulator_left", Simulator, simulator_left)
if self.backend != simulator_left.backend:
raise ValueError(
f"simulator_left should have the same backend as this simulator, \
which is {self.backend}, but get {simulator_left.backend}"
)
if self.n_qubits != simulator_left.n_qubits:
raise ValueError(
f"simulator_left should have the same n_qubits as this simulator, \
which is {self.n_qubits}, but get {simulator_left.n_qubits}"
)
non_hermitian = True
if non_hermitian and simulator_left is None:
simulator_left = self
if circ_left is None:
circ_left = circ_right
if circ_left.has_measure_gate or circ_right.has_measure_gate:
raise ValueError("circuit for variational algorithm cannot have measure gate")
if parallel_worker is not None:
_check_int_type("parallel_worker", parallel_worker)
if encoder_params_name is None and ansatz_params_name is None:
encoder_params_name = []
ansatz_params_name = list(circ_right.params_name)
for i in circ_left.params_name:
if i not in ansatz_params_name:
ansatz_params_name.append(i)
if encoder_params_name is None:
encoder_params_name = []
if ansatz_params_name is None:
ansatz_params_name = []
_check_input_type("encoder_params_name", list, encoder_params_name)
_check_input_type("ansatz_params_name", list, ansatz_params_name)
for i in encoder_params_name:
_check_input_type("Element of encoder_params_name", str, i)
for i in ansatz_params_name:
_check_input_type("Element of ansatz_params_name", str, i)
s1 = set(circ_right.params_name) | set(circ_left.params_name)
s2 = set(encoder_params_name) | set(ansatz_params_name)
if s1 - s2 or s2 - s1:
raise ValueError("encoder_params_name and ansatz_params_name are different with circuit parameters")
circ_n_qubits = max(circ_left.n_qubits, circ_right.n_qubits)
if self.n_qubits < circ_n_qubits:
raise ValueError(f"Simulator has {self.n_qubits} qubits, but circuit has {circ_n_qubits} qubits.")
version = "both"
if not ansatz_params_name:
version = "encoder"
if not encoder_params_name:
version = "ansatz"
def grad_ops(*inputs):
if version == "both" and len(inputs) != 2:
raise ValueError("Need two inputs!")
if version in ("encoder", "ansatz") and len(inputs) != 1:
raise ValueError("Need one input!")
if version == "both":
_check_encoder(inputs[0], len(encoder_params_name))
_check_ansatz(inputs[1], len(ansatz_params_name))
batch_threads, mea_threads = _thread_balance(inputs[0].shape[0], len(hams), parallel_worker)
inputs0 = inputs[0]
inputs1 = inputs[1]
if version == "encoder":
_check_encoder(inputs[0], len(encoder_params_name))
batch_threads, mea_threads = _thread_balance(inputs[0].shape[0], len(hams), parallel_worker)
inputs0 = inputs[0]
inputs1 = np.array([])
if version == "ansatz":
_check_ansatz(inputs[0], len(ansatz_params_name))
batch_threads, mea_threads = _thread_balance(1, len(hams), parallel_worker)
inputs0 = np.array([[]])
inputs1 = inputs[0]
if non_hermitian:
f_g1_g2 = self.sim.non_hermitian_measure_with_grad(
[i.get_cpp_obj() for i in hams],
[i.get_cpp_obj(hermitian=True) for i in hams],
circ_left.get_cpp_obj(),
circ_left.get_cpp_obj(hermitian=True),
circ_right.get_cpp_obj(),
circ_right.get_cpp_obj(hermitian=True),
inputs0,
inputs1,
encoder_params_name,
ansatz_params_name,
batch_threads,
mea_threads,
simulator_left.sim,
)
else:
f_g1_g2 = self.sim.hermitian_measure_with_grad(
[i.get_cpp_obj() for i in hams],
circ_right.get_cpp_obj(),
circ_right.get_cpp_obj(hermitian=True),
inputs0,
inputs1,
encoder_params_name,
ansatz_params_name,
batch_threads,
mea_threads,
)
res = np.array(f_g1_g2)
if version == 'both':
f = res[:, :, 0]
g1 = res[:, :, 1 : 1 + len(encoder_params_name)] # noqa:E203
g2 = res[:, :, 1 + len(encoder_params_name) :] # noqa:E203
return f, g1, g2
f = res[:, :, 0]
g = res[:, :, 1:]
return f, g
grad_wrapper = GradOpsWrapper(
grad_ops, hams, circ_right, circ_left, encoder_params_name, ansatz_params_name, parallel_worker
)
s = f'{self.n_qubits} qubit' + ('' if self.n_qubits == 1 else 's')
s += f' {self.backend} VQA Operator'
grad_wrapper.set_str(s)
return grad_wrapper
def _check_encoder(data, encoder_params_size):
if not isinstance(data, np.ndarray):
raise ValueError(f"encoder parameters need numpy array, but get {type(data)}")
data_shape = data.shape
if len(data_shape) != 2:
raise ValueError("encoder data requires a two dimension numpy array")
if data_shape[1] != encoder_params_size:
raise ValueError(
f"encoder parameters size do not match with encoder parameters name,\
need {encoder_params_size} but get {data_shape[1]}."
)
def _check_ansatz(data, ansatz_params_size):
"""Check ansatz."""
if not isinstance(data, np.ndarray):
raise ValueError(f"ansatz parameters need numpy array, but get {type(data)}")
data_shape = data.shape
if len(data_shape) != 1:
raise ValueError("ansatz data requires a one dimension numpy array")
if data_shape[0] != ansatz_params_size:
raise ValueError(
f"ansatz parameters size do not match with ansatz parameters name,\
need {ansatz_params_size} but get {data_shape[0]}"
)
def _thread_balance(n_prs, n_meas, parallel_worker):
"""Thread balance."""
if parallel_worker is None:
parallel_worker = n_meas * n_prs
if n_meas * n_prs <= parallel_worker:
batch_threads = n_prs
mea_threads = n_meas
else:
if n_meas < n_prs:
batch_threads = min(n_prs, parallel_worker)
mea_threads = min(n_meas, max(1, parallel_worker // batch_threads))
else:
mea_threads = min(n_meas, parallel_worker)
batch_threads = min(n_prs, max(1, parallel_worker // mea_threads))
return batch_threads, mea_threads
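# Illustrative check (not in the original file) of the balancing above: with 4 parameter
# sets, 3 hamiltonians and a budget of 6 workers, the larger dimension (the batch) gets
# the threads first: batch_threads=4, mea_threads=1.
assert _thread_balance(4, 3, 6) == (4, 1)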
def _check_hamiltonian_qubits_number(hamiltonian, sim_qubits):
"""Check hamiltonian qubits number."""
if hamiltonian.how_to != MODE['origin']:
if hamiltonian.n_qubits != sim_qubits:
raise ValueError(
f"Hamiltonian qubits is {hamiltonian.n_qubits}, not match \
with simulator qubits number {sim_qubits}"
)
else:
if hamiltonian.n_qubits > sim_qubits:
raise ValueError(f"Hamiltonian qubits is {hamiltonian.n_qubits}, which is bigger than simulator qubits.")
class GradOpsWrapper:
"""
Wrap the gradient operator together with the information that generated it.
Args:
grad_ops (Union[FunctionType, MethodType])): A function or a method
that return forward value and gradient w.r.t parameters.
hams (Hamiltonian): The hamiltonian that generate this grad ops.
circ_right (Circuit): The right circuit that generate this grad ops.
circ_left (Circuit): The left circuit that generate this grad ops.
encoder_params_name (list[str]): The encoder parameters name.
ansatz_params_name (list[str]): The ansatz parameters name.
parallel_worker (int): The number of parallel workers to run the batch.
"""
def __init__(self, grad_ops, hams, circ_right, circ_left, encoder_params_name, ansatz_params_name, parallel_worker):
"""Initialize a GradOpsWrapper object."""
self.grad_ops = grad_ops
self.hams = hams
self.circ_right = circ_right
self.circ_left = circ_left
self.encoder_params_name = encoder_params_name
self.ansatz_params_name = ansatz_params_name
self.parallel_worker = parallel_worker
self.str = ''
def __call__(self, *args):
"""Definition of a function call operator."""
return self.grad_ops(*args)
def set_str(self, s):
"""
Set expression for gradient operator.
Args:
s (str): The string of QNN operator.
"""
self.str = s
def inner_product(bra_simulator: Simulator, ket_simulator: Simulator):
"""
Calculate the inner product of the two states held by the given simulators.
Args:
bra_simulator (Simulator): The simulator that serve as bra state.
ket_simulator (Simulator): The simulator that serve as ket state.
Returns:
numbers.Number, the inner product of two quantum state.
Examples:
>>> from mindquantum import RX, RY, Simulator
>>> from mindquantum.simulator import inner_product
>>> bra_simulator = Simulator('projectq', 1)
>>> bra_simulator.apply_gate(RY(1.2).on(0))
>>> ket_simulator = Simulator('projectq', 1)
>>> ket_simulator.apply_gate(RX(2.3).on(0))
>>> inner_product(bra_simulator, ket_simulator)
"""
_check_input_type('bra_simulator', Simulator, bra_simulator)
_check_input_type('ket_simulator', Simulator, ket_simulator)
if bra_simulator.n_qubits != ket_simulator.n_qubits:
raise ValueError(
f"Two simulator should have same quantum state, \
but get {bra_simulator.n_qubits} and {ket_simulator.n_qubits}."
)
if bra_simulator.backend != ket_simulator.backend:
raise ValueError("The backend of two simulator should be same.")
if bra_simulator.backend == 'projectq' and ket_simulator.backend == 'projectq':
bra_simulator.flush()
ket_simulator.flush()
return mb.cpu_projectq_inner_product(bra_simulator.sim, ket_simulator.sim)
raise ValueError(f"backend for {bra_simulator.backend} not implement.")
__all__ = ['Simulator', 'get_supported_simulator', 'GradOpsWrapper', 'inner_product']
| 42.153459 | 120 | 0.586894 | 4,088 | 33,512 | 4.7182 | 0.106164 | 0.020686 | 0.021153 | 0.00674 | 0.366497 | 0.282922 | 0.217389 | 0.177105 | 0.141228 | 0.111209 | 0 | 0.024353 | 0.302787 | 33,512 | 794 | 121 | 42.206549 | 0.786946 | 0.397529 | 0 | 0.227273 | 0 | 0.002674 | 0.127635 | 0.0088 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064171 | false | 0 | 0.026738 | 0 | 0.144385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7126b8e0e2a112a169fa2ccb17434fdbeb6afcc6 | 10,853 | py | Python | seasonedParser/core.py | KevinMidboe/seasonMover | 380c4a02f48679c0204ecf1a5807718b93f2ff19 | [
"MIT"
] | null | null | null | seasonedParser/core.py | KevinMidboe/seasonMover | 380c4a02f48679c0204ecf1a5807718b93f2ff19 | [
"MIT"
] | 9 | 2017-09-29T11:35:37.000Z | 2020-02-19T09:34:15.000Z | seasonedParser/core.py | KevinMidboe/seasonedParser | 380c4a02f48679c0204ecf1a5807718b93f2ff19 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-08-25 23:22:27
# @Last Modified by: KevinMidboe
# @Last Modified time: 2019-02-02 01:04:25
from guessit import guessit
from babelfish import Language, LanguageReverseError
import hashlib
import os, errno
import shutil
import re
import tvdb_api
import click
from pprint import pprint
from titlecase import titlecase
import langdetect
from exceptions import InsufficientNameError
import logging
logger = logging.getLogger('seasonedParser')
from video import VIDEO_EXTENSIONS, Episode, Movie, Video
from subtitle import SUBTITLE_EXTENSIONS, Subtitle, get_subtitle_path
from utils import sanitize, refine
def search_external_subtitles(path, directory=None):
dirpath, filename = os.path.split(path)
dirpath = dirpath or '.'
fileroot, fileext = os.path.splitext(filename)
subtitles = {}
for p in os.listdir(directory or dirpath):
if not p.endswith(SUBTITLE_EXTENSIONS):
continue
language = Language('und')
language_code = p[len(fileroot):-len(os.path.splitext(p)[1])].replace(fileext, '').replace('_','-')[1:]
if language_code:
try:
language = Language.fromietf(language_code)
except (ValueError, LanguageReverseError):
logger.error('Cannot parse language code %r', language_code)
f = open(os.path.join(dirpath, p), 'r', encoding='ISO-8859-15')
# Strip SRT indices/timestamps (digits, ':', ',', '-', '<', '>') before language detection.
pattern = re.compile(r'[0-9:,\-<>]+')
filecontent = pattern.sub('', f.read())
filecontent = filecontent[0:1000]
language = langdetect.detect(filecontent)  # ISO 639-1 string; overrides any code parsed from the filename
f.close()
subtitles[os.path.join(dirpath, p)] = language
logger.debug('Found subtitles %r', subtitles)
return subtitles
def find_file_size(video):
return os.path.getsize(video.name)
def scan_video(path):
"""Scan a video from a `path`.
:param str path: existing path to the video.
:return: the scanned video.
:rtype: :class:`~subliminal.video.Video`
"""
# check for non-existing path
if not os.path.exists(path):
raise ValueError('Path does not exist')
# check video extension
if not path.endswith(VIDEO_EXTENSIONS):
raise ValueError('%r is not a valid video extension' % os.path.splitext(path)[1])
dirpath, filename = os.path.split(path)
logger.info('Scanning video %r in %r', filename, dirpath)
# guess
video = Video.fromguess(path, guessit(filename))
video.subtitles |= set(search_external_subtitles(video.name))
refine(video)
# hash of name
# if isinstance(video, Movie):
# if type(video.title) is str and type(video.year) is int:
# home_path = '{} ({})'.format(video.title, video.year)
# hash_str = ''.join([video.title, str(video.year) or ''])
# elif isinstance(video, Episode):
# if type(video.series) is str and type(video.season) is int and type(video.episode) is int:
# home_path = '{} ({})'.format(video.title, video.year)
# hash_str = ''.join([video.series, str(video.season), str(video.episode)])
# video.hash = hashlib.md5(hash_str.encode()).hexdigest()
# except:
# print(video)
return video
def scan_subtitle(path):
if not os.path.exists(path):
raise ValueError('Path does not exist')
dirpath, filename = os.path.split(path)
logger.info('Scanning subtitle %r in %r', filename, dirpath)
# guess
parent_path = path.strip(filename)
subtitle = Subtitle.fromguess(parent_path, guessit(path))
return subtitle
def subtitle_path(sibling, subtitle):
parent_path = os.path.dirname(sibling)
return os.path.join(parent_path, os.path.basename(subtitle))
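# Illustrative check of ``subtitle_path`` above (assumes POSIX separators; not part of
# the original file): the subtitle's basename is placed next to its sibling video.
assert subtitle_path('/tv/a/a.mkv', '/dl/sub.en.srt') == '/tv/a/sub.en.srt'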
def scan_videos(path):
"""Scan `path` for videos and their subtitles.
See :func:`refine` to find additional information for the video.
:param str path: existing directory path to scan.
:return: the scanned videos.
:rtype: list of :class:`~subliminal.video.Video`
"""
# check for non-existing path
if not os.path.exists(path):
raise ValueError('Path does not exist')
# check for non-directory path
if not os.path.isdir(path):
raise ValueError('Path is not a directory')
# setup progress bar
path_children = 0
for _ in os.walk(path): path_children += 1
with click.progressbar(length=path_children, show_pos=True, label='Collecting videos') as bar:
# walk the path
videos = []
insufficient_name = []
errors_path = []
for dirpath, dirnames, filenames in os.walk(path):
logger.debug('Walking directory %r', dirpath)
# remove badly encoded and hidden dirnames
for dirname in list(dirnames):
if dirname.startswith('.'):
logger.debug('Skipping hidden dirname %r in %r', dirname, dirpath)
dirnames.remove(dirname)
# scan for videos
for filename in filenames:
if not (filename.endswith(VIDEO_EXTENSIONS)):
logger.debug('Skipping non-video file %s', filename)
continue
# skip hidden files
if filename.startswith('.'):
logger.debug('Skipping hidden filename %r in %r', filename, dirpath)
continue
# reconstruct the file path
filepath = os.path.join(dirpath, filename)
if os.path.islink(filepath):
logger.debug('Skipping link %r in %r', filename, dirpath)
continue
# scan
if filename.endswith(VIDEO_EXTENSIONS): # video
try:
video = scan_video(filepath)
except InsufficientNameError as e:
logger.info(e)
insufficient_name.append(filepath)
continue
except ValueError: # pragma: no cover
logger.exception('Error scanning video')
errors_path.append(filepath)
continue
else: # pragma: no cover
raise ValueError('Unsupported file %r' % filename)
videos.append(video)
bar.update(1)
return videos, insufficient_name, errors_path
def organize_files(path):
hashList = {}
# ``scan_files`` is not defined in this module; assuming the intent was to reuse
# ``scan_folder`` and organize only the parsed videos (hedged fix).
mediafiles, _ = scan_folder(path)
# print(mediafiles)
for file in mediafiles:
hashList.setdefault(file.__hash__(),[]).append(file)
# hashList[file.__hash__()] = file
return hashList
def save_subtitles(files, single=False, directory=None, encoding=None):
t = tvdb_api.Tvdb()
if not isinstance(files, list):
files = [files]
for file in files:
# TODO this should not be done in the loop
dirname = "%s S%sE%s" % (file.series, "%02d" % (file.season), "%02d" % (file.episode))
createParentfolder = not dirname in file.parent_path
if createParentfolder:
dirname = os.path.join(file.parent_path, dirname)
print('Created: %s' % dirname)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# TODO Clean this !
try:
tvdb_episode = t[file.series][file.season][file.episode]
episode_title = tvdb_episode['episodename']
except:
episode_title = ''
old = os.path.join(file.parent_path, file.name)
if file.name.endswith(SUBTITLE_EXTENSIONS):
lang = file.getLanguage()
sdh = '.sdh' if file.sdh else ''
filename = "%s S%sE%s %s%s.%s.%s" % (file.series, "%02d" % (file.season), "%02d" % (file.episode), episode_title, sdh, lang, file.container)
else:
filename = "%s S%sE%s %s.%s" % (file.series, "%02d" % (file.season), "%02d" % (file.episode), episode_title, file.container)
if createParentfolder:
newname = os.path.join(dirname, filename)
else:
newname = os.path.join(file.parent_path, filename)
print('Moved: %s ---> %s' % (old, newname))
os.rename(old, newname)
def scan_folder(path):
videos = []
insufficient_name = []
errored_paths = []
logger.debug('Collecting path %s', path)
# non-existing
if not os.path.exists(path):
errored_paths.append(path)
logger.exception("The path '{}' does not exist".format(path))
# file
# if path is a file
if os.path.isfile(path):
logger.info('Path is a file')
try:
video = scan_video(path)
videos.append(video)
except InsufficientNameError as e:
logger.info(e)
insufficient_name.append(path)
# directories
if os.path.isdir(path):
logger.info('Path is a directory')
scanned_videos = []
try:
videos, insufficient_name, errored_paths = scan_videos(path)
except:
logger.exception('Unexpected error while collecting directory path %s', path)
errored_paths.append(path)
click.echo('%s video%s collected / %s file%s with insufficient name / %s error%s' % (
click.style(str(len(videos)), bold=True, fg='green' if videos else None),
's' if len(videos) > 1 else '',
click.style(str(len(insufficient_name)), bold=True, fg='yellow' if insufficient_name else None),
's' if len(insufficient_name) > 1 else '',
click.style(str(len(errored_paths)), bold=True, fg='red' if errored_paths else None),
's' if len(errored_paths) > 1 else '',
))
return videos, insufficient_name
def pickforgirlscouts(video):
if video.sufficientInfo():
video.moveLocation()
return True
return False
def moveHome(video):
wantedFilePath = video.wantedFilePath()
target_dir = os.path.dirname(wantedFilePath)
if not os.path.exists(target_dir):
logger.info('Creating directory {}'.format(target_dir))
os.makedirs(target_dir)
logger.info("Moving video file from: '{}' to: '{}'".format(video.name, wantedFilePath))
shutil.move(video.name, wantedFilePath)
for sub in video.subtitles:
if not os.path.isfile(sub):
continue
oldpath = sub
newpath = subtitle_path(wantedFilePath, sub)
logger.info("Moving subtitle file from: '{}' to: '{}'".format(oldpath, newpath))
shutil.move(oldpath, newpath)
# Give feedback before deleting?
def empthDirectory(paths):
pass
| 32.887879 | 152 | 0.602322 | 1,278 | 10,853 | 5.046948 | 0.21831 | 0.026047 | 0.012403 | 0.011938 | 0.228682 | 0.167597 | 0.11969 | 0.11969 | 0.114419 | 0.093643 | 0 | 0.008741 | 0.28324 | 10,853 | 329 | 153 | 32.987842 | 0.820414 | 0.152861 | 0 | 0.206897 | 0 | 0.004926 | 0.095385 | 0 | 0 | 0 | 0 | 0.00304 | 0 | 1 | 0.059113 | false | 0.004926 | 0.078818 | 0.004926 | 0.187192 | 0.014778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7127698ab3d52c9f1add1c5b008972b4228385d7 | 1,266 | py | Python | networking_vsphere/utils/db.py | huadream/networking-vsphere | 8669a78d4d2eb4620610fe7e4548cac7fbfa9e6a | [
"Apache-2.0"
] | null | null | null | networking_vsphere/utils/db.py | huadream/networking-vsphere | 8669a78d4d2eb4620610fe7e4548cac7fbfa9e6a | [
"Apache-2.0"
] | null | null | null | networking_vsphere/utils/db.py | huadream/networking-vsphere | 8669a78d4d2eb4620610fe7e4548cac7fbfa9e6a | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db.models import agent as agents_db
from neutron_lib.db import api as db_api
from networking_vsphere.common import constants
def get_agent_by_host(agent_host):
    """Return an L2 agent on the host."""
    session = db_api.get_writer_session()
    with session.begin(subtransactions=True):
        query = session.query(agents_db.Agent)
        agent = query.filter(
            agents_db.Agent.host == agent_host,
            agents_db.Agent.agent_type == constants.AGENT_TYPE_DVS,
            agents_db.Agent.admin_state_up.is_(True)).first()
        if agent and agent.is_active:
            return agent
    return None
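# Example call (sketch only - requires a configured Neutron database; the host
# name 'compute-1' is hypothetical):
# agent = get_agent_by_host('compute-1')
# if agent:
#     print(agent.host, agent.agent_type)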
| 37.235294 | 78 | 0.71169 | 185 | 1,266 | 4.745946 | 0.540541 | 0.068337 | 0.059226 | 0.036446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009045 | 0.21406 | 1,266 | 33 | 79 | 38.363636 | 0.873367 | 0.49684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
712c4c6a5b75135845d649016c8d2919cb39542c | 6,613 | py | Python | api/app/main.py | JexPY/filemanager-fastapi | da830fe6d9a3d515e0d04e6e690ff366225ec251 | [
"MIT"
] | 24 | 2020-09-15T11:59:55.000Z | 2022-03-13T19:58:02.000Z | api/app/main.py | JexPY/filemanager-fastapi | da830fe6d9a3d515e0d04e6e690ff366225ec251 | [
"MIT"
] | null | null | null | api/app/main.py | JexPY/filemanager-fastapi | da830fe6d9a3d515e0d04e6e690ff366225ec251 | [
"MIT"
] | 5 | 2020-10-11T08:41:29.000Z | 2022-03-10T07:23:55.000Z | from fastapi import FastAPI, File, UploadFile, BackgroundTasks, Depends, HTTPException, status, Query
from fastapi.responses import FileResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.security import HTTPBearer, OAuth2AuthorizationCodeBearer, HTTPBasicCredentials
from fastapi.staticfiles import StaticFiles
from dotenv import load_dotenv
from typing import List, Optional
import os
import sys
from services.serveUploadedFiles import handle_upload_image_file, handle_multiple_image_file_uploads, handle_upload_video_file
from services.serveQrcode import handle_qr_code
from services.security.customBearerCheck import validate_token
from services.storage.local import response_image_file
from services.serveDataFromUrl import handle_download_data_from_url, handle_multiple_image_file_downloads
load_dotenv()
app = FastAPI(docs_url=None if os.environ.get('docs_url') == 'None' else '/docs', redoc_url=None if os.environ.get('redoc_url') == 'None' else '/redoc')
# If you want to serve files from local server you need to mount your static file directory
if os.environ.get('PREFERED_STORAGE') == 'local' and 'pytest' not in sys.modules.keys():
    app.mount("/static", StaticFiles(directory="static"), name="static")
# CORS configuration is also possible, thanks to FastAPI
origins = os.environ.get('CORS_ORIGINS').split(',')
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get("/", tags=["main"])
def root(
cpu_load: Optional[str] = Query(
False,
description='True/False depending your needs, gets average CPU load value',
regex='^(True|False)$'
),
token: str = Depends(validate_token)):
result = {
"Hello": f"Token is {token}",
}
if cpu_load == 'True':
result['cpu_average_load'] = os.getloadavg()
return result
# File size is validated by NGINX
@app.post("/image", tags=["image"])
async def upload_image_file(
        thumbnail: Optional[str] = Query(
            os.environ.get('IMAGE_THUMBNAIL'),
            description='True/False depending your needs',
            regex='^(True|False)$'
        ),
        file: UploadFile = File(...),
        OAuth2AuthorizationCodeBearer=Depends(validate_token)):
    return handle_upload_image_file(True if thumbnail == 'True' else False, file)
@app.post("/images", tags=["image"])
async def upload_image_files(
thumbnail: Optional[str] = Query(
os.environ.get('IMAGE_THUMBNAIL'),
description='True/False depending your needs',
regex='^(True|False)$'
),
files: List[UploadFile] = File(...),
OAuth2AuthorizationCodeBearer = Depends(validate_token)
):
fileAmount = len(files)
if fileAmount > int(os.environ.get('MULTIPLE_FILE_UPLOAD_LIMIT')):
raise HTTPException(
status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
detail='Amount of files must not be more than {}'.format(os.environ.get('MULTIPLE_FILE_UPLOAD_LIMIT'))
)
return handle_multiple_image_file_uploads(files, fileAmount, True if thumbnail == 'True' else False)
@app.get("/image", tags=["image"])
async def get_image(
image: str = Query(...,
description='uploaded image name',
max_length=50
),
image_type: str = Query(
...,
description='Should provide verision of image you want from localStorage original, thumbnail or qrImage',
regex='^(original|thumbnail|qrImage)$'
),
OAuth2AuthorizationCodeBearer = Depends(validate_token)
):
return response_image_file(image, image_type)
@app.post("/qrImage", tags=["image"])
async def text_to_generate_qr_image(
qr_text: str = Query(
...,
description='Provide text to generate qr image',
),
with_logo: Optional[str] = Query(
os.environ.get('QR_IMAGE_WITH_LOGO'),
description='True/False depending your needs default is {}'.format(os.environ.get('QR_IMAGE_WITH_LOGO')),
regex='^(True|False)$'
),
OAuth2AuthorizationCodeBearer = Depends(validate_token)):
return handle_qr_code(qr_text, True if with_logo == 'True' else False)
@app.post("/video", tags=["video"])
async def upload_video_file(
optimize: Optional[str] = Query(
os.environ.get('VIDEO_OPTIMIZE'),
description='True/False depending your needs default is {}'.format(os.environ.get('VIDEO_OPTIMIZE')),
regex='^(True|False)$'
),
file: UploadFile = File(..., description='Allows mov, mp4, m4a, 3gp, 3g2, mj2'),
OAuth2AuthorizationCodeBearer = Depends(validate_token)):
return handle_upload_video_file(True if optimize == 'True' else False, file)
@app.get("/imageUrl", tags=["from url"])
async def image_from_url(
image_url: str = Query(
None,
description = "Pass valid image url to upload",
min_length = 5
),
thumbnail: Optional[str] = Query(
os.environ.get('IMAGE_THUMBNAIL'),
description='True/False depending your needs',
regex='^(True|False)$'
),
OAuth2AuthorizationCodeBearer = Depends(validate_token)):
return handle_download_data_from_url(image_url, True if thumbnail == 'True' else False, file_type='image')
@app.get("/imageUrls", tags=["from url"])
async def images_from_urls(
image_urls: List[str] = Query(
None,
description = "Pass valid image urls to upload",
min_length = 5
),
OAuth2AuthorizationCodeBearer = Depends(validate_token)):
fileAmount = len(image_urls)
if fileAmount > int(os.environ.get('MULTIPLE_FILE_UPLOAD_LIMIT')):
raise HTTPException(
status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
detail='Amount of files must not be more than {}'.format(os.environ.get('MULTIPLE_FILE_UPLOAD_LIMIT'))
)
return handle_multiple_image_file_downloads(image_urls, fileAmount)
@app.get("/videoUrl", tags=["from url"])
async def video_from_url(
video_url: str = Query(
None,
description = "Pass valid video url to upload",
min_length = 5
),
optimize: Optional[str] = Query(
os.environ.get('VIDEO_OPTIMIZE'),
description='True/False depending your needs default is {}'.format(os.environ.get('VIDEO_OPTIMIZE')),
regex='^(True|False)$'
),
OAuth2AuthorizationCodeBearer = Depends(validate_token)):
return handle_download_data_from_url(video_url, False, True if optimize == 'True' else False, file_type='video') | 37.573864 | 152 | 0.692122 | 804 | 6,613 | 5.508706 | 0.210199 | 0.034545 | 0.04606 | 0.088508 | 0.575977 | 0.504629 | 0.415218 | 0.299616 | 0.299616 | 0.282682 | 0 | 0.004846 | 0.188719 | 6,613 | 176 | 153 | 37.573864 | 0.82069 | 0.027068 | 0 | 0.425676 | 0 | 0 | 0.200902 | 0.020837 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006757 | false | 0.02027 | 0.101351 | 0 | 0.168919 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
712c549f586b26511dd3c9bf33e7238504d23130 | 3,570 | py | Python | src/modules/Load.py | aaanh/duplicated_accelcamp | 7d4b60ace023bede907f8ed367ba492731a1951d | [
"FTL",
"CNRI-Python",
"RSA-MD"
] | null | null | null | src/modules/Load.py | aaanh/duplicated_accelcamp | 7d4b60ace023bede907f8ed367ba492731a1951d | [
"FTL",
"CNRI-Python",
"RSA-MD"
] | 2 | 2021-05-21T16:31:41.000Z | 2021-08-25T16:05:48.000Z | src/modules/Load.py | aaanh/duplicated_accelcamp | 7d4b60ace023bede907f8ed367ba492731a1951d | [
"FTL",
"CNRI-Python",
"RSA-MD"
] | null | null | null | from modules.LoadAccel import *
from modules.LoadOmega import *
import os
from tkinter import *
from tkinter import filedialog  # filedialog is a submodule and is not pulled in by the * import
defaultdir = "../data"
def LoadDataSet(dirpath=None):
    if dirpath is None:
        root = Tk()
        root.withdraw()
        dirpath = filedialog.askdirectory(parent=root, initialdir=defaultdir, title='Please select a dataset')
    files = os.listdir(dirpath)
    print("-------Found " + str(len(files)) + " files-------")
    for i in files:
        print("Found: " + i)
    print("----------------------------")
    i = 1
    runs_files = []
    while True:
        run = list(filter(lambda x: x == "run" + str(i), files))
        if run:
            runs_files += run
        else:
            break
        i += 1
    print("Found " + str(len(runs_files)) + " runs")
    runs_data = []
    for run in runs_files:
        print("\n\n-----------------" + run + "-----------------")
        runs_data.append(LoadRun(dirpath + "/" + run + "/"))
    return runs_data
# load a single AccelData object and RotaryData object
# simpler front-end for LoadRun()
def LoadSingleRun(dirpath=None):
    run = LoadRun(dirpath)
    return {"accel": run["accel"][0], "omega": run["omega"][0]}


# deprecated:
def LoadRun(dirpath=None):
    return LoadMultiRun(dirpath)
# Load multiple runs as a list of AccelData objects and list of RotaryData objects
def LoadMultiRun(dirpath=None):
    if dirpath is None:
        root = Tk()
        root.withdraw()
        dirpath = filedialog.askdirectory(parent=root, initialdir=defaultdir, title='Please select a run')
    found_files = os.listdir(dirpath)
    print("-------Found " + str(len(found_files)) + " files-------")
    for i in found_files:
        print("Found: " + i)
    print("The Following Files Will be Ignored:")
    # check the part count first so short names cannot raise an IndexError on type_index
    not_file = list(filter(lambda x: (len(x.split(".")) != 4 or
                                      x.split(".")[-1].lower() != "csv" or
                                      (x.split(".")[type_index] != "accel" and
                                       x.split(".")[type_index] != "omega")
                                      ),
                           found_files))
    for i in not_file:
        print("- " + i + ("(Wrong File Structure)" if len(i.split(".")) != 4
                          else "(Wrong File Format)" if i.split(".")[-1].lower() != "csv"
                          else "(Unsupported Type)" if i.split(".")[type_index] != "accel" and i.split(".")[type_index] != "omega"
                          else ""
                          ))
    if not_file == []:
        print("--None--")
    print("----------------------------")
    files = list(filter(lambda x: not_file.count(x) == 0,
                        found_files))
    accels_files = list(filter(lambda x: x.split(".")[type_index] == "accel", files))
    accels_data = []
    for file in accels_files:
        print("processing " + file + "...")
        data = LoadAccelFile(dirpath + "/" + file)
        if data != "Model is not currently supported":
            accels_data.append(data)
        else:
            print("Failed to Load: " + file + " (Model not supported)")
    omega_files = list(filter(lambda x: x.split(".")[type_index] == "omega", files))
    omega_data = []
    for file in omega_files:
        print("processing " + file + "...")
        omega_data.append(Load_Omega(filepath=str(dirpath + "/" + file)))
    if accels_data == [] and omega_data == []:
        raise FileNotFoundError('No files were found.')
    return {"accel": accels_data, "omega": omega_data}
| 35.346535 | 123 | 0.519048 | 394 | 3,570 | 4.614213 | 0.258883 | 0.036304 | 0.046205 | 0.046755 | 0.325633 | 0.235424 | 0.235424 | 0.235424 | 0.194719 | 0.130913 | 0 | 0.003594 | 0.298599 | 3,570 | 100 | 124 | 35.7 | 0.722444 | 0.04958 | 0 | 0.202532 | 0 | 0 | 0.159976 | 0.023418 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050633 | false | 0 | 0.050633 | 0.012658 | 0.151899 | 0.177215 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
712ffc09478a5f0361603a065889b6ec9109be8d | 1,300 | py | Python | restfulpy/tests/test_jwt_cli.py | mehdishirazi/restfulpy | 244a53a8ea4692a37b4db82b6cb5ef83c27f0b53 | [
"MIT"
] | null | null | null | restfulpy/tests/test_jwt_cli.py | mehdishirazi/restfulpy | 244a53a8ea4692a37b4db82b6cb5ef83c27f0b53 | [
"MIT"
] | null | null | null | restfulpy/tests/test_jwt_cli.py | mehdishirazi/restfulpy | 244a53a8ea4692a37b4db82b6cb5ef83c27f0b53 | [
"MIT"
] | null | null | null | import json
import pytest
from bddcli import Given, given, when, stdout, stderr, Application
from itsdangerous import TimedJSONWebSignatureSerializer
from itsdangerous.exc import SignatureExpired
from nanohttp import settings
from restfulpy import Application as RestfulpyApplication
foo = RestfulpyApplication(name='jwt')
foo.__configuration__ = ''
def foo_main():
    return foo.cli_main()
app = Application('foo', 'restfulpy.tests.test_jwt_cli:foo_main')
def test_jwt():
    foo.configure(force=True)
    principal = TimedJSONWebSignatureSerializer(
        settings.jwt.secret,
        algorithm_name=settings.jwt.algorithm
    )
    with Given(app, ['jwt', 'create']):
        assert stderr == ''
        token = f'{stdout}'[:-1]
        assert principal.loads(token) == {}

        # Create a jwt token with a payload
        payload = dict(a=1)
        when(given + f'\'{json.dumps(payload)}\'')
        assert stderr == ''
        token = f'{stdout}'[:-1]
        assert principal.loads(token) == payload

        # Create an expired token
        when(given + '-e -1')
        assert stderr == ''
        token = f'{stdout}'[:-1]
        with pytest.raises(SignatureExpired):
            principal.loads(token)
if __name__ == '__main__':
    foo.cli_main(['jwt', 'create'])
| 24.528302 | 66 | 0.646923 | 144 | 1,300 | 5.701389 | 0.354167 | 0.043849 | 0.062119 | 0.065773 | 0.154689 | 0.154689 | 0.124239 | 0.124239 | 0.124239 | 0.124239 | 0 | 0.004995 | 0.23 | 1,300 | 52 | 67 | 25 | 0.815185 | 0.043077 | 0 | 0.176471 | 0 | 0 | 0.079839 | 0.029839 | 0 | 0 | 0 | 0 | 0.147059 | 1 | 0.058824 | false | 0 | 0.205882 | 0.029412 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
71313701f0c786d0a59625fef35a307d370ccdba | 913 | py | Python | Challenges/16/tree_max/tree_max.py | makkahwi/data-structures-and-algorithms | 06551786258bb7dabb9b0ab07c0f80ff78abca41 | [
"MIT"
] | null | null | null | Challenges/16/tree_max/tree_max.py | makkahwi/data-structures-and-algorithms | 06551786258bb7dabb9b0ab07c0f80ff78abca41 | [
"MIT"
] | null | null | null | Challenges/16/tree_max/tree_max.py | makkahwi/data-structures-and-algorithms | 06551786258bb7dabb9b0ab07c0f80ff78abca41 | [
"MIT"
] | null | null | null | class BinaryNode:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None


class BinaryTree:
    def __init__(self):
        self.root = None

    def tree_max(self):
        """
        Find the maximum node value.
        Input: None
        Output: the maximum value in the tree
        """
        if self.root is None:
            raise Exception("Empty Tree")
        elif self.root.left is None and self.root.right is None:
            return self.root.value
        maximum = self.root.value  # renamed from 'max' to avoid shadowing the builtin

        def search(current):
            nonlocal maximum
            if current.value > maximum:
                maximum = current.value
            if current.left:
                search(current.left)
            if current.right:
                search(current.right)

        search(self.root)
        return maximum
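# Minimal usage sketch (values are arbitrary):
# tree = BinaryTree()
# tree.root = BinaryNode(5)
# tree.root.left = BinaryNode(3)
# tree.root.right = BinaryNode(9)
# assert tree.tree_max() == 9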
if __name__ == "__main__":
pass
| 18.26 | 64 | 0.511501 | 98 | 913 | 4.591837 | 0.336735 | 0.124444 | 0.048889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.405257 | 913 | 49 | 65 | 18.632653 | 0.828729 | 0.079956 | 0 | 0 | 0 | 0 | 0.022959 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0.038462 | 0 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
713280b49e1a2690e858ead874212334f33b4458 | 6,928 | py | Python | sahyun_bot/elastic_settings.py | TheGoodlike13/sahyun-bot | 8ebc3d4e58a0acf9bde3c9ea8339145abcc53fcb | [
"MIT"
] | 1 | 2022-02-21T18:55:34.000Z | 2022-02-21T18:55:34.000Z | sahyun_bot/elastic_settings.py | TheGoodlike13/sahyun-bot | 8ebc3d4e58a0acf9bde3c9ea8339145abcc53fcb | [
"MIT"
] | null | null | null | sahyun_bot/elastic_settings.py | TheGoodlike13/sahyun-bot | 8ebc3d4e58a0acf9bde3c9ea8339145abcc53fcb | [
"MIT"
] | null | null | null | """
Initializes settings for elastic.py.
To make the index dynamic (which also allows to switch it out for tests), the value must be explicitly initialized
by some other module. If this does not happen, and somebody attempts to load elastic.py, 'ready_or_die' will get
executed which will shut down the application, thus preventing any shenanigans with the wrong parameters being used.
At least in normal circumstances :)
"""
from datetime import timezone, datetime
from typing import Optional, List, Union
from elasticsearch_dsl import Document, Date, integer_types, ValidationException, Search
from elasticsearch_dsl.query import Query
from sahyun_bot.the_danger_zone import nuke_from_orbit
from sahyun_bot.utils import NON_EXISTENT
from sahyun_bot.utils_settings import read_config, parse_bool, parse_list
DEFAULT_HOST = 'localhost'
DEFAULT_CUSTOMSFORGE_INDEX = 'cdlcs'
DEFAULT_USER_INDEX = 'users'
DEFAULT_FUZZINESS = 'auto:5,11'
DEFAULT_SHINGLE_CEILING = 3
DEFAULT_PLATFORMS = ['pc']
DEFAULT_PARTS = ['lead', 'rhythm']
DEFAULT_OFFICIAL = False
TEST_CUSTOMSFORGE_INDEX = DEFAULT_CUSTOMSFORGE_INDEX + '_test'
TEST_USER_INDEX = DEFAULT_USER_INDEX + '_test'
TEST_ONLY_VALUES = frozenset([
    TEST_CUSTOMSFORGE_INDEX,
    TEST_USER_INDEX,
])
e_host = NON_EXISTENT
e_cf_index = NON_EXISTENT
e_rank_index = NON_EXISTENT
e_fuzzy = NON_EXISTENT
e_shingle = NON_EXISTENT
e_explain = NON_EXISTENT
e_refresh = False
e_platforms = NON_EXISTENT
e_parts = NON_EXISTENT
e_allow_official = NON_EXISTENT
def important_values() -> List:
    return [e_cf_index, e_rank_index]
def ready_or_die():
    """
    Immediately shuts down the application if the module is not properly configured.

    Make the call immediately after imports in every module that depends on this configuration to be loaded.
    """
    if NON_EXISTENT in important_values():
        nuke_from_orbit('programming error - elastic module imported before elastic_settings is ready!')
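# Intended call pattern in dependent modules (a sketch of the convention the
# docstring above describes):
#   from sahyun_bot import elastic_settings
#   elastic_settings.ready_or_die()  # dies unless init()/init_test() ran first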
def init():
    global e_host
    global e_cf_index
    global e_rank_index
    global e_fuzzy
    global e_shingle
    global e_explain
    global e_platforms
    global e_parts
    global e_allow_official

    e_host = read_config('elastic', 'Host', fallback=DEFAULT_HOST)
    e_cf_index = read_config('elastic', 'CustomsforgeIndex', fallback=DEFAULT_CUSTOMSFORGE_INDEX)
    e_rank_index = read_config('elastic', 'RankIndex', fallback=DEFAULT_USER_INDEX)
    e_fuzzy = read_config('elastic', 'Fuzziness', fallback=DEFAULT_FUZZINESS)
    e_shingle = read_config('elastic', 'ShingleCeiling', convert=int, fallback=DEFAULT_SHINGLE_CEILING)
    e_explain = read_config('elastic', 'Explain', convert=parse_bool, fallback=False)
    # noinspection PyTypeChecker
    e_platforms = read_config('elastic', 'Platforms', convert=parse_list, fallback=DEFAULT_PLATFORMS)
    # noinspection PyTypeChecker
    e_parts = read_config('elastic', 'Parts', convert=parse_list, fallback=DEFAULT_PARTS)
    e_allow_official = read_config('elastic', 'RandomOfficial', convert=parse_bool, fallback=DEFAULT_OFFICIAL)

    e_shingle = max(2, e_shingle)

    for value in important_values():
        if value in TEST_ONLY_VALUES:
            nuke_from_orbit('configuration error - cannot use TEST values for REAL initialization')
def init_test():
    global e_host
    global e_cf_index
    global e_rank_index
    global e_fuzzy
    global e_shingle
    global e_explain
    global e_refresh
    global e_platforms
    global e_parts
    global e_allow_official

    e_host = DEFAULT_HOST
    e_cf_index = TEST_CUSTOMSFORGE_INDEX
    e_rank_index = TEST_USER_INDEX
    e_fuzzy = DEFAULT_FUZZINESS
    e_shingle = DEFAULT_SHINGLE_CEILING
    e_explain = True
    e_refresh = True
    e_platforms = DEFAULT_PLATFORMS
    e_parts = DEFAULT_PARTS
    e_allow_official = DEFAULT_OFFICIAL
RANDOM_SORT = {
    '_script': {
        'script': 'Math.random()',
        'type': 'number',
    },
}
class BaseDoc(Document):
    @classmethod
    def index_name(cls) -> Optional[str]:
        return cls._index._name if cls._index else None

    @classmethod
    def mapping(cls) -> Optional[dict]:
        return cls._doc_type.mapping.to_dict()

    @classmethod
    def search(cls, **kwargs) -> Search:
        return super().search(**kwargs).extra(explain=e_explain)

    @classmethod
    def as_lucine(cls, query: Union[Query, dict], **kwargs) -> str:
        """
        :returns given query as it will be interpreted by the index of this document, in Lucene format
        """
        kwargs['explain'] = True
        kwargs['rewrite'] = True
        es = cls._get_connection()
        body = query if isinstance(query, dict) else {'query': query.to_dict()}
        result = es.indices.validate_query(body, cls._default_index(), **kwargs)
        if 'error' in result:
            raise ValueError(result['error'])
        return result['explanations'][0]['explanation']

    def explain(self, query: Query, **kwargs) -> dict:
        """
        :returns Lucene query, whether it matches this document & basic explanation why or why not
        """
        es = self._get_connection()
        body = {'query': query.to_dict()}
        response = es.explain(self._get_index(), self.meta.id, body=body, **kwargs)
        return {
            'search': self.as_lucine(body),
            'match': response['matched'],
            'reason': response['explanation'],
        }

    def terms(self, *fields: str, **kwargs) -> dict:
        """
        :returns for every field, the terms that have been analyzed for this particular document
        """
        vectors = self.term_vectors(*fields, **kwargs)
        return {field_name: list(data['terms'].keys()) for field_name, data in vectors.items()}

    def term_vectors(self, *fields: str, **kwargs) -> dict:
        """
        :returns for every field, information about the terms that have been analyzed for this particular document
        """
        es = self._get_connection()
        response = es.termvectors(index=self._get_index(), id=self.meta.id, fields=fields, **kwargs)
        return response['term_vectors']

    def delete(self, **kwargs):
        kwargs.setdefault('refresh', e_refresh)
        super().delete(**kwargs)

    def update(self, **kwargs):
        kwargs.setdefault('refresh', e_refresh)
        return super().update(**kwargs)

    def save(self, **kwargs):
        kwargs.setdefault('refresh', e_refresh)
        return super().save(**kwargs)
class EpochSecond(Date):
    def __init__(self, *args, **kwargs):
        kwargs.pop('default_timezone', None)
        kwargs['format'] = 'epoch_second'
        super().__init__(default_timezone=timezone.utc, *args, **kwargs)

    def _deserialize(self, data):
        if not isinstance(data, integer_types):
            raise ValidationException(f'Could not parse epoch second from the value <{data}>')
        return datetime.fromtimestamp(data, tz=timezone.utc)
| 32.990476 | 116 | 0.69948 | 887 | 6,928 | 5.21646 | 0.270575 | 0.028744 | 0.033067 | 0.009726 | 0.194943 | 0.132267 | 0.132267 | 0.123406 | 0.123406 | 0.082343 | 0 | 0.001088 | 0.203811 | 6,928 | 209 | 117 | 33.148325 | 0.837745 | 0.15026 | 0 | 0.192857 | 0 | 0 | 0.103693 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.078571 | 0.028571 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
713385c102d9118616dbff568943593032126378 | 13,816 | py | Python | ipec/ga/population.py | wwwbbb8510/ippso | fa20d23cd8edba5908e65a0ab0ab990d7ce3d5d5 | [
"MIT"
] | 9 | 2018-05-10T01:04:34.000Z | 2019-06-28T07:47:37.000Z | ipec/ga/population.py | wwwbbb8510/ippso | fa20d23cd8edba5908e65a0ab0ab990d7ce3d5d5 | [
"MIT"
] | null | null | null | ipec/ga/population.py | wwwbbb8510/ippso | fa20d23cd8edba5908e65a0ab0ab990d7ce3d5d5 | [
"MIT"
] | 2 | 2020-10-12T03:54:30.000Z | 2021-09-08T14:10:21.000Z | import copy
import logging
import numpy as np
from ipec.cnn.evaluator import Evaluator, CNNEvaluator, initialise_cnn_evaluator
from ipec.cnn.layers import ConvLayer
from ipec.cnn.layers import DisabledLayer
from ipec.cnn.layers import FullyConnectedLayer
from ipec.cnn.layers import PoolingLayer
from ipec.ip.decoder import Decoder
from .chromosome import Chromosome, CNNChromosome
POPULATION_DEFAULT_PARAMS = {
    'pop_size': 3,  # 50,
    'chromosome_length': 5,  # 15,
    'max_full': 2,  # 5,
    'elitism_rate': 0.5,
    'mutation_rate': np.asarray([0.1, 0.2]),
    'layers': {
        'conv': ConvLayer(),
        'pooling': PoolingLayer(),
        'full': FullyConnectedLayer(),
        'disabled': DisabledLayer()
    },
    'max_generation': 3,  # 50
}
def initialise_cnn_population(pop_size=None, chromosome_length=None, max_fully_connected_length=None, elitism_rate=None, mutation_rate=None, layers=None, evaluator=None, max_generation=None):
    """
    initialise a cnn population
    :param pop_size: population size
    :type pop_size: int
    :param chromosome_length: the length/dimension of the chromosome
    :type chromosome_length: int
    :param max_fully_connected_length: the max length of fully-connected layers
    :type max_fully_connected_length: int
    :param elitism_rate: elitism rate
    :type elitism_rate: float
    :param mutation_rate: mutation rate. [mutation rate for interfaces in a chromosome, mutation rate for bits in an interface]
    :type mutation_rate: numpy.array
    :param layers: a dict of (layer_name, layer) pairs; keys: conv, pooling, full, disabled
    :type layers: dict
    :param max_generation: max GA generation
    :type max_generation: int
    :return: a cnn population
    :rtype: CNNPopulation
    """
    if pop_size is None:
        pop_size = POPULATION_DEFAULT_PARAMS['pop_size']
    if chromosome_length is None:
        chromosome_length = POPULATION_DEFAULT_PARAMS['chromosome_length']
    if max_fully_connected_length is None:
        max_fully_connected_length = POPULATION_DEFAULT_PARAMS['max_full']
    if mutation_rate is None:
        mutation_rate = POPULATION_DEFAULT_PARAMS['mutation_rate']
    if elitism_rate is None:
        elitism_rate = POPULATION_DEFAULT_PARAMS['elitism_rate']
    if max_generation is None:
        max_generation = POPULATION_DEFAULT_PARAMS['max_generation']
    if layers is None:
        layers = POPULATION_DEFAULT_PARAMS['layers']
    logging.info('===initialise the GA population with the following parameters===')
    logging.info('population size: %d, chromosome length: %d, max fully-connected length: %d, max generation: %d', pop_size, chromosome_length, max_fully_connected_length, max_generation)
    return CNNPopulation(pop_size, chromosome_length, max_fully_connected_length, elitism_rate, mutation_rate, layers, evaluator, max_generation).initialise()
class Population:
    """
    Population class
    """

    def __init__(self, pop_size, chromosome_length, elitism_rate, mutation_rate, layers, evaluator=None, max_generation=None):
        """
        constructor
        :param pop_size: population size
        :type pop_size: int
        :param chromosome_length: the length/dimension of the chromosome
        :type chromosome_length: int
        :param elitism_rate: elitism rate
        :type elitism_rate: float
        :param mutation_rate: mutation rate. [mutation rate for interfaces in a chromosome, mutation rate for bits in an interface]
        :type mutation_rate: numpy.array
        :param layers: a dict of (layer_name, layer) pairs; keys: conv, pooling, full, disabled
        :type layers: dict
        :param evaluator: evaluator to calculate the fitness
        :type evaluator: Evaluator
        :param max_generation: max generation
        :type max_generation: int
        """
        self.pop_size = pop_size
        self.pop = np.empty(pop_size, dtype=Chromosome)
        self.chromosome_length = chromosome_length
        self.elitism_rate = elitism_rate
        self.mutation_rate = mutation_rate
        self.layers = layers
        self.max_generation = max_generation if max_generation > 0 else POPULATION_DEFAULT_PARAMS['max_generation']
        self.evaluator = evaluator
        self.decoder = Decoder()
        self.best_chromosome = None
        self.roulette_proportions = None
    def evolve(self):
        """
        evolve the population
        """
        for g in range(self.max_generation):
            logging.info('===start updating population at step-%d===', g)
            # evaluate the first generation as the chromosomes are not evaluated during initialisation
            if g == 0:
                for chromosome in self.pop:
                    eval_result = self.evaluator.eval(chromosome)
                    # use minus standard deviation which is the less the better
                    # use minus number of connections which is the less the better
                    chromosome.fitness = (eval_result[0], -eval_result[1], -eval_result[2])
            # recompute the roulette boundaries from the current generation's fitness
            self.roulette_proportions = None
            # generate new pop
            new_pop = np.empty(self.pop_size, dtype=Chromosome)
            new_pop_index = 0
            # add elite chromosomes in the new generation
            elite_chromosomes = self.elitism()
            if elite_chromosomes is not None:
                for chromosome in elite_chromosomes:
                    new_chromosome = copy.deepcopy(chromosome)
                    new_chromosome.id = new_pop_index
                    new_pop[new_pop_index] = new_chromosome
                    new_pop_index = new_pop_index + 1
            # generate children (after doing selection, crossover, mutation) in the population
            while new_pop_index < self.pop_size:
                chromosome_1, chromosome_2 = self.select()
                candidate_chromosome = self.crossover(chromosome_1, chromosome_2)
                candidate_chromosome = self.mutate(candidate_chromosome)
                candidate_chromosome.id = new_pop_index
                # evaluate the candidate itself, not the loop variable left over from the elitism step
                eval_result = self.evaluator.eval(candidate_chromosome)
                # use minus standard deviation which is the less the better
                # use minus number of connections which is the less the better
                candidate_chromosome.fitness = (eval_result[0], -eval_result[1], -eval_result[2])
                new_pop[new_pop_index] = candidate_chromosome
                # update best chromosome
                if self.best_chromosome is None or self.best_chromosome.compare_with(new_pop[new_pop_index]) < 0:
                    self.best_chromosome = copy.deepcopy(new_pop[new_pop_index])
                logging.info('===fitness of Chromosome-%d at generation-%d: %s===', new_pop_index, g, str(new_pop[new_pop_index].fitness))
                new_pop_index = new_pop_index + 1
            # the new generation replaces the old one
            self.pop = new_pop
            logging.info('===fitness of best chromosome at generation-%d: %s===', g, str(self.best_chromosome.fitness))
            logging.info('===finish updating population at generation-%d===', g)
        return self.best_chromosome
    def elitism(self):
        """
        GA elitism
        :return: elitism array of chromosome
        :rtype: numpy.array
        """
        elitism_pop = None
        elitism_amount = int(self.elitism_rate * self.pop_size)
        if elitism_amount > 0:
            # construct a sortable array
            dtype = [('chromosome', Chromosome), ('s_0', float), ('s_1', float), ('s_2', float)]
            sortable_pop = np.empty(self.pop_size, dtype=dtype)
            for i in range(self.pop_size):
                fitness = self.pop[i].fitness
                sortable_pop[i] = (self.pop[i], fitness[0], fitness[1], fitness[2])
            sorted_pop = np.sort(sortable_pop, order=['s_0', 's_1', 's_2'])
            # keep the best (highest-fitness) individuals, which sort to the end
            elitism_pop = np.empty(elitism_amount, dtype=Chromosome)
            for i in range(self.pop_size - elitism_amount, self.pop_size):
                elitism_pop[i - (self.pop_size - elitism_amount)] = sorted_pop[i][0]
        return elitism_pop
    def select(self):
        """
        select two chromosomes for crossover and mutation
        :return: two unique chromosomes
        :rtype: tuple
        """
        # roulette-select chromosome_1
        c1_index = self.spin_roulette()
        chromosome_1 = self.pop[c1_index]
        # roulette-select chromosome_2 (distinct from chromosome_1)
        c2_index = c1_index
        while c1_index == c2_index:
            c2_index = self.spin_roulette()
        chromosome_2 = self.pop[c2_index]
        return (chromosome_1, chromosome_2)

    def spin_roulette(self):
        if self.roulette_proportions is None:
            self.roulette_proportions = self.calculate_roulette_proportions()
        prob = np.random.uniform(0, 1)
        roulette_index = self.pop_size - 1
        for i in range(self.roulette_proportions.shape[0]):
            if prob < self.roulette_proportions[i]:
                roulette_index = i
                break
        return roulette_index
    def calculate_roulette_proportions(self):
        """
        calculate roulette proportions for selection
        :return:
        """
        # calculate the accumulated fitness
        accumulated_fitness = 0
        for chromosome in self.pop:
            accumulated_fitness += chromosome.fitness[0]
        # calculate the proportions (cumulative roulette boundaries; the last slot is implicit)
        previous_roulette_point = 0
        self.roulette_proportions = np.zeros(self.pop_size)  # was a hard-coded 29, which breaks for other population sizes
        for i in range(self.pop_size - 1):
            new_roulette_point = previous_roulette_point + self.pop[i].fitness[0] / accumulated_fitness
            self.roulette_proportions[i] = new_roulette_point
            previous_roulette_point = new_roulette_point
        return self.roulette_proportions
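    # Worked example (hypothetical fitnesses 1, 2, 1 with pop_size 3): the
    # accumulated fitness is 4 and the stored boundaries are [0.25, 0.75, 0];
    # spin_roulette() returns 0 for draws below 0.25, 1 for draws below 0.75,
    # and falls back to the last index (pop_size - 1) for anything beyond the
    # last stored boundary.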
    def crossover(self, chromosome_1, chromosome_2):
        """
        crossover
        :param chromosome_1: first parent chromosome
        :type chromosome_1: Chromosome
        :param chromosome_2: second parent chromosome
        :type chromosome_2: Chromosome
        :return: candidate chromosome
        :rtype: Chromosome
        """
        candidate_chromosome = copy.deepcopy(chromosome_1)
        start_point = np.random.randint(0, self.chromosome_length)
        mutation_length = np.random.randint(1, self.chromosome_length - start_point + 1)
        for i in range(start_point, start_point + mutation_length):
            candidate_chromosome.x[i] = chromosome_2.x[i]
        return candidate_chromosome
    def mutate(self, candidate_chromosome):
        """
        mutation
        :param candidate_chromosome: candidate chromosome after crossover
        :type candidate_chromosome: Chromosome
        :return: candidate chromosome
        :rtype: Chromosome
        """
        for i in range(self.chromosome_length):
            interface = candidate_chromosome.x[i]
            rand = np.random.uniform(0, 1)
            # check whether to mutate the interface
            if rand < self.mutation_rate[0]:
                bin_ip_list = list(interface.ip.bin_ip)
                bin_ip_length = len(bin_ip_list)
                field_length = interface.ip_structure.fields_length
                # mutate fields of a specific layer type instead of the entire IP
                for j in range(bin_ip_length - field_length, bin_ip_length):
                    # check whether to mutate the bit
                    rand = np.random.uniform(0, 1)
                    if rand < self.mutation_rate[1]:
                        bin_ip_list[j] = '0' if bin_ip_list[j] == '1' else '1'
                candidate_chromosome.x[i].update_ip_by_binary_string(''.join(bin_ip_list))
                if self.layers is not None:
                    candidate_chromosome.x[i].update_subnet_and_structure(self.layers)
        # fix invalid interface after crossover
        candidate_chromosome.fix_invalid_interface()
        return candidate_chromosome
class CNNPopulation(Population):
    """
    CNNPopulation class
    """

    def __init__(self, pop_size, chromosome_length, max_fully_connected_length, elitism_rate, mutation_rate, layers, evaluator=None, max_generation=None):
        """
        constructor
        :param pop_size: population size
        :type pop_size: int
        :param chromosome_length: the length/dimension of the chromosome
        :type chromosome_length: int
        :param max_fully_connected_length: the max length of fully-connected layers
        :type max_fully_connected_length: int
        :param elitism_rate: elitism rate
        :type elitism_rate: float
        :param mutation_rate: mutation rate. [mutation rate for interfaces in a chromosome, mutation rate for bits in an interface]
        :type mutation_rate: numpy.array
        :param layers: a dict of (layer_name, layer) pairs; keys: conv, pooling, full, disabled
        :type layers: dict
        :param evaluator: evaluator to calculate the fitness
        :type evaluator: CNNEvaluator
        :param max_generation: max generation
        :type max_generation: int
        """
        self.max_fully_connected_length = max_fully_connected_length
        super(CNNPopulation, self).__init__(pop_size, chromosome_length, elitism_rate, mutation_rate, layers, evaluator, max_generation)

    def initialise(self):
        """
        initialise the population
        """
        # set default evaluator
        if self.evaluator is None:
            self.evaluator = initialise_cnn_evaluator()
        logging.info('===start initialising population')
        for i in range(self.pop_size):
            chromosome = CNNChromosome(i, self.chromosome_length, self.max_fully_connected_length, self.layers).initialise()
            self.pop[i] = chromosome
        logging.info('===finish initialising population')
        return self
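# Minimal end-to-end sketch (uses the module defaults; assumes an evaluator can
# be initialised from configuration):
# population = initialise_cnn_population()
# best = population.evolve()
# print(best.fitness)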
| 43.040498 | 191 | 0.652287 | 1,653 | 13,816 | 5.22686 | 0.120992 | 0.024306 | 0.019097 | 0.037269 | 0.434954 | 0.331713 | 0.28669 | 0.270486 | 0.253819 | 0.25081 | 0 | 0.008796 | 0.267661 | 13,816 | 320 | 192 | 43.175 | 0.845128 | 0.256659 | 0 | 0.094118 | 0 | 0.005882 | 0.067189 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064706 | false | 0 | 0.058824 | 0 | 0.188235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
71347fdcbbf1d234dd6e12f39abe3f11e92be5a5 | 2,030 | py | Python | migrations/versions/8a480de4de4c_adjusts_for_seed_development.py | eubr-bigsea/limonero | 54851b73bb1e4f5626b3d38ea7eeb50f3ed2e3c5 | [
"Apache-2.0"
] | 1 | 2018-01-01T20:35:43.000Z | 2018-01-01T20:35:43.000Z | migrations/versions/8a480de4de4c_adjusts_for_seed_development.py | eubr-bigsea/limonero | 54851b73bb1e4f5626b3d38ea7eeb50f3ed2e3c5 | [
"Apache-2.0"
] | 37 | 2017-02-24T17:07:25.000Z | 2021-09-02T14:49:19.000Z | migrations/versions/8a480de4de4c_adjusts_for_seed_development.py | eubr-bigsea/limonero | 54851b73bb1e4f5626b3d38ea7eeb50f3ed2e3c5 | [
"Apache-2.0"
] | 2 | 2019-11-05T13:45:45.000Z | 2020-11-13T22:02:37.000Z | """Adjusts for Seed development
Revision ID: 8a480de4de4c
Revises: 7addb7587b1a
Create Date: 2021-07-13 17:16:20.807567
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from limonero.migration_utils import (is_mysql, is_psql, upgrade_actions,
                                      downgrade_actions, get_psql_enum_alter_commands, is_sqlite)
# revision identifiers, used by Alembic.
revision = '8a480de4de4c'
down_revision = '7addb7587b1a'
branch_labels = None
depends_on = None
def upgrade():
    if is_mysql():
        op.execute("""
            ALTER TABLE `storage` CHANGE `type` `type` ENUM(
                'CASSANDRA','ELASTIC_SEARCH','HDFS','HIVE', 'HIVE_WAREHOUSE',
                'JDBC', 'KAFKA', 'LOCAL','MONGODB'
            ) CHARSET utf8 COLLATE
            utf8_unicode_ci NOT NULL;""")
    elif is_psql():
        storage_values = ['CASSANDRA', 'ELASTIC_SEARCH', 'HDFS',
                          'HIVE', 'HIVE_WAREHOUSE', 'JDBC', 'KAFKA', 'LOCAL', 'MONGODB']
        all_commands = [
            [
                get_psql_enum_alter_commands(['storage'], ['type'],
                                             'StorageTypeEnumType', storage_values, 'HDFS'),
                None
            ]
        ]
        upgrade_actions(all_commands)
    # ### end Alembic commands ###
def downgrade():
    if is_mysql():
        op.execute("""
            ALTER TABLE `storage` CHANGE `type` `type` ENUM(
                'CASSANDRA','ELASTIC_SEARCH','HDFS','HIVE', 'HIVE_WAREHOUSE',
                'KAFKA', 'JDBC','LOCAL','MONGODB'
            ) CHARSET utf8 COLLATE
            utf8_unicode_ci NOT NULL;""")
    elif is_psql():
        storage_values = ['CASSANDRA', 'ELASTIC_SEARCH', 'HDFS',
                          'HIVE', 'HIVE_WAREHOUSE', 'JDBC', 'LOCAL', 'MONGODB']
        all_commands = [
            [
                None,
                get_psql_enum_alter_commands(['storage'], ['type'],
                                             'StorageTypeEnumType', storage_values, 'HDFS'),
            ]
        ]
        downgrade_actions(all_commands)
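# Applied/reverted through the standard Alembic CLI, e.g.:
#   alembic upgrade head
#   alembic downgrade 7addb7587b1a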
| 32.222222 | 77 | 0.576355 | 203 | 2,030 | 5.53202 | 0.384236 | 0.05699 | 0.078362 | 0.092609 | 0.555655 | 0.534283 | 0.534283 | 0.534283 | 0.534283 | 0.534283 | 0 | 0.033684 | 0.29803 | 2,030 | 62 | 78 | 32.741935 | 0.754386 | 0.088177 | 0 | 0.468085 | 0 | 0 | 0.415259 | 0.059946 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.085106 | 0 | 0.12766 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7139484e64db6cee198f70d7bc368fac65431c29 | 1,313 | py | Python | Problems/199.py | kvedula/leetcode | 8576b1ef466529b9e0d337af78fc833acb686a3c | [
"MIT"
] | null | null | null | Problems/199.py | kvedula/leetcode | 8576b1ef466529b9e0d337af78fc833acb686a3c | [
"MIT"
] | null | null | null | Problems/199.py | kvedula/leetcode | 8576b1ef466529b9e0d337af78fc833acb686a3c | [
"MIT"
] | null | null | null | # Kamesh Vedula
# Problem: Binary Tree Right Side View
import collections
from typing import List


# Definition for a binary tree node (normally provided by the LeetCode judge;
# uncommented here so the snippet runs standalone).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def rightSideView(self, root: TreeNode) -> List[int]:
        if root is None:
            return []
        # First attempt, kept for reference: collect whole levels, then take the
        # last entry of each (children are pushed right-first here, so the last
        # entry is actually the leftmost - hence the rewrite below).
        # q = []
        # q.append(root)
        # levelOrder = []
        # while q:
        #     count = len(q)
        #     level = []
        #     for i in range(count):
        #         temp = q.pop(0)
        #         level.append(temp.val)
        #         if temp.right:
        #             q.append(temp.right)
        #         if temp.left:
        #             q.append(temp.left)
        #     levelOrder.append(level)
        # rightVals = [lvl[-1] for lvl in levelOrder]
        # return rightVals
        # BFS with a deque, pushing right children first so the first node seen
        # on each level (i == 0) is the rightmost one.
        q = collections.deque()
        q.append(root)
        levelOrder = []
        while q:
            count = len(q)
            for i in range(count):
                temp = q.popleft()
                if i == 0:
                    levelOrder.append(temp.val)
                if temp.right:
                    q.append(temp.right)
                if temp.left:
                    q.append(temp.left)
        return levelOrder
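# Quick check (the classic example for this problem):
# root = TreeNode(1, TreeNode(2, None, TreeNode(5)), TreeNode(3, None, TreeNode(4)))
# assert Solution().rightSideView(root) == [1, 3, 4]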
| 22.254237 | 55 | 0.450876 | 139 | 1,313 | 4.230216 | 0.330935 | 0.071429 | 0.07483 | 0.071429 | 0.414966 | 0.414966 | 0.414966 | 0.343537 | 0.343537 | 0.221088 | 0 | 0.00545 | 0.440975 | 1,313 | 58 | 56 | 22.637931 | 0.79564 | 0.549886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
713a3074599a837fcc6b69f08f73d38cb5ca45a1 | 182 | py | Python | curso2.py | ralabarta/educationar_python_repo | f89ddb0bb19b039459e42472e0f52f31c69a3853 | [
"MIT"
] | null | null | null | curso2.py | ralabarta/educationar_python_repo | f89ddb0bb19b039459e42472e0f52f31c69a3853 | [
"MIT"
] | null | null | null | curso2.py | ralabarta/educationar_python_repo | f89ddb0bb19b039459e42472e0f52f31c69a3853 | [
"MIT"
] | null | null | null | import statistics
datos = [2,4,6,8]
datos2 = [2, 2, 3, 5, 8, 9]
mean_r = statistics.mean(datos)
median_r = statistics.median(datos2)
print(mean_r)
print(median_r)
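# Expected output: statistics.mean([2, 4, 6, 8]) is 5 and
# statistics.median([2, 2, 3, 5, 8, 9]) is 4.0, so the script prints:
# 5
# 4.0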
| 13 | 37 | 0.637363 | 30 | 182 | 3.733333 | 0.5 | 0.089286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.084507 | 0.21978 | 182 | 13 | 38 | 14 | 0.704225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
713f16f6c1f8f19f1fc82172faed9240a38e2015 | 1,632 | py | Python | examples/custom-validator.py | RyanSquared/gigaspoon | c5bf31fbffa1c7ec8e0c91ef7ae79040d553151a | [
"MIT"
] | 1 | 2018-02-06T16:15:44.000Z | 2018-02-06T16:15:44.000Z | examples/custom-validator.py | RyanSquared/gigaspoon | c5bf31fbffa1c7ec8e0c91ef7ae79040d553151a | [
"MIT"
] | 1 | 2019-10-15T13:57:09.000Z | 2019-10-15T16:08:42.000Z | examples/custom-validator.py | RyanSquared/gigaspoon | c5bf31fbffa1c7ec8e0c91ef7ae79040d553151a | [
"MIT"
] | null | null | null | import os
import flask
import gigaspoon as gs
app = flask.Flask(__name__)
app.secret_key = os.urandom(24)
class CustomSelect(gs.v.Validator):
    def __init__(self, name, options):
        self.name = name
        self._options = set(options)

    def __repr__(self):
        return "%r %r" % (type(self), self._options)

    def populate(self):
        return {
            "options": self._options,
            "name": self.name
        }

    def validate(self, form, key, value):
        if value not in self._options:
            self.raise_error(key, value)
html = """
<!DOCTYPE HTML>
{% for message in get_flashed_messages() -%}
<pre>{{ message }}</pre>
{%- endfor %}
<form method="POST">
{% autoescape false %}
{{ g.csrf_token_validator.csrf_tag }}
{% endautoescape %}
<select required name="{{ g.user_validator.name }}">
{% for user in g.user_validator.options -%}
<option value="{{ user }}">{{ user }}</option>
{%- endfor %}
<option value="break!">Bad input!</option>
</select>
<input type="submit" value="submit">
</form>
"""
@app.route("/", methods=["GET", "POST"])
@gs.set_methods("POST")
@gs.validator(CustomSelect("user", ["Fred", "George"]))
@gs.validator(gs.v.CSRF())
@gs.base
def index(form):
if form.is_form_mode():
# Method is POST and form fields are valid
flask.flash(repr(form))
return flask.redirect(flask.url_for('index'))
return flask.render_template_string(html)
@app.errorhandler(gs.e.FormError)
def handle_form_error(exc):
    return flask.escape(str(exc)), 400


if __name__ == "__main__":
    app.run()
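# To try it out (a sketch, assuming gigaspoon is installed): run
# `python custom-validator.py` and open Flask's default development address,
# http://127.0.0.1:5000/.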
| 23.652174 | 56 | 0.610907 | 206 | 1,632 | 4.640777 | 0.427184 | 0.046025 | 0.029289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003937 | 0.221814 | 1,632 | 68 | 57 | 24 | 0.748819 | 0.02451 | 0 | 0.038462 | 0 | 0 | 0.358491 | 0.061635 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.057692 | 0.057692 | 0.288462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
714198dc8f030861acfb87346bb996cc656b4494 | 2,244 | py | Python | gdk/jetson/tracker.py | dbadrian/gdk_dlrc17 | 7aebed740dc4a09f3549674b0cfeb22bdb392ac6 | [
"MIT"
] | 1 | 2019-03-29T12:36:55.000Z | 2019-03-29T12:36:55.000Z | gdk/jetson/tracker.py | dbadrian/gdk_dlrc17 | 7aebed740dc4a09f3549674b0cfeb22bdb392ac6 | [
"MIT"
] | null | null | null | gdk/jetson/tracker.py | dbadrian/gdk_dlrc17 | 7aebed740dc4a09f3549674b0cfeb22bdb392ac6 | [
"MIT"
] | null | null | null | import time
import sys
import logging
# Import PS-Drone
import cv2
import numpy as np
import gdk.config as config
logger = logging.getLogger(__name__)
class CheckerBoardTracker():
    def __init__(self):
        self.tracking = False

    def update(self, frame):
        self.tracking, self.corners = self.__get_corners_from_marker(frame)
        if self.tracking:
            self.centroid = self.__get_centroid_from_corners()
            self.outer_corners = self.__get_main_corners_from_corners()
        self.height, self.width = frame.shape[:2]
        return self.tracking

    def get_centroid_error(self):
        if self.tracking:
            errx = (self.centroid[0][0] - config.XY_TRACK_POINT[0])  # /(config.XY_TRACK_POINT[0])
            erry = (self.centroid[0][1] - config.XY_TRACK_POINT[1])  # /(config.XY_TRACK_POINT[1])
            return errx, erry

    def get_distance_error(self):
        if self.tracking:
            short_1 = np.linalg.norm(self.outer_corners[0] - self.outer_corners[1])
            short_2 = np.linalg.norm(self.outer_corners[3] - self.outer_corners[2])
            long_1 = np.linalg.norm(self.outer_corners[1] - self.outer_corners[3])
            long_2 = np.linalg.norm(self.outer_corners[2] - self.outer_corners[0])
            avg_short = (short_1 + short_2) / 2.0
            avg_long = (long_1 + long_2) / 2.0
            dif_short = (avg_short - config.BEST_DISTANCE[0]) / config.BEST_DISTANCE[0]
            dif_long = (avg_long - config.BEST_DISTANCE[1]) / config.BEST_DISTANCE[1]
            return (dif_short + dif_long) / 2.0

    def __get_main_corners_from_corners(self):
        return np.array([self.corners[0][0], self.corners[3][0], self.corners[16][0], self.corners[19][0]])

    def __get_centroid_from_corners(self):
        return np.sum(self.corners, 0) / float(len(self.corners))

    def __get_corners_from_marker(self, frame):
        corners = None
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        found, corners = cv2.findChessboardCorners(
            gray, config.PATTERN_SIZE, corners,
            cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_NORMALIZE_IMAGE + cv2.CALIB_CB_FAST_CHECK)
        npcorners = np.array(corners)
        return found, npcorners
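# Per-frame usage sketch (assumes frames come from an OpenCV capture source and
# that config defines PATTERN_SIZE, XY_TRACK_POINT and BEST_DISTANCE):
# tracker = CheckerBoardTracker()
# if tracker.update(frame):
#     errx, erry = tracker.get_centroid_error()
#     dist_err = tracker.get_distance_error()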
| 34.523077 | 130 | 0.657754 | 312 | 2,244 | 4.435897 | 0.25 | 0.058526 | 0.104046 | 0.052023 | 0.264451 | 0.182081 | 0.083815 | 0 | 0 | 0 | 0 | 0.031142 | 0.227273 | 2,244 | 64 | 131 | 35.0625 | 0.767013 | 0.030749 | 0 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155556 | false | 0 | 0.133333 | 0.044444 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7143d82db45d18175969ef941dd86101829ae9a5 | 15,402 | py | Python | Tryp_T.py | johnheap/VAPPER-Galaxy | 4ce903b3b44755198e59368057863a5eb62ff6c6 | [
"Apache-2.0"
] | null | null | null | Tryp_T.py | johnheap/VAPPER-Galaxy | 4ce903b3b44755198e59368057863a5eb62ff6c6 | [
"Apache-2.0"
] | null | null | null | Tryp_T.py | johnheap/VAPPER-Galaxy | 4ce903b3b44755198e59368057863a5eb62ff6c6 | [
"Apache-2.0"
] | null | null | null | """
* Copyright 2018 University of Liverpool
* Author: John Heap, Computational Biology Facility, UoL
* Based on original scripts of Sara Silva Pereira, Institute of Infection and Global Health, UoL
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
"""
import subprocess
import pandas as pd
import re
import os
import sys
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
pList = ['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'P9', 'P10', 'P11', 'P12', 'P13', 'P14', 'P15']
quietString = "" #"">> Vap_log.txt 2>&1"
def transcriptMapping(inputname, strain, forwardFN, reverseFN):
    # where is our Reference data -
    dir_path = os.path.dirname(os.path.realpath(__file__))
    refName = dir_path + "/data/Reference/Tc148"  # default
    if strain == "Tc148":
        refName = dir_path + "/data/Reference/Tc148"
    if strain == "IL3000":
        refName = dir_path + "/data/Reference/IL3000"
    # argString = "bowtie2 -x Reference/IL3000 -1 data/"+forwardFN+" -2 data/"+reverseFN+" -S "+inputname+".sam"  # >log.txt
    # argString = "bowtie2 -x Reference/Tc148 -1 data/"+forwardFN+" -2 data/"+reverseFN+" -S "+inputname+".sam"  # >log.txt
    argString = "bowtie2 -x " + refName + " -1 " + forwardFN + " -2 " + reverseFN + " -S " + inputname + ".sam" + quietString  # >log.txt
    # print(argString)
    returncode = subprocess.call(argString, shell=True)
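# For example, with inputname='MySample', strain='Tc148' and paired read files
# 'reads_1.fastq'/'reads_2.fastq' (hypothetical names), the command built above is:
#   bowtie2 -x <script_dir>/data/Reference/Tc148 -1 reads_1.fastq -2 reads_2.fastq -S MySample.sam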
def processSamFiles(inputname):
    # debug: use a mapping sam file we have already found
    # dir_path = os.path.dirname(os.path.realpath(__file__))
    # bugName = dir_path+"/data/T_Test"  # default
    cur_path = os.getcwd()
    samName = cur_path + "/" + inputname
    # argString = "samtools view -bS "+bugName+" > "+inputname+".bam"
    argString = "samtools view -bS " + inputname + ".sam > " + samName + ".bam" + quietString
    # print(argString)
    returncode = subprocess.call(argString, shell=True)
    # argString = "samtools sort "+bugName+" -o "+inputname+".sorted"
    argString = "samtools sort " + samName + ".bam -o " + samName + ".sorted" + quietString
    # print("argstring = "+argString)
    returncode = subprocess.call(argString, shell=True)
    # argString = "samtools index "+bugName+".sorted "+inputname+".sorted.bai"
    argString = "samtools index " + samName + ".sorted " + samName + ".sorted.bai" + quietString
    # print("argstring = " + argString)
    returncode = subprocess.call(argString, shell=True)
def transcriptAbundance(inputname, strain):
    dir_path = os.path.dirname(os.path.realpath(__file__))
    refName = dir_path + "/data/Reference/ORFAnnotation.gtf"  # default
    if strain == "Tc148":
        refName = dir_path + "/data/Reference/ORFAnnotation.gtf"
    if strain == "IL3000":
        refName = dir_path + "/data/Reference/IL3000.gtf"
    # argString = "cufflinks -G Reference/IL3000.gtf -o "+inputname+".cuff -u -p 8 "+inputname+".sorted"
    # argString = "cufflinks -G Reference/ORFAnnotation.gtf -o "+inputname+".cuff -u -p 8 "+inputname+".sorted"
    argString = "cufflinks -q -G " + refName + " -o " + inputname + ".cuff -u -p 8 " + inputname + ".sorted" + quietString
    returncode = subprocess.call(argString, shell=True)
def convertToFasta(inputName, strain):  # equivalent to Sara's awk script
    dir_path = os.path.dirname(os.path.realpath(__file__))
    refName = dir_path + "/data/Reference/ORFAnnotation.gtf"  # default
    if strain == "Tc148":
        refName = dir_path + "/data/Reference/148_prot.fasta"
    if strain == "IL3000":
        refName = dir_path + "/data/Reference/IL3000_prot.fasta"
    cuff_df = pd.read_csv(inputName + ".cuff/genes.fpkm_tracking", sep='\t')
    cuff_df = cuff_df[(cuff_df['FPKM'] > 0)]
    cuff_df.to_csv("cuffTest.csv")
    gene_id_List = cuff_df['gene_id'].tolist()
    # print(gene_id_List)
    # print("Found from 8880="+str(found))
    # need to load in IL3000_prot.fasta
    # for each line with >TcIL3000_1_1940
    # search within cuff_df[gene_id] for a match
    # add it to the outfile (it needs to be saved, as it is used by hmmer later)
    number = 0
    all = 0
    with open(inputName + "_6frame.fas", 'w') as outfile:
        ref = open(refName, 'r')
        # ref = open(r"Reference/IL3000_prot.fasta", 'r')
        n = 0
        line = ref.readline()
        while line:
            if line[0] == '>':
                all = all + 1
                ln = line[1:]  # remove >
                ln = ln.rstrip()  # remove \n, \r etc
                # print(ln)
                if ln in gene_id_List:
                    number = number + 1
                    outfile.write(line)
                    line = ref.readline()
                    if line:
                        # copy sequence lines until the next header (or end of file)
                        while line[0] != '>':
                            outfile.write(line)
                            line = ref.readline()
                            if not line:
                                break
                else:
                    line = ref.readline()
            else:
                line = ref.readline()
        ref.close()
    print(str(len(gene_id_List)) + ":" + str(number) + " from " + str(all))
    return cuff_df
def HMMerMotifSearch(name, strain, cuff_df):
    motifs = ['1', '2a', '2b', '3', '4a', '4b', '4c', '5', '6', '7', '8a', '8b', '9a', '9b',
              '9c', '10a', '10b', '11a', '11b', '12', '13a', '13b', '13c', '13d', '14', '15a', '15b', '15c']
    dir_path = os.path.dirname(os.path.realpath(__file__))
    phylopath = dir_path + "/data/Motifs/Phylotype"
    lineCounts = []
    compoundList = []
    for m in motifs:
        argString = "hmmsearch " + phylopath + m + ".hmm " + name + "_6frame.fas > Phy" + m + ".out"
        print(argString)
        subprocess.call(argString, shell=True)
        hmmResult = open("Phy" + m + ".out", 'r')
        regex = r"Tc148[0-9]{1,8}"
        if strain == "Tc148":
            regex = r"Tc148[0-9]{1,8}"
        if strain == "IL3000":
            regex = r"TcIL3000_[0-9]{1,4}_[0-9]{1,5}"
        n = 0
        outList = []
        for line in hmmResult:
            match = re.search(regex, line)  # renamed from 'm' to avoid shadowing the motif loop variable
            if match:
                outList.append("" + match.group())
                n += 1
            if re.search(r"inclusion", line):
                print("inclusion threshold reached")
                break
        compoundList.append(outList)
        lineCounts.append(n)
        hmmResult.close()
    # print(lineCounts)
    # print(cuff_df)
    # concatGroups folds the 28 motif sub-lists back into the 15 phylotypes
    # (e.g. '2a' and '2b' merge into P2); countList/weightList then carry one
    # entry per phylotype plus a final total.
    concatGroups = [1, 2, 1, 3, 1, 1, 1, 2, 3, 2, 2, 1, 4, 1, 3]
    countList = []
    weightList = []
    totalCount = 0
    totalWeight = 0
    for c in concatGroups:
        a = []
        for n in range(0, c):
            a = a + compoundList.pop(0)
        t = set(a)
        countList.append(len(t))
        wa = 0
        for w in t:
            wt = cuff_df.loc[cuff_df['gene_id'] == w, 'FPKM'].iloc[0]
            # print(w)
            # print(wt)
            wa = wa + wt
        weightList.append(wa)
        totalWeight += wa
        totalCount += len(t)
    countList.append(totalCount)
    weightList.append(totalWeight)
    # print(countList)
    # print("--------")
    # print(weightList)
    # print("--------")
    return countList, weightList
def relativeFrequencyTable(countList, name, htmlresource):
    relFreqList = []
    c = float(countList[15])
    for i in range(0, 15):
        relFreqList.append(countList[i] / c)
    data = {'Phylotype': pList, 'Relative Frequency': relFreqList}
    relFreq_df = pd.DataFrame(data)
    j_fname = htmlresource + "/" + name + "_t_relative_frequency.csv"
    relFreq_df.to_csv(j_fname)
    return relFreqList  # 0-14 = p1-p15 counts, [15] = total counts


def weightedFrequencyTable(countList, name, htmlresource):
    relFreqList = []
    c = float(countList[15])
    for i in range(0, 15):
        relFreqList.append(countList[i] / c)
    data = {'Phylotype': pList, 'Weighted Frequency': relFreqList}
    relFreq_df = pd.DataFrame(data)
    j_fname = htmlresource + "/" + name + "_t_weighted_frequency.csv"
    relFreq_df.to_csv(j_fname)
    return relFreqList  # 0-14 = p1-p15 counts, [15] = total counts
def createStackedBar(name, freqList, strain, pdf, html_resource):
    palette = ["#0000ff", "#6495ed", "#00ffff", "#caff70",
               "#228b22", "#528b8b", "#00ff00", "#a52a2a",
               "#ff0000", "#ffff00", "#ffa500", "#ff1493",
               "#9400d3", "#bebebe", "#000000", "#ff00ff"]
    VAP_148 = [0.072, 0.032, 0.032, 0.004, 0.007,
               0.005, 0.202, 0.004, 0.006, 0.014,
               0.130, 0.133, 0.054, 0.039, 0.265]
    VAP_IL3000 = [0.073, 0.040, 0.049, 0.018, 0.060,
                  0.055, 0.054, 0.025, 0.012, 0.060,
                  0.142, 0.100, 0.061, 0.078, 0.172]
    # the hand-picked palette above is replaced by the tab20 colour map
    cmap = plt.cm.get_cmap('tab20')
    palette = [cmap(i) for i in range(cmap.N)]
    if strain == "Tc148":
        VAPtable = VAP_148
        VAPname = 'Tc148\nGenome VAP'
    if strain == "IL3000":
        VAPtable = VAP_IL3000
        VAPname = 'IL3000\nGenome VAP'
    width = 0.35  # the width of the bars: can also be len(x) sequence
    plots = []
    fpos = 0
    vpos = 0
    for p in range(0, 15):
        tp = plt.bar(0, freqList[p], width, color=palette[p], bottom=fpos)
        fpos += freqList[p]
        tp = plt.bar(1, VAPtable[p], width, color=palette[p], bottom=vpos)
        vpos += VAPtable[p]
        plots.append(tp)
    plt.xticks([0, 1], [name, VAPname])
    plt.legend(plots[::-1], ['p15', 'p14', 'p13', 'p12', 'p11', 'p10', 'p9', 'p8', 'p7', 'p6', 'p5', 'p4', 'p3', 'p2', 'p1'])
    title = "Figure Legend: The transcriptomic Variant Antigen Profile of $\itTrypanosoma$ $\itcongolense$ estimated as phylotype " \
            "proportion adjusted for transcript abundance and the reference genomic Variant Antigen Profile. " \
            "\nData was produced with the 'Variant Antigen Profiler' (Silva Pereira et al., 2019)."
    # plt.title(title, wrap="True")
    # plt.text(-0.2, -0.05, title, va="top", transform=ax.transAxes, wrap="True")
    plt.text(-0.3, -0.15, title, va="top", wrap="True")
    plt.tight_layout(pad=1.5)
    plt.subplots_adjust(bottom=0.3, top=0.99, left=0.125, right=0.9, hspace=0.2, wspace=0.2)
    plt.savefig(html_resource + "/stackedbar.png")
    if pdf == 'PDF_Yes':
        plt.savefig(html_resource + "/stackedbar.pdf")
    # plt.show()
def createHTML(name, htmlfn, htmlresource, freqList, weightList):
    # assumes imgs are heatmap.png, dheatmap.png, vapPCA.png and already in htmlresource
    htmlString = r"<html><title>T.congolense VAP</title><body><div style='text-align:center'><h2><i>Trypanosoma congolense</i> Variant Antigen Profile</h2><h3>"
    htmlString += name
    htmlString += r"<br>Transcriptomic Analysis</h3></p>"
    htmlString += "<p style = 'margin-left:20%; margin-right:20%'>Table Legend: Variant Antigen Profiles of a transcriptome of <i>Trypanosoma congolense</i> estimated as phylotype proportion. " \
                  "Weighted frequency refers to the phylotype proportion based on transcript abundance. " \
                  "Data was produced with the 'Variant Antigen Profiler' (Silva Pereira et al., 2019).</p> "
    htmlString += r"<style> table, th, tr, td {border: 1px solid black; border-collapse: collapse;}</style>"
    htmlString += r"<table style='width:50%;margin-left:25%;text-align:center'><tr><th>Phylotype</th><th>Relative Frequency</th><th>Weighted Frequency</th></tr>"
    tabString = ""
    # flesh out the table with the computed values
    for i in range(0, 15):
        f = format(freqList[i], '.4f')
        w = format(weightList[i], '.4f')
        tabString += "<tr><td>phy" + str(i + 1) + "</td><td>" + f + "</td><td>" + w + "</td></tr>"
    htmlString += tabString + "</table><br><br><br><br><br>"
    htmlString += r"<p> <h3>Stacked Bar chart of Phylotype Frequency</h3> The 'weighted' relative frequency of each phylotype alongside the VAP of the selected strain.</p>"
    imgString = r"<img src = 'stackedbar.png' alt='Stacked bar chart of phylotype variation' style='max-width:100%'><br><br>"
    htmlString += imgString
    # htmlString += r"<p><h3>The Deviation Heat Map and Dendogram</h3>The phylotype variation expressed as the deviation from your sample mean compared to the model dataset</p>"
    # imgString = r"<img src = 'dheatmap.png' alt='Deviation Heatmap' style='max-width:100%'><br><br>"
    # htmlString += imgString
    # htmlString += r"<p><h3>The Variation PCA plot</h3>PCA analysis corresponding to absolute variation. Colour coded according to location</p>"
    # imgString = r"<img src = 'vapPCA.png' alt='PCA Analysis' style='max-width:100%'><br><br>"
    # htmlString += imgString + r"</div></body></html>"
    htmlString += r"</div></body></html>"  # close the document (the closing tags used to live on the commented-out line above)
    with open(htmlfn, "w") as htmlfile:
        htmlfile.write(htmlString)
#argdict = {'name':2, 'pdfexport': 3, 'strain': 4, 'forward': 5, 'reverse': 6, 'html_file': 7, 'html_resource': 8}
def transcriptomicProcess(args, dict):
    transcriptMapping(args[dict['name']], args[dict['strain']], args[dict['forward']], args[dict['reverse']])  # uses bowtie
    processSamFiles(args[dict['name']])  # uses samtools
    transcriptAbundance(args[dict['name']], args[dict['strain']])  # uses cufflinks -> ?.cuff/*.*
    cuff_df = convertToFasta(args[dict['name']], args[dict['strain']])
    countList, weightList = HMMerMotifSearch(args[dict['name']], args[dict['strain']], cuff_df)
    relFreqList = relativeFrequencyTable(countList, args[dict['name']], args[dict['html_resource']])
    relWeightList = weightedFrequencyTable(weightList, args[dict['name']], args[dict['html_resource']])
    createStackedBar(args[dict['name']], relWeightList, args[dict['strain']], args[dict['pdfexport']], args[dict['html_resource']])
    createHTML(args[dict['name']], args[dict['html_file']], args[dict['html_resource']], relFreqList, relWeightList)
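# A minimal sketch of how transcriptomicProcess could be invoked, following the
# argdict comment above (the file names here are hypothetical, not part of the
# pipeline itself):
#   args = ['prog', 'sub', 'T_Test', 'PDF_Yes', 'Tc148', 'fwd.fastq',
#           'rev.fastq', 'results/t_test.html', 'results']
#   transcriptomicProcess(args, {'name': 2, 'pdfexport': 3, 'strain': 4,
#                                'forward': 5, 'reverse': 6,
#                                'html_file': 7, 'html_resource': 8})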
if __name__ == "__main__":
#print("Commencing Transcript Mapping")
#transcriptMapping("T_Test", "Transcripts.1","Transcripts.2")
#print("Processimg Sam Files")
#processSamFiles("T_Test")
#print("Assessing Transcript Abundance")
#transcriptAbundance("T_Test")
#print ("Converting to Fasta Subset")
#cuff_df = convertToFasta("T_Test")
#print("Commencing HMMer search")
#countList, weightList = HMMerMotifSearch("T_Test",cuff_df)
#relativeFrequencyTable(countList,'T_Test')
#weightedFrequencyTable(weightList,'T_Test')
relFreqList = [0.111842105,0.059210526,0.026315789,0.013157895,
0.006578947,0.013157895,0.032894737,0.019736842,
0.039473684,0.046052632,0.217105263,0.065789474,
0.151315789,0.059210526,0.138157895]
relWeightList = [0.07532571,0.05900545,0.009601452,0.042357532,0.01236219,0.001675663,0.04109726,
0.097464248,0.057491666,0.05826875,0.279457473,0.070004772,0.065329007,0.085361298,0.045197529]
createStackedBar('T_Test',relWeightList, 'Tc148','PDF_Yes','results')
createHTML("t_test","results/t_test.html","results",relFreqList,relWeightList)
| 44.514451 | 195 | 0.617907 | 1,965 | 15,402 | 4.775573 | 0.280407 | 0.019608 | 0.012894 | 0.017263 | 0.28101 | 0.253197 | 0.231671 | 0.211317 | 0.192349 | 0.145247 | 0 | 0.072558 | 0.224192 | 15,402 | 345 | 196 | 44.643478 | 0.712779 | 0.243085 | 0 | 0.22807 | 0 | 0.026316 | 0.23681 | 0.057343 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04386 | false | 0 | 0.030702 | 0 | 0.092105 | 0.013158 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7144cdbf12d2350acabc972907aa336bd9391ec1 | 442 | py | Python | scale/node/migrations/0003_node_is_paused_errors.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 121 | 2015-11-18T18:15:33.000Z | 2022-03-10T01:55:00.000Z | scale/node/migrations/0003_node_is_paused_errors.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 1,415 | 2015-12-23T23:36:04.000Z | 2022-01-07T14:10:09.000Z | scale/node/migrations/0003_node_is_paused_errors.py | kaydoh/scale | 1b6a3b879ffe83e10d3b9d9074835a4c3bf476ee | [
"Apache-2.0"
] | 66 | 2015-12-03T20:38:56.000Z | 2020-07-27T15:28:11.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):

    dependencies = [
        ('node', '0002_node_pause_reason'),
    ]

    operations = [
        migrations.AddField(
            model_name='node',
            name='is_paused_errors',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
    ]
| 21.047619 | 53 | 0.606335 | 42 | 442 | 6.095238 | 0.761905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015773 | 0.282805 | 442 | 20 | 54 | 22.1 | 0.791798 | 0.047511 | 0 | 0 | 0 | 0 | 0.109785 | 0.052506 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
852e97bba32d24a91db45aae8147ee20bfee4935 | 3,482 | py | Python | LineDetect/videoLineDet.py | RonellBr/LaneDetection | 349e5c75bee39c3006fcf206565915fe7493e796 | [
"MIT"
] | null | null | null | LineDetect/videoLineDet.py | RonellBr/LaneDetection | 349e5c75bee39c3006fcf206565915fe7493e796 | [
"MIT"
] | null | null | null | LineDetect/videoLineDet.py | RonellBr/LaneDetection | 349e5c75bee39c3006fcf206565915fe7493e796 | [
"MIT"
] | null | null | null | ################################################################
# Author: Ronell Bresler
# Module: VideoLineDetect.py
#
#
# References:
# https://www.analyticsvidhya.com/blog/2020/05/tutorial-real-time-lane-detection-opencv/
# https://towardsdatascience.com/tutorial-build-a-lane-detector-679fd8953132
# https://medium.com/computer-car/udacity-self-driving-car-nanodegree-project-1-finding-lane-lines-9cd6a846c58c
# https://campushippo.com/lessons/detect-highway-lane-lines-with-opencv-and-python-21438a3e2
# https://www.youtube.com/watch?v=G0cHyaP9HaQ
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html
################################################################
import cv2
import matplotlib.pyplot as plt
import numpy as np
class Inputfile:
    def __init__(self, cap, height, width, frame):
        self.cap = cap
        self.height = height
        self.width = width
        self.frame = frame

def main():
    inputfile = Inputfile(cv2.VideoCapture('SampleIMG/gmod2.mp4'), 0, 0, 0)
    while inputfile.cap.isOpened():
        ret, frame = inputfile.cap.read()
        if not ret:  # stop cleanly when the video runs out of frames
            break
        inputfile.frame = frame
        inputfile.height = inputfile.frame.shape[0]
        inputfile.width = inputfile.frame.shape[1]
        frame1 = One_frame(inputfile)
        cv2.imshow('frame', frame1)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    inputfile.cap.release()  # fixed: the original called `cap.release()`, which is undefined here
    cv2.destroyAllWindows()
################################################################
def One_frame(inputfile):
    region_of_interest_vertices = Set_region_of_interest_vertices(inputfile.height, inputfile.width)
    # Canny filter
    canny_edges = Canny_edge_detector(inputfile.frame)
    # Crop img with roi
    cropped_image = Region_of_interest(canny_edges, np.array([region_of_interest_vertices], np.int32), inputfile.height, inputfile.width)
    lines = cv2.HoughLinesP(cropped_image,
                            rho=6,
                            theta=np.pi/180,
                            threshold=160,
                            lines=np.array([]),
                            minLineLength=40,
                            maxLineGap=25)
    return Draw_lines(inputfile.frame, lines)
################################################################
def Canny_edge_detector(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    # pass the blurred image to Canny; the original passed `gray`, leaving
    # `blur` unused, which looks like a slip given the blur is computed above
    canny_image = cv2.Canny(blur, 100, 200)
    return canny_image
################################################################
def Region_of_interest(img, vertices, height, width):
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, vertices, 255)
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
################################################################
def Draw_lines(img, lines):
    color = [0, 255, 0]  # green
    thickness = 10
    if lines is None:  # HoughLinesP returns None when no segments are found
        return img
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
    return img
################################################################
def Set_region_of_interest_vertices(height, width):
    region_of_interest_vertices = [
        (0, height),
        (round(width/1.9), round(height/1.9)),
        (width, height)
    ]
    return region_of_interest_vertices
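# For illustration, assuming a hypothetical 1280x720 frame, the ROI above is
# the triangle [(0, 720), (674, 379), (1280, 720)]: the lower part of the
# view where lane lines normally appear.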
if __name__ == "__main__":
main() | 31.654545 | 138 | 0.553418 | 371 | 3,482 | 5.02965 | 0.420485 | 0.034298 | 0.068596 | 0.07717 | 0.028939 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037937 | 0.220276 | 3,482 | 110 | 139 | 31.654545 | 0.649355 | 0.178059 | 0 | 0 | 0 | 0 | 0.014404 | 0 | 0 | 0 | 0.001746 | 0 | 0 | 1 | 0.116667 | false | 0 | 0.05 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85320c46304d80fec430c5914f3f698f8524a178 | 3,407 | py | Python | code/app.py | annotation/app-uruk | aee4ed0c3fd574251f8b4eb9169705e8ac26bf95 | [
"MIT"
] | null | null | null | code/app.py | annotation/app-uruk | aee4ed0c3fd574251f8b4eb9169705e8ac26bf95 | [
"MIT"
] | null | null | null | code/app.py | annotation/app-uruk | aee4ed0c3fd574251f8b4eb9169705e8ac26bf95 | [
"MIT"
] | null | null | null | import types
from tf.advanced.helpers import dh
from tf.advanced.find import loadModule
from tf.advanced.app import App
def transform_prime(app, n, p):
    return ("'" * int(p)) if p else ""

def transform_ctype(app, n, t):
    if t == "uncertain":
        return "?"
    elif t == "properName":
        return "="
    elif t == "supplied":
        return ">"
    else:
        return ""

def transform_atf(app, n, a):
    return app.atfFromSign(n, flags=True)
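# For illustration (with a hypothetical node n, which both functions ignore):
# transform_prime(app, n, "2") yields "''" and
# transform_ctype(app, n, "uncertain") yields "?".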
class TfApp(App):
    def __init__(app, *args, silent=False, **kwargs):
        app.transform_ctype = types.MethodType(transform_ctype, app)
        app.transform_prime = types.MethodType(transform_prime, app)
        app.transform_atf = types.MethodType(transform_atf, app)
        atf = loadModule("atf", *args)
        atf.atfApi(app)
        app.atf = atf
        super().__init__(*args, silent=silent, **kwargs)
        app.image = loadModule("image", *args)
        app.image.getImagery(app, silent, checkout=kwargs.get("checkout", ""))
        app.reinit()

    def reinit(app):
        customMethods = app.customMethods
        customMethods.afterChild.clear()
        customMethods.afterChild.update(quad=app.getOp)
        customMethods.plainCustom.clear()
        customMethods.plainCustom.update(
            sign=app.plainAtfType, quad=app.plainAtfType, cluster=app.plainAtfType,
        )
        customMethods.prettyCustom.clear()
        customMethods.prettyCustom.update(
            case=app.caseDir, cluster=app.clusterBoundaries, comments=app.commentsCls
        )

    def cdli(app, n, linkText=None, asString=False):
        (nType, objectType, identifier) = app.image.imageCls(app, n)
        if linkText is None:
            linkText = identifier
        result = app.image.wrapLink(linkText, objectType, "main", identifier)
        if asString:
            return result
        else:
            dh(result)

    # PRETTY HELPERS

    def getGraphics(app, isPretty, n, nType, outer):
        api = app.api
        F = api.F
        E = api.E
        result = ""
        isOuter = outer or (all(F.otype.v(parent) != "quad" for parent in E.sub.t(n)))
        if isOuter:
            width = "2em" if nType == "sign" else "4em"
            height = "4em" if nType == "quad" else "6em"
            theGraphics = app.image.getImages(
                app,
                n,
                kind="lineart",
                width=width,
                height=height,
                _asString=True,
                withCaption=False,
                warning=False,
            )
            if theGraphics:
                result = f"<div>{theGraphics}</div>" if isPretty else f" {theGraphics}"
        return result

    def lineart(app, ns, key=None, asLink=False, withCaption=None, **options):
        return app.image.getImages(
            app,
            ns,
            kind="lineart",
            key=key,
            asLink=asLink,
            withCaption=withCaption,
            **options,
        )

    def photo(app, ns, key=None, asLink=False, withCaption=None, **options):
        return app.image.getImages(
            app,
            ns,
            kind="photo",
            key=key,
            asLink=asLink,
            withCaption=withCaption,
            **options,
        )

    def imagery(app, objectType, kind):
        return set(app._imagery.get(objectType, {}).get(kind, {}))
| 28.872881 | 87 | 0.561785 | 361 | 3,407 | 5.249307 | 0.301939 | 0.029551 | 0.022164 | 0.031662 | 0.134037 | 0.134037 | 0.134037 | 0.134037 | 0.081266 | 0.081266 | 0 | 0.001729 | 0.321104 | 3,407 | 117 | 88 | 29.119658 | 0.817553 | 0.004109 | 0 | 0.223404 | 0 | 0 | 0.039811 | 0.007078 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106383 | false | 0 | 0.042553 | 0.053191 | 0.276596 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85322edb1455b94f135f0f46c6eb2897360629a3 | 12,632 | py | Python | shunt/hmap/hmap.py | velezj/project-manager | 92e28e5718ca1302f6da0cf8b3d4a3bb5a1a8a72 | [
"MIT"
] | null | null | null | shunt/hmap/hmap.py | velezj/project-manager | 92e28e5718ca1302f6da0cf8b3d4a3bb5a1a8a72 | [
"MIT"
] | null | null | null | shunt/hmap/hmap.py | velezj/project-manager | 92e28e5718ca1302f6da0cf8b3d4a3bb5a1a8a72 | [
"MIT"
] | null | null | null | import logging
logger = logging.getLogger( __name__ )
import copy
import tempfile
import os
import jinja2
import yaml
##
# Interface functions for Hierarchical Maps (hmaps),
# which are just dictionaries-of-dictionaries :)
TEMPLATE_HANDLEBAR_START = "{{"
TEMPLATE_HANDLEBAR_END = "}}"
JINJA_VARIABLE_KEY = "_"
##============================================================================
##
# Returns true iff the given object is a structured key with
# given delimiter
def is_structured_key( x, delim='/' ):
    return isinstance( x, str ) and delim in x
##============================================================================
##
# Convert from a structured key to a path.
# A structured key is just a delimited single-string key
# much like a file system path or url :)
def structured_key_to_path( sk, delim='/' ):
    def _numerate(x):
        try:
            return int(x)
        except:
            return x
    return list(map(_numerate, sk.split( delim )))
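# Example: structured_key_to_path( "a/b/0" ) returns ['a', 'b', 0];
# purely numeric components are converted to ints.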
##============================================================================
##
# Take a path or a structured key and return a path
def ensure_path( sk_or_path, delim='/' ):
    if isinstance( sk_or_path, str ):
        return structured_key_to_path( sk_or_path, delim=delim )
    return sk_or_path
##============================================================================
##
# Traverse a hierarchical map (dict of dict) structure with a path
# (a list of keys).
# This will return the parent dictionary and key for the last
# item in the path or None,None if the path is not valid
#
# This will *change* the given hmap (potentially) since it will
# *create* the hmap structure down the path if it was not
# previously created in the hmap
def hmap_probe( hmap, path ):
    path = ensure_path( path )
    if path is None or hmap is None or len(path) < 1:
        return None, None
    if len(path) == 1:
        return hmap, path[0]
    if path[0] not in hmap:
        hmap[ path[0] ] = {}
    return hmap_probe( hmap[ path[0] ], path[1:] )
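# Example: with m = {}, hmap_probe( m, 'a/b/c' ) returns ({}, 'c') and, as a
# side effect, leaves m == {'a': {'b': {}}} (intermediate levels are created).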
##============================================================================
##
# Get the value for a path from an hmap
# Or returns the given default value.
# This may change the given hmap by probing it.
def hmap_get( hmap, path, default ):
    node, key = hmap_probe( hmap, path )
    if node is None or key not in node:
        return default
    return node[ key ]
##============================================================================
##
# Sets the value of the given path in an hmap to the
# given value.
# This will create the path layers if need be
def hmap_set( hmap, path, value ):
    node, key = hmap_probe( hmap, path )
    if node is None:
        raise ValueError( "Could not probe hmap, returned None. This usually means that the hmap itself was None!" )
    old = node.get( key, None )
    node[ key ] = value
    return old
##============================================================================
##
# returns true if the given path has a set value in the given hmap
def hmap_has_path( hmap, path ):
    # fixed: the original passed `hmap_probe` itself as the first argument
    node, key = hmap_probe( hmap, path )
    return node is not None and key in node
##============================================================================
##============================================================================
##
# Given an hmap that *may* have structured keys as keys,
# returns a new hmap which has the structured keys resolved into
# an actual structure in the hmap (so no more keys are structured keys)
#
# The resulting hmap *may* share structure with the input hmap
def resolve_structured_keys( hmap, delim='/' ):
    # ok, create a new dict as the base
    base = {}
    # now, let's check each key of the given hmap and resolve it if it is a
    # structured key, otherwise just use the value of the input hmap
    for key, value in hmap.items():
        # recurse into the value regardless of the key if it is an hmap node
        if isinstance( value, dict ):
            value = resolve_structured_keys( value, delim=delim )
        # nothing to resolve for this key, just use the value
        if not is_structured_key( key ):
            base[ key ] = value
        else:
            # resolve the key
            path = ensure_path( key )
            temp_map = base
            for p in path[:-1]:
                # setdefault (rather than the original unconditional assignment)
                # keeps sibling structured keys sharing a prefix from clobbering
                # each other
                temp_map = temp_map.setdefault( p, {} )
            # ok, last part of path gets the value
            temp_map[path[-1]] = value
    # return the resolved map
    return base
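# Example: resolve_structured_keys( { 'a/b': 1, 'c': 2 } ) returns
# { 'a': { 'b': 1 }, 'c': 2 }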
##============================================================================
##============================================================================
##============================================================================
##
# Returns true iff the given object does not have any free variables
# (which are template {{ }} handlebar slots) in it
def has_free_variables( x ):
    # note: the original called any(f, x), which is a TypeError at runtime;
    # per the comment above this returns True only when *no* handlebars remain,
    # so containers require every element to be handlebar-free
    if isinstance( x, (list,tuple) ):
        return all( map( has_free_variables, x ) )
    if isinstance( x, dict ):
        return all( map( has_free_variables, x.items() ) )
    s = str(x)
    return TEMPLATE_HANDLEBAR_START not in s and TEMPLATE_HANDLEBAR_END not in s
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##
# Resolves the free variables within the hmap.
# This does a global resolve on all the free variables since
# the templates are treated globally
#
# Returns a new parse state with given parse state as parent
def resolve_free_variables( parse_state, template_context ):
    # first, translate any variable blocks into jinja set statements
    # for use within the hmap
    hmap_with_jinja_vars = add_jinja_variable_nodes( parse_state.hmap, template_context )
    # write out the resulting hmap's YAML
    # (indentation reconstructed: everything that references f.name stays
    # inside the tempfile context)
    with tempfile.NamedTemporaryFile( mode='w', prefix='shunt-pre-resolve_') as f:
        f.write( yaml.dump( hmap_with_jinja_vars ) )
        f.flush()
        logger.info( "dumping pre-resolve into '{0}'".format( f.name ) )
        # ok, load in the jinja template
        template, render_context = template_context.load_intermediate_template( f.name )
        # now render the template
        template_string = template.render(render_context)
        opened_file = None
        with open( f.name + ".rendered", 'w' ) as wf:
            opened_file = f.name + ".rendered"
            wf.write( template_string )
    # ok, reparse the resulting yaml
    # (parse_yaml is expected to be provided elsewhere in this package)
    try:
        new_parse_state = parse_yaml( opened_file, parent=parse_state )
    except Exception as e:
        msg = "Unable to re-load rendered template as YAML. Rendering at '{0}'".format( opened_file )
        raise RuntimeError( msg ) from e
    # ok, remove rendered temporary file
    os.remove( opened_file )
    # return the resulting parse
    return new_parse_state
##============================================================================
##============================================================================
##============================================================================
##
# Given an hmap, returns a new hmap with any 'vars' nodes
# having an additional '_' key with jinja template code to
# actually set the variables for jinja templates
def add_jinja_variable_nodes( hmap, template_context ):
    # deal with non-dictionaries
    if not isinstance( hmap, dict ):
        # lists and tuples are just recursed over, element by element :)
        if isinstance( hmap, (list,tuple) ):
            return type(hmap)( map( lambda x: add_jinja_variable_nodes(x,template_context), hmap ) )
        # everything else is an atom and cannot have vars
        return hmap
    # new structure to return
    new_hmap = copy.copy( hmap )
    # ok, grab any immediate variables
    if 'vars' in hmap:
        # create jinja set equivalents
        accum = hmap['vars']
        jinja_sets = []
        for (key,value) in accum.items():
            jinja_sets.append(
                "{{%- set {name} = \"{value}\" -%}}".format(
                    name = discard_handlebars( key ),
                    value = discard_handlebars( value ) ) )
        # assign jinja sets to special key
        new_hmap[ JINJA_VARIABLE_KEY ] = "\n".join( jinja_sets )
    # recurse to children
    for (key, value) in hmap.items():
        if key == 'vars':
            continue
        new_hmap[ key ] = add_jinja_variable_nodes( value, template_context )
    # return new structure
    return new_hmap
##============================================================================
##
# Given a string, discards any enclosing handlebars (first order)
def discard_handlebars( x ):
    if not isinstance( x, str ):
        return x
    find_start_idx = x.find( TEMPLATE_HANDLEBAR_START )
    res = x
    if find_start_idx >= 0:
        res = res[0:find_start_idx] + res[find_start_idx+len(TEMPLATE_HANDLEBAR_START):]
    find_end_idx = res.rfind( TEMPLATE_HANDLEBAR_END )
    if find_end_idx >= 0:
        res = res[0:find_end_idx] + res[find_end_idx+len(TEMPLATE_HANDLEBAR_END):]
    return res
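# Example: discard_handlebars( '{{ name }}' ) returns ' name '; only the
# first '{{' and the last '}}' are removed.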
##============================================================================
##============================================================================
##============================================================================
##
# A template context allows us to load "intermediate" templates.
# This also includes the jinja Environment and loaders being used
class TemplateContext( object ):

    ##
    #
    def __init__( self,
                  environment = None,
                  context = None):
        if environment is None:
            self.environment = jinja2.Environment(
                loader = jinja2.FileSystemLoader([
                    "templates",
                    ".",
                ] )
            )
        else:
            self.environment = environment
        if context is None:
            self.context = {}
        else:
            self.context = context

    ##
    #
    def load_intermediate_template( self, template_filename ):
        with open( template_filename ) as f:
            template = self.environment.from_string( f.read() )
        context = self.context
        return template, context

DEFAULT_TEMPLATE_CONTEXT = TemplateContext()
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
##============================================================================
| 36.827988 | 116 | 0.443002 | 1,207 | 12,632 | 4.506214 | 0.22121 | 0.014709 | 0.011951 | 0.012502 | 0.072256 | 0.055709 | 0.031807 | 0.013238 | 0.013238 | 0.013238 | 0 | 0.001776 | 0.197831 | 12,632 | 342 | 117 | 36.935673 | 0.534985 | 0.518445 | 0 | 0.06338 | 0 | 0 | 0.046575 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105634 | false | 0 | 0.042254 | 0.007042 | 0.323944 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8533f45f79e26e6d7713f555d363262a5ebdca2b | 2,496 | py | Python | kur/sources/jsonl.py | greedyuser/kur | ba6588ebfa5dec66d1e462c180618cc115fd38ef | [
"Apache-2.0"
] | 867 | 2016-12-05T20:24:23.000Z | 2022-02-18T09:07:14.000Z | kur/sources/jsonl.py | greedyuser/kur | ba6588ebfa5dec66d1e462c180618cc115fd38ef | [
"Apache-2.0"
] | 90 | 2017-01-14T22:46:23.000Z | 2021-02-09T13:32:27.000Z | kur/sources/jsonl.py | greedyuser/kur | ba6588ebfa5dec66d1e462c180618cc115fd38ef | [
"Apache-2.0"
] | 135 | 2017-01-18T19:21:20.000Z | 2022-01-24T16:57:59.000Z | import linecache
import numpy
import json
from ..sources import ChunkSource
###############################################################################
class JSONLSource(ChunkSource):
    """ Data source for tensors stored in JSONL format
    """

    ###########################################################################
    def __init__(self, source, key, num_entries, *args, **kwargs):
        """ Creates a new JSONL source for file named `source`.
        """
        super().__init__(*args, **kwargs)
        self.source = source
        self.num_entries = num_entries
        self.key = key
        self.indices = numpy.arange(len(self))

    ###########################################################################
    def __iter__(self):
        """ Return an iterator to the data. Get the value (tensor) for self.key
            from each object and yield batches of these tensors.
        """
        start = 0
        while start < self.num_entries:
            end = min(self.num_entries, start + self.chunk_size)
            # linecache line numbering starts at 1
            batch = [
                json.loads(linecache.getline(self.source, i + 1).strip())[self.key]
                for i in self.indices[start:end]
            ]
            yield batch
            start = end

    ###########################################################################
    def __len__(self):
        """ Returns the total number of entries that this source can return, if
            known.
        """
        return self.num_entries

    ###########################################################################
    def shape(self):
        """ Return the shape of the tensor (excluding batch size) returned by
            this data source.
        """
        return numpy.array(json.loads(linecache.getline(self.source, 0 + 1))[self.key]).shape

    ###########################################################################
    def can_shuffle(self):
        """ This source can be shuffled.
        """
        return True

    ###########################################################################
    def shuffle(self, indices):
        """ Applies a permutation to the data.
        """
        if len(indices) > len(self):
            raise ValueError('Shuffleable was asked to apply permutation, but '
                             'the permutation is longer than the length of the data set.')
        self.indices[:len(indices)] = self.indices[:len(indices)][indices]
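# For illustration, a hypothetical line of such a JSONL file could be:
#   {"image": [0.1, 0.2, 0.3], "label": [1]}
# JSONLSource(path, 'image', num_entries) would then yield batches of the
# 'image' tensors, one list entry per line of the file.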
| 35.15493 | 93 | 0.455529 | 236 | 2,496 | 4.716102 | 0.415254 | 0.053908 | 0.050314 | 0.044924 | 0.062893 | 0.062893 | 0 | 0 | 0 | 0 | 0 | 0.002693 | 0.25601 | 2,496 | 70 | 94 | 35.657143 | 0.596661 | 0.204728 | 0 | 0 | 0 | 0 | 0.078287 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.125 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8539ad589810749d569e8c96666ae5bd84a052e5 | 2,571 | py | Python | nb2blog.py | rowanc1/nb2blog | 1c625a2727124898c4f3d9c9742feb268c554ebd | [
"MIT"
] | null | null | null | nb2blog.py | rowanc1/nb2blog | 1c625a2727124898c4f3d9c9742feb268c554ebd | [
"MIT"
] | null | null | null | nb2blog.py | rowanc1/nb2blog | 1c625a2727124898c4f3d9c9742feb268c554ebd | [
"MIT"
] | null | null | null | #!/usr/local/bin/python
import requests, argparse, p3c, os, json, subprocess, keyring
def main():
    parser = argparse.ArgumentParser(description='Upload a notebook to a gist and 3point/SimPEG blog.')
    parser.add_argument('notebook', type=str, help='The file name of the notebook.')
    parser.add_argument('-m', type=str, help='Description of the notebook.')
    args = parser.parse_args()
    jsonFile = '/'.join(p3c.__file__.split('/')[:-1] + ['nb2blog.json'])
    if os.path.exists(jsonFile):
        # `file(...)` in the original; `open` works on Python 2 and 3
        with open(jsonFile, 'r') as f:
            R = json.loads(f.read())
    else:
        f = open(jsonFile, 'w')
        f.write('{}\n')
        f.close()
        R = {}
    # Get the data ready for uploading to gist.github.com
    ipynb = open(args.notebook, 'r')
    data = {
        "description": args.m,
        "public": True,
        "files": {}
    }
    data['files'][args.notebook] = {"content": str(ipynb.read())}
    ipynb.close()
    token = keyring.get_password('3pt', 'github.gist')
    if token is None:
        raise Exception("""keyring could not find your gist token:
ipython
> import keyring
> keyring.set_password('3pt', 'github.gist', 'YOUR GITHUB TOKEN')
Go to github to create one if you haven't made it yet (make sure you enable gist,repo,user):
https://github.com/settings/applications#personal-access-tokens
""")
    # Check if the ipynb is in the dict, and post to gist.github.com
    if args.notebook in R:
        url = R[args.notebook]['gistURL']
        resp = requests.patch("%s?access_token=%s" % (url, token), data=json.dumps(data))
    else:
        resp = requests.post("https://api.github.com/gists?access_token=%s" % token, data=json.dumps(data))
        url = resp.json()['url']
        R[args.notebook] = {"gistURL": url}
    gitResp = resp.json()
    f = open(jsonFile, 'w')
    f.write(json.dumps(R))
    f.close()
    # Convert the notebook to html
    subprocess.check_output("ipython nbconvert %s --to html --template basic" % (args.notebook.replace(' ', '\\ ')), shell=True)
    f = open(args.notebook.replace('ipynb', 'html'), 'r')
    nbhtml = f.read()
    f.close()
    subprocess.check_output("rm %s" % (args.notebook.replace(' ', '\\ ')).replace('ipynb', 'html'), shell=True)
    uid = args.notebook[:-6].lower().replace(' ', '-')
    title = args.notebook[:-6].title()
    b = p3c.Blog.new({'uid': uid, "content": nbhtml, "title": title, "description": args.m, 'setTags': 'simpeg'})

if __name__ == "__main__":
    main()
| 34.743243 | 127 | 0.596266 | 336 | 2,571 | 4.5 | 0.392857 | 0.079365 | 0.037698 | 0.018519 | 0.085979 | 0.026455 | 0 | 0 | 0 | 0 | 0 | 0.005053 | 0.230261 | 2,571 | 73 | 128 | 35.219178 | 0.758969 | 0.064566 | 0 | 0.132075 | 0 | 0.018868 | 0.315285 | 0.011245 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018868 | false | 0.037736 | 0.037736 | 0 | 0.056604 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
853bd8589a69bf83feb568fd0e023ea150961b83 | 11,382 | py | Python | src/data_scraping.py | othmanefc/ttfl_fantasy | 6b5d4316553a5d01114218fcfbe26588de499ead | [
"CC0-1.0"
] | null | null | null | src/data_scraping.py | othmanefc/ttfl_fantasy | 6b5d4316553a5d01114218fcfbe26588de499ead | [
"CC0-1.0"
] | 6 | 2020-01-28T23:09:28.000Z | 2022-02-10T00:28:14.000Z | src/data_scraping.py | othmanefc/ttfl_fantasy | 6b5d4316553a5d01114218fcfbe26588de499ead | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import List, Dict, Any, Union, Optional, Callable, Sequence
from bs4 import BeautifulSoup, Comment, element
import pandas as pd
import re
from urllib.request import urlopen
import os
import datetime
from tqdm import tqdm as tqdm_notebook
import time
from src.constants import DATA_DIR
def get_scores(date: str, metrics: List[str]) -> pd.DataFrame:
    path_check = os.path.join(DATA_DIR, "dates", f"{date}.csv")
    if os.path.exists(path_check):
        df_games = pd.read_csv(path_check)
        return df_games
    url_parent: str = "https://www.basketball-reference.com"
    url: str = (f"https://www.basketball-reference.com/boxscores/?month="
                f"{date[4:6]}&day={date[6:8]}&year={date[0:4]}")
    soup: BeautifulSoup = BeautifulSoup(urlopen(url), "lxml")
    games: Sequence[Optional[element.Tag]] = soup.find_all(
        "div", class_="game_summary expanded nohover")
    if len(games) == 0:
        return pd.DataFrame(columns=metrics)
    df_games: List[Any] = []
    for game in tqdm_notebook(games, desc=f"Date: {date}", total=len(games)):
        summary: Dict[str, List[Any]] = {}
        # host = game.find_all('table')[1].find_all('a')[1]['href'][7:10]
        # away = game.find_all('table')[1].find_all('a')[0]['href'][7:10]
        winner: Sequence[Optional[element.Tag]] = game.find(
            "tr", class_="winner").find_all("td")
        loser: Sequence[Optional[element.Tag]] = game.find(
            "tr", class_="loser").find_all("td")
        summary["winner"] = [
            winner[0].find("a")["href"][7:10],
            int(winner[1].get_text()),
        ]
        summary["loser"] = [
            loser[0].find("a")["href"][7:10],
            int(loser[1].get_text())
        ]
        url_game: str = url_parent + game.find("a", text="Box Score")["href"]
        soup_game: BeautifulSoup = BeautifulSoup(urlopen(url_game), "lxml")
        box_score: Optional[element.Tag] = game.find("a",
                                                     text="Box Score")["href"]
        date = re.findall(r"\d\d\d\d\d\d\d\d", box_score)[0]
        for result, (side, score) in summary.items():
            game_result: Optional[element.Tag] = soup_game.find(
                "table",
                class_="sortable stats_table",
                id=f"box-{side}-game-basic")
            player_list: List[Any] = game_result.find_all("tr",
                                                          class_=None)[1:-1]
            team: List[Dict[str, Optional[Union[float, int, str]]]] = []
            for player in player_list:
                player_name: Optional[str] = player.find("th")["csk"]
                player_dict: Dict[str, Optional[Union[str, int, str]]] = {
                    "name": player_name,
                    "date": date
                }
                for metric in metrics:
                    try:
                        res: Union[str, int, float] = player.find(
                            "td", {
                                "data-stat": metric
                            }).contents[0]
                    except Exception:
                        res = 0
                    player_dict.update({metric: res})
                if result == "winner":
                    player_dict.update({
                        "result": 1,
                        "score": score,
                        "team": summary["winner"][0],
                        "opp": summary["loser"][0],
                        "opp_score": summary["loser"][1],
                    })
                if result == "loser":
                    player_dict.update({
                        "result": 0,
                        "score": score,
                        # fixed: the original set "team" to the winner here,
                        # which looks like a copy-paste slip
                        "team": summary["loser"][0],
                        "opp": summary["winner"][0],
                        "opp_score": summary["winner"][1],
                    })
                if int(str(player_dict["mp"]).split(":")[0]) >= 10:
                    team.append(player_dict)
            team_df: pd.DataFrame = pd.DataFrame(team)
            team_df["score"] = score
            df_games.append(pd.DataFrame(team_df))
    df_games_df: pd.DataFrame = pd.concat(df_games)
    if ' trb' in df_games_df.columns:
        # fixed: without columns=, rename targets the index labels
        df_games_df.rename(columns={' trb': 'trb'}, inplace=True)
    Data_scrapper.write_csv(df=df_games_df, name=date, extra_path="dates")
    return df_games_df
class Data_scrapper(object):

    def __init__(self, start: str, end: str) -> None:
        self.metrics: List[str] = [
            "mp",
            "fg",
            "fga",
            "fg_pct",
            "fg3",
            "fg3a",
            "fg3_pct",
            "ft",
            "fta",
            "ft_pct",
            "orb",
            "drb",
            " trb",  # the leading space matches the raw column name fixed up in get_scores
            "ast",
            "stl",
            "blk",
            "tov",
            "pf",
            "pts",
            "plus_minus",
        ]
        self.start: datetime.datetime = datetime.datetime.strptime(
            start, "%Y%m%d")
        self.end: datetime.datetime = datetime.datetime.strptime(end, "%Y%m%d")
        # annotation fixed: generate_time_frame returns a list of date strings
        self.timeframe: List[str] = self.generate_time_frame()

    @staticmethod
    def write_csv(df: pd.DataFrame, name: str, extra_path: str = None) -> None:
        if extra_path is not None:
            path_data: str = os.path.join(DATA_DIR, extra_path)
        else:
            path_data = os.path.join(DATA_DIR)
        if not os.path.exists(path_data):
            os.mkdir(path_data)
        full_path: str = os.path.join(path_data, f"{name}.csv")
        df.to_csv(full_path, index=False)

    def get_timeframe_data(self,
                           sleep: int = 0,
                           name: str = "default",
                           write: bool = True,
                           get_scores: Callable = get_scores) -> pd.DataFrame:
        full_time_list: List[pd.DataFrame] = []
        for date in tqdm_notebook(self.timeframe,
                                  total=len(self.timeframe),
                                  desc="Main Frame"):
            # get_scores_cached: Callable = memory1.cache(get_scores)
            # date_df: pd.DataFrame = get_scores_cached(date, self.metrics)
            date_df: pd.DataFrame = get_scores(date, self.metrics)
            full_time_list.append(date_df)
            time.sleep(sleep)
        full_time_df: pd.DataFrame = pd.concat(full_time_list, sort=True)
        if write:
            Data_scrapper.write_csv(full_time_df, name=name)
        return full_time_df

    def generate_time_frame(self) -> List[str]:
        date_range: List[str] = [
            (self.start + datetime.timedelta(days=x)).strftime("%Y%m%d")
            for x in range(0, (self.end - self.start).days + 1)
        ]
        return date_range
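    # Example: Data_scrapper('20191022', '20191025').timeframe evaluates to
    # ['20191022', '20191023', '20191024', '20191025'] (both endpoints included).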
    @staticmethod
    def get_next_games(
            date: str,
            season_year: Union[str, int]) -> List[Dict[str, Optional[str]]]:
        month: str = datetime.datetime.strptime(
            date, "%Y%m%d").strftime("%B").lower()
        url_games: str = (f"https://www.basketball-reference.com/leagues/"
                          f"NBA_{season_year}_games-{month}.html")
        print(url_games)
        soup: BeautifulSoup = BeautifulSoup(urlopen(url_games), "lxml")
        month_games: Sequence[Any] = soup.find_all("tr")
        match_ups: List[Dict[str, Optional[str]]] = []
        for month_game in month_games:
            try:
                check_date: bool = month_game.find("th")["csk"].startswith(
                    date)
            except Exception:
                continue
            if check_date:
                visitor: Optional[str] = month_game.find(
                    "td", {
                        "data-stat": "visitor_team_name"
                    }).find("a")["href"][7:10]
                home: Optional[str] = month_game.find(
                    "td", {
                        "data-stat": "home_team_name"
                    }).find("a")["href"][7:10]
                match_ups.append({"home": home, "visitor": visitor})
        return match_ups

    @staticmethod
    def get_all_players(
            team: Optional[str], date: str,
            season_year: Union[str, int]) -> List[Dict[str, Optional[str]]]:
        url: str = (f"https://www.basketball-reference.com/"
                    f"teams/{team}/{season_year}.html")
        print(url)
        soup: BeautifulSoup = BeautifulSoup(urlopen(url), "lxml")
        table_players: Optional[element.Tag] = soup.find("tbody")
        players: List[Dict[str, Optional[element.Tag]]] = []
        for player in table_players.find_all("tr"):
            name: Optional[str] = player.find("td",
                                              {"data-stat": "player"})["csk"]
            players.append({"name": name, "team": team, "date": date})
        return players

    @staticmethod
    def get_injured_players(team: Optional[str], date: str,
                            season_year: Union[str, int]) -> List:
        url: str = (f"https://www.basketball-reference.com/"
                    f"teams/{team}/{season_year}.html")
        soup: BeautifulSoup = BeautifulSoup(urlopen(url), "lxml")
        div_inj: Optional[element.Tag] = soup.find("div", id="all_injury")
        try:
            comments: Sequence[Optional[element.Tag]] = div_inj.find_all(
                string=lambda text: isinstance(text, Comment))
            comms: Optional[str] = re.sub("\n", "", comments[0]).strip()
            soup = BeautifulSoup(comms, "lxml")
            body: Optional[element.Tag] = soup.find("tbody")
            players: List[Dict[str, Optional[str]]] = []
            for player in body.find_all("tr"):
                name: Optional[str] = player.find(
                    "th", {"data-stat": "player"})["csk"]
                players.append({"name": name, "team": team, "date": date})
            return players
        except Exception:
            return list()

    @staticmethod
    def get_next_games_player(date: str,
                              season_year: Union[str, int]) -> pd.DataFrame:
        match_ups: List[Dict[str,
                             Optional[str]]] = Data_scrapper.get_next_games(
                                 date, season_year)
        all_players_list: List = []
        for match_up in match_ups:
            for i, team in enumerate(match_up.values()):
                all_players: List[Dict[
                    str, Optional[str]]] = Data_scrapper.get_all_players(
                        team, date, season_year)
                injured_players: List = Data_scrapper.get_injured_players(
                    team, date, season_year)
                injured_players_names: List = ([
                    player["name"] for player in injured_players
                ] if len(injured_players) > 0 else [])
                available_players: List = [
                    player for player in all_players
                    if player["name"] not in injured_players_names
                ]
                for player in available_players:
                    ind: int = 1 if i == 0 else 0
                    player["opp"] = list(match_up.values())[ind]
                all_players_list.extend(available_players)
        return pd.DataFrame(all_players_list)
| 41.540146 | 79 | 0.511949 | 1,283 | 11,382 | 4.381138 | 0.176929 | 0.029354 | 0.032023 | 0.027041 | 0.349582 | 0.273617 | 0.232877 | 0.178082 | 0.097136 | 0.097136 | 0 | 0.008689 | 0.352838 | 11,382 | 273 | 80 | 41.692308 | 0.754412 | 0.025391 | 0 | 0.160494 | 0 | 0.004115 | 0.092541 | 0.014702 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.041152 | 0 | 0.123457 | 0.00823 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
853d84e2e9e82207867461c37d8d12080faf1569 | 766 | py | Python | src/DataGenerator.py | nikhil-garg/CartPoleSimulation | fd778327af5fe764248b68db53a456a77e903656 | [
"MIT"
] | null | null | null | src/DataGenerator.py | nikhil-garg/CartPoleSimulation | fd778327af5fe764248b68db53a456a77e903656 | [
"MIT"
] | null | null | null | src/DataGenerator.py | nikhil-garg/CartPoleSimulation | fd778327af5fe764248b68db53a456a77e903656 | [
"MIT"
] | null | null | null | from src.CartClass import *
from src.utilis import *
from tqdm import tqdm
csv = 'data_rnn'
number_of_experiments = 10
length_of_experiment = 1e3
dt_main_simulation = dt_main_simulation_globals
track_relative_complexity = 0.5 # randomly placed points/s
track_complexity = int(dt_main_simulation*length_of_experiment*track_relative_complexity) # Total number of randomly placed points
mode = 2
MyCart = Cart()
for i in range(number_of_experiments):
    print(i)
    sleep(0.1)
    Generate_Experiment(MyCart,
                        mode=mode,
                        exp_len=length_of_experiment,
                        dt=dt_main_simulation,
                        track_complexity=track_complexity,
                        csv=csv) | 30.64 | 131 | 0.678851 | 97 | 766 | 5.061856 | 0.463918 | 0.04888 | 0.130346 | 0.077393 | 0.09776 | 0.09776 | 0 | 0 | 0 | 0 | 0 | 0.015901 | 0.261097 | 766 | 25 | 132 | 30.64 | 0.85159 | 0.082245 | 0 | 0.095238 | 0 | 0 | 0.011412 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.190476 | 0 | 0.190476 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
854189ca67276f7eea81393038f259f4f1505403 | 4,970 | py | Python | trojsten/events/migrations/0001_initial.py | MvonK/web | b701a6ea8fb6f0bdfb720e66d0a430db13db8bff | [
"MIT"
] | 5 | 2018-04-22T22:44:02.000Z | 2021-04-26T20:44:44.000Z | trojsten/events/migrations/0001_initial.py | MvonK/web | b701a6ea8fb6f0bdfb720e66d0a430db13db8bff | [
"MIT"
] | 250 | 2018-04-24T12:04:11.000Z | 2022-03-09T06:56:47.000Z | trojsten/events/migrations/0001_initial.py | MvonK/web | b701a6ea8fb6f0bdfb720e66d0a430db13db8bff | [
"MIT"
] | 8 | 2019-04-28T11:33:03.000Z | 2022-02-26T13:30:36.000Z | # -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="Event",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID", serialize=False, auto_created=True, primary_key=True
                    ),
                ),
                ("name", models.CharField(max_length=100, verbose_name="n\xe1zov")),
                ("start_time", models.DateTimeField(verbose_name="\u010das za\u010diatku")),
                ("end_time", models.DateTimeField(verbose_name="\u010das konca")),
                (
                    "registration_deadline",
                    models.DateTimeField(
                        null=True, verbose_name="deadline pre registr\xe1ciu", blank=True
                    ),
                ),
                (
                    "text",
                    models.TextField(
                        default="",
                        help_text='Obsah bude prehnan\xfd <a href="http://en.wikipedia.org/wiki/Markdown">Markdownom</a>.',
                        blank=True,
                    ),
                ),
            ],
            options={
                "ordering": ["-end_time", "-start_time"],
                "verbose_name": "akcia",
                "verbose_name_plural": "akcie",
            },
        ),
        migrations.CreateModel(
            name="EventType",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID", serialize=False, auto_created=True, primary_key=True
                    ),
                ),
                ("name", models.CharField(max_length=100, verbose_name="n\xe1zov")),
                ("is_camp", models.BooleanField(verbose_name="s\xfastredko")),
            ],
            options={"verbose_name": "typ akcie", "verbose_name_plural": "typy akci\xed"},
        ),
        migrations.CreateModel(
            name="Invitation",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID", serialize=False, auto_created=True, primary_key=True
                    ),
                ),
                (
                    "type",
                    models.SmallIntegerField(
                        default=0,
                        verbose_name="typ pozv\xe1nky",
                        choices=[
                            (0, "\xfa\u010dastn\xedk"),
                            (1, "n\xe1hradn\xedk"),
                            (2, "ved\xfaci"),
                        ],
                    ),
                ),
                ("going", models.NullBooleanField(verbose_name="z\xfa\u010dastn\xed sa")),
            ],
            options={"verbose_name": "pozv\xe1nka", "verbose_name_plural": "pozv\xe1nky"},
        ),
        migrations.CreateModel(
            name="Link",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID", serialize=False, auto_created=True, primary_key=True
                    ),
                ),
                ("title", models.CharField(max_length=100, verbose_name="titulok")),
                ("name", models.CharField(max_length=300, verbose_name="meno")),
                ("url", models.URLField(max_length=300)),
            ],
            options={"verbose_name": "odkaz", "verbose_name_plural": "odkazy"},
        ),
        migrations.CreateModel(
            name="Place",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID", serialize=False, auto_created=True, primary_key=True
                    ),
                ),
                ("name", models.CharField(max_length=100, verbose_name="n\xe1zov")),
            ],
            options={"verbose_name": "miesto akcie", "verbose_name_plural": "miesta akci\xed"},
        ),
        migrations.CreateModel(
            name="Registration",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID", serialize=False, auto_created=True, primary_key=True
                    ),
                ),
                ("name", models.CharField(max_length=100, verbose_name="n\xe1zov")),
                (
                    "text",
                    models.TextField(
                        help_text='Obsah bude prehnan\xfd <a href="http://en.wikipedia.org/wiki/Markdown">Markdownom</a>.'
                    ),
                ),
            ],
            options={
                "verbose_name": "Prihl\xe1\u0161ka",
                "verbose_name_plural": "Prihl\xe1\u0161ky",
            },
        ),
    ]
| 37.368421 | 123 | 0.423742 | 370 | 4,970 | 5.521622 | 0.313514 | 0.161527 | 0.073421 | 0.067548 | 0.503182 | 0.45815 | 0.418013 | 0.399413 | 0.399413 | 0.399413 | 0 | 0.022148 | 0.45493 | 4,970 | 132 | 124 | 37.651515 | 0.732004 | 0.004225 | 0 | 0.574803 | 0 | 0.015748 | 0.175864 | 0.004245 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.007874 | 0 | 0.031496 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85440bc2c3a337f01c193e1d5db700e9605da53f | 1,128 | py | Python | lion - white.py | Abdumajidhu/Image-Enhancement-therough-Image-Processing-Techniques | 126690319297a5ed7df99ff47797980cc525ecf3 | [
"MIT"
] | 1 | 2019-10-27T13:03:05.000Z | 2019-10-27T13:03:05.000Z | lion - white.py | Abdumajidhu/Image-Enhancement-therough-Image-Processing-Techniques | 126690319297a5ed7df99ff47797980cc525ecf3 | [
"MIT"
] | null | null | null | lion - white.py | Abdumajidhu/Image-Enhancement-therough-Image-Processing-Techniques | 126690319297a5ed7df99ff47797980cc525ecf3 | [
"MIT"
] | 1 | 2021-12-17T06:01:52.000Z | 2021-12-17T06:01:52.000Z | # import opencv
import numpy as np
import cv2
# Read image
src = cv2.imread("exercise_images/lion.jpg",0)
# Set threshold and maxValue
thresh = 25
thresh3 = 255
thresh4 = 205
thresh5 = 105
thresh2 = 155
maxValue = 255
# Basic threshold example
th, dst = cv2.threshold(src, thresh, maxValue, cv2.THRESH_BINARY);
th, dsts = cv2.threshold(src, thresh2, maxValue, cv2.THRESH_BINARY);
th, dsts1 = cv2.threshold(src, thresh3, maxValue, cv2.THRESH_BINARY);
th, dsts2 = cv2.threshold(src, thresh4, maxValue, cv2.THRESH_BINARY);
th, dsts3 = cv2.threshold(src, thresh5, maxValue, cv2.THRESH_BINARY);
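# With cv2.THRESH_BINARY every destination pixel becomes maxValue where
# src > thresh and 0 elsewhere, so the higher the threshold the sparser the
# white regions; thresh3 = 255 yields an all-black result, since no 8-bit
# pixel exceeds 255.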
improved = np.hstack((src,dsts)) #stacking images side-by-side
improvedmore = np.hstack((src,dsts)) #stacking images side-by-side
imp = np.hstack((dst,dsts)) #stacking images side-by-side
cv2.imshow('Have You of 165',dst)
cv2.imshow('Got You of 155',dsts2)
cv2.imshow('Have You of 255',dsts3)
cv2.imshow('Got You of 205',dsts1)
cv2.imshow('Have You of 100',dsts)
cv2.imwrite('doc.jpeg',improved)
cv2.imwrite('doc2.jpeg',improvedmore)
cv2.imwrite('alike.jpeg',imp)
#cv2.imshow('Image',src)
| 28.923077 | 70 | 0.711879 | 171 | 1,128 | 4.660819 | 0.333333 | 0.067754 | 0.094103 | 0.144291 | 0.368883 | 0.132999 | 0.097867 | 0.097867 | 0.097867 | 0 | 0 | 0.071875 | 0.148936 | 1,128 | 38 | 71 | 29.684211 | 0.758333 | 0.161348 | 0 | 0 | 0 | 0 | 0.137931 | 0.026696 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.08 | 0 | 0.08 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8545d557cf7ae6b6369e7d408bec5095c2c77b1b | 2,167 | py | Python | examples/mxnet/export.py | mitaki28/onnx-chainer | 845aa6c168d912ce044183c6dff6f21ce498d17c | [
"MIT"
] | null | null | null | examples/mxnet/export.py | mitaki28/onnx-chainer | 845aa6c168d912ce044183c6dff6f21ce498d17c | [
"MIT"
] | 1 | 2018-09-21T08:11:43.000Z | 2018-09-21T08:11:43.000Z | examples/mxnet/export.py | mitaki28/onnx-chainer | 845aa6c168d912ce044183c6dff6f21ce498d17c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import collections
import numpy as np
import chainer
import chainer.functions as F
import chainercv.links as C
import mxnet
import onnx_chainer
def save_as_onnx_then_import_from_mxnet(model, fn):
    # Prepare an input tensor
    x = np.random.rand(1, 3, 224, 224).astype(np.float32) * 255

    # Run the model on the data
    with chainer.using_config('train', False):
        chainer_out = model(x).array

    # Export Chainer model into ONNX
    onnx_chainer.export(model, x, fn)

    # Load ONNX model into MXNet symbol
    sym, arg, aux = mxnet.contrib.onnx.import_model(fn)

    # Find the name of input tensor
    data_names = [graph_input for graph_input in sym.list_inputs()
                  if graph_input not in arg and graph_input not in aux]
    data_shapes = [(data_names[0], x.shape)]

    # Create MXNet model
    mod = mxnet.mod.Module(
        symbol=sym, data_names=data_names, context=mxnet.cpu(),
        label_names=None)
    mod.bind(
        for_training=False, data_shapes=data_shapes,
        label_shapes=None)
    mod.set_params(
        arg_params=arg, aux_params=aux, allow_missing=True,
        allow_extra=True)

    # Create input data
    Batch = collections.namedtuple('Batch', ['data'])
    input_data = Batch([mxnet.nd.array(x)])

    # Forward computation using MXNet
    mod.forward(input_data)

    # Retrieve the output of forward result
    mxnet_out = mod.get_outputs()[0].asnumpy()

    # Check the predicted classes are the same
    assert np.argmax(chainer_out) == np.argmax(mxnet_out)

    # Check both outputs have the same values
    np.testing.assert_almost_equal(chainer_out, mxnet_out, decimal=5)

def main():
    model = C.VGG16(pretrained_model='imagenet')
    save_as_onnx_then_import_from_mxnet(model, 'vgg16.onnx')

    model = C.ResNet50(pretrained_model='imagenet', arch='he')
    # Change cover_all option to False to match the default behavior of MXNet's pooling
    model.pool1 = lambda x: F.max_pooling_2d(
        x, ksize=3, stride=2, cover_all=False)
    save_as_onnx_then_import_from_mxnet(model, 'resnet50.onnx')

if __name__ == '__main__':
    main()
| 28.893333 | 87 | 0.693124 | 320 | 2,167 | 4.484375 | 0.43125 | 0.027875 | 0.020906 | 0.029268 | 0.07108 | 0.07108 | 0.07108 | 0.07108 | 0 | 0 | 0 | 0.01687 | 0.206737 | 2,167 | 74 | 88 | 29.283784 | 0.817917 | 0.20766 | 0 | 0 | 0 | 0 | 0.036994 | 0 | 0 | 0 | 0 | 0 | 0.05 | 1 | 0.05 | false | 0 | 0.275 | 0 | 0.325 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8545f8abe40c339f00fc63341382c5d42092fb16 | 982 | py | Python | Otherfiles/notebook_check.py | lewiuberg/pycm | 50fe8f55e073d438fadd0e27cc02090cd8361501 | [
"MIT"
] | 1,266 | 2018-01-22T20:54:00.000Z | 2022-03-31T12:41:53.000Z | Otherfiles/notebook_check.py | lewiuberg/pycm | 50fe8f55e073d438fadd0e27cc02090cd8361501 | [
"MIT"
] | 375 | 2018-02-19T16:06:24.000Z | 2022-03-17T16:27:48.000Z | Otherfiles/notebook_check.py | lewiuberg/pycm | 50fe8f55e073d438fadd0e27cc02090cd8361501 | [
"MIT"
] | 110 | 2018-01-22T23:38:59.000Z | 2022-03-23T10:08:30.000Z | # -*- coding: utf-8 -*-
"""Notebook-check script."""
import os
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from art import tprint
NOTEBOOKS_LIST = [
    "Document",
    "Example1",
    "Example2",
    "Example3",
    "Example4",
    "Example5",
    "Example6",
    "Example7",
    "Example8"]
EXTENSION = ".ipynb"

if __name__ == "__main__":
    tprint("PYCM", "bulbhead")
    tprint("Document", "bulbhead")
    print("Processing ...")
    for index, notebook in enumerate(NOTEBOOKS_LIST):
        ep = ExecutePreprocessor(timeout=6000, kernel_name='python3')
        path = os.path.join("Document", notebook)
        with open(path + EXTENSION, "r", encoding="utf-8") as f:
            nb = nbformat.read(f, as_version=4)
        ep.preprocess(nb, {'metadata': {'path': 'Document/'}})
        with open(path + EXTENSION, 'w', encoding='utf-8') as f:
            nbformat.write(nb, f)
        print("{0}.{1} [OK]".format(str(index + 1), notebook))
| 28.882353 | 69 | 0.607943 | 109 | 982 | 5.366972 | 0.59633 | 0.020513 | 0.041026 | 0.071795 | 0.051282 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026212 | 0.223014 | 982 | 33 | 70 | 29.757576 | 0.740498 | 0.045825 | 0 | 0 | 0 | 0 | 0.201933 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.178571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8546f1c8d609306bdae939b66f98fe07f0ef570a | 2,315 | py | Python | src/eventsHandler/on_message/moderation/mute/revoke_mute.py | gastbob40/discord_brawl_bot | 90ee7ef6492b5e4272a8baa42fd97f8369b07864 | [
"MIT"
] | null | null | null | src/eventsHandler/on_message/moderation/mute/revoke_mute.py | gastbob40/discord_brawl_bot | 90ee7ef6492b5e4272a8baa42fd97f8369b07864 | [
"MIT"
] | null | null | null | src/eventsHandler/on_message/moderation/mute/revoke_mute.py | gastbob40/discord_brawl_bot | 90ee7ef6492b5e4272a8baa42fd97f8369b07864 | [
"MIT"
] | null | null | null | from typing import List
import discord
import yaml
from src.models.models import Mute, session
from src.utils.embeds_manager import EmbedsManager
from src.utils.permissions_manager import PermissionsManager
async def revoke_mute(client: discord.Client, message: discord.Message, args: List[str]):
    with open('run/config/config.yml', 'r') as file:
        config = yaml.safe_load(file)
    if not PermissionsManager.has_perm(message.author, 'mute'):
        return await message.channel.send(
            embed=EmbedsManager.error_embed(
                "Vous n'avez pas les permissions pour cette commande."
            )
        )
    # Help message
    if args and args[0] == '-h':
        return await message.channel.send(
            embed=EmbedsManager.information_embed(
                "Rappel de la commande : \n"
                f"`{config['prefix']}rmute <mute_id>`"
            )
        )
    if len(args) != 1:
        return await message.channel.send(
            embed=EmbedsManager.error_embed(
                f":x: Erreur dans la commande, merci de spécifier l'index du mute."
            )
        )
    if not args[0].startswith("m"):
        return await message.channel.send(
            embed=EmbedsManager.error_embed(
                ":x: Erreur, index invalide."
            )
        )
    index = int(args[0][1:])
    current_mute: Mute = session.query(Mute).filter_by(id=index).first()
    if current_mute is None:
        return await message.channel.send(
            embed=EmbedsManager.error_embed(
                ":x: Erreur, index invalide."
            )
        )
    if not current_mute.is_active:
        return await message.channel.send(
            embed=EmbedsManager.error_embed(
                ":x: Erreur, ce mute est déjà révoqué."
            )
        )
    current_mute.is_active = False
    session.commit()
    target: discord.Member = message.guild.get_member(current_mute.target_id)
    for channel in message.guild.channels:
        if not target.permissions_in(channel).send_messages:
            await channel.set_permissions(target,
                                          overwrite=None)
    await message.channel.send(
        embed=EmbedsManager.complete_embed(
            f"⚠ Le mute **{args[0]}** a été révoqué."
        )
    )
| 30.866667 | 89 | 0.597408 | 267 | 2,315 | 5.086142 | 0.400749 | 0.064801 | 0.097938 | 0.118557 | 0.309278 | 0.309278 | 0.279087 | 0.244477 | 0.244477 | 0.16053 | 0 | 0.003729 | 0.304968 | 2,315 | 74 | 90 | 31.283784 | 0.839652 | 0.005184 | 0 | 0.216667 | 0 | 0 | 0.145589 | 0.019557 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
854863e9c50296993f6a388b3257fb1d9813ee80 | 1,398 | py | Python | QUANTAXIS_Test/QAAnalysis_Test/QASignal_hull_MA_Test.py | PenghuiCheng/QUANTAXIS | b8d81ed592d7008151dc0bbbd5d1030e8af73067 | [
"MIT"
] | 1 | 2020-01-31T05:23:21.000Z | 2020-01-31T05:23:21.000Z | QUANTAXIS_Test/QAAnalysis_Test/QASignal_hull_MA_Test.py | PenghuiCheng/QUANTAXIS | b8d81ed592d7008151dc0bbbd5d1030e8af73067 | [
"MIT"
] | null | null | null | QUANTAXIS_Test/QAAnalysis_Test/QASignal_hull_MA_Test.py | PenghuiCheng/QUANTAXIS | b8d81ed592d7008151dc0bbbd5d1030e8af73067 | [
"MIT"
] | null | null | null | import QUANTAXIS as QA
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
from QUANTAXIS.QAIndicator.talib_numpy import *  # expected to provide talib, TA_HMA, TA_KAMA
import mpl_finance as mpf
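# For reference (standard definition, not specific to this repo): the Hull
# Moving Average combines weighted moving averages to reduce lag:
#   HMA(n) = WMA(2 * WMA(price, n/2) - WMA(price, n), sqrt(n))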
def smooth_demo():
data2 = QA.QA_fetch_crypto_asset_day_adv(['huobi'],
symbol=['btcusdt'],
start='2017-10-01',
end='2020-06-30 23:59:59')
xn = data2.close.values
    ma10 = talib.MA(data2.close.values, 10)
    hma10 = TA_HMA(data2.close.values, 10)
    kama10 = TA_KAMA(data2.close.values, 10)
window_size, poly_order = 5, 1
yy_sg = savgol_filter(xn, window_size, poly_order)
plt.figure(figsize = (22,9))
ax1 = plt.subplot(111)
mpf.candlestick2_ochl(ax1, data2.data.open.values, data2.data.close.values, data2.data.high.values, data2.data.low.values, width=0.6, colorup='r', colordown='green', alpha=0.5)
#ax1.title("The smoothing windows")
#plt.plot(xn, lw=1, alpha=0.8)
    ax1.plot(hma10, lw=2, linestyle="--", color='darkcyan', alpha=0.6)
ax1.plot(yy_sg, lw=1, color='darkcyan', alpha=0.8)
    ax1.plot(ma10, lw=1, color='orange', alpha=0.8)
    ax1.plot(kama10, lw=1, color='lightskyblue', alpha=0.8)
l=['Hull Moving Average', 'savgol_filter', 'talib.MA10', 'KAMA10']
ax1.legend(l)
plt.title("Smoothing a MA10 line")
plt.show()
if __name__=='__main__':
smooth_demo()
| 33.285714 | 180 | 0.679542 | 222 | 1,398 | 4.148649 | 0.486486 | 0.039088 | 0.06949 | 0.058632 | 0.045603 | 0 | 0 | 0 | 0 | 0 | 0 | 0.073024 | 0.167382 | 1,398 | 41 | 181 | 34.097561 | 0.718213 | 0.045064 | 0 | 0 | 0 | 0 | 0.12003 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.25 | 0 | 0.28125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8548e030cb94201bb3f56fd501049ba22c2f09df | 1,626 | py | Python | Src/Server/handlers.py | HamishHamiltonSmith/TKNET-Remote-file-transfer | 2fc394281463482c5e6039cead9dc052cc09eb94 | [
"Apache-2.0"
] | 1 | 2021-12-04T16:57:19.000Z | 2021-12-04T16:57:19.000Z | Src/Server/handlers.py | HamishHamiltonSmith/TKNET-Remote-file-transfer | 2fc394281463482c5e6039cead9dc052cc09eb94 | [
"Apache-2.0"
] | null | null | null | Src/Server/handlers.py | HamishHamiltonSmith/TKNET-Remote-file-transfer | 2fc394281463482c5e6039cead9dc052cc09eb94 | [
"Apache-2.0"
] | null | null | null | import time
import os
from datetime import datetime
import breakpoint
def log(msg):
curr_date = datetime.now()
l = open('/usr/share/Tknet/Server/tknet.log','a')
l.write(f'\n[{curr_date}]: {msg}')
l.close()
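# Wire-protocol sketch (inferred from the handlers below, not a formal spec):
# the server announces a mode ('FILEMODE' or 'DIRMODE'), then streams
# 'DIRADD <name>', 'FILEADD <name>' and 'FILECONT <name> <data>' records,
# finishing with 'END'; breakpoint.wait(c) presumably blocks until the client
# acknowledges directory creation.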
def file_transfer_handle(c,x,d_name,address):
c.send('FILEMODE'.encode())
c.send(f'DIRADD {d_name.split(".")[0]}'.encode())
time.sleep(0.5)
log(f'Reached breakpoint of directory transfer for {address}')
breakpoint.wait(c)
log(f'Breakpoint resolved for {address}')
log(f'Sending {x[1]} to {address}')
c.send(f'FILEADD {x[1]}'.encode())
time.sleep(1)
    with open(f'{x[1]}') as f:
        c.send(f'FILECONT {x[1]} {f.read()}'.encode())
time.sleep(1)
c.send('END'.encode())
def dir_transfer_handle(c,x,d_name,address):
log(f"{[x[1]]}-Found directory, sending all files...")
c.send('DIRMODE'.encode())
time.sleep(0.5)
c.send("The selected option contains multiple files, be warned...".encode())
files = os.listdir(f'{x[1]}')
time.sleep(0.5)
c.send(f"DIRADD {d_name}".encode())
log(f'Reached breakpoint of directory transfer for {address}')
breakpoint.wait(c)
log(f'Breakpoint resolved for {address}')
for item in files:
if os.path.isdir(f'{x[1]}/{item}'):
print(f'Dir found {item}')
else:
log(f'Sending {item} to {address}')
c.send(f"FILEADD {item}".encode())
time.sleep(1)
            with open(f'{x[1]}/{item}') as f:
                c.send(f'FILECONT {item} {f.read()}'.encode())
time.sleep(1)
#End directory transfer
c.send('END'.encode()) | 31.269231 | 80 | 0.590406 | 246 | 1,626 | 3.861789 | 0.292683 | 0.057895 | 0.037895 | 0.067368 | 0.492632 | 0.468421 | 0.313684 | 0.254737 | 0.254737 | 0.204211 | 0 | 0.01489 | 0.215252 | 1,626 | 52 | 81 | 31.269231 | 0.729624 | 0.01353 | 0 | 0.333333 | 0 | 0 | 0.365337 | 0.034289 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.088889 | 0 | 0.155556 | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85495a86fbc5eda9a5807aaf00f10f29d51d67f3 | 5,297 | py | Python | SequenceModel/seq_model.py | BhaveshJP25/RSNA | 48d85faf82651b1ae4fdcd829ce2d4978a858d3f | [
"MIT"
] | null | null | null | SequenceModel/seq_model.py | BhaveshJP25/RSNA | 48d85faf82651b1ae4fdcd829ce2d4978a858d3f | [
"MIT"
] | null | null | null | SequenceModel/seq_model.py | BhaveshJP25/RSNA | 48d85faf82651b1ae4fdcd829ce2d4978a858d3f | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class SequenceModel(nn.Module):
def __init__(self, model_num, feature_dim, feature_num,
lstm_layers, hidden, drop_out, Add_position):
super(SequenceModel, self).__init__()
self.feature_num=feature_num
# seq model 1
self.fea_conv = nn.Sequential(
nn.Dropout2d(drop_out),
nn.Conv2d(feature_dim, 512, kernel_size=(1, 1), stride=(1,1), padding=(0,0), bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Dropout2d(drop_out),
nn.Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Dropout2d(drop_out),
)
self.fea_first_final = nn.Sequential(nn.Conv2d(128 * feature_num, 6, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True))
# # bidirectional GRU
self.hidden_fea = hidden
self.fea_lstm = nn.GRU(128 * feature_num, self.hidden_fea, num_layers=lstm_layers, batch_first=True, bidirectional=True)
self.fea_lstm_final = nn.Sequential(nn.Conv2d(1, 6, kernel_size=(1, self.hidden_fea*2), stride=(1, 1), padding=(0, 0), dilation=1, bias=True))
ratio = 4
if Add_position:
model_num += 2
else:
model_num += 1
# seq model 2
self.conv_first = nn.Sequential(nn.Conv2d(model_num, 128*ratio, kernel_size=(5, 1), stride=(1,1), padding=(2,0), dilation=1, bias=False),
nn.BatchNorm2d(128*ratio),
nn.ReLU(),
nn.Conv2d(128*ratio, 64*ratio, kernel_size=(3, 1), stride=(1, 1), padding=(2, 0), dilation=2, bias=False),
nn.BatchNorm2d(64*ratio),
nn.ReLU())
self.conv_res = nn.Sequential(nn.Conv2d(64 * ratio, 64 * ratio, kernel_size=(3, 1), stride=(1, 1), padding=(4, 0), dilation=4, bias=False),
nn.BatchNorm2d(64 * ratio),
nn.ReLU(),
nn.Conv2d(64 * ratio, 64 * ratio, kernel_size=(3, 1), stride=(1, 1), padding=(2, 0), dilation=2, bias=False),
nn.BatchNorm2d(64 * ratio),
nn.ReLU(),)
self.conv_final = nn.Sequential(nn.Conv2d(64*ratio, 1, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0), dilation=1,bias=False))
# bidirectional GRU
self.hidden = hidden
self.lstm = nn.GRU(64*ratio*6, self.hidden, num_layers=lstm_layers, batch_first=True, bidirectional=True)
self.final = nn.Sequential(nn.Conv2d(1, 6, kernel_size=(1, self.hidden*2), stride=(1, 1), padding=(0, 0), dilation=1, bias=True))
def forward(self, fea, x):
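        # Assumed shapes (inferred from the ops below): fea is roughly
        # (B, feature_dim, seq_len, feature_num) per-slice backbone features;
        # x is (B, model_num, seq_len, 6) stacked per-model predictions.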
batch_size, _, _, _ = x.shape
fea = self.fea_conv(fea)
fea = fea.permute(0, 1, 3, 2).contiguous()
fea = fea.view(batch_size, 128 * self.feature_num, -1).contiguous()
fea = fea.view(batch_size, 128 * self.feature_num, -1, 1).contiguous()
fea_first_final = self.fea_first_final(fea)
#################################################
out0 = fea_first_final.permute(0, 3, 2, 1)
#################################################
# bidirectional GRU
fea = fea.view(batch_size, 128 * self.feature_num, -1).contiguous()
fea = fea.permute(0, 2, 1).contiguous()
fea, _ = self.fea_lstm(fea)
fea = fea.view(batch_size, 1, -1, self.hidden_fea * 2)
fea_lstm_final = self.fea_lstm_final(fea)
fea_lstm_final = fea_lstm_final.permute(0, 3, 2, 1)
#################################################
out0 += fea_lstm_final
#################################################
out0_sigmoid = torch.sigmoid(out0)
x = torch.cat([x, out0_sigmoid], dim = 1)
x = self.conv_first(x)
x = self.conv_res(x)
x_cnn = self.conv_final(x)
#################################################
out = x_cnn
#################################################
# bidirectional GRU
x = x.view(batch_size, 256, -1, 6)
x = x.permute(0,2,1,3).contiguous()
x = x.view(batch_size, x.size()[1], -1).contiguous()
x, _= self.lstm(x)
x = x.view(batch_size, 1, -1, self.hidden*2)
x = self.final(x)
x = x.permute(0,3,2,1)
#################################################
out += x
#################################################
        # Return refined logits and the feature-branch auxiliary logits
return out, out0
if __name__ == '__main__':
model = SequenceModel(model_num=15, feature_dim = 128, feature_num=16,
lstm_layers = 2, hidden=128, drop_out=0.5,
Add_position = True)
print(model) | 47.720721 | 150 | 0.473853 | 627 | 5,297 | 3.827751 | 0.122807 | 0.014167 | 0.033333 | 0.0625 | 0.51875 | 0.455 | 0.39125 | 0.370417 | 0.33375 | 0.33375 | 0 | 0.061334 | 0.332075 | 5,297 | 111 | 151 | 47.720721 | 0.617015 | 0.01869 | 0 | 0.15 | 0 | 0 | 0.001667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.05 | 0 | 0.1 | 0.0125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
854b394bb67bd9f05c7137d19f721026d26e8cfb | 1,107 | py | Python | eventrack/settings/prod.py | FedorSelitsky/eventrack | 54869ab0eaba56d266a3d9c0c56c601039905255 | [
"MIT"
] | 5 | 2017-12-14T09:04:42.000Z | 2018-06-01T20:09:02.000Z | eventrack/settings/prod.py | FedorSelitsky/eventrack | 54869ab0eaba56d266a3d9c0c56c601039905255 | [
"MIT"
] | 11 | 2017-11-08T10:35:08.000Z | 2018-10-11T19:37:36.000Z | eventrack/settings/prod.py | FedorSelitsky/eventrack | 54869ab0eaba56d266a3d9c0c56c601039905255 | [
"MIT"
] | null | null | null | import os

import dj_database_url

from .base import *  # NOQA
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# SECURITY WARNING: keep the secret key used in production secret!
if 'CFG_SECRET_KEY' in os.environ:
SECRET_KEY = os.environ['CFG_SECRET_KEY']
if 'CFG_ALLOWED_HOSTS' in os.environ:
ALLOWED_HOSTS = os.environ['CFG_ALLOWED_HOSTS'].split(',')
# Database
# https://docs.djangoproject.com/en/stable/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(
default='postgis://postgis:postgis@postgis/postgis',
),
}
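# dj_database_url reads the DATABASE_URL environment variable when present,
# so e.g. DATABASE_URL=postgis://user:pass@host:5432/dbname overrides the
# default above.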
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/stable/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.getenv('CFG_STATIC_ROOT', os.path.join(BASE_DIR, 'static'))
MEDIA_URL = '/media/'
MEDIA_ROOT = os.getenv('CFG_MEDIA_ROOT', os.path.join(BASE_DIR, 'media'))
# ManifestStaticFilesStorage
# https://docs.djangoproject.com/en/stable/ref/contrib/staticfiles/#manifeststaticfilesstorage
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
| 27 | 94 | 0.750678 | 146 | 1,107 | 5.520548 | 0.410959 | 0.044665 | 0.081886 | 0.093052 | 0.182382 | 0.182382 | 0.08933 | 0 | 0 | 0 | 0 | 0 | 0.117435 | 1,107 | 40 | 95 | 27.675 | 0.824974 | 0.383921 | 0 | 0 | 0 | 0 | 0.338806 | 0.152239 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
854bf3a30cf643b88428f0e25a363a7a8a2a0940 | 6,708 | py | Python | pysparnn/cluster_selection.py | kchaliki/pysparnn | 426b9e660fdd8c32bb6af4a7f833fb34a3d070ef | [
"BSD-3-Clause"
] | null | null | null | pysparnn/cluster_selection.py | kchaliki/pysparnn | 426b9e660fdd8c32bb6af4a7f833fb34a3d070ef | [
"BSD-3-Clause"
] | null | null | null | pysparnn/cluster_selection.py | kchaliki/pysparnn | 426b9e660fdd8c32bb6af4a7f833fb34a3d070ef | [
"BSD-3-Clause"
] | null | null | null | import random as _random
import numpy as _np
import collections as _collections
from abc import ABC, abstractmethod
from sklearn.cluster import DBSCAN
def _k_best(tuple_list, k):
"""For a list of tuples [(distance, value), ...] - Get the k-best tuples by
distance.
Args:
tuple_list: List of tuples. (distance, value)
k: Number of tuples to return.
"""
tuple_lst = sorted(tuple_list, key=lambda x: x[0],
reverse=False)[:k]
return tuple_lst
class ClusterSelector(ABC):
@abstractmethod
def select_clusters(self, features):
pass
class DefaultClusterSelector(ClusterSelector):
"""
Default cluster selector, picks sqrt(num_records) random points (at most 1000)
and allocates points to their nearest category. This can often end up splitting
similar points into multiple paths of the tree
"""
def __init__(self, distance_type):
self._distance_type = distance_type
def select_clusters(self, features):
# number of points to cluster
num_records = features.shape[0]
matrix_size = max(int(_np.sqrt(num_records)), 1000)
        # set num_clusters = min(max(sqrt(num_records), 1000), num_records)
clusters_size = min(matrix_size, num_records)
# make list [0, 1, ..., num_records-1]
records_index = list(_np.arange(features.shape[0]))
# randomly choose num_clusters records as the cluster roots
# this randomizes both selection and order of features in the selection
clusters_selection = _random.sample(records_index, clusters_size)
clusters_selection = features[clusters_selection]
# create structure to store clusters
item_to_clusters = _collections.defaultdict(list)
# create a distance_type object containing the cluster roots
# labeling them as 0 to N-1 in their current (random) order
root = self._distance_type(clusters_selection,
list(_np.arange(clusters_selection.shape[0])))
# remove duplicate cluster roots
root.remove_near_duplicates()
# initialize distance type object with the remaining cluster roots
root = self._distance_type(root.matrix,
list(_np.arange(root.matrix.shape[0])))
rng_step = matrix_size
# walk features in steps of matrix_size = max(sqrt(num_records), 1000)
for rng in range(0, features.shape[0], rng_step):
# don't exceed the array length on the last step
max_rng = min(rng + rng_step, features.shape[0])
records_rng = features[rng:max_rng]
# find the nearest cluster root for each feature in the step
for i, clstrs in enumerate(root.nearest_search(records_rng)):
_random.shuffle(clstrs)
for _, cluster in _k_best(clstrs, k=1):
# add each feature to its nearest cluster, here the cluster label
# is the label assigned to the root feature after it had been selected at random
item_to_clusters[cluster].append(i + rng)
# row index in clusters_selection maps to key in item_to_clusters
# but the values in item_to_clusters are row indices of the original features matrix
return clusters_selection, item_to_clusters
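
# A minimal usage sketch (hypothetical; assumes a pysparnn distance class such
# as matrix_distance.CosineDistance with the (matrix, records) constructor used
# above):
#
#   selector = DefaultClusterSelector(CosineDistance)
#   roots, assignments = selector.select_clusters(feature_matrix)
#   # `roots` holds the cluster-root rows; `assignments[i]` lists the row
#   # indices of `feature_matrix` nearest to root i.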
class DbscanClusterSelector(ClusterSelector):
"""
Dbscan based cluster selector, picks sqrt(num_records) random points (at most 1000)
and then forms groups inside the random selection, before allocating other features
to the groups
"""
def __init__(self, distance_type):
self._distance_type = distance_type
self._eps = 0.4
def select_clusters(self, features):
# number of points to cluster
num_records = features.shape[0]
matrix_size = max(int(_np.sqrt(num_records)), 1000)
        # set num_clusters = min(max(sqrt(num_records), 1000), num_records)
clusters_size = min(matrix_size, num_records)
# make list [0, 1, ..., num_records-1]
records_index = list(_np.arange(features.shape[0]))
# randomly choose num_clusters records as the cluster roots
# this randomizes both selection and order of features in the selection
random_clusters_selection = _random.sample(records_index, clusters_size)
random_clusters_selection = features[random_clusters_selection]
# now cluster the cluster roots themselves to avoid
# randomly separating neighbours, this probably means fewer clusters per level
# TODO might want to propagate the distance type to the clustering
db_scan_clustering = DBSCAN(eps=self._eps, min_samples=2).fit(random_clusters_selection)
# get all the individual points from the cluster
unique_indices = _np.where(db_scan_clustering.labels_ == -1)[0]
# and the first item from each cluster
_, cluster_start_indices = _np.unique(db_scan_clustering.labels_, return_index=True)
# merge and uniquefy, the result is sorted
all_indices = _np.concatenate((unique_indices, cluster_start_indices))
all_indices_unique = _np.unique(all_indices)
# create a matrix where rows are the first item in each dbscan cluster
# set that as cluster selection and then allocate features to cluster
clusters_selection = random_clusters_selection[all_indices_unique]
# create structure to store clusters
item_to_clusters = _collections.defaultdict(list)
# create a distance_type object containing the cluster root
root = self._distance_type(clusters_selection,
list(_np.arange(clusters_selection.shape[0])))
rng_step = matrix_size
# walk features in steps of matrix_size = max(sqrt(num_records), 1000)
for rng in range(0, features.shape[0], rng_step):
max_rng = min(rng + rng_step, features.shape[0])
records_rng = features[rng:max_rng]
# find the nearest cluster root for each feature in the step
for i, clstrs in enumerate(root.nearest_search(records_rng)):
# this is slow, disable until proven useful
# _random.shuffle(clstrs)
for _, cluster in _k_best(clstrs, k=1):
# add each feature to its nearest cluster
item_to_clusters[cluster].append(i + rng)
# row index in clusters_selection maps to key in item_to_clusters
# but the values in item_to_clusters are row indices of the original features matrix
return clusters_selection, item_to_clusters
| 42.726115 | 100 | 0.672481 | 875 | 6,708 | 4.948571 | 0.232 | 0.066744 | 0.032333 | 0.024942 | 0.591917 | 0.573672 | 0.573672 | 0.573672 | 0.549192 | 0.549192 | 0 | 0.012288 | 0.259988 | 6,708 | 156 | 101 | 43 | 0.859992 | 0.405933 | 0 | 0.560606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00641 | 0 | 1 | 0.090909 | false | 0.015152 | 0.075758 | 0 | 0.257576 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
854cf8601e7b178c75e75d9ca11f1ae6300bc673 | 1,254 | py | Python | setup.py | wildthingz/pyAMD | 13f5691de9a7fc488859113bc934090857ac0f06 | [
"MIT"
] | null | null | null | setup.py | wildthingz/pyAMD | 13f5691de9a7fc488859113bc934090857ac0f06 | [
"MIT"
] | null | null | null | setup.py | wildthingz/pyAMD | 13f5691de9a7fc488859113bc934090857ac0f06 | [
"MIT"
] | null | null | null | from os import path
from setuptools import setup
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md')) as f:
long_description = f.read()
setup(name = 'pyAMD',
version = '0.1.0',
description = 'A tool to find the optimal mesh density for visualising macrosegregation -- An extension to MakeContour',
long_description = long_description,
url = 'https://github.com/wildthingz/pyAMD',
author = 'Hatef Khadivinassab',
author_email = 'hatef.hadivinassab@gmail.com',
packages = ['pyAMD'],
      classifiers=[
          "Development Status :: 3 - Alpha",
          "License :: OSI Approved :: MIT License",
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3.4",
          "Programming Language :: Python :: 3.5",
          "Operating System :: POSIX :: Linux",
          "Operating System :: MacOS :: MacOS X",
          "Operating System :: Microsoft :: Windows",
          "Framework :: Spyder",
          "Intended Audience :: End Users/Desktop",
          "Natural Language :: English",
      ],
license = 'Creative Commons Attribution-Noncommercial-Share Alike license',
keywords = ['AMD', 'macrosegregation', 'mesh density', 'visaliziation', 'contour']
)
| 35.828571 | 121 | 0.674641 | 142 | 1,254 | 5.894366 | 0.661972 | 0.0908 | 0.119474 | 0.062127 | 0.064516 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01173 | 0.184211 | 1,254 | 34 | 122 | 36.882353 | 0.806452 | 0 | 0 | 0 | 0 | 0 | 0.590112 | 0.047049 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
854d5eaf6ade4462203d501132678e102772680e | 2,407 | py | Python | nablapps/blog/models.py | pettaroni/nablaweb | 5e610698a276884b9cd779a718dfdee641713636 | [
"MIT"
] | null | null | null | nablapps/blog/models.py | pettaroni/nablaweb | 5e610698a276884b9cd779a718dfdee641713636 | [
"MIT"
] | null | null | null | nablapps/blog/models.py | pettaroni/nablaweb | 5e610698a276884b9cd779a718dfdee641713636 | [
"MIT"
] | null | null | null | """
Models for blog app
"""
from django.db import models
from django.urls import reverse
from django.utils.text import slugify
from nablapps.core.models import TimeStamped
class Blog(models.Model):
"""
Represents a blog which can have multiple blog entries/posts.
"""
name = models.CharField(
max_length=80,
verbose_name="Navn"
)
slug = models.SlugField(
unique=True,
blank=True,
null=True,
editable=True,
)
created = models.DateField(
auto_now_add=True,
verbose_name="Opprettet"
)
class Meta:
verbose_name = "Blogg"
verbose_name_plural = "Blogger"
db_table = "content_blog"
    def save(self, *args, **kwargs):  # pylint: disable=W0221
        # `created` uses auto_now_add, so Django sets it on insert anyway;
        # only the slug needs to be derived here.
        if not self.id:
            self.slug = slugify(self.name)
        return super().save(*args, **kwargs)
def __str__(self):
return self.name
def get_absolute_url(self):
"""Return canonical url for the blog"""
return reverse('blog', kwargs={'blog': self.slug})
class BlogPost(TimeStamped, models.Model):
"""
A single blog post belonging to a specific blog
"""
blog = models.ForeignKey(
Blog,
related_name="posts",
verbose_name="Blogg",
on_delete=models.CASCADE
)
title = models.CharField(
max_length=80,
verbose_name="Tittel"
)
slug = models.SlugField(
unique=True,
blank=True,
editable=True,
)
content = models.TextField(
verbose_name="Innhold",
help_text="Her kan du skrive i Markdown"
)
list_image = models.ImageField(
upload_to="blogpics",
verbose_name="Listebilde",
help_text="Bilde som vises i listevisningen av bloggene",
blank=True,
null=True
)
class Meta:
verbose_name = "Post"
verbose_name_plural = "Poster"
db_table = "content_blogpost"
def save(self, *args, **kwargs): # pylint: disable=W0221
self.slug = slugify(self.title)
return super().save(*args, **kwargs)
def __str__(self):
return self.title
def get_absolute_url(self):
"""Return canonical url for the blog post"""
return reverse('blog_post', kwargs={'blog': self.blog.slug, 'slug': self.slug})
| 23.598039 | 87 | 0.605318 | 281 | 2,407 | 5.046263 | 0.3879 | 0.077574 | 0.025388 | 0.03385 | 0.293371 | 0.293371 | 0.293371 | 0.187588 | 0.132581 | 0.132581 | 0 | 0.006981 | 0.285833 | 2,407 | 101 | 88 | 23.831683 | 0.817917 | 0.102617 | 0 | 0.295775 | 0 | 0 | 0.095216 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084507 | false | 0 | 0.070423 | 0.028169 | 0.408451 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
854e2f2fb882ebf0df9421c2942719fbe330679b | 1,654 | py | Python | Bhaskara.py | RafaelaBF/Bhaskara | 027b5b91cbc632fd82a0c6f7904d3cef026bdc85 | [
"MIT"
] | null | null | null | Bhaskara.py | RafaelaBF/Bhaskara | 027b5b91cbc632fd82a0c6f7904d3cef026bdc85 | [
"MIT"
] | null | null | null | Bhaskara.py | RafaelaBF/Bhaskara | 027b5b91cbc632fd82a0c6f7904d3cef026bdc85 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: Rafaela BF
Solve a complete quadratic equation using Bhaskara's formula.
"""
eq = input("Entre com a equação: ")
aux =[""]*3
i = eq.find("²", 0)
aux[0] = eq[0:(i+1)]
j = eq.find("x", i)
aux[1] = eq[(i+1):(j+1)]
aux[2] = eq[(j+1):len(eq)]
i = 0
j = 0
# Coefficient A
if len(aux[0]) < 3:
aux[0] = 1
elif aux[0].find('-', 0) != -1:
if len(aux[0]) < 4:
aux[0] = -1
else:
i = aux[0].find("x²", 0)
aux[0] = int(aux[0][0:i])
else:
i = aux[0].find("x²", 0)
aux[0] = int(aux[0][0:i])
# Coefficient B
if len(aux[1]) < 2:
aux[1] = 1
elif aux[1].find('-', 0) != -1:
if len(aux[1]) < 3:
aux[1] = -1
else:
i = aux[1].find("x", 0)
aux[1] = int(aux[1][0:i])
else:
i = aux[1].find("x", 0)
aux[1] = int(aux[1][0:i])
# Coefficient C
aux[2] = int(aux[2])
# Equation
print()
print(f"A equação: {eq}")
print(f"Onde A = {aux[0]} B = {aux[1]} C = {aux[2]}")
# Roots
print()
print("Tem raízes: ")
delta = aux[1]**2 - 4*aux[0]*aux[2]
# Assumes delta >= 0 (real roots); a negative discriminant would make **(1/2)
# return complex numbers, which the :.2f formatting below cannot handle.
x1 = (-aux[1] + delta**(1/2))/(2*aux[0])
print(f"X1 = {(x1):.2f}")
x2 = (-aux[1] - delta**(1/2))/(2*aux[0])
print(f"X2 = {(x2):.2f}")
# Vertex
print()
print("Vértices: ")
print(f"Xv = {((-aux[1])/(2*aux[0])):.2f}")
print(f"Yv = {((-(aux[1]**2 - 4*aux[0]*aux[2]))/(4*aux[0])):.2f}")
# Factored form
print()
print("sua Forma Fatorada é: ")
print(f"{aux[0]} * (X - ({(x1):.2f})) * (X - ({(x2):.2f})) = 0")
# Concavity of the parabola
print()
print("Concavidade da parábola é:", end=" ")
if aux[0] > 0:
print("voltada para cima")
else:
print("voltada para baixo")
| 17.784946 | 87 | 0.479444 | 307 | 1,654 | 2.583062 | 0.214984 | 0.110971 | 0.031526 | 0.030265 | 0.274905 | 0.274905 | 0.239596 | 0.239596 | 0.221942 | 0.221942 | 0 | 0.087059 | 0.229141 | 1,654 | 93 | 88 | 17.784946 | 0.534902 | 0.116082 | 0 | 0.333333 | 0 | 0.055556 | 0.254319 | 0.042847 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
854e8ce73be0938ffa4976e6cdc514a985349435 | 2,107 | py | Python | 01_demo/weight_decay_test.py | wwww666/Tensorflow2.0 | 4df3a3784482bb8db7943ffb402b5822d5111ab9 | [
"Apache-2.0"
] | 2 | 2020-04-24T10:20:18.000Z | 2021-02-25T03:53:07.000Z | 01_demo/weight_decay_test.py | wwww666/Tensorflow2.0 | 4df3a3784482bb8db7943ffb402b5822d5111ab9 | [
"Apache-2.0"
] | null | null | null | 01_demo/weight_decay_test.py | wwww666/Tensorflow2.0 | 4df3a3784482bb8db7943ffb402b5822d5111ab9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2020/4/20 11:46
# @Author : wwwzk
# @FileName: weight_decay_test.py
'''
Weight decay via L2-norm regularization.
'''
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.keras as ks
from liner_test import linreg,squared_loss,sgd
from fit_test import semilogy
# Initialize the dataset: true weights, bias, features and labels
n_train,n_test,num_input=20,100,200
true_w,true_b=tf.ones((num_input,1))*0.01,0.05
features = tf.random.normal(shape=(n_train+n_test,num_input))
labels=ks.backend.dot(features,true_w)+true_b
labels += tf.random.normal(stddev=0.01, shape=labels.shape)  # small noise; mean=0.01 here was presumably a typo for stddev
train_features,test_features=features[:n_train,:],features[n_train:,:]
train_labels,test_labels=labels[:n_train],labels[n_train:]
# Randomly initialize model parameters
def init_params():
w=tf.Variable(tf.random.normal(mean=1,shape=(num_input,1)))
b=tf.Variable(tf.zeros(shape=(1,)))
return [w,b]
# L2-norm penalty
def l2_penalty(w):
return tf.reduce_sum(w**2)/2
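# Adding lambd * l2_penalty(w) to the loss makes each SGD step shrink the
# weights toward zero — the weight-decay effect this script demonstrates.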
# Hyperparameters
batch_size,num_epochs,lr=1,100,0.003
# Network and loss (the custom sgd from liner_test does the updates, so no
# Keras optimizer is needed)
net, loss = linreg, squared_loss
train_iter = tf.data.Dataset.from_tensor_slices(
    (train_features, train_labels)).shuffle(n_train).batch(batch_size)
# Train the model, plotting train/test loss
def fit_and_plot(lambd):
w,b=init_params()
train_ls,test_ls=[],[]
for _ in range(num_epochs):
for X,y in train_iter:
with tf.GradientTape() as tape:
l=loss(net(X,w,b),y)+lambd*l2_penalty(w)
grads=tape.gradient(l,[w,b])
sgd([w,b],lr,batch_size,grads)
train_ls.append(tf.reduce_mean(loss(net(train_features,w,b),
train_labels)).numpy())
test_ls.append(tf.reduce_mean(loss(net(test_features,w,b),
test_labels)).numpy())
semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',
range(1, num_epochs + 1), test_ls, ['train', 'test'])
print('L2 norm of w:', tf.norm(w).numpy())
fit_and_plot(lambd=0)
fit_and_plot(lambd=3)
| 32.921875 | 117 | 0.656858 | 329 | 2,107 | 4.012158 | 0.346505 | 0.010606 | 0.031818 | 0.034091 | 0.093939 | 0.069697 | 0.040909 | 0 | 0 | 0 | 0 | 0.031915 | 0.196963 | 2,107 | 63 | 118 | 33.444444 | 0.748227 | 0.076412 | 0 | 0 | 0 | 0 | 0.017167 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.166667 | 0.02381 | 0.285714 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8550fe05eaac9342db606a4bddf7afe6170d5e33 | 3,835 | py | Python | bittensor/dataloaders/dataloader.py | parall4x/bittensor | abacb0b0f1b078d3103f516aff1328f049f9dc34 | [
"MIT"
] | null | null | null | bittensor/dataloaders/dataloader.py | parall4x/bittensor | abacb0b0f1b078d3103f516aff1328f049f9dc34 | [
"MIT"
] | null | null | null | bittensor/dataloaders/dataloader.py | parall4x/bittensor | abacb0b0f1b078d3103f516aff1328f049f9dc34 | [
"MIT"
] | null | null | null | import argparse
import bittensor
import requests
import random
from munch import Munch
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
class BittensorDataLoader():
def __init__(self):
# IPFS hash of the genesis dataset
# TODO (shibshib): Find a proper way to set this as config instead of hardcoding it.
# More dataset hashes can be added as we add directories for other modalities.
self.genesis_text_dataset_hash = "QmXwfPoh2QFYqC6cYcW8kzyd9ruFfhnUi2kVBkdhawjUzj"
        # Used to retrieve directory contents
self.dag_get = 'https://ipfs.infura.io:5001/api/v0/dag/get'
# Used to retrieve file contents
self.file_cat = 'https://ipfs.infura.io:5001/api/v0/cat'
# Used when current corpus has been exhausted
self.refresh_corpus = False
@staticmethod
def default_config() -> Munch:
parser = argparse.ArgumentParser();
BittensorDataLoader.add_args(parser)
config = bittensor.config.Config.to_config(parser);
return config
@staticmethod
def add_args(parser: argparse.ArgumentParser):
""" Add model params
"""
parser.add_argument('--dataloader.max_corpus_size', default=1e+6, type=int,
help='Maximum amount of data to download from IPFS into memory for training.')
parser.add_argument('--dataloader.num_workers', default=0, type=int,
help='Number of workers for data loader.')
@staticmethod
def check_config(config: Munch):
pass
@staticmethod
def requests_retry_session(
retries=3,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
session=None,
):
""" Creates a retriable session for request calls. This enables
automatic retries and back-off retries should any request calls fail.
Args:
retries (int, optional): Maximum number of retries. Defaults to 3.
backoff_factor (float, optional): Factor by which to back off if a retry fails. Defaults to 0.3.
status_forcelist (tuple, optional): A set of integer HTTP status codes that we should force a retry on. Defaults to (500, 502, 504).
session ([type], optional): Session for which to set up the retries. Defaults to None.
Returns:
requests.Session(): A Requests Session object set up for retries and backoff.
"""
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
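    # Example use of requests_retry_session (mirrors retrieve_directory below):
    # wrap a session so the POST retries on 500/502/504 with exponential
    # back-off:
    #
    #   session = requests.Session()
    #   response = BittensorDataLoader.requests_retry_session(
    #       session=session).post(url)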
def retrieve_directory(self, dir_hash: str):
"""Connects to Infura IPFS gateway and retrieves the directory of
genesis datasets.
Returns:
dict: A dictionary of the files inside of the genesis_datasets and their hashes.
"""
session = requests.Session()
params = (('arg', dir_hash),)
session.params.update(params)
directory = None
response = BittensorDataLoader.requests_retry_session(session=session).post(self.dag_get)
if response.status_code == 200:
directory = response.json()
return directory
def __len__(self):
""" Returns length of the dataset that the dataloader is processing
"""
pass
def __getitem__(self, idx):
"""returns the next batch from the dataset.
"""
pass | 35.183486 | 144 | 0.629726 | 441 | 3,835 | 5.369615 | 0.401361 | 0.008446 | 0.010135 | 0.014358 | 0.021959 | 0.021959 | 0.021959 | 0 | 0 | 0 | 0 | 0.016937 | 0.291786 | 3,835 | 109 | 145 | 35.183486 | 0.854934 | 0.329074 | 0 | 0.116667 | 0 | 0 | 0.123254 | 0.040263 | 0 | 0 | 0 | 0.009174 | 0 | 1 | 0.133333 | false | 0.05 | 0.116667 | 0 | 0.316667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85535f7564894f7aff285a016fb2d947fb9c1a70 | 3,300 | py | Python | pyinspect/__init__.py | dhruvmanila/pyinspect | ce90df243e5e5ee100f13de4329c111454b8c891 | [
"MIT"
] | 87 | 2020-09-30T10:18:26.000Z | 2022-03-10T08:56:04.000Z | pyinspect/__init__.py | dhruvmanila/pyinspect | ce90df243e5e5ee100f13de4329c111454b8c891 | [
"MIT"
] | 16 | 2020-09-30T10:57:17.000Z | 2022-01-16T02:10:45.000Z | pyinspect/__init__.py | dhruvmanila/pyinspect | ce90df243e5e5ee100f13de4329c111454b8c891 | [
"MIT"
] | 5 | 2020-11-20T07:39:26.000Z | 2022-01-13T04:54:51.000Z | # nopycln: file
from pyinspect.exceptions import install_traceback
from pyinspect.show import showme, what
from pyinspect.find import search
from pyinspect.answers import get_answers, ask
from pyinspect.panels import ok, warn, error, message, Report, NestedPanel
from pyinspect._rich import console
from pyinspect.classes import Enhanced
from pyinspect.builtins import List, Tuple, Dict, pilist, pidict
from pyinspect._colors import (
salmon,
lightsalmon,
orange,
mocassin,
lightblue,
lightorange,
gray,
)
from rich import pretty
pretty.install(
overflow="ellipse",
max_length=33,
)
try:
from github import Github
except Exception:
Github = None
__author__ = "Federico Claudi"
__license__ = "MIT"
__maintainer__ = "Federico Claudi"
__email__ = "federicoclaudi@protonmail.com"
__status__ = "dev"
__website__ = "https://github.com/FedeClaudi/pyinspect"
__version__ = "0.1.1rc"
def whats_pi():
"""
Prints a Report with an overview of `pyinspect`.
"""
# ? Intro
rep = Report(f"Pynspect", dim=orange, accent=orange)
rep._type = "Pyinspect info"
rep.width = 100
rep.add(
f"[b {lightorange}]The python package for lazy programmers",
justify="center",
)
# Features summary
rep.add(
f"""
[{salmon}]Don't remember a function's name?[/{salmon}] Use `pyinspect` to look for it.
[{salmon}]Don't remember what a function does?[/{salmon}] Use `pyinspect` to print its source code directly to your terminal.
[{salmon}]Can't figure out why you keep getting an error?[/{salmon}] Use `pyinspect`'s fancy tracebacks to figure it out
[{salmon}]Still can't figure it out, but too lazy to google it?[/{salmon}] Use `pyinspect` to print Stack Overflow's top answer for your error message directly to your terminal!
"""
)
# Package / Repo info as a nested panel
info = NestedPanel(color=mocassin, dim=mocassin)
_info = dict(
Author=__author__,
License=__license__,
Version=__version__,
Website=__website__,
)
if Github is not None:
n_stars = Github().get_repo("FedeClaudi/pyinspect").stargazers_count
_info["Github stars"] = n_stars
else:
warn(
"Could not fetch repo info",
"Perhaps `PyGithub` is not installed?s",
)
for k, v in _info.items():
info.add(f"[b {gray}]{k}[/b {gray}]: [{orange}]{v}", justify="right")
rep.add(info, "rich")
# Features examples
rep.add("""## Features""", "markdown", style=lightsalmon)
features = {
"Look up local variables": "pinspect.what()",
"Search functions by name": "pinspect.search(package, function_name)",
"Print source code to console": "pinspect.showme(function)",
"Enhanced tracebacks": "pinspect.install_traceback()",
"Render [i]Stack Overflow[/i] answers in the terminal": 'pinspect.ask("How to python?")',
}
for txt, code in features.items():
rep.spacer()
rep.add(f"[{gray}]" + txt, justify="center")
rep.add(" " + code, "code")
rep.spacer()
rep.add(f"[{lightorange}]... and a bunch of others!")
rep.spacer(2)
rep.add(f"[{lightsalmon}]Get in touch at:[/{lightsalmon}] {__website__}")
console.print(rep)
| 28.695652 | 177 | 0.655758 | 416 | 3,300 | 5.048077 | 0.430288 | 0.055714 | 0.016667 | 0.028571 | 0.039048 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00349 | 0.218485 | 3,300 | 114 | 178 | 28.947368 | 0.810779 | 0.043636 | 0 | 0.047619 | 0 | 0.047619 | 0.410845 | 0.033812 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011905 | false | 0 | 0.130952 | 0 | 0.142857 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8555997972d2b27f982f14b4c869a0e8df897de6 | 2,714 | py | Python | mycnn/data/voc_segment.py | jacky10001/tf2-mycnn | 6a631ee71b2a91fc4e6e7a43f8f9179260a1d7fa | [
"MIT"
] | null | null | null | mycnn/data/voc_segment.py | jacky10001/tf2-mycnn | 6a631ee71b2a91fc4e6e7a43f8f9179260a1d7fa | [
"MIT"
] | 20 | 2022-01-24T15:28:48.000Z | 2022-02-13T14:56:25.000Z | mycnn/data/voc_segment.py | jacky10001/tf2-mycnn | 6a631ee71b2a91fc4e6e7a43f8f9179260a1d7fa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import numpy as np
from skimage import io
from PIL import Image
def make_voc_segment_dataset(voc_directory: str, save_directory: str):
flag = False
## Set some directory
JPEGImages_dir = os.path.join(voc_directory, "JPEGImages")
SegmentationClass_dir = os.path.join(voc_directory, "SegmentationClass")
ImageSets_dir = os.path.join(voc_directory, "ImageSets", "Segmentation")
trainval_path = os.path.join(ImageSets_dir, "trainval.txt")
main_folder = os.path.join(save_directory, "VOCSegmentation")
train_folder = os.path.join(main_folder, "train")
train_images_folder = os.path.join(train_folder, "images")
train_masks_folder = os.path.join(train_folder, "masks")
train_visualization_folder = os.path.join(train_folder, "visualization")
## Check dataset
check_list = [train_images_folder, train_masks_folder, train_visualization_folder]
    for check_path in check_list:
        if os.path.exists(check_path):
            if not os.listdir(check_path) or len(os.listdir(check_path)) != 2913:
                raise ValueError(f"Detected incomplete data in {check_path}. "
                                 "Please delete all data and unzip again.")
        else:
            # Any missing folder means the dataset must be (re)built; don't
            # reset the flag when a later folder happens to be complete.
            flag = True
print("Make some folders.")
if not os.path.exists(main_folder):
os.makedirs(main_folder, exist_ok=True)
if not os.path.exists(train_images_folder):
os.makedirs(train_images_folder, exist_ok=True)
if not os.path.exists(train_masks_folder):
os.makedirs(train_masks_folder, exist_ok=True)
if not os.path.exists(train_visualization_folder):
os.makedirs(train_visualization_folder, exist_ok=True)
print("Get data list.")
with open(trainval_path) as f:
t = f.read().split('\n')[:-1]
if flag:
print("Start to make dataset.")
for name in t:
## get file path
im_path = os.path.join(JPEGImages_dir, name+".jpg")
gt_path = os.path.join(SegmentationClass_dir, name+".png")
## read data
im = io.imread(im_path)
vs = Image.open(gt_path)
gt = Image.open(gt_path)
gt = np.array(gt)
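            # VOC annotates object borders with the "void" label 255; map it
            # to background (0) so masks contain only valid class indices.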
gt[gt == 255] = 0
io.imsave(os.path.join(train_images_folder, os.path.basename(im_path)), im, check_contrast=False)
io.imsave(os.path.join(train_masks_folder, os.path.basename(gt_path)), gt, check_contrast=False)
vs.save(os.path.join(train_visualization_folder, os.path.basename(gt_path)))
print("Finished making dataset.")
else:
print("Already made dataset.") | 39.333333 | 109 | 0.657701 | 369 | 2,714 | 4.631436 | 0.276423 | 0.077238 | 0.081919 | 0.052662 | 0.293739 | 0.217086 | 0.068461 | 0.068461 | 0.068461 | 0.068461 | 0 | 0.004787 | 0.230287 | 2,714 | 69 | 110 | 39.333333 | 0.813308 | 0.02874 | 0 | 0.075472 | 0 | 0 | 0.111153 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018868 | false | 0 | 0.09434 | 0 | 0.113208 | 0.09434 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
855b4e01e5b934e421e4c9ebea7a438fae9617b9 | 1,187 | py | Python | python/eks-simple-fargate/eks_simple_fargate/eks_simple_fargate_stack.py | kloia/aws-cdk-samples | 69cb2bb45aab23e08d19d5ace24915893fe92360 | [
"MIT"
] | null | null | null | python/eks-simple-fargate/eks_simple_fargate/eks_simple_fargate_stack.py | kloia/aws-cdk-samples | 69cb2bb45aab23e08d19d5ace24915893fe92360 | [
"MIT"
] | null | null | null | python/eks-simple-fargate/eks_simple_fargate/eks_simple_fargate_stack.py | kloia/aws-cdk-samples | 69cb2bb45aab23e08d19d5ace24915893fe92360 | [
"MIT"
] | null | null | null | from aws_cdk import core, aws_eks
from .eks_base import EKSBase
from .alb_ingress import ALBIngressController
class EksSimpleFargateStack(core.Stack):
def __init__(self, scope: core.Construct, construct_id: str,
eks_version=aws_eks.KubernetesVersion.V1_19, cluster_name=None,
capacity_details='small', fargate_enabled=False, bottlerocket_asg=False,**kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
self.eks_version = eks_version
self.cluster_name = cluster_name
self.capacity_details = capacity_details
self.fargate_enabled = fargate_enabled
self.bottlerocket_asg = bottlerocket_asg
config_dict = {
'eks_version': self.eks_version,
'cluster_name': self.cluster_name,
'capacity_details': self.capacity_details,
'fargate_enabled': self.fargate_enabled,
'bottlerocket_asg': self.bottlerocket_asg
}
        base_cluster = EKSBase(self, "Base", cluster_configuration=config_dict)
        # Constructs register themselves with the stack; no reference needed
        ALBIngressController(self, "ALBIngress", cluster=base_cluster.cluster)
| 39.566667 | 94 | 0.705139 | 135 | 1,187 | 5.851852 | 0.362963 | 0.063291 | 0.035443 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003209 | 0.2123 | 1,187 | 29 | 95 | 40.931034 | 0.841711 | 0.035383 | 0 | 0 | 0 | 0 | 0.077865 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.136364 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
855ca1ff8948c123a855f128a9073dceff93dd69 | 8,196 | py | Python | g2client/g2disp.py | naojsoft/g2client | e49d0f3cacccdb569897f17d37c8c2e8f01c66cb | [
"BSD-3-Clause"
] | null | null | null | g2client/g2disp.py | naojsoft/g2client | e49d0f3cacccdb569897f17d37c8c2e8f01c66cb | [
"BSD-3-Clause"
] | null | null | null | g2client/g2disp.py | naojsoft/g2client | e49d0f3cacccdb569897f17d37c8c2e8f01c66cb | [
"BSD-3-Clause"
] | null | null | null | #
# Gen2 observation workstation client -- command line version
#
"""
Gen2 observation workstation client -- command line version
"""
import sys, time, os
import threading
import binascii
from g2base import ssdlog, myproc
from g2base.remoteObjects import remoteObjects as ro
from g2base.remoteObjects import Monitor
from g2client import soundsink
# Default ports
default_svc_port = 19051
default_mon_port = 19052
# TODO: put this in a utilities module
def error(msg, exitcode=0):
"""Called for an error. Print _msg_ to stderr and exit program
with code _exitcode_ if _exitcode_ is set to non-zero.
"""
sys.stderr.write(msg + '\n')
if exitcode != 0:
sys.exit(exitcode)
class g2Disp(object):
def __init__(self, **kwdargs):
self.__dict__.update(kwdargs)
self.lock = threading.RLock()
self.procs = {}
# Needed for starting our own tasks
self.tag = 'g2disp'
self.shares = ['logger', 'threadPool']
def ro_echo(self, arg):
return arg
def start_server(self, rohosts, options):
# Initialize remoteObjects subsystem
try:
ro.init(rohosts)
except ro.remoteObjectError as e:
self.logger.error("Error initializing remote objects subsystem: %s" % \
str(e))
return
# channels we are interested in
channels = ['sound']
self.ev_quit = threading.Event()
self.server_exited = threading.Event()
# Create a local pub sub instance
# mymon = PubSub.PubSub('%s.mon' % self.basename, self.logger,
# numthreads=30)
monname = '%s.mon' % self.basename
mymon = Monitor.Monitor(monname, self.logger,
numthreads=options.numthreads,
ev_quit=self.ev_quit)
self.monitor = mymon
self.soundsink = soundsink.SoundSink(monitor=mymon,
logger=self.logger,
ev_quit=self.ev_quit)
self.soundsource = soundsink.SoundSource(monitor=mymon,
logger=self.logger,
channels=['sound'])
# Subscribe our callback functions to the local monitor
mymon.subscribe_cb(self.soundsink.anon_arr, channels)
self.mon_server_started = False
self.ro_server_started = False
# Startup monitor threadpool
mymon.start(wait=True)
mymon.start_server(wait=True, port=options.monport)
self.mon_server_started = True
self.threadPool = self.monitor.get_threadPool()
# subscribe our monitor to the central monitor hub
mymon.subscribe_remote(options.monitor, channels, ())
# publish to central monitor hub
#mymon.subscribe(options.monitor, channels, ())
mymon.publish_to(options.monitor, ['sound'], {})
self.svc = ro.remoteObjectServer(svcname=self.basename,
obj=self, logger=self.logger,
port=options.port,
ev_quit=self.ev_quit,
threadPool=self.threadPool,
#auth=None,
usethread=True)
self.svc.ro_start(wait=True)
self.ro_server_started = True
def stop_server(self):
self.logger.info("%s exiting..." % self.basename)
if self.mon_server_started:
self.logger.info("stopping monitor server...")
self.monitor.stop_server(wait=True)
if self.ro_server_started:
self.logger.info("stopping remote object server...")
self.svc.ro_stop(wait=True)
self.logger.info("stopping monitor client...")
self.monitor.stop(wait=True)
def viewerOn(self, localdisp, localgeom, remotedisp, passwd, viewonly):
self.muteOff()
passwd = binascii.a2b_base64(passwd)
passwd_file = '/tmp/v__%d' % os.getpid()
with open(passwd_file, 'wb') as out_f:
out_f.write(passwd)
# VNC window
cmdstr = "vncviewer -display %s -geometry=%s %s -passwd %s RemoteResize=0" % (
localdisp, localgeom, remotedisp, passwd_file)
if viewonly:
cmdstr += " -viewonly"
self.logger.info("viewer ON (-display %s -geometry=%s %s)" % (
localdisp, localgeom, remotedisp))
key = localdisp + localgeom
try:
self.procs[key].killpg()
        except Exception:
            pass
try:
self.procs[key] = myproc.myproc(cmdstr, usepg=True)
except Exception as e:
self.logger.error("viewer on error: %s" % (str(e)))
#os.remove(passwd_file)
return 0
def viewerOff(self, localdisp, localgeom):
self.muteOn()
self.logger.info("viewer OFF (%s)" % (localdisp))
try:
key = localdisp + localgeom
self.procs[key].killpg()
del self.procs[key]
except Exception as e:
self.logger.error("viewer off error: %s" % (str(e)))
return 0
def allViewersOff(self):
self.logger.info("All viewers OFF")
for key in list(self.procs.keys()):
try:
self.procs[key].killpg()
del self.procs[key]
except Exception as e:
self.logger.warn("viewer off error: %s" % (str(e)))
return 0
def muteOn(self):
self.soundsink.muteOn()
return 0
def muteOff(self):
self.soundsink.muteOff()
return 0
class CmdLineUI(object):
def __init__(self, options):
self.options = options
self.ev_quit = threading.Event()
def ui(self, obj):
obj.start_server(self.options.rohosts.split(','),
self.options)
try:
try:
while True:
print("Type ^C to exit the server")
sys.stdin.readline()
except KeyboardInterrupt:
print("Keyboard interrupt!")
finally:
obj.allViewersOff()
obj.stop_server()
def add_options(argprs):
argprs.add_argument("--debug", dest="debug", default=False,
action="store_true",
help="Enter the pdb debugger on main()")
argprs.add_argument("-c", "--channels", dest="channels", default='sound',
metavar="LIST",
help="Subscribe to the comma-separated LIST of channels")
argprs.add_argument("-m", "--monitor", dest="monitor", default='monitor',
metavar="NAME",
help="Subscribe to feeds from monitor service NAME")
argprs.add_argument("--monport", dest="monport", type=int,
default=default_mon_port, metavar="PORT",
help="Use PORT for our monitor")
argprs.add_argument("--numthreads", dest="numthreads", type=int,
default=50, metavar="NUM",
help="Use NUM threads in thread pool")
argprs.add_argument("--port", dest="port", type=int,
default=default_svc_port, metavar="PORT",
help="Use PORT for our monitor")
argprs.add_argument("--profile", dest="profile", action="store_true",
default=False,
help="Run the profiler on main()")
argprs.add_argument("--rohosts", dest="rohosts", default='localhost',
metavar="HOSTLIST",
help="Hosts to use for remote objects connection")
ssdlog.addlogopts(argprs)
def main(options, args, ui):
myhost = ro.get_myhost(short=False)
basename = 'g2disp-%s' % (myhost.replace('.', '_'))
logger = ssdlog.make_logger(basename, options)
# Make our callback object
mobj = g2Disp(logger=logger, basename=basename)
ui.ui(mobj)
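
# This module is presumably started by a thin launcher script; a hypothetical
# invocation (argument parser names here are assumptions) would look like:
#
#   if __name__ == '__main__':
#       from argparse import ArgumentParser
#       argprs = ArgumentParser(description="Gen2 display client")
#       add_options(argprs)
#       options, args = argprs.parse_known_args()
#       main(options, args, CmdLineUI(options))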
| 34.15 | 86 | 0.556857 | 875 | 8,196 | 5.12 | 0.276571 | 0.037946 | 0.030357 | 0.011607 | 0.198214 | 0.121875 | 0.097321 | 0.075 | 0.065179 | 0.052232 | 0 | 0.006447 | 0.337604 | 8,196 | 239 | 87 | 34.292887 | 0.818751 | 0.09651 | 0 | 0.186747 | 0 | 0 | 0.126018 | 0 | 0 | 0 | 0 | 0.004184 | 0 | 1 | 0.084337 | false | 0.048193 | 0.042169 | 0.006024 | 0.180723 | 0.012048 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
855e126f49dc147682a8ddcf7d8b3444b2c3acd5 | 8,575 | py | Python | src/pyte_prism/__init__.py | fkshom/pyte-prism | 00e464b5f23f205d2913bc776f97cc248006c559 | [
"MIT"
] | null | null | null | src/pyte_prism/__init__.py | fkshom/pyte-prism | 00e464b5f23f205d2913bc776f97cc248006c559 | [
"MIT"
] | 1 | 2020-12-07T16:18:16.000Z | 2020-12-07T16:18:16.000Z | src/pyte_prism/__init__.py | fkshom/pyte-prism | 00e464b5f23f205d2913bc776f97cc248006c559 | [
"MIT"
] | 1 | 2021-06-21T05:00:15.000Z | 2021-06-21T05:00:15.000Z | import time
import re
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from uritemplate import expand as uriexpand
from logging import getLogger
__version__ = '0.0.4'
logger = getLogger(__name__)
def logged(func):
def wrapper(*args, **kwargs):
try:
qualfuncname = f"{func.__qualname__}"
logger.info(f"started {qualfuncname}, params: {args} and {kwargs}")
return func(*args, **kwargs)
except Exception as e:
logger.exception(e)
return wrapper
class Element(object):
def __init__(self, by, selector):
self.by = by
self.selector = selector
def __get__(self, obj, klass):
if hasattr(obj, 'base_element') and obj.base_element is not None:
return obj.base_element.find_element(self.by, self.selector)
else:
return obj.driver.find_element(self.by, self.selector)
class Elements(object):
def __init__(self, by, selector):
self.by = by
self.selector = selector
def __get__(self, obj, klass):
if hasattr(obj, 'base_element') and obj.base_element is not None:
return obj.base_element.find_elements(self.by, self.selector)
else:
return obj.driver.find_elements(self.by, self.selector)
class SupportMethodGenerator(object):
def __init__(self, timeout=10):
self.timeout = timeout
def wait_until_element_visible(self, by, selector):
this = self
def inner(self, timeout=this.timeout):
wait = WebDriverWait(self.driver, timeout)
wait.until(
EC.visibility_of_element_located((by, selector))
)
return self.driver.find_element(by, selector)
return inner
def wait_until_element_invisible(self, by, selector):
this = self
def inner(self, timeout=this.timeout):
wait = WebDriverWait(self.driver, timeout)
wait.until(
EC.invisibility_of_element_located((by, selector))
)
return None
return inner
def wait_until_element_to_be_clickable(self, by, selector):
this = self
def inner(self, timeout=this.timeout):
wait = WebDriverWait(self.driver, timeout)
wait.until(
EC.element_to_be_clickable((by, selector))
)
return self.driver.find_element(by, selector)
return inner
def has_element(self, by, selector):
def inner(self):
try:
self.driver.find_element(by, selector)
return True
except NoSuchElementException:
return False
return inner
def has_no_element(self, by, selector):
def inner(self):
try:
self.driver.find_element(by, selector)
return False
except NoSuchElementException:
return True
return inner
def element_element(self, by, selector):
def inner(self):
return self.driver.find_element(by, selector)
return inner
def element_elements(self, by, selector):
def inner(self):
return self.driver.find_elements(by, selector)
return inner
class Section(object):
def __init__(self, klass, base_by, base_selector):
self.klass = klass
self.base_by = base_by
self.base_selector = base_selector
def __get__(self, obj, klass):
base_element = obj.driver.find_element(self.base_by, self.base_selector)
return self.klass(obj.driver, base_element=base_element)
class Sections(object):
def __init__(self, klass, base_by, base_selector):
self.klass = klass
self.base_by = base_by
self.base_selector = base_selector
def __get__(self, obj, klass):
base_elements = obj.driver.find_elements(self.base_by, self.base_selector)
return [self.klass(obj.driver, base_element=base_element) for base_element in base_elements]
class Iframe(object):
def __init__(self, klass, base_by, base_selector):
self.klass = klass
self.base_by = base_by
self.base_selector = base_selector
def __get__(self, obj, klass):
iframe_element = obj.driver.find_element(self.base_by, self.base_selector)
return self.klass(obj.driver, iframe_element=iframe_element)
class PageMetaclass(type):
def __new__(cls, name, bases, dict_):
for k, v in list(dict_.items()):
if isinstance(v, Element) or isinstance(v, Elements):
smg = SupportMethodGenerator()
dict_[f"wait_until_{k}_visible"] = smg.wait_until_element_visible(v.by, v.selector)
dict_[f"wait_until_{k}_invisible"] = smg.wait_until_element_invisible(v.by, v.selector)
dict_[f"wait_until_{k}_to_be_clickable"] = smg.wait_until_element_to_be_clickable(v.by, v.selector)
                # Elements also goes through find_element here, so this only
                # checks whether at least one matching element exists
dict_[f"has_{k}"] = smg.has_element(v.by, v.selector)
dict_[f"has_no_{k}"] = smg.has_no_element(v.by, v.selector)
if isinstance(v, Element):
dict_[f"{k}_element"] = smg.element_element(v.by, v.selector)
elif isinstance(v, Elements):
dict_[f"{k}_elements"] = smg.element_elements(v.by, v.selector)
if isinstance(v, Section) or isinstance(v, Sections) or isinstance(v, Iframe):
smg = SupportMethodGenerator()
dict_[f"wait_until_{k}_visible"] = smg.wait_until_element_visible(v.base_by, v.base_selector)
dict_[f"wait_until_{k}_invisible"] = smg.wait_until_element_invisible(v.base_by, v.base_selector)
                # Sections also goes through find_element here, so this only
                # checks whether at least one matching element exists
dict_[f"has_{k}"] = smg.has_element(v.base_by, v.base_selector)
dict_[f"has_no_{k}"] = smg.has_no_element(v.base_by, v.base_selector)
if isinstance(v, Section):
dict_[f"{k}_element"] = smg.element_element(v.base_by, v.base_selector)
elif isinstance(v, Sections):
dict_[f"{k}_elements"] = smg.element_elements(v.base_by, v.base_selector)
elif isinstance(v, Iframe):
dict_[f"{k}_element"] = smg.element_element(v.base_by, v.base_selector)
return type.__new__(cls, name, bases, dict_)
class Page(object, metaclass=PageMetaclass):
_url = None
_url_matcher = None
def __init__(self, driver):
self.driver = driver
@logged
def load(self, **kwargs):
if self._url:
uri = uriexpand(self._url, **kwargs)
self.driver.get(uri)
else:
raise Exception(f"Cant load. {self.__class__} has not _url parameter")
@logged
def is_loaded(self):
if self._url_matcher:
if re.fullmatch(self._url_matcher, self.current_url):
return True
else:
return False
elif self._url:
if self._url == self.current_url:
return True
else:
return False
else:
raise Exception(f"Cant load check. {self.__class__} has neither _url and _url_matcher parameter")
@logged
def assert_loaded(self):
if self.is_loaded():
return True
else:
raise Exception(f"Page {self.__class__} is not loaded.")
@logged
def wait_until_page_loaded(self, timeout=10):
for i in range(1, timeout+1):
logger.debug(f"checking page is loaded {i}/{timeout}")
if self.is_loaded():
logger.debug(f"page is loaded!")
break
time.sleep(1)
else:
raise Exception(f"Timeout loading Page {self.__class__}")
@logged
def wait_until_page_readystate_is_complete(self, timeout=10):
for i in range(1, timeout+1):
logger.debug(f"checking document.readyState {i}/{timeout}")
if self.driver.execute_script("return document.readyState") == "complete":
logger.debug(f"document.readyState is complete!")
break
time.sleep(1)
else:
raise Exception(f"Timeout loading Page {self.__class__}")
@property
def current_url(self):
return self.driver.current_url
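
# A minimal usage sketch (hypothetical page object; the URL and selector are
# assumptions, not part of this library):
#
#   class HomePage(Page):
#       _url = "https://example.com/"
#       search_box = Element(By.NAME, "q")
#
#   page = HomePage(driver)
#   page.load()
#   page.wait_until_search_box_visible()  # generated by PageMetaclass
#   page.search_box.send_keys("pyte-prism")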
class PageSection(object, metaclass=PageMetaclass):
def __init__(self, driver, base_element):
self.driver = driver
self.base_element = base_element
def __enter__(self):
return self
def __exit__(self, *args):
pass
class PageIframe(object, metaclass=PageMetaclass):
def __init__(self, driver, iframe_element):
self.driver = driver
self.iframe_element = iframe_element
def __enter__(self):
        self.driver.switch_to.frame(self.iframe_element)
return self
def __exit__(self, *args):
self.driver.switch_to.default_content()
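# --- Usage sketch (illustrative; not part of the original module) ---
# PageMetaclass turns Element/Elements/Section/Sections/Iframe class
# attributes into generated helper methods. Assuming an Element descriptor
# exposing `by` and `selector`, a page such as:
#
#     class LoginPage(Page):
#         _url = "https://example.com/login"            # hypothetical URL
#         submit = Element(By.CSS_SELECTOR, "#submit")  # hypothetical locator
#
# automatically gains wait_until_submit_visible(), has_submit(),
# submit_element() and friends:
#
#     page = LoginPage(driver)
#     page.load()
#     page.wait_until_page_loaded()
#     page.wait_until_submit_visible()
#     page.submit_element().click()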
| 32.116105 | 107 | 0.694927 | 1,161 | 8,575 | 4.849268 | 0.126615 | 0.035524 | 0.017584 | 0.02238 | 0.630728 | 0.594316 | 0.536057 | 0.508171 | 0.480284 | 0.450444 | 0 | 0.002476 | 0.1993 | 8,575 | 266 | 108 | 32.236842 | 0.817507 | 0.014577 | 0 | 0.497758 | 0 | 0 | 0.083935 | 0.014443 | 0 | 0 | 0 | 0 | 0.004484 | 1 | 0.183857 | false | 0.004484 | 0.040359 | 0.017937 | 0.439462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
855ea6f04c921ad42df72e3376925fdd9e5314d7 | 2,432 | py | Python | scripts/update_camera_info.py | mikelgarciafonseca/lidar_camera_calibration | 529dc69abd0faed293b9ca2238b7c6b25c66cb9e | [
"BSD-3-Clause"
] | 291 | 2019-04-08T15:02:09.000Z | 2022-03-29T07:43:53.000Z | scripts/update_camera_info.py | CXT-666/lidar_camera_calibration | 688397c500967a42b2aca82d6c11393dd73aaa9c | [
"BSD-3-Clause"
] | 45 | 2019-08-22T10:15:10.000Z | 2022-03-23T05:11:36.000Z | scripts/update_camera_info.py | CXT-666/lidar_camera_calibration | 688397c500967a42b2aca82d6c11393dd73aaa9c | [
"BSD-3-Clause"
] | 77 | 2019-04-17T05:25:11.000Z | 2022-03-20T04:39:13.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author : Heethesh Vhavle
Email : heethesh@cmu.edu
Version : 1.0.1
Date : Jan 18, 2019
Description:
Script to update the camera calibration data into the ROSBAG file
Ensure that this file has executable permissions
Example Usage:
$ rosrun lidar_camera_calibration update_camera_info.py rosbag.bag calibration.yaml
Notes:
Make sure this file has executable permissions:
$ chmod +x update_camera_info.py
'''
# Python 2/3 compatibility
from __future__ import print_function
# Built-in modules
import os
import sys
import yaml
# ROS modules
PKG = 'lidar_camera_calibration'
import roslib; roslib.load_manifest(PKG)
import rosbag
import rospy
def load_calibration_data(filename):
# Open calibration file
with open(filename, 'r') as stream:
try:
            calibration = yaml.safe_load(stream)
except yaml.YAMLError as exc:
rospy.logerr(exc)
sys.exit(1)
return calibration
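# Illustrative calibration.yaml layout assumed by this script (keys inferred
# from the fields accessed below; values are hypothetical):
#
#   camera_matrix:           {rows: 3, cols: 3, data: [fx, 0, cx, 0, fy, cy, 0, 0, 1]}
#   distortion_coefficients: {rows: 1, cols: 5, data: [k1, k2, p1, p2, k3]}
#   rectification_matrix:    {rows: 3, cols: 3, data: [1, 0, 0, 0, 1, 0, 0, 0, 1]}
#   projection_matrix:       {rows: 3, cols: 4, data: [fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1, 0]}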
if __name__ == '__main__':
# Get parameters when starting node from a launch file.
    if len(sys.argv) < 2:
BAG_FILE = rospy.get_param('filename')
CALIB_FILE = rospy.get_param('calib_data')
CAMERA_INFO = rospy.get_param('camera_info')
# Get parameters as arguments
else:
BAG_FILE = sys.argv[1]
CALIB_FILE = sys.argv[2]
CAMERA_INFO = '/sensors/camera/camera_info'
# Load ROSBAG file
rospy.loginfo('Bag Filename: %s', BAG_FILE)
bag = rosbag.Bag(BAG_FILE, 'r')
# Output file
folder = os.path.dirname(BAG_FILE)
output_name = os.path.splitext(os.path.basename(BAG_FILE))[0] + '_updated.bag'
OUTPUT_FILE = os.path.join(folder, output_name)
    # rosbag.Bag(..., 'w') creates the output file itself; os.mknod would
    # fail if the file already existed.
    output = rosbag.Bag(OUTPUT_FILE, 'w')
# Load calibration data
calibration = load_calibration_data(CALIB_FILE)
# Update calibration data
rospy.loginfo('Updating %s data...' % CAMERA_INFO)
for topic, msg, t in bag.read_messages():
if topic == CAMERA_INFO:
msg.D = calibration['distortion_coefficients']['data']
msg.K = calibration['camera_matrix']['data']
msg.R = calibration['rectification_matrix']['data']
msg.P = calibration['projection_matrix']['data']
output.write(topic, msg, msg.header.stamp if msg._has_header else t)
rospy.loginfo('Done')
# Close bag file
bag.close()
output.close()
| 26.725275 | 83 | 0.675576 | 326 | 2,432 | 4.861963 | 0.404908 | 0.050473 | 0.035962 | 0.026498 | 0.040379 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008943 | 0.218339 | 2,432 | 90 | 84 | 27.022222 | 0.824829 | 0.287829 | 0 | 0 | 0 | 0 | 0.13493 | 0.043224 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.159091 | 0 | 0.204545 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
855efe602850a47790a27ec939c0a1e729cd7710 | 733 | py | Python | lim/random/glmm.py | glimix/glimix | 22c9b94732918bce31f64cb33ce368ea85ead478 | [
"MIT"
] | 2 | 2016-12-16T14:14:59.000Z | 2017-01-31T16:50:08.000Z | lim/random/glmm.py | glimix/glimix | 22c9b94732918bce31f64cb33ce368ea85ead478 | [
"MIT"
] | null | null | null | lim/random/glmm.py | glimix/glimix | 22c9b94732918bce31f64cb33ce368ea85ead478 | [
"MIT"
] | 2 | 2017-02-13T14:34:37.000Z | 2017-02-15T14:27:32.000Z | from __future__ import division
from numpy.random import RandomState
from numpy_sugar.linalg import sum2diag
from numpy_sugar import epsilon
from numpy_sugar.random import multivariate_normal
class GLMMSampler(object):
def __init__(self, lik, mean, cov):
self._lik = lik
self._mean = mean
self._cov = cov
def sample(self, random_state=None):
if random_state is None:
random_state = RandomState()
m = self._mean.feed('sample').value()
K = self._cov.feed('sample').value()
sum2diag(K, +epsilon.small, out=K)
u = multivariate_normal(m, K, random_state)
sum2diag(K, -epsilon.small, out=K)
return self._lik.sample(u, random_state)
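# --- Usage sketch (illustrative): `lik`, `mean` and `cov` are assumed to be
# limix-style objects whose .feed('sample').value() returns the mean vector
# and covariance matrix, and whose likelihood exposes .sample(u, random_state):
#
#     sampler = GLMMSampler(lik, mean, cov)
#     y = sampler.sample(RandomState(0))
#
# The paired sum2diag calls add a small jitter to K's diagonal so the
# Cholesky factorization inside multivariate_normal succeeds, then undo the
# in-place change to leave K unmodified.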
| 27.148148 | 51 | 0.667121 | 97 | 733 | 4.793814 | 0.360825 | 0.11828 | 0.090323 | 0.090323 | 0.107527 | 0.107527 | 0 | 0 | 0 | 0 | 0 | 0.005357 | 0.236016 | 733 | 26 | 52 | 28.192308 | 0.825 | 0 | 0 | 0 | 0 | 0 | 0.016371 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.263158 | 0 | 0.473684 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
855fc87a5ce5bd24bab6ab108fbb29dc4e299ceb | 607 | py | Python | dags/simple_dag_backfill.py | rodrigoarenas456/airflow-course | 8ffda59b8ac4cfa18b4cd614bc0f75ee18324b28 | [
"MIT"
] | null | null | null | dags/simple_dag_backfill.py | rodrigoarenas456/airflow-course | 8ffda59b8ac4cfa18b4cd614bc0f75ee18324b28 | [
"MIT"
] | 8 | 2021-09-08T21:24:25.000Z | 2022-03-29T22:28:47.000Z | dags/simple_dag_backfill.py | rodrigoarenas456/airflow-course | 8ffda59b8ac4cfa18b4cd614bc0f75ee18324b28 | [
"MIT"
] | 1 | 2021-09-06T12:18:39.000Z | 2021-09-06T12:18:39.000Z | import datetime as dt
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
"""
if catchup=False, then it will not run for past dates that didn't got executed
"""
default_args = {
'owner': 'airflow',
'start_date': dt.datetime(2020, 7, 1),
'concurrency': 1,
'retries': 0
}
with DAG('simple_dag_backfill',
default_args=default_args,
schedule_interval='@daily') as dag:
task_hello = BashOperator(task_id='hello', bash_command='echo "hello!"')
task_bye = BashOperator(task_id='bye', bash_command='echo "bye!"')
task_hello >> task_bye
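# Sketch: to skip backfilling the missed schedules above, pass catchup=False
# (assumes Airflow 1.10+, where catchup defaults to True unless overridden
# in airflow.cfg):
#
#     with DAG('simple_dag_no_backfill', default_args=default_args,
#              schedule_interval='@daily', catchup=False) as dag:
#         ...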
| 26.391304 | 78 | 0.691928 | 84 | 607 | 4.809524 | 0.595238 | 0.081683 | 0.089109 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016129 | 0.182867 | 607 | 22 | 79 | 27.590909 | 0.798387 | 0 | 0 | 0 | 0 | 0 | 0.18618 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
856089efc2c334ffeab6724cf280b643ff6f434c | 1,706 | py | Python | google/home/graph/v1/home-graph-v1-py/google/home/graph_v1/types/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/home/graph/v1/home-graph-v1-py/google/home/graph_v1/types/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/home/graph/v1/home-graph-v1-py/google/home/graph_v1/types/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .device import (
AgentOtherDeviceId,
Device,
DeviceInfo,
DeviceNames,
)
from .homegraph import (
AgentDeviceId,
DeleteAgentUserRequest,
QueryRequest,
QueryRequestInput,
QueryRequestPayload,
QueryResponse,
QueryResponsePayload,
ReportStateAndNotificationDevice,
ReportStateAndNotificationRequest,
ReportStateAndNotificationResponse,
RequestSyncDevicesRequest,
RequestSyncDevicesResponse,
StateAndNotificationPayload,
SyncRequest,
SyncResponse,
SyncResponsePayload,
)
__all__ = (
'AgentOtherDeviceId',
'Device',
'DeviceInfo',
'DeviceNames',
'AgentDeviceId',
'DeleteAgentUserRequest',
'QueryRequest',
'QueryRequestInput',
'QueryRequestPayload',
'QueryResponse',
'QueryResponsePayload',
'ReportStateAndNotificationDevice',
'ReportStateAndNotificationRequest',
'ReportStateAndNotificationResponse',
'RequestSyncDevicesRequest',
'RequestSyncDevicesResponse',
'StateAndNotificationPayload',
'SyncRequest',
'SyncResponse',
'SyncResponsePayload',
)
| 27.079365 | 74 | 0.732708 | 140 | 1,706 | 8.9 | 0.642857 | 0.048154 | 0.020867 | 0.025682 | 0.537721 | 0.537721 | 0.537721 | 0.537721 | 0.537721 | 0.537721 | 0 | 0.006494 | 0.187573 | 1,706 | 62 | 75 | 27.516129 | 0.892496 | 0.333529 | 0 | 0 | 0 | 0 | 0.338983 | 0.17752 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.043478 | 0 | 0.043478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8560b0e4b09c5f913481d0494af7a7cde6e95874 | 2,348 | py | Python | states/sense_profile_state.py | rbenamotz/LEMPA | eab84e2494aac0d1461582c7f83405cb4ab7c16e | [
"MIT"
] | 83 | 2020-08-11T21:03:21.000Z | 2022-02-27T17:52:31.000Z | states/sense_profile_state.py | rbenamotz/LEMPA | eab84e2494aac0d1461582c7f83405cb4ab7c16e | [
"MIT"
] | 7 | 2020-09-06T17:10:04.000Z | 2021-05-25T11:53:18.000Z | states/sense_profile_state.py | rbenamotz/LEMPA | eab84e2494aac0d1461582c7f83405cb4ab7c16e | [
"MIT"
] | 6 | 2020-09-05T23:42:01.000Z | 2021-06-21T04:09:03.000Z | import sys
import time
import RPi.GPIO as GPIO
from application import Application, COMMAND_LINE_PARAM_PROFILE_ID
from profiles import profile_by_id, profile_by_jumper
from . import State
from hardware import PINS_PROFILES
class SensingProfileState(State):
def __load_profile__(self, profile_id, first=True):
p = profile_by_id(profile_id)
if first:
self.app.profiles = []
self.app.profile_name = profile_id
self.app.profile_info = p
self.app.detail('Loading "{}"'.format(profile_id))
if "plugins" in p:
for pl in self.app.plugins:
pl.load_conf(p["plugins"][0]["conf"])
if p["type"] == "bin":
self.app.profiles.append(p)
return True
if p["type"] == "composite":
for p0 in p["profiles"]:
self.__load_profile__(p0, False)
return True
raise ValueError("Unknown profile type {}".format(p["type"]))
def __init__(self, app):
super().__init__(app)
for p in PINS_PROFILES:
GPIO.setup(p, GPIO.IN, pull_up_down=GPIO.PUD_UP)
self.app.skip_detect = False
if len(sys.argv) >= COMMAND_LINE_PARAM_PROFILE_ID + 1:
profile_id = sys.argv[COMMAND_LINE_PARAM_PROFILE_ID]
if not profile_id == "_":
self.app.detail("Using profile from args: {}".format(profile_id))
                self.__load_profile__(profile_id)
self.app.skip_detect = True
return
self.app.detail("Detecting profile by jumper")
self.message_shown = False
def do_step(self):
if self.app.skip_detect:
return True
for j in range(4):
p = PINS_PROFILES[j]
if not GPIO.input(p):
self.app.detail("Detected jumper {}".format(j + 1))
temp = profile_by_jumper(j + 1)
profile_id = temp["id"]
self.__load_profile__(profile_id)
return True
time.sleep(0.1)
if not self.message_shown:
self.app.print("Connect jumper")
self.message_shown = True
return False
def on_event(self, event):
if event:
return Application.APP_STATE_FIRMWARE_DOWNLOAD
return self
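# --- Illustrative profile shapes (hypothetical; the real schema lives in the
# profiles module) ---
#
#     {"id": "fw-a", "type": "bin", ...}          # leaf profile, appended as-is
#     {"id": "combo", "type": "composite",
#      "profiles": ["fw-a", "fw-b"]}              # expanded recursively
#
# __load_profile__ flattens composites into app.profiles by recursing on each
# referenced profile id.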
| 34.529412 | 81 | 0.579216 | 296 | 2,348 | 4.331081 | 0.280405 | 0.076443 | 0.040562 | 0.053822 | 0.069423 | 0.049922 | 0.049922 | 0 | 0 | 0 | 0 | 0.006285 | 0.322402 | 2,348 | 67 | 82 | 35.044776 | 0.799497 | 0 | 0 | 0.066667 | 0 | 0 | 0.074106 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.116667 | 0 | 0.333333 | 0.016667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8561517142c3b7bdb23c921efcc982af0f356670 | 2,741 | py | Python | handlers/registration.py | StGrail/AlleyCat-bot | 3681627138e088bef2eea6867eb0b9a11d183e38 | [
"MIT"
] | null | null | null | handlers/registration.py | StGrail/AlleyCat-bot | 3681627138e088bef2eea6867eb0b9a11d183e38 | [
"MIT"
] | 1 | 2021-03-16T17:39:14.000Z | 2021-03-17T07:31:26.000Z | handlers/registration.py | StGrail/AlleyCat-bot | 3681627138e088bef2eea6867eb0b9a11d183e38 | [
"MIT"
] | null | null | null | from aiogram.dispatcher import FSMContext
from aiogram.types import CallbackQuery
from FSM.Registation_states import Registration_form
from constants.text_messages import RULES, START_INFO
from keyboards.inline_kb import bicycle_type, gender, apply_registration, check_reg_answer
from utils.loader import dp, db
# 'Rules' button pressed
@dp.callback_query_handler(text='rules')
async def rules(call: CallbackQuery):
await call.answer(cache_time=55)
await call.message.edit_text(f'{RULES}', reply_markup=apply_registration)
# 'Registration' button pressed
@dp.callback_query_handler(text='start_reg')
async def reg(call: CallbackQuery):
await call.message.edit_text(f'Привет {call.from_user.full_name}, укажи свой пол:',
reply_markup=gender)
await Registration_form.Sex.set()
# gender selection, then show the bicycle-type keyboard
@dp.callback_query_handler(state=Registration_form.Sex)
async def choose_sex(call: CallbackQuery, state: FSMContext):
await call.answer(cache_time=1)
answer = call.data
await state.update_data(sex=answer)
await db.update_racer_gender(gender=answer, id=call.from_user.id)
await call.message.edit_text(f'В какой категории участвуешь?', reply_markup=bicycle_type)
await Registration_form.next()
# bicycle category selection, then show the answer-confirmation keyboard
@dp.callback_query_handler(state=Registration_form.Bicycle_type)
async def choose_bicycle_type(call: CallbackQuery, state: FSMContext):
await call.answer(cache_time=1)
answer = call.data
    await db.update_racer_bicycle(bicycle=answer, id=call.from_user.id)  # write to the database
await state.update_data(bicycle_type=answer)
data = await state.get_data()
if data.get('sex') == 'male':
sex = 'Ты выбрал'
elif data.get('sex') == 'female':
sex = 'Ты выбрала'
else:
sex = 'Ты еще не определился с полом (участвуешь вне зачета) и выбрал'
if call.data == 'fixie':
bicycle = 'фиксы 🚲'
else:
bicycle = 'мульти/синглспид 🚴'
await call.message.edit_text(f'{sex} категорию: {bicycle}', reply_markup=check_reg_answer)
await state.reset_state(with_data=False)
# correct mistakes made during registration
@dp.callback_query_handler(text='data_not_ok')
async def correcting(call: CallbackQuery, state: FSMContext):
await call.answer(cache_time=1)
await state.reset_data()
await state.reset_state()
await call.message.edit_text('Укажи еще раз свой пол:', reply_markup=gender)
await Registration_form.Sex.set()
# information about the start location
@dp.callback_query_handler(text='data_ok')
async def waiting_start(call: CallbackQuery):
await call.answer(cache_time=1)
await call.message.edit_text(START_INFO)
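# Sketch of the FSM states this module relies on (defined in
# FSM/Registation_states.py; names inferred from the handlers above):
#
#     from aiogram.dispatcher.filters.state import State, StatesGroup
#
#     class Registration_form(StatesGroup):
#         Sex = State()            # gender selection
#         Bicycle_type = State()   # category selection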
| 37.040541 | 94 | 0.747537 | 384 | 2,741 | 5.153646 | 0.315104 | 0.050025 | 0.045478 | 0.0667 | 0.398686 | 0.350682 | 0.2476 | 0.15715 | 0.15715 | 0.15715 | 0 | 0.002586 | 0.153594 | 2,741 | 73 | 95 | 37.547945 | 0.849569 | 0.082087 | 0 | 0.188679 | 0 | 0 | 0.117225 | 0.010766 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.113208 | 0 | 0.113208 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
856482c3cbf03c7b7af2b44e3422a0b6758b9099 | 3,446 | py | Python | python/grouped_iterator.py | kadepettie/mike_tools | 467698a835c04383d97c18055cb200ea6cdbc9b0 | [
"Unlicense"
] | 2 | 2016-01-14T02:04:37.000Z | 2018-03-16T09:38:10.000Z | python/grouped_iterator.py | kadepettie/mike_tools | 467698a835c04383d97c18055cb200ea6cdbc9b0 | [
"Unlicense"
] | null | null | null | python/grouped_iterator.py | kadepettie/mike_tools | 467698a835c04383d97c18055cb200ea6cdbc9b0 | [
"Unlicense"
] | 1 | 2018-07-20T20:31:39.000Z | 2018-07-20T20:31:39.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Iterate through a pre-sorted text file and return lines as a group.
============================================================================
AUTHOR: Michael D Dacre, mike.dacre@gmail.com
ORGANIZATION: Stanford University
LICENSE: MIT License, property of Stanford, use as you wish
VERSION: 0.1
       CREATED: 2016-09-27 16:09
Last modified: 2016-09-27 17:16
============================================================================
"""
import gzip
import bz2
def giterate(infile, groupby, columns=None, sep='\t', header=False,
pandas=False):
"""Iterate through a text file and yield lines in groups.
:infile: The path to a plain text, gzipped, or bzipped text file or a file
handle.
:groupby: An integer reference to the column you wish to group on or a
column name if either header or column names provided.
:columns: Either None, or an integer count of columns, or a list of column
names you would like to use to access your data. If integer is
provided then column count is confirmed.
:header: If true, first line is used as column names if none provided or
skipped.
:pandas: Yield a pandas dataframe for every group instead of a list of
lists or Line objects.
:yields: Default is a list of lists for each group. If pandas is True,
then yields a dataframe for every group.
"""
if pandas:
import pandas as pd
if isinstance(columns, list):
collen = len(columns)
else:
collen = columns if isinstance(columns, int) else None
columns = None
with open_zipped(infile) as fin:
grp = []
nxt = ''
if header:
head = fin.readline()
if not columns:
columns = head.rstrip().split(sep)
if isinstance(groupby, str):
if isinstance(columns, list):
groupby = columns.index(groupby)
else:
raise ValueError("groupby cannot be a string if neither " +
"header nor column names specified")
        for line in fin:
            fields = line.rstrip().split(sep)
            if collen:
                assert collen == len(fields)
            if not nxt:
                nxt = fields[groupby]
                grp.append(fields)
                continue
            if fields[groupby] == nxt:
                grp.append(fields)
                continue
            else:
                if pandas:
                    out = pd.DataFrame(grp)
                    if columns:
                        out.columns = columns
                else:
                    out = grp
                grp = [fields]
                nxt = fields[groupby]  # track the new group key
                yield out
        # Yield the final group; the loop above only yields on key changes.
        if grp:
            if pandas:
                out = pd.DataFrame(grp)
                if columns:
                    out.columns = columns
            else:
                out = grp
            yield out
def open_zipped(infile, mode='r'):
""" Return file handle of file regardless of zipped or not
Text mode enforced for compatibility with python2 """
mode = mode[0] + 't'
p2mode = mode
if hasattr(infile, 'write'):
return infile
if isinstance(infile, str):
if infile.endswith('.gz'):
return gzip.open(infile, mode)
if infile.endswith('.bz2'):
if hasattr(bz2, 'open'):
return bz2.open(infile, mode)
else:
return bz2.BZ2File(infile, p2mode)
return open(infile, p2mode)
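# --- Usage sketch (file name and column layout are illustrative) ---
#
#     # data.tsv, pre-sorted on its first column:
#     #   a<TAB>1
#     #   a<TAB>2
#     #   b<TAB>3
#     for group in giterate('data.tsv', groupby=0):
#         print(group)   # [['a', '1'], ['a', '2']], then [['b', '3']]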
| 34.46 | 79 | 0.53047 | 402 | 3,446 | 4.542289 | 0.375622 | 0.032859 | 0.011501 | 0.024096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017033 | 0.352583 | 3,446 | 99 | 80 | 34.808081 | 0.801434 | 0.412652 | 0 | 0.224138 | 0 | 0 | 0.046956 | 0 | 0 | 0 | 0 | 0 | 0.017241 | 1 | 0.034483 | false | 0 | 0.051724 | 0 | 0.172414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8565f946b86cb9e0c19ac6a1ce1fc8a3f4778e60 | 3,772 | py | Python | announcer/email_decorators/ticket.py | dokipen/trac-announcer-plugin | 7ef4123a7508c5395c8008fa2a8478b1888b4f63 | [
"BSD-3-Clause"
] | null | null | null | announcer/email_decorators/ticket.py | dokipen/trac-announcer-plugin | 7ef4123a7508c5395c8008fa2a8478b1888b4f63 | [
"BSD-3-Clause"
] | 1 | 2018-06-11T14:48:06.000Z | 2018-06-11T14:48:06.000Z | announcer/email_decorators/ticket.py | dokipen/trac-announcer-plugin | 7ef4123a7508c5395c8008fa2a8478b1888b4f63 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2009, Robert Corsaro
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <ORGANIZATION> nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from trac.core import *
from trac.config import Option
from trac.util.text import to_unicode
from genshi.template import NewTextTemplate
from announcer.distributors.mail import IAnnouncementEmailDecorator
from announcer.util.mail import next_decorator, set_header
class TicketSubjectEmailDecorator(Component):
implements(IAnnouncementEmailDecorator)
ticket_email_subject = Option('announcer', 'ticket_email_subject',
"Ticket #${ticket.id}: ${ticket['summary']} " \
"{% if action %}[${action}]{% end %}",
"""Format string for ticket email subject. This is
a mini genshi template that is passed the ticket
event and action objects.""")
    def decorate_message(self, event, message, decorates=None):
        if event.realm == 'ticket':
            # Use the status transition as the action when one is present;
            # otherwise fall back to the event category.
            action = event.category
            if event.changes and 'status' in event.changes:
                action = 'Status -> %s' % (event.target['status'])
            template = NewTextTemplate(self.ticket_email_subject)
            subject = to_unicode(template.generate(
                ticket=event.target,
                event=event,
                action=action
            ).render())
prefix = self.config.get('announcer', 'email_subject_prefix')
if prefix == '__default__':
prefix = '[%s] ' % self.env.project_name
if prefix:
subject = "%s%s"%(prefix, subject)
if event.category != 'created':
subject = 'Re: %s'%subject
set_header(message, 'Subject', subject)
return next_decorator(event, message, decorates)
class TicketAddlHeaderEmailDecorator(Component):
implements(IAnnouncementEmailDecorator)
def decorate_message(self, event, message, decorates=None):
if event.realm == 'ticket':
for k in ('id', 'priority', 'severity'):
name = 'X-Announcement-%s'%k.capitalize()
set_header(message, name, event.target[k])
return next_decorator(event, message, decorates)
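# --- Illustrative trac.ini configuration for the subject decorator ---
# (the option name comes from the Option() declaration above; the prefix
# value is hypothetical):
#
#     [announcer]
#     ticket_email_subject = Ticket #${ticket.id}: ${ticket['summary']} {% if action %}[${action}]{% end %}
#     email_subject_prefix = [my-project]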
| 44.376471 | 79 | 0.661188 | 434 | 3,772 | 5.693548 | 0.440092 | 0.024282 | 0.029138 | 0.018616 | 0.15945 | 0.140024 | 0.107649 | 0.107649 | 0.107649 | 0.107649 | 0 | 0.001739 | 0.23754 | 3,772 | 84 | 80 | 44.904762 | 0.857441 | 0.429215 | 0 | 0.2 | 0 | 0 | 0.125956 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.325 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8566ce5f7a94d07cf4e8e35937a2d2bccb26654d | 5,155 | py | Python | ccwt_client/strategy/multi_symbols.py | nigelliyang/ccwt_client | 4efb8f2c790145b5f547e350d24413bb8b1bd9ed | [
"Apache-2.0"
] | null | null | null | ccwt_client/strategy/multi_symbols.py | nigelliyang/ccwt_client | 4efb8f2c790145b5f547e350d24413bb8b1bd9ed | [
"Apache-2.0"
] | null | null | null | ccwt_client/strategy/multi_symbols.py | nigelliyang/ccwt_client | 4efb8f2c790145b5f547e350d24413bb8b1bd9ed | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyalgotrade import strategy
from pyalgotrade import plotter
from pyalgotrade.broker.backtesting import Broker
from pyalgotrade.broker.backtesting import TradePercentage
from pyalgotrade.broker.slippage import VolumeShareSlippage
from pyalgotrade.bar import Frequency
from pyalgotrade.technical import ma
from pyalgotrade.stratanalyzer import sharpe
from pyalgotrade.stratanalyzer import returns
from ccwt_client.ccwt_feed import Feed
class MultiSymbols(strategy.BacktestingStrategy):
def __init__(self, feed, instruments, broker):
strategy.BacktestingStrategy.__init__(self, feed, broker)
self.__instruments = instruments
self.__sharesToBuy = {}
# Initialize indicators for each instrument.
        ''' Technical indicators:
        SMA, EMA, WMA, VWAP, MACD, RSI, StochasticOscillator, BollingerBands, ATR, HurstExponent,
        CumulativeReturn, LeastSquaresRegression, Slope, StdDev, ZScore
        '''
self.__sma = {}
for instrument in instruments:
priceDS = feed[instrument].getPriceDataSeries()
self.__sma[instrument] = ma.SMA(priceDS, 15)
def getSMA(self, instrument):
return self.__sma[instrument]
def onBars(self, bars):
        # Get the bar for each instrument:
        # for instrument in bars.getInstruments():
        #     self.info('%s price: %.6f' % (instrument, bars.getBar(instrument).getClose()))
orders = self.getBroker().getActiveOrders('okex_BTCUSDT')
if orders:
self.info(str(orders))
bitmex = bars.getBar('bitmex_XBTUSD')
okex = bars.getBar('okex_BTCUSDT')
bitmexSMA = self.getSMA('bitmex_XBTUSD')
if bitmex is None:
return None
if okex is None:
return None
if bitmexSMA[-1] is None:
return None
if bitmex is not None and okex is not None:
if bitmex.getClose() - okex.getClose() > 3 and bitmex.getClose() > bitmexSMA[-1]:
cash = self.getBroker().getCash()
size = cash * 0.1 / okex.getClose()
                '''
                size > 0 buy; size < 0 sell.
                marketOrder: fill at market price. onClose=True uses the next bar's close;
                    False uses the next bar's open. onClose=True is currently not supported
                    for intraday bars.
                limitOrder: fill at the limit price.
                    buy: if the next bar is below limitPrice, fill = open; if the bar
                        contains limitPrice, fill = min(open, limitPrice)
                    sell: if the next bar is above limitPrice, fill = open; if the bar
                        contains limitPrice, fill = max(open, limitPrice)
                stopOrder: stop order.
                    buy: if the next bar is above stopPrice, fill = open; if the bar
                        contains stopPrice, fill = max(open, stopPrice)
                    sell: if the next bar is below stopPrice, fill = open; if the bar
                        contains stopPrice, fill = min(open, stopPrice)
                stopLimitOrder: stop-limit order; the stop price is checked first,
                    then the limit price applies.
                '''
self.marketOrder('okex_BTCUSDT', size)
self.info('cash %.2f ; size %.2f' % (cash, size))
self.info('bitmex price %.6f ; okex price %.6f ; bitmexSMA %.6f' % (bitmex.getClose(), okex.getClose(), bitmexSMA[-1]))
if bitmex.getClose() - okex.getClose() < 4 and bitmex.getClose() < bitmexSMA[-1]:
okexShares = self.getBroker().getShares('okex_BTCUSDT')
size = okexShares * -0.5
self.marketOrder('okex_BTCUSDT', size)
self.info('okexShares %.2f ; size %.2f' % (okexShares, size))
self.info('bitmex price %.6f ; okex price %.6f ; bitmexSMA %.6f' % (bitmex.getClose(), okex.getClose(), bitmexSMA[-1]))
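    # --- Illustrative order-type usage (sketch; prices are hypothetical) ---
    #
    #     self.marketOrder('okex_BTCUSDT', size)                      # next-bar open
    #     self.limitOrder('okex_BTCUSDT', 9500.0, size)               # at limit or better
    #     self.stopOrder('okex_BTCUSDT', 9000.0, -size)               # stop-loss
    #     self.stopLimitOrder('okex_BTCUSDT', 9000.0, 8950.0, -size)  # stop, then limit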
def main(plot):
instruments = ['bitmex_XBTUSD','okex_BTCUSDT']
feed = Feed(Frequency.SECOND)
feed.loadBars("bitmex_XBTUSD", test_back=True)
feed.loadBars("okex_BTCUSDT", test_back=True)
    '''initial cash (margin)'''
initCash = 1000000
    '''Commission settings.
    Setting different commission types per instrument is not supported yet.
    Three commission types:
        NoCommission: none (default)
        FixedPerTrade: fixed amount per trade
        TradePercentage: percentage of the trade value
    '''
commission = TradePercentage(0.0003)
broker = Broker(initCash,feed,commission)
    # Set the slippage model (default: NoSlippage):
#broker.getFillStrategy().setSlippageModel(VolumeShareSlippage)
    # Set the volume limit: fill at most volume * limit of each bar
#broker.getFillStrategy().setVolumeLimit(0.1)
strat = MultiSymbols(feed, instruments, broker)
sharpeRatioAnalyzer = sharpe.SharpeRatio()
strat.attachAnalyzer(sharpeRatioAnalyzer)
returnsAnalyzer = returns.Returns()
strat.attachAnalyzer(returnsAnalyzer)
if plot:
plt = plotter.StrategyPlotter(strat, False, False, True)
plt.getOrCreateSubplot("cash").addCallback("Cash", lambda x: strat.getBroker().getCash())
# Plot strategy vs. SPY cumulative returns.
# plt.getOrCreateSubplot("returns").addDataSeries("SPY", cumret.CumulativeReturn(feed["SPY"].getPriceDataSeries()))
plt.getOrCreateSubplot("returns").addDataSeries("Strategy", returnsAnalyzer.getCumulativeReturns())
strat.run()
print("Sharpe ratio: %.2f" % sharpeRatioAnalyzer.getSharpeRatio(0.05))
print("Returns: %.2f %%" % (returnsAnalyzer.getCumulativeReturns()[-1] * 100))
if plot:
plt.plot()
if __name__ == "__main__":
main(True) | 40.273438 | 135 | 0.644811 | 511 | 5,155 | 6.424658 | 0.356164 | 0.041121 | 0.021931 | 0.031678 | 0.146817 | 0.073713 | 0.073713 | 0.050564 | 0.050564 | 0.050564 | 0 | 0.013074 | 0.243259 | 5,155 | 128 | 136 | 40.273438 | 0.828506 | 0.102813 | 0 | 0.126761 | 0 | 0 | 0.099046 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056338 | false | 0 | 0.140845 | 0.014085 | 0.267606 | 0.028169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8569aa3b086b166d6250ee9399cc1fd457cec787 | 470 | py | Python | sme_material_apps/utils/html_to_pdf_response.py | luizhpriotto/piloto_apresentacao | c968025db819633ee4cd75df5357ab6a4ab7d9af | [
"MIT"
] | null | null | null | sme_material_apps/utils/html_to_pdf_response.py | luizhpriotto/piloto_apresentacao | c968025db819633ee4cd75df5357ab6a4ab7d9af | [
"MIT"
] | null | null | null | sme_material_apps/utils/html_to_pdf_response.py | luizhpriotto/piloto_apresentacao | c968025db819633ee4cd75df5357ab6a4ab7d9af | [
"MIT"
] | 1 | 2020-02-01T12:10:42.000Z | 2020-02-01T12:10:42.000Z | from django.http import HttpResponse
from django_weasyprint.utils import django_url_fetcher
from weasyprint import HTML
def html_to_pdf_response(html_string, pdf_filename):
pdf_file = HTML(
string=html_string,
url_fetcher=django_url_fetcher,
base_url='file://abobrinha').write_pdf()
response = HttpResponse(pdf_file, content_type='application/pdf')
response['Content-Disposition'] = f'filename="{pdf_filename}"'
return response
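# --- Usage sketch (hypothetical view; template name is illustrative) ---
#
#     from django.template.loader import render_to_string
#
#     def report_view(request):
#         html = render_to_string('report.html', {'title': 'Report'})
#         return html_to_pdf_response(html, 'report.pdf')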
| 33.571429 | 69 | 0.755319 | 60 | 470 | 5.616667 | 0.433333 | 0.089021 | 0.094955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153191 | 470 | 13 | 70 | 36.153846 | 0.846734 | 0 | 0 | 0 | 0 | 0 | 0.159574 | 0.053191 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.454545 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
856b70394ea2a27e4b33e3b939eda069451e5cfe | 2,149 | py | Python | elektronn3/data/transforms/random.py | riegerfr/elektronn3 | a1fab65914d8975e6d869a5711a84d4553935bd5 | [
"MIT"
] | 124 | 2017-12-11T03:38:43.000Z | 2022-03-25T03:10:32.000Z | elektronn3/data/transforms/random.py | riegerfr/elektronn3 | a1fab65914d8975e6d869a5711a84d4553935bd5 | [
"MIT"
] | 34 | 2018-03-27T17:11:28.000Z | 2020-07-19T20:52:14.000Z | elektronn3/data/transforms/random.py | riegerfr/elektronn3 | a1fab65914d8975e6d869a5711a84d4553935bd5 | [
"MIT"
] | 28 | 2018-04-16T04:11:54.000Z | 2022-03-25T03:23:30.000Z | """Random number generators for random augmentation parametrization"""
from typing import Optional, Tuple
import numpy as np
import scipy.stats
class RandomSampler:
"""Samples random variables from a ``scipy.stats`` distribution."""
def __init__(
self,
rv: scipy.stats.rv_continuous,
shape: Tuple[int, ...] = (),
bounds: Optional[Tuple[float, float]] = None,
):
self.rv = rv
self.shape = shape
self.bounds = bounds
def __call__(self, shape=None):
shape = self.shape if shape is None else shape
rand = self.rv.rvs(size=shape)
if self.bounds is not None:
lo, hi = self.bounds
rand = np.clip(rand, lo, hi)
return rand
class Normal(RandomSampler):
"""Normal distribution sampler."""
def __init__(
self,
mean: float = 0,
sigma: float = 1,
shape: Tuple[int, ...] = (),
bounds: Optional[Tuple[float, float]] = None,
):
rv = scipy.stats.norm(loc=mean, scale=sigma)
super().__init__(rv=rv, shape=shape, bounds=bounds)
class HalfNormal(RandomSampler):
"""Half-normal distribution sampler.
See https://en.wikipedia.org/wiki/Half-normal_distribution.
Note that all sampled values are positive, regardless of the parameters."""
def __init__(
self,
sigma: float = 1,
shape: Tuple[int, ...] = (),
bounds: Optional[Tuple[float, float]] = None,
):
rv = scipy.stats.halfnorm(loc=0, scale=sigma)
super().__init__(rv=rv, shape=shape, bounds=bounds)
class RandInt(RandomSampler):
"""Discrete uniform distribution sampler
Outputs random integers in a defined range ``(low, high)`` with equal
probability.
By default (``low=0, high=2``), it generates binary values (0 or 1)."""
def __init__(
self,
low: int = 0,
high: int = 2,
shape: Tuple[int, ...] = (),
):
rv = scipy.stats.randint(low=low, high=high)
super().__init__(rv=rv, shape=shape, bounds=None)
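# --- Usage sketch (parameter values are illustrative) ---
#
#     noise = Normal(mean=0, sigma=0.1, shape=(4,), bounds=(-0.2, 0.2))
#     noise()              # four clipped normal draws
#     flip = RandInt()     # Bernoulli-style 0/1 draw
#     flip((2, 2))         # a shape can also be passed at call time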
| 29.438356 | 79 | 0.580735 | 251 | 2,149 | 4.836653 | 0.374502 | 0.049423 | 0.036244 | 0.046952 | 0.257825 | 0.257825 | 0.257825 | 0.233937 | 0.233937 | 0.196046 | 0 | 0.006588 | 0.293625 | 2,149 | 72 | 80 | 29.847222 | 0.793149 | 0.239646 | 0 | 0.479167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104167 | false | 0 | 0.0625 | 0 | 0.270833 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
856d5f7ffc3bcce6913018e4ef646a6e008b5e19 | 831 | py | Python | client_server_socket/server_socket.py | KiwiShow/PythonWeb | a489bc2ab16f06f7cc4524bab6b45b2653bfb1bd | [
"MIT"
] | 7 | 2018-02-24T13:41:21.000Z | 2022-02-06T04:59:13.000Z | client_server_socket/server_socket.py | KiwiShow/PythonWeb | a489bc2ab16f06f7cc4524bab6b45b2653bfb1bd | [
"MIT"
] | 6 | 2018-02-25T11:50:42.000Z | 2021-12-13T19:55:13.000Z | client_server_socket/server_socket.py | KiwiShow/PythonWeb | a489bc2ab16f06f7cc4524bab6b45b2653bfb1bd | [
"MIT"
] | 1 | 2018-03-01T02:43:15.000Z | 2018-03-01T02:43:15.000Z | import socket
# 1. create socket
# 2. bind
# 3. listen
# 4. accept
# 5. recv
# 6. send
# 7. close -> 3
# After running this program, open localhost:2000 in a browser to access it.
# Most browsers open 2 connections by default: GET / HTTP/1.1 and GET /favicon.ico HTTP/1.1
s = socket.socket()
host = ''
port = 2000
s.bind((host, port))
# listen() only needs to be called once, before the accept loop
s.listen(5)
while True:
    print('before accept')
    # when a client connects, s.accept() returns 2 values:
    # the connection and the client's (ip, port) address
connection, address = s.accept()
print('after accept')
buf = b''
while True:
cache = connection.recv(1024)
buf += cache
if len(cache) < 1024:
break
request = buf.decode('utf-8')
    print('client address and request: {}\n{}'.format(address, request))
response = b'HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\n<h1>Hello, world</h1>'
connection.sendall(response)
connection.close()
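# --- Client sketch for testing without a browser (illustrative) ---
#
#     import socket
#     c = socket.socket()
#     c.connect(('localhost', 2000))
#     c.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
#     print(c.recv(4096).decode('utf-8'))
#     c.close()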
| 21.307692 | 89 | 0.610108 | 124 | 831 | 4.08871 | 0.580645 | 0.029586 | 0.035503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.061905 | 0.241877 | 831 | 38 | 90 | 21.868421 | 0.742857 | 0.262335 | 0 | 0.095238 | 0 | 0.047619 | 0.209651 | 0.044925 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.047619 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
856dd362c9beb3c3c6cbb93ff094ed39d0df4ed7 | 8,515 | py | Python | metalsmith/_nics.py | openstack/metalsmith | 880d9e47d3fe3f8d6cb83311b0fde3173f92beb4 | [
"Apache-2.0"
] | 8 | 2018-06-27T11:19:31.000Z | 2020-06-17T08:05:11.000Z | metalsmith/_nics.py | openstack/metalsmith | 880d9e47d3fe3f8d6cb83311b0fde3173f92beb4 | [
"Apache-2.0"
] | null | null | null | metalsmith/_nics.py | openstack/metalsmith | 880d9e47d3fe3f8d6cb83311b0fde3173f92beb4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
import logging
from openstack import exceptions as sdk_exc
from metalsmith import _utils
from metalsmith import exceptions
LOG = logging.getLogger(__name__)
class NICs(object):
"""Requested NICs."""
def __init__(self, connection, node, nics, hostname=None):
if nics is None:
nics = []
if not isinstance(nics, collections.abc.Sequence):
raise TypeError("NICs must be a list of dicts")
for nic in nics:
if not isinstance(nic, collections.abc.Mapping):
raise TypeError("Each NIC must be a dict got %s" % nic)
self._node = node
self._connection = connection
self._nics = nics
self._validated = None
self._hostname = hostname
self.created_ports = []
self.attached_ports = []
def validate(self):
"""Validate provided NIC records."""
if self._validated is not None:
return
result = []
for nic in self._nics:
if 'port' in nic:
result.append(('port', self._get_port(nic)))
elif 'network' in nic:
result.append(('network', self._get_network(nic)))
elif 'subnet' in nic:
result.append(('subnet', self._get_subnet(nic)))
else:
                raise exceptions.InvalidNIC(
                    'Unknown NIC record type, expected "port", "subnet" or '
                    '"network", got %s' % nic)
self._validated = result
def create_and_attach_ports(self):
"""Attach ports to the node, creating them if requested."""
self.validate()
for nic_type, nic in self._validated:
if nic_type != 'port':
# The 'binding:host_id' must be set to ensure IP allocation
# is not deferred.
# See: https://storyboard.openstack.org/#!/story/2009715
port = self._connection.network.create_port(
binding_host_id=self._node.id, **nic)
self.created_ports.append(port.id)
LOG.info('Created port %(port)s for node %(node)s with '
'%(nic)s', {'port': _utils.log_res(port),
'node': _utils.log_res(self._node),
'nic': nic})
else:
# The 'binding:host_id' must be set to ensure IP allocation
# is not deferred.
# See: https://storyboard.openstack.org/#!/story/2009715
self._connection.network.update_port(
nic, binding_host_id=self._node.id)
port = nic
self._connection.baremetal.attach_vif_to_node(self._node,
port.id)
LOG.info('Attached port %(port)s to node %(node)s',
{'port': _utils.log_res(port),
'node': _utils.log_res(self._node)})
self.attached_ports.append(port.id)
def detach_and_delete_ports(self):
"""Detach attached port and delete previously created ones."""
detach_and_delete_ports(self._connection, self._node,
self.created_ports, self.attached_ports)
def _get_port(self, nic):
"""Validate and get the NIC information for a port.
:param nic: NIC information in the form ``{"port": "<port ident>"}``.
:returns: `Port` object to use.
"""
unexpected = set(nic) - {'port'}
if unexpected:
raise exceptions.InvalidNIC(
'Unexpected fields for a port: %s' % ', '.join(unexpected))
try:
port = self._connection.network.find_port(
nic['port'], ignore_missing=False)
except sdk_exc.SDKException as exc:
raise exceptions.InvalidNIC(
'Cannot find port %(port)s: %(error)s' %
{'port': nic['port'], 'error': exc})
return port
def _get_network(self, nic):
"""Validate and get the NIC information for a network.
:param nic: NIC information in the form ``{"network": "<net ident>"}``
or ``{"network": "<net ident>", "fixed_ip": "<desired IP>"}``.
:returns: keyword arguments to use when creating a port.
"""
unexpected = set(nic) - {'network', 'fixed_ip'}
if unexpected:
raise exceptions.InvalidNIC(
'Unexpected fields for a network: %s' % ', '.join(unexpected))
try:
network = self._connection.network.find_network(
nic['network'], ignore_missing=False)
except sdk_exc.SDKException as exc:
raise exceptions.InvalidNIC(
'Cannot find network %(net)s: %(error)s' %
{'net': nic['network'], 'error': exc})
port_args = {'network_id': network.id}
if nic.get('fixed_ip'):
port_args['fixed_ips'] = [{'ip_address': nic['fixed_ip']}]
if self._hostname:
port_args['name'] = '%s-%s' % (self._hostname, network.name)
return port_args
def _get_subnet(self, nic):
"""Validate and get the NIC information for a subnet.
:param nic: NIC information in the form ``{"subnet": "<id or name>"}``.
:returns: keyword arguments to use when creating a port.
"""
unexpected = set(nic) - {'subnet'}
if unexpected:
raise exceptions.InvalidNIC(
'Unexpected fields for a subnet: %s' % ', '.join(unexpected))
try:
subnet = self._connection.network.find_subnet(
nic['subnet'], ignore_missing=False)
except sdk_exc.SDKException as exc:
raise exceptions.InvalidNIC(
'Cannot find subnet %(sub)s: %(error)s' %
{'sub': nic['subnet'], 'error': exc})
try:
network = self._connection.network.get_network(subnet.network_id)
except sdk_exc.SDKException as exc:
raise exceptions.InvalidNIC(
'Cannot find network %(net)s for subnet %(sub)s: %(error)s' %
{'net': subnet.network_id, 'sub': nic['subnet'], 'error': exc})
port_args = {'network_id': network.id,
'fixed_ips': [{'subnet_id': subnet.id}]}
if self._hostname:
port_args['name'] = '%s-%s' % (self._hostname, network.name)
return port_args
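# --- Illustrative NIC records accepted by NICs (identifiers are hypothetical) ---
#
#     nics = [
#         {'port': 'ab12cd34-...'},                             # existing port
#         {'network': 'provisioning', 'fixed_ip': '10.0.0.5'},  # create on network
#         {'subnet': 'ctlplane-subnet'},                        # create on subnet
#     ]
#     NICs(connection, node, nics, hostname='node-0').create_and_attach_ports()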
def detach_and_delete_ports(connection, node, created_ports, attached_ports):
"""Detach attached port and delete previously created ones.
:param connection: `openstacksdk.Connection` instance.
:param node: `Node` object to detach ports from.
:param created_ports: List of IDs of previously created ports.
:param attached_ports: List of IDs of previously attached_ports.
"""
for port_id in set(attached_ports + created_ports):
LOG.debug('Detaching port %(port)s from node %(node)s',
{'port': port_id, 'node': _utils.log_res(node)})
try:
connection.baremetal.detach_vif_from_node(node, port_id)
except Exception as exc:
LOG.debug('Failed to remove VIF %(vif)s from node %(node)s, '
'assuming already removed: %(exc)s',
{'vif': port_id, 'node': _utils.log_res(node),
'exc': exc})
for port_id in created_ports:
LOG.debug('Deleting port %s', port_id)
try:
connection.network.delete_port(port_id, ignore_missing=False)
except Exception as exc:
LOG.warning('Failed to delete neutron port %(port)s: %(exc)s',
{'port': port_id, 'exc': exc})
else:
LOG.info('Deleted port %(port)s for node %(node)s',
{'port': port_id, 'node': _utils.log_res(node)})
| 39.239631 | 79 | 0.570053 | 1,001 | 8,515 | 4.703297 | 0.190809 | 0.015293 | 0.042481 | 0.01593 | 0.423959 | 0.368309 | 0.347494 | 0.300127 | 0.265293 | 0.228972 | 0 | 0.003777 | 0.316031 | 8,515 | 216 | 80 | 39.421296 | 0.804602 | 0.215737 | 0 | 0.248175 | 0 | 0 | 0.153716 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058394 | false | 0 | 0.036496 | 0 | 0.131387 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
85740ce3a03beaf2eb89116783a5b5dcaacc3a51 | 2,753 | py | Python | customforms/blocks.py | SquarehostLtd/django-wagtail-customforms | a317fc421faae6fb1f9155bf03eb2930523c46d2 | [
"BSD-3-Clause"
] | 1 | 2018-02-28T09:06:39.000Z | 2018-02-28T09:06:39.000Z | customforms/blocks.py | SquarehostLtd/django-wagtail-customforms | a317fc421faae6fb1f9155bf03eb2930523c46d2 | [
"BSD-3-Clause"
] | 1 | 2020-11-03T10:50:51.000Z | 2020-11-03T10:50:51.000Z | customforms/blocks.py | SquarehostLtd/django-wagtail-customforms | a317fc421faae6fb1f9155bf03eb2930523c46d2 | [
"BSD-3-Clause"
] | 1 | 2020-01-22T23:05:50.000Z | 2020-01-22T23:05:50.000Z | from django.contrib import messages
from django.http import HttpResponseRedirect
from django.utils.functional import cached_property
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
from wagtail.core.blocks import ChooserBlock
from .models import Form
from .widgets import AdminFormChooser
class FormChooserBlock(ChooserBlock):
@cached_property
def target_model(self):
return Form
@cached_property
def widget(self):
return AdminFormChooser
def get_context(self, value, parent_context=None):
context = super().get_context(value, parent_context=parent_context)
request = context.get('request')
if request and request.method == 'POST':
form = value.get_form(request.POST, request.FILES, page=value, user=request.user)
if form.is_valid():
value.process_form_submission(form)
messages.add_message(request, messages.SUCCESS, 'Thank you for submitting the form.')
context['redirect'] = request.path_info
form = value.get_form(page=value, user=request.user)
else:
messages.add_message(request, messages.ERROR, 'There was an error on the form, please correct it.')
        else:
            # request may be None (e.g. when rendering outside a request cycle)
            form = value.get_form(page=value, user=request.user if request else None)
context['form'] = form
if value.display_title:
context['form_title'] = value.title
if value.button_alignment:
context['button_alignment'] = value.button_alignment
return context
def render(self, value, context=None):
"""
Return a text rendering of 'value', suitable for display on templates. By default, this will
use a template (with the passed context, supplemented by the result of get_context) if a
'template' property is specified on the block, and fall back on render_basic otherwise.
"""
template = self.get_template(context=context, value=value)
if not template:
return self.render_basic(value, context=context)
if context is None:
new_context = self.get_context(value)
else:
new_context = self.get_context(value, parent_context=dict(context))
return mark_safe(render_to_string(template, new_context))
def get_template(self, context=None, value=None):
if not value.form_template or value.form_template == 'standard':
return getattr(self.meta, 'template', None)
return value.form_template
class Meta:
icon = "form"
template = 'customforms/blocks/form.html'
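# --- Usage sketch: embedding the block in a StreamField (illustrative) ---
#
#     from wagtail.core.fields import StreamField
#
#     class StandardPage(Page):  # wagtail.core.models.Page
#         body = StreamField([
#             ('form', FormChooserBlock()),
#         ])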
| 37.202703 | 115 | 0.674174 | 337 | 2,753 | 5.376855 | 0.329377 | 0.033113 | 0.024834 | 0.02649 | 0.148455 | 0.076159 | 0.04415 | 0.04415 | 0.04415 | 0 | 0 | 0 | 0.243008 | 2,753 | 73 | 116 | 37.712329 | 0.869482 | 0.112968 | 0 | 0.134615 | 0 | 0 | 0.075385 | 0.011662 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096154 | false | 0 | 0.173077 | 0.038462 | 0.442308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
857606e28a85102079da6fd107aaf732413bd215 | 2,286 | py | Python | views/index/students.py | woyanh/bysj-master | d2ba7fbba7145b04e05ad3e7e14fa70018a6ce4c | [
"MIT"
] | null | null | null | views/index/students.py | woyanh/bysj-master | d2ba7fbba7145b04e05ad3e7e14fa70018a6ce4c | [
"MIT"
] | null | null | null | views/index/students.py | woyanh/bysj-master | d2ba7fbba7145b04e05ad3e7e14fa70018a6ce4c | [
"MIT"
] | null | null | null | from flask import Blueprint,render_template,flash,redirect,url_for,send_from_directory,current_app
from flask_login import current_user
from models import Student,Course
from forms.students import UploadAvatarForm,CropAvatarForm
from extensions import avatars,db
from utils import flash_errors
index_stu_bp = Blueprint('index_stu',__name__)
@index_stu_bp.route('/')
def index_stu():
return render_template('stu/student.html')
@index_stu_bp.route('/mycourse')
def course():
return render_template('stu/course.html')
@index_stu_bp.route('/myinfo')
def info():
info = Student.query.filter_by(id=current_user.id).first()
return render_template('stu/info.html',info=info)
@index_stu_bp.route('/setting')
def setting():
upload_form = UploadAvatarForm()
crop_form = CropAvatarForm()
return render_template('stu/setting.html', upload_form=upload_form, crop_form=crop_form)
@index_stu_bp.route('/setting/upload',methods=['POST'])
def upload_avatar():
form = UploadAvatarForm()
if form.validate_on_submit():
image = form.image.data
filename = avatars.save_avatar(image)
stu_pic = Student.query.filter_by(id = current_user.id).first()
stu_pic.pic = filename
#db.session.add(stu_pic)
db.session.commit()
flash('Image uploaded, please crop.', 'success')
flash_errors(form)
return redirect(url_for('.setting'))
@index_stu_bp.route('/setting/<path:filename>')
def get_avatar(filename):
return send_from_directory(current_app.config['AVATARS_SAVE_PATH'], filename)
@index_stu_bp.route('/settings/avatar/crop', methods=['POST'])
def crop_avatar():
form = CropAvatarForm()
if form.validate_on_submit():
x = form.x.data
y = form.y.data
w = form.w.data
h = form.h.data
stu_pic = Student.query.filter_by(id=current_user.id).first()
filenames = avatars.crop_avatar(stu_pic.pic, x, y, w, h)
stu_pic.pic_s = filenames[0]
stu_pic.pic_m = filenames[1]
stu_pic.pic_l = filenames[2]
#db.session.add(stu_pic)
db.session.commit()
flash('Avatar updated.', 'success')
flash_errors(form)
return redirect(url_for('.setting'))
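# --- Registration sketch (illustrative app-factory wiring) ---
#
#     from views.index.students import index_stu_bp
#     app.register_blueprint(index_stu_bp, url_prefix='/student')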
| 33.617647 | 99 | 0.682415 | 308 | 2,286 | 4.818182 | 0.266234 | 0.053908 | 0.053908 | 0.070755 | 0.342318 | 0.206199 | 0.206199 | 0.206199 | 0.206199 | 0.061995 | 0 | 0.001623 | 0.191601 | 2,286 | 67 | 100 | 34.119403 | 0.801407 | 0.020122 | 0 | 0.185185 | 0 | 0 | 0.116076 | 0.020728 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12963 | false | 0 | 0.111111 | 0.055556 | 0.37037 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |