hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
56652a7a9ad8a080d50971b9bce49832c3f1c49d | 6,853 | py | Python | util.py | jacklxc/ScientificDiscourseTagging | d75514b631b95d39451abd2396f57c3da1c19801 | ["Apache-2.0"] | 15 | 2020-01-17T16:45:09.000Z | 2022-01-18T08:44:16.000Z | util.py | jacklxc/ScientificDiscourseTagging | d75514b631b95d39451abd2396f57c3da1c19801 | ["Apache-2.0"] | 3 | 2020-12-01T07:34:57.000Z | 2021-08-09T23:07:19.000Z | util.py | jacklxc/ScientificDiscourseTagging | d75514b631b95d39451abd2396f57c3da1c19801 | ["Apache-2.0"] | 2 | 2019-05-30T18:52:09.000Z | 2020-06-01T13:36:33.000Z | import codecs
import numpy
import glob
import re
from sklearn.metrics import f1_score
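# Input format (inferred from the parser below): one "clause<TAB>label" pair per
# line, with blank lines separating passages; unlabeled files contain bare clauses.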
def read_passages(filename, is_labeled):
str_seqs = []
str_seq = []
label_seqs = []
label_seq = []
for line in codecs.open(filename, "r", "utf-8"):
lnstrp = line.strip()
if lnstrp == "":
if len(str_seq) != 0:
str_seqs.append(str_seq)
str_seq = []
label_seqs.append(label_seq)
label_seq = []
else:
if is_labeled:
clause, label = lnstrp.split("\t")
label_seq.append(label.strip())
else:
clause = lnstrp
str_seq.append(clause)
if len(str_seq) != 0:
str_seqs.append(str_seq)
str_seq = []
label_seqs.append(label_seq)
label_seq = []
return str_seqs, label_seqs
def from_BIO_ind(BIO_pred, BIO_target, indices):
table = {} # Make a mapping between the indices of BIO_labels and temporary original label indices
original_labels = []
for BIO_label,BIO_index in indices.items():
if BIO_label[:2] == "I_" or BIO_label[:2] == "B_":
label = BIO_label[2:]
else:
label = BIO_label
if label in original_labels:
table[BIO_index] = original_labels.index(label)
else:
table[BIO_index] = len(original_labels)
original_labels.append(label)
original_pred = [table[label] for label in BIO_pred]
original_target = [table[label] for label in BIO_target]
return original_pred, original_target
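# Convert flat labels to a BIO-style scheme: the first clause of a run of equal
# labels gets a "B_" prefix, continuations get "I_", and "none" stays unchanged (O).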
def to_BIO(label_seqs):
new_label_seqs = []
for label_para in label_seqs:
new_label_para = []
prev = ""
for label in label_para:
if label!="none": # "none" is O, remain unchanged.
if label==prev:
new_label = "I_"+label
else:
new_label = "B_"+label
else:
new_label = label # "none"
prev = label
new_label_para.append(new_label)
new_label_seqs.append(new_label_para)
return new_label_seqs
def from_BIO(label_seqs):
new_label_seqs = []
for label_para in label_seqs:
new_label_para = []
for label in label_para:
if label[:2] == "I_" or label[:2] == "B_":
new_label = label[2:]
else:
new_label = label
new_label_para.append(new_label)
new_label_seqs.append(new_label_para)
return new_label_seqs
def clean_url(word):
"""
Clean specific data format from social media
"""
# clean urls
word = re.sub(r'https? : \/\/.*[\r\n]*', '<URL>', word)
word = re.sub(r'exlink', '<URL>', word)
return word
def clean_num(word):
# check if the word contain number and no letters
if any(char.isdigit() for char in word):
        try:
            float(word.replace(',', ''))  # the parsed value itself is unused; we only test parseability
            return '@'
        except ValueError:  # narrowed from a bare except
            if not any(char.isalpha() for char in word):
                return '@'
return word
def clean_words(str_seqs):
processed_seqs = []
for str_seq in str_seqs:
processed_clauses = []
for clause in str_seq:
filtered = []
tokens = clause.split()
for word in tokens:
word = clean_url(word)
word = clean_num(word)
filtered.append(word)
filtered_clause = " ".join(filtered)
processed_clauses.append(filtered_clause)
processed_seqs.append(processed_clauses)
return processed_seqs
def test_f1(test_file,pred_label_seqs):
def linearize(labels):
linearized = []
for paper in labels:
for label in paper:
linearized.append(label)
return linearized
    _, label_seqs = read_passages(test_file, True)  # read_passages is the reader defined above
true_label = linearize(label_seqs)
pred_label = linearize(pred_label_seqs)
f1 = f1_score(true_label,pred_label,average="weighted")
print("F1 score:",f1)
return f1
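# Accuracy, per-class F1, and a gold-support-weighted F1 computed with plain dicts.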
def evaluate(y, pred):
accuracy = float(sum([c == p for c, p in zip(y, pred)]))/len(pred)
num_gold = {}
num_pred = {}
num_correct = {}
for c, p in zip(y, pred):
if c in num_gold:
num_gold[c] += 1
else:
num_gold[c] = 1
if p in num_pred:
num_pred[p] += 1
else:
num_pred[p] = 1
if c == p:
if c in num_correct:
num_correct[c] += 1
else:
num_correct[c] = 1
fscores = {}
for p in num_pred:
precision = float(num_correct[p]) / num_pred[p] if p in num_correct else 0.0
recall = float(num_correct[p]) / num_gold[p] if p in num_correct else 0.0
fscores[p] = 2 * precision * recall / (precision + recall) if precision !=0 and recall !=0 else 0.0
weighted_fscore = sum([fscores[p] * num_gold[p] if p in num_gold else 0.0 for p in fscores]) / sum(num_gold.values())
return accuracy, weighted_fscore, fscores
def make_folds(train_X, train_Y, num_folds):
num_points = train_X.shape[0]
    fold_len = num_points // num_folds  # integer fold length (not used below)
rem = num_points % num_folds
print(train_X.shape, train_Y.shape)
X_folds = numpy.split(train_X, num_folds) if rem == 0 else numpy.split(train_X[:-rem], num_folds)
Y_folds = numpy.split(train_Y, num_folds) if rem == 0 else numpy.split(train_Y[:-rem], num_folds)
cv_folds = []
for i in range(num_folds):
train_folds_X = []
train_folds_Y = []
for j in range(num_folds):
if i != j:
train_folds_X.append(X_folds[j])
train_folds_Y.append(Y_folds[j])
train_fold_X = numpy.concatenate(train_folds_X)
train_fold_Y = numpy.concatenate(train_folds_Y)
cv_folds.append(((train_fold_X, train_fold_Y), (X_folds[i], Y_folds[i])))
return cv_folds
def arg2param(args):
params = vars(args)
params["lr"] = float(args.lr)
params["hard_k"] = int(args.hard_k)
params["embedding_dropout"] = float(args.embedding_dropout)
params["high_dense_dropout"] = float(args.high_dense_dropout)
params["attention_dropout"] = float(args.attention_dropout)
params["lstm_dropout"] = float(args.lstm_dropout)
params["word_proj_dim"] = int(args.word_proj_dim)
params["lstm_dim"] = int(args.lstm_dim)
params["att_proj_dim"] = int(args.att_proj_dim)
params["rec_hid_dim"] = int(args.rec_hid_dim)
params["epoch"] = int(args.epoch)
params["maxseqlen"] = int(args.maxseqlen)
params["maxclauselen"] = int(args.maxclauselen)
params["batch_size"]=int(args.batch_size)
params["validation_split"] = float(args.validation_split)
return params
| 34.265 | 121 | 0.59069 | 926 | 6,853 | 4.115551 | 0.176026 | 0.039885 | 0.018893 | 0.017843 | 0.19365 | 0.178693 | 0.166623 | 0.145106 | 0.138546 | 0.109683 | 0 | 0.007708 | 0.299577 | 6,853 | 199 | 122 | 34.437186 | 0.78625 | 0.03327 | 0 | 0.233333 | 0 | 0 | 0.038025 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0.011111 | 0.027778 | 0 | 0.172222 | 0.011111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5669105e25d30b05664f89f0df0423f50da1ae02 | 2,965 | py | Python | examples/Kane1985/Chapter6/Ex11.5.py | nouiz/pydy | 20c8ca9fc521208ae2144b5b453c14ed4a22a0ec | ["BSD-3-Clause"] | 298 | 2015-01-31T11:43:22.000Z | 2022-03-15T02:18:21.000Z | examples/Kane1985/Chapter6/Ex11.5.py | nouiz/pydy | 20c8ca9fc521208ae2144b5b453c14ed4a22a0ec | ["BSD-3-Clause"] | 359 | 2015-01-17T16:56:42.000Z | 2022-02-08T05:27:08.000Z | examples/Kane1985/Chapter6/Ex11.5.py | nouiz/pydy | 20c8ca9fc521208ae2144b5b453c14ed4a22a0ec | ["BSD-3-Clause"] | 109 | 2015-02-03T13:02:45.000Z | 2021-12-21T12:57:21.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 11.5 from Kane 1985."""
from __future__ import division
from sympy import expand, solve, symbols, trigsimp
from sympy import sin, tan, pi
from sympy.physics.mechanics import Point, ReferenceFrame, RigidBody
from sympy.physics.mechanics import dot, dynamicsymbols, inertia, msprint
from util import generalized_active_forces, generalized_inertia_forces
from util import partial_velocities, subs
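# A uniform disk C rolling on a horizontal plane H; Kane's method yields the
# dynamical equations, which are checked against the book's results below.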
g, m, Px, Py, Pz, R, t = symbols('g m Px Py Pz R t')
q1, q2, q3, q4, q5 = q = dynamicsymbols('q1:6')
qd = dynamicsymbols('q1:6', level=1)
u1, u2, u3, u4, u5 = u = dynamicsymbols('u1:6')
# reference frames
A = ReferenceFrame('A')
B_prime = A.orientnew('B_prime', 'Axis', [q1, A.z])
B = B_prime.orientnew('B', 'Axis', [pi/2 - q2, B_prime.x])
C = B.orientnew('C', 'Axis', [q3, B.z])
# points, velocities
pO = Point('O')
pO.set_vel(A, 0)
# R is the point in plane H that comes into contact with disk C.
pR = pO.locatenew('R', q4*A.x + q5*A.y)
pR.set_vel(A, pR.pos_from(pO).dt(A))
pR.set_vel(B, 0)
# C^ is the point in disk C that comes into contact with plane H.
pC_hat = pR.locatenew('C^', 0)
pC_hat.set_vel(C, 0)
# C* is the point at the center of disk C.
pC_star = pC_hat.locatenew('C*', R*B.y)
pC_star.set_vel(C, 0)
pC_star.set_vel(B, 0)
# calculate velocities in A
pC_star.v2pt_theory(pR, A, B)
pC_hat.v2pt_theory(pC_star, A, C)
# kinematic differential equations
kde = [x - y for x, y in zip(
[dot(C.ang_vel_in(A), basis) for basis in B] + qd[3:],
u)]
kde_map = solve(kde, qd)
# include second derivatives in kde map
for k, v in list(kde_map.items()):  # snapshot items(); the dict is mutated inside the loop
kde_map[k.diff(t)] = v.diff(t)
vc = [dot(pC_hat.vel(A), x) for x in [A.x, A.y]]  # a list, not a lazy map, so it can be reused under Python 3
vc_map = solve(subs(vc, kde_map), [u4, u5])
# define disc rigidbody
I_C = inertia(C, m*R**2/4, m*R**2/4, m*R**2/2)
rbC = RigidBody('rbC', pC_star, C, m, (I_C, pC_star))
# forces
R_C_hat = Px*A.x + Py*A.y + Pz*A.z
R_C_star = -m*g*A.z
forces = [(pC_hat, R_C_hat), (pC_star, R_C_star)]
# partial velocities
bodies = [rbC]
system = ([i.masscenter for i in bodies] + [i.frame for i in bodies] +
          [f[0] for f in forces])  # the points each force acts on; zip(*forces)[0] is not subscriptable on Python 3
partials = partial_velocities(system, [u1, u2, u3], A, kde_map, vc_map)
# generalized active forces
Fr, _ = generalized_active_forces(partials, forces)
Fr_star, _ = generalized_inertia_forces(partials, bodies, kde_map, vc_map)
# dynamical equations
dyn_eq = subs([x + y for x, y in zip(Fr, Fr_star)], kde_map)
u1d, u2d, u3d = ud = [x.diff(t) for x in [u1, u2, u3]]
dyn_eq_map = solve(dyn_eq, ud)
for x in ud:
print('{0} = {1}'.format(msprint(x),
msprint(trigsimp(dyn_eq_map[x]))))
u1d_expected = (u2**2*tan(q2) - 6*u2*u3 - 4*g*sin(q2)/R)/5
u2d_expected = 2*u3*u1 - u1*u2*tan(q2)
u3d_expected = 2*u1*u2/3
assert trigsimp(expand(dyn_eq_map[u1d] - u1d_expected)) == 0
assert trigsimp(expand(dyn_eq_map[u2d] - u2d_expected)) == 0
assert trigsimp(expand(dyn_eq_map[u3d] - u3d_expected)) == 0
| 31.88172 | 74 | 0.66914 | 562 | 2,965 | 3.379004 | 0.261566 | 0.025276 | 0.021064 | 0.036335 | 0.153239 | 0.082675 | 0.06793 | 0.038968 | 0 | 0 | 0 | 0.036768 | 0.165261 | 2,965 | 92 | 75 | 32.228261 | 0.730505 | 0.157504 | 0 | 0 | 0 | 0 | 0.027441 | 0 | 0 | 0 | 0 | 0 | 0.051724 | 1 | 0 | false | 0 | 0.12069 | 0 | 0.12069 | 0.051724 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
566a183503f12d2d76263243546091eed83cab3c | 3,706 | py | Python | seahub/drafts/utils.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | ["Apache-2.0"] | 420 | 2015-01-03T11:34:46.000Z | 2022-03-10T07:15:41.000Z | seahub/drafts/utils.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | ["Apache-2.0"] | 735 | 2015-01-04T21:22:51.000Z | 2022-03-31T09:26:07.000Z | seahub/drafts/utils.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | ["Apache-2.0"] | 379 | 2015-01-05T17:08:03.000Z | 2022-03-06T00:11:50.000Z | import hashlib
import os
import logging
import posixpath
from seaserv import seafile_api
from seahub.utils import normalize_file_path, check_filename_with_rename
from seahub.tags.models import FileUUIDMap
logger = logging.getLogger(__name__)
def create_user_draft_repo(username, org_id=-1):
repo_name = 'Drafts'
if org_id and org_id > 0:
repo_id = seafile_api.create_org_repo(repo_name, '', username, org_id)
else:
repo_id = seafile_api.create_repo(repo_name, '', username)
return repo_id
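# Build a unique "<name>(draft)<ext>" filename under the /Drafts folder.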
def get_draft_file_name(repo_id, file_path):
file_path = normalize_file_path(file_path)
file_name, file_ext = os.path.splitext(os.path.basename(file_path))
draft_file_name = "%s%s%s" % (file_name, '(draft)', file_ext)
draft_file_name = check_filename_with_rename(repo_id, '/Drafts', draft_file_name)
return draft_file_name
def is_draft_file(repo_id, file_path):
is_draft = False
file_path = normalize_file_path(file_path)
from .models import Draft
try:
draft = Draft.objects.filter(origin_repo_id=repo_id, draft_file_path=file_path)
if draft:
is_draft = True
except Draft.DoesNotExist:
pass
return is_draft
def has_draft_file(repo_id, file_path):
has_draft = False
file_path = normalize_file_path(file_path)
parent_path = os.path.dirname(file_path)
filename = os.path.basename(file_path)
file_uuid = FileUUIDMap.objects.get_fileuuidmap_by_path(
repo_id, parent_path, filename, is_dir=False)
from .models import Draft
if file_uuid:
try:
d = Draft.objects.filter(origin_file_uuid=file_uuid.uuid)
if d:
d = d[0]
file_id = seafile_api.get_file_id_by_path(repo_id, d.draft_file_path)
if file_id:
has_draft = True
except Draft.DoesNotExist:
pass
return has_draft
def get_file_draft(repo_id, file_path, is_draft=False, has_draft=False):
draft = {}
draft['draft_id'] = None
draft['draft_file_path'] = ''
draft['draft_origin_file_path'] = ''
from .models import Draft
if is_draft:
d = Draft.objects.filter(origin_repo_id=repo_id, draft_file_path=file_path)
if d:
d = d[0]
uuid = FileUUIDMap.objects.get_fileuuidmap_by_uuid(d.origin_file_uuid)
file_path = posixpath.join(uuid.parent_path, uuid.filename)
draft['draft_id'] = d.id
draft['draft_file_path'] = d.draft_file_path
draft['draft_origin_file_path'] = file_path
if has_draft:
file_path = normalize_file_path(file_path)
parent_path = os.path.dirname(file_path)
filename = os.path.basename(file_path)
file_uuid = FileUUIDMap.objects.get_fileuuidmap_by_path(
repo_id, parent_path, filename, is_dir=False)
d = Draft.objects.filter(origin_file_uuid=file_uuid.uuid)
if d:
d = d[0]
draft['draft_id'] = d.id
draft['draft_file_path'] = d.draft_file_path
return draft
def send_draft_publish_msg(draft, username, path):
"""
send draft publish msg to seafevents
"""
repo_id = draft.origin_repo_id
old_path = draft.draft_file_path
msg = '%s\t%s\t%s\t%s\t%s\t%s' % ("publish", "draft", repo_id, username, path, old_path)
try:
seafile_api.publish_event('seahub.draft', msg)
except Exception as e:
logger.error("Error when sending draft publish message: %s" % str(e))
| 28.953125 | 92 | 0.658662 | 522 | 3,706 | 4.337165 | 0.151341 | 0.130742 | 0.058304 | 0.056537 | 0.523852 | 0.430212 | 0.380742 | 0.347615 | 0.320671 | 0.292403 | 0 | 0.001801 | 0.250675 | 3,706 | 127 | 93 | 29.181102 | 0.813468 | 0.009714 | 0 | 0.445652 | 0 | 0.01087 | 0.062671 | 0.018062 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065217 | false | 0.021739 | 0.108696 | 0 | 0.228261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
566d06f55eb168bd0c5dd0836c75fd3bf4352b95 | 781 | py | Python | Questions/Airline Iternary/solution.py | leander-dsouza/Abhyudaya_2020 | 54ec7608c5caa14310b635ac8e8b090156ca0ea4 | ["MIT"] | 1 | 2020-07-13T17:28:27.000Z | 2020-07-13T17:28:27.000Z | Questions/Airline Iternary/solution.py | leander-dsouza/Abhyudaya_2020 | 54ec7608c5caa14310b635ac8e8b090156ca0ea4 | ["MIT"] | null | null | null | Questions/Airline Iternary/solution.py | leander-dsouza/Abhyudaya_2020 | 54ec7608c5caa14310b635ac8e8b090156ca0ea4 | ["MIT"] | null | null | null | def get_itinerary(flights, starting_point, current_itinerary):
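    # Depth-first backtracking: try every remaining flight out of starting_point
    # and keep the lexicographically smallest itinerary that uses all flights.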
if not flights:
return current_itinerary + [starting_point]
updated_itinerary = None
for index, (city_1, city_2) in enumerate(flights):
if starting_point == city_1:
child_itinerary = get_itinerary(
flights[:index] + flights[index + 1:], city_2, current_itinerary + [city_1])
if child_itinerary:
if not updated_itinerary or "".join(child_itinerary) < "".join(updated_itinerary):
updated_itinerary = child_itinerary
return updated_itinerary
size = int(input())
array_input = []
for x in range(size):
array_input.append(tuple(input().split()))
g = get_itinerary(array_input, 'MSC', [])
print(" ".join(g))
| 31.24 | 98 | 0.653009 | 94 | 781 | 5.148936 | 0.361702 | 0.165289 | 0.078512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01005 | 0.235595 | 781 | 24 | 99 | 32.541667 | 0.80067 | 0 | 0 | 0 | 0 | 0 | 0.005122 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0 | 0 | 0.166667 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
566e992466a08d95a9769f7efc588017224e9ab9 | 2,632 | py | Python | SubShift.py | nsaftarli/SubShift | fa1ac906b569fb7dd238e0241b84cd20c1ba2387 | ["MIT"] | null | null | null | SubShift.py | nsaftarli/SubShift | fa1ac906b569fb7dd238e0241b84cd20c1ba2387 | ["MIT"] | null | null | null | SubShift.py | nsaftarli/SubShift | fa1ac906b569fb7dd238e0241b84cd20c1ba2387 | ["MIT"] | null | null | null | import re
import numpy as np
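# Shift every timestamp in an .srt subtitle file by a fixed delta: backward when
# direction == 'B', forward otherwise, rewriting the file line by line.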
def timestamp_to_num(ts):
num_list = []
ts_list = re.split('[:,]', ts)
for i in ts_list:
num_list.append(int(i))
return np.array(num_list)
def main(filename, delta, output, direction):
buff = []
# Read file
with open(filename, 'r') as f:
contents = f.readlines()
# For each line
for line in contents:
# Parse line for timestamp
ts = parse_timestamp(line)
# If no timestamp, put into buffer as is
if ts == []:
buff.append(line)
# If timestamp exists, make change of delta, put into buffer:
else:
new_ts = update_timestamps(ts, delta, direction)
new_ts_str = timestamp_to_string(new_ts)
new_line = create_ts_line(new_ts_str)
buff.append(new_line)
# Write buffer out
with open(output, 'w') as file:
for i in buff:
file.write(i)
def create_ts_line(ts):
begin = ts[0]
end = ts[1]
string = begin + ' --> ' + end + '\n'
return string
def timestamp_to_string(ts):
strings = []
begin = ts[0]
end = ts[1]
strings.append('%02d:%02d:%02d,%03d' % (begin[0], begin[1], begin[2], begin[3]))
strings.append('%02d:%02d:%02d,%03d' % (end[0], end[1], end[2], end[3]))
return strings
def update_timestamps(lst, delta, direction):
# Convert to lists
begin = timestamp_to_num(lst[0])
end = timestamp_to_num(lst[1])
new_delta = timestamp_to_num(delta)
# Convert to millisecond scalars
begin_ms = convert_to_ms(begin)
end_ms = convert_to_ms(end)
new_delta_ms = convert_to_ms(new_delta)
# Update timestamps
if direction == 'B':
new_begin_ms = begin_ms - new_delta_ms
new_end_ms = end_ms - new_delta_ms
else:
new_begin_ms = begin_ms + new_delta_ms
new_end_ms = end_ms + new_delta_ms
# Convert back to list format
new_begin = convert_to_ts(new_begin_ms)
new_end = convert_to_ts(new_end_ms)
return [new_begin, new_end]
def convert_to_ts(millis):
hours = int(millis // 3.6e6)
millis %= 3.6e6
minutes = int(millis // 60000)
millis %= 60000
seconds = int(millis // 1000)
millis = int(millis % 1000)
return [hours, minutes, seconds, millis]
def convert_to_ms(timestamp):
ms = timestamp[3]
secs = timestamp[2] * 1000
mins = timestamp[1] * 1000 * 60
hours = timestamp[0] * 1000 * 3600
time_in_ms = ms + secs + mins + hours
return time_in_ms
def parse_timestamp(txt):
    pattern = r'\d\d:\d\d:\d\d,\d*'  # raw string: "\d" is an invalid escape in a normal string literal
return re.findall(pattern, txt)
| 24.37037 | 84 | 0.612082 | 389 | 2,632 | 3.922879 | 0.233933 | 0.05308 | 0.032765 | 0.031455 | 0.121232 | 0.121232 | 0.065531 | 0.065531 | 0.065531 | 0.065531 | 0 | 0.039583 | 0.270517 | 2,632 | 107 | 85 | 24.598131 | 0.755208 | 0.098024 | 0 | 0.085714 | 0 | 0 | 0.029648 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.028571 | 0 | 0.242857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5672916d34e9bf0fa027e7668987fc3274ffeb22 | 7,445 | py | Python | code/training/i_vector_extraction.py | oananovac/Speaker_Recognition_System | 526eb2467190efeeeb2256849f53cde648b3a294 | ["MIT"] | null | null | null | code/training/i_vector_extraction.py | oananovac/Speaker_Recognition_System | 526eb2467190efeeeb2256849f53cde648b3a294 | ["MIT"] | null | null | null | code/training/i_vector_extraction.py | oananovac/Speaker_Recognition_System | 526eb2467190efeeeb2256849f53cde648b3a294 | ["MIT"] | null | null | null | import numpy as np
from scipy.linalg import eigh
import voice_activity_detector
import features_extraction
import statistics
import utils
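# i-vector front end: given a UBM and a total-variability matrix T, extract one
# low-dimensional i-vector per utterance from its zeroth/first-order (Baum-Welch) statistics.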
def get_sigma(ubm, space_dimension):
sigma = np.zeros(shape=(len(ubm.covariances) * len(ubm.covariances[0])))
k = 0
for i in range(len(ubm.covariances[0])):
for j in range(len(ubm.covariances)):
sigma[k] = ubm.covariances[j][i]
k += 1
repeat_sigma = np.repeat(np.transpose(sigma)[:, np.newaxis],
space_dimension, axis=1)
return repeat_sigma
def save_i_vector_model(path, i_vector, speaker, components_number):
f = open(
path + "/ivectors/" + speaker + "_ivector_model_" +
str(components_number) + ".txt",
"wb")
np.save(f, i_vector)
    f.close()
def load_i_vector_model(path, speaker, components_number):
f = open(
path + "/ivectors/" + speaker + "_ivector_model_" +
str(components_number) + ".txt",
"rb")
i_vector = np.load(f)
    f.close()
return i_vector
def save_i_vectors(path, i_vectors, speaker, components_number):
f = open(
path + "/ivectors/" + speaker + "_ivector_" + str(
components_number) +
".txt",
"wb")
np.save(f, i_vectors)
    f.close()
def extract_i_vector_from_signal(ubm, utterance_path, t_matrix,
space_dimension,
mfcc_number, frame_duration, step_duration,
sigma):
t_matrix_divides_sigma = np.divide(t_matrix, sigma)
t_matrix_divides_sigma_transpose = np.transpose(t_matrix_divides_sigma)
identity_matrix = np.eye(space_dimension, dtype=float)
vad_object = voice_activity_detector.Vad(utterance_path, 2)
signal_samples, sample_rate = vad_object.get_speech_signal()
del vad_object
mfcc = features_extraction.FeaturesExtraction(mfcc_number, True,
frame_duration,
step_duration)
features = mfcc.extract_mfcc_from_signal(signal_samples, sample_rate)
log_likelihood = statistics.log_likelihood_computation(features, ubm)
n, f, s = statistics.statistics_computation(log_likelihood, features)
# first order statistics are centered by the mean vector
f = np.subtract(f, np.multiply(np.transpose(
np.repeat(n[:, np.newaxis], np.shape(ubm.means)[1], axis=1)),
np.transpose(ubm.means)))
# i-vector computation
i1 = np.matmul(np.transpose(
np.multiply(t_matrix_divides_sigma,
np.repeat(
np.transpose(np.repeat(n, np.shape(features)[1]))[:,
np.newaxis], space_dimension, axis=1))), t_matrix)
i2 = np.matmul(np.linalg.pinv(np.add(identity_matrix, i1)),
t_matrix_divides_sigma_transpose)
i3 = []
for i in range(np.shape(f)[1]):
if i == 0:
i3 = np.transpose(f)[i]
else:
i3 = np.concatenate((i3, np.transpose(f)[i]), axis=0)
i_vector = np.matmul(i2, i3)
return i_vector
def extract_i_vectors(path, ubm, train_paths, t_matrix, space_dimension,
mfcc_number, frame_duration, step_duration,
components_number):
sigma = get_sigma(ubm, space_dimension)
speakers_list = train_paths.keys()
ivectors = {}
for speaker in speakers_list:
ivector_per_file = []
for file in range(len(train_paths[speaker])):
ivector_per_file.append(extract_i_vector_from_signal(ubm,
train_paths[speaker][file],
t_matrix,
space_dimension,
mfcc_number,
frame_duration,
step_duration,
sigma))
i_vectors = np.transpose(np.dstack(ivector_per_file)[0])
# ivectors[speaker] = i_vectors
save_i_vectors(path, i_vectors, speaker, components_number)
def LDA_projection_matrix(ivectors): # LDA projection matrix
ivector_list = ivectors
cat_list = utils.concatenate_ivectors(ivector_list)
projection_matrix = np.identity(len(ivector_list[0][0]))
num_eigen_vectors = len(ivector_list)
sw = np.zeros(np.shape(projection_matrix))
sb = np.zeros(np.shape(projection_matrix))
wbar = np.mean(cat_list, axis=0)
for lists in ivector_list:
ws = lists
wsbar = np.mean(ws, axis=0)
ws_sub = np.reshape(np.subtract(wsbar, wbar), (np.shape(wbar)[0], 1))
ws_mul = np.matmul(ws_sub, np.transpose(ws_sub))
sb = np.add(sb, ws_mul)
ws_cov = np.cov(np.transpose(ws), bias=True)
sw = np.add(sw, ws_cov)
eigvals, eigvecs = eigh(sb, sw, eigvals_only=False)
zipped_eig = zip(eigvals, eigvecs)
sorted_zipped_eig = sorted(zipped_eig, reverse=True)
sortedd = [element for _, element in sorted_zipped_eig]
a_matrix = []
for i in range(num_eigen_vectors):
a_matrix.append(sortedd[i])
a_matrix = np.dstack(a_matrix)
a_matrix = np.rollaxis(a_matrix[0], -1)
a_matrix = np.divide(a_matrix, np.repeat(
np.linalg.norm(a_matrix, axis=1)[:, np.newaxis], len(a_matrix[0]),
axis=1))
ivectors_fin = np.matmul(a_matrix, np.transpose(cat_list))
projection_matrix = np.matmul(a_matrix, projection_matrix)
return projection_matrix, ivectors_fin
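# WCCN: whiten the LDA-projected space with the Cholesky factor of the inverse
# (identity-smoothed) within-class covariance to compensate session variability.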
def WCCN_projection_matrix(lda_projection_matrix, ivectors, utterances):
num_eigen_vectors = len(ivectors)
alpha = 0.9
ivv = []
index = 0
utt_keys = utterances.keys()
start = 0
final = 0
for i in utt_keys:
final += utterances[i]
iv = np.zeros((num_eigen_vectors, utterances[i]))
for j in range(num_eigen_vectors):
iv[j] = ivectors[j][start:final]
ivv.append(iv)
index += 1
start += utterances[i]
w_ = np.zeros((len(lda_projection_matrix), len(lda_projection_matrix)))
for i in range(len(ivv)):
w_ = np.add(w_, np.cov(ivv[i], bias=True))
w_ = np.divide(w_, np.full((np.shape(w_)[0], np.shape(w_)[1]),
num_eigen_vectors))
w_ = np.add(
np.multiply(np.full((np.shape(w_)[0], np.shape(w_)[1]), 1 - alpha),
w_),
np.multiply(np.full((np.shape(w_)[0], np.shape(w_)[1]), alpha),
np.identity(np.shape(w_)[0])))
b_matrix = np.linalg.cholesky(
np.linalg.pinv(w_)) # nearestPD(np.linalg.pinv(w_)))
wccn_projection_matrix = np.matmul(b_matrix, lda_projection_matrix)
return wccn_projection_matrix
def load_projection_matrix(path, components_number):
f = open(path + "/models/projection_matrix_" + str(components_number) +
".txt",
"rb")
p_matrix = np.load(f)
    f.close()
return p_matrix
def save_projection_matrix(path, components_number, p_matrix):
f = open(path + "/models/projection_matrix_" + str(components_number) +
".txt",
"wb")
np.save(f, p_matrix)
    f.close()
| 33.236607 | 92 | 0.58724 | 932 | 7,445 | 4.433476 | 0.17382 | 0.073572 | 0.013553 | 0.026621 | 0.334221 | 0.233785 | 0.172798 | 0.172798 | 0.17062 | 0.130203 | 0 | 0.00924 | 0.302216 | 7,445 | 223 | 93 | 33.38565 | 0.786141 | 0.021222 | 0 | 0.180723 | 0 | 0 | 0.020739 | 0.007142 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060241 | false | 0 | 0.036145 | 0 | 0.13253 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56756d9a6a2f6b3681bd9d47482a96048107979e | 947 | py | Python | Anaconda-files/Program_15d.py | arvidl/dynamical-systems-with-applications-using-python | db747f550337a7e7ec4a0851b188dd6e2e816a64 | ["BSD-2-Clause"] | 106 | 2018-10-10T18:04:02.000Z | 2022-03-11T06:32:38.000Z | Anaconda-files/Program_15d.py | arvidl/dynamical-systems-with-applications-using-python | db747f550337a7e7ec4a0851b188dd6e2e816a64 | ["BSD-2-Clause"] | null | null | null | Anaconda-files/Program_15d.py | arvidl/dynamical-systems-with-applications-using-python | db747f550337a7e7ec4a0851b188dd6e2e816a64 | ["BSD-2-Clause"] | 54 | 2018-02-06T09:47:42.000Z | 2022-03-25T15:41:43.000Z | # Program 15d: Plotting a Newton fractal.
# See Figure 15.7.
from PIL import Image
width = height = 512
image = Image.new('RGB', (width, height))
xmin, xmax = -1.5, 1.5
ymin, ymax = -1.5, 1.5
max_iter = 20
h = 1e-6 # Step size
eps = 1e-3 # Maximum error
def f(z):
return z**3 - 1.0 # Complex function.
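# Each pixel is colored by how many Newton iterations it needs to converge to
# one of the three cube roots of unity.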
# Draw the fractal.
for y in range(height):
zy = y * (ymax - ymin) / (height - 1) + ymin
for x in range(width):
zx = x * (xmax - xmin) / (width - 1) + xmin
z = complex(zx, zy)
for i in range(max_iter):
# Complex numerical derivative.
dz = (f(z + complex(h, h)) - f(z)) / complex(h, h)
z0 = z - f(z) / dz # Newton iteration.
if abs(z0 - z) < eps: # Stop when close enough to any root.
break
z = z0
image.putpixel((x, y), (i % 4 * 64, i % 8 * 32, i % 16 * 16))
image.save('Newton_Fractal.png', 'PNG')
image.show()
| 26.305556 | 71 | 0.531151 | 152 | 947 | 3.289474 | 0.513158 | 0.016 | 0.012 | 0.016 | 0.044 | 0 | 0 | 0 | 0 | 0 | 0 | 0.06192 | 0.317846 | 947 | 35 | 72 | 27.057143 | 0.712074 | 0.211193 | 0 | 0 | 0 | 0 | 0.032564 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.041667 | 0.041667 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5675e78e6bff192c2a34c289667d015bc90abcc8 | 870 | py | Python | bonga.py | AfonsoFGarcia/BigBongaClock | bc75f27d7f37a989e2efb417b74f1adfc2821c94 | ["MIT"] | 1 | 2015-06-22T16:08:38.000Z | 2015-06-22T16:08:38.000Z | bonga.py | AfonsoFGarcia/BigBongaClock | bc75f27d7f37a989e2efb417b74f1adfc2821c94 | ["MIT"] | 1 | 2020-09-08T20:38:24.000Z | 2020-09-08T20:38:24.000Z | bonga.py | AfonsoFGarcia/BigBongaClock | bc75f27d7f37a989e2efb417b74f1adfc2821c94 | ["MIT"] | null | null | null | import time
import tweepy as twitter
import os
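# Tweets the current hour as "tears on the dial", with the Azores (one hour
# behind mainland Portugal) getting "one tear less". The Portuguese template
# reads roughly: "I have N tear(s) in the corner of the dial, X in the Azores".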
superhour = time.localtime().tm_hour
hour = superhour % 12
if hour == 0:
hour = 12
sentence = "Tenho %d lágrima%s no canto do mostrador, %s nos Açores%s"
if superhour >= 12:
if hour == 1:
sentence = sentence % (hour, "", "12 lágrimas", "")
else:
sentence = sentence % (hour, "s", "menos uma lágrima", "")
else:
if hour == 1:
sentence = sentence % (hour, "", "12 lágrimas", ".")
else:
sentence = sentence % (hour, "s", "menos uma lágrima", ".")
CONSUMER_KEY = os.getenv('CONSUMER_KEY')
CONSUMER_SECRET = os.getenv('CONSUMER_SECRET')
ACCESS_TOKEN = os.getenv('ACCESS_TOKEN')
ACCESS_TOKEN_SECRET = os.getenv('ACCESS_TOKEN_SECRET')
auth = twitter.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = twitter.API(auth)
api.update_status(status=sentence)
| 26.363636 | 71 | 0.705747 | 121 | 870 | 4.917355 | 0.338843 | 0.129412 | 0.134454 | 0.110924 | 0.352941 | 0.258824 | 0.258824 | 0.258824 | 0.258824 | 0.258824 | 0 | 0.017568 | 0.149425 | 870 | 32 | 72 | 27.1875 | 0.786486 | 0 | 0 | 0.192308 | 0 | 0 | 0.201149 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.115385 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5679b4a709d6dc06439e297747d31c23263a2fac | 1,816 | py | Python | newserver.py | pedrohhcunha/Encryption-system | 2d1be01ab00e3e089f4db2ba391b1d294fbc8a72 | ["MIT"] | null | null | null | newserver.py | pedrohhcunha/Encryption-system | 2d1be01ab00e3e089f4db2ba391b1d294fbc8a72 | ["MIT"] | null | null | null | newserver.py | pedrohhcunha/Encryption-system | 2d1be01ab00e3e089f4db2ba391b1d294fbc8a72 | ["MIT"] | null | null | null | #! /usr/bin/env python
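# Server side of a toy Caesar-cipher exchange: receives enciphered messages over
# a TCP socket and decodes them with a user-supplied key.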
import socket  # the only module this script actually uses
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
host = ''
port = 9093
addr = (host, port)
serv_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serv_socket.bind(addr)
serv_socket.listen(1)
tam_mensagem = ""
print('Waiting for a client...')
con, cliente = serv_socket.accept()
print('Waiting for a message')
while True:
    pega_mensagem = con.recv(1024)
    if not pega_mensagem:  # recv() returns b'' once the client disconnects
        break
    alfabeto_normal = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    pega_mensagem = pega_mensagem.decode('utf-8')
    print("Message received: " + pega_mensagem)
    # if the character is found, find() returns its index (otherwise -1)
if (pega_mensagem.find('!', 0, len(pega_mensagem)) != -1):
tam_mensagem = len(pega_mensagem)/2 + 1
    # the Caesar-cipher cracker goes here
if (pega_mensagem.find(')', 0, len(pega_mensagem)) != -1):
tmp = pega_mensagem.split(')')
pega_mensagem = tmp[0]
decipher_text = ''
print(pega_mensagem)
        chave = input("Enter the key to break the message")
decodedMessage = []
for letter in pega_mensagem:
indexLetterInAlfabet = alfabeto_normal.find(letter)
letterDecoded = indexLetterInAlfabet - int(chave)
if letterDecoded < 0:
                print(letterDecoded, 'was less than 0')
letterDecoded += 26
print(indexLetterInAlfabet, letterDecoded)
decodedMessage.append(alfabeto_normal[letterDecoded])
output = ''.join(decodedMessage)
print(output)
    # message decipherment goes here
serv_socket.close()
| 28.825397 | 65 | 0.675661 | 214 | 1,816 | 5.588785 | 0.439252 | 0.150502 | 0.037625 | 0.043478 | 0.108696 | 0.108696 | 0.058528 | 0.058528 | 0.058528 | 0 | 0 | 0.016937 | 0.219714 | 1,816 | 62 | 66 | 29.290323 | 0.8271 | 0.0837 | 0 | 0.042553 | 0 | 0 | 0.106088 | 0.031344 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.191489 | 0 | 0.191489 | 0.148936 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
567f65f38faefff2b824b29b2ea7a8229dd32be4 | 8,294 | py | Python | model/networks.py | ifding/dynamic-analysis-firmware | 4d786c2280527ff38ba615974dd227c4f44c93b2 | ["MIT"] | 17 | 2019-01-18T12:45:38.000Z | 2021-12-03T19:55:25.000Z | model/networks.py | ifding/dynamic-analysis-firmware | 4d786c2280527ff38ba615974dd227c4f44c93b2 | ["MIT"] | 3 | 2018-06-27T19:08:21.000Z | 2019-12-18T09:29:11.000Z | model/networks.py | ifding/dynamic-analysis-firmware | 4d786c2280527ff38ba615974dd227c4f44c93b2 | ["MIT"] | 7 | 2018-07-28T17:58:23.000Z | 2021-01-02T17:16:20.000Z | """
Neural network modules for WaveNet
References :
https://arxiv.org/pdf/1609.03499.pdf
https://github.com/ibab/tensorflow-wavenet
https://qiita.com/MasaEguchi/items/cd5f7e9735a120f27e2a
https://github.com/musyoku/wavenet/issues/4
"""
import torch
import numpy as np
from utils.exceptions import InputSizeError
class DilatedCausalConv1d(torch.nn.Module):
"""Dilated Causal Convolution for WaveNet"""
def __init__(self, channels, dilation=1):
super(DilatedCausalConv1d, self).__init__()
self.conv = torch.nn.Conv1d(channels, channels,
kernel_size=2, stride=1, # Fixed for WaveNet
dilation=dilation,
padding=0, # Fixed for WaveNet dilation
bias=False) # Fixed for WaveNet but not sure
def init_weights_for_test(self):
for m in self.modules():
if isinstance(m, torch.nn.Conv1d):
m.weight.data.fill_(1)
def forward(self, x):
output = self.conv(x)
return output
class CausalConv1d(torch.nn.Module):
"""Causal Convolution for WaveNet"""
def __init__(self, in_channels, out_channels):
super(CausalConv1d, self).__init__()
# padding=1 for same size(length) between input and output for causal convolution
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=2, stride=1, padding=1,
bias=False) # Fixed for WaveNet but not sure
def init_weights_for_test(self):
for m in self.modules():
if isinstance(m, torch.nn.Conv1d):
m.weight.data.fill_(1)
def forward(self, x):
output = self.conv(x)
# remove last value for causal convolution
return output[:, :, :-1]
class ResidualBlock(torch.nn.Module):
def __init__(self, res_channels, skip_channels, dilation):
"""
Residual block
:param res_channels: number of residual channel for input, output
:param skip_channels: number of skip channel for output
:param dilation:
"""
super(ResidualBlock, self).__init__()
self.dilated = DilatedCausalConv1d(res_channels, dilation=dilation)
self.conv_res = torch.nn.Conv1d(res_channels, res_channels, 1)
self.conv_skip = torch.nn.Conv1d(res_channels, skip_channels, 1)
self.gate_tanh = torch.nn.Tanh()
self.gate_sigmoid = torch.nn.Sigmoid()
def forward(self, x, skip_size):
"""
:param x:
:param skip_size: The last output size for loss and prediction
:return:
"""
output = self.dilated(x)
# PixelCNN gate
gated_tanh = self.gate_tanh(output)
gated_sigmoid = self.gate_sigmoid(output)
gated = gated_tanh * gated_sigmoid
# Residual network
output = self.conv_res(gated)
input_cut = x[:, :, -output.size(2):]
output += input_cut
# Skip connection
skip = self.conv_skip(gated)
skip = skip[:, :, -skip_size:]
return output, skip
class ResidualStack(torch.nn.Module):
def __init__(self, layer_size, stack_size, res_channels, skip_channels):
"""
Stack residual blocks by layer and stack size
:param layer_size: integer, 10 = layer[dilation=1, dilation=2, 4, 8, 16, 32, 64, 128, 256, 512]
:param stack_size: integer, 5 = stack[layer1, layer2, layer3, layer4, layer5]
:param res_channels: number of residual channel for input, output
:param skip_channels: number of skip channel for output
:return:
"""
super(ResidualStack, self).__init__()
self.layer_size = layer_size
self.stack_size = stack_size
self.res_blocks = self.stack_res_block(res_channels, skip_channels)
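        # NOTE: a plain Python list does not register the blocks as submodules, so
        # self.parameters() will not include their weights (torch.nn.ModuleList
        # would); device placement is instead handled manually in _residual_block.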
@staticmethod
def _residual_block(res_channels, skip_channels, dilation):
block = ResidualBlock(res_channels, skip_channels, dilation)
if torch.cuda.device_count() > 1:
block = torch.nn.DataParallel(block)
if torch.cuda.is_available():
block.cuda()
return block
def build_dilations(self):
dilations = []
# 5 = stack[layer1, layer2, layer3, layer4, layer5]
for s in range(0, self.stack_size):
# 10 = layer[dilation=1, dilation=2, 4, 8, 16, 32, 64, 128, 256, 512]
for l in range(0, self.layer_size):
dilations.append(2 ** l)
return dilations
def stack_res_block(self, res_channels, skip_channels):
"""
Prepare dilated convolution blocks by layer and stack size
:return:
"""
res_blocks = []
dilations = self.build_dilations()
for dilation in dilations:
block = self._residual_block(res_channels, skip_channels, dilation)
res_blocks.append(block)
return res_blocks
def forward(self, x, skip_size):
"""
:param x:
:param skip_size: The last output size for loss and prediction
:return:
"""
output = x
skip_connections = []
for res_block in self.res_blocks:
# output is the next input
output, skip = res_block(output, skip_size)
skip_connections.append(skip)
return torch.stack(skip_connections)
class DensNet(torch.nn.Module):
def __init__(self, channels):
"""
The last network of WaveNet
:param channels: number of channels for input and output
:return:
"""
super(DensNet, self).__init__()
self.conv1 = torch.nn.Conv1d(channels, channels, 1)
self.conv2 = torch.nn.Conv1d(channels, channels, 1)
self.relu = torch.nn.ReLU()
self.softmax = torch.nn.Softmax(dim=1)
def forward(self, x):
output = self.relu(x)
output = self.conv1(output)
output = self.relu(output)
output = self.conv2(output)
output = self.softmax(output)
return output
class WaveNet(torch.nn.Module):
def __init__(self, layer_size, stack_size, in_channels, res_channels):
"""
Stack residual blocks by layer and stack size
:param layer_size: integer, 10 = layer[dilation=1, dilation=2, 4, 8, 16, 32, 64, 128, 256, 512]
:param stack_size: integer, 5 = stack[layer1, layer2, layer3, layer4, layer5]
:param in_channels: number of channels for input data. skip channel is same as input channel
:param res_channels: number of residual channel for input, output
:return:
"""
super(WaveNet, self).__init__()
self.receptive_fields = self.calc_receptive_fields(layer_size, stack_size)
self.causal = CausalConv1d(in_channels, res_channels)
self.res_stack = ResidualStack(layer_size, stack_size, res_channels, in_channels)
self.densnet = DensNet(in_channels)
@staticmethod
def calc_receptive_fields(layer_size, stack_size):
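        # each stack contributes sum(2**i for i in range(layer_size)) = 2**layer_size - 1
        # steps, e.g. layer_size=10, stack_size=5 -> 5 * (2**10 - 1) = 5115 timesteps of context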
layers = [2 ** i for i in range(0, layer_size)] * stack_size
num_receptive_fields = np.sum(layers)
return int(num_receptive_fields)
def calc_output_size(self, x):
output_size = int(x.size(2)) - self.receptive_fields
self.check_input_size(x, output_size)
return output_size
def check_input_size(self, x, output_size):
if output_size < 1:
raise InputSizeError(int(x.size(2)), self.receptive_fields, output_size)
def forward(self, x):
"""
The size of timestep(3rd dimention) has to be bigger than receptive fields
:param x: Tensor[batch, timestep, channels]
:return: Tensor[batch, timestep, channels]
"""
#output = x.transpose(1, 2)
#output_size = self.calc_output_size(output)
#output = self.causal(output)
output_size = self.calc_output_size(x)
output = self.causal(x)
skip_connections = self.res_stack(output, output_size)
output = torch.sum(skip_connections, dim=0)
output = self.densnet(output)
return output.transpose(1, 2).contiguous()
| 31.777778 | 103 | 0.618278 | 1,016 | 8,294 | 4.858268 | 0.163386 | 0.026945 | 0.02107 | 0.037277 | 0.435981 | 0.384724 | 0.326985 | 0.241086 | 0.241086 | 0.241086 | 0 | 0.025797 | 0.284905 | 8,294 | 260 | 104 | 31.9 | 0.806441 | 0.266578 | 0 | 0.177419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16129 | false | 0 | 0.024194 | 0 | 0.322581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56804b24fb35ab2abb9bf99473495ce4e51fa000 | 3,643 | py | Python | metrics/f2_structured_metadata.py | MaastrichtU-IDS/fair-enough-metrics | deb238a84385e1f94c0e2321b4b3ebdc231094d3 | ["MIT"] | 1 | 2022-01-28T09:42:20.000Z | 2022-01-28T09:42:20.000Z | metrics/f2_structured_metadata.py | MaastrichtU-IDS/fair-enough-metrics | deb238a84385e1f94c0e2321b4b3ebdc231094d3 | ["MIT"] | null | null | null | metrics/f2_structured_metadata.py | MaastrichtU-IDS/fair-enough-metrics | deb238a84385e1f94c0e2321b4b3ebdc231094d3 | ["MIT"] | 1 | 2022-01-29T03:39:37.000Z | 2022-01-29T03:39:37.000Z | import requests
import yaml
from fair_test import FairTest, FairTestEvaluation
class MetricTest(FairTest):
metric_path = 'f2-structured-metadata'
applies_to_principle = 'F2'
title = 'Metadata is structured'
description = """Tests whether a machine is able to find structured metadata. This could be (for example) RDFa, embedded json, json-ld, or content-negotiated structured metadata such as RDF Turtle.
This assessment will try to extract metadata from the resource URI:
- Search for structured metadata at the resource URI.
- Use HTTP requests with content-negotiation (RDF, JSON-LD, JSON, YAML),
- Extract metadata from the HTML landing page using extruct"""
topics = ['metadata']
author = 'https://orcid.org/0000-0002-1501-1082'
metric_version = '0.1.0'
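    # subject URLs mapped to the score the test is expected to produce
    # (1 = structured metadata found, 0 = none)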
test_test={
'https://doi.org/10.1594/PANGAEA.908011': 1,
'https://w3id.org/ejp-rd/fairdatapoints/wp13/dataset/c5414323-eab1-483f-a883-77951f246972': 1,
'https://doi.org/10.1186/2041-1480-5-14': 1,
'https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge': 1,
'https://doi.org/10.5281/zenodo.5541440': 1,
'https://doi.org/10.34894/DR3I2A': 1,
'https://doi.org/10.1045/november2015-vandesompel': 1,
'https://doi.org/10.1016/j.jbi.2008.03.004': 1,
'https://doi.org/10.1038/sdata.2016.18': 1,
'https://doi.org/10.1016/J.JBI.2019.103292': 1,
'https://w3id.org/AmIFAIR': 1,
'https://purl.uniprot.org/uniprot/P51587': 1,
'https://w3id.org/FAIR_Evaluator/evaluations/6259.json': 1,
'http://example.com': 0,
# 'https://w3id.org/FAIR_Tests/tests/gen2_structured_metadata': 0,
# FAIRsharing not consistent, most of the time give 1, but sometimes fails (their server timeout)
# 'https://doi.org/10.25504/FAIRsharing.jptb1m': 1,
# 'https://www.proteinatlas.org/ENSG00000084110-HAL': 1,
# 'https://data.rivm.nl/meta/srv/eng/rdf.metadata.get?uuid=1c0fcd57-1102-4620-9cfa-441e93ea5604&approved=true': 1,
}
def evaluate(self, eval: FairTestEvaluation):
eval.info('Checking if machine readable data (e.g. RDF, JSON-LD) can be retrieved using content-negotiation at ' + eval.subject)
g = eval.retrieve_metadata(eval.subject)
if not isinstance(g, (list, dict)) and len(g) > 1:
eval.success(f'Successfully found and parsed RDF metadata. It contains {str(len(g))} triples')
elif isinstance(g, (list, dict)) and len(g) > 1:
eval.success(f'Successfully found and parsed structured metadata. It contains {str(len(g))} objects')
else:
# eval.failure(f"No RDF metadata found at the subject URL {eval.subject}")
eval.warn('No RDF metadata found, checking for JSON')
try:
r_json = requests.get(eval.subject, headers={'accept': 'application/json'})
metadata = r_json.json()
eval.data['metadata_json'] = metadata
eval.success('Successfully found and parsed JSON metadata')
            except Exception:
eval.warn('No JSON metadata found, checking for YAML')
try:
r_yaml = requests.get(eval.subject, headers={'accept': 'text/yaml'})
                    metadata = yaml.load(r_yaml.text, Loader=yaml.FullLoader)  # .text is already a str
eval.data['metadata_yaml'] = metadata
eval.success('Successfully found and parsed YAML metadata')
except Exception as e:
eval.failure('No YAML metadata found')
return eval.response()
| 52.042857 | 201 | 0.639308 | 483 | 3,643 | 4.786749 | 0.438923 | 0.036332 | 0.04282 | 0.050606 | 0.196367 | 0.16609 | 0.114187 | 0.07526 | 0.056228 | 0.056228 | 0 | 0.08188 | 0.228932 | 3,643 | 69 | 202 | 52.797101 | 0.741189 | 0.123799 | 0 | 0.036364 | 0 | 0.072727 | 0.518681 | 0.006907 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018182 | false | 0 | 0.054545 | 0 | 0.254545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56812e1d2c9fb35b48bbbc87de532ca4299da390 | 1,017 | py | Python | tests/runtime/redis/test_redis.py | igboyes/virtool-workflow | 1ef9a4b0bada1963ff9be0470dfe74b32c9e7ccf | ["MIT"] | null | null | null | tests/runtime/redis/test_redis.py | igboyes/virtool-workflow | 1ef9a4b0bada1963ff9be0470dfe74b32c9e7ccf | ["MIT"] | null | null | null | tests/runtime/redis/test_redis.py | igboyes/virtool-workflow | 1ef9a4b0bada1963ff9be0470dfe74b32c9e7ccf | ["MIT"] | null | null | null | import asyncio
from virtool_workflow_runtime._redis import connect, VIRTOOL_JOBS_CHANNEL, job_id_queue
from virtool_workflow_runtime.runtime import execute_from_redis
JOB_IDs = [str(n) for n in range(3)]
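# Publish these IDs over the Virtool jobs channel and assert the consumers
# receive them in publication order.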
async def assert_correct_job_ids():
queue = job_id_queue()
for id_ in JOB_IDs:
_id = await queue.__anext__()
assert _id == id_
async def publish_job_ids():
async with connect() as redis:
for id_ in JOB_IDs:
await redis.publish(VIRTOOL_JOBS_CHANNEL, id_)
async def run_workflows_from_redis(test_workflow):
exec_ = execute_from_redis(workflow=test_workflow)
for _ in JOB_IDs:
result = await exec_.__anext__()
assert result["start"] and result["clean"]
assert result["1"] and result["2"]
async def test_job_id_queue():
await asyncio.gather(assert_correct_job_ids(), publish_job_ids())
async def test_execute_from_redis(test_workflow):
await asyncio.gather(run_workflows_from_redis(test_workflow), publish_job_ids())
| 28.25 | 87 | 0.73353 | 148 | 1,017 | 4.594595 | 0.277027 | 0.079412 | 0.044118 | 0.092647 | 0.135294 | 0.097059 | 0 | 0 | 0 | 0 | 0 | 0.00361 | 0.182891 | 1,017 | 35 | 88 | 29.057143 | 0.814681 | 0 | 0 | 0.086957 | 0 | 0 | 0.011811 | 0 | 0 | 0 | 0 | 0 | 0.217391 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5683642aced3575289798f545fc9efd887e19acc | 3,363 | py | Python | dustmaker/cmd/thumbnail.py | msg555/dustmaker | 8ce54e7e6b29af75d72ca42051881df26624b6fc | ["Apache-2.0"] | 11 | 2015-09-29T07:48:30.000Z | 2019-05-05T20:44:48.000Z | dustmaker/cmd/thumbnail.py | msg555/dustmaker | 8ce54e7e6b29af75d72ca42051881df26624b6fc | ["Apache-2.0"] | 5 | 2016-10-16T00:30:18.000Z | 2022-02-12T20:04:11.000Z | dustmaker/cmd/thumbnail.py | msg555/dustmaker | 8ce54e7e6b29af75d72ca42051881df26624b6fc | ["Apache-2.0"] | 3 | 2016-10-15T20:51:03.000Z | 2019-03-21T03:31:47.000Z | #!/usr/bin/env python3
"""
Sample script to extract and set level thumbnails.
"""
import argparse
import io
import os
import sys
from dustmaker import DFReader, DFWriter
from dustmaker.cmd.common import (
run_utility,
CliUtility,
)
from dustmaker.variable import VariableBool
class Thumbnail(CliUtility):
"""CLI utility for adjusting level thumbnails"""
def setup_parser(self, parser: argparse.ArgumentParser) -> None:
"""Read CLI arguments"""
parser.description = "extract or update a level thumbnail"
parser.add_argument("level")
parser.add_argument("image")
parser.add_argument(
"--force",
action="store_const",
const=True,
default=False,
required=False,
help="allow overwrite of existing image",
)
parser.add_argument(
"--update",
action="store_const",
const=True,
default=False,
required=False,
help="read in the image and update the level thumbnail",
)
parser.add_argument(
"--auto-convert",
action="store_const",
const=True,
default=False,
required=False,
help="automatically convert to PNG format (implies --update)",
)
parser.add_argument(
"--auto-scale",
action="store_const",
const=True,
default=False,
required=False,
help="automaticaly scale image to expected 382 x 182 size (implies --auto-convert)",
)
def main(self, args) -> int:
"""thumbnail CLI entrypoint"""
if args.auto_scale:
args.auto_convert = True
if args.auto_convert:
args.update = True
with DFReader(open(args.level, "rb")) as reader:
level, region_offsets = reader.read_level_ex()
region_data = b""
if args.update:
region_data = reader.read_bytes(region_offsets[-1])
if not args.update:
if not args.force and os.path.exists(args.image):
print("path already exists, use --force to ignore")
return 1
with open(args.image, "wb") as fout:
fout.write(level.sshot)
return 0
if args.auto_convert:
try:
# pylint: disable=import-outside-toplevel
from PIL import Image # type: ignore
except ImportError:
print(
"failed to import PIL, cannot convert image (try `pip install pillow`)"
)
return 1
with Image.open(args.image) as im:
if args.auto_scale:
im = im.resize((382, 182))
with io.BytesIO() as io_out:
im.save(io_out, format="PNG")
level.sshot = io_out.getvalue()
else:
with open(args.image, "rb") as fimg:
level.sshot = fimg.read()
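        # "icon_taken" presumably flags that the level now has a custom thumbnail
        # (an assumption from the variable name, not from dustmaker documentation)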
level.variables["icon_taken"] = VariableBool(True)
with DFWriter(open(args.level, "wb")) as writer:
writer.write_level_ex(level, region_offsets, region_data)
return 0
if __name__ == "__main__":
sys.exit(run_utility(Thumbnail))
| 30.297297 | 96 | 0.549509 | 364 | 3,363 | 4.964286 | 0.368132 | 0.029884 | 0.056447 | 0.046486 | 0.153846 | 0.119535 | 0.119535 | 0.119535 | 0.119535 | 0.119535 | 0 | 0.008287 | 0.354148 | 3,363 | 110 | 97 | 30.572727 | 0.823665 | 0.063039 | 0 | 0.321839 | 0 | 0 | 0.153871 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022989 | false | 0 | 0.114943 | 0 | 0.195402 | 0.022989 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
568673ef2cde487c729769189c6ebe595faadce9 | 2,170 | py | Python | kts/ui/leaderboard.py | konodyuk/kts | 3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7 | ["MIT"] | 18 | 2019-02-14T13:10:07.000Z | 2021-11-26T07:10:13.000Z | kts/ui/leaderboard.py | konodyuk/kts | 3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7 | ["MIT"] | 2 | 2019-02-17T14:06:42.000Z | 2019-09-15T18:05:54.000Z | kts/ui/leaderboard.py | konodyuk/kts | 3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7 | ["MIT"] | 2 | 2019-09-15T13:12:42.000Z | 2020-04-15T14:05:54.000Z | import time
from kts.ui.components import HTMLRepr, Column, Field, Title, ThumbnailField, Raw
from kts.util.formatting import format_value
def format_experiment_date(date):
delta = time.time() - date
if delta < 60 * 60 * 24:
return format_value(delta, time=True) + ' ago'
else:
return format_value(date, time=True)
class Leaderboard(HTMLRepr):
"""Needs refactoring, very sketchy"""
def __init__(self, experiments):
self.experiments = experiments
self.col_widths = [1, 6, 5, 12, 6, 8, 8]
self.col_names = ['#', 'id', 'score', 'model', '# features', "date", "took"]
self.data = [
(
i,
e.id,
format_value(e.score),
e.model_class,
e.n_features,
format_experiment_date(e.date),
format_value(e.took, time=True)
)
for i, e in enumerate(experiments)
]
def head_style(self, i):
return dict(bg=False, accent=False, bold=False,
style=f"padding: 0px 5px; margin: 0px; width: {i}em; border: 0px;")
def cell_style(self, i):
return dict(bg=False, style=f"padding: 0px 5px; margin: 0px; width: {i}em; border: 0px;")
def concat(self, row):
return ' '.join(cell.html if not isinstance(cell, str) else cell for cell in row)
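    # html: a fixed-width header row followed by one collapsible row per experiment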
@property
def html(self):
        head_cells = [Field(name, **self.head_style(width))
                      for name, width in zip(self.col_names, self.col_widths)]
rows = [[Field(self.data[i][j], **self.cell_style(self.col_widths[j]))
for j in range(len(self.data[0]))
] for i in range(len(self.data))]
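        # Re-use the cell rows to build one collapsible block per experiment,
        # shadowing `rows` with the wrapped version.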
rows = [Raw(e.html_collapsible(ThumbnailField(self.concat(rows[i]), css_id=-1, first=False), border=True)) for i, e in enumerate(self.experiments)]
res = Column([Title('leaderboard'), Field(self.concat(head_cells), bg=False, bold=False, style="padding-bottom: 0px; margin: 0px 2px 0px 2px;")] + rows)
return res.html
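# Usage sketch: Leaderboard(experiments).html returns the rendered HTML string
# (experiments are assumed to expose id, score, model_class, n_features, date,
# took and an html_collapsible() helper).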
| 38.070175 | 160 | 0.595392 | 298 | 2,170 | 4.224832 | 0.312081 | 0.04448 | 0.051628 | 0.042891 | 0.238284 | 0.196187 | 0.123114 | 0.084194 | 0.084194 | 0.084194 | 0 | 0.020013 | 0.263134 | 2,170 | 56 | 161 | 38.75 | 0.767355 | 0.014286 | 0 | 0 | 0 | 0 | 0.096578 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.068182 | 0.068182 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5687b2eebefd1922fec6386607f4531267b31693 | 2,726 | py | Python | dags/etl_store_dag.py | nileshvarshney/airflow | 6bb31a3acdd5a9c8bb74ddb01a851adb99602b9b | [
"Apache-2.0"
] | null | null | null | dags/etl_store_dag.py | nileshvarshney/airflow | 6bb31a3acdd5a9c8bb74ddb01a851adb99602b9b | [
"Apache-2.0"
] | null | null | null | dags/etl_store_dag.py | nileshvarshney/airflow | 6bb31a3acdd5a9c8bb74ddb01a851adb99602b9b | [
"Apache-2.0"
] | null | null | null | # import python libraries
from airflow import DAG
from datetime import datetime, timedelta
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from datacleaner import data_cleaner
from airflow.operators.mysql_operator import MySqlOperator
from airflow.operators.email_operator import EmailOperator
yesterday_date = datetime.strftime(datetime.now() - timedelta(1), '%Y-%m-%d')
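# Stamp for the archived report filenames (see the rename tasks below).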
# default argument directory
default_args = {
    "owner": "Nilesh Varshney",
    "start_date": datetime(2021, 3, 21),
    "retries": 1,
    "retry_delay": timedelta(seconds=10)
}
dag = DAG('etl_store_dag', default_args=default_args, schedule_interval='@daily',
          template_searchpath=['/usr/local/airflow/sql_files'], catchup=False)
#========================================#
# Task section
#========================================#
#Task 1 : Check the source file exist
check_source_file = BashOperator(
task_id = 'check_source_file',
bash_command = 'shasum ~/store_files_airflow/raw_store_transactions.csv',
retries=2,
retry_delay=timedelta(seconds=15),
dag = dag
)
#Task 2 : clean the input datafile
data_cleaning = PythonOperator(
task_id = 'clean_raw_csv',
python_callable = data_cleaner,
dag = dag
)
#Task 3 : create mysql table
create_table = MySqlOperator(
task_id = 'create_mysql_table',
mysql_conn_id="mysql_conn",
sql = "create_table.sql",
dag= dag)
#Task 4 : Populate mysql table
populate_table = MySqlOperator(
task_id = 'populate_table',
mysql_conn_id="mysql_conn",
sql = "load_data.sql",
dag= dag)
# Task 5: Generate aggregate data
output_report_generation = MySqlOperator(
task_id = 'output_report_generation',
mysql_conn_id="mysql_conn",
sql = "daily_store_profit.sql",
dag= dag)
# Task 6: Rename the existing location-wise report if it exists
rename_existing_report_01 = BashOperator(
task_id = 'rename_existing_report_01',
bash_command = 'cat ~/store_files_airflow/location_wise_daily_profit.csv && mv ~/store_files_airflow/location_wise_daily_profit.csv ~/store_files_airflow/location_wise_daily_profit_%s.csv' % yesterday_date,
dag = dag
)
# Task 7: Rename the existing store-wise report if it exists
rename_existing_report_02 = BashOperator(
task_id = 'rename_existing_report_02',
bash_command = 'cat ~/store_files_airflow/store_wise_daily_profit.csv && mv ~/store_files_airflow/store_wise_daily_profit.csv ~/store_files_airflow/store_wise_daily_profit_%s.csv' % yesterday_date,
dag = dag
)
check_source_file >> data_cleaning >> create_table >> populate_table >> output_report_generation >> [rename_existing_report_01,rename_existing_report_02]
| 32.843373 | 210 | 0.733309 | 356 | 2,726 | 5.275281 | 0.297753 | 0.025559 | 0.063365 | 0.038339 | 0.309904 | 0.309904 | 0.242279 | 0.187433 | 0.096912 | 0.056443 | 0 | 0.014145 | 0.144167 | 2,726 | 82 | 211 | 33.243902 | 0.790827 | 0.147469 | 0 | 0.185185 | 0 | 0.037037 | 0.307759 | 0.208496 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.12963 | 0 | 0.12963 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
568d890d93930eebca3929a03cee09545033af9c | 1,976 | py | Python | Pibow/sprinkles.py | ShineTop/Unicorn-HAT | 9ff1388ee627a8e81f361929e9e9b708db4e2832 | [
"MIT"
] | null | null | null | Pibow/sprinkles.py | ShineTop/Unicorn-HAT | 9ff1388ee627a8e81f361929e9e9b708db4e2832 | [
"MIT"
] | null | null | null | Pibow/sprinkles.py | ShineTop/Unicorn-HAT | 9ff1388ee627a8e81f361929e9e9b708db4e2832 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
Sprinkles - Pibow
This program lights up and turns off random LEDs using the colors of the
Pibow Zero Candy case
....................
Functions:
- sprinkles: Lights up and turns off random LEDs
....................
Author: Paul Ryan
This program was written on a Raspberry Pi using the Geany IDE.
"""
########################################################################
# Import modules #
########################################################################
import time
import unicornhat
from bfp_unicornhat import print_header
from bfp_unicornhat import stop
from bfp_unicornhat import get_random_color
from bfp_unicornhat import light_up_random_led
from bfp_unicornhat import random_x_coordinate
from bfp_unicornhat import random_y_coordinate
########################################################################
# Functions #
########################################################################
def sprinkles():
"""
    Lights up and turns off random LEDs for about 15 seconds
"""
    start_time = time.time()
seconds_elapsed = 0
while seconds_elapsed < 15:
seconds_elapsed = time.time() - start_time
# Turn on a random LED
red, green, blue = get_random_color()
light_up_random_led(red, green, blue)
# Turn OFF a random LED
unicornhat.set_pixel(random_x_coordinate(),
random_y_coordinate(),
0, 0, 0)
unicornhat.show()
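        # Short pause between updates; lower values speed up the sprinkle effect.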
time.sleep(0.01)
if __name__ == '__main__':
try:
# STEP01: Print header
print_header()
# STEP02: Print instructions in white text
print("\033[1;37;40mPress Ctrl-C to stop the program.")
        # STEP03: Run the sprinkles animation
sprinkles()
# STEP04: Exit the program.
stop()
except KeyboardInterrupt:
stop()
| 27.830986 | 72 | 0.508097 | 199 | 1,976 | 4.844221 | 0.442211 | 0.043568 | 0.105809 | 0.143154 | 0.212656 | 0.108921 | 0.108921 | 0.078838 | 0 | 0 | 0 | 0.018056 | 0.271255 | 1,976 | 70 | 73 | 28.228571 | 0.651389 | 0.321356 | 0 | 0.068966 | 0 | 0 | 0.052941 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.275862 | 0 | 0.310345 | 0.103448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56920c08dfcb1a77f8cde28ba7bdd1f09b763b05 | 4,387 | py | Python | src/luh3417/snapshot/__init__.py | HenryJobst/luh3417 | 680c21739d2afb9559e4d8bdf4eedeaf5a6b1e28 | [
"WTFPL"
] | 1 | 2020-12-02T15:47:11.000Z | 2020-12-02T15:47:11.000Z | src/luh3417/snapshot/__init__.py | HenryJobst/luh3417 | 680c21739d2afb9559e4d8bdf4eedeaf5a6b1e28 | [
"WTFPL"
] | null | null | null | src/luh3417/snapshot/__init__.py | HenryJobst/luh3417 | 680c21739d2afb9559e4d8bdf4eedeaf5a6b1e28 | [
"WTFPL"
] | null | null | null | import subprocess
import re
from typing import Sequence, Text
from luh3417.luhfs import LocalLocation, Location, SshLocation
from luh3417.luhssh import SshManager
from luh3417.utils import LuhError
def rsync_files(source: Location, target: Location, delete: bool = False):
"""
Use rsync to copy files from a location to another
"""
args = [
"rsync",
"-rz",
"--exclude=.git",
"--exclude=.idea",
"--exclude=*.swp",
"--exclude=*.un~",
]
if delete:
args.append("--delete")
args += [source.rsync_path(True), target.rsync_path(True)]
cp = subprocess.run(args, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)
return cp.returncode, cp.stderr
def sync_files(source: Location, target: Location, delete: bool = False):
"""
Use rsync to copy files from a location to another
"""
target.ensure_exists_as_dir()
rc, stderr = rsync_files(source, target, delete)
if rc:
cmd_not_found = re.search("command not found", str(stderr))
if not cmd_not_found:
raise LuhError(f"Error while copying files: {stderr}")
copy_files_with_delete(source, target, delete)
def _build_args(location: Location, args: Sequence[Text]) -> Sequence[Text]:
"""
Builds args to use either with SSH either straight
"""
if isinstance(location, LocalLocation):
return args
elif isinstance(location, SshLocation):
return SshManager.instance(location.user, location.host, location.port).get_args(args)
def activate_maintenance_mode(remote: Location):
remote_args = _build_args(remote, ["wp", "maintenance-mode", "activate", "--path=", remote.path, "--quiet"])
remote_p = subprocess.Popen(
remote_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
remote_p.wait()
if remote_p.returncode:
raise LuhError(
            f'Error while activating maintenance mode at "{remote}": {remote_p.stderr.read(1000)}'
)
def deactivate_maintenance_mode(remote: Location):
remote_args = _build_args(remote, ["wp", "maintenance-mode", "deactivate", "--path=", remote.path, "--quiet"])
remote_p = subprocess.Popen(
remote_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
remote_p.wait()
if remote_p.returncode:
raise LuhError(
            f'Error while deactivating maintenance mode at "{remote}": {remote_p.stderr.read(1000)}'
)
def copy_files(source: Location, target: Location, excludes, exclude_tag_alls):
"""
Copies files from the remote location to the local locations. Files are
serialized and pipelined through tar, maybe locally, maybe through SSH
depending on the locations.
"""
source_tar_command = ["tar", "-C", source.path]
if excludes:
for exclude in excludes:
source_tar_command.append("--exclude")
source_tar_command.append(exclude)
if exclude_tag_alls:
for exclude_tag_all in exclude_tag_alls:
source_tar_command.append("--exclude-tag-all")
source_tar_command.append(exclude_tag_all)
source_tar_command.extend(["-c", "."])
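    # Stream the archive: source-side tar writes to stdout, target-side tar
    # reads from stdin, with SSH wrapping either end as needed.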
source_args = _build_args(source, source_tar_command)
target_args_1 = _build_args(target, ["mkdir", "-p", target.path])
target_args_2 = _build_args(target, ["tar", "-C", target.path, "-x"])
cp = subprocess.run(target_args_1, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)
if cp.returncode:
raise LuhError(f'Error while creating target dir "{target}": {cp.stderr}')
source_p = subprocess.Popen(
source_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
target_p = subprocess.Popen(
target_args_2,
stdin=source_p.stdout,
stderr=subprocess.PIPE,
stdout=subprocess.DEVNULL,
)
source_p.wait()
target_p.wait()
if source_p.returncode:
raise LuhError(
f'Error while reading files from "{source}": {source_p.stderr.read(1000)}'
)
if target_p.returncode:
raise LuhError(f'Error writing files to "{target}": {target_p.stderr.read(1000)}')
def copy_files_with_delete(source: Location, target: Location, delete: bool = False):
if delete:
target.delete_dir_content()
target.ensure_exists_as_dir()
copy_files(source, target, None, None)
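# Usage sketch (hypothetical Location objects; the real constructors live in
# luh3417.luhfs and may differ):
#   sync_files(source_location, target_location, delete=True)
# tries rsync first and falls back to the tar pipeline when rsync is missing.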
| 29.442953 | 114 | 0.664235 | 542 | 4,387 | 5.197417 | 0.226937 | 0.044728 | 0.039759 | 0.040469 | 0.457579 | 0.389776 | 0.336528 | 0.28044 | 0.28044 | 0.28044 | 0 | 0.009338 | 0.218828 | 4,387 | 148 | 115 | 29.641892 | 0.812664 | 0.073627 | 0 | 0.163043 | 0 | 0 | 0.152691 | 0.028035 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076087 | false | 0 | 0.065217 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5692c81d7e2760ade8f07b80322678af0eaf034a | 988 | py | Python | Longest Palindrome.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | Longest Palindrome.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | Longest Palindrome.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | '''
Given a string which consists of lowercase or uppercase letters, find the length of the longest palindrome that can be built with those letters.
This is case sensitive, for example "Aa" is not considered a palindrome here.
Note:
Assume the length of given string will not exceed 1,010.
Example:
Input:
"abccccdd"
Output:
7
Explanation:
One longest palindrome that can be built is "dccaccd", whose length is 7.
'''
class Solution(object):
def longestPalindrome(self, s):
"""
:type s: str
:rtype: int
"""
fre = {}
for c in s:
if c in fre:
fre[c] += 1
else:
fre[c] = 1
res = 0
has_odd = False
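        # Every pair of equal characters can flank the center symmetrically;
        # at most one character with an odd count can occupy the middle.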
for c in fre:
if fre[c] % 2 == 0:
res += fre[c]
else:
has_odd = True
res += fre[c] - 1
if has_odd:
res += 1
return res
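# Equivalent compact version (sketch, using collections.Counter):
#   pairs = sum(v // 2 * 2 for v in Counter(s).values())
#   return pairs + (pairs < len(s))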
| 21.021277 | 145 | 0.508097 | 128 | 988 | 3.898438 | 0.546875 | 0.04008 | 0.03006 | 0.056112 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022414 | 0.412955 | 988 | 46 | 146 | 21.478261 | 0.837931 | 0.446356 | 0 | 0.105263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
569a7754edeb369bfa7791b3bdcf74473cb3053f | 3,780 | py | Python | GUI/Toolbox/metadata.py | Guillermo-Hidalgo-Gadea/RPi4Toolbox | 47a265aa9828f144155c097efc8ff36bd435099f | [
"MIT"
] | null | null | null | GUI/Toolbox/metadata.py | Guillermo-Hidalgo-Gadea/RPi4Toolbox | 47a265aa9828f144155c097efc8ff36bd435099f | [
"MIT"
] | null | null | null | GUI/Toolbox/metadata.py | Guillermo-Hidalgo-Gadea/RPi4Toolbox | 47a265aa9828f144155c097efc8ff36bd435099f | [
"MIT"
] | 1 | 2021-10-15T16:14:48.000Z | 2021-10-15T16:14:48.000Z | # Metadata module to save metadata as dictionary, save trial metadata as yaml and export metadata as csv
import yaml
import datetime
import pandas as pd
from pathlib import Path
class Metadata:
def __init__(self):
base_path = Path().parent
self.metadata_dir = (base_path / "RPi4Toolbox/GUI/Toolbox/metadata.yaml").resolve()
self.subject = ''
self.experimenter = ''
self.date = ''
self.session = 0
self.condition = ''
self.trial = 0
self.repetition = 0
self.start_habituation = ''
self.start_stimulus = ''
self.reactiontime_keypeck = ''
self.optimal_stimulus = ''
self.key_choice = ''
self.reward = 0
# Initialize dictionary from existing metadata or create new
try:
# if metadata exists, read keys to initialize dictionary
with open(self.metadata_dir, 'r') as yamlfile:
metadata = yaml.safe_load(yamlfile)
self.dictionary = dict.fromkeys(metadata.keys(), [])
except IOError:
# if no metadata file exists initialize new empty ditionary
self.dictionary = {'subject':[],'experimenter':[],'date':[],'condition':[],'session':[],'trial':[],'repetition':[],
'start_habituation':[],'start_stimulus':[],'reactiontime_keypeck':[],
'optimal_stimulus':[],'key_choice':[],'reward':[],'col1':[]}
def append(self):
# update dictionary with session related metadata
self.dictionary['subject'].append(self.subject)
self.dictionary['experimenter'].append(self.experimenter)
self.dictionary['date'].append(self.date)
self.dictionary['condition'].append(self.condition)
self.dictionary['session'].append(self.session)
# update dictionary with trial related metadata
self.dictionary['trial'].append(self.trial)
self.dictionary['repetition'].append(self.repetition)
self.dictionary['start_habituation'].append(self.start_habituation)
self.dictionary['start_stimulus'].append(self.start_stimulus)
self.dictionary['reactiontime_keypeck'].append(self.reactiontime_keypeck)
self.dictionary['optimal_stimulus'].append(self.optimal_stimulus)
self.dictionary['key_choice'].append(self.key_choice)
self.dictionary['reward'].append(self.reward)
def save(self):
# SAVE TO YAML at the end of session
try:
# if metadata exists, append new data
with open(self.metadata_dir, 'r') as yamlfile:
metadata = yaml.safe_load(yamlfile)
metadata.update(self.dictionary)
with open(self.metadata_dir, 'w') as file:
yaml.safe_dump(metadata, file, sort_keys=False)
except IOError:
# if no metadata exists, create new file
with open(self.metadata_dir, 'w') as file:
yaml.dump(self.dictionary, file, sort_keys=False)
def export():
"""
This function exports the metadata.yaml file to a standard metadata.csv and cleans the
metadata.yaml history after moving it to backup.
"""
## EXPORT METADATA
base_path = Path().parent
file_path = (base_path / "../RPi4Toolbox/GUI/Toolbox/metadata.yaml").resolve()
with open(file_path, 'r') as yamlfile:
data = yaml.safe_load(yamlfile)
metadata = pd.DataFrame.from_dict(data, orient='index')
metadata = metadata.transpose()
filename = str(file_path)[0:-5]+'_' + datetime.datetime.now().strftime('%Y-%m-%d') + '.csv'
metadata.to_csv(filename, index = False, header=True, encoding='utf-8')
# move metadata csv and yaml file to sciebo backup
# erase yaml file to keep it slim | 43.953488 | 127 | 0.636508 | 436 | 3,780 | 5.415138 | 0.263761 | 0.100805 | 0.031766 | 0.033884 | 0.166878 | 0.130453 | 0.121982 | 0.121982 | 0.081321 | 0.05252 | 0 | 0.003492 | 0.242328 | 3,780 | 86 | 128 | 43.953488 | 0.82088 | 0.18836 | 0 | 0.193548 | 0 | 0 | 0.126111 | 0.025354 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.064516 | 0 | 0.145161 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
569ec3463a6b9dc7fdb5c4eccfb276fd52b756ed | 1,190 | py | Python | jobya/companies/management/commands/setup_company.py | xblzbjs/Jobya | b936ce37da86bfe8326a532dab3887fae6c65e45 | [
"MIT"
] | null | null | null | jobya/companies/management/commands/setup_company.py | xblzbjs/Jobya | b936ce37da86bfe8326a532dab3887fae6c65e45 | [
"MIT"
] | 2 | 2022-02-08T01:15:52.000Z | 2022-03-31T04:24:15.000Z | jobya/companies/management/commands/setup_company.py | xblzbjs/Jobya | b936ce37da86bfe8326a532dab3887fae6c65e45 | [
"MIT"
] | null | null | null | from django.core.management.base import BaseCommand
from django.db import transaction
from jobya.companies.models import Company
from jobya.companies.tests.factories import CompanyFactory
class Command(BaseCommand):
help = "Set up company data"
def add_arguments(self, parser):
parser.add_argument(
"total",
nargs="+",
type=int,
help="Indicates the number of companies to be created",
)
parser.add_argument(
"--delete",
action="store_true",
help="Delete old companies data before creating",
)
@transaction.atomic
def handle(self, *args, **options):
total = options.get("total")[0]
if options["delete"]:
self.delete_old_data()
self.stdout.write("Creating new companies...")
for _ in range(total):
CompanyFactory()
self.stdout.write("Created successfully!")
def delete_old_data(self):
self.stdout.write("Deleting old companies data...")
models = [Company]
for m in models:
m.objects.all().delete()
self.stdout.write("Deleted successfully!")
| 29.02439 | 67 | 0.607563 | 130 | 1,190 | 5.492308 | 0.5 | 0.056022 | 0.084034 | 0.047619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001174 | 0.284034 | 1,190 | 40 | 68 | 29.75 | 0.836854 | 0 | 0 | 0.060606 | 0 | 0 | 0.20084 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.121212 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
569fd8ab2bfa51b46a8fc425da22db12d4345b01 | 2,188 | py | Python | presenter.py | liordon/motion_detector | 7c22062bb3a8b254d9e4a3d6d88a89d89320785a | [
"Unlicense"
] | null | null | null | presenter.py | liordon/motion_detector | 7c22062bb3a8b254d9e4a3d6d88a89d89320785a | [
"Unlicense"
] | null | null | null | presenter.py | liordon/motion_detector | 7c22062bb3a8b254d9e4a3d6d88a89d89320785a | [
"Unlicense"
] | null | null | null | import ast
import datetime
import cv2
import psutil
from utils import *
def presenter_log(message: str):
log("PRST", message)
def present_annotated_frames_from_stream(pipe_reader, pid):
presenter_log("presenter presents")
while pipe_reader.poll(3) or psutil.pid_exists(pid):
message = pipe_reader.recv()
if message is None:
if not psutil.pid_exists(pid):
break
else:
continue
frame_string = message.split('|')[0]
annotations = message.split('|')[1]
gray_frame = string_to_frame(frame_string)
blurred_frame = cv2.GaussianBlur(gray_frame, (21, 21), 0)
text = "unoccupied" if len(annotations) == 0 else "occupied"
# loop over the contours
for (bottom_left_corner, top_right_corner) in ast.literal_eval(annotations):
blur_mask = np.ones(gray_frame.shape, dtype=np.uint8)
cv2.rectangle(blur_mask, bottom_left_corner, top_right_corner, 0, thickness=-1)
gray_frame = np.where(np.logical_not(blur_mask), blurred_frame, gray_frame)
cv2.rectangle(gray_frame, bottom_left_corner, top_right_corner, (0, 255, 0), 2)
# draw the text and timestamp on the frame
cv2.putText(img=gray_frame,
text="Room Status: {}".format(text),
org=(10, 20),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.5,
color=(2, 2, 255),
thickness=2)
cv2.putText(img=gray_frame,
text=datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
org=(10, gray_frame.shape[0] - 10),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.35,
color=(2, 2, 255),
thickness=1)
# show the frame and record if the user presses a key
cv2.imshow("Security Feed", cv2.convertScaleAbs(gray_frame))
key = cv2.waitKey(1) & 0xFF
# if the `q` key is pressed, break from the lop
if key == ord("q"):
break
# cleanup the camera and close any open windows
presenter_log("presenter finished presenting")
cv2.destroyAllWindows()
| 33.661538 | 91 | 0.612431 | 286 | 2,188 | 4.513986 | 0.451049 | 0.069713 | 0.03718 | 0.044152 | 0.201394 | 0.17196 | 0.108443 | 0 | 0 | 0 | 0 | 0.036825 | 0.280165 | 2,188 | 64 | 92 | 34.1875 | 0.782857 | 0.094607 | 0 | 0.173913 | 0 | 0 | 0.061772 | 0 | 0 | 0 | 0.002025 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.108696 | 0 | 0.152174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56a0677ee2c20f71870059ac35a9ec0979418868 | 3,412 | py | Python | mllearn/problem_transform/klabelsets.py | Lxinyuelxy/multi-label-learn | ab347e9c9ccac1503f22c7b76e0b3e9a4e8214da | [
"MIT"
] | 4 | 2018-11-19T13:34:53.000Z | 2020-01-11T11:58:13.000Z | mllearn/problem_transform/klabelsets.py | Lxinyuelxy/multi-label-learn | ab347e9c9ccac1503f22c7b76e0b3e9a4e8214da | [
"MIT"
] | null | null | null | mllearn/problem_transform/klabelsets.py | Lxinyuelxy/multi-label-learn | ab347e9c9ccac1503f22c7b76e0b3e9a4e8214da | [
"MIT"
] | 3 | 2019-04-14T18:13:33.000Z | 2021-04-05T14:45:56.000Z | import copy
import random
import numpy as np
from sklearn.svm import SVC
class RandomKLabelsets:
"""RandomKLabelsets
Reference Paper:
Min-Ling Zhang and Zhi-Hua Zhou. A Review on Multi-Label Learning Algorithms
"""
def __init__(self, classifier=SVC(kernel='rbf')):
self.classifier = classifier
def fit(self, X, y, k=3, n=0):
self.m = X.shape[0]
self.label_count = y.shape[1]
self.k = self.chooseLabelsetsSize(k)
self.n = self.chooseLabelsetsNum(n)
self.k_labelsets = np.zeros((self.n, self.label_count))
self.classifiers = []
for i in range(self.n):
classifier = copy.deepcopy(self.classifier)
k_labelset = self.generateRandomK_labelsets()
y_subset = self.getSubsetOfy(y, k_labelset)
classifier.fit(X, self.transform(y_subset))
self.classifiers.append(classifier)
self.k_labelsets[i, :] = k_labelset
return self
def predict(self, X_pre):
result = np.zeros((X_pre.shape[0], self.label_count))
ysubsets = []
for i in range(self.n):
ysubsets.append(self.inverse_transform(self.classifiers[i].predict(X_pre)))
for sample in range(X_pre.shape[0]):
for label in range(self.label_count):
maxVotes = 0
actualVotes = 0
for i in range(self.n):
if ysubsets[i][sample, label] == 1:
actualVotes += 1
if self.k_labelsets[i, label] == 1:
maxVotes += 1
if (actualVotes/maxVotes) > 0.5:
result[sample][label] = 1
return result
def chooseLabelsetsSize(self, k):
if k > self.label_count:
raise ValueError('the given size of labelsets is exceed')
else:
return k
def chooseLabelsetsNum(self, n):
if n == 0:
n = 2*self.label_count
mostLabelsetsNum = 1
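        # C(label_count, k): the number of distinct k-labelsets that can exist.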
for i in range(self.k):
mostLabelsetsNum = mostLabelsetsNum * (self.label_count-i) / (self.k-i)
return min(n, mostLabelsetsNum)
def generateRandomK_labelsets(self):
labelIndexes = set()
labelset = np.zeros(self.label_count)
while len(labelIndexes) < self.k:
randomIndex = random.randint(0,self.label_count-1)
labelIndexes.add(randomIndex)
labelset[randomIndex] = 1
return labelset
def getSubsetOfy(self, y, k_labelset):
y_subset = np.zeros((self.m, self.label_count))
for sample in range(self.m):
for index in range(self.label_count):
if y[sample, index]==1 and k_labelset[index]==1:
y_subset[sample, index] = 1
return y_subset
def transform(self, y_subset):
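        # Label-powerset trick: encode each binary label row as one integer
        # class so a standard single-label classifier can be trained on it.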
result = np.zeros(y_subset.shape[0])
for i in range(y_subset.shape[0]):
for j in range(y_subset.shape[1]):
result[i] += y_subset[i][j] * (2**j)
return result
def inverse_transform(self, y):
result = np.zeros((y.shape[0], self.label_count))
for row in range(result.shape[0]):
number = y[row]
for col in range(result.shape[1]):
result[row][col] = number % 2
number = int(number/2)
return result
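# Usage sketch (y is a binary indicator matrix of shape [n_samples, n_labels]):
#   clf = RandomKLabelsets().fit(X_train, y_train, k=3)
#   y_pred = clf.predict(X_test)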
| 36.297872 | 87 | 0.566823 | 425 | 3,412 | 4.451765 | 0.225882 | 0.057082 | 0.088795 | 0.02907 | 0.118922 | 0.02537 | 0 | 0 | 0 | 0 | 0 | 0.014354 | 0.326202 | 3,412 | 93 | 88 | 36.688172 | 0.808612 | 0.033411 | 0 | 0.075 | 0 | 0 | 0.012199 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1125 | false | 0 | 0.05 | 0 | 0.275 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56a0e67715f2ad6066c4212bdf3b6c7670e86244 | 406 | py | Python | users/tests/test_view.py | VladaDidko/skill- | 861c08376e2bc9b9a5a44e3a8560324ee53ce2d0 | [
"Unlicense"
] | null | null | null | users/tests/test_view.py | VladaDidko/skill- | 861c08376e2bc9b9a5a44e3a8560324ee53ce2d0 | [
"Unlicense"
] | 18 | 2019-05-28T17:20:34.000Z | 2022-03-11T23:50:12.000Z | users/tests/test_view.py | VladaDidko/skill- | 861c08376e2bc9b9a5a44e3a8560324ee53ce2d0 | [
"Unlicense"
] | 3 | 2019-05-27T09:51:54.000Z | 2019-12-12T20:35:29.000Z | from django.test import TestCase, Client
from django.urls import reverse
class TestViews(TestCase):
def setUp(self):
self.client = Client()
self.register_url = reverse('register')
self.profile_url = reverse('profile')
def test_register(self):
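        # A GET on the register page should succeed and render the expected template.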
response = self.client.get(self.register_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'users/register.html') | 29 | 58 | 0.768473 | 53 | 406 | 5.792453 | 0.471698 | 0.065147 | 0.09772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008357 | 0.115764 | 406 | 14 | 58 | 29 | 0.846797 | 0 | 0 | 0 | 0 | 0 | 0.083538 | 0 | 0 | 0 | 0 | 0 | 0.181818 | 1 | 0.181818 | false | 0 | 0.181818 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56a142df9367848a23bc2307ae8b5ba73cf7b0ac | 976 | py | Python | incidences/forms.py | atlasfoo/risk_audit_websys | df43a48699b16d0d0bade3f597d889bfe20eda7b | [
"MIT"
] | null | null | null | incidences/forms.py | atlasfoo/risk_audit_websys | df43a48699b16d0d0bade3f597d889bfe20eda7b | [
"MIT"
] | 13 | 2021-05-28T05:22:16.000Z | 2021-06-02T05:49:07.000Z | incidences/forms.py | atlasfoo/risksys | df43a48699b16d0d0bade3f597d889bfe20eda7b | [
"MIT"
] | null | null | null | from django import forms
from incidences.models import Incidence
class IncidenceForm(forms.ModelForm):
class Meta:
model = Incidence
fields = ['name', 'description', 'risk', 'causes', 'effects', 'controls']
widgets = {
'name': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Incidencia'}),
'description': forms.TextInput(attrs={'class': 'form-control'}),
'risk': forms.Select(attrs={'class': 'form-select'}),
'causes': forms.SelectMultiple(attrs={'class': 'form-select'}),
'effects': forms.SelectMultiple(attrs={'class': 'form-select'}),
'controls': forms.SelectMultiple(attrs={'class': 'form-select'}),
}
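        # 'controls' is left out of labels below, so it keeps its model-defined
        # verbose name.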
labels = {
'name': '',
'description': 'Descripción del evento',
'risk': 'Riesgo asociado',
'causes': 'Causas manifestadas',
'effects': 'Consecuencias manifestadas',
}
| 39.04 | 98 | 0.571721 | 84 | 976 | 6.642857 | 0.440476 | 0.107527 | 0.150538 | 0.143369 | 0.335125 | 0.335125 | 0 | 0 | 0 | 0 | 0 | 0 | 0.259221 | 976 | 24 | 99 | 40.666667 | 0.771784 | 0 | 0 | 0 | 0 | 0 | 0.320697 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.095238 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56a2df9338d095c9e041cd414ec3dfeb1e4f74ab | 2,206 | py | Python | Detector.py | Corzair1/EyeC | 0e90f8d296833c6d4b9d8eeeeed48d3a05d52ffb | [
"MIT"
] | null | null | null | Detector.py | Corzair1/EyeC | 0e90f8d296833c6d4b9d8eeeeed48d3a05d52ffb | [
"MIT"
] | null | null | null | Detector.py | Corzair1/EyeC | 0e90f8d296833c6d4b9d8eeeeed48d3a05d52ffb | [
"MIT"
] | null | null | null | import cv2 as cv
import numpy as np
from urllib.request import urlopen
import os
import datetime
import time
import sys
#change to your ESP32-CAM ip
url="http://192.168.31.184:81/stream"
CAMERA_BUFFER_SIZE = 4096
stream=urlopen(url)
bts=b''
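# Accumulate the MJPEG byte stream and carve out complete JPEG frames
# delimited by the 0xFFD8 (start) and 0xFFD9 (end) markers.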
while True:
try:
while True:
            bts += stream.read(CAMERA_BUFFER_SIZE)
jpghead=bts.find(b'\xff\xd8')
jpgend=bts.find(b'\xff\xd9')
if jpghead>-1 and jpgend>-1:
jpg=bts[jpghead:jpgend+2]
bts=bts[jpgend+2:]
img=cv.imdecode(np.frombuffer(jpg,dtype=np.uint8),cv.IMREAD_UNCHANGED)
                # Flip both axes (180-degree rotation); use flip code 0 or 1
                # instead of -1 for vertical- or horizontal-only correction.
                frame = cv.flip(img, -1)
img=cv.resize(frame,(480,320))
img = img[0:200, 60:300]
                img = cv.rotate(img, cv.ROTATE_90_CLOCKWISE)
rows, cols, _ = img.shape
gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
gray_img = cv.GaussianBlur(gray_img, (7, 7), 0)
_, threshold = cv.threshold(gray_img, 70, 255, cv.THRESH_BINARY_INV)
contours, hierarchy = cv.findContours(threshold, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
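                # Keep only the largest contour, assumed to be the pupil.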
contours = [max(contours, key = cv.contourArea)]
for c in contours:
M = cv.moments(c)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
# draw the contour and center of the shape on the image
cv.circle(gray_img, (cX, cY), 7, (255, 255, 255), -1)
cv.putText(gray_img, "center", (cX - 20, cY - 20),
cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
#gray_img = cv.drawContours(gray_img, contours, -1, (0,255,0), 3)
cv.imshow('contoured', gray_img)
k=cv.waitKey(1)
if k & 0xFF==ord('q'):
exit()
cv.destroyAllWindows()
    except Exception as e:
        # Log and keep reading; transient stream glitches are common.
        print(f'Stream error: {e}')
56a3e14dbfe824cc296d28795afa04041f550530 | 3,489 | py | Python | imdb/imdb/spiders/imdb_3.py | KarolinaSzwedo/WebscrapingProject | fb59c476df8632a449290f9a4374501673729d7c | [
"MIT"
] | 1 | 2021-05-02T20:21:26.000Z | 2021-05-02T20:21:26.000Z | imdb/imdb/spiders/imdb_3.py | KarolinaSzwedo/WebscrapingProject | fb59c476df8632a449290f9a4374501673729d7c | [
"MIT"
] | null | null | null | imdb/imdb/spiders/imdb_3.py | KarolinaSzwedo/WebscrapingProject | fb59c476df8632a449290f9a4374501673729d7c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
class Movie(scrapy.Item):
# define all items to scrape
title = scrapy.Field()
genres = scrapy.Field()
when = scrapy.Field()
director = scrapy.Field()
stars = scrapy.Field()
country = scrapy.Field()
language = scrapy.Field()
writer = scrapy.Field()
url = scrapy.Field()
time = scrapy.Field()
class MovieSpider(scrapy.Spider):
name = 'movies' # name of the Spider
allowed_domains = ['imdb.com']
try:
# links.csv - list with coming soon movies
with open("links.csv", "rt") as file:
# read each line from the file without the first one
start_urls = [url.strip() for url in file.readlines()][1:]
    except OSError:
start_urls = []
# 'imdb.pipelines.DuplicatesPipelineItems': 300 - calls DuplicatesPipelineItems class from pipelines.py file (this will filter duplicated links in all movies)
    # 'CLOSESPIDER_PAGECOUNT': 100 - caps the crawl at 100 pages (delay in scrapy response - more information in project description)
# 'DEPTH_LIMIT': 1 - allows scrapy to go only to one next page
custom_settings = {'ITEM_PIPELINES': {'imdb.pipelines.DuplicatesPipelineItems': 300}, 'CLOSESPIDER_PAGECOUNT': 100, 'DEPTH_LIMIT': 1}
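    # Note: with DEPTH_LIMIT set to 1, each seed page follows at most one
    # "more like this" link before the crawl stops expanding.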
def parse(self, response):
# scrape all information about movie
f = Movie()
# get xpaths to items
title_xpath = '//h1/text()'
genres_xpath = '//div[@class="subtext"]/a[re:test(@href, "(genres){1}")]/text()'
when_xpath = '//div[@class="subtext"]/a[re:test(text(), "[0-9]+\s+[A-Za-z]+\s+[0-9]+")]/text()'
director_xpath = '//h4[re:test(text(), "(Director)")]/following-sibling::a/text()'
stars_xpath = '//h4[text()="Stars:"]/following-sibling::a[re:test(@href, "name")]/text()'
country_xpath = '//h4[text()="Country:"]/following-sibling::a/text()'
language_xpath = '//h4[text()="Language:"]/following-sibling::a/text()'
writer_xpath = '//h4[re:test(text(), "(Writer)")]/following-sibling::a[re:test(@href, "name")]/text()'
time_xpath = '//h4[text()="Runtime:"]/following-sibling::time/text()'
f['url'] = response.url
f['title'] = [x.strip() for x in response.xpath(title_xpath).getall()]
f['genres'] = response.xpath(genres_xpath).getall()
f['when'] = [x.strip() for x in response.xpath(when_xpath).getall()]
f['director'] = response.xpath(director_xpath).getall()
f['stars'] = response.xpath(stars_xpath).getall()
f['country'] = response.xpath(country_xpath).getall()
f['language'] = response.xpath(language_xpath).getall()
f['writer'] = response.xpath(writer_xpath).getall()
f['time'] = response.xpath(time_xpath).getall()
yield f
# after scraping page of "coming soon" movie go to the first movie from "more like this" section
# get link to movie from "more like this" section
next_page = response.xpath('//div[re:test(@data-tconst, "tt")]/div/a/@href').extract_first()
if next_page:
next_page = response.urljoin(next_page)
# go to the next page and call parse function to get all items from page
            yield scrapy.Request(url=next_page, callback=self.parse)
| 50.565217 | 162 | 0.5953 | 431 | 3,489 | 4.742459 | 0.310905 | 0.053816 | 0.046967 | 0.016145 | 0.129159 | 0.112524 | 0.085127 | 0.034247 | 0 | 0 | 0 | 0.011864 | 0.251075 | 3,489 | 68 | 163 | 51.308824 | 0.770379 | 0.220407 | 0 | 0 | 0 | 0.083333 | 0.274575 | 0.196231 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020833 | false | 0 | 0.041667 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56a469688cdd8e5eda3a9186b703e25f8c24b34a | 14,742 | py | Python | main.py | odgon/monitoring-vertica | 300cc2bbe490dddc331475732cb6d5766a128efb | [
"MIT"
] | 3 | 2020-07-29T19:30:25.000Z | 2022-03-20T13:57:28.000Z | main.py | odgon/monitoring-vertica | 300cc2bbe490dddc331475732cb6d5766a128efb | [
"MIT"
] | null | null | null | main.py | odgon/monitoring-vertica | 300cc2bbe490dddc331475732cb6d5766a128efb | [
"MIT"
] | null | null | null | from fastapi import FastAPI
from vc import vc
import json
from fastapi.openapi.utils import get_openapi
from fastapi.openapi.docs import (
get_redoc_html,
get_swagger_ui_html,
get_swagger_ui_oauth2_redirect_html,
)
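# Vertica connection settings are read once at import time from config.json.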
with open('config.json') as jf:
d = json.load(jf)
vh = d['vertica']['host']
vpo = d['vertica']['port']
vu = d['vertica']['user']
vp = d['vertica']['password']
vd = d['vertica']['database']
class connection(vc):
ci = {'host': vh,
'port': vpo,
'user': vu,
'password': vp,
'database': vd,
'read_timeout': 100}
    def go(self, query):
        self.query(query)
        r = self.fetchall()
        self.close()
        return r

    def custom(self, query, commit):
        self.query(query)
        r = self.fetchall()
        if commit:
            self.commit()
        self.close()
        return r
app = FastAPI(title="Monitoring Vertica", docs_url=None, redoc_url=None)
@app.get("/docs", include_in_schema=False)
async def custom_swagger_ui_html():
return get_swagger_ui_html(
openapi_url=app.openapi_url,
title=app.title + " - Swagger UI",
oauth2_redirect_url=app.swagger_ui_oauth2_redirect_url,
swagger_js_url="https://cdn.jsdelivr.net/npm/swagger-ui-dist@3/swagger-ui-bundle.js",
swagger_css_url="https://cdn.jsdelivr.net/npm/swagger-ui-dist@3/swagger-ui.css",
swagger_favicon_url="https://cdn.jsdelivr.net/npm/swagger-ui-dist@3/favicon-32x32.png",
)
@app.get(app.swagger_ui_oauth2_redirect_url, include_in_schema=False)
async def swagger_ui_redirect():
return get_swagger_ui_oauth2_redirect_html()
@app.get("/redoc", include_in_schema=False)
async def redoc_html():
return get_redoc_html(
openapi_url=app.openapi_url,
title=app.title + " - ReDoc",
redoc_js_url="https://cdn.jsdelivr.net/npm/redoc@next/bundles/redoc.standalone.js",
redoc_favicon_url="https://cdn.jsdelivr.net/npm/swagger-ui-dist@3/favicon-32x32.png",
)
@app.get("/", tags=["index"])
def read_root():
return {"Hello": "World"}
@app.get("/query/{content}", tags=["query"])
def custom_query(content: str, commit: bool = False):
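    # Caution: this runs arbitrary SQL taken from the URL path; keep it behind
    # authentication or disable it outside trusted environments.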
v = connection()
try:
r = v.custom(content, commit)
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/node/status", tags=["System Health"])
def node_status():
v = connection()
try:
r = v.go("""SELECT node_name,
node_state
FROM nodes
ORDER BY 1;""")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/epoch/status", tags=["System Health"])
def epoch_status():
v = connection()
try:
r = v.go("""SELECT current_epoch,
ahm_epoch,
last_good_epoch,
designed_fault_tolerance,
current_fault_tolerance,
wos_used_bytes,
ros_used_bytes FROM system;""")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/delete/vector/count", tags=["System Health"])
def gather_the_total_count_of_delete_vectors_for_the_system():
v = connection()
try:
r = v.go("SELECT COUNT(*) FROM v_monitor.delete_vectors;")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/delete/vector", tags=["System Health"])
def delete_vector():
v = connection()
try:
r = v.go("""SELECT node_name,
schema_name,
projection_name,
total_row_count,
deleted_row_count,
delete_vector_count
FROM storage_containers
WHERE deleted_row_count > total_row_count*.05::float
ORDER BY deleted_row_count desc;""")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/delete/vector/ros/containers", tags=["System Health"])
def view_the_number_of_ROS_containers_per_projection_per_node():
v = connection()
try:
r = v.go("""SELECT node_name,
projection_schema,
projection_name,
SUM(ros_count) AS ros_count
FROM v_monitor.projection_storage
GROUP BY node_name,
projection_schema,
projection_name
ORDER BY ros_count DESC;""")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/resource/pools", tags=["Resource Usage"])
def resource_pools():
v = connection()
try:
r = v.go("""SELECT sysdate AS current_time,
node_name,
pool_name,
memory_inuse_kb,
general_memory_borrowed_kb,
running_query_count
FROM resource_pool_status
WHERE pool_name IN ('general')
ORDER BY 1,2,3;""")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/query/excessive/{memory}", tags=["Resource Usage"])
def monitor_if_a_query_is_taking_excessive_memory_resource_and_causing_the_cluster_to_slow_down(memory: int):
v = connection()
try:
r = v.go(
f"SELECT * FROM resource_acquisitions ORDER BY memory_inuse_kb desc limit {memory};")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/resource/pools/queue/status", tags=["Resource Usage"])
def resource_pool_queue_status():
v = connection()
try:
r = v.go("SELECT * FROM v_monitor.resource_queues;")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/resource/request/rejections", tags=["Resource Usage"])
def resource_request_rejections():
v = connection()
try:
r = v.go("SELECT * FROM v_monitor.resource_rejections;")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/resource/bottleneck", tags=["Resource Usage"])
def resource_bottleneck():
v = connection()
try:
r = v.go(
"SELECT * FROM v_monitor.system_resource_usage ORDER BY end_time DESC;")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/storage/space", tags=["Resource Usage"])
def storage_space_availability():
v = connection()
try:
r = v.go(
"SELECT * FROM v_monitor.storage_usage ORDER BY poll_timestamp DESC;")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/active/sessions", tags=["Active Sessions"])
def active_sessions():
v = connection()
try:
r = v.go(
"SELECT user_name, session_id, current_statement, statement_start FROM v_monitor.sessions;")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/active/sessions/close/{session_id}", tags=["Active Sessions"])
def close_the_active_sessions(session_id: str):
v = connection()
try:
r = v.go(f"SELECT close_session ('{session_id}');")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/running/queries", tags=["Active Queries"])
def get_a_list_of_queries_executing_at_the_moment():
v = connection()
try:
r = v.go("""SELECT node_name,
query,
query_start,
user_name,
is_executing
FROM v_monitor.query_profiles
WHERE is_executing = 't';""")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/load/status", tags=["Active Queries"])
def check_the_loading_progress_of_active_and_historical_queries():
v = connection()
try:
r = v.go("""SELECT table_name,
read_bytes,
input_file_size_bytes,
accepted_row_count,
rejected_row_count,
parse_complete_percent,
sort_complete_percent
FROM load_streams
WHERE is_executing = 't'
ORDER BY table_name;""")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/lock/status", tags=["Active Queries"])
def a_query_with_no_results_indicates_that_no_locks_are_in_use():
v = connection()
try:
r = v.go("""SELECT locks.lock_mode,
locks.lock_scope,
substr(locks.transaction_description, 1, 100) AS "left",
locks.request_timestamp,
locks.grant_timestamp
FROM v_monitor.locks;""")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/recovery/status", tags=["Recovery"])
def node_recovery_status():
v = connection()
try:
r = v.go("""SELECT node_name,
recover_epoch,
recovery_phase,
current_completed,
current_total,
is_running
FROM v_monitor.recovery_status
ORDER BY 1;""")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/rebalance/status", tags=["Rebalance"])
def rebalance_status():
v = connection()
try:
r = v.go("SELECT GET_NODE_DEPENDENCIES();")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/overall/progress/rebalance/operation", tags=["Rebalance"])
def progress_of_each_currently_executing_rebalance_operation():
v = connection()
try:
r = v.go("""SELECT rebalance_method
Rebalance_method,
Status,
COUNT(*) AS Count
FROM
( SELECT rebalance_method,
CASE WHEN (separated_percent = 100 AND transferred_percent = 100)
THEN 'Completed'
WHEN ( separated_percent <> 0 and separated_percent <> 100)
OR (transferred_percent <> 0 AND transferred_percent <> 100)
THEN 'In Progress'
ELSE 'Queued'
END AS Status
FROM v_monitor.rebalance_projection_status
WHERE is_latest)
AS tab
GROUP BY 1, 2
ORDER BY 1, 2;""")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/execution/time/{limit}", tags=["Historical Activities"])
def queries_based_on_execution_time(limit: int):
v = connection()
try:
r = v.go(f"""SELECT user_name,
start_timestamp,
request_duration_ms,
transaction_id,
statement_id,
substr(request, 0, 1000) as request
FROM v_monitor.query_requests
WHERE transaction_id > 0
ORDER BY request_duration_ms
DESC limit {limit};""")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/memory/usage", tags=["Historical Activities"])
def memory_usage_for_a_particular_query():
v = connection()
try:
r = v.go("""SELECT node_name,
transaction_id,
statement_id,
user_name,
start_timestamp,
request_duration_ms,
memory_acquired_mb,
substr(request, 1, 100) AS request
FROM v_monitor.query_requests
WHERE transaction_id = transaction_id
AND statement_id = statement_id;""")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/partitions", tags=["Object Statistics"])
def view_the_partition_count_per_node_per_projection():
v = connection()
try:
r = v.go("""SELECT node_name,
projection_name,
count(partition_key)
FROM v_monitor.partitions
GROUP BY node_name,
projection_name
ORDER BY node_name,
projection_name;""")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/segmentation/data/skew", tags=["Object Statistics"])
def view_the_row_count_per_segmented_projection_per_node():
v = connection()
try:
r = v.go("""SELECT ps.node_name,
ps.projection_schema,
ps.projection_name,
ps.row_count
FROM v_monitor.projection_storage ps
INNER JOIN v_catalog.projections p
ON ps.projection_schema = p.projection_schema
AND ps.projection_name = p.projection_name
WHERE p.is_segmented
ORDER BY ps.projection_schema,
ps.projection_name,
ps.node_name;""")
except Exception as e:
return {"error": e}
return {"data": r}
@app.get("/load/streams", tags=["Performance"])
def view_the_performance_of_load_streams():
v = connection()
try:
r = v.go("""SELECT schema_name,
table_name,
load_start,
load_duration_ms,
is_executing,
parse_complete_percent,
sort_complete_percent,
accepted_row_count,
rejected_row_count
FROM v_monitor.load_streams;""")
except Exception as e:
return {"error": e}
return {"data": r}
def custom_openapi(openapi_prefix: str):
if app.openapi_schema:
return app.openapi_schema
openapi_schema = get_openapi(
title="Monitoring Vertica",
version="0.0.1",
description="Vertica api <br><br> Project launched for test the <a href='https://fastapi.tiangolo.com/' target='_blank'>FastAPI</a> <br><br> Based on: <a href='https://www.vertica.com/kb/Best-Practices-for-Monitoring-Vertica/Content/BestPractices/BestPracticesforMonitoringVertica.htm' target='_blank'>Best Practices for Monitoring Vertica</a>",
routes=app.routes,
openapi_prefix=openapi_prefix,
)
openapi_schema["info"]["x-logo"] = {
"url": "https://upload.wikimedia.org/wikipedia/commons/thumb/0/03/Vertica_pos_blk_rgb.svg/300px-Vertica_pos_blk_rgb.svg.png"
}
app.openapi_schema = openapi_schema
return app.openapi_schema
app.openapi = custom_openapi
| 31.035789 | 353 | 0.574413 | 1,737 | 14,742 | 4.64882 | 0.173287 | 0.043344 | 0.043344 | 0.04644 | 0.482848 | 0.418947 | 0.344644 | 0.301672 | 0.290402 | 0.254241 | 0 | 0.006691 | 0.310609 | 14,742 | 474 | 354 | 31.101266 | 0.787858 | 0 | 0 | 0.441975 | 0 | 0.017284 | 0.526591 | 0.06824 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071605 | false | 0.004938 | 0.012346 | 0.002469 | 0.232099 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b05ff6c3393fdf9cff7387c667789a685e86381 | 6,465 | py | Python | drive.py | 7th-mod-korea/when_they_cry_converter | 92956d40c02ece1b0536fbddc9799553e11af93c | [
"MIT"
] | 1 | 2020-03-10T01:16:34.000Z | 2020-03-10T01:16:34.000Z | drive.py | 7th-mod-korea/when_they_cry_converter | 92956d40c02ece1b0536fbddc9799553e11af93c | [
"MIT"
] | null | null | null | drive.py | 7th-mod-korea/when_they_cry_converter | 92956d40c02ece1b0536fbddc9799553e11af93c | [
"MIT"
] | null | null | null | from __future__ import print_function
import pickle
import os.path
import sys
import hashlib
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from apiclient import errors
from googleapiclient.http import MediaIoBaseDownload, MediaFileUpload
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/drive']
TRANSLATION_FOLDER_ID = '1Q8BO4CB6tGk-hpYsPOq_Tc6FVPYqP5JA'
#TRANSLATION_FOLDER_ID = '1W7Yxvl3WRzZ1fDbuim8EPediY0qrxWBe'
def get_files(service, folderId, files, filter_folder_name):
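    # Recursively walk a Drive folder: xlsx files in the current folder are
    # collected under the '.' key; subfolders become nested dicts by name.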
page_token = None
while True:
try:
param = {}
if page_token:
param['pageToken'] = page_token
children = service.files().list(
fields='files(id, name, mimeType, md5Checksum)',
q=f"'{folderId}' in parents and trashed = false",
**param).execute()
for child in children['files']:
mimeType = child['mimeType']
if mimeType == 'application/vnd.google-apps.folder':
sub_folder_name = child['name']
print(f"searching {sub_folder_name}")
if filter_folder_name and sub_folder_name != filter_folder_name:
continue
files[sub_folder_name] = {}
get_files(service, child['id'], files[sub_folder_name], None)
# xlsx
elif mimeType == 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet':
present_files = files.get('.', [])
present_files.append(child)
files['.'] = present_files
elif mimeType == 'text/plain':
pass
else:
print(f"unexpected mimeType {mimeType} found, {child['name']}", file=sys.stderr)
page_token = children.get('nextPageToken')
if not page_token:
break
except errors.HttpError as error:
            print(f'An error occurred: {error}')
break
def get_creds():
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
return creds
def download_folder(drive_service, tree, folder_path):
downloaders = []
for folder_name, contents in tree.items():
parent_folder = os.path.normpath(os.path.join(folder_path, folder_name))
if folder_name != '.':
downloaders.extend(download_folder(drive_service, contents, parent_folder))
continue
for file in contents:
if not os.path.exists(parent_folder):
os.mkdir(parent_folder)
local_file_path = os.path.join(parent_folder, file['name'])
if os.path.exists(local_file_path):
with open(local_file_path, 'rb') as local_file_fd:
local_md5 = hashlib.md5(local_file_fd.read()).hexdigest()
if local_md5 == file['md5Checksum']:
continue
print(f"Downloading {file['name']} at {local_file_path}")
request = drive_service.files().get_media(fileId=file['id'])
fd = open(local_file_path, 'wb')
downloaders.append((MediaIoBaseDownload(fd, request), fd))
return downloaders
def upload_folder(drive_service, tree, folder_path):
for folder_name, contents in tree.items():
parent_folder = os.path.normpath(os.path.join(folder_path, folder_name))
if folder_name != '.':
upload_folder(drive_service, contents, parent_folder)
continue
for file in contents:
local_file_path = os.path.join(parent_folder, file['name'])
if not os.path.exists(local_file_path):
print(f"{local_file_path} not exist")
continue
with open(local_file_path, 'rb') as local_file_fd:
local_md5 = hashlib.md5(local_file_fd.read()).hexdigest()
if local_md5 == file['md5Checksum']:
continue
print(f"Uploading {local_file_path}")
file = drive_service.files().update(fileId=file['id'],
media_body=MediaFileUpload(local_file_path)
).execute()
def download_drive(local_folder, filter_folder_name=None):
creds = get_creds()
drive_service = build('drive', 'v3', credentials=creds)
root = {}
get_files(drive_service, TRANSLATION_FOLDER_ID, root, filter_folder_name)
downloaders = download_folder(drive_service, root, local_folder)
while downloaders:
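        # Advance up to 10 downloads at a time, one chunk each, until all
        # files are complete.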
for item in downloaders[:10]:
down, fd = item
try:
status, done = down.next_chunk()
except errors.HttpError:
print(f"Failed to downloading {fd.name}")
raise
if done:
fd.close()
downloaders.remove(item)
def upload_drive(local_folder, filter_folder_name=None):
creds = get_creds()
drive_service = build('drive', 'v3', credentials=creds)
root = {}
get_files(drive_service, TRANSLATION_FOLDER_ID, root, filter_folder_name)
upload_folder(drive_service, root, local_folder)
if __name__ == '__main__':
if sys.argv[1] == 'download':
download_drive(f"{os.path.pardir}{os.path.sep}Drive", sys.argv[2] if len(sys.argv) >= 3 else None)
elif sys.argv[1] == 'upload':
upload_drive(f"{os.path.pardir}{os.path.sep}Drive", sys.argv[2] if len(sys.argv) >= 3 else None) | 39.662577 | 106 | 0.605878 | 746 | 6,465 | 5.049598 | 0.257373 | 0.047783 | 0.037961 | 0.020706 | 0.341917 | 0.336607 | 0.285638 | 0.285638 | 0.285638 | 0.285638 | 0 | 0.00701 | 0.29389 | 6,465 | 163 | 107 | 39.662577 | 0.818182 | 0.058778 | 0 | 0.27907 | 0 | 0 | 0.1221 | 0.032911 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0.007752 | 0.077519 | 0 | 0.139535 | 0.062016 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b08aa7fb58998cc3b6424f138688be5f547dfe9 | 15,841 | py | Python | minecraftcogs/chatrelay.py | jinkhya/Charfred_Cogs | d6afc4c02e668c046ba40e9a7afae68004658f6d | [
"MIT"
] | null | null | null | minecraftcogs/chatrelay.py | jinkhya/Charfred_Cogs | d6afc4c02e668c046ba40e9a7afae68004658f6d | [
"MIT"
] | null | null | null | minecraftcogs/chatrelay.py | jinkhya/Charfred_Cogs | d6afc4c02e668c046ba40e9a7afae68004658f6d | [
"MIT"
] | null | null | null | import logging
import asyncio
from concurrent.futures import CancelledError
from discord.ext import commands
from utils import Config, permission_node
log = logging.getLogger('charfred')
formats = {
'MSG': '[**{}**] {}: {}',
'STF': '**{}**: {}',
'DTH': '[**{}**] {} {}',
'ME': '[**{}**] {}: {}',
'SAY': '[**{}**] {}: {}',
'SYS': '{}'
}
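# Discord-side display templates, keyed by the relay protocol's message type code.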
def escape(string):
    # The duplicated '::' replace catches separator pairs re-formed by runs of
    # three or more colons.
    return string.strip().replace('\n', '\\n').replace('::', ':\\:').replace('::', ':\\:')
class ChatRelay(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.loop = bot.loop
self.server = None
self.inqueue = asyncio.Queue(maxsize=64, loop=self.loop)
self.clients = {}
self.inqueue_worker_task = None
self.relaycfg = Config(f'{bot.dir}/configs/chatrelaycfg.toml',
load=True, loop=self.loop)
if 'ch_to_clients' not in self.relaycfg:
self.relaycfg['ch_to_clients'] = {}
self.relaycfg._save()
if 'client_to_ch' not in self.relaycfg:
self.relaycfg['client_to_ch'] = {}
self.relaycfg._save()
def cog_unload(self):
if self.server:
log.info('CR: Closing relay server.')
self.server.close()
if self.inqueue_worker_task:
self.inqueue_worker_task.cancel()
if self.clients:
for client in self.clients.values():
try:
client['workers'][0].cancel()
client['workers'][1].cancel()
except KeyError:
pass
self.loop.create_task(self.server.wait_closed())
@commands.Cog.listener()
async def on_message(self, message):
if self.server is None: # Don't even do anything if the server isn't running.
return
if message.author.bot or (message.guild is None):
return
ch_id = str(message.channel.id)
if message.content and (ch_id in self.relaycfg['ch_to_clients']):
# Check whether the message is a command, as determined
# by having a valid prefix, and don't proceed if it is.
prefix = await self.bot.get_prefix(message)
if isinstance(prefix, str):
if message.content.startswith(prefix):
return
else:
try:
if message.content.startswith(tuple(prefix)):
return
except TypeError:
# If we get here, then the prefixes are borked.
raise
content = f'MSG::Discord::{escape(message.author.display_name)}:' \
f':{escape(message.clean_content)}::\n'
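            # Priority 5 marks ordinary chat; lower numbers in each client's
            # PriorityQueue would be delivered first.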
for client in self.relaycfg['ch_to_clients'][ch_id]:
try:
self.clients[client]['queue'].put_nowait((5, content))
except KeyError:
pass
except asyncio.QueueFull:
pass
@commands.group(invoke_without_command=True)
async def chatrelay(self, ctx):
"""Minecraft chat relay commands.
This returns a list of all Minecraft servers currently
connected and what channel they're linked to.
"""
info = ['# Chat Relay Status:']
if self.server and self.server.sockets:
info.append('\n# Relay server is online.\n')
else:
info.append('\n< Relay server is offline! >\n')
if self.clients:
info.append('\n# Currently connected clients:')
for client in self.clients:
info.append(f'- {client}')
if self.relaycfg['ch_to_clients']:
info.append('\n# Relay configuration:')
for channel_id, clients in self.relaycfg['ch_to_clients'].items():
channel = self.bot.get_channel(int(channel_id))
info.append(f'{channel.name if channel else channel_id}:')
if clients:
for client in clients:
info.append(f'- {client}')
else:
info.append('\n')
else:
info.append('> No clients configured.\n')
if len(info) == 2:
info.append('> No clients connected, nothing configured.')
await ctx.sendmarkdown('\n'.join(info))
async def incoming_worker(self, reader, client):
log.info(f'CR-Incoming: Worker for {client} started.')
try:
while True:
data = await reader.readline()
if not data:
log.info(f'CR-Incoming: {client} appears to have disconnected!')
break
try:
data = data.decode()
except UnicodeDecodeError as e:
log.info(f'CR-Incoming: {e}')
continue
try:
self.inqueue.put_nowait((client, data))
except asyncio.QueueFull:
                    log.warning('CR-Incoming: Incoming queue full, message dropped!')
except CancelledError:
raise
finally:
log.info(f'CR-Incoming: Worker for {client} exited.')
async def outgoing_worker(self, writer, client):
log.info(f'CR-Outgoing: Worker for {client} started.')
try:
while True:
try:
_, data = await self.clients[client]['queue'].get()
except (KeyError, AttributeError):
log.error(f'CR-Outgoing: Outqueue for {client} is gone!'
' Connection shutting down!')
break
else:
data = data.encode()
writer.write(data)
await writer.drain()
except CancelledError:
raise
finally:
log.info(f'CR-Outgoing: Worker for {client} exited.')
async def connection_handler(self, reader, writer):
peer = str(writer.get_extra_info("peername"))
log.info(f'CR-Connection: New connection established with {peer}!')
handshake = await reader.readline()
if not handshake:
            log.warning(f'CR-Connection: No handshake from {peer} received!'
                        ' Connection shutting down!')
writer.close()
return
handshake = handshake.decode()
hshk = handshake.split('::')
if hshk[0] == 'HSHK':
try:
client = hshk[1]
except IndexError:
log.warning(f'CR-Connection: Invalid handshake: {handshake}')
client = None
else:
log.warning(f'CR-Connection: Invalid handshake: {handshake}')
client = None
if client is None:
            log.warning('CR-Connection: Using client address as name.')
client = peer
await self.inqueue.put((client, f'SYS::```markdown\n# {client} connected!\n```'))
if client in self.clients and self.clients[client]:
            if 'workers' in self.clients[client]:
log.warning(f'CR-Connection: {client} reconnecting after messy exit, cleaning up!')
for worker in self.clients[client]['workers']:
worker.cancel()
self.clients[client] = {}
self.clients[client]['queue'] = asyncio.PriorityQueue(maxsize=24, loop=self.loop)
in_task = self.loop.create_task(self.incoming_worker(reader, client))
out_task = self.loop.create_task(self.outgoing_worker(writer, client))
self.clients[client]['workers'] = (in_task, out_task)
_, waiting = await asyncio.wait([in_task, out_task],
return_when=asyncio.FIRST_COMPLETED)
for task in waiting:
task.cancel()
try:
baggage = self.clients.pop(client)
except KeyError:
pass
else:
log.info(f'CR-Connection: Outqueue for {client} removed with'
f' {baggage["queue"].qsize()} items.')
writer.close()
log.info(f'CR-Connection: Connection with {client} closed!')
await self.inqueue.put((client, f'SYS::```markdown\n< {client} disconnected! >\n```'))
async def inqueue_worker(self):
log.info('CR-Inqueue: Worker started!')
try:
while True:
client, data = await self.inqueue.get()
# Check if the data has a valid format.
_data = data.split('::')
if _data[0] not in formats:
log.debug(f'CR-Inqueue: Data from {client} with invalid format: {data}')
continue
# If we get here, then the format is valid and we can relay to other clients.
if _data[0] != 'SYS':
for other in self.clients:
if other == client:
continue
try:
self.clients[other]['queue'].put_nowait((5, data))
except KeyError:
pass
except asyncio.QueueFull:
pass
# Check if we have a channel to send this message to.
if client not in self.relaycfg['client_to_ch']:
log.debug(f'CR-Inqueue: No channel for: "{client} : {data}", dropping!')
continue
# If we get here, we have a channel and can process according to format map.
channel = self.bot.get_channel(int(self.relaycfg['client_to_ch'][client]))
if not channel:
log.warning(f'CR-Inqueue: {_data[0]} message from {client} could not be sent.'
' Registered channel does not exist!')
continue
try:
await channel.send(formats[_data[0]].format(*_data[1:]))
except IndexError as e:
log.debug(f'{e}: {data}')
except CancelledError:
raise
finally:
log.info('CR-Inqueue: Worker exited.')
@chatrelay.command(aliases=['start', 'init'])
@permission_node(f'{__name__}.init')
async def initialize(self, ctx, port):
"""This initializes the relay server on the given port,
allowing connections from Minecraft servers to be established.
Be sure to also set up at least one channel to relay chat
to and from, using the 'register' subcommand, otherwise
        chat received from clients will just be dropped!
"""
if self.server:
log.warning('CR: Server already established!')
await ctx.sendmarkdown('> Relay server already running!')
return
self.inqueue_worker_task = self.loop.create_task(self.inqueue_worker())
self.server = await asyncio.start_server(self.connection_handler, '127.0.0.1', port,
loop=self.loop)
log.info('CR: Server started!')
await ctx.sendmarkdown('# Relay server started.')
@chatrelay.command(aliases=['stop'])
@permission_node(f'{__name__}.init')
async def close(self, ctx):
"""This closes the relay server, disconnecting all clients.
"""
if not self.server:
log.info('CR: No server to be closed.')
await ctx.sendmarkdown('> No relay server to be closed.')
return
self.server.close()
if self.inqueue_worker_task:
self.inqueue_worker_task.cancel()
if self.clients:
for client in self.clients.values():
try:
client['workers'][0].cancel()
client['workers'][1].cancel()
except KeyError:
pass
await self.server.wait_closed()
log.info('CR: Server closed!')
self.server = None
await ctx.sendmarkdown('# Relay server closed, all clients disconnected!')
@chatrelay.command(aliases=['listen'])
@permission_node(f'{__name__}.register')
async def register(self, ctx, client: str):
"""Registers a channel to recieve chat from a given client,
and send chat from the channel to the client.
The channel you run this in will be the registered channel.
You can get a list of clients by just running 'chatrelay'
without a subcommand.
"""
channel_id = str(ctx.channel.id)
if client not in self.clients:
await ctx.sendmarkdown('< Client unknown, registering anyway. >\n'
                               '< Please check that you got the name right'
                               ' when the client eventually connects. >')
log.info(f'CR: Trying to register {ctx.channel.name} for {client}.')
if client in self.relaycfg['client_to_ch'] and self.relaycfg['client_to_ch'][client]:
channel = self.bot.get_channel(int(self.relaycfg['client_to_ch'][client]))
if channel == ctx.channel:
await ctx.sendmarkdown(f'> {client} is already registered with this channel!')
else:
await ctx.sendmarkdown(f'< {client} is already registered with {channel.name}! >\n'
'> A client can only be registered to one channel.\n'
'> Please unregister the other channel first!')
return
else:
self.relaycfg['client_to_ch'][client] = channel_id
if channel_id in self.relaycfg['ch_to_clients']:
self.relaycfg['ch_to_clients'][channel_id].append(client)
else:
self.relaycfg['ch_to_clients'][channel_id] = [client]
await self.relaycfg.save()
await ctx.sendmarkdown(f'# {ctx.channel.name} is now registered for'
                               f' receiving chat from, and sending chat to {client}.')
@chatrelay.command(aliases=['unlisten'])
@permission_node(f'{__name__}.register')
async def unregister(self, ctx, client: str):
"""Unregisters a channel from recieving chat from a given
client or sending chat to that client.
The channel you run this in will be the unregistered channel.
You can get a list of clients by just running 'chatrelay'
without a subcommand.
"""
channel_id = str(ctx.channel.id)
log.info(f'CR: Trying to unregister {ctx.channel.name} for {client}.')
if client in self.relaycfg['client_to_ch']:
if self.relaycfg['client_to_ch'][client] == channel_id:
del self.relaycfg['client_to_ch'][client]
else:
await ctx.sendmarkdown(f'< {client} is not registered for this channel! >')
return
try:
self.relaycfg['ch_to_clients'][channel_id].remove(client)
except ValueError:
                log.critical('CR: Relay mapping inconsistency detected!')
raise
else:
await ctx.sendmarkdown('# This channel will no longer send chat to'
                                   f' or receive chat from {client}!')
finally:
await self.relaycfg.save()
else:
await ctx.sendmarkdown(f'> {client} is not registered with any channel.')
def setup(bot):
permission_nodes = ['init', 'register']
bot.register_nodes([f'{__name__}.{node}' for node in permission_nodes])
bot.add_cog(ChatRelay(bot))
| 40.307888 | 99 | 0.540496 | 1,751 | 15,841 | 4.804683 | 0.175328 | 0.037085 | 0.028527 | 0.013075 | 0.370498 | 0.288839 | 0.233092 | 0.176631 | 0.144538 | 0.121716 | 0 | 0.002334 | 0.350862 | 15,841 | 392 | 100 | 40.410714 | 0.815813 | 0.028155 | 0 | 0.375405 | 0 | 0 | 0.221528 | 0.010455 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012945 | false | 0.02589 | 0.016181 | 0.003236 | 0.064725 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b0aff3db58f48e9ba786715261c204ed5990700 | 7,504 | py | Python | Code/SubwayMap.py | VGarK/Mapz | e09654b261ae25fbc73c677432aff5e26f43e42f | [
"MIT"
] | null | null | null | Code/SubwayMap.py | VGarK/Mapz | e09654b261ae25fbc73c677432aff5e26f43e42f | [
"MIT"
] | null | null | null | Code/SubwayMap.py | VGarK/Mapz | e09654b261ae25fbc73c677432aff5e26f43e42f | [
"MIT"
] | null | null | null | # This file has all the functions required to load the information of a city.
# - Definition of the class Station
# - Definition of the class CityInfo
#   - Functions to read the information from files
# - Structure of the information
#
__authors__='TO_BE_FILLED'
__group__='DL01'
# _________________________________________________________________________________________
# Artificial Intelligence
# Degree in Computer Engineering
# Academic year 2016-2017
# Universitat Autonoma de Barcelona
# _________________________________________________________________________________________
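# Example usage (illustrative only -- 'Stations.txt' and 'Time.txt' are
# hypothetical file names following the formats documented below):
#
#   stations = readStationInformation('Stations.txt')
#   connections = readCostTable('Time.txt')
#   city = CityInfo([30, 35, 40], stations, connections,
#                   search_multiple_lines(stations))
#   print_stationList(stations)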
class Station:
# __init__ Constructor of Station Class.
def __init__(self, id, name, line, x, y):
self.id = id # station id
        self.destinationDic = {}    # Dictionary whose keys are the ids of the stations this one is connected to.
                                    # The values are the time costs between the two stations.
self.name = name # station Name
        self.line = int(line)       # line number, parsed to int
self.x = x # coordinate X of the station
self.y = y # coordinate Y of the station
class CityInfo:
# __init__ Constructor of CityInfo class
def __init__(self, vel_lines, station_list, connection_time, multipleLines=0):
self.num_lines=len(vel_lines) # Number of different lines
self.velocity_lines=vel_lines # velocity of each line
        self.max_velocity=max(vel_lines)    # maximum velocity of the subways (fastest line)
        self.min_velocity=min(vel_lines)    # minimum velocity of the subways (slowest line)
        self.max_transfer=20                # slowest transfer time
        self.min_transfer=6                 # fastest transfer time
self.multipleLines=multipleLines
self.StationList =station_list
self.setNextStations(connection_time)
self.walking_velocity = 4
    # setNextStations: Given a stationList (- id, name, line, x, y - information) and the set of possible connections between stations,
    # this function sets the dictionary of possible destinations for each station (including the cost).
def setNextStations( self, connections):
for i in self.StationList:
if int(i.id) in connections:
i.destinationDic.update(connections[int(i.id)])
    def getTransfers(self):
        # Update the min/max transfer times from the connection costs between
        # stations that belong to different lines.
        stations_by_id = {s.id: s for s in self.StationList}
        for station in self.StationList:
            for dest_id, cost in station.destinationDic.items():
                dest = stations_by_id.get(dest_id)
                if dest is not None and station.line != dest.line:
                    self.max_transfer = max(self.max_transfer, cost)
                    self.min_transfer = min(self.min_transfer, cost)
def search_multiple_lines(stationList):
"""
search_multiple_lines: Searches the set of stations that have different lines.
:param
    - stationList: LIST of the stations of the current city (-id, destinationDic, name, line, x, y -)
:return:
- multiplelines: DICTIONARY which relates the different stations with the same name and different id's
(stations that have different metro lines)
"""
multipleLines = {}
for i in stationList:
for j in stationList:
if i.id != j.id:
if i.x == j.x and i.y == j.y:
if i.id in multipleLines:
if j.id not in multipleLines[i.id]:
multipleLines[i.id].append(j.id)
else:
multipleLines[i.id] = []
multipleLines[i.id].append(j.id)
if j.id in multipleLines:
                        if i.id not in multipleLines[j.id]:
multipleLines[j.id].append(i.id)
else:
multipleLines[j.id] = []
multipleLines[j.id].append(i.id)
return multipleLines
# readStationInformation: Given a filename, it reads the information of this file.
# The file should keep the format:
# id <\t> name <\t> line <\t> x <\t> y <\n>
def readStationInformation(filename):
fileMetro = open(filename, 'r')
stationList = []
for line in fileMetro:
information = line.split('\t')
station_read = Station(int(information[0]), information[1], information[2], int(information[3]),
int((information[4].replace('\n', '')).replace(' ', '')))
stationList.append(station_read)
fileMetro.close()
return stationList
def readInformation(filename):
vector=[]
fp = open(filename,'r')
line = fp.readline()
while line:
# tmp=fp.readline()
try:
value=line.split(" : ")
value=value[1].split("\n")
vector.append(int(value[0]))
line = fp.readline()
        except (IndexError, ValueError):
line = fp.readline()
del vector[-1] #remove min value
del vector[-1] #remove max value
fp.close()
return (vector)
# readCostTable: Given a filename, it reads the information of this file.
# The file should contain a lower-triangular matrix with the cost between two different stations.
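# For example, for three stations the file could look like this (tab-separated,
# zeros meaning "no direct connection"):
#   0
#   5   0
#   0   8   0
# i.e. station 2 connects to station 1 with cost 5, and station 3 to station 2
# with cost 8; the mirror entries are filled in automatically below.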
def readCostTable(filename):
fileCorrespondencia = open(filename, 'r')
connections = {}
origin = 1
for i in fileCorrespondencia:
informations = i.split('\t')
        destination = 1  # station IDs start at 1 instead of 0
for j in informations:
j = j.replace('\n', '')
if j != '':
if j != '0':
if int(origin) not in connections:
connections[int(origin)] = {}
if int(destination) not in connections[int(origin)]:
connections[int(origin)][int(destination)] = float(j)
                    # the matrix is lower-triangular, so mirror the value into the missing upper part.
if int(destination) not in connections:
connections[int(destination)] = {}
if int(origin) not in connections[int(destination)]:
connections[int(destination)][int(origin)] = float(j)
destination = destination + 1
origin = origin + 1
return connections
# print_stationList: Given a stationList (- id, name, line, x, y - information), it prints the information by terminal
def print_stationList(stationList):
print("\n")
print (" ______________ STATION LIST________________")
print ("\n")
for i in stationList:
print (" ID : " + str(i.id) + " - " + str(i.name) + " linea: " + str(i.line) + " pos: (" + str(i.x) + "," + str(i.y) + ")")
print ("\n")
print ("\n")
# print_connections: Given a connections dictionary, it prints the information by terminal
def print_connections(connections):
print ("\n")
print (" ______________ CONNECTIONS ________________")
print ("\n")
for i in connections.keys():
print (" ID : " + str(i) + " ")
for j in connections[i]:
print (" " + str(j) + " : " + str(connections[i][j]))
#print ("\n")
#print ("\n")
def print_dictionary(stationList):
print ("\n")
print (" ______________ DICTIONARY ________________")
print ("\n")
for i in stationList:
print (" ID : "+ str(i.id) + " --> " + str(i.destinationDic))
print ("\n")
print ("\n") | 41.458564 | 136 | 0.597415 | 852 | 7,504 | 4.880282 | 0.211268 | 0.00938 | 0.010101 | 0.00962 | 0.214045 | 0.181818 | 0.13468 | 0.125301 | 0.106061 | 0.074074 | 0 | 0.005912 | 0.301173 | 7,504 | 181 | 137 | 41.458564 | 0.786995 | 0.315165 | 0 | 0.227642 | 0 | 0 | 0.04919 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089431 | false | 0 | 0 | 0 | 0.138211 | 0.162602 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b0ce22e9f3f3849e6cb4645ba1ee7779174285d | 5,290 | py | Python | deprecated/converters/gw100_converter.py | materials-data-facility/connect | 9ec5b61750bf6fa579bf3ec122f31880d3c049b8 | [
"Apache-2.0"
] | 1 | 2019-09-13T18:35:56.000Z | 2019-09-13T18:35:56.000Z | deprecated/converters/gw100_converter.py | materials-data-facility/connect_server | 9ec5b61750bf6fa579bf3ec122f31880d3c049b8 | [
"Apache-2.0"
] | 15 | 2018-11-01T18:08:11.000Z | 2021-12-06T17:55:03.000Z | deprecated/converters/gw100_converter.py | materials-data-facility/connect | 9ec5b61750bf6fa579bf3ec122f31880d3c049b8 | [
"Apache-2.0"
] | 1 | 2020-11-30T17:02:41.000Z | 2020-11-30T17:02:41.000Z | import json
import sys
import os
from tqdm import tqdm
from mdf_refinery.validator import Validator
from mdf_refinery.parsers.tab_parser import parse_tab
# VERSION 0.3.0
# This is the converter for the GW100 dataset.
# Arguments:
# input_path (string): The file or directory where the data resides.
# NOTE: Do not hard-code the path to the data in the converter. The converter should be portable.
# metadata (string or dict): The path to the JSON dataset metadata file, a dict or json.dumps string containing the dataset metadata, or None to specify the metadata here. Default None.
# verbose (bool): Should the script print status messages to standard output? Default False.
# NOTE: The converter should have NO output if verbose is False, unless there is an error.
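# Example call (hypothetical path; the directory must contain "gw100.csv"):
#     convert("/path/to/gw100_data", verbose=True)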
def convert(input_path, metadata=None, verbose=False):
if verbose:
print("Begin converting")
# Collect the metadata
if not metadata:
dataset_metadata = {
"mdf": {
"title": "Benchmark of G0W0 on 100 Molecules",
"acl": ["public"],
"source_name": "gw100",
"citation": ["M.J. van Setten, F. Caruso, S. Sharifzadeh, X. Ren, M. Scheffler, F. Liu, J. Lischner, L. Lin, J.R. Deslippe, S.G. Louie, C. Yang, F. Weigend, J.B. Neaton, F. Evers, and P. Rinke, GW100: Benchmarking G0W0 for Molecular Systems, J. Chem. Theory Comput. 11, 5665 (2015).", "M. Govoni et al., (2016). In preparation.", "P.J. Linstrom and W.G. Mallard, Eds., NIST Chemistry WebBook, NIST Standard Reference Database Number 69, National Institute of Standards and Technology, Gaithersburg MD, 20899, http://webbook.nist.gov."],
"data_contact": {
"given_name": "Michiel",
"family_name": "van Setten",
"email": "michiel.vansetten@uclouvain.be",
"institution": "Université catholique de Louvain",
},
# "author":
# "license": ,
"collection": "GW100",
# "tags": ,
"description": "This is a benchmark of G0W0 on 100 molecules.",
"year": 2015,
"links": {
"landing_page": "http://www.west-code.org/database/gw100/index.php",
"publication": "https://dx.doi.org/10.1021/acs.jctc.5b00453",
# "dataset_doi": ,
# "related_id": ,
# data links: {
#"globus_endpoint": ,
#"http_host": ,
#"path": ,
#}
},
# "mrr": ,
"data_contributor": {
"given_name": "Jonathon",
"family_name": "Gaff",
"email": "jgaff@uchicago.edu",
"institution": "The University of Chicago",
"github": "jgaff"
}
}
}
elif type(metadata) is str:
try:
dataset_metadata = json.loads(metadata)
except Exception:
try:
with open(metadata, 'r') as metadata_file:
dataset_metadata = json.load(metadata_file)
except Exception as e:
sys.exit("Error: Unable to read metadata: " + repr(e))
elif type(metadata) is dict:
dataset_metadata = metadata
else:
sys.exit("Error: Invalid metadata parameter")
dataset_validator = Validator(dataset_metadata)
# Get the data
with open(os.path.join(input_path, "gw100.csv")) as in_file:
data = in_file.read()
    for record in tqdm(parse_tab(data), desc="Processing records", disable=not verbose):
record_metadata = {
"mdf": {
"title": "GW100 - " + record["name"],
"acl": ["public"],
# "tags": ,
# "description": ,
"composition": record["formula"],
# "raw": ,
"links": {
"landing_page": "http://www.west-code.org/database/gw100/pag/" + record["cas"] + ".php",
# "publication": ,
# "dataset_doi": ,
# "related_id": ,
# data links: {
#"globus_endpoint": ,
#"http_host": ,
#"path": ,
#},
},
# "citation": ,
# "data_contact": {
# "given_name": ,
# "family_name": ,
# "email": ,
# "institution":,
# IDs
# },
# "author": ,
# "license": ,
# "collection": ,
# "data_format": ,
# "data_type": ,
# "year": ,
# "mrr":
# "processing": ,
# "structure":,
}
}
# Pass each individual record to the Validator
result = dataset_validator.write_record(record_metadata)
# Check if the Validator accepted the record, and print a message if it didn't
# If the Validator returns "success" == True, the record was written successfully
if result["success"] is not True:
print("Error:", result["message"])
if verbose:
print("Finished converting")
| 31.488095 | 548 | 0.520038 | 546 | 5,290 | 4.957875 | 0.457875 | 0.038788 | 0.011082 | 0.008866 | 0.096047 | 0.096047 | 0.074621 | 0.074621 | 0.074621 | 0.074621 | 0 | 0.022425 | 0.359357 | 5,290 | 167 | 549 | 31.676647 | 0.776335 | 0.320605 | 0 | 0.142857 | 0 | 0.028571 | 0.364586 | 0.008472 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014286 | false | 0 | 0.085714 | 0 | 0.1 | 0.042857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b1083dfb47666192fcefb6373fe2fcf7bc0a2fb | 9,098 | py | Python | backend/backend.py | Mishelles/vk-spotify-playlist-transfer | 4c15a9e35b1ff9aa81c7d36c53ef69b54d5a6914 | [
"MIT"
] | 1 | 2021-04-16T21:48:21.000Z | 2021-04-16T21:48:21.000Z | backend/backend.py | Mishelles/vk-spotify-playlist-transfer | 4c15a9e35b1ff9aa81c7d36c53ef69b54d5a6914 | [
"MIT"
] | 8 | 2021-04-05T17:16:10.000Z | 2021-10-12T13:31:19.000Z | backend/backend.py | Mishelles/vk-spotify-playlist-transfer | 4c15a9e35b1ff9aa81c7d36c53ef69b54d5a6914 | [
"MIT"
] | null | null | null | import os
import uuid
import time  # needed by batch_track_search() for the rate-limit sleep
import json
import yaml
import re
from nltk.tokenize import RegexpTokenizer
import requests
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from get_root_access_token_for_sp import get_token
from pydantic import BaseModel
from vkaudiotoken import (
TokenReceiverOfficial,
CommonParams,
TokenException,
TwoFAHelper,
supported_clients
)
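# Rough request flow (as implemented below): POST /login/spotify and
# POST /login/vk authenticate against the two services, POST /init-transfer
# creates the target Spotify playlist, and /get-batch moves one page of VK
# audio tracks into it (search each track on Spotify, then add the hits).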
app = FastAPI()
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
with open('creds.yaml', 'r') as c:
config = yaml.safe_load(c)
SPOTIFY_REDIRECT_URL = os.environ.get('SPOTIFY_REDIRECT_URL', 'http://localhost:3000/spotify-callback')
VK_API_DEFAULT_VERSION = '5.95'
sp_code = ''
sp_access_token = ''
sp_refresh_token = ''
sp_playlist_id = ''
vk_session = None
vk_access_token = ''
vk_total_tracks = 0
last_iteration = False
batch = 0
offset = 0
page_size = 200
class SpotifyLoginInputDto(BaseModel):
code: str
class VkLoginInputDto(BaseModel):
vkLogin: str
    vkPass: str
class BatchSizeDto(BaseModel):
size: str
@app.post("/login/spotify", status_code=200)
def login_to_spotify(dto: SpotifyLoginInputDto):
print("Code " + dto.code)
global sp_code
sp_code = dto.code
response = requests.post(
url='https://accounts.spotify.com/api/token',
data={
'grant_type': 'authorization_code',
'code': dto.code,
'redirect_uri': SPOTIFY_REDIRECT_URL
},
headers={
"Authorization": 'Basic {}'.format(config.get('sp_basic_auth'))
}).json()
try:
global sp_access_token
sp_access_token = response['access_token']
global sp_refresh_token
sp_refresh_token = response['refresh_token']
except KeyError:
raise HTTPException(status_code=400, detail='Invalid code provided')
@app.post("/login/vk", status_code=200)
def login_to_vk(dto: VkLoginInputDto):
print("Login: " + dto.vkLogin + ", pass: " + dto.vkPass)
params = CommonParams(supported_clients.VK_OFFICIAL.user_agent)
receiver = TokenReceiverOfficial(dto.vkLogin, dto.vkPass, params)
try:
credentials_from_vk = receiver.get_token()
except TokenException as err:
if err.code == TokenException.TWOFA_REQ and 'validation_sid' in err.extra:
TwoFAHelper(params).validate_phone(err.extra['validation_sid'])
print('2FA auth enabled. SMS should be sent')
""" auth_code = input('Please, wait for SMS and insert your authorization code below: \n')
receiver = TokenReceiverOfficial(self._config.get('vk_login'), self._config.get('vk_password'), params, auth_code)
try:
credentials_from_vk = receiver.get_token()
except Exception as e:
raise """
else:
raise
token = credentials_from_vk['access_token']
print("VK token: " + token)
session = requests.session()
session.headers.update({'User-Agent': supported_clients.VK_OFFICIAL.user_agent})
try:
global vk_session
vk_session = session
global vk_access_token
vk_access_token = token
except KeyError:
raise HTTPException(status_code=400, detail='Invalid code provided')
@app.post("/init-transfer", status_code=200)
def init_process():
print("Process has started")
global vk_total_tracks
vk_total_tracks = get_total_tracks()
print("VK total tracks: ")
print(vk_total_tracks)
global sp_playlist_id
sp_playlist_id = create_playlist_in_spotify()
print("SP playlist id: " + sp_playlist_id)
@app.get('/get-batch', status_code=200)
def process_batch(dto: BatchSizeDto):
    print("Processing batch request, size: " + dto.size)
    # Note: getTracksFromVK() treats its argument as the paging offset and
    # always fetches page_size tracks per call.
    batch = getTracksFromVK(dto.size)
print(batch)
tracks = batch_track_search(batch)
add_tracks_to_playlist([track['id'] for track in tracks], sp_playlist_id)
def get_total_tracks() -> int:
return vk_session.get(
url="https://api.vk.com/method/audio.get",
params=[
('access_token', vk_access_token),
('v', config.get('vk_version', VK_API_DEFAULT_VERSION))
]
).json()['response']['count']
def _revoke_root_token():
config['sp_root_token'] = get_token()
def revoke_user_token():
response = requests.post(
url='https://accounts.spotify.com/api/token',
data={
'refresh_token': sp_refresh_token,
'grant_type': 'refresh_token'
},
headers={
"Authorization": 'Basic {}'.format(sp_code)
}
).json()
global sp_access_token
sp_access_token = response['access_token']
def create_playlist_in_spotify(level=0) -> str:
if level > 2:
raise Exception
result = requests.post(
url='https://api.spotify.com/v1/users/{}/playlists'.format(config.get('sp_user_id')),
json={
"name": config.get("sp_playlist_name"),
"description": config.get("sp_playlist_description"),
"public": config.get("sp_is_playlist_public")
},
headers={
"Authorization": 'Bearer {}'.format(sp_access_token)
}
)
if result.status_code == 401:
revoke_user_token()
return create_playlist_in_spotify(level + 1)
try:
playlist_id = result.json()['id']
except Exception:
raise Exception
return playlist_id
def getTracksFromVK(offset):
current_page_tracks = vk_session.get(
url="https://api.vk.com/method/audio.get",
params=[
('access_token', vk_access_token),
('v', config.get('vk_version', VK_API_DEFAULT_VERSION)),
('count', page_size),
('offset', offset)
])
current_page_tracks = current_page_tracks.json()['response']['items']
offset += page_size
return [{'artist': l['artist'], 'title': l['title']} for l in current_page_tracks]
def batch_track_search(track_list) -> list:
track_list_spotify = []
for song in track_list:
title = song['title']
artist = song['artist']
cleaned_title = clean(title)
cleaned_artist = clean(artist)
try:
track_id, track_name = search_track_on_spotify(cleaned_title + " " + cleaned_artist)
except Exception:
try:
track_id, track_name = search_track_on_spotify(cleaned_title)
except Exception as ex:
print(cleaned_title + " " + cleaned_artist + ' not found! ' + ex.__str__())
else:
track_list_spotify.append({'Track name': track_name, 'id': track_id})
else:
track_list_spotify.append({'Track name': track_name, 'id': track_id})
time.sleep(0.2)
return track_list_spotify
def search_track_on_spotify(query, level=0) -> (str, str):
if level > 2:
        raise Exception  # SpotifyAuthException is not defined anywhere; fall back to a plain Exception
response = requests.get(
url='https://spclient.wg.spotify.com/searchview/km/v4/search/{}'.format(query),
params={
'catalogue': '',
'country': 'RU'
},
headers={
'Authorization': "Bearer {}".format(self._config.get('sp_root_token')),
'Host': "spclient.wg.spotify.com"
}
)
if response.status_code == 401:
        _revoke_root_token()
return search_track_on_spotify(query, level + 1)
elif response.status_code == 404:
raise Exception
else:
try:
results = response.json()
except Exception:
raise Exception
try:
track_id = results['results']['tracks']['hits'][0]['uri']
track_returned_name = results['results']['tracks']['hits'][0]['name']
except Exception:
raise Exception
return track_id, track_returned_name
def add_tracks_to_playlist(tracks, id, level=0) -> None:
if level > 2:
raise Exception
tracks_str = ','.join(tracks)
res = requests.post(
url='https://api.spotify.com/v1/playlists/{}/tracks?uris={}'.format(id, tracks_str),
headers={
"Authorization": 'Bearer {}'.format(self._config.get('sp_access_token'))
}
)
if res.status_code == 401:
revoke_user_token()
return add_tracks_to_playlist(tracks, id, level + 1)
def clean(clean_sting) -> str:
# Remove "()"
clean_sting = re.sub(r'\([^)]*\)', '', clean_sting)
# Remove "[]"
clean_sting = re.sub(r'\[[^)]*\]', '', clean_sting)
# Remove "feat."
clean_sting = re.sub(r'(?i)(\s*)f(?:ea)?t(?:(?:\.?|\s)|uring)(?=\s).*$', '', clean_sting)
# Remove date
clean_sting = re.sub(r'(0[1-9]|[12][0-9]|3[01])[- /.](0[1-9]|1[012])[- /.](19|20)\d\d', '', clean_sting)
# Remove numbers
if re.match(r'\s*[^0-9]+\s*', clean_sting):
clean_sting = re.sub(r'[0-9]+', '', clean_sting)
# Remove other garbage
tokenizer = RegexpTokenizer(r'\w+')
return " ".join(tokenizer.tokenize(clean_sting)) | 30.530201 | 125 | 0.62783 | 1,097 | 9,098 | 4.958979 | 0.220602 | 0.036397 | 0.016728 | 0.013787 | 0.348529 | 0.287684 | 0.231618 | 0.207353 | 0.161765 | 0.148897 | 0 | 0.012273 | 0.238734 | 9,098 | 298 | 126 | 30.530201 | 0.773174 | 0.009453 | 0 | 0.223141 | 0 | 0.004132 | 0.162412 | 0.016172 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053719 | false | 0.012397 | 0.049587 | 0.004132 | 0.169421 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b121f96edfab2bb880eeea95628f1c1be9789b4 | 8,616 | py | Python | src/Noncircular/Calculations/_Appendix13_7_c.py | thepvguy/calctoys | f7ef4e422d8a27cc387c1a24b5fb6e318d774f57 | [
"Unlicense"
] | 7 | 2018-07-17T08:01:34.000Z | 2021-06-14T03:33:58.000Z | src/Noncircular/Calculations/_Appendix13_7_c.py | thepvguy/calctoys | f7ef4e422d8a27cc387c1a24b5fb6e318d774f57 | [
"Unlicense"
] | null | null | null | src/Noncircular/Calculations/_Appendix13_7_c.py | thepvguy/calctoys | f7ef4e422d8a27cc387c1a24b5fb6e318d774f57 | [
"Unlicense"
] | 6 | 2018-10-01T10:29:58.000Z | 2022-01-24T22:34:16.000Z | import math
# TODO: Implement acceptability tests
class Appendix13_7_cParams:
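    """Geometry and load inputs for the Appendix 13-7 calculation of
    Figure 13-2(a) Sketch 3 vessels (see the equation references in the
    method docstrings of Appendix13_7_cCalcs below)."""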
def __init__(
self,
internal_pressure,
corner_radius,
short_side_half_length,
long_side_half_length,
thickness,
            eval_at_outer_walls=False):
self.P = internal_pressure
self.R = corner_radius
self.L_1 = short_side_half_length
self.L_2 = long_side_half_length
self.t_1 = thickness
self.eval_at_outer_walls = eval_at_outer_walls
class Appendix13_7_cCalcs:
def __init__(self, params: Appendix13_7_cParams):
self.P = params.P
self.R = params.R
self.L_1 = params.L_1
self.L_2 = params.L_2
self.t_1 = params.t_1
self.isOuterWallEval = params.eval_at_outer_walls
def c(self):
"""
        :return: The distance from the neutral axis of the cross section to the extreme fibers. Returns c_i or c_o for the thickness, depending on whether the inner or outer wall is evaluated.
"""
sign = 1
if self.isOuterWallEval:
sign = -1
return 0.5 * sign * self.t_1
def I_1(self):
return (1 / 12.0) * self.t_1 ** 3
def alpha3(self):
return self.L_2 / self.L_1
def phi(self):
return self.R / self.L_1
def K_3(self):
"""
:return: Equation 40
"""
return (-1.0) * (self.L_1 ** 2) * (
6.0 * (self.phi() ** 2) * self.alpha3()
- 3.0 * math.pi * (self.phi() ** 2)
+ 6.0 * (self.phi() ** 2)
+ (self.alpha3() ** 3)
+ (3.0 * self.alpha3() ** 2)
- 6.0 * self.phi()
- 2.0
+ 1.5 * math.pi * self.phi() * (self.alpha3() ** 2)
+ 6.0 * self.phi() * self.alpha3()
) / (3.0 * (2.0 * self.alpha3() + math.pi * self.phi() + 2.0))
def M_A(self):
"""
:return: Equation 38
"""
return self.P * self.K_3()
def M_r(self):
"""
:return: equation 39
"""
raise ValueError("Looks like it's time to implement M_r")
def S_m_C(self):
"""
:return: Short side membrane stress at point C for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 25
"""
return (self.P * (self.R + self.L_2)) / self.t_1
def S_m_D(self):
"""
:return: Same as S_m_C
"""
return self.S_m_C()
def S_m_A(self):
"""
:return: Long side membrane stress at point A for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 26
"""
return (self.P *(self.L_1 + self.R)) / self.t_1
def S_m_B(self):
"""
:return: Same as S_m_A
"""
return self.S_m_A()
def S_m_BC(self):
"""
:return: Membrane stress in radius, between points B and C for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 27
"""
return (self.P / self.t_1) * (math.sqrt((self.L_2 ** 2) + self.L_1 ** 2) + self.R)
def S_b_C(self):
"""
:return: Bending stress at C for short side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 28
"""
return (self.c() / (2.0 * self.I_1())) * (2.0 * self.M_A() + self.P * (2 * self.R * self.L_2 - 2.0 * self.R * self.L_1 + self.L_2 ** 2))
def S_b_D(self):
"""
:return: Bending stress at D for short side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 29
"""
return (self.c() / (2.0 * self.I_1())) * (2.0 * self.M_A() + self.P * ((self.L_2 ** 2) + 2 * self.R * self.L_2 - 2.0 * self.R * self.L_1 + self.L_2 ** 2))
def S_b_A(self):
"""
:return: Bending stress at point A for long side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 30
"""
return self.M_A() * self.c() / self.I_1()
def S_b_B(self):
"""
:return: Bending stress at point B for long side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 31
"""
return (self.c() / (2 * self.I_1())) * (2 * self.M_A() + self.P * self.L_2 ** 2)
def S_b_BC(self):
"""
:return: Max bending stress between points B and C for corner sections for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 32
"""
maxStressTheta = math.atan(self.L_1 / self.L_2)
geom = self.c() / self.I_1()
moment = 0.5 * (2 * self.M_A() + self.P * (2 * self.R * (self.L_2 * math.cos(maxStressTheta) - self.L_1 * (1 - math.sin(maxStressTheta))) + self.L_2 ** 2))
return geom * moment
def S_T_C(self):
"""
:return: Total stress at point C for short side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 33
"""
return self.S_m_C() + self.S_b_C()
def S_T_D(self):
"""
:return: Total stress at point D for short side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 34
"""
return self.S_m_D() + self.S_b_D()
def S_T_A(self):
"""
:return: Total stress at point A for long side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 35
"""
return self.S_m_A() + self.S_b_A()
def S_T_B(self):
"""
:return: Total stress at point B for long side plate for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 36
"""
return self.S_m_B() + self.S_b_B()
def S_T_BC(self):
"""
:return: Total stress between points B and C for corner sections for Figure 13-2(a) Sketch 3 vessels; appendix 13-7 equation 37
"""
return self.S_m_BC() + self.S_b_BC()
if __name__ == "__main__":
import copy
params_inner = Appendix13_7_cParams(
internal_pressure=100,
corner_radius=3,
short_side_half_length=5,
long_side_half_length=10,
thickness=1
)
calc_inner = Appendix13_7_cCalcs(params_inner)
params_outer = copy.deepcopy(params_inner)
params_outer.eval_at_outer_walls = True
calc_outer = Appendix13_7_cCalcs(params_outer)
print("*** Input ***")
print("P = " + str(params_inner.P))
print("R = " + str(params_inner.R))
print("L_1 = " + str(params_inner.L_1))
print("L_2 = " + str(params_inner.L_2))
print("t_1 = " + str(params_inner.t_1))
print("")
print("*** Output ***")
print("")
print("*** Inner Walls ***")
print("c = " + str(calc_inner.c()))
print("I_1 = " + str(calc_inner.I_1()))
print("alpha3 = " + str(calc_inner.alpha3()))
print("phi = " + str(calc_inner.phi()))
print("K_3 = " + str(calc_inner.K_3()))
print("M_A = " + str(calc_inner.M_A()))
# print("M_r = " + str(calc_inner.M_r()))
print("S_m_C = " + str(calc_inner.S_m_C()))
print("S_m_D = " + str(calc_inner.S_m_D()))
print("S_m_A = " + str(calc_inner.S_m_A()))
print("S_m_B = " + str(calc_inner.S_m_B()))
print("S_m_BC = " + str(calc_inner.S_m_BC()))
print("S_b_C = " + str(calc_inner.S_b_C()))
print("S_b_D = " + str(calc_inner.S_b_D()))
print("S_b_A = " + str(calc_inner.S_b_A()))
print("S_b_B = " + str(calc_inner.S_b_B()))
print("S_b_BC = " + str(calc_inner.S_b_BC()))
print("S_T_C = " + str(calc_inner.S_T_C()))
print("S_T_D = " + str(calc_inner.S_T_D()))
print("S_T_A = " + str(calc_inner.S_T_A()))
print("S_T_B = " + str(calc_inner.S_T_B()))
print("S_T_BC = " + str(calc_inner.S_T_BC()))
print("")
print("*** Outer Walls ***")
print("c = " + str(calc_outer.c()))
print("I_1 = " + str(calc_outer.I_1()))
print("alpha3 = " + str(calc_outer.alpha3()))
print("phi = " + str(calc_outer.phi()))
print("K_3 = " + str(calc_outer.K_3()))
print("M_A = " + str(calc_outer.M_A()))
# print("M_r = " + str(calc_outer.M_r()))
print("S_m_C = " + str(calc_outer.S_m_C()))
print("S_m_D = " + str(calc_outer.S_m_D()))
print("S_m_A = " + str(calc_outer.S_m_A()))
print("S_m_B = " + str(calc_outer.S_m_B()))
print("S_m_BC = " + str(calc_outer.S_m_BC()))
print("S_b_C = " + str(calc_outer.S_b_C()))
print("S_b_D = " + str(calc_outer.S_b_D()))
print("S_b_A = " + str(calc_outer.S_b_A()))
print("S_b_B = " + str(calc_outer.S_b_B()))
print("S_b_BC = " + str(calc_outer.S_b_BC()))
print("S_T_C = " + str(calc_outer.S_T_C()))
print("S_T_D = " + str(calc_outer.S_T_D()))
print("S_T_A = " + str(calc_outer.S_T_A()))
print("S_T_B = " + str(calc_outer.S_T_B()))
print("S_T_BC = " + str(calc_outer.S_T_BC())) | 32.636364 | 163 | 0.558264 | 1,412 | 8,616 | 3.139518 | 0.097026 | 0.069479 | 0.059553 | 0.043988 | 0.57681 | 0.46808 | 0.397248 | 0.37266 | 0.36499 | 0.233476 | 0 | 0.046191 | 0.28389 | 8,616 | 264 | 164 | 32.636364 | 0.672285 | 0.220636 | 0 | 0.019868 | 0 | 0 | 0.072878 | 0 | 0 | 0 | 0 | 0.003788 | 0 | 1 | 0.15894 | false | 0 | 0.013245 | 0.019868 | 0.324503 | 0.357616 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b1344dd323e948e9f6017df3b1661af235dfa13 | 1,619 | py | Python | tests/api_resources/test_file_link.py | bhch/async-stripe | 75d934a8bb242f664e7be30812c12335cf885287 | [
"MIT",
"BSD-3-Clause"
] | 8 | 2021-05-29T08:57:58.000Z | 2022-02-19T07:09:25.000Z | tests/api_resources/test_file_link.py | bhch/async-stripe | 75d934a8bb242f664e7be30812c12335cf885287 | [
"MIT",
"BSD-3-Clause"
] | 5 | 2021-05-31T10:18:36.000Z | 2022-01-25T11:39:03.000Z | tests/api_resources/test_file_link.py | bhch/async-stripe | 75d934a8bb242f664e7be30812c12335cf885287 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2021-05-29T13:27:10.000Z | 2021-05-29T13:27:10.000Z | from __future__ import absolute_import, division, print_function
import stripe
import pytest
pytestmark = pytest.mark.asyncio
TEST_RESOURCE_ID = "link_123"
class TestFileLink(object):
async def test_is_listable(self, request_mock):
resources = await stripe.FileLink.list()
request_mock.assert_requested("get", "/v1/file_links")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.FileLink)
async def test_is_retrievable(self, request_mock):
resource = await stripe.FileLink.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v1/file_links/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.FileLink)
async def test_is_creatable(self, request_mock):
resource = await stripe.FileLink.create(file="file_123")
request_mock.assert_requested("post", "/v1/file_links")
assert isinstance(resource, stripe.FileLink)
async def test_is_saveable(self, request_mock):
resource = await stripe.FileLink.retrieve(TEST_RESOURCE_ID)
resource.metadata["key"] = "value"
await resource.save()
request_mock.assert_requested(
"post", "/v1/file_links/%s" % TEST_RESOURCE_ID
)
async def test_is_modifiable(self, request_mock):
resource = await stripe.FileLink.modify(
TEST_RESOURCE_ID, metadata={"key": "value"}
)
request_mock.assert_requested(
"post", "/v1/file_links/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.FileLink)
| 33.040816 | 67 | 0.683138 | 190 | 1,619 | 5.547368 | 0.278947 | 0.104364 | 0.092979 | 0.066414 | 0.612903 | 0.597723 | 0.571158 | 0.491461 | 0.387097 | 0.324478 | 0 | 0.009471 | 0.217418 | 1,619 | 48 | 68 | 33.729167 | 0.822415 | 0 | 0 | 0.277778 | 0 | 0 | 0.079679 | 0 | 0 | 0 | 0 | 0 | 0.277778 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.111111 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b15a52f6be4dc16088c1fb00a71fbd34c59ea53 | 762 | py | Python | L1Trigger/GlobalTriggerAnalyzer/python/l1GtBeamModeFilter_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | L1Trigger/GlobalTriggerAnalyzer/python/l1GtBeamModeFilter_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | L1Trigger/GlobalTriggerAnalyzer/python/l1GtBeamModeFilter_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
l1GtBeamModeFilter = cms.EDFilter("L1GtBeamModeFilter",
# input tag for input tag for ConditionInEdm products
CondInEdmInputTag = cms.InputTag("conditionsInEdm"),
# input tag for the L1 GT EVM product
L1GtEvmReadoutRecordTag = cms.InputTag("gtEvmDigis"),
#
# vector of allowed beam modes
# default value: 11 (STABLE)
AllowedBeamMode = cms.vuint32(11),
# return the inverted result, to be used instead of NOT
# normal result: true if filter true
# false if filter false or error (no product found)
# inverted result: true if filter false
# false if filter true or error (no product found)
InvertResult = cms.bool( False )
)
| 34.636364 | 71 | 0.675853 | 91 | 762 | 5.659341 | 0.582418 | 0.062136 | 0.064078 | 0.069903 | 0.081553 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017637 | 0.255906 | 762 | 21 | 72 | 36.285714 | 0.890653 | 0.528871 | 0 | 0 | 0 | 0 | 0.123563 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b168867b6c2192e22d3fb03d5618d1c3ca2e893 | 3,177 | py | Python | python/Day11/seating.py | joelbygger/adventofcode20 | 35f9f4fa9bf051f420a22400c896bc7d26dc44d7 | [
"MIT"
] | null | null | null | python/Day11/seating.py | joelbygger/adventofcode20 | 35f9f4fa9bf051f420a22400c896bc7d26dc44d7 | [
"MIT"
] | null | null | null | python/Day11/seating.py | joelbygger/adventofcode20 | 35f9f4fa9bf051f420a22400c896bc7d26dc44d7 | [
"MIT"
] | null | null | null | import copy
def _direction():
    # Indices start at (0, 0) at the top left; directions are (x, y) offsets,
    # with y increasing downwards.
    yield -1, -1  # UL
    yield -1, 0   # L
    yield -1, 1   # DL
    yield 0, -1   # U
    yield 0, 1    # D
    yield 1, -1   # UR
    yield 1, 0    # R
    yield 1, 1    # DR
# def _in_matrix(pos, seats):
# return 0 <= pos[0] < len(seats[0]) and 0 <= pos[1] < len(seats)
class Seating:
def __init__(self, file):
with open(file) as f:
# A list of char arrays.
self._seats = [list(x) for x in f.read().splitlines()]
def _valid_position(self, pos):
return 0 <= pos[0] < len(self._seats[0]) and 0 <= pos[1] < len(self._seats)
def _calc_pos(self, pos, d, ignore_floor):
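        # Step once in direction d; when ignore_floor is set, keep stepping
        # over floor ('.') cells until a seat or the edge of the matrix is
        # reached (the "first visible seat" rule).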
n_pos = (pos[0] + d[0], pos[1] + d[1])
if ignore_floor:
while True:
if not self._valid_position(n_pos) or not self._floor(self._seats[n_pos[1]][n_pos[0]]):
break
n_pos = (n_pos[0] + d[0], n_pos[1] + d[1])
return n_pos
def _get_neighbor_seats(self, pos, ignore_floor):
ns_pos = [self._calc_pos(pos, d, ignore_floor) for d in _direction()]
ns_pos_valid = filter(self._valid_position, ns_pos)
return [self._seats[x[1]][x[0]] for x in ns_pos_valid]
@staticmethod
def _free(seat):
return seat == 'L'
@staticmethod
def _floor(seat):
return seat == '.'
@staticmethod
def _occupied(seat):
return seat == '#'
def _seat_change(self, pos, neighbors, tolerant):
curr = self._seats[pos[1]][pos[0]]
occupied_cnt = len([n for n in neighbors if self._occupied(n)])
if self._free(curr) and occupied_cnt == 0:
curr = '#'
elif self._occupied(curr):
if not tolerant:
if occupied_cnt >= 4:
curr = 'L'
else:
if occupied_cnt >= 5:
curr = 'L'
return curr
def _iterate(self, ignore_floor, tolerant):
new_seats = copy.deepcopy(self._seats)
for y, row in enumerate(self._seats):
for x, seat in enumerate(row):
neighbors = self._get_neighbor_seats((x, y), ignore_floor)
seat = self._seat_change((x, y), neighbors, tolerant)
if seat != self._seats[y][x]:
new_seats[y][x] = seat
if self._seats == new_seats:
return True
else:
self._seats = copy.deepcopy(new_seats)
return False
def iterate_until_stable(self, ignore_floor, tolerant):
while True:
if self._iterate(ignore_floor, tolerant):
break
return
def iterate_times(self, iterations, ignore_floor, tolerant):
while True:
if iterations == 0 or self._iterate(ignore_floor, tolerant):
break
iterations -= 1
return
def count_occupied(self):
cnt = 0
for r in self._seats:
for s in r:
cnt += self._occupied(s)
return cnt
def get_seats(self):
return copy.deepcopy(self._seats)
| 28.621622 | 103 | 0.537299 | 433 | 3,177 | 3.736721 | 0.212471 | 0.072312 | 0.058714 | 0.013597 | 0.118665 | 0.10136 | 0.021014 | 0 | 0 | 0 | 0 | 0.023188 | 0.348442 | 3,177 | 110 | 104 | 28.881818 | 0.758454 | 0.063582 | 0 | 0.182927 | 0 | 0 | 0.002026 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.170732 | false | 0 | 0.012195 | 0.060976 | 0.353659 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b1770ba8b608be4e3ab9c20fe2c9cb9f117e749 | 1,408 | py | Python | main.py | LucioC/sortable | 4301188933eeec96b7da3f906d80fc35ad154032 | [
"Apache-2.0"
] | null | null | null | main.py | LucioC/sortable | 4301188933eeec96b7da3f906d80fc35ad154032 | [
"Apache-2.0"
] | null | null | null | main.py | LucioC/sortable | 4301188933eeec96b7da3f906d80fc35ad154032 | [
"Apache-2.0"
] | null | null | null | import os
import json
from challenge import FileReader, Product, Listing, MatchSearch
import challenge
reader = FileReader()
search = MatchSearch()
products = reader.read_products('products.txt')
listings = reader.read_listings('listings.txt')
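# limit to the first 1000 listings (presumably to keep the run fast while testing)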
listings = listings[0:1000]
result = search.match_listings(listings, products, debug=lambda c: print(c))
f = open('output.txt', 'w')
key_list = list(result.keys())
key_list = sorted(key_list,key=lambda s: s.lower())
for key in key_list:
f.write(json.dumps({ "product_name" : key, "listings" : result[key] }))
f.write('\n')
f.close()
print("non matches: " + str(len(search.non_matches)))
f = open('output_non_matches.txt', 'w')
for non_match in search.non_matches:
f.write(json.dumps(non_match.dict_without_tags()))
f.write('\n')
f.close()
# verify solution
to_verify_list = reader.read_json_list('correct_partial_solution.txt')
products_expected = []
for item in to_verify_list:
products_expected.append(item['product_name'])
expected_missing = []
for correct in products_expected:
if correct not in key_list:
expected_missing.append(correct)
print("expected to be on output:")
for error in expected_missing:
print(error)
non_expected_list = []
for o in key_list:
if o not in products_expected:
non_expected_list.append(o)
print("Non expected to be on output:")
for error in non_expected_list:
print(error)
| 22.709677 | 78 | 0.734375 | 211 | 1,408 | 4.7109 | 0.293839 | 0.042254 | 0.027163 | 0.030181 | 0.086519 | 0.060362 | 0.060362 | 0.060362 | 0 | 0 | 0 | 0.004115 | 0.137074 | 1,408 | 61 | 79 | 23.081967 | 0.813992 | 0.010653 | 0 | 0.146341 | 0 | 0 | 0.136265 | 0.036049 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.097561 | 0 | 0.097561 | 0.146341 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b19ff6520a92cbe9bced32400b4df1a8b799dfb | 1,057 | py | Python | Executables/PythonScriptTakingArguments.py | SimioLLC/RunExecutableStep | 377fde62b3ce022a54c7f60d8d1fe70880ce610c | [
"MIT"
] | 2 | 2021-12-12T14:30:51.000Z | 2022-02-08T07:31:50.000Z | Executables/PythonScriptTakingArguments.py | SimioLLC/RunExecutableStep | 377fde62b3ce022a54c7f60d8d1fe70880ce610c | [
"MIT"
] | 2 | 2021-05-20T17:17:11.000Z | 2022-02-09T06:58:22.000Z | Executables/PythonScriptTakingArguments.py | SimioLLC/RunExecutableStep | 377fde62b3ce022a54c7f60d8d1fe70880ce610c | [
"MIT"
] | null | null | null | import sys
import datetime
# Sample program to be initiated by the Simio Step RunExecutable with "Python" ArgumentLogic.
# This runs python scripts with argument convention of: 1st arg is the script name, followed
# by arguments. All args are surrounded with a double-quote.
# The script prints the arguments it finds, appending the output to a file.
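# Example invocation (hypothetical arguments):
#   python PythonScriptTakingArguments.py "first arg" "second arg"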
def logit( message ):
dt = datetime.datetime.now()
print(dt.strftime("[%H:%M:%S.%f] "), message)
# redirect stdout to a file
from contextlib import redirect_stdout
try:
    with open('c:\\test\\testRunExecutable\\PythonScriptTakingArgumentsOutput.txt', 'a') as f:
with redirect_stdout(f):
logit('Name of the script: ' + sys.argv[0])
numArgs = len(sys.argv)
logit('Number of arguments: ' + str(numArgs))
            for arg in range(0, numArgs):
logit("Arg[" + str(arg) + "]=" + sys.argv[arg] )
logit('The list of arguments: ' + str(sys.argv))
except Exception as e:
print("Error= %s" % e)
| 30.2 | 93 | 0.639546 | 144 | 1,057 | 4.673611 | 0.5625 | 0.041605 | 0.020802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005 | 0.243141 | 1,057 | 34 | 94 | 31.088235 | 0.83625 | 0.321665 | 0 | 0 | 0 | 0 | 0.223629 | 0.091421 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.166667 | 0 | 0.222222 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b1ca3b503a037398aebee47693ea3fd4611ebf6 | 8,712 | py | Python | app/handlers/gear_handlers.py | lik33v3n/Tower-of-God | 1e6c86939f053739f9e73d56fd1c04d7fb444e8b | [
"MIT"
] | 3 | 2020-06-28T18:04:12.000Z | 2022-02-15T19:46:47.000Z | app/handlers/gear_handlers.py | lik33v3n/Tower-of-God | 1e6c86939f053739f9e73d56fd1c04d7fb444e8b | [
"MIT"
] | null | null | null | app/handlers/gear_handlers.py | lik33v3n/Tower-of-God | 1e6c86939f053739f9e73d56fd1c04d7fb444e8b | [
"MIT"
] | null | null | null | import logging
from contextlib import suppress
from math import fabs
from aiogram.dispatcher import FSMContext
from aiogram.types import CallbackQuery, Message, ReplyKeyboardRemove
from aiogram.utils.exceptions import (MessageToDeleteNotFound,
MessageToEditNotFound)
from app.__main__ import bot
from ..database.base import Item, Shop, User
from ..handlers.user_handlers import user_inventory
from ..helpers.dev_text import gear_info_text
from ..helpers.keyboards import (CONFIRM_Kb, CRAFT_Kb, EQUIPMENT_Kb, IDLE_Kb,
UNDRESS_Kb)
from ..utils.states import MainStates
async def gear_info_check(m: Message):
try:
gear = await Item.get(int(m.text[1:]))
if gear:
await m.answer(text=gear_info_text(gear))
else:
with suppress(MessageToDeleteNotFound):
await m.delete()
            await m.answer('❗ No such item exists')
except ValueError:
return
async def gear_equip(c: CallbackQuery, user: User):
if c.data[6:] == 'back':
with suppress(MessageToDeleteNotFound):
await c.message.delete()
await user_inventory(c.message, user)
else:
gear = await Item.get(int(c.data[6:]))
if gear.id in user.inventory:
if getattr(user, gear.item_class) is None:
user.inventory.remove(gear.id)
await user.update(inventory=user.inventory, defence=user.defence + gear.defence_boost,
max_defence=user.max_defence + gear.defence_boost,
damage=user.damage + gear.attack_boost).apply()
await user.update(weapon=gear.id).apply() if gear.item_class == 'weapon' else await user.update(armor=gear.id).apply()
await c.message.delete()
await c.message.answer(text="❕ Вы надели экипировку", reply_markup=IDLE_Kb())
else:
await c.message.delete()
await c.message.answer(text="❗ Сначала снимите экипировку", reply_markup=EQUIPMENT_Kb())
else:
await c.message.delete()
await c.message.answer(text="❗ У вас нету такого предмета", reply_markup=IDLE_Kb())
async def gear_unequip(m: Message, user: User):
    if user.weapon is not None or user.armor is not None:
eq = [user.weapon, user.armor]
data = []
for i in range(len(eq)):
            if eq[i] is not None:
gear = await Item.get(eq[i])
data.extend([gear.name, gear.id])
else:
                data.extend(['- Empty -', 'empty'])
        await m.answer('❔ Choose which piece of gear to remove:',
                       reply_markup=UNDRESS_Kb(data))
    else:
        await m.answer("❗ You have no gear equipped", reply_markup=IDLE_Kb())
async def gear_unequip_query(c: CallbackQuery, user: User):
gear = await Item.get(int(c.data[8:]))
# user.weapon => Common Sword (example)
if gear:
user.inventory.append(gear.id)
await user.update(defence=user.defence - gear.defence_boost if user.defence - gear.defence_boost >= 0 else 0,
max_defence=user.max_defence - gear.defence_boost,
damage=user.damage - gear.attack_boost, inventory=user.inventory).apply()
await user.update(weapon=None).apply() if gear.item_class == 'weapon' else await user.update(armor=None).apply()
with suppress(MessageToDeleteNotFound):
await c.message.delete()
await c.message.answer(f"❕ Вы сняли \"{gear.name}\"", reply_markup=IDLE_Kb())
else:
with suppress(MessageToDeleteNotFound):
await c.message.delete()
        await c.message.answer("❗ You have no gear equipped", reply_markup=IDLE_Kb())
async def gear_craft(m: Message, user: User):
raw = []
if user.inventory:
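        # Map each item id that appears at least twice in the inventory to the
        # number of craftable pairs it yields (count // 2).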
inv = dict((x, int(user.inventory.count(x) / 2)) for x in set(user.inventory) if user.inventory.count(x) != 1)
if inv:
for x, y in inv.items():
raw_items = await Item.get(int(x))
if raw_items:
for _ in range(y):
raw.append(raw_items)
print(inv, '|', raw_items, '|', raw)
            await m.answer(text='🧳❕ Choose which pair of items to craft:', reply_markup=CRAFT_Kb(raw))
        else:
            await m.answer(text="❗ You have no suitable items", reply_markup=IDLE_Kb())
    else:
        await m.answer(text='❗ Your inventory is empty', reply_markup=IDLE_Kb())
async def gear_craft_query(c: CallbackQuery, user: User):
curr_gear = await Item.get(int(c.data[6:]))
if curr_gear:
for _ in range(2):
if curr_gear.id in user.inventory:
user.inventory.remove(curr_gear.id)
else:
with suppress(MessageToDeleteNotFound):
await c.message.delete()
                await c.message.answer('❕ You no longer have that item in your inventory', reply_markup=IDLE_Kb())
return
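    # Crafting convention (inferred from the data model): the upgraded item is
    # assumed to sit at the id immediately after the source item's id.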
craft_result = await Item.get(curr_gear.id + 1)
if curr_gear.item_class == craft_result.item_class:
user.inventory.append(craft_result.id)
await user.update(inventory=user.inventory).apply()
with suppress(MessageToDeleteNotFound):
await c.message.delete()
await c.message.answer(
text=f"❕ Вы успешно скрафтили предмет:\n\n{gear_info_text(craft_result)}",
reply_markup=IDLE_Kb())
else:
with suppress(MessageToDeleteNotFound):
await c.message.delete()
            await c.message.answer('❗ The items are already at maximum quality', reply_markup=IDLE_Kb())
else:
with suppress(MessageToDeleteNotFound):
await c.message.delete()
        await c.message.answer('<b>Error:</b> Broken item (contact the administration)', reply_markup=IDLE_Kb())
raise NameError("Broken item")
async def gear_sell_confirm(c: CallbackQuery, user: User):
    await c.message.edit_text('💸 <b>Selling an item.</b>\n\n<i> - Item sales happen between players, without the administration involved. We advise setting a reasonable price.\n\n'
                              ' - Selling an item does not give you the profit <u>instantly</u>! You are only registering it in a queue where other users can buy it. </i>',
                              reply_markup=CONFIRM_Kb(text=('💸 Continue', '🔚 Cancel'), callback=f'sell_register_{c.data[5:]}'))
async def gear_sell_register(c: CallbackQuery, user: User, state: FSMContext):
    item = await Item.get(int(c.data[14:]))
    if item:
        await MainStates.selling.set()
        with suppress(MessageToDeleteNotFound):
            await c.message.delete()
        trash = await c.message.answer('❔ <b>How to register an item:</b>\n\n<i> - This step is simple, since the Tower does almost everything for you; '
                                       'all you have to do is send the bot the <u>price</u> of the item</i>. \n\nExample: '
                                       '"999"', reply_markup=ReplyKeyboardRemove())
        async with state.proxy() as data:
            data['sell_item'] = item
            data['trash'] = trash
    else:
        with suppress(MessageToDeleteNotFound):
            await c.message.delete()
        await c.message.answer('<b>Error:</b> Broken item (contact the administration)', reply_markup=IDLE_Kb())
        raise NameError("Broken item")
async def gear_sell_registered(m: Message, user: User, state: FSMContext):
    async with state.proxy() as data:
        item = data['sell_item']
        trash = data['trash']
    try:
        # fabs() keeps the price non-negative even if a minus sign is typed
        request = await Shop.create(item_id=item.id, item=item.name, rank=item.rank, price=int(fabs(int(m.text))), user_id=user.id)
        # removing from the inventory
        user.inventory.remove(request.item_id)
        await m.delete()
        with suppress(MessageToDeleteNotFound):
            await trash.delete()
        await m.answer(text=f'❕ Sale lot №{request.id} has been created:\n\n{request.item}: /{request.item_id}\n'
                            f'🏆 Item rank: {request.rank}\n💸 Price: {request.price}', reply_markup=IDLE_Kb())
        await user.update(inventory=user.inventory).apply()
    except ValueError:
        await m.delete()
        with suppress(MessageToDeleteNotFound):
            await trash.delete()
        await m.answer(text='❗️ You did not enter a number.', reply_markup=IDLE_Kb())
    finally:
        await state.reset_data()
        await state.reset_state()
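# --- Illustrative sketch (not part of the original file) ---
# How handlers like the ones above are typically registered in aiogram 2.x.
# The `dp` Dispatcher and the middleware that injects `user` are assumptions
# about the rest of this project; the callback prefixes match the slicing
# offsets used above ('undress_' -> c.data[8:], 'craft_' -> c.data[6:],
# 'sell_register_' -> c.data[14:]).
#
# dp.register_callback_query_handler(gear_unequip_query, lambda c: c.data.startswith('undress_'))
# dp.register_callback_query_handler(gear_craft_query, lambda c: c.data.startswith('craft_'))
# dp.register_callback_query_handler(gear_sell_register, lambda c: c.data.startswith('sell_register_'))
# dp.register_message_handler(gear_sell_registered, state=MainStates.selling)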
| 44.676923 | 184 | 0.609734 | 1,087 | 8,712 | 4.805888 | 0.221711 | 0.038285 | 0.059724 | 0.045559 | 0.453675 | 0.403331 | 0.360452 | 0.312404 | 0.285413 | 0.266271 | 0 | 0.002698 | 0.276745 | 8,712 | 194 | 185 | 44.907216 | 0.82225 | 0.007461 | 0 | 0.343558 | 0 | 0.030675 | 0.152938 | 0.010875 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.07362 | 0 | 0.08589 | 0.006135 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b1d65a917c8c063a1bd09d9e9f6843cb500fb33 | 701 | py | Python | app/project/config.py | caulagi/shakuni | f027810bc72b55da302d6672cd64fdf7c92f1661 | [
"MIT"
] | null | null | null | app/project/config.py | caulagi/shakuni | f027810bc72b55da302d6672cd64fdf7c92f1661 | [
"MIT"
] | null | null | null | app/project/config.py | caulagi/shakuni | f027810bc72b55da302d6672cd64fdf7c92f1661 | [
"MIT"
] | null | null | null | """
project.config

Configuration module holding all the options
"""
import os

DEBUG = True

BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# despite its name, this setting holds the full MongoDB connection URL
MONGO_DBNAME = os.environ.get("MONGOHQ_URL") or "mongodb://localhost:27017/shakuni"
THREADS_PER_PAGE = 2
CSRF_ENABLED = True
CSRF_SESSION_KEY = "secret"
SECRET_KEY = "secret"
STATIC_FOLDER = 'app/static'
TEMPLATES_FOLDER = 'app/templates'
FACEBOOK_APP_ID = os.environ.get("FACEBOOK_APP_ID") or '672966529447612'
FACEBOOK_APP_SECRET = os.environ.get("FACEBOOK_APP_SECRET") or '8e4a083bb66fc0e81d18e3acbd3b52aa'
# supported currencies
CURRENCIES = (
('INR', 'Indian Rupee'),
('USD', 'US Dollar'),
('GBP', 'Pound'),
('EUR', 'Euro'),
)
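# --- Illustrative sketch (not part of the original file) ---
# A typical way to consume a module like this from a Flask app; the `app`
# object and the dotted module path are assumptions about the project layout.
#
# from flask import Flask
# app = Flask(__name__)
# app.config.from_object('project.config')
# assert app.config['CSRF_ENABLED'] is True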
| 21.90625 | 97 | 0.723252 | 90 | 701 | 5.377778 | 0.644444 | 0.090909 | 0.07438 | 0.082645 | 0.095041 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060855 | 0.132668 | 701 | 31 | 98 | 22.612903 | 0.735197 | 0.114123 | 0 | 0 | 0 | 0 | 0.329527 | 0.106036 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b1e0e175fb077fad4c9db8318a631de85c5f035 | 2,934 | py | Python | Script/train_w2v.py | zrfan/Tencent-Ads-Algo-Comp-2020 | 8b52df4b86b95de581549e61d15a1403f636d530 | [
"MIT"
] | null | null | null | Script/train_w2v.py | zrfan/Tencent-Ads-Algo-Comp-2020 | 8b52df4b86b95de581549e61d15a1403f636d530 | [
"MIT"
] | null | null | null | Script/train_w2v.py | zrfan/Tencent-Ads-Algo-Comp-2020 | 8b52df4b86b95de581549e61d15a1403f636d530 | [
"MIT"
] | 2 | 2020-06-18T05:05:55.000Z | 2020-12-21T06:30:08.000Z | import os
import sys
import logging
import pickle
import json
import time
import tempfile
from gensim.models import Word2Vec
cwd = os.getcwd()
embed_path = os.path.join(cwd, 'embed_artifact')
# Training corpus for w2v model
corpus_dic = {
'creative': os.path.join(embed_path, 'embed_train_creative_id_seq.pkl'),
'ad': os.path.join(embed_path, 'embed_train_ad_id_seq.pkl'),
'advertiser': os.path.join(embed_path, 'embed_train_advertiser_id_seq.pkl'),
'product': os.path.join(embed_path, 'embed_train_product_id_seq.pkl')
}
def initiate_logger(log_path):
"""
Initialize a logger with file handler and stream handler
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)-s: %(message)s', datefmt='%H:%M:%S')
fh = logging.FileHandler(log_path)
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
logger.addHandler(fh)
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.INFO)
sh.setFormatter(formatter)
logger.addHandler(sh)
logger.info('===================================')
logger.info('Begin executing at {}'.format(time.ctime()))
logger.info('===================================')
return logger
def train(target, embed_size, logger=None):
"""
Train a Word2Vec Model and save the model artifact
"""
global corpus_dic, embed_path
assert target in corpus_dic
start = time.time()
with open(corpus_dic[target], 'rb') as f:
corpus = pickle.load(f)
if logger: logger.info('{} corpus is loaded after {:.2f}s'.format(target.capitalize(), time.time()-start))
    # NOTE: `size` is the gensim 3.x argument name; gensim 4+ renamed it to `vector_size`
    model = Word2Vec(sentences=corpus, size=embed_size, window=175, sg=1, hs=1, min_count=1, workers=16)
if logger: logger.info('{} w2v training is done after {:.2f}s'.format(target.capitalize(), time.time()-start))
save_path = os.path.join(embed_path, '{}_sg_embed_s{}_'.format(target, embed_size))
    # an absolute `prefix` makes tempfile create the file inside embed_path rather than the system temp dir
    with tempfile.NamedTemporaryFile(prefix=save_path, delete=False) as tmp:
tmp_file_path = tmp.name
model.save(tmp_file_path)
if logger: logger.info('{} w2v model is saved to {} after {:.2f}s'.format(target.capitalize(), tmp_file_path, time.time()-start))
return tmp_file_path
if __name__=='__main__':
    # usage: python train_w2v.py <target> <embed_size>
    assert len(sys.argv) == 3
target, embed_size = sys.argv[1], int(sys.argv[2])
# Set up w2v model registry
registry_path = os.path.join(embed_path, 'w2v_registry.json')
if os.path.isfile(registry_path):
with open(registry_path, 'r') as f:
w2v_registry = json.load(f)
else:
w2v_registry = {}
logger = initiate_logger('train_w2v.log')
# Train w2v model if there hasn't been one registered
if target not in w2v_registry:
w2v_path = train(target, embed_size, logger=logger)
w2v_registry[target] = w2v_path
else:
logger.info('{} w2v model found, skip'.format(target.capitalize()))
# Save w2v model registry
with open(registry_path, 'w') as f:
json.dump(w2v_registry, f)
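# --- Illustrative sketch (not part of the original file) ---
# How a downstream script could reload a trained embedding through the
# registry written above; paths follow this script's conventions, and the
# token type (string creative ids) is an assumption about the pickled corpora.
#
# import json
# from gensim.models import Word2Vec
# with open('embed_artifact/w2v_registry.json', 'r') as f:
#     w2v_registry = json.load(f)
# model = Word2Vec.load(w2v_registry['creative'])
# vector = model.wv['12345']  # embedding vector for one creative id token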
| 31.212766 | 130 | 0.719496 | 442 | 2,934 | 4.599548 | 0.30543 | 0.035416 | 0.034432 | 0.04427 | 0.182981 | 0.136744 | 0.099361 | 0.042302 | 0.042302 | 0 | 0 | 0.012796 | 0.120995 | 2,934 | 93 | 131 | 31.548387 | 0.775494 | 0.0818 | 0 | 0.058824 | 0 | 0 | 0.183895 | 0.070787 | 0 | 0 | 0 | 0 | 0.029412 | 1 | 0.029412 | false | 0 | 0.176471 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b1f18f1cb1193facb4ab6b88b9e77bb24dc04a6 | 8,632 | py | Python | src/utils.py | huyhoang17/DB_text_minimal | 0d1466889b21cb74a0571a0fb3856902739ea523 | [
"MIT"
] | 30 | 2020-07-20T12:13:27.000Z | 2022-03-08T06:30:31.000Z | src/utils.py | huyhoang17/DB_text_minimal | 0d1466889b21cb74a0571a0fb3856902739ea523 | [
"MIT"
] | 10 | 2020-08-11T10:21:11.000Z | 2022-03-07T15:27:49.000Z | src/utils.py | huyhoang17/DB_text_minimal | 0d1466889b21cb74a0571a0fb3856902739ea523 | [
"MIT"
] | 6 | 2020-09-02T10:58:00.000Z | 2021-08-13T01:43:47.000Z | import os
import gc
import glob
import time
import random
import imageio
import logging
from functools import wraps
import cv2
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision.utils as torch_utils
from postprocess import SegDetectorRepresenter
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = 'cpu'
def setup_determinism(seed=42):
"""
https://github.com/pytorch/pytorch/issues/7068#issuecomment-487907668
"""
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def setup_logger(logger_name='dbtext', log_file_path=None):
logging._warn_preinit_stderr = 0
logger = logging.getLogger(logger_name)
formatter = logging.Formatter(
'%(asctime)s %(name)s %(levelname)s: %(message)s')
if log_file_path is not None:
file_handle = logging.FileHandler(log_file_path)
file_handle.setFormatter(formatter)
logger.addHandler(file_handle)
logger.setLevel(logging.DEBUG)
return logger
def timer(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
print(">>> Function {}: {}'s".format(func.__name__, end - start))
return result
return wrapper
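# Example usage of the decorator above (illustrative, not part of the original file):
#
# @timer
# def heavy_op(n):
#     return sum(range(n))
#
# heavy_op(10 ** 7)  # prints: >>> Function heavy_op: 0.xx's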
def to_device(batch, device='cuda'):
new_batch = []
for ele in batch:
if isinstance(ele, torch.Tensor):
new_batch.append(ele.to(device))
else:
new_batch.append(ele)
return new_batch
def dict_to_device(batch, device='cuda'):
for k, v in batch.items():
if isinstance(v, torch.Tensor):
batch[k] = v.to(device)
return batch
def to_list_tuples_coords(anns):
new_anns = []
for ann in anns:
points = []
for x, y in ann:
points.append((x[0].tolist(), y[0].tolist()))
new_anns.append(points)
return new_anns
def matplotlib_imshow(img, one_channel=False):
if one_channel:
img = img.mean(dim=0)
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
if one_channel:
plt.imshow(npimg, cmap="Greys")
else:
plt.imshow(np.transpose(npimg, (1, 2, 0)))
def str_to_bool(value):
    # comparison is done on the lowercased value, so only lowercase variants are listed
    if value.lower() in {'false', 'f', '0', 'no', 'n'}:
        return False
    elif value.lower() in {'true', 't', '1', 'yes', 'y'}:
        return True
    raise ValueError('{} is not a valid boolean value'.format(value))
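# Typical argparse usage for the helper above (illustrative, not part of the original file):
#
# import argparse
# parser = argparse.ArgumentParser()
# parser.add_argument('--is_output_polygon', type=str_to_bool, default=False)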
def minmax_scaler_img(img):
img = ((img - img.min()) * (1 / (img.max() - img.min()) * 255)).astype(
'uint8') # noqa
return img
def visualize_tfb(tfb_writer,
imgs,
preds,
global_steps,
thresh=0.5,
mode="TRAIN"):
# origin img
# imgs.shape = (batch_size, 3, image_size, image_size)
imgs = torch.stack([
torch.Tensor(
minmax_scaler_img(img_.to('cpu').numpy().transpose((1, 2, 0))))
for img_ in imgs
])
imgs = torch.Tensor(imgs.numpy().transpose((0, 3, 1, 2)))
imgs_grid = torch_utils.make_grid(imgs)
imgs_grid = torch.unsqueeze(imgs_grid, 0)
# imgs_grid.shape = (3, image_size, image_size * batch_size)
tfb_writer.add_images('{}/origin_imgs'.format(mode), imgs_grid,
global_steps)
# pred_prob_map / pred_thresh_map
pred_prob_map = preds[:, 0, :, :]
pred_thred_map = preds[:, 1, :, :]
pred_prob_map[pred_prob_map <= thresh] = 0
pred_prob_map[pred_prob_map > thresh] = 1
# make grid
pred_prob_map = pred_prob_map.unsqueeze(1)
pred_thred_map = pred_thred_map.unsqueeze(1)
probs_grid = torch_utils.make_grid(pred_prob_map, padding=0)
probs_grid = torch.unsqueeze(probs_grid, 0)
probs_grid = probs_grid.detach().to('cpu')
thres_grid = torch_utils.make_grid(pred_thred_map, padding=0)
thres_grid = torch.unsqueeze(thres_grid, 0)
thres_grid = thres_grid.detach().to('cpu')
tfb_writer.add_images('{}/prob_imgs'.format(mode), probs_grid,
global_steps)
tfb_writer.add_images('{}/thres_imgs'.format(mode), thres_grid,
global_steps)
def test_resize(img, size=640, pad=False):
h, w, c = img.shape
scale_w = size / w
scale_h = size / h
scale = min(scale_w, scale_h)
h = int(h * scale)
w = int(w * scale)
new_img = None
if pad:
new_img = np.zeros((size, size, c), img.dtype)
new_img[:h, :w] = cv2.resize(img, (w, h))
else:
new_img = cv2.resize(img, (w, h))
return new_img
def read_img(img_fp):
img = cv2.imread(img_fp)[:, :, ::-1]
h_origin, w_origin, _ = img.shape
return img, h_origin, w_origin
def test_preprocess(img,
mean=[103.939, 116.779, 123.68],
to_tensor=True,
pad=False):
img = test_resize(img, size=640, pad=pad)
img = img.astype(np.float32)
img[..., 0] -= mean[0]
img[..., 1] -= mean[1]
img[..., 2] -= mean[2]
img = np.expand_dims(img, axis=0)
if to_tensor:
img = torch.Tensor(img.transpose(0, 3, 1, 2))
return img
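# --- Illustrative sketch (not part of the original file) ---
# Putting read_img/test_preprocess together for single-image inference;
# `model` (a trained DB-text network producing prob/thresh maps) is an assumption.
#
# img, h_origin, w_origin = read_img('sample.jpg')
# batch = test_preprocess(img, to_tensor=True, pad=False).to(device)
# with torch.no_grad():
#     preds = model(batch)  # expected shape: (1, 2, H, W)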
def draw_bbox(img, result, color=(255, 0, 0), thickness=3):
"""
:input: RGB img
"""
if isinstance(img, str):
img = cv2.imread(img)
img = img.copy()
for point in result:
point = point.astype(int)
cv2.polylines(img, [point], True, color, thickness)
return img
def visualize_heatmap(args, img_fn, tmp_img, tmp_pred):
pred_prob = tmp_pred[0]
pred_prob[pred_prob <= args.prob_thred] = 0
pred_prob[pred_prob > args.prob_thred] = 1
np_img = minmax_scaler_img(tmp_img[0].to(device).numpy().transpose(
(1, 2, 0)))
plt.imshow(np_img)
plt.imshow(pred_prob, cmap='jet', alpha=args.alpha)
img_fn = "heatmap_result_{}".format(img_fn)
plt.savefig(os.path.join(args.save_dir, img_fn),
dpi=200,
bbox_inches='tight')
gc.collect()
def visualize_polygon(args, img_fn, origin_info, batch, preds, vis_char=False):
img_origin, h_origin, w_origin = origin_info
seg_obj = SegDetectorRepresenter(thresh=args.thresh,
box_thresh=args.box_thresh,
unclip_ratio=args.unclip_ratio)
box_list, score_list = seg_obj(batch,
preds,
is_output_polygon=args.is_output_polygon)
box_list, score_list = box_list[0], score_list[0]
if len(box_list) > 0:
if args.is_output_polygon:
idx = [x.sum() > 0 for x in box_list]
box_list = [box_list[i] for i, v in enumerate(idx) if v]
score_list = [score_list[i] for i, v in enumerate(idx) if v]
else:
idx = box_list.reshape(box_list.shape[0], -1).sum(axis=1) > 0
box_list, score_list = box_list[idx], score_list[idx]
else:
box_list, score_list = [], []
tmp_img = draw_bbox(img_origin, np.array(box_list))
tmp_pred = cv2.resize(preds[0, 0, :, :].cpu().numpy(),
(w_origin, h_origin))
# https://stackoverflow.com/questions/42262198
h_, w_ = 32, 100
if not args.is_output_polygon and vis_char:
char_img_fps = glob.glob(os.path.join("./tmp/reconized", "*"))
for char_img_fp in char_img_fps:
os.remove(char_img_fp)
for index, (box_list_,
score_list_) in enumerate(zip(box_list,
score_list)): # noqa
src_pts = np.array(box_list_.tolist(), dtype=np.float32)
dst_pts = np.array([[0, 0], [w_, 0], [w_, h_], [0, h_]],
dtype=np.float32)
M = cv2.getPerspectiveTransform(src_pts, dst_pts)
warp = cv2.warpPerspective(img_origin, M, (w_, h_))
imageio.imwrite("./tmp/reconized/word_{}.jpg".format(index), warp)
plt.imshow(tmp_img)
plt.imshow(tmp_pred, cmap='inferno', alpha=args.alpha)
if args.is_output_polygon:
img_fn = "poly_result_{}".format(img_fn)
else:
img_fn = "rect_result_{}".format(img_fn)
plt.savefig(os.path.join(args.save_dir, img_fn),
dpi=200,
bbox_inches='tight')
gc.collect()
| 30.394366 | 79 | 0.597544 | 1,197 | 8,632 | 4.086048 | 0.227235 | 0.022899 | 0.020241 | 0.019628 | 0.165406 | 0.114087 | 0.080147 | 0.057248 | 0.044981 | 0.044981 | 0 | 0.02381 | 0.270158 | 8,632 | 283 | 80 | 30.501767 | 0.75254 | 0.052132 | 0 | 0.122066 | 0 | 0 | 0.040541 | 0.003317 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079812 | false | 0 | 0.065728 | 0 | 0.206573 | 0.004695 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b1f289d94d22713713a02c29b3bffd65bfda6e1 | 45,021 | py | Python | example/demos/views.py | bashu/django-uncharted | b285b4dfc8310cb62e7535fb39326916e2c81159 | [
"MIT"
] | 9 | 2015-06-07T06:50:42.000Z | 2020-09-04T05:57:20.000Z | example/demos/views.py | bashu/django-uncharted | b285b4dfc8310cb62e7535fb39326916e2c81159 | [
"MIT"
] | 1 | 2015-09-24T08:17:25.000Z | 2019-03-31T03:51:00.000Z | example/demos/views.py | bashu/django-uncharted | b285b4dfc8310cb62e7535fb39326916e2c81159 | [
"MIT"
] | 2 | 2018-11-13T22:56:05.000Z | 2020-11-18T07:18:49.000Z | # -*- coding: utf-8 -*-
from random import random
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
from django.views.generic import TemplateView
from uncharted.chart import *
class Area100PercentStacked(TemplateView):
template_name = 'area/chart.html'
chartData = [
{
'year': 2000,
'cars': 1587,
'motorcycles': 650,
'bicycles': 121
}, {
'year': 1995,
'cars': 1567,
'motorcycles': 683,
'bicycles': 146
}, {
'year': 1996,
'cars': 1617,
'motorcycles': 691,
'bicycles': 138
}, {
'year': 1997,
'cars': 1630,
'motorcycles': 642,
'bicycles': 127
}, {
'year': 1998,
'cars': 1660,
'motorcycles': 699,
'bicycles': 105
}, {
'year': 1999,
'cars': 1683,
'motorcycles': 721,
'bicycles': 109
}, {
'year': 2000,
'cars': 1691,
'motorcycles': 737,
'bicycles': 112
}, {
'year': 2001,
'cars': 1298,
'motorcycles': 680,
'bicycles': 101
}, {
'year': 2002,
'cars': 1275,
'motorcycles': 664,
'bicycles': 97
}, {
'year': 2003,
'cars': 1246,
'motorcycles': 648,
'bicycles': 93
}, {
'year': 2004,
'cars': 1218,
'motorcycles': 637,
'bicycles': 101
}, {
'year': 2005,
'cars': 1213,
'motorcycles': 633,
'bicycles': 87
}, {
'year': 2006,
'cars': 1199,
'motorcycles': 621,
'bicycles': 79
}, {
'year': 2007,
'cars': 1110,
'motorcycles': 210,
'bicycles': 81
}, {
'year': 2008,
'cars': 1165,
'motorcycles': 232,
'bicycles': 75
}, {
'year': 2009,
'cars': 1145,
'motorcycles': 219,
'bicycles': 88
}, {
'year': 2010,
'cars': 1163,
'motorcycles': 201,
'bicycles': 82
}, {
'year': 2011,
'cars': 1180,
'motorcycles': 285,
'bicycles': 87
}, {
'year': 2012,
'cars': 1159,
'motorcycles': 277,
'bicycles': 71
}]
def get_context_data(self, *args, **kwargs):
context = super(Area100PercentStacked, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="year",
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
chart.zoomOutButton = {
'backgroundColor': "#000000",
'backgroundAlpha': 0.15,
}
chart.addTitle("Traffic incidents per year", 15)
# AXES
# Category
chart.categoryAxis.gridAlpha = 0.07
chart.categoryAxis.axisColor = "#DADADA"
chart.categoryAxis.startOnAxis = True
# Value
valueAxis = amValueAxis(title="percent", stackType="100%", gridAlpha=0.07)
chart.addValueAxis(valueAxis)
# GRAPHS
# first graph
graph = amGraph(
type="line",
title="Cars",
valueField="cars",
balloonText="[[value]] ([[percents]]%)",
lineAlpha=0,
fillAlphas=0.6,
)
chart.addGraph(graph)
# second graph
graph = amGraph(
type="line",
title="Motorcycles",
valueField="motorcycles",
balloonText="[[value]] ([[percents]]%)",
lineAlpha=0,
fillAlphas=0.6,
)
chart.addGraph(graph)
# third graph
graph = amGraph(
type="line",
title="Bicycles",
valueField="bicycles",
balloonText="[[value]] ([[percents]]%)",
lineAlpha=0,
fillAlphas=0.6,
)
chart.addGraph(graph)
# LEGEND
legend = amLegend(align="center")
chart.addLegend(legend)
# CURSOR
chartCursor = amChartCursor(zoomable=False, cursorAlpha=0)
chart.addChartCursor(chartCursor)
context['chart'] = chart
return context
area100PercentStacked = Area100PercentStacked.as_view()
class AreaStacked(Area100PercentStacked):
def get_context_data(self, *args, **kwargs):
context = super(AreaStacked, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
marginTop=10,
dataProvider=self.chartData,
categoryField="year",
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
chart.zoomOutButton = {
'backgroundColor': "#000000",
'backgroundAlpha': 0.15,
}
# AXES
# Category
chart.categoryAxis.gridAlpha = 0.07
chart.categoryAxis.axisColor = "#DADADA"
chart.categoryAxis.startOnAxis = True
# Value
valueAxis = amValueAxis(
title="Traffic incidents",
stackType="regular", # this line makes the chart "stacked"
gridAlpha=0.07,
)
chart.addValueAxis(valueAxis)
# GUIDES are vertical (can also be horizontal) lines (or areas) marking some event.
# first guide
guide1 = amGuide(
category="2001",
lineColor="#CC0000",
lineAlpha=1,
dashLength=2,
inside=True,
labelRotation=90,
label="fines for speeding increased",
)
        chart.categoryAxis.addGuide(guide1)
# second guide
guide2 = amGuide(
category="2007",
lineColor="#CC0000",
lineAlpha=1,
dashLength=2,
inside=True,
labelRotation=90,
label="motorcycle maintenance fee introduced",
)
        chart.categoryAxis.addGuide(guide2)
# GRAPHS
# first graph
graph = amGraph(
type="line",
title="Cars",
valueField="cars",
balloonText="[[value]] ([[percents]]%)",
lineAlpha=1,
fillAlphas=0.6, # setting fillAlphas to > 0 value makes it area graph
hidden=True,
)
chart.addGraph(graph)
# second graph
graph = amGraph(
type="line",
title="Motorcycles",
valueField="motorcycles",
balloonText="[[value]] ([[percents]]%)",
lineAlpha=1,
fillAlphas=0.6,
)
chart.addGraph(graph)
# third graph
graph = amGraph(
type="line",
title="Bicycles",
valueField="bicycles",
balloonText="[[value]] ([[percents]]%)",
lineAlpha=1,
fillAlphas=0.6,
)
chart.addGraph(graph)
# LEGEND
legend = amLegend(position="top")
chart.addLegend(legend)
# CURSOR
chartCursor = amChartCursor(zoomable=False, cursorAlpha=0)
chart.addChartCursor(chartCursor)
context['chart'] = chart
return context
areaStacked = AreaStacked.as_view()
class AreaWithTimeBasedData(Area100PercentStacked):
@property
def chartData(self):
output = []
d = timezone.now() - timedelta(minutes=1000)
        for i in range(0, 1000):
d = d + timedelta(minutes=1)
value = int((random() * 40) + 10)
output.append({
                'date': d,  # .isoformat()
'visits': value,
})
return output
def get_context_data(self, *args, **kwargs):
context = super(AreaWithTimeBasedData, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
marginRight=30,
dataProvider=self.chartData,
categoryField="date",
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
chart.zoomOutButton = {
'backgroundColor': "#000000",
'backgroundAlpha': 0.15,
}
        chart.addListener("dataUpdated", "zoomChart")
# AXES
# Category
chart.categoryAxis.parseDates = True
chart.categoryAxis.minPeriod = "mm"
chart.categoryAxis.gridAlpha = 0.07
chart.categoryAxis.axisColor = "#DADADA"
# Value
valueAxis = amValueAxis(
title="Unique visitors",
gridAlpha=0.07,
)
chart.addValueAxis(valueAxis)
# GRAPHS
# first graph
graph = amGraph(
type="line",
title="red line",
valueField="visits",
lineAlpha=1,
lineColor="#d1cf2a",
fillAlphas=0.3, # setting fillAlphas to > 0 value makes it area graph
)
chart.addGraph(graph)
# CURSOR
chartCursor = amChartCursor(
cursorPosition="mouse",
categoryBalloonDateFormat="JJ:NN, DD MMMM",
)
chart.addChartCursor(chartCursor)
# SCROLLBAR
chartScrollbar = amChartScrollbar()
chart.addChartScrollbar(chartScrollbar)
context['chart'] = chart
return context
areaWithTimeBasedData = AreaWithTimeBasedData.as_view()
class Bar3D(TemplateView):
template_name = 'bar/chart.html'
chartData = [
{
'year': 2005,
'income': 23.5
}, {
'year': 2006,
'income': 26.2
}, {
'year': 2007,
'income': 30.1
}, {
'year': 2008,
'income': 29.5
}, {
'year': 2009,
'income': 24.6
}]
def get_context_data(self, *args, **kwargs):
context = super(Bar3D, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="year",
rotate=True,
depth3D=20,
angle=30,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# AXES
# Category
chart.categoryAxis.gridPosition = "start"
chart.categoryAxis.axisColor = "#DADADA"
chart.categoryAxis.fillAlpha = 1
chart.categoryAxis.gridAlpha = 0
chart.categoryAxis.fillColor = "#FAFAFA"
# Value
valueAxis = amValueAxis(title="Income in millions, USD", axisColor="#DADADA", gridAlpha=0.1)
chart.addValueAxis(valueAxis)
# GRAPHS
graph = amGraph(
type="column",
title="Income",
valueField="income",
balloonText="Income in [[category]]:[[value]]",
lineAlpha=0,
fillColors=["#bf1c25"],
fillAlphas=1,
)
chart.addGraph(graph)
context['chart'] = chart
return context
bar3D = Bar3D.as_view()
class BarAndLineMix(Bar3D):
chartData = [
{
'year': 2005,
'income': 23.5,
'expenses': 18.1
}, {
'year': 2006,
'income': 26.2,
'expenses': 22.8
}, {
'year': 2007,
'income': 30.1,
'expenses': 23.9
}, {
'year': 2008,
'income': 29.5,
'expenses': 25.1
}, {
'year': 2009,
'income': 24.6,
'expenses': 25.0
}]
def get_context_data(self, *args, **kwargs):
context = super(BarAndLineMix, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="year",
startDuration=1,
rotate=True,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# AXES
# Category
chart.categoryAxis.gridPosition = "start"
chart.categoryAxis.axisColor = "#DADADA"
chart.categoryAxis.dashLength = 5
# Value
valueAxis = amValueAxis(
title="Million USD",
dashLength=5,
axisAlpha=0.2,
position="top",
)
chart.addValueAxis(valueAxis)
# GRAPHS
# column graph
graph1 = amGraph(
type="column",
title="Income",
valueField="income",
lineAlpha=0,
fillColors=["#ADD981"],
fillAlphas=1,
)
chart.addGraph(graph1)
# line graph
graph2 = amGraph(
type="line",
title="Expenses",
valueField="expenses",
lineThickness=2,
bullet="round",
fillAlphas=0,
)
chart.addGraph(graph2)
# LEGEND
legend = amLegend()
chart.addLegend(legend)
context['chart'] = chart
return context
barAndLineMix = BarAndLineMix.as_view()
class BarClustered(BarAndLineMix):
def get_context_data(self, *args, **kwargs):
context = super(BarClustered, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="year",
startDuration=1,
plotAreaBorderColor="#DADADA",
plotAreaBorderAlpha=1,
rotate=True,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# AXES
# Category
chart.categoryAxis.gridPosition = "start"
chart.categoryAxis.gridAlpha = 0.1
chart.categoryAxis.axisAlpha = 0
# Value
valueAxis = amValueAxis(
axisAlpha=0,
gridAlpha=0.1,
position="top",
)
chart.addValueAxis(valueAxis)
# GRAPHS
# first graph
graph1 = amGraph(
type="column",
title="Income",
valueField="income",
balloonText="Income:[[value]]",
lineAlpha=0,
fillColors=["#ADD981"],
fillAlphas=1,
)
chart.addGraph(graph1)
# second graph
graph2 = amGraph(
type="column",
title="Expenses",
valueField="expenses",
balloonText="Expenses:[[value]]",
lineAlpha=0,
fillColors=["#81acd9"],
fillAlphas=1,
)
chart.addGraph(graph2)
# LEGEND
legend = amLegend()
chart.addLegend(legend)
context['chart'] = chart
return context
barClustered = BarClustered.as_view()
class BarFloating(BarClustered):
template_name = 'area/chart.html'
chartData = [
{
'name': "John",
'startTime': 8,
'endTime': 11,
'color': "#FF0F00"
}, {
'name': "Joe",
'startTime': 10,
'endTime': 13,
'color': "#FF9E01"
}, {
'name': "Susan",
'startTime': 11,
'endTime': 18,
'color': "#F8FF01"
}, {
'name': "Eaton",
'startTime': 15,
'endTime': 19,
'color': "#04D215"
}]
def get_context_data(self, *args, **kwargs):
context = super(BarFloating, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="name",
startDuration=1,
columnWidth=0.9,
rotate=True,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# AXES
# Category
chart.categoryAxis.gridPosition = "start"
chart.categoryAxis.gridAlpha = 0.1
chart.categoryAxis.axisAlpha = 0
# Value
valueAxis = amValueAxis(
axisAlpha=0,
gridAlpha=0.1,
unit=":00",
)
chart.addValueAxis(valueAxis)
# GRAPHS
graph1 = amGraph(
type="column",
valueField="endTime",
openField="startTime",
balloonText="Income:[[value]]",
lineAlpha=0,
colorField="color",
fillAlphas=0.8,
)
chart.addGraph(graph1)
context['chart'] = chart
return context
barFloating = BarFloating.as_view()
class BarStacked(BarFloating):
template_name = 'bar/3d.html'
chartData = [
{
'year': "2003",
'europe': 2.5,
'namerica': 2.5,
'asia': 2.1,
'lamerica': 0.3,
'meast': 0.2,
'africa': 0.1
}, {
'year': "2004",
'europe': 2.6,
'namerica': 2.7,
'asia': 2.2,
'lamerica': 0.3,
'meast': 0.3,
'africa': 0.1
}, {
'year': "2005",
'europe': 2.8,
'namerica': 2.9,
'asia': 2.4,
'lamerica': 0.3,
'meast': 0.3,
'africa': 0.1
}]
def get_context_data(self, *args, **kwargs):
context = super(BarStacked, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="year",
plotAreaBorderAlpha=0.2,
rotate=True,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# AXES
# Category
chart.categoryAxis.gridPosition = "start"
chart.categoryAxis.gridAlpha = 0.1
chart.categoryAxis.axisAlpha = 0
# Value
valueAxis = amValueAxis(
axisAlpha=0,
gridAlpha=0.1,
stackType="regular",
)
chart.addValueAxis(valueAxis)
# GRAPHS
# first graph
graph1 = amGraph(
type="column",
title="Europe",
labelText="[[value]]",
valueField="europe",
lineAlpha=0,
fillAlphas=1,
lineColor="#C72C95",
)
chart.addGraph(graph1)
# second graph
graph2 = amGraph(
type="column",
title="North America",
labelText="[[value]]",
valueField="namerica",
lineAlpha=0,
fillAlphas=1,
lineColor="#D8E0BD",
)
chart.addGraph(graph2)
# third graph
graph3 = amGraph(
type="column",
title="Asia-Pacific",
labelText="[[value]]",
valueField="asia",
lineAlpha=0,
fillAlphas=1,
lineColor="#B3DBD4",
)
chart.addGraph(graph3)
# forth graph
graph4 = amGraph(
type="column",
title="Latin America",
labelText="[[value]]",
valueField="lamerica",
lineAlpha=0,
fillAlphas=1,
lineColor="#69A55C",
)
chart.addGraph(graph4)
# fifth graph
graph5 = amGraph(
type="column",
title="Middle-East",
labelText="[[value]]",
valueField="meast",
lineAlpha=0,
fillAlphas=1,
lineColor="#B5B8D3",
)
chart.addGraph(graph5)
# sixth graph
graph6 = amGraph(
type="column",
title="Africa",
labelText="[[value]]",
valueField="africa",
lineAlpha=0,
fillAlphas=1,
lineColor="#F4E23B",
)
chart.addGraph(graph6)
# LEGEND
legend = amLegend()
legend.position = "right"
legend.borderAlpha = 0.3
legend.horizontalGap = 10
legend.switchType = "v"
chart.addLegend(legend)
context['chart'] = chart
return context
barStacked = BarStacked.as_view()
class BarWithBackgroundImage(BarStacked):
template_name = 'bar/bg.html'
chartData = [
{
'country': "Czech Republic",
'litres': 156.90,
'short': "CZ"
}, {
'country': "Ireland",
'litres': 131.10,
'short': "IR"
}, {
'country': "Germany",
'litres': 115.80,
'short': "DE"
}, {
'country': "Australia",
'litres': 109.90,
'short': "AU"
}, {
'country': "Austria",
'litres': 108.30,
'short': "AT"
}, {
'country': "UK",
'litres': 99.00,
'short': "UK"
}, {
'country': "Belgium",
'litres': 93.00,
'short': "BE"
}]
def get_context_data(self, *args, **kwargs):
context = super(BarWithBackgroundImage, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="country",
color="#FFFFFF",
rotate=True,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
        # this line makes the chart show an image in the background
        chart.backgroundImage = "%simages/bg.jpg" % settings.STATIC_URL
        # sometimes we need to set margins manually;
        # autoMargins should be set to False in order for the chart to use custom margin values
chart.autoMargins = False
chart.marginTop = 100
chart.marginLeft = 50
chart.marginRight = 30
chart.startDuration = 2
# AXES
# Category
chart.categoryAxis.labelsEnabled = False
chart.categoryAxis.gridAlpha = 0
chart.categoryAxis.axisAlpha = 0
# Value
valueAxis = amValueAxis(
axisAlpha=0,
gridAlpha=0,
labelsEnabled=False,
minimum=0,
)
chart.addValueAxis(valueAxis)
# GRAPHS
graph = amGraph(
type="column",
valueField="litres",
lineAlpha=0,
fillAlphas=0.5,
            # you can pass any number of colors in an array to create fancier gradients
fillColors=["#000000", "#FF6600"],
gradientOrientation="horizontal",
labelPosition="bottom",
labelText="[[category]]: [[value]] Litres",
balloonText="[[category]]: [[value]] Litres",
)
chart.addGraph(graph)
# LABEL
        chart.addLabel(50, 40, "Beer Consumption by country", "left", 15, "#000000", 0, 1, True)
context['chart'] = chart
return context
barWithBackgroundImage = BarWithBackgroundImage.as_view()
class Column100PercentStacked(TemplateView):
template_name = 'column/stacked.html'
chartData = [
{
"year": "2003",
"europe": 2.5,
"namerica": 2.5,
"asia": 2.1,
"lamerica": 0.3,
"meast": 0.2,
"africa": 0.1
}, {
"year": "2004",
"europe": 2.6,
"namerica": 2.7,
"asia": 2.2,
"lamerica": 0.3,
"meast": 0.3,
"africa": 0.1
}, {
"year": "2005",
"europe": 2.8,
"namerica": 2.9,
"asia": 2.4,
"lamerica": 0.3,
"meast": 0.3,
"africa": 0.1
}]
def get_context_data(self, *args, **kwargs):
context = super(Column100PercentStacked, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="year",
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# sometimes we need to set margins manually
        # autoMargins should be set to False in order for the chart to use custom margin values
chart.autoMargins = False
chart.marginLeft = 0
chart.marginRight = 0
chart.marginTop = 30
chart.marginBottom = 40
# AXES
# Category
chart.categoryAxis.gridAlpha = 0
chart.categoryAxis.axisAlpha = 0
chart.categoryAxis.gridPosition = "start"
# Value
valueAxis = amValueAxis(
stackType="100%", # this line makes the chart 100% stacked
gridAlpha=0,
axisAlpha=0,
labelsEnabled=False,
)
chart.addValueAxis(valueAxis)
# GRAPHS
# first graph
graph1 = amGraph(
title="Europe",
labelText="[[percents]]%",
balloonText="[[value]] ([[percents]]%)",
valueField="europe",
type="column",
lineAlpha=0,
fillAlphas=1,
lineColor="#C72C95",
)
chart.addGraph(graph1)
# second graph
graph2 = amGraph(
title="North America",
labelText="[[percents]]%",
balloonText="[[value]] ([[percents]]%)",
valueField="namerica",
type="column",
lineAlpha=0,
fillAlphas=1,
lineColor="#D8E0BD",
)
chart.addGraph(graph2)
# third graph
graph3 = amGraph(
title="Asia-Pacific",
labelText="[[percents]]%",
balloonText="[[value]] ([[percents]]%)",
valueField="asia",
type="column",
lineAlpha=0,
fillAlphas=1,
lineColor="#B3DBD4",
)
chart.addGraph(graph3)
# fourth graph
graph4 = amGraph(
title="Latin America",
labelText="[[percents]]%",
balloonText="[[value]] ([[percents]]%)",
valueField="lamerica",
type="column",
lineAlpha=0,
fillAlphas=1,
lineColor="#69A55C",
)
chart.addGraph(graph4)
# fifth graph
graph5 = amGraph(
title="Middle-East",
labelText="[[percents]]%",
balloonText="[[value]] ([[percents]]%)",
valueField="meast",
type="column",
lineAlpha=0,
fillAlphas=1,
lineColor="#B5B8D3",
)
chart.addGraph(graph5)
# sixth graph
graph6 = amGraph(
title="Africa",
labelText="[[percents]]%",
balloonText="[[value]] ([[percents]]%)",
valueField="africa",
type="column",
lineAlpha=0,
fillAlphas=1,
lineColor="#F4E23B",
)
chart.addGraph(graph6)
# LEGEND
legend = amLegend(
borderAlpha=0.2,
horizontalGap=10,
autoMargins=False,
marginLeft=30,
marginRight=30,
switchType="v",
)
chart.addLegend(legend)
context['chart'] = chart
return context
column100PercentStacked = Column100PercentStacked.as_view()
class Column3D(Column100PercentStacked):
template_name = 'column/chart.html'
chartData = [
{
"country": "USA",
"visits": 4025,
"color": "#FF0F00"
}, {
"country": "China",
"visits": 1882,
"color": "#FF6600"
}, {
"country": "Japan",
"visits": 1809,
"color": "#FF9E01"
}, {
"country": "Germany",
"visits": 1322,
"color": "#FCD202"
}, {
"country": "UK",
"visits": 1122,
"color": "#F8FF01"
}, {
"country": "France",
"visits": 1114,
"color": "#B0DE09"
}, {
"country": "India",
"visits": 984,
"color": "#04D215"
}, {
"country": "Spain",
"visits": 711,
"color": "#0D8ECF"
}, {
"country": "Netherlands",
"visits": 665,
"color": "#0D52D1"
}, {
"country": "Russia",
"visits": 580,
"color": "#2A0CD0"
}, {
"country": "South Korea",
"visits": 443,
"color": "#8A0CCF"
}, {
"country": "Canada",
"visits": 441,
"color": "#CD0D74"
}, {
"country": "Brazil",
"visits": 395,
"color": "#754DEB"
}, {
"country": "Italy",
"visits": 386,
"color": "#DDDDDD"
}, {
"country": "Australia",
"visits": 384,
"color": "#999999"
}, {
"country": "Taiwan",
"visits": 338,
"color": "#333333"
}, {
"country": "Poland",
"visits": 328,
"color": "#000000"
}]
def get_context_data(self, *args, **kwargs):
context = super(Column3D, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="country",
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# the following two lines makes chart 3D
chart.depth3D = 20
chart.angle = 30
# AXES
# Category
chart.categoryAxis.labelRotation = 90
chart.categoryAxis.dashLength = 5
chart.categoryAxis.gridPosition = "start"
# Value
valueAxis = amValueAxis(
dashLength=5,
)
chart.addValueAxis(valueAxis)
# GRAPHS
graph = amGraph(
type="column",
valueField="visits",
colorField="color",
lineAlpha=0,
fillAlphas=1,
balloonText="[[category]]: [[value]]",
)
chart.addGraph(graph)
context['chart'] = chart
return context
column3D = Column3D.as_view()
class Column3DStacked(Column100PercentStacked):
template_name = 'column/3d.html'
chartData = [
{
"country": "USA",
"year2004": 3.5,
"year2005": 4.2
}, {
"country": "UK",
"year2004": 1.7,
"year2005": 3.1
}, {
"country": "Canada",
"year2004": 2.8,
"year2005": 2.9
}, {
"country": "Japan",
"year2004": 2.6,
"year2005": 2.3
}, {
"country": "France",
"year2004": 1.4,
"year2005": 2.1
}, {
"country": "Brazil",
"year2004": 2.6,
"year2005": 4.9
}, {
"country": "Russia",
"year2004": 6.4,
"year2005": 7.2
}, {
"country": "India",
"year2004": 8.0,
"year2005": 7.1
}, {
"country": "China",
"year2004": 9.9,
"year2005": 10.1
}]
def get_context_data(self, *args, **kwargs):
context = super(Column3DStacked, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="country",
color="#FFFFFF",
startDuration=1,
plotAreaFillAlphas=0.2,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# the following two lines makes chart 3D
chart.angle = 30
chart.depth3D = 60
# AXES
# Category
chart.categoryAxis.gridAlpha = 0.2
chart.categoryAxis.gridPosition = "start"
chart.categoryAxis.gridColor = "#FFFFFF"
chart.categoryAxis.axisColor = "#FFFFFF"
chart.categoryAxis.axisAlpha = 0.5
chart.categoryAxis.dashLength = 5
# Value
valueAxis = amValueAxis(
stackType="3d", # This line makes chart 3D stacked (columns are placed one behind another)
gridAlpha=0.2,
gridColor="#FFFFFF",
axisColor="#FFFFFF",
axisAlpha=0.5,
dashLength=5,
title="GDP growth rate",
titleBold=False,
unit="%",
)
chart.addValueAxis(valueAxis)
# GRAPHS
# first graph
graph1 = amGraph(
title="2004",
valueField="year2004",
type="column",
lineAlpha=0,
lineColor="#D2CB00",
fillAlphas=1,
balloonText="GDP grow in [[category]] (2004): [[value]]",
)
chart.addGraph(graph1)
# second graph
graph2 = amGraph(
title="2005",
valueField="year2005",
type="column",
lineAlpha=0,
lineColor="#BEDF66",
fillAlphas=1,
balloonText="GDP grow in [[category]] (2005): [[value]]",
)
chart.addGraph(graph2)
context['chart'] = chart
return context
column3DStacked = Column3DStacked.as_view()
class ColumnAndLineMix(Column100PercentStacked):
chartData = [
{
"year": 2005,
"income": 23.5,
"expenses": 18.1
}, {
"year": 2006,
"income": 26.2,
"expenses": 22.8
}, {
"year": 2007,
"income": 30.1,
"expenses": 23.9
}, {
"year": 2008,
"income": 29.5,
"expenses": 25.1
}, {
"year": 2009,
"income": 24.6,
"expenses": 25.0
}]
def get_context_data(self, *args, **kwargs):
context = super(ColumnAndLineMix, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="year",
startDuration=1,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# AXES
# Category
chart.categoryAxis.gridPosition = "start"
# Value
valueAxis = amValueAxis(
axisAlpha=0,
tickLength=0,
)
chart.addValueAxis(valueAxis)
# GRAPHS
# column graph
graph1 = amGraph(
type="column",
title="Income",
valueField="income",
lineAlpha=0,
fillAlphas=1,
)
chart.addGraph(graph1)
# line graph
graph2 = amGraph(
type="line",
title="Expenses",
valueField="expenses",
lineThickness=2,
bullet="round",
)
chart.addGraph(graph2)
# LEGEND
legend = amLegend()
chart.addLegend(legend)
context['chart'] = chart
return context
columnAndLineMix = ColumnAndLineMix.as_view()
class ColumnWithRotatedSeries(Column100PercentStacked):
template_name = 'column/chart.html'
chartData = [
{
"country": "USA",
"visits": 3025,
"color": "#FF0F00"
}, {
"country": "China",
"visits": 1882,
"color": "#FF6600"
}, {
"country": "Japan",
"visits": 1809,
"color": "#FF9E01"
}, {
"country": "Germany",
"visits": 1322,
"color": "#FCD202"
}, {
"country": "UK",
"visits": 1122,
"color": "#F8FF01"
}, {
"country": "France",
"visits": 1114,
"color": "#B0DE09"
}, {
"country": "India",
"visits": 984,
"color": "#04D215"
}, {
"country": "Spain",
"visits": 711,
"color": "#0D8ECF"
}, {
"country": "Netherlands",
"visits": 665,
"color": "#0D52D1"
}, {
"country": "Russia",
"visits": 580,
"color": "#2A0CD0"
}, {
"country": "South Korea",
"visits": 443,
"color": "#8A0CCF"
}, {
"country": "Canada",
"visits": 441,
"color": "#CD0D74"
}]
def get_context_data(self, *args, **kwargs):
context = super(ColumnWithRotatedSeries, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="country",
startDuration=1,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# AXES
# Category
        chart.categoryAxis.labelRotation = 45  # this line rotates the category labels
chart.categoryAxis.gridAlpha = 0
chart.categoryAxis.fillAlpha = 1
chart.categoryAxis.fillColor = "#FAFAFA"
chart.categoryAxis.gridPosition = "start"
# Value
valueAxis = amValueAxis(
dashLength=5,
title="Visitors from country",
axisAlpha=0,
)
chart.addValueAxis(valueAxis)
# GRAPHS
graph = amGraph(
valueField="visits",
colorField="color",
balloonText="[[category]]: [[value]]",
type="column",
lineAlpha=0,
fillAlphas=1,
)
chart.addGraph(graph)
context['chart'] = chart
return context
columnWithRotatedSeries = ColumnWithRotatedSeries.as_view()
class ColumnSimple(Column3D):
template_name = 'column/chart.html'
def get_context_data(self, *args, **kwargs):
context = super(ColumnSimple, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="country",
startDuration=1,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# AXES
# Category
chart.categoryAxis.labelRotation = 90
chart.categoryAxis.gridPosition = "start"
# Value
# in case you don"t want to change default settings of value axis,
# you don"t need to create it, as one value axis is created automatically.
# GRAPHS
graph = amGraph(
valueField="visits",
balloonText="[[category]]: [[value]]",
type="column",
lineAlpha=0,
fillAlphas=0.8,
)
chart.addGraph(graph)
context['chart'] = chart
return context
columnSimple = ColumnSimple.as_view()
class ColumnStacked(Column100PercentStacked):
template_name = 'column/chart.html'
def get_context_data(self, *args, **kwargs):
context = super(ColumnStacked, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="year",
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# AXES
# Category
chart.categoryAxis.gridAlpha = 0.1
chart.categoryAxis.axisAlpha = 0
chart.categoryAxis.gridPosition = "start"
# Value
valueAxis = amValueAxis(
stackType="regular",
gridAlpha=0.1,
axisAlpha=0,
)
chart.addValueAxis(valueAxis)
# GRAPHS
# first graph
graph1 = amGraph(
title="Europe",
labelText="[[value]]",
balloonText="[[value]]",
valueField="europe",
type="column",
lineAlpha=0,
fillAlphas=1,
lineColor="#C72C95",
)
chart.addGraph(graph1)
# second graph
graph2 = amGraph(
title="North America",
labelText="[[value]]",
balloonText="[[value]]",
valueField="namerica",
type="column",
lineAlpha=0,
fillAlphas=1,
lineColor="#D8E0BD",
)
chart.addGraph(graph2)
# third graph
graph3 = amGraph(
title="Asia-Pacific",
labelText="[[value]]",
balloonText="[[value]]",
valueField="asia",
type="column",
lineAlpha=0,
fillAlphas=1,
lineColor="#B3DBD4",
)
chart.addGraph(graph3)
# LEGEND
legend = amLegend()
chart.addLegend(legend)
context['chart'] = chart
return context
columnStacked = ColumnStacked.as_view()
class ColumnWithGradient(BarWithBackgroundImage):
template_name = 'column/chart.html'
def get_context_data(self, *args, **kwargs):
context = super(ColumnWithGradient, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="country",
startDuration=2,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# change balloon text color
chart.balloon.color = "#000000"
# AXES
# Category
chart.categoryAxis.gridAlpha = 0
chart.categoryAxis.axisAlpha = 0
chart.categoryAxis.labelsEnabled = False
# Value
valueAxis = amValueAxis(
gridAlpha=0,
axisAlpha=0,
labelsEnabled=False,
minimum=0,
)
chart.addValueAxis(valueAxis)
# GRAPHS
graph = amGraph(
balloonText="[[category]]: [[value]] Litres",
valueField="litres",
descriptionField="short",
type="column",
lineAlpha=0,
fillAlphas=1,
fillColors=["#ffe78e", "#bf1c25"],
labelText="[[description]]",
)
chart.addGraph(graph)
context['chart'] = chart
return context
columnWithGradient = ColumnWithGradient.as_view()
class ColumnWithImagesOnTop(Column100PercentStacked):
template_name = 'column/chart.html'
chartData = [
{
"name": "John",
"points": 35654,
"color": "#7F8DA9",
"bullet": "%simages/0.gif" % settings.STATIC_URL,
}, {
"name": "Damon",
"points": 65456,
"color": "#FEC514",
"bullet": "%simages/1.gif" % settings.STATIC_URL,
}, {
"name": "Patrick",
"points": 45724,
"color": "#DB4C3C",
"bullet": "%simages/2.gif" % settings.STATIC_URL,
}, {
"name": "Mark",
"points": 13654,
"color": "#DAF0FD",
"bullet": "%simages/3.gif" % settings.STATIC_URL,
}]
def get_context_data(self, *args, **kwargs):
context = super(ColumnWithImagesOnTop, self).get_context_data(*args, **kwargs)
chart = amSerialChart(
name='chart',
dataProvider=self.chartData,
categoryField="name",
startDuration=1,
pathToImages="%samcharts2/amcharts/images/" % settings.STATIC_URL,
)
# sometimes we need to set margins manually
        # autoMargins should be set to False in order for the chart to use custom margin values
chart.autoMargins = False
chart.marginRight = 0
chart.marginLeft = 0
# AXES
# Category
chart.categoryAxis.inside = True
chart.categoryAxis.axisAlpha = 0
chart.categoryAxis.gridAlpha = 0
chart.categoryAxis.tickLength = 0
# Value
valueAxis = amValueAxis(
minimum=0,
axisAlpha=0,
gridAlpha=0,
maximum=80000,
)
chart.addValueAxis(valueAxis)
# GRAPHS
graph = amGraph(
valueField="points",
customBulletField="bullet", # field of the bullet in data provider
bulletOffset=16, # distance from the top of the column to the bullet
colorField="color",
bulletSize=34, # bullet image should be rectangle (width = height)
type="column",
fillAlphas=0.8,
cornerRadiusTop=8,
lineAlpha=0,
)
chart.addGraph(graph)
context['chart'] = chart
return context
columnWithImagesOnTop = ColumnWithImagesOnTop.as_view()
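# --- Illustrative sketch (not part of the original file) ---
# How these demo views would typically be wired into the example project's
# urls.py; the URL patterns and names below are assumptions.
#
# from django.conf.urls import url
# from demos import views
#
# urlpatterns = [
#     url(r'^column/simple/$', views.columnSimple, name='column-simple'),
#     url(r'^bar/3d/$', views.bar3D, name='bar-3d'),
# ]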
| 26.420775 | 103 | 0.49046 | 3,629 | 45,021 | 6.049876 | 0.151006 | 0.048007 | 0.022956 | 0.018174 | 0.661581 | 0.631838 | 0.598315 | 0.576452 | 0.551401 | 0.4977 | 0 | 0.053683 | 0.386397 | 45,021 | 1,703 | 104 | 26.436289 | 0.741068 | 0.05051 | 0 | 0.642645 | 0 | 0 | 0.143883 | 0.012346 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014116 | false | 0 | 0.004458 | 0 | 0.064636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b2534c0418b9126bf14031fac35d279d4d24036 | 2,220 | py | Python | experiment1_meantime.py | mcsosa121/KSRFILS | 75995933771d8338de33cc9bbb5e9416e4242c6b | [
"MIT"
] | null | null | null | experiment1_meantime.py | mcsosa121/KSRFILS | 75995933771d8338de33cc9bbb5e9416e4242c6b | [
"MIT"
] | null | null | null | experiment1_meantime.py | mcsosa121/KSRFILS | 75995933771d8338de33cc9bbb5e9416e4242c6b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time
import numpy
from krypy.linsys import LinearSystem, Cg
from krypy.deflation import DeflatedCg
from krypy.utils import Arnoldi, ritz
from krypy.recycling import RecyclingCg
from krypy.recycling.factories import RitzFactorySimple
from scipy import random
def find_deflation_subspace(A,b,k,ortho='dmgs',ritz_type='ritz'):
Ar = Arnoldi(A,b,ortho=ortho)
for i in range(1,k+1):
Ar.advance()
[V,H] = Ar.get()
[theta,U,resnorm,Z] = ritz(H,V,type=ritz_type)
return Z
def reuse_deflation_subspace(sol,ritz_type='ritz'):
[theta,U,resnorm,Z] = ritz(sol.H,sol.V,type=ritz_type)
return Z
cgt = []
dft = []
rct = []
# repeat the whole experiment many times to average out timing noise
for trial in range(1, 100):
    matrixSize = 100
    R = random.rand(matrixSize, matrixSize)
    A = numpy.dot(R, R.transpose())  # random symmetric positive definite test matrix
    b = numpy.ones((matrixSize, 1))
    k = 10  # number of Ritz vectors used for deflation/recycling
    numSystems = 10
    rank = 1  # rank of the low-rank update used to build each next system
Asys = [A]
for i in range(1,numSystems):
u = random.rand(matrixSize, rank)
Asys.append(Asys[i-1] + numpy.dot(u,u.T))
systems = []
for i in range(0,len(Asys)):
systems.append(LinearSystem(A=Asys[i],b=b,self_adjoint=True,positive_definite=True))
ts = time.time()
for i in range(0,len(Asys)):
cg_sol = Cg(systems[i],maxiter=1000)
te = time.time()
cgt.append((te-ts)*1000)
ts = time.time()
for i in range(0,len(Asys)):
U=find_deflation_subspace(Asys[i],b,k)
deflated_sol = DeflatedCg(systems[i],U=U,maxiter=1000)
te = time.time()
dft.append((te-ts)*1000)
vector_factory = RitzFactorySimple(n_vectors=k, which='sm')
ts = time.time()
recycler = RecyclingCg(vector_factory=vector_factory)
for i in range(0,len(Asys)):
recycled_sol = recycler.solve(systems[i],maxiter=1000)
te = time.time()
rct.append((te-ts)*1000)
print('Mean time taken for CG (ms):', sum(cgt)/len(cgt))
print('Mean time taken for Deflated CG (ms):', sum(dft)/len(dft))
print('Mean time taken for Recycled CG (ms):', sum(rct)/len(rct))
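# --- Illustrative sketch (not part of the original file) ---
# Beyond the means printed above, the per-trial timings collected in
# cgt/dft/rct could be summarized further; numpy is already imported.
#
# for name, times in (('CG', cgt), ('Deflated CG', dft), ('Recycled CG', rct)):
#     arr = numpy.array(times)
#     print(name, 'median (ms):', numpy.median(arr), 'std (ms):', arr.std())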
| 32.647059 | 93 | 0.644144 | 335 | 2,220 | 4.21194 | 0.307463 | 0.019844 | 0.029766 | 0.054571 | 0.248051 | 0.137491 | 0.109142 | 0.041106 | 0.041106 | 0.041106 | 0 | 0.026286 | 0.211712 | 2,220 | 67 | 94 | 33.134328 | 0.78 | 0.021171 | 0 | 0.206897 | 0 | 0 | 0.055159 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.155172 | 0 | 0.224138 | 0.051724 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b272c4081ff788cf0e7635f139e4a72c7417fd5 | 3,935 | py | Python | club_crm/api/backend/restaurant.py | VivekChamp/clubcrm | 82036360d867d3dc5406bc71445a98841b5bffbf | [
"MIT"
] | null | null | null | club_crm/api/backend/restaurant.py | VivekChamp/clubcrm | 82036360d867d3dc5406bc71445a98841b5bffbf | [
"MIT"
] | null | null | null | club_crm/api/backend/restaurant.py | VivekChamp/clubcrm | 82036360d867d3dc5406bc71445a98841b5bffbf | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import frappe
from datetime import date
from club_crm.club_crm.utils.sms_notification import send_sms
from club_crm.club_crm.utils.push_notification import send_push
@frappe.whitelist()
def todays_order():
    """Return all of today's food orders (Ordered, Ready or Delivered)."""
    today = date.today()
orders = []
order_list = frappe.get_all('Food Order Entry', filters={'date': today, 'order_status':['in', {'Ordered','Ready', 'Delivered'}]}, fields=['*'])
if order_list:
for each_order in order_list:
order = frappe.get_doc('Food Order Entry', each_order.name)
items = []
if order.order_items:
for row in order.order_items:
items.append({
'item_name': row.item_name,
'qty': row.qty,
'rate': row.rate,
'amount': row.amount
})
orders.append({
'order_id': order.name,
'client_name': order.client_name,
'order_status': order.order_status,
'mobile_no': order.mobile_number,
'total_quantity': order.total_quantity,
'total_amount': order.total_amount,
'order_type': order.order_type,
'items': items
})
frappe.response["message"] = {
"orders": orders
}
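# --- Illustrative note (not part of the original file) ---
# Whitelisted methods such as the ones in this module are exposed over
# Frappe's REST API under their dotted path, e.g. (token auth assumed):
#
#   curl -H "Authorization: token <api_key>:<api_secret>" \
#        "https://<site>/api/method/club_crm.api.backend.restaurant.todays_order"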
@frappe.whitelist()
def order_ready(order_id):
    """Mark an order as Ready and notify the client once via SMS/push."""
order = frappe.get_doc('Food Order Entry', order_id)
frappe.db.set_value("Food Order Entry",order_id,"order_status","Ready")
frappe.db.commit()
if order.ready_notify==0:
client = frappe.get_doc('Client', order.client_id)
msg = "Your food order from Grams is ready."
receiver_list='"'+str(order.mobile_number)+'"'
send_sms(receiver_list,msg)
if client.fcm_token:
title = "Grams at Katara Club"
send_push(client.name,title,msg)
frappe.db.set_value("Food Order Entry",order_id,"ready_notify",1)
frappe.db.commit()
order = frappe.get_doc('Food Order Entry', order_id)
items = []
if order.order_items:
for row in order.order_items:
items.append({
'item_name': row.item_name,
'qty': row.qty,
'rate': row.rate,
'amount': row.amount
})
frappe.response["message"] = {
'status': 1,
'status_message': 'Order is marked as Ready',
'order_id': order.name,
'client_name': order.client_name,
'order_status': order.order_status,
'mobile_no': order.mobile_number,
'total_quantity': order.total_quantity,
'total_amount': order.total_amount,
'order_type': order.order_type,
'items': items
}
@frappe.whitelist()
def order_delivered(order_id):
    """Mark an order as Delivered and return its updated summary."""
order = frappe.get_doc('Food Order Entry', order_id)
frappe.db.set_value("Food Order Entry",order_id,"order_status","Delivered")
frappe.db.commit()
order = frappe.get_doc('Food Order Entry', order_id)
items = []
if order.order_items:
for row in order.order_items:
items.append({
'item_name': row.item_name,
'qty': row.qty,
'rate': row.rate,
'amount': row.amount
})
frappe.response["message"] = {
"status": 1,
"status_message": 'Order is marked as Delivered',
'order_id': order.name,
'client_name': order.client_name,
'order_status': order.order_status,
'mobile_no': order.mobile_number,
'total_quantity': order.total_quantity,
'total_amount': order.total_amount,
'order_type': order.order_type,
'items': items
} | 34.823009 | 148 | 0.581194 | 461 | 3,935 | 4.726681 | 0.177874 | 0.055071 | 0.057825 | 0.061037 | 0.647086 | 0.647086 | 0.625975 | 0.611749 | 0.611749 | 0.594768 | 0 | 0.001452 | 0.300127 | 3,935 | 113 | 149 | 34.823009 | 0.78976 | 0 | 0 | 0.637255 | 0 | 0 | 0.184451 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.078431 | 0 | 0.107843 | 0.009804 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b28f0284102a05a1095c18ed52c32ed434b06cb | 5,448 | py | Python | keras_vgg_16.py | henniekim/python_keras_vgg_16 | 46f86f8737244cf10155b08eaebe0d5232199215 | [
"MIT"
] | null | null | null | keras_vgg_16.py | henniekim/python_keras_vgg_16 | 46f86f8737244cf10155b08eaebe0d5232199215 | [
"MIT"
] | null | null | null | keras_vgg_16.py | henniekim/python_keras_vgg_16 | 46f86f8737244cf10155b08eaebe0d5232199215 | [
"MIT"
] | null | null | null | from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.layers.pooling import MaxPooling2D
from keras.layers.convolutional import Conv2D
from keras import optimizers
from keras.losses import categorical_crossentropy
import os
import cv2
import numpy as np
# select GPU number to use
os.environ["CUDA_VISIBLE_DEVICES"]="3"
# select data to train
image_path = '/datahdd/workdir/donghyun/faster_rcnn_kdh/PascalDataSetReduced/'
filenumber = 0
X_train = list()
Y_train = list()
while True:
    path = image_path + 'pascal_voc_' + str(filenumber)
    if os.path.isfile(path + '.jpg') and os.path.isfile(path + '.txt'):
X_image = cv2.imread(path+'.jpg')
Y_label = np.loadtxt(path+'.txt', delimiter = ' ')
X_train.append(X_image)
Y_train.append(Y_label)
#print(str(filenumber) + ' is loaded')
else:
print('image loading stopped at ' + str(filenumber-1))
break
filenumber += 1
# data separate and shuffle and save indices
X_train = np.array(X_train)
Y_train = np.array(Y_train)
# shuffling all of the data set and separate train & val set
shuffled_indexes = np.arange(len(X_train))
np.random.shuffle(shuffled_indexes)

# the first 10% of the shuffled indices form the held-out test set
test_indexes = shuffled_indexes[:int(0.1 * len(X_train))]
X_test = X_train[test_indexes, :]
Y_test = Y_train[test_indexes, :]
np.savetxt('test_shuffled_index_reduced.txt', test_indexes, delimiter=' ', fmt='%i')
print('TEST SET INDEX saved')

# the remaining 90% form the training set
train_indexes = shuffled_indexes[int(0.1 * len(X_train)):]
X_train = X_train[train_indexes, :]
Y_train = Y_train[train_indexes, :]
np.savetxt('train_shuffled_index_reduced.txt', train_indexes, delimiter=' ', fmt='%i')
print('TRAIN SET INDEX saved')
model = Sequential()
##-------------------------------------------------------------------------##
model.add(Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding="same", activation='relu',
                 input_shape=(224, 224, 3)))
model.add(Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding="same", activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', data_format=None))
##-------------------------------------------------------------------------##
model.add(Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding="same", activation='relu'))
model.add(Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding="same", activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', data_format=None))
##-------------------------------------------------------------------------##
model.add(Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding="same", activation='relu'))
model.add(Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding="same", activation='relu'))
model.add(Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding="same", activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', data_format=None))
##-------------------------------------------------------------------------##
model.add(Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding="same", activation='relu'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding="same", activation='relu'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding="same", activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', data_format=None))
##-------------------------------------------------------------------------##
model.add(Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding="same", activation='relu'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding="same", activation='relu'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), strides=1, padding="same", activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', data_format=None))
##-------------------------------------------------------------------------##
model.add(Flatten())
model.add(Dense( units = 1024, activation = 'relu'))
model.add(Dense( units = 20, activation = 'softmax'))
model.summary()
##---OPTIMIZERS---##
adam = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0, amsgrad=False)
momentum = optimizers.SGD(lr=0.01, momentum=0.9, decay=1e-6)
model.compile(optimizer = adam, loss = categorical_crossentropy, metrics=['accuracy'])
# when using the categorical_crossentropy loss, your targets should be in categorical format (one- hot encoding)
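# A minimal sketch (not part of the original script): if the label files held
# integer class indices rather than one-hot vectors, they could be converted
# with keras' bundled utility before calling fit, e.g.:
#
#     from keras.utils import to_categorical
#     Y_train = to_categorical(Y_train, num_classes=20)
#     Y_test = to_categorical(Y_test, num_classes=20)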
model.fit(X_train, Y_train, batch_size = 64, epochs = 100, validation_data=(X_test, Y_test))
#score = model.evaluate(X_test, Y_test, batch_size = 64)
| 34.481013 | 112 | 0.606094 | 684 | 5,448 | 4.692982 | 0.247076 | 0.052336 | 0.056698 | 0.085047 | 0.458255 | 0.445171 | 0.427103 | 0.415265 | 0.415265 | 0.415265 | 0 | 0.038564 | 0.176579 | 5,448 | 157 | 113 | 34.700637 | 0.676995 | 0.147577 | 0 | 0.348837 | 0 | 0 | 0.084691 | 0.027362 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.186047 | 0 | 0.186047 | 0.034884 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b3429811d85f7005761b8ac7ab0e4ba8f27c361 | 10,675 | py | Python | disco/cli/config_time_series.py | NREL/disco | 19afa1c397c6c24e37222f6cbf027eb88833beda | [
"BSD-3-Clause"
] | 2 | 2022-03-11T20:04:34.000Z | 2022-03-14T22:25:29.000Z | disco/cli/config_time_series.py | NREL/disco | 19afa1c397c6c24e37222f6cbf027eb88833beda | [
"BSD-3-Clause"
] | 4 | 2022-03-11T17:48:50.000Z | 2022-03-17T21:39:47.000Z | disco/cli/config_time_series.py | NREL/disco | 19afa1c397c6c24e37222f6cbf027eb88833beda | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""Creates JADE configuration for stage 1 of pydss_simulation pipeline."""
import logging
import sys
import click
from jade.common import CONFIG_FILE
from jade.loggers import setup_logging
from jade.utils.utils import load_data
from PyDSS.reports.pv_reports import PF1_SCENARIO, CONTROL_MODE_SCENARIO
from disco.enums import SimulationType
from disco.extensions.pydss_simulation.pydss_configuration import PyDssConfiguration
from disco.extensions.pydss_simulation.estimate_run_minutes import generate_estimate_run_minutes
from disco.pydss.common import ConfigType
from disco.pydss.pydss_configuration_base import get_default_reports_file
logger = logging.getLogger(__name__)
def callback_is_enabled(_, __, value):
    if value is None:
        return None
    return {"true": True, "false": False}[value.lower()]
COMMON_TIME_SERIES_OPTIONS = (
    click.option(
        "-c",
        "--config-file",
        default=CONFIG_FILE,
        show_default=True,
        help="JADE config file to create",
    ),
    click.option(
        "--feeder-losses",
        type=click.Choice(("true", "false"), case_sensitive=False),
        callback=callback_is_enabled,
        default=None,
        show_default=True,
        help="Whether to enable the Feeder Losses report. If not set, use the value in "
        "--reports-filename.",
    ),
    click.option(
        "--pv-clipping",
        type=click.Choice(("true", "false"), case_sensitive=False),
        callback=callback_is_enabled,
        default=None,
        show_default=True,
        help="Whether to enable the PV clipping report. If not set, use the value in "
        "--reports-filename.",
    ),
    click.option(
        "--pv-curtailment",
        type=click.Choice(("true", "false"), case_sensitive=False),
        callback=callback_is_enabled,
        default=None,
        show_default=True,
        help="Whether to enable the PV curtailment report. If not set, use the value in "
        "--reports-filename.",
    ),
    click.option(
        "--thermal-metrics",
        type=click.Choice(("true", "false"), case_sensitive=False),
        callback=callback_is_enabled,
        default=None,
        show_default=True,
        help="Whether to enable the Thermal Metrics report. If not set, use the value in "
        "--reports-filename.",
    ),
    click.option(
        "--voltage-metrics",
        type=click.Choice(("true", "false"), case_sensitive=False),
        callback=callback_is_enabled,
        default=None,
        show_default=True,
        help="Whether to enable the Voltage Metrics report. If not set, use the value in "
        "--reports-filename.",
    ),
    click.option(
        "--capacitor-changes",
        type=click.Choice(("true", "false"), case_sensitive=False),
        callback=callback_is_enabled,
        default=None,
        show_default=True,
        help="Whether to enable the Capacitor State Changes report. If not set, use the value in "
        "--reports-filename.",
    ),
    click.option(
        "--regcontrol-changes",
        type=click.Choice(("true", "false"), case_sensitive=False),
        callback=callback_is_enabled,
        default=None,
        show_default=True,
        help="Whether to enable the RegControl Tap Number Changes report. If not set, use the "
        "value in --reports-filename.",
    ),
    click.option(
        "--export-data-tables",
        default=False,
        is_flag=True,
        show_default=True,
        help="Export collected circuit element properties as tables.",
    ),
    click.option(
        "--exports-filename",
        default=None,
        show_default=True,
        help="PyDSS export options, default is None.",
    ),
    click.option(
        "-r",
        "--reports-filename",
        default=get_default_reports_file(SimulationType.QSTS),
        show_default=True,
        help="PyDSS report options",
    ),
    click.option(
        "--skip-night/--no-skip-night",
        default=False,
        is_flag=True,
        show_default=True,
        help="Don't run controls or collect data during nighttime hours.",
    ),
    click.option(
        "--store-all-time-points/--no-store-all-time-points",
        is_flag=True,
        default=False,
        show_default=True,
        help="Store per-element data at all time points for thermal and voltage metrics.",
    ),
    click.option(
        "--store-per-element-data/--no-store-per-element-data",
        is_flag=True,
        default=False,
        show_default=True,
        help="Store per-element data in thermal and voltage metrics.",
    ),
    click.option(
        "-v",
        "--volt-var-curve",
        default=None,
        help="Update the PyDSS volt-var curve name. If not set, use the pre-configured curve.",
    ),
    click.option(
        "--verbose",
        is_flag=True,
        default=False,
        help="Enable debug logging",
    ),
)
def common_time_series_options(func):
    for option in reversed(COMMON_TIME_SERIES_OPTIONS):
        func = option(func)
    return func
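# Usage sketch (not in the original; it mirrors the time_series command below):
# the decorator stacks every shared option onto another click command, and
# applying them in reversed order preserves the declared option order in --help,
# since each click option decorator wraps the previously applied ones.
#
#     @click.command()
#     @click.argument("inputs")
#     @common_time_series_options
#     def my_command(inputs, config_file, feeder_losses, ...):
#         ...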
@click.command()
@click.argument("inputs")
@common_time_series_options
@click.option(
    "-e",
    "--estimated-run-minutes",
    type=int,
    help="Estimated per-job runtime. Default is None.",
)
@click.option(
    "--calc-estimated-run-minutes/--no-calc-estimated-run-minutes",
    is_flag=True,
    default=True,
    show_default=True,
    help="Calculate estimated per-job runtime by parsing the OpenDSS files.",
)
@click.option(
    "--dc-ac-ratio",
    default=None,
    type=float,
    help="Set a custom DC-AC ratio for PV Systems.",
)
@click.option(
    "--pf1/--no-pf1",
    is_flag=True,
    default=True,
    show_default=True,
    help="Include PF1 scenario or not",
)
@click.option(
    "--control-mode/--no-control-mode",
    is_flag=True,
    default=True,
    show_default=True,
    help="Include control_mode scenario or not",
)
@click.option(
    "--order-by-penetration/--no-order-by-penetration",
    default=False,
    show_default=True,
    help="Make jobs with higher penetration levels blocked by those with lower levels. This "
    "can be beneficial if you want the higher-penetration-level jobs to be "
    "canceled if a job with a lower penetration level fails. However, it can significantly "
    "reduce the number of jobs that can run simultaneously.",
)
def time_series(
    inputs,
    config_file,
    feeder_losses,
    pv_clipping,
    pv_curtailment,
    thermal_metrics,
    voltage_metrics,
    capacitor_changes,
    regcontrol_changes,
    export_data_tables,
    exports_filename,
    reports_filename,
    skip_night,
    store_all_time_points,
    store_per_element_data,
    volt_var_curve,
    verbose,
    estimated_run_minutes,
    calc_estimated_run_minutes,
    dc_ac_ratio,
    pf1,
    control_mode,
    order_by_penetration,
):
    """Create JADE configuration for time series simulations."""
    level = logging.DEBUG if verbose else logging.INFO
    setup_logging(__name__, None, console_level=level, packages=["disco"])
    if not pf1 and not control_mode:
        logger.error("At least one of '--pf1' or '--control-mode' must be set.")
        sys.exit(1)

    simulation_config = PyDssConfiguration.get_default_pydss_simulation_config()
    simulation_config["project"]["simulation_type"] = SimulationType.QSTS.value
    simulation_config["reports"] = load_data(reports_filename)["reports"]
    simulation_config["exports"]["export_data_tables"] = export_data_tables
    for report in simulation_config["reports"]["types"]:
        if report["name"] == "Feeder Losses" and feeder_losses is not None:
            report["enabled"] = feeder_losses
        if report["name"] == "PV Clipping" and pv_clipping is not None:
            report["enabled"] = pv_clipping
        if report["name"] == "PV Curtailment" and pv_curtailment is not None:
            report["enabled"] = pv_curtailment
        if report["name"] == "Thermal Metrics" and thermal_metrics is not None:
            report["enabled"] = thermal_metrics
        if report["name"] == "Voltage Metrics" and voltage_metrics is not None:
            report["enabled"] = voltage_metrics
        if report["name"] in ("Thermal Metrics", "Voltage Metrics"):
            report["store_all_time_points"] = store_all_time_points
            report["store_per_element_data"] = store_per_element_data
        if report["name"] == "Capacitor State Change Counts" and capacitor_changes is not None:
            report["enabled"] = capacitor_changes
        if report["name"] == "RegControl Tap Number Change Counts" and regcontrol_changes is not None:
            report["enabled"] = regcontrol_changes

    exports = {} if exports_filename is None else load_data(exports_filename)
    scenarios = []
    if control_mode:
        scenarios.append(
            PyDssConfiguration.make_default_pydss_scenario(CONTROL_MODE_SCENARIO, exports)
        )
    if pf1:
        scenarios.append(PyDssConfiguration.make_default_pydss_scenario(PF1_SCENARIO, exports))

    config = PyDssConfiguration.auto_config(
        inputs,
        simulation_config=simulation_config,
        scenarios=scenarios,
        order_by_penetration=order_by_penetration,
        estimated_run_minutes=estimated_run_minutes,
        dc_ac_ratio=dc_ac_ratio,
    )
    has_pydss_controllers = config.has_pydss_controllers()
    if control_mode and not has_pydss_controllers:
        scenarios_config = config.get_pydss_config(ConfigType.SCENARIOS)
        assert scenarios_config[0]["name"] == CONTROL_MODE_SCENARIO
        scenarios_config.pop(0)
        logger.info(
            "Excluding %s scenario because there are no pydss controllers.", CONTROL_MODE_SCENARIO
        )
        config.set_pydss_config(ConfigType.SCENARIOS, scenarios_config)
    if volt_var_curve is not None:
        if has_pydss_controllers and control_mode:
            config.update_volt_var_curve(volt_var_curve)
        else:
            logger.warning(
                "Setting a volt_var_curve has no effect when there is no %s scenario.",
                CONTROL_MODE_SCENARIO,
            )
    if calc_estimated_run_minutes:
        generate_estimate_run_minutes(config)
    if skip_night:
        pydss_sim_config = config.get_pydss_config(ConfigType.SIMULATION_CONFIG)
        pydss_sim_config["project"]["simulation_range"] = {"start": "06:00:00", "end": "18:00:00"}
        # Note that we are using the same convergence error threshold percent.
        config.set_pydss_config(ConfigType.SIMULATION_CONFIG, pydss_sim_config)

    config.dump(filename=config_file)
    print(f"Created {config_file} for TimeSeries Analysis")
| 34.214744 | 102 | 0.662857 | 1,301 | 10,675 | 5.240584 | 0.177556 | 0.035494 | 0.039601 | 0.050161 | 0.38897 | 0.331769 | 0.262834 | 0.246113 | 0.231153 | 0.213259 | 0 | 0.003049 | 0.231944 | 10,675 | 311 | 103 | 34.324759 | 0.828516 | 0.019953 | 0 | 0.369718 | 0 | 0 | 0.27378 | 0.03445 | 0 | 0 | 0 | 0 | 0.003521 | 1 | 0.010563 | false | 0 | 0.042254 | 0 | 0.06338 | 0.003521 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b36647274e28645db368fe1412571e540dc57c9 | 1,919 | py | Python | vcfp_attack/trainByBayes.py | kenneds6/VCFingerprinting | 2de88766e2b2beeed44a4267c370fe755b5db90d | [
"MIT"
] | null | null | null | vcfp_attack/trainByBayes.py | kenneds6/VCFingerprinting | 2de88766e2b2beeed44a4267c370fe755b5db90d | [
"MIT"
] | null | null | null | vcfp_attack/trainByBayes.py | kenneds6/VCFingerprinting | 2de88766e2b2beeed44a4267c370fe755b5db90d | [
"MIT"
] | null | null | null | #!/usr/bin/python
import os
import sys
import sklearn
from sklearn.naive_bayes import GaussianNB
from sklearn.externals import joblib
import argparse
import numpy as np
import fileUtils
import tools
def saveModel(modelData, fpath):
    joblib.dump(modelData, fpath)


def readfile(fpath):
    tmpList = []
    for line in fileUtils.readTxtFile(fpath, ','):
        tmp = line.split(',')
        if len(tmp) > 4:
            tmp_multi = fileUtils.str2int(tmp[3]) * fileUtils.str2int(tmp[4])
        else:
            tmp_multi = fileUtils.str2int(tmp[-1]) * fileUtils.str2int(tmp[-2])
        tmpList.append(tmp_multi)
    return tmpList


def computeFeature(fpath, rangeList):
    start, end, interval = rangeList[0], rangeList[1], rangeList[2]
    rangeList, sectionList = tools.getSectionList(start, end, interval)
    features = readfile(fpath)
    for feat in features:
        index = tools.computeRange(rangeList, feat)
        sectionList[index] += 1
    return sectionList


def computeAllFeature(dpath, rangeList):
    fileList = fileUtils.genfilelist(dpath)
    allFeatures = []
    for fpath in fileList:
        tmpFeat = computeFeature(fpath, rangeList)
        allFeatures.append(tmpFeat)
    return np.array(allFeatures)


def train(trainData, trainLabel):
    gnb = GaussianNB()
    y_pred = gnb.fit(trainData, trainLabel)
    return y_pred
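# NOTE: loadTrainData is called in main() but is not defined anywhere in the
# original file. The sketch below is a minimal stand-in, under the assumption
# that each class has its own subdirectory of feature files; the directory
# layout and the default rangeList values are assumptions, not part of the
# original code.
def loadTrainData(dpath, rangeList=(0, 1000, 10)):
    data, label = [], []
    for classIdx, className in enumerate(sorted(os.listdir(dpath))):
        classDir = os.path.join(dpath, className)
        if not os.path.isdir(classDir):
            continue
        for fpath in fileUtils.genfilelist(classDir):
            data.append(computeFeature(fpath, rangeList))
            label.append(classIdx)
    return np.array(data), np.array(label)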
def main(opts):
    trainDataDir = opts.trainDataDir
    data, label = loadTrainData(trainDataDir)
    mymodel = train(data, label)
    saveModel(mymodel, opts.modelSaveDir)
    print('model saved at {}'.format(opts.modelSaveDir))


def parseOpts(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--trainDataDir', help='path to training data dir')
    parser.add_argument('-m', '--modelSaveDir', help='path to model save dir')
    opts = parser.parse_args()
    return opts


if __name__ == "__main__":
    opts = parseOpts(sys.argv)
    main(opts)
| 24.922078 | 81 | 0.682126 | 224 | 1,919 | 5.767857 | 0.441964 | 0.049536 | 0.058824 | 0.037152 | 0.041796 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008525 | 0.205315 | 1,919 | 76 | 82 | 25.25 | 0.838689 | 0.008338 | 0 | 0 | 0 | 0 | 0.055731 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.127273 | false | 0 | 0.163636 | 0 | 0.381818 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b3666930d6995caea754b79c0c21bae3db8e9e7 | 2,472 | py | Python | hosting-scripts/leaseweb_invoices.py | sromanenko/hand-tools | 50be74f07c8f8f6bb89e6470c4370c62c2fbc2e0 | [
"MIT"
] | null | null | null | hosting-scripts/leaseweb_invoices.py | sromanenko/hand-tools | 50be74f07c8f8f6bb89e6470c4370c62c2fbc2e0 | [
"MIT"
] | null | null | null | hosting-scripts/leaseweb_invoices.py | sromanenko/hand-tools | 50be74f07c8f8f6bb89e6470c4370c62c2fbc2e0 | [
"MIT"
] | 1 | 2020-10-05T08:11:13.000Z | 2020-10-05T08:11:13.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import gspread
import config
from oauth2client.service_account import ServiceAccountCredentials as Account
api_url = 'https://api.leaseweb.com/invoices/v1/invoices'
def api_request(url, headers, params=None):
    try:
        conn = requests.get(url=url, headers=headers, params=params)
        conn.raise_for_status()
    except requests.exceptions.HTTPError as http_error:
        raise SystemExit(http_error)
    except requests.exceptions.RequestException as req_error:
        raise SystemExit(req_error)
    except Exception as error:
        raise SystemExit(error)
    else:
        return conn.json()


def main(header):
    hosts = []
    for item in api_request(api_url, header)['invoices']:
        host = {
            'ContractId': item['id'],
            'Date': item['date'],
            'DueDate': item['dueDate'],
            'TaxAmount': item['taxAmount'],
            'Total': item['total'],
            'OpenAmount': item['openAmount'],
            'Currency': item['currency'],
            'Status': item['status'],
        }
        hosts.append(host)
    return hosts
# Google sheet
scope = ['https://spreadsheets.google.com/feeds',
         'https://www.googleapis.com/auth/drive']
creds = Account.from_json_keyfile_name('google_sheet_secret.json', scope)
client = gspread.authorize(creds)
def update_google_table(parameter_list):
    # Google spreadsheet
    spreadsheet = client.open("Leaseweb invoices")
    # Select the 'All invoices' worksheet tab
    worksheet = spreadsheet.worksheet('All invoices')
    # Build the table header row
    header = [
        'ContractId',
        'Date',
        'DueDate',
        'TaxAmount',
        'Total',
        'OpenAmount',
        'Currency',
        'Status',
    ]
    worksheet.update('A1', [header])
    start_cell = 'A2'
    # Eight columns (A-H), one row per invoice plus the header row
    end_cell = 'H' + str(len(parameter_list) + 1)
    cell_range = worksheet.range('{}:{}'.format(start_cell, end_cell))
    simplified_data = []
    for row in parameter_list:
        for column in header:
            simplified_data.append(row[column])
    for i, cell in enumerate(cell_range):
        cell.value = simplified_data[i]
    worksheet.update_cells(cell_range)


if __name__ == '__main__':
    invoices_list = []
    for auth_key in config.lw_accounts:
        for invoice in main(config.lw_accounts[auth_key]):
            invoices_list.append(invoice)
    update_google_table(invoices_list)
| 28.413793 | 77 | 0.637136 | 276 | 2,472 | 5.528986 | 0.413043 | 0.019659 | 0.039318 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003729 | 0.240696 | 2,472 | 86 | 78 | 28.744186 | 0.80927 | 0.053803 | 0 | 0 | 0 | 0 | 0.157308 | 0.010287 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.060606 | 0 | 0.136364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b377d3baccb78698043aba61e68c933edadec23 | 2,499 | py | Python | scrapy_ddiy/utils/common.py | LZC6244/scrapy_ddiy | 1bf7cdd382afd471af0bf7069b377fb364dc4730 | [
"MIT"
] | 9 | 2021-05-17T02:55:16.000Z | 2022-03-28T08:36:50.000Z | scrapy_ddiy/utils/common.py | LZC6244/scrapy_ddiy | 1bf7cdd382afd471af0bf7069b377fb364dc4730 | [
"MIT"
] | null | null | null | scrapy_ddiy/utils/common.py | LZC6244/scrapy_ddiy | 1bf7cdd382afd471af0bf7069b377fb364dc4730 | [
"MIT"
] | 1 | 2022-01-23T06:28:31.000Z | 2022-01-23T06:28:31.000Z | # -*- coding: utf-8 -*-
import ast
import redis
import socket
import hashlib
import pymongo
from scrapy import Request
from w3lib.url import canonicalize_url
from scrapy.utils.python import to_bytes
def get_str_md5(string: str, encoding='utf-8'):
    """
    Compute the MD5 digest of a string
    :param string:
    :param encoding:
    :return:
    """
    md5_obj = hashlib.md5()
    md5_obj.update(string.encode(encoding=encoding))
    return md5_obj.hexdigest()


def get_request_md5(request: Request):
    """
    Compute the MD5 digest of a scrapy.Request
    (modeled on the request_fingerprint function in scrapy.utils.request)
    :param request:
    :return:
    """
    md5_obj = hashlib.md5()
    md5_obj.update(to_bytes(request.method))
    md5_obj.update(to_bytes(canonicalize_url(request.url)))
    md5_obj.update(request.body or b'')
    return md5_obj.hexdigest()


def get_redis_conn(settings):
    """Read the Redis configuration from the project settings and open a connection"""
    return redis.Redis(host=settings.get('REDIS_HOST'), port=settings.get('REDIS_PORT'),
                       **settings.get('REDIS_PARAMS'))


def get_mongo_cli(settings):
    """Read the MongoDB configuration from the project settings and open a connection"""
    return pymongo.MongoClient(settings.get('MONGO_URI'), **settings.get('MONGO_PARAMS'))


def get_local_ip():
    """
    :return: the local LAN IP as a string, e.g. '192.168.0.1'
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(('8.8.8.8', 80))
    local_ip = s.getsockname()[0]
    s.close()
    return local_ip


def cookie_str_to_dict(cookie_str):
    """Convert a cookie string captured from the browser into a dict"""
    cookie_dict = dict()
    for i in cookie_str.split(';'):
        i = i.strip()
        if '=' not in i:
            i += '='
        k, v = i.split('=', maxsplit=1)
        cookie_dict[k] = v
    return cookie_dict


def run_func(argv, local_var):
    """Run as: run_func(sys.argv, locals())"""
    argv_len = len(argv)
    warn_msg = f'Please run this program as [ python file_name.py function_name k1=v1 k2="\'str_v2\'" ... ] \n' \
               f'(Please use single quotes when passing strings)\n'
    if argv_len > 1:
        func_name = argv[1]
        func = local_var.get(func_name)
        assert func, f'Please check if [ {func_name} ] exists '
        params = dict()
        try:
            for arg in argv[2:]:
                k, v = arg.split('=', 1)
                v = v.strip("'") if v.startswith("'") else ast.literal_eval(v)
                params[k] = v
        except Exception:
            raise UserWarning(warn_msg)
        return func(**params)
    else:
        print(warn_msg)
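# A minimal usage sketch (hypothetical file and function names, not part of
# this module). In my_script.py:
#
#     def greet(name, times=1):
#         return ' '.join([name] * times)
#
#     if __name__ == '__main__':
#         run_func(sys.argv, locals())
#
# invoked as: python my_script.py greet name="'world'" times=2
# The quoted value is stripped of its single quotes; unquoted values are
# parsed with ast.literal_eval, so times=2 arrives as the int 2.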
| 27.163043 | 113 | 0.612245 | 343 | 2,499 | 4.294461 | 0.373178 | 0.032587 | 0.032587 | 0.027155 | 0.100475 | 0.082824 | 0.046164 | 0.046164 | 0 | 0 | 0 | 0.021843 | 0.2489 | 2,499 | 91 | 114 | 27.461538 | 0.76292 | 0.130452 | 0 | 0.070175 | 0 | 0 | 0.117139 | 0 | 0 | 0 | 0 | 0 | 0.017544 | 1 | 0.122807 | false | 0.017544 | 0.140351 | 0 | 0.385965 | 0.017544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b380e0ffaac00c93adb248541f24f62ceacc3dd | 7,392 | py | Python | src/ctc/toolbox/amm_utils/cpmm/cpmm_trade.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | [
"MIT"
] | 94 | 2022-02-15T19:34:49.000Z | 2022-03-26T19:26:22.000Z | src/ctc/toolbox/amm_utils/cpmm/cpmm_trade.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | [
"MIT"
] | 7 | 2022-03-03T02:58:47.000Z | 2022-03-11T18:41:05.000Z | src/ctc/toolbox/amm_utils/cpmm/cpmm_trade.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | [
"MIT"
] | 7 | 2022-02-15T17:53:07.000Z | 2022-03-17T19:14:17.000Z | from __future__ import annotations
import decimal
from ctc.toolbox import validate_utils
from . import cpmm_spec
def trade(
    x_reserves: int | float,
    y_reserves: int | float,
    x_sold: int | float | None = None,
    x_bought: int | float | None = None,
    y_sold: int | float | None = None,
    y_bought: int | float | None = None,
    new_x_reserves: int | float | None = None,
    new_y_reserves: int | float | None = None,
    fee_rate: int | float | None = None,
) -> cpmm_spec.Trade:
    """perform trade with AMM

    ## Input Requirements
    - all input values must be positive
    - must always specify both x_reserves and y_reserves
    - must specify exactly one of:
        - x_sold
        - x_bought
        - y_sold
        - y_bought
        - new_x_reserves
        - new_y_reserves
    - values in this list can be scalars or numpy arrays
    """

    # validate inputs
    if fee_rate is None:
        fee_rate = 0.003
    value = validate_utils._ensure_exactly_one(
        x_sold, x_bought, y_sold, y_bought, new_x_reserves, new_y_reserves
    )
    validate_utils._ensure_non_negative(value)

    kwargs = {
        'x_reserves': x_reserves,
        'y_reserves': y_reserves,
        'fee_rate': fee_rate,
    }
    reverse_kwargs = {
        'y_reserves': x_reserves,
        'x_reserves': y_reserves,
        'fee_rate': fee_rate,
    }

    if x_sold is not None:
        # case: sell x for y, x specified
        x_bought = -x_sold
        y_bought = compute_y_bought_when_x_sold(x_sold=x_sold, **kwargs)
        y_sold = -y_bought
    elif y_sold is not None:
        # case: sell y for x, y specified
        y_bought = -y_sold
        x_bought = compute_y_bought_when_x_sold(x_sold=y_sold, **reverse_kwargs)
        x_sold = -x_bought
    elif x_bought is not None:
        # case: buy x with y, x specified
        x_sold = -x_bought
        y_sold = compute_x_sold_when_y_bought(
            y_bought=x_bought, **reverse_kwargs
        )
        y_bought = -y_sold
    elif y_bought is not None:
        # case: buy y with x, y specified
        y_sold = -y_bought
        x_sold = compute_x_sold_when_y_bought(y_bought=y_bought, **kwargs)
        x_bought = -x_sold
    else:
        raise Exception('could not compute output')

    return {
        'x_bought': x_bought,
        'x_sold': x_sold,
        'y_bought': y_bought,
        'y_sold': y_sold,
        'fee_rate': fee_rate,
        'new_pool': {
            'x_reserves': x_reserves + x_sold,
            'y_reserves': y_reserves + y_sold,
        },
    }
def trade_to_target_reserves(
    x_reserves: int | float,
    y_reserves: int | float,
    new_x_reserves: int | float | None = None,
    new_y_reserves: int | float | None = None,
    fee_rate: float | None = None,
) -> cpmm_spec.Trade:
    """compute trade required to reach specific target token reserve amounts"""

    # convert reserve targets to bought or sold amounts
    if new_x_reserves is not None:
        if validate_utils._ensure_positive(
            x_reserves - new_x_reserves, error=False
        ):
            x_bought = x_reserves - new_x_reserves
            return trade(
                x_bought=x_bought,
                x_reserves=x_reserves,
                y_reserves=y_reserves,
                fee_rate=fee_rate,
            )
        else:
            x_sold = new_x_reserves - x_reserves
            return trade(
                x_sold=x_sold,
                x_reserves=x_reserves,
                y_reserves=y_reserves,
                fee_rate=fee_rate,
            )
    elif new_y_reserves is not None:
        if validate_utils._ensure_positive(
            y_reserves - new_y_reserves, error=False
        ):
            y_bought = y_reserves - new_y_reserves
            return trade(
                y_bought=y_bought,
                x_reserves=x_reserves,
                y_reserves=y_reserves,
                fee_rate=fee_rate,
            )
        else:
            y_sold = new_y_reserves - y_reserves
            return trade(
                y_sold=y_sold,
                x_reserves=x_reserves,
                y_reserves=y_reserves,
                fee_rate=fee_rate,
            )
    else:
        raise Exception('specify either new_x_reserves or new_y_reserves')
def trade_to_price(
    x_reserves: int | float,
    y_reserves: int | float,
    new_x_per_y: int | float | None = None,
    new_y_per_x: int | float | None = None,
    fee_rate: float | None = None,
) -> cpmm_spec.Trade:
    """compute trade required to reach specific price"""

    validate_utils._ensure_exactly_one(new_x_per_y, new_y_per_x)

    # convert prices to x per y
    if new_x_per_y is None:
        if new_y_per_x is None:
            raise Exception('must specify x_per_y or y_per_x')
        new_x_per_y = new_y_per_x ** -1

    # compute trades
    if new_x_per_y >= x_reserves / y_reserves:
        # case: sell x to increase x per y
        x_sold = compute_x_sold_to_reach_price(
            new_x_per_y=new_x_per_y,
            x_reserves=x_reserves,
            y_reserves=y_reserves,
            fee_rate=fee_rate,
        )
        return trade(
            x_sold=x_sold,
            x_reserves=x_reserves,
            y_reserves=y_reserves,
            fee_rate=fee_rate,
        )
    else:
        # case: sell y to decrease x per y
        y_sold = compute_x_sold_to_reach_price(
            new_x_per_y=(new_x_per_y ** -1),
            x_reserves=y_reserves,
            y_reserves=x_reserves,
            fee_rate=fee_rate,
        )
        return trade(
            y_sold=y_sold,
            x_reserves=x_reserves,
            y_reserves=y_reserves,
            fee_rate=fee_rate,
        )
def compute_x_sold_to_reach_price(
    x_reserves: int | float,
    y_reserves: int | float,
    new_x_per_y: int | float,
    fee_rate: float | None = None,
) -> float:
    """use quadratic formula to find trade size needed to reach new price

    - see wolframalpha.com/input/?i=g+x%5E2+%2B+%281+%2B+g%29+x+%2B+C+%3D+0
    """
    if fee_rate is None:
        fee_rate = 0.003
    gamma = 1 - fee_rate
    C = 1 - new_x_per_y * y_reserves / x_reserves
    alpha = (gamma + 1) ** 2 - 4 * C * gamma
    if isinstance(gamma, decimal.Decimal):
        alpha = alpha.sqrt()
    else:
        alpha = alpha ** 0.5
    alpha = alpha - gamma - 1
    alpha = alpha / 2 / gamma
    x_sold = alpha * x_reserves
    return x_sold


def compute_y_bought_when_x_sold(
    x_sold: int | float,
    x_reserves: int | float,
    y_reserves: int | float,
    fee_rate: float | None = None,
) -> float:
    """compute amount of y bought when selling x_sold amount of x"""
    if fee_rate is None:
        fee_rate = 0.003
    validate_utils._ensure_non_negative(x_sold)
    alpha = x_sold / x_reserves
    gamma = 1 - fee_rate
    y_bought = alpha * gamma / (1 + alpha * gamma) * y_reserves
    return y_bought


def compute_x_sold_when_y_bought(
    y_bought: int | float,
    x_reserves: int | float,
    y_reserves: int | float,
    fee_rate: float | None = None,
) -> float:
    """compute amount of x that must be sold to buy y_bought amount of y"""
    if fee_rate is None:
        fee_rate = 0.003
    validate_utils._ensure_non_negative(y_bought)
    beta = y_bought / y_reserves
    gamma = 1 - fee_rate
    x_sold = beta / (1 - beta) / gamma * x_reserves
    return x_sold
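# A minimal usage sketch (illustrative numbers, not part of the original
# module): selling 10 x into a 1000 / 1000 pool at the default 0.3% fee.
#
#     result = trade(x_reserves=1000, y_reserves=1000, x_sold=10)
#     result['y_bought']   # ~9.87 y received (alpha*gamma/(1+alpha*gamma)*y)
#     result['new_pool']   # {'x_reserves': 1010, 'y_reserves': ~990.13}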
| 28.875 | 80 | 0.597673 | 1,055 | 7,392 | 3.835071 | 0.117536 | 0.102323 | 0.092437 | 0.0435 | 0.635195 | 0.522986 | 0.486159 | 0.47306 | 0.430796 | 0.361592 | 0 | 0.008578 | 0.321834 | 7,392 | 255 | 81 | 28.988235 | 0.798524 | 0.135281 | 0 | 0.484375 | 0 | 0 | 0.035277 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.020833 | 0 | 0.104167 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b39d14aa460ee7aad9a34f8b5f86ea2f7ba1e12 | 5,144 | py | Python | main_simV4.py | iexarchos/motion_imitation | ea9004f77405c8eb1e8a53650dffa723f86018d9 | [
"Apache-2.0"
] | null | null | null | main_simV4.py | iexarchos/motion_imitation | ea9004f77405c8eb1e8a53650dffa723f86018d9 | [
"Apache-2.0"
] | null | null | null | main_simV4.py | iexarchos/motion_imitation | ea9004f77405c8eb1e8a53650dffa723f86018d9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 6 14:09:05 2020
@author: yannis
"""
import torch
import random
from pdb import set_trace as bp
from a2c_ppo_acktr.envs import make_vec_envs
from a2c_ppo_acktr.utils import get_vec_normalize
import motion_imitation
import time
import numpy as np
def testPolicy(path, scales=None, pol_scales=None):
    processes = 1
    render = True
    seed = 1
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    env = make_vec_envs(
        'A1GymEnv-v1',
        seed,
        processes,
        None,
        None,
        device='cpu',
        allow_early_resets=True, render=render)
    env_core = env.venv.venv.envs[0].env.env
    actor_critic, ob_rms = torch.load(path, map_location=torch.device('cpu'))
    vec_norm = get_vec_normalize(env)
    if vec_norm is not None:
        vec_norm.eval()
        vec_norm.ob_rms = ob_rms
    recurrent_hidden_states = torch.zeros(1, actor_critic.recurrent_hidden_state_size)
    masks = torch.zeros(1, processes)
    # env_core = env.venv.venv.envs[0]
    if processes == 1:
        N_sim = 100
        Reward = np.zeros((N_sim,))
        input('press enter')
        n = 0
        R = 0
        obs = env.reset()
        while n < N_sim:
            if pol_scales is not None:
                obs[:, -4:] = torch.FloatTensor(pol_scales)
            with torch.no_grad():
                value, action, _, recurrent_hidden_states = actor_critic.act(obs, recurrent_hidden_states, masks, deterministic=True)
            obs, reward, done, _ = env.step(action[0])
            if pol_scales is not None:
                obs[:, -4:] = torch.FloatTensor(pol_scales)
            # env_core.cam_track_torso_link()
            R += reward
            # control_steps += 1
            time.sleep(5 * 1.0 / 240.0)
            if done:
                Reward[n] = R
                n += 1
                print('Reward: ', R)
                R = 0
                # obs = env.reset()
                # obs[:, -4:] = torch.FloatTensor(pol_scales)
                # input('press enter')
            masks.fill_(0.0 if done else 1.0)
        # print('Scale: ', Scale[j, :], ', total reward:', Reward)
        input('press enter')
    else:
        N_sim = processes
        TotalReward = np.zeros((processes,))
        obs = env.reset()
        # bp()
        n = 0
        while n < N_sim:
            if pol_scales is not None:
                obs[:, -4:] = torch.FloatTensor(pol_scales)  # replace scale in the input of the policy
            with torch.no_grad():
                value, action, _, recurrent_hidden_states = actor_critic.act(
                    obs, recurrent_hidden_states, masks, deterministic=True)
            obs, reward, done, _ = env.step(action)
            if pol_scales is not None:
                obs[:, -4:] = torch.FloatTensor(pol_scales)  # replace scale in the input of the policy
            TotalReward += reward.numpy().flatten()
            for D in done:
                if D:
                    # print(done)
                    n += 1
            masks = torch.FloatTensor(
                [[0.0] if done_ else [1.0] for done_ in done])
            print('TotalReward: ', TotalReward, flush=True)
        AverageTotalReward = np.mean(TotalReward)
        Std = np.std(TotalReward)
        # print(TotalReward)
        print('Av. Total reward: ', AverageTotalReward, ', std: ', Std, ', virtual scale: ', obs[0, -4:], flush=True)
        # bp()
        N_sim = processes
        TotalReward = np.zeros((processes,))
        obs = env.reset()
        # bp()
        n = 0
        while n < N_sim:
            if pol_scales is not None:
                obs[:, -4:] = torch.FloatTensor(pol_scales)  # replace scale in the input of the policy
            with torch.no_grad():
                value, action, _, recurrent_hidden_states = actor_critic.act(
                    obs, recurrent_hidden_states, masks, deterministic=True)
            obs, reward, done, _ = env.step(action)
            if pol_scales is not None:
                obs[:, -4:] = torch.FloatTensor(pol_scales)  # replace scale in the input of the policy
            TotalReward += reward.numpy().flatten()
            for D in done:
                if D:
                    # print(done)
                    n += 1
            masks = torch.FloatTensor(
                [[0.0] if done_ else [1.0] for done_ in done])
            print('TotalReward: ', TotalReward, flush=True)
        AverageTotalReward = np.mean(TotalReward)
        Std = np.std(TotalReward)
        # print(TotalReward)
        print('Av. Total reward: ', AverageTotalReward, ', std: ', Std, ', virtual scale: ', obs[0, -4:], flush=True)
    env.close()
    # bp()
if __name__ == '__main__':
    scales = None
    pol_scales = None
    # path = '/home/yannis/Repositories/motion_imitation/12_03_nominal_policy/ppo/A1GymEnv-v1.pt'
    # path = '/home/yannis/Repositories/motion_imitation/12_11_nominal_policy/ppo/A1GymEnv-v1.pt'
    path = '/home/yannis/Repositories/motion_imitation/12_18_nominal_policy/ppo/A1GymEnv-v1.pt'
    testPolicy(path, scales, pol_scales) | 32.974359 | 137 | 0.565708 | 639 | 5,144 | 4.380282 | 0.226917 | 0.051447 | 0.022508 | 0.050018 | 0.674884 | 0.64916 | 0.628796 | 0.591997 | 0.591997 | 0.591997 | 0 | 0.024307 | 0.320179 | 5,144 | 156 | 138 | 32.974359 | 0.776094 | 0.141719 | 0 | 0.564815 | 0 | 0 | 0.056316 | 0.018696 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009259 | false | 0 | 0.074074 | 0 | 0.083333 | 0.046296 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b40b53be905051fc29376c809a528f0f56e00ed | 3,747 | py | Python | distribution/src/assembly/test/test.py | aliyun/alibabacloud-maxcompute-tool-migrate | 22ba9d36c0fe9b79b3d91766a22ec43372b6c540 | [
"Apache-2.0"
] | 19 | 2019-12-17T10:00:59.000Z | 2022-03-20T03:20:42.000Z | distribution/src/assembly/test/test.py | aliyun/alibabacloud-maxcompute-tool-migrate | 22ba9d36c0fe9b79b3d91766a22ec43372b6c540 | [
"Apache-2.0"
] | 73 | 2020-08-13T10:40:16.000Z | 2022-03-21T06:57:36.000Z | distribution/src/assembly/test/test.py | aliyun/alibabacloud-maxcompute-tool-migrate | 22ba9d36c0fe9b79b3d91766a22ec43372b6c540 | [
"Apache-2.0"
] | 6 | 2020-08-13T10:42:21.000Z | 2022-01-13T04:04:24.000Z | #
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import unittest
import mma_test.utils as utils
import shutil
import time
from typing import Dict
from mma_test.test_hive import TestHive
def get_test_suites_map() -> Dict[str, unittest.TestSuite]:
    test_suites = {}
    test_suites[TestHive.__name__] = (
        unittest.defaultTestLoader.loadTestsFromTestCase(TestHive))
    return test_suites
if __name__ == '__main__':
    suites = get_test_suites_map()

    parser = argparse.ArgumentParser(description='MMA FT runner')
    parser.add_argument(
        "--list_test_suites",
        required=False,
        const=True,
        action="store_const",
        default=False,
        help="list available test suites")
    parser.add_argument(
        "--list_test_cases",
        required=False,
        type=str,
        help="list test cases of specified test suite")
    parser.add_argument(
        "--run_test_suite",
        required=False,
        help="run specified test suite")
    parser.add_argument(
        "--run_test_case",
        required=False,
        help="run specified test case, should be in format suite.case")
    parser.add_argument(
        "--fail_fast",
        required=False,
        const=True,
        action="store_const",
        default=False,
        help="fail fast")
    args = parser.parse_args()

    if args.list_test_suites:
        for suite in suites.keys():
            print(suite)
        exit(0)

    if args.list_test_cases is not None:
        suite_name = args.list_test_cases
        if suite_name in suites:
            suite = suites[suite_name]
            for test in suite._tests:
                print(test.id().split(".")[-1])
            exit(0)
        else:
            raise Exception("Test suite not found: %s" % suite_name)

    if args.run_test_suite is not None and args.run_test_case is not None:
        err_msg = ("--run_test_suite and "
                   "--run_test_case cannot be present at the same time")
        raise Exception(err_msg)

    os.makedirs(utils.get_test_temp_dir(), exist_ok=True)

    print("Start MMA server")
    mma_server_sp = utils.start_mma_server()
    print("MMA server pid: %s" % str(mma_server_sp.pid))
    time.sleep(10)

    try:
        s = unittest.TestSuite()
        if args.run_test_suite is not None:
            if args.run_test_suite in suites:
                s.addTest(suites[args.run_test_suite])
            else:
                raise Exception("Invalid test suite")
        elif args.run_test_case is not None:
            splits = args.run_test_case.split(".")
            if len(splits) != 2:
                raise Exception("Invalid testcase: %s" % args.run_test_case)
            for test in suites[splits[0]]._tests:
                if splits[1] == test.id().split(".")[-1]:
                    s.addTest(test)
        else:
            s.addTests(suites.values())

        runner = unittest.TextTestRunner(
            verbosity=3, failfast=args.fail_fast, buffer=True)
        runner.run(s)
    finally:
        print("Stop MMA server")
        utils.stop_mma_server(mma_server_sp)
        shutil.rmtree(utils.get_test_temp_dir())
| 30.966942 | 76 | 0.631705 | 490 | 3,747 | 4.640816 | 0.340816 | 0.036939 | 0.038698 | 0.028144 | 0.216799 | 0.152595 | 0.129288 | 0.108179 | 0.047493 | 0.047493 | 0 | 0.008065 | 0.271951 | 3,747 | 120 | 77 | 31.225 | 0.825513 | 0.152655 | 0 | 0.235955 | 0 | 0 | 0.144124 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011236 | false | 0 | 0.089888 | 0 | 0.11236 | 0.05618 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b429026656499e942a38341d6e198b9bfc94595 | 1,740 | py | Python | src/muses/search_index/documents/helpers.py | Aincient/cleo | 933ef372fa7847d943206d72bfb03c201dbafbd6 | [
"Apache-2.0"
] | null | null | null | src/muses/search_index/documents/helpers.py | Aincient/cleo | 933ef372fa7847d943206d72bfb03c201dbafbd6 | [
"Apache-2.0"
] | null | null | null | src/muses/search_index/documents/helpers.py | Aincient/cleo | 933ef372fa7847d943206d72bfb03c201dbafbd6 | [
"Apache-2.0"
] | 3 | 2018-10-01T12:04:36.000Z | 2021-01-07T09:30:50.000Z | import csv
import logging
__all__ = (
'read_synonyms',
)
LOGGER = logging.getLogger(__name__)
def read_synonyms(path):
"""Read synonyms.
Read synonyms from the following format:
word_id;preferred_EN;variant1;variant2;variant3;variant4;variant5
1;Anatolia;anatolia;anatolie;anatolien;;
2;Assyria;assyria;assyrie;assyrien;;
3;Babylonia;babylonia;babylonie;babylonien;;
4;Byblos;;;;;
5;Crocodilopolis;;;;;
What we do:
- Remove first line (word_id, etc.)
- Remove first (numbered) elements from each line
- Remove empty elements (that are produced when reading the CSV)
:param path:
:return:
"""
data = []
try:
with open(path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=';')
counter = 0 # Counter so that we skip the first line
for row in csv_reader:
# Skip the first line
if counter == 0:
counter += 1
continue
# Remove the first (numbered) element
row.pop(0)
# Remove empty elements
row = [__i.lower() for __i in row if __i]
if len(row) > 1:
# Append remaining (usable) elements separated by comma
# to the returned list.
data.append(
', '.join(row)
)
counter += 1
except OSError as err:
LOGGER.error("Can't read from file {}.".format(path))
LOGGER.error(err.message)
LOGGER.debug("Produced synonyms file for {}:".format(path))
LOGGER.debug(data)
return data
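# Usage sketch (the path is a placeholder): each returned entry is one
# comma-joined synonym group. Note the lowercased preferred name is kept
# alongside the variants, and rows without usable variants (e.g. Byblos) are
# skipped, so with the sample data above the result looks like:
#
#     read_synonyms('synonyms.csv')
#     # ['anatolia, anatolia, anatolie, anatolien',
#     #  'assyria, assyria, assyrie, assyrien',
#     #  'babylonia, babylonia, babylonie, babylonien']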
| 28.064516 | 75 | 0.543678 | 191 | 1,740 | 4.832461 | 0.513089 | 0.052004 | 0.04117 | 0.03467 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01444 | 0.363218 | 1,740 | 61 | 76 | 28.52459 | 0.818592 | 0.402299 | 0 | 0.068966 | 0 | 0 | 0.072016 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.068966 | 0 | 0.137931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b46710ce31a8de493b043c80a7fb418b77deda4 | 5,503 | py | Python | GxbManager.py | moonmagian/GxbManager | fb6c31ce6b53f049ca1b40129e57ab04189d1a28 | [
"MIT"
] | 3 | 2018-08-31T07:33:12.000Z | 2019-06-10T14:21:38.000Z | GxbManager.py | moonmagian/GxbManager | fb6c31ce6b53f049ca1b40129e57ab04189d1a28 | [
"MIT"
] | null | null | null | GxbManager.py | moonmagian/GxbManager | fb6c31ce6b53f049ca1b40129e57ab04189d1a28 | [
"MIT"
] | 2 | 2018-08-20T14:45:11.000Z | 2018-08-24T09:12:47.000Z | from selenium import webdriver
import selenium
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
import re
STATUS_OUTPUT = \
'''Video: {0}
Status: {1}
Time(sec): {2} / {3}'''
CLASS_REGEX = r'''https://bh3773.class.gaoxiaobang.com/class/(\d+)/unit/(\d+)/chapter/(\d+)'''
CLASS_STRING = '''https://bh3773.class.gaoxiaobang.com/class/{0}/unit/{1}/chapter/{2}'''
# Get VideoListIDs needs LOTS OF resources, cache them to lower CPU usage.
VLIDcache = {}
class Status:
    title = "TITLE"
    playStatus = "PLAYSTATUS"
    ctime = -1
    duration = -1
    error = False

    def __repr__(self):
        if not self.error:
            return STATUS_OUTPUT.format(self.title, self.playStatus, str(self.ctime), str(self.duration))
        else:
            return "Not a valid video page."
def videoList(driver: webdriver.chrome.webdriver.WebDriver):
    try:
        return list(filter(lambda x: x.get_attribute(
            'content_type') == 'Video', driver.find_elements_by_class_name("chapter-info")))
    except Exception:
        return []


def autoLogin(driver: webdriver.chrome.webdriver.WebDriver, loginLink: str, username: str, passwd: str):
    try:
        driver.get(loginLink)
        driver.find_element_by_id('username').send_keys(username)
        driver.find_element_by_id('password').send_keys(passwd)
        driver.find_element_by_class_name('login_btn').click()
        return True
    except selenium.common.exceptions.NoSuchElementException:
        return False


def status(driver: webdriver.chrome.webdriver.WebDriver):
    '''
    Get current status of video page.

    :param driver: WebDriver, the WebDriver to get status
    :returns: Status, a Status object storing status information
    '''
    output = Status()
    try:
        videoPlayer = driver.find_element_by_id('video_player_html5_api')
        output.title = driver.find_element_by_class_name('chapter-title').text
        videoShell = driver.find_element_by_id('video_player')
        vsClass = videoShell.get_attribute('class')
        # find() returns -1 when the class is absent, so +1 makes it falsy
        if vsClass.find('vjs-paused') + 1:
            output.playStatus = 'paused'
        else:
            output.playStatus = 'playing'
        output.duration = videoPlayer.get_property('duration')
        output.ctime = videoPlayer.get_property('currentTime')
    except Exception:
        output.error = True
    finally:
        return output


def triggerPlay(driver):
    '''
    Trigger current play status.

    :param driver: WebDriver, the WebDriver to trigger
    :returns: Bool, if the trigger is successful
    '''
    try:
        videoPlayer = driver.find_element_by_class_name('video-js')
        videoPlayer.click()
        return True
    except Exception:
        return False


def needAnswer(driver: selenium.webdriver.chrome.webdriver.WebDriver):
    '''
    Check if a question is shown.

    :param driver: WebDriver, the WebDriver to check
    :returns: Bool, if a question is shown.
    '''
    f = driver.find_elements_by_class_name('correctAnswer')
    if f:
        return True
    else:
        return False


def answer(driver: selenium.webdriver.chrome.webdriver.WebDriver):
    '''
    Answer in-video questions.

    :param driver: WebDriver, the WebDriver to answer
    :returns: Bool, if answering is successful
    '''
    try:
        answers = driver.find_element_by_class_name(
            'correctAnswer').get_attribute('data')
        correctArray = [ord(i) - ord('A') for i in answers]
        chooseName = 'gxb-icon-check'
        try:
            driver.find_element_by_class_name('gxb-icon-radio')
            chooseName = 'gxb-icon-radio'
        except selenium.common.exceptions.NoSuchElementException:
            pass
        for answer in correctArray:
            driver.find_elements_by_class_name(chooseName)[
                answer].click()
        driver.find_element_by_class_name('submit').click()
        play = WebDriverWait(driver, 2).until(
            EC.presence_of_element_located((By.CLASS_NAME, 'player')))
        play.click()
        return True
    except Exception:
        return False


def nextVideo(driver: webdriver.chrome.webdriver.WebDriver):
    match = re.match(CLASS_REGEX, driver.current_url)
    if not match:
        return False
    videoIds = list(map(lambda x: x.get_attribute(
        'chapter_id'), videoList(driver)))
    try:
        # When the page is not a video, append it to the video list to get the nearest video.
        if match.groups()[2] not in videoIds:
            videoIds.append(match.groups()[2])
        videoIds.sort()
        index = videoIds.index(match.groups()[2])
        if index != len(videoIds) - 1:
            url = CLASS_STRING.format(
                *match.groups()[:-1], videoIds[index + 1])
            driver.get(url)
            return True
        else:
            return False
    # TODO: When the class ends, raise a custom error and start a new class.
    except Exception:
        return False


def inVideoPage(driver: webdriver.chrome.webdriver.WebDriver):
    match = re.match(CLASS_REGEX, driver.current_url)
    if not match:
        return False
    if match.groups()[0] not in VLIDcache.keys():
        VLIDcache[match.groups()[0]] = list(map(lambda x: x.get_attribute(
            'chapter_id'), videoList(driver)))
    return match.groups()[2] in VLIDcache[match.groups()[0]]
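# A minimal driving-loop sketch (the login URL and credentials are
# placeholders, not part of this module):
#
#     driver = webdriver.Chrome()
#     autoLogin(driver, 'https://<login-page>', '<username>', '<password>')
#     while inVideoPage(driver):
#         if needAnswer(driver):
#             answer(driver)
#         vid_status = status(driver)
#         if not vid_status.error and vid_status.playStatus == 'paused':
#             triggerPlay(driver)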
| 32.952096 | 105 | 0.652008 | 662 | 5,503 | 5.294562 | 0.256798 | 0.03709 | 0.031384 | 0.054208 | 0.374893 | 0.269615 | 0.103281 | 0.085021 | 0.085021 | 0.085021 | 0 | 0.007136 | 0.236053 | 5,503 | 166 | 106 | 33.150602 | 0.826594 | 0.133745 | 0 | 0.318966 | 0 | 0.017241 | 0.094527 | 0.004759 | 0 | 0 | 0 | 0.006024 | 0 | 1 | 0.077586 | false | 0.025862 | 0.060345 | 0 | 0.344828 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b46ed8634fc704f45f15531d6f71a175564ad9b | 16,090 | py | Python | statey/fsm.py | cfeenstra67/statey | 6d127ed48265e2e072fbb26486458a4b28a333ec | [
"MIT"
] | 4 | 2021-02-16T19:34:38.000Z | 2022-01-31T16:44:14.000Z | statey/fsm.py | cfeenstra67/statey | 6d127ed48265e2e072fbb26486458a4b28a333ec | [
"MIT"
] | null | null | null | statey/fsm.py | cfeenstra67/statey | 6d127ed48265e2e072fbb26486458a4b28a333ec | [
"MIT"
] | null | null | null | import abc
import dataclasses as dc
import enum
import types as pytypes
from collections import Counter
from functools import wraps, partial
from typing import Sequence, Callable, Type as PyType, Dict, Any, Optional
import networkx as nx
import statey as st
from statey import resource, task, exc
from statey.provider import Provider
from statey.syms import utils, types, Object, diff
class Transition(abc.ABC):
    """
    A transition defines the procedure for migrating a machine
    from one state to another (they may also be the same state)
    """

    from_name: str
    to_name: str
    name: str

    @abc.abstractmethod
    async def plan(
        self,
        current: resource.BoundState,
        config: resource.BoundState,
        session: task.TaskSession,
    ) -> Object:
        """
        Same as Resource.plan(), except for planning
        a specific transition.
        """
        raise NotImplementedError


@dc.dataclass(frozen=True)
class FunctionTransition(Transition):
    """
    Transition class that simply wraps a function
    """

    from_name: str
    to_name: str
    name: str
    func: Callable[[Any], Any]

    async def plan(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> Object:
        return await self.func(current=current, config=config, session=session)


def transition(from_name: str, to_name: str, name: str = utils.MISSING) -> Any:
    """
    Generate a decorator to wrap a function as a transition
    """
    def dec(func):
        nonlocal name
        if name is utils.MISSING:
            name = getattr(func, "__name__", "<unknown>")

        @wraps(func)
        def get_transition(*args, **kwargs):
            new_func = lambda *args2, **kwargs2: func(
                *args, *args2, **kwargs, **kwargs2
            )
            return FunctionTransition(from_name, to_name, name, new_func)

        get_transition.transition_factory = True
        return get_transition

    return dec
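# For example, within a Machine subclass a transition is declared exactly the
# way the built-in ones in SingleStateMachine below are:
#
#     @transition("DOWN", "UP")
#     async def create_resource(self, current, config, session) -> Object:
#         ...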
class MachineMeta(type(resource.Resource)):
    """
    Special behavior for state machines
    """

    @classmethod
    def _validate_states(
        cls, old_states: Sequence[resource.State], new_states: Sequence[resource.State]
    ) -> Sequence[resource.State]:
        new_names = Counter(state.name for state in new_states)
        if new_names and max(new_names.values()) > 1:
            multi = {k: v for k, v in new_names.items() if v > 1}
            raise ValueError(f"Duplicate states found: {multi}")
        old_states = [state for state in old_states if state.name not in new_names]
        return old_states + list(new_states)

    def __new__(
        cls, name: str, bases: Sequence[PyType], attrs: Dict[str, Any]
    ) -> PyType:
        super_cls = super().__new__(cls, name, bases, attrs)
        states = super_cls.__states__ if hasattr(super_cls, "__states__") else ()
        new_states = [val for val in attrs.values() if isinstance(val, resource.State)]
        states = cls._validate_states(states, new_states)
        super_cls.__states__ = tuple(states)

        transitions = (
            super_cls.__transitions__
            if hasattr(super_cls, "__transitions__")
            else set()
        )
        new_transitions = {
            name
            for name, val in attrs.items()
            if hasattr(val, "transition_factory") and val.transition_factory
        }
        super_cls.__transitions__ = transitions | new_transitions

        return super_cls


class Machine(resource.Resource, metaclass=MachineMeta):
    """
    Class with a metaclass to automatically collect states and transitions into class variables.
    """

    def __init__(self, name: str, provider: Optional[Provider] = None) -> None:
        if provider is None:
            from statey.provider import default_provider as provider
        self.name = name
        self.provider = provider
        # This is temporary, should clean this up
        for state in self.__states__:
            self.set_resource_state(resource.ResourceState(state, name, provider.id))

    def set_resource_state(self, state: resource.ResourceState) -> None:
        setattr(self, state.state.name, state)

    @property
    def null_state(self) -> resource.ResourceState:
        state = next((s for s in self.__states__ if s.null))
        return resource.ResourceState(state, self.name, self.provider.id)

    async def plan(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> Object:
        from_name = current.state.name
        to_name = config.state.name
        transitions = (getattr(self, tran)() for tran in self.__transitions__)
        transition = next(
            (
                tran
                for tran in transitions
                if tran.from_name == from_name
                if tran.to_name == to_name
            ),
            None,
        )
        if transition is None:
            raise exc.PlanError(
                f"Unable to find transition from {from_name} to {to_name}."
            )
        return await transition.plan(current, config, session)

    def __call__(self, *args, **kwargs) -> resource.ResourceState:
        states = [state for state in self.__states__ if state != self.null_state.state]
        if len(states) > 1:
            raise TypeError(f'"{self.name}" has more than one non-null state.')
        if len(states) < 1:
            raise TypeError(f'"{self.name}" does not have any non-null states.')
        return resource.ResourceState(states[0], self.name, self.provider.id)(
            *args, **kwargs
        )

    @abc.abstractmethod
    async def refresh(self, current: resource.BoundState) -> resource.BoundState:
        """
        Same as Resource.refresh()
        """
        raise NotImplementedError

    async def finalize(self, current: resource.BoundState) -> resource.BoundState:
        return current
class ModificationAction(enum.Enum):
    """
    Actions to control simple machine behavior
    """

    NONE = "none"
    MODIFY = "modify"
    DELETE_AND_RECREATE = "delete_and_recreate"


class SingleStateMachine(Machine):
    """
    A simple machine is an FSM which can only have two states: UP and DOWN.

    Note that a SimpleMachine's UP state should have all of the same fields available
    in its output type as its input type.
    """

    UP: resource.State
    DOWN: resource.NullState = resource.NullState("DOWN")

    @abc.abstractmethod
    async def create(
        self, session: task.TaskSession, config: resource.StateConfig
    ) -> "Object":
        """
        Create this resource with the given configuration
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def delete(
        self, session: task.TaskSession, current: resource.StateSnapshot
    ) -> "Object":
        """
        Delete the resource with the given data
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def modify(
        self,
        session: task.TaskSession,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
    ) -> "Object":
        """
        Modify the resource from `data` to the given config. The default behavior
        is always to delete and recreate the resource.

        NOTE: if subclasses do not modify the get_action() implementation they can
        override this with a stub method, as it will never be called. It is defined
        as abstract to avoid the case where it is omitted accidentally and
        NotImplementedError is raised during task execution.
        """
        raise NotImplementedError

    # Overriding this as an "optional" abstract method
    modify = NotImplemented

    @abc.abstractmethod
    async def refresh_state(self, data: Any) -> Optional[Any]:
        """
        Get a refreshed version of `data` (which is in the state UP). Return None
        to indicate the resource no longer exists.
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def get_action(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> ModificationAction:
        """
        From the current and config values, determine which modification action should be taken.
        """
        raise NotImplementedError

    async def refresh_config(self, config: "Object") -> "Object":
        """
        Transform a configuration before planning
        """
        return config

    async def refresh(self, current: resource.StateSnapshot) -> resource.StateSnapshot:
        if current.state.name == self.null_state.name:
            return current
        info = await self.refresh_state(current.data)
        if info is None:
            return resource.StateSnapshot({}, self.null_state)
        return resource.StateSnapshot(info, current.state)

    @transition("UP", "UP")
    async def modify_resource(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> Object:
        config = config.clone(obj=await self.refresh_config(config.obj))
        action = await self.get_action(current, config, session)
        if action == ModificationAction.NONE:
            return current.obj
        if action == ModificationAction.MODIFY:
            if self.modify is NotImplemented:
                raise NotImplementedError(
                    f"`modify` has not been defined in {type(self).__name__}."
                )
            return await self.modify(session, current, config)
        if action == ModificationAction.DELETE_AND_RECREATE:
            raise exc.NullRequired
        raise exc.InvalidModificationAction(action)

    @transition("DOWN", "UP")
    async def create_resource(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> Object:
        config = config.clone(obj=await self.refresh_config(config.obj))
        return await self.create(session, config)

    @transition("UP", "DOWN")
    async def delete_resource(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> Object:
        return await self.delete(session, current)

    @transition("DOWN", "DOWN")
    async def noop_down(
        self,
        current: resource.StateSnapshot,
        config: resource.StateConfig,
        session: task.TaskSession,
    ) -> Object:
        return current.obj
class SimpleMachine(SingleStateMachine):
"""
A simple machine has only a single state and each transition only consists
of a single task
"""
async def get_expected(
self,
current: resource.StateSnapshot,
config: resource.StateConfig,
session: task.TaskSession,
) -> Any:
"""
Get the expected output for the given configuration. Default implementation
is just passing through config fields and setting the rest as unknown
"""
output = st.Unknown[config.state.output_type]
if not current.state.null:
output = current.obj
return st.fill(config.obj, config.state.output_type, output)
# Not defined as abstract methods because subclasses may want to just override
# the top-level methods instead
async def create_task(self, config: Any) -> Any:
"""
Defines a single task called "create" that will create this resource
"""
raise NotImplementedError
async def delete_task(self, current: Any) -> Any:
"""
Defines a single task called "delete" that will delete this resource
"""
raise NotImplementedError
async def modify_task(self, diff: diff.Diff, current: Any, config: Any) -> Any:
"""
Defines a single task called "modify" that will modify this resource
"""
raise NotImplementedError
def _get_optional_method(self, name: str) -> Callable[[Any], Any]:
if getattr(type(self), name) is getattr(SimpleMachine, name):
raise NotImplementedError(f"{name} has not been defined in this class.")
return getattr(self, name)
def get_action_from_diff(self, diff: diff.Diff) -> ModificationAction:
"""
With the given diff, determine which action must be taken to get to the configured
state. This is only called when both the current and configured state are UP.
        Overriding this method is optional; by default it will always delete and recreate
the resource.
"""
if not diff:
return ModificationAction.NONE
return ModificationAction.DELETE_AND_RECREATE
def get_diff(
self,
current: resource.StateSnapshot,
config: resource.StateConfig,
session: task.TaskSession,
) -> diff.Diff:
"""
Produce a diff given the current, config and session data
"""
differ = session.ns.registry.get_differ(config.state.input_type)
current_as_config = st.filter_struct(current.obj, config.type)
return differ.diff(current_as_config, config.obj, session)
async def get_action(
self,
current: resource.StateSnapshot,
config: resource.StateConfig,
session: task.TaskSession,
) -> ModificationAction:
"""
Split get_action into get_diff and get_action_from_diff
"""
diff = self.get_diff(current, config, session)
return self.get_action_from_diff(diff)
async def create(
self, session: task.TaskSession, config: resource.StateConfig
) -> "Object":
current = resource.StateSnapshot({}, self.null_state.state)
expected = await self.get_expected(current, config, session)
create_task = self._get_optional_method("create_task")
return session["create"] << (task.new(create_task)(config.obj) >> expected)
async def delete(
self, session: task.TaskSession, current: resource.StateSnapshot
) -> "Object":
delete_task = self._get_optional_method("delete_task")
ref = session["delete"] << task.new(delete_task)(current.obj)
return st.join(st.Object({}, st.EmptyType, session.ns.registry), ref)
async def modify(
self,
session: task.TaskSession,
current: resource.StateSnapshot,
config: resource.StateConfig,
) -> "Object":
expected = await self.get_expected(current, config, session)
modify_task = self._get_optional_method("modify_task")
diff = self.get_diff(current, config, session)
partial_modify = partial(modify_task, diff)
return session["modify"] << (
task.new(partial_modify)(current.obj, config.obj) >> expected
)
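# --- Illustrative sketch (commented out, like MachineResource below) ---
# A minimal SimpleMachine subclass; the resource fields and return values are
# hypothetical and only show how the create/delete/modify hooks fit together:
#
# class EchoMachine(SimpleMachine):
#     async def create_task(self, config: Any) -> Any:
#         # e.g. call an external API and return the created attributes
#         return dict(config)
#
#     async def delete_task(self, current: Any) -> Any:
#         # e.g. call an external API to delete the resource
#         return {}
#
#     async def modify_task(self, diff: diff.Diff, current: Any, config: Any) -> Any:
#         # apply only the changed fields instead of recreating
#         return {**current, **config}
#
#     def get_action_from_diff(self, diff: diff.Diff) -> ModificationAction:
#         # opt in to in-place modification (default is delete-and-recreate)
#         return ModificationAction.NONE if not diff else ModificationAction.MODIFY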
# class MachineResource(resource.Resource):
# """
# Simple wrapper resource, for state machines all logic is really in the States
# implementation
# Example:
# rs = MachineResource(MyMachine('new_resource'))
# """
# # This will be set in the constructor
# States = None
# def __init__(
# self, name: str, machine_cls: PyType[Machine], provider: Provider
# ) -> None:
# self.States = self.machine_cls = machine_cls
# self.name = name
# self.provider = provider
# super().__init__()
# async def plan(
# self,
# current: resource.StateSnapshot,
# config: resource.StateConfig,
# session: task.TaskSession,
# ) -> Object:
# return await self.s.plan(current, config, session)
# async def refresh(self, current: resource.StateSnapshot) -> resource.StateSnapshot:
# return await self.s.refresh(current)
# async def finalize(self, current: resource.StateSnapshot) -> resource.StateSnapshot:
# return await self.s.finalize(current)
| 32.374245 | 97 | 0.636482 | 1,810 | 16,090 | 5.541436 | 0.156354 | 0.021535 | 0.053041 | 0.044666 | 0.325723 | 0.278265 | 0.233699 | 0.223729 | 0.198903 | 0.190628 | 0 | 0.000771 | 0.274332 | 16,090 | 496 | 98 | 32.439516 | 0.858256 | 0.137415 | 0 | 0.386986 | 0 | 0 | 0.041133 | 0.001828 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041096 | false | 0 | 0.044521 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b4761fe2b3dfb5179be295baf3be2ef36b02d3e | 2,555 | py | Python | aicup-python/model/unit.py | arijitgupta42/RAIC-2019 | e17828a4a6ac7990fe340b56276378be2297397f | [
"MIT"
] | null | null | null | aicup-python/model/unit.py | arijitgupta42/RAIC-2019 | e17828a4a6ac7990fe340b56276378be2297397f | [
"MIT"
] | null | null | null | aicup-python/model/unit.py | arijitgupta42/RAIC-2019 | e17828a4a6ac7990fe340b56276378be2297397f | [
"MIT"
] | null | null | null | from .vec2_double import Vec2Double
from .jump_state import JumpState
from .weapon import Weapon
class Unit:
def __init__(self, player_id, id, health, position, size, jump_state, walked_right, stand, on_ground, on_ladder, mines, weapon):
self.player_id = player_id
self.id = id
self.health = health
self.position = position
self.size = size
self.jump_state = jump_state
self.walked_right = walked_right
self.stand = stand
self.on_ground = on_ground
self.on_ladder = on_ladder
self.mines = mines
self.weapon = weapon
@staticmethod
def read_from(stream):
player_id = stream.read_int()
id = stream.read_int()
health = stream.read_int()
position = Vec2Double.read_from(stream)
size = Vec2Double.read_from(stream)
jump_state = JumpState.read_from(stream)
walked_right = stream.read_bool()
stand = stream.read_bool()
on_ground = stream.read_bool()
on_ladder = stream.read_bool()
mines = stream.read_int()
if stream.read_bool():
weapon = Weapon.read_from(stream)
else:
weapon = None
return Unit(player_id, id, health, position, size, jump_state, walked_right, stand, on_ground, on_ladder, mines, weapon)
def write_to(self, stream):
stream.write_int(self.player_id)
stream.write_int(self.id)
stream.write_int(self.health)
self.position.write_to(stream)
self.size.write_to(stream)
self.jump_state.write_to(stream)
stream.write_bool(self.walked_right)
stream.write_bool(self.stand)
stream.write_bool(self.on_ground)
stream.write_bool(self.on_ladder)
stream.write_int(self.mines)
if self.weapon is None:
stream.write_bool(False)
else:
stream.write_bool(True)
self.weapon.write_to(stream)
def __repr__(self):
return "Unit(" + \
repr(self.player_id) + "," + \
repr(self.id) + "," + \
repr(self.health) + "," + \
repr(self.position) + "," + \
repr(self.size) + "," + \
repr(self.jump_state) + "," + \
repr(self.walked_right) + "," + \
repr(self.stand) + "," + \
repr(self.on_ground) + "," + \
repr(self.on_ladder) + "," + \
repr(self.mines) + "," + \
repr(self.weapon) + \
")"
| 37.028986 | 132 | 0.585127 | 305 | 2,555 | 4.655738 | 0.134426 | 0.073239 | 0.06338 | 0.050704 | 0.215493 | 0.157746 | 0.112676 | 0.112676 | 0.112676 | 0.112676 | 0 | 0.003359 | 0.300978 | 2,555 | 68 | 133 | 37.573529 | 0.791713 | 0 | 0 | 0.058824 | 0 | 0 | 0.006654 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0.014706 | 0.161765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b4832ce003abf03eb474b13d67edabb8d78412f | 305 | py | Python | Python3/Lucky Numbers in a Matrix.py | olma2077/LeetCode | 6a229ae23c5a211bc44de51178ced5bef6a44233 | [
"MIT"
] | 1 | 2020-04-12T09:34:52.000Z | 2020-04-12T09:34:52.000Z | Python3/Lucky Numbers in a Matrix.py | olma2077/LeetCode | 6a229ae23c5a211bc44de51178ced5bef6a44233 | [
"MIT"
] | null | null | null | Python3/Lucky Numbers in a Matrix.py | olma2077/LeetCode | 6a229ae23c5a211bc44de51178ced5bef6a44233 | [
"MIT"
] | null | null | null | from typing import List


class Solution:
    def luckyNumbers(self, matrix: List[List[int]]) -> List[int]:
        """A lucky number is the minimum of its row and the maximum of its column."""
        nums = []
        for row in matrix:
            num = min(row)                   # row minimum
            i = row.index(num)               # its column index
            if num == max(line[i] for line in matrix):  # also the column maximum?
                nums.append(num)
        return nums
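# Example: row minima are 3, 9, 15 and only 15 is also its column's maximum, so:
# Solution().luckyNumbers([[3, 7, 8], [9, 11, 13], [15, 16, 17]]) -> [15]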
| 27.727273 | 66 | 0.478689 | 37 | 305 | 3.945946 | 0.567568 | 0.09589 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.4 | 305 | 10 | 67 | 30.5 | 0.797814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b4a40f899a77b427cfbccdfdad28f929fa2fc9b | 10,008 | py | Python | modules/jwtoken/handlers/jwtokenhandler.py | umbros/spid-sp-sapspid | 5546aeb2bc968d26537732af8e7aee52d1896e99 | [
"MIT"
] | 6 | 2017-09-30T11:10:22.000Z | 2022-02-04T19:42:28.000Z | modules/jwtoken/handlers/jwtokenhandler.py | umbros/spid-sp-sapspid | 5546aeb2bc968d26537732af8e7aee52d1896e99 | [
"MIT"
] | 4 | 2019-01-30T13:38:42.000Z | 2021-03-28T14:51:31.000Z | modules/jwtoken/handlers/jwtokenhandler.py | umbros/spid-sp-sapspid | 5546aeb2bc968d26537732af8e7aee52d1896e99 | [
"MIT"
] | 4 | 2017-10-06T14:17:50.000Z | 2021-02-18T08:38:19.000Z | from response import ResponseObj
from response import RequestHandler
from request import RequestObjNew
import tornado.web
import traceback
import tornado.gen
import tornado.ioloop
import tornado.concurrent
import logging
from lib.customException import ApplicationException
import globalsObj
import re
import jwtoken.lib.jwtoken
import asyncio
class jwtokenHandler(RequestHandler):
def __init__(self, *args, **kwds):
super(RequestHandler, self).__init__(*args, **kwds)
self.dbobjJwt = globalsObj.DbConnections['jwtDb']
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
#self.set_header("Access-Control-Allow-Headers", "x-requested-with")
self.set_header('Access-Control-Allow-Methods', ' POST, GET, OPTIONS')
    # generic error handling
def write_error(self, status_code, **kwargs):
# debug info
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
debugTmp = ""
for line in traceback.format_exception(*kwargs["exc_info"]):
debugTmp += line
getResponse = ResponseObj(debugMessage=debugTmp,httpcode=status_code,devMessage=self._reason)
else:
getResponse = ResponseObj(httpcode=status_code,devMessage=self._reason)
self.set_header('Content-Type', 'application/json; charset=UTF-8')
self.set_status(status_code)
        # set the custom error code
getResponse.setError('3')
getResponse.setResult()
self.write(getResponse.jsonWrite())
self.finish()
#get
async def get(self):
self.set_header('Content-Type', 'application/json; charset=UTF-8')
self.set_default_headers()
if re.match("/api/jwt/getByType", self.request.path):
#task da eseguire per il get
response_obj = await asyncio.get_event_loop().run_in_executor(None, self.getByType)
#response_obj = await tornado.platform.asyncio.to_tornado_future(fut)
elif re.match("/api/jwt/verify", self.request.path):
#task da eseguire per il get
response_obj = await asyncio.get_event_loop().run_in_executor(None, self.verify)
#response_obj = await tornado.platform.asyncio.to_tornado_future(fut)
self.writeLog(response_obj)
self.writeResponse(response_obj)
#@tornado.gen.coroutine
async def post(self):
self.set_header('Content-Type', 'application/json; charset=UTF-8')
self.set_default_headers()
if re.match("/api/jwt/verify", self.request.path):
response_obj = await asyncio.get_event_loop().run_in_executor(None, self.verify)
#response_obj = await tornado.platform.asyncio.to_tornado_future(fut)
self.writeLog(response_obj)
self.writeResponse(response_obj)
def options(self):
# no body
self.set_status(204)
self.finish()
def writeResponse(self, response_obj):
self.set_status(response_obj.error.httpcode)
self.write(response_obj.jsonWrite())
self.finish()
def writeLog(self, response_obj):
x_real_ip = self.request.headers.get("X-Real-IP")
remote_ip = x_real_ip or self.request.remote_ip
#insert log
if str(self.request.body, 'utf-8') == '':
body = None
else:
body = str(self.request.body, 'utf-8')
log_request = self.dbobjJwt.makeQuery("EXECUTE log_request(%s, %s, %s, %s)",
[self.request.method,
self.request.protocol + "://" + self.request.host + self.request.uri,
body,
remote_ip],
type = self.dbobjJwt.stmts['log_request']['pool'], close = True, fetch=False)
log_response = self.dbobjJwt.makeQuery("EXECUTE log_response(%s, %s, %s, %s)",
[response_obj.error.httpcode,
self.request.protocol + "://" + self.request.host + self.request.uri,
response_obj.jsonWrite(),
remote_ip],
type = self.dbobjJwt.stmts['log_response']['pool'], close = True, fetch=False)
return
#@tornado.concurrent.run_on_executor
def getByType(self):
try:
            # this runs in the `executor` pool
            jwtCode = self.get_argument('type')
#connJwt = jwtoken.lib.database.Database(globalsObj.DbConnections['jwtMasterdsn'])
#newcod_token = connJwt.createTokenByType(jwtCode)
newcod_cod_token = self.dbobjJwt.makeQuery("EXECUTE create_token_by_type(%s)",
[jwtCode],type = self.dbobjJwt.stmts['create_token_by_type']['pool'], close = True)
newcod_token = self.dbobjJwt.makeQuery("EXECUTE get_token_by_cod(%s)",
[newcod_cod_token['result']['cod_token']],type = self.dbobjJwt.stmts['get_token_by_cod']['pool'], close = True)
if newcod_token['error'] == 0 and newcod_token['result'] is not None:
# genera risposta tutto ok
response_obj = ResponseObj(httpcode=200)
response_obj.setError('200')
response_obj.setResult(token = newcod_token['result']['token'])
elif newcod_token['error'] == 0 and newcod_token['result'] is None:
response_obj = ResponseObj(httpcode=404)
response_obj.setError('jwtoken102')
            elif newcod_token['error'] >= 1:  # any nonzero error is a DB failure (was `> 1`, leaving error == 1 unhandled)
response_obj = ResponseObj(debugMessage=newcod_token['result'].pgerror, httpcode=500,
devMessage=("PostgreSQL error code: %s" % newcod_token['result'].pgcode))
response_obj.setError('jwtoken105')
except tornado.web.MissingArgumentError as error:
response_obj = ResponseObj(debugMessage=error.log_message, httpcode=error.status_code,
devMessage=error.log_message)
response_obj.setError(str(error.status_code))
logging.getLogger(__name__).error('%s'% error,exc_info=True)
except ApplicationException as inst:
response_obj = ResponseObj(httpcode=500)
response_obj.setError(inst.code)
#responsejson = response_obj.jsonWrite()
logging.getLogger(__name__).error('Exception',exc_info=True)
except Exception as inst:
response_obj = ResponseObj(httpcode=500)
response_obj.setError('500')
logging.getLogger(__name__).error('Exception',exc_info=True)
finally:
logging.getLogger(__name__).warning('jwt/getByType handler executed')
return response_obj
def verify(self):
try:
#connJwt = jwtoken.lib.database.Database(globalsObj.DbConnections['jwtSlavedsn'])
if self.request.method == 'GET':
                token = self.get_argument('token')
elif self.request.method == 'POST':
                # read the request JSON
temp = RequestObjNew(self.request.body)
if temp.error["code"] == 2:
response_obj = ResponseObj(debugMessage=temp.error["message"], httpcode=400)
response_obj.setError('400')
logging.getLogger(__name__).error('Validation error. Json input error')
return response_obj
elif temp.error["code"] > 0:
raise tornado.web.HTTPError(httpcode=503, log_message=temp.error["message"])
token = temp.request['token']
#verifica = connJwt.verifyToken(token)
verifica = self.dbobjJwt.makeQuery("EXECUTE verify_token(%s)",
[token],type = self.dbobjJwt.stmts['verify_token']['pool'], close = True)
if verifica['error'] == 0:
                if verifica['result'][0] is None:
response_obj = ResponseObj(httpcode=404)
response_obj.setError('jwtoken101')
elif verifica['result'][0]['error'] == 0:
response_obj = ResponseObj(httpcode=200)
response_obj.setError('200')
response_obj.setResult(jose = verifica['result'][0]['message'])
elif verifica['result'][0]['error'] > 0:
response_obj = ResponseObj(httpcode=401, devMessage=(verifica['result'][0]['message']))
response_obj.setError('jwtoken100')
            elif verifica['error'] >= 1:  # any nonzero error is a DB failure
response_obj = ResponseObj(debugMessage=verifica['result'].pgerror, httpcode=500,
devMessage=("PostgreSQL error code: %s" % verifica['result'].pgcode))
response_obj.setError('jwtoken105')
except tornado.web.MissingArgumentError as error:
response_obj = ResponseObj(debugMessage=error.log_message, httpcode=error.status_code,
devMessage=error.log_message)
response_obj.setError(str(error.status_code))
logging.getLogger(__name__).error('%s'% error,exc_info=True)
except ApplicationException as inst:
response_obj = ResponseObj(httpcode=500)
response_obj.setError(inst.code)
#responsejson = response_obj.jsonWrite()
logging.getLogger(__name__).error('Exception',exc_info=True)
except Exception as inst:
response_obj = ResponseObj(httpcode=500)
response_obj.setError('500')
logging.getLogger(__name__).error('Exception',exc_info=True)
finally:
logging.getLogger(__name__).warning('jwt/verify handler executed')
if self.request.method == 'POST':
response_obj.setID(temp.id)
return response_obj
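# --- Illustrative client call (sketch) ---
# The host/port are placeholders; the GET path reads a `token` query parameter
# (the POST payload shape is defined by RequestObjNew):
#
#   import requests
#   r = requests.get('http://localhost:8888/api/jwt/verify',
#                    params={'token': '<jwt-here>'})
#   print(r.json())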
| 42.769231 | 135 | 0.611511 | 1,066 | 10,008 | 5.54409 | 0.192308 | 0.096785 | 0.052115 | 0.045685 | 0.575973 | 0.528934 | 0.469543 | 0.439763 | 0.432995 | 0.366328 | 0 | 0.012691 | 0.275679 | 10,008 | 233 | 136 | 42.95279 | 0.802593 | 0.085731 | 0 | 0.378882 | 0 | 0 | 0.109483 | 0.008701 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049689 | false | 0 | 0.086957 | 0 | 0.167702 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b56c27371d7864fd9724c051669c52b7b5c54a4 | 1,796 | py | Python | humans.py | AlexTaguchi/image-segmentation | a0cff755d5b6478bb70e30c623fb62a676cc851a | [
"MIT"
] | null | null | null | humans.py | AlexTaguchi/image-segmentation | a0cff755d5b6478bb70e30c623fb62a676cc851a | [
"MIT"
] | null | null | null | humans.py | AlexTaguchi/image-segmentation | a0cff755d5b6478bb70e30c623fb62a676cc851a | [
"MIT"
] | null | null | null | # Real-time human segmentation with a web camera
# Modules
import cv2
import numpy as np
import torch
from torchvision import transforms
# Use GPU if available
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Load Pretrained DeepLabV3
model = torch.hub.load('pytorch/vision:v0.6.0', 'deeplabv3_resnet101', pretrained=True)
model.eval()
model.to(device)
# Preprocess image
preprocess = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# Start camera capture
capture = cv2.VideoCapture(0)
while True:
# Capture mirror image video frame
_, frame = capture.read()
frame = cv2.flip(frame, 1)
    # Convert frame to tensor (OpenCV frames are BGR; the pretrained model
    # expects RGB, so convert before normalizing)
    frame_tensor = preprocess(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)).unsqueeze(0).to(device)
# Predict image segmentation
with torch.no_grad():
output = model(frame_tensor)['out'][0].argmax(0)
    # Group classes into human (class 15 = 'person' in the PASCAL VOC label set) or background
output[output != 15] = 0
output[output == 15] = 1
# Resize output to frame shape
output = output.byte().cpu().numpy()
output = np.stack((output, output, output), -1)
output = cv2.resize(output, frame.shape[1::-1]).astype(bool)
# Create human and background masks
human = (frame * output).astype(float)
background = frame * np.invert(output)
    # Apply a transparent overlay to the human class (note: [255, 0, 0] renders
    # as blue in OpenCV's BGR channel order)
    overlay = output * np.array([[255, 0, 0]])
human = 0.66 * human + 0.33 * overlay
# Display frame with overlay
cv2.imshow('frame', human.astype('uint8') + background.astype('uint8'))
# Exit with q key
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release camera capture
capture.release()
cv2.destroyAllWindows()
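# --- Optional extension (sketch): persist the composited frames to a video
# file; codec, filename, and FPS below are placeholders ---
# writer = cv2.VideoWriter('segmented.avi', cv2.VideoWriter_fourcc(*'XVID'),
#                          20.0, (640, 480))
# # inside the loop: writer.write(composited_frame)
# # after the loop:  writer.release()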
| 26.411765 | 87 | 0.678174 | 246 | 1,796 | 4.926829 | 0.463415 | 0.049505 | 0.033003 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046994 | 0.194321 | 1,796 | 67 | 88 | 26.80597 | 0.790601 | 0.242762 | 0 | 0 | 0 | 0 | 0.049144 | 0.015637 | 0 | 0 | 0.002978 | 0 | 0 | 1 | 0 | false | 0 | 0.194444 | 0 | 0.194444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b579891ec54a7eaab385d732105f141cf6b521b | 2,276 | py | Python | telesignenterprise/telebureau.py | Coffee-Meets-Bagel/python_telesign_enterprise | 7a9fbed581967c4c2fb9f9d3c1f8853dd67df58d | [
"MIT"
] | 3 | 2021-06-04T22:55:49.000Z | 2021-12-29T00:21:00.000Z | telesignenterprise/telebureau.py | Coffee-Meets-Bagel/python_telesign_enterprise | 7a9fbed581967c4c2fb9f9d3c1f8853dd67df58d | [
"MIT"
] | 2 | 2019-10-30T20:04:51.000Z | 2022-01-04T09:26:18.000Z | telesignenterprise/telebureau.py | Coffee-Meets-Bagel/python_telesign_enterprise | 7a9fbed581967c4c2fb9f9d3c1f8853dd67df58d | [
"MIT"
] | 1 | 2021-07-23T23:34:15.000Z | 2021-07-23T23:34:15.000Z | from __future__ import unicode_literals
from telesign.rest import RestClient
TELEBUREAU_CREATE_RESOURCE = "/v1/telebureau/event"
TELEBUREAU_RETRIEVE_RESOURCE = "/v1/telebureau/event/{reference_id}"
TELEBUREAU_DELETE_RESOURCE = "/v1/telebureau/event/{reference_id}"
class TelebureauClient(RestClient):
"""
    TeleBureau is a service based on TeleSign's watchlist, a proprietary database containing verified phone
numbers of users known to have committed online fraud. TeleSign crowd-sources this information from its customers.
Participation is voluntary, but you have to contribute in order to benefit.
"""
def __init__(self, customer_id, api_key, rest_endpoint='https://rest-ww.telesign.com', **kwargs):
super(TelebureauClient, self).__init__(customer_id, api_key, rest_endpoint=rest_endpoint, **kwargs)
def create_event(self, phone_number, fraud_type, occurred_at, **params):
"""
Creates a telebureau event corresponding to supplied data.
See https://developer.telesign.com/docs/telebureau-api for detailed API documentation.
"""
return self.post(TELEBUREAU_CREATE_RESOURCE,
phone_number=phone_number,
fraud_type=fraud_type,
occurred_at=occurred_at,
**params)
def retrieve_event(self, reference_id, **params):
"""
Retrieves the fraud event status. You make this call in your web application after completion of create
transaction for a telebureau event.
See https://developer.telesign.com/docs/telebureau-api for detailed API documentation.
"""
return self.get(TELEBUREAU_RETRIEVE_RESOURCE.format(reference_id=reference_id),
**params)
def delete_event(self, reference_id, **params):
"""
Deletes a previously submitted fraud event. You make this call in your web application after completion of the
create transaction for a telebureau event.
See https://developer.telesign.com/docs/telebureau-api for detailed API documentation.
"""
return self.delete(TELEBUREAU_DELETE_RESOURCE.format(reference_id=reference_id),
**params)
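# --- Illustrative usage (sketch; all values are placeholders, and valid
# fraud_type strings / timestamp formats are defined by the TeleBureau API docs) ---
#
#   client = TelebureauClient('customer_id', 'api_key')
#   client.create_event('15555551234', 'chargeback', '2021-01-01T00:00:00Z')
#   client.retrieve_event('<reference_id-from-create-response>')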
| 44.627451 | 119 | 0.692882 | 268 | 2,276 | 5.690299 | 0.373134 | 0.057705 | 0.04459 | 0.04918 | 0.449836 | 0.415738 | 0.331803 | 0.276721 | 0.276721 | 0.276721 | 0 | 0.001711 | 0.229789 | 2,276 | 50 | 120 | 45.52 | 0.868226 | 0.405536 | 0 | 0.15 | 0 | 0 | 0.096721 | 0.057377 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.1 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b590c3afdc8778783a821b7e7abd8d729518eda | 6,099 | py | Python | old_combine_chrX.py | nikbaya/chrX | 9d7859c60ecf35a5db13b973a7d2e44472a08ca6 | [
"MIT"
] | null | null | null | old_combine_chrX.py | nikbaya/chrX | 9d7859c60ecf35a5db13b973a7d2e44472a08ca6 | [
"MIT"
] | null | null | null | old_combine_chrX.py | nikbaya/chrX | 9d7859c60ecf35a5db13b973a7d2e44472a08ca6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 11:26:20 2018
@author: nbaya
"""
import os
import glob
import re
import pandas as pd
from subprocess import call
from joblib import Parallel, delayed
import multiprocessing
import sys
import numpy as np
v3_path = "/Users/nbaya/Documents/lab/ukbb-sexdiff/imputed-v3-results/"
#Get saved phenotypes
malefiles = (list(map(os.path.basename,glob.glob(v3_path+"*.male*.gz")))) #restrict to male files to prevent counting phenotype twice
find = re.compile(r"^(.*?)\..*") #regex search term for grabbing all the text before the first period in a string
savedphenotypes = list(map(lambda filename: re.search(find,filename).group(1), malefiles)) #list of all downloaded phenotypes (for me, it gives 78: 77 original samples + 20116_2)
#Get all phenotypes
allphenotypes = pd.Series.tolist(pd.read_table(v3_path+"phenotypes.both_sexes.tsv").iloc[:]["phenotype"]) #list of all phenotypes (male & female)
allphenotypes = pd.DataFrame({'phenotype':allphenotypes})
allphenotypes.to_csv(v3_path+"allphenotypeslist.tsv",sep = "\t")
# TEMPORARY -------------------------------------------------------------------
#savedFiles= (list(map(os.path.basename,glob.glob(chrX_path+"*.gz")))) #restrict to male files to prevent counting phenotype twice
#find = re.compile(r"^(.*?)\..*") #regex search term for grabbing all the text before the first period in a string
#newphenotypes = list(map(lambda filename: re.search(find,filename).group(1), savedFiles)) #list of all downloaded phenotypes (for me, it gives 78: 77 original samples + 20116_2)
#
#nextphenotypes = list(set(savedphenotypes).difference(set(newphenotypes)))
#
#len(nextphenotypes)
# -----------------------------------------------------------------------------
n_cores = multiprocessing.cpu_count()
#old method of extracting chrX
def prev_chrX_from_saved_phenotypes(ph):
tb_male = pd.read_csv((v3_path+ph+".imputed_v3.results.male.tsv.gz"), compression='gzip', sep='\t') #read files
tb_female = pd.read_csv((v3_path+ph+".imputed_v3.results.female.tsv.gz"), compression='gzip', sep='\t')
chrX_male = tb_male[tb_male.iloc[:]["variant"].str.match('X')][:] #get chrX variants for males
chrX_female = tb_female[tb_female.iloc[:]["variant"].str.match('X')][:] #get chrX variants for females
chrX = pd.merge(chrX_male,chrX_female, on = 'variant',suffixes = ("_male","_female"))
chrX.to_csv(chrX_path+ph+".chrX.tsv.gz",sep = '\t', compression = 'gzip')
#Parallel(n_jobs=n_cores,verbose = 50)(delayed(chrX_from_saved_phenotypes)(ph) for ph in savedphenotypes)
# TEMPORARY -------------------------------------------------------------------
#Parallel(n_jobs=n_cores,verbose = 50)(delayed(chrX_from_saved_phenotypes)(ph) for ph in nextphenotypes)
# -----------------------------------------------------------------------------
#def chrX_from_new_phenotypes(ph):
#
## call(["gsutil" ,"cp","gs://ukbb-gwas-imputed-v3-results/export1/"+ph+".**male*",
## "~/Documents/lab/ukbb-sexdiff/chrX/"])
#
#
# call('gsutil ls gs://ukbb-gwas-imputed-v3-results/export1/'+ph+'.**male*', shell=True)
## "~/Documents/lab/ukbb-sexdiff/chrX/',)
## call(["paste","<(cat", ph, ".imputed_v3.results.female.tsv.gz","|","zcat",
## "|" , "cut -f 1,2,3,5,6,8)", "<(cat", ph,".imputed_v3.results.male.tsv.gz" ,
## "|", "zcat", "|", "cut", "-f", "1,2,3,5,6,8)", "|", "awk" ,"\'", "NR==1{",
## "print", "\"variant\",\"n_female\",\"n_male\",\"frq_female\",\"frq_male\",\"beta_female\",\"se_female\",\"p_female\",\"beta_male\",\"se_male\",\"p_male\"",
## "}NR>1", "&&", "$1==$7{", "maff=$3/(2*$2);" , "mafm=$9/(2*$8);" ,
## "if(maff > .05 && maff<.95 && mafm > .05 && mafm < .95){",
## "print $1,$2,$8,maff,mafm,$4,$5,$6,$10,$11,$12} }\' | gzip >", ph, ".sexdiff.gz]"])
#
#testph = ['46','47']
#
#for ph in testph:
# chrX_from_new_phenotypes(ph)
#for ph in set(allphenotypes).difference(set(savedphenotypes)): #for all phenotypes not saved
# -----------------------------------------------------------------------------
chrX_path = "/Users/nbaya/Documents/lab/ukbb-sexdiff/chrX/data/"
ph = "1757"
#Males
tb_male = pd.read_csv((v3_path+ph+".imputed_v3.results.male.tsv.gz"), compression='gzip', sep='\t') #read files
chrX_male = tb_male[tb_male.iloc[:]["variant"].str.match('X')][:] #get chrX variants for males
chrX_male = chrX_male.reset_index() #necessary for upcoming concat between chrX_male and a3
a1 = np.asarray(chrX_male.iloc[:,0])
a2 = list(map(lambda variant: str(variant).split(':'), a1))
a3 = pd.DataFrame(np.asarray(a2).reshape((len(a2),4)))
chrX_male2 = pd.concat([a3[[0,1,3,2]],chrX_male], axis = 1).drop(['index','tstat','AC','ytx'], axis =1)
chrX_male2 = chrX_male2.rename(index=str, columns={0: "CHR", 1: "POS", 3: "EFFECT_ALLELE", 2: "NON_EFFECT_ALLELE",
                                                   "variant": "SNP", "nCompleteSamples": "N", "beta": "BETA",
                                                   "se": "SE", "pval": "P_VAL"})  # assign the result: rename() is not in-place
chrX_male2.to_csv(chrX_path+ph+".chrX.male.tsv.gz",sep = '\t', compression = 'gzip')
#Females
tb_female = pd.read_csv((v3_path+ph+".imputed_v3.results.female.tsv.gz"), compression='gzip', sep='\t') #read files
chrX_female = tb_female[tb_female.iloc[:]["variant"].str.match('X')][:] #get chrX variants for females
chrX_female = chrX_female.reset_index() #necessary for upcoming concat between chrX_female and a3
a1 = np.asarray(chrX_female.iloc[:,0])
a2 = list(map(lambda variant: str(variant).split(':'), a1))
a3 = pd.DataFrame(np.asarray(a2).reshape((len(a2),4)))
chrX_female2 = pd.concat([a3[[0,1,3,2]],chrX_female], axis = 1).drop(['index','tstat','AC','ytx'], axis =1)
chrX_female2 = chrX_female2.rename(index=str, columns={0: "CHR", 1: "POS", 3: "EFFECT_ALLELE", 2: "NON_EFFECT_ALLELE",
                                                       "variant": "SNP", "nCompleteSamples": "N", "beta": "BETA",
                                                       "se": "SE", "pval": "P_VAL"})  # assign the result: rename() is not in-place
chrX_female2.to_csv(chrX_path+ph+".chrX.female.tsv.gz",sep = '\t', compression = 'gzip')
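# A possible refactor (sketch): the male and female blocks above are identical
# up to the sex label, so one helper removes the duplication. It makes the same
# assumptions about the v3 file naming and the "chr:pos:ref:alt" variant format.
def extract_chrX(ph, sex):
    tb = pd.read_csv(v3_path + ph + ".imputed_v3.results." + sex + ".tsv.gz",
                     compression='gzip', sep='\t')
    chrx = tb[tb["variant"].str.match('X')].reset_index()
    parts = pd.DataFrame([str(v).split(':') for v in chrx["variant"]])
    out = pd.concat([parts[[0, 1, 3, 2]], chrx], axis=1).drop(
        ['index', 'tstat', 'AC', 'ytx'], axis=1)
    out = out.rename(index=str, columns={0: "CHR", 1: "POS", 3: "EFFECT_ALLELE",
                                         2: "NON_EFFECT_ALLELE", "variant": "SNP",
                                         "nCompleteSamples": "N", "beta": "BETA",
                                         "se": "SE", "pval": "P_VAL"})
    out.to_csv(chrX_path + ph + ".chrX." + sex + ".tsv.gz",
               sep='\t', compression='gzip')
# extract_chrX(ph, "male"); extract_chrX(ph, "female")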
| 42.950704 | 178 | 0.61174 | 846 | 6,099 | 4.281324 | 0.251773 | 0.022363 | 0.039757 | 0.029818 | 0.66317 | 0.638874 | 0.575097 | 0.525953 | 0.490061 | 0.466041 | 0 | 0.029395 | 0.135432 | 6,099 | 141 | 179 | 43.255319 | 0.6575 | 0.477947 | 0 | 0.333333 | 0 | 0 | 0.215693 | 0.091379 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020833 | false | 0 | 0.1875 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b5cff844879ff6c055ff9188fef15716ede158b | 315 | py | Python | 0x03-python-data_structures/10-divisible_by_2.py | oluwaseun-ebenezer/holbertonschool-higher_level_programming | e830f969d3ca71abf0a2f6d4f7c64a82337eccd7 | [
"MIT"
] | null | null | null | 0x03-python-data_structures/10-divisible_by_2.py | oluwaseun-ebenezer/holbertonschool-higher_level_programming | e830f969d3ca71abf0a2f6d4f7c64a82337eccd7 | [
"MIT"
] | null | null | null | 0x03-python-data_structures/10-divisible_by_2.py | oluwaseun-ebenezer/holbertonschool-higher_level_programming | e830f969d3ca71abf0a2f6d4f7c64a82337eccd7 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# 10-divisible_by_2.py
def divisible_by_2(my_list=[]):
"""Find all multiples of 2 in a list."""
multiples = []
for i in range(len(my_list)):
if my_list[i] % 2 == 0:
multiples.append(True)
else:
multiples.append(False)
    return multiples
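# Example: divisible_by_2([1, 2, 3, 4]) -> [False, True, False, True]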
| 21 | 44 | 0.574603 | 45 | 315 | 3.866667 | 0.622222 | 0.103448 | 0.137931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035556 | 0.285714 | 315 | 14 | 45 | 22.5 | 0.737778 | 0.231746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b5e8dad9b7d75c51ac3e7b6542b8df80237881b | 5,045 | py | Python | catalyst_utils/views/api.py | uw-it-aca/catalyst-utils | 8f529758098021a76c28caa71f78a4b2d3232c1a | [
"Apache-2.0"
] | null | null | null | catalyst_utils/views/api.py | uw-it-aca/catalyst-utils | 8f529758098021a76c28caa71f78a4b2d3232c1a | [
"Apache-2.0"
] | 107 | 2021-11-10T01:13:22.000Z | 2022-03-31T18:07:49.000Z | catalyst_utils/views/api.py | uw-it-aca/catalyst-utils | 8f529758098021a76c28caa71f78a4b2d3232c1a | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.http import HttpResponse
from django.views import View
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from catalyst_utils.models import Person, Survey, Gradebook
from catalyst_utils.dao.file import read_file, build_archive
from userservice.user import UserService
from logging import getLogger
import json
import re
logger = getLogger(__name__)
@method_decorator(login_required, name='dispatch')
class APIView(View):
@property
def person(self):
if not hasattr(self, '_person'):
username = UserService().get_user()
self._person = Person.objects.get(login_name=username)
return self._person
@staticmethod
def json_response(content='', status=200):
return HttpResponse(json.dumps(content, sort_keys=True),
status=status,
content_type='application/json')
@staticmethod
    def error_response(status, message='', content=None):
        content = {} if content is None else content  # avoid a shared mutable default
        content['error'] = str(message)
return HttpResponse(json.dumps(content),
status=status,
content_type='application/json')
@staticmethod
def file_response(content, filename, content_type='text/csv'):
response = HttpResponse(content=content, status=200,
content_type=content_type)
response['Content-Disposition'] = 'attachment; filename="{}"'.format(
re.sub(r'[,/]', '-', filename))
return response
@staticmethod
def sorted_tools(tools):
return sorted(tools,
key=lambda t: (t['created_date'], t['name'].upper()),
reverse=True)
class SurveyList(APIView):
def get(self, request, *args, **kwargs):
try:
owned_surveys = Survey.objects.by_owner(self.person)
netid_surveys = Survey.objects.by_netid_admin(self.person)
admin_surveys = Survey.objects.by_administrator(self.person)
except Person.DoesNotExist:
return self.json_response(status=204)
data = {
'owned_surveys': self.sorted_tools(
[s.json_data() for s in owned_surveys]),
'netid_surveys': self.sorted_tools(
[s.json_data() for s in netid_surveys]),
'admin_surveys': self.sorted_tools(
[s.json_data() for s in admin_surveys]),
}
return self.json_response(data)
class GradebookList(APIView):
def get(self, request, *args, **kwargs):
try:
owned_gradebooks = Gradebook.objects.by_owner(self.person)
netid_gradebooks = Gradebook.objects.by_netid_admin(self.person)
admin_gradebooks = Gradebook.objects.by_administrator(self.person)
except Person.DoesNotExist:
return self.json_response(status=204)
data = {
'owned_gradebooks': self.sorted_tools(
[s.json_data() for s in owned_gradebooks]),
'netid_gradebooks': self.sorted_tools(
[s.json_data() for s in netid_gradebooks]),
'admin_gradebooks': self.sorted_tools(
[s.json_data() for s in admin_gradebooks]),
}
return self.json_response(data)
class SurveyFile(APIView):
def get(self, request, *args, **kwargs):
survey_id = kwargs.get('survey_id')
try:
survey = Survey.objects.get(survey_id=survey_id)
except Survey.DoesNotExist:
return self.error_response(404, 'Not Found')
if not survey.is_administrator(self.person):
return self.error_response(401, 'Not Authorized')
try:
archive = build_archive([survey.export_path,
survey.responses_path,
survey.code_translation_path])
except ObjectDoesNotExist:
return self.error_response(404, 'Not Available')
return self.file_response(archive, survey.filename,
content_type='application/zip')
class GradebookFile(APIView):
def get(self, request, *args, **kwargs):
gradebook_id = kwargs.get('gradebook_id')
try:
gradebook = Gradebook.objects.get(gradebook_id=gradebook_id)
except Gradebook.DoesNotExist:
return self.error_response(404, 'Not Found')
if not gradebook.is_administrator(self.person):
return self.error_response(401, 'Not Authorized')
try:
return self.file_response(read_file(gradebook.export_path),
gradebook.filename,
content_type='application/vnd.ms-excel')
except ObjectDoesNotExist:
return self.error_response(404, 'Not Available')
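# --- Illustrative wiring (hypothetical; the real routes live in the project's
# urls.py) ---
# from django.urls import re_path
# urlpatterns = [
#     re_path(r'^api/surveys/$', SurveyList.as_view()),
#     re_path(r'^api/gradebooks/$', GradebookList.as_view()),
#     re_path(r'^api/surveys/(?P<survey_id>[\d]+)/$', SurveyFile.as_view()),
# ]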
| 36.294964 | 78 | 0.619425 | 540 | 5,045 | 5.607407 | 0.244444 | 0.042933 | 0.029723 | 0.031704 | 0.438243 | 0.415786 | 0.376156 | 0.331242 | 0.296235 | 0.227543 | 0 | 0.009967 | 0.284044 | 5,045 | 138 | 79 | 36.557971 | 0.82835 | 0.016254 | 0 | 0.311927 | 0 | 0 | 0.069355 | 0.004839 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082569 | false | 0 | 0.100917 | 0.018349 | 0.385321 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b5f835cc06515c390b13c5d1221de5dc5ebb27d | 784 | py | Python | examples/longify.py | hmckenzie/tea-lang | d88d63ea600c387d086d19bcb0c9ae54cc78cb68 | [
"Apache-2.0"
] | null | null | null | examples/longify.py | hmckenzie/tea-lang | d88d63ea600c387d086d19bcb0c9ae54cc78cb68 | [
"Apache-2.0"
] | null | null | null | examples/longify.py | hmckenzie/tea-lang | d88d63ea600c387d086d19bcb0c9ae54cc78cb68 | [
"Apache-2.0"
] | null | null | null | '''
Author: Eunice Jun (@emjun)
Date created: November 4, 2019
Purpose: Transform a wide format dataset into long format
Use: python3 longify.py <data_in_wide_format.csv>
'''
import sys
import csv
import pandas as pd
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Misusing script. Must include EXACTLY ONE parameter: python3 longify.py <data_in_wide_format.csv>")
elif not sys.argv[1].endswith('.csv'):
print("Data file must be a CSV file!")
else:
wide_csv = sys.argv[1]
wide_df = pd.read_csv(wide_csv)
# long_df = pd.wide_to_long(wide_df, stubnames='Score', i=None, j='ID')
cols_to_collapse = ['AR', 'TV']
result_col = 'Score'
        # A minimal completion (sketch): melt the wide columns into long format,
        # using the row index as a subject id; the output filename is an assumption
        long_df = pd.melt(wide_df.reset_index(), id_vars='index',
                          value_vars=cols_to_collapse,
                          var_name='condition', value_name=result_col)
        long_df.to_csv(wide_csv.replace('.csv', '_long.csv'), index=False)
| 29.037037 | 114 | 0.640306 | 118 | 784 | 4.016949 | 0.576271 | 0.063291 | 0.067511 | 0.084388 | 0.147679 | 0.147679 | 0.147679 | 0.147679 | 0 | 0 | 0 | 0.016694 | 0.235969 | 784 | 26 | 115 | 30.153846 | 0.774624 | 0.304847 | 0 | 0 | 0 | 0 | 0.274766 | 0.046729 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.266667 | 0 | 0.266667 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b607bc698224eb54df1cdcf13257fe7d16f4a93 | 2,241 | py | Python | akhelpers/Resnet_AK.py | sahilparekh/autokeras-models | 237b9900fbe83ef8f9882b257f01986289647797 | [
"MIT"
] | null | null | null | akhelpers/Resnet_AK.py | sahilparekh/autokeras-models | 237b9900fbe83ef8f9882b257f01986289647797 | [
"MIT"
] | null | null | null | akhelpers/Resnet_AK.py | sahilparekh/autokeras-models | 237b9900fbe83ef8f9882b257f01986289647797 | [
"MIT"
] | null | null | null | import autokeras as ak
from tensorflow.python.util import nest
from tf2cv.models.resnet import ResNet
LAYER_OPTIONS = [[1, 1, 1, 1], [2, 1, 1, 1], [2, 2, 1, 1], [2, 2, 2, 1], [2, 2, 2, 2], [3, 3, 3, 3],
[3, 4, 6, 3]]
class CustomResnetBlock(ak.Block):
def __init__(self, in_size=(224, 224), in_channels=3, layer_options=LAYER_OPTIONS, **kwargs):
super().__init__(**kwargs)
self.in_channels = in_channels
self.in_size = in_size
self.layers_options = layer_options
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
# Get HP Params for network
bottleneck = hp.Boolean('hp_bottleneck', default=False)
layers_option_idx = list(range(len(self.layers_options)))
layers_sel = hp.Choice('idx_layers', values=layers_option_idx)
layers = self.layers_options[layers_sel]
if self.in_size[0] < 100:
init_block_channels = 16
channels_per_layers = [16, 32, 64]
layers = layers[:3]
else:
init_block_channels = 64
channels_per_layers = [64, 128, 256, 512]
if bottleneck:
bottleneck_factor = 4
channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
width_scale = hp.Float('width_scale', min_value=0.5, max_value=1.5, step=0.1)
if width_scale != 1.0:
            # the width scale must not change the last block of the last layer
channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
init_block_channels = int(init_block_channels * width_scale)
# Create layers
net = ResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
conv1_stride=True,
in_channels=self.in_channels,
in_size=self.in_size,
use_with_ak_classification=True).features
output_node = net(input_node)
return output_node
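# --- Illustrative wiring (sketch; verify block/IO APIs against your AutoKeras
# version — the dataset and trial count are placeholders) ---
# input_node = ak.ImageInput()
# output_node = CustomResnetBlock(in_size=(32, 32))(input_node)
# output_node = ak.ClassificationHead()(output_node)
# auto_model = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=3)
# auto_model.fit(x_train, y_train, epochs=5)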
| 36.737705 | 106 | 0.599732 | 303 | 2,241 | 4.194719 | 0.336634 | 0.009441 | 0.080252 | 0.059009 | 0.040913 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047438 | 0.294511 | 2,241 | 60 | 107 | 37.35 | 0.756483 | 0.039714 | 0 | 0 | 0 | 0 | 0.015836 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.069767 | 0 | 0.162791 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b60d399770654bd26d7c840b7fc93de1223aa09 | 766 | py | Python | Codes/data_convertor/change_text_labels.py | AmiirGholamii/semantic-segmentation | 16426afdcf9ef2449d5bc3cb86ca1c269e517dab | [
"MIT"
] | 2 | 2021-05-14T07:44:24.000Z | 2021-05-19T04:48:03.000Z | Codes/data_convertor/change_text_labels.py | AmiirGholamii/semantic-segmentation | 16426afdcf9ef2449d5bc3cb86ca1c269e517dab | [
"MIT"
] | null | null | null | Codes/data_convertor/change_text_labels.py | AmiirGholamii/semantic-segmentation | 16426afdcf9ef2449d5bc3cb86ca1c269e517dab | [
"MIT"
] | null | null | null | import os
import cv2
import numpy as np
directory = "/home/rider/DataSets/Images/Development/humanoid_soccer_dataset/ScreenshotMasks"
for filename in os.listdir(directory):
    if not filename.endswith(".txt"):
        continue
    blank_image = np.zeros((480, 640), np.uint8)
    with open(os.path.join(directory, filename)) as f:
        lines = f.readlines()
        for i in range(len(lines)):
            splitted_list = lines[i].split(' ')
            # the last token is the trailing newline, so it is skipped
            for j in range(len(splitted_list) - 1):
                # cast explicitly: assigning a raw string to a uint8 cell is fragile
                blank_image[i, j] = int(splitted_list[j])
        cv2.imwrite(os.path.join(directory, filename.replace(".txt", ".png")), blank_image)
        # print(os.path.join(directory, filename))
| 38.3 | 93 | 0.614883 | 98 | 766 | 4.72449 | 0.530612 | 0.064795 | 0.064795 | 0.12311 | 0.174946 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020979 | 0.253264 | 766 | 19 | 94 | 40.315789 | 0.788462 | 0.052219 | 0 | 0.111111 | 0 | 0 | 0.127072 | 0.109116 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b63d4b72d8214c1ed9a2a8335427946263ee241 | 3,524 | py | Python | src/python/serif/theory/serif_entity_theory.py | BBN-E/text-open | c508f6caeaa51a43cdb0bc27d8ed77e5750fdda9 | [
"Apache-2.0"
] | 2 | 2022-03-24T14:37:51.000Z | 2022-03-24T19:56:45.000Z | src/python/serif/theory/serif_entity_theory.py | BBN-E/text-open | c508f6caeaa51a43cdb0bc27d8ed77e5750fdda9 | [
"Apache-2.0"
] | null | null | null | src/python/serif/theory/serif_entity_theory.py | BBN-E/text-open | c508f6caeaa51a43cdb0bc27d8ed77e5750fdda9 | [
"Apache-2.0"
] | null | null | null | import sys, os
from serif.theory.serif_theory import SerifTheory
from serif.theory.enumerated_type import MentionType
from serif.util.serifxml_utils import CountryIdentifier
class SerifEntityTheory(SerifTheory):
def num_mentions(self):
"""Returns the number or mentions in this Entity"""
return len(self.mentions)
def representative_mention(self):
"""Finds the mentions that best represents the Entity. Algorithm
ported from Java's DefaultRepresentativeMentionFinder."""
# Look for country name first but calculate longest name as well
longest_name_mention = None
longest_length = None
for mention in self.mentions:
if mention.mention_type != MentionType.name:
continue
name = mention.atomic_head.text.lower()
if longest_name_mention is None or len(name) > longest_length:
longest_name_mention = mention
longest_length = len(name)
if CountryIdentifier.is_country_string(name):
return mention
# Longest name
if longest_name_mention:
return longest_name_mention
# Earliest desc (or longest if tie)
earliest_desc_mention = None
earliest_char_offset = None
earliest_desc_mention_length = None
for mention in self.mentions:
if mention.mention_type != MentionType.desc:
continue
if (earliest_desc_mention is None or
mention.start_char < earliest_char_offset or
(mention.start_char == earliest_char_offset and
len(mention.text) > earliest_desc_mention_length)):
earliest_desc_mention = mention
earliest_char_offset = mention.start_char
earliest_desc_mention_length = len(mention.text)
if earliest_desc_mention:
return earliest_desc_mention
# Default, could happen with first person pronouns?
if len(self.mentions) > 0:
return self.mentions[0]
return None
def representative_name(self):
"""Finds the most 'representative name' from the list of Mentions.
If there is no name Mention in the Entity, this will return None.
Algorithm is ported from Java."""
rm = self.representative_mention()
if rm is not None and rm.mention_type == MentionType.name:
return rm
return None
def contains_mention(self, mention):
"""Returns true if given Mention is part of the Entity"""
for m in self.mentions:
if m == mention:
return True
return False
def has_name_mention(self):
"""Returns true if there is a name Mention in the Entity"""
for m in self.mentions:
if m.mention_type == MentionType.name:
return True
return False
def has_desc_mention(self):
"""Returns true if there is a desc Mention in the Entity"""
for m in self.mentions:
if m.mention_type == MentionType.desc:
return True
return False
def has_name_or_desc_mention(self):
"""Returns true if there is a name or desc Mention in the Entity"""
for m in self.mentions:
if (m.mention_type == MentionType.desc or
m.mention_type == MentionType.name):
return True
return False
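# --- Illustrative usage (sketch; `serif_doc` is a hypothetical parsed document
# exposing an `entities` collection) ---
# for entity in serif_doc.entities:
#     rep = entity.representative_mention()
#     if rep is not None:
#         print(rep.text)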
| 37.094737 | 77 | 0.619467 | 417 | 3,524 | 5.069544 | 0.215827 | 0.062441 | 0.071902 | 0.045412 | 0.352412 | 0.326869 | 0.32403 | 0.260643 | 0.260643 | 0.173132 | 0 | 0.000838 | 0.322928 | 3,524 | 94 | 78 | 37.489362 | 0.885163 | 0.200908 | 0 | 0.276923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107692 | false | 0 | 0.061538 | 0 | 0.430769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b64d23b87d1099b18fa084331257778ef9465f0 | 1,655 | py | Python | scripts/bing-images-downloader.py | ZZY2357/auto-workflow | bea6f0c67da524fd08cbf282ea72d821f8d1c9ea | [
"MIT"
] | null | null | null | scripts/bing-images-downloader.py | ZZY2357/auto-workflow | bea6f0c67da524fd08cbf282ea72d821f8d1c9ea | [
"MIT"
] | null | null | null | scripts/bing-images-downloader.py | ZZY2357/auto-workflow | bea6f0c67da524fd08cbf282ea72d821f8d1c9ea | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import requests
from bs4 import BeautifulSoup
import os
import base64
keyword = input('What do you want? ')
save_folder = input('Where do you want to save images? (Defaults to the current directory) ')
if save_folder == '': save_folder = os.getcwd()
if not os.path.exists(save_folder): os.mkdir(save_folder)
url = 'https://cn.bing.com/images/search?q=%s&form=BESBTB&first=1&scenario=ImageBasicHover&ensearch=1' % keyword
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.193 Safari/537.36'
}
print('Starting fetching image urls...')
r = requests.get(url, headers=headers)
html = r.text
soup = BeautifulSoup(html, 'lxml')
img_elements = soup.select('.mimg')
img_urls = []
for img_element in img_elements:
if 'src' in img_element.attrs:
img_urls.append(img_element['src'])
if 'data-src' in img_element.attrs:
img_urls.append(img_element['data-src'])
print('Starting downloading images...')
for i in range(len(img_urls)):
if 'data:image/' in img_urls[i]:
        print('Warning: base64-embedded images are not supported')
continue
# img_urls[i] += (4 - len(img_urls[i]) % 4) * '='
# img_bytes = base64.b64decode(img_urls[i].split(',')[1])
        # file_name = save_folder + '/' + str(i) + '.' + img_urls[i].split(';')[0].split('/')[1]
else:
r = requests.get(img_urls[i])
img_bytes = r.content
        file_name = save_folder + '/' + str(i) + '.' + r.headers['Content-Type'].split('/')[1]
with open(file_name, 'wb') as f:
f.write(img_bytes)
print('Downloaded %s' % file_name)
| 34.479167 | 135 | 0.647734 | 248 | 1,655 | 4.197581 | 0.46371 | 0.067243 | 0.04611 | 0.028818 | 0.12488 | 0.12488 | 0.082613 | 0.082613 | 0.082613 | 0.082613 | 0 | 0.034763 | 0.183082 | 1,655 | 47 | 136 | 35.212766 | 0.735207 | 0.128097 | 0 | 0 | 0 | 0.057143 | 0.323141 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.114286 | 0 | 0.114286 | 0.114286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b6ac29e4ec13d34dbb79b65c428b5255729e775 | 7,313 | py | Python | webex_adaptive_card.py | oborys/webex_card_bot | 823a2a1eca356a5f9e2a1158209c6ce8f715a5cf | [
"MIT"
] | null | null | null | webex_adaptive_card.py | oborys/webex_card_bot | 823a2a1eca356a5f9e2a1158209c6ce8f715a5cf | [
"MIT"
] | null | null | null | webex_adaptive_card.py | oborys/webex_card_bot | 823a2a1eca356a5f9e2a1158209c6ce8f715a5cf | [
"MIT"
] | null | null | null | from flask import Flask, request
import requests
import json
import configparser
from api_interaction import *
# read variables from config
credential = configparser.ConfigParser()
credential.read('cred.prod')
# Import credential
bearer_bot = credential['Webex']['WEBEX_TEAMS_TOKEN']
botEmail = credential['Webex']['WEBEX_BOT_EMAIL']
# WebhookUrl
webhookUrl = credential['Webex']['WEBEX_WEBHOOK_URL']
Meraki_API_KEY = credential['Webex']['Meraki_API_KEY']
headers_bot = {
"Accept": "application/json",
"Content-Type": "application/json; charset=utf-8",
"Authorization": "Bearer " + bearer_bot
}
app = Flask(__name__)
#### Functions
def createWebhook(bearer, webhookUrl):
hook = True
botWebhooks = send_webex_get("https://webexapis.com/v1/webhooks")["items"]
for webhook in botWebhooks:
if webhook["targetUrl"] == webhookUrl:
hook = False
if hook:
dataWebhook = {
"name": "Messages collab bot Webhook",
"resource": "messages",
"event": "created",
"targetUrl": webhookUrl
}
dataWebhookCard = {
"name": "Card Report collab bot Webhook",
"targetUrl": webhookUrl,
"resource": "attachmentActions",
"event": "created"
}
send_webex_post("https://webexapis.com/v1/webhooks/", dataWebhook)
send_webex_post("https://webexapis.com/v1/webhooks/", dataWebhookCard)
print("Webhook status: done")
def deleteWebHooks(bearer, webhookUrl):
    # NOTE: removes every webhook registered to the bot, not just `webhookUrl`
webhookURL = "https://webexapis.com/v1/webhooks/"
botWebhooks = send_webex_get(webhookURL)["items"]
for webhook in botWebhooks:
send_webex_delete(webhookURL + webhook["id"])
def send_webex_get(url, payload=None,js=True):
if payload == None:
request = requests.get(url, headers=headers_bot)
else:
request = requests.get(url, headers=headers_bot, params=payload)
if js == True:
if request.status_code == 200:
try:
r = request.json()
except json.decoder.JSONDecodeError:
print("Error JSONDecodeError")
return("Error JSONDecodeError")
return r
else:
            print(request)
return ("Error " + str(request.status_code))
return request
def send_webex_delete(url, payload=None):
if payload == None:
request = requests.delete(url, headers=headers_bot)
else:
request = requests.delete(url, headers=headers_bot, params=payload)
def send_webex_post(url, data):
request = requests.post(url, json.dumps(data), headers=headers_bot).json()
return request
def postNotificationToPerson(reportText, personEmail):
body = {
"toPersonEmail": personEmail,
"markdown": reportText,
"text": "This text would be displayed by Webex Teams clients that do not support markdown."
}
send_webex_post('https://webexapis.com/v1/messages', body)
def postCard(personEmail):
# open and read data from file as part of body for request
with open("adaptiveCard.json", "r", encoding="utf-8") as f:
data = f.read().replace('USER_EMAIL', personEmail)
# Add encoding, if you use non-Latin characters
data = data.encode("utf-8")
request = requests.post('https://webexapis.com/v1/messages', data=data, headers=headers_bot).json()
print("POST CARD TO ", personEmail)
def postCardDNAC(personEmail):
# open and read data from file as part of body for request
with open("adaptiveCardDNAC.json", "r", encoding="utf-8") as f:
data = f.read().replace('USER_EMAIL', personEmail)
# Add encoding, if you use non-Latin characters
data = data.encode("utf-8")
request = requests.post('https://webexapis.com/v1/messages', data=data, headers=headers_bot).json()
print("POST CARD TO ", personEmail)
def postCardMeraki(personEmail):
# open and read data from file as part of body for request
with open("adaptiveCardMeraki.json", "r", encoding="utf-8") as f:
data = f.read().replace('USER_EMAIL', personEmail)
# Add encoding, if you use non-Latin characters
data = data.encode("utf-8")
request = requests.post('https://webexapis.com/v1/messages', data=data, headers=headers_bot).json()
print("POST CARD TO ", personEmail)
@app.route('/', methods=['GET', 'POST'])
def webex_webhook():
if request.method == 'POST':
webhook = request.get_json(silent=True)
print("Webhook:")
print(webhook)
if webhook['resource'] == 'messages' and webhook['data']['personEmail'] != botEmail:
result = send_webex_get('https://webexapis.com/v1/messages/{0}'.format(webhook['data']['id']))
print("result messages", result)
in_message = result.get('text', '').lower()
print("in_message", in_message)
if in_message.startswith('/hi'):
personEmail = webhook['data']['personEmail']
postNotificationToPerson('Hi', personEmail)
elif in_message.startswith('/dnac'):
postCardDNAC(webhook['data']['personEmail'])
elif in_message.startswith('/post'):
postCardMeraki(webhook['data']['personEmail'])
else:
postCard(webhook['data']['personEmail'])
elif webhook['resource'] == 'attachmentActions':
result = send_webex_get('https://webexapis.com/v1/attachment/actions/{}'.format(webhook['data']['id']))
print("\n\n Result ", result)
person = send_webex_get('https://webexapis.com/v1/people/{}'.format(result['personId']))
personEmail = person["emails"][0]
postNotificationToPerson("Bot received your answer", personEmail)
if (result['inputs']['type'] == 'event_card'):
responseText = "Your Email " + personEmail + "\n" + "Date in Adaptive Card: " + result['inputs']['date'] + "\n" + "Text in Adaptive Card: " + result['inputs']['input_text']
postNotificationToPerson(responseText, personEmail)
elif (result['inputs']['type'] == 'api_operation_card'):
reportText = SimpleAPIoperation(dnac_url)
postNotificationToPerson(reportText[1], personEmail)
postNotificationToPerson(reportText[0], personEmail)
elif (result['inputs']['type'] == 'api_operation_card_post'):
reportText = merakiPostOperation(result['inputs']['admin_email'])
postNotificationToPerson(reportText, personEmail)
elif (result['inputs']['type'] == '3rd_party'):
pass
return "true"
elif request.method == 'GET':
message = "<center><img src=\"http://bit.ly/SparkBot-512x512\" alt=\"Webex Bot\" style=\"width:256; height:256;\"</center>" \
"<center><h2><b>Congratulations! Your <i style=\"color:#ff8000;\"></i> bot is up and running.</b></h2></center>" \
"<center><b><i>Please don't forget to create Webhooks to start receiving events from Webex Teams!</i></b></center>" \
"<center><b>Generate meeting token <a href='/token'>/token</a></b></center>"
return message
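# NOTE (assumption): the original script never starts the Flask server itself,
# so it presumably runs under an external WSGI server. A direct launch would
# need something like the guard below, with host/port matching the registered
# webhook URL:
#
# if __name__ == '__main__':
#     app.run(host='0.0.0.0', port=5000)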
print("Start Bot")
deleteWebHooks(bearer_bot, webhookUrl)
createWebhook(bearer_bot, webhookUrl) | 41.551136 | 188 | 0.633803 | 812 | 7,313 | 5.615764 | 0.25 | 0.023684 | 0.041009 | 0.045833 | 0.378289 | 0.301754 | 0.294298 | 0.237281 | 0.182895 | 0.182895 | 0 | 0.007756 | 0.224258 | 7,313 | 176 | 189 | 41.551136 | 0.796051 | 0.051142 | 0 | 0.15493 | 0 | 0.014085 | 0.267109 | 0.026567 | 0.007042 | 0 | 0 | 0 | 0 | 1 | 0.070423 | false | 0.007042 | 0.035211 | 0 | 0.147887 | 0.084507 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b6b9817cbd176268a7a34bd88ce4df0849e1e97 | 798 | py | Python | library/ftx/asyncronous/account.py | danyanyam/ftx | 32076bc1135e5a1e2bc800f4fff8dff9d7da18f1 | [
"MIT"
] | 2 | 2021-09-23T22:59:24.000Z | 2021-09-24T05:49:35.000Z | library/ftx/asyncronous/account.py | danyanyam/ftx | 32076bc1135e5a1e2bc800f4fff8dff9d7da18f1 | [
"MIT"
] | null | null | null | library/ftx/asyncronous/account.py | danyanyam/ftx | 32076bc1135e5a1e2bc800f4fff8dff9d7da18f1 | [
"MIT"
] | null | null | null | from library.ftx.base import AsyncBaseApiClass
class Account(AsyncBaseApiClass):
"""https://docs.ftx.com/#account"""
def __init__(self, api_key: str, secret_key: str, subaccount_name: str = None):
super().__init__(api_key, secret_key, subaccount_name)
async def get_account_information(self):
""" https://docs.ftx.com/#get-account-information """
return await self.get('/api/account')
async def get_positions(self):
""" https://docs.ftx.com/#get-positions """
return await self.get('/api/positions')
async def change_account_leverage(self, leverage: float):
""" https://docs.ftx.com/#change-account-leverage """
assert leverage < 2
return await self.post('/api/account/leverage', data={'leverage': leverage})
| 38 | 84 | 0.669173 | 99 | 798 | 5.20202 | 0.353535 | 0.069903 | 0.093204 | 0.116505 | 0.16699 | 0.085437 | 0 | 0 | 0 | 0 | 0 | 0.001529 | 0.180451 | 798 | 20 | 85 | 39.9 | 0.785933 | 0.036341 | 0 | 0 | 0 | 0 | 0.089577 | 0.034202 | 0 | 0 | 0 | 0 | 0.090909 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.545455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b71296702232873c1e4f5d1eea517c841d75064 | 2,980 | py | Python | slixmpp/plugins/xep_0319/idle.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
] | 86 | 2016-07-04T13:26:02.000Z | 2022-02-19T10:26:21.000Z | slixmpp/plugins/xep_0319/idle.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
] | 10 | 2016-09-30T18:55:41.000Z | 2020-05-01T14:22:47.000Z | slixmpp/plugins/xep_0319/idle.py | anirudhrata/slixmpp | 1fcee0e80a212eeb274d2f560e69099d8a61bf7f | [
"BSD-3-Clause"
] | 45 | 2016-09-30T18:48:41.000Z | 2022-03-18T21:39:33.000Z |
# Slixmpp: The Slick XMPP Library
# Copyright (C) 2013 Nathanael C. Fritz, Lance J.T. Stout
# This file is part of Slixmpp.
# See the file LICENSE for copying permission.
from datetime import datetime, timezone
from typing import Optional
from slixmpp import JID
from slixmpp.stanza import Presence
from slixmpp.plugins import BasePlugin
from slixmpp.xmlstream import register_stanza_plugin
from slixmpp.xmlstream.handler import Callback
from slixmpp.xmlstream.matcher import StanzaPath
from slixmpp.plugins.xep_0319 import stanza
def get_local_timezone():
return datetime.now(timezone.utc).astimezone().tzinfo
class XEP_0319(BasePlugin):
name = 'xep_0319'
description = 'XEP-0319: Last User Interaction in Presence'
dependencies = {'xep_0012'}
stanza = stanza
def plugin_init(self):
self._idle_stamps = {}
register_stanza_plugin(Presence, stanza.Idle)
self.api.register(self._set_idle, 'set_idle', default=True)
self.api.register(self._get_idle, 'get_idle', default=True)
self.xmpp.register_handler(Callback(
'Idle Presence',
StanzaPath('presence/idle'),
self._idle_presence
))
self.xmpp.add_filter('out', self._stamp_idle_presence)
def session_bind(self, jid):
self.xmpp['xep_0030'].add_feature('urn:xmpp:idle:1')
def plugin_end(self):
self.xmpp['xep_0030'].del_feature(feature='urn:xmpp:idle:1')
self.xmpp.del_filter('out', self._stamp_idle_presence)
self.xmpp.remove_handler('Idle Presence')
async def idle(self, jid: Optional[JID] = None,
since: Optional[datetime] = None):
"""Set an idle duration for a JID
.. versionchanged:: 1.8.0
This function is now a coroutine.
"""
seconds = None
tz = get_local_timezone()  # avoid shadowing the datetime.timezone import
if since is None:
    since = datetime.now(tz)
else:
    seconds = datetime.now(tz) - since
await self.api['set_idle'](jid, None, None, since)
await self.xmpp['xep_0012'].set_last_activity(jid=jid, seconds=seconds)
async def active(self, jid: Optional[JID] = None):
"""Reset the idle timer.
.. versionchanged:: 1.8.0
This function is now a coroutine.
"""
await self.api['set_idle'](jid, None, None, None)
await self.xmpp['xep_0012'].del_last_activity(jid)
def _set_idle(self, jid, node, ifrom, data):
self._idle_stamps[jid] = data
def _get_idle(self, jid, node, ifrom, data):
return self._idle_stamps.get(jid, None)
def _idle_presence(self, pres):
self.xmpp.event('presence_idle', pres)
async def _stamp_idle_presence(self, stanza):
if isinstance(stanza, Presence):
since = await self.api['get_idle'](stanza['from'] or self.xmpp.boundjid)
if since:
stanza['idle']['since'] = since
return stanza
| 33.483146 | 84 | 0.655034 | 388 | 2,980 | 4.871134 | 0.280928 | 0.042328 | 0.033862 | 0.020106 | 0.2 | 0.13545 | 0.078307 | 0.078307 | 0.046561 | 0.046561 | 0 | 0.021108 | 0.236913 | 2,980 | 88 | 85 | 33.863636 | 0.810026 | 0.054362 | 0 | 0 | 0 | 0 | 0.090308 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12069 | false | 0 | 0.155172 | 0.034483 | 0.413793 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
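# --- Hedged usage sketch (not part of slixmpp): the JID and password are
# placeholders; only the plugin key 'xep_0319' comes from the class above.
from slixmpp import ClientXMPP

async def demo_idle(xmpp: ClientXMPP):
    await xmpp['xep_0319'].idle()    # stamp outgoing presence as idle from now
    await xmpp['xep_0319'].active()  # clear the idle timer again

# client = ClientXMPP('user@example.com', 'password')
# client.register_plugin('xep_0319')
# ...then run demo_idle(client) inside the client's event loop once connected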
3b713daf543427117e79a8f8e7805cb3d4baae6c | 4,687 | py | Python | modules/ImageMagickInterface.py | CollinHeist/TitleCardMaker | a5e90b81177e47d565bb47ed429dbf46d8d696f0 | [
"MIT"
] | 5 | 2022-01-09T09:51:39.000Z | 2022-03-05T15:00:07.000Z | modules/ImageMagickInterface.py | CollinHeist/TitleCardMaker | a5e90b81177e47d565bb47ed429dbf46d8d696f0 | [
"MIT"
] | 17 | 2022-02-14T17:50:51.000Z | 2022-03-30T03:44:06.000Z | modules/ImageMagickInterface.py | CollinHeist/TitleCardMaker | a5e90b81177e47d565bb47ed429dbf46d8d696f0 | [
"MIT"
] | 1 | 2022-01-14T15:08:08.000Z | 2022-01-14T15:08:08.000Z | from shlex import split as command_split
from subprocess import Popen, PIPE
from modules.Debug import log
class ImageMagickInterface:
"""
This class describes an interface to ImageMagick. If initialized with a
valid docker container (name or ID), then all given ImageMagick commands
will be run through that docker container.
Note: This class does not validate the provided container corresponds to
a valid ImageMagick container. Commands are passed to docker so long as any
container is given.
The command I use for launching an ImageMagick container is:
>>> docker run --name="ImageMagick" --entrypoint="/bin/bash" \
-dit -v "/mnt/user/":"/mnt/user/" 'dpokidov/imagemagick'
"""
def __init__(self, container: str=None,
use_magick_prefix: bool=False) -> None:
"""
Constructs a new instance. If docker_id is None/0/False, then commands
will not use a docker container.
:param container: The container for sending requests to
ImageMagick, can be a name or container ID.
"""
# Definitions of this interface, i.e. whether to use docker and how
self.container = container
self.use_docker = bool(container)
# Whether to prefix commands with "magick" or not
self.prefix = 'magick ' if use_magick_prefix else ''
# Command history for debug purposes
self.__history = []
@staticmethod
def escape_chars(string: str) -> str:
"""
Escape the necessary characters within the given string so that they
can be sent to ImageMagick.
:param string: The string to escape.
:returns: Input string with all necessary characters escaped. This
assumes that text will be wrapped in "", and so only escapes
" and ` characters.
"""
# Handle possible None strings
if string is None:
return None
return string.replace('"', r'\"').replace('`', r'\`')
def run(self, command: str) -> "tuple[bytes, bytes]":
"""
Wrapper for running a given command. This uses either the host machine
(i.e. direct calls); or through the provided docker container (if
preferences has been set; i.e. wrapped through "docker exec -t {id}
{command}").
:param command: The command (as string) to execute.
:returns: Tuple of the STDOUT and STDERR of the executed command.
"""
# If a docker image ID is specified, execute the command in that container
# otherwise, execute on the host machine (no docker wrapper)
if self.use_docker:
command = f'docker exec -t {self.container} {self.prefix}{command}'
else:
command = f'{self.prefix}{command}'
# Split command into list of strings for Popen
cmd = command_split(command)
# Execute, capturing stdout and stderr
stdout, stderr = b'', b''
try:
stdout, stderr = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()
# Add command to history
self.__history.append((command, stdout, stderr))
return stdout, stderr
except FileNotFoundError as e:
if 'docker' in str(e):
log.critical('ImageMagick docker container not found')
exit(1)
else:
log.error(f'Command error "{e}"')
return b'', b''
def run_get_output(self, command: str) -> str:
"""
Wrapper for run(), but return the byte-decoded stdout.
:param command: The command (as string) being executed.
:returns: The decoded stdout output of the executed command.
"""
return b''.join(self.run(command)).decode()
def delete_intermediate_images(self, *paths) -> None:  # each path must be a pathlib.Path
"""
Delete all the provided intermediate files.
:param paths: Any number of files to delete. Must be Path objects.
"""
# Delete (unlink) each image, don't raise FileNotFoundError if DNE
for image in paths:
image.unlink(missing_ok=True)
def print_command_history(self) -> None:
"""
Prints the command history of this Interface.
"""
for entry in self.__history:
command, stdout, stderr = entry
sep = '-' * 60
log.debug(f'Command: {command}\n\nstdout: {stdout}\n\nstderr: '
f'{stderr}\n{sep}')
| 33.241135 | 82 | 0.590356 | 558 | 4,687 | 4.910394 | 0.340502 | 0.027372 | 0.010949 | 0.016058 | 0.021898 | 0.021898 | 0 | 0 | 0 | 0 | 0 | 0.001265 | 0.325155 | 4,687 | 140 | 83 | 33.478571 | 0.865002 | 0.485172 | 0 | 0.045455 | 0 | 0 | 0.106393 | 0.021474 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.068182 | 0 | 0.340909 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
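# --- Hedged usage sketch (assumes ImageMagick 7+, or the docker container from the
# class docstring, is available on this machine).
if __name__ == '__main__':
    magick = ImageMagickInterface(container=None, use_magick_prefix=True)
    print(magick.run_get_output('-version'))  # runs `magick -version`
    magick.print_command_history()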
3b71dd0e376b1aea6b14bf0dfc56584ed3214480 | 3,939 | py | Python | domainbed/lib/Dataset_All.py | zhaoxin94/DomainBed | f880b13a6be82829c7b7c519a7cca54439bda524 | [
"MIT"
] | null | null | null | domainbed/lib/Dataset_All.py | zhaoxin94/DomainBed | f880b13a6be82829c7b7c519a7cca54439bda524 | [
"MIT"
] | null | null | null | domainbed/lib/Dataset_All.py | zhaoxin94/DomainBed | f880b13a6be82829c7b7c519a7cca54439bda524 | [
"MIT"
] | null | null | null | import random
from math import sqrt
import numpy as np
from torch.utils.data import ConcatDataset, Dataset
from torchvision import transforms
class DatasetAll_FDA(Dataset):
"""
Combine separate datasets into one and return FDA (Fourier-mixed) image pairs.
"""
def __init__(self, data_list, alpha=1.0):
self.data = ConcatDataset(data_list)
self.pre_transform = transforms.Compose([
transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),
transforms.RandomGrayscale(), lambda x: np.asarray(x)
])
self.post_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
self.alpha = alpha
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
img, label = self.data[idx]
# randomly sample an item from the dataset
img_s, _ = self._sample_item()
# do pre_transform before FDA
img = self.pre_transform(img)
img_s = self.pre_transform(img_s)
# FDA
img_mix = self._colorful_spectrum_mix(img, img_s, self.alpha)
# do post_transform after FDA
img = self.post_transform(img)
img_mix = self.post_transform(img_mix)
img = [img, img_mix]
label = [label, label]
return img, label
def _colorful_spectrum_mix(self, img1, img2, alpha, ratio=1.0):
"""Input image size: ndarray of [H, W, C]"""
lam = np.random.uniform(0, alpha)
assert img1.shape == img2.shape
h, w, c = img1.shape
h_crop = int(h * sqrt(ratio))
w_crop = int(w * sqrt(ratio))
h_start = h // 2 - h_crop // 2
w_start = w // 2 - w_crop // 2
img1_fft = np.fft.fft2(img1, axes=(0, 1))
img2_fft = np.fft.fft2(img2, axes=(0, 1))
img1_abs, img1_pha = np.abs(img1_fft), np.angle(img1_fft)
img2_abs, img2_pha = np.abs(img2_fft), np.angle(img2_fft)
img1_abs = np.fft.fftshift(img1_abs, axes=(0, 1))
img2_abs = np.fft.fftshift(img2_abs, axes=(0, 1))
img1_abs_ = np.copy(img1_abs)
img2_abs_ = np.copy(img2_abs)
region = (slice(h_start, h_start + h_crop), slice(w_start, w_start + w_crop))
img1_abs[region] = lam * img2_abs_[region] + (1 - lam) * img1_abs_[region]
img1_abs = np.fft.ifftshift(img1_abs, axes=(0, 1))
img2_abs = np.fft.ifftshift(img2_abs, axes=(0, 1))
img21 = img1_abs * (np.e**(1j * img1_pha))
img21 = np.real(np.fft.ifft2(img21, axes=(0, 1)))
img21 = np.uint8(np.clip(img21, 0, 255))
return img21
def _sample_item(self):
idxs = list(range(len(self.data)))
selected_idx = random.sample(idxs, 1)[0]
return self.data[selected_idx]
class DatasetAll(Dataset):
"""
Combine separate datasets into one (no FDA mixing).
"""
def __init__(self, data_list):
self.data = ConcatDataset(data_list)
self.pre_transform = transforms.Compose([
transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),
transforms.RandomGrayscale()
])
self.post_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
| 32.553719 | 117 | 0.561056 | 514 | 3,939 | 4.075875 | 0.212062 | 0.038186 | 0.023389 | 0.011456 | 0.466348 | 0.444391 | 0.444391 | 0.444391 | 0.444391 | 0.372792 | 0 | 0.059196 | 0.318101 | 3,939 | 120 | 118 | 32.825 | 0.720774 | 0.049251 | 0 | 0.345679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012346 | 1 | 0.098765 | false | 0 | 0.061728 | 0.037037 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
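# --- Hedged usage sketch: a tiny random-image dataset stands in for the real domain
# datasets (which would normally be torchvision ImageFolder instances of PIL images).
import numpy as np
from PIL import Image
from torch.utils.data import Dataset

class _ToyImages(Dataset):
    """Four random 256x256 RGB PIL images, all labeled 0."""
    def __len__(self):
        return 4
    def __getitem__(self, idx):
        arr = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)
        return Image.fromarray(arr), 0

toy = DatasetAll_FDA([_ToyImages(), _ToyImages()], alpha=1.0)
(img, img_mix), (label, _) = toy[0]   # original and Fourier-amplitude-mixed views
print(img.shape, img_mix.shape)       # both torch.Size([3, 224, 224])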
3b722402e45e22ead2f85ea3f8f782a3a420b3f1 | 19,001 | py | Python | Main.py | PositivePeriod/Touchable | 8ecb69bd72f16bc0c244c2e983316659d2db1eb5 | [
"MIT"
] | 1 | 2020-07-24T19:29:24.000Z | 2020-07-24T19:29:24.000Z | Main.py | PositivePeriod/Touchable | 8ecb69bd72f16bc0c244c2e983316659d2db1eb5 | [
"MIT"
] | 2 | 2022-01-13T03:01:41.000Z | 2022-03-12T00:40:55.000Z | Main.py | PositivePeriod/Touchable | 8ecb69bd72f16bc0c244c2e983316659d2db1eb5 | [
"MIT"
] | null | null | null | from Canvas import Canvas
from Detector import Detector
from GUI import GUI
from Tracker import Tracker
from Function import *
from Video import Video
from Pen import Pens
from Key import Key
from Image import ImageManager
import tkinter
import tkinter.messagebox
import tkinter.font
import tkinter.simpledialog
import time
import cv2
import os
class Touchable:
def __init__(self):
os.chdir(os.path.dirname(os.path.realpath(__file__)))
to_dir = [r'./data/', r'./data/pen_data/', r'./data/image_save/', r'./data/source/']
for dir_ in to_dir:
if not os.path.isdir(dir_):
os.mkdir(dir_)
self.pen = Pens(r'./data/pen_data/')
self.video = Video()
self.detector = Detector()
self.tracker = Tracker()
self.image_manager = ImageManager(self, r'./data/source/')
self.function = None
self.var = None
self.stop = None
self.canvas = Canvas()
self.gui = GUI(self)
self.key = Key(self, self.canvas)
self.gui.start_gui()
def show_camera(self):
if not self.video.is_working():
return False
top_level = tkinter.Toplevel(self.gui.window)
top_level.title('Touchable - Camera')
top_level.geometry('320x180')
canvas = tkinter.Canvas(top_level, bg='black')
canvas.place(x=0, y=0, relwidth=1, relheight=1)
top_level.update()
canvas.update()
try:
while True:
if self.video.is_working():
img = self.video.get_frame()
if img is not None:
width, height = canvas.winfo_width(), canvas.winfo_height()
scale, width_margin, height_margin = fit_resize(1280, 720, width, height)
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_resize = cv2.resize(img_rgb, dsize=(int(1280 * scale), int(720 * scale)), interpolation=cv2.INTER_AREA)
photo = pil_to_tkinter(img_resize)
canvas.create_image(width // 2, height // 2, image=photo, anchor=tkinter.CENTER)
canvas.update()
else:
top_level.destroy()
break
except Exception as e:
print(f'Error in show_camera; {e}')
raise e
def set_detect(self):
if not self.video.is_working():
success = self.video.set_camera('on')
if not success:
print('Video is not working; cannot enter set_detect')
return False
self.var = {'run': True, 'hsv': (0, 0, 0), 'pick_hsv': (0, 0, 255), 'roi': None, 'pick_roi': None, 'clicked': False}
self.enter('set_detect')
ret_counter = 0
while True:
while self.var['run']: # determine detect color
try:
img = self.video.get_frame() # get image from camera; type(img) = numpy.nd array
if img is None:
ret_counter += 1
if ret_counter == 20:
return self.exit('set_detect')
time.sleep(0.1)
continue
else:
ret_counter = 0
except AttributeError as e:
print('AttributeError; set_detect', e)
return self.exit('set_detect')
self.detector.bg_subtract(img)
width, height = self.gui.widget['canvas'].winfo_width(), self.gui.widget['canvas'].winfo_height()
scale, width_margin, height_margin = fit_resize(1280, 720, width, height)
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_resize = cv2.resize(img_rgb, dsize=(int(1280 * scale), int(720 * scale)),
interpolation=cv2.INTER_AREA)
photo = pil_to_tkinter(img_resize)
self.gui.widget['canvas'].create_image(width // 2, height // 2, image=photo, anchor=tkinter.CENTER)
roi_size = [150, 150]
roi = img[720 // 2 - roi_size[0]:720 // 2 + roi_size[0], 1280 // 2 - roi_size[1]:1280 // 2 + roi_size[1]]
circles = self.detector.find_circle(roi, set_detect=True, roi=roi_size)
d, u = convert_pos(scale, width_margin, height_margin, x=720 // 2 - roi_size[0],
y=720 // 2 + roi_size[1])
l, r = convert_pos(scale, width_margin, height_margin, x=1280 // 2 - roi_size[0],
y=1280 // 2 + roi_size[1])
self.gui.widget['canvas'].create_rectangle(l, d, r, u, width=2, outline='red')
if circles is None:
w, h = convert_pos(scale, width_margin, height_margin, relx=0.5, rely=0.9)
self.gui.widget['canvas'].create_rectangle(w - 100, h - 20, w + 100, h + 20, fill='red',
outline='red')
self.gui.widget['canvas'].create_text((w, h), font=tkinter.font.Font(size=15), fill='white',
text='Adjust the distance')
else:
x, y, max_rad = 0, 0, 0
for circle in circles: # for every circle
if circle[2] > max_rad: # circle[2] == radius
x, y, max_rad = circle[0], circle[1], circle[2]  # circle center coordinates
self.var['roi'] = (img, (x, y), max_rad)
self.var['clicked'] = True
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv = center_color(img_hsv, x, y, int(max_rad * 0.5))
self.var['hsv'] = hsv
x, y = convert_pos(scale, width_margin, height_margin, x=x, y=y)
max_rad = int(max_rad * scale)
self.gui.widget['canvas'].create_line(x - 5, y, x + 5, y, fill='white')
self.gui.widget['canvas'].create_line(x, y - 5, x, y + 5, fill='white')
self.gui.widget['canvas'].create_oval(x - max_rad - 3, y - max_rad - 3, x + max_rad + 3,
y + max_rad + 3, outline=color_type(hsv, 'hsv', 'hex'),
width=6)
self.gui.widget['canvas'].create_oval(x - max_rad, y - max_rad, x + max_rad, y + max_rad,
outline='white', width=3)
self.gui.widget['palette'].delete('all')
self.gui.widget['palette'].create_rectangle(0, 0, self.gui.widget['palette'].winfo_width(),
self.gui.widget['palette'].winfo_height(),
fill=color_type(self.var['pick_hsv'], 'hsv', 'hex'))
self.gui.widget['canvas'].update()
self.gui.widget['palette'].update()
self.gui.widget['canvas'].delete('all')
if self.pen.make_pen(self):
break
else:
self.var['run'] = True
# TODO
# self.detector.set_backprojection(image=self.var['pick_roi'][0], pos=self.var['pick_roi'][1]
time.sleep(0.05)
self.detector.set_backprojection(image=self.var['pick_roi'][0], pos=self.var['pick_roi'][1],
rad=self.var['pick_roi'][2])
return self.exit('set_detect', True)
def detect(self, pen=None, color_reflect=0.01, back_image=None):
if self.pen.is_empty():
new_pen = self.set_detect()
if not new_pen:
print('No new pen; cannot enter detect')
return False
if not self.video.is_working():
print('Video is not working; cannot enter detect')
return False
if pen is None:
pen = self.pen.get_pen()
self.var = {'run': True, 'pen': pen, 'pos': None, 'target': None, 'mark': None, 'event': None, 'scale': 1}
self.enter('detect')
backup_pen_hsv = pen.access_hsv()
no_circle = 0
ret_counter = 0
self.gui.widget['canvas'].configure(bg='white')
self.detector.reset_bg_subtract()
last_result = None
tracked = False
tracker_roi = None
tracker_result = None
roi_size = 2
self.stop = False
while self.var['run']: # determine detect color # TODO turn off
try:
img = self.video.get_frame() # get image from camera; type(img) = numpy.nd array
if img is None:
ret_counter += 1
if ret_counter == 20:
print('Cannot get frame for long time; leave detect')
return self.exit('detect')
time.sleep(0.1)
continue
else:
ret_counter = 0
except AttributeError as e:
print('AttributeError; detect', e)
return self.exit('detect')
if no_circle > 20:  # hard-coded; the 20-frame limit can be changed / re-initializes the color
print('No circle; reset color')
no_circle = 0
pen.access_hsv(backup_pen_hsv)
self.gui.widget['palette'].create_rectangle(0, 0, self.gui.widget['palette'].winfo_width(),
self.gui.widget['palette'].winfo_height(),
fill=color_type(pen.access_color(), 'hsv', 'rgb'))
width, height = self.gui.widget['canvas'].winfo_width(), self.gui.widget['canvas'].winfo_height()
if back_image is not None: # TODO
height_, width_, _ = back_image.shape
scale_, width_margin_, height_margin_ = fit_resize(width_, height_, width, height)
img_cvt = cv2.cvtColor(back_image, cv2.COLOR_BGR2RGB)
img_res = cv2.resize(img_cvt, dsize=(int(width_ * scale_), int(height_ * scale_)), interpolation=cv2.INTER_AREA)
photo = pil_to_tkinter(img_res)
self.gui.widget['canvas'].create_image(width // 2, height // 2, image=photo, anchor=tkinter.CENTER)
scale, width_margin, height_margin = fit_resize(1280, 720, width, height)
self.canvas.draw(scale, width_margin, height_margin)
result = None
# 0. Preprocessing
img_subtract = self.detector.bg_subtract(img)
'''
if self.stop:
time.sleep(0.01)
continue
'''
img_color = self.detector.backprojection(img_subtract)
img_color = cv2.bilateralFilter(img_color, 9, 75, 75)
img_color = self.detector.morph(img_color)
# 1. Contour
contours = self.detector.contour(img_color)
answer = self.detector.contour_process(contours)
if answer is not None:
contour, x, y, rad = answer
contour_color = self.detector.contour_color(img, contour)
if hsv_square_distance(pen.access_hsv(), contour_color, only_h=True) < 0.6 and rad > 10:
result = [[x, y], int(0.7*rad)] # calibration
cv2.circle(img, (x, y), rad, (255, 0, 0))
if result is None:
# 2. Tracker
if tracked:
pos, rad = tracker_roi
r1 = int(max(pos[1]-roi_size*rad, 0))
r2 = int(min(pos[1]+roi_size*rad, int(img.shape[0])))
r3 = int(max(pos[0]-roi_size*rad, 0))
r4 = int(min(pos[0]+roi_size*rad, int(img.shape[1])))
roi = img[r1:r2, r3:r4].copy()
rect = self.tracker.track(roi)
if rect is None:
tracked = False
tracker_result = None
else:
rect = [int(rect[0]+r3), int(rect[1]+r1), int(rect[2]+r3), int(rect[3]+r1)]
pos_ = [int((rect[0]+rect[2])/2), int((rect[1]+rect[3])/2)]
rad_ = min(int((-rect[0]+rect[2])/2), int((-rect[1]+rect[3])/2))
tracker_result = [pos_, rad_]
cv2.rectangle(img, (rect[0], rect[1]), (rect[2], rect[3]), (0, 0, 255), 3)
# 3. Detector
circles = self.detector.find_circle(img_color, blob=True) # TODO ROI
if circles is None:
no_circle += 1
tracked = False
self.tracker.reset()
if circles is not None:
no_circle = 0
temp_pos, temp_rad = [0, 0], 0
priority_ = 2  # smaller is better
if tracked:
for circle in circles: # for every circle
x, y, rad = circle
if rad < 10:
continue
in_rect = -int(rect[0] <= x <= rect[2] and rect[1] <= y <= rect[3])
center_hsv = center_color(img, x, y, int(rad*0.9))
hsv_distance = hsv_square_distance(center_hsv, pen.access_hsv(), only_h=True)
priority = hsv_distance-in_rect
if priority > 0.3:
continue
elif priority < priority_:
temp_pos, temp_rad, priority_ = [x, y], rad, priority
else:
for circle in circles: # for every circle
x, y, rad = circle
if rad < 10:
continue
center_hsv = center_color(img, x, y, int(rad * 0.9))
priority = hsv_square_distance(center_hsv, pen.access_hsv(), only_h=True)
if priority > 0.3:
continue
elif priority < priority_:
temp_pos, temp_rad, priority_ = [x, y], rad, priority
if priority_ != 2:
result = [temp_pos, int(temp_rad*0.7)] # calibration
cv2.circle(img, tuple(result[0]), result[1], (0, 0, 255))
if result is None:
# TODO - not needed
if tracker_result is not None:
if (not (0 < tracker_result[0][0] < 1280)) or (not(0 < tracker_result[0][1] < 720)):
outside = True
elif last_result is not None:
if (not (0 < last_result[0][0] < 1280)) or (not(0 < last_result[0][1] < 720)):
outside = True
tracked = False
else:
pos, rad = result
if last_result is None or square_distance(last_result[0], result[0], root=True) < 50:
last_result = result
tracked = True
self.tracker.reset()
if tracker_result is not None:
track_rad = max(rad, tracker_result[1], 50)
else:
track_rad = max(rad, 50)
tracker_roi = [pos, track_rad]
y1 = int(max(pos[1]-roi_size*track_rad, 0))
y2 = int(min(pos[1]+roi_size*track_rad, int(img.shape[0])))
x1 = int(max(pos[0]-roi_size*track_rad, 0))
x2 = int(min(pos[0]+roi_size*track_rad, int(img.shape[1])))
self.tracker.set(img, (x1, y1, x2-x1, y2-y1))
cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255))
# self.detector.set_backprojection(image=img, pos=pos, rad=int(rad * 0.7 * 0.3)) # MIGHT ERROR - calibration
self.key.access_pos(pos)
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
temp_hsv = center_color(img_hsv, pos[0], pos[1], int(rad * 0.3))
pen.access_hsv([int(pen.access_hsv()[i_] * (1 - color_reflect) + temp_hsv[i_] * color_reflect) for i_ in range(3)])
width, height = self.gui.widget['canvas'].winfo_width(), self.gui.widget['canvas'].winfo_height()
scale, width_margin, height_margin = fit_resize(1280, 720, width, height)
x_, y_ = convert_pos(scale, width_margin, height_margin, x=pos[0], y=pos[1])
if self.key.access_event() is not None and self.key.access_event()[0] == '_':
cross_color = 'red'
else:
cross_color = 'black'
self.gui.widget['canvas'].create_line(x_ - 5, y_, x_ + 5, y_, fill=cross_color, width=1)
self.gui.widget['canvas'].create_line(x_, y_ - 5, x_, y_ + 5, fill=cross_color, width=1)
cv2.imshow('ori', img)
self.gui.widget['palette'].delete('all')
w, h = self.gui.widget['palette'].winfo_width(), self.gui.widget['palette'].winfo_height()
self.gui.widget['palette'].create_rectangle(0, 0, w, h, fill=color_type(pen.access_color(), 'hsv', 'hex'))
self.gui.widget['canvas'].update()
self.gui.widget['palette'].update()
self.gui.widget['canvas'].delete('all')
return self.exit('detect')
def stop_detect(self, reset_drawing=True):
if self.function == 'detect':
self.var['run'] = False
if reset_drawing:
self.canvas.clear()
def enter(self, command):
self.function = command
print(f'Enter {command}')
self.key.key_map(command)
def exit(self, command="all", success=False):
self.function = None
print(f'Leave {command}')
if not success:
if command == 'set_detect':
self.gui.widget['canvas'].delete('all')
self.gui.widget['palette'].delete('all')
elif command == 'detect':
self.gui.widget['canvas'].delete('all')
self.gui.widget['palette'].delete('all')
self.gui.widget['canvas'].configure(bg='black')
elif command == 'all':
self.video.close()
cv2.destroyAllWindows()
self.gui.window.destroy()
exit()
self.gui.widget['canvas'].update()
self.gui.widget['palette'].update()
return False
else:
return True
main = Touchable()
| 48.471939 | 131 | 0.495132 | 2,242 | 19,001 | 4.042373 | 0.117306 | 0.035529 | 0.060245 | 0.054507 | 0.507227 | 0.452499 | 0.399537 | 0.346464 | 0.323293 | 0.310714 | 0 | 0.034512 | 0.385453 | 19,001 | 391 | 132 | 48.595908 | 0.741629 | 0.032998 | 0 | 0.381924 | 0 | 0 | 0.054755 | 0 | 0 | 0 | 0 | 0.005115 | 0 | 1 | 0.020408 | false | 0 | 0.046647 | 0 | 0.104956 | 0.029155 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
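# --- Hedged stand-ins (assumptions): Function.py is star-imported above and not shown.
# From their call sites, helpers like center_color and hsv_square_distance could
# plausibly look like this; the real implementations may differ.
import numpy as np

def center_color_sketch(img_hsv, x, y, rad):
    # mean HSV over a square patch around (x, y); the real helper may sample a disc
    patch = img_hsv[max(y - rad, 0):y + rad + 1, max(x - rad, 0):x + rad + 1]
    return tuple(int(c) for c in patch.reshape(-1, 3).mean(axis=0))

def hsv_square_distance_sketch(hsv_a, hsv_b, only_h=False):
    # normalized squared distance; only_h=True compares just the hue channel
    channels = 1 if only_h else 3
    return sum(((hsv_a[i] - hsv_b[i]) / 255.0) ** 2 for i in range(channels))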
3b737ca1f860daa1879d93647b7707dac737931f | 1,057 | py | Python | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Geometry/Two_Dimensional/Planform/wing_fuel_volume.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Geometry/Two_Dimensional/Planform/wing_fuel_volume.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Geometry/Two_Dimensional/Planform/wing_fuel_volume.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | ## @ingroup Methods-Geometry-Two_Dimensional-Cross_Section-Planform
# wing_fuel_volume.py
#
# Created: Apr 2014, T. Orra
# Modified: Sep 2016, E. Botero
# ----------------------------------------------------------------------
# Correlation-based methods for wing fuel capacity estimation
# ----------------------------------------------------------------------
## @ingroup Methods-Geometry-Two_Dimensional-Cross_Section-Planform
def wing_fuel_volume(wing):
"""Calculates the available fuel volume in a wing.
Assumptions:
None
Source:
Torenbeek, E., "Advanced Aircraft Design", 2013 (equation 10.30)
Inputs:
wing.
areas.reference [m^2]
aspect_ratio [-]
thickness_to_chord [-]
Outputs:
wing.volume [m^3]
Properties Used:
N/A
"""
# Unpack
sref = wing.areas.reference
ar = wing.aspect_ratio
tc = wing.thickness_to_chord
# Calculate
volume = 0.90 * tc * sref ** 1.5 * ar ** -0.5 * 0.55
# Pack
wing.fuel_volume = volume | 25.166667 | 72 | 0.545885 | 115 | 1,057 | 4.886957 | 0.608696 | 0.05694 | 0.074733 | 0.088968 | 0.199288 | 0.199288 | 0.199288 | 0.199288 | 0 | 0 | 0 | 0.034483 | 0.231788 | 1,057 | 42 | 73 | 25.166667 | 0.657635 | 0.685904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
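# --- Hedged worked example with illustrative numbers (loosely the values used in
# SUAVE's 737 tutorial); SimpleNamespace stands in for the SUAVE wing data structure.
if __name__ == '__main__':
    from types import SimpleNamespace
    wing = SimpleNamespace(
        areas=SimpleNamespace(reference=124.86),  # [m^2]
        aspect_ratio=10.18,
        thickness_to_chord=0.10,
    )
    wing_fuel_volume(wing)
    print(f'fuel volume: {wing.fuel_volume:.1f} m^3')  # ~21.6 m^3 for these inputs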
3b73dd9af423cd6336a9986151cd7a7b2c788948 | 4,559 | py | Python | bycycle/cyclepoints/zerox.py | ryanhammonds/bycycle | c285c5b1bf5de985cea3f0898bf8e2b01171feca | [
"Apache-2.0"
] | 48 | 2019-03-04T22:37:15.000Z | 2022-03-28T16:55:52.000Z | bycycle/cyclepoints/zerox.py | ryanhammonds/bycycle | c285c5b1bf5de985cea3f0898bf8e2b01171feca | [
"Apache-2.0"
] | 83 | 2019-02-01T19:09:23.000Z | 2022-01-10T20:27:29.000Z | bycycle/cyclepoints/zerox.py | ryanhammonds/bycycle | c285c5b1bf5de985cea3f0898bf8e2b01171feca | [
"Apache-2.0"
] | 15 | 2019-06-04T23:22:37.000Z | 2021-12-21T07:49:31.000Z | """Find zero-crossings for individual cycles."""
from operator import gt, lt
import numpy as np
###################################################################################################
###################################################################################################
def find_zerox(sig, peaks, troughs):
"""Find zero-crossings within each cycle, from identified peaks and troughs.
Parameters
----------
sig : 1d array
Time series.
peaks : 1d array
Samples of oscillatory peaks.
troughs : 1d array
Samples of oscillatory troughs.
Returns
-------
rises : 1d array
Samples at which oscillatory rising zero-crossings occur.
decays : 1d array
Samples at which oscillatory decaying zero-crossings occur.
Notes
-----
- Zero-crossings are defined as when the voltage crosses midway between one extrema and
the next. For example, a 'rise' is halfway from the trough to the peak.
- If this halfway voltage is crossed at multiple times, the temporal median is taken
as the zero-crossing.
- Sometimes, due to noise in estimating peaks and troughs when the oscillation
is absent, the estimated peak might be lower than an adjacent trough. If this
occurs, the rise and decay zero-crossings will be set to be halfway between
the peak and trough.
- Burst detection should be used to restrict phase estimation to periods with oscillations
present, in order to ignore periods of the signal in which estimation is poor.
Examples
--------
Find the rise and decay zero-crossings locations of a simulated signal:
>>> from neurodsp.sim import sim_bursty_oscillation
>>> from bycycle.cyclepoints import find_extrema
>>> fs = 500
>>> sig = sim_bursty_oscillation(10, fs, freq=10)
>>> peaks, troughs = find_extrema(sig, fs, f_range=(8, 12))
>>> rises, decays = find_zerox(sig, peaks, troughs)
"""
# Calculate the number of rises and decays
n_rises = len(peaks)
n_decays = len(troughs)
idx_bias = 0
# Offset values, depending on order of peaks & troughs
if peaks[0] < troughs[0]:
n_rises -= 1
else:
n_decays -= 1
idx_bias += 1
rises = _find_flank_midpoints(sig, 'rise', n_rises, troughs, peaks, idx_bias)
decays = _find_flank_midpoints(sig, 'decay', n_decays, peaks, troughs, idx_bias)
return rises, decays
def find_flank_zerox(sig, flank):
"""Find zero-crossings on rising or decaying flanks of a filtered signal.
Parameters
----------
sig : 1d array
Time series to detect zero-crossings in.
flank : {'rise', 'decay'}
Which flank, rise or decay, to use to get zero crossings.
Returns
-------
zero_xs : 1d array
Samples of the zero crossings.
Examples
--------
Find rising flanks in a filtered signal:
>>> from neurodsp.sim import sim_bursty_oscillation
>>> from neurodsp.filt import filter_signal
>>> sig = sim_bursty_oscillation(10, 500, freq=10)
>>> sig_filt = filter_signal(sig, 500, 'lowpass', 30)
>>> rises_flank = find_flank_zerox(sig_filt, 'rise')
"""
assert flank in ['rise', 'decay']
pos = sig <= 0 if flank == 'rise' else sig > 0
zero_xs = (pos[:-1] & ~pos[1:]).nonzero()[0]
# If no zero-crossing's found (peak and trough are same voltage), output dummy value
zero_xs = [int(len(sig) / 2)] if len(zero_xs) == 0 else zero_xs
return zero_xs
def _find_flank_midpoints(sig, flank, n_flanks, extrema_start, extrema_end, idx_bias):
"""Helper function for find_zerox."""
assert flank in ['rise', 'decay']
idx_bias = -idx_bias + 1 if flank == 'rise' else idx_bias
comp = gt if flank == 'rise' else lt
flanks = np.zeros(n_flanks, dtype=int)
for idx in range(n_flanks):
sig_temp = np.copy(sig[extrema_start[idx]:extrema_end[idx + idx_bias] + 1])
sig_temp -= (sig_temp[0] + sig_temp[-1]) / 2.
# If data is all zeros, just set the zero-crossing to be halfway between
if np.sum(np.abs(sig_temp)) == 0:
flanks[idx] = extrema_start[idx] + int(len(sig_temp) / 2.)
# If flank is actually an extrema, just set the zero-crossing to be halfway between
elif comp(sig_temp[0], sig_temp[-1]):
flanks[idx] = extrema_start[idx] + int(len(sig_temp) / 2.)
else:
flanks[idx] = extrema_start[idx] + int(np.median(find_flank_zerox(sig_temp, flank)))
return flanks
| 34.022388 | 99 | 0.622286 | 626 | 4,559 | 4.412141 | 0.276358 | 0.051774 | 0.025344 | 0.017379 | 0.250905 | 0.179942 | 0.093411 | 0.093411 | 0.093411 | 0.027516 | 0 | 0.01493 | 0.236017 | 4,559 | 133 | 100 | 34.278195 | 0.778065 | 0.554946 | 0 | 0.171429 | 0 | 0 | 0.024621 | 0 | 0 | 0 | 0 | 0 | 0.057143 | 1 | 0.085714 | false | 0 | 0.057143 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b7ada4d94b476f49373c95f6b93102fb37d26b1 | 1,327 | py | Python | SampleModels/BasicModel/AnalyseDrifters.py | fearghalodonncha/DeepCurrent | 8dfb19b701a225ead61d6015d95c703478035ce0 | [
"MIT"
] | 32 | 2018-03-31T22:19:25.000Z | 2022-03-14T01:35:23.000Z | SampleModels/BasicModel/AnalyseDrifters.py | fearghalodonncha/DeepCurrent | 8dfb19b701a225ead61d6015d95c703478035ce0 | [
"MIT"
] | 2 | 2020-04-02T06:13:13.000Z | 2021-06-10T07:15:07.000Z | SampleModels/BasicModel/AnalyseDrifters.py | fearghalodonncha/DeepCurrent | 8dfb19b701a225ead61d6015d95c703478035ce0 | [
"MIT"
] | 15 | 2018-06-27T02:55:23.000Z | 2021-09-09T07:51:23.000Z | import numpy as np
import matplotlib.pyplot as plt
def read_drifter(filename):
with open(filename) as f:
lines = f.readlines()
NPD = float(lines[3].split()[0]) ## NPD, number of particles specified on line 4
times_list = lines[4::2]
drifter_list = lines[5::2]
times_np = np.zeros([len(times_list)])
drift_x = np.zeros([len(times_list), int(NPD)])
drift_y = np.zeros([len(times_list), int(NPD)])
drift_z = np.zeros([len(times_list), int(NPD)])
for t in range(0, len(times_list)):
times_np[t] = float(times_list[t].split()[0])
for d in range(0, int(NPD)):
step = 3  # three values (x, y, z) per particle on each drifter line
Lall = 1
drift_x[t,d] = float(drifter_list[t].split()[1 - Lall + (d*step)])
drift_y[t,d] = float(drifter_list[t].split()[2 - Lall + (d*step)])
drift_z[t,d] = float(drifter_list[t].split()[3 - Lall + (d*step)])
drift_x[drift_x == 0] = np.nan
drift_y[drift_y == 0] = np.nan
return drift_x, drift_y, drift_z
def main():
drifter_filename = 'DRIFTER.OUT'
drift_x, drift_y, drift_z = read_drifter(drifter_filename)
plt.plot(drift_x, drift_y, '.')
plt.plot([0, 105], [260, 260])
plt.show()  # needed to display the figure when run as a script
if __name__ == "__main__":
main()
| 32.365854 | 86 | 0.568953 | 207 | 1,327 | 3.439614 | 0.275362 | 0.088483 | 0.08427 | 0.08427 | 0.297753 | 0.271067 | 0.220506 | 0.08427 | 0 | 0 | 0 | 0.031024 | 0.271289 | 1,327 | 40 | 87 | 33.175 | 0.705274 | 0.033158 | 0 | 0.117647 | 0 | 0 | 0.015625 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.147059 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
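# --- Hedged sketch: writes a tiny synthetic DRIFTER.OUT matching the layout that
# read_drifter assumes (three header lines, NPD on line 4, then alternating time and
# coordinate lines with an x/y/z triplet per particle). Format inferred from the
# parser above, not from any EFDC documentation.
def write_demo_drifter(filename='DRIFTER_DEMO.OUT', npd=2, nsteps=3):
    with open(filename, 'w') as f:
        f.write('header 1\nheader 2\nheader 3\n')
        f.write(f'{npd}\n')
        for t in range(nsteps):
            f.write(f'{t * 60.0}\n')  # time stamp line
            f.write(' '.join(f'{10.0 + t} {20.0 + t} 0.5' for _ in range(npd)) + '\n')

# write_demo_drifter(); x, y, z = read_drifter('DRIFTER_DEMO.OUT')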
3b7b8443e086f193aae994977d55ad1ff72e4870 | 9,013 | py | Python | src/trading_algorithm.py | Blocksize-Capital-GmbH/Quant-VM---Crypto-Arbitrage-Software | aefdab0a4a2ded2556bbf0289bdeb21a91da0b91 | [
"Apache-2.0"
] | 1 | 2022-03-20T14:34:51.000Z | 2022-03-20T14:34:51.000Z | src/trading_algorithm.py | Blocksize-Capital-GmbH/Quant-VM---Crypto-Arbitrage-Software | aefdab0a4a2ded2556bbf0289bdeb21a91da0b91 | [
"Apache-2.0"
] | null | null | null | src/trading_algorithm.py | Blocksize-Capital-GmbH/Quant-VM---Crypto-Arbitrage-Software | aefdab0a4a2ded2556bbf0289bdeb21a91da0b91 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import json
import psycopg2
from typing import Dict, List, Tuple, Union
from abc import abstractmethod
import src.helpers
import src.util
from src.base_with_database_logger import BaseWithDatabaseAndLogger
from src.client.custom_sdk_client import CustomClient
from src.helpers import DBMode
import src.sql_queries
class TradingAlgorithm(BaseWithDatabaseAndLogger):
def __init__(
self,
algo_name,
mode,
logger_wrapper: src.util.LoggerWrapper,
open_db_connection=False,
client=None
):
super().__init__(mode, logger_wrapper, open_db_connection)
self.__name: str = algo_name
query_id = src.sql_queries.query_algo_id(self.name)
raw_result = self.db_connector.execute_dql(query_id)
if len(raw_result) == 1:
self.__algo_id = raw_result[0][0]
else:
raise Exception("Too many results")
self.__current_order_id = None
self.logger_wrapper.order_id = self.__current_order_id
if self.mode in (DBMode.DEV, DBMode.TEST):
self.__simulation = True
else:
self.__simulation = False
self.__configuration = self.load_config()
if client:
self.__client = client
else:
self.__client = CustomClient(
os.getenv('API_KEY_BLOCKSIZE'),
logger=self.logger_wrapper.logger
)
try:
self.exchanges = None
exchange_configs = self.configuration["EXCHANGES"]
# TODO: remove BASE and QUOTE because they are replaced with CURRENCY_PAIRS
self.base = self.configuration["BASE"]
self.quote = self.configuration["QUOTE"]
self.precision = self.configuration["PRESCISION"]
self.lot_size = float(self.configuration["LOT_SIZE"])
self.min_lot_size = float(self.configuration["MIN_LOT_SIZE"])
self.fund_update_lock_period = self.configuration["FUND_UPDATE_LOCK_PERIOD"]
self.slippage_buffer_bps = self.configuration["SLIPPAGE_BUFFER_BPS"]
self.fund_buffer = float(self.configuration["FUND_BUFFER"])
currencies = set()
self.currency_pair_exchange_association = {}
for currency_pair in self.configuration["CURRENCY_PAIRS"]:
currencies.add(currency_pair['code_base'])
currencies.add(currency_pair['code_quote'])
self.currency_pair_exchange_association[currency_pair['symbol']] = []
for exchange_key, exchange in self.configuration["EXCHANGES"].items():
for exchange_currency_pairs in exchange['CURRENCY PAIRS']:
if exchange_currency_pairs['symbol'] == currency_pair['symbol']:
self.currency_pair_exchange_association[currency_pair['symbol']].append(exchange_key)
break
self.currencies = list(currencies)
self.set_exchange_data(exchange_configs)
self._init_fund_map()
self.update_funds()
except Exception:
self.logger_wrapper.logger.error(
"Error during configuration of the trader", exc_info=True
)
@abstractmethod
def trade_algorithm(self):
pass
@property
def client(self):
return self.__client
@property
def algo_id(self):
return self.__algo_id
@property
def current_order_id(self):
return self.__current_order_id
@property
def name(self):
return self.__name
@property
def simulation(self):
return self.__simulation
@property
def configuration(self):
return self.__configuration
@name.setter
def name(self, name):
self.__name = name
@current_order_id.setter
def current_order_id(self, order_id):
self.__current_order_id = order_id
def set_exchange_data(self, exchanges_config: Dict[str, Dict[str, Union[float, Dict]]]):
self.exchanges = list(exchanges_config.keys())
for main_exchange, exchange_settings in exchanges_config.items():
self.fee_map[main_exchange] = exchange_settings["FEE"]
for ask_exchange in self.exchanges:
if ask_exchange == main_exchange:
continue
if main_exchange not in self.threshold_map.keys():
self.threshold_map[main_exchange] = {}
if ask_exchange in exchange_settings["THRESHOLDS"].keys():
self.threshold_map[main_exchange][ask_exchange] = exchange_settings["THRESHOLDS"][ask_exchange]
else:
self.threshold_map[main_exchange][ask_exchange] = exchange_settings["THRESHOLDS"]["DEFAULT"]
def update_funds(self):
balances_raw_resp = self.client.query_funds()
balances_all = balances_raw_resp.get('funds')
for item in balances_all:
exchange = item.get('name')
if exchange not in self.exchanges:
continue
balance = item.get('balances')
# if exchange should have data and it doesn't stop balance collection and return None
# reason: with incomplete balance statements we end up with wrong portfolio values
if balance is None:
self.logger_wrapper.logger.debug(
f"exchange data was missing, exchange: {exchange}"
)
# Todo implement multiple retries
self.update_funds()
return None
for balance_item in balance:
currency = balance_item.get('currency')
if currency not in self.currencies:
continue
self.funds[exchange][currency] = float(balance_item.get("amount"))
# Fund Management
#
def _init_fund_map(self):
self.funds = {}
for exchange in self.exchanges:
self.funds[exchange]: Dict[str, float] = {}
for currency in [self.base, self.quote]:
self.funds[exchange][currency] = 0.0
def load_config(self):
try:
with self.db_connector.connection as conn:
with conn.cursor() as cursor:
# query of standard configuration for trading algorithm
algo_config_query = src.sql_queries.query_algo_configuration(self.name)
cursor.execute(algo_config_query)
result_algo_configuration = cursor.fetchall()
query_currency_pairs_with_symbols = src.sql_queries.query_currency_pairs()
# query of currencies associated to algorithm
currency_pairs_query = src.sql_queries.query_algo_specific_currency_pairs(self.name)
cursor.execute(currency_pairs_query)
result_currency_pairs = cursor.fetchall()
currency_pairs = [{"code_base": item[2], "code_quote": item[4], "symbol": item[5]} for item in result_currency_pairs]
# query for exchanges
cursor.execute(src.sql_queries.query_algo_exchange_association(self.name))
result_exchanges = cursor.fetchall()
exchanges = {exchange[1]: {'EXCHANGE_NAME': exchange[1], "ID": exchange[0]} for exchange in result_exchanges}
# currency pairs available at exchanges
for key, exchange in exchanges.items():
cursor.execute(src.sql_queries.query_exchange_currency_pairs(self.name, exchange['ID']))
result_currency_pair_exchange = cursor.fetchall()
exchanges[key]['CURRENCY PAIRS'] = [{"code_base": item[1], "code_quote": item[2], "symbol": item[3]} for item in result_currency_pair_exchange]
# TODO: fees
for key, exchange in exchanges.items():
exchanges[key]['FEE'] = {"BUY": 0, "SELL": 0, "LIMIT_BUY": 0, "LIMIT_SELL": 0}
# TODO: thresholds
for key, exchange in exchanges.items():
exchanges[key]['THRESHOLDS'] = {'DEFAULT': -25}
configuration = {item[1]: item[2] for item in result_algo_configuration}
configuration['CURRENCY_PAIRS'] = currency_pairs
configuration['EXCHANGES'] = exchanges
return configuration
except (Exception, psycopg2.Error) as error:
    self.logger_wrapper.logger.error(f"Unable to fetch configuration from database: {error}", exc_info=True)
with open("example_config.json") as config_file:
configuration = json.load(config_file)
return configuration
| 39.704846 | 167 | 0.606235 | 973 | 9,013 | 5.354573 | 0.208633 | 0.042418 | 0.017466 | 0.020729 | 0.200576 | 0.120154 | 0.075624 | 0.06142 | 0.024952 | 0.024952 | 0 | 0.004169 | 0.308 | 9,013 | 226 | 168 | 39.880531 | 0.831169 | 0.055032 | 0 | 0.156977 | 0 | 0 | 0.068682 | 0.002705 | 0 | 0 | 0 | 0.004425 | 0 | 1 | 0.087209 | false | 0.005814 | 0.063953 | 0.040698 | 0.215116 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
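# --- Hedged sketch of a concrete strategy (illustrative only): the real constructor
# needs a live database, a LoggerWrapper, and the Blocksize client, none of which
# are stubbed here.
class ExampleSpreadAlgo(TradingAlgorithm):
    def trade_algorithm(self):
        # refresh balances, then walk the configured pair/exchange combinations;
        # a real strategy would compare quotes against self.threshold_map here
        self.update_funds()
        for symbol, exchanges in self.currency_pair_exchange_association.items():
            self.logger_wrapper.logger.info(f"{symbol} tradeable on {exchanges}")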
3b7f19efe5226324127b16d1d9afc2df6edb7254 | 1,891 | py | Python | list_2d_2.py | min-xu-ai/py_perf | ba9f07eefc8031a34fe77f19fc6be19d08344bff | [
"MIT"
] | null | null | null | list_2d_2.py | min-xu-ai/py_perf | ba9f07eefc8031a34fe77f19fc6be19d08344bff | [
"MIT"
] | null | null | null | list_2d_2.py | min-xu-ai/py_perf | ba9f07eefc8031a34fe77f19fc6be19d08344bff | [
"MIT"
] | null | null | null | #!/usr/bin/env pypy3
'''
Testing 2D list (list of lists) data structure.
'''
import time
import random
from lib import benchmark, random_tuple
g_list = []
g_size = 0
g_count = 0
g_get_keys = []
g_set_keys = []
def setup(size, density):
''' Populated the table.
:param int size: total entries
:param float density: (0,1] value for how many entries to add.
'''
assert size > 0, size
assert density > 0 and density <= 1, density
global g_list
global g_size
global g_count
g_list = [[None]*size for _ in range(size)]
count = size * size * 1.0 * density // 1
g_size = size
g_count = count
i = 0
while i < count:
idx = random.randint(0, size*size-1)
x, y = (idx // size, idx % size)
if g_list[x][y] is None:
g_list[x][y] = random_tuple()
i += 1
global g_get_keys
for i in range(1000000):
idx = random.randint(0, size*size-1)
g_get_keys.append((idx // size, idx % size))
global g_set_keys
g_set_keys = g_get_keys
def get():
''' Testing getting '''
global g_get_keys
global g_size
s = time.time()
for _x, _y in g_get_keys:
if g_list[_x][_y] is not None:
_ = g_list[_x][_y]  # touch the value so the read isn't optimized away
return time.time() - s
def set():
''' Testing setting '''
global g_set_keys
global g_size
tmp = [1,2,3,4,5]
s = time.time()
for _x, _y in g_set_keys:
if g_list[_x][_y] is not None:
last = g_list[_x][_y]
g_list[_x][_y] = tmp
tmp = last
return time.time() - s
def scan():
global g_list
s = time.time()
for x in g_list:
for i in x:
if i is not None:
_ = i[0]
return time.time() - s
def main():
setup(700, 0.7)
benchmark(get)
benchmark(set)
benchmark(scan)
if __name__ == "__main__":
main()
| 21.735632 | 66 | 0.561079 | 301 | 1,891 | 3.299003 | 0.249169 | 0.060423 | 0.042296 | 0.049345 | 0.209466 | 0.141994 | 0.130916 | 0.07855 | 0.04431 | 0 | 0 | 0.027929 | 0.31835 | 1,891 | 86 | 67 | 21.988372 | 0.742436 | 0.114754 | 0 | 0.292308 | 0 | 0 | 0.004893 | 0 | 0 | 0 | 0 | 0 | 0.030769 | 1 | 0.076923 | false | 0 | 0.046154 | 0 | 0.169231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
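# --- Hedged stand-ins for the local `lib` module imported above (lib.py is not
# shown; these assumptions are just enough to run the file standalone after
# renaming them to benchmark / random_tuple).
import random

def benchmark_sketch(fn, runs=3):
    # each test function above returns its own elapsed time in seconds
    times = [fn() for _ in range(runs)]
    print(f'{fn.__name__}: best {min(times):.4f}s over {runs} runs')

def random_tuple_sketch(n=5):
    return tuple(random.random() for _ in range(n))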
3b7f2b6e0d9ea9418bfa786631467a10dace678f | 10,622 | py | Python | src/stepfunctions/inputs/placeholders.py | ParidelPooya/aws-step-functions-data-science-sdk-python | 173b4635d8fb3ce569515bcfb6fee1d5a2c29b63 | [
"Apache-2.0"
] | 211 | 2019-11-07T17:56:56.000Z | 2022-03-23T03:04:43.000Z | src/stepfunctions/inputs/placeholders.py | ParidelPooya/aws-step-functions-data-science-sdk-python | 173b4635d8fb3ce569515bcfb6fee1d5a2c29b63 | [
"Apache-2.0"
] | 179 | 2019-11-08T00:47:08.000Z | 2022-03-10T03:03:37.000Z | src/stepfunctions/inputs/placeholders.py | ParidelPooya/aws-step-functions-data-science-sdk-python | 173b4635d8fb3ce569515bcfb6fee1d5a2c29b63 | [
"Apache-2.0"
] | 86 | 2019-11-20T12:59:03.000Z | 2022-03-23T03:04:47.000Z | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import absolute_import
import collections
import json
from stepfunctions.inputs.utils import flatten, replace_type_with_str
ValidationResult = collections.namedtuple('ValidationResult', 'valid keys_missing keys_type_mismatch')
class Placeholder(object):
"""
A collection of Placeholder variables.
"""
def __init__(self, schema=None, **kwargs):
"""
Args:
schema (dict, optional): Schema for the placeholder collection. (default: None)
Example below::
{
'ModelName': str,
'JobName': str,
'Hyperparameters': {
'tol': float
}
}
Keyword Args:
name (str, optional): Name of the placeholder variable. (default: None)
type (type, optional): Type of the placeholder variable. (default: None)
parent (Placeholder, optional): Parent variable for a placeholder variable. (default: None)
"""
self.store = {}
self.immutable = False
self.schema = schema
if self.schema:
self._set_schema(schema)
self._make_immutable()
self.json_str_template = "{}"
self.name = kwargs.get("name")
self.type = kwargs.get("type")
self.parent = kwargs.get("parent")
def get(self, name, type):
"""
Create a placeholder variable with an associated type.
Args:
name (str): Name of the placeholder variable.
type (type): Type of the placeholder variable.
Raises:
ValueError: If placeholder variable with the same name but different type already exists.
ValueError: If placeholder variable does not fit into a previously specified schema for the placeholder collection.
Returns:
Placeholder: Placeholder variable.
"""
if not self._is_valid_name(name):
raise ValueError('Key name can only be string or integer')
if name in self.store:
curr_variable = self.store[name]
if curr_variable.type != type:
raise ValueError('Key already exists with a different value type: {current_value_type}'.format(current_value_type=curr_variable.type))
return curr_variable
else:
self.store[name] = self._create_variable(name=name, parent=self, type=type)
return self.store[name]
def get_schema_as_dict(self):
"""
Generate a schema for the placeholder collection as a Python dictionary.
Returns:
dict: Placeholder collection schema.
"""
schema = {}
for k, v in self.store.items():
if v._is_empty():
schema[k] = v.type or str
else:
schema[k] = v.get_schema_as_dict()
return schema
def get_schema_as_json(self, pretty=False):
"""
Generate a schema for the placeholder collection as a JSON formatted string.
Args:
pretty (bool, optional): Boolean flag set to `True` if JSON string should be prettified. `False`, otherwise. (default: False)
Returns:
str: JSON formatted string representation of the block.
"""
dict_schema_str = replace_type_with_str(self.get_schema_as_dict())
if pretty:
return json.dumps(dict_schema_str, indent=4)
return json.dumps(dict_schema_str)
def contains(self, placeholder):
"""
Check if the placeholder collection contains the specified placeholder variable.
Args:
placeholder (Placeholder): Placeholder variable to search for, in the collection.
Returns:
bool: `True` if placeholder variable was found in the collection. `False`, otherwise.
"""
for k, v in self.store.items():
if placeholder == v:
return True
elif v.contains(placeholder):
return True
return False
def __contains__(self, placeholder):
"""
Containment check operator for placeholder variables.
"""
return self.contains(placeholder)
def validate(self, input):
"""
Validate a specified input against the placeholder collection schema.
Args:
input (dict): Input to validate against the placeholder collection schema.
Returns:
ValidationResult: Named tuple with the keys:
`valid` (Boolean): Representing the result of validation ,
`keys_missing` (list(str)): List of keys missing in the input ,
`keys_type_mismatch` (list(str), type, type): List of tuples with key name, expected type, and provided type.
"""
if input is None:
return ValidationResult(valid=False, keys_missing=None, keys_type_mismatch=None)
flattened_schema = flatten(self.get_schema_as_dict())
flattened_input = flatten(input)
keys_missing = [i for i in flattened_schema if i not in flattened_input]
keys_type_mismatch = []
for k, v in flattened_input.items():
if k in flattened_schema and not isinstance(v, flattened_schema.get(k)):
keys_type_mismatch.append((k, flattened_schema.get(k), type(v)))
if len(keys_missing) > 0 or len(keys_type_mismatch) > 0:
valid = False
else:
valid = True
return ValidationResult(valid=valid, keys_missing=keys_missing, keys_type_mismatch=keys_type_mismatch)
def _create_variable(self, name, parent, type=None):
raise NotImplementedError
def _get_path(self):
"""
Get path to a placeholder variable node in the collection.
"""
path = []
node = self
while node.name is not None:
path.append(node.name)
node = node.parent
path.reverse()
return path
def _is_empty(self):
"""
Check if the store for a placeholder collection/variable is empty.
"""
return len(self.store) == 0
def _set_schema(self, schema, path=None):
    """
    Set the schema for a placeholder collection.
    """
    path = path or []
for k, v in schema.items():
if isinstance(v, dict):
self._set_schema(v, path + [k])
else:
current = self
for node in path:
current = current.get(node, dict)
current.get(k, v)  # getting the leaf registers the typed variable in the store
def _make_immutable(self):
"""
Make a placeholder collection (including all variables contained) immutable.
"""
for k, v in self.store.items():
if isinstance(v, Placeholder):
v._make_immutable()
self.immutable = True
def _is_valid_name(self, name):
if isinstance(name, str) or isinstance(name, int):
return True
else:
return False
def __getitem__(self, name):
"""
Subscript operator to build placeholder variables.
"""
if not self._is_valid_name(name):
raise ValueError('Key name can only be string or integer')
if name in self.store:
return self.store[name]
else:
self.store[name] = self._create_variable(name=name, parent=self)
return self.store[name]
def _join_path(self, path):
subscript_list = []
for i in path:
if isinstance(i, str):
subscript_list.append("['{}']".format(i))
elif isinstance(i, int):
subscript_list.append('[{}]'.format(i))
return "".join(subscript_list)
def to_jsonpath(self):
"""
Returns a JSON path representation of the placeholder variable to be used for step parameters.
Returns:
str: JSON path representation of the placeholder variable
"""
return self.json_str_template.format(self._join_path(self._get_path()))
class ExecutionInput(Placeholder):
"""
Top-level class for execution input placeholders.
"""
def __init__(self, schema=None, **kwargs):
super(ExecutionInput, self).__init__(schema, **kwargs)
self.json_str_template = '$$.Execution.Input{}'
def _create_variable(self, name, parent, type=None):
"""
Creates a placeholder variable for Workflow Input.
A placeholder variable can only be created if the collection is not immutable due to a pre-specified schema.
"""
if self.immutable:
raise ValueError("Placeholder variable does not conform to schema set for the placeholder collection.")
if type:
return ExecutionInput(name=name, parent=parent, type=type)
else:
return ExecutionInput(name=name, parent=parent)
class StepInput(Placeholder):
"""
Top-level class for step input placeholders.
"""
def __init__(self, schema=None, **kwargs):
super(StepInput, self).__init__(schema, **kwargs)
self.json_str_template = '${}'
def _create_variable(self, name, parent, type=None):
"""
Creates a placeholder variable for Step Input.
A placeholder variable can only be created if the collection is not immutable due to a pre-specified schema.
"""
if self.immutable:
raise ValueError("Placeholder variable does not conform to schema set for the placeholder collection.")
if type:
return StepInput(name=name, parent=parent, type=type)
else:
return StepInput(name=name, parent=parent)
| 36.129252 | 150 | 0.588119 | 1,194 | 10,622 | 5.10469 | 0.183417 | 0.065463 | 0.035439 | 0.026579 | 0.337818 | 0.264643 | 0.225431 | 0.210336 | 0.16735 | 0.136505 | 0 | 0.001685 | 0.329505 | 10,622 | 293 | 151 | 36.25256 | 0.854114 | 0.365468 | 0 | 0.272059 | 0 | 0 | 0.069477 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147059 | false | 0 | 0.029412 | 0 | 0.367647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
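# --- Hedged usage sketch, grounded in the classes above (wiring the placeholders
# into actual Step Functions states is omitted).
if __name__ == '__main__':
    execution_input = ExecutionInput(schema={
        'ModelName': str,
        'Hyperparameters': {'tol': float},
    })
    print(execution_input['ModelName'].to_jsonpath())
    # -> $$.Execution.Input['ModelName']
    print(execution_input.validate({'ModelName': 'm1', 'Hyperparameters': {'tol': 0.1}}))
    # -> ValidationResult(valid=True, keys_missing=[], keys_type_mismatch=[])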
3b8031ca25667feb25f8274399a41253e2becc80 | 1,177 | py | Python | src/mrack/transformers/static.py | dav-pascual/mrack | f31b4ef1f1f847c3e95567ec012323be65a1e177 | [
"Apache-2.0"
] | 2 | 2021-05-26T15:57:13.000Z | 2021-08-21T02:14:01.000Z | src/mrack/transformers/static.py | dav-pascual/mrack | f31b4ef1f1f847c3e95567ec012323be65a1e177 | [
"Apache-2.0"
] | 81 | 2020-10-02T08:30:56.000Z | 2022-03-31T11:47:41.000Z | src/mrack/transformers/static.py | dav-pascual/mrack | f31b4ef1f1f847c3e95567ec012323be65a1e177 | [
"Apache-2.0"
] | 7 | 2020-10-02T08:13:57.000Z | 2022-03-31T11:22:53.000Z | # Copyright 2020 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Static transformer module."""
import typing
from copy import deepcopy
from mrack.transformers.transformer import Transformer
CONFIG_KEY = "static"
class StaticTransformer(Transformer):
"""
Static transformer.
Does almost no operation as there is nothing to provision.
"""
_config_key = CONFIG_KEY
_required_config_attrs: typing.List[str] = []
_required_host_attrs = ["name", "os", "group", "ip"]
def create_host_requirement(self, host):
"""Create single input for Static provisioner."""
self.dsp_name = "Static"
return deepcopy(host)
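# Hedged usage sketch (editor's addition; the host dict shape is illustrative
# and the Transformer base class may require extra initialisation):
#   transformer = StaticTransformer()
#   req = transformer.create_host_requirement(
#       {"name": "srv1", "os": "fedora-35", "group": "ipaserver", "ip": "192.168.0.5"})
#   # `req` is a deep copy, so mutating it later never touches the input host.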
| 30.179487 | 74 | 0.725573 | 160 | 1,177 | 5.25625 | 0.63125 | 0.071344 | 0.030916 | 0.03805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008386 | 0.189465 | 1,177 | 38 | 75 | 30.973684 | 0.873166 | 0.595582 | 0 | 0 | 0 | 0 | 0.057604 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.818182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b816baf5eaa46bd1b527f1e92fb14dd928f8b46 | 1,185 | py | Python | data/states/splash.py | andarms/pyweek20 | 79a5ac58c3ca06be61e5a05af0abd78a8c79e8df | [
"MIT"
] | null | null | null | data/states/splash.py | andarms/pyweek20 | 79a5ac58c3ca06be61e5a05af0abd78a8c79e8df | [
"MIT"
] | null | null | null | data/states/splash.py | andarms/pyweek20 | 79a5ac58c3ca06be61e5a05af0abd78a8c79e8df | [
"MIT"
] | null | null | null | import pygame as pg
from . import state  # explicit relative import; implicit relative imports are Python 2 only
from .. import util
class SplashState(state._State):
def __init__(self):
super(SplashState, self).__init__()
self.bg_color = (0,0,0)
self.text_color = (155,255,155)
        self.duration = 3  # seconds
self.image = pg.Surface(util.SCREEN_SIZE)
self.next = "MainMenu"
self.title = "HackerMan"
self.titleSurface = self.make_title_surface()
def start(self, data, current_time):
super(SplashState, self).start(data, current_time)
self.duration = 3
def make_title_surface(self):
font = pg.font.Font(util.FONTS['west-england.regular'], 40)
return font.render(self.title, False, self.text_color)
def handle_events(self, event):
if event.type == pg.KEYDOWN:
if event.key == pg.K_RETURN:
self.done = True
def update(self, dt, current_time, keys):
self.duration -= dt
if self.duration <= 0:
self.done = True
def render(self, surface):
self.image.fill(self.bg_color)
self.image.blit(self.titleSurface, util.SCREEN_RECT.center)
surface.blit(self.image, (0,0))
| 28.214286 | 67 | 0.616034 | 156 | 1,185 | 4.525641 | 0.397436 | 0.067989 | 0.056657 | 0.042493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021789 | 0.264135 | 1,185 | 41 | 68 | 28.902439 | 0.787844 | 0.002532 | 0 | 0.129032 | 0 | 0 | 0.031409 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.193548 | false | 0 | 0.096774 | 0 | 0.354839 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b86bd629224d587375d982d9e21ec4c5e570896 | 4,230 | py | Python | root/os/DSAA/DataStructuresAndAlgorithms/python/chutils/chutils/utils/time_get_lock_info.py | chyidl/chyidlTutorial | a033e0a57abf84fdbb61e57736822f9126db6ff7 | [
"MIT"
] | 5 | 2018-10-17T05:57:39.000Z | 2021-07-05T15:38:24.000Z | root/os/DSAA/DataStructuresAndAlgorithms/python/chutils/chutils/utils/time_get_lock_info.py | chyidl/chyidlTutorial | a033e0a57abf84fdbb61e57736822f9126db6ff7 | [
"MIT"
] | 2 | 2021-04-14T00:48:43.000Z | 2021-04-14T02:20:50.000Z | root/os/DSAA/DataStructuresAndAlgorithms/python/chutils/chutils/utils/time_get_lock_info.py | chyidl/chyidlTutorial | a033e0a57abf84fdbb61e57736822f9126db6ff7 | [
"MIT"
] | 3 | 2019-03-02T14:36:19.000Z | 2022-03-18T10:12:09.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# time_get_lock_info.py
# utils
#
# 🎂"Here's to the crazy ones. The misfits. The rebels.
# The troublemakers. The round pegs in the square holes.
# The ones who see things differently. They're not fond
# of rules. And they have no respect for the status quo.
# You can quote them, disagree with them, glorify or vilify
# them. About the only thing you can't do is ignore them.
# Because they change things. They push the human race forward.
# And while some may see them as the crazy ones, we see genius.
# Because the people who are crazy enough to think they can change
# the world, are the ones who do."
#
# Created by Chyi Yaqing on 03/16/19 12:01.
# Copyright © 2019. Chyi Yaqing.
# All rights reserved.
#
# Distributed under terms of the MIT
"""
时钟的实现与C库函数绑定在一起,所以一些细节使基于特定平台的
"""
import os
import textwrap # Text wrapping and filling
import time # Time access and conversions
import hashlib
available_clocks = [
    ('clock', time.clock),  # deprecated; removed in Python 3.8, so this entry fails on newer interpreters
('monotonic', time.monotonic),
('perf_counter', time.perf_counter),
('process_time', time.process_time),
('thread_time', time.thread_time),
    ('time', time.time),  # seconds since the epoch [Unix time 1970-01-01 00:00] returned as a float
]
for (clock_name, func) in available_clocks:
print(textwrap.dedent('''\
{name}:
adjustable : {info.adjustable}
implementation : {info.implementation}
monotonic : {info.monotonic}
resolution : {info.resolution}
current : {current}
''').format(
name=clock_name,
info=time.get_clock_info(clock_name),
current=func()))
# time.time() returns seconds since the [epoch] as a float
print("The time is: ", time.time())
# time.ctime() Convert a time expressed in seconds since the epoch to a string
# representing local time
print('The time is:', time.ctime())
later = time.time()+15
print('15 secs from now :', time.ctime(later))
# The value time.time() returns comes from the system clock, which can be
# changed by the user or by system services, so repeated calls to time() may
# produce values that jump backwards. monotonic() always moves forward.
# The monotonic clock is not affected by system clock updates.
start = time.monotonic()
time.sleep(0.1)
end = time.monotonic()
print('start : {:>9.2f}'.format(start))
print('end : {:>9.2f}'.format(end))
print('span : {:>9.2f}'.format(end - start))
# time.perf_counter() : fractional seconds of a performance counter
# data used to compute the SHA-1 checksum
data = open(__file__, 'rb').read()
loop_start = time.perf_counter()
for i in range(5):
iter_start = time.perf_counter()
h = hashlib.sha1()
for i in range(300000):
h.update(data)
cksum = h.digest()
now = time.perf_counter()
loop_elapsed = now - loop_start
iter_elapsed = now - iter_start
print(time.ctime(), ': {:0.3f} {:0.3f}'.format(iter_elapsed, loop_elapsed))
# struct_time: the type of the time value sequence returned by gmtime(), localtime(), and strptime()
def show_struct(s):
print(' tm_year :', s.tm_year)
print(' tm_mon :', s.tm_mon)
print(' tm_mday :', s.tm_mday)
print(' tm_hour :', s.tm_hour)
print(' tm_min :', s.tm_min)
print(' tm_sec :', s.tm_sec)
print(' tm_wday :', s.tm_wday)
print(' tm_yday :', s.tm_yday)
print(' tm_isdst:', s.tm_isdst)
print('gmtime: UTC')
show_struct(time.gmtime())
print('\nlocaltime:')
show_struct(time.localtime())
print('\nmktime:', time.mktime(time.localtime()))
# The current time depends on the timezone setting; a program can set the
# timezone itself or fall back to the system default.
# Changing the timezone does not change the actual time, only how it is displayed.
def show_zone_info():
print(' TZ :', os.environ.get('TZ', '(not set)'))
print(' tzname :', time.tzname)
print(' Zone : {} ({})'.format(time.timezone, (time.timezone / 3600)))
print(' DST :', time.daylight)
print(' Time :', time.ctime())
print()
print('Default :')
show_zone_info()
ZONES = [
'GMT',
'Asia/Hong_Kong',
]
for zone in ZONES:
    # to change the timezone, set the TZ environment variable, then call tzset()
os.environ['TZ'] = zone
time.tzset()
print(zone, ':')
show_zone_info()
# parsing and formatting times
# strptime() strftime()
now = time.ctime(1552717743.187825)
print('Now:', now)
parsed = time.strptime(now)
print('\nParsed:')
show_struct(parsed)
print('\nFormatted:', time.strftime("%a %b %d %H:%M:%S %Y", parsed))
| 27.647059 | 79 | 0.644681 | 582 | 4,230 | 4.582474 | 0.42268 | 0.035996 | 0.028121 | 0.022497 | 0.028496 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021226 | 0.20922 | 4,230 | 152 | 80 | 27.828947 | 0.775486 | 0.33948 | 0 | 0.023529 | 0 | 0 | 0.270742 | 0.007642 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023529 | false | 0 | 0.047059 | 0 | 0.070588 | 0.364706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b895d1b25f903e8bc77ab1b05b04c1d12622eea | 5,995 | py | Python | poisson_problem/poisson.py | timudk/solving_pdes_with_neural_nets | 4aeca4ee1aaa6054307e1051879bed3160ffc247 | [
"MIT"
] | 69 | 2019-04-16T06:42:22.000Z | 2021-04-06T02:39:21.000Z | poisson_problem/poisson.py | timudk/solving_pdes_with_neural_nets | 4aeca4ee1aaa6054307e1051879bed3160ffc247 | [
"MIT"
] | null | null | null | poisson_problem/poisson.py | timudk/solving_pdes_with_neural_nets | 4aeca4ee1aaa6054307e1051879bed3160ffc247 | [
"MIT"
] | 19 | 2019-04-16T14:31:47.000Z | 2021-06-05T21:46:53.000Z | import tensorflow as tf
tf.set_random_seed(42)
import numpy as np
from scipy import integrate
import neural_networks
import poisson_problem
import matplotlib.pyplot as plt
import sys, getopt
class sampling_from_dataset:
def __init__(self, filepath, total_samples):
self.filepath = filepath
self.total_samples = total_samples
self.last_grab_int = 0
self.last_grab_bou = 0
def load_dataset(self):
self.dataset = np.genfromtxt(self.filepath, delimiter=',')
def increase_grab_number(self, num, batchsize):
num += batchsize
if(num==self.total_samples):
return 0
else:
return num
def interior_samples(self, batchsize):
sampling_int_draw_x = self.dataset[self.last_grab_int:(self.last_grab_int+batchsize), 0]
sampling_int_draw_y = self.dataset[self.last_grab_int:(self.last_grab_int+batchsize), 1]
self.last_grab_int = self.increase_grab_number(self.last_grab_int, batchsize)
return sampling_int_draw_x, sampling_int_draw_y
def boundary_samples(self, batchsize):
sampling_bou_draw_x = self.dataset[self.last_grab_bou:(self.last_grab_bou+batchsize), 2]
sampling_bou_draw_y = self.dataset[self.last_grab_bou:(self.last_grab_bou+batchsize), 3]
self.last_grab_bou = self.increase_grab_number(self.last_grab_bou, batchsize)
return sampling_bou_draw_x, sampling_bou_draw_y
def main(argv):
# DEFAULT
SENSOR_DATA = False
N_LAYERS = 1
BATCHSIZE = 1000
MAX_ITER = 50000
DO_SAVE = False
SEED = 42
try:
opts, args = getopt.getopt(argv,"hb:n:m:d:r:s:",["batchsize=","n_layers=", "max_iterations=", "sensor_data=", "random_seed=", "save_network="])
except getopt.GetoptError:
print('poisson.py -b <batchsize> -n <n_layers> -m <max_iterations> -d <sensor_data> -r <random_seed> -s <save_network>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('poisson.py -b <batchsize> -n <n_layers> -m <max_iterations> -d <sensor_data> -r <random_seed> -s <save_network>')
sys.exit()
elif opt in ("-b", "--batchsize"):
BATCHSIZE = int(arg)
elif opt in ("-n", "--n_layers"):
N_LAYERS = int(arg)
elif opt in ("-m", "--max_iterations"):
MAX_ITER = int(arg)
elif opt in ("-d", "--sensor_data"):
if(int(arg)==1):
SENSOR_DATA = True
elif opt in ("-r", "--random_seed"):
SEED = int(arg)
tf.set_random_seed(SEED)
elif opt in ("-s", "--save_network"):
DO_SAVE = bool(int(arg))
if DO_SAVE:
print("Saving network after training.")
HIDDEN_UNITS = []
for i in range(N_LAYERS):
HIDDEN_UNITS.append(16)
if(SENSOR_DATA):
save_name = 'test_model/' + str(len(HIDDEN_UNITS)) + '_layer_sq_loss_' + str(BATCHSIZE) + '_m_iter_' + str(MAX_ITER) + '_rs_' + str(SEED) + '_wsd'
else:
save_name = 'test_model/' + str(len(HIDDEN_UNITS)) + '_layer_sq_loss_' + str(BATCHSIZE) + '_m_iter_' + str(MAX_ITER) + '_rs_' + str(SEED)
problem = poisson_problem.poisson_2d()
sampler = sampling_from_dataset('datasets/' + str(BATCHSIZE), BATCHSIZE)
sampler.load_dataset()
NUM_INPUTS = 2
neural_network = neural_networks.neural_network(NUM_INPUTS, 1, HIDDEN_UNITS)
int_var = tf.placeholder(tf.float64, [None, NUM_INPUTS])
bou_var = tf.placeholder(tf.float64, [None, NUM_INPUTS])
sensor_var = tf.placeholder(tf.float64, [None, NUM_INPUTS])
value_int = neural_network.value(int_var)
value_bou = neural_network.value(bou_var)
value_sensor = neural_network.value(sensor_var)
grad = neural_network.first_derivatives(int_var)
grad_grad= neural_network.second_derivatives(int_var)
grad_grad_sensor = neural_network.second_derivatives(sensor_var)
sol_int = tf.placeholder(tf.float64, [None, 1])
sol_bou = tf.placeholder(tf.float64, [None, 1])
sum_of_second_derivatives = 0.0
sum_of_second_derivatives_sensor = 0.0
for i in range(NUM_INPUTS):
sum_of_second_derivatives += grad_grad[i]
sum_of_second_derivatives_sensor += grad_grad_sensor[i]
loss_int = tf.square(sum_of_second_derivatives+sol_int)
loss_bou = tf.square(value_bou-sol_bou)
loss_sensor_int = tf.square(sum_of_second_derivatives_sensor)
loss_sensor_bou = tf.square(value_sensor)
loss = tf.sqrt(tf.reduce_mean(loss_int + loss_bou))
sensor_loss = tf.sqrt(tf.reduce_mean(loss_int) + tf.reduce_mean(loss_bou) + tf.reduce_mean(loss_sensor_int) + tf.reduce_mean(loss_sensor_bou))
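    # Editor's note: this is the standard PINN-style least-squares objective for
    # the Poisson problem -laplace(u) = f with Dirichlet data g, i.e. roughly
    #   loss = sqrt( E[(laplace(u) + f)^2] + E[(u - g)^2] )
    # estimated over the sampled interior and boundary points; the "sensor"
    # variant adds residual/value constraints at four known corner points.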
train_scipy = tf.contrib.opt.ScipyOptimizerInterface(loss, method='BFGS', options={'gtol':1e-14, 'disp':True, 'maxiter':MAX_ITER})
train_scipy_sensor = tf.contrib.opt.ScipyOptimizerInterface(sensor_loss, method='BFGS', options={'gtol':1e-14, 'disp':True, 'maxiter':MAX_ITER})
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(init)
int_draw_x, int_draw_y = sampler.interior_samples(BATCHSIZE)
int_draw_x = np.reshape(int_draw_x, (BATCHSIZE, 1))
int_draw_y = np.reshape(int_draw_y, (BATCHSIZE, 1))
boundary_draw_x, boundary_draw_y = sampler.boundary_samples(BATCHSIZE)
boundary_draw_x = np.reshape(boundary_draw_x, (BATCHSIZE, 1))
boundary_draw_y = np.reshape(boundary_draw_y, (BATCHSIZE, 1))
int_draw = np.concatenate([int_draw_x, int_draw_y], axis=1)
bou_draw = np.concatenate([boundary_draw_x, boundary_draw_y], axis=1)
f = problem.rhs(int_draw)
f = np.reshape(np.array(f), (BATCHSIZE, 1))
bou = problem.velocity(bou_draw)
bou = np.reshape(np.array(bou), (BATCHSIZE, 1))
if(SENSOR_DATA):
sensor_points_x = np.reshape(np.array([0.0, 1.0, 0.0, 1.0]), (4,1))
sensor_points_y = np.reshape(np.array([0.0, 0.0, 1.0, 1.0]), (4,1))
sensor_points = np.concatenate([sensor_points_x, sensor_points_y], axis=1)
print(sensor_points)
train_scipy_sensor.minimize(sess, feed_dict={sol_int:f, sol_bou:bou, int_var:int_draw, bou_var:bou_draw, sensor_var: sensor_points})
else:
train_scipy.minimize(sess, feed_dict={sol_int:f, sol_bou:bou, int_var:int_draw, bou_var:bou_draw})
if DO_SAVE:
save_path = saver.save(sess, save_name)
print("Model saved in path: %s" % save_path)
if __name__ == '__main__':
main(sys.argv[1:])
| 33.121547 | 148 | 0.732944 | 951 | 5,995 | 4.290221 | 0.17245 | 0.027451 | 0.041176 | 0.025735 | 0.388971 | 0.320588 | 0.277941 | 0.231863 | 0.203922 | 0.186765 | 0 | 0.015349 | 0.130609 | 5,995 | 180 | 149 | 33.305556 | 0.76746 | 0.001168 | 0 | 0.069767 | 0 | 0.015504 | 0.097895 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.054264 | 0 | 0.139535 | 0.03876 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b99148519a93c8543e9564b329c4137fc41b8bf | 1,509 | py | Python | PythonBot.py | quasiyoke/PythonBot | d665a1580b683b8dbf4c68f50e112eb9ec30f8d0 | [
"Apache-2.0"
] | 9 | 2021-07-07T16:57:17.000Z | 2021-11-14T17:45:10.000Z | PythonBot.py | quasiyoke/PythonBot | d665a1580b683b8dbf4c68f50e112eb9ec30f8d0 | [
"Apache-2.0"
] | null | null | null | PythonBot.py | quasiyoke/PythonBot | d665a1580b683b8dbf4c68f50e112eb9ec30f8d0 | [
"Apache-2.0"
] | 2 | 2021-11-20T10:26:18.000Z | 2021-11-26T09:18:13.000Z | from substrateinterface import SubstrateInterface, Keypair
from substrateinterface.exceptions import SubstrateRequestException
from scalecodec.type_registry import load_type_registry_file
import time
substrate = SubstrateInterface(
url='wss://ws.mof.sora.org',
ss58_format=69,
type_registry_preset='default',
type_registry=load_type_registry_file('custom_types.json'),
)
keypair = Keypair.create_from_mnemonic('<your 12 word passphrase here>')
call = substrate.compose_call(
call_module='LiquidityProxy',
call_function='swap',
call_params={
'dex_id': '0',
'input_asset_id': '0x0200050000000000000000000000000000000000000000000000000000000000',
'output_asset_id': '0x0200000000000000000000000000000000000000000000000000000000000000',
'swap_amount': {'WithDesiredInput': {'desired_amount_in': '13370000000000000000000', 'min_amount_out': '0'}},
'selected_source_types': ["XYKPool","MulticollateralBondingCurvePool"],
'filter_mode': 'AllowSelected'
}
)
while True:
try:
extrinsic = substrate.create_signed_extrinsic(call=call, keypair=keypair)
receipt = substrate.submit_extrinsic(extrinsic, wait_for_inclusion=False)
print("Extrinsic '{}' sent".format(receipt.extrinsic_hash))
# print("Extrinsic '{}' sent and included in block '{}'".format(receipt.extrinsic_hash, receipt.block_hash))
except Exception as e:
print("Failed to send: {}".format(e))
time.sleep(100)
| 33.533333 | 117 | 0.732936 | 152 | 1,509 | 7.019737 | 0.585526 | 0.056232 | 0.029991 | 0.037488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.128728 | 0.155732 | 1,509 | 44 | 118 | 34.295455 | 0.708791 | 0.070245 | 0 | 0 | 0 | 0 | 0.330951 | 0.162974 | 0 | 0 | 0.094353 | 0 | 0 | 1 | 0 | false | 0.032258 | 0.129032 | 0 | 0.129032 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b9b35f7c92754e4b2f2e40b05e20b3c368edfaa | 2,822 | py | Python | mutalyzer_mutator/mutator.py | mutalyzer/mutator | 43a9fc929e054552ef6a2ed2d0cdf71e49ebf005 | [
"MIT"
] | null | null | null | mutalyzer_mutator/mutator.py | mutalyzer/mutator | 43a9fc929e054552ef6a2ed2d0cdf71e49ebf005 | [
"MIT"
] | null | null | null | mutalyzer_mutator/mutator.py | mutalyzer/mutator | 43a9fc929e054552ef6a2ed2d0cdf71e49ebf005 | [
"MIT"
] | null | null | null | """
Module to mutate sequences based on a variants list.
Assumptions for which no check is performed:
- Only ``deletion insertion`` operations.
- Only exact locations, i.e., no uncertainties such as `10+?`.
- Locations are zero-based right-open with ``end >= start``.
- Variant locations do not overlap.
Notes:
- If any of the above is not met, the result will be bogus.
- There can be empty inserted lists.
"""
from .util import reverse_complement
class UnknownInsertedSource(Exception):
pass
def _get_inverted(sequence):
"""
Reverse complement inversion using code extracted from BioPython.
"""
return reverse_complement(sequence)
def _get_start_end(location):
"""
Get the start and the end of a location object. For point locations both
start and end equal the position value.
"""
if location["type"] == "range":
return location["start"]["position"], location["end"]["position"]
elif location["type"] == "point":
return location["position"], location["position"]
def _get_inserted_sequence(inserted, sequences):
"""
Retrieves the actual sequence mentioned in the insertion.
"""
if inserted["source"] == "description":
sequence = inserted["sequence"]
elif inserted["source"] == "reference":
sequence = sequences[inserted["source"]][
slice(*_get_start_end(inserted["location"]))
]
elif isinstance(inserted["source"], dict) and inserted["source"].get("id"):
sequence = sequences[inserted["source"]["id"]][
slice(*_get_start_end(inserted["location"]))
]
else:
raise UnknownInsertedSource("Inserted source not supported.")
if (
inserted.get("repeat_number")
and inserted["repeat_number"].get("value") is not None
):
sequence = sequence * inserted.get("repeat_number")["value"]
if inserted.get("inverted"):
sequence = _get_inverted(sequence)
return sequence
def mutate(sequences, variants):
"""
Mutate the reference sequence under ``sequences["reference"]`` according
to the provided variants operations.
:arg dict sequences: Sequences dictionary.
:arg list variants: Operations list.
:returns: Mutated sequence.
:rtype: str
"""
reference = sequences["reference"]
variants = sorted(variants, key=lambda v: (_get_start_end(v["location"])))
parts = []
current_index = 0
for variant in variants:
start, end = _get_start_end(variant["location"])
parts.append(reference[current_index:start])
for insertion in variant["inserted"]:
parts.append(_get_inserted_sequence(insertion, sequences))
current_index = end
parts.append(reference[current_index:])
return "".join(parts)
| 29.395833 | 79 | 0.665131 | 321 | 2,822 | 5.741433 | 0.376947 | 0.030385 | 0.029843 | 0.033641 | 0.069452 | 0.034726 | 0 | 0 | 0 | 0 | 0 | 0.001355 | 0.21545 | 2,822 | 95 | 80 | 29.705263 | 0.831075 | 0.318214 | 0 | 0.045455 | 0 | 0 | 0.143013 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.022727 | 0.022727 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b9b566f35bb3be3bbe04e1b0c6ea0b1acb1d8bc | 1,791 | py | Python | day11/day11_2.py | DanTGL/AdventOfCode2020 | bf7cd6a4fb7701155785b941facdc1e4859ba297 | [
"MIT"
] | null | null | null | day11/day11_2.py | DanTGL/AdventOfCode2020 | bf7cd6a4fb7701155785b941facdc1e4859ba297 | [
"MIT"
] | null | null | null | day11/day11_2.py | DanTGL/AdventOfCode2020 | bf7cd6a4fb7701155785b941facdc1e4859ba297 | [
"MIT"
] | null | null | null | import copy
from collections import defaultdict
inputs = [list(line) for line in open("day11/input").read().splitlines()]
nodes = defaultdict(lambda: [])
for y in range(len(inputs)):
for x in range(len(inputs[y])):
if inputs[y][x] != ".":
for i in range(-1, 2):
for j in range(-1, 2):
if 0 == j and 0 == i:
continue
index_x = x + j
index_y = y + i
while 0 <= index_y < len(inputs) and 0 <= index_x < len(inputs[y]):
if inputs[index_y][index_x] != ".":
nodes[x + len(inputs[y]) * y].append((index_y, index_x))
break
index_x += j
index_y += i
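# Editor's note: the nested loops above precompute, for every seat, the first
# seat visible in each of the eight directions (the part-2 "line of sight"
# rule), so each simulation round only does dictionary lookups instead of
# re-scanning the whole grid.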
def round(seats):
result = copy.deepcopy(seats)
for y in range(len(seats)):
for x in range(len(seats[y])):
if seats[y][x] != ".":
occupied_adjacent = 0
for node in nodes[x + len(seats[y]) * y]:
neighbour = seats[node[0]][node[1]]
if neighbour == "#":
occupied_adjacent += 1
if seats[y][x] == "L" and occupied_adjacent == 0:
result[y][x] = "#"
elif seats[y][x] == "#" and occupied_adjacent >= 5:
result[y][x] = "L"
return result
seats = inputs
while True:
prev_seats = copy.deepcopy(seats)
seats = round(seats)
if prev_seats == seats:
break
total_occupied = 0
for y in range(len(seats)):
for x in range(len(seats[y])):
if seats[y][x] == "#":
total_occupied += 1
print("Total seats occupied: " + str(total_occupied))
| 28.887097 | 87 | 0.460078 | 221 | 1,791 | 3.642534 | 0.230769 | 0.069565 | 0.074534 | 0.074534 | 0.195031 | 0.119255 | 0.119255 | 0.119255 | 0.119255 | 0.119255 | 0 | 0.01687 | 0.404243 | 1,791 | 62 | 88 | 28.887097 | 0.737582 | 0 | 0 | 0.130435 | 0 | 0 | 0.023438 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021739 | false | 0 | 0.043478 | 0 | 0.086957 | 0.021739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3b9faf565558a1df6837f883c4af01c1961579e5 | 4,806 | py | Python | centersnap/utils.py | ibaiGorordo/ONNX-CenterSnap-6D-Pose-and-Shape-Estimation | f8f98b08cce5259348616db4150064d713f17445 | [
"MIT"
] | 13 | 2022-03-19T14:42:50.000Z | 2022-03-31T14:04:31.000Z | centersnap/utils.py | ibaiGorordo/ONNX-CenterSnap-6D-Pose-and-Shape-Estimation | f8f98b08cce5259348616db4150064d713f17445 | [
"MIT"
] | null | null | null | centersnap/utils.py | ibaiGorordo/ONNX-CenterSnap-6D-Pose-and-Shape-Estimation | f8f98b08cce5259348616db4150064d713f17445 | [
"MIT"
] | 1 | 2022-03-24T12:56:25.000Z | 2022-03-24T12:56:25.000Z | import numpy as np
import cv2
import open3d as o3d
from .original_repo_utils import *
np.random.seed(3)
MAX_CLASS_NUM = 100 # In the original model there are only 7 classes
segmenation_colors = np.random.randint(0, 255, (MAX_CLASS_NUM, 3)).astype("uint8")
def util_draw_seg(seg_map, image, alpha = 0.5):
# Convert segmentation prediction to colors
color_segmap = segmenation_colors[seg_map]
# Resize to match the image shape
color_segmap = cv2.resize(color_segmap, (image.shape[1],image.shape[0]))
# Fuse both images
if(alpha == 0):
combined_img = np.hstack((image, color_segmap))
else:
combined_img = cv2.addWeighted(image, alpha, color_segmap, (1-alpha),0)
return combined_img
def util_draw_depth(depth_map, image, max_depth = 2, alpha = 0.5):
# Normalize estimated depth to color it
if max_depth:
min_depth = 0
depth_map = depth_map/1000 # Convert to meters
else:
min_depth = depth_map.min()
max_depth = depth_map.max()
norm_depth_map = 255*(depth_map-min_depth)/(max_depth-min_depth)
norm_depth_map[norm_depth_map < 0] =0
norm_depth_map[norm_depth_map >= 255] = 255
# Normalize and color the image
    # pass alpha by keyword: a bare positional 1 would land on the dst argument
    color_depth = cv2.applyColorMap(cv2.convertScaleAbs(norm_depth_map, alpha=1), cv2.COLORMAP_PLASMA)
# Resize to match the image shape
color_depth = cv2.resize(color_depth, (image.shape[1],image.shape[0]))
# Fuse both images
if(alpha == 0):
combined_img = np.hstack((image, color_depth))
else:
combined_img = cv2.addWeighted(image, alpha, color_depth, (1-alpha),0)
return combined_img
def util_draw_heatmap(heatmap, image, alpha = 0.5):
# Normalize and color the image
    # pass alpha by keyword: a bare positional 1 would land on the dst argument
    color_heatmap = cv2.applyColorMap(cv2.convertScaleAbs(heatmap*255, alpha=1), cv2.COLORMAP_JET)
# Resize to match the image shape
color_heatmap = cv2.resize(color_heatmap, (image.shape[1],image.shape[0]))
# Fuse both images
if(alpha == 0):
combined_img = np.hstack((image, color_heatmap))
else:
combined_img = cv2.addWeighted(image, alpha, color_heatmap, (1-alpha),0)
return combined_img
def util_draw_points2d(points_2d_list, image, label_ids):
    # draw the projected 2-D points of each object in its class colour
for i, points_2d in enumerate(points_2d_list):
color = (int(segmenation_colors[label_ids[i]][0]),
int(segmenation_colors[label_ids[i]][1]),
int(segmenation_colors[label_ids[i]][2]))
for point in points_2d.astype(int):
cv2.circle(image, (int(point[0]),int(point[1])), 1, color, -1)
return image
def util_draw_pose2d(boxes_2d_list, axes_2d_list, image, label_ids):
    # draw each object's projected 3-D bounding box and axes in a darker class colour
for i, (box, axis) in enumerate(zip(boxes_2d_list, axes_2d_list)):
color = (int(segmenation_colors[label_ids[i]][0]*0.5),
int(segmenation_colors[label_ids[i]][1]*0.5),
int(segmenation_colors[label_ids[i]][2]*0.5))
image = draw_bboxes(image, box, axis, color)
return image
def util_draw_2d(points_2d_list, boxes_2d_list, axes_2d_list, image, label_ids):
image = util_draw_points2d(points_2d_list, image, label_ids)
return util_draw_pose2d(boxes_2d_list, axes_2d_list, image, label_ids)
class Open3dVisualizer():
def __init__(self):
self.point_cloud = o3d.geometry.PointCloud()
self.boxes = o3d.geometry.LineSet()
self.o3d_started = False
self.vis = o3d.visualization.Visualizer()
self.vis.create_window()
def __call__(self, points_3d_list, boxes_3d_list, is_image = False):
self.update(points_3d_list, boxes_3d_list, is_image)
def update(self, points_3d_list, boxes_3d_list, is_image = False):
# Process points
all_points, all_boxes, all_lines = Open3dVisualizer.process_data(points_3d_list, boxes_3d_list)
# Add values to vectors
self.point_cloud.points = o3d.utility.Vector3dVector(all_points)
self.boxes.points = o3d.utility.Vector3dVector(all_boxes)
self.boxes.lines = o3d.utility.Vector2iVector(all_lines)
# Add geometries if it is the first time
if not self.o3d_started:
self.vis.add_geometry(self.point_cloud)
self.vis.add_geometry(self.boxes)
self.o3d_started = True
else:
self.vis.update_geometry(self.point_cloud)
self.vis.update_geometry(self.boxes)
self.vis.poll_events()
self.vis.update_renderer()
@staticmethod
def process_data(points_3d_list, boxes_3d_list):
all_points = points_3d_list[0]
all_boxes = boxes_3d_list[0]
all_lines = np.array(open_3d_lines)
box_count = 0
for points_3d, box_3d in zip(points_3d_list[1:], boxes_3d_list[1:]):
box_count += 1
all_points = np.vstack((all_points, points_3d))
all_boxes = np.vstack((all_boxes, box_3d))
all_lines = np.vstack((all_lines, np.array(open_3d_lines)+8*box_count))
# Fix axis to match open3d
all_points = -all_points[:,[0,1,2]]
all_boxes = -all_boxes[:,[0,1,2]]
all_points[:,0] = -all_points[:,0]
all_boxes[:,0] = -all_boxes[:,0]
return all_points, all_boxes, all_lines
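# Hedged usage sketch (editor's addition; the arrays are illustrative):
#   visualizer = Open3dVisualizer()
#   points_3d_list = [np.random.rand(100, 3)]  # one object's 3-D points
#   boxes_3d_list = [np.random.rand(8, 3)]     # its 8 bounding-box corners
#   visualizer(points_3d_list, boxes_3d_list)  # draws/updates the Open3D window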
| 28.105263 | 97 | 0.740117 | 773 | 4,806 | 4.324709 | 0.179819 | 0.025127 | 0.025127 | 0.04487 | 0.488783 | 0.44152 | 0.37242 | 0.3105 | 0.223153 | 0.165121 | 0 | 0.038005 | 0.140449 | 4,806 | 170 | 98 | 28.270588 | 0.771242 | 0.106742 | 0 | 0.134021 | 0 | 0 | 0.00117 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103093 | false | 0 | 0.041237 | 0 | 0.226804 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3ba02c62d0d88116daac3eef24c8c51ab27ced29 | 2,519 | py | Python | strokes_gained_calculations.py | brentonworley/strokes-gained | f3390de62a8987fd0a73ddb41837f7dcecb29387 | [
"MIT"
] | null | null | null | strokes_gained_calculations.py | brentonworley/strokes-gained | f3390de62a8987fd0a73ddb41837f7dcecb29387 | [
"MIT"
] | null | null | null | strokes_gained_calculations.py | brentonworley/strokes-gained | f3390de62a8987fd0a73ddb41837f7dcecb29387 | [
"MIT"
] | null | null | null | def calculate_strokes_gained(reference_value, user_putts):
'''Return the strokes gained based on reference and user input'''
return round((reference_value - user_putts), 2)
def calculate_strokes_gained_putting(reference_data, user_input):
'''Return the strokes gained value from a dictionary of user input
{distance, putts} and a list of reference strokes gained data.'''
    # the reference distance bounds are fixed, so compute them once outside the loop
    min_reference_distance = reference_data[0]['distance']
    max_reference_distance = reference_data[-1]['distance']
    # initialise the interpolation state so the first iteration is well defined
    last_distance = min_reference_distance
    last_putts = reference_data[0]['putts']
    position = 0
    not_matched = True
    # loop through the reference data to find the right value of average putts
    while not_matched:
        # set up the reference data
        baseline_data = reference_data[position]
        reference_distance = baseline_data['distance']
        reference_putts = baseline_data['putts']
# first check that the input is within the putt_range
if user_input['distance'] < min_reference_distance:
# use the lowest value of the reference putts
reference_putts = reference_data[0]['putts']
not_matched = False
elif user_input['distance'] > max_reference_distance:
# use the highest value of the reference putts
reference_putts = reference_data[-1]['putts']
not_matched = False
# if we get an exact match
elif user_input['distance'] == reference_distance:
reference_putts = reference_data[position]['putts']
not_matched = False
# if the putt distance sits between baseline values
elif user_input['distance'] < reference_distance and user_input['distance'] > last_distance:
distance_range = reference_distance - last_distance
putt_range = reference_putts - last_putts
proportion = (user_input['distance'] - last_distance)/distance_range
#update the reference_putts
reference_putts = round(last_putts + (putt_range * proportion), 2)
not_matched = False
# keep track of the last distance if you don't get an exact match
last_distance = reference_distance
last_putts = reference_putts
position += 1
print(f"Your input of distance of {user_input['distance']} feet equates to a tour averge of {reference_putts} putts")
strokes_gained = calculate_strokes_gained(reference_putts, user_input['putts'])
return strokes_gained
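# Hedged worked example (editor's addition; the reference numbers are
# illustrative, not real tour baseline data):
#   reference = [{'distance': 3, 'putts': 1.0}, {'distance': 5, 'putts': 1.2}]
#   calculate_strokes_gained_putting(reference, {'distance': 4, 'putts': 1})
#   # interpolated tour average = 1.0 + 0.5 * (1.2 - 1.0) = 1.1 putts,
#   # so strokes gained = round(1.1 - 1, 2) = 0.1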
| 45.8 | 121 | 0.687574 | 314 | 2,519 | 5.283439 | 0.254777 | 0.101266 | 0.081977 | 0.047016 | 0.203134 | 0.157926 | 0.112116 | 0.061483 | 0.061483 | 0 | 0 | 0.004199 | 0.243748 | 2,519 | 54 | 122 | 46.648148 | 0.866667 | 0.262406 | 0 | 0.125 | 0 | 0.03125 | 0.111232 | 0.013086 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.125 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e535a0eaed4fb2eca117828f9d5fa6d60c950b3 | 8,988 | py | Python | CRF/cnn_word_seg_torch.py | enjlife/bert4torch | 53694060fed0351649f87c79381740851a4a0b42 | [
"Apache-2.0"
] | 5 | 2021-09-09T03:25:58.000Z | 2022-02-22T06:43:08.000Z | CRF/cnn_word_seg_torch.py | enjlife/bert4torch | 53694060fed0351649f87c79381740851a4a0b42 | [
"Apache-2.0"
] | 1 | 2022-02-18T07:46:46.000Z | 2022-02-20T10:05:25.000Z | CRF/cnn_word_seg_torch.py | enjlife/bert4torch | 53694060fed0351649f87c79381740851a4a0b42 | [
"Apache-2.0"
] | null | null | null | import os
import torch.nn
from torch import nn
from crf_torch import CRF
import re
import random
import time
from torch.optim import Adam
import torch.nn.functional as F
from datetime import timedelta
# TODO: fix the bug in the accuracy calculation function (cal_acc below)
def get_time_dif(start_time):
"""获取已使用时间"""
end_time = time.time()
time_dif = end_time - start_time
return timedelta(seconds=int(round(time_dif)))
class CnnWordSeg(nn.Module):
"""CNN 分词"""
def __init__(self, config):
super(CnnWordSeg, self).__init__()
vocab_size = config.vocab_size
hidden_size = config.hidden_size
num_labels = config.num_labels
self.embedding = nn.Embedding(vocab_size, hidden_size, padding_idx=0)
self.conv1 = torch.nn.Sequential(
            # replicate padding is used here; padding=1 pads one step on each side
torch.nn.Conv1d(in_channels=hidden_size, out_channels=hidden_size,
kernel_size=3, stride=1, padding=1, padding_mode='replicate'),
torch.nn.ReLU()
)
self.conv2 = torch.nn.Sequential(
torch.nn.Conv1d(hidden_size, hidden_size, 3, 1, 1, padding_mode='replicate'),
torch.nn.ReLU()
)
self.conv3 = torch.nn.Sequential(
torch.nn.Conv1d(hidden_size, hidden_size, 3, 1, 1, padding_mode='replicate'),
torch.nn.ReLU()
)
self.dense = nn.Linear(hidden_size, 4)
self.crf = CRF(num_tags=num_labels, batch_first=True)
def forward(self, x, y, mask, test=False):
hidden_state = self.embedding(x) # (batch,seq_len,hidden_size)
        hidden_state = hidden_state.permute(0, 2, 1)  # Conv1d convolves over the length dimension, so move channels to dim 1
hidden_state = self.conv1(hidden_state)
hidden_state = self.conv2(hidden_state)
hidden_state = self.conv3(hidden_state)
hidden_state = hidden_state.permute(0, 2, 1)
hidden_state = self.dense(hidden_state)
if not test:
hidden_state = self.crf(hidden_state, y, mask)
else:
hidden_state = self.crf.decode(hidden_state, mask)
return hidden_state
class DatasetIterater(object):
def __init__(self, data_list, batch_size, device):
self.batch_size = batch_size
self.data_list = data_list
self.n_batches = len(data_list) // batch_size
        self.residue = False  # whether a final partial batch is left over
        if len(data_list) % batch_size != 0:  # fixed: was `% self.n_batches`, which mis-detects the leftover batch
self.residue = True
self.index = 0
self.device = device
def _to_tensor(self, datas):
max_len = max([len(data[0]) for data in datas])
x = torch.LongTensor([data[0] + [0]*(max_len-len(data[0])) for data in datas]).to(self.device)
y = torch.LongTensor([data[1] + [0]*(max_len-len(data[0])) for data in datas]).to(self.device)
mask = torch.ByteTensor([data[2] + [0]*(max_len-len(data[0])) for data in datas]).to(self.device)
return x, y, mask
def __next__(self):
if self.residue and self.index == self.n_batches:
batches = self.data_list[self.index * self.batch_size: len(self.data_list)]
self.index += 1
batches = self._to_tensor(batches)
return batches
elif self.index >= self.n_batches:
self.index = 0
raise StopIteration
else:
batches = self.data_list[self.index * self.batch_size: (self.index + 1) * self.batch_size]
self.index += 1
batches = self._to_tensor(batches)
return batches
def __iter__(self):
return self
def __len__(self):
if self.residue:
return self.n_batches + 1
else:
return self.n_batches
def build_dataset(path, max_len=32):
sents = open(path, 'r', encoding='utf8').read().strip().split('\n')
    sents = [re.split(' +', s) for s in sents]  # words are separated by (two) spaces
    sents = [[w for w in s if w] for s in sents]  # drop empty strings
    random.shuffle(sents)  # shuffle the corpus so the validation split below is random
def build_vocab(sents, min_count=2):
chars = {}
for s in sents:
for c in ''.join(s):
if c in chars:
chars[c] += 1
else:
chars[c] = 1
chars = {i: j for i, j in chars.items() if j >= min_count}
id2char = {i+1: j for i, j in enumerate(chars.keys())}
char2id = {j: i for i, j in id2char.items()}
return id2char, char2id
id2char, char2id = build_vocab(sents)
def to_id():
datasets = []
for s in sents:
x, y = [], []
for w in s:
if not all(c in char2id for c in w):
continue
x.extend([char2id[c] for c in w])
if len(w) == 1:
y.append(0)
elif len(w) == 2:
y.extend([1, 3])
else:
y.extend([1] + [2] * (len(w) - 2) + [3])
if x:
datasets.append((x, y, [1]*len(x))) # x,y,mask
return datasets
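    # Editor's note on the tag scheme produced above (standard S/B/M/E word
    # segmentation labels): 0 = single-character word, 1 = word begin,
    # 2 = word middle, 3 = word end; e.g. a 3-character word is tagged
    # [1, 2, 3], a 2-character word [1, 3], and a 1-character word [0].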
data = to_id()
trains, valids = data[:-5000], data[-5000:]
return trains, valids, id2char, char2id
class Train:
def __init__(self, model, train_iter, dev_iter, config):
self.model = model
self.train_iter = train_iter
self.dev_iter = dev_iter
self.config = config
def train(self):
start_time = time.time()
self.model.train()
optimizer = Adam(self.model.parameters(), lr=self.config.lr)
        total_batch = 0  # number of batches processed so far
        dev_best_loss = float('inf')  # best (lowest) dev loss seen so far
for epoch in range(self.config.num_epochs):
print('Epoch [{}/{}]'.format(epoch + 1, self.config.num_epochs))
for i, (x, y, mask) in enumerate(self.train_iter):
self.model.zero_grad()
loss = self.model(x, y, mask)
loss.backward()
optimizer.step()
if total_batch % 100 == 0:
y_pre = self.model(x, y, mask, test=True)
y_true = y.cpu().numpy().tolist()
mask = mask.cpu().numpy().sum(axis=1).tolist()
train_acc, rec = self.cal_acc(y_pre, y_true, mask)
dev_loss, dev_acc, dev_rec = self.evaluate()
if dev_loss < dev_best_loss:
dev_best_loss = dev_loss
                        torch.save(self.model.state_dict(), self.config.save_path)
improve = '*'
else:
improve = ''
time_dif = get_time_dif(start_time)
msg = 'Iter: {0:>6}, Train Loss: {1:>5.2}, Train Acc: {2:>6.2%}, Rec: {3:>6.2%}, Val Loss: {4:>5.2}, Val Acc: {5:>6.2%}, Time: {6} {7}'
print(msg.format(total_batch, loss.item(), train_acc, rec, dev_loss, dev_acc, time_dif, improve))
                    self.model.train()
total_batch += 1
def evaluate(self):
self.model.eval()
loss_total = 0.0
acc_total = 0.0
rec_total = 0.0
n = 0
with torch.no_grad():
for x, y, mask in self.dev_iter:
loss = self.model(x, y, mask)
loss_total += loss.item()
y_pre = self.model(x, y, mask, test=True)
y_true = y.cpu().numpy().tolist()
mask = mask.cpu().numpy().sum(axis=1).tolist()
acc, rec = self.cal_acc(y_pre, y_true, mask)
acc_total += acc
rec_total += rec
n += 1
return loss_total/n, acc_total/n, rec_total/n
    # rewritten accuracy calculation; a known bug remains to be fixed (see TODO at top)
def cal_acc(self, y_pre, y_true, mask):
n = len(y_pre)
acc, rec = 0.0, 0.0
for i in range(n):
length = mask[i]
tp = y_pre[i][:length]
tt = y_true[i][:length]
tt = set([i*2 + x for i, x in enumerate(tt) if x == 0 or x == 1])
tp = set([i*2 + x for i, x in enumerate(tp) if x == 0 or x == 1])
acc += len(tt & tp) / (len(tp)+1)
rec += len(tt & tp) / (len(tt)+1)
return acc/n, rec/n
class Config:
def __init__(self):
self.lr = 1e-3
self.num_epochs = 10
self.batch_size = 128
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_labels = 4
self.hidden_size = 128
self.path = '../data/icwb2/msr_training.utf8'
self.vocab_size = 0
self.save_path = 'model.ckpt'
if __name__ == '__main__':
config = Config()
train_data, valid_data, id2char, char2id = build_dataset(config.path)
config.vocab_size = len(id2char) + 1
train_iter = DatasetIterater(train_data, config.batch_size, config.device)
valid_iter = DatasetIterater(valid_data, config.batch_size, config.device)
    model = CnnWordSeg(config).to(config.device)  # respect the configured device instead of hard-coding GPU 0
train = Train(model, train_iter, valid_iter, config)
train.train() | 36.836066 | 161 | 0.549622 | 1,213 | 8,988 | 3.892828 | 0.176422 | 0.041931 | 0.011436 | 0.023295 | 0.275307 | 0.212622 | 0.191233 | 0.176832 | 0.155654 | 0.128971 | 0 | 0.024495 | 0.32777 | 8,988 | 244 | 162 | 36.836066 | 0.757034 | 0.022363 | 0 | 0.156398 | 0 | 0.004739 | 0.027607 | 0.003536 | 0 | 0 | 0 | 0.004098 | 0 | 1 | 0.075829 | false | 0 | 0.047393 | 0.004739 | 0.203791 | 0.009479 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e54656185e027ab6cdc457485c3e4f7aee1306c | 1,636 | py | Python | gs_quant/backtests/execution_engine.py | skyquant2/gs-quant | b7e648fa7912b13ad1fd503b643389e34587aa1e | [
"Apache-2.0"
] | 2 | 2021-06-22T12:14:38.000Z | 2021-06-23T15:51:08.000Z | gs_quant/backtests/execution_engine.py | skyquant2/gs-quant | b7e648fa7912b13ad1fd503b643389e34587aa1e | [
"Apache-2.0"
] | null | null | null | gs_quant/backtests/execution_engine.py | skyquant2/gs-quant | b7e648fa7912b13ad1fd503b643389e34587aa1e | [
"Apache-2.0"
] | null | null | null | """
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from gs_quant.backtests.data_handler import DataHandler
from gs_quant.backtests.event import *
import datetime as dt
class ExecutionEngine(object):
pass
class SimulatedExecutionEngine(ExecutionEngine):
def __init__(self, data_handler: DataHandler):
self.data_handler = data_handler
self.orders = []
def submit_order(self, order: OrderEvent):
self.orders.append(order)
self.orders.sort(key=lambda e: e.order.execution_end_time())
def ping(self, state: dt.datetime):
fill_events = []
while self.orders:
order: OrderBase = self.orders[0].order
end_time = order.execution_end_time()
if end_time > state:
break
else:
fill = FillEvent(order=order,
filled_price=order.execution_price(self.data_handler),
filled_units=order.execution_quantity(self.data_handler))
fill_events.append(fill)
self.orders.pop(0)
return fill_events
| 33.387755 | 90 | 0.675428 | 209 | 1,636 | 5.162679 | 0.526316 | 0.055607 | 0.055607 | 0.029657 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00817 | 0.251834 | 1,636 | 48 | 91 | 34.083333 | 0.873366 | 0.337408 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0.038462 | 0.115385 | 0 | 0.346154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e54b6e75de5f4de964911c5a74139115880c479 | 19,578 | py | Python | biosimulators_opencor/utils.py | biosimulators/Biosimulators_OpenCOR | e00645e372baf7475957af9487856ad9ddd18814 | [
"MIT"
] | null | null | null | biosimulators_opencor/utils.py | biosimulators/Biosimulators_OpenCOR | e00645e372baf7475957af9487856ad9ddd18814 | [
"MIT"
] | null | null | null | biosimulators_opencor/utils.py | biosimulators/Biosimulators_OpenCOR | e00645e372baf7475957af9487856ad9ddd18814 | [
"MIT"
] | null | null | null | """ Utilities for OpenCOR
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2021-05-28
:Copyright: 2021, BioSimulators Team
:License: MIT
"""
from .data_model import KISAO_ALGORITHM_MAP
from biosimulators_utils.config import get_config, Config # noqa: F401
from biosimulators_utils.data_model import ValueType # noqa: F401
from biosimulators_utils.log.data_model import TaskLog # noqa: F401
from biosimulators_utils.report.data_model import VariableResults # noqa: F401
from biosimulators_utils.sedml.data_model import ( # noqa: F401
SedDocument, ModelLanguage, ModelAttributeChange, UniformTimeCourseSimulation, Algorithm, Task, RepeatedTask,
VectorRange, SubTask, DataGenerator, Variable)
from biosimulators_utils.sedml.io import SedmlSimulationWriter
from biosimulators_utils.sedml import validation
from biosimulators_utils.simulator.utils import get_algorithm_substitution_policy
from biosimulators_utils.utils.core import validate_str_value, raise_errors_warnings
from biosimulators_utils.warnings import warn, BioSimulatorsWarning
from kisao.data_model import AlgorithmSubstitutionPolicy, ALGORITHM_SUBSTITUTION_POLICY_LEVELS
from kisao.utils import get_preferred_substitute_algorithm_by_ids
from unittest import mock
import copy
import lxml.etree
import opencor
import os
import tempfile
__all__ = [
'validate_task',
'validate_variable_xpaths',
'validate_simulation',
'get_opencor_algorithm',
'get_opencor_parameter_value',
'build_opencor_sedml_doc',
'save_task_to_opencor_sedml_file',
'load_opencor_simulation',
'validate_opencor_simulation',
'get_results_from_opencor_simulation',
'log_opencor_execution',
'get_mock_libcellml',
]
def validate_task(task, variables, config=None):
""" Validate that a simulation can be executed with OpenCOR
Args:
task (:obj:`Task`): request simulation task
variables (:obj:`list` of :obj:`Variable`): variables that should be recorded
config (:obj:`Config`, optional): BioSimulators common configuration
Returns:
:obj:`tuple:`:
* :obj:`Task`: possibly alternate task that OpenCOR should execute
* :obj:`lxml.etree._ElementTree`: element tree for model
* :obj:`dict`: dictionary that maps the id of each SED variable to the name that OpenCOR uses to reference it
"""
config = config or get_config()
model = task.model
sim = task.simulation
if config.VALIDATE_SEDML:
raise_errors_warnings(validation.validate_task(task),
error_summary='Task `{}` is invalid.'.format(task.id))
raise_errors_warnings(validation.validate_model_language(model.language, ModelLanguage.CellML),
error_summary='Language for model `{}` is not supported.'.format(model.id))
raise_errors_warnings(validation.validate_model_change_types(model.changes, (ModelAttributeChange,)),
error_summary='Changes for model `{}` are not supported.'.format(model.id))
raise_errors_warnings(*validation.validate_model_changes(model),
error_summary='Changes for model `{}` are invalid.'.format(model.id))
raise_errors_warnings(validation.validate_simulation_type(sim, (UniformTimeCourseSimulation, )),
error_summary='{} `{}` is not supported.'.format(sim.__class__.__name__, sim.id))
raise_errors_warnings(*validation.validate_simulation(sim),
error_summary='Simulation `{}` is invalid.'.format(sim.id))
raise_errors_warnings(*validation.validate_data_generator_variables(variables),
error_summary='Data generator variables for task `{}` are invalid.'.format(task.id))
# read model; TODO: support imports
model_etree = lxml.etree.parse(model.source)
# validate variables
opencor_variable_names = validate_variable_xpaths(variables, model_etree)
# validate simulation
opencor_simulation = validate_simulation(task.simulation)
# check that OpenCOR can execute the request algorithm (or a similar one)
opencor_algorithm = get_opencor_algorithm(task.simulation.algorithm, config=config)
# create new task to manage configuration for OpenCOR
opencor_task = copy.deepcopy(task)
opencor_task.simulation = opencor_simulation
opencor_task.simulation.algorithm = opencor_algorithm
return opencor_task, model_etree, opencor_variable_names
def validate_variable_xpaths(sed_variables, model_etree):
""" Get the names OpenCOR uses to refer to model variable
Args:
model_etree (:obj:`lxml.etree._ElementTree`): element tree for model
sed_variables (:obj:`list` of :obj:`Variable`): SED variables
Returns:
:obj:`dict`: dictionary that maps the id of each SED variable to the name that OpenCOR uses to reference it
"""
opencor_variable_names = {}
for sed_variable in sed_variables:
if not sed_variable.target:
msg = 'Symbols are not supported.'
raise NotImplementedError(msg)
namespaces = copy.copy(sed_variable.target_namespaces)
namespaces.pop(None, None)
obj_target, _, attrib_target = sed_variable.target.partition('/@')
xml_objs = model_etree.xpath(obj_target, namespaces=namespaces)
if len(xml_objs) == 0:
msg = (
'XPath targets of variables must reference unique observables. '
'The target `{}` of variable `{}` does not match any model elements.'
).format(sed_variable.target, sed_variable.id)
raise ValueError(msg)
if len(xml_objs) > 1:
msg = (
'XPath targets of variables must reference unique observables. '
'The target `{}` of variable `{}` matches multiple model elements.'
).format(sed_variable.target, sed_variable.id)
raise ValueError(msg)
xml_obj = xml_objs[0]
names = []
while True:
name = xml_obj.attrib.get('name', None)
names.append(name)
xml_obj = xml_obj.getparent()
ns, _, tag = xml_obj.tag[1:].partition('}')
if not name or not ns.startswith('http://www.cellml.org/cellml/'):
msg = 'Target `{}` of variable `{}` is not a valid observable.'.format(sed_variable.target, sed_variable.id)
raise ValueError(msg)
if tag == 'model':
break
if attrib_target:
names.insert(0, attrib_target)
opencor_variable_names[sed_variable.id] = '/'.join(reversed(names))
return opencor_variable_names
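# Editor's worked example (illustrative CellML, not from the test suite): for a
# variable whose XPath target resolves to <variable name="V"> inside
# <component name="membrane">, the name built above is "membrane/V"; with an
# attribute target ending in /@initial_value it becomes
# "membrane/V/initial_value". The model element's own name is not included,
# since the loop breaks once the parent is the model element.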
def validate_simulation(simulation):
""" Validate a simulation
Args:
simulation (:obj:`UniformTimeCourseSimulation`): requested simulation
Returns:
:obj:`UniformTimeCourseSimulation`: simulation instructions for OpenCOR
"""
number_of_steps = (
simulation.output_end_time - simulation.initial_time
) / (
simulation.output_end_time - simulation.output_start_time
) * simulation.number_of_steps
output_start_time = simulation.initial_time
if abs(number_of_steps - round(number_of_steps)) > 1e-8:
msg = (
'Number of steps must be an integer, not `{}`:'
'\n Initial time: {}'
'\n Output start time: {}'
'\n Output end time: {}'
'\n Number of steps (output start - end time) time: {}'
).format(
number_of_steps, simulation.initial_time,
simulation.output_start_time, simulation.output_end_time,
simulation.number_of_steps,
)
raise NotImplementedError(msg)
else:
number_of_steps = round(number_of_steps)
opencor_simulation = copy.deepcopy(simulation)
opencor_simulation.number_of_steps = number_of_steps
opencor_simulation.output_start_time = output_start_time
return opencor_simulation
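# Editor's worked example (illustrative numbers): with initial_time=0,
# output_start_time=5, output_end_time=10 and number_of_steps=100, OpenCOR is
# asked for (10 - 0) / (10 - 5) * 100 = 200 steps starting at t=0;
# get_results_from_opencor_simulation later keeps only the last
# number_of_steps + 1 = 101 recorded points, i.e. the requested [5, 10] window.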
def get_opencor_algorithm(requested_alg, config=None):
""" Get a possibly alternative algorithm that OpenCOR should execute
Args:
requested_alg (:obj:`Algorithm`): requested algorithm
config (:obj:`Config`, optional): configuration
Returns:
:obj:`Algorithm`: possibly alternative algorithm that OpenCOR should execute
"""
exec_alg = copy.deepcopy(requested_alg)
algorithm_substitution_policy = get_algorithm_substitution_policy(config=config)
exec_alg.kisao_id = get_preferred_substitute_algorithm_by_ids(
requested_alg.kisao_id, KISAO_ALGORITHM_MAP.keys(),
substitution_policy=algorithm_substitution_policy)
if exec_alg.kisao_id == requested_alg.kisao_id:
alg_specs = KISAO_ALGORITHM_MAP[exec_alg.kisao_id]
params_specs = alg_specs['parameters']
for change in list(exec_alg.changes):
param_specs = params_specs.get(change.kisao_id, None)
if param_specs:
is_valid, change.new_value = get_opencor_parameter_value(
change.new_value, param_specs['type'], param_specs.get('enum', None))
if not is_valid:
if (
ALGORITHM_SUBSTITUTION_POLICY_LEVELS[algorithm_substitution_policy]
> ALGORITHM_SUBSTITUTION_POLICY_LEVELS[AlgorithmSubstitutionPolicy.NONE]
):
warn('Unsupported value `{}` of {}-valued algorithm parameter `{}` (`{}`) was ignored.'.format(
change.new_value, param_specs['type'].name, param_specs['name'], change.kisao_id), BioSimulatorsWarning)
exec_alg.changes.remove(change)
else:
msg = '`{}` (`{}`) must a {}, not `{}`.'.format(
param_specs['name'], change.kisao_id, param_specs['type'].name, change.new_value)
raise ValueError(msg)
else:
if (
ALGORITHM_SUBSTITUTION_POLICY_LEVELS[algorithm_substitution_policy]
> ALGORITHM_SUBSTITUTION_POLICY_LEVELS[AlgorithmSubstitutionPolicy.NONE]
):
warn('Unsupported algorithm parameter `{}` was ignored.'.format(
change.kisao_id), BioSimulatorsWarning)
exec_alg.changes.remove(change)
else:
msg = '{} ({}) does not support parameter `{}`. {} support the following parameters:\n {}'.format(
alg_specs['name'], alg_specs['kisao_id'], change.kisao_id, alg_specs['name'],
'\n '.join(sorted('{}: {}'.format(param_kisao_id, param_specs['name'])
for param_kisao_id, param_specs in params_specs.items()))
)
raise NotImplementedError(msg)
else:
exec_alg.changes = []
return exec_alg
def get_opencor_parameter_value(value, value_type, enum_cls=None):
""" Get the OpenCOR representation of a value of a parameter
Args:
value (:obj:`str`): string-encoded parameter value
value_type (:obj:`ValueType`): expected type of the value
enum_cls (:obj:`type`): allowed values of the parameter
Returns:
:obj:`tuple`:
* :obj:`bool`: whether the value is valid
* :obj:`str`: OpenCOR representation of a value of a parameter
"""
if not validate_str_value(value, value_type):
return False, None
if enum_cls:
try:
return True, enum_cls[value].value
except KeyError:
pass
try:
return True, enum_cls[value.replace('KISAO:', 'KISAO_')].value
except KeyError:
pass
try:
return True, enum_cls(value).value
except ValueError:
pass
return False, None
else:
return True, value
def build_opencor_sedml_doc(task, variables, include_data_generators=False):
""" Create an OpenCOR-compatible SED-ML document for a task and its output variables
Args:
task (:obj:`Task`): SED task
variables (:obj:`list` of :obj:`Variable`): SED variables
include_data_generators (:obj:`bool`, optional): whether to export data generators
Returns:
:obj:`SedDocument`: SED document
"""
doc = SedDocument()
model_copy = copy.deepcopy(task.model)
model_copy.id = 'model'
model_copy.source = os.path.abspath(model_copy.source)
doc.models.append(model_copy)
sim_copy = copy.deepcopy(task.simulation)
sim_copy.id = 'simulation1'
doc.simulations.append(sim_copy)
basic_task = Task(id='task1', model=model_copy, simulation=sim_copy)
repeated_task = RepeatedTask(
id='repeatedTask',
range=VectorRange(id="once", values=[1]),
sub_tasks=[
SubTask(order=1, task=basic_task),
],
reset_model_for_each_iteration=True,
)
repeated_task.ranges = [repeated_task.range]
doc.tasks.append(basic_task)
doc.tasks.append(repeated_task)
if include_data_generators:
for variable in variables:
doc.data_generators.append(
DataGenerator(
id='data_generator_' + variable.id,
variables=[
Variable(id=variable.id, target=variable.target, target_namespaces=variable.target_namespaces, task=repeated_task),
],
math=variable.id,
)
)
return doc
def save_task_to_opencor_sedml_file(task, variables, include_data_generators=False):
""" Save a SED task to an OpenCOR-compatible SED-ML file
Args:
task (:obj:`Task`): SED task
variables (:obj:`list` of :obj:`Variable`): SED variables
include_data_generators (:obj:`bool`, optional): whether to export data generators
Returns:
:obj:`str`: path to SED-ML file for the SED document
"""
doc = build_opencor_sedml_doc(task, variables, include_data_generators=include_data_generators)
fid, sed_filename = tempfile.mkstemp(suffix='.sedml')
os.close(fid)
doc.models[0].source = os.path.relpath(doc.models[0].source, os.path.dirname(sed_filename))
# use a mocked version because libCellML cannot be installed into the OpenCOR docker image
with mock.patch.dict('sys.modules', libcellml=get_mock_libcellml()):
SedmlSimulationWriter().run(doc, sed_filename, validate_models_with_languages=False)
return sed_filename
def load_opencor_simulation(task, variables, include_data_generators=False):
""" Load an OpenCOR simulation
Args:
task (:obj:`Task`): SED task
variables (:obj:`list` of :obj:`Variable`): SED variables
include_data_generators (:obj:`bool`, optional): whether to export data generators
Returns:
:obj:`PythonQt.private.SimulationSupport.Simulation`: OpenCOR simulation
"""
# save SED-ML to a file
filename = save_task_to_opencor_sedml_file(task, variables, include_data_generators=include_data_generators)
# Read the SED-ML file
try:
opencor_sim = opencor.open_simulation(filename)
finally:
# clean up temporary SED-ML file
os.remove(filename)
validate_opencor_simulation(opencor_sim)
return opencor_sim
def validate_opencor_simulation(sim):
""" Validate an OpenCOR simulation
Args:
sim (:obj:`PythonQt.private.SimulationSupport.Simulation`): OpenCOR simulation)
Raises:
:obj:`ValueError`: if the simulation is invalid
"""
if sim.hasBlockingIssues() or not sim.valid():
msg = 'The task does not describe a valid simulation:\n\n {}'.format(
'\n\n '.join(
''.join(lxml.etree.fromstring('<root>' + issue + '</root>').itertext())
for issue in sim.issues()
)
)
raise ValueError(msg)


def get_results_from_opencor_simulation(opencor_sim, sed_task, sed_variables, opencor_variable_names):
    """ Get the results of SED variables from an OpenCOR simulation

    Args:
        opencor_sim (:obj:`PythonQt.private.SimulationSupport.Simulation`): OpenCOR simulation
        sed_task (:obj:`Task`): requested SED task
        sed_variables (:obj:`list` of :obj:`Variable`): SED variables
        opencor_variable_names (:obj:`dict`): dictionary that maps the id of each SED variable to the name that OpenCOR uses to reference it

    Returns:
        :obj:`VariableResults`: results of the SED variables
    """
    opencor_results = opencor_sim.results()
    opencor_voi_results = opencor_results.voi()
    opencor_states_results = opencor_results.states()
    opencor_rates_results = opencor_results.rates()
    opencor_constants_results = opencor_results.constants()
    opencor_algebraic_results = opencor_results.algebraic()

    sed_results = VariableResults()
    invalid_variables = []
    for sed_variable in sed_variables:
        opencor_name = opencor_variable_names[sed_variable.id]
        # keep only the tail of each recording (the final iteration of the repeated task)
        if opencor_name == opencor_voi_results.uri():
            sed_results[sed_variable.id] = opencor_voi_results.values()[-(sed_task.simulation.number_of_steps + 1):]
        elif opencor_name in opencor_states_results:
            sed_results[sed_variable.id] = opencor_states_results[opencor_name].values()[-(sed_task.simulation.number_of_steps + 1):]
        elif opencor_name in opencor_rates_results:
            sed_results[sed_variable.id] = opencor_rates_results[opencor_name].values()[-(sed_task.simulation.number_of_steps + 1):]
        elif opencor_name in opencor_constants_results:
            sed_results[sed_variable.id] = opencor_constants_results[opencor_name].values()[-(sed_task.simulation.number_of_steps + 1):]
        elif opencor_name in opencor_algebraic_results:
            sed_results[sed_variable.id] = opencor_algebraic_results[opencor_name].values()[-(sed_task.simulation.number_of_steps + 1):]
        else:
            invalid_variables.append('{}: {}'.format(sed_variable.id, sed_variable.target))

    if invalid_variables:
        msg = (
            'The target of each variable must be a valid observable. '
            'The targets of the following variables are not valid observables.\n {}'
        ).format('\n '.join(invalid_variables))
        raise ValueError(msg)

    return sed_results
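

# Worked example of the tail slice used above (hypothetical numbers): for a
# simulation with ``number_of_steps = 10``, ``values()[-(10 + 1):]`` keeps the
# final 11 recorded points, i.e. the output of the requested task while
# discarding points recorded by any earlier iterations.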


def log_opencor_execution(task, log):
    """ Log information about how OpenCOR was used to execute the simulation

    Args:
        task (:obj:`Task`): SED task
        log (:obj:`TaskLog`): execution log
    """
    log.algorithm = task.simulation.algorithm.kisao_id
    log.simulator_details = {
        'method': 'OpenCOR.SimulationSupport.Simulation.run',
        'algorithmParameters': [
            {'kisaoID': change.kisao_id, 'value': change.new_value}
            for change in task.simulation.algorithm.changes
        ],
    }


def get_mock_libcellml():
    """ Get a mocked version of libCellML

    Returns:
        :obj:`mock.Mock`: mocked libcellml module
    """
    return mock.Mock(
        Parser=lambda: mock.Mock(
            parseModel=lambda model: None,  # accept the model source like the real parser
            errorCount=lambda: 0,
            warningCount=lambda: 0,
        ),
        Validator=lambda: mock.Mock(
            validateModel=lambda model: None,
            errorCount=lambda: 0,
            warningCount=lambda: 0,
        ),
    )
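

# Sketch of how the mock is consumed (mirrors save_task_to_opencor_sedml_file):
# any code that imports ``libcellml`` inside the patched block receives the
# stub instead of the real library.
#
#     with mock.patch.dict('sys.modules', libcellml=get_mock_libcellml()):
#         import libcellml
#         assert libcellml.Parser().errorCount() == 0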
| 38.53937 | 141 | 0.659056 | 2,234 | 19,578 | 5.54342 | 0.149955 | 0.02043 | 0.017846 | 0.014858 | 0.383398 | 0.332122 | 0.276244 | 0.221657 | 0.187419 | 0.171592 | 0 | 0.003321 | 0.246348 | 19,578 | 507 | 142 | 38.615385 | 0.835988 | 0.209521 | 0 | 0.187291 | 0 | 0 | 0.115847 | 0.018057 | 0 | 0 | 0 | 0.001972 | 0 | 1 | 0.040134 | false | 0.010033 | 0.063545 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e559b65f4bffc816f6acc36951ebd073cffa8c9 | 3,407 | py | Python | arpym/statistics/saddle_point_quadn.py | dpopadic/arpmRes | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | [
"MIT"
] | 6 | 2021-04-10T13:24:30.000Z | 2022-03-26T08:20:42.000Z | arpym/statistics/saddle_point_quadn.py | dpopadic/arpmRes | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | [
"MIT"
] | null | null | null | arpym/statistics/saddle_point_quadn.py | dpopadic/arpmRes | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | [
"MIT"
] | 6 | 2019-08-13T22:02:17.000Z | 2022-02-09T17:49:12.000Z | # -*- coding: utf-8 -*-
import numpy as np
from scipy.stats import norm
from scipy.optimize import brentq

from arpym.tools.transpose_square_root import transpose_square_root


def saddle_point_quadn(y, alpha, beta, gamma, mu, sigma2):
    """For details, see here.

    Parameters
    ----------
    y : array, shape(j_,)
    alpha : scalar
    beta : array, shape(n_,)
    gamma : array, shape(n_, n_)
    mu : array, shape(n_,)
    sigma2 : array, shape(n_, n_)

    Returns
    -------
    cdf : array, shape(j_,)
    pdf : array, shape(j_,)

    """

    y = np.asarray(y).copy().reshape(-1)
    beta = np.asarray(beta).copy().reshape(-1, 1)
    mu = np.asarray(mu).copy().reshape(-1, 1)
    j_ = len(y)

    # Step 1: Compute the eigenvalues and eigenvectors of l.T @ gamma @ l
    l = transpose_square_root(sigma2, 'Cholesky')
    lam, e = np.linalg.eig(l.T @ gamma @ l)
    lam = lam.reshape(-1, 1)

    # Step 2: Compute transformed parameters
    alpha_tilde = alpha + beta.T @ mu + mu.T @ gamma @ mu
    beta_tilde = beta + 2*gamma @ mu
    gamma_tilde = e.T @ l.T @ beta_tilde

    # Step 3: Compute the log-characteristic function and its derivatives

    # log-characteristic function
    def c_y(w):
        return alpha_tilde * w - 0.5 * np.sum(np.log(1 - 2.*w*lam) -
                                              w**2 * gamma_tilde**2 /
                                              (1 - 2.*w*lam))

    # first derivative
    def c_y_prime(w):
        return alpha_tilde + np.sum(lam / (1 - 2.*w*lam) +
                                    gamma_tilde**2 * (w - w**2 * lam) /
                                    (1 - 2.*w*lam)**2)

    # second derivative
    def c_y_second(w):
        return np.array([np.sum(2. * (lam / (1 - 2.*w*lam))**2 +
                                gamma_tilde**2 / (1 - 2.*w*lam)**3)])

    # Step 4: Find w_hat numerically using Brent's method
    lam_max = np.max(lam)
    lam_min = np.min(lam)
    if lam_max > 0:
        w_max = (1 - 1e-5) / (2 * lam_max)
    else:
        w_max = 1e20
    if lam_min < 0:
        w_min = (1 + 1e-5) / (2 * lam_min)
    else:
        w_min = -1e20
    y_min = c_y_prime(w_min)
    y_max = c_y_prime(w_max)

    # initialize
    w_hat = np.zeros(j_)
    c_y_w_hat = np.zeros(j_)  # c(w_hat)
    c_y_second_w_hat = np.zeros(j_)  # c''(w_hat)

    idx = np.argsort(y)
    w_last = w_min
    for j in range(j_):
        if y[idx[j]] <= y_min:
            w_hat[idx[j]] = w_min
        elif y[idx[j]] >= y_max:
            w_hat[idx[j]] = w_max
        else:
            # Brent's method for finding the root of the function.
            # Since y is sorted and c_y_prime is a monotone increasing function
            # it is guaranteed that the solution w is in the interval
            # [w_last, w_max].
            w_hat[idx[j]] = brentq(lambda w: c_y_prime(w) - y[idx[j]],
                                   w_last, w_max)
            w_last = w_hat[idx[j]]
        c_y_w_hat[idx[j]] = c_y(w_hat[idx[j]])
        c_y_second_w_hat[idx[j]] = c_y_second(w_hat[idx[j]])

    # Step 5: Compute cdf and pdf
    r = np.sign(w_hat) * np.sqrt(2. * (w_hat * y - c_y_w_hat))
    u = w_hat * np.sqrt(c_y_second_w_hat)
    cdf = norm.cdf(r) - norm.pdf(r) * (1. / u - 1. / r)
    pdf = np.exp(c_y_w_hat - w_hat * y) / np.sqrt(2 * np.pi * c_y_second_w_hat)

    return np.squeeze(cdf), np.squeeze(pdf)
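

if __name__ == '__main__':
    # Minimal usage sketch with hypothetical inputs; sigma2 must be a valid
    # (positive definite) covariance matrix for the Cholesky factorization.
    n_ = 2
    y_grid = np.linspace(-3., 3., 5)
    cdf, pdf = saddle_point_quadn(y_grid,
                                  alpha=0.,
                                  beta=np.ones(n_),
                                  gamma=0.5 * np.eye(n_),
                                  mu=np.zeros(n_),
                                  sigma2=np.eye(n_))
    print(cdf)
    print(pdf)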
| 30.693694 | 79 | 0.528618 | 546 | 3,407 | 3.089744 | 0.221612 | 0.052164 | 0.037344 | 0.037937 | 0.160047 | 0.097214 | 0.089508 | 0.07706 | 0.035566 | 0.035566 | 0 | 0.026304 | 0.330496 | 3,407 | 110 | 80 | 30.972727 | 0.713284 | 0.249486 | 0 | 0.050847 | 0 | 0 | 0.003222 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067797 | false | 0 | 0.067797 | 0.050847 | 0.20339 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e57c1d666f0e679e553435b63623e54ee15e34a | 320 | py | Python | hardware/dht/__init__.py | jpalczewski/pills | ab0cf0feedbdfe069a0dad76c8a45ee9ab4cfc26 | [
"MIT"
] | null | null | null | hardware/dht/__init__.py | jpalczewski/pills | ab0cf0feedbdfe069a0dad76c8a45ee9ab4cfc26 | [
"MIT"
] | null | null | null | hardware/dht/__init__.py | jpalczewski/pills | ab0cf0feedbdfe069a0dad76c8a45ee9ab4cfc26 | [
"MIT"
] | null | null | null | from .DHT22 import sensor
import asyncio

import pigpio


async def poll_once():
    pi = pigpio.pi()
    s = sensor(pi, 24, LED=None, power=None, DHT11=False)
    s.trigger()
    # non-blocking sleep so the event loop is not stalled while the sensor
    # completes its read
    await asyncio.sleep(0.2)
    humidity = s.humidity()
    temperature = s.temperature()
    s.cancel()
    pi.stop()
    return (humidity, temperature)
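

# Hypothetical driver (assumes a DHT22 wired to GPIO 24 and a running pigpio
# daemon on a Raspberry Pi):
#
#     import asyncio
#     humidity, temperature = asyncio.run(poll_once())
#     print('%.1f C, %.1f %%RH' % (temperature, humidity))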
8e5ba2a20b4cea3293ed973ff92b38716b7ec7fc | 2,267 | py | Python | test.py | gadolly/Deep_learning | b29248f97d576c36cad9eb0f67ed834d7a5aadad | [
"MIT"
] | null | null | null | test.py | gadolly/Deep_learning | b29248f97d576c36cad9eb0f67ed834d7a5aadad | [
"MIT"
] | null | null | null | test.py | gadolly/Deep_learning | b29248f97d576c36cad9eb0f67ed834d7a5aadad | [
"MIT"
] | null | null | null | # import the necessary packages
from keras.preprocessing import image as image_utils
from imagenet_utils import decode_predictions
from imagenet_utils import preprocess_input
from vgg16 import VGG16
import numpy as np
import argparse
import cv2
from keras.utils import np_utils
import matplotlib.pyplot as plt

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
                help="path to the input image")
args = vars(ap.parse_args())
# load the original image via OpenCV so we can draw on it and display
# it to our screen later
orig = cv2.imread(args["image"])
#cv2.imshow("test",orig)
# load the input image using the Keras helper utility while ensuring
# that the image is resized to 224x224 pixels, the required input
# dimensions for the network -- then convert the PIL image to a
# NumPy array
print("[INFO] loading and preprocessing image...")
image = image_utils.load_img(args["image"], target_size=(224, 224))
image = image_utils.img_to_array(image)
# our image is now represented by a NumPy array of shape (3, 224, 224),
# but we need to expand the dimensions to be (1, 3, 224, 224) so we can
# pass it through the network -- we'll also preprocess the image by
# subtracting the mean RGB pixel intensity from the ImageNet dataset
image = np.expand_dims(image, axis=0)
image = preprocess_input(image)
# load the VGG16 network
print("[INFO] loading network...")
model = VGG16(weights="imagenet")
# classify the image
print("[INFO] classifying image...")
preds = model.predict(image)
result = decode_predictions(preds, top=1)
(inID, label, val) = decode_predictions(preds)[0][0]
print(result[0])
print(len(result))
#result1 = ([col.strip() for col in part] for part in result)
#print(result1)
#print(decode_predictions(preds)[0])
# display the predictions to our screen
print("ImageNet ID: {}, Label: {}".format(inID, label))
cv2.putText(orig, "Label: {}".format(label), (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
plt.ioff()
plt.imshow(orig)
plt.pause(1)
plt.show()
#cv2.imshow("Classification", orig)
#cv2.waitKey(0)
P = decode_predictions(preds)
(imagenetID, label, prob) = P[0][0]
#plt.show() | 30.635135 | 71 | 0.736215 | 353 | 2,267 | 4.665722 | 0.402266 | 0.051609 | 0.05343 | 0.02793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034179 | 0.148214 | 2,267 | 74 | 72 | 30.635135 | 0.818747 | 0.406264 | 0 | 0 | 0 | 0 | 0.134441 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.27027 | 0 | 0.27027 | 0.162162 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
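
# For reference, decode_predictions returns one list per input image, each
# holding (class_id, label, probability) tuples sorted by probability;
# the values below are illustrative only:
#
#     [[('n02123045', 'tabby', 0.61), ('n02123159', 'tiger_cat', 0.23), ...]]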
8e615b3096b4af4bf6362be743bc75af467ed5a8 | 17,468 | py | Python | tests/test_requirements.py | domdfcoding/packing-tape | d8570033c8088c68527db918339c14aa6953264f | [
"MIT"
] | null | null | null | tests/test_requirements.py | domdfcoding/packing-tape | d8570033c8088c68527db918339c14aa6953264f | [
"MIT"
] | null | null | null | tests/test_requirements.py | domdfcoding/packing-tape | d8570033c8088c68527db918339c14aa6953264f | [
"MIT"
] | null | null | null | # stdlib
from typing import List, Sequence, Union

# 3rd party
import pytest
from coincidence.regressions import AdvancedDataRegressionFixture
from coincidence.selectors import min_version, not_windows, only_version
from domdf_python_tools.paths import PathPlus
from packaging.requirements import Requirement
from packaging.specifiers import Specifier, SpecifierSet
from pytest_regressions.data_regression import DataRegressionFixture

# this package
from shippinglabel.requirements import (
    ComparableRequirement,
    check_dependencies,
    combine_requirements,
    list_requirements,
    parse_pyproject_dependencies,
    parse_pyproject_extras,
    parse_requirements,
    read_requirements,
    resolve_specifiers
)


class TestComparableRequirement:

    @pytest.fixture(scope="class")
    def req(self):
        return ComparableRequirement('pytest==6.0.0; python_version <= "3.9"')

    @pytest.mark.parametrize(
        "other",
        [
            ComparableRequirement('pytest==6.0.0; python_version <= "3.9"'),
            ComparableRequirement("pytest==6.0.0"),
            ComparableRequirement("pytest"),
            ComparableRequirement("pytest[extra]"),
            Requirement('pytest==6.0.0; python_version <= "3.9"'),
            Requirement("pytest==6.0.0"),
            Requirement("pytest"),
            Requirement("pytest[extra]"),
            "pytest",
        ]
    )
    def test_eq(self, req, other):
        assert req == req
        assert req == other

    @pytest.mark.parametrize(
        "other",
        [
            "pytest-rerunfailures",
            ComparableRequirement("pytest-rerunfailures"),
            ComparableRequirement("pytest-rerunfailures==1.2.3"),
            Requirement("pytest-rerunfailures"),
            Requirement("pytest-rerunfailures==1.2.3"),
            ComparableRequirement("pytest"),
            ComparableRequirement("pytest[extra]"),
            Requirement("pytest"),
            Requirement("pytest[extra]"),
        ]
    )
    def test_gt(self, req, other):
        assert req < other

    @pytest.mark.parametrize(
        "other",
        [
            "apeye",
            ComparableRequirement("apeye"),
            ComparableRequirement("apeye==1.2.3"),
            Requirement("apeye"),
            Requirement("apeye==1.2.3"),
        ]
    )
    def test_lt(self, req, other):
        assert req > other

    @pytest.mark.parametrize(
        "other",
        [
            "pytest-rerunfailures",
            ComparableRequirement("pytest-rerunfailures"),
            ComparableRequirement("pytest-rerunfailures==1.2.3"),
            ComparableRequirement('pytest==6.0.0; python_version <= "3.9"'),
            Requirement("pytest-rerunfailures"),
            Requirement("pytest-rerunfailures==1.2.3"),
            Requirement('pytest==6.0.0; python_version <= "3.9"'),
            ComparableRequirement("pytest==6.0.0"),
            ComparableRequirement("pytest"),
            ComparableRequirement("pytest[extra]"),
            Requirement("pytest==6.0.0"),
            Requirement("pytest"),
            Requirement("pytest[extra]"),
            "pytest",
        ]
    )
    def test_ge(self, req, other):
        assert req <= other
        assert req <= req

    @pytest.mark.parametrize(
        "other",
        [
            "apeye",
            ComparableRequirement("apeye"),
            ComparableRequirement("apeye==1.2.3"),
            Requirement("apeye"),
            Requirement("apeye==1.2.3"),
            ComparableRequirement('pytest==6.0.0; python_version <= "3.9"'),
            ComparableRequirement("pytest==6.0.0"),
            ComparableRequirement("pytest"),
            ComparableRequirement("pytest[extra]"),
            Requirement('pytest==6.0.0; python_version <= "3.9"'),
            Requirement("pytest==6.0.0"),
            Requirement("pytest"),
            Requirement("pytest[extra]"),
            "pytest",
        ]
    )
    def test_le(self, req, other):
        assert req >= other
        assert req >= req


def test_combine_requirements():
    reqs = [
        ComparableRequirement("foo"),
        ComparableRequirement("foo>2"),
        ComparableRequirement("foo>2.5"),
        ComparableRequirement("foo==3.2.1"),
        ComparableRequirement("foo==3.2.3"),
        ComparableRequirement("foo==3.2.5"),
    ]

    assert combine_requirements(reqs) == [Requirement("foo==3.2.1,==3.2.3,==3.2.5,>2.5")]
    assert str(combine_requirements(reqs)[0]) == "foo==3.2.1,==3.2.3,==3.2.5,>2.5"
    assert str(combine_requirements(reqs)[0].specifier) == "==3.2.1,==3.2.3,==3.2.5,>2.5"


def test_combine_requirements_duplicates():
    reqs = [
        ComparableRequirement('typing-extensions>=3.6.4; python_version < "3.8"'),
        ComparableRequirement("typing-extensions>=3.7.4.3"),
        ComparableRequirement("typing-extensions>=3.7.4.3"),
        ComparableRequirement("typing-extensions>=3.7.4.3"),
        ComparableRequirement("typing-extensions>=3.7.4.3"),
        ComparableRequirement("typing-extensions>=3.7.4.1"),
        ComparableRequirement("typing-extensions>=3.7.4"),
        ComparableRequirement('typing-extensions; python_version < "3.8"'),
    ]

    combined_reqs = combine_requirements(reqs)
    assert len(combined_reqs) == 2
    assert combined_reqs[1] == ComparableRequirement("typing-extensions>=3.7.4.3")
    assert combined_reqs[0] == ComparableRequirement('typing-extensions>=3.6.4; python_version < "3.8"')

    reqs.append(reqs.pop(0))

    combined_reqs = combine_requirements(reqs)
    assert len(combined_reqs) == 2
    assert combined_reqs[0] == ComparableRequirement("typing-extensions>=3.7.4.3")
    assert combined_reqs[1] == ComparableRequirement('typing-extensions>=3.6.4; python_version < "3.8"')


def test_combine_requirements_differing_precision():
    reqs = [
        ComparableRequirement("lockfile>=0.9"),
        ComparableRequirement("lockfile>=0.9"),
        ComparableRequirement("lockfile>=0.12.2"),
    ]
    assert combine_requirements(reqs) == [Requirement("lockfile>=0.12.2")]


@pytest.mark.parametrize(
    "reqs, combined",
    [
        (
            [
                ComparableRequirement('numpy==1.19.3; platform_system == "Windows"'),
                ComparableRequirement('numpy>=1.19.1; platform_system != "Windows"')
            ],
            [
                ComparableRequirement('numpy==1.19.3; platform_system == "Windows"'),
                ComparableRequirement('numpy>=1.19.1; platform_system != "Windows"')
            ],
        ),
        (
            [
                ComparableRequirement('numpy==1.19.3; platform_system == "Windows"'),
                ComparableRequirement("numpy>=1.19.1"),
            ],
            [
                ComparableRequirement('numpy==1.19.3; platform_system == "Windows"'),
                ComparableRequirement("numpy>=1.19.1"),
            ],
        ),
        (
            [ComparableRequirement("numpy==1.19.3"), ComparableRequirement("numpy>=1.19.1")],
            [ComparableRequirement("numpy==1.19.3")],
        ),
        (
            [ComparableRequirement("numpy<=1.19.3"), ComparableRequirement("numpy==1.19.1")],
            [ComparableRequirement("numpy==1.19.1")],
        ),
        (
            [ComparableRequirement("numpy<=1.19.3"), ComparableRequirement("numpy<1.19.1")],
            [ComparableRequirement("numpy<1.19.1")],
        ),
        (
            [ComparableRequirement("numpy>1.2.3"), ComparableRequirement("numpy>=1.2.2")],
            [ComparableRequirement("numpy>1.2.3")],
        ),
    ]
)
def test_combine_requirements_markers(reqs, combined):
    assert combine_requirements(reqs) == combined


@pytest.mark.parametrize(
    "specifiers, resolved",
    [
        ([Specifier(">1.2.3"), Specifier(">=1.2.2"), Specifier("<2")], SpecifierSet(">1.2.3,<2")),
        ([Specifier(">1.2.3"), Specifier(">=1.2.2")], SpecifierSet(">1.2.3")),
        ([Specifier(">=1.2.2"), Specifier("<2")], SpecifierSet(">=1.2.2,<2")),
        ([Specifier(">1.2.3"), Specifier("<2")], SpecifierSet(">1.2.3,<2")),
        ([Specifier("<1.2.2"), Specifier("<=1.2.3"), Specifier(">2")], SpecifierSet("<1.2.2,>2")),
        ([Specifier("<1.2.2"), Specifier("<=1.2.3")], SpecifierSet("<1.2.2")),
        ([Specifier("<=1.2.3"), Specifier(">2")], SpecifierSet("<=1.2.3,>2")),
        ([Specifier("<1.2.2"), Specifier(">2")], SpecifierSet("<1.2.2,>2")),
    ]
)
def test_resolve_specifiers(specifiers, resolved):
    assert resolve_specifiers(specifiers) == resolved


requirements_a = [
    "autodocsumm>=0.2.0",
    "default-values>=0.2.0",
    "domdf-sphinx-theme>=0.1.0",
    "extras-require>=0.2.0",
    "repo-helper-sphinx-theme>=0.0.2",
    "seed-intersphinx-mapping>=0.1.1",
    "sphinx>=3.0.3",
    "ruamel-yaml>=0.16.12",
    "sphinx-click>=2.5.0",
    "sphinx-copybutton>=0.2.12",
    "sphinx-notfound-page>=0.5",
    "sphinx-prompt>=1.1.0",
    "sphinx-tabs>=1.1.13",
    "sphinx-toolbox>=1.7.1",
    "sphinxcontrib-autoprogram>=0.1.5",
    "sphinxcontrib-httpdomain>=1.7.0",
    "sphinxemoji>=0.1.6",
    "toctree-plus>=0.0.4",
]

requirements_b = [
    "autodocsumm>=0.2.0",
    "default-values>=0.2.0",
    "domdf-sphinx-theme>=0.1.0",
    "domdf-sphinx-theme>=0.1.0",
    "extras-require>=0.2.0",
    "repo-helper-sphinx-theme>=0.0.2",
    "seed-intersphinx-mapping>=0.1.1",
    "sphinx>=3.0.3",
    "sphinx-click>=2.5.0",
    "sphinx-copybutton>=0.2.12",
    "sphinx-copybutton>=0.2.12",
    "sphinx-notfound-page>=0.5",
    "sphinx-prompt>=1.1.0",
    "sphinx-tabs>=1.1.13",
    "sphinx-toolbox>=1.7.1",
    "ruamel.yaml>=0.16.12",
    "sphinxcontrib-autoprogram>=0.1.5",
    "sphinxcontrib-autoprogram>=0.1.5",
    "sphinxcontrib-httpdomain>=1.7.0",
    "sphinxemoji>=0.1.6",
    "toctree-plus>=0.0.4",
    "toctree-plus>=0.0.3",
]

requirements_c = [
    'numpy==1.19.3; platform_system == "Windows"',
    'numpy>=1.19.1; platform_system != "Windows"',
]


@pytest.mark.parametrize(
    "requirements",
    [
        pytest.param(requirements_a, id='a'),
        pytest.param(requirements_b, id='b'),
        pytest.param(requirements_c, id='c'),
    ]
)
def test_read_requirements(
        tmp_pathplus,
        advanced_data_regression: AdvancedDataRegressionFixture,
        requirements: List[str],
):
    (tmp_pathplus / "requirements.txt").write_lines(requirements)
    advanced_data_regression.check([
        str(x) for x in sorted(read_requirements(tmp_pathplus / "requirements.txt")[0])
    ])


@pytest.mark.parametrize(
    "requirements",
    [
        pytest.param(requirements_a, id='a'),
        pytest.param(requirements_b, id='b'),
        pytest.param(requirements_c, id='c'),
        pytest.param(iter(requirements_a), id="iter(a)"),
        pytest.param(iter(requirements_b), id="iter(b)"),
        pytest.param(iter(requirements_c), id="iter(c)"),
        pytest.param(set(requirements_a), id="set(a)"),
        pytest.param(set(requirements_b), id="set(b)"),
        pytest.param(set(requirements_c), id="set(c)"),
        pytest.param(tuple(requirements_a), id="tuple(a)"),
        pytest.param(tuple(requirements_b), id="tuple(b)"),
        pytest.param(tuple(requirements_c), id="tuple(c)"),
    ]
)
def test_parse_requirements(
        tmp_pathplus: PathPlus,
        advanced_data_regression: AdvancedDataRegressionFixture,
        requirements: List[str],
):
    advanced_data_regression.check([str(x) for x in sorted(parse_requirements(requirements)[0])])


def test_read_requirements_invalid(
        tmp_pathplus: PathPlus, advanced_data_regression: AdvancedDataRegressionFixture
):
    (tmp_pathplus / "requirements.txt").write_lines([
        "# another comment",
        "autodocsumm>=apples",
        "default-value---0.2.0",
        "domdf-sphinx-theme!!!0.1.0",
        "0.2.0",
        '',
        '',
        "https://bbc.co.uk",
        "toctree-plus>=0.0.4",
        "# a comment",
    ])

    with pytest.warns(UserWarning) as record:
        requirements, comments = read_requirements(tmp_pathplus / "requirements.txt")

    # check that three warnings were raised
    assert len(record) == 3

    # check that each message matches
    for idx, warning in enumerate([
        "Creating a LegacyVersion has been deprecated and will be removed in the next major release",
        "Ignored invalid requirement 'domdf-sphinx-theme!!!0.1.0'",
        "Ignored invalid requirement 'https://bbc.co.uk'",
    ]):
        assert record[idx].message.args[0] == warning  # type: ignore

    advanced_data_regression.check([str(x) for x in sorted(requirements)])
    assert comments == ["# another comment", "# a comment"]


def test_sort_mixed_requirements():
    requirements: Sequence[Union[str, ComparableRequirement]] = [
        "urllib3",
        ComparableRequirement("six==1.15.0"),
        "botocore",
        ComparableRequirement("requests>=2.19.1"),
        "python-dateutil",
    ]

    assert sorted(requirements) == [
        "botocore",
        "python-dateutil",
        ComparableRequirement("requests>=2.19.1"),
        ComparableRequirement("six==1.15.0"),
        "urllib3",
    ]


def test_check_dependencies(capsys):
    deps = ["pytest", "domdf_python_tools", "madeup_module"]

    missing_deps = check_dependencies(deps, False)
    assert isinstance(missing_deps, list)
    assert len(missing_deps) == 1
    assert missing_deps == ["madeup_module"]

    missing_deps = check_dependencies(deps)
    captured = capsys.readouterr()
    stdout = captured.out.split('\n')
    assert stdout[0] == "The following modules are missing:"
    assert stdout[1] == "['madeup_module']"
    assert stdout[2] == "Please check the documentation."
    assert stdout[3] == ''
    assert isinstance(missing_deps, list)
    assert len(missing_deps) == 1
    assert missing_deps == ["madeup_module"]

    missing_deps = check_dependencies(["pytest"])
    captured = capsys.readouterr()
    stdout = captured.out.split('\n')
    assert stdout[0] == "All modules installed"
    assert stdout[1] == ''
    assert isinstance(missing_deps, list)
    assert len(missing_deps) == 0
    assert missing_deps == []


def test_comparable_requirement():
    assert ComparableRequirement("foo") != ComparableRequirement("bar")
    assert ComparableRequirement("foo") == ComparableRequirement("foo")
    assert ComparableRequirement("foo>=1.2.3") == ComparableRequirement("foo >= 1.2.3")

    def req_with_marker():
        return ComparableRequirement('importlib-metadata>=1.5.0; python_version < "3.8"')

    def req_without_marker():
        return ComparableRequirement("importlib-metadata>=1.5.0")

    def req_with_different_marker():
        return ComparableRequirement('importlib-metadata>=1.5.0; python_version < "3.10"')

    assert req_with_marker() == req_with_marker()
    assert req_with_marker() is not req_with_marker()
    assert req_without_marker() is not req_without_marker()
    assert req_with_marker() != req_with_different_marker()

    assert "importlib-metadata" in [req_with_marker()]
    assert req_without_marker() in [req_with_marker()]
    assert req_with_marker() in [req_with_marker()]

    assert "importlib-metadata" in (req_with_marker(), )
    assert req_without_marker() in (req_with_marker(), )
    assert req_with_marker() in (req_with_marker(), )

    assert {req_without_marker(), req_without_marker()} == {req_without_marker()}
    assert {req_with_marker(), req_with_marker()} == {req_with_marker()}
    assert hash(req_with_marker()) == hash(req_with_marker())
    assert hash(req_with_marker()) != hash(req_without_marker())
    assert req_without_marker() not in {req_with_marker()}
    assert req_with_marker() in {req_with_marker()}

    assert req_without_marker() != "123foo?"
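

# Illustrative consequence of the behaviour asserted above: equal requirements
# hash identically, so ComparableRequirement can be deduplicated via a set
# (example values are hypothetical):
#
#     reqs = {ComparableRequirement("foo>=1.0"), ComparableRequirement("foo >= 1.0")}
#     assert len(reqs) == 1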
only_36 = pytest.param("3.6", marks=only_version((3, 6), reason="Output differs on Python 3.6"))
only_37 = pytest.param("3.7", marks=only_version((3, 7), reason="Output differs on Python 3.7"))
only_38 = pytest.param("3.8", marks=only_version((3, 8), reason="Output differs on Python 3.8"))
min_38 = pytest.param("3.8+", marks=min_version((3, 8), reason="Output differs on Python 3.8+"))
only_39 = pytest.param("3.9", marks=only_version((3, 9), reason="Output differs on Python 3.9"))
only_310 = pytest.param("3.10", marks=only_version((3, 10), reason="Output differs on Python 3.10"))


@not_windows("Output differs on Windows")
@pytest.mark.parametrize("py_version", [
    only_36,
    only_37,
    only_38,
    only_39,
    only_310,
])
@pytest.mark.parametrize(
    "library", [
        "shippinglabel",
        "apeye",
        "cachecontrol[filecache]",
        "domdf-python-tools",
        "domdf_python_tools",
    ]
)
@pytest.mark.parametrize("depth", [-1, 0, 1, 2, 3])
# @pytest.mark.parametrize("depth", [3])
def test_list_requirements(
        data_regression: DataRegressionFixture,
        library,
        depth,
        py_version,
):
    data_regression.check(list(list_requirements(library, depth=depth)))


@not_windows("Output differs on Windows")
@pytest.mark.parametrize("py_version", [
    only_36,
    only_37,
    min_38,
])
@pytest.mark.parametrize("depth", [-1, 0, 1, 2, 3])
# @pytest.mark.parametrize("depth", [3])
def test_list_requirements_pytest(
        data_regression: DataRegressionFixture,
        depth,
        py_version,
):
    data_regression.check(list(list_requirements("pytest", depth=depth)))


@pytest.fixture()
def pyproject_toml(tmp_pathplus: PathPlus):
    filename = (tmp_pathplus / "pyproject.toml")
    filename.write_lines([
        "[build-system]",
        'requires = [ "setuptools>=40.6.0", "wheel>=0.34.2",]',
        'build-backend = "setuptools.build_meta"',
        '',
        "[project]",
        "dependencies = [",
        ' "httpx",',
        ' "gidgethub[httpx]>4.0.0",',
        " \"django>2.1; os_name != 'nt'\",",
        " \"django>2.0; os_name == 'nt'\"",
        ']',
        '',
        "[project.optional-dependencies]",
        "test = [",
        ' "pytest < 5.0.0",',
        ' "pytest-cov[all]"',
        ']',
        "[tool.flit.metadata]",
        "requires = [",
        '\t"requests >=2.6",',
        "\t\"configparser; python_version == '2.7'\",",
        ']',
        '',
        "[tool.flit.metadata.requires-extra]",
        "test = [",
        '\t"pytest >=2.7.3",',
        '\t"pytest-cov",',
        ']',
    ])

    return filename


@pytest.mark.parametrize("flavour", ["auto", "pep621", "flit"])
def test_parse_pyproject_dependencies(
        pyproject_toml: PathPlus,
        advanced_data_regression: AdvancedDataRegressionFixture,
        flavour: str,
):
    deps = parse_pyproject_dependencies(pyproject_toml, flavour)  # type: ignore
    advanced_data_regression.check(sorted(str(x) for x in deps))


@pytest.mark.parametrize("flavour", ["auto", "pep621", "flit"])
def test_parse_pyproject_extras(
        pyproject_toml: PathPlus,
        advanced_data_regression: AdvancedDataRegressionFixture,
        flavour: str,
):
    extras = parse_pyproject_extras(pyproject_toml, flavour)  # type: ignore
    advanced_data_regression.check({k: sorted(str(x) for x in v) for k, v in extras.items()})
| 30.916814 | 101 | 0.676551 | 2,184 | 17,468 | 5.277015 | 0.120879 | 0.006074 | 0.006247 | 0.042777 | 0.676443 | 0.632278 | 0.596529 | 0.554013 | 0.533623 | 0.464642 | 0 | 0.043826 | 0.141802 | 17,468 | 564 | 102 | 30.971631 | 0.724968 | 0.012365 | 0 | 0.447205 | 0 | 0.008282 | 0.28293 | 0.081028 | 0 | 0 | 0 | 0 | 0.122153 | 1 | 0.05176 | false | 0 | 0.028986 | 0.008282 | 0.093168 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e6171a69d7112d24e0deaed0a6f8f8e780b1f04 | 6,682 | py | Python | tests/ut/python/parallel/test_uniform_candidate_sampler.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | 2 | 2021-07-08T13:10:42.000Z | 2021-11-08T02:48:57.000Z | tests/ut/python/parallel/test_uniform_candidate_sampler.py | peixinhou/mindspore | fcb2ec2779b753e95c762cf292b23bd81d1f561b | [
"Apache-2.0"
] | null | null | null | tests/ut/python/parallel/test_uniform_candidate_sampler.py | peixinhou/mindspore | fcb2ec2779b753e95c762cf292b23bd81d1f561b | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
import mindspore.context as context
from mindspore import Tensor, Parameter
import mindspore.nn as nn
from mindspore.common.api import _executor
from mindspore.nn import TrainOneStepCell, Momentum
from mindspore.ops import operations as P


class Net(nn.Cell):
    def __init__(self, embedding_weight, num_true, num_sampled, unique, range_max, seed, remove_accidential,
                 strategy1=None):
        super(Net, self).__init__()
        self.sampler = P.UniformCandidateSampler(num_true, num_sampled, unique, range_max, seed,
                                                 remove_accidential)
        if strategy1:
            self.sampler.shard(strategy1)
        self.embedding_table = Parameter(embedding_weight, "embedding_weight")
        self.gatherv2 = P.Gather()
        self.reduce_sum = P.ReduceSum()
        self.reduce_sum2 = P.ReduceSum()
        self.reduce_sum3 = P.ReduceSum()

    def construct(self, x):
        out1, out2, out3 = self.sampler(x)
        lookup = self.gatherv2(self.embedding_table, out1, 0)
        loss = out1 - out3
        loss = self.reduce_sum(loss, (0,))
        loss2 = self.reduce_sum2(lookup, (0, 1))
        loss3 = self.reduce_sum3(out2, (0, 1))
        loss4 = loss + loss2 + loss3
        return loss4


class Net2(nn.Cell):
    def __init__(self, mul_weight, num_true, num_sampled, unique, range_max, seed, remove_accidential,
                 strategy1=None):
        super(Net2, self).__init__()
        self.sampler = P.UniformCandidateSampler(num_true, num_sampled, unique, range_max, seed,
                                                 remove_accidential)
        self.cast = P.Cast()
        self.weight = Parameter(mul_weight, "w1")
        self.mul = P.Mul()
        if strategy1:
            self.sampler.shard(strategy1)

    def construct(self, x):
        x = self.mul(x, self.weight)
        x = self.cast(x, ms.int32)
        _, out2, _ = self.sampler(x)
        return out2

_w = Tensor(np.ones([48, 16]), dtype=ms.float32)
_w1 = Tensor(np.ones([96, 64]), dtype=ms.float32)
_x = Tensor(np.ones([48, 16]), dtype=ms.int32)


def compile_net(net):
    context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
    optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
    train_net = TrainOneStepCell(net, optimizer)
    train_net.set_auto_parallel()
    train_net.set_train()
    _executor.compile(train_net, _x)
    context.reset_auto_parallel_context()
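

# Note on the shard strategies used below: a strategy tuple such as ((4, 1),)
# splits the first dimension of the sampler's input across 4 of the 8 devices
# while keeping the second dimension whole; ((1, 8),) instead splits the
# second dimension across all 8 devices.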


def test_uniform_candidate_sampler_no_full_0d_split():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((4, 1),)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=False, strategy1=strategy1)
    compile_net(net)


def test_uniform_candidate_sampler_no_full_1d_split():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((1, 4),)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=False, strategy1=strategy1)
    compile_net(net)


def test_uniform_candidate_sampler_full_0d_split():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((8, 1),)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=False, strategy1=strategy1)
    compile_net(net)


def test_uniform_candidate_sampler_full_1d_split():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((1, 8),)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=False, strategy1=strategy1)
    compile_net(net)


def test_uniform_candidate_sampler_full_1d_unqiue_false():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((1, 8),)
    net = Net(_w1, num_true=16, num_sampled=16, unique=False, range_max=20, seed=1,
              remove_accidential=False, strategy1=strategy1)
    compile_net(net)


def test_uniform_candidate_sampler_auto_parllel():
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0)
    net = Net(_w1, num_true=16, num_sampled=16, unique=False, range_max=20, seed=1,
              remove_accidential=False, strategy1=None)
    compile_net(net)


def test_uniform_candidate_sampler_auto_parllel_unqiue_true():
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=False, strategy1=None)
    compile_net(net)


def test_uniform_candidate_sampler_auto_parllel_remove_true():
    context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=8, global_rank=0)
    net = Net(_w1, num_true=16, num_sampled=16, unique=True, range_max=20, seed=1,
              remove_accidential=True, strategy1=None)
    compile_net(net)


def test_uniform_candidate_sampler_full_1d_remove_true():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((1, 8),)
    net = Net(_w1, num_true=16, num_sampled=16, unique=False, range_max=20, seed=1,
              remove_accidential=True, strategy1=strategy1)
    with pytest.raises(RuntimeError):
        compile_net(net)


def test_uniform_candidate_sampler_as_final():
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
    strategy1 = ((1, 8),)
    net = Net2(_w, num_true=16, num_sampled=16, unique=False, range_max=20, seed=1, remove_accidential=False,
               strategy1=strategy1)
    with pytest.raises(RuntimeError):
        compile_net(net)
| 41.246914 | 109 | 0.700389 | 923 | 6,682 | 4.777898 | 0.192849 | 0.059864 | 0.032426 | 0.052154 | 0.648753 | 0.641043 | 0.624717 | 0.606576 | 0.599773 | 0.571655 | 0 | 0.03914 | 0.185573 | 6,682 | 161 | 110 | 41.503106 | 0.771224 | 0.09548 | 0 | 0.461538 | 0 | 0 | 0.030353 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.128205 | false | 0 | 0.076923 | 0 | 0.239316 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e622edaf8f47d87d5f8233d0e8589b835af46c3 | 3,464 | py | Python | lib/servers/data_vault.py | clayton-ho/EGGs_Control | 312f02488b47cf880c6e6600ce10856a871123df | [
"MIT"
] | null | null | null | lib/servers/data_vault.py | clayton-ho/EGGs_Control | 312f02488b47cf880c6e6600ce10856a871123df | [
"MIT"
] | null | null | null | lib/servers/data_vault.py | clayton-ho/EGGs_Control | 312f02488b47cf880c6e6600ce10856a871123df | [
"MIT"
] | null | null | null | # Copyright (C) 2007 Matthew Neeley
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""
### BEGIN NODE INFO
[info]
name = Data Vault
version = 3.0.2
description = Store and retrieve numeric data

[startup]
cmdline = %PYTHON% %FILE%
timeout = 20

[shutdown]
message = 987654321
timeout = 5
### END NODE INFO
"""

from __future__ import absolute_import

import os
import sys

from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue

import labrad.util
import labrad.wrappers

from data_vault import SessionStore
from data_vault.server import DataVault


@inlineCallbacks
def load_settings(cxn, name):
    """Load settings from registry with fallback to command line if needed.

    Attempts to load the data vault configuration for this node from the
    registry. If not configured, we instead prompt the user to enter a path
    to use for storing data, and save this config into the registry to be
    used later.
    """
    path = ['', 'Servers', name, 'Repository']
    nodename = labrad.util.getNodeName()
    reg = cxn.registry
    yield reg.cd(path, True)
    (dirs, keys) = yield reg.dir()
    if nodename in keys:
        datadir = yield reg.get(nodename)
    elif '__default__' in keys:
        datadir = yield reg.get('__default__')
    else:
        default_datadir = os.path.expanduser('~/.labrad/vault')
        print('Could not load repository location from registry.')
        print('Please enter data storage directory or hit enter to use')
        print('the default directory ({}):'.format(default_datadir))
        datadir = os.path.expanduser(input('>>>'))
        if datadir == '':
            datadir = default_datadir
        if not os.path.exists(datadir):
            os.makedirs(datadir)
        # set as default and for this node
        yield reg.set(nodename, datadir)
        yield reg.set('__default__', datadir)
        print('Data location configured in the registry at {}: {}'.format(
            path + [nodename], datadir))
        print('To change this, edit the registry keys and restart the server.')
    returnValue(datadir)
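

# Registry layout consumed by load_settings (illustrative):
#
#     /Servers/<name>/Repository/
#         __default__  -> fallback data directory for any node
#         <nodename>   -> data directory for one specific node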


def main(argv=sys.argv):
    @inlineCallbacks
    def start():
        opts = labrad.util.parseServerOptions(name=DataVault.name)
        cxn = yield labrad.wrappers.connectAsync(
            host=opts['host'], port=int(opts['port']), password=opts['password'])
        datadir = yield load_settings(cxn, opts['name'])
        yield cxn.disconnect()

        session_store = SessionStore(datadir, hub=None)
        server = DataVault(session_store)
        session_store.hub = server

        # Run the server. We do not need to start the reactor, but we will
        # stop it after the data_vault shuts down.
        labrad.util.runServer(server, run_reactor=False, stop_reactor=True)

    _ = start()
    reactor.run()


if __name__ == '__main__':
    main()
| 32.679245 | 81 | 0.687356 | 464 | 3,464 | 5.047414 | 0.411638 | 0.020495 | 0.016652 | 0.024338 | 0.055508 | 0.044406 | 0 | 0 | 0 | 0 | 0 | 0.007407 | 0.220554 | 3,464 | 105 | 82 | 32.990476 | 0.86 | 0.374134 | 0 | 0.038462 | 0 | 0 | 0.160057 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0.019231 | 0.173077 | 0 | 0.230769 | 0.096154 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8e65b59f5232680aea8dce90eae39a5dcfa86850 | 5,465 | py | Python | py-opentsdb.py | langerma/py-opentsdb | d652a96d3a53bf7c6785a1d586427d666bb3da96 | [
"BSD-2-Clause"
] | 2 | 2020-02-20T16:00:11.000Z | 2020-02-20T16:00:21.000Z | py-opentsdb.py | langerma/py-opentsdb | d652a96d3a53bf7c6785a1d586427d666bb3da96 | [
"BSD-2-Clause"
] | null | null | null | py-opentsdb.py | langerma/py-opentsdb | d652a96d3a53bf7c6785a1d586427d666bb3da96 | [
"BSD-2-Clause"
] | null | null | null | import requests
import pandas

try:
    # Use ujson if available.
    import ujson as json
except Exception:
    import json


class OpenTSDBResponseSerie(object):
    """
    A single OpenTSDB response serie, i.e. one element of the response
    array.

    Params:
        **kwargs : OpenTSDB response serie data
    """

    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            setattr(self, k, v)

    @property
    def id(self):
        """
        id for serie

        Returns:
            metric{sorted=tag,key=value}
        """
        if len(self.tags.keys()) > 0:
            tags = ",".join(["%s=%s" %
                             (k, self.tags[k]) for k in sorted(self.tags.keys())])
            return "%s{%s}" % (self.metric, tags)
        else:
            return self.metric

    def alias(self, functOrStr):
        """
        User-specified alias using lambda functions and string formatting with
        metadata provided by OpenTSDB.

        This function fails silently.

        Params:
            functOrStr : lambda function or python string format. When using lambda
                         functions, they must begin with '!' e.g. !lambda x: x....

        Return:
            Formatted alias on success and id on failure.
        """
        flatData = self.__flattenedMetadata()

        # Normalized alias
        _alias = ""
        if functOrStr.startswith("!"):
            try:
                _alias = eval(functOrStr[1:])(flatData)
            except Exception:
                pass
        else:
            try:
                _alias = functOrStr % (flatData)
            except Exception:
                pass

        if _alias == "":
            return self.id
        return _alias

    def __flattenedMetadata(self):
        """
        Flattens all metadata which is used for normalization
        """
        return dict([("metric", self.metric)] +
                    [("tags.%s" % (k), v) for k, v in self.tags.items()])

    def datapoints(self, convertTime=False):
        """
        Converts datapoints

        Params:
            convertTime : Whether to convert epoch to pandas datetime

        Return:
            dict mapping time to value
        """
        if convertTime:
            return dict([(pandas.to_datetime(int(k), unit='s'), v) for k, v in self.dps.items()])
        return dict([(int(k), v) for k, v in self.dps.items()])


class OpenTSDBResponse(object):
    """ Complete OpenTSDB response """

    def __init__(self, otsdbResp):
        """
        Params:
            otsdbResp : raw opentsdb response as a str, list or tuple.
        """
        if isinstance(otsdbResp, str):
            # JSON string response
            self._series = [OpenTSDBResponseSerie(**s) for s in json.loads(otsdbResp)]
        elif isinstance(otsdbResp, (list, tuple)):
            # already-parsed response
            self._series = [OpenTSDBResponseSerie(**s) for s in otsdbResp]
        else:
            raise RuntimeError("Invalid type: %s" % (type(otsdbResp)))

    @property
    def series(self):
        """
        Use iterator for better memory management
        """
        for s in self._series:
            yield s

    def DataFrame(self, aliasTransform=None, convertTime=False):
        """
        Converts an OpenTSDB array response into a DataFrame

        Params:
            convertTime    : Whether to convert epoch to pandas datetime
            aliasTransform : lambda function or string format to customize
                             serie name i.e. alias

        Return:
            OpenTSDB response DataFrame
        """
        if aliasTransform is None:
            return pandas.DataFrame(dict([
                (s.id, s.datapoints(convertTime)) for s in self.series]))
        else:
            return pandas.DataFrame(dict([
                (s.alias(aliasTransform), s.datapoints(convertTime)) for s in self.series]))


class BaseClient(object):

    def __init__(self, host, port=4242, ssl=False):
        if ssl:
            self.url = "https://%s:%d" % (host, port)
        else:
            self.url = "http://%s:%d" % (host, port)

    def queryUrl(self, **kwargs):
        return str("%s/api/query?%s" % (self.url, self.__urlEncodedParams(**kwargs)))

    def __urlEncodedParams(self, aggr="sum", rate=False, counter=False, end=None, **kwargs):
        timeStr = "start=%s" % (kwargs["start"])
        if end is not None:
            timeStr += "&end=%s" % (end)

        if rate:
            prefix = "%s:rate:%s" % (aggr, kwargs["metric"])
        elif counter:
            prefix = "%s:rate{counter,,1}:%s" % (aggr, kwargs["metric"])
        else:
            prefix = "%s:%s" % (aggr, kwargs["metric"])

        # TODO: check
        tagsStr = ",".join(["%s=%s" % (k, kwargs["tags"][k]) for k in sorted(kwargs["tags"].keys())])
        if tagsStr != "":
            return "%s&m=%s{%s}" % (timeStr, prefix, tagsStr)
        else:
            return "%s&m=%s" % (timeStr, prefix)


class Client(BaseClient):

    def query(self, **kwargs):
        resp = requests.get(self.queryUrl(**kwargs))
        if resp.status_code >= 200 and resp.status_code < 400:
            return OpenTSDBResponse(resp.text)
        # error
        return resp.text
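

# Minimal usage sketch (hypothetical host and metric; requires a reachable
# OpenTSDB instance):
#
#     client = Client("opentsdb.example.com", port=4242)
#     resp = client.query(metric="sys.cpu.user", start="1h-ago",
#                         tags={"host": "web01"}, aggr="avg")
#     df = resp.DataFrame(convertTime=True)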
| 31.589595 | 103 | 0.530101 | 586 | 5,465 | 4.890785 | 0.298635 | 0.004885 | 0.010468 | 0.00977 | 0.171668 | 0.147941 | 0.115143 | 0.110258 | 0.037683 | 0 | 0 | 0.003951 | 0.351693 | 5,465 | 172 | 104 | 31.773256 | 0.804968 | 0.226715 | 0 | 0.202247 | 0 | 0 | 0.05159 | 0.005881 | 0 | 0 | 0 | 0.005814 | 0 | 1 | 0.134831 | false | 0.022472 | 0.044944 | 0.011236 | 0.382022 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |