Dataset schema (column, dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| effective | string |
| hits | int64 |

The schema also repeats the 41 `qsc_code_*` / `qsc_codepython_*` names without the `_quality_signal` suffix; those duplicate columns are typed int64, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`, which are null.
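For orientation, a minimal sketch of how one might load and sanity-check records with this schema, assuming the rows are stored in a Parquet shard at a hypothetical path `shard.parquet`:

```python
import pandas as pd

# Hypothetical shard path; substitute the real file.
df = pd.read_parquet("shard.parquet")

# Spot-check a few schema fields against the table above.
print(df.dtypes[["hexsha", "size", "lang", "avg_line_length"]])

# Quality signals are per-file scalars, e.g. the fraction of unique words.
print(df["qsc_code_frac_words_unique_quality_signal"].describe())
```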
**Record 1**
hexsha: `3a090e5c232242360194af34105d0efa576a5d9f` | size: 6,613 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: path `src/test.py`, name `0shimax/SE-Wavenet`, head `f3cf8239175fec02565c81995e5b9f9e1bbd5eb1`, licenses ["MIT"]; all counts and event datetimes null
content:
```python
import argparse
from pathlib import Path
import torch
import torch.nn.functional as F
from sklearn.metrics import precision_recall_fscore_support, roc_curve, auc
import matplotlib.pyplot as plt
import numpy as np
from data.data_loader import ActivDataset, loader
from models.focal_loss import FocalLoss
from models.ete_waveform import EteWave
from models.post_process import as_seaquence
torch.manual_seed(555)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device:", device)
def main(args):
model = EteWave(args.n_class).to(device)
if Path(args.resume_model).exists():
print("load model:", args.resume_model)
model.load_state_dict(torch.load(args.resume_model))
with open(args.test_data_file_pointer_path) as f:
    test_data_file_names = [line.rstrip() for line in f]
test_dataset = ActivDataset(test_data_file_names, args.root_dir,
seq_len=args.test_seq_len, time_step=args.time_step,
is_train=False)
test_loader = loader(test_dataset, 1, shuffle=False)
test(args, model, test_loader)
def test(args, model, data_loader):
model.eval()
test_loss = 0
segmentation_correct = 0
lack_classifier_correct = 0
total_len = 0
lack_total_len = 0
true_seq_labels = []
inf_seq_labels = []
true_finish_labels = []
inf_finish_labels = []
inf_finish_proba = []
true_finish_labels_mat = np.empty([len(data_loader), 5])
inf_finish_labels_mat = np.empty([len(data_loader), 5])
with torch.no_grad():
for i_batch, (l_data, l_target, l_lack_labels) in enumerate(data_loader):
l_data = l_data.to(device)
l_target = l_target.to(device)
l_lack_labels = l_lack_labels.to(device)
total_len += l_target.shape[-1]
lack_total_len += l_lack_labels.shape[-1]
output = model(l_data)
output = output.view([-1, output.shape[-1]])
targets = l_target.view(-1)
test_loss += F.cross_entropy(output, targets, ignore_index=-1).item()
pred = output.argmax(1)
pred = as_seaquence(pred.detach(), ahead=7)
segmentation_correct += pred.eq(targets.view_as(pred)).sum().item()
model.tatc.select_data_per_labels(l_data, pred, device)
tatc_output = model.tatc()
test_loss += F.cross_entropy(tatc_output, l_lack_labels.reshape(-1)).item()
tatc_pred = tatc_output.argmax(1)
print("true:", l_lack_labels[0])
print("inference:", tatc_pred)
lack_classifier_correct += tatc_pred.eq(l_lack_labels.view_as(tatc_pred)).sum().item()
true_seq_labels += targets.view_as(pred).cpu().tolist()
inf_seq_labels += pred.cpu().tolist()
lack_labels_cpu = l_lack_labels.view_as(tatc_pred).cpu().tolist()
tatc_pred_cpu = tatc_pred.cpu().tolist()
true_finish_labels += lack_labels_cpu
inf_finish_labels += tatc_pred_cpu
inf_finish_proba += tatc_output[:, 1].view(-1).cpu().tolist()
true_finish_labels_mat[i_batch] = lack_labels_cpu
inf_finish_labels_mat[i_batch] = tatc_pred_cpu
test_loss /= len(data_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Seg Accuracy: {}/{} ({:.0f}%), lack Accuracy: {}/{} ({:.0f}%)\n'
.format(test_loss,
segmentation_correct, total_len, 100. * segmentation_correct / total_len,
lack_classifier_correct, lack_total_len, 100. * lack_classifier_correct / lack_total_len))
print("seq f1:")
print(precision_recall_fscore_support(true_seq_labels, inf_seq_labels))
print("finish work:")
print(precision_recall_fscore_support(true_finish_labels, inf_finish_labels))
fpr, tpr, _ = roc_curve(true_finish_labels, inf_finish_proba)
plt.plot(fpr, tpr)
plt.savefig(Path(args.out_dir, 'finish_roc.png'))
print("finish work AUC:")
print(auc(fpr, tpr))
for i in range(args.n_class - 1):
print("class {}:".format(i))
print(precision_recall_fscore_support(true_finish_labels_mat[:, i], inf_finish_labels_mat[:, i]))
print("低速:")
print(precision_recall_fscore_support(true_finish_labels_mat[:5, :].ravel(), inf_finish_labels_mat[:5, :].ravel()))
print("中速:")
print(precision_recall_fscore_support(true_finish_labels_mat[5:10, :].ravel(), inf_finish_labels_mat[5:10, :].ravel()))
print("高速:")
print(precision_recall_fscore_support(true_finish_labels_mat[10:15, :].ravel(), inf_finish_labels_mat[10:15, :].ravel()))
for i in range(5):
start = 15+i*3
end = 15+(i+1)*3
print("作業{}中断再開:".format(i+1))
print(precision_recall_fscore_support(true_finish_labels_mat[start:end, :].ravel(), inf_finish_labels_mat[start:end, :].ravel()))
for i in range(5):
start = 30+i*3
end = 30+(i+1)*3
print("作業{}中断:".format(i+1))
print(precision_recall_fscore_support(true_finish_labels_mat[start:end, :].ravel(), inf_finish_labels_mat[start:end, :].ravel()))
for i in range(5):
start = 45+i*3
end = 45+(i+1)*3
print("作業{}欠損:".format(i+1))
print(precision_recall_fscore_support(true_finish_labels_mat[start:end, :].ravel(), inf_finish_labels_mat[start:end, :].ravel()))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--root_dir', default='/home/sh70k/mnt/tracker_data/test', help='path to dataset')
parser.add_argument('--n-class', type=int, default=6, help='number of class')
parser.add_argument('--test-seq-len', type=int, default=200, help='fixed sequence length')
parser.add_argument('--time-step', type=float, default=.25, help='fixed time interval of input data')
parser.add_argument('--test-data-file-pointer-path', default='./data/test_data_file_pointer', help='path to test data file pointer')
parser.add_argument('--resume-model', default='/home/sh70k/mnt/tracker_data/results/model_ckpt_v1_average.pth', help='path to trained model')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batch-size', type=int, default=1, help='input batch size')
parser.add_argument('--out-dir', default='/home/sh70k/mnt/tracker_data/results', help='folder to output data and model checkpoints')
args = parser.parse_args()
Path(args.out_dir).mkdir(parents=True, exist_ok=True)
main(args)
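# Illustrative invocation (the defaults above point at the author's own paths):
#   python src/test.py --n-class 6 --test-seq-len 200 --time-step 0.25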
```
avg_line_length: 42.121019 | max_line_length: 145 | alphanum_fraction: 0.665356
qsc_code_*_quality_signal: num_words 933; num_chars 6,613; mean_word_length 4.411576; frac_words_unique 0.212219; frac_chars_top_{2,3,4}grams 0.072886 / 0.065598 / 0.068027; frac_chars_dupe_{5..10}grams 0.353013 / 0.29033 / 0.208698 / 0.16448 / 0.152575 / 0.109815; frac_chars_replacement_symbols 0; frac_chars_digital 0.017574; frac_chars_whitespace 0.199758; size_file_byte 6,613; num_lines 156; num_chars_line_max 146; num_chars_line_mean 42.391026; frac_chars_alphabet 0.760204; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0.04918; cate_autogen 0; frac_lines_long_string 0.008197; frac_chars_string_length 0.111447; frac_chars_long_word_length 0.02858; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.016393; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.090164; frac_lines_simplefunc 0; score_lines_no_logic 0.106557; frac_lines_print 0.204918
unsuffixed qsc_code_* duplicates: all 0 (frac_words_unique and frac_lines_string_concat null) | effective: 1 | hits: 0
**Record 2**
hexsha: `3a0d56385a100828a93d1a548339d663fa8c3ed6` | size: 4,031 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: path `code/ConvexHull.py`, name `vijindal/cluspand`, head `a3676594354ab59991fe75fccecdc3a400c7b153`, licenses ["MIT"]; all counts and event datetimes null
content:
```python
from structure_helper_class import structure_helper
from model_train_helper_class import model_train_helper
import matplotlib.pyplot as plt
import pandas as pd
from tabulate import tabulate
class convex_hull:
@staticmethod
def get_convex_hull_points(structure_name_to_object_map, draw_hull=True, model=None, model_str=None):
# Build a map from composition ratio to the list of structure names at that ratio
composition_ratio_to_structure_names_list_map = structure_helper.get_composition_ratio_to_structure_names_list_map(structure_name_to_object_map.values())
points = []
points_x = []
points_y = []
if model is not None:
prediction_dict = model_train_helper.get_prediction_dict(structure_name_to_object_map, model, model_str)
for composition, name_to_energy_map in prediction_dict.items():
for name, energy in name_to_energy_map.items():
if name not in model.used_structure_names_list:
continue
points.append((composition, energy, name))
points_x.append(composition)
points_y.append(energy)
else:
for composition, structure_names in composition_ratio_to_structure_names_list_map.items():
for name in structure_names:
points.append((composition, structure_name_to_object_map[name].total_energy_, name))
points_x.append(composition)
points_y.append(structure_name_to_object_map[name].total_energy_)
"""Computes the convex hull of a set of 2D points.
Input: an iterable sequence of (x, y) pairs representing the points.
Output: a list of vertices of the convex hull in counter-clockwise order,
starting from the vertex with the lexicographically smallest coordinates.
Implements Andrew's monotone chain algorithm. O(n log n) complexity.
"""
# Sort the points lexicographically (tuples are compared lexicographically).
# Remove duplicates to detect the case we have just one unique point.
points = sorted(set(points))
# Boring case: no points or a single point, possibly repeated multiple times.
if len(points) <= 1:
return points
lower = []
# 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.
# Returns a positive value, if OAB makes a counter-clockwise turn,
# negative for clockwise turn, and zero if the points are collinear.
def cross(o, a, b):
return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])
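# Quick illustrative check (not part of the original file):
# cross((0, 0), (1, 0), (1, 1)) == 1 > 0, i.e. O->A->B turns counter-clockwise,
# so the candidate point is kept; cross <= 0 pops points off the lower hull.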
for p in points:
while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:
lower.pop()
lower.append(p)
if draw_hull:
plt.scatter(points_x, points_y, marker='.')
return lower
@staticmethod
def draw(structure_name_to_object_map, draw_hull=True, model=None, model_str=None):
# Build lower hull
lower = convex_hull.get_convex_hull_points(structure_name_to_object_map,
draw_hull, model, model_str)
print('\nPoints used for Convex Hull :\n')
pd.set_option('display.expand_frame_repr', False)
df = pd.DataFrame({'Composition':[lower[i][0] for i in range(len(lower))],
'Structure name':[lower[i][2] for i in range(len(lower))],
'Structure energy':[lower[i][1] for i in range(len(lower))]})
df = df.set_index('Composition')
print(tabulate(df, headers='keys', tablefmt='psql'))
lower_x = [lower[i][0] for i in range(len(lower))]
lower_y = [lower[i][1] for i in range(len(lower))]
if draw_hull:
plt.plot(lower_x, lower_y , marker='.', color='black')
plt.show()
```
avg_line_length: 47.988095 | max_line_length: 161 | alphanum_fraction: 0.611015
qsc_code_*_quality_signal: num_words 521; num_chars 4,031; mean_word_length 4.516315; frac_words_unique 0.305182; frac_chars_top_{2,3,4}grams 0.022949 / 0.044624 / 0.062473; frac_chars_dupe_{5..10}grams 0.282618 / 0.266043 / 0.266043 / 0.233744 / 0.162346 / 0.078198; frac_chars_replacement_symbols 0; frac_chars_digital 0.007473; frac_chars_whitespace 0.302903; size_file_byte 4,031; num_lines 83; num_chars_line_max 162; num_chars_line_mean 48.566265; frac_chars_alphabet 0.829893; frac_chars_comments 0.127512; cate_xml_start 0; frac_lines_dupe_lines 0.074074; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.040051; frac_chars_long_word_length 0.00801; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.055556; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.092593; frac_lines_simplefunc 0.018519; score_lines_no_logic 0.222222; frac_lines_print 0.037037
unsuffixed qsc_code_* duplicates: all 0 (frac_words_unique and frac_lines_string_concat null) | effective: 1 | hits: 0
**Record 3**
hexsha: `3a0f2160b69e0995f3cc76e9cebbc03eb599b9f1` | size: 2,077 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: path `libra/transaction/script.py`, name `MaslDi/libra-client`, head `0983adfcb6787f7a16de4bf364cdf5596c183d88`, licenses ["MIT"]; all counts and event datetimes null
content:
```python
from canoser import Struct, Uint8, bytes_to_int_list, hex_to_int_list
from libra.transaction.transaction_argument import TransactionArgument, normalize_public_key
from libra.bytecode import bytecodes
from libra.account_address import Address
class Script(Struct):
_fields = [
('code', [Uint8]),
('args', [TransactionArgument])
]
@classmethod
def gen_transfer_script(cls, receiver_address, micro_libra):
if isinstance(receiver_address, bytes):
receiver_address = bytes_to_int_list(receiver_address)
if isinstance(receiver_address, str):
receiver_address = hex_to_int_list(receiver_address)
code = bytecodes["peer_to_peer_transfer"]
args = [
TransactionArgument('Address', receiver_address),
TransactionArgument('U64', micro_libra)
]
return Script(code, args)
@classmethod
def gen_mint_script(cls, receiver_address, micro_libra):
receiver_address = Address.normalize_to_int_list(receiver_address)
code = bytecodes["mint"]
args = [
TransactionArgument('Address', receiver_address),
TransactionArgument('U64', micro_libra)
]
return Script(code, args)
@classmethod
def gen_create_account_script(cls, fresh_address):
fresh_address = Address.normalize_to_int_list(fresh_address)
code = bytecodes["create_account"]
args = [
TransactionArgument('Address', fresh_address),
TransactionArgument('U64', 0)
]
return Script(code, args)
@classmethod
def gen_rotate_auth_key_script(cls, public_key):
key = normalize_public_key(public_key)
code = bytecodes["rotate_authentication_key"]
args = [
TransactionArgument('ByteArray', key)
]
return Script(code, args)
@staticmethod
def get_script_bytecode(script_name):
return bytecodes[script_name]
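# Illustrative usage (the address below is a hypothetical 32-byte hex string):
#   script = Script.gen_transfer_script("00" * 32, 1_000_000)
#   assert Script.get_script_bytecode("mint") == bytecodes["mint"]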
```
avg_line_length: 36.438596 | max_line_length: 93 | alphanum_fraction: 0.641791
qsc_code_*_quality_signal: num_words 206; num_chars 2,077; mean_word_length 6.140777; frac_words_unique 0.242718; frac_chars_top_{2,3,4}grams 0.142292 / 0.042688 / 0.063241; frac_chars_dupe_{5..10}grams 0.38419 / 0.365217 / 0.267984 / 0.180237 / 0.180237 / 0.180237; frac_chars_replacement_symbols 0; frac_chars_digital 0.006012; frac_chars_whitespace 0.279249; size_file_byte 2,077; num_lines 57; num_chars_line_max 94; num_chars_line_mean 36.438596; frac_chars_alphabet 0.839011; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0.32; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.054896; frac_chars_long_word_length 0.02275; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.1; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.08; frac_lines_simplefunc 0.02; score_lines_no_logic 0.32; frac_lines_print 0
unsuffixed qsc_code_* duplicates: all 0 (frac_words_unique and frac_lines_string_concat null) | effective: 1 | hits: 0
**Record 4**
hexsha: `3a110cf9f81c51a45a9e039e2675a3d01dca6237` | size: 13,818 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: path `SourceRepositoryTools/__init__.py`, name `davidbrownell/Common_Environment`, head `4015872aeac8d5da30a6aa7940e1035a6aa6a75d`, licenses ["BSL-1.0"]
max_stars_count: 1 (stars event min/max datetime both 2017-04-25T13:15:10.000Z); max_issues_count, max_forks_count and their event datetimes null
content:
```python
# ----------------------------------------------------------------------
# |
# | __init__.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2018-02-18 14:37:39
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2018.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
import os
import sys
import textwrap
from collections import OrderedDict
# ----------------------------------------------------------------------
_script_fullpath = os.path.abspath(__file__) if "python" in sys.executable.lower() else sys.executable
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def GetFundamentalRepository():
# Get the location of the fundamental dir. This is "../" when invoked from
# a python script, but more complicated when invoked as part of a frozen
# binary.
# Don't import Constants here, as Constants relies on this for initialization
value = os.getenv("DEVELOPMENT_ENVIRONMENT_FUNDAMENTAL")
if value is None:
# If here, we aren't running in a standard environment and are likely running
# as part of a frozen exe. See if we are running on a file system that is
# similar to Common_Environment.
assert "python" not in sys.executable.lower(), sys.executable
potential_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), ".."))
if os.path.isdir(potential_dir):
value = potential_dir
if value is not None and value.endswith(os.path.sep):
value = value[:-len(os.path.sep)]
return value
# ----------------------------------------------------------------------
# This file may be invoked by our included version of python - all imports will
# work as expected. But sometimes, this file may be invoked by embedded versions
# of python (for example, when used as part of a Mercurial plugin). At that point,
# we need to go through a bit more work to ensure that module-level imports work
# as expected.
try:
import inflect
import six
import wrapt
# If here, everything was found and all is good
except ImportError:
# If here, we are in a foreign python environment. Hard-code an import path
# to a known location of these base-level libraries. Because the libraries are
# so basic, it doesn't matter which one we use; therefore pick the lowest common
# denominator.
fundamental_repo = GetFundamentalRepository()
python_root = os.path.join(fundamental_repo, "Tools", "Python", "v2.7.10")
assert os.path.isdir(python_root), python_root
for suffix in [ os.path.join("Windows", "Lib", "site-packages"),
os.path.join("Ubuntu", "lib", "python2.7", "site-packages"),
]:
potential_dir = os.path.join(python_root, suffix)
if os.path.isdir(potential_dir):
sys.path.insert(0, potential_dir)
break
# Try it again
import inflect
import six
import wrapt
del sys.path[0]
# ----------------------------------------------------------------------
# Backwards compatibility
from SourceRepositoryTools.Impl.Configuration import *
from SourceRepositoryTools.Impl import Constants
from SourceRepositoryTools.Impl.Utilities import DelayExecute, \
GetLatestVersion, \
GetRepositoryUniqueId, \
GetVersionedDirectory
# ----------------------------------------------------------------------
@wrapt.decorator
def ToolRepository(wrapped, instance, args, kwargs):
"""\
Signals that a repository is a tool repository (a repository that contains
items that help in the development process but doesn't contain primitives
used by other dependent repositories during the build process.
"""
return wrapped(*args, **kwargs)
# ----------------------------------------------------------------------
def CreateDependencyMap(root_dir):
# Note that this functionality is very similar to that found in ActivationData.
# The difference between the two is this function will compile a map of all repositories
# under the code dir, while the code in ActivationData will only traverse environment
# data created during setup. Theoretically, it is possible for ActivationData
# to be implemented in terms of this function, but that would be too inefficient for
# general use.
from CommonEnvironment.NamedTuple import NamedTuple
from CommonEnvironment import Shell
from CommonEnvironment import SourceControlManagement
from SourceRepositoryTools.Impl.EnvironmentBootstrap import EnvironmentBootstrap
# ----------------------------------------------------------------------
RepoInfo = NamedTuple( "RepoInfo",
"UniqueId",
"Name",
"Root",
"Configurations",
)
ConfigInfo = NamedTuple( "ConfigInfo",
"ReliesOn",
"ReliedUponBy",
)
DependencyInfo = NamedTuple( "DependencyInfo",
"Configuration",
"Dependency",
)
# ----------------------------------------------------------------------
assert os.path.isdir(root_dir), root_dir
environment = Shell.GetEnvironment()
repositories = OrderedDict()
for scm, directory in SourceControlManagement.EnumSCMDirectories(root_dir):
result = GetRepositoryUniqueId( directory,
scm=scm,
throw_on_error=False,
)
if result is None:
continue
repo_name, repo_id = result
assert repo_id not in repositories, (repo_id, directory, repositories[repo_id].Root)
repo_bootstrap_data = EnvironmentBootstrap.Load(directory, environment=environment)
repo_bootstrap_data.Name = repo_name
repo_bootstrap_data.Id = repo_id
repo_bootstrap_data.Root = directory
repo_bootstrap_data.PriorityModifier = 0
repositories[repo_id] = repo_bootstrap_data
# Order by priority
# ----------------------------------------------------------------------
def Walk(repo_id, priority_modifier):
assert repo_id in repositories, repo_id
repo_info = repositories[repo_id]
repo_info.PriorityModifier += priority_modifier
for configuration in six.itervalues(repo_info.Configurations):
for dependency in configuration.Dependencies:
Walk(dependency.Id, priority_modifier + 1)
# ----------------------------------------------------------------------
for repo_id in six.iterkeys(repositories):
Walk(repo_id, 1)
priority_values = list(six.iteritems(repositories))
priority_values.sort(key=lambda x: x[1].PriorityModifier, reverse=True)
# Convert the repositories into a structure that is easier to process
results = OrderedDict()
for unique_id, repo_info in priority_values:
results[unique_id] = RepoInfo( unique_id,
repo_info.Name,
repo_info.Root,
OrderedDict(),
)
for config_name in six.iterkeys(repo_info.Configurations):
results[unique_id].Configurations[config_name] = ConfigInfo([], [])
# Populate the dependencies
for unique_id, repo_info in priority_values:
for config_name, config_info in six.iteritems(repo_info.Configurations):
# It is possible that a dependency is included more than once (as will be the case if someone
# includes Common_Environment as a dependency even though a dependency on Common_Environment is
# implied). Ensure that we are only looking at unique dependencies.
these_dependencies = []
dependency_lookup = set()
for dependency in config_info.Dependencies:
if dependency.Id in dependency_lookup:
continue
these_dependencies.append(( dependency, repositories[dependency.Id].PriorityModifier ))
dependency_lookup.add(dependency.Id)
# Ensure that the dependencies are ordered in priority order
these_dependencies.sort(key=lambda x: x[0].Id, reverse=True)
for dependency, priority_modifier in these_dependencies:
results[unique_id].Configurations[config_name].ReliesOn.append(DependencyInfo(dependency.Configuration, results[dependency.Id]))
results[dependency.Id].Configurations[dependency.Configuration].ReliedUponBy.append(DependencyInfo(config_name, results[unique_id]))
# Ensure that we can index by repo path as well as id
for unique_id in list(six.iterkeys(results)):
results[results[unique_id].Root] = results[unique_id]
return results
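# Illustrative shape of the returned map (names are placeholders):
#   dependency_map["<repo unique id or repo root>"].Configurations["<config>"].ReliesOn
#       -> [DependencyInfo(Configuration, Dependency=RepoInfo), ...]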
# ----------------------------------------------------------------------
def DisplayDependencyMap( dependency_map,
output_stream=sys.stdout,
):
from CommonEnvironment.StreamDecorator import StreamDecorator
# ----------------------------------------------------------------------
for k, v in six.iteritems(dependency_map):
if not os.path.isdir(k):
continue
output_stream.write(textwrap.dedent(
"""\
Name: {name} ({unique_id})
Directory: {dir}
Configurations:
{configurations}
""").format( name=v.Name,
unique_id=v.UniqueId,
dir=k,
configurations=StreamDecorator.LeftJustify( '\n'.join([ textwrap.dedent(
"""\
{name}
ReliesOn:
{relies_on}
ReliedUponBy:
{relied_upon_by}
""").format( name=ck,
relies_on='\n'.join([ " - {} <{}> [{}]".format(item.Dependency.Name, item.Configuration, item.Dependency.Root) for item in cv.ReliesOn ]) if cv.ReliesOn else " <None>",
relied_upon_by='\n'.join([ " - {} <{}> [{}]".format(item.Dependency.Name, item.Configuration, item.Dependency.Root) for item in cv.ReliedUponBy ]) if cv.ReliedUponBy else " <None>",
)
for ck, cv in six.iteritems(v.Configurations)
]),
2,
skip_first_line=False,
),
))
# ----------------------------------------------------------------------
def EnumRepositories():
from SourceRepositoryTools.Impl.ActivationData import ActivationData
# ----------------------------------------------------------------------
for repo in ActivationData.Load(None, None).PrioritizedRepos:
yield repo
# ----------------------------------------------------------------------
def GetRepositoryRootForFile(filename):
dirname = os.path.dirname(filename)
while True:
if os.path.isfile(os.path.join(dirname, Constants.REPOSITORY_ID_FILENAME)):
return dirname
potential_dirname = os.path.dirname(dirname)
if potential_dirname == dirname:
break
dirname = potential_dirname
raise Exception("Unable to find the repository root for '{}'".format(filename))
```
avg_line_length: 45.453947 | max_line_length: 285 | alphanum_fraction: 0.481473
qsc_code_*_quality_signal: num_words 1,170; num_chars 13,818; mean_word_length 5.57265; frac_words_unique 0.287179; frac_chars_top_{2,3,4}grams 0.018405 / 0.009202 / 0.004141; frac_chars_dupe_{5..10}grams 0.094479 / 0.070245 / 0.033742 / 0.033742 / 0.023006 / 0.023006; frac_chars_replacement_symbols 0; frac_chars_digital 0.00431; frac_chars_whitespace 0.361919; size_file_byte 13,818; num_lines 303; num_chars_line_max 286; num_chars_line_mean 45.60396; frac_chars_alphabet 0.735171; frac_chars_comments 0.275655; cate_xml_start 0; frac_lines_dupe_lines 0.098039; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.03741; frac_chars_long_word_length 0.003956; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.03268
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.045752; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.130719; frac_lines_simplefunc 0; score_lines_no_logic 0.202614; frac_lines_print 0
unsuffixed qsc_code_* duplicates: all 0 (frac_words_unique and frac_lines_string_concat null) | effective: 1 | hits: 0
**Record 5**
hexsha: `3a11c774870f73e9df814c0fb0e907ad67a018a8` | size: 2,075 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: path `src/einsteinpy/tests/test_plotting/test_staticgeodesicplotter.py`, name `Ankk98/einsteinpy`, head `e6c3e3939063a7698410163b6de52e499bb3c8ea`, licenses ["MIT"]; all counts and event datetimes null
content:
```python
from unittest import mock
import astropy.units as u
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pytest
from einsteinpy.coordinates import SphericalDifferential
from einsteinpy.plotting import StaticGeodesicPlotter
@pytest.fixture()
def dummy_data():
sph_obj = SphericalDifferential(
306 * u.m,
np.pi / 2 * u.rad,
np.pi / 2 * u.rad,
0 * u.m / u.s,
0 * u.rad / u.s,
951.0 * u.rad / u.s,
)
t = 0 * u.s
m = 4e24 * u.kg
start_lambda = 0.0
end_lambda = 0.002
step_size = 0.5e-6
return sph_obj, t, m, start_lambda, end_lambda, step_size
def test_staticgeodesicplotter_has_axes(dummy_data):
sph_obj, _, m, _, el, ss = dummy_data
cl = StaticGeodesicPlotter(m)
assert isinstance(cl.ax, mpl.axes.SubplotBase)
assert cl.time.value == 0.0
assert cl._attractor_present is False
@mock.patch("einsteinpy.plotting.geodesics_static.plt.show")
def test_plot_calls_plt_show(mock_show, dummy_data):
sph_obj, _, m, _, el, ss = dummy_data
cl = StaticGeodesicPlotter(m)
cl.plot(sph_obj, el, ss)
cl.show()
mock_show.assert_called_with()
assert cl._attractor_present
@mock.patch("einsteinpy.plotting.geodesics_static.plt.savefig")
def test_plot_save_saves_plot(mock_save, dummy_data):
sph_obj, _, m, _, el, ss = dummy_data
cl = StaticGeodesicPlotter(m)
cl.plot(sph_obj, el, ss)
name = "test_plot.png"
cl.save(name)
mock_save.assert_called_with(name)
def test_plot_calls_draw_attractor_Manualscale(dummy_data):
sph_obj, _, m, _, el, ss = dummy_data
cl = StaticGeodesicPlotter(m, attractor_radius_scale=1500)
cl.plot(sph_obj, el, ss)
assert cl._attractor_present
assert cl.attractor_radius_scale == 1500
assert cl.get_curr_plot_radius != -1
def test_plot_calls_draw_attractor_AutoScale(dummy_data):
sph_obj, _, m, _, el, ss = dummy_data
cl = StaticGeodesicPlotter(m)
cl.plot(sph_obj, el, ss)
assert cl._attractor_present
assert cl.get_curr_plot_radius != -1
```
avg_line_length: 28.040541 | max_line_length: 63 | alphanum_fraction: 0.700241
qsc_code_*_quality_signal: num_words 313; num_chars 2,075; mean_word_length 4.361022; frac_words_unique 0.268371; frac_chars_top_{2,3,4}grams 0.072527 / 0.052747 / 0.065934; frac_chars_dupe_{5..10}grams 0.452015 / 0.428571 / 0.386081 / 0.287912 / 0.287912 / 0.287912; frac_chars_replacement_symbols 0; frac_chars_digital 0.021687; frac_chars_whitespace 0.2; size_file_byte 2,075; num_lines 73; num_chars_line_max 64; num_chars_line_mean 28.424658; frac_chars_alphabet 0.800602; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0.338983; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.051084; frac_chars_long_word_length 0.044819; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.186441
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.101695; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.135593; frac_lines_simplefunc 0; score_lines_no_logic 0.254237; frac_lines_print 0
unsuffixed qsc_code_* duplicates: all 0 (frac_words_unique and frac_lines_string_concat null) | effective: 1 | hits: 0
**Record 6**
hexsha: `3a1626ac2fa1019fb590d26ad03b0ec329ab6d9d` | size: 2,017 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: path `deciphon_cli/console/scan.py`, name `EBI-Metagenomics/deciphon-cli`, head `aa090c886db1f4dacc6bc88b46b6ebcecb79eaab`, licenses ["MIT"]; all counts and event datetimes null
content:
```python
from enum import Enum
import typer
from fasta_reader import read_fasta
from deciphon_cli.core import ScanPost, SeqPost
from deciphon_cli.requests import get_json, get_plain, post_json
__all__ = ["app"]
app = typer.Typer()
class ScanIDType(str, Enum):
SCAN_ID = "scan_id"
JOB_ID = "job_id"
@app.command()
def add(
db_id: int = typer.Argument(...),
fasta_filepath: str = typer.Argument(...),
multi_hits: bool = typer.Argument(True),
hmmer3_compat: bool = typer.Argument(False),
):
scan = ScanPost(db_id=db_id, multi_hits=multi_hits, hmmer3_compat=hmmer3_compat)
with read_fasta(fasta_filepath) as f:
for item in f:
seq = SeqPost(name=item.id, data=item.sequence)
scan.seqs.append(seq)
typer.echo(post_json(f"/scans/", scan.dict()))
@app.command()
def get(
scan_id: int = typer.Argument(...),
id_type: ScanIDType = typer.Option(ScanIDType.SCAN_ID.value),
):
typer.echo((get_json(f"/scans/{scan_id}", {"id_type": id_type.value})))
@app.command()
def seq_list(scan_id: int = typer.Argument(...)):
typer.echo((get_json(f"/scans/{scan_id}/seqs")))
@app.command()
def list():
typer.echo((get_json(f"/scans")))
@app.command()
def prod_list(scan_id: int = typer.Argument(...)):
typer.echo((get_json(f"/scans/{scan_id}/prods")))
@app.command()
def prod_gff(scan_id: int = typer.Argument(...)):
typer.echo(get_plain(f"/scans/{scan_id}/prods/gff"), nl=False)
@app.command()
def prod_path(scan_id: int = typer.Argument(...)):
typer.echo(get_plain(f"/scans/{scan_id}/prods/path"), nl=False)
@app.command()
def prod_fragment(scan_id: int = typer.Argument(...)):
typer.echo(get_plain(f"/scans/{scan_id}/prods/fragment"), nl=False)
@app.command()
def prod_amino(scan_id: int = typer.Argument(...)):
typer.echo(get_plain(f"/scans/{scan_id}/prods/amino"), nl=False)
@app.command()
def prod_codon(scan_id: int = typer.Argument(...)):
typer.echo(get_plain(f"/scans/{scan_id}/prods/codon"), nl=False)
```
avg_line_length: 24.901235 | max_line_length: 84 | alphanum_fraction: 0.67526
qsc_code_*_quality_signal: num_words 307; num_chars 2,017; mean_word_length 4.228013; frac_words_unique 0.214984; frac_chars_top_{2,3,4}grams 0.087827 / 0.100154 / 0.124807; frac_chars_dupe_{5..10}grams 0.432203 / 0.411402 / 0.320493 / 0.320493 / 0.298921 / 0.298921; frac_chars_replacement_symbols 0; frac_chars_digital 0.001735; frac_chars_whitespace 0.142786; size_file_byte 2,017; num_lines 80; num_chars_line_max 85; num_chars_line_mean 25.2125; frac_chars_alphabet 0.748988; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0.226415; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.11651; frac_chars_long_word_length 0.090729; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.188679; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.09434; frac_lines_simplefunc 0; score_lines_no_logic 0.339623; frac_lines_print 0
unsuffixed qsc_code_* duplicates: all 0 (frac_words_unique and frac_lines_string_concat null) | effective: 1 | hits: 0
**Record 7**
hexsha: `3a163271adf00fd1d184016bb403b5d130a4068f` | size: 1,655 | ext: py | lang: Python
max_stars repo: path `neuralmaterial/lib/models/vgg.py`, name `NejcHirci/material-addon`, head `c08e2081413c3319b712c2f7193ac8013f601382`, licenses ["MIT"]; max_stars_count: 4 (2022-01-31T14:26:39.000Z to 2022-02-06T06:34:27.000Z)
max_issues repo: same path, head, and licenses; name `NejcHirci/material_addon` (underscore, as in the source); max_issues_count: 2 (2022-01-30T10:35:04.000Z for both min and max)
max_forks repo: same as max_stars; max_forks_count and fork event datetimes null
content:
```python
import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
class VGG(nn.Module):
def __init__(self, features, pretrained):
super(VGG, self).__init__()
self.features = features
if not pretrained:
self._initialize_weights()
def _initialize_weights(self) -> None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(in_channels):
layers = []
cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def vgg19(pretrained, in_channels):
model = VGG(make_layers(in_channels), pretrained)
if pretrained:
state_dict = load_state_dict_from_url('https://download.pytorch.org/models/vgg19-dcbb9e9d.pth')
model.load_state_dict(state_dict, strict=False)
return model
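# Illustrative usage (note that this VGG only defines .features, no classifier head):
#   net = vgg19(pretrained=True, in_channels=3)
#   feats = net.features(torch.randn(1, 3, 224, 224))  # -> shape (1, 512, 7, 7)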
```
avg_line_length: 33.77551 | max_line_length: 113 | alphanum_fraction: 0.578852
qsc_code_*_quality_signal: num_words 219; num_chars 1,655; mean_word_length 4.187215; frac_words_unique 0.360731; frac_chars_top_{2,3,4}grams 0.039258 / 0.061069 / 0.065431; frac_chars_dupe_{5..10}grams 0.175573 / 0.131952 / 0.080698 / 0.080698 / 0.080698 / 0.080698; frac_chars_replacement_symbols 0; frac_chars_digital 0.060293; frac_chars_whitespace 0.298489; size_file_byte 1,655; num_lines 48; num_chars_line_max 114; num_chars_line_mean 34.479167; frac_chars_alphabet 0.729544; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0.076923; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.0429; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.102564; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.076923; frac_lines_simplefunc 0; score_lines_no_logic 0.25641; frac_lines_print 0
unsuffixed qsc_code_* duplicates: all 0 (frac_words_unique and frac_lines_string_concat null) | effective: 1 | hits: 0
**Record 8**
hexsha: `3a16bef75430d1f8616b4661d929e57eb96f5d11` | size: 1,295 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: path `quasimodo/cache/file_cache.py`, name `Aunsiels/CSK`, head `c88609bc76d865b4987aaf30ddf1247a2031b1a6`, licenses ["MIT"]
max_stars_count: 16 (2019-11-28T13:26:37.000Z to 2022-02-09T09:53:10.000Z); max_issues_count: 1 (2021-03-26T20:31:48.000Z to 2021-07-15T08:52:47.000Z); max_forks_count: 3 (2020-08-14T23:23:25.000Z to 2021-12-24T14:02:35.000Z)
content:
```python
import os
import shutil
class FileCache(object):
def __init__(self, cache_dir):
self.cache_dir = cache_dir + "/"
if not os.path.exists(self.cache_dir):
os.makedirs(self.cache_dir)
def write_cache(self, query, suggestions):
filename = self.cache_dir + query.replace(" ", "-").replace("'", "_").replace("/", "-")
with open(filename, "w") as f:
for suggestion in suggestions:
f.write(str(suggestion[0]) + "\t" + str(suggestion[1]) + "\n")
def read_cache(self, query):
filename = self.cache_dir + query.replace(" ", "-").replace("'", "_").replace("/", "-")
if os.path.isfile(filename):
suggestions = []
with open(filename) as f:
for line in f:
suggestion = line.strip().split("\t")
suggestions.append((suggestion[0], float(suggestion[1])))
return suggestions
else:
return None
def delete_cache(self):
# Only delete if we are sure it is a test
if "test" in self.cache_dir:
shutil.rmtree(self.cache_dir, ignore_errors=True)
def read_regex(self, regex):
raise NotImplementedError
def read_all(self):
raise NotImplementedError
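# Illustrative round trip (the directory name contains "test", so delete_cache works):
#   cache = FileCache("test_cache")
#   cache.write_cache("some query", [("suggestion a", 0.9)])
#   cache.read_cache("some query")  # -> [("suggestion a", 0.9)]
#   cache.delete_cache()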
```
avg_line_length: 32.375 | max_line_length: 95 | alphanum_fraction: 0.565251
qsc_code_*_quality_signal: num_words 150; num_chars 1,295; mean_word_length 4.74; frac_words_unique 0.406667; frac_chars_top_{2,3,4}grams 0.101266 / 0.135021 / 0.056259; frac_chars_dupe_{5..10}grams 0.129395 / 0.129395 / 0.129395 / 0.129395 / 0 / 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.0044; frac_chars_whitespace 0.29807; size_file_byte 1,295; num_lines 39; num_chars_line_max 96; num_chars_line_mean 33.205128; frac_chars_alphabet 0.777778; frac_chars_comments 0.030116; cate_xml_start 0; frac_lines_dupe_lines 0.133333; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.019139; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.2; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.066667; frac_lines_simplefunc 0; score_lines_no_logic 0.366667; frac_lines_print 0
unsuffixed qsc_code_* duplicates: all 0 (frac_words_unique and frac_lines_string_concat null) | effective: 1 | hits: 0
**Record 9**
hexsha: `3a193908dfb0eb3ea9c064b546eae9b145317435` | size: 10,915 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: path `txraft/test_txraft.py`, name `tehasdf/txraft`, head `860345e4a10d438d3fc69d752f09a06546c92d08`, licenses ["MIT"]; all counts and event datetimes null
content:
```python
from twisted.internet.defer import succeed
from twisted.internet.task import Clock
from twisted.trial.unittest import TestCase
from txraft import Entry, RaftNode, MockRPC, STATE
from txraft.commands import AppendEntriesCommand, RequestVotesCommand
class MockStoreDontUse(object):
def __init__(self, entries=None):
self.currentTerm = 0
self.votedFor = None
if entries is None:
entries = {}
self.log = entries
def getLastIndex(self):
if not self.log:
return succeed(0)
return succeed(max(self.log.iterkeys()))
def getLastTerm(self):
if not self.log:
return succeed(0)
return (self.getLastIndex()
.addCallback(lambda index: self.log[index].term)
)
def getByIndex(self, ix):
return succeed(self.log[ix])
def setVotedFor(self, votedFor):
self.votedFor = votedFor
return succeed(True)
def setCurrentTerm(self, currentTerm):
self.currentTerm = currentTerm
return succeed(True)
def getVotedFor(self):
return succeed(self.votedFor)
def getCurrentTerm(self):
return succeed(self.currentTerm)
def contains(self, term, index):
if term == index == 0:
return True
return index in self.log and self.log[index].term == term
def deleteAfter(self, ix, inclusive=True):
if not inclusive:
ix += 1
while True:
if not ix in self.log:
break
del self.log[ix]
ix += 1
def insert(self, entries):
for index, entry in entries.iteritems():
if index in self.log and self.log[index].term != entry.term:
self.deleteAfter(index)
for index, entry in entries.iteritems():
self.log[index] = entry
class TestMockStoreInsert(TestCase):
def test_empty(self):
store = MockStoreDontUse()
newentry = Entry(term=1, payload=True)
store.insert({1: newentry})
self.assertEqual(store.log, {1: newentry})
def test_noconflict(self):
oldentry = Entry(term=1, payload=True)
store = MockStoreDontUse({1: oldentry})
newentry = Entry(term=1, payload=True)
store.insert({2: newentry})
self.assertEqual(store.log, {1: oldentry, 2: newentry})
def test_conflict_last(self):
oldentry = Entry(term=1, payload=False)
store = MockStoreDontUse({1: oldentry})
newentry = Entry(term=2, payload=True)
store.insert({1: newentry})
self.assertEqual(store.log, {1: newentry})
def test_conflict_many(self):
oldentry1 = Entry(term=1, payload=1)
oldentry2 = Entry(term=1, payload=2)
oldentry3 = Entry(term=1, payload=3)
store = MockStoreDontUse({1: oldentry1, 2: oldentry2, 3: oldentry3})
newentry1 = Entry(term=2, payload=4)
newentry2 = Entry(term=2, payload=5)
newentry3 = Entry(term=2, payload=6)
store.insert({2: newentry1, 3: newentry2, 4: newentry3})
self.assertEqual(store.log, {1: oldentry1, 2: newentry1, 3: newentry2, 4: newentry3})
class TestElection(TestCase):
def test_three_up(self):
store1 = MockStoreDontUse()
store2 = MockStoreDontUse()
store3 = MockStoreDontUse()
rpc1 = MockRPC()
rpc2 = MockRPC()
rpc3 = MockRPC()
clock1 = Clock()
clock2 = Clock()
clock3 = Clock()
node1 = RaftNode(1, store1, rpc1, clock=clock1)
node2 = RaftNode(2, store2, rpc2, clock=clock2)
node3 = RaftNode(3, store3, rpc3, clock=clock3)
for rpc in [rpc1, rpc2, rpc3]:
for node in [node1, node2, node3]:
rpc.simpleAddNode(node)
clock1.advance(0.4)
self.assertIs(node1._state, STATE.LEADER)
def test_respond_requestVote(self):
store = MockStoreDontUse()
rpc = MockRPC()
clock = Clock()
node = RaftNode(1, store, rpc, clock=clock)
resp = node.respond_requestVote(RequestVotesCommand(term=4,
candidateId=2,
lastLogIndex=4,
lastLogTerm=4))
term, result = self.successResultOf(resp)
self.assertTrue(result)
votedFor = self.successResultOf(store.getVotedFor())
self.assertEqual(votedFor, 2)
def test_respond_requestVote_alreadyVoted(self):
store = MockStoreDontUse()
store.setVotedFor(3)
rpc = MockRPC()
clock = Clock()
node = RaftNode(1, store, rpc, clock=clock)
resp = node.respond_requestVote(RequestVotesCommand(term=4,
candidateId=2,
lastLogIndex=4,
lastLogTerm=4))
term, result = self.successResultOf(resp)
self.assertFalse(result)
resp = node.respond_requestVote(RequestVotesCommand(term=4,
candidateId=3,
lastLogIndex=4,
lastLogTerm=4))
term, result = self.successResultOf(resp)
self.assertTrue(result)
def test_respond_requestVote_lowerTerm(self):
store = MockStoreDontUse()
store.setCurrentTerm(3)
rpc = MockRPC()
clock = Clock()
node = RaftNode(1, store, rpc, clock=clock)
resp = node.respond_requestVote(RequestVotesCommand(term=2,
candidateId='id',
lastLogIndex=4,
lastLogTerm=4))
term, result = self.successResultOf(resp)
self.assertFalse(result)
def test_respond_requestVote_oldLog(self):
store = MockStoreDontUse(entries={
2: Entry(term=2, payload=1),
3: Entry(term=3, payload=2)
})
store.setCurrentTerm(3)
rpc = MockRPC()
clock = Clock()
node = RaftNode(1, store, rpc, clock=clock)
resp = node.respond_requestVote(RequestVotesCommand(term=4,
candidateId='id',
lastLogIndex=2,
lastLogTerm=2))
term, result = self.successResultOf(resp)
self.assertFalse(result)
resp = node.respond_requestVote(RequestVotesCommand(term=4,
candidateId='id',
lastLogIndex=4,
lastLogTerm=2))
term, result = self.successResultOf(resp)
self.assertFalse(result)
resp = node.respond_requestVote(RequestVotesCommand(term=4,
candidateId='id',
lastLogIndex=2,
lastLogTerm=3))
term, result = self.successResultOf(resp)
self.assertFalse(result)
class TestAppendEntries(TestCase):
def test_respond_appendEntries_simple(self):
store = MockStoreDontUse()
rpc = MockRPC()
clock = Clock()
node = RaftNode(1, store, rpc, clock=clock)
newentry = Entry(term=0, payload=1)
resp = node.respond_appendEntries(AppendEntriesCommand(term=0,
leaderId=2,
prevLogIndex=0,
prevLogTerm=0,
entries={1: newentry},
leaderCommit=1))
term, result = self.successResultOf(resp)
self.assertEqual(term, 0)
self.assertTrue(result)
self.assertEqual(store.log, {1: newentry})
def test_respond_appendEntries_empty(self):
store = MockStoreDontUse()
rpc = MockRPC()
clock = Clock()
node = RaftNode(1, store, rpc, clock=clock)
newentry = Entry(term=0, payload=1)
resp = node.respond_appendEntries(AppendEntriesCommand(term=0,
leaderId=2,
prevLogIndex=0,
prevLogTerm=0,
entries={},
leaderCommit=1))
term, result = self.successResultOf(resp)
self.assertEqual(term, 0)
self.assertTrue(result)
class TestCallingAppendEntries(TestCase):
def test_backwards(self):
clock = Clock()
leader_store = MockStoreDontUse(entries={
1: Entry(term=1, payload=1),
2: Entry(term=2, payload=2),
})
leader_store.setCurrentTerm(2)
leader_rpc = MockRPC()
leader = RaftNode(1, leader_store, leader_rpc, clock=clock)
follower_store = MockStoreDontUse()
follower_rpc = MockRPC()
follower = RaftNode(2, follower_store, follower_rpc, clock=clock)
leader_rpc.simpleAddNode(follower)
follower_rpc.simpleAddNode(leader)
d = leader._callAppendEntries(follower.id, {})
res = self.successResultOf(d)
self.assertEqual(leader_store.log, follower_store.log)
def test_add(self):
clock = Clock()
leader_store = MockStoreDontUse(entries={
1: Entry(term=1, payload=1),
2: Entry(term=2, payload=2),
3: Entry(term=2, payload=3),
})
leader_store.setCurrentTerm(2)
leader_rpc = MockRPC()
leader = RaftNode(1, leader_store, leader_rpc, clock=clock)
follower_store = MockStoreDontUse({
1: Entry(term=1, payload=1)
})
follower_rpc = MockRPC()
follower = RaftNode(2, follower_store, follower_rpc, clock=clock)
leader_rpc.simpleAddNode(follower)
follower_rpc.simpleAddNode(leader)
d = leader._callAppendEntries(follower.id, {})
res = self.successResultOf(d)
self.assertEqual(leader_store.log, follower_store.log)
def test_remove_incorrect(self):
clock = Clock()
leader_store = MockStoreDontUse(entries={
1: Entry(term=1, payload=1),
2: Entry(term=2, payload=2),
3: Entry(term=2, payload=3),
})
leader_store.setCurrentTerm(2)
leader_rpc = MockRPC()
leader = RaftNode(1, leader_store, leader_rpc, clock=clock)
follower_store = MockStoreDontUse({
1: Entry(term=1, payload=1),
2: Entry(term=5, payload=1)
})
follower_rpc = MockRPC()
follower = RaftNode(2, follower_store, follower_rpc, clock=clock)
leader_rpc.simpleAddNode(follower)
follower_rpc.simpleAddNode(leader)
d = leader._callAppendEntries(follower.id, {})
res = self.successResultOf(d)
self.assertEqual(leader_store.log, follower_store.log)
class TestCluster(TestCase):
def test_cluster(self):
nodes = []
for num in range(5):
clock = Clock()
rpc = MockRPC()
store = MockStoreDontUse()
node = RaftNode(num, store, rpc, clock=clock, electionTimeout=1)
nodes.append((node, rpc, store, clock))
for node1, rpc, _, _ in nodes:
for node2, _, _, _ in nodes:
if node1 is node2:
continue
rpc.simpleAddNode(node2)
for node, rpc, store, clock in nodes:
clock.advance(1.0)
# for node, rpc, store, clock in nodes:
# print 'asd', node._state
```
avg_line_length: 30.319444 | max_line_length: 93 | alphanum_fraction: 0.599542
qsc_code_*_quality_signal: num_words 1,155; num_chars 10,915; mean_word_length 5.586147; frac_words_unique 0.127273; frac_chars_top_{2,3,4}grams 0.037663 / 0.026193 / 0.031618; frac_chars_dupe_{5..10}grams 0.629107 / 0.609888 / 0.57641 / 0.556107 / 0.532703 / 0.509764; frac_chars_replacement_symbols 0; frac_chars_digital 0.026148; frac_chars_whitespace 0.29574; size_file_byte 10,915; num_lines 359; num_chars_line_max 94; num_chars_line_mean 30.4039; frac_chars_alphabet 0.813191; frac_chars_comments 0.006047; cate_xml_start 0; frac_lines_dupe_lines 0.572464; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.000738; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.076087
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.094203; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.018116; frac_lines_simplefunc 0.01087; score_lines_no_logic 0.173913; frac_lines_print 0
unsuffixed qsc_code_* duplicates: all 0 (frac_words_unique and frac_lines_string_concat null) | effective: 1 | hits: 0
**Record 10**
hexsha: `3a19793608f407d01e4af46fb22f949e028fb9e8` | size: 6,867 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: path `prototype/c2dn/script/analysis/extractData.py`, name `Thesys-lab/C2DN`, head `55aa7fc1cd13ab0c80a9c25aa0288b454616d83c`, licenses ["Apache-2.0"]; all counts and event datetimes null
content:
```python
import os, sys
sys.path.append(os.path.expanduser("~/workspace/"))
from pyutils.common import *
def load_fe_metrics(ifilepath):
n_byte_partial_miss, n_req_partial_miss = 0, 0
n_byte_push_chunk, n_byte_chunk_hit, n_req_chunk_hit, n_byte_ICP_chunk = 0, 0, 0, 0
n_req_ICP_chunk, n_req_skip_chunk = 0, 0
n_req_chunk_resp_skipped = 0
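# The metrics file is Prometheus-style plain text; each relevant line looks like
# (illustrative values):
#   frontend_byte{reqType="allToClient"} 1.234e+09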
with open(ifilepath) as ifile:
for line in ifile:
if not line.startswith("frontend"):
continue
if 'byte{reqType="allToClient"}' in line:
n_byte_to_client = float(line.split()[1])
elif 'nReq{reqType="allToClient"}' in line:
n_req_to_client = float(line.split()[1])
elif 'trafficType="origin"' in line:
n_byte_from_origin = float(line.split()[1])
elif 'reqType="fullObjMiss"' in line:
n_req_from_origin = float(line.split()[1])
elif 'traffic{trafficType="intra"}' in line:
n_byte_intra = float(line.split()[1])
elif 'traffic{trafficType="ICPFull"}' in line:
n_byte_ICP_full = float(line.split()[1])
elif 'traffic{trafficType="ICPChunk"}' in line:
n_byte_ICP_chunk = float(line.split()[1])
elif 'trafficType="pushFullObj"' in line:
n_byte_push_full = float(line.split()[1])
elif 'trafficType="pushChunk"' in line:
n_byte_push_chunk = float(line.split()[1])
elif 'nReq{reqType="ICPFull"}' in line:
n_req_ICP_full = float(line.split()[1])
elif 'nReq{reqType="ICPChunk"}' in line:
n_req_ICP_chunk = float(line.split()[1])
elif 'nReq{reqType="skipFetch"}' in line:
n_req_skip_chunk = float(line.split()[1])
elif 'frontend_nReq{reqType="chunkRespSkipped"}' in line:
n_req_chunk_resp_skipped = float(line.split()[1])
# elif 'traffic{trafficType="pushChunk"}' in line:
# n_byte_push_chunk = float(line.split()[1])
elif 'byte{reqType="chunkHit"}' in line:
n_byte_chunk_hit = float(line.split()[1])
elif 'nReq{reqType="chunkHit"}' in line:
n_req_chunk_hit = float(line.split()[1])
elif 'byte{reqType="partialHit_1"}' in line:
n_byte_partial_miss += float(line.split()[1]) / 3 * 2
elif 'byte{reqType="partialHit_2"}' in line:
n_byte_partial_miss += float(line.split()[1]) / 3
elif 'nReq{reqType="partialHit_1"}' in line:
n_req_partial_miss += float(line.split()[1])
elif 'nReq{reqType="partialHit_2"}' in line:
n_req_partial_miss += float(line.split()[1])
ret_dict = {
"n_byte_to_client": n_byte_to_client,
"n_req_to_client": n_req_to_client,
"n_byte_from_origin": n_byte_from_origin,
"n_req_from_origin": n_req_from_origin,
"n_byte_intra": n_byte_intra,
"n_byte_ICP_full": n_byte_ICP_full,
"n_req_ICP_full": n_req_ICP_full,
"n_byte_push_full": n_byte_push_full,
"n_byte_push_chunk": n_byte_push_chunk,
"n_byte_chunk_hit": n_byte_chunk_hit,
"n_req_chunk_hit": n_req_chunk_hit,
"n_req_skip_chunk": n_req_skip_chunk,
"n_req_chunk_resp_skipped": n_req_chunk_resp_skipped,
"n_byte_ICP_chunk": n_byte_ICP_chunk,
"n_req_ICP_chunk": n_req_ICP_chunk,
"n_byte_partial_miss": n_byte_partial_miss,
"n_req_partial_miss": n_req_partial_miss,
}
return ret_dict
def load_all_fe_metrics(ifile_dir, system):
all_data = []
for i in range(10):
try:
d = load_fe_metrics("{}/cdn{}/c2dn/metricFE".format(ifile_dir, i))
all_data.append(d)
except Exception as e:
print(e)
client_bytes = sum([d["n_byte_to_client"] for d in all_data])
origin_bytes = sum([d["n_byte_from_origin"] for d in all_data])
client_nreq = sum([d["n_req_to_client"] for d in all_data])
origin_nreq = sum([d["n_req_from_origin"] for d in all_data])
intra_bytes = sum([d["n_byte_intra"] for d in all_data])
# this is not accurate as it includes skipped chunk fetch
intra_get_bytes = sum([d["n_byte_ICP_full"] for d in all_data])
intra_push_bytes = sum([d["n_byte_push_full"] for d in all_data])
intra_get_nreq = sum([d["n_req_ICP_full"] for d in all_data])
if system == "C2DN":
# intra_get_nreq += (sum([d["n_req_ICP_chunk"] for d in all_data]) - sum([d["n_req_skip_chunk"] for d in all_data]))//3
# intra_get_nreq += sum([d["n_req_ICP_chunk"] for d in all_data]) // 3
intra_get_bytes += sum([d["n_byte_ICP_chunk"] for d in all_data])
intra_push_bytes += sum([d["n_byte_push_chunk"] for d in all_data])
print("bmr {:.4f} omr {:.4f} | bytes intra {:.4f} intra_get {:.4f} intra_push {:.4f} | nReq intra get (full) {:.4f}".format(
origin_bytes/client_bytes, origin_nreq/client_nreq,
intra_bytes/client_bytes, intra_get_bytes/client_bytes, intra_push_bytes/client_bytes,
intra_get_nreq/client_nreq,
))
if system == "C2DN":
chunk_serve_nreq = sum([d["n_req_chunk_hit"] for d in all_data])
chunk_serve_nreq += sum([d["n_req_partial_miss"] for d in all_data])
chunk_serve_bytes = sum([d["n_byte_chunk_hit"] for d in all_data])
chunk_serve_bytes += sum([d["n_byte_partial_miss"] for d in all_data])
print("serving with chunks: {:.4f} req {:.4f} bytes".format(
chunk_serve_nreq/client_nreq, chunk_serve_bytes/client_bytes,
))
if __name__ == "__main__":
BASE_DIR = "/nvme/log/p/2021-02-01/"
# load_all_fe_metrics(f"{BASE_DIR}/0124/aws_CDN_akamai2_expLatency_unavail0_1000G/", system="CDN")
# load_all_fe_metrics(f"{BASE_DIR}/0124/aws_C2DN_akamai2_expLatency_unavail0_43_1000G/", system="C2DN")
# load_all_fe_metrics(f"{BASE_DIR}/0125/aws_CDN_akamai2_expLatency_unavail1_1000G/", system="CDN")
# load_all_fe_metrics(f"{BASE_DIR}/0125/aws_C2DN_akamai2_expLatency_unavail1_43_1000G/", system="C2DN")
# load_all_fe_metrics(f"{BASE_DIR}/0127/aws_CDN_akamai1_expLatency_unavail0_100G/", system="CDN")
# load_all_fe_metrics(f"{BASE_DIR}/0127/aws_C2DN_akamai1_expLatency_unavail0_43_100G/", system="C2DN")
# load_all_fe_metrics(f"{BASE_DIR}/0130/aws_CDN_akamai1_expLatency_unavail0_100G/", system="CDN")
# load_all_fe_metrics(f"{BASE_DIR}/0130/aws_C2DN_akamai1_expLatency_unavail0_43_100G/", system="C2DN")
load_all_fe_metrics(f"{BASE_DIR}/aws_CDN_akamai2_expLatency_unavail0_1000G/", system="CDN")
load_all_fe_metrics(f"{BASE_DIR}/aws_C2DN_akamai2_expLatency_unavail0_43_1000G/", system="C2DN")
```
avg_line_length: 42.388889 | max_line_length: 128 | alphanum_fraction: 0.642493
qsc_code_*_quality_signal: num_words 1,029; num_chars 6,867; mean_word_length 3.870748; frac_words_unique 0.119534; frac_chars_top_{2,3,4}grams 0.052724 / 0.035149 / 0.07532; frac_chars_dupe_{5..10}grams 0.75521 / 0.649761 / 0.584735 / 0.407733 / 0.337183 / 0.26111; frac_chars_replacement_symbols 0; frac_chars_digital 0.031653; frac_chars_whitespace 0.231688; size_file_byte 6,867; num_lines 161; num_chars_line_max 129; num_chars_line_mean 42.652174; frac_chars_alphabet 0.723275; frac_chars_comments 0.165866; cate_xml_start 0; frac_lines_dupe_lines 0.056604; cate_autogen 0; frac_lines_long_string 0.009434; frac_chars_string_length 0.237953; frac_chars_long_word_length 0.116348; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1; frac_lines_func_ratio 0.018868; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.018868; frac_lines_simplefunc 0; score_lines_no_logic 0.04717; frac_lines_print 0.028302
unsuffixed qsc_code_* duplicates: all 0 (frac_words_unique and frac_lines_string_concat null) | effective: 1 | hits: 0
3a20f5e777be4409e899dec4e5460fecff5677e0
| 10,325
|
py
|
Python
|
baselines/baseline_summarunner/main.py
|
PKULiuHui/LiveBlogSum
|
b6a22521ee454e649981d70ddca6c89a1bac5a4c
|
[
"MIT"
] | null | null | null |
baselines/baseline_summarunner/main.py
|
PKULiuHui/LiveBlogSum
|
b6a22521ee454e649981d70ddca6c89a1bac5a4c
|
[
"MIT"
] | null | null | null |
baselines/baseline_summarunner/main.py
|
PKULiuHui/LiveBlogSum
|
b6a22521ee454e649981d70ddca6c89a1bac5a4c
|
[
"MIT"
] | null | null | null |
# coding:utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
import math
import re
import sys
from Vocab import Vocab
from Dataset import Dataset
from RNN_RNN import RNN_RNN
import os, json, argparse, random
sys.path.append('../../')
from myrouge.rouge import get_rouge_score
parser = argparse.ArgumentParser(description='SummaRuNNer')
# model
parser.add_argument('-save_dir', type=str, default='checkpoints1/')
parser.add_argument('-embed_dim', type=int, default=100)
parser.add_argument('-embed_num', type=int, default=100)
parser.add_argument('-hidden_size', type=int, default=200)
parser.add_argument('-pos_dim', type=int, default=50)
parser.add_argument('-pos_num', type=int, default=800)
parser.add_argument('-seg_num', type=int, default=10)
# train
parser.add_argument('-lr', type=float, default=1e-3)
parser.add_argument('-max_norm', type=float, default=5.0)
parser.add_argument('-batch_size', type=int, default=5)
parser.add_argument('-epochs', type=int, default=8)
parser.add_argument('-seed', type=int, default=1)
parser.add_argument('-embedding', type=str, default='../../word2vec/embedding.npz')
parser.add_argument('-word2id', type=str, default='../../word2vec/word2id.json')
parser.add_argument('-train_dir', type=str, default='../../data/bbc_opt/train/')
parser.add_argument('-valid_dir', type=str, default='../../data/bbc_opt/test/')
parser.add_argument('-sent_trunc', type=int, default=20)
parser.add_argument('-doc_trunc', type=int, default=10)
parser.add_argument('-blog_trunc', type=int, default=80)
parser.add_argument('-valid_every', type=int, default=100)
# test
parser.add_argument('-load_model', type=str, default='')
parser.add_argument('-test_dir', type=str, default='../../data/bbc_opt/test/')
parser.add_argument('-ref', type=str, default='outputs/ref/')
parser.add_argument('-hyp', type=str, default='outputs/hyp/')
parser.add_argument('-sum_len', type=int, default=1)  # summary length as a multiple of the reference summary length
parser.add_argument('-mmr', type=float, default=0.75)
# other
parser.add_argument('-test', action='store_true')
parser.add_argument('-use_cuda', type=bool, default=False)
use_cuda = torch.cuda.is_available()
args = parser.parse_args()
if use_cuda:
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
args.use_cuda = use_cuda
def my_collate(batch):
return {key: [d[key] for d in batch] for key in batch[0]}
# use the ROUGE-1 F-score as the similarity between two sentences
def rouge_1_f(hyp, ref):
hyp = re.sub(r'[^a-z]', ' ', hyp.lower()).strip().split()
ref = re.sub(r'[^a-z]', ' ', ref.lower()).strip().split()
if len(hyp) == 0 or len(ref) == 0:
return .0
ref_flag = [0 for _ in ref]
hit = .0
for w in hyp:
for i in range(0, len(ref)):
if w == ref[i] and ref_flag[i] == 0:
hit += 1
ref_flag[i] = 1
break
p = hit / len(hyp)
r = hit / len(ref)
if math.fabs(p + r) < 1e-10:
f = .0
else:
f = 2 * p * r / (p + r)
return f
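# e.g. rouge_1_f('the cat sat', 'the cat slept') matches 2 unigrams -> p = r = 2/3, f = 2/3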
# after scoring, re-rank with an MMR strategy to reduce redundancy
def re_rank(sents, scores, ref_len):
summary = ''
chosen = []
cur_scores = [s for s in scores]
cur_len = 0
    while len(chosen) < len(scores):  # stop once every sentence has been considered
sorted_idx = np.array(cur_scores).argsort()
cur_idx = sorted_idx[-1]
for i in range(len(cur_scores)):
new_score = args.mmr * scores[i] - (1 - args.mmr) * rouge_1_f(sents[i], sents[cur_idx])
cur_scores[i] = min(cur_scores[i], new_score)
cur_scores[cur_idx] = -1e20
chosen.append(cur_idx)
tmp = sents[cur_idx].split()
tmp_len = len(tmp)
if cur_len + tmp_len > ref_len:
summary += ' '.join(tmp[:ref_len - cur_len])
break
else:
summary += ' '.join(tmp) + ' '
cur_len += tmp_len
return summary.strip()
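# The loop above is a greedy MMR selection: take the highest-scoring sentence,
# then discount every remaining score by (1 - mmr) * ROUGE-1-F overlap with the
# sentence just chosen, stopping once the summary reaches ref_len words.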
# compute loss and ROUGE scores on the validation or test set
def evaluate(net, vocab, data_iter, train_next):  # train_next indicates whether training resumes afterwards
net.eval()
criterion = nn.MSELoss()
    loss, r1, r2, rl, rsu = .0, .0, .0, .0, .0  # ROUGE-1/2/L/SU4, using recall with length capped at the reference summary length
batch_num = .0
blog_num = .0
    for batch in tqdm(data_iter):
        # compute the loss on this batch
features, targets, sents_content, summaries, doc_nums, doc_lens = vocab.make_features(batch, args)
features, targets = Variable(features), Variable(targets.float())
if use_cuda:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_nums, doc_lens)
batch_num += 1
loss += criterion(probs, targets).data.item()
        probs_start = 0  # start offset into probs for the current blog
        doc_lens_start = 0  # start offset into doc_lens for the current blog
        sents_start = 0  # start offset into sents_content for the current blog
        for i in range(len(doc_nums)):  # the last batch may be smaller than args.batch_size
sents_num = 0
for j in range(doc_lens_start, doc_lens_start + doc_nums[i]):
sents_num += doc_lens[j]
cur_probs = probs[probs_start:probs_start + sents_num]
cur_sents = sents_content[sents_start: sents_start + sents_num]
probs_start = probs_start + sents_num
doc_lens_start = doc_lens_start + doc_nums[i]
sents_start = sents_start + sents_num
if use_cuda:
cur_probs = cur_probs.cpu()
cur_probs = list(cur_probs.detach().numpy())
            sorted_index = list(np.argsort(cur_probs))  # indices that would sort cur_probs in ascending order
sorted_index.reverse()
ref = summaries[i].strip()
ref_len = len(ref.split())
hyp = re_rank(cur_sents, cur_probs, ref_len)
score = get_rouge_score(hyp, ref)
r1 += score['ROUGE-1']['r']
r2 += score['ROUGE-2']['r']
rl += score['ROUGE-L']['r']
rsu += score['ROUGE-SU4']['r']
blog_num += 1
loss = loss / batch_num
r1 = r1 / blog_num
r2 = r2 / blog_num
rl = rl / blog_num
rsu = rsu / blog_num
    if train_next:  # switch back to train mode before resuming training
net.train()
return loss, r1, r2, rl, rsu
def train():
print('Loading vocab, train and val dataset...')
embed = torch.Tensor(np.load(args.embedding)['embedding'])
args.embed_num = embed.size(0)
args.embed_dim = embed.size(1)
with open(args.word2id) as f:
word2id = json.load(f)
vocab = Vocab(embed, word2id)
train_data = []
for fn in os.listdir(args.train_dir):
f = open(args.train_dir + fn, 'r')
train_data.append(json.load(f))
f.close()
train_dataset = Dataset(train_data)
val_data = []
for fn in os.listdir(args.valid_dir):
f = open(args.valid_dir + fn, 'r')
val_data.append(json.load(f))
f.close()
val_dataset = Dataset(val_data)
net = RNN_RNN(args, embed)
criterion = nn.BCELoss()
if use_cuda:
net.cuda()
train_iter = DataLoader(dataset=train_dataset,
batch_size=args.batch_size,
shuffle=False,
collate_fn=my_collate)
val_iter = DataLoader(dataset=val_dataset,
batch_size=args.batch_size,
shuffle=False,
collate_fn=my_collate)
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
net.train()
min_loss = float('inf')
for epoch in range(1, args.epochs + 1):
for i, batch in enumerate(train_iter):
features, targets, _1, _2, doc_nums, doc_lens = vocab.make_features(batch, args)
features, targets = Variable(features), Variable(targets.float())
if use_cuda:
features = features.cuda()
targets = targets.cuda()
probs = net(features, doc_nums, doc_lens)
loss = criterion(probs, targets)
optimizer.zero_grad()
loss.backward()
clip_grad_norm_(net.parameters(), args.max_norm)
optimizer.step()
print('EPOCH [%d/%d]: BATCH_ID=[%d/%d] loss=%f' % (
epoch, args.epochs, i, len(train_iter), loss))
cnt = (epoch - 1) * len(train_iter) + i
if cnt % args.valid_every == 0:
print('Begin valid... Epoch %d, Batch %d' % (epoch, i))
cur_loss, r1, r2, rl, rsu = evaluate(net, vocab, val_iter, True)
if cur_loss < min_loss:
min_loss = cur_loss
save_path = args.save_dir + 'RNN_RNN' + '_%d_%.4f_%.4f_%.4f_%.4f_%.4f' % (
                    cnt // args.valid_every, cur_loss, r1, r2, rl, rsu)
net.save(save_path)
print('Epoch: %2d Min_Val_Loss: %f Cur_Val_Loss: %f Rouge-1: %f Rouge-2: %f Rouge-l: %f Rouge-SU4: %f' %
(epoch, min_loss, cur_loss, r1, r2, rl, rsu))
def test():
print('Loading vocab and test dataset...')
embed = torch.Tensor(np.load(args.embedding)['embedding'])
args.embed_num = embed.size(0)
args.embed_dim = embed.size(1)
with open(args.word2id) as f:
word2id = json.load(f)
vocab = Vocab(embed, word2id)
test_data = []
for fn in os.listdir(args.test_dir):
f = open(args.test_dir + fn, 'r')
test_data.append(json.load(f))
f.close()
test_dataset = Dataset(test_data)
test_iter = DataLoader(dataset=test_dataset,
batch_size=args.batch_size,
shuffle=False,
collate_fn=my_collate)
print('Loading model...')
if use_cuda:
checkpoint = torch.load(args.save_dir + args.load_model)
else:
checkpoint = torch.load(args.save_dir + args.load_model, map_location=lambda storage, loc: storage)
net = RNN_RNN(checkpoint['args'])
net.load_state_dict(checkpoint['model'])
if use_cuda:
net.cuda()
net.eval()
print('Begin test...')
test_loss, r1, r2, rl, rsu = evaluate(net, vocab, test_iter, False)
print('Test_Loss: %f Rouge-1: %f Rouge-2: %f Rouge-l: %f Rouge-SU4: %f' % (test_loss, r1, r2, rl, rsu))
if __name__ == '__main__':
if args.test:
test()
else:
train()

3a2b8a858ee6da50e87c4cd8bfce4156f67a9cc7 | 844 | py | Python | lgtv.py | aakropotkin/PyWebOSTV | 4c060541b397dc20f79049fa9390c1b6b1a7050b | ["MIT"]

#! /usr/bin/env nix-shell
#! nix-shell -i python3 -p "[python3] ++ (with pkgs.python37Packages; [ requests future ws4py pytest pylint coveralls twine wheel ])"
# <<END Extended Shebang>>
import json
from pywebostv.discovery import *
from pywebostv.connection import *
from pywebostv.controls import *
with open('/home/camus/.lgtv.json') as f:
store = json.load(f)
client = WebOSClient(store['hostname'])
client.connect()
for status in client.register(store):
if status == WebOSClient.PROMPTED:
print("Please accept the connect on the TV!")
elif status == WebOSClient.REGISTERED:
print("Registration successful!")
ctrl = InputControl(client)
system = SystemControl(client)
media = MediaControl(client)
app = ApplicationControl(client)
inp = InputControl(client)
inp.connect_input()
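# A few follow-up calls from pywebostv's documented controls (hedged: verify
# against the installed pywebostv version before relying on them):
#   system.notify("Hello from Python!")   # on-screen toast
#   media.set_volume(10)
#   for application in app.list_apps():
#       print(application["title"])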
# vim: set filetype=python :

3a2e8191805b6dc90c6ff13576324c98a0708604 | 2,102 | py | Python | lutin_lua.py | generic-library/lua | 1dddc5e025d94bd62ae6ca9e9e3f2cd11ed23a35 | ["MIT"]

#!/usr/bin/python
import realog.debug as debug
import lutin.tools as tools
def get_type():
return "LIBRARY"
def get_desc():
return "Lua interpretic script module"
def get_licence():
return "MIT"
def get_compagny_type():
return "org"
def get_compagny_name():
return "lua"
def get_maintainer():
return "authors.txt"
def get_version():
return "version.txt"
def configure(target, my_module):
my_module.add_depend([
'elog',
'etk',
])
my_module.add_flag('c', [
'-DLUA_VERSION_TAG_NAME="\"5.2\""',
'-Wall',
])
    my_module.add_flag('c', '-DLUA_COMPAT_ALL', export=True)
#ifeq ("$(TARGET_OS)","Windows")
# my_module.compile_flags_CC('-D_WIN32')
#else
my_module.add_flag('c', '-DLUA_USE_LINUX')
#endif
my_module.add_src_file([
'lua/lapi.cpp',
'lua/lauxlib.cpp',
'lua/lbaselib.cpp',
'lua/lbitlib.cpp',
'lua/lcode.cpp',
'lua/lcorolib.cpp',
'lua/lctype.cpp',
'lua/ldblib.cpp',
'lua/ldebug.cpp',
'lua/ldo.cpp',
'lua/ldump.cpp',
'lua/lfunc.cpp',
'lua/lgc.cpp',
'lua/linit.cpp',
'lua/liolib.cpp',
'lua/llex.cpp',
'lua/lmathlib.cpp',
'lua/lmem.cpp',
'lua/loadlib.cpp',
'lua/lobject.cpp',
'lua/lopcodes.cpp',
'lua/loslib.cpp',
'lua/lparser.cpp',
'lua/lstate.cpp',
'lua/lstring.cpp',
'lua/lstrlib.cpp',
'lua/ltable.cpp',
'lua/ltablib.cpp',
'lua/ltm.cpp',
'lua/lundump.cpp',
'lua/lvm.cpp',
'lua/lzio.cpp',
])
my_module.add_header_file([
'lua/ltm.h',
'lua/llimits.h',
'lua/lctype.h',
'lua/lgc.h',
'lua/lstring.h',
'lua/lzio.h',
'lua/lmem.h',
'lua/lobject.h',
'lua/lvm.h',
'lua/ldebug.h',
'lua/lundump.h',
'lua/lcode.h',
'lua/ltable.h',
'lua/lfunc.h',
'lua/lparser.h',
'lua/lopcodes.h',
'lua/lua.h',
'lua/ldo.h',
'lua/llex.h',
'lua/lapi.h',
'lua/lstate.h',
'lua/lualib.h',
'lua/lauxlib.h',
'lua/luaconf.h',
])
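    # compile as C99 (1999) without GNU extensions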
my_module.compile_version('c', 1999, gnu=False)
return True

3a34c3856763aba4f082175e4e23858129d09e5b | 3,595 | py | Python | civbot/commands/cmd_add_game.py | thyjukki/Civi-Botti-2.0 | 7b9ff6bf3e97b90f61286e7688db731f91365e88 | ["MIT"] | 3 issues (2020-04-28 – 2021-06-01)

import telegram
from telegram.ext import CommandHandler, ConversationHandler, MessageHandler, \
Filters
from civbot.commands.cmd_cancel import cancel_all
from civbot.models import User, Subscription
SELECT = 1
def add_game(bot, update):
user = User.get_or_none(User.id == update.message.from_user.id)
if not user:
update.message.reply_text('You are not registered!')
return ConversationHandler.END
chat_id = update.message.chat_id
if update.message.chat.type != 'private':
admin_ids = [
admin.user.id for admin in bot.get_chat_administrators(chat_id)
]
if update.message.from_user.id not in admin_ids:
update.message.reply_text('You are not admin of the group!')
return ConversationHandler.END
games = user.games
if len(games) == 0:
update.message.reply_text("You don't have any registered games")
return ConversationHandler.END
games = list(
filter(
lambda g: not (
Subscription.select().where(
Subscription.game == g
).where(
Subscription.chat_id == chat_id
).exists()
),
games
)
)
if len(games) == 0:
update.message.reply_text(
"You don't have any registered games not in this chat"
)
return ConversationHandler.END
games = list(filter(lambda g: g.active, games))
if len(games) == 0:
update.message.reply_text("You don't have any active games")
return ConversationHandler.END
custom_keyboard = []
for game in games:
custom_keyboard.append([game.name])
custom_keyboard.append(['cancel'])
reply_markup = telegram.ReplyKeyboardMarkup(custom_keyboard)
    update.message.reply_text('Choose the game', reply_markup=reply_markup)
return SELECT
# noinspection PyUnusedLocal
def select_game(bot, update):
if update.message.text == 'cancel':
update.message.reply_text(
'Canceled',
reply_markup=telegram.ReplyKeyboardRemove()
)
return ConversationHandler.END
user = User.get_or_none(User.id == update.message.from_user.id)
game = [g for g in user.games if g.name == update.message.text]
if len(game) == 0:
update.message.reply_text(
'Game does not exist',
reply_markup=telegram.ReplyKeyboardRemove()
)
return ConversationHandler.END
game = game[0]
chat_id = update.message.chat_id
subscriptions = Subscription.select().where(
Subscription.game == game
).where(
Subscription.chat_id == chat_id
)
if subscriptions.exists():
update.message.reply_text(
'Game has already been added',
reply_markup=telegram.ReplyKeyboardRemove()
)
return ConversationHandler.END
Subscription.create(
game=game,
chat_id=chat_id
)
update.message.reply_text(
f'Subscribed to {game.name}.'
f' This chat will now start receiving notifications for the '
'game. To get notifications, send /register to me as private message',
reply_markup=telegram.ReplyKeyboardRemove())
return ConversationHandler.END
def handle():
return ConversationHandler(
entry_points=[CommandHandler('addgame', add_game)],
states={
SELECT: [MessageHandler(Filters.text, select_game)],
},
fallbacks=[CommandHandler('cancel', cancel_all)]
)
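# Hypothetical registration sketch (python-telegram-bot style; the exact
# dispatcher API depends on the installed library version):
#   dispatcher.add_handler(handle())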

3a354a29d377cbf952a940a0b75110dea65c2d7e | 1,355 | py | Python | tutorials/W1D4_Optimization/solutions/W1D4_Tutorial1_Solution_9732cf5a.py | carsen-stringer/course-content-dl | 27749aec56a3d2a43b3890483675ad0338a2680f | ["CC-BY-4.0", "BSD-3-Clause"]

def rmsprop_update(loss, params, grad_sq, lr=1e-1, alpha=0.8):
"""Perform an RMSprop update on a collection of parameters
Args:
loss (tensor): A scalar tensor containing the loss whose gradient will be computed
params (iterable): Collection of parameters with respect to which we compute gradients
grad_sq (iterable): Moving average of squared gradients
lr (float): Scalar specifying the learning rate or step-size for the update
alpha (float): Moving average parameter
"""
  # Clear up gradients as PyTorch automatically accumulates gradients from
# successive backward calls
zero_grad(params)
# Compute gradients on given objective
loss.backward()
for (par, gsq) in zip(params, grad_sq):
# Update estimate of gradient variance
gsq.data = alpha * gsq.data + (1-alpha) * par.grad.data**2
# Update parameters
par.data -= lr * (par.grad.data / (1e-8 + gsq.data)**0.5)
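# The update implemented above, written out (a summary, not extra behaviour):
#   grad_sq <- alpha * grad_sq + (1 - alpha) * grad**2
#   param   <- param - lr * grad / (1e-8 + grad_sq)**0.5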
set_seed(2021)
model = MLP(in_dim=784, out_dim=10, hidden_dims=[])
print('\n The model parameters before the update are: \n')
print_params(model)
loss = loss_fn(model(X), y).to(DEVICE)
grad_sq = [0.0001*i for i in list(model.parameters())]
## Uncomment below to test your function
rmsprop_update(loss, list(model.parameters()), grad_sq=grad_sq, lr=1e-2)
print('\n The model parameters after the update are: \n')
print_params(model)

3a35de756e73312c8d8aa96bb05d403a7ba20ad8 | 4,289 | py | Python | tridentstream/inputs/rfs/handler.py | tridentstream/mediaserver (also listed as tidalstream/mediaserver) | 5d47d766df2e8dca076e41348062567a569019fd | ["MIT"] | 6 stars (2020-01-03 – 2021-09-13)

import logging
from urllib.parse import urljoin
import requests
from thomas import Item, StreamerBase, router
from unplugged import Schema, fields
from twisted.internet import threads
from ...exceptions import NotModifiedException, PathNotFoundException
from ...plugins import InputPlugin
from ...stream import Stream
logger = logging.getLogger(__name__)
class RemoteFilesystemInputSchema(Schema):
url = fields.String()
token = fields.String()
priority = fields.Integer(default=5)
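# Returned by thomas_stream() below; evaluate() lets the router rank this
# plugin by its configured priority when several sources can serve a path
# (interpretation inferred from the surrounding code).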
class RemoteFilesystemStreamer:
def __init__(self, plugin, path):
self.plugin = plugin
self.path = path
def evaluate(self):
return self.plugin.config["priority"] + 1
def stream(self):
return self.plugin.stream(self.path)
class RemoteFilesystemInputPlugin(InputPlugin):
plugin_name = "remotefilesystem"
config_schema = RemoteFilesystemInputSchema
simpleadmin_templates = True
def __init__(self, config):
self.config = config
self.route_input_rfs_list = f"input_rfs_list_{self.name}"
router.register_handler(
self.route_input_rfs_list, self.thomas_list, False, True, False
)
self.route_input_rfs_stream = f"input_rfs_stream_{self.name}"
router.register_handler(
self.route_input_rfs_stream, self.thomas_stream, False, False, True
)
def unload(self):
router.unregister_handler(self.route_input_rfs_list)
router.unregister_handler(self.route_input_rfs_stream)
def get_headers(self):
return {"Authorization": f"Token {self.config['token']}"}
def get_item(self, path):
item = Item(id=path.strip().split("/")[-1], router=router)
item.expandable = True
item.streamable = True
self.add_routes(item, path, skip=True)
# item.add_route(self.route_input_rfs_list, False, True, False, kwargs={'path': path})
# item.streamable = True
# item.add_route(self.route_input_rfs_stream, False, False, True, kwargs={'path': path})
return item
def add_routes(self, item, path, skip=False):
if not skip:
if path:
path = f"{path}/{item.id}"
else:
path = item.id
if item.is_streamable:
item.add_route(
self.route_input_rfs_stream, False, False, True, kwargs={"path": path}
)
if item.is_listable:
if item.is_expanded:
for nested_item in item.nested_items:
self.add_routes(nested_item, path)
else:
item.add_route(
self.route_input_rfs_list, False, True, False, kwargs={"path": path}
)
def thomas_list(self, item, path, depth=0, modified_since=None):
logger.info(f"Listing path {path!r} with depth {depth}")
item_id = item.id
headers = self.get_headers()
if modified_since:
headers["If-Modified-Since"] = modified_since.strftime(
"%a, %d %b %Y %H:%M:%S GMT"
)
r = requests.get(
urljoin(self.config["url"].strip("/") + "/", path),
params={"depth": depth},
headers=headers,
)
if r.status_code == 200:
item = Item.unserialize(r.json(), router=router)
item.id = item_id
self.add_routes(item, path, skip=True)
return item
elif r.status_code == 304:
raise NotModifiedException()
elif r.status_code == 404 or r.status_code == 403:
raise PathNotFoundException()
else:
logger.warning(
f"Unknown status code {r.status_code} while listing {self.name}/{path}"
)
def thomas_stream(self, item, path):
logger.info(f"Trying to stream {path!r}")
return RemoteFilesystemStreamer(self, path)
def stream(self, path):
logger.info(f"Trying to stream {path!r}")
headers = self.get_headers()
r = requests.post(
urljoin(self.config["url"].strip("/") + "/", path), headers=headers
)
if r.status_code != 200:
raise PathNotFoundException()
return Stream.unserialize(r.json())

3a37961a35f717a520a82adff518def2441c92f7 | 2,024 | py | Python | app/main/service/exp_service.py | ayoyin/REST-API | 965cda0f87ba8055ee78e9300ca80d5ed79a41c8 | ["MIT"] | 1 star (2021-06-01) | 10 issues (2021-05-26 – 2021-06-03)

from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from model.exp_model import Experience, ExperienceSchema
class ExperienceService(object):
def __init__(self, app:Flask, db:SQLAlchemy) -> None:
self.app = app
self.db = db
self.exp_schema = ExperienceSchema()
self.exps_schema = ExperienceSchema(many=True)
# Creating new experience
def add_experience(self):
description = request.json['description']
employee_id = request.json['employee_id']
start_date = request.json['start_date']
end_date = request.json['end_date']
new_experience = Experience(employee_id, description, start_date, end_date)
self.db.session.add(new_experience)
self.db.session.commit()
return self.exp_schema.jsonify(new_experience)
    # Retrieving all experiences
def get_experiences(self):
all_experiences = Experience.query.all()
return jsonify(self.exps_schema.dump(all_experiences))
    # Retrieving a single experience
def get_experience(self, id):
experience = Experience.query.get(id)
return self.exp_schema.jsonify(experience)
# Updating single experience
def update_experience(self, id):
experience = Experience.query.get(id)
employee_id = request.json['employee_id']
description = request.json['description']
start_date = request.json['start_date']
end_date = request.json['end_date']
experience.employee_id = employee_id
experience.description = description
experience.start_date = start_date
experience.end_date = end_date
self.db.session.commit()
return self.exp_schema.jsonify(experience)
# Deleting single experience
def delete_experience(self, id):
experience = Experience.query.get(id)
self.db.session.delete(experience)
self.db.session.commit()
return self.exp_schema.jsonify(experience)
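# Hypothetical wiring sketch (route names assumed; not part of this module):
#   app = Flask(__name__)
#   db = SQLAlchemy(app)
#   service = ExperienceService(app, db)
#   app.add_url_rule('/experience', 'add_experience', service.add_experience, methods=['POST'])
#   app.add_url_rule('/experience/<id>', 'get_experience', service.get_experience, methods=['GET'])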

3a393e7c4f3f1d263e29f99079506e54bfc2ef8b | 367 | py | Python | scripts/hackathon/create_evaluable_CAG.py | mikiec84/delphi | 2e517f21e76e334c7dfb14325d25879ddf26d10d | ["Apache-2.0"] | 25 stars (2018-03-03 – 2022-01-16) | 385 issues (2018-02-21 – 2022-02-17) | 19 forks (2018-03-20 – 2021-09-29)

import sys
import pickle
def create_evaluable_CAG(input, output):
with open(input, "rb") as f:
G = pickle.load(f)
G.res = 200
G.assemble_transition_model_from_gradable_adjectives()
G.sample_from_prior()
with open(output, "wb") as f:
pickle.dump(G, f)
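# Usage: python create_evaluable_CAG.py <input_CAG.pkl> <output_CAG.pkl>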
if __name__ == "__main__":
create_evaluable_CAG(sys.argv[1], sys.argv[2])

3a3c22b7737a192dfe1f9e9024ae59ca8fe3e8e0 | 3,721 | py | Python | inclearn/convnet/my_resnet.py | romilbhardwaj/incremental_learning.pytorch @ 77097ef4dd4fc6b6c35d13ef66856d6f8a15598d (also rahulvigneswaran/incremental_learning.pytorch @ 786ecda7dbce5977894737d61cd5e3a30f61aac6) | ["MIT"] | 3 stars (2019-07-01 – 2019-12-27)

''' Incremental-Classifier Learning
Authors : Khurram Javed, Muhammad Talha Paracha
Maintainer : Khurram Javed
Lab : TUKL-SEECS R&D Lab
Email : 14besekjaved@seecs.edu.pk '''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class DownsampleStride(nn.Module):
def __init__(self, n=2):
super(DownsampleStride, self).__init__()
self._n = n
def forward(self, x):
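        # stride-2 spatial downsampling by plain slicing; no learned parameters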
return x[..., ::2, ::2]
class ResidualBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, increase_dim=False, last=False):
super(ResidualBlock, self).__init__()
self.increase_dim = increase_dim
if increase_dim:
first_stride = 2
planes = inplanes * 2
else:
first_stride = 1
planes = inplanes
self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=first_stride, padding=1, bias=False)
self.bn_a = nn.BatchNorm2d(planes)
self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_b = nn.BatchNorm2d(planes)
if increase_dim:
self.downsample = DownsampleStride()
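            # zero-pad the channel dimension to match the doubled width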
self.pad = lambda x: torch.cat((x, x.mul(0)), 1)
self.last = last
def forward(self, x):
y = self.conv_a(x)
y = self.bn_a(y)
y = F.relu(y, inplace=True)
y = self.conv_b(y)
y = self.bn_b(y)
if self.increase_dim:
x = self.downsample(x)
x = self.pad(x)
if x.shape != y.shape:
import pdb; pdb.set_trace()
y = x + y
if self.last:
y = F.relu(y, inplace=True)
return y
class CifarResNet(nn.Module):
"""
ResNet optimized for the Cifar Dataset, as specified in
https://arxiv.org/abs/1512.03385.pdf
"""
def __init__(self, n=5, channels=3):
""" Constructor
Args:
depth: number of layers.
num_classes: number of classes
base_width: base width
"""
super(CifarResNet, self).__init__()
self.conv_1_3x3 = nn.Conv2d(channels, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_1 = nn.BatchNorm2d(16)
self.inplanes = 16
self.stage_1 = self._make_layer(16, increase_dim=False, n=n)
self.stage_2 = self._make_layer(16, increase_dim=True, n=n-1)
self.stage_3 = self._make_layer(32, increase_dim=True, n=n-2)
self.stage_4 = ResidualBlock(64, increase_dim=False, last=True)
self.avgpool = nn.AvgPool2d(8)
self.out_dim = 64
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, planes, increase_dim=False, last=False, n=None):
layers = []
if increase_dim:
layers.append(
ResidualBlock(planes, increase_dim=True)
)
planes = 2 * planes
for i in range(n):
layers.append(ResidualBlock(planes))
return nn.Sequential(*layers)
def forward(self, x, feature=False, T=1, labels=False, scale=None, keep=None):
x = self.conv_1_3x3(x)
x = F.relu(self.bn_1(x), inplace=True)
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.stage_4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
def resnet_rebuffi(n=5):
return CifarResNet(n=n)

3a3ec3da72c85292efaee127eb5ad56d111e5946 | 2,095 | py | Python | src/nlplib/general/thread.py | rectangletangle/nlplib | 7dcc0daf050a73c03b7d7f0257ad0b862586a6e3 | ["BSD-2-Clause"] | 1 star (2015-11-18)

''' Tools for dealing with multithreaded programs. '''
from concurrent.futures import ThreadPoolExecutor, as_completed
from nlplib.general.iterate import chunked
__all__ = ['simultaneously']
def simultaneously (function, iterable, max_workers=4) :
''' This runs the given function over the iterable concurrently, in a similar fashion to the built-in <map>
    function. The output's order is not guaranteed to correspond to the order of the input iterable. Therefore, the
    output order should be treated as undefined. The <max_workers> argument gives the number of worker threads to
use. '''
if max_workers < 1 :
raise ValueError('<simultaneously> requires at least one worker thread.')
with ThreadPoolExecutor(max_workers=max_workers) as executor :
futures = (executor.submit(function, item)
for item in iterable)
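        # consume the futures in chunks of max_workers, yielding results as each chunk completes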
for chunk in chunked(futures, max_workers, trail=True) :
for future in as_completed(chunk) :
yield future.result()
def __demo__ () :
from urllib.request import urlopen
urls = ['http://amazon.com', 'http://ibm.com', 'http://google.com', 'http://python.org']
for html in simultaneously(lambda url : urlopen(url).read(1024), urls) :
print(html, end='\n\n')
def __test__ (ut) :
def double (string) :
return string * 2
inputs = ['foo', 'bar', 'baz']
outputs = {'foofoo', 'barbar', 'bazbaz'}
for kw in [{}, {'max_workers' : 1}, {'max_workers' : 231}] :
ut.assert_equal(set(simultaneously(double, inputs, **kw)), outputs)
for workers in [0, -1, -13421] :
ut.assert_raises(lambda : set(simultaneously(double, inputs, max_workers=workers)), ValueError)
class SomeArbitraryException (Exception) :
pass
def raise_something (string) :
raise SomeArbitraryException
ut.assert_raises(lambda : list(simultaneously(raise_something, inputs)), SomeArbitraryException)
if __name__ == '__main__' :
from nlplib.general.unittest import UnitTest
__test__(UnitTest())
__demo__()

3a43287b070e57b4e1131e9830fa7848ee4816f3 | 1,424 | py | Python | appdaemon/apps/exhaust/exhaust.py | Mithras/ha | d37f8673eed27a85f76c97ee3e924d2ddc033ee5 | ["MIT"] | 3 stars (2019-10-27 – 2020-07-21)

import globals
class Exhaust(globals.Hass):
async def initialize(self):
config = self.args["config"]
self._input = config["input"]
self._temperature = config["temperature"]
self._min_temperature = float(config["min_temperature"])
self._max_temperature = float(config["max_temperature"])
await self._ensure_state_async()
await self.listen_state(self._temperature_callback_async,
entity=self._temperature)
async def _temperature_callback_async(self, entity, attribute, old, new, kwargs):
if old == new:
return
# self.log(f"TemperatureChange: old = {old}, new = {new}")
await self._ensure_state_async()
async def _ensure_state_async(self):
input = await self.get_state(self._input)
temperature = float(await self.get_state(self._temperature))
# self.log(f"EnsureState: input = {input}, temperature = {temperature}")
if temperature < self._min_temperature and input == "on":
# self.log("turn_off")
await self.call_service("input_boolean/turn_off",
entity_id=self._input)
elif temperature > self._max_temperature and input == "off":
# self.log("turn_on")
await self.call_service("input_boolean/turn_on",
entity_id=self._input)

3a4437265de98cfb27b3d5feaa4dc75634628d02 | 2,159 | py | Python | test/test.py | fmaida/rosie | 3906d11231aadaf9095f00fde8a73bc186403660 | ["MIT"]

import os
import unittest
from rosie import Rosie
from rosie import DocumentNotFound
# from test import create
# create(100)
class RosieTest(unittest.TestCase):
def setUp(self):
basedir = os.path.join(os.path.expanduser("~"), "Documents",
"Progetti", "HTML-CSS", "rosie-output")
cartelle = []
cartelle.append(os.path.join(basedir, "_content"))
# cartelle.append(os.path.join(basedir, "_files"))
cartelle.append(os.path.join(basedir, "_images"))
self.rosie = Rosie(*cartelle)
self.rosie.registra_allegati(tag="Images",
estensioni=[".jpg", ".jpeg", ".png", ".gif"])
self.rosie.registra_allegati(tag="Files",
estensioni=[".zip", ".rar", ".7z"])
self.rosie.scan()
def test_documenti_trovati(self):
        self.assertEqual(len(self.rosie.elenco), 100, "There should have been 100 documents")
def test_tutti_hanno_titolo_e_tag(self):
for indice, elemento in enumerate(self.rosie, start=1):
self.assertTrue("title" in elemento.meta.keys(),
"Non ci doveva essere un documento senza titolo")
self.assertTrue("date" in elemento.meta.keys(),
"Non ci doveva essere un documento senza data")
def test_il_primo_ha_almeno_un_immagine(self):
"""
Il primo elemento ha sempre almeno un'immagine, per via di come creo
i files nel pacchetto test
"""
ciccio = self.rosie.find("element0001")
self.assertTrue("images" in ciccio.meta,
"Il primo elemento doveva avere almeno un'immagine")
def test_la_ricerca_funziona(self):
"""
Quando cerca un'elemento (che so esistere) lo deve trovare
"""
ciccio = self.rosie.find("element0003")
        self.assertTrue(ciccio is not None, "Element no. 3 should exist")
with self.assertRaises(DocumentNotFound):
self.rosie.find("element9999")
def tearDown(self):
# print(self.rosie.json())
pass

3a44e47df6767fcc400ca98f82e16bb29f7143a3 | 7,728 | py | Python | HeifImagePlugin.py | uploadcare/heif-image-plugin | 164230d08472403b709e2d0c78e8de0207e9312a | ["MIT"] | 6 stars (2021-12-09 – 2022-03-22) | 5 issues (2021-11-24 – 2022-03-11) | 1 fork (2022-02-07)

import inspect
import subprocess
import tempfile
from copy import copy
from weakref import WeakKeyDictionary
import piexif
import pyheif
from cffi import FFI
from PIL import Image, ImageFile
from pyheif.error import HeifError
ffi = FFI()
_keep_refs = WeakKeyDictionary()
pyheif_supports_transformations = (
'transformations' in inspect.signature(pyheif.HeifFile).parameters
)
HEIF_ENC_BIN = 'heif-enc'
def _crop_heif_file(heif):
# Zero-copy crop before loading. Just shifts data pointer and updates meta.
crop = heif.transformations['crop']
if crop == (0, 0) + heif.size:
return heif
if heif.mode not in ("L", "RGB", "RGBA"):
raise ValueError("Unknown mode")
pixel_size = len(heif.mode)
offset = heif.stride * crop[1] + pixel_size * crop[0]
cdata = ffi.from_buffer(heif.data, require_writable=False) + offset
data = ffi.buffer(cdata, heif.stride * crop[3])
# Keep reference to the original data as long as "cdata + offset" is alive.
# Normally ffi.from_buffer should hold it for us but unfortunately
# cdata + offset creates a new cdata object without reference.
_keep_refs[cdata] = heif.data
new_heif = copy(heif)
new_heif.size = crop[2:4]
new_heif.transformations = dict(heif.transformations, crop=(0, 0) + crop[2:4])
new_heif.data = data
return new_heif
def _rotate_heif_file(heif):
"""
    Heif files already contain the transformation chunks imir and irot, which
    take precedence over the Orientation tag in EXIF.
    This is not aligned with other formats' behaviour, and we MUST fix EXIF
    after loading to prevent unexpected rotation after resaving in other
    formats.
    So there is no reason to force rotation of HEIF images after loading,
    since we need to update EXIF anyway.
"""
orientation = heif.transformations['orientation_tag']
if not (1 <= orientation <= 8):
return heif
exif = {'0th': {piexif.ImageIFD.Orientation: orientation}}
if heif.exif:
try:
exif = piexif.load(heif.exif)
exif['0th'][piexif.ImageIFD.Orientation] = orientation
except Exception:
pass
new_heif = copy(heif)
new_heif.transformations = dict(heif.transformations, orientation_tag=0)
new_heif.exif = piexif.dump(exif)
return new_heif
def _extract_heif_exif(heif_file):
"""
Unlike other helper functions, this alters heif_file in-place.
"""
heif_file.exif = None
clean_metadata = []
for item in heif_file.metadata or []:
if item['type'] == 'Exif':
if heif_file.exif is None:
if item['data'] and item['data'][0:4] == b"Exif":
heif_file.exif = item['data']
else:
clean_metadata.append(item)
heif_file.metadata = clean_metadata
class HeifImageFile(ImageFile.ImageFile):
format = 'HEIF'
format_description = "HEIF/HEIC image"
def _open(self):
try:
heif_file = pyheif.open(
self.fp, apply_transformations=not pyheif_supports_transformations)
except HeifError as e:
raise SyntaxError(str(e))
_extract_heif_exif(heif_file)
if pyheif_supports_transformations:
heif_file = _rotate_heif_file(heif_file)
self._size = heif_file.transformations['crop'][2:4]
else:
self._size = heif_file.size
self.mode = heif_file.mode
if heif_file.exif:
self.info['exif'] = heif_file.exif
if heif_file.color_profile:
            # rICC is Restricted ICC. Still not sure whether it can be used.
# ISO/IEC 23008-12 says: The colour information 'colr' descriptive
# item property has the same syntax as the ColourInformationBox
# as defined in ISO/IEC 14496-12.
# ISO/IEC 14496-12 says: Restricted profile shall be of either
# the Monochrome or Three‐Component Matrix‐Based class of
# input profiles, as defined by ISO 15076‐1.
# We need to go deeper...
if heif_file.color_profile['type'] in ('rICC', 'prof'):
self.info['icc_profile'] = heif_file.color_profile['data']
self.tile = []
self.heif_file = heif_file
def load(self):
heif_file, self.heif_file = self.heif_file, None
if heif_file:
try:
heif_file = heif_file.load()
except HeifError as e:
cropped_file = e.code == 7 and e.subcode == 100
if not cropped_file or not ImageFile.LOAD_TRUNCATED_IMAGES:
raise
# Ignore EOF error and return blank image otherwise
self.load_prepare()
if heif_file.data:
if pyheif_supports_transformations:
heif_file = _crop_heif_file(heif_file)
self.frombytes(heif_file.data, "raw", (self.mode, heif_file.stride))
heif_file.data = None
return super().load()
def check_heif_magic(data):
return pyheif.check(data) != pyheif.heif_filetype_no
def _save(im, fp, filename):
# Save it before subsequent im.save() call
info = im.encoderinfo
if im.mode in ('P', 'PA'):
        # disabled due to errors in the libheif encoder
raise IOError("cannot write mode P as HEIF")
with tempfile.NamedTemporaryFile(suffix='.png') as tmpfile:
im.save(
tmpfile, format='PNG', optimize=False, compress_level=0,
icc_profile=info.get('icc_profile', im.info.get('icc_profile')),
exif=info.get('exif', im.info.get('exif'))
)
cmd = [HEIF_ENC_BIN, '-o', '/dev/stdout', tmpfile.name]
avif = info.get('avif')
if avif is None and filename:
ext = filename.rpartition('.')[2].lower()
avif = ext == 'avif'
if avif:
cmd.append('-A')
if info.get('encoder'):
cmd.extend(['-e', info['encoder']])
if info.get('quality') is not None:
cmd.extend(['-q', str(info['quality'])])
subsampling = info.get('subsampling')
if subsampling is not None:
if subsampling == 0:
subsampling = '444'
elif subsampling == 1:
subsampling = '422'
elif subsampling == 2:
subsampling = '420'
cmd.extend(['-p', 'chroma=' + subsampling])
if info.get('speed') is not None:
cmd.extend(['-p', 'speed=' + str(info['speed'])])
if info.get('concurrency') is not None:
cmd.extend(['-p', 'threads=' + str(info['concurrency'])])
try:
# Warning: Do not open stdout and stderr at the same time
with subprocess.Popen(cmd, stdout=subprocess.PIPE) as enc:
for data in iter(lambda: enc.stdout.read(128 * 1024), b''):
fp.write(data)
if enc.wait():
raise subprocess.CalledProcessError(enc.returncode, cmd)
except FileNotFoundError:
raise FileNotFoundError(
2, f"Can't find heif encoding binary. Install '{HEIF_ENC_BIN}' "
+ "or set `HeifImagePlugin.HEIF_ENC_BIN` to full path.")
Image.register_open(HeifImageFile.format, HeifImageFile, check_heif_magic)
Image.register_save(HeifImageFile.format, _save)
Image.register_mime(HeifImageFile.format, 'image/heif')
Image.register_extensions(HeifImageFile.format, [".heic", ".avif"])
# Don't use these extensions for saving images; use the ones above.
# They are added for quick file type detection only (e.g. by Django).
Image.register_extensions(HeifImageFile.format, [".heif", ".hif"])

3a490f04946e54025d2f9929396fe594e1a1e7a5 | 3,916 | py | Python | utils/comm_mqtt.py | peacemaker07/iot_making_for_raspberry_pi | d37d1256ea99794ff1dde4de0cadcbee1e5d6679 | ["MIT"]

import json
import time
from utils.helper import RedisClient
from paho.mqtt.client import MQTT_ERR_SUCCESS
import paho.mqtt.client as mqtt
from utils.date_time import TimeMeasure
import tasks as tasks_mqtt
from utils.message import MsgShadowGet, MsgShadowUpdate
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class CommMqtt:
host = None
port = None
client = None
def __init__(self, host, port):
self.host = host
self.port = port
self.client = mqtt.Client(protocol=mqtt.MQTTv311)
def connect(self):
try:
result = self.client.connect(self.host, port=self.port, keepalive=60)
time.sleep(5)
        except Exception:
return False
return True if result == MQTT_ERR_SUCCESS else False
def disconnect(self):
time.sleep(1)
self.client.disconnect()
def publish_for_send_list(self, msg_obj, buf_list):
"""
Publish処理
送信データのリストを1件ずつPublishする
:param msg_obj: 送信メッセージのオブジェクト
:param buf_list: 送信するデータのリスト
:return: 送信成功送信バッファリスト、送信失敗送信バッファリスト(タプル)
"""
        # buffers sent successfully
        send_ok_buf_list = []
        # buffers that failed to send
        send_ng_buf_list = []
        # A large retransmission backlog would keep the connection open too long, so
        # once sending has run past a time limit, defer the rest to the next send cycle.
time_measure = TimeMeasure(time_out_sec=60)
for idx, buf in enumerate(buf_list):
if time_measure.is_time_out():
                # defer these to the next run
send_ng_buf_list.append(buf)
continue
# Publish
result = self.publish(msg_obj, buf=buf, idx=idx)
if result:
send_ok_buf_list.append(buf)
else:
send_ng_buf_list.append(buf)
return send_ok_buf_list, send_ng_buf_list
def publish(self, msg_obj, buf=None, idx=0):
"""
Publishの実行
:param msg_obj: 送信メッセージオブジェクト
:param idx: 送信データのindex
:param buf: 送信データ
:return: 結果(True:成功、False:失敗)
"""
        # get the topic name to publish to
topic = msg_obj.get_pub_topic()
if not topic:
return False
        # build the message payload to send
send_data = msg_obj.create_pub_data(buf, idx) if buf else {}
logger.debug('publish send_data:[%s]' % send_data)
try:
            # execute the publish
result = self.client.publish(topic, json.dumps(send_data), qos=1)
except Exception as e:
logger.error("failed publish")
logger.error("type:{0}".format(type(e)))
logger.error("args:{0}".format(e.args))
logger.error("{0}".format(e))
result = False
return result
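# Hypothetical usage sketch (broker address and message object assumed):
#   comm = CommMqtt('broker.example.com', 1883)
#   if comm.connect():
#       ok_list, ng_list = comm.publish_for_send_list(msg_obj, buffered_rows)
#       comm.disconnect()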
class CommMqttShadow(CommMqtt):
imsi = None
def __init__(self, host, port, imsi):
super().__init__(host, port)
self.imsi = imsi
def shadow_get(self):
redis_client = RedisClient()
msg_shadow_get = MsgShadowGet(imsi=self.imsi)
result_sub = tasks_mqtt.run_subscribe_by_mqtt.delay(self.host, self.port, msg_shadow_get.get_sub_topic())
time.sleep(2)
try:
self.connect()
result = self.publish(msg_shadow_get)
self.disconnect()
except Exception as e:
logger.error(e)
while not result_sub.ready():
time.sleep(1)
value = redis_client.get('token')
if not value:
return ''
payload_str = value.decode(encoding='utf-8')
if not payload_str:
return ''
return payload_str
def shadow_update(self, update_dict):
msg_shadow_update = MsgShadowUpdate(imsi=self.imsi)
time.sleep(2)
try:
self.connect()
result = self.publish(msg_shadow_update, buf=update_dict)
self.disconnect()
except Exception as e:
logger.error(e)

3a4b65fb4152f97b12ef78ecb2e26b90659acced | 255 | py | Python | servo-test.py | dthompson-personal/pi-robot-shop | 19ed4bc2727bc1681b7aed906fd95f58cc2f9fbe | ["MIT"] | 1 star (2019-01-08)

# simple servo test for PCA9685 with HS422
from servo.servo import *
from time import sleep
pca = PCA9685()
pca.setZero(0)
sleep(2)
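# sweep from -67 to +67 degrees in 1-degree steps, then step back from 67 toward 0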
for a in xrange(-67,67,1):
pca.setAngle(0,a)
sleep(0.05)
for a in xrange(67,0,-1):
pca.setAngle(0,a)
sleep(0.05)

3a4cbefcb62071a2d988ae8d1ba6c3ebd094217e | 1,386 | py | Python | lists_dictionary/Hello France.py | vasetousa/Python-fundamentals | 3180c03de28b4f4d36d966221719069a7e18e521 | ["MIT"]

items = input().split("|") # items to buy
budged = int(input())
profit = 0
profit_price_list = []
profit_list = []
profit_price = 0
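# per-category price caps: Clothes <= 50, Shoes <= 35, Accessories <= 20.50; items above the cap are skipped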
for index in items:
profit = 0
profit_price = 0
separator = index.split("->")
if separator[0] == "Clothes":
if not 0 < float(separator[1]) <= 50:
continue
elif separator[0] == "Shoes":
if not 0 < float(separator[1]) <= 35:
continue
elif separator[0] == "Accessories":
if not 0 < float(separator[1]) <= 20.50:
continue
    budged -= float(separator[1])  # deduct the price from the remaining budget
    profit_price += float(separator[1]) * 1.40  # price with a 40% markup
    profit += float(separator[1]) * 0.40  # profit from the 40% markup on this item
    profit_price_list.append(round(profit_price, 2))  # list of marked-up prices
    profit_list.append(profit)  # per-item profit
if budged <= 0:
budged += float(separator[1])
profit_price_list.pop()
profit_list.pop()
continue
profit_price = sum(profit_list)
price_after_40 = sum(profit_price_list)
budged += price_after_40
print(*profit_price_list)
print(f"Profit: {profit_price:.2f}")
print(); print()
if budged >= 150:
print("Hello, France!")
else:
print("Time to go.")

3a4f4e40f01a34131b926552b927be814c889324 | 7,875 | py | Python | vision/crop_image_on_faces.py | timmahrt/toybox | 1c063428ba85d26c8d9229b020503f6f57df2219 | ["MIT"]

'''
Created on Sep 8, 2018
Use autocropFaces() to crop out the material around faces in an image,
where the faces are automatically detected.
See the bottom for an example use script.
Used this as a starting reference point:
https://docs.opencv.org/3.3.0/d7/d8b/tutorial_py_face_detection.html
@author: tmahrt
'''
import os
from os.path import join
import cv2
from matplotlib import pyplot as plt
from PIL import Image
TRAINING_DATA_PATH = '/opt/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml'
class NoFacesException(Exception):
def __init__(self, fn):
super(NoFacesException, self).__init__()
self.fn = fn
def __str__(self):
errStr = ("ERROR: Could not find faces in file `%s` with "
"training data: \n`%s`\n Please try again with a different "
"file, or different training set.")
return errStr % (self.fn, TRAINING_DATA_PATH)
class FaceRecognizer():
def __init__(self):
self.recognizer = cv2.CascadeClassifier(TRAINING_DATA_PATH)
def recognize(self, imgFn):
gray = cv2.imread(imgFn, 0)
faces = self.recognizer.detectMultiScale(gray, 1.3, 5)
if len(faces) == 0:
raise NoFacesException(imgFn)
return faces
def outputDebug(imgFn,
faces,
faceRegion=None,
helperRegion=None,
finalCropRegion=None):
img = cv2.imread(imgFn)
# The list of faces
for face in faces:
_drawRectangle(img, face, (255, 0, 0))
# All the faces fit tightly in this space
if faceRegion is not None:
_drawRectangle(img, faceRegion, (0, 0, 255))
# I used this to see various intermediate stages
if helperRegion is not None:
_drawRectangle(img, helperRegion, (0, 255, 0))
# The final cropping region
if finalCropRegion is not None:
_drawRectangle(img, finalCropRegion, (255, 255, 0))
img = _convertBgrToRGB(img)
plt.imshow(img)
plt.show()
def _convertBgrToRGB(img):
# https://stackoverflow.com/questions/15072736/extracting-a-region-from-an-image-using-slicing-in-python-opencv/15074748#15074748
return img[:, :, ::-1]
def _drawRectangle(img, xywh, color):
x, y, w, h = xywh
cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
def encapsulateSubsquares(regionList):
'''
Given a list of squares, return a square that tightly fits all subsquares
Input is a list of the form [(x, y, w, h), () ]
Output is the (x, y, w, h) that wholly includes all input
'''
newRegionList = [(x, y, x + w, y + h) for x, y, w, h in regionList]
x0List, y0List, x1List, y1List = zip(*newRegionList)
x0 = min(x0List)
y0 = min(y0List)
x1 = max(x1List)
y1 = max(y1List)
return [x0, y0, x1 - x0, y1 - y0]
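# Worked example (hypothetical boxes):
# encapsulateSubsquares([(0, 0, 10, 10), (5, 5, 10, 10)])
# converts both boxes to corner form, (0, 0, 10, 10) and (5, 5, 15, 15),
# takes the min/max of each coordinate, and returns [0, 0, 15, 15].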
def modifyAspectRatio(sourceXYWH, targetRatio):
'''
Changes the ratio of the input square to be that of the target ratio
'''
sourceRatio = sourceXYWH[2] / sourceXYWH[3]
if targetRatio > sourceRatio:
newX1 = int(sourceXYWH[3] * targetRatio)
returnXYWH = [sourceXYWH[0], sourceXYWH[1],
newX1, sourceXYWH[3]]
else:
newY1 = int(sourceXYWH[2] / targetRatio)
returnXYWH = [sourceXYWH[0], sourceXYWH[1],
sourceXYWH[2], newY1]
return returnXYWH
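# Worked example (hypothetical values): modifyAspectRatio([0, 0, 100, 100], 1.5)
# has sourceRatio = 1.0 < 1.5, so the width grows to int(100 * 1.5) = 150 and
# the result is [0, 0, 150, 100]; recentering is left to relativeRecenter().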
def relativeRecenter(sourceXYWH, targetXYWH):
'''
Centers a square with respect to the center of a different square
'''
targetXCenter = targetXYWH[0] + (targetXYWH[2] / 2.0)
targetYCenter = targetXYWH[1] + (targetXYWH[3] / 2.0)
newX = int(targetXCenter - (sourceXYWH[2] / 2.0))
newY = int(targetYCenter - (sourceXYWH[3] / 2.0))
return (newX, newY, sourceXYWH[2], sourceXYWH[3])
def keepInsideImage(sourceXYWH, imageWH):
'''
Forces a square to be within the image that contains it
'''
left = sourceXYWH[0]
right = sourceXYWH[0] + sourceXYWH[2]
top = sourceXYWH[1]
bottom = sourceXYWH[1] + sourceXYWH[3]
newLeft = left
if left < 0 and right > imageWH[0]:
newLeft = (imageWH[0] - right)
elif left < 0:
newLeft = 0
elif right > imageWH[0]:
newLeft = imageWH[0] - sourceXYWH[2]
newTop = top
if top < 0 and bottom > imageWH[1]:
newTop = imageWH[1] / 2.0 - sourceXYWH[3]
elif top < 0:
newTop = 0
elif bottom > imageWH[1]:
newTop = imageWH[1] - sourceXYWH[3]
return [int(newLeft), int(newTop), sourceXYWH[2], sourceXYWH[3]]
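# Worked example (hypothetical values): a 50x50 crop whose left edge sits at
# x = -10 inside a 100x100 image is clamped back in bounds:
# keepInsideImage([-10, 20, 50, 50], (100, 100)) -> [0, 20, 50, 50]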
def enforceMinSize(sourceXYWH, targetWH, imgWH):
'''
Increase the crop region to the target, but don't exceed the img dimensions
'''
newW = max((targetWH[0], sourceXYWH[2]))
newH = max((targetWH[1], sourceXYWH[3]))
newW = min((imgWH[0], newW))
newH = min((imgWH[1], newH))
return (sourceXYWH[0], sourceXYWH[1], newW, newH)
def autocropFaces(fn, outputFN, recognizer, targetWH=None, debug=False):
'''
Will crop an image based on all of the faces it automatically detects
targetWH: e.g. (300, 200); if specified, the output will be that size.
The area around the detected heads will be enlarged to permit
the necessary aspect ratio before scaling occurs. If the image
is smaller than the target, whitespace will be filled in.
debug: if True, an image will pop up showing detected faces and the
region that will be cropped. The image must be closed before
the code will continue
'''
faceList = recognizer.recognize(fn)
faceRegion = encapsulateSubsquares(faceList)
img = Image.open(fn)
imgWH = (img.width, img.height)
if targetWH is not None:
sizedFaceRegion = enforceMinSize(faceRegion, targetWH, imgWH)
proportionedFaceRegion = modifyAspectRatio(sizedFaceRegion,
targetWH[0] / targetWH[1])
regionToCenterIn = relativeRecenter(sizedFaceRegion,
faceRegion)
adjustedFaceRegion = relativeRecenter(proportionedFaceRegion,
regionToCenterIn)
adjustedFaceRegion = keepInsideImage(adjustedFaceRegion, imgWH)
# If the crop region is smaller than the targetWH, fill in
# the empty space with a white background
newImg = Image.new('RGB',
(adjustedFaceRegion[2], adjustedFaceRegion[3]),
(255, 255, 255))
newImg.paste(img, (-adjustedFaceRegion[0], -adjustedFaceRegion[1]))
img = newImg
if debug is True:
outputDebug(fn, faceList, faceRegion, sizedFaceRegion,
finalCropRegion=adjustedFaceRegion)
else:
img = img.crop(faceRegion)
if targetWH is not None:
img = img.resize(targetWH)
img.save(outputFN)
# Example use
if __name__ == "__main__":
def getThumbnailName(fn):
name, ext = os.path.splitext(fn)
return name + "_thumbnail" + ext
inputPath = os.path.abspath("../data/faces/")
outputPath = os.path.abspath("../data/faces/output")
targetWH = (300, 200)
if not os.path.exists(outputPath):
os.mkdir(outputPath)
_recognizer = FaceRecognizer()
for _fn in os.listdir(inputPath):
if ".jpg" not in _fn:
continue
inputFn = join(inputPath, _fn)
outputFn = join(outputPath, getThumbnailName(_fn))
try:
autocropFaces(inputFn, outputFn, _recognizer, targetWH, debug=True)
except NoFacesException:
print("No faces in: " + inputFn)
continue
| 30.761719
| 133
| 0.610159
| 923
| 7,875
| 5.152763
| 0.312026
| 0.023129
| 0.009462
| 0.003364
| 0.087679
| 0.044155
| 0
| 0
| 0
| 0
| 0
| 0.031831
| 0.289905
| 7,875
| 255
| 134
| 30.882353
| 0.81867
| 0.210032
| 0
| 0.055556
| 0
| 0
| 0.046286
| 0.01186
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097222
| false
| 0
| 0.034722
| 0.006944
| 0.208333
| 0.006944
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a5276bb48c6b9ee88490cc0b0a29ff3c27d3bba
| 2,920
|
py
|
Python
|
aiida_lsmo/workchains/multistage_ddec.py
|
ltalirz/aiida-lsmo
|
38a839af63686320ab070fada89241860e095b9e
|
[
"MIT"
] | null | null | null |
aiida_lsmo/workchains/multistage_ddec.py
|
ltalirz/aiida-lsmo
|
38a839af63686320ab070fada89241860e095b9e
|
[
"MIT"
] | null | null | null |
aiida_lsmo/workchains/multistage_ddec.py
|
ltalirz/aiida-lsmo
|
38a839af63686320ab070fada89241860e095b9e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""MultistageDdecWorkChain workchain"""
from __future__ import absolute_import
from aiida.plugins import CalculationFactory, DataFactory, WorkflowFactory
from aiida.common import AttributeDict
from aiida.engine import WorkChain, ToContext
# import sub-workchains
Cp2kMultistageWorkChain = WorkflowFactory('cp2k.multistage') # pylint: disable=invalid-name
Cp2kDdecWorkChain = WorkflowFactory('ddec.cp2k_ddec') # pylint: disable=invalid-name
# import calculations
DdecCalculation = CalculationFactory('ddec') # pylint: disable=invalid-name
# import aiida data
CifData = DataFactory('cif') # pylint: disable=invalid-name
class MultistageDdecWorkChain(WorkChain):
"""A workchain that combines: Cp2kMultistageWorkChain + Cp2kDdecWorkChain"""
@classmethod
def define(cls, spec):
"""Define workflow specification."""
super(MultistageDdecWorkChain, cls).define(spec)
spec.expose_inputs(Cp2kMultistageWorkChain)
spec.expose_inputs(Cp2kDdecWorkChain, exclude=['cp2k_base'])
# specify the chain of calculations to be performed
spec.outline(cls.run_cp2kmultistage, cls.run_cp2kddec, cls.return_results)
spec.expose_outputs(Cp2kMultistageWorkChain, exclude=['output_structure'])
spec.expose_outputs(Cp2kDdecWorkChain, include=['structure_ddec'])
def run_cp2kmultistage(self):
"""Run CP2K-Multistage"""
cp2k_ms_inputs = AttributeDict(self.exposed_inputs(Cp2kMultistageWorkChain))
cp2k_ms_inputs['metadata']['call_link_label'] = 'call_cp2kmultistage'
running = self.submit(Cp2kMultistageWorkChain, **cp2k_ms_inputs)
self.report('Running Cp2kMultistageWorkChain to move the structure')
return ToContext(ms_wc=running)
def run_cp2kddec(self):
"""Pass the Cp2kMultistageWorkChain outputs as inputs for
Cp2kDdecWorkChain: cp2k_base (metadata), cp2k_params, structure and WFN.
"""
cp2k_ddec_inputs = AttributeDict(self.exposed_inputs(Cp2kDdecWorkChain))
cp2k_ddec_inputs['cp2k_base'] = self.exposed_inputs(Cp2kMultistageWorkChain)['cp2k_base']
cp2k_ddec_inputs['cp2k_base']['cp2k']['parameters'] = self.ctx.ms_wc.outputs.last_input_parameters
cp2k_ddec_inputs['cp2k_base']['cp2k']['structure'] = self.ctx.ms_wc.outputs.output_structure
cp2k_ddec_inputs['cp2k_base']['cp2k']['parent_calc_folder'] = self.ctx.ms_wc.outputs.remote_folder
cp2k_ddec_inputs['metadata']['call_link_label'] = 'call_cp2kddec'
running = self.submit(Cp2kDdecWorkChain, **cp2k_ddec_inputs)
return ToContext(cp2k_ddec_wc=running)
def return_results(self):
"""Return exposed outputs and print the pk of the CifData w/DDEC"""
self.out_many(self.exposed_outputs(self.ctx.ms_wc, Cp2kMultistageWorkChain))
self.out_many(self.exposed_outputs(self.ctx.cp2k_ddec_wc, Cp2kDdecWorkChain))
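# A minimal, hedged usage sketch (assumes a configured AiiDA profile and fully
# populated inputs for the exposed sub-workchains; everything below is
# illustrative, not part of this module):
#
# from aiida.engine import submit
# builder = MultistageDdecWorkChain.get_builder()
# # ... populate builder with structure, codes and scheduler options ...
# node = submit(builder)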
| 45.625
| 106
| 0.743151
| 322
| 2,920
| 6.509317
| 0.307453
| 0.038168
| 0.046756
| 0.045802
| 0.238073
| 0.133588
| 0.034351
| 0.034351
| 0
| 0
| 0
| 0.020598
| 0.152055
| 2,920
| 63
| 107
| 46.349206
| 0.825929
| 0.203425
| 0
| 0
| 0
| 0
| 0.131718
| 0.009692
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.114286
| 0
| 0.314286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a5286d6d3711424348d457dbffee994d0ef9214
| 2,997
|
py
|
Python
|
ambari-server/src/test/python/TestServerUtils.py
|
panfeiyy/ambari
|
24077510723ede93d3024784f0b04422adaf56d6
|
[
"Apache-2.0"
] | 16
|
2018-05-24T10:28:24.000Z
|
2021-08-05T03:13:26.000Z
|
ambari-server/src/test/python/TestServerUtils.py
|
panfeiyy/ambari
|
24077510723ede93d3024784f0b04422adaf56d6
|
[
"Apache-2.0"
] | 8
|
2020-06-18T17:31:19.000Z
|
2022-03-02T08:32:03.000Z
|
ambari-server/src/test/python/TestServerUtils.py
|
panfeiyy/ambari
|
24077510723ede93d3024784f0b04422adaf56d6
|
[
"Apache-2.0"
] | 17
|
2018-07-06T08:57:00.000Z
|
2021-11-04T11:00:36.000Z
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
os.environ["ROOT"] = ""
from mock.mock import patch, MagicMock
from unittest import TestCase
import platform
from ambari_commons import os_utils
os_utils.search_file = MagicMock(return_value="/tmp/ambari.properties")
import shutil
project_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),os.path.normpath("../../../../"))
shutil.copyfile(project_dir+"/ambari-server/conf/unix/ambari.properties", "/tmp/ambari.properties")
with patch.object(platform, "linux_distribution", return_value = MagicMock(return_value=('Redhat', '6.4', 'Final'))):
with patch("os.path.isdir", return_value = MagicMock(return_value=True)):
with patch("os.access", return_value = MagicMock(return_value=True)):
with patch.object(os_utils, "parse_log4j_file", return_value={'ambari.log.dir': '/var/log/ambari-server'}):
from ambari_server.serverUtils import get_ambari_server_api_base
from ambari_server.serverConfiguration import CLIENT_API_PORT, CLIENT_API_PORT_PROPERTY, SSL_API, DEFAULT_SSL_API_PORT, SSL_API_PORT
@patch.object(platform, "linux_distribution", new = MagicMock(return_value=('Redhat', '6.4', 'Final')))
class TestServerUtils(TestCase):
def test_get_ambari_server_api_base(self):
# Test case of using http protocol
properties = FakeProperties({
SSL_API: "false",
CLIENT_API_PORT_PROPERTY: None
})
result = get_ambari_server_api_base(properties)
self.assertEquals(result, 'http://127.0.0.1:8080/api/v1/')
# Test case of using http protocol and custom port
properties = FakeProperties({
SSL_API: "false",
CLIENT_API_PORT_PROPERTY: "8033"
})
result = get_ambari_server_api_base(properties)
self.assertEquals(result, 'http://127.0.0.1:8033/api/v1/')
# Test case of using https protocol (and ssl port)
properties = FakeProperties({
SSL_API: "true",
SSL_API_PORT : "8443",
CLIENT_API_PORT_PROPERTY: None
})
result = get_ambari_server_api_base(properties)
self.assertEquals(result, 'https://127.0.0.1:8443/api/v1/')
class FakeProperties(object):
def __init__(self, prop_map):
self.prop_map = prop_map
def get_property(self, prop_name):
return self.prop_map[prop_name]
| 38.922078
| 140
| 0.746079
| 428
| 2,997
| 5.030374
| 0.357477
| 0.045982
| 0.046447
| 0.041802
| 0.343242
| 0.266605
| 0.229912
| 0.199257
| 0.158384
| 0.116117
| 0
| 0.019554
| 0.146813
| 2,997
| 76
| 141
| 39.434211
| 0.822448
| 0.295963
| 0
| 0.302326
| 0
| 0
| 0.166667
| 0.051429
| 0
| 0
| 0
| 0
| 0.069767
| 1
| 0.069767
| false
| 0
| 0.186047
| 0.023256
| 0.325581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a533adcbaa3e599ac553a4a4afcfe1138f8018d
| 828
|
py
|
Python
|
docs/md2ipynb.py
|
RingoIngo/gluon-ts
|
62fb20c36025fc969653accaffaa783671709564
|
[
"Apache-2.0"
] | 7
|
2021-07-20T21:46:28.000Z
|
2022-01-12T04:18:14.000Z
|
docs/md2ipynb.py
|
RingoIngo/gluon-ts
|
62fb20c36025fc969653accaffaa783671709564
|
[
"Apache-2.0"
] | null | null | null |
docs/md2ipynb.py
|
RingoIngo/gluon-ts
|
62fb20c36025fc969653accaffaa783671709564
|
[
"Apache-2.0"
] | 3
|
2021-08-28T06:01:27.000Z
|
2022-01-12T04:18:13.000Z
|
import sys
import time
from itertools import chain
from pathlib import Path
import nbformat
import notedown
def convert(path, timeout=40 * 60):
with path.open() as in_file:
notebook = notedown.MarkdownReader().read(in_file)
start = time.time()
notedown.run(notebook, timeout)
print(f"=== {path.name} finished evaluation in {time.time() - start} sec")
# need to add language info for syntax highlighting
notebook["metadata"].update(language_info={"name": "python"})
with path.with_suffix(".ipynb").open("w") as out_file:
out_file.write(nbformat.writes(notebook))
if __name__ == "__main__":
assert len(sys.argv) >= 2, "usage: input.md"
here = Path(".")
files = list(chain.from_iterable(map(here.glob, sys.argv[1:])))
for file in files:
convert(file)
| 23.657143
| 78
| 0.669082
| 114
| 828
| 4.72807
| 0.561404
| 0.033395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008996
| 0.194444
| 828
| 34
| 79
| 24.352941
| 0.7991
| 0.059179
| 0
| 0
| 0
| 0
| 0.145431
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 1
| 0.047619
| false
| 0
| 0.285714
| 0
| 0.333333
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a5679211ddca25bc7c34ee2ad4a2a92de9f338e
| 25,389
|
py
|
Python
|
kessk_web/device/views.py
|
yungs2017/kessk-switch
|
a56c73c756bb88e8ee38b7aa196fd58a4a802341
|
[
"BSD-3-Clause"
] | 9
|
2019-09-30T04:24:39.000Z
|
2021-07-15T06:08:20.000Z
|
kessk_web/device/views.py
|
yungs2017/kessk-switch
|
a56c73c756bb88e8ee38b7aa196fd58a4a802341
|
[
"BSD-3-Clause"
] | 6
|
2020-05-14T03:13:32.000Z
|
2022-02-10T10:23:46.000Z
|
kessk_web/device/views.py
|
yungs2017/kessk-switch
|
a56c73c756bb88e8ee38b7aa196fd58a4a802341
|
[
"BSD-3-Clause"
] | 2
|
2020-12-19T07:12:01.000Z
|
2021-05-24T02:21:15.000Z
|
# The 3-Clause BSD License
# Copyright (C) 2019, KessK, all rights reserved.
# Copyright (C) 2019, Kison.Y, all rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
# Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import hashlib
import random
import string
import time
from django.contrib.auth.models import User
from django.core.cache import cache
from django.http import JsonResponse
from django.shortcuts import render
from rest_framework.decorators import api_view
from common.AliyunIot import AliyunIot
from common.ExceptionAPI import AValidation400Error, response_json
from common.WechatCommonView import WechatCommonView
from common.config import ErrorCodes, DEVICE_MASK, DEVICE_NAME_DEFAULT, ALIYUN_IOT_CONTROL_APP_PRODUCT_KEY
from device.models import Device, DeviceBind, ControlDevice, AliyunIotRules
from device.wexinSignature import Signature
from rest_framework import status, generics
class BindView(WechatCommonView):
"""
Configure the device to connect to wifi AP in Wechat client
"""
template_name = "config-wechat-wifi.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
sign = Signature(self.full_url)
sign.sign()
print(sign.ret['nonceStr'])
print(sign.ret['jsapi_ticket'])
print(sign.ret['timestamp'])
print(sign.ret['url'])
context['sign'] = sign
return context
#
# class BindDeviceAPI(generics.CreateAPIView):
#
# def post(self, request, *args, **kwargs):
# print("ok")
@api_view(['POST'])
def bindDevice(request):
if not check_login(request):
raise AValidation400Error(detail="Unknow", code=ErrorCodes['global']['not_allowed'],
errcode=ErrorCodes['global']['not_allowed'])
chip_id = request.POST.get('chip')
if not request.session.get('userid') or not chip_id:
raise AValidation400Error(detail="Unknow", code=ErrorCodes['global']['required'],
errcode=ErrorCodes['global']['required'])
chip_id = str(chip_id).replace(DEVICE_MASK, '')
chip_id = str(chip_id).replace(':', '')
try:
device = Device.objects.get(device_chipid=chip_id)
user = User.objects.get(id=request.session['userid'])
except Device.DoesNotExist:
raise AValidation400Error(detail="Unknow", code=ErrorCodes['device']['not_exits'],
errcode=ErrorCodes['device']['not_exits'])
except User.DoesNotExist:
raise AValidation400Error(detail="Unknow", code=ErrorCodes['user']['not_exits'],
errcode=ErrorCodes['user']['not_exits'])
device_action = DeviceBindAction(device=device,user=user)
device_action.unbinding_device()
device_bind = device_action.bind_device()
return JsonResponse(response_json(data={'device_name':device_bind.device_name,'id':device_bind.id}), status=status.HTTP_201_CREATED)
@api_view(['POST'])
def bindShareDevice(request):
if not check_login(request):
raise AValidation400Error(detail="Unknow", code=ErrorCodes['global']['not_allowed'],
errcode=ErrorCodes['global']['not_allowed'])
share_code = request.POST.get('share_code')
if not request.session.get('userid') or not share_code:
raise AValidation400Error(detail="Unknow", code=ErrorCodes['global']['required'],
errcode=ErrorCodes['global']['required'])
share_info = cache.get(share_code)
if share_info is None:
raise AValidation400Error(detail="Unknow", code=ErrorCodes['device']['share_oft'],
errcode=ErrorCodes['device']['share_oft'])
user_id = share_info.get("user")
device_id = share_info.get("device")
try:
user = User.objects.get(id=user_id)
device = Device.objects.get(id=device_id)
current_user = User.objects.get(id=request.session.get('userid'))
device_bind = DeviceBind.objects.get(user=user, device=device, onActive=True)
except (User.DoesNotExist, Device.DoesNotExist, DeviceBind.DoesNotExist): # a tuple is needed; `or` would only catch the first class
raise AValidation400Error(detail="Unknow", code=ErrorCodes['global']['not_allowed'],
errcode=ErrorCodes['global']['not_allowed'])
device_action = DeviceBindAction(device=device, user=current_user)
device_bind = device_action.bind_device(origin_user=user)
return JsonResponse(response_json(data={'device_name': device_bind.device_name, 'id': device_bind.id}),
status=status.HTTP_201_CREATED)
@api_view(['PUT'])
def ccnameDevice(request):
if not check_login(request):
raise AValidation400Error(detail="Unknow", code=ErrorCodes['global']['not_allowed'],
errcode=ErrorCodes['global']['not_allowed'])
chip_id = request.POST.get('chip')
name = request.POST.get('name')
is_name = request.POST.get('is_name')
if not chip_id:
raise AValidation400Error(detail="Unknow", code=ErrorCodes['global']['required'],
errcode=ErrorCodes['global']['required'])
device_bind_action = DeviceBindAction(device=None,user=User.objects.get(id=request.session.get('userid')))
if not is_name:
device_bind = device_bind_action.update_device_name(device_bind_id=chip_id,name=name)
else:
device_bind = device_bind_action.update_device_name(device_bind_id=DeviceBind.objects.get(device__device_name=chip_id,user__id=request.session.get('userid'),onActive=True).id, name=name)
return JsonResponse(response_json(data={}),
status=status.HTTP_201_CREATED)
class DeviceBindAction():
def __init__(self,device,user):
self.device = device
self.user = user
self._deviceRule = DeviceRule(self.device,None)
control_device = self._deviceRule.create_control_device(self.user)
self._deviceRule.control_device = control_device
def unbinding_device(self):
self._deviceRule.delete_device_all_action()
try:
bind_log = DeviceBind.objects.filter(device=self.device, onActive=True).exclude(user=self.user)
bind_log.update(onActive=False, unbind_time=datetime.datetime.now())
bind_log = DeviceBind.objects.filter(device=self.device,onActive=True,origin_user__isnull=False).exclude(origin_user=self.user)
bind_log.update(onActive=False, unbind_time=datetime.datetime.now())
except DeviceBind.DoesNotExist:
pass
def unbind_user_device(self):
try:
if DeviceBind.objects.filter(device=self.device, user=self.user,onActive=True,origin_user=None).exists():
# Current user is the main user
self._deviceRule.delete_share_rule_action()
bind_log = DeviceBind.objects.filter(device=self.device, onActive=True, origin_user=self.user)
bind_log.update(onActive=False, unbind_time=datetime.datetime.now())
bind_log = DeviceBind.objects.filter(device=self.device, user=self.user, onActive=True)
bind_log.update(onActive=False, unbind_time=datetime.datetime.now())
# Delete rule action
self._deviceRule.delete_device2control_action()
# self._deviceRule.delete_control2device_action()
except DeviceBind.DoesNotExist:
pass
def get_user_device_name(self):
user_devices_count = DeviceBind.objects.filter(user=self.user, onActive=True).count() + 1
device_name = DEVICE_NAME_DEFAULT + str(user_devices_count)
return device_name
def bind_device(self,device_name=None,origin_user=None):
"""
Binding steps:
Step1. Create a binding log if one does not exist.
Step2. Create the device's rule if it does not exist.
Step3. Create the control device's rule if it does not exist # no longer used
Step4. Create the rule action from device to control device if missing.
Step5. Create the rule action from control device to device if missing. # no longer used
Step6. Create the rule action from the current control device to the share's control device if missing # no longer used
Step7. Create the rule action from the share's control device to the current control device if missing # no longer used
:param device_name:
:return:
"""
# Step.1
if not DeviceBind.objects.filter(user=self.user, device=self.device,onActive=True).exists():
if device_name is None:
device_name = self.get_user_device_name()
device_bind = DeviceBind(
device=self.device,
user=self.user,
origin_user=origin_user,
device_name=device_name,
onActive=True,
)
device_bind.save()
# Step.2-5
self._deviceRule.create_device2control_action()
# self._deviceRule.create_control2device_action()
#Step.6-7
# if origin_user is not None:
# origin_user_control = self._deviceRule.create_control_device(origin_user)
# self._deviceRule.create_share_rule_action(origin_user_control)
return DeviceBind.objects.get(user=self.user, device=self.device,onActive=True)
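# Hedged usage sketch (mirrors the bindDevice() view above; `device` and
# `user` are assumed to be existing Device/User instances):
# action = DeviceBindAction(device=device, user=user)
# action.unbinding_device()  # release any previous binding
# bind = action.bind_device()  # runs the binding steps from the docstring above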
def update_device_name(self,device_bind_id,name):
try:
device_bind = DeviceBind.objects.get(id=device_bind_id)
except DeviceBind.DoesNotExist:
raise AValidation400Error(detail="Unknow", code=ErrorCodes['device']['not_exits'],
errcode=ErrorCodes['device']['not_exits'])
if not device_bind.user.id == self.user.id:
raise AValidation400Error(detail="Unknow", code=ErrorCodes['global']['not_allowed'],
errcode=ErrorCodes['global']['not_allowed'])
if name is None or name == device_bind.device_name:
pass
else:
device_bind.device_name = name
device_bind.save(update_fields=['device_name'])
return device_bind
class ControlDeviceAction():
def __init__(self,user):
self.user = user
self._aliyun = AliyunIot()
def create_control_device(self):
"""
Create a control device when it does not exist.
Each user has only one control device.
:return:
"""
if not ControlDevice.objects.filter(user=self.user).exists():
response = self._aliyun.register_control_device()
print('Aliyun response is ')
print(response)
if response is not None:
control_device = ControlDevice(
user=self.user,
product_name='KessK_Controllor',
device_name=response['DeviceName'],
product_key=response['ProductKey'],
device_secret=response['DeviceSecret'],
)
control_device.save()
return ControlDevice.objects.get(user=self.user)
def create_device2control_rule(self,device_bind,rule_name=None):
"""
Create Aliyun IoT rule from the esp8266 device to the control device.
It will only be created once.
:param device_bind:
:param rule_name:
:return:
"""
if rule_name is None:
rule_name = device_bind.device.device_name + "_2control_rule"
topic = "/"+device_bind.device.device_name+"/user/update"
if not AliyunIotRules.objects.filter(short_topic=topic,bind_device=device_bind).exists():
data = self._aliyun.create_rule(rule_name=rule_name,topic=topic,product_key=device_bind.device.product_key)
if data is not None:
aliyun_iot_relu = AliyunIotRules(
name=device_bind.device.device_name + self.user.first_name,
short_topic=topic,
ruleid=data["RuleId"],
bind_device=device_bind,
requestid=data["RequestId"]
)
aliyun_iot_relu.save()
data["rule_name"] = rule_name
return AliyunIotRules.objects.get(short_topic=topic,bind_device=device_bind)
def create_control2device_rule(self,device_bind,rule_name=None):
if rule_name is None:
rule_name = self.user.first_name + str(time.time()).replace('.','')
def create_device2control_rule_action(self,relu_id,rule_name,configuration,device_bind):
if not AliyunIotRules.objects.filter(ruleid=relu_id,action_config=configuration).exists():
data = self._aliyun.create_rule_action(relu_id,configuration)
if data is not None:
aliyun_iot_relu_ = AliyunIotRules(
name=rule_name + '_action_',
ruleid=relu_id,
bind_device=device_bind,
requestid=data["RequestId"],
action_type="REPUBLISH",
action_config=configuration,
)
aliyun_iot_relu_.save()
return AliyunIotRules.objects.get(ruleid=relu_id,action_config=configuration)
def start_rule(self,rule_id):
self._aliyun.start_rule(rule_id)
class DeviceRule():
def __init__(self,device,control_device):
self.device = device
self.control_device = control_device
self._aliyun = AliyunIot()
def create_control_device(self,user):
"""
Create a control device when it does not exist.
Each user has only one control device.
:return:
"""
if not ControlDevice.objects.filter(user=user).exists():
response = self._aliyun.register_control_device()
print('Aliyun response is ')
print(response)
if response is not None:
control_device = ControlDevice(
user=user,
product_name='KessK_Controllor',
device_name=response['DeviceName'],
product_key=response['ProductKey'],
device_secret=response['DeviceSecret'],
)
control_device.save()
return ControlDevice.objects.get(user=user)
def create_share_rule_action(self,origin_user_control):
# Get control device rule
control_device_rule = self.create_control_rule()
# Get share's control device rule
share_control_device_rule = self.create_rule(origin_user_control.device_name + "_2device_rule",
"/" + origin_user_control.device_name + "/user/update",
origin_user_control.product_key, origin_user_control.id, True)
# Create control device to share's control device action
configuration = "{\"topic\":\"/" + self.control_device.product_key + "/" + self.control_device.device_name + "/user/get\",\"topicType\":1}"
self.create_rule_action(share_control_device_rule.ruleid, configuration, self.control_device.id, True)
# Create share's control device to current control device
configuration = "{\"topic\":\"/" + origin_user_control.product_key + "/" + origin_user_control.device_name + "/user/get\",\"topicType\":1}"
self.create_rule_action(control_device_rule.ruleid, configuration, origin_user_control.id, True)
def delete_share_rule_action(self):
# Get all user share devices
all_share_bind_log = DeviceBind.objects.filter(device=self.device,origin_user=self.control_device.user,onActive=True)
control_device_rule = AliyunIotRules.objects.get(isControlDevice=True,device_id=self.control_device.id,isAction=False)
for share_bind_log in all_share_bind_log:
current_control_device = self.create_control_device(share_bind_log.user)
current_rule = AliyunIotRules.objects.get(isControlDevice=True,device_id=current_control_device.id,isAction=False)
try:
share_to_control_action = AliyunIotRules.objects.get(isAction=True,isControlDevice=True,
ruleid=control_device_rule.ruleid,
device_id=current_control_device.id)
self._aliyun.delete_rule_action(share_to_control_action.action_id)
share_to_control_action.delete()
except AliyunIotRules.DoesNotExist:
continue
try:
control_to_share_action = AliyunIotRules.objects.get(isAction=True, isControlDevice=True,
ruleid=current_rule.ruleid,
device_id=self.control_device.id)
self._aliyun.delete_rule_action(control_to_share_action.action_id)
control_to_share_action.delete()
except AliyunIotRules.DoesNotExist:
continue
def delete_device_all_action(self):
# Step.1 Delete device all actions. These rule action is from control devices to the esp8266 device
all_device_action = AliyunIotRules.objects.filter(isAction=True,isControlDevice=False,device_id=self.device.id)
for action in all_device_action:
self._aliyun.delete_rule_action(action.action_id)
action.delete()
# Step2. Delete all control devices actions. These rule action is from the esp8266 to control device
try:
device_rule = AliyunIotRules.objects.get(isAction=False,isControlDevice=False,device_id=self.device.id)
all_device_action = AliyunIotRules.objects.filter(ruleid=device_rule.ruleid,isAction=True)
for action in all_device_action:
self._aliyun.delete_rule_action(action.action_id)
action.delete()
except AliyunIotRules.DoesNotExist:
pass
def create_device_rule(self):
"""
Create Aliyun IoT rule from the esp8266 device to the control devices.
It will only be created once.
:return: The device's rule
"""
name = self.__md5(self.device.device_name + "_2control_rule")
topic = self.device.device_name + "/user/update"
return self.create_rule(name,topic,self.device.product_key,self.device.id,False)
def create_control_rule(self):
"""
Create Aliyun IoT rule from the control device device to the esp8266 devices.
It will only be created once.
:return: The device's rule
"""
name = self.__md5(self.control_device.device_name + "_2device_rule")
topic = "/" + self.control_device.device_name + "/user/update"
return self.create_rule(name,topic,self.control_device.product_key,self.control_device.id,True)
def create_device2control_action(self):
"""
Create action from esp8266 to control device
:return: The action object
"""
device_rule = self.create_device_rule()
configuration = "{\"topic\":\"/" + self.control_device.product_key + "/" + self.control_device.device_name + "/user/get\",\"topicType\":1}"
action = self.create_rule_action(device_rule.ruleid,configuration,self.control_device.id,True)
self._aliyun.start_rule(device_rule.ruleid)
return action
def create_control2device_action(self):
"""
Create action from control device to esp8266
:return: The action object
"""
device_rule = self.create_control_rule()
configuration = "{\"topic\":\"/" + self.device.product_key + "/" + self.device.device_name + "/user/get\",\"topicType\":1}"
action = self.create_rule_action(device_rule.ruleid, configuration, self.device.id, False)
self._aliyun.start_rule(device_rule.ruleid)
return action
def delete_device2control_action(self):
"""
Delete rule action from esp8266 to control device
:return:
"""
device_rule = self.create_device_rule()
try:
device_action = AliyunIotRules.objects.get(ruleid=device_rule.ruleid,isAction=True,device_id=self.control_device.id,isControlDevice=True)
except AliyunIotRules.DoesNotExist:
return
self._aliyun.delete_rule_action(device_action.action_id)
device_action.delete()
def delete_control2device_action(self):
"""
Delete rule action from control device to esp8266
:return:
"""
device_rule = self.create_control_rule()
try:
device_action = AliyunIotRules.objects.get(ruleid=device_rule.ruleid,isAction=True,device_id=self.device.id,isControlDevice=False)
except AliyunIotRules.DoesNotExist:
return
self._aliyun.delete_rule_action(device_action.action_id)
device_action.delete()
def create_rule_action(self,relu_id,configuration,device_id,is_control):
"""
Create Aliyun IoT rule action
Only one action per device or control device in each rule
:param relu_id:
:param configuration:
:param device_id:
:param is_control:
:return: The action object
"""
if not AliyunIotRules.objects.filter(ruleid=relu_id,action_config=configuration,isAction=True,device_id=device_id,isControlDevice=is_control).exists():
data = self._aliyun.create_rule_action(relu_id,configuration)
if data is not None:
aliyun_iot_relu_ = AliyunIotRules(
name=str(relu_id) + '_action_',
ruleid=relu_id,
isAction=True,
device_id=device_id,
action_id=data["ActionId"],
isControlDevice=is_control,
requestid=data["RequestId"],
action_type="REPUBLISH",
action_config=configuration,
)
aliyun_iot_relu_.save()
return AliyunIotRules.objects.get(ruleid=relu_id,action_config=configuration,isAction=True,device_id=device_id,isControlDevice=is_control)
def create_rule(self,rule_name,topic,product_key,device_id,is_control):
"""
Create Aliyun IoT rule
It will only be created once for each device or control device
:param rule_name:
:param topic:
:param product_key:
:param device_id:
:param is_control: whether this is the control device's rule
:return: The device's rule
"""
if not AliyunIotRules.objects.filter(short_topic=topic,isControlDevice=is_control,device_id=device_id).exists():
data = self._aliyun.create_rule(rule_name=rule_name,topic=topic,product_key=product_key)
if data is not None:
aliyun_iot_relu = AliyunIotRules(
name=rule_name,
short_topic=topic,
ruleid=data["RuleId"],
isControlDevice=is_control,
device_id=device_id,
requestid=data["RequestId"]
)
aliyun_iot_relu.save()
# self._aliyun.start_rule(data["RuleId"])
return AliyunIotRules.objects.get(short_topic=topic,isControlDevice=is_control,device_id=device_id)
def __md5(self, text): # renamed parameter to avoid shadowing the built-in str
m = hashlib.md5()
m.update(text.encode("utf8"))
return m.hexdigest()[8:-8] + ''.join(random.sample(string.ascii_letters + string.digits, 4))
def check_login(request):
userid = request.session.get('userid')
if userid is None:
return False
return True
| 46.245902
| 194
| 0.65363
| 2,960
| 25,389
| 5.391216
| 0.114527
| 0.058654
| 0.015979
| 0.027071
| 0.633726
| 0.574884
| 0.50589
| 0.449116
| 0.381815
| 0.361073
| 0
| 0.007135
| 0.254795
| 25,389
| 549
| 195
| 46.245902
| 0.836311
| 0.177124
| 0
| 0.410765
| 0
| 0
| 0.063685
| 0.012589
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087819
| false
| 0.011331
| 0.048159
| 0
| 0.212465
| 0.022663
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a5bc5539f00418441249df40d6f8b47af45d0da
| 1,087
|
py
|
Python
|
examples/boilerplate/render/main.py
|
Sakuk3/DefCurse
|
22c7de689c2d4ec859ca70ecbe0d014034adfadc
|
[
"MIT"
] | null | null | null |
examples/boilerplate/render/main.py
|
Sakuk3/DefCurse
|
22c7de689c2d4ec859ca70ecbe0d014034adfadc
|
[
"MIT"
] | null | null | null |
examples/boilerplate/render/main.py
|
Sakuk3/DefCurse
|
22c7de689c2d4ec859ca70ecbe0d014034adfadc
|
[
"MIT"
] | null | null | null |
import models
from DefCurse import widgets
from DefCurse import style
from DefCurse import area
def render(model: models.Model, rows: int, cols: int):
areas = [
area.Area(
int(rows/2),
int(cols/2),
),
area.Area(
int(rows/2),
int(cols/2),
int(rows/2)
),
area.Area(
int(rows/2),
int(cols/2),
0,
int(cols/2)
),
area.Area(
int(rows/2),
int(cols/2),
int(rows/2),
int(cols/2),
),
]
a = widgets.labeled_box_widget(
areas[0],
"Main 0"
)
widgets.labeled_box_widget(
areas[1],
"Main 1"
)
widgets.labeled_box_widget(
areas[2],
"Main 2"
)
widgets.labeled_box_widget(
areas[3],
"Main 3"
)
widgets.text_widget(
a,
style.inverse(
"Hallo " +
style.bold("Welt ") +
" 4321"
) +
" 1234"
)
| 19.070175
| 54
| 0.420423
| 114
| 1,087
| 3.929825
| 0.263158
| 0.109375
| 0.107143
| 0.122768
| 0.535714
| 0.285714
| 0.267857
| 0.267857
| 0.232143
| 0.178571
| 0
| 0.049488
| 0.460902
| 1,087
| 57
| 55
| 19.070175
| 0.715017
| 0
| 0
| 0.396226
| 0
| 0
| 0.04136
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018868
| false
| 0
| 0.075472
| 0
| 0.09434
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3a5bd10b62878bb2d6b8444b0e27578b7d011c76
| 579
|
py
|
Python
|
api/urls.py
|
cooleo/py_feeds_services
|
1d6ccb3695e091d001714aef8af210d6509f03b6
|
[
"Apache-2.0"
] | null | null | null |
api/urls.py
|
cooleo/py_feeds_services
|
1d6ccb3695e091d001714aef8af210d6509f03b6
|
[
"Apache-2.0"
] | null | null | null |
api/urls.py
|
cooleo/py_feeds_services
|
1d6ccb3695e091d001714aef8af210d6509f03b6
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls import url, include
from rest_framework import routers
from api.views import UserViewSet, GroupViewSet, FeedViewSet
router = routers.DefaultRouter()
router.register(r'users', UserViewSet)
router.register(r'groups', GroupViewSet)
router.register(r'feeds', FeedViewSet)
router.register(r'category', FeedViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
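# For reference, DefaultRouter generates list and detail routes per registered
# prefix, e.g. (illustrative): ^users/$ and ^users/{pk}/$, ^feeds/$ and
# ^feeds/{pk}/$.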
| 34.058824
| 82
| 0.772021
| 76
| 579
| 5.842105
| 0.5
| 0.126126
| 0.135135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108808
| 579
| 17
| 83
| 34.058824
| 0.860465
| 0.177893
| 0
| 0
| 0
| 0
| 0.14346
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28a42a406aff16efea2049670fcc9c1d85827d10
| 1,512
|
py
|
Python
|
3rdparty/cb58ref/setup.py
|
jgeofil/avax-python
|
b09e78e3d7e1c35db5ae42e3918e960e775f2d45
|
[
"MIT"
] | 25
|
2021-05-16T23:43:47.000Z
|
2022-03-29T03:08:30.000Z
|
setup.py
|
moreati/cb58ref
|
c9827f2cdd2eb55c52bc5de91ade573eab9de827
|
[
"MIT"
] | 2
|
2021-04-26T11:43:22.000Z
|
2021-06-04T07:55:22.000Z
|
3rdparty/cb58ref/setup.py
|
jgeofil/avax-python
|
b09e78e3d7e1c35db5ae42e3918e960e775f2d45
|
[
"MIT"
] | 4
|
2021-08-06T10:55:58.000Z
|
2022-03-29T08:03:05.000Z
|
#!/usr/bin/env python3
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
]
setup_requirements = [
'pytest-runner',
]
test_requirements = [
'pytest>=3'
]
setup(
author="Alex Willmer",
author_email='alex@moreati.org.uk',
python_requires='>=3.5',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Reference implementation of CB58 encoding used by AVA",
#entry_points={
# 'console_scripts': [
# 'cb58ref=cb58ref.cli:main',
# ],
#},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='cb58 base58 ava',
name='cb58ref',
packages=find_packages(include=['cb58ref', 'cb58ref.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/moreati/cb58ref',
version='0.2.0',
zip_safe=True,
)
| 26.526316
| 72
| 0.630291
| 166
| 1,512
| 5.608434
| 0.548193
| 0.102041
| 0.134264
| 0.139635
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029813
| 0.223545
| 1,512
| 56
| 73
| 27
| 0.763203
| 0.06746
| 0
| 0
| 0
| 0
| 0.39245
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.023256
| 0
| 0.023256
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28a484d541dac8a37bc08470e582fe2e7c7e91cc
| 1,009
|
py
|
Python
|
prepare_ce_data.py
|
akio-kobayashi/lc_lstm
|
c5367518ebf56d13a29794d90061fdfb06677e3e
|
[
"Apache-2.0"
] | null | null | null |
prepare_ce_data.py
|
akio-kobayashi/lc_lstm
|
c5367518ebf56d13a29794d90061fdfb06677e3e
|
[
"Apache-2.0"
] | null | null | null |
prepare_ce_data.py
|
akio-kobayashi/lc_lstm
|
c5367518ebf56d13a29794d90061fdfb06677e3e
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import os
import sys
import subprocess
import time
import numpy as np
import random
import h5py
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, required=True, help='training data')
parser.add_argument('--align', type=str, required=True, help='alignment data')
parser.add_argument('--output', type=str, required=True, help='output file')
args = parser.parse_args()
with h5py.File(args.output, 'w') as output:
with h5py.File(args.data, 'r') as data:
keys = data.keys()
with h5py.File(args.align, 'r') as align:
for key in keys:
mat = data[key+'/data'][()]
seq = align[key+'/align'][()]
seq = seq.tolist()
output.create_group(key)
output.create_dataset(key+'/data', data=mat)
output.create_dataset(key+'/align', data=seq)
if __name__ == "__main__":
main()
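# Hedged sketch of the resulting HDF5 layout (key names are illustrative):
# the --data file holds '<key>/data' matrices, the --align file holds
# '<key>/align' sequences, and the output mirrors both under each key:
# output.h5
#   <key>/data   (copied from --data)
#   <key>/align  (copied from --align)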
| 30.575758
| 82
| 0.581764
| 124
| 1,009
| 4.612903
| 0.362903
| 0.055944
| 0.089161
| 0.09965
| 0.120629
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00551
| 0.280476
| 1,009
| 32
| 83
| 31.53125
| 0.782369
| 0
| 0
| 0
| 0
| 0
| 0.091179
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.296296
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28a65fd5ccf17c9151ab25e19828fabbbeef343e
| 627
|
py
|
Python
|
day04/aoc04_1.py
|
Dbof/adventofcode17
|
68a390a8601c3421340fa2a59b0497aa76e5f580
|
[
"MIT"
] | null | null | null |
day04/aoc04_1.py
|
Dbof/adventofcode17
|
68a390a8601c3421340fa2a59b0497aa76e5f580
|
[
"MIT"
] | null | null | null |
day04/aoc04_1.py
|
Dbof/adventofcode17
|
68a390a8601c3421340fa2a59b0497aa76e5f580
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
def has_duplicate(phrase):
seen = set()
words = phrase.split(' ')
for w in words:
if w in seen:
return True
seen.add(w)
return False
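# Quick sanity examples (illustrative):
# has_duplicate('aa bb ccc dd') -> False
# has_duplicate('aa bb aa') -> True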
def check(text):
count = 0
phrases = text.split('\n')
for p in phrases:
if not has_duplicate(p):
count += 1
return count
if __name__ == "__main__":
if len(sys.argv) != 2:
print('Usage:', sys.argv[0], '<input>')
exit(1)
with open(sys.argv[1]) as f:
result = check(f.read().strip())
print('Result:', result)
| 17.416667
| 47
| 0.524721
| 86
| 627
| 3.709302
| 0.581395
| 0.065831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018824
| 0.322169
| 627
| 35
| 48
| 17.914286
| 0.731765
| 0.068581
| 0
| 0
| 0
| 0
| 0.053265
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.043478
| 0
| 0.26087
| 0.086957
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28a7314d913c35ef3d7bae8ca492ed8ba470e621
| 4,707
|
py
|
Python
|
setup.py
|
danmills0/pytket
|
4ac62896aa61c11ae1077246ab1931d0a8f9a9ac
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
danmills0/pytket
|
4ac62896aa61c11ae1077246ab1931d0a8f9a9ac
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
danmills0/pytket
|
4ac62896aa61c11ae1077246ab1931d0a8f9a9ac
|
[
"Apache-2.0"
] | null | null | null |
import setuptools
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
import sys
import os
import re
import platform
import subprocess
# from pathlib import Path
from os.path import expanduser, join
from distutils.version import LooseVersion
import io
__version__ = '0.2.2'
# As of Python 3.6, CCompiler has a `has_flag` method.
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:
f.write('int main (int argc, char **argv) { return 0; }')
try:
compiler.compile([f.name], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
return True
def cpp_flag(compiler):
"""Return the -std=c++[11/14] compiler flag.
c++14 is preferred over c++11 (when it is available).
"""
if has_flag(compiler, '-std=c++14'):
return '-std=c++14'
elif has_flag(compiler, '-std=c++11'):
return '-std=c++11'
else:
raise RuntimeError('Unsupported compiler -- at least C++11 support '
'is needed!')
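# Hedged usage note: this setup delegates the actual build to CMake, so
# has_flag()/cpp_flag() only matter for plain-setuptools builds. An
# illustrative (hypothetical) use inside a build_ext.build_extensions():
#   opts = [cpp_flag(self.compiler)]  # '-std=c++14' when supported, else '-std=c++11'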
# Readme file as long_description (from cirq):
stream = io.open('README.md', encoding='utf-8')
stream.readline()
long_description = stream.read()
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable,
'-DBINDERS=' + 'release']
# cfg = 'Debug' if self.debug else 'Release'
# print(cfg)
cfg = 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
extensions = []
setup(
name='pytket',
version=__version__,
author='Seyon Sivarajah',
author_email='seyon.sivarajah@cambridgequantum.com',
python_requires='>=3.6',
url='https://github.com/CQCL/pytket',
description='Python module for interfacing with the CQC t|ket> library of quantum software',
long_description=long_description,
long_description_content_type='text/markdown',
license="Apache 2.0",
packages=setuptools.find_packages(),
install_requires=[
'sympy >=1.3',
'numpy'
],
ext_modules=extensions,
cmdclass={
'build_ext': CMakeBuild
},
classifiers=[
"Environment :: Console",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache Software License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering"
],
zip_safe=False,
)
| 34.866667
| 98
| 0.607818
| 545
| 4,707
| 5.113761
| 0.420183
| 0.010764
| 0.018658
| 0.012917
| 0.072838
| 0.026552
| 0
| 0
| 0
| 0
| 0
| 0.015108
| 0.254727
| 4,707
| 134
| 99
| 35.126866
| 0.779361
| 0.085192
| 0
| 0.09434
| 0
| 0
| 0.249064
| 0.035581
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04717
| false
| 0
| 0.122642
| 0
| 0.226415
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28a8e0d56673ed011c58970fc2cc9375a3c70f66
| 18,099
|
py
|
Python
|
u24_lymphocyte/third_party/treeano/sandbox/nodes/resnet.py
|
ALSM-PhD/quip_classification
|
7347bfaa5cf11ae2d7a528fbcc43322a12c795d3
|
[
"BSD-3-Clause"
] | 45
|
2015-04-26T04:45:51.000Z
|
2022-01-24T15:03:55.000Z
|
u24_lymphocyte/third_party/treeano/sandbox/nodes/resnet.py
|
ALSM-PhD/quip_classification
|
7347bfaa5cf11ae2d7a528fbcc43322a12c795d3
|
[
"BSD-3-Clause"
] | 8
|
2018-07-20T20:54:51.000Z
|
2020-06-12T05:36:04.000Z
|
u24_lymphocyte/third_party/treeano/sandbox/nodes/resnet.py
|
ALSM-PhD/quip_classification
|
7347bfaa5cf11ae2d7a528fbcc43322a12c795d3
|
[
"BSD-3-Clause"
] | 22
|
2018-05-21T23:57:20.000Z
|
2022-02-21T00:48:32.000Z
|
import functools
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
import canopy
from treeano.sandbox.nodes import batch_normalization as bn
fX = theano.config.floatX
@treeano.register_node("strided_downsample")
class StridedDownsampleNode(treeano.NodeImpl):
hyperparameter_names = ("strides",)
def compute_output(self, network, in_vw):
strides = network.find_hyperparameter(["strides"])
out_slices = []
out_shape = list(in_vw.shape)
for idx, stride in enumerate(strides):
out_slices.append(slice(None, None, stride))
size = out_shape[idx]
if size is not None:
out_shape[idx] = (size + stride - 1) // stride
network.create_vw(
"default",
variable=in_vw.variable[tuple(out_slices)],
shape=tuple(out_shape),
tags={"output"},
)
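# Shape example (hypothetical values): strides=(1, 1, 2, 2) applied to a
# (32, 16, 28, 28) input keeps the first two axes and halves the spatial
# ones via (28 + 2 - 1) // 2 = 14, giving an output shape of (32, 16, 14, 14).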
@treeano.register_node("resnet_init_conv_2d")
class ResnetInitConv2DNode(treeano.NodeImpl):
"""
NOTE: originally copy-pasted from Conv2DNode
"""
hyperparameter_names = ("inits",
"num_filters",
"filter_size",
"conv_stride",
"stride",
"conv_pad",
"pad")
def compute_output(self, network, in_vw):
# gather hyperparameters
num_filters = network.find_hyperparameter(["num_filters"])
filter_size = network.find_hyperparameter(["filter_size"])
stride = network.find_hyperparameter(["conv_stride", "stride"], (1, 1))
pad = network.find_hyperparameter(["conv_pad", "pad"], "valid")
pad = tn.conv.conv_parse_pad(filter_size, pad)
assert len(filter_size) == 2
# create weight
num_channels = in_vw.shape[1]
filter_shape = (num_filters, num_channels) + tuple(filter_size)
W = network.create_vw(
name="weight",
is_shared=True,
shape=filter_shape,
tags={"parameter", "weight"},
default_inits=[],
).variable
# calculate identity for resnet init
# ---
# read hyperparams
identity_ratio = network.find_hyperparameter(["identity_ratio"], 0.5)
ratio_on_input = network.find_hyperparameter(["ratio_on_input"], True)
# find center spatial location
dim0_idx = filter_shape[2] // 2
dim1_idx = filter_shape[3] // 2
# create identity kernel
ratio_idx = 1 if ratio_on_input else 0
init = np.zeros(filter_shape, dtype=theano.config.floatX)
for i in range(min(filter_shape[0],
filter_shape[1],
int(identity_ratio * filter_shape[ratio_idx]))):
init[i, i, dim0_idx, dim1_idx] += 1
out_var = T.nnet.conv2d(input=in_vw.variable,
filters=W + init,
input_shape=in_vw.shape,
filter_shape=filter_shape,
border_mode=pad,
subsample=stride)
out_shape = tn.conv.conv_output_shape(input_shape=in_vw.shape,
num_filters=num_filters,
axes=(2, 3),
conv_shape=filter_size,
strides=stride,
pads=pad)
network.create_vw(
"default",
variable=out_var,
shape=out_shape,
tags={"output"},
)
@treeano.register_node("resnet_init_conv_2d_with_bias")
class ResnetInitConv2DWithBiasNode(treeano.Wrapper0NodeImpl):
hyperparameter_names = ResnetInitConv2DNode.hyperparameter_names
def architecture_children(self):
return [
tn.SequentialNode(
self._name + "_sequential",
[ResnetInitConv2DNode(self._name + "_conv"),
tn.AddBiasNode(self._name + "_bias",
broadcastable_axes=(0, 2, 3))])]
@treeano.register_node("zero_last_axis_partition")
class _ZeroLastAxisPartitionNode(treeano.NodeImpl):
"""
zeros out a fraction of a tensor
"""
hyperparameter_names = ("zero_ratio",
"axis")
def compute_output(self, network, in_vw):
zero_ratio = network.find_hyperparameter(["zero_ratio"])
axis = network.find_hyperparameter(["axis"], 1)
in_var = in_vw.variable
size = treeano.utils.as_fX(in_var.shape[axis])
num_zeros = T.round(zero_ratio * size).astype("int32")
# build full slices up to `axis` so the zeroing hits the requested axis
# (indexing with None would insert new axes instead of slicing)
idxs = [slice(None)] * axis + [slice(-num_zeros, None)]
out_var = T.set_subtensor(in_var[idxs], 0)
network.create_vw(
"default",
variable=out_var,
shape=in_vw.shape,
tags={"output"},
)
def residual_block_conv_2d(name,
num_filters,
num_layers,
increase_dim=None,
conv_node=tn.Conv2DNode,
bn_node=bn.BatchNormalizationNode,
activation_node=tn.ReLUNode,
input_num_filters=None,
projection_filter_size=(1, 1),
increase_dim_stride=(2, 2),
no_identity=False):
if increase_dim is not None:
assert increase_dim in {"projection", "pad"}
first_stride = increase_dim_stride
if increase_dim == "projection":
identity_node = tn.SequentialNode(
name + "_projection",
[tn.Conv2DNode(name + "_projectionconv",
num_filters=num_filters,
filter_size=projection_filter_size,
stride=first_stride,
pad="same"),
bn_node(name + "_projectionbn")])
elif increase_dim == "pad":
assert input_num_filters is not None
identity_node = tn.SequentialNode(
name + "_pad",
[StridedDownsampleNode(
name + "_stride",
strides=(1, 1) + first_stride),
tn.PadNode(
name + "_addpad",
padding=(0, (num_filters - input_num_filters) // 2, 0, 0))])
else:
first_stride = (1, 1)
identity_node = tn.IdentityNode(name + "_identity")
nodes = []
# first node
for i in range(num_layers):
if i == 0:
# first conv
# ---
# same as middle convs, but with stride
nodes += [
conv_node(name + "_conv%d" % i,
num_filters=num_filters,
stride=first_stride,
pad="same"),
bn_node(name + "_bn%d" % i),
activation_node(name + "_activation%d" % i),
]
else:
nodes += [
conv_node(name + "_conv%d" % i,
num_filters=num_filters,
stride=(1, 1),
pad="same"),
bn_node(name + "_bn%d" % i),
activation_node(name + "_activation%d" % i),
]
# for last conv, remove activation
nodes.pop()
residual_node = tn.SequentialNode(name + "_seq", nodes)
if no_identity:
# ability to disable resnet connections
return residual_node
else:
return tn.ElementwiseSumNode(name,
[identity_node,
residual_node])
def resnet_init_block_conv_2d(*args, **kwargs):
return residual_block_conv_2d(*args,
conv_node=ResnetInitConv2DNode,
**kwargs)
def resnet_init_projection_conv_2d(name,
num_filters,
num_layers,
bn_node=bn.BatchNormalizationNode,
activation_node=tn.ReLUNode,
stride=(1, 1)):
nodes = []
# first node
for i in range(num_layers):
if i == 0:
# first conv
# ---
# same as middle convs, but with stride
nodes += [
tn.Conv2DNode(name + "_conv%d" % i,
num_filters=num_filters,
stride=stride,
pad="same"),
bn_node(name + "_bn%d" % i),
activation_node(name + "_activation%d" % i),
]
else:
nodes += [
ResnetInitConv2DNode(name + "_conv%d" % i,
num_filters=num_filters,
stride=(1, 1),
pad="same"),
bn_node(name + "_bn%d" % i),
activation_node(name + "_activation%d" % i),
]
# for last conv, remove activation
nodes.pop()
return tn.SequentialNode(name + "_seq", nodes)
def preactivation_residual_block_conv_2d(name,
num_filters,
num_layers,
increase_dim=None,
initial_block=False,
conv_node=tn.Conv2DNode,
bn_node=bn.BatchNormalizationNode,
activation_node=tn.ReLUNode,
input_num_filters=None,
projection_filter_size=(1, 1),
increase_dim_stride=(2, 2),
no_identity=False):
"""
from http://arxiv.org/abs/1603.05027
"""
if increase_dim is not None:
assert increase_dim in {"projection", "pad"}
first_stride = increase_dim_stride
if increase_dim == "projection":
# TODO remove pre-activation when initial block
assert not initial_block
identity_node = tn.SequentialNode(
name + "_projection",
[
bn_node(name + "_projectionbn"),
activation_node(name + "_projectionactivation"),
tn.Conv2DNode(name + "_projectionconv",
num_filters=num_filters,
filter_size=projection_filter_size,
stride=first_stride,
pad="same"),
])
elif increase_dim == "pad":
assert input_num_filters is not None
identity_node = tn.SequentialNode(
name + "_pad",
[StridedDownsampleNode(
name + "_stride",
strides=(1, 1) + first_stride),
tn.PadNode(
name + "_addpad",
padding=(0, (num_filters - input_num_filters) // 2, 0, 0))])
else:
first_stride = (1, 1)
identity_node = tn.IdentityNode(name + "_identity")
nodes = []
# first node
for i in range(num_layers):
if i == 0:
# first conv
# ---
# maybe remove initial activation
if not initial_block:
nodes += [
bn_node(name + "_bn%d" % i),
activation_node(name + "_activation%d" % i),
]
# same as middle convs, but with stride
nodes += [
conv_node(name + "_conv%d" % i,
num_filters=num_filters,
stride=first_stride,
pad="same"),
]
else:
nodes += [
bn_node(name + "_bn%d" % i),
activation_node(name + "_activation%d" % i),
conv_node(name + "_conv%d" % i,
num_filters=num_filters,
stride=(1, 1),
pad="same"),
]
residual_node = tn.SequentialNode(name + "_seq", nodes)
if no_identity:
# ability to disable resnet connections
return residual_node
else:
return tn.ElementwiseSumNode(name,
[identity_node,
residual_node])
def generalized_residual(name, nodes, identity_ratio=0.5):
return tn.ElementwiseSumNode(
name,
[_ZeroLastAxisPartitionNode(name + "_zero",
zero_ratio=(1 - identity_ratio)),
tn.SequentialNode(
name + "_seq",
nodes)])
def generalized_residual_conv_2d(name,
num_filters,
include_preactivation=True,
bn_node=bn.BatchNormalizationNode,
activation_node=tn.ReLUNode,
conv_node=tn.Conv2DNode,
identity_ratio=0.5):
"""
generalized resnet block based on pre-activation resnet
"""
nodes = []
if include_preactivation:
# add pre-activation
nodes += [
bn_node(name + "_bn"),
activation_node(name + "_activation"),
]
nodes += [conv_node(name + "_conv", num_filters=num_filters)]
return generalized_residual(name, nodes, identity_ratio)
def generalized_residual_block_conv_2d(name,
num_filters,
num_layers,
increase_dim=None,
initial_block=False,
bn_node=bn.BatchNormalizationNode,
activation_node=tn.ReLUNode,
conv_node=tn.Conv2DNode,
identity_ratio=0.5,
input_num_filters=None,
projection_filter_size=(1, 1),
increase_dim_stride=(2, 2),
no_identity=False):
if no_identity: # HACK for compatibility
identity_ratio = 0
nodes = []
if increase_dim is not None:
if increase_dim == "projection":
# TODO remove pre-activation when initial block
assert not initial_block
# TODO maybe reduce layers by 1 to have same depth
# num_layers -= 1
nodes += [tn.SequentialNode(
name + "_projection",
[bn_node(name + "_projectionbn"),
activation_node(name + "_projectionactivation"),
tn.Conv2DNode(name + "_projectionconv",
num_filters=num_filters,
filter_size=projection_filter_size,
stride=increase_dim_stride,
pad="same")])]
elif increase_dim == "pad":
assert input_num_filters is not None
nodes += [tn.SequentialNode(
name + "_pad",
[StridedDownsampleNode(
name + "_stride",
strides=(1, 1) + increase_dim_stride),
tn.PadNode(
name + "_addpad",
padding=(0, (num_filters - input_num_filters) // 2, 0, 0))])]
else:
raise ValueError(increase_dim)
for i in range(num_layers):
include_preactivation = (not initial_block) or (i != 0)
nodes += [generalized_residual_conv_2d(
"%s_%d" % (name, i),
include_preactivation=include_preactivation,
num_filters=num_filters,
activation_node=activation_node,
identity_ratio=identity_ratio)]
return tn.SequentialNode(name, nodes)
def pool_with_projection_2d(name,
projection_filters,
stride=(2, 2),
filter_size=(3, 3),
bn_node=bn.BatchNormalizationNode):
pool_node = tn.MaxPool2DNode(name + "_pool",
pool_size=stride,
stride=stride)
projection_node = tn.SequentialNode(
name + "_projection",
[tn.Conv2DNode(name + "_projectionconv",
num_filters=projection_filters,
filter_size=filter_size,
stride=stride,
pad="same"),
bn_node(name + "_projectionbn")])
return tn.ConcatenateNode(name, [pool_node, projection_node])
def forget_gate_conv_2d_node(name,
num_filters,
filter_size=(3, 3),
initial_bias=0):
return tn.ElementwiseProductNode(
name,
[tn.IdentityNode(name + "_identity"),
tn.SequentialNode(
name + "_forget",
[tn.Conv2DWithBiasNode(name + "_conv",
num_filters=num_filters,
filter_size=filter_size,
stride=(1, 1),
pad="same"),
tn.AddConstantNode(name + "_initial_bias", value=initial_bias),
tn.SigmoidNode(name + "_sigmoid")])])
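# A hedged numpy sketch of the forget-gate pattern assembled above (the idea
# only, not the treeano API): the input is scaled elementwise by a sigmoid
# gate, and a positive initial bias starts the gate mostly open.
def _demo_forget_gate():
    import numpy as np
    x = np.array([2.0, -1.0, 0.5])                    # node input
    gate_preactivation = np.array([0.3, -0.2, 0.0])   # stands in for the conv output
    initial_bias = 1.0
    gate = 1.0 / (1.0 + np.exp(-(gate_preactivation + initial_bias)))
    return x * gate                                   # gated input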
avg_line_length: 37.70625 | max_line_length: 82 | alphanum_fraction: 0.477485

hexsha: 28acfde090e21839e1960e00b53a1c31a3399db4 | size: 6,857 | ext: py | lang: Python
repo: caoxiaoyue/PyAutoGalaxy | path: autogalaxy/mock/mock.py | head_hexsha: ad2b4b27404f5bf0f65ba9a0cd7c3ee6570e2d05 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
from astropy import constants
import math
import numpy as np  # used below (e.g. np.ones); the star imports may also re-export it
import autofit as af
import autoarray as aa
import autogalaxy as ag
from autoarray.mock.mock import *
from autofit.mock.mock import *
from autofit.mock import mock as af_m
# MockProfiles #
class MockLightProfile(ag.lp.LightProfile):
def __init__(
self,
image_2d=None,
image_2d_value=None,
image_2d_first_value=None,
value=None,
value1=None,
):
super().__init__()
self.image_2d = image_2d
self.image_2d_value = image_2d_value
self.image_2d_first_value = image_2d_first_value
self.value = value
self.value1 = value1
def image_2d_from(self, grid):
if self.image_2d is not None:
return self.image_2d
image_2d = np.ones(shape=(grid.shape[0]))
if self.image_2d_first_value is not None:
image_2d[0] = self.image_2d_first_value
return image_2d
class MockMassProfile(ag.mp.MassProfile):
def __init__(
self,
convergence_2d=None,
potential_2d=None,
deflections_yx_2d=None,
value=None,
value1=None,
):
super().__init__()
self.convergence_2d = convergence_2d
self.potential_2d = potential_2d
self.deflections_2d = deflections_yx_2d
self.value = value
self.value1 = value1
def convergence_2d_from(self, grid):
return self.convergence_2d
def potential_2d_from(self, grid):
return self.potential_2d
def deflections_yx_2d_from(self, grid):
return self.deflections_2d
# Mock Galaxy #
class MockGalaxy:
def __init__(self, value, shape=1):
self.value = value
self.shape = shape
@aa.grid_dec.grid_2d_to_structure
def image_2d_from(self, grid):
return np.full(shape=self.shape, fill_value=self.value)
@aa.grid_dec.grid_2d_to_structure
def convergence_2d_from(self, grid):
return np.full(shape=self.shape, fill_value=self.value)
@aa.grid_dec.grid_2d_to_structure
def potential_2d_from(self, grid):
return np.full(shape=self.shape, fill_value=self.value)
@aa.grid_dec.grid_2d_to_structure
def deflections_yx_2d_from(self, grid):
return np.full(shape=(self.shape, 2), fill_value=self.value)
# Mock Cosmology #
class Value:
def __init__(self, value):
self.value = value
def to(self, *args, **kwargs):
return Value(value=self.value)
class MockCosmology:
def __init__(
self,
arcsec_per_kpc=0.5,
kpc_per_arcsec=2.0,
critical_surface_density=2.0,
cosmic_average_density=2.0,
):
self.arcsec_per_kpc = arcsec_per_kpc
self.kpc_per_arcsec = kpc_per_arcsec
self.critical_surface_density = critical_surface_density
self.cosmic_average_density = cosmic_average_density
def arcsec_per_kpc_proper(self, z):
return Value(value=self.arcsec_per_kpc)
def kpc_per_arcsec_proper(self, z):
return Value(value=self.kpc_per_arcsec)
def angular_diameter_distance(self, z):
return Value(value=1.0)
def angular_diameter_distance_z1z2(self, z1, z2):
const = constants.c.to("kpc / s") ** 2.0 / (
4 * math.pi * constants.G.to("kpc3 / (solMass s2)")
)
return Value(value=self.critical_surface_density * const.value)
def critical_density(self, z):
return Value(value=self.cosmic_average_density)
# Mock Model-Fitting #
class MockResult(af_m.MockResult):
def __init__(
self,
samples=None,
instance=None,
model=None,
analysis=None,
search=None,
mask=None,
model_image=None,
path_galaxy_tuples=None,
hyper_galaxy_image_path_dict=None,
hyper_model_image=None,
hyper_galaxy_visibilities_path_dict=None,
hyper_model_visibilities=None,
pixelization=None,
):
super().__init__(
samples=samples,
instance=instance,
model=model,
analysis=analysis,
search=search,
)
self.mask = mask
self.hyper_galaxy_image_path_dict = hyper_galaxy_image_path_dict
self.hyper_model_image = hyper_model_image
self.path_galaxy_tuples = path_galaxy_tuples
self.hyper_galaxy_visibilities_path_dict = hyper_galaxy_visibilities_path_dict
self.hyper_model_visibilities = hyper_model_visibilities
self.model_image = model_image
self.unmasked_model_image = model_image
self.pixelization = pixelization
self.max_log_likelihood_plane = ag.Plane(galaxies=[ag.Galaxy(redshift=0.5)])
@property
def last(self):
return self
class MockResults(af.ResultsCollection):
def __init__(
self,
samples=None,
instance=None,
model=None,
analysis=None,
search=None,
mask=None,
model_image=None,
hyper_galaxy_image_path_dict=None,
hyper_model_image=None,
hyper_galaxy_visibilities_path_dict=None,
hyper_model_visibilities=None,
pixelization=None,
):
"""
A collection of results from previous searches. Results can be obtained using
an index or the name of the search from which they came.
"""
super().__init__()
result = MockResult(
samples=samples,
instance=instance,
model=model,
analysis=analysis,
search=search,
mask=mask,
model_image=model_image,
hyper_galaxy_image_path_dict=hyper_galaxy_image_path_dict,
hyper_model_image=hyper_model_image,
hyper_galaxy_visibilities_path_dict=hyper_galaxy_visibilities_path_dict,
hyper_model_visibilities=hyper_model_visibilities,
pixelization=pixelization,
)
self.__result_list = [result]
@property
def last(self):
"""
The result of the last search
"""
if len(self.__result_list) > 0:
return self.__result_list[-1]
return None
def __getitem__(self, item):
"""
Get the result of a previous search by index
Parameters
----------
item: int
The index of the result
Returns
-------
result: Result
The result of a previous search
"""
return self.__result_list[item]
def __len__(self):
return len(self.__result_list)
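# Illustrative sketch (not part of the original module): MockLightProfile
# returns ones matching the grid length, optionally overriding the first
# pixel; the grid values themselves are ignored, only the shape matters.
def _demo_mock_light_profile():
    profile = MockLightProfile(image_2d_first_value=0.5)
    image = profile.image_2d_from(grid=np.zeros((4, 2)))
    return image  # array([0.5, 1., 1., 1.])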
avg_line_length: 27.210317 | max_line_length: 120 | alphanum_fraction: 0.613825

hexsha: 28aedc5062be8fe00618f3317176a7524c4110f1 | size: 9,441 | ext: py | lang: Python
path: classification/active_learning_scatterplots_annotated_tresh.py | head_hexsha: 3f37edc4b8f69f61d02b881242522f6fa15e2695 | licenses: ["MIT"]
max_stars_repo: gbosetti/ca | max_stars_count: null
max_issues_repo: gbosetti/ca | max_issues_count: 4 (2021-06-08T22:30:03.000Z to 2022-03-12T00:48:52.000Z)
max_forks_repo: gbosetti/cati | max_forks_count: null
import json
import ast
import plotly.plotly as py
import plotly.graph_objs as go
import plotly.io as pio
import os
import numpy as np
import plotly
plotly.io.orca.config.executable = '/home/gabi/dev/miniconda3/bin/orca'  # may be useful on Ubuntu
#PARAMS
logs_path = "C:\\Users\\gbosetti\\Desktop\\test\\logs"
output_path = "C:\\Users\\gbosetti\\Desktop"
# Functions
def draw_scatterplot(**kwargs):
data = []
annotations = []
for res in kwargs["results"]:
x = res[kwargs['x_axis_prop']]
y = res[kwargs['y_axis_prop']]
a,x_markers,y_markers = annotate_extrema(y, 5, 3.5, 0.75, x)
annotations = annotations + a
trace = go.Scatter(
x=res[kwargs["x_axis_prop"]],
y=res[kwargs["y_axis_prop"]],
name=res[kwargs["trace_name"]]
)
data.append(trace)
layout = go.Layout(
title=go.layout.Title(
text=kwargs["title"],
xref='paper',
x=0
),
xaxis=go.layout.XAxis(
title=go.layout.xaxis.Title(
text=kwargs["x_axis_label"],
font=dict(
size=18,
color='#7f7f7f'
)
)
),
yaxis=go.layout.YAxis(
title=go.layout.yaxis.Title(
text=kwargs["y_axis_label"],
font=dict(
size=18,
color='#7f7f7f'
)
)
)
)
# add annotations
layout.update(dict(annotations=annotations))
fig = go.Figure(data=data, layout=layout)
pio.write_image(fig, kwargs["full_path"])
def inflexion_points(y,x):
# a state machine to find inflexion points
last_y = None
points = []
state = 0
for x_val,y_val in zip(x,y):
if state == 0:
last_y = y_val
last_x = x_val
state = 1
elif state == 1:
if last_y > y_val:
state = 2
last_y = y_val
last_x = x_val
points.append({"x":last_x,"y":last_y, "inflexion": False})
else:
last_y = y_val
last_x = x_val
points.append({"x":last_x,"y":last_y, "inflexion": False})
state = 3
elif state == 2:
if last_y < y_val:
# change state because found an inflexion point
state = 3
# the last one was an inflexion point, annotate using the previous values
points.append({"x":last_x,"y":last_y, "inflexion": True})
last_y = y_val
last_x = x_val
else:
# stay on the same state until the next inflexion point
points.append({"x":last_x,"y":last_y, "inflexion": False})
last_y = y_val
last_x = x_val
elif state == 3:
if last_y > y_val:
state = 2
# annotate
points.append({"x":last_x,"y":last_y, "inflexion": True})
last_y = y_val
last_x = x_val
else:
# stay on the same state until the next inflexion point
points.append({"x":last_x,"y":last_y, "inflexion": False})
last_y = y_val
last_x = x_val
# the last point can be tagged if needed
points.append({"x":last_x,"y":last_y, "inflexion": True})
return np.asarray(points)
def annotate_extrema(y, lag, threshold, influence,x):
ip = inflexion_points(x=x,y=y)
th = threshold_points(y,lag,threshold,influence)
state = 0
annotations = []
markers_x = []
markers_y = []
for signal,inflexion in zip(th["signals"], ip):
if state == 0:
if signal == 0:
# go to the next
state = 0
else:
state = 1
if inflexion["inflexion"]:
state = 0
annotations.append(go.layout.Annotation(text="("+"{:12.2f}".format(inflexion["x"]).strip()+";"+"{:12.2f}".format(inflexion["y"]).strip()+")", x=inflexion["x"], y=inflexion["y"],align="center", valign='bottom', showarrow=False))
markers_x.append(inflexion["x"])
markers_y.append(inflexion["y"])
elif state == 1:
if inflexion["inflexion"]:
state = 0
annotations.append(go.layout.Annotation(text="("+"{:12.2f}".format(inflexion["x"]).strip()+";"+"{:12.2f}".format(inflexion["y"]).strip()+")", x=inflexion["x"], y=inflexion["y"],align="center", valign='bottom', showarrow=False))
markers_x.append(inflexion["x"])
markers_y.append(inflexion["y"])
else:
# keep looking
state = 1
return annotations,markers_x,markers_y
# https://stackoverflow.com/questions/22583391/peak-signal-detection-in-realtime-timeseries-data/43512887#43512887
def threshold_points(y, lag, threshold, influence):
signals = np.zeros(len(y))
filteredY = np.array(y)
avgFilter = [0]*len(y)
stdFilter = [0]*len(y)
avgFilter[lag - 1] = np.mean(y[0:lag])
stdFilter[lag - 1] = np.std(y[0:lag])
for i in range(lag, len(y)):
if abs(y[i] - avgFilter[i-1]) > threshold * stdFilter [i-1]:
if y[i] > avgFilter[i-1]:
signals[i] = 1
else:
signals[i] = -1
filteredY[i] = influence * y[i] + (1 - influence) * filteredY[i-1]
avgFilter[i] = np.mean(filteredY[(i-lag+1):i+1])
stdFilter[i] = np.std(filteredY[(i-lag+1):i+1])
else:
signals[i] = 0
filteredY[i] = y[i]
avgFilter[i] = np.mean(filteredY[(i-lag+1):i+1])
stdFilter[i] = np.std(filteredY[(i-lag+1):i+1])
return dict(signals = np.asarray(signals),
avgFilter = np.asarray(avgFilter),
stdFilter = np.asarray(stdFilter))
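# Illustrative sketch (hypothetical data): threshold_points flags samples that
# deviate from a rolling mean by more than `threshold` standard deviations, so
# the spike below should produce a +1 in the returned signals array.
def _demo_threshold_points():
    y = [1.0, 1.1, 0.9, 1.0, 1.05, 0.95, 1.0, 5.0, 1.0, 0.9, 1.1]
    result = threshold_points(y, lag=5, threshold=3.5, influence=0.5)
    return result["signals"]  # expect 1.0 at the index of the 5.0 spike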
def read_file(path):
file = open(path, "r")
logs = '['
for line in file:
line = line.replace('", "f1"', ', "f1"')
line = line.replace('", "recall"', ', "recall"')
line = line.replace('", "precision"', ', "precision"')
line = line.replace('", "positive_precision"', ', "positive_precision"')
line = line.replace('", "wrong_pred_answers"', ', "wrong_pred_answers"')
logs = logs + line
logs = logs[:-1]
logs = logs + ']'
return json.loads(logs.replace('\n', ','))
def process_results(logs):
loop_logs = [log for log in logs if 'loop' in log]
loops_values = [log["loop"] for log in logs if 'loop' in log] # datetime
accuracies = [log["accuracy"] for log in logs if 'loop' in log]
#diff_accuracies = [0 if log["diff_accuracy"] == 'None' else float(log["diff_accuracy"]) for log in logs if 'loop' in log]
precision = [log["precision"] for log in logs if 'loop' in log]
positive_precision = [log["positive_precision"] for log in logs if 'loop' in log]
recall = [log["recall"] for log in logs if 'loop' in log]
wrong_answers = [log["wrong_pred_answers"] for log in logs if 'loop' in log]
return loops_values, accuracies, wrong_answers, precision, positive_precision, recall #diff_accuracies, wrong_answers
def print_in_file(content, path):
file = open(path, "a+")
file.write(content)
file.close()
def draw_evolution(var_name, labeled_var_name, res):
draw_scatterplot(title="Evolution of " + labeled_var_name + " across loops", results=res,
x_axis_label="Loop", y_axis_label=labeled_var_name,
x_axis_prop="loops", y_axis_prop=var_name,
trace_name="scenario_name", full_path=os.path.join(output_path, '_ANNOTATED_EXT_HYP_' + labeled_var_name + '.png'))
# Initialization
logs_folders = [f.path for f in os.scandir(logs_path) if f.is_dir() ]
# Looping each session to get the HYP results
hyp_results = []
for path in logs_folders:
# Get all the HYP files for the session
session_files = [f for f in os.scandir(path) if not f.is_dir() and "_OUR_" in f.name]
# Get the logs of the only file for HYP
logs = read_file(session_files[0].path)
# Get the values from such file
loops_values, accuracies, wrong_answers, precision, positive_precision, recall = process_results(logs)
hyp_results.append({ "loops": loops_values,
"accuracies": accuracies, # "diff_accuracies": diff_accuracies,
"precision": precision,
"positive_precision": positive_precision,
"recall": recall,
"wrong_answers": wrong_answers,
"_total_wrong_answers": sum(wrong_answers),
"_total_loops": len(loops_values),
"scenario_name": "Secnario " + path[-1:], "_max_accuracy": round(max(accuracies), 2)})
print("hyp_results:\n", json.dumps(hyp_results, indent=4, sort_keys=True))
draw_evolution("accuracies", "accuracy", hyp_results)
# draw_evolution("diff_accuracies", "diff. accuracy", hyp_results)
draw_evolution("wrong_answers", "wrong answers", hyp_results)
draw_evolution("recall", "recall", hyp_results)
draw_evolution("precision", "precision", hyp_results)
draw_evolution("positive_precision", "positive precision", hyp_results)
avg_line_length: 37.169291 | max_line_length: 247 | alphanum_fraction: 0.565724

hexsha: 28b0de3981830a9c1ce4101e37d4ea75cec7989b | size: 1,173 | ext: py | lang: Python
repo: AmanPriyanshu/Federated-Neural-Collaborative-Filtering | path: dataloader.py | head_hexsha: 44dd31cec644859faa44adf54ace3981d8be5bda | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2022-03-08T14:28:00.000Z to 2022-03-08T14:28:00.000Z)
import numpy as np
import os
class MovielensDatasetLoader:
def __init__(self, filename='./ml-1m/ratings.dat', npy_file='./ml-1m/ratings.npy', num_movies=None, num_users=None):
self.filename = filename
self.npy_file = npy_file
self.rating_tuples = self.read_ratings()
if num_users is None:
self.num_users = np.max(self.rating_tuples.T[0])
else:
self.num_users = num_users
if num_movies is None:
self.num_movies = np.max(self.rating_tuples.T[1])
else:
self.num_movies = num_movies
self.ratings = self.load_ui_matrix()
def read_ratings(self):
ratings = open(self.filename, 'r').readlines()
data = np.array([[int(i) for i in rating[:-1].split("::")[:-1]] for rating in ratings])
return data
def generate_ui_matrix(self):
data = np.zeros((self.num_users, self.num_movies))
for rating in self.rating_tuples:
data[rating[0]-1][rating[1]-1] = rating[2]
return data
def load_ui_matrix(self):
if not os.path.exists(self.npy_file):
ratings = self.generate_ui_matrix()
np.save(self.npy_file, ratings)
return np.load(self.npy_file)
if __name__ == '__main__':
dataloader = MovielensDatasetLoader()
print(dataloader.ratings)
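# Illustrative sketch (hypothetical ratings, independent of ml-1m): how the
# (user, movie, rating) tuples are scattered into the dense user-item matrix,
# mirroring generate_ui_matrix above with 1-indexed ids.
def _demo_ui_matrix():
    rating_tuples = np.array([[1, 2, 5], [2, 1, 3]])
    matrix = np.zeros((2, 2))
    for user, movie, rating in rating_tuples:
        matrix[user - 1][movie - 1] = rating
    return matrix  # array([[0., 5.], [3., 0.]])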
avg_line_length: 30.868421 | max_line_length: 117 | alphanum_fraction: 0.719523

hexsha: 28b1ad0e46f2ba4d47dfc0ef0bd3f82478359754 | size: 1,785 | ext: py | lang: Python
repo: roskakori/sanpo | path: tests/test_command.py | head_hexsha: 909ea663a9a4f12495decb828e2256e45a9cee73 | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: 2 (2021-09-07T17:32:24.000Z to 2022-01-13T20:44:41.000Z) | max_forks_count: null
# Copyright (c) 2021, Thomas Aglassinger
# All rights reserved. Distributed under the BSD 3-Clause License.
from sanpo.command import main_without_logging_setup
from ._common import PoFileTest
class CommandTest(PoFileTest):
def test_can_show_help(self):
with self.assertRaises(SystemExit):
main_without_logging_setup(["--help"])
def test_can_show_version(self):
with self.assertRaises(SystemExit):
main_without_logging_setup(["--version"])
def test_can_sanitize_single_file(self):
self.write_po_file(self.test_can_sanitize_single_file.__name__)
initial_po_lines = self.po_lines()
self.assertEquals(main_without_logging_setup([self.po_path]), 0)
sanitized_po_lines = self.po_lines()
self.assertNotEqual(initial_po_lines, sanitized_po_lines)
def test_can_sanitize_multiple_files(self):
po_path_to_sanitized_po_lines_map = {}
file_count = 3
for file_number in range(1, file_count + 1):
test_name = f"{self.test_can_sanitize_multiple_files.__name__}_{file_number}"
self.write_po_file(test_name)
assert self.po_path not in po_path_to_sanitized_po_lines_map
po_path_to_sanitized_po_lines_map[self.po_path] = self.po_lines()
po_paths_to_sanitize = list(po_path_to_sanitized_po_lines_map.keys())
self.assertEquals(main_without_logging_setup(po_paths_to_sanitize), 0)
for po_path, initial_po_lines in po_path_to_sanitized_po_lines_map.items():
sanitized_po_lines = self.po_lines(po_path)
self.assertNotEqual(sanitized_po_lines, initial_po_lines)
def test_fails_on_non_existent_po_file(self):
self.assertEquals(main_without_logging_setup(["no_such.po"]), 1)
avg_line_length: 42.5 | max_line_length: 89 | alphanum_fraction: 0.733894

hexsha: 28b25075889c486e4fe8f7d95019574b35bd45f1 | size: 3,371 | ext: py | lang: Python
repo: salvatorecorvaglia/ledger-app-algorand | path: tests/test/test_sign_msgpack_keyreg.py | head_hexsha: 549b863af169b3ce5c7721f2e6a7fca5b4bd05fb | licenses: ["MIT"]
max_stars_count: 16 (2019-06-12T11:46:12.000Z to 2022-01-30T16:28:42.000Z)
max_issues_count: 42 (2019-07-26T13:31:03.000Z to 2022-03-18T15:18:52.000Z)
max_forks_count: 38 (2019-04-08T14:16:22.000Z to 2022-03-18T06:42:29.000Z)
import pytest
import logging
import struct
import base64
import msgpack
import nacl.signing
import algosdk
from . import txn_utils
from . import ui_interaction
from . import speculos
@pytest.fixture
def keyreg_txn():
b64votekey = "eXq34wzh2UIxCZaI1leALKyAvSz/+XOe0wqdHagM+bw="
votekey_addr = algosdk.encoding.encode_address(base64.b64decode(b64votekey))
b64selkey = "X84ReKTmp+yfgmMCbbokVqeFFFrKQeFZKEXG89SXwm4="
selkey_addr = algosdk.encoding.encode_address(base64.b64decode(b64selkey))
txn = algosdk.transaction.KeyregTxn(
sender="YTOO52XR6UWNM6OUUDOGWVTNJYBWR5NJ3VCJTZUSR42JERFJFAG3NFD47U",
votekey=votekey_addr,
selkey=selkey_addr,
votefst= 6200000,
votelst=9500000,
votekd= 1730,
fee= 2000,
flat_fee=True,
first=6002000,
last=6003000,
gen="testnet-v1.0",
gh="SGO1GKSzyE7IEPItTxCByw9x8FmnrCDexi9/cOUJOiI="
)
return txn
def get_expected_messages(current_txn):
votepk = str(base64.b64encode(algosdk.encoding.decode_address(current_txn.votepk)),'ascii').lower()
vrfpk = str(base64.b64encode(algosdk.encoding.decode_address(current_txn.selkey)),'ascii').lower()
# if current_txn.? == True:
# participating_flag = 'yes'
# else:
# participating_flag = 'no'
messages = [['review', 'transaction'],
['txn type', 'key reg'],
['sender', current_txn.sender.lower()],
['fee (alg)', str(current_txn.fee*0.000001)],
['genesis id', current_txn.genesis_id.lower()],
['genesis hash', current_txn.genesis_hash.lower()],
['vote pk', votepk],
['vrf pk', vrfpk],
['vote first', str(current_txn.votefst)],
['vote last', str(current_txn.votelst)],
['key dilution', str(current_txn.votekd)],
['participating', 'yes'],
['sign', 'transaction']]
return messages
txn_labels = {
'review', 'txn type', 'sender', 'fee (alg)', 'genesis id', 'genesis hash', 'vote pk','vrf pk',
'vote first', 'vote last', 'key dilution', 'participating', 'sign'
}
conf_label = "sign"
def test_sign_msgpack_asset_validate_display(dongle, keyreg_txn):
"""
"""
decoded_txn= base64.b64decode(algosdk.encoding.msgpack_encode(keyreg_txn))
with dongle.screen_event_handler(ui_interaction.confirm_on_lablel, txn_labels, conf_label):
logging.info(decoded_txn)
_ = txn_utils.sign_algo_txn(dongle, decoded_txn)
messages = dongle.get_messages()
logging.info(messages)
logging.info(get_expected_messages(keyreg_txn))
assert get_expected_messages(keyreg_txn) == messages
def test_sign_msgpack_with_default_account(dongle, keyreg_txn):
"""
"""
apdu = struct.pack('>BBBBB', 0x80, 0x3, 0x0, 0x0, 0x0)
pubKey = dongle.exchange(apdu)
decoded_txn= base64.b64decode(algosdk.encoding.msgpack_encode(keyreg_txn))
with dongle.screen_event_handler(ui_interaction.confirm_on_lablel, txn_labels, conf_label):
logging.info(decoded_txn)
txnSig = txn_utils.sign_algo_txn(dongle, decoded_txn)
assert len(txnSig) == 64
verify_key = nacl.signing.VerifyKey(pubKey)
verify_key.verify(smessage=b'TX' + decoded_txn, signature=txnSig)
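# Illustrative sketch (no dongle involved): the 5-byte header packed above
# follows the standard ISO 7816 APDU layout CLA, INS, P1, P2, Lc, big-endian;
# 0x80/0x03 is the class/instruction pair this test uses to fetch the pubkey.
def _demo_apdu_header():
    apdu = struct.pack('>BBBBB', 0x80, 0x3, 0x0, 0x0, 0x0)
    cla, ins, p1, p2, lc = struct.unpack('>BBBBB', apdu)
    return cla, ins  # (128, 3)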
avg_line_length: 33.04902 | max_line_length: 103 | alphanum_fraction: 0.660931

hexsha: 28b391732090366f571ee26a22266dbf07b53e53 | size: 613 | ext: py | lang: Python
repo: AkshatSh/VideoSearchEngine | path: VideoSearchEngine/page_rank.py | head_hexsha: 57f64b241b8a7bbc377ce7826e1206f679f41def | licenses: ["MIT"]
max_stars_count: 49 (2018-05-22T09:06:18.000Z to 2022-02-26T10:03:43.000Z)
max_issues_count: 17 (2018-05-18T21:14:36.000Z to 2019-06-06T09:17:18.000Z)
max_forks_count: 18 (2018-06-06T22:14:26.000Z to 2021-11-23T08:59:31.000Z)
from sklearn.feature_extraction.text import TfidfVectorizer
from database_utils import get_all_data, remove_summary
from collections import OrderedDict
import operator
def rank_pages(summaries, query):
vect = TfidfVectorizer()
result = {}
for video in summaries:
tfidf = vect.fit_transform([video['summary'], query])
score = (tfidf * tfidf.T).A[1][0]
#if(score > 0.1):
result[video['name']] = score
return OrderedDict(sorted(result.items(), key=operator.itemgetter(1), reverse=True))
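# Illustrative sketch (made-up summaries): rank_pages fits a TF-IDF vectorizer
# on each (summary, query) pair and scores them by their dot product, so the
# dog/park video should rank above the unrelated one for this query.
def _demo_rank_pages():
    summaries = [
        {"name": "walk", "summary": "a dog runs in the park"},
        {"name": "news", "summary": "stock markets fell sharply today"},
    ]
    return rank_pages(summaries, "dog park")  # 'walk' first; 'news' scores 0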
def main():
remove_summary('test')
if __name__ == '__main__':
main()
avg_line_length: 26.652174 | max_line_length: 88 | alphanum_fraction: 0.681892

hexsha: 28b3adff40823a3f0d9ff8ca30f874e0ce8a4a4f | size: 3,112 | ext: py | lang: Python
repo: radekg/netapp-ontap-lib-get | path: generated-libraries/python/netapp/net/ifgrp_info.py | head_hexsha: 6445ebb071ec147ea82a486fbe9f094c56c5c40d | licenses: ["MIT"]
max_stars_count: 2 (2017-03-28T15:31:26.000Z to 2018-08-16T22:15:18.000Z) | max_issues_count: null | max_forks_count: null
from netapp.netapp_object import NetAppObject
class IfgrpInfo(NetAppObject):
"""
ifgrp name, type, and components.
"""
_interface_name = None
@property
def interface_name(self):
"""
The interface name.
"""
return self._interface_name
@interface_name.setter
def interface_name(self, val):
if val != None:
self.validate('interface_name', val)
self._interface_name = val
_links = None
@property
def links(self):
"""
array of interface names in interface group.
An ifgrp with no members is possible.
"""
return self._links
@links.setter
def links(self, val):
if val != None:
self.validate('links', val)
self._links = val
_favored = None
@property
def favored(self):
"""
interface that is favored.
Only applies if ifgrp-type = single.
"""
return self._favored
@favored.setter
def favored(self, val):
if val != None:
self.validate('favored', val)
self._favored = val
_ifgrp_type = None
@property
def ifgrp_type(self):
"""
Possible values: [single|multi|lacp].
"""
return self._ifgrp_type
@ifgrp_type.setter
def ifgrp_type(self, val):
if val != None:
self.validate('ifgrp_type', val)
self._ifgrp_type = val
_ifgrp_policy = None
@property
def ifgrp_policy(self):
"""
Possible values: [rr|mac|ip|port|single]. Default is ip.
"""
return self._ifgrp_policy
@ifgrp_policy.setter
def ifgrp_policy(self, val):
if val != None:
self.validate('ifgrp_policy', val)
self._ifgrp_policy = val
_nofavored = None
@property
def nofavored(self):
"""
interface that is not favored.
Only applies if ifgrp-type = single.
"""
return self._nofavored
@nofavored.setter
def nofavored(self, val):
if val != None:
self.validate('nofavored', val)
self._nofavored = val
@staticmethod
def get_api_name():
return "ifgrp-info"
@staticmethod
def get_desired_attrs():
return [
'interface-name',
'links',
'favored',
'ifgrp-type',
'ifgrp-policy',
'nofavored',
]
def describe_properties(self):
return {
'interface_name': { 'class': basestring, 'is_list': False, 'required': 'required' },
'links': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'favored': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'ifgrp_type': { 'class': basestring, 'is_list': False, 'required': 'required' },
'ifgrp_policy': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'nofavored': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
avg_line_length: 27.539823 | max_line_length: 96 | alphanum_fraction: 0.547237

hexsha: 28b3f20587976d38da80b634aca51223e642e85b | size: 4,679 | ext: py | lang: Python
repo: TimothyStiles/nrpcalc | path: nrpcalc/base/utils.py | head_hexsha: 42ab25e929d472c2e808dd3bec6430bc80b42a06 | licenses: ["MIT"]
max_stars_count: 6 (2020-07-27T17:59:19.000Z to 2022-03-18T03:33:17.000Z)
max_issues_count: 3 (2020-07-17T23:10:36.000Z to 2021-09-10T05:19:47.000Z)
max_forks_count: 3 (2020-07-27T17:59:22.000Z to 2021-02-08T15:47:28.000Z)
import os
import sys
from typing import Tuple
import pkg_resources
from Bio import SeqIO
import RNA
import numpy as np
complement_table = str.maketrans('ATGCU', 'TACGA')
def stream_fasta_seq_list(fasta_filename):
# "rU" universal-newline mode was removed in Python 3.11; plain "r" behaves the same here
with open(fasta_filename, "r") as handle:
for record in SeqIO.parse(handle, "fasta"):
yield str(record.seq)
def get_fasta_seq_list(fasta_filename):
return list(stream_fasta_seq_list(fasta_filename))
def stream_txt_seq_list(text_filename):
with open(text_filename) as infile:
for line in infile:
yield line.strip()
def get_txt_seq_list(text_filename):
return list(stream_txt_seq_list(text_filename))
def uniquify_background_list(background_list):
uniq_background_set = set()
while background_list:
uniq_background_set.add(background_list.pop())
background_list = []
while uniq_background_set:
background_list.append(uniq_background_set.pop())
return background_list
def stream_kmers(seq, k):
if k >= len(seq):
return [seq]
return (seq[i:i+k] for i in range(len(seq)-k+1))
def get_comp(seq):
return seq.translate(complement_table)
def get_revcomp(seq):
return get_comp(seq)[::-1]
def stream_min_kmers(seq, k):
for kmer in stream_kmers(seq, k):
yield min(kmer, get_revcomp(kmer))
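# Illustrative sketch: a canonical k-mer is the lexicographic min of a k-mer
# and its reverse complement, so a sequence and its reverse complement stream
# the same multiset of canonical k-mers.
def _demo_min_kmers():
    assert get_revcomp('ATGCA') == 'TGCAT'
    return list(stream_min_kmers('ATGCA', 3))  # ['ATG', 'GCA', 'GCA']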
class Fold(object):
def __init__(
self,
temp=37.0,
dangles=2,
part_type='RNA'):
if not part_type in ['RNA', 'DNA']:
part_type = 'RNA'
if part_type == 'DNA':
RNA.cvar.noGU = True
RNA.cvar.noGUclosure = True
self.parameter_directory = os.path.dirname(
os.path.abspath(__file__))#"/usr/local/share/ViennaRNA/"
# Temperature in Celsius;
# default=37.0 (float)
RNA.cvar.temperature = temp
# Dangling end energies (0,1,2);
# see RNAlib documentation;
# default=2 (int)
RNA.cvar.dangles = dangles
self.settings = RNA.md()
self.part_type = part_type
parameter_file = pkg_resources.resource_filename(
'nrpcalc', 'base/{}.par'.format(
self.part_type))
RNA.read_parameter_file(parameter_file)
if part_type == 'DNA':
self.clear_warning()
self.adjust = self.adjust_dG(temp)
def adjust_dG(self, temp):
# Adjustment according to Dirks et al.
kB = 0.00198717 # Boltzmann constant in kcal/mol/K
T = temp
a = [-3.983035, 301.797, 522528.9, 69.34881, 999.974950]
# Calculate the number of moles of water per liter (molarity) at temperature (T in deg C)
# Density of water calculated using data from
# Tanaka M., Girard, G., Davis, R., Peuto A., Bignell, N.
# Recommended table for the density of water..., Metrologia, 2001, 38, 301-309
pH2O = a[4] * (
1 - (T+a[0])**2 * (T+a[1]) / \
(a[2]) / \
(T+a[3])) / \
18.0152
return -kB * (T + 273.15) * np.log(pH2O)
def clear_warning(self):
clrlen = len('WARNING: stacking enthalpies not symmetric')
sys.stdout.write('\033[F\033[F\033[F\033[F')
sys.stdout.write(' '*clrlen+'\n')
sys.stdout.write(' '*clrlen+'\n')
sys.stdout.write(' '*clrlen+'\n')
sys.stdout.write(' '*clrlen+'\n')
sys.stdout.write('\033[F\033[F\033[F\033[F')
sys.stdout.flush()
def evaluate_mfe(self, seq, dg=False):
# MFE Structure Only
fc_obj = RNA.fold_compound(seq, self.settings)
struct,energy = fc_obj.mfe()
if not dg:
return struct
else:
return struct, energy
def evaluate_centroid(self, seq, dg=False):
# Centroid Structure Only
fc_obj = RNA.fold_compound(seq, self.settings)
fc_obj.pf()
struct,energy = fc_obj.centroid()
if not dg:
return struct
else:
return struct, energy
def design(self, seq, struct):
# Closest MFE Structure Sequence
inv = RNA.inverse_fold(seq, struct)[0]
if self.part_type == 'DNA':
inv = inv.replace('U', 'T').replace('u', 't')
return inv
def evaluate_mfe_dimer(self, seq1, seq2):
# MFE Dimer Structure and Energy
fc_obj = RNA.fold_compound(seq1+'&'+seq2, self.settings)
struct,energy = fc_obj.mfe_dimer()
struct1 = struct[:len(seq1)]
struct2 = struct[len(seq1):]
energy += self.adjust
return (struct1, struct2, energy)
if __name__ == '__main__':
pass
avg_line_length: 29.613924 | max_line_length: 97 | alphanum_fraction: 0.596281

hexsha: 28b511c9bffc7778b947732b16ddfa8179fa7a1e | size: 2,288 | ext: py | lang: Python
repo: GwenIves/Scripts | path: src/tab_list_analyse.py | head_hexsha: d2ec5ae0df25f16d5c1fb766767ec358de7d2f97 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
#!/usr/bin/env python3
#
# Analyses a hierarchical tab-indented list file
# and prints out subsection sizes on a requested nesting level
# Subsections with the same name in different subtrees
# are treated as continuations of a single section
# The script accepts two command line parameters:
# file name
# indentation level
#
import sys
def get_headings(filename, indentation_level):
diff = 0
heading = ''
headings = {}
try:
with open(filename, 'r') as fh:
for line in fh:
line = line.rstrip()
if len(line) == 0:
continue
# Include commented out lines
if line[0] == '#':
line = line[1:].rstrip()
if len(line) == 0:
continue
if line == "==========EndOfList==========":
break
count = 0
for c in line:
if c == '\t':
count += 1
else:
break
line = line.lstrip()
if count <= indentation_level:
if len(heading) > 0:
headings[heading] = headings.get(heading, 0) + diff - 1
diff = 0
if count == indentation_level:
heading = line[:]
diff += 1
except EnvironmentError as err:
print(err)
if len(heading) > 0:
headings[heading] = headings.get(heading, 0) + diff - 1
return headings
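# Illustrative sketch (hypothetical file contents): a level-0 heading with two
# tab-indented children reports size 2, while a childless heading reports 0.
def _demo_get_headings():
    import os
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as fh:
        fh.write("A\n\tx\n\ty\nB\n")
        path = fh.name
    try:
        return get_headings(path, 0)  # {'A': 2, 'B': 0}
    finally:
        os.remove(path)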
def main():
if len(sys.argv) < 2:
print("usage: {0} <filename> <nesting level>".format(sys.argv[0]))
sys.exit(1)
if len(sys.argv) < 3:
level = 0
else:
level = int(sys.argv[2])
if level < 0:
level = 0
try:
headings = get_headings(sys.argv[1], level)
except FileNotFoundError:
print("Error: unable to process file {0}".format(sys.argv[1]))
sys.exit(1)
for heading in sorted(headings, key=headings.get, reverse=True):
if headings[heading] > 0:
print('{0}{1}'.format(heading.ljust(50, ' '), headings[heading]))
if __name__ == '__main__':
main()
avg_line_length: 25.142857 | max_line_length: 79 | alphanum_fraction: 0.488636

hexsha: 28b9d3223ab59c39762f3f62adf5a1151d5a2567 | size: 1,357 | ext: py | lang: Python
repo: cortial-manon/vrep-robot-helper | path: main.py | head_hexsha: 8bae73c78d537c6fda383261f25d52a4df4d1787 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
#based on the code from https://github.com/studywolf/blog/blob/master/VREP/two_link_arm/vrep_twolink_controller.py
#explained at https://studywolf.wordpress.com/2016/04/18/using-vrep-for-simulation-of-force-controlled-models/
import numpy as np
from VrepWorld import VrepWorld
#create the world object
world = VrepWorld()
#connect to vrep server
world.init()#scene="scenes\\ePuck_wall.ttt") #remote scene import not working at the moment
#create robot object linked to ePuck
robot = world.getRobot('ePuck')
#create obstacles
obstacle1 = world.setObstacle(0.3, 0.2)
obstacle2 = world.setObstacle(0.1, 0.25)
#table for storing positions of the robot
positions = []
try:
#start simulation
world.startRun(5)
robot.setWheelsVelocity(1, 1)
while not world.runFinished:
#robot.getProximitySensors()
#robotVelocity = robot.getVelocity()
robotPosition = robot.getPosition()
positions.append(np.copy(robotPosition)) # store for plotting
#update simulation
world.run()
#clean simulation
world.endRun()
finally:
#close connection even if we got an exception
world.close()
#plot robot positions
import matplotlib.pyplot as plt
positions = np.array(positions)
plt.plot(positions[:, 0], positions[:, 1], 'rx', label="ePuck position")
plt.axis([0, 1, 0,1])
plt.show()
avg_line_length: 23.807018 | max_line_length: 114 | alphanum_fraction: 0.719971

hexsha: 28babc6c1eea36a0a66fd271330d1972461ccef9 | size: 15,902 | ext: py | lang: Python
repo: cmcniel79/ROAR | path: ROAR/control_module/mpc_full_controller.py | head_hexsha: cd94ec637e6e5df0eaac3d30ece00a2de74730ee | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
from ROAR.control_module.controller import Controller
from ROAR.utilities_module.vehicle_models import VehicleControl, Vehicle
from ROAR.utilities_module.data_structures_models import Transform, Location
import numpy as np
import logging
from ROAR.agent_module.agent import Agent
from typing import Tuple
import json
from pathlib import Path
import cvxpy as cp
import scipy
import scipy.signal
import scipy.linalg
class MPCController(Controller):
def __init__(self, agent, steering_boundary: Tuple[float, float],
throttle_boundary: Tuple[float, float], **kwargs):
super().__init__(agent, **kwargs)
self.max_speed = self.agent.agent_settings.max_speed
self.throttle_boundary = throttle_boundary
self.steering_boundary = steering_boundary
self.config = json.load(
Path(agent.agent_settings.mpc_config_file_path).open(mode='r'))
self.controller = FullMPCController(agent=agent,
throttle_boundary=throttle_boundary,
steering_boundary=steering_boundary,
max_speed=self.max_speed,
config=self.config)
self.logger = logging.getLogger(__name__)
def run_in_series(self, next_waypoint: Transform, **kwargs) -> VehicleControl:
long_control, lat_control = self.controller.run_in_series(next_waypoint=next_waypoint,
target_speed=kwargs.get("target_speed", self.max_speed))
long_control = float(np.clip(long_control, *self.throttle_boundary))
lat_control = float(np.clip(lat_control, *self.steering_boundary))
return VehicleControl(throttle=long_control, steering=lat_control)
class FullMPCController(Controller):
def __init__(self, agent, config: dict,
throttle_boundary: Tuple[float, float],
steering_boundary: Tuple[float, float],
max_speed: float,
dt: float = 0.03, **kwargs):
super().__init__(agent, **kwargs)
self.config = config
self.max_speed = max_speed
self.throttle_boundary = throttle_boundary
self.steering_boundary = steering_boundary
self._dt = dt
self.A_matrices, self.B_matrices = self.construct_linearized_matrices(max_speed)
self.last_steer_CMD = 0
def get_throttle_CMD(self, Fr_x, vx):
"""Calculates the motor input command
Calculates the motor input command based on the optimal rear tire longitudinal force
given by solving the CVXPY problem. The optimal rear tire longitudinal force is then
used with the longitudinal dynamics model to solve for the actual motor input command.
Args:
Fr_x: Optimal rear tire longitudinal force
vx: Current longitudinal velocity
Returns:
Motor input command
"""
return (Fr_x + self.config['F_friction'] + self.config['C_d'] * vx**2) / self.config['b_motor']
def get_steer_CMD(self, Ff_y, beta, r, vx):
"""Calculates the steering input command
Calculates the steering input command based on the optimal front tire lateral force
given by solving the CVXPY problem. The optimal front tire lateral force is then
used with the lateral dynamics model to solve for the actual steering input command.
Args:
Ff_y: Optimal front tire lateral force
beta: Current side slip angle of vehicle
r: Current angular velocity
vx: Current longitudinal velocity
Returns:
steer_cmd
"""
# Makes sure the argument to the arcsin function on the following line is valid
arcsin_arg = np.clip(Ff_y / (-self.config['mu'] * self.config['Ff_z']), -1, 1)
alpha_f = np.tan(np.arcsin(arcsin_arg) / self.config['C']) / self.config['B']
steer_angle = np.arctan(beta + ((r * self.config['Lf']) / (vx + 10e-1))) - alpha_f
steer_cmd = steer_angle / self.config['max_angle']
self.last_steer_CMD = np.abs(steer_cmd)
return steer_cmd
def linearize_around_steer_angle(self, steer_angle_eq, speed_eq):
"""Calculates linearized state space equations
Linearizes and discretizes the state space equations of the vehicle dynamics model
around a given equilibrium steering angle and equilibrium speed.
Args:
steer_angle_eq: Equilibrium steering angle to linearize around
speed_eq: Equilibrium vehicle speed to linearize around
Returns:
Ad: The linearized and discretized A matrix in the state space model
Bd: The linearized and discretized B matrix in the state space model
"""
# Linearize system state equations around a steering angle and 100km/hr
beta_eq = np.arctan((self.config['Lr'] / self.config['wheelbase']) * np.tan(steer_angle_eq))
vx_eq = speed_eq * np.cos(beta_eq)
r_eq = (speed_eq / self.config['Lr']) * np.sin(beta_eq)
alpha_f = np.arctan(beta_eq + (r_eq * self.config['Lf']) / vx_eq) - steer_angle_eq
Ff_y_eq = -self.config['mu'] * self.config['Ff_z'] * np.sin(self.config['C'] * np.arctan(self.config['B'] * alpha_f))
Fr_y_eq = (self.config['Lf'] * Ff_y_eq * np.cos(steer_angle_eq)) / self.config['Lr']
# Find partial derivative entries for A and B matrices
a_13 = -(Fr_y_eq + Ff_y_eq * np.cos(steer_angle_eq)) / (self.config['mass'] * vx_eq)
a_31 = -vx_eq * r_eq
# Below is a more complex a_31 term that comes from the Gonzales dissertation; it was found not to be needed, but may be useful for improving performance
# a_31 = vx_eq * r_eq \
# + ((Ff_y_eq * np.cos(steer_angle_eq)) / mass) \
# * (1 /(1 + (beta_eq + ((r_eq * Lf) / vx_eq))**2))
Ac = np.array([
[0, -1, a_13],
[0, 0, 0,],
[a_31, 0, 0]])
b_11 = np.cos(steer_angle_eq) / (self.config['mass'] * vx_eq)
b_21 = np.cos(steer_angle_eq) * self.config['Lf'] / self.config['Izz']
b_31 = -np.sin(steer_angle_eq) / self.config['mass']
Bc = np.array([
[b_11, 0],
[b_21, 0],
[b_31, 1/self.config['mass']]])
# C and D are just for calling cont2discrete
Cc = np.zeros((3, 3))
Dc = np.zeros((3, 2))
system = (Ac, Bc, Cc, Dc)
Ad, Bd, Cd, Dd, dt = scipy.signal.cont2discrete(system, self._dt)
return Ad, Bd
def construct_linearized_matrices(self, speed_eq):
"""Constructs dicts to hold A and B matrices
Runs through the array of equilibrium steering angles and calculates
the linearized A and B matrices for each angle. Those matrices then get
put into dicts that can be called while CARLA is running. The vehicle dynamics
change at different steering angles so the optimizer needs to change which
matrices it is working with or else it cannot solve for optimal vehicle inputs
Args:
speed_eq: Equilibrium vehicle speed to linearize around
Returns:
A_matrices: Dict holding the linearized and discretized A matrices
B_matrices: Dict holding the linearized and discretized B matrices
"""
A_matrices = {}
B_matrices = {}
for angle in self.config['equilibrium_angles']:
A, B = self.linearize_around_steer_angle(angle, speed_eq)
A_matrices.update({angle: A})
B_matrices.update({angle: B})
return A_matrices, B_matrices
def get_linearized_matrices(self, steer_angle):
"""Returns the correct A and B matrices for a given angle
Args:
steer_angle: Current steering angle of the car (should be absolute value)
Returns:
A and B matrices for the given steering angle
"""
for i, angle_entry in enumerate(self.config['equilibrium_angles']):
if i > 0 and steer_angle < angle_entry:
angle_eq = self.config['equilibrium_angles'][i-1]
return self.A_matrices.get(angle_eq), self.B_matrices.get(angle_eq)
elif i == len(self.config['equilibrium_angles']) - 1:
angle_eq = self.config['equilibrium_angles'][-1]
return self.A_matrices.get(angle_eq), self.B_matrices.get(angle_eq)
def solve_cftoc(self, target_state, current_state, state_bounds, input_bounds):
"""Solves for optimal vehicle inputs
Takes in the current vehicle state and the target state that the car should be at,
and then solves for the optimal input sequence to reach the target state. Vehicle
states are beta, yaw and longitudinal speed for a total of 3 state variables.
Vehicle inputs are front tire lateral force and rear tire longitudinal force, for a
total of 2 input variables.
Args:
target_state: The state that the vehicle should be at
current_state: The current vehicle state
state_bounds: Bounds that the state variables should not exceed or be under
input_bounds: Bounds that the inputs should not exceed or be under
Returns:
The optimal steering and throttle commands for the current time step
"""
# Number of future time steps to optimize over
M = 10
# Number of state variables, which are beta, yaw and longitudinal speed
nx = 3
# Number of input variables, which are front tire lateral force and rear tire longitudinal force
nu = 2
# Initialize the array of variables for each time step
x = cp.Variable((nx, M + 1))
u = cp.Variable((nu, M))
# Initialize cost and constraints
cost = 0
constr = []
# Set Initial State
constr += [x[:, 0] == current_state]
# Get correct linearized dynamics matrices based on the last steering angle
A, B = self.get_linearized_matrices(self.last_steer_CMD * self.config['max_angle'])
for m in range(M):
# Cost function: basically a sum of squares between the current beta, yaw and speed values and the target values
# The different coefficients come from the magnitude of the state values (i.e. beta is on the range of 0-2 while
# longitudinal speed can range from 0-100), and the importance of the state variables as well.
cost += 10**3 * cp.sum_squares(x[0, m] - target_state[0])
cost += cp.sum_squares(x[2, m] - target_state[2])
# The cost function value relating to the yaw is removed when the car needs to make a large turn
if np.abs(target_state[0]) < np.pi / 20:
cost += 10**1 * cp.sum_squares(x[1, m] - target_state[1])
# Constraint for dynamic model
constr += [x[:, m + 1] == A @ x[:, m] + B @ u[:, m]]
# Constraints for setting bounds on the input values
constr += [input_bounds[:, 0] <= u[:, m]]
constr += [input_bounds[:, 1] >= u[:, m]]
u_delta_limits = np.array(self.config['delta_lim'])
if m < M - 1:
# Constraint limiting how much inputs can change between time steps - ensures "smoother" input profiles
constr += [u[:, m + 1] - u[:, m] <= u_delta_limits, u[:, m + 1] - u[:, m] >= -u_delta_limits]
# Set terminal cost values
cost += 10**3 * cp.sum_squares(x[0, M] - target_state[0])
cost += cp.sum_squares(x[2, M] - target_state[2])
# Again, the terminal cost function value relating to the yaw is removed when the car needs to make a large turn
if np.abs(target_state[0]) < np.pi / 20:
cost += 10**1 * cp.sum_squares(x[1, M] - target_state[1])
problem = cp.Problem(cp.Minimize(cost), constr)
try:
problem.solve(warm_start=True)
uOpt = u.value
# In case the optimizer doesn't return any values for u
if uOpt is None or uOpt.size == 0:
if np.isnan(uOpt[0][0]):
if target_state[0] < 0:
Ff_y_cmd = 1000
else:
Ff_y_cmd = -1000
if np.isnan(uOpt[0][1]):
Fr_x_cmd = 5000
else:
Ff_y_cmd = u.value[0, 0]
Fr_x_cmd = u.value[1, 0]
except:
# Sometimes the solver can't find a solution at all for a time step, but input values still need to be returned
Ff_y_cmd = 0.0
Fr_x_cmd = 5000
return self.get_throttle_CMD(Fr_x_cmd, current_state[2]), self.get_steer_CMD(Ff_y_cmd, *current_state)
def run_in_series(self, next_waypoint: Transform, **kwargs) -> float:
# Calculate current steering angle, beta and vehicle speed. All angles are in radians
current_steer = self.last_steer_CMD * self.config['max_angle']
current_beta = np.arctan((self.config['Lr'] / self.config['wheelbase']) * np.tan(current_steer))
current_speed = Vehicle.get_speed(self.agent.vehicle)
# Longitudinal speed will be different from the vehicles current speed if beta != 0
current_vx = current_speed * np.cos(current_beta)
# Calculate a vector that represent where you are going
v_begin = self.agent.vehicle.transform.location.to_array()
current_yaw = np.deg2rad(self.agent.vehicle.transform.rotation.yaw)
direction_vector = np.array([-np.sin(current_yaw),
0,
-np.cos(current_yaw)])
v_end = v_begin + direction_vector
v_vec = np.array([(v_end[0] - v_begin[0]), 0, (v_end[2] - v_begin[2])])
# Calculate error projection
w_vec = np.array(
[
next_waypoint.location.x - v_begin[0],
0,
next_waypoint.location.z - v_begin[2],
]
)
v_vec_normed = v_vec / np.linalg.norm(v_vec)
w_vec_normed = w_vec / np.linalg.norm(w_vec)
error = np.arccos(np.dot(v_vec_normed, w_vec_normed))
_cross = np.cross(v_vec_normed, w_vec_normed)
if _cross[1] > 0:
error *= -1
# Set the target speed, target beta angle and target longitudinal velocity
target_speed = self.max_speed
target_beta = -error
target_vx = target_speed * np.cos(current_beta)
# The actual yaw is not needed or important for the optimization problem, as it just needs a "relative" yaw to solve with.
# However, the first yaw angle does need to be 0, as the linearized matrices were calculated with yaw = 0.
# The starting yaw is different for each map: for the Berkeley minor map it is -1.570796 rad (-90 degrees),
# for easy map it is 0 rad.
current_yaw = current_yaw - self.config['starting_yaw']
# Make sure the yaw angle is in [-pi/2, pi/2] or else the optimizer cannot solve for correct steering angle
current_yaw = np.mod(current_yaw + np.pi / 4, np.pi/2) - np.pi / 4
# Current optimization setup does not need state bounds, so that's why all state_bounds arrays are 0
motor_cmd, steer_cmd = self.solve_cftoc(
target_state=np.array([target_beta, current_yaw, target_vx]),
current_state=np.array([current_beta, current_yaw, current_vx]),
state_bounds=np.array([[0, 0], [0, 0], [0, 0]]),
input_bounds=np.array([[-6000, 6000], [-1000, 10000]]))
return motor_cmd, steer_cmd
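# A worked sketch of the yaw-wrapping step used in run_in_series above (the
# sample value is illustrative): np.mod(yaw + np.pi / 4, np.pi / 2) - np.pi / 4
# folds any angle into [-pi/4, pi/4), so a relative yaw of 2.0 rad becomes ~0.429 rad.
def _example_wrap_yaw(yaw=2.0):
    import numpy as np  # local import so the sketch stays self-contained
    return np.mod(yaw + np.pi / 4, np.pi / 2) - np.pi / 4  # ~0.429 for yaw=2.0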
| 43.807163
| 149
| 0.616338
| 2,180
| 15,902
| 4.329817
| 0.176147
| 0.041318
| 0.013985
| 0.012607
| 0.30268
| 0.230427
| 0.170993
| 0.146308
| 0.135502
| 0.102447
| 0
| 0.017502
| 0.295749
| 15,902
| 362
| 150
| 43.928177
| 0.825342
| 0.353163
| 0
| 0.089888
| 0
| 0
| 0.02462
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05618
| false
| 0
| 0.073034
| 0
| 0.191011
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28bb50413a26c30eabe9689d01ddc125b69d1e97
| 2,268
|
py
|
Python
|
eahub/tests/test_localgroups_models.py
|
LisaJD/eahub.org
|
1fd69f9dea5178c4da8923c3497e6326f359d0b5
|
[
"MIT"
] | null | null | null |
eahub/tests/test_localgroups_models.py
|
LisaJD/eahub.org
|
1fd69f9dea5178c4da8923c3497e6326f359d0b5
|
[
"MIT"
] | null | null | null |
eahub/tests/test_localgroups_models.py
|
LisaJD/eahub.org
|
1fd69f9dea5178c4da8923c3497e6326f359d0b5
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from eahub.base.models import User
from eahub.localgroups.models import LocalGroup, Organisership
from eahub.profiles.models import Profile
class LocalGroupTestCase(TestCase):
def test_organisers_names(self):
local_group = LocalGroup()
local_group.save()
user1 = User()
user1.email = "user1@email.com"
user1.save()
user2 = User()
user2.email = "user2@email.com"
user2.save()
profile1 = Profile()
name1 = "Peter"
profile1.name = name1
profile1.user = user1
profile1.save()
profile2 = Profile()
name2 = "Mary"
profile2.name = name2
profile2.user = user2
profile2.save()
o1 = Organisership(user=user1, local_group=local_group)
o1.save()
o2 = Organisership(user=user2, local_group=local_group)
o2.save()
organiser_names = local_group.organisers_names()
self.assertEqual(f"{name1}, {name2}", organiser_names)
def test_organisers_names_handles_users_without_profiles(self):
local_group = LocalGroup()
local_group.save()
user_without_profile = User()
user_without_profile.save()
o = Organisership(user=user_without_profile, local_group=local_group)
o.save()
organisers_names = local_group.organisers_names()
self.assertEqual("User profile missing", organisers_names)
def test_get_exportable_field_names(self):
actual = LocalGroup.get_exportable_field_names()
expected_field_names = [
"id",
"slug",
"is_public",
"name",
"is_active",
"organisers_freetext",
"local_group_types",
"city_or_town",
"region",
"country",
"lat",
"lon",
"website",
"other_website",
"facebook_group",
"facebook_page",
"email",
"meetup_url",
"airtable_record",
"last_edited",
"other_info",
"organisers",
"organisers_emails",
]
self.assertListEqual(expected_field_names, actual)
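# Hypothetical invocation sketch (standard Django test runner assumed):
#   python manage.py test eahub.tests.test_localgroups_models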
| 26.682353
| 77
| 0.581129
| 221
| 2,268
| 5.701357
| 0.343891
| 0.103175
| 0.045238
| 0.047619
| 0.131746
| 0.131746
| 0.131746
| 0
| 0
| 0
| 0
| 0.019506
| 0.321869
| 2,268
| 84
| 78
| 27
| 0.79974
| 0
| 0
| 0.059701
| 0
| 0
| 0.130071
| 0
| 0
| 0
| 0
| 0
| 0.044776
| 1
| 0.044776
| false
| 0
| 0.059701
| 0
| 0.119403
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28bd72b293eddba770453dd33bd72e6fed937e89
| 5,769
|
py
|
Python
|
cloudmosh/components/depth.py
|
rmmilewi/cloudmosh
|
a6387296ad5591f35a5bbfe0d20c5865eb98d07c
|
[
"MIT"
] | null | null | null |
cloudmosh/components/depth.py
|
rmmilewi/cloudmosh
|
a6387296ad5591f35a5bbfe0d20c5865eb98d07c
|
[
"MIT"
] | null | null | null |
cloudmosh/components/depth.py
|
rmmilewi/cloudmosh
|
a6387296ad5591f35a5bbfe0d20c5865eb98d07c
|
[
"MIT"
] | null | null | null |
from cloudmosh.components.base import CloudMoshComponent
import os
import numpy as np
# Keras / TensorFlow
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '5'
from keras.models import load_model
import skimage.io
from skimage.transform import resize
from keras.engine.topology import Layer, InputSpec
import keras.utils.conv_utils as conv_utils
import tensorflow as tf
import keras.backend as K
from nutsflow.base import Nut,NutSink, NutSource, NutFunction
class AWBilinearUpSampling2D(Layer):
"""
This is a custom-defined layer needed by the Alhashim-Wonka network.
"""
def __init__(self, size=(2, 2), data_format=None, **kwargs):
super(AWBilinearUpSampling2D, self).__init__(**kwargs)
self.data_format = K.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
height = self.size[0] * input_shape[2] if input_shape[2] is not None else None
width = self.size[1] * input_shape[3] if input_shape[3] is not None else None
return (input_shape[0],
input_shape[1],
height,
width)
elif self.data_format == 'channels_last':
height = self.size[0] * input_shape[1] if input_shape[1] is not None else None
width = self.size[1] * input_shape[2] if input_shape[2] is not None else None
return (input_shape[0],
height,
width,
input_shape[3])
def call(self, inputs):
input_shape = K.shape(inputs)
if self.data_format == 'channels_first':
height = self.size[0] * input_shape[2] if input_shape[2] is not None else None
width = self.size[1] * input_shape[3] if input_shape[3] is not None else None
elif self.data_format == 'channels_last':
height = self.size[0] * input_shape[1] if input_shape[1] is not None else None
width = self.size[1] * input_shape[2] if input_shape[2] is not None else None
return tf.image.resize_images(inputs, [height, width], method=tf.image.ResizeMethod.BILINEAR, align_corners=True)
def get_config(self):
config = {'size': self.size, 'data_format': self.data_format}
base_config = super(AWBilinearUpSampling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
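# A minimal shape sketch for the layer above (shapes are illustrative): with
# the default size=(2, 2) and channels_last data, compute_output_shape maps
# (None, 240, 320, 3) to (None, 480, 640, 3), i.e. a bilinear 2x upsampling.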
class AWDepthEstimator(Nut):
"""
Contains the code for the depth detection step, adapted
from https://github.com/ialhashim/DenseDepth, the repository
for the 2018 pre-print by Alhashim and Wonka entitled
'High Quality Monocular Depth Estimation via Transfer Learning'.
"""
#The network used by the Depth Detector expects images to be of size 640x480
EXPECTED_IMAGE_WIDTH = 640
EXPECTED_IMAGE_HEIGHT = 480
def __init__(self,modelPath,minDepth=10,maxDepth=1000,batchSize=2):
"""
modelPath: The path to the model file that contains the trained network (e.g. 'data/nyu.h5').
minDepth (optional): The minimum depth that the network is allowed to assign a pixel. Default 10.
maxDepth (optional): The maximum depth that the network is allowed to assign a pixel. Default 1000.
batchSize (optional): How many images the network should process at once. Default 2.
"""
super().__init__()
self._depthModelPath = modelPath
self._minDepth = minDepth
self._maxDepth = maxDepth
self._batchSize = batchSize
#Custom object needed for inference and training
custom_objects = {'BilinearUpSampling2D': AWBilinearUpSampling2D, 'depth_loss_function': None}
self._model = load_model(self._depthModelPath, custom_objects=custom_objects, compile=False)
def setMinDepth(self,minDepth):
self._minDepth = minDepth
def setMaxDepth(self,maxDepth):
self._maxDepth = maxDepth
def setBatchSize(self,batchSize):
self._batchSize = batchSize
def __resize(self,images,width,height):
"""
width: The desired width of the resulting image(s).
height: The desired height of the resulting image(s).
"""
shape = (images.shape[0],width,height,images.shape[3])
return resize(images, shape, preserve_range=True, mode='reflect')
def __depthNorm(self,x):
return self._maxDepth / x
def __rrshift__(self,iterable):
for data in iterable:
if len(data.shape) == 3:
#(width,height,color)
originalWidth = data.shape[0]
originalHeight = data.shape[1]
else:
#(index,width,height,color)
originalWidth = data.shape[1]
originalHeight = data.shape[2]
data = np.clip(data / 255, 0, 1)
# Support multiple RGBs, one RGB image, even grayscale
if len(data.shape) < 3:
#If the image(s) are grayscale, we convert them to an RGB equivalent (v -> <v,v,v>).
data = np.stack((data,data,data), axis=2)
if len(data.shape) < 4:
data = data.reshape((1, data.shape[0], data.shape[1], data.shape[2]))
if data.shape[-1] == 4:
#Drop the alpha component from RGBA. The network only cares about RGB.
#e.g. (1,640,480,4) -> (1,640,480,3)
data = data[:,:,:,:3]
#The network used by the Depth Detector expects images to be of size 640x480
data = self.__resize(data,width=AWDepthEstimator.EXPECTED_IMAGE_WIDTH,height=AWDepthEstimator.EXPECTED_IMAGE_HEIGHT)
# Compute predictions
predictions = self._model.predict(data, batch_size=self._batchSize)
# Put in expected range
predictions = np.clip(self.__depthNorm(predictions), self._minDepth, self._maxDepth)
#Resize to original width and height.
predictions = self.__resize(predictions,width=originalWidth,height=originalHeight)
yield predictions
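# Hypothetical usage sketch (the model path and the single-image pipeline are
# assumptions, not part of this module):
#   estimator = AWDepthEstimator(modelPath='data/nyu.h5')
#   for depth_map in [skimage.io.imread('frame.png')] >> estimator:
#       pass  # depth_map is resized back to the input frame's width/height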
| 38.97973
| 121
| 0.691454
| 796
| 5,769
| 4.866834
| 0.281407
| 0.056789
| 0.022716
| 0.026846
| 0.250387
| 0.232318
| 0.2127
| 0.2127
| 0.2127
| 0.205472
| 0
| 0.024662
| 0.205755
| 5,769
| 148
| 122
| 38.97973
| 0.820821
| 0.240076
| 0
| 0.25
| 0
| 0
| 0.03234
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0.011364
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28bea69d4e6a6b28445e83be9513a3aebdc5d979
| 9,316
|
py
|
Python
|
diagnostics/model_test/verify_model.py
|
ami-GS/ngraph-tf
|
b5ac340f43bf70879ef6c180f69aac8241152c1e
|
[
"Apache-2.0"
] | null | null | null |
diagnostics/model_test/verify_model.py
|
ami-GS/ngraph-tf
|
b5ac340f43bf70879ef6c180f69aac8241152c1e
|
[
"Apache-2.0"
] | null | null | null |
diagnostics/model_test/verify_model.py
|
ami-GS/ngraph-tf
|
b5ac340f43bf70879ef6c180f69aac8241152c1e
|
[
"Apache-2.0"
] | null | null | null |
# ==============================================================================
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import argparse
import numpy as np
import ngraph_bridge
from google.protobuf import text_format
import json
import os
import sys
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print('Error: Creating directory. ' + directory)
def set_os_env(select_device):
if select_device == 'CPU':
# run on TF only
ngraph_bridge.disable()
else:
if not ngraph_bridge.is_enabled():
ngraph_bridge.enable()
        assert select_device[:7] == "NGRAPH_", "Expecting device name to start with NGRAPH_"
back_end = select_device.split("NGRAPH_")
os.environ['NGRAPH_TF_BACKEND'] = back_end[1]
def calculate_output(param_dict, select_device, input_example):
"""Calculate the output of the imported graph given the input.
    Load the graph def from the graph file on the selected device, get the tensors named by the input and output names,
    then feed input_example to the graph and retrieve the output vector.
Args:
param_dict: The dictionary contains all the user-input data in the json file.
select_device: "NGRAPH" or "CPU".
        input_example: A map whose keys are input tensor names and whose values are the randomly generated examples
Returns:
The output vector obtained from running the input_example through the graph.
"""
graph_filename = param_dict["graph_location"]
output_tensor_name = param_dict["output_tensor_name"]
if not tf.gfile.Exists(graph_filename):
raise Exception("Input graph file '" + graph_filename +
"' does not exist!")
graph_def = tf.GraphDef()
if graph_filename.endswith("pbtxt"):
with open(graph_filename, "r") as f:
text_format.Merge(f.read(), graph_def)
else:
with open(graph_filename, "rb") as f:
graph_def.ParseFromString(f.read())
set_os_env(select_device)
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def)
if len(output_tensor_name) == 0:
# if no outputs are specified, then compare for all tensors
output_tensor_name = sum(
[[j.name for j in i.outputs] for i in graph.get_operations()],
[])
# Create the tensor to its corresponding example map
tensor_to_example_map = {}
for item in input_example:
t = graph.get_tensor_by_name(item)
tensor_to_example_map[t] = input_example[item]
#input_placeholder = graph.get_tensor_by_name(input_tensor_name)
output_tensor = [graph.get_tensor_by_name(i) for i in output_tensor_name]
config = tf.ConfigProto(
allow_soft_placement=True,
# log_device_placement=True,
inter_op_parallelism_threads=1)
with tf.Session(graph=graph, config=config) as sess:
output_tensor = sess.run(output_tensor, feed_dict=tensor_to_example_map)
return output_tensor, output_tensor_name
def calculate_norm(ngraph_output, tf_output, desired_norm):
"""Calculate desired_norm between vectors.
Calculate the L1/L2/inf norm between the NGRAPH and tensorflow output vectors.
Args:
ngraph_output: The output vector generated from NGRAPH graph.
tf_output: The output vector generated from tensorflow graph.
desired_norm: L1/L2/inf norm.
Returns:
Calculated norm between the vectors.
Raises:
Exception: If the dimension of the two vectors mismatch.
"""
if (ngraph_output.shape != tf_output.shape):
raise Exception('ngraph output and tf output dimension mismatch')
ngraph_output_squeezed = np.squeeze(ngraph_output)
tf_output_squeezed = np.squeeze(tf_output)
ngraph_output_flatten = ngraph_output_squeezed.flatten()
tf_output_flatten = tf_output_squeezed.flatten()
factor = np.prod(ngraph_output_squeezed.shape)
if desired_norm not in [1, 2, np.inf]:
        raise Exception('Only L1, L2, and inf norms are supported')
n = np.linalg.norm((ngraph_output_flatten - tf_output_flatten),
desired_norm)
if desired_norm is np.inf:
return n
else:
return n / len(ngraph_output_flatten)
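# Worked example for calculate_norm (hypothetical vectors): with
# ngraph_output = np.array([1., 2.]) and tf_output = np.array([1., 4.]) the
# difference is [0, -2], so L1 gives (0 + 2) / 2 = 1.0, L2 gives 2 / 2 = 1.0,
# and inf gives max(0, 2) = 2.0 (the inf norm skips the length division).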
def parse_json():
"""
Parse the user input json file.
Returns:
A dictionary contains all the parsed parameters.
"""
with open(os.path.abspath(args.json_file)) as f:
parsed_json = json.load(f)
return parsed_json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--json_file", type=str, help="Model details in json format")
args = parser.parse_args()
if args.json_file is None:
raise ValueError("Supply a json file to start")
parameters = parse_json()
# Get reference/testing backend to compare
device1 = parameters["reference_backend"]
device2 = parameters["testing_backend"]
# Get L1/L2/Inf threshold value
l1_norm_threshold = parameters["l1_norm_threshold"]
l2_norm_threshold = parameters["l2_norm_threshold"]
inf_norm_threshold = parameters["inf_norm_threshold"]
# Create a folder to save output tensor arrays
output_folder = device1 + "-" + device2
createFolder(output_folder)
os.chdir(output_folder)
print("Model name: " + parameters["model_name"])
print("L1/L2/Inf norm configuration: {}, {}, {}".format(
l1_norm_threshold, l2_norm_threshold, inf_norm_threshold))
# Generate random input based on input_dimension
np.random.seed(100)
input_dimension = parameters["input_dimension"]
input_tensor_name = parameters["input_tensor_name"]
# Get random value range
rand_val_range = parameters["random_val_range"]
bs = int(parameters["batch_size"])
assert len(input_dimension) == len(
input_tensor_name
), "input_tensor_name dimension should match input_dimension in json file"
assert len(input_tensor_name) == len(
rand_val_range
), "Length of random_val_range should match input_tensor_name in json file"
# Matches the input tensors name with its required dimensions
input_tensor_dim_map = {}
for (dim, name, val_range) in zip(input_dimension, input_tensor_name,
rand_val_range):
random_input = np.random.randint(
val_range, size=[bs] + dim).astype('float32')
input_tensor_dim_map[name] = random_input
# Run the model on reference backend
result_tf_graph_arrs, out_tensor_names_cpu = calculate_output(
parameters, device1, input_tensor_dim_map)
# Run the model on testing backend
result_ngraph_arrs, out_tensor_names_ngraph = calculate_output(
parameters, device2, input_tensor_dim_map)
assert all(
[i == j for i, j in zip(out_tensor_names_cpu, out_tensor_names_ngraph)])
passed = True
th_dict = {
"L1": l1_norm_threshold,
"L2": l2_norm_threshold,
"inf": inf_norm_threshold
}
for tname, result_ngraph, result_tf_graph in zip(
out_tensor_names_cpu, result_ngraph_arrs, result_tf_graph_arrs):
new_out_layer = tname.replace("/", "_")
nparray_tf = np.array(result_tf_graph)
nparray_ngraph = np.array(result_ngraph)
np.save(device1 + "-" + new_out_layer + ".npy", nparray_tf)
np.save(device2 + "-" + new_out_layer + ".npy", nparray_ngraph)
l1_norm = calculate_norm(result_ngraph, result_tf_graph, 1)
l2_norm = calculate_norm(result_ngraph, result_tf_graph, 2)
inf_norm = calculate_norm(result_ngraph, result_tf_graph, np.inf)
norm_dict = {"L1": l1_norm, "L2": l2_norm, "inf": inf_norm}
print("\n[" + tname + "]")
#start the loop and check norms
for norm_name in norm_dict:
np.set_printoptions(precision=15)
if norm_dict[norm_name] > th_dict[norm_name]:
print(
"The %s norm is greater than %s threshold - %s norm: %f, %s threshold: %f"
% (norm_name, norm_name, norm_name, norm_dict[norm_name],
norm_name, th_dict[norm_name]))
passed = False
else:
print("The %s norm test passed - %s norm: %f, %s threshold: %f"
% (norm_name, norm_name, norm_dict[norm_name], norm_name,
th_dict[norm_name]))
if not passed:
sys.exit(1)
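# A hypothetical --json_file payload matching the keys read above (every value
# is illustrative, not from this repository):
# {
#   "model_name": "resnet50",
#   "graph_location": "resnet50.pb",
#   "output_tensor_name": ["softmax:0"],
#   "reference_backend": "CPU",
#   "testing_backend": "NGRAPH_CPU",
#   "l1_norm_threshold": 0.01,
#   "l2_norm_threshold": 0.01,
#   "inf_norm_threshold": 0.01,
#   "input_dimension": [[224, 224, 3]],
#   "input_tensor_name": ["input:0"],
#   "random_val_range": [255],
#   "batch_size": 1
# }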
| 36.677165
| 130
| 0.659833
| 1,232
| 9,316
| 4.737013
| 0.233766
| 0.023989
| 0.020562
| 0.013708
| 0.127142
| 0.088588
| 0.047293
| 0.047293
| 0.025703
| 0.025703
| 0
| 0.008197
| 0.240447
| 9,316
| 253
| 131
| 36.822134
| 0.816563
| 0.254186
| 0
| 0.026846
| 0
| 0.006711
| 0.124632
| 0
| 0
| 0
| 0
| 0
| 0.026846
| 1
| 0.033557
| false
| 0.026846
| 0.060403
| 0
| 0.120805
| 0.04698
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28c180845ca339e6f881e886240beaf93a1ed892
| 10,953
|
py
|
Python
|
main.py
|
Tominous/Mario-1
|
28709143019d40cfeaa53737a01270ce14f99858
|
[
"Unlicense"
] | 1
|
2020-06-09T10:43:08.000Z
|
2020-06-09T10:43:08.000Z
|
main.py
|
Tominous/Mario-1
|
28709143019d40cfeaa53737a01270ce14f99858
|
[
"Unlicense"
] | null | null | null |
main.py
|
Tominous/Mario-1
|
28709143019d40cfeaa53737a01270ce14f99858
|
[
"Unlicense"
] | null | null | null |
import discord
from discord.ext import commands
import os
import random
from server import run_server
#token
token = os.environ.get("token")
#prefix
bot = commands.Bot(command_prefix="m!", description="Nada.")
bot.remove_command('help')
#status
@bot.event
async def on_ready():
print("Sono online come", bot.user)
await bot.change_presence(activity=discord.Game(name="It's-a me, Mario! m!help"))
@bot.command(description="It's-a me, Mario!")
async def help(ctx):
await ctx.message.delete()
embed = discord.Embed(
title="Okeydokey!",
colour=discord.Colour(0xFF001E),
timestamp=ctx.message.created_at)
embed.set_footer(text=f"I am exploring {len(bot.guilds)} kingdoms")
for x in bot.commands:
if not x.hidden:
if not x.description:
embed.add_field(
name=f"{bot.command_prefix}{x.name}",
value=f'Descrizione non impostata!',
inline=False)
else:
embed.add_field(
name=f"{bot.command_prefix}{x.name}",
value=f'```{x.description}```',
inline=False)
mes = await ctx.send(embed=embed)
def check(reaction, user):
return user == ctx.author and str(reaction.emoji) == '🔧'
await mes.add_reaction(emoji='🔧')
reaction, user = await bot.wait_for('reaction_add', check=check)
if reaction.emoji == "🔧":
await mes.delete()
#log
@bot.event
async def on_guild_join(guild):
ch = bot.get_channel(719316259237396491)
emb = discord.Embed(
description=f"{bot.user.mention} has arrived in the kingdom of **{guild.name}**\n King : **{guild.owner}**\n Inhabitants : **{guild.member_count}**",
colour=0xFF001E)
emb.set_footer(text=f"I am exploring {len(bot.guilds)} castel", icon_url=bot.user.avatar_url)
emb.set_thumbnail(url=guild.icon_url)
if guild.banner:
emb.set_image(url=guild.banner_url)
await ch.send(embed=emb)
@bot.event
async def on_guild_remove(guild):
ch = bot.get_channel(719316259237396491)
emb = discord.Embed(
description=f"{bot.user.mention} has abandoned the kingdom of **{guild.name}**\n King : **{guild.owner}**\n Inhabitants : **{guild.member_count}**",
colour=0xFF001E)
emb.set_footer(text=f"I am exploring {len(bot.guilds)} castel", icon_url=bot.user.avatar_url)
emb.set_thumbnail(url=guild.icon_url)
if guild.banner:
emb.set_image(url=guild.banner_url)
await ch.send(embed=emb)
#commands
@bot.command(description='I repeat everything you write')
async def say(ctx, *, message):
a = commands.clean_content(use_nicknames=True)
message = await a.convert(ctx, message)
await ctx.send(message)
@bot.command(description='View support server')
async def support(ctx):
await ctx.message.delete()
embed = discord.Embed(
title="I'm-a-tired.",
description=
"[Support server](https://discord.gg/DF7KSsN)",
colour=0xFF001E)
await ctx.send(embed=embed, delete_after=20)
@bot.command(description='View source code')
async def source(ctx):
await ctx.message.delete()
embed = discord.Embed(
title="I'm-a-tired.",
description=
"The source code is available on [GitHub](https://github.com/Infinit7Even/Mario-)",
colour=0xFF001E)
await ctx.send(embed=embed, delete_after=20)
@bot.command(description='Invite Mario to your server')
async def invite(ctx):
await ctx.message.delete()
embed = discord.Embed(
title="Mamma mia!",
description=
"[Invite Mario](https://top.gg/bot/714550524829106296) in your server!",
colour=0xFF001E)
await ctx.send(embed=embed, delete_after=20)
@bot.command(description='Vote Mario In the Store')
async def vote(ctx):
await ctx.message.delete()
embed = discord.Embed(
title="Thank you so much for-to-playing my game!",
description="[Vote Mario!](https://top.gg/bot/714550524829106296)",
colour=0xFF001E)
await ctx.send(embed=embed, delete_after=20)
@bot.command(description='Bot credits')
async def credit(ctx):
await ctx.message.delete()
embed = discord.Embed(
title="Thank you so much for-to-playing my game!",
description="Bot developed da **Infinit7Even#1803** and **IT | Kewai#9029**",
colour=0xFF001E)
await ctx.send(embed=embed, delete_after=20)
@bot.command(description="Use this command if Mario isn't working properly")
async def fix(ctx):
await ctx.message.delete()
embed = discord.Embed(
title="Nighty, nighty. Ah, spaghetti... ah, ravioli... ah, mamma mia.",
description="Make sure Mario can read the messages, delete them and send links, if you still have problems contact Infinit7Even#1803.",
colour=0xFF001E)
await ctx.send(embed=embed, delete_after=20)
@bot.command(description='Bot response time in ms (Milliseconds)')
async def ping(ctx):
latency = bot.latency
await ctx.send('**Bot response time in ms (Milliseconds):**')
await ctx.send(latency)
#support
@bot.event
async def on_message(message):
await bot.process_commands(message)
if not message.author.bot:
if message.content.lower() == "m!say":
triggered = ['`To use that command type m!say message`']
await message.author.send(
f"{random.choice(triggered)}")
        #triggered (kept in this same handler: discord.py dispatches only one
        #on_message per @bot.event registration, so a second definition would
        #silently replace the first)
if message.content.lower() == "ciao":
triggered = ['Ehi, torna qua, scimmione!', 'Hi']
await message.channel.send(
f"{random.choice(triggered)}")
if message.content.lower() == "noice":
triggered = ['gg', 'k', 'kk']
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "rip":
triggered = [
'https://tenor.com/view/rip-coffin-black-ghana-celebrating-gif-16743302', 'https://cdn.discordapp.com/attachments/611325092269522944/717659473057022013/SnapCrab_NoName_2020-6-3_10-42-9_No-00.png', 'https://tenor.com/view/davis-boreanaz-salute-uniform-gif-4762830'
]
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "f":
triggered = ['F', '```Press F to Pay Respect```']
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "we":
triggered = ['Olah!', 'Welà']
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "mario":
            triggered = [
                "Let's-a go!", 'Mamma mia!', 'Here we go!',
                "It's-a me, **Mario!**", 'Okeydokey!', "I'm-a-tired.", 'Press "START" to play!', 'Hello there', 'I am back!'
            ]
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "start":
triggered = [
'Use `m!help` to open the menu']
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "come va?":
            triggered = [
                'Bene, a te?', 'Alla grande!', 'Spettacularis!',
                "It's-a me, **Mario!**", 'Good!'
            ]
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "bene":
triggered = [
                'Ottimo!', 'Eccellente!', 'Fantastico!']
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "m!say @everyone":
triggered = [
'F', 'Rip.']
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "oh shit":
triggered = [
'OH SHIT, HERE WE GO AGAIN']
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "mamma mia":
triggered = [
'Mamma Mia Marcello!']
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "marcello":
triggered = [
'Mamma Mia Marcello!']
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "luigi":
triggered = [
'Luigi! Che cosa ti trattiene!?']
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "onesto":
triggered = [
'Ben detto fra!']
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "ok":
triggered = [
'```Mario approves```']
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "nintendo":
triggered = [
                "Oh shit, my creator hasn't asked for rights yet", 'https://tenor.com/view/traffic-fbiopen-up-raid-gif-13450966']
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "rossi":
triggered = [
'Wait!']
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "giovanni":
triggered = [
'TIRAMI FUORI DA QUI!!!', 'Mamma mia!', 'Mamma mia Marcello!', 'Mamma miaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa']
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "gg":
triggered = [
                "That's my bro."]
await message.channel.send(f"{random.choice(triggered)}")
if message.content.lower() == "mario dm":
triggered = ['I am back!']
await message.author.send(
f"{random.choice(triggered)}")
if message.content.lower() == "super mario":
triggered = ['bross WIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII', 'https://www.youtube.com/watch?v=9kdayFSHkyI']
await message.channel.send(
f"{random.choice(triggered)}")
if message.content.lower() == "fuck you":
triggered = ['Owowowow']
await message.channel.send(
f"{random.choice(triggered)}")
if message.content.lower() == "64":
triggered = ['What memories...']
await message.channel.send(
f"{random.choice(triggered)}")
if message.content.lower() == "yo":
triggered = ['risposta 1', 'risposta 2']
await message.channel.send(
f"{random.choice(triggered)}")
run_server()
bot.run(token)
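# Hypothetical local run sketch (the "token" environment variable name comes
# from the os.environ.get("token") call above; the value is an assumption):
#   export token="YOUR_BOT_TOKEN"
#   python main.py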
| 37.382253
| 279
| 0.601936
| 1,302
| 10,953
| 5.025346
| 0.241935
| 0.035763
| 0.063579
| 0.083448
| 0.597127
| 0.575424
| 0.54715
| 0.54715
| 0.530796
| 0.502522
| 0
| 0.026017
| 0.252534
| 10,953
| 293
| 280
| 37.382253
| 0.772811
| 0.004108
| 0
| 0.491597
| 0
| 0.021008
| 0.311961
| 0.085856
| 0
| 0
| 0.006604
| 0
| 0
| 1
| 0.004202
| false
| 0
| 0.021008
| 0.004202
| 0.029412
| 0.004202
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28c25c0dfe99e2a00d332afa08326f2e3d25b1e8
| 19,506
|
py
|
Python
|
helpers.py
|
agartland/HLAPredCache
|
ebacc706df581a71ba3812282013263939cfbb61
|
[
"MIT"
] | null | null | null |
helpers.py
|
agartland/HLAPredCache
|
ebacc706df581a71ba3812282013263939cfbb61
|
[
"MIT"
] | null | null | null |
helpers.py
|
agartland/HLAPredCache
|
ebacc706df581a71ba3812282013263939cfbb61
|
[
"MIT"
] | null | null | null |
import numpy as np
import string
import re
__all__ = ['BADAA',
'AALPHABET',
'convertHLAAsterisk',
'isvalidmer',
'isvalidHLA',
'rankEpitopes',
'rankKmers',
'rankMers',
'getIC50',
'getMers',
'getMerInds',
'grabKmer',
'grabKmerInds',
'findpeptide',
'grabOverlappingKmer',
'overlappingMers']
BADAA = '-*BX#Z? '
AALPHABET = 'ACDEFGHIKLMNPQRSTVWY'
def convertHLAAsterisk(hlas):
"""Replace the * with _ in each HLA allele"""
    repAsteriskPattern = re.compile(r'\*')
return [re.sub(repAsteriskPattern, '_', h) for h in hlas]
def isvalidmer(mer):
if not mer is None:
return not re.search('[%s]' % BADAA, mer)
else:
return False
def isvalidHLA(h, loci='AB'):
if h[0] in loci:
return True
else:
return False
def rankEpitopes(ba, hlaList, peptide, nmer = [8, 9, 10, 11], peptideLength = None):
"""Breaks peptide into kmers (all nmer lengths)
and rank all (hla, kmer) pairs by predicted IC50 in hlaPredCache ba
IDENTICAL to rankKmers but may have different performance?
Can be used to find the most likely optimal epitope in a peptide sequence.
Predictions that are not found in ba get a temporary prediction of 15 log-nM
Parameters
----------
ba : hlaPredCache
dict-like container of all (hla, kmer) IC50 values
hlaList : list
HLA alleles to be used as keys in ba
peptide : str
AA sequence
nmer : list
Integers indicating optimal lengths to be tested as kmers.
peptideLength : int or None
If a number is specified then a number of '.' padded kmers are included
        so that there are always guaranteed to be a certain number of kmers and results
Returns
-------
ranks : ndarray int
Zero-based rankings of kmers based on predicted IC50 (lowest IC50, lowest rank)
sorti : ndarray int
Index that can be used to sort the returned arrays
kmers : ndarray object
Array of kmer strings in order by getMers() (can be sorted by rank with sorti)
ic50 : ndarray float
Predicted log-IC50 (log-nM) with the HLA allele with the lowest IC50
hla : ndarray object
Array of HLA alleles that were the best predicted binder to each kmer"""
merList = getMers(peptide, nmer, peptideLength)
kmers = np.empty((len(merList), len(hlaList)), dtype=object)
ic50 = np.ones((len(merList), len(hlaList))) * 15
hla = np.empty((len(merList), len(hlaList)), dtype=object)
for i, m in enumerate(merList):
for j, h in enumerate(hlaList):
kmers[i, j] = m
hla[i, j] = h
tmp = ba[(h, m)]
if not np.isnan(tmp):
ic50[i, j] = tmp
kmers = kmers.flatten()
ic50 = ic50.flatten()
hla = hla.flatten()
sorti = ic50.argsort()
ranks = np.empty(len(ic50), int)
ranks[sorti] = np.arange(len(ic50))
return (ranks, sorti, kmers, ic50, hla)
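# A minimal usage sketch for rankEpitopes (hedged: `ba`, the allele names and
# the peptide below are illustrative assumptions, not values from this module):
def _example_rank_epitopes(ba):
    ranks, sorti, kmers, ic50, hla = rankEpitopes(
        ba, ['A_0201', 'B_0702'], 'SLYNTVATLYCVHQR')
    # sorti[0] indexes the (hla, kmer) pair with the lowest predicted log-IC50
    return kmers[sorti[0]], hla[sorti[0]], ic50[sorti[0]]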
def rankKmers(ba, hlaList, peptide, nmer=[8, 9, 10, 11], peptideLength=None):
"""Breaks peptide into kmers (all nmer lengths)
and rank all (hla, kmer) pairs by predicted IC50 in hlaPredCache ba
IDENTICAL to rankEpitopes but may have different performance?
Can be used to find the most likely optimal epitope in a peptide sequence.
Predictions that are not found in ba get a temporary prediction of 15 log-nM
Parameters
----------
ba : hlaPredCache
dict-like container of all (hla, kmer) IC50 values
hlaList : list
HLA alleles to be used as keys in ba
peptide : str
AA sequence
nmer : list
Integers indicating optimal lengths to be tested as kmers.
peptideLength : int or None
If a number is specified then a number of '.' padded kmers are included
        so that there are always guaranteed to be a certain number of kmers and results
Returns
-------
ranks : ndarray int
Zero-based rankings of kmers based on predicted IC50 (lowest IC50, lowest rank)
sorti : ndarray int
Index that can be used to sort the returned arrays
kmers : ndarray object
Array of kmer strings in order by getMers() (can be sorted by rank with sorti)
ic50 : ndarray float
Predicted log-IC50 (log-nM) with the HLA allele with the lowest IC50
hla : ndarray object
Array of HLA alleles that were the best predicted binder to each kmer"""
kmers = getMers(peptide, nmer, peptideLength)
result = rankMers(ba, hlaList, kmers)
return (result[0], result[1], kmers, result[2], result[3])
def rankMers(ba, hlaList, merList):
"""Ranks all (hla, mer) pairs by predicted IC50 found in hlaPredCache, ba
Can be used to find the most likely optimal epitope from a list.
Predictions that are not found in ba get a temporary prediction of 15 log-nM
Parameters
----------
ba : hlaPredCache
dict-like container of all (hla, kmer) IC50 values
hlaList : list
HLA alleles to be used as keys in ba
merList : list
        Peptide sequences to be tested with each HLA allele
Returns
-------
ranks : ndarray int
Zero-based rankings of kmers based on predicted IC50 (lowest IC50, lowest rank)
sorti : ndarray int
Index that can be used to sort the returned arrays
kmers : ndarray object
Array of kmer strings in order by getMers() (can be sorted by rank with sorti)
ic50 : ndarray float
Predicted log-IC50 (log-nM) with the HLA allele with the lowest IC50
hla : ndarray object
Array of HLA alleles that were the best predicted binder to each kmer"""
ic50 = np.ones((len(merList))) * 15
hla = np.empty(len(merList), dtype=object)
for i, m in enumerate(merList):
if not '.' in m:
ic50[i], hla[i] = getIC50(ba, hlaList, m, returnHLA=True)
sorti = ic50.argsort()
ranks = np.empty(len(ic50), dtype=int)
ranks[sorti] = np.arange(len(ic50))
return (ranks, sorti, ic50, hla)
def getIC50(ba, hlaList, mer, nmer=[8, 9, 10, 11], returnHLA=False):
"""Return the IC50 from ba of the mer and its affinity with the most avid HLA in hlaList.
Or if len(pep)>11, return that of the most avid kmer
Parameters
----------
ba : hlaPredCache
dict-like container of all (hla, kmer) IC50 values
hlaList : list
HLA alleles to be used as keys in ba
    mer : string
        Peptide sequence to be tested with each HLA allele
nmer : list
Integers indicating optimal lengths to be tested as kmers.
returnHLA : bool
If True, return the HLA with the lowest binding affinity.
Returns
-------
ic50 : float
Log-IC50 from ba
hla : string (optional)
HLA allele with best binding"""
if ba is None:
raise NameError('Did not load IC50 values into ba!')
if len(mer) <= 11:
"""Minimum IC50 over the HLAs"""
ic50s = np.asarray([ba[(h, mer)] for h in hlaList])
hlas = hlaList
else:
"""Minimum IC50 over all the mers and all the HLAs"""
pairs = [getIC50(ba, hlaList, m, returnHLA=True) for m in getMers(mer, nmer)]
ic50s = np.asarray([p[0] for p in pairs])
hlas = [p[1] for p in pairs]
mini = np.argmin(ic50s)
if returnHLA:
return ic50s[mini], hlas[mini]
else:
return ic50s[mini]
def getMers(seq, nmer=[8, 9, 10, 11], seqLength=None):
"""Takes a AA sequence (string) and turns it into a list of 8, 9, 10, 11 mers
The seq will be padded with one or more '.' if it is shorter than seqLength
    Parameters
---------
seq : str
Peptide sequence.
nmer : list
List of k's for the creation of all kmers.
seqLength : int
Minimum length of seq ('.' used for padding before applying the process)
        Useful for guaranteeing that a certain number of kmers will be in the list.
Returns
-------
mers : list
All peptides of length nmer contained by seq"""
if not seqLength is None:
if len(seq) > seqLength:
seq = seq[:seqLength]
elif len(seq) < seqLength:
            seq = seq.ljust(seqLength, '.')
mers = []
for n in nmer:
mers.extend([seq[i:i+n] for i in range(len(seq)-n+1)])
return mers
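# Worked example: getMers('ACDEFGHIK', nmer=[8, 9]) returns
# ['ACDEFGHI', 'CDEFGHIK', 'ACDEFGHIK'] - the two 8-mers followed by the one 9-mer.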
def getMerInds(seq, nmer=[8, 9, 10, 11], seqLength=None):
"""Takes a AA sequence (string) and turns it into a list of 8, 9, 10, 11 mers
The seq will be padded with one or more '.' if it is shorter than seqLength
These indices will match the peptides created by getMers()
    Parameters
---------
seq : str
Peptide sequence.
nmer : list
List of k's for the creation of all kmers.
seqLength : int
Minimum length of seq ('.' used for padding before applying the process)
        Useful for guaranteeing that a certain number of kmers will be in the list.
Returns
-------
mers : list
All peptides of length nmer contained by seq
mers : list
Seq indices for mers"""
if not seqLength is None:
if len(seq) > seqLength:
seq = seq[:seqLength]
elif len(seq) < seqLength:
            seq = seq.ljust(seqLength, '.')
mers = []
inds = []
for n in nmer:
mers.extend([seq[i:i+n] for i in range(len(seq)-n+1)])
inds.extend([np.arange(n)+i for i in range(len(seq)-n+1)])
return mers, inds
def itermer(seq, k=9, gapped=True, yield_inds=False):
"""Generator over all k-mers in seq.
There are [len(seq) - k + 1] k-mers in seq.
Parameters
----------
seq : str
Sequence which will be broken into kmers.
k : int
Length of peptides to return.
gapped : bool
If True (default), yield the k-mer including gaps.
If False, yield the "non-gapped" k-mer from grabKmer
    yield_inds : bool
If True, also yield an array of indices from grabKmerInds
Yields
------
mer : str
If gapped, then a k-length peptide starting at starti from seq.
If seq[starti] is a gap then returns None.
If not gapped then all gaps are removed before taking the k-length peptide
(if there aren't k AAs then return is None)
inds : nd.array (optional)
An array of indices for the mer"""
for i in range(len(seq) - k + 1):
g, ng = grabKmer(seq, i, k=k)
if gapped:
mer = g
else:
mer = ng
if yield_inds:
ginds, nginds = grabKmerInds(seq, i, k=k)
if gapped:
inds = ginds
else:
inds = nginds
yield (mer, inds)
else:
yield (mer,)
def grabKmer(seq, starti, k=9):
"""Grab the kmer from seq starting at position starti with length k
Return the gapped and non-gapped kmer
If seq[starti] is a gap then the non-gapped kmer is None.
If there are not enough non-gap AA to return after starti then it returns None
Parameters
----------
seq : str
Sequence from which peptide will be grabbed.
starti : int
Starting position of the kmer (zero-based indexing)
k : int
Length of the peptide to return.
Returns
-------
gapped : str
A k-length peptide starting at starti from seq.
nonGapped : str
A k-length peptide starting at starti from seq.
If seq[starti] is a gap then returns None.
If not then all gaps are removed before taking the k-length peptide
(if there aren't k AAs then return is None)"""
if not isinstance(starti, int):
starti = int(starti)
if (starti+k-1) <= (len(seq)-1) and starti >= 0:
tmp = seq[starti:]
full = tmp[:k]
if full[0] == '-':
return None, None
elif '-' in full:
ng = tmp.replace('-', '')
if len(ng) >= k:
ng = ng[:k]
else:
ng = None
else:
ng = full
return full, ng
else:
return None, None
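# Worked example: grabKmer('AC-DEFGHIKL', 0, k=9) returns
# ('AC-DEFGHI', 'ACDEFGHIK') - the gapped 9-window and its gap-stripped 9-mer.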
def grabKmerInds(seq, starti, k=9):
"""Grab the kmer from seq starting at position starti with length k
Return the indices of the gapped and non-gapped kmers
i.e. indices are such that seq[ind] == kmer
If seq[starti] is a gap then the non-gapped kmer is None.
If there are not enough non-gap AA to return after starti then it returns None
Parameters
----------
seq : str
Sequence from which peptide will be grabbed.
starti : int
Starting position of the kmer (zero-based indexing)
k : int
Length of the peptide to return.
Returns
-------
gapped : ndarray
A k-length vector starting with starti containing the indices for the kmer
nonGapped : ndarray
A k-length vector starting at starti.
If seq[starti] is a gap then returns an empty array.
If not then all gaps are removed before taking the k-length peptide
(if there aren't k AAs then return is an empty array)"""
if not isinstance(starti, int):
starti = int(starti)
if (starti+k-1) <= (len(seq)-1) and starti >= 0:
tmp = np.arange(starti, len(seq))
full = tmp[:k]
"""If it starts with a gap then it is invalid (arbitary rule)"""
if seq[starti] == '-':
return np.empty(0), np.empty(0)
elif '-' in seq[starti:starti+k]:
"""If there's a gap somewhere else then go through one by one adding non-gapped indices"""
ng = []
for sitei in tmp:
if not seq[sitei] == '-':
ng.append(sitei)
"""If we get to k non-gapped AAs then return full,ng"""
if len(ng) == k:
return full, np.array(ng)
"""If we get to then end of the seq then return ng=None"""
return full, np.empty(0)
else:
"""If there are no gaps anywhere then just return k indices starting with starti"""
return full, full
else:
"""If its an invalid request then return None,None"""
return np.empty(0), np.empty(0)
def findpeptide(pep, seq, returnEnd = False):
"""Find pep in seq ignoring gaps but returning a start position that counts gaps
pep must match seq exactly (otherwise you should be using pairwise alignment)
Parameters
----------
pep : str
Peptide to be found in seq.
seq : str
Sequence to be searched.
returnEnd : bool
Flag to return the end position such that:
seq[startPos:endPos] = pep
Returns
-------
startPos : int
Start position (zero-indexed) of pep in seq or -1 if not found"""
ng = seq.replace('-', '')
ngInd = ng.find(pep)
ngCount = 0
pos = 0
"""Count the number of gaps prior to the non-gapped position. Add them to it to get the gapped position"""
while ngCount < ngInd or seq[pos] == '-':
if not seq[pos] == '-':
ngCount += 1
pos += 1
startPos = ngInd + (pos - ngCount)
if returnEnd:
if startPos == -1:
endPos = -1
else:
count = 0
endPos = startPos
while count < len(pep):
if not seq[endPos] == '-':
count += 1
endPos += 1
return startPos, endPos
else:
return startPos
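# Worked example: findpeptide('DEF', 'AB--CDEF-G') returns 5, the gap-counting
# start position (seq[5] is the 'D'); with returnEnd=True it returns (5, 8) so
# that seq[5:8] == 'DEF'.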
def grabOverlappingKmer(seq, sitei, pos=0, k=9):
"""Grab the kmer from seq for which it is in the pos position at sitei
Return the gapped and non-gapped kmer
This is a generalization of grabKmer for pos = 0
If seq[sitei] is a gap then the non-gapped kmer is None.
If there are not enough non-gap AA to return before/after sitei then it returns None
Parameters
----------
seq : str
Sequence from which peptide will be grabbed.
sitei : int
Key position of the kmer (zero-based indexing)
pos : int
The position of the key sitei in the kmer.
k : int
Length of the peptide to return.
Returns
-------
gapped : str
A k-length peptide that overlaps sitei
nonGapped : str
A k-length peptide that overlaps sitei
If seq[sitei] is a gap then returns None.
If not then all gaps are removed before taking the k-length peptide
(if there aren't k AAs then return is None)"""
aaRight = k - pos
aaLeft = pos
if seq[sitei] == '-':
return None, None
if (sitei + aaRight) <= len(seq) and (sitei - aaLeft) >= 0:
if pos<k:
rh = seq[sitei:]
fullRH = rh[:aaRight]
if '-' in fullRH:
ngRH = rh.replace('-', '')
if len(ngRH) >= aaRight:
ngRH = ngRH[:aaRight]
else:
ngRH = None
else:
ngRH = fullRH
else:
fullRH = ''
ngRH = ''
if pos>0:
lh = seq[:sitei]
fullLH = lh[-aaLeft:]
if '-' in fullLH:
ngLH = lh.replace('-', '')
if len(ngLH) >= aaLeft:
ngLH = ngLH[-aaLeft:]
else:
ngLH = None
else:
ngLH = fullLH
else:
fullLH = ''
ngLH = ''
full = fullLH + fullRH
#print aaLeft,fullLH,",", aaRight,fullRH
if ngLH is None or ngRH is None:
ng = None
else:
ng = ngLH + ngRH
return full, ng
else:
return None, None
def overlappingMers(seq, sitei, nmer = [8, 9, 10, 11], padding = 0):
"""Create a list of kmers that overlap sitei in seq
Returns parallel lists of the mers, start positions and lengths
Parameters
----------
seq : str
sitei : int
Zero-based index into seq
nmer : list
Lengths of kmers to consider
padding : int
Allow kmer to be within padding.
        Default is no padding (must overlap)
Returns
-------
mers : list
List of overlapping peptides
starti : list
List of start positions"""
def _overlappingMersNoPadding(seq, sitei, nmer):
mers = []
starti = []
for k in nmer:
for posi in range(k):
ng = grabOverlappingKmer(seq, sitei, pos=posi, k=k)[1]
if not ng is None:
mers.append(ng)
starti.append(sitei-posi)
#print sitei, posi, k, ng
mers, uniqi = np.unique(mers, return_index = True)
starti = np.array(starti)[uniqi]
return mers, starti
mers, starti = _overlappingMersNoPadding(seq, sitei, nmer = nmer)
if padding > 0:
for padi in (np.arange(padding) + 1):
for tmpSitei in [sitei+padi, sitei-padi]:
tmpMers, tmpStarti = _overlappingMersNoPadding(seq, tmpSitei, nmer)
mers = np.concatenate((mers, tmpMers))
starti = np.concatenate((starti, tmpStarti))
mers, uniqi = np.unique(mers, return_index = True)
starti = np.array(starti)[uniqi]
return mers, starti
| 32.838384
| 110
| 0.584589
| 2,672
| 19,506
| 4.26235
| 0.126871
| 0.007024
| 0.011063
| 0.004215
| 0.578189
| 0.56897
| 0.550707
| 0.536044
| 0.5054
| 0.498024
| 0
| 0.015943
| 0.324721
| 19,506
| 594
| 111
| 32.838384
| 0.848694
| 0.485082
| 0
| 0.295547
| 0
| 0
| 0.030758
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064777
| false
| 0
| 0.012146
| 0
| 0.186235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28c5429706a9cf44dbc351c293ef49e987982fbe
| 5,697
|
py
|
Python
|
simfempy/examples/incompflow.py
|
anairabeze/simfempy
|
144362956263cb9b81f4bade15664d9cc640f93a
|
[
"MIT"
] | null | null | null |
simfempy/examples/incompflow.py
|
anairabeze/simfempy
|
144362956263cb9b81f4bade15664d9cc640f93a
|
[
"MIT"
] | null | null | null |
simfempy/examples/incompflow.py
|
anairabeze/simfempy
|
144362956263cb9b81f4bade15664d9cc640f93a
|
[
"MIT"
] | null | null | null |
assert __name__ == '__main__'
# in shell
import os, sys
simfempypath = os.path.abspath(os.path.join(__file__, os.path.pardir, os.path.pardir, os.path.pardir, os.path.pardir,'simfempy'))
sys.path.insert(0,simfempypath)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pygmsh
from simfempy.applications.stokes import Stokes
from simfempy.applications.navierstokes import NavierStokes
from simfempy.applications.problemdata import ProblemData
from simfempy.meshes.simplexmesh import SimplexMesh
from simfempy.meshes import plotmesh
# ================================================================c#
def main(testcase='drivenCavity'):
testcases = ['drivenCavity', 'backwardFacingStep', 'poiseuille']
# create mesh and data
if testcase=='drivenCavity':
mesh, data = drivenCavity(h=0.2, mu=0.00025)
elif testcase=='backwardFacingStep':
mesh, data = backwardFacingStep(h=0.1)
elif testcase=='poiseuille':
mesh, data = poiseuille(h=0.1)
else:
raise ValueError(f"test case must be in {testcases=}")
# plotmesh.meshWithBoundaries(mesh)
# create application
# stokes = Stokes(mesh=mesh, problemdata=data, linearsolver='iter_gmres_10')
stokes = Stokes(mesh=mesh, problemdata=data, linearsolver='umf')
# stokes = NavierStokes(mesh=mesh, problemdata=data, linearsolver='iter_gmres')
# stokes = NavierStokes(mesh=mesh, problemdata=data, linearsolver='iter_gcrotmk')
# stokes = NavierStokes(mesh=mesh, problemdata=data, linearsolver='umf')
result = stokes.solve()
print(f"{result.info['timer']}")
print(f"postproc:")
for p, v in result.data['global'].items(): print(f"{p}: {v}")
fig = plt.figure(figsize=(10, 8))
outer = gridspec.GridSpec(1, 3, wspace=0.2, hspace=0.2)
plotmesh.meshWithBoundaries(mesh, fig=fig, outer=outer[0])
plotmesh.meshWithData(mesh, data=result.data, title="Stokes", fig=fig, outer=outer[1])
plotmesh.meshWithData(mesh, title="Stokes", fig=fig, outer=outer[2],
quiver_data={"V":list(result.data['point'].values())})
plt.show()
# ================================================================c#
def drivenCavity(h=0.1, mu=0.001):
with pygmsh.geo.Geometry() as geom:
ms = [h*v for v in [1.,1.,0.2,0.2]]
p = geom.add_rectangle(xmin=0, xmax=1, ymin=0, ymax=1, z=0, mesh_size=ms)
geom.add_physical(p.surface, label="100")
for i in range(len(p.lines)): geom.add_physical(p.lines[i], label=f"{1000 + i}")
mesh = geom.generate_mesh()
data = ProblemData()
# boundary conditions
# data.bdrycond.set("Dirichlet", [1000, 1001, 1002, 1003])
data.bdrycond.set("Dirichlet", [1001, 1002, 1003])
data.bdrycond.set("Navier", [1000])
# data.bdrycond.fct[1002] = lambda x, y, z: np.vstack((np.ones(x.shape[0]),np.zeros(x.shape[0])))
data.bdrycond.fct[1002] = [lambda x, y, z: 1, lambda x, y, z: 0]
# parameters
data.params.scal_glob["mu"] = mu
#TODO pass ncomp with mesh ?!
data.ncomp = 2
return SimplexMesh(mesh=mesh), data
# ================================================================ #
def backwardFacingStep(h=0.2, mu=0.02):
with pygmsh.geo.Geometry() as geom:
X = []
X.append([-1.0, 1.0])
X.append([-1.0, 0.0])
X.append([0.0, 0.0])
X.append([0.0, -1.0])
X.append([3.0, -1.0])
X.append([3.0, 1.0])
p = geom.add_polygon(points=np.insert(np.array(X), 2, 0, axis=1), mesh_size=h)
geom.add_physical(p.surface, label="100")
for i in range(len(p.lines)): geom.add_physical(p.lines[i], label=f"{1000 + i}")
mesh = geom.generate_mesh()
data = ProblemData()
# boundary conditions
data.bdrycond.set("Dirichlet", [1000, 1001, 1002, 1003])
# data.bdrycond.set("Dirichlet", [1000, 1001, 1002, 1003, 1005])
data.bdrycond.set("Neumann", [1004])
data.bdrycond.set("Navier", [1005])
# data.bdrycond.fct[1000] = [lambda x, y, z: 1, lambda x, y, z: 0]
data.bdrycond.fct[1000] = [lambda x, y, z: y*(1-y), lambda x, y, z: 0]
# parameters
data.params.scal_glob["mu"] = mu
data.params.scal_glob["navier"] = 0.01
#TODO pass ncomp with mesh ?!
data.ncomp = 2
return SimplexMesh(mesh=mesh), data
# ================================================================ #
def poiseuille(h= 0.1, mu=0.02):
with pygmsh.geo.Geometry() as geom:
#ms = [h*v for v in [1.,1.,0.2,0.2]]
ms = h
p = geom.add_rectangle(xmin=-1.0, xmax=3.0, ymin=-1.0, ymax=1.0, z=0, mesh_size=ms)
geom.add_physical(p.surface, label="100")
for i in range(len(p.lines)): geom.add_physical(p.lines[i], label=f"{1000 + i}")
mesh = geom.generate_mesh()
data = ProblemData()
# boundary conditions
data.bdrycond.set("Dirichlet", [1000, 1003, 1002])
data.bdrycond.set("Neumann", [1001])
# data.bdrycond.fct[1002] = lambda x, y, z: np.vstack((np.ones(x.shape[0]),np.zeros(x.shape[0])))
data.bdrycond.fct[1003] = [lambda x, y, z: 1, lambda x, y, z: 0]
#--------------------------------------------------------------------------
#navier_slip_boundary
data.bdrycond.fct[1002] = [lambda x, y, z: 1, lambda x, y, z: 0]
#data.bdrycond.fct[1000] = [lambda x, y, z: 0, lambda x, y, z: 0]
#---------------------------------------------------------------------------
# parameters
data.params.scal_glob["mu"] = mu
data.params.scal_glob["navier"] = 0.01
#TODO pass ncomp with mesh ?!
data.ncomp = 2
return SimplexMesh(mesh=mesh), data
# ================================================================c#
main()
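# Hypothetical invocation sketch: calling main(testcase='poiseuille') or
# main(testcase='backwardFacingStep') instead selects the other geometries
# listed in testcases above.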
| 44.507813
| 129
| 0.584694
| 783
| 5,697
| 4.204342
| 0.195402
| 0.061968
| 0.034022
| 0.038275
| 0.591738
| 0.57017
| 0.543742
| 0.488153
| 0.442892
| 0.409478
| 0
| 0.056765
| 0.177462
| 5,697
| 127
| 130
| 44.858268
| 0.645753
| 0.270318
| 0
| 0.318182
| 0
| 0
| 0.07696
| 0.005341
| 0
| 0
| 0
| 0.007874
| 0.011364
| 1
| 0.045455
| false
| 0
| 0.113636
| 0
| 0.193182
| 0.034091
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28c6551ca38cd065b2ced67935d3a361ea90ce26
| 11,816
|
py
|
Python
|
polecat/db/sql/expression/where.py
|
furious-luke/polecat
|
7be5110f76dc42b15c922c1bb7d49220e916246d
|
[
"MIT"
] | 4
|
2019-08-10T12:56:12.000Z
|
2020-01-21T09:51:20.000Z
|
polecat/db/sql/expression/where.py
|
furious-luke/polecat
|
7be5110f76dc42b15c922c1bb7d49220e916246d
|
[
"MIT"
] | 71
|
2019-04-09T05:39:21.000Z
|
2020-05-16T23:09:24.000Z
|
polecat/db/sql/expression/where.py
|
furious-luke/polecat
|
7be5110f76dc42b15c922c1bb7d49220e916246d
|
[
"MIT"
] | null | null | null |
import re
import ujson
from psycopg2.sql import SQL, Composable, Identifier
from polecat.utils import to_bool, to_tuple
from ...schema.column import ReverseColumn
from .expression import Expression
class DiscardValue:
pass
class Where:
FILTER_PROG = re.compile(r'^([a-zA-Z][a-zA-Z0-9_]+(?:__[a-zA-Z][a-zA-Z0-9_]+)*)$')
FILTER_TYPES = None
def __init__(self, *args, **kwargs):
self.root = self.parse_input(args, kwargs)
def get_sql(self, relation):
self.relation = relation
if self.root:
return self.root.get_sql(self)
else:
return None
def parse_input(self, args, kwargs):
root = None
for k, v in kwargs.items():
m = self.FILTER_PROG.match(k)
if not m:
raise ValueError(f'Unable to match filter condition: {k}')
target = m.group(1)
lookup, flt_cls = self.parse_target(target)
flt = flt_cls(self, lookup, v)
if root is None:
root = flt
else:
root = And(root, flt)
for a in args:
# TODO: Confirm that `a` is a proper FilterType.
root = And(root, a)
return root
def parse_target(self, target):
i = target.rfind('__')
if i != -1:
try:
return target[:i], self.FILTER_TYPES[target[i + 2:]]
except KeyError:
pass
return target, Equal
def merge(self, other, boolean='AND'):
# TODO: We should really do a check for duplicate filters.
if self.root:
if other.root:
if boolean == 'AND':
self.root = And(self.root, other.root)
else:
self.root = Or(self.root, other.root)
elif other.root:
self.root = other.root
def get_primary_columns(self):
return self.root.get_primary_columns()
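# A minimal usage sketch for Where (hedged: assumes FILTER_TYPES is populated
# elsewhere to map lookup suffixes such as 'contains' and 'in' onto the
# FilterType subclasses below, and that `relation` supplies table aliases and
# columns):
#   w = Where(name__contains='fred')
#   sql, args = w.get_sql(relation)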
class FilterType:
def __init__(self, filter, lookup, value):
self.parse_lookup(lookup)
self.parse_value(filter, value)
def get_sql(self, filter):
sql, args = self.eval(filter)
sql = self.eval_joins(filter, sql)
return sql, args
def eval(self, filter):
pass
# val = self.value
# if isinstance(self.value, str):
# val = val.format(**filter.context)
# values.append(val)
def eval_joins(self, filter, condition):
if not self.joins:
return condition
sql = '%s'
relation = filter.relation
args = []
for i, joined_column_name in enumerate(self.joins):
# TODO: Handle m2m, reverse fk, reverse m2m.
column = relation.get_column(joined_column_name)
if isinstance(column, ReverseColumn):
prev_tbl_name = relation.alias
prev_col_name = 'id'
col_name = column.related_column.name
else:
prev_tbl_name = relation.alias
prev_col_name = column.name
col_name = 'id'
relation = column.related_table
tbl = relation.alias
# TODO: Use Identifier
# TODO: PK field other than 'id'.
next = 'EXISTS (SELECT 1 FROM {} WHERE {} = {} AND %s)'
args.extend([
Identifier(tbl),
SQL('{}.{}').format(Identifier(prev_tbl_name), Identifier(prev_col_name)),
SQL('{}.{}').format(Identifier(tbl), Identifier(col_name))
])
sql = sql % next
sql = sql % '{}'
args.append(condition)
sql = SQL(sql).format(*args)
return sql
def parse_lookup(self, lookup):
lookup_parts = lookup.split('__')
if len(lookup_parts) < 1:
raise ValueError(f'invalid filter: {lookup}')
# if lookup_parts[-1] in Filter.FILTER_TYPES:
# self.type = lookup_parts.pop()
# else:
# self.type = 'eq'
self.joins = lookup_parts[:-1]
self.field = lookup_parts[-1]
def parse_value(self, filter, value):
# TODO: Oh this isn't nice. I need to be able to use fields to
# convert values.
if self.field == 'id':
try:
self.value = value.id
except AttributeError:
self.value = value
else:
self.value = value
def get_table_column(self, filter):
relation = filter.relation
table_name = relation.alias
for joined_column_name in self.joins:
column = relation.get_column(joined_column_name)
relation = column.related_table
table_name = relation.alias
return table_name, self.field
def format(self, format_string, *args):
# TODO: A little ugly. Now a lot ugly.
if isinstance(self.value, Composable):
format_string = format_string % '{}'
return SQL(format_string).format(*(args + (self.value,))), ()
elif isinstance(self.value, Expression):
value_sql, value_args = self.value.to_sql()
format_string = format_string % '{}'
return SQL(format_string).format(*(args + (value_sql,))), value_args
else:
value = self.get_value()
if value == DiscardValue:
sql_args = ()
else:
sql_args = to_tuple(self.get_value(), keep_none=True)
return SQL(format_string).format(*args), sql_args
def get_value(self):
return (self.value,)
def get_primary_columns(self):
# TODO: Test this.
if self.joins:
return (self.joins[0],)
return (self.field,)
class Equal(FilterType):
def eval(self, filter):
super().eval(filter)
try:
tbl, col = self.get_table_column(filter)
except KeyError:
raise ValueError(f'invalid attribute: {self.field}')
op = '=' if self.value is not None else 'IS'
return self.format(
'{}.{} {} %s',
Identifier(tbl),
Identifier(col),
SQL(op)
)
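# For example (illustrative table/column names): Equal on lookup 'id' with
# value 3 renders roughly '"tbl"."id" = %s' with (3,) as the parameter tuple;
# psycopg2 substitutes the %s placeholder when the query executes.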
class NotEqual(FilterType):
def eval(self, filter):
super().eval(filter)
try:
tbl, col = self.get_table_column(filter)
except KeyError:
raise ValueError(f'invalid attribute: {self.field}')
op = '!=' if self.value is not None else 'IS NOT'
        return self.format('{}.{} {} %s', Identifier(tbl), Identifier(col), SQL(op))
class Contains(FilterType):
def eval(self, filter):
super().eval(filter)
try:
tbl, col = self.get_table_column(filter)
except KeyError:
raise ValueError(f'invalid attribute: {self.field}')
op = self.get_operation()
return self.format('{}.{} {} %s', Identifier(tbl), Identifier(col), SQL(op))
    def parse_value(self, filter, value):
        # Escape LIKE metacharacters in the user value first, then add the
        # wildcards; the original escaped the added '%' wildcards as well,
        # so a literal '%' in the value was never matched literally.
        value = str(value).replace('%', r'\%').replace('_', r'\_')
        self.value = '%{}%'.format(value)
def get_operation(self):
return 'LIKE'
class ContainsInsensitive(Contains):
def get_operation(self):
return 'ILIKE'
class Comparison(FilterType):
    # The original subclasses passed a bare string to SQL.format(), which
    # psycopg2 rejects; resolve the table/column the same way Equal does.
    OP = None
    def eval(self, filter):
        super().eval(filter)
        try:
            tbl, col = self.get_table_column(filter)
        except KeyError:
            raise ValueError(f'invalid attribute: {self.field}')
        return self.format('{}.{} {} %s', Identifier(tbl), Identifier(col), SQL(self.OP))
class Less(Comparison):
    OP = '<'
class Greater(Comparison):
    OP = '>'
class LessEqual(Comparison):
    OP = '<='
class GreaterEqual(Comparison):
    OP = '>='
class In(FilterType):
def eval(self, filter):
super().eval(filter)
try:
tbl, col = self.get_table_column(filter)
except KeyError:
raise ValueError(f'invalid attribute: {self.field}')
return self.format('{}.{} = ANY (%s)', Identifier(tbl), Identifier(col))
def parse_value(self, filter, value):
if isinstance(value, (list, tuple, set)):
self.value = list(value)
else:
try:
self.value = ujson.loads(value)
except Exception:
raise ValueError(f'Unable to parse "in" filter value: {value}')
def get_value(self):
return ([self.value],)
class NotIn(In):
    def eval(self, filter):
        FilterType.eval(self, filter)
        try:
            tbl, col = self.get_table_column(filter)
        except KeyError:
            raise ValueError(f'invalid attribute: {self.field}')
        # Mirror In's array comparison; != ALL (%s) is the negation of = ANY (%s).
        return self.format('{}.{} != ALL (%s)', Identifier(tbl), Identifier(col))
class IsNull(FilterType):
def eval(self, filter):
super().eval(filter)
try:
tbl, col = self.get_table_column(filter)
except KeyError:
raise ValueError(f'invalid attribute: {self.field}')
op = 'IS' if self.value else 'IS NOT'
return self.format(
'{}.{} {} NULL', Identifier(tbl), Identifier(col), SQL(op)
)
def parse_value(self, filter, value):
self.value = to_bool(value)
def get_value(self):
return DiscardValue
# class NotNull(FilterType):
# def eval(self, filter):
# super().eval(filter)
# return f'{self.field} NOT NULL'
class Overlap(FilterType):
def eval(self, filter):
super().eval(filter)
try:
tbl, col = self.get_table_column(filter)
except KeyError:
raise ValueError(f'invalid attribute: {self.field}')
        return self.format('{}.{} && %s', Identifier(tbl), Identifier(col))
class WithinDistance(FilterType):
def __init__(self, filter, lookup, point, distance):
super().__init__(filter, lookup, distance)
self.value = (point, self.value)
def eval(self, filter):
super().eval(filter)
try:
tbl, col = self.get_table_column(filter)
except KeyError:
raise ValueError(f'invalid attribute: {self.field}')
        return self.format('{}.{} <@> %s < %s', Identifier(tbl), Identifier(col))
    def get_value(self):
        # Two placeholders: the reference point and the distance bound.
        return self.value
# TODO: This may not be the fastest formulation: https://www.postgresql.org/docs/10/pgtrgm.html#id-1.11.7.41.8
class TrigramSimilar(FilterType):
def eval(self, filter):
super().eval(filter)
try:
tbl, col = self.get_table_column(filter)
except KeyError:
raise ValueError(f'invalid attribute: {self.field}')
        # The literal '%' operator must be doubled so it survives psycopg2's
        # parameter interpolation at execute time.
        return self.format('{}.{} %% %s', Identifier(tbl), Identifier(col))
class Operator:
def __init__(self, left, right):
self.left = left
self.right = right
def get_sql(self, filter):
raise NotImplementedError
def eval_sides(self, filter):
left_sql, left_args = self.left.get_sql(filter)
right_sql, right_args = self.right.get_sql(filter)
return left_sql, right_sql, left_args + right_args
def get_primary_columns(self):
return self.left.get_primary_columns() + self.right.get_primary_columns()
class And(Operator):
def get_sql(self, filter):
left, right, args = self.eval_sides(filter)
# TODO: Making new SQLs here is probably a tiny bit inefficient.
if isinstance(self.left, Or):
left = SQL('({})').format(left)
if isinstance(self.right, Or):
right = SQL('({})').format(right)
return SQL('{} AND {}').format(left, right), args
class Or(Operator):
def get_sql(self, filter):
left, right, args = self.eval_sides(filter)
return SQL('{} OR {}').format(left, right), args
Where.FILTER_TYPES = {
'eq': Equal,
'ne': NotEqual,
'lt': Less,
'gt': Greater,
'le': LessEqual,
'ge': GreaterEqual,
'in': In,
'ct': Contains,
'cti': ContainsInsensitive,
'ni': NotIn,
'nu': IsNull,
# 'nn': NotNull,
'ov': Overlap,
# 'bt': Between,
'trigram_similar': TrigramSimilar
}
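# Illustration of the lookup grammar (hypothetical lookups, assuming the
# Where class strips the type suffix before building a FilterType):
#   name__ct='ori'         -> Contains:  "tbl"."name" LIKE '%ori%'
#   author__name__eq='Bob' -> Equal, joined through an EXISTS on author
#   id__in='[1, 2, 3]'     -> In:        "tbl"."id" = ANY (...)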
| 30.142857 | 110 | 0.565166 | 1,394 | 11,816 | 4.672166 | 0.160689 | 0.047597 | 0.034393 | 0.039152 | 0.406571 | 0.37786 | 0.341778 | 0.307078 | 0.29633 | 0.288961 | 0 | 0.003069 | 0.310511 | 11,816 | 391 | 111 | 30.219949 | 0.796367 | 0.07346 | 0 | 0.376271 | 0 | 0.00339 | 0.066477 | 0.004853 | 0 | 0 | 0 | 0.002558 | 0 | 1 | 0.145763 | false | 0.010169 | 0.020339 | 0.023729 | 0.366102 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
28c7010fc293f500f9e2e5f809119706506c2ca1 | 1,479 | py | Python | autobahn/protocols/ws_client_protocol.py | olegpshenichniy/uvloop-dyno-track | b90e369a12077d390bd74aab833c2c562c5a2567 | [ "MIT" ] | 2 | 2017-09-12T10:32:48.000Z | 2017-09-27T14:47:37.000Z | autobahn/protocols/ws_client_protocol.py | olegpshenichniy/uvloop-dyno-track | b90e369a12077d390bd74aab833c2c562c5a2567 | [ "MIT" ] | null | null | null | autobahn/protocols/ws_client_protocol.py | olegpshenichniy/uvloop-dyno-track | b90e369a12077d390bd74aab833c2c562c5a2567 | [ "MIT" ] | null | null | null |
import json
from autobahn.asyncio.websocket import WebSocketClientProtocol
from config import DEBUG, CLIENTS_MSGS_COUNT, CLIENTS_COUNT
class WSClientProtocol(WebSocketClientProtocol):
"""
Websocket client protocol.
"""
def __init__(self):
super(WSClientProtocol, self).__init__()
self._msgs_received = 0
        self._disconnect_after = CLIENTS_COUNT * CLIENTS_MSGS_COUNT - CLIENTS_MSGS_COUNT
def _print(self, msg):
if DEBUG:
print('Client {}: {}'.format(id(self), msg))
def onConnect(self, response):
self._print('connected: {}.'.format(response.peer))
def onOpen(self):
self._print('ws connection opened.')
msg_bin = json.dumps(
{
'client_id': id(self),
'message': 'Mauris blandit aliquet elit, eget tincidunt nibh pulvinar a.'
}
).encode('utf8')
for _ in range(CLIENTS_MSGS_COUNT):
self.sendMessage(msg_bin, isBinary=True)
def onMessage(self, payload, is_binary):
if is_binary:
self._print('binary msg {} received: {} bytes'.format(self._msgs_received, len(payload)))
self._msgs_received += 1
            if self._msgs_received == self._disconnect_after:
self._print('sendClose')
self.sendClose(code=1000, reason='we_are_tired')
def onClose(self, wasClean, code, reason):
self._print('connection closed: {}.'.format(reason))
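# Minimal sketch of wiring this protocol into an asyncio loop with autobahn's
# client factory (host/port are assumptions, not from the source):
#   import asyncio
#   from autobahn.asyncio.websocket import WebSocketClientFactory
#   factory = WebSocketClientFactory('ws://127.0.0.1:9000')
#   factory.protocol = WSClientProtocol
#   loop = asyncio.get_event_loop()
#   loop.run_until_complete(loop.create_connection(factory, '127.0.0.1', 9000))
#   loop.run_forever()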
| 30.183673 | 101 | 0.628803 | 162 | 1,479 | 5.469136 | 0.45679 | 0.05079 | 0.072235 | 0.051919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006369 | 0.25693 | 1,479 | 48 | 102 | 30.8125 | 0.799818 | 0.017579 | 0 | 0 | 0 | 0 | 0.141267 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.09375 | 0 | 0.3125 | 0.21875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
28c80161e65709f4218b6dce11334fbf557a4f57 | 13,174 | py | Python | tests/www/services/synapse_space/daa/test_grant_daa_access_service.py | ki-tools/sls_ki_synapse_admin_py | d9483d01000b61c4e8d129bdc06497ae1a27484b | [ "Apache-2.0" ] | null | null | null | tests/www/services/synapse_space/daa/test_grant_daa_access_service.py | ki-tools/sls_ki_synapse_admin_py | d9483d01000b61c4e8d129bdc06497ae1a27484b | [ "Apache-2.0" ] | null | null | null | tests/www/services/synapse_space/daa/test_grant_daa_access_service.py | ki-tools/sls_ki_synapse_admin_py | d9483d01000b61c4e8d129bdc06497ae1a27484b | [ "Apache-2.0" ] | null | null | null |
import pytest
import time
import json
from datetime import date, timedelta
from www.core import Synapse, Env
from www.services.synapse_space.daa import GrantDaaAccessService
import synapseclient as syn
@pytest.fixture
def mk_service(syn_test_helper, syn_client, mk_uniq_real_email, blank_daa_config, set_daa_config):
services = []
def _mk(config=None,
team_name=syn_test_helper.uniq_name(prefix='Team'),
institution_name=syn_test_helper.uniq_name(prefix='Institution'),
institution_short_name=syn_test_helper.uniq_name(prefix='Institution Short Name'),
user_identifier=mk_uniq_real_email(),
agreement_url='https://{0}/doc.pdf'.format(syn_test_helper.uniq_name()),
start_date=date.today(),
end_date=date.today() + timedelta(days=30),
comments=syn_test_helper.uniq_name(prefix='Comment'),
with_all=False,
with_data_collection=False,
with_emails=False):
if not config:
config = blank_daa_config
data_collection_name = None
emails = None
if with_data_collection or with_all:
project = syn_test_helper.create_project()
folder = syn_client.store(syn.Folder(name='Folder', parent=project))
collections = [
{"name": "Collection 1", "entities": [{"name": project.name, "id": project.id}]},
{"name": "Collection 2", "entities": [{"name": folder.name, "id": folder.id}]}
]
config['data_collections'] = collections
data_collection_name = collections[0]['name']
if with_emails or with_all:
emails = [mk_uniq_real_email(), mk_uniq_real_email()]
# Set the config in the Env so it's available to the service.
set_daa_config([config])
service = GrantDaaAccessService(config['id'],
team_name,
institution_name,
institution_short_name,
data_collection_name,
user_identifier,
agreement_url=agreement_url,
emails=emails,
start_date=start_date,
end_date=end_date,
comments=comments)
services.append(service)
return service
yield _mk
for service in services:
if service.team:
syn_test_helper.dispose_of(service.team)
@pytest.fixture
def assert_basic_service_success(syn_test_helper):
def _fn(service):
assert service.team is not None
assert len(service.errors) == 0
syn_test_helper.dispose_of(service.team)
yield _fn
@pytest.fixture
def assert_basic_service_errors(syn_test_helper):
def _fn(service):
assert len(service.errors) > 0
if service.team:
syn_test_helper.dispose_of(service.team)
yield _fn
def test_it_creates_the_team(mk_service, assert_basic_service_success):
service = mk_service()
assert service.execute() == service
assert_basic_service_success(service)
assert service.team.name == service.team_name
def test_it_does_not_create_duplicate_teams(mk_service, assert_basic_service_errors, syn_test_helper):
existing_team = syn_test_helper.create_team()
service = mk_service(team_name=existing_team.name)
assert service.execute() == service
assert_basic_service_errors(service)
assert service.team is None
assert len(service.errors) == 1
assert 'Error creating team:' in service.errors[0]
def test_it_assigns_the_team_to_the_synapse_entities_with_can_download_access(mk_service,
assert_basic_service_success,
syn_client):
service = mk_service(with_data_collection=True)
assert service.execute() == service
assert_basic_service_success(service)
assert service.data_collection is not None
for syn_id in [c['id'] for c in service.data_collection['entities']]:
syn_perms = syn_client.getPermissions(syn_id, principalId=service.team.id)
assert syn_perms
        # list.sort() returns None, so the original comparison always held and
        # was never asserted; compare sorted copies instead.
        assert sorted(syn_perms) == sorted(Synapse.CAN_DOWNLOAD_PERMS)
def test_it_adds_managers_to_the_team(mk_service,
assert_basic_service_success,
syn_client,
blank_daa_config):
user_ids = [Env.Test.TEST_OTHER_SYNAPSE_USER_ID()]
blank_daa_config['team_manager_user_ids'] = user_ids
service = mk_service()
assert service.execute() == service
assert_basic_service_success(service)
syn_invites = syn_client.restGET('/team/{0}/openInvitation'.format(service.team.id))
invite_results = syn_invites.get('results')
assert len(invite_results) == len(user_ids)
for result in invite_results:
user_id = int(result.get('inviteeId'))
assert user_id in user_ids
team_acl = syn_client.restGET('/team/{0}/acl'.format(service.team.id))
acl_accesses = team_acl.get('resourceAccess')
for user_id in user_ids:
resource = next((r for r in acl_accesses if r['principalId'] == user_id))
        # list.sort() returns None on both sides, so the original assert was a no-op.
        assert sorted(resource.get('accessType')) == sorted(Synapse.TEAM_MANAGER_PERMS)
def test_it_invites_the_emails_to_the_team(mk_service, assert_basic_service_success, syn_client):
service = mk_service(with_emails=True)
emails = service.emails
assert len(emails) >= 1
assert service.execute() == service
assert_basic_service_success(service)
syn_invites = syn_client.restGET('/team/{0}/openInvitation'.format(service.team.id))
assert syn_invites
invite_results = syn_invites.get('results')
assert len(invite_results) == len(emails)
for result in invite_results:
email = result.get('inviteeEmail')
assert email in emails
def test_it_writes_the_log_file_on_success(mk_service,
assert_basic_service_success,
syn_test_helper,
syn_client,
monkeypatch):
project = syn_test_helper.create_project()
folder = syn_client.store(syn.Folder(name='Synapse Admin Log', parent=project))
monkeypatch.setenv('SYNAPSE_SPACE_LOG_FOLDER_ID', folder.id)
service = mk_service(with_all=True)
assert service.institution_name is not None
assert service.institution_short_name is not None
assert service.data_collection_name is not None
assert len(service.emails) >= 1
assert service.agreement_url is not None
assert service.start_date is not None
assert service.end_date is not None
assert service.comments is not None
assert service.execute() == service
assert_basic_service_success(service)
files = list(Synapse.client().getChildren(folder))
assert len(files) == 1
file = Synapse.client().get(files[0]['id'])
assert file.name.endswith('_daa_grant_access.json')
with open(file.path, mode='r') as f:
jdata = json.loads(f.read())
jparms = jdata['parameters']
assert jparms['team_name'] == service.team_name
assert jparms['institution_name'] == service.institution_name
assert jparms['institution_short_name'] == service.institution_short_name
assert jparms['agreement_url'] == service.agreement_url
assert jparms['emails'] == service.emails
assert jparms['start_date'] == service.start_date.strftime('%Y-%m-%d')
assert jparms['end_date'] == service.end_date.strftime('%Y-%m-%d')
assert jparms['comments'] == service.comments
assert jparms['user'] == service.user_identifier
jteam = jdata['team']
assert jteam['id'] == service.team.id
assert jteam['name'] == service.team.name
jdc = jdata['data_collection']
assert jdc['name'] == service.data_collection['name']
assert jdc['entities'] == service.data_collection['entities']
def test_it_writes_the_log_file_on_failure(mk_service,
assert_basic_service_success,
syn_test_helper,
syn_client,
monkeypatch):
# TODO:
pass
def test_it_updates_the_access_agreement_table(mk_service,
assert_basic_service_success,
syn_test_helper,
syn_client,
blank_daa_config):
# Create a project with a table to update.
table_project = syn_test_helper.create_project()
cols = [
syn.Column(name='Organization', columnType='STRING', maximumSize=200),
syn.Column(name='Contact', columnType='STRING', maximumSize=200),
syn.Column(name='Synapse_Team_ID', columnType='INTEGER'),
syn.Column(name='Granted_Entity_IDs', columnType='STRING', maximumSize=1000),
syn.Column(name='Agreement_Link', columnType='LINK', maximumSize=1000),
syn.Column(name='Start_Date', columnType='DATE'),
syn.Column(name='End_Date', columnType='DATE'),
syn.Column(name='Comments', columnType='STRING', maximumSize=1000),
syn.Column(name='Test_Col_One', columnType='STRING', maximumSize=50),
syn.Column(name='Test_Col_Two', columnType='STRING', maximumSize=50)
]
schema = syn.Schema(name='KiData_Access_Agreements', columns=cols, parent=table_project)
syn_table = syn_client.store(schema)
blank_daa_config['agreement_table_id'] = syn_table.id
service = mk_service(with_all=True)
assert service.data_collection_name is not None
assert len(service.emails) >= 1
assert service.agreement_url is not None
assert service.start_date is not None
assert service.end_date is not None
assert service.comments is not None
assert service.execute() == service
assert_basic_service_success(service)
rows = list(syn_client.tableQuery(
"select {0} from {1}".format(', '.join([c['name'] for c in cols]), syn_table.id))
)
assert len(rows) == 1
row = rows[0]
assert row[2] == service.institution_name
assert row[3] == service.emails[0]
assert str(row[4]) == str(service.team.id)
assert row[5] == ', '.join('{0} ({1})'.format(c['id'], c['name']) for c in service.data_collection['entities'])
assert row[6] == service.agreement_url
assert row[7].strftime('%Y-%m-%d') == service.start_date.strftime('%Y-%m-%d')
assert row[8].strftime('%Y-%m-%d') == service.end_date.strftime('%Y-%m-%d')
assert row[9] == service.comments
def test_it_fails_if_the_access_agreement_table_does_not_have_the_required_columns(mk_service,
assert_basic_service_errors,
syn_test_helper,
syn_client,
blank_daa_config):
# Create a project with a table to update.
table_project = syn_test_helper.create_project()
cols = [
syn.Column(name=syn_test_helper.uniq_name(), columnType='STRING', maximumSize=200),
syn.Column(name=syn_test_helper.uniq_name(), columnType='STRING', maximumSize=200),
syn.Column(name=syn_test_helper.uniq_name(), columnType='STRING', maximumSize=200)
]
schema = syn.Schema(name='KiData_Access_Agreements', columns=cols, parent=table_project)
syn_table = syn_client.store(schema)
blank_daa_config['agreement_table_id'] = syn_table.id
service = mk_service()
assert service.execute() == service
assert_basic_service_errors(service)
assert service.errors
assert len(service.errors) == 1
assert 'Column: Organization does not exist in table' in service.errors[0]
###############################################################################
# Validations
###############################################################################
def test_validations_validate_team_name(syn_test_helper, syn_client):
existing_team = syn_test_helper.create_team(prefix='Team ')
# Wait for the team to be available from Synapse before checking.
tries = 0
while True:
tries += 1
try:
syn_client.getTeam(existing_team.name)
break
except ValueError:
if tries >= 10:
break
else:
time.sleep(3)
error = GrantDaaAccessService.Validations.validate_team_name(existing_team.name)
assert error == 'Team with name: "{0}" already exists.'.format(existing_team.name)
| 41.297806 | 115 | 0.617656 | 1,542 | 13,174 | 4.993515 | 0.144617 | 0.023636 | 0.043896 | 0.055195 | 0.543117 | 0.485844 | 0.447273 | 0.385714 | 0.345844 | 0.322597 | 0 | 0.007533 | 0.27448 | 13,174 | 318 | 116 | 41.427673 | 0.798075 | 0.016927 | 0 | 0.350598 | 0 | 0 | 0.079305 | 0.014704 | 0 | 0 | 0 | 0.003145 | 0.338645 | 1 | 0.063745 | false | 0.003984 | 0.027888 | 0 | 0.095618 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
28c8168a9876befd17a03652dfc26fe8e8b8d160 | 6,048 | py | Python | scripts/verify/test_sampling/species_generator_funcs.py | nadiahpk/niche-neutral-riau-birds | 83eeba57973d6912ad354592c84a03b5c24b3363 | [ "Unlicense" ] | null | null | null | scripts/verify/test_sampling/species_generator_funcs.py | nadiahpk/niche-neutral-riau-birds | 83eeba57973d6912ad354592c84a03b5c24b3363 | [ "Unlicense" ] | null | null | null | scripts/verify/test_sampling/species_generator_funcs.py | nadiahpk/niche-neutral-riau-birds | 83eeba57973d6912ad354592c84a03b5c24b3363 | [ "Unlicense" ] | null | null | null |
import numpy as np
# create J[k,h], the number of individuals in niche k on island h
def draw_J(K, JV):
# secondary parameters
H = len(JV) # number of islands
J = list()
for k in range(K):
J.append([])
for h in range(H):
Jkh_float = JV[h] / K # number of individuals that can fit
# treat the fractional component of Jkh_float probabilistically
Jkh, prob = (int(Jkh_float // 1), Jkh_float%1)
if np.random.rand() < prob:
Jkh += 1
J[k].append(Jkh)
return(J)
# create D[k,h], the number of founding individuals in each niche k on island h
def calculate_D(mV, TV, J):
# secondary parameters
K = len(J) # number of niches
H = len(J[0]) # number of islands
D = list()
for k in range(K):
D.append([])
for h in range(H):
T = TV[h]
m = mV[h]
if np.isinf(T):
# then there is only one founding individual
D[k].append(1)
else:
# need to calculate using Chen & Chen's formula
W = J[k][h] * m / (1-m) # Watterson's theta for the local community
alpha = T/2
beta = (W-1)*T/(2*J[k][h])
if 1 / (1 + np.exp(-beta)) == 1:
# avoid overflow warning when beta too large (approx beta > 37, np.exp(beta) > 1e16)
Dkh = 1
else:
Dkh = ( T*(W-1)/2 ) / ( alpha*(np.exp(beta)-1) + beta*np.exp(beta) )
# round it, and if it's less than 1, set it to 1
Dkh = int(round(Dkh))
Dkh = 1 if Dkh < 1 else Dkh
D[k].append(Dkh)
return(D)
# create a sample using my species generator
def draw_sample_species_generator(theta, mV, J, D):
# secondary parameters
K = len(J) # number of niches
H = len(J[0]) # number of islands
thetak = theta/K # fundamental biodiversity number per niche (assumes equal niches)
# rows are niches, index is species ID and value is the no. of times that species has immigrated
ancestors = list() # stores a_k
community = list() # stores n_{k,h,i}
# count how many ancestors sampled from each niche
no_ancestors = [ 0 for k in range(K) ] # l_k
for k in range(K): # for each niche
ancestors.append([])
community.append([])
for h in range(H): # for each island
community[k].append([ 0 for a_k in range(len(ancestors[k])) ])
Jkh = J[k][h] # how many individuals in niche k in island h
# deal with special case, if Jkh = 1, then is a new immigrant
# necessary bc if Jkh = 1, then I = 0, then I/(I+j) = nan
if Jkh == 1:
# has to be a new immigrant
if np.random.rand() < thetak / ( thetak + no_ancestors[k] ):
# the immigrant was a new species
ancestors[k].append(1)
community[k][h].append(1)
else:
# the immigrant was a species we've seen before
prob_i = [ ai / no_ancestors[k] for ai in ancestors[k] ]
i_star = np.random.choice( range(len(prob_i)), 1, p = prob_i )[0]
ancestors[k][i_star] += 1
community[k][h][i_star] += 1
# increment the ancestors counter
no_ancestors[k] += 1
else: # if Jkh > 1
# first, sample the individuals who were founders T generations ago, when island separated
# from mainland (or, if T = inf, then Dkh = 1, therefore just sample the first immigrant)
Dkh = D[k][h]
for j in range(Dkh):
if np.random.rand() < thetak / ( thetak + no_ancestors[k] ):
# the immigrant was a new species
ancestors[k].append(1)
community[k][h].append(1)
else:
# the immigrant was a species we've seen before
prob_i = [ ai / no_ancestors[k] for ai in ancestors[k] ]
i_star = np.random.choice( range(len(prob_i)), 1, p = prob_i )[0]
ancestors[k][i_star] += 1
community[k][h][i_star] += 1
# increment the ancestors counter
no_ancestors[k] += 1
# now sample the remainder of the individuals, who are a mix of descendants
# and immigrants
I = mV[h] * (Jkh-1) / (1-mV[h]) # Etienne's immigration parameter
for j in range(Dkh, Jkh):
if (np.random.rand() < I / (I+j)):
# we have drawn an immigrant
if np.random.rand() < thetak / ( thetak + no_ancestors[k] ):
# the immigrant was a new species
ancestors[k].append(1)
community[k][h].append(1)
else:
# the immigrant was a species we've seen before
prob_i = [ ai / no_ancestors[k] for ai in ancestors[k] ]
i_star = np.random.choice( range(len(prob_i)), 1, p = prob_i )[0]
ancestors[k][i_star] += 1
community[k][h][i_star] += 1
# increment the ancestors counter
no_ancestors[k] += 1
else:
# it's a birth-death
prob_i = [ ni / j for ni in community[k][h] ]
i_star = np.random.choice( range(len(prob_i)), 1, p = prob_i )[0]
community[k][h][i_star] += 1
return(ancestors, community)
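# End-to-end sketch with illustrative parameter values (not from the source):
# two niches over islands of 200 and 100 individuals, migration rates mV,
# island ages TV (np.inf forces a single founder per niche):
#   J = draw_J(K=2, JV=[200, 100])
#   D = calculate_D(mV=[0.1, 0.05], TV=[np.inf, 40.0], J=J)
#   ancestors, community = draw_sample_species_generator(
#       theta=10.0, mV=[0.1, 0.05], J=J, D=D)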
| 32.691892 | 106 | 0.475694 | 788 | 6,048 | 3.59264 | 0.214467 | 0.067114 | 0.038149 | 0.03391 | 0.452844 | 0.421406 | 0.372307 | 0.372307 | 0.372307 | 0.372307 | 0 | 0.017231 | 0.424272 | 6,048 | 184 | 107 | 32.869565 | 0.795807 | 0.303571 | 0 | 0.488636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034091 | false | 0 | 0.011364 | 0 | 0.045455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
28cb2fe8595e1829af00fa8ae1db21b69746fd37 | 767 | py | Python | protonfixes/gamefixes/243470.py | Citiroller/protonfixes | 6e0116bd1cd2172b6f0ff9905667bbc59595cdb7 | [ "BSD-2-Clause" ] | 213 | 2018-10-06T01:40:26.000Z | 2022-03-16T16:17:37.000Z | protonfixes/gamefixes/243470.py | Citiroller/protonfixes | 6e0116bd1cd2172b6f0ff9905667bbc59595cdb7 | [ "BSD-2-Clause" ] | 88 | 2018-10-06T17:38:56.000Z | 2022-02-19T13:27:26.000Z | protonfixes/gamefixes/243470.py | Citiroller/protonfixes | 6e0116bd1cd2172b6f0ff9905667bbc59595cdb7 | [ "BSD-2-Clause" ] | 67 | 2018-10-09T16:57:16.000Z | 2022-03-14T13:06:25.000Z |
""" Game fix for Watch_Dogs
"""
# pylint: disable=C0103
import subprocess
from protonfixes import util
from protonfixes import splash
def main():
""" Fix the in-game sound
"""
util.protontricks('xact')
util.protontricks('winxp')
info_popup()
@util.once
def info_popup():
""" Show info popup on first run
"""
zenity_bin = splash.sys_zenity_path()
if not zenity_bin:
return
# pylint: disable=C0301
zenity_cmd = ' '.join([
zenity_bin,
'--info',
'--text',
'"If the game does not run the first time and complains that the UPlay launcher\nis not compatible with the operating system: cancel and restart the game."',
'--no-wrap'])
subprocess.Popen(zenity_cmd, shell=True)
| 22.558824 | 165 | 0.640156 | 100 | 767 | 4.81 | 0.59 | 0.056133 | 0.087318 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013841 | 0.246415 | 767 | 33 | 166 | 23.242424 | 0.818339 | 0.168188 | 0 | 0 | 0 | 0.052632 | 0.299838 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.157895 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
28cfe2649130d1fc2ca1713a506f572c7ef8b0ef | 3,564 | py | Python | test.py | ughiriccardo/retinaface-tf2 | 4791819fc7e47a63ffe695f0a3adccd6cfa5bb5e | [ "MIT" ] | null | null | null | test.py | ughiriccardo/retinaface-tf2 | 4791819fc7e47a63ffe695f0a3adccd6cfa5bb5e | [ "MIT" ] | null | null | null | test.py | ughiriccardo/retinaface-tf2 | 4791819fc7e47a63ffe695f0a3adccd6cfa5bb5e | [ "MIT" ] | null | null | null |
from absl import app, flags, logging
from absl.flags import FLAGS
import cv2
import os
import numpy as np
import tensorflow as tf
import time
from PIL import Image
from modules.models import RetinaFaceModel
from modules.utils import (set_memory_growth, load_yaml, draw_bbox_landm,
pad_input_image, recover_pad_output,
get_bbox_imgs, get_one_image, get_faces)
flags.DEFINE_string('cfg_path', './configs/retinaface_res50.yaml',
'config file path')
flags.DEFINE_string('gpu', '0', 'which gpu to use')
flags.DEFINE_string('img_path', '', 'path to input image')
flags.DEFINE_boolean('webcam', False, 'get image source from webcam or not')
flags.DEFINE_float('iou_th', 0.4, 'iou threshold for nms')
flags.DEFINE_float('score_th', 0.5, 'score threshold for nms')
flags.DEFINE_float('down_scale_factor', 1.0, 'down-scale factor for inputs')
def main(_argv):
# init
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
logger = tf.get_logger()
logger.disabled = True
logger.setLevel(logging.FATAL)
set_memory_growth()
cfg = load_yaml(FLAGS.cfg_path)
# define network
model = RetinaFaceModel(cfg, training=False, iou_th=FLAGS.iou_th, score_th=FLAGS.score_th)
# load checkpoint
checkpoint_dir = './checkpoints/' + cfg['sub_name']
checkpoint = tf.train.Checkpoint(model=model)
if tf.train.latest_checkpoint(checkpoint_dir):
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
#print("[*] load ckpt from {}.".format(tf.train.latest_checkpoint(checkpoint_dir)))
else:
print("[*] Cannot find ckpt from {}.".format(checkpoint_dir))
exit()
if not os.path.exists(FLAGS.img_path):
print(f"cannot find image path from {FLAGS.img_path}")
exit()
print("[*] Processing on single image {}".format(FLAGS.img_path))
img_raw = cv2.imread(FLAGS.img_path)
img_height_raw, img_width_raw, _ = img_raw.shape
img = np.float32(img_raw.copy())
if FLAGS.down_scale_factor < 1.0:
img = cv2.resize(img, (0, 0), fx=FLAGS.down_scale_factor, fy=FLAGS.down_scale_factor, interpolation=cv2.INTER_LINEAR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# pad input image to avoid unmatched shape problem
img, pad_params = pad_input_image(img, max_steps=max(cfg['steps']))
# run model
outputs = model(img[np.newaxis, ...]).numpy()
# recover padding effect
outputs = recover_pad_output(outputs, pad_params)
# draw and save results
imgs = []
    DIM = 64
save_img_path = os.path.join('data/out_' + os.path.basename(FLAGS.img_path))
for prior_index in range(9):
        if prior_index < len(outputs):
img = get_bbox_imgs(img_raw, outputs[prior_index], img_height_raw, img_width_raw)
img = cv2.resize(img, (DIM, DIM))
imgs.append(img)
else:
imgs.append(Image.new('RGB', (DIM, DIM)))
imga = imgs[0]
for img in imgs[1:3]:
imga = np.concatenate((imga, img), axis=1)
imgb = imgs[3]
for img in imgs[4:6]:
imgb = np.concatenate((imgb, img), axis=1)
imgf = np.concatenate((imga, imgb), axis=0)
imgc = imgs[6]
for img in imgs[7:9]:
imgc = np.concatenate((imgc, img), axis=1)
imgf = np.concatenate((imgf, imgc), axis=0)
cv2.imwrite(save_img_path, imgf)
print(f"[*] save result at {save_img_path}")
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
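# Example invocation using the flags defined above (paths are illustrative):
#   python test.py --cfg_path ./configs/retinaface_res50.yaml \
#       --img_path ./data/sample.jpg --score_th 0.5 --iou_th 0.4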
| 33.622642 | 125 | 0.664703 | 515 | 3,564 | 4.394175 | 0.335922 | 0.027839 | 0.033142 | 0.030491 | 0.135219 | 0.120194 | 0.022978 | 0 | 0 | 0 | 0 | 0.014873 | 0.207632 | 3,564 | 105 | 126 | 33.942857 | 0.786473 | 0.062009 | 0 | 0.051948 | 0 | 0 | 0.142172 | 0.009298 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012987 | false | 0.012987 | 0.12987 | 0 | 0.142857 | 0.051948 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
28d3228ab5984fc81c4a723afce6ac8224b5d570 | 214 | py | Python | Contest/ABC182/d/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [ "CC0-1.0" ] | null | null | null | Contest/ABC182/d/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [ "CC0-1.0" ] | null | null | null | Contest/ABC182/d/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [ "CC0-1.0" ] | null | null | null |
#!/usr/bin/env python3
n, *a = map(int, open(0).read().split())
from itertools import accumulate
# S[i]: position after playing all of rounds 0..i; M[i]: maximum prefix
# position reachable within rounds 0..i.
*S, = accumulate(a)
*M, = accumulate(S, max)
Z = ans = 0
for s, m in zip(S, M):
    # Z is the position entering this round; the farthest point visited
    # during it is Z plus the round's running maximum m.
    ans = max(ans, Z + m)
    Z += s
print(ans)
| 21.4 | 40 | 0.570093 | 41 | 214 | 2.97561 | 0.609756 | 0.032787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017751 | 0.21028 | 214 | 10 | 41 | 21.4 | 0.704142 | 0.098131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
28dcbab6ce14a5c552df454e459ab5d17982bfb0 | 1,627 | py | Python | new_skeleton1/tests/test_player_repository.py | borko81/SU_OOP_2021 | 8c38682bd4a2b032ca09f85b0a579be152223a59 | [ "MIT" ] | null | null | null | new_skeleton1/tests/test_player_repository.py | borko81/SU_OOP_2021 | 8c38682bd4a2b032ca09f85b0a579be152223a59 | [ "MIT" ] | null | null | null | new_skeleton1/tests/test_player_repository.py | borko81/SU_OOP_2021 | 8c38682bd4a2b032ca09f85b0a579be152223a59 | [ "MIT" ] | null | null | null |
import unittest
from project.player.beginner import Beginner
from project.player.player_repository import PlayerRepository
class TestPlayerRepo(unittest.TestCase):
def setUp(self):
self.repo = PlayerRepository()
def test_set_up(self):
self.assertEqual(self.repo.count, 0)
self.assertListEqual(self.repo.players, [])
def test_addplayer_when_player_name_exists(self):
p = Beginner("Borko")
self.repo.add(p)
with self.assertRaises(ValueError) as ex:
self.repo.add(p)
self.assertEqual(str(ex.exception), "Player Borko already exists!")
def test_add_player_when_name_is_new(self):
p = Beginner('Borko')
self.repo.add(p)
        self.assertEqual(len(self.repo.players), 1)  # assertTrue treated 1 as a message, not an expectation
self.assertEqual(self.repo.count, 1)
self.assertEqual(self.repo.players[0].username, 'Borko')
    def test_remove_when_name_is_not_defined_should_raise_error(self):
p = Beginner('Borko')
self.repo.add(p)
with self.assertRaises(ValueError) as ex:
self.repo.remove("")
self.assertEqual(str(ex.exception), "Player cannot be an empty string!")
    def test_remove_when_name_is_correct_removes_user(self):
p = Beginner('Borko')
self.repo.add(p)
self.repo.remove('Borko')
self.assertEqual(len(self.repo.players), 0)
self.assertEqual(self.repo.count, 0)
def test_find(self):
p = Beginner('Borko')
self.repo.add(p)
actual = self.repo.find('Borko')
self.assertEqual(p, actual)
if __name__ == '__main__':
unittest.main()
| 31.288462 | 80 | 0.657652 | 210 | 1,627 | 4.909524 | 0.290476 | 0.131911 | 0.064016 | 0.069835 | 0.458778 | 0.403492 | 0.234724 | 0.234724 | 0.205626 | 0.13967 | 0 | 0.004747 | 0.22311 | 1,627 | 51 | 81 | 31.901961 | 0.810918 | 0 | 0 | 0.35 | 0 | 0 | 0.066994 | 0 | 0 | 0 | 0 | 0 | 0.3 | 1 | 0.175 | false | 0 | 0.075 | 0 | 0.275 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
28dfb8dee6d42e22033971ee588b9325fc390cc8 | 640 | py | Python | montype.py | xenolithcluster/mon | b3ecb7810857ae6890ec57cd862f79d8422ee99d | [ "Unlicense" ] | null | null | null | montype.py | xenolithcluster/mon | b3ecb7810857ae6890ec57cd862f79d8422ee99d | [ "Unlicense" ] | null | null | null | montype.py | xenolithcluster/mon | b3ecb7810857ae6890ec57cd862f79d8422ee99d | [ "Unlicense" ] | null | null | null |
from monstage import *
class MonType():
def __init__(self,sprites=None,stage=egg,becomes=None):
self.stage = stage
self.becomes = becomes
self._sprites = sprites
def setSprites(self,sprites):
        if not isinstance(sprites, (list, tuple)):
raise TypeError
self._sprites = sprites
def getSprites(self):
return self._sprites
sprites = property(getSprites,setSprites)
bobo = MonType(sprites=["img/bobo.png","img/bobo2.png"],stage=bab)
plainegg = MonType(sprites=["img/egg1.png","img/egg2.png"],becomes=[bobo])
| 22.857143 | 74 | 0.614063 | 74 | 640 | 5.216216 | 0.459459 | 0.142487 | 0.139896 | 0.108808 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006383 | 0.265625 | 640 | 27 | 75 | 23.703704 | 0.814894 | 0 | 0 | 0.133333 | 0 | 0 | 0.076682 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.066667 | 0.066667 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
28e78c6007647b288497c3988604a790b661d369 | 7,327 | py | Python | examples/prefilter_test.py | abiedermann/worms | 026c45a88d5c71b0e035ac83de6f4dc107316ed8 | [ "Apache-2.0" ] | 4 | 2018-01-30T23:13:43.000Z | 2021-02-12T22:36:54.000Z | examples/prefilter_test.py | abiedermann/worms | 026c45a88d5c71b0e035ac83de6f4dc107316ed8 | [ "Apache-2.0" ] | 9 | 2018-02-23T00:52:25.000Z | 2022-01-26T00:02:32.000Z | examples/prefilter_test.py | abiedermann/worms | 026c45a88d5c71b0e035ac83de6f4dc107316ed8 | [ "Apache-2.0" ] | 4 | 2018-06-28T21:30:14.000Z | 2022-03-30T17:50:42.000Z |
import logging
import sys
import concurrent.futures as cf
from time import time  # time.clock was unused here and was removed in Python 3.8
import numpy as np
import pytest
from worms import simple_search_dag, Cyclic, grow_linear, NullCriteria
from worms.util import InProcessExecutor
from worms.database import CachingBBlockDB, CachingSpliceDB
from worms.ssdag_pose import make_pose_crit, make_pose
from worms.ssdag import graph_dump_pdb
from worms.filters.clash import prune_clashes
from worms.search import lossfunc_rand_1_in
logging.getLogger().setLevel(99)
# David's Defaults
# --max_chunk_length 170
# --nres_from_termini 80
# --max_sample 1e11
# --min_chunk_length 100
# --use_class True
# --prefix %s_n%s
# --err_cutoff 9.0
# --max_chain_length 400
# --min_seg_len 15
# --cap_number_of_pdbs_per_segment 150
# --clash_cutoff 1.5
# --superimpose_rmsd 0.7
# --superimpose_length 9
# --Nproc_for_sympose 8
# --max_number_of_fusions_to_evaluate 10000
# --database_files %s" '%(base,nrun,base,base,nrun,config_file,base,nrun,DATABASES)
def _dump_pdb(i, **kw):
pose = make_pose(**kw)
pose.dump_pdb("test_%i.pdb" % i)
def worm_grow_3(
bbdb,
spdb,
nbblocks=10,
shuffle_bblocks=0,
parallel=1,
verbosity=1,
monte_carlo=0,
clash_check=0,
dump_pdb=0,
cache_sync=0.001,
):
if clash_check < dump_pdb:
clash_check = dump_pdb * 100
ttot = time()
ssdag, tdb, tvertex, tedge = simple_search_dag(
[
("C3_N", "_N"),
("Het:NCy", "C_"),
# ('Het:CCC', 'C_'),
# ('Het:NN', 'NN'),
# ('Het:CC', 'CC'),
# ('Het:NNX', 'N_'),
],
(bbdb, spdb),
nbblocks=nbblocks,
timing=True,
verbosity=verbosity,
parallel=parallel,
cache_sync=cache_sync,
)
# crit = Cyclic(3, from_seg=2, origin_seg=0)
# crit = Cyclic(3)
# last_bb_same_as = crit.from_seg
crit = NullCriteria()
lf = crit.jit_lossfunc()
last_bb_same_as = -1
tgrow = time()
rslt = grow_linear(
ssdag,
# loss_function=lf,
loss_function=lossfunc_rand_1_in(1000),
parallel=parallel,
loss_threshold=1.0,
last_bb_same_as=last_bb_same_as,
monte_carlo=monte_carlo,
)
tgrow = time() - tgrow
Nres = len(rslt.err)
Ntot = np.prod([v.len for v in ssdag.verts])
logtot = np.log10(Ntot)
print(
"frac last_bb_same_as",
rslt.stats.n_last_bb_same_as[0] / rslt.stats.total_samples[0],
)
Nsparse = int(rslt.stats.total_samples[0])
Nsparse_rate = int(Nsparse / tgrow)
ttot = time() - ttot
if len(rslt.idx) == 0:
frac_redundant = 0
else:
frac_redundant = rslt.stats.n_redundant_results[0] / len(rslt.idx)
print(
f" worm_grow_3 {nbblocks:4} {ttot:7.1f}s {Nres:9,} logtot{logtot:4.1f} tv"
f" {tvertex:7.1f}s te {tedge:7.1f}s tg {tgrow:7.1f}s {Nsparse:10,} {Nsparse_rate:7,}/s {frac_redundant:4.1f}"
)
if len(rslt.err):
print("err 0 25 50 75 100", np.percentile(rslt.err, (0, 25, 50, 75, 100)))
sys.stdout.flush()
if not clash_check:
return
tclash = time()
norig = len(rslt.idx)
# rslt = prune_clashes(
# ssdag, crit, rslt, at_most=clash_check, thresh=4.0, parallel=parallel
# )
print(
"pruned clashes, %i of %i remain," % (len(rslt.idx), min(clash_check, norig)),
"took",
time() - tclash,
"seconds",
)
for i, idx in enumerate(rslt.idx[:10]):
graph_dump_pdb("graph_%i_nojoin.pdb" % i, ssdag, idx, rslt.pos[i], join=0)
# graph_dump_pdb('graph_%i.pdb' % i, ssdag, idx, rslt.pos[i])
    return  # NOTE: the pdb-dumping block below is currently unreachable
if len(rslt.idx) > 0:
tpdb = time()
exe = cf.ThreadPoolExecutor if parallel else InProcessExecutor
with exe(max_workers=3) as pool:
futures = list()
for i in range(min(dump_pdb, len(rslt.idx))):
kw = dict(
bbdb=bbdb,
ssdag=ssdag,
# crit=crit,
i=i,
indices=rslt.idx[i],
positions=rslt.pos[i],
only_connected=False,
)
futures.append(pool.submit(_dump_pdb, **kw))
[f.result() for f in futures]
print("dumped %i structures" % min(dump_pdb, len(rslt.idx)), "time", time() - tpdb)
def main():
import argparse
import glob
import pyrosetta
pyrosetta.init("-mute all -beta")
parser = argparse.ArgumentParser()
parser.add_argument("--verbosity", type=int, dest="verbosity", default=0)
parser.add_argument("--parallel", type=int, dest="parallel", default=True)
parser.add_argument("--nbblocks", type=int, dest="nbblocks", default=4)
parser.add_argument("--clash_check", type=int, dest="clash_check", default=0)
parser.add_argument("--dump_pdb", type=int, dest="dump_pdb", default=0)
parser.add_argument("--cache_sync", type=float, dest="cache_sync", default=0.01)
parser.add_argument("--monte_carlo", type=int, dest="monte_carlo", default=0)
args = parser.parse_args()
bbdb = CachingBBlockDB(
dbfiles=[
"worms/data/c6_database.json",
"worms/data/HBRP_Cx_database.json",
"worms/data/HFuse_Cx_database.20180219.json",
"worms/data/HFuse_het_2chain_2arm_database.ZCON-103_2.20180406.json",
"worms/data/HFuse_het_2chain_2arm_database.ZCON-112_2.20180406.json",
"worms/data/HFuse_het_2chain_2arm_database.ZCON-127_2.20180406.json",
"worms/data/HFuse_het_2chain_2arm_database.ZCON-13_2.20180406.json",
"worms/data/HFuse_het_2chain_2arm_database.ZCON-15_2.20180406.json",
"worms/data/HFuse_het_2chain_2arm_database.ZCON-34_2.20180406.json",
"worms/data/HFuse_het_2chain_2arm_database.ZCON-37_2.20180406.json",
"worms/data/HFuse_het_2chain_2arm_database.ZCON-39_2.20180406.json",
"worms/data/HFuse_het_2chain_2arm_database.ZCON-9_2.20180406.json",
"worms/data/HFuse_het_3chain_2arm_database.Sh13_3.20180406.json",
"worms/data/HFuse_het_3chain_2arm_database.Sh13_3.20180416.json",
"worms/data/HFuse_het_3chain_2arm_database.Sh29_3.20180406.json",
"worms/data/HFuse_het_3chain_2arm_database.Sh29_3.20180416.json",
"worms/data/HFuse_het_3chain_2arm_database.Sh34_3.20180416.json",
"worms/data/HFuse_het_3chain_2arm_database.Sh3e_3.20180406.json",
"worms/data/HFuse_het_3chain_3arm_database.Sh13_3.20180406.json",
"worms/data/HFuse_het_3chain_3arm_database.Sh13_3.20180416.json",
"worms/data/HFuse_het_3chain_3arm_database.Sh29_3.20180406.json",
"worms/data/HFuse_het_3chain_3arm_database.Sh29_3.20180416.json",
"worms/data/HFuse_het_3chain_3arm_database.Sh34_3.20180416.json",
"worms/data/HFuse_het_3chain_3arm_database.Sh3e_3.20180406.json",
"worms/data/master_database_generation2.json",
"worms/data/test_db_file.json",
"worms/data/test_fullsize_prots.json",
],
read_new_pdbs=True,
verbosity=args.verbosity,
)
spdb = CachingSpliceDB()
worm_grow_3(
bbdb,
spdb,
nbblocks=args.nbblocks,
parallel=args.parallel,
verbosity=args.verbosity,
monte_carlo=args.monte_carlo,
clash_check=args.clash_check,
dump_pdb=args.dump_pdb,
cache_sync=args.cache_sync,
)
sys.stdout.flush()
if __name__ == "__main__":
main()
| 32.856502 | 115 | 0.661253 | 1,044 | 7,327 | 4.37069 | 0.25 | 0.053254 | 0.074074 | 0.086785 | 0.319088 | 0.289064 | 0.250931 | 0.235371 | 0.235371 | 0.225948 | 0 | 0.070432 | 0.207452 | 7,327 | 222 | 116 | 33.004505 | 0.715344 | 0.10782 | 0 | 0.1 | 0 | 0.011765 | 0.312327 | 0.239625 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017647 | false | 0 | 0.094118 | 0 | 0.123529 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
28e92acc31d96b35a53502cfb20ad7033a7cf662 | 2,476 | py | Python | f_net/main.py | DionysisChristopoulos/google-research | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | [ "Apache-2.0" ] | 2 | 2021-09-04T09:08:38.000Z | 2021-09-04T09:08:44.000Z | f_net/main.py | DionysisChristopoulos/google-research | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | [ "Apache-2.0" ] | null | null | null | f_net/main.py | DionysisChristopoulos/google-research | 7f59ef421beef32ca16c2a7215be74f7eba01a0f | [ "Apache-2.0" ] | 5 | 2021-11-25T07:40:17.000Z | 2022-03-22T11:13:39.000Z |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main file for pre-training or fine-tuning models."""
from absl import app
from absl import flags
from absl import logging
from clu import platform
import jax
from ml_collections import config_flags
import tensorflow as tf
from f_net import run_classifier
from f_net import run_pretraining
from f_net.configs.base import TrainingMode
config_flags.DEFINE_config_file(
"config", None, "Training configuration.", lock_config=True)
flags.mark_flags_as_required(["config"])
flags.DEFINE_string("workdir", None, "Work unit directory.", required=True)
flags.DEFINE_string(
"vocab_filepath",
None,
"Absolute path to SentencePiece vocab model.",
required=True)
flags.DEFINE_integer("random_seed", 0, "Integer for PRNG random seed.")
FLAGS = flags.FLAGS
def main(argv):
del argv
  # Hide any GPUs from TensorFlow. Otherwise TF might reserve memory and make
# it unavailable to JAX.
tf.config.experimental.set_visible_devices([], "GPU")
logging.info("JAX process: %d / %d", jax.process_index(), jax.process_count())
logging.info("JAX devices: %r", jax.devices())
# Add a note so that we can tell which task is which JAX process.
platform.work_unit().set_task_status(
f"process_index: {jax.process_index()}, process_count: {jax.process_count()}"
)
platform.work_unit().create_artifact(platform.ArtifactType.DIRECTORY,
FLAGS.workdir, "workdir")
train_mode = FLAGS.config.mode
if train_mode == TrainingMode.PRETRAINING:
train_lib = run_pretraining
elif train_mode == TrainingMode.CLASSIFICATION:
train_lib = run_classifier
else:
raise ValueError("Unknown training mode: %s" % train_mode)
train_lib.train_and_evaluate(FLAGS.config, FLAGS.workdir,
FLAGS.vocab_filepath, FLAGS.random_seed)
if __name__ == "__main__":
app.run(main)
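# Example invocation using the flags defined above (paths are illustrative):
#   python main.py --config=path/to/config.py --workdir=/tmp/fnet \
#       --vocab_filepath=/path/to/sentencepiece.model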
| 32.578947 | 83 | 0.735057 | 349 | 2,476 | 5.063037 | 0.475645 | 0.033956 | 0.023769 | 0.01811 | 0.019242 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004885 | 0.173263 | 2,476 | 75 | 84 | 33.013333 | 0.858329 | 0.319063 | 0 | 0 | 0 | 0 | 0.186899 | 0.025841 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.238095 | 0 | 0.261905 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
28e9489a4ce0811a2281acebf64dae5129d76367 | 18,341 | py | Python | mypyext/ml.py | VolkiTheDreamer/PythonRocks | f7b6cdf335687c6d111bf08387965ca3ecddd504 | [ "Apache-2.0" ] | null | null | null | mypyext/ml.py | VolkiTheDreamer/PythonRocks | f7b6cdf335687c6d111bf08387965ca3ecddd504 | [ "Apache-2.0" ] | null | null | null | mypyext/ml.py | VolkiTheDreamer/PythonRocks | f7b6cdf335687c6d111bf08387965ca3ecddd504 | [ "Apache-2.0" ] | 2 | 2019-10-04T10:56:14.000Z | 2022-03-06T18:18:59.000Z |
import numpy as np
import pandas as pd
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, precision_score, f1_score,roc_auc_score,roc_curve
from sklearn.metrics import mean_squared_error,mean_absolute_error,r2_score
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.pipeline import Pipeline
import os, sys, site
import itertools
from numpy.random import uniform
from random import sample
from math import isnan
from multiprocessing import Pool
from scipy.spatial import distance
from sklearn.metrics.pairwise import cosine_similarity
def printAlgorithm(algo):
"""
    You need to change the hard-coded path below to wherever Algorithms.xlsx lives.
"""
p=os.getcwd()
os.chdir(r"E:\OneDrive\Dökümanlar\GitHub\PythonRocks")
df=pd.read_excel("Algorithms.xlsx",skiprows=1)
print(df[df.Algorithm==algo].T)
os.chdir(p)
def adjustedr2(R_sq,y,y_pred,x):
return 1 - (1-R_sq)*(len(y)-1)/(len(y_pred)-x.shape[1]-1)
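# Adjusted R^2 = 1 - (1 - R^2) * (n - 1) / (n - p - 1), where n = len(y) is
# the number of observations and p = x.shape[1] the number of predictors.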
def calculate_aic_bic(n, mse, num_params):
"""
n=number of instances in y
"""
aic = n *np.log(mse) + 2 * num_params
bic = n * np.log(mse) + num_params * np.log(n)
# ssr = fitted.ssr #residual sum of squares
# AIC = N + N*np.log(2.0*np.pi*ssr/N)+2.0*(p+1)
# print(AIC)
# BIC = N + N*np.log(2.0*np.pi*ssr/N) + p*np.log(N)
# print(BIC)
return aic, bic
def printScores(y_test,y_pred,x=None,*, alg_type='c'):
"""
Args:
        alg_type: c for classification, r for regression
"""
if alg_type=='c':
acc=accuracy_score(y_test,y_pred)
print("Accuracy:",acc)
recall=recall_score(y_test,y_pred)
print("Recall:",recall)
precision=precision_score(y_test,y_pred)
print("Precision:",precision)
f1=f1_score(y_test,y_pred)
print("F1:",f1)
return acc,recall,precision,f1
else:
        mse=mean_squared_error(y_test,y_pred) # squared=False would return RMSE directly, but the MSE itself is needed for AIC/BIC below
rmse=round(np.sqrt(mse),2)
print("RMSE:",rmse)
mae=round(mean_absolute_error(y_test,y_pred),2)
print("MAE:",mae)
r2=round(r2_score(y_test,y_pred),2)
print("r2:",r2)
adjr2=round(adjustedr2(r2_score(y_test,y_pred),y_test,y_pred,x),2)
print("Adjusted R2:",adjr2)
aic, bic=calculate_aic_bic(len(y_test),mse,len(x))
print("AIC:",round(aic,2))
print("BIC:",round(bic,2))
return (rmse,mae,r2,adjr2,round(aic,2),round(bic,2))
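# Example calls (illustrative):
#   acc, recall, precision, f1 = printScores(y_test, y_pred)               # classification
#   rmse, mae, r2, adjr2, aic, bic = printScores(y_test, y_pred, X_test, alg_type='r')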
def draw_siluet(range_n_clusters,data,isbasic=True,printScores=True):
"""
Used for K-means
"""
if isbasic==False:
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(12,4)
ax1.set_xlim([-1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(data) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(data)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(data, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(data, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(data[:, 0], data[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
else:
ss = []
for n in range_n_clusters:
kmeans = KMeans(n_clusters=n)
kmeans.fit_transform(data)
labels = kmeans.labels_
score = silhouette_score(data, labels)
ss.append(score)
if printScores==True:
print(n,score)
plt.plot(range_n_clusters,ss)
def drawEpsilonDecider(data,n):
"""
for DBSCAN
n: # of neighbours
data:numpy array
"""
neigh = NearestNeighbors(n_neighbors=n)
nbrs = neigh.fit(data)
distances, indices = nbrs.kneighbors(data)
distances = np.sort(distances, axis=0)
distances = distances[:,1]
plt.ylabel("eps")
plt.plot(distances)
def draw_elbow(ks,data):
wcss = []
for i in ks:
        kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0) # k-means++ initialization avoids the random-initialization trap
kmeans.fit(data)
wcss.append(kmeans.inertia_)
plt.plot(ks, wcss)
plt.title('Elbow Method')
plt.xlabel('# of clusters')
plt.ylabel('WCSS')
plt.show()
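# Usage sketch (illustrative): pick k at the "knee" of the elbow curve, and
# read a DBSCAN eps where the sorted k-distance curve bends sharply:
#   draw_elbow(range(1, 11), X)
#   drawEpsilonDecider(X, n=4)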
#PCA biplot
def biplot(score,coeff,y,variance,labels=None):
"""
found here: https://stackoverflow.com/questions/39216897/plot-pca-loadings-and-loading-in-biplot-in-sklearn-like-rs-autoplot
"""
xs = score[:,0]
ys = score[:,1]
n = coeff.shape[0]
scalex = 1.0/(xs.max() - xs.min())
scaley = 1.0/(ys.max() - ys.min())
plt.scatter(xs * scalex,ys * scaley, c = y)
for i in range(n):
plt.arrow(0, 0, coeff[i,0], coeff[i,1],color = 'r',alpha = 0.5)
if labels is None:
plt.text(coeff[i,0]* 1.15, coeff[i,1] * 1.15, "Var"+str(i+1), color = 'g', ha = 'center', va = 'center')
else:
plt.text(coeff[i,0]* 1.15, coeff[i,1] * 1.15, labels[i], color = 'g', ha = 'center', va = 'center')
plt.xlim(-1,1)
plt.ylim(-1,1)
plt.xlabel("PC{},Variance:{}".format(1,variance[0]))
plt.ylabel("PC{},Variance:{}".format(2,variance[1]))
plt.grid()
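# Usage sketch (illustrative, assuming standardized data X_scaled and labels y):
#   from sklearn.decomposition import PCA
#   pca = PCA(n_components=2).fit(X_scaled)
#   scores = pca.transform(X_scaled)
#   biplot(scores[:, :2], pca.components_.T[:, :2], y,
#          np.round(pca.explained_variance_ratio_, 2))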
def PCAChart(X_pca,alpha=0.2):
n=X_pca.shape[1] #second dimension is the number of colums which is the number of components
if n==2:
plt.scatter(X_pca[:,0], X_pca[:,1],alpha=alpha);
elif n==3:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
Axes3D.scatter(ax,xs=X_pca[:,0], ys=X_pca[:,1],zs=X_pca[:,2],alpha=alpha)
else:
print("n should be either 2 or 3")
def getfullitemsforOHE(wholedf,featlist,sort=True):
"""
wholedf should be the dataframe including both train and test set.
"""
def sortornot(X):
if sort==False:
return X
else:
return sorted(X)
fulllist=[]
for feat in featlist:
fulllist.append(sortornot(wholedf[feat].unique()))
return fulllist
def getfeaturenames(ct,dataframe):
final_features=[]
for trs in ct.transformers_:
trName=trs[0]
trClass=trs[1]
features=trs[2]
if isinstance(trClass,Pipeline):
n,tr=zip(*trClass.steps)
for t in tr: #t is a transformator object, tr is the list of all transoformators in the pipeline
if isinstance(t,OneHotEncoder):
for f in t.get_feature_names(features):
final_features.append("OHE_"+f)
break
else: #if not found onehotencoder, add the features directly
for f in features:
final_features.append(f)
elif isinstance(trClass,OneHotEncoder): #?type(trClass)==OneHotEncoder:
for f in trClass.get_feature_names(features):
final_features.append("OHE_"+f)
else:
#remainders
if trName=="remainder":
for i in features:
final_features.append(list(dataframe.columns)[i])
#all the others
else:
for f in features:
final_features.append(f)
return final_features
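# Usage sketch (illustrative, assuming a fitted ColumnTransformer and an
# sklearn version where OneHotEncoder still exposes get_feature_names):
#   from sklearn.compose import ColumnTransformer
#   ct = ColumnTransformer([('ohe', OneHotEncoder(), ['color'])],
#                          remainder='passthrough').fit(df)
#   names = getfeaturenames(ct, df)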
def featureImportanceEncoded(importance,feature_names,figsize=(8,6)):
plt.figure(figsize=figsize)
dfimp=pd.DataFrame(importance.reshape(-1,1).T,columns=feature_names).T
dfimp.index.name="Encoded"
dfimp.rename(columns={0: "Importance"},inplace=True)
dfimp.reset_index(inplace=True)
dfimp["Feature"]=dfimp["Encoded"].apply(lambda x:x[4:].split('_')[0] if "OHE" in x else x)
dfimp.groupby(by='Feature')["Importance"].sum().sort_values().plot(kind='barh');
def compareClassifiers(gs,tableorplot='plot',figsize=(10,5)):
cvres = gs.cv_results_
cv_results = pd.DataFrame(cvres)
cv_results['param_clf']=cv_results['param_clf'].apply(lambda x:str(x).split('(')[0])
cols={"mean_test_score":"MAX of mean_test_score","mean_fit_time":"MIN of mean_fit_time"}
summary=cv_results.groupby(by='param_clf').agg({"mean_test_score":"max", "mean_fit_time":"min"}).rename(columns=cols)
summary.sort_values(by='MAX of mean_test_score', ascending=False,inplace=True)
if tableorplot=='table':
return summary
else:
fig, ax1 = plt.subplots(figsize=figsize)
color = 'tab:red'
ax1.set_xticklabels('Classifiers', rotation=45,ha='right')
ax1.set_ylabel('MAX of mean_test_score', color=color)
ax1.bar(summary.index, summary['MAX of mean_test_score'], color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('MIN of mean_fit_time', color=color)
ax2.plot(summary.index, summary['MIN of mean_fit_time'], color=color)
ax2.tick_params(axis='y', labelcolor=color)
plt.show()
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def CheckForClusterinTendencyWithHopkins(df):
"""
    taken from https://matevzkunaver.wordpress.com/2017/06/20/hopkins-test-for-cluster-tendency/
    The closer H is to 1, the stronger the evidence of clustering tendency.
"""
d = df.shape[1]
#d = len(vars) # columns
n = len(df) # rows
m = int(0.1 * n) # heuristic from article [1]
nbrs = NearestNeighbors(n_neighbors=1).fit(df.values)
rand_X = sample(range(0, n, 1), m)
ujd = []
wjd = []
for j in range(0, m):
u_dist, _ = nbrs.kneighbors(uniform(np.amin(df,axis=0),np.amax(df,axis=0),d).reshape(1, -1), 2, return_distance=True)
ujd.append(u_dist[0][1])
w_dist, _ = nbrs.kneighbors(df.iloc[rand_X[j]].values.reshape(1, -1), 2, return_distance=True)
wjd.append(w_dist[0][1])
H = sum(ujd) / (sum(ujd) + sum(wjd))
if isnan(H):
print(ujd, wjd)
H = 0
return H
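# --- hypothetical usage sketch; assumes the imports the function above relies on
# (random.sample, numpy.random's uniform, math.isnan, sklearn's NearestNeighbors)
# are in scope, as elsewhere in this module:
#   H = CheckForClusterinTendencyWithHopkins(df.select_dtypes("number"))
#   # H near 1 -> strong clustering tendency; H near 0.5 -> essentially random data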
def getNumberofCatsAndNumsFromDatasets(path,size=10_000_000):
"""
    returns the number of features by their main type (i.e. categorical, numeric or datetime)
    args:
        path: directory in which the files reside.
        size: maximum file size (default is ~10 MB). If chosen larger, it will take longer to return.
os.chdir(path)
files=os.listdir()
liste=[]
for d in files:
try:
if os.path.isfile(d) and os.path.getsize(d)<size:
if os.path.splitext(d)[1]==".csv":
df=pd.read_csv(d,encoding = "ISO-8859-1")
elif os.path.splitext(d)[1]==".xlsx":
df=pd.read_excel(d)
else:
continue
nums=len(df.select_dtypes("number").columns)
date=len(df.select_dtypes(include=[np.datetime64]).columns)
cats=len(df.select_dtypes("O").columns)-date
liste.append((d,nums,cats,date))
        except Exception:
            pass  # skip files that cannot be read or parsed
    dffinal=pd.DataFrame(liste,columns=["filename","numeric","categorical","datetime"])
    dffinal=dffinal.set_index("filename")
return dffinal
#Functions to run before and during modelling
def checkIfNumberOfInstanceEnough(df):
"""
    o If there are very few rows, more data should be collected.
    o If there are far too many rows, partial sampling can be applied. (We will see the details.)
    o If you are unsure whether you have too much data, try it all first. If training takes too long, reduce it gradually.
"""
def checkIfNumberOFeatures(df):
"""
    o If there are few columns (features), try to obtain new ones.
    o If there are many columns, apply dimensionality reduction and feature-selection methods. (We will see the details later.)
    o By the same logic as with rows, if you are unsure whether there are too many columns, first model with all of them. If training takes too long or the model overfits, apply feature-reduction methods.
    Reducing the number of columns not only shortens training time but also prevents overfitting.
"""
def checkForImbalancednessForLabels(df):
"""
    (If the labels are imbalanced, oversample after the train/test split.)
"""
def remindForSomeProcesses():
"""
....
"""
print("transformasyon gerektirmeyen kısımlar: feature extraction, feaute selection, feature elimination")
def remindForDiscreteization():
"""
    in which cases should high-cardinality numeric features be discretized?
"""
# in between, X and y are set manually
def traintest(X,y,testsize):
    # first do the train/test split; take other parameters as needed
    print("don't touch the test set")
def remindForStep2FE():
print("transformasyon gerektiren işlemler step 2, hangileri?????????")
#bu arada aşağıdaki açıklamadaki ilk satır çalışablir
def buildModel(train,test):
"""
    should we build multiple models here? VotingClassifier. maybe pass the pipelines as parameters.
    if so, the previous step should also remind us of this, and tellWhatAlgorithmsToUse should run as well
    (a hedged sketch of this idea follows right below)
    fit, transform
    predict
    check the score; if it is very low, look into causes of underfitting; if there is no obvious cause, add new features + new data (if data is scarce), or try a new model
    if the score is good, check with cross-validation
    finally evaluate on the test set
"""
def tellWhatAlgorithmsToUse(df,type):
"""
    separately for s and u?
"""
| 38.531513
| 213
| 0.601876
| 2,375
| 18,341
| 4.544
| 0.291368
| 0.015845
| 0.00556
| 0.009266
| 0.083117
| 0.061527
| 0.039288
| 0.034099
| 0.016679
| 0.008154
| 0
| 0.021779
| 0.284009
| 18,341
| 476
| 214
| 38.531513
| 0.80003
| 0.209858
| 0
| 0.062914
| 0
| 0
| 0.092658
| 0.00301
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086093
| false
| 0.003311
| 0.07947
| 0.003311
| 0.201987
| 0.072848
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28eb9384d1558fa0c10861f56ac8ad811737befd
| 845
|
py
|
Python
|
src/isaw.theme/isaw/theme/browser/viewlets/zotero.py
|
isawnyu/isaw.web
|
604499f9fa55d1ce9698ca05f85ddb54a88f1cab
|
[
"CC-BY-3.0"
] | null | null | null |
src/isaw.theme/isaw/theme/browser/viewlets/zotero.py
|
isawnyu/isaw.web
|
604499f9fa55d1ce9698ca05f85ddb54a88f1cab
|
[
"CC-BY-3.0"
] | 405
|
2015-03-12T18:20:25.000Z
|
2022-03-07T18:44:16.000Z
|
src/isaw.theme/isaw/theme/browser/viewlets/zotero.py
|
isawnyu/isaw.web
|
604499f9fa55d1ce9698ca05f85ddb54a88f1cab
|
[
"CC-BY-3.0"
] | 1
|
2016-11-07T21:18:49.000Z
|
2016-11-07T21:18:49.000Z
|
import re
from urlparse import urlparse
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.layout.viewlets.common import ViewletBase
ZOTERO_JSON_BASE = 'https://api.zotero.org{}?v=3&format=json'
Z_MATCH = re.compile(r'^/(groups|users)/\d+/items/[A-Z1-9]+$')
class PublicationZoteroViewlet(ViewletBase):
render = ViewPageTemplateFile('zotero.pt')
html_ref = None
json_ref = None
def update(self):
zotero_url = getattr(self.context, 'bibliographic_uri', None)
if not zotero_url:
return
parsed = urlparse(zotero_url)
zotero_path = parsed.path
domain = parsed.netloc
if domain == 'www.zotero.org' and Z_MATCH.match(zotero_path):
self.html_ref = zotero_url
self.json_ref = ZOTERO_JSON_BASE.format(zotero_path)
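# --- illustrative URL accepted by Z_MATCH above (the identifier is a made-up example):
#   https://www.zotero.org/groups/12345/items/ABCD12345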
| 33.8
| 71
| 0.695858
| 109
| 845
| 5.229358
| 0.541284
| 0.063158
| 0.049123
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004425
| 0.197633
| 845
| 24
| 72
| 35.208333
| 0.836283
| 0
| 0
| 0
| 0
| 0
| 0.138462
| 0.043787
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.2
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28ec2d89ad8ce29a9ec68a6cf207b6114836df8c
| 1,079
|
py
|
Python
|
descender.py
|
illBeRoy/tldr-of-the-world-data
|
06d581eb117bdc79ebbe7af4f8ae4b26190d7231
|
[
"MIT"
] | null | null | null |
descender.py
|
illBeRoy/tldr-of-the-world-data
|
06d581eb117bdc79ebbe7af4f8ae4b26190d7231
|
[
"MIT"
] | null | null | null |
descender.py
|
illBeRoy/tldr-of-the-world-data
|
06d581eb117bdc79ebbe7af4f8ae4b26190d7231
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
import json
import jinja2
import webbrowser
import graph
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('groups', help='json file describing seed groups')
args = parser.parse_args()
# load group from file
with open(args.groups, 'rb') as f:
groups = json.loads(f.read())
# load template from file
with open('descender.html.jinja', 'rb') as f:
template = jinja2.Template(f.read())
# load graph from file
graph = graph.Graph()
graph.load('./graph.pickle')
# find neighbours using the given groups and weight vector
for group in groups:
group['neighbours'] = graph.get_joint_neighbours(group['members'], group_size=50)
group['neighbours'] = [''.join([c for c in x if ord(c) < 128]) for x in group['neighbours']]
# generate output file
with open('/tmp/descender.results.html', 'wb') as f:
f.write(template.render({'groups': groups}))
# open it
webbrowser.open('file:///tmp/descender.results.html')
| 28.394737
| 100
| 0.656163
| 146
| 1,079
| 4.760274
| 0.458904
| 0.034532
| 0.051799
| 0.046043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008187
| 0.2076
| 1,079
| 37
| 101
| 29.162162
| 0.804678
| 0.159407
| 0
| 0
| 0
| 0
| 0.211111
| 0.067778
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.238095
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28f54f1fb9cdd4025290b813dd74c2874e584666
| 14,375
|
py
|
Python
|
4_Model_Updater/train_new_model.py
|
kshahnazari1998/SmartDota-Public
|
270ddabfd353c57e754c00b7a5365d99f4d5902f
|
[
"MIT"
] | null | null | null |
4_Model_Updater/train_new_model.py
|
kshahnazari1998/SmartDota-Public
|
270ddabfd353c57e754c00b7a5365d99f4d5902f
|
[
"MIT"
] | null | null | null |
4_Model_Updater/train_new_model.py
|
kshahnazari1998/SmartDota-Public
|
270ddabfd353c57e754c00b7a5365d99f4d5902f
|
[
"MIT"
] | null | null | null |
import json
import pandas as pd
import numpy as np
import random
from Sqldatabasehandler import sqlhandler
from datetime import datetime
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import PolynomialFeatures
import pickle
import torch
from torch import nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import torch.nn.functional as F
class ModelUpdater:
def __init__(self, host, user, passwd, database):
"""
The constructor for the Class
"""
self.sqlhand = sqlhandler(host, user, passwd, database)
def update_model(self, batchsize=100000):
last_seq = self.get_last_game()
res = self.sqlhand.SqlQueryExec(
"SELECT count(*) FROM DotaMatches WHERE GameSEQ> %s",
True,
[
last_seq,
],
)
if res == -1:
return -1
new_games_count = self.sqlhand.get_row_result()
if new_games_count >= batchsize:
games = self.sqlhand.get_all_select_rows(
"SELECT * FROM DotaMatches WHERE GameSEQ>%s order by GameSEQ Limit %s",
[
last_seq,
batchsize,
],
)
cols = self.sqlhand.get_all_select_rows(
"SHOW columns FROM DotaMatches",
)
cols = [x[0] for x in cols]
games = pd.DataFrame(games)
games.columns = cols
games = games.dropna(subset=["Pick1Rad"])
model, linear = self.train_new_model(games)
now = datetime.now()
date_time = now.strftime("%m_%d_%Y_%H")
max_game_seq = games["GameSEQ"].max()
self.update_last_game(max_game_seq)
torch.save(model.state_dict(), "./model.pth")
torch.save(
model.state_dict(),
"./old_models/model_" + date_time + "_" + str(max_game_seq) + ".pth",
)
            pickle.dump(linear, open("linear_model", "wb"))
pickle.dump(
linear,
open(
"./old_models/linear_model_"
+ date_time
+ "_"
+ str(max_game_seq)
+ ".pth",
"wb",
),
)
del games
self.update_model()
else:
return 0
def get_last_game(self):
try:
filepath = "last_game_seq.txt"
fp = open(filepath)
last_seq = int(fp.read())
fp.close()
return last_seq
        except Exception:
            return -1
def update_last_game(self, lastseq):
try:
filepath = "last_game_seq.txt"
fp = open(filepath, "w")
fp.write(str(lastseq))
fp.close()
return 0
        except Exception:
            return -1
def train_new_model(self, df):
df_no_leavers = df.query("Leavers==0")
class game_datasets(Dataset):
def __init__(self, rawdata):
X = rawdata.loc[:, "Pick1Rad":"Pick5Dir"]
y = rawdata["RadiantWin"]
self.x = torch.tensor(X.values)
self.y = torch.tensor(y.values)
def __getitem__(self, index):
return self.x[index], self.y[index]
def __len__(self):
return len(self.y)
class GamePredictor_final(nn.Module):
def __init__(self):
super().__init__()
self.l1 = nn.Linear(150, 50)
self.l2 = nn.Linear(50, 50)
self.l3 = nn.Linear(50, 1)
def forward(self, x):
# Pass the input tensor through each of our operations
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
x = self.l3(x)
return torch.sigmoid(x)
net = GamePredictor_final()
net.load_state_dict(torch.load("model.pth"))
net.train()
optimizer = optim.Adam(net.parameters(), lr=0.001)
Epochs = 1
for epoch in range(0, Epochs):
train_data_set = game_datasets(df_no_leavers)
train_data_loader = DataLoader(train_data_set, batch_size=10000)
train_data_iter = iter(train_data_loader)
for data in train_data_iter:
x, y = data
net.zero_grad()
x = self.game_datasets_transform_X(x, 10)
# print(x[100])
y = self.game_datasets_transform_Y(y, 10)
x = x.view(-1, 150).float()
y = y.view(-1, 1).float()
output = net(x)
loss_func = nn.MSELoss()
loss = loss_func(output, y)
loss.backward()
optimizer.step()
print("Done Training")
# Training SGD
train_data_set = game_datasets(df_no_leavers)
train_data_loader = DataLoader(train_data_set, batch_size=2500)
train_data_iter = iter(train_data_loader)
poly = PolynomialFeatures(degree=2)
        loaded_model = pickle.load(open("linear_model", "rb"))
del train_data_set
for data in train_data_iter:
x, y = data
x = self.game_datasets_transform_X_SGD(x, 5)
y = self.game_datasets_transform_Y(y, 5)
x = x.view(-1, 300).float()
y = y.view(-1, 1).float()
x = x.numpy()
x = poly.fit_transform(x)
y = y.numpy().ravel()
loaded_model.partial_fit(x, y, [0, 1])
print("Done Training")
return net, loaded_model
def game_datasets_transform_X(self, data_X, mode=None, device="cpu"):
        # If mode is None, only the 10 picks are added.
        # If mode is 10, all possible combinations are added as well.
        # If mode is 1, 2, 3, 4 or 5, only the picks for that scenario are added.
if mode is not None:
picks = data_X.t()
picks = picks.to(device)
# 1st picks
picks_rad = torch.zeros(data_X.shape[0], 150, device=device)
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[0].long())] = -1
picks_dire = torch.zeros(data_X.shape[0], 150, device=device)
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[5].long())
] = 1
if mode == 10:
res = torch.cat([picks_rad, picks_dire], dim=0)
if mode == 1:
return torch.cat([picks_rad, picks_dire], dim=0)
# 2nd picks
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[1].long())] = -1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[6].long())
] = 1
if mode == 10:
res = torch.cat([res, picks_rad, picks_dire], dim=0)
if mode == 2:
return torch.cat([picks_rad, picks_dire], dim=0)
# 3rd picks
picks_rad[
range(picks_rad.shape[0]), torch.LongTensor(picks[5:7].long())
] = 1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[0:2].long())
] = -1
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[2].long())] = -1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[7].long())
] = 1
if mode == 10:
res = torch.cat([res, picks_rad, picks_dire], dim=0)
if mode == 3:
return torch.cat([picks_rad, picks_dire], dim=0)
# 4th picks
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[3].long())] = -1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[8].long())
] = 1
if mode == 10:
res = torch.cat([res, picks_rad, picks_dire], dim=0)
if mode == 4:
return torch.cat([picks_rad, picks_dire], dim=0)
# 5th picks
picks_rad[
range(picks_rad.shape[0]), torch.LongTensor(picks[7:9].long())
] = 1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[2:4].long())
] = -1
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[4].long())] = -1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[9].long())
] = 1
if mode == 10:
res = torch.cat([res, picks_rad, picks_dire], dim=0)
if mode == 5:
return torch.cat([picks_rad, picks_dire], dim=0)
# All picks (Only for mode 10)
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[9].long())] = 1
res = torch.cat([res, picks_rad], dim=0)
return res
else:
picks = data_X.t()
picks = picks.to(device)
picks_all = torch.zeros(data_X.shape[0], 150, device=device)
picks_all[range(picks_all.shape[0]), picks[0:5]] = -1
picks_all[range(picks_all.shape[0]), picks[5:10]] = 1
return picks_all
def game_datasets_transform_X_SGD(self, data_X, mode=None, device="cpu"):
        # If mode is None, only the 10 picks are added.
        # If mode is 10, all possible combinations are added as well.
        # If mode is 1, 2, 3, 4 or 5, only the picks for that scenario are added.
if mode is not None:
picks = data_X.t()
picks = picks.to(device)
# picks = data_X
# 1st picks
picks_rad = torch.zeros(data_X.shape[0], 300, device=device)
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[0].long())] = 1
picks_dire = torch.zeros(data_X.shape[0], 300, device=device)
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor((picks[5] + 150).long())
] = 1
if mode == 10:
res = torch.cat([picks_rad, picks_dire], dim=0)
if mode == 1:
return torch.cat([picks_rad, picks_dire], dim=0)
# 2nd picks
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[1].long())] = 1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor((picks[6] + 150).long())
] = 1
if mode == 10:
res = torch.cat([res, picks_rad, picks_dire], dim=0)
if mode == 2:
return torch.cat([picks_rad, picks_dire], dim=0)
# 3rd picks
picks_rad[
range(picks_rad.shape[0]), torch.LongTensor((picks[5:7] + 150).long())
] = 1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[0:2].long())
] = 1
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[2].long())] = 1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor((picks[7] + 150).long())
] = 1
if mode == 10:
res = torch.cat([res, picks_rad, picks_dire], dim=0)
if mode == 3:
return torch.cat([picks_rad, picks_dire], dim=0)
# 4th picks
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[3].long())] = 1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor((picks[8] + 150).long())
] = 1
if mode == 10:
res = torch.cat([res, picks_rad, picks_dire], dim=0)
if mode == 4:
return torch.cat([picks_rad, picks_dire], dim=0)
# 5th picks
picks_rad[
range(picks_rad.shape[0]), torch.LongTensor((picks[7:9] + 150).long())
] = 1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor(picks[2:4].long())
] = 1
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[4].long())] = 1
picks_dire[
range(picks_dire.shape[0]), torch.LongTensor((picks[9] + 150).long())
] = 1
if mode == 10:
res = torch.cat([res, picks_rad, picks_dire], dim=0)
if mode == 5:
return torch.cat([picks_rad, picks_dire], dim=0)
# All picks (Only for mode 10)
picks_rad[range(picks_rad.shape[0]), torch.LongTensor(picks[9].long())] = 1
res = torch.cat([res, picks_rad], dim=0)
return res
else:
picks = data_X.t()
picks = picks.to(device)
picks_all = torch.zeros(data_X.shape[0], 150, device=device)
picks_all[range(picks_all.shape[0]), picks[0:5]] = -1
picks_all[range(picks_all.shape[0]), picks[5:10]] = 1
return picks_all
def game_datasets_transform_Y(self, data_Y, mode=None):
# y_trans = []
        if mode is None:
return data_Y
y = data_Y.numpy()
# for i, y in enumerate(data_Y.numpy()):
if mode < 10:
# y_trans.append(y)
# y_trans.append(y)
res = np.tile(y, 2)
else:
res = np.tile(y, 11)
# res = np.concatenate([y,y])
# for _ in range(10):
# # y_trans.append(y)
# res = np.concatenate([res,y])
return torch.tensor(res)
if __name__ == "__main__":
# Define Dota game scraper and create database connection
try:
# Define Dota game scraper and create database connection
with open("keys.json") as f:
keys = json.load(f)
host = keys["database"]["host"]
print(host)
something = ModelUpdater(
host=keys["database"]["host"],
user=keys["database"]["user"],
passwd=keys["database"]["passwd"],
database=keys["database"]["database"],
)
something.update_model()
except Exception as e:
print(f"Error in Dota_skill_scraper.py. Can't start script. Error is {e}")
| 36.209068
| 88
| 0.517704
| 1,778
| 14,375
| 4.012373
| 0.142295
| 0.062798
| 0.046257
| 0.08831
| 0.640454
| 0.619288
| 0.599103
| 0.577236
| 0.555649
| 0.535744
| 0
| 0.035497
| 0.361113
| 14,375
| 396
| 89
| 36.300505
| 0.741289
| 0.066574
| 0
| 0.44373
| 0
| 0
| 0.039518
| 0.003593
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041801
| false
| 0.009646
| 0.045016
| 0.006431
| 0.180064
| 0.012862
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28f6f0c2610028a27b78a080b28387f6adc1ab80
| 2,227
|
py
|
Python
|
meshrcnn/structures/mask.py
|
MAYURGAIKWAD/meshrcnn
|
b47ecd47ca7de7055b7d141e63ddab286c5245f3
|
[
"BSD-3-Clause"
] | 1,028
|
2020-01-23T23:30:54.000Z
|
2022-03-27T22:33:50.000Z
|
meshrcnn/structures/mask.py
|
MAYURGAIKWAD/meshrcnn
|
b47ecd47ca7de7055b7d141e63ddab286c5245f3
|
[
"BSD-3-Clause"
] | 103
|
2020-01-24T05:29:48.000Z
|
2022-03-08T13:04:24.000Z
|
meshrcnn/structures/mask.py
|
MAYURGAIKWAD/meshrcnn
|
b47ecd47ca7de7055b7d141e63ddab286c5245f3
|
[
"BSD-3-Clause"
] | 179
|
2020-01-24T08:14:30.000Z
|
2022-03-19T00:34:05.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from torch.nn import functional as F
def crop_mask_within_box(mask, box, mask_size):
"""
Crop the mask content in the given box.
The cropped mask is resized to (mask_size, mask_size).
This function is used when generating training targets for mask head in Mask R-CNN.
Given original ground-truth masks for an image, new ground-truth mask
training targets in the size of `mask_size x mask_size`
must be provided for each predicted box. This function will be called to
produce such targets.
Args:
mask (Tensor): A tensor mask image.
        box (Tensor): 4 elements (x0, y0, x1, y1).
        mask_size (int): the side length of the output mask.
Returns:
Tensor: ByteTensor of shape (mask_size, mask_size)
"""
# 1. Crop mask
roi = box.clone().int()
cropped_mask = mask[roi[1] : roi[3], roi[0] : roi[2]]
# 2. Resize mask
cropped_mask = cropped_mask.unsqueeze(0).unsqueeze(0)
cropped_mask = F.interpolate(cropped_mask, size=(mask_size, mask_size), mode="bilinear")
cropped_mask = cropped_mask.squeeze(0).squeeze(0)
# 3. Binarize
cropped_mask = (cropped_mask > 0).float()
return cropped_mask
def batch_crop_masks_within_box(masks, boxes, mask_side_len):
"""
Batched version of :func:`crop_mask_within_box`.
Args:
masks (Masks): store N masks for an image in 2D array format.
boxes (Tensor): store N boxes corresponding to the masks.
mask_side_len (int): the size of the mask.
Returns:
Tensor: A byte tensor of shape (N, mask_side_len, mask_side_len), where
N is the number of predicted boxes for this image.
"""
device = boxes.device
# Put boxes on the CPU, as the representation for masks is not efficient
# GPU-wise (possibly several small tensors for representing a single instance mask)
boxes = boxes.to(torch.device("cpu"))
masks = masks.to(torch.device("cpu"))
results = [crop_mask_within_box(mask, box, mask_side_len) for mask, box in zip(masks, boxes)]
if len(results) == 0:
return torch.empty(0, dtype=torch.float32, device=device)
return torch.stack(results, dim=0).to(device=device)
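# --- hypothetical usage sketch (tensor shapes are assumptions, not from the source):
#   mask = torch.rand(480, 640)                       # full-image mask
#   box = torch.tensor([100.0, 120.0, 300.0, 360.0])  # x0, y0, x1, y1
#   target = crop_mask_within_box(mask, box, mask_size=28)
#   assert target.shape == (28, 28)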
| 34.796875
| 97
| 0.68388
| 337
| 2,227
| 4.391691
| 0.367953
| 0.059459
| 0.037162
| 0.043243
| 0.037838
| 0.037838
| 0.037838
| 0
| 0
| 0
| 0
| 0.011008
| 0.224966
| 2,227
| 63
| 98
| 35.349206
| 0.846466
| 0.535249
| 0
| 0
| 0
| 0
| 0.015168
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28f7bcf0fe258f3b0b26915576f9434aa6d0a9ec
| 1,727
|
py
|
Python
|
app/controller/org.py
|
Jimmy-Xu/fastapi_demo
|
f19c629cc7fa0e0e47e73e8688cd019bc74aa982
|
[
"MIT"
] | 12
|
2020-09-01T09:19:41.000Z
|
2022-03-17T05:48:50.000Z
|
app/controller/org.py
|
Jimmy-Xu/fastapi_demo
|
f19c629cc7fa0e0e47e73e8688cd019bc74aa982
|
[
"MIT"
] | null | null | null |
app/controller/org.py
|
Jimmy-Xu/fastapi_demo
|
f19c629cc7fa0e0e47e73e8688cd019bc74aa982
|
[
"MIT"
] | 3
|
2021-04-26T02:53:04.000Z
|
2021-11-01T14:32:38.000Z
|
from fastapi import APIRouter, Depends
from fastapi_plus.schema.base import ListArgsSchema, RespListSchema, RespIdSchema, RespBaseSchema
from fastapi_plus.utils.auth import get_auth_data
from fastapi_plus.utils.custom_route import CustomRoute
from ..schema.org import OrgInfoSchema, OrgRespDetailSchema
from ..service.org import OrgService
router = APIRouter(route_class=CustomRoute)
@router.post('/list', response_model=RespListSchema)
async def list(*, args: ListArgsSchema, auth_data: dict = Depends(get_auth_data)):
"""
    Read the organization list.
    :param args: request argument set
    :return: organization list schema
"""
args.user_id = auth_data.get('user_id')
return OrgService(auth_data).list(args)
@router.get('/{id}', response_model=OrgRespDetailSchema)
async def read(id: int, auth_data: dict = Depends(get_auth_data)):
"""
    Read organization detail.
    :param id: organization id
    :return: organization detail schema
"""
resp = OrgRespDetailSchema()
resp.detail = OrgService(auth_data).read(id)
return resp
@router.post('', response_model=RespIdSchema, response_model_exclude_none=True)
async def create(*, info: OrgInfoSchema, auth_data: dict = Depends(get_auth_data)):
"""
    Create an organization.
    :param info: organization data
    :return:
"""
return OrgService(auth_data).create(info)
@router.put('/{id}', response_model=RespBaseSchema)
async def update(*, info: OrgInfoSchema, auth_data: dict = Depends(get_auth_data)):
"""
    Update an organization.
    :param info: organization data
    :return:
"""
return OrgService(auth_data).update(info)
@router.delete("/{id}", response_model=RespBaseSchema)
async def delete(id: int, auth_data: dict = Depends(get_auth_data)):
"""
    Delete an organization.
    :param id: organization id
    :return:
"""
return OrgService(auth_data).delete(id)
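# --- hypothetical wiring sketch (the app object and prefix are assumptions,
# not from the source): the router above would typically be mounted as
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(router, prefix="/org", tags=["org"])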
| 27.412698
| 97
| 0.712218
| 212
| 1,727
| 5.622642
| 0.283019
| 0.114094
| 0.055369
| 0.079698
| 0.322148
| 0.29698
| 0.234899
| 0.209732
| 0.137584
| 0
| 0
| 0
| 0.165605
| 1,727
| 62
| 98
| 27.854839
| 0.827203
| 0
| 0
| 0
| 0
| 0
| 0.018921
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.24
| 0
| 0.44
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28fa10ec4cb7ea617432d1a843efa65bb4d46c15
| 2,327
|
py
|
Python
|
nerodia/alert.py
|
harsh183/nerodia
|
69c5e4408432e85b5af0b2da03015f729809dac4
|
[
"MIT"
] | 83
|
2017-11-20T08:41:09.000Z
|
2022-02-09T21:01:47.000Z
|
nerodia/alert.py
|
harsh183/nerodia
|
69c5e4408432e85b5af0b2da03015f729809dac4
|
[
"MIT"
] | 28
|
2017-11-21T02:25:03.000Z
|
2021-04-15T15:26:30.000Z
|
nerodia/alert.py
|
harsh183/nerodia
|
69c5e4408432e85b5af0b2da03015f729809dac4
|
[
"MIT"
] | 14
|
2017-11-29T06:44:12.000Z
|
2021-09-06T04:53:44.000Z
|
from selenium.common.exceptions import NoAlertPresentException
import nerodia
from .exception import UnknownObjectException
from .wait.wait import Waitable, TimeoutError
class Alert(Waitable):
def __init__(self, browser):
self.browser = browser
self.alert = None
@property
def text(self):
"""
Returns the text of the alert
:rtype: str
:Example:
browser.alert.text #=> 'ok'
"""
self.wait_for_exists()
return self.alert.text
def ok(self):
"""
Closes alert or accepts prompts/confirms
:Example:
browser.alert.ok
browser.alert.exists #=> False
"""
self.wait_for_exists()
self.alert.accept()
self.browser.after_hooks.run()
def close(self):
"""
        Closes alert or cancels prompts/confirms
:Example:
browser.alert.close()
browser.alert.exists #=> False
"""
self.wait_for_exists()
self.alert.dismiss()
self.browser.after_hooks.run()
def set(self, value):
"""
Enters text to prompt
:param value: keys to send
:Example:
browser.alert.set('Text for prompt')
browser.alert.ok()
"""
self.wait_for_exists()
self.alert.send_keys(value)
@property
def exists(self):
"""
Returns True if alert, confirm, or prompt is present and False otherwise
:rtype: bool
:Example:
browser.alert.exists #=> True
"""
try:
self.assert_exists()
return True
except UnknownObjectException:
return False
present = exists
@property
def selector_string(self):
return 'alert'
def assert_exists(self):
try:
self.alert = self.browser.driver.switch_to.alert
except NoAlertPresentException:
raise UnknownObjectException('unable to locate alert')
def wait_for_exists(self):
if not nerodia.relaxed_locate:
return self.assert_exists()
try:
return self.wait_until(lambda a: a.exists, message='waiting for alert')
except TimeoutError:
raise UnknownObjectException('unable to locate alert')
| 23.039604
| 83
| 0.581006
| 244
| 2,327
| 5.442623
| 0.319672
| 0.072289
| 0.071536
| 0.051205
| 0.219127
| 0.203313
| 0.073795
| 0.073795
| 0.073795
| 0.073795
| 0
| 0
| 0.329179
| 2,327
| 100
| 84
| 23.27
| 0.850737
| 0.234207
| 0
| 0.304348
| 0
| 0
| 0.042913
| 0
| 0
| 0
| 0
| 0
| 0.065217
| 1
| 0.195652
| false
| 0
| 0.086957
| 0.021739
| 0.456522
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28fa6ae216b4aa0d88457aec32b09566f1611604
| 1,448
|
py
|
Python
|
finetwork/distance_calculator/distance_calculator.py
|
annakuchko/FinNetwork
|
4566ff96b33fb5668f9b28f41a94791d1cf9249c
|
[
"MIT"
] | 5
|
2021-12-07T22:14:10.000Z
|
2022-03-30T14:09:15.000Z
|
finetwork/distance_calculator/distance_calculator.py
|
annakuchko/FinNetwork
|
4566ff96b33fb5668f9b28f41a94791d1cf9249c
|
[
"MIT"
] | null | null | null |
finetwork/distance_calculator/distance_calculator.py
|
annakuchko/FinNetwork
|
4566ff96b33fb5668f9b28f41a94791d1cf9249c
|
[
"MIT"
] | null | null | null |
from finetwork.distance_calculator import _distance_metrics
import pandas as pd
class CalculateDistance:
def __init__(self, data, method='pearson', scaled=False, sigma = 0.5):
self.data = data
self.method = method
self.scaled = scaled
self.sigma = sigma
def transform(self):
data = self.data
dist_dict = {}
for i in data.keys():
tmp = pd.DataFrame.from_dict({(v,k): data[i][v][k]['log_return']
for v in data[i].keys()
for k in data[i][v].keys()},
orient='index')
tmp.index = pd.MultiIndex.from_arrays(
[
[tmp.index[i][0] for i in range(len(tmp.index))],
[tmp.index[i][1] for i in range(len(tmp.index))]
]
)
tmp = tmp.reset_index().pivot('level_1', 'level_0')
distance_matrix = _distance_metrics._Metrics(
tmp,
method = self.method,
scaled=self.scaled, sigma=self.sigma
)._calculate()
distance_matrix.index = distance_matrix.index.get_level_values(
'level_0'
)
dist_dict[i] = distance_matrix
return dist_dict
| 34.47619
| 78
| 0.462017
| 149
| 1,448
| 4.308725
| 0.328859
| 0.062305
| 0.028037
| 0.034268
| 0.077882
| 0.077882
| 0.077882
| 0.077882
| 0
| 0
| 0
| 0.00861
| 0.438536
| 1,448
| 41
| 79
| 35.317073
| 0.781058
| 0
| 0
| 0
| 0
| 0
| 0.030561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.060606
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28fc0673c7bc0e68a3641dedb06915366e9c6c39
| 27,818
|
py
|
Python
|
aprastreioWin.py
|
Alexsussa/aprastreio
|
1159861edd932f61a849f63f9dc7e5d34b2f272b
|
[
"MIT"
] | null | null | null |
aprastreioWin.py
|
Alexsussa/aprastreio
|
1159861edd932f61a849f63f9dc7e5d34b2f272b
|
[
"MIT"
] | null | null | null |
aprastreioWin.py
|
Alexsussa/aprastreio
|
1159861edd932f61a849f63f9dc7e5d34b2f272b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
__version__ = 1.2
from tkinter.ttk import *
from tkinter.messagebox import *
from tkinter.scrolledtext import *
from tkinter import *
from bs4 import BeautifulSoup
from urllib.request import urlopen
from mailcomposer import MailComposer
from threading import Thread
import os
import sys
import sqlite3
import webbrowser
import ttips
import subprocess
import socket
listaRastreio = []
listaPendentes = []
listaEntregues = []
listaTodos = []
listaSepararEntregues = []
listaSepararPendentes = []
# Prevents the program from opening again while it is already running
pid = os.getpid()
pidfile = '/tmp/aprastreio.pid'
if not os.path.isfile(pidfile):
os.system(f'touch {pidfile}')
os.system(f'echo {pid} >> {pidfile}')
else:
sys.exit(-1)
# Create the database if it does not exist
db = os.path.expanduser('~/Dropbox/aprastreio/banco/')
if not os.path.exists(db):
    os.makedirs(db)
banco = os.path.join(os.path.dirname(db), 'rastreios.db')
conexao = sqlite3.connect(banco, check_same_thread=False)
c = conexao.cursor()
c.execute('CREATE TABLE IF NOT EXISTS rastreio (id INTEGER PRIMARY KEY AUTOINCREMENT,'
          'codrastreio TEXT VARCHAR(13) UNIQUE NOT NULL, objeto TEXT VARCHAR(50) NOT NULL)')
c.execute('CREATE TABLE IF NOT EXISTS entregues (id INTEGER PRIMARY KEY AUTOINCREMENT,'
          'codrastreio TEXT VARCHAR(13) UNIQUE NOT NULL, objeto TEXT VARCHAR(50) NOT NULL)')
c.execute('CREATE TABLE IF NOT EXISTS pendentes (id INTEGER PRIMARY KEY AUTOINCREMENT,'
          'codrastreio TEXT VARCHAR(13) UNIQUE NOT NULL, objeto TEXT VARCHAR(50) NOT NULL)')
# Check for new versions of the software
def CheckUpdates(event=None):
janela.unbind('<Enter>')
versao = urlopen('https://www.dropbox.com/s/61rpf1xg8qr1vh1/version_linux.txt?dl=true').read()
if float(versao) > float(__version__):
subprocess.call(
['notify-send', 'AP - Rastreio Correios', 'Há uma nova versão disponível. Baixe agora!'])
showinfo(title='Atualização', message='Há uma nova versão disponível. Baixe agora!')
webbrowser.open('https://github.com/Alexsussa/aprastreio/releases/')
class Rastreio:
def __init__(self, master=None, rastreio='', objeto=''):
self.rastreio = rastreio
self.objeto = objeto
self.c1 = Frame(master)
self.c1['padx'] = 5
self.c1['pady'] = 3
self.c1.pack()
self.c2 = Frame(master)
self.c2.pack()
self.c3 = Frame(master)
self.c3.pack()
self.c4 = Frame(master)
self.c4.pack()
self.c5 = Frame(master)
self.c5.pack()
        # Top menu bar
menubar = Menu(janela)
arquivo = Menu(menubar, tearoff=0)
menubar.add_cascade(label='Arquivo', menu=arquivo)
menubar.add_separator()
arquivo.add_command(label='Sincronizar rastreios...',
command=lambda: Thread(target=self.NotifAltStatus).start(), accelerator='Ctrl+R')
# arquivo.add_command(label='Arquivar entregues', command=lambda: Thread(target=self.arquivarEntregues).start(), accelerator='Ctrl+B')
arquivo.add_command(label='Mover para entregues', command=lambda: Thread(target=self.arquivarRastreio).start(),
accelerator='Ctrl+B')
arquivo.add_command(label='Salvar', command=lambda: Thread(target=self.Cadastrar).start(), accelerator='Ctrl+S')
arquivo.add_command(label='Atualizar', command=lambda: Thread(target=self.Atualizar).start(),
accelerator='Ctrl+U')
arquivo.add_command(label='Deletar', command=lambda: Thread(target=self.Deletar).start(), accelerator='Ctrl+D')
arquivo.add_separator()
arquivo.add_command(label='Mostrar todos os rastreios',
command=lambda: {self.txtObjeto.config(values=self.listaTodos(event='<Button-1>')),
janela.bind('<<ComboboxSelected>>', self.BuscaTodos)})
arquivo.add_command(label='Mostar apenas os entregues',
command=lambda: {self.txtObjeto.config(values=self.listaEntregues(event='<Button-1>')),
janela.bind('<<ComboboxSelected>>', self.BuscaEntregues)})
"""arquivo.add_command(label='Mostar apenas os pendentes',
command=lambda: {self.txtObjeto.config(values=self.listaPendentes(event='<Button-1>')),
janela.bind('<<ComboboxSelected>>', self.BuscaPendentes)})"""
arquivo.add_separator()
arquivo.add_command(label='Sair', command=janela.destroy, accelerator='Ctrl+Q')
janela.bind('<Control-q>', self.JanExit)
janela.bind('<Control-Q>', self.JanExit)
ajuda = Menu(menubar, tearoff=0)
menubar.add_cascade(label='Ajuda', menu=ajuda)
ajuda.add_command(label='GitHub AP Rastreio...', command=lambda: Thread(
target=self.NavLink('https://github.com/Alexsussa/aprastreio/')).start(), accelerator='Ctrl+G')
ajuda.add_command(label='Checar atualizações...', command=lambda: Thread(target=CheckUpdates).start(),
accelerator='Ctrl+K')
ajuda.add_separator()
ajuda.add_command(label='Sobre', command=self.Sobre, accelerator='Ctrl+H')
janela.bind('<Control-h>', self.Sobre)
janela.bind('<Control-H>', self.Sobre)
janela.bind('<Control-g>', lambda e: Thread(target=self.NavLink('https://github.com/Alexsussa/aprastreio/')))
janela.bind('<Control-G>', lambda e: Thread(target=self.NavLink('https://github.com/Alexsussa/aprastreio/')))
janela.bind('<Control-k>', CheckUpdates)
janela.bind('<Control-K>', CheckUpdates)
janela.bind('<Control-b>', lambda e: Thread(target=self.arquivarRastreio).start())
janela.bind('<Control-B>', lambda e: Thread(target=self.arquivarRastreio).start())
janela.config(menu=menubar)
        # Program layout
self.lbRastreio = Label(self.c1, text='RASTREIO:', fg='black')
self.lbRastreio.pack(side=LEFT)
self.txtRastreio = Entry(self.c1, width=14, bg='white', fg='black', selectbackground='blue',
selectforeground='white')
self.txtRastreio.pack(side=LEFT, padx=2)
self.lbObjeto = Label(self.c1, text='OBJETO:', fg='black')
self.lbObjeto.pack(side=LEFT)
self.txtObjeto = Combobox(self.c1, width=32, background='white', foreground='black',
values=self.listaTodos(event='<Button-1>'))
self.txtObjeto.pack(side=LEFT, padx=2)
janela.bind('<<ComboboxSelected>>', self.BuscaTodos)
self.btnRastrear = Button(self.c1, text='RASTREAR', fg='black',
command=lambda: {Thread(target=self.Rastrear).start(), self.BuscaRastreio()})
self.btnRastrear.pack(side=LEFT, padx=2)
janela.bind('<Return>', lambda e: {Thread(target=self.Rastrear).start(), self.BuscaRastreio()})
janela.bind('<KP_Enter>', lambda e: {Thread(target=self.Rastrear).start(), self.BuscaRastreio()})
self.campo = ScrolledText(self.c2, width=77, height=30, bg='lightgray', fg='black', state='disable',
selectbackground='blue', font=('sans-serif', '10'))
self.campo.pack(fill='both', expand=True, pady=5)
self.whatsappimg = PhotoImage(file='imagens/WhatsApp.png')
self.emailimg = PhotoImage(file='imagens/Email.png')
self.salvarimg = PhotoImage(file='imagens/Salvar.png')
self.atualizarimg = PhotoImage(file='imagens/Atualizar.png')
self.deletarimg = PhotoImage(file='imagens/Lixeira.png')
self.btnWhatsapp = Button(image=self.whatsappimg, command=lambda: Thread(target=self.WhatsApp).start())
self.btnWhatsapp.pack(side=RIGHT)
ttips.Create(self.btnWhatsapp, text='Enviar por WhatsApp, Ctrl+W')
janela.bind('<Control-w>', lambda e: Thread(target=self.WhatsApp).start())
janela.bind('<Control-W>', lambda e: Thread(target=self.WhatsApp).start())
self.btnEmail = Button(image=self.emailimg, command=lambda: Thread(target=self.Email).start())
self.btnEmail.pack(side=RIGHT)
ttips.Create(self.btnEmail, text='Enviar por Email, Ctrl+E')
janela.bind('<Control-e>', lambda e: Thread(target=self.Email).start())
janela.bind('<Control-E>', lambda e: Thread(target=self.Email).start())
self.btnSalvar = Button(image=self.salvarimg, command=lambda: [self.RastreioExiste(), self.Cadastrar()])
self.btnSalvar.pack(side=LEFT, padx=1)
ttips.Create(self.btnSalvar, text='Salvar, Ctrl+S')
janela.bind('<Control-s>', lambda e: Thread(target=self.Cadastrar).start())
janela.bind('<Control-S>', lambda e: Thread(target=self.Cadastrar).start())
self.btnAtualizar = Button(image=self.atualizarimg, command=self.Atualizar)
self.btnAtualizar.pack(side=LEFT, padx=1)
ttips.Create(self.btnAtualizar, text='Atualizar, Ctrl+U')
janela.bind('<Control-u>', lambda e: Thread(target=self.Atualizar).start())
janela.bind('<Control-U>', lambda e: Thread(target=self.Atualizar).start())
self.btnDeletar = Button(image=self.deletarimg, command=self.Deletar)
self.btnDeletar.pack(side=LEFT, padx=1)
ttips.Create(self.btnDeletar, text='Deletar, Ctrl+D')
janela.bind('<Control-d>', lambda e: Thread(target=self.Deletar).start())
janela.bind('<Control-D>', lambda e: Thread(target=self.Deletar).start())
self.lbCreditos = Label(text='AP Correios - 2020')
self.lbCreditos.pack(side=TOP)
self.lbCreditos = Label(text='Software criado por Alex Pinheiro')
self.lbCreditos.pack(side=BOTTOM)
self.mouseMenu = Menu(janela, tearoff=0)
self.mouseMenu.add_command(label='Recortar')
self.mouseMenu.add_command(label='Copiar')
self.mouseMenu.add_command(label='Colar')
janela.bind('<Control-L>', self.Limpar)
janela.bind('<Enter>', Thread(target=CheckUpdates).start())
janela.bind('<Control-r>', lambda e: Thread(target=self.NotifAltStatus).start())
janela.bind('<Control-R>', lambda e: Thread(target=self.NotifAltStatus).start())
    # Move a tracking code to the delivered list
def arquivarRastreio(self):
rastreio = self.txtRastreio.get()
objeto = self.txtObjeto.get()
if rastreio == '' or objeto == '':
showwarning(title='Aviso', message='Selecione um rastreio para mover.')
else:
c.execute(f'SELECT codrastreio FROM rastreio WHERE codrastreio = "{rastreio}"')
c.execute(f'INSERT INTO entregues SELECT * FROM rastreio WHERE codrastreio = "{rastreio}"')
c.execute(f'DELETE FROM rastreio WHERE codrastreio = "{rastreio}"')
conexao.commit()
listaTodos.clear()
self.txtObjeto.config(values=self.listaTodos())
self.Limpar()
showinfo(title='Status', message=f'Rastreio {rastreio} arquivado.')
    # Close the main program
def JanExit(self, event=None):
janela.destroy()
def NavLink(self, url):
webbrowser.open_new_tab(url)
def Sobre(self, event=None):
popup = Toplevel()
sobre = Label(popup, text='AP - Rastreios v1.2')
sobre.pack(pady=20)
logo = PhotoImage(file='imagens/sobre.png')
bgimg = Label(popup, image=logo)
bgimg.pack()
bgimg.image = logo
mit = Label(popup, text='Licença\n', fg='blue', cursor='hand2')
mit.pack()
github = Label(popup, text='GitHub\n', fg='blue', cursor='hand2')
github.pack()
popup.title('Sobre')
popup.geometry('400x300')
popup.resizable(False, False)
popup.grab_set()
popup.focus_force()
popup.transient(janela)
mit.bind('<Button-1>', lambda e: Thread(
target=self.NavLink('https://github.com/Alexsussa/aprastreio/blob/master/LICENSE')).start())
github.bind('<Button-1>',
lambda e: Thread(target=self.NavLink('https://github.com/Alexsussa/aprastreio/')).start())
    # Notify when a tracking code changes status
def NotifAltStatus(self, event=None):
try:
info = askyesno(title='ATUALIZANDO RASTREIOS',
message='Atualizando status dos rastreios...',
detail='Clique em SIM e aguarde até os objetos não entregues aparecerem na tela principal\nou clique em NÃO para atualizar manualmente mais tarde.')
if info == False:
pass
else:
janela.after(3600000, lambda: Thread(target=self.NotifAltStatus).start())
subprocess.call(['notify-send', 'AP - Rastreio Correios',
'Atualizando status dos rastreios...\n\nPor favor, aguarde...'])
c.execute('SELECT * FROM rastreio ORDER BY codrastreio')
self.Limpar()
for cod in c:
linkcorreios = urlopen(f'https://www.linkcorreios.com.br/?id={cod[1]}')
soup = BeautifulSoup(linkcorreios, 'html.parser')
lastStatus = soup.find('ul', attrs={'class': 'linha_status'})
last = lastStatus.text.strip().upper()
self.campo.delete(1.0, END)
if last[0:39] != 'STATUS: OBJETO ENTREGUE AO DESTINATÁRIO':
self.campo.config(state='normal')
self.campo.insert(INSERT, '-' * 80)
self.campo.insert(INSERT, '\n\nALTERAÇÃO DE STATUS')
self.campo.insert(INSERT, f'\n\n{cod[2]}\n{cod[1]}\n\n{last}\n\n', '-' * 80)
self.campo.config(state='disable')
subprocess.call(
['notify-send', 'AP - Rastreio Correios', f'ALTERAÇÂO DE STATUS\n\n{cod[2]}\n\n{last}\n\n'])
subprocess.call(['notify-send', 'AP - Rastreio Correios',
'Todos os objetos não entregues estão na tela principal.'])
except socket.error:
subprocess.call(['notify-send', 'AP - Rastreio Correios',
'Tempo de resposta do servidor execedido.\n\nSem conexão com a internet.'])
showerror(title='AVISO', message='Tempo de resposta do servidor execedido.\n\nSem conexão com a internet.')
def MenuMouse(self, event):
w = event.widget
self.mouseMenu.entryconfigure("Recortar", command=lambda: w.event_generate('<<Cut>>'))
self.mouseMenu.entryconfigure("Copiar", command=lambda: w.event_generate('<<Copy>>'))
self.mouseMenu.entryconfigure("Colar", command=lambda: w.event_generate('<<Paste>>'))
self.mouseMenu.tk_popup(event.x_root, event.y_root)
def Rastrear(self, event=None):
rastreio = self.txtRastreio.get()
objeto = self.txtObjeto.get()
if rastreio == '':
showwarning(title='AVISO', message='Digite um código de rastreio para rastrear.')
elif len(rastreio) != 13:
showwarning(title='AVISO', message='Rastreio deve conter 13 dígitos\nsendo duas letras iniciais e '
'duas letras finais, como no\nexemplo abaixo:\n\n "OJ123456789BR"')
else:
try:
subprocess.call(['notify-send', 'AP - Rastreio Correios', 'Rastreando encomenda...'])
linkcorreios = urlopen(f'https://www.linkcorreios.com.br/?id={rastreio}', timeout=20)
soup = BeautifulSoup(linkcorreios, 'html.parser')
status = soup.find('div', attrs={'class': 'singlepost'})
retorno = ''
if status:
retorno = status.text.strip().upper()
else:
retorno = 'O rastreamento não está disponível no momento:\n\n' \
'- Verifique se o código do objeto está correto;\n' \
'- O objeto pode demorar até 24 horas (após postagem) para ser rastreado no\nsistema dos Correios.'.strip().upper()
# print(retorno)
self.campo.config(state='normal')
self.campo.delete(1.0, END)
self.campo.insert(INSERT, retorno)
self.campo.config(state='disable')
lastStatus = soup.find('ul', attrs={'class': 'linha_status'})
if lastStatus:
last = lastStatus.text.strip().upper()
else:
last = 'O rastreamento não está disponível no momento:\n\n' \
'- Verifique se o código do objeto está correto;\n' \
'- O objeto pode demorar até 24 horas (após postagem) para ser rastreado no sistema dos Correios.'.strip().upper()
subprocess.call(['notify-send', 'AP - Rastreio Correios', f'{objeto}\n\n{last}'])
except socket.error:
subprocess.call(['notify-send', 'AP - Rastreio Correios',
'Tempo de resposta do servidor execedido.\n\nSem conexão com a internet.'])
showerror(title='AVISO',
message='Tempo de resposta do servidor execedido.\n\nSem conexão com a internet.')
"""except socket.timeout:
subprocess.call(
['notify-send', 'AP - Rastreio Correios', 'Tempo de resposta do servidor execedido.'])
showerror(title='AVISO', message='Tempo de resposta do servidor execedido.')"""
def WhatsApp(self):
rastreio = self.txtRastreio.get().strip().upper()
if rastreio == '':
showerror(title='AVISO', message='Para fazer o envio pelo WhatsApp, primeiro busque pelo rastreio.')
elif len(rastreio) != 13:
showwarning(title='AVISO', message='Rastreio deve conter 13 dígitos\nsendo duas letras iniciais e '
'duas letras finais, como no\nexemplo abaixo:\n\n "OJ123456789BR"')
else:
rastreio = self.txtRastreio.get()
webbrowser.open(
f'https://web.whatsapp.com/send?phone=&text=Ol%c3%a1.%20Clique%20no%20link%20para%20rastrear%20o%20objeto%20c%c3%b3digo%20{rastreio}%0ahttps%3a%2f%2fwww.linkcorreios.com.br%2f{rastreio}%3fw%3d1&source=&data=')
def Email(self):
rastreio = self.txtRastreio.get().strip().upper()
if not os.path.exists('/usr/bin/thunderbird') and not os.path.exists('/usr/bin/evolution'):
showwarning(title='AVISO', message='Nenhum cliente de email está instalado em seu computador.')
else:
rastreio = self.txtRastreio.get().strip().upper()
if rastreio == '':
showerror(title='AVISO', message='Para fazer o envio pelo Email, primeiro busque pelo rastreio.')
elif len(rastreio) != 13:
showwarning(title='AVISO', message='Rastreio deve conter 13 dígitos\nsendo duas letras iniciais e '
'duas letras finais, como no\nexemplo abaixo:\n\n "OJ123456789BR"')
else:
mc = MailComposer()
rastreio = self.txtRastreio.get()
mc.subject = f'Código de Rastreio ({rastreio})'
mc.body = f'Boa tarde!\n\n Segue código de rastreio para acompanhamento do seu pedido:\n\n https://www.linkcorreios.com.br/?id={rastreio}.\n\n'
mc.display('AP - Rastreio Correios')
# webbrowser.open(f'https://www.linkcorreios.com.br/?id={rastreio}#envie_por_email')
def Cadastrar(self):
rastreio = self.txtRastreio.get().strip().upper()
if self.txtRastreio.get() == '' or self.txtObjeto.get() == '':
showwarning(title='AVISO', message='Para salvar digite o rastreio e o nome do objeto.')
elif len(rastreio) != 13:
showwarning(title='AVISO', message='Rastreio deve conter 13 dígitos\nsendo duas letras iniciais e '
'duas letras finais, como no\nexemplo abaixo:\n\n "OJ123456789BR"')
else:
rastreio = self.txtRastreio.get().strip().upper()
objeto = self.txtObjeto.get().strip().upper()
c.execute(f'INSERT INTO rastreio (codrastreio, objeto) VALUES ("{rastreio}", "{objeto}")')
conexao.commit()
self.txtRastreio.delete(0, END)
self.txtObjeto.delete(0, END)
listaPendentes.clear()
self.txtObjeto.config(values=self.listaPendentes())
showinfo(title='STATUS', message=f'Rastreio {rastreio} cadastrado com sucesso.')
def Atualizar(self):
rastreio = self.txtRastreio.get().strip().upper()
objeto = self.txtObjeto.get().strip().upper()
if self.txtRastreio.get() == '' or self.txtObjeto.get() == '':
showerror(title='AVISO', message='Para atualizar os dados procure pelo rastreio primeiro.')
else:
aviso = askyesno(title='AVISO', message='Você deseja atualizar os dados desse rastreio?')
if aviso == False:
pass
elif aviso == True:
c.execute(
f'UPDATE rastreio SET codrastreio = "{rastreio}", objeto = "{objeto}" WHERE codrastreio = "{rastreio}"')
conexao.commit()
self.txtRastreio.delete(0, END)
self.txtObjeto.delete(0, END)
listaPendentes.clear()
self.txtObjeto.config(values=self.listaPendentes())
showinfo(title='STATUS', message=f'Rastreio {rastreio} atualizado com sucesso.')
else:
return None
def Deletar(self):
rastreio = self.txtRastreio.get().strip().upper()
if self.txtRastreio.get() == '' or self.txtObjeto.get() == '':
showerror(title='AVISO', message='Para deletar os dados procure pelo rastreio primeiro.')
else:
aviso = askyesno(title='AVISO', message='Você realmente deseja DELETAR os dados desse rastreio?\n'
'Esta ação não poderá ser desfeita.')
if aviso == False:
pass
elif aviso == True:
c.execute(f'DELETE FROM rastreio WHERE codrastreio = "{rastreio}"')
conexao.commit()
self.txtRastreio.delete(0, END)
self.txtObjeto.delete(0, END)
listaPendentes.clear()
self.txtObjeto.config(values=self.listaPendentes())
showinfo(title='STATUS', message=f'Rastreio {rastreio} deletado com sucesso.')
else:
return None
def listaTodos(self, event=None):
c.execute(f'SELECT objeto FROM rastreio ORDER BY id')
for objeto in c:
if objeto[0] not in listaTodos:
listaTodos.append(objeto[0])
return tuple(reversed(listaTodos))
def listaPendentes(self, event=None):
self.txtObjeto.insert(INSERT, 'Mostrando apenas objetos pendentes')
self.Limpar()
c.execute(f'SELECT objeto FROM pendentes ORDER BY id')
for objeto in c:
if objeto[0] not in listaPendentes:
listaPendentes.append(objeto[0])
return tuple(reversed(listaPendentes))
def listaEntregues(self, event=None):
self.Limpar()
c.execute(f'SELECT objeto FROM entregues ORDER BY id')
for objeto in c:
if objeto[0] not in listaEntregues:
listaEntregues.append(objeto[0])
return tuple(reversed(listaEntregues))
def ListaRastreio(self, event=None):
c.execute(f'SELECT codrastreio FROM rastreio ORDER BY codrastreio')
for rastreio in c:
if rastreio[0] not in listaRastreio:
listaRastreio.append(rastreio[0])
return tuple(listaRastreio)
def BuscaPendentes(self, event=None):
objeto = self.txtObjeto.get().strip().upper()
c.execute(f'SELECT * FROM pendentes WHERE objeto = "{objeto}"')
for linha in c:
self.rastreio = linha[1]
self.objeto = linha[2]
self.txtRastreio.delete(0, END)
self.txtRastreio.insert(INSERT, self.rastreio)
self.txtObjeto.delete(0, END)
self.txtObjeto.insert(INSERT, self.objeto)
def BuscaTodos(self, event=None):
objeto = self.txtObjeto.get().strip().upper()
c.execute(f'SELECT * FROM rastreio WHERE objeto = "{objeto}"')
for linha in c:
self.rastreio = linha[1]
self.objeto = linha[2]
self.txtRastreio.delete(0, END)
self.txtRastreio.insert(INSERT, self.rastreio)
self.txtObjeto.delete(0, END)
self.txtObjeto.insert(INSERT, self.objeto)
def BuscaEntregues(self, event=None):
objeto = self.txtObjeto.get().strip().upper()
c.execute(f'SELECT * FROM entregues WHERE objeto = "{objeto}"')
for linha in c:
self.rastreio = linha[1]
self.objeto = linha[2]
self.txtRastreio.delete(0, END)
self.txtRastreio.insert(INSERT, self.rastreio)
self.txtObjeto.delete(0, END)
self.txtObjeto.insert(INSERT, self.objeto)
def BuscaRastreio(self, event=None):
rastreio = self.txtRastreio.get().strip().upper()
c.execute(f'SELECT * FROM rastreio WHERE codrastreio = "{rastreio}"')
for linha in c:
self.rastreio = linha[1]
self.objeto = linha[2]
self.txtRastreio.delete(0, END)
self.txtRastreio.insert(INSERT, self.rastreio)
self.txtObjeto.delete(0, END)
self.txtObjeto.insert(INSERT, self.objeto)
def RastreioExiste(self):
rastreio = self.txtRastreio.get().strip().upper()
c.execute(f'SELECT * FROM rastreio WHERE codrastreio = "{rastreio}"')
for item in c:
if rastreio == item[1]:
status = showinfo(title='STATUS',
message='Código já cadastrado.\nTecle ENTER para\nbuscar o nome do objeto.')
def Limpar(self, event=None):
self.campo.config(state='normal')
self.txtRastreio.delete(0, END)
self.txtObjeto.delete(0, END)
self.campo.delete(1.0, END)
self.campo.config(state='disable')
janela = Tk()
iconejanela = PhotoImage(file='imagens/iconejanela.png')
janela.tk.call('wm', 'iconphoto', janela._w, iconejanela)
janela.resizable(False, False)
janela.geometry('630x610')
Rastreio(janela)
janela.title('AP - RASTREIO CORREIOS v1.2')
janela.update()
janela.mainloop()
if janela.destroy or janela.quit:
pass
os.system(f'rm {pidfile}')
| 46.910624
| 225
| 0.60752
| 3,162
| 27,818
| 5.327324
| 0.166034
| 0.023508
| 0.029445
| 0.022559
| 0.600534
| 0.551202
| 0.507806
| 0.449629
| 0.400475
| 0.384862
| 0
| 0.012346
| 0.260443
| 27,818
| 592
| 226
| 46.989865
| 0.806445
| 0.020562
| 0
| 0.360341
| 0
| 0.012793
| 0.264556
| 0.005208
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051173
| false
| 0.008529
| 0.031983
| 0
| 0.098081
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28fcf00920c199ce0f0b62aba120f4cb4d0c324d
| 5,480
|
py
|
Python
|
samples/attributes.py
|
DavidJohnGee/clicrud
|
f1f178ac44649efe7b7681d37e97d2632b8971b2
|
[
"Apache-2.0"
] | 9
|
2015-12-07T23:00:24.000Z
|
2021-06-23T21:31:47.000Z
|
samples/attributes.py
|
DavidJohnGee/clicrud
|
f1f178ac44649efe7b7681d37e97d2632b8971b2
|
[
"Apache-2.0"
] | 8
|
2016-04-05T12:36:54.000Z
|
2017-05-15T16:00:08.000Z
|
samples/attributes.py
|
DavidJohnGee/clicrud
|
f1f178ac44649efe7b7681d37e97d2632b8971b2
|
[
"Apache-2.0"
] | 7
|
2016-06-02T23:39:05.000Z
|
2021-03-25T20:52:46.000Z
|
#!/usr/bin/env python
"""
Copyright 2015 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
class _attributes(dict):
def __init__(self):
# This is the dictionary that is generated with the attributes
self.devices = {}
def get_attributes(self, **kwargs):
"""This method gets all attributes in the associated list.
I've tried to avoid 'custom' work, but it's CLI. Tough.
If you want to have more attributes, build it in to this method.
"""
# Figure out how many devices in the stack and what
_tmp = self._transport_converter(
kwargs.get('transport'),
kwargs.get('instance'),
'show version | inc Management Module')
# Get the count of devices
_ndevices = len(_tmp)
logging.info("[attributes.py] Detected stack devices %s" % _ndevices)
# This section fills in the device type and number
_devcount = 1
for dev in (_tmp):
_tmp2 = dev.strip()
_tmp2 = _tmp2.split(" ")
self.devices[_devcount] = {'model': _tmp2[4]}
if _devcount < _ndevices:
_devcount += 1
# This section fills in the version of code
_tmp = self._transport_converter(
kwargs.get('transport'),
kwargs.get('instance'),
'show version | inc SW: Version')
_devcount = 1
for dev in (_tmp):
_tmp2 = dev.strip()
_tmp2 = _tmp2.split(" ")
self.devices[_devcount].update({'version': _tmp2[2]})
if _devcount < _ndevices:
_devcount += 1
logging.info("[attributes.py] Detected version of code %s" % _tmp2)
# This section fills in the uptime per device
_tmp = self._transport_converter(
kwargs.get('transport'),
kwargs.get('instance'),
'show version | inc uptime')
_devcount = 1
for dev in (_tmp):
_tmp2 = dev.strip()
_tmp2 = _tmp2.split(" ")
_tmp3 = ' '.join(_tmp2[6:])
self.devices[_devcount].update({'uptime': _tmp3})
if _devcount < _ndevices:
_devcount += 1
logging.info("[attributes.py] Detected uptime %s" % _tmp3)
# This section fills in the hostname
_tmp = self._transport_converter(
kwargs.get('transport'),
kwargs.get('instance'),
'show running-config | inc hostname')
if _tmp:
_devcount = 1
_tmp2 = str(_tmp)
_tmp2 = _tmp2.strip()
_tmp2 = _tmp2.split(" ")
for dev in range(_ndevices):
self.devices[_devcount].update({'hostname': _tmp2[1]})
if _devcount < _ndevices:
_devcount += 1
logging.info("[attributes.py] Detected hostname %s" % _tmp2[1])
if not _tmp:
self.devices[_devcount].update({'hostname': 'Not set'})
logging.info("[attributes.py] No hostname detected")
# This section fills in the serial
_tmp = self._transport_converter(
kwargs.get('transport'),
kwargs.get('instance'),
'show version | inc Serial')
_devcount = 1
for dev in (_tmp):
_tmp2 = dev.strip()
_tmp2 = _tmp2.split(" ")
self.devices[_devcount].update({'serial': _tmp2[3]})
if _devcount < _ndevices:
_devcount += 1
logging.info("[attributes.py] Detected serial number %s"
% _tmp2[3])
def set_attribute(self, **kwargs):
"""This method sets and can override each attribute.
Requires KWs: device (integer)
parameter (string)
value (anything)
"""
_device = kwargs.get('device')
_parameter = kwargs.get('parameter')
_value = kwargs.get('value')
self.devices[_device].update({_parameter: _value})
logging.info("[attributes.py] Manually set attribute: %s: %s",
_parameter, _value)
def _transport_converter(self, transport, instance, command):
"""This method converts between SSH and Telnet.
Ultimately abstracting away the differences between the two.
"""
        # 'is' compares identity, not equality; string literals must use '=='
        if transport == 'telnet':
            _output = instance.read(command)
            return _output
        if transport == 'ssh':
            _output = instance.read(command)
            return _output
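# --- Hedged usage sketch (illustrative only; not part of the original file) ---
# get_attributes() needs a live telnet/ssh transport object, so only the
# standalone set_attribute() path is exercised here.
if __name__ == "__main__":
    attrs = _attributes()
    attrs.devices[1] = {'model': 'ICX7450-48'}  # hypothetical seed entry
    attrs.set_attribute(device=1, parameter='hostname', value='lab-switch')
    print(attrs.devices)  # {1: {'model': 'ICX7450-48', 'hostname': 'lab-switch'}}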
| 37.793103
| 77
| 0.534672
| 566
| 5,480
| 4.998233
| 0.316254
| 0.041357
| 0.051962
| 0.056911
| 0.404383
| 0.323436
| 0.297278
| 0.297278
| 0.297278
| 0.297278
| 0
| 0.015425
| 0.372993
| 5,480
| 144
| 78
| 38.055556
| 0.807916
| 0.250547
| 0
| 0.54023
| 0
| 0
| 0.149171
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045977
| false
| 0
| 0.011494
| 0
| 0.091954
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28fd8a0aa5ca53d8cc4ae5edc75046373f2c1af3
| 1,929
|
py
|
Python
|
Q36_reversePairs.py
|
FreesiaLikesPomelo/-offer
|
14ac73cb46d13c7f5bbc294329a14f3c5995bc7a
|
[
"Apache-2.0"
] | null | null | null |
Q36_reversePairs.py
|
FreesiaLikesPomelo/-offer
|
14ac73cb46d13c7f5bbc294329a14f3c5995bc7a
|
[
"Apache-2.0"
] | null | null | null |
Q36_reversePairs.py
|
FreesiaLikesPomelo/-offer
|
14ac73cb46d13c7f5bbc294329a14f3c5995bc7a
|
[
"Apache-2.0"
] | null | null | null |
'''
Interview Question 51. Reverse pairs in an array
Two numbers in an array form a reverse pair when the earlier number is larger
than the later one. Given an array, count the total number of reverse pairs in it.
Example 1:
Input: [7,5,6,4]
Output: 5
Constraints:
0 <= array length <= 50000
https://leetcode-cn.com/problems/shu-zu-zhong-de-ni-xu-dui-lcof/
Runtime: 1564 ms, faster than 85.67% of Python3 submissions
Memory usage: 18.5 MB, better than 100.00% of Python3 submissions
'''
# merge-sort based solution
# test cases:
# 1. input [] or [int]: return 0
# 2. function test: a sorted (ascending) input array should return 0
from typing import List

class Solution:
def merge(self, left: List[int], right: List[int]):
# return sortedList:List[int],inverNum:int
lidx = len(left)-1
ridx = len(right)-1
idx = ridx+lidx+1
result = list(range(idx+1))
inverNum = 0
while lidx>=0 and ridx>=0:
            if left[lidx] > right[ridx]:
                # left[lidx] exceeds right[0..ridx], so it forms ridx+1 pairs
                inverNum += (ridx + 1)
result[idx] = left[lidx]
idx-=1
lidx-=1
else:
result[idx] = right[ridx]
idx-=1
ridx-=1
        if lidx < 0:
            # left list exhausted; copy the remaining right elements
while ridx>=0:
result[idx] = right[ridx]
idx-=1
ridx-=1
if ridx<0:
while lidx>=0:
result[idx] = left[lidx]
idx-=1
lidx-=1
return result, inverNum
def mergeSort(self, nums: List[int]):
# return sortedList:List[int],inverNum:int
if len(nums)<=1:
return nums, 0
mid = int(len(nums)/2)
inverNum = 0
left,lInverNum = self.mergeSort(nums[:mid])
right,rInverNum = self.mergeSort(nums[mid:])
result,tempInv = self.merge(left,right)
tempInv = lInverNum+rInverNum+tempInv
return result, tempInv
def reversePairs(self, nums: List[int]) -> int:
if nums==[] or len(nums)==1:
return 0
resList, inverNum = self.mergeSort(nums)
return inverNum
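# --- Hedged usage sketch (hypothetical driver, not in the original file) ---
if __name__ == "__main__":
    s = Solution()
    print(s.reversePairs([7, 5, 6, 4]))  # expected: 5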
| 26.424658
| 64
| 0.524624
| 242
| 1,929
| 4.181818
| 0.35124
| 0.041502
| 0.050395
| 0.045455
| 0.189723
| 0.189723
| 0.189723
| 0.189723
| 0.057312
| 0
| 0
| 0.048722
| 0.350959
| 1,929
| 72
| 65
| 26.791667
| 0.759585
| 0.248834
| 0
| 0.325581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0
| 0
| 0.209302
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28fd8b4c1c5abdea704fd69e0b99370a0f6f8997
| 21,954
|
py
|
Python
|
Apps/phgreynoise/greynoise_connector.py
|
ryanbsaunders/phantom-apps
|
1befda793a08d366fbd443894f993efb1baf9635
|
[
"Apache-2.0"
] | 2
|
2021-07-23T03:51:30.000Z
|
2021-08-12T14:13:04.000Z
|
Apps/phgreynoise/greynoise_connector.py
|
ryanbsaunders/phantom-apps
|
1befda793a08d366fbd443894f993efb1baf9635
|
[
"Apache-2.0"
] | 4
|
2021-10-04T09:22:02.000Z
|
2021-11-01T12:00:04.000Z
|
Apps/phgreynoise/greynoise_connector.py
|
ryanbsaunders/phantom-apps
|
1befda793a08d366fbd443894f993efb1baf9635
|
[
"Apache-2.0"
] | 2
|
2021-05-15T17:31:24.000Z
|
2021-07-23T03:51:42.000Z
|
# File: greynoise_connector.py
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
# Python 3 Compatibility imports
from __future__ import print_function, unicode_literals
# Phantom App imports
import phantom.app as phantom
from phantom.base_connector import BaseConnector
from phantom.action_result import ActionResult
from greynoise_consts import *
import requests
import json
from requests.utils import requote_uri
from six.moves.urllib.parse import urljoin as _urljoin
import urllib.parse
def urljoin(base, url):
return _urljoin("%s/" % base.rstrip("/"), url.lstrip("/"))
class GreyNoiseConnector(BaseConnector):
"""Connector for GreyNoise App."""
def __init__(self):
"""GreyNoise App Constructor."""
super(GreyNoiseConnector, self).__init__()
self._session = None
self._app_version = None
self._api_key = None
def validate_parameters(self, param):
# Disable BaseConnector's validate functionality, since this App supports unicode domains and the
# validation routines don't
return phantom.APP_SUCCESS
def _get_error_message_from_exception(self, e):
""" This method is used to get appropriate error messages from the exception.
:param e: Exception object
:return: error message
"""
try:
if e.args:
if len(e.args) > 1:
error_code = e.args[0]
error_msg = e.args[1]
elif len(e.args) == 1:
error_code = ERR_CODE_MSG
error_msg = e.args[0]
else:
error_code = ERR_CODE_MSG
error_msg = ERR_MSG_UNAVAILABLE
        except Exception:
error_code = ERR_CODE_MSG
error_msg = ERR_MSG_UNAVAILABLE
try:
if error_code in ERR_CODE_MSG:
error_text = "Error Message: {0}".format(error_msg)
else:
error_text = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
        except Exception:
self.debug_print(PARSE_ERR_MSG)
error_text = PARSE_ERR_MSG
return error_text
def _validate_integer(self, action_result, parameter, key):
if parameter:
try:
if not float(parameter).is_integer():
return action_result.set_status(phantom.APP_ERROR, VALID_INTEGER_MSG.format(key=key)), None
parameter = int(parameter)
            except Exception:
return action_result.set_status(phantom.APP_ERROR, VALID_INTEGER_MSG.format(key=key)), None
if parameter < 0:
return action_result.set_status(phantom.APP_ERROR, NON_NEGATIVE_INTEGER_MSG.format(key=key)), None
return phantom.APP_SUCCESS, parameter
def get_session(self):
if self._session is None:
self._session = requests.Session()
self._session.params.update({
"api-key": self._api_key
})
return self._session
def _make_rest_call(self, action_result, method, *args, error_on_404=True, **kwargs):
session = self.get_session()
response_json = None
status_code = None
try:
r = session.request(method, *args, **kwargs)
if r.status_code != 404 or error_on_404:
r.raise_for_status()
status_code = r.status_code
except requests.exceptions.HTTPError as e:
err_msg = self._get_error_message_from_exception(e)
err_msg = urllib.parse.unquote(err_msg)
ret_val = action_result.set_status(phantom.APP_ERROR,
"HTTP error occurred while making REST call: {0}".format(err_msg))
except Exception as e:
err_msg = self._get_error_message_from_exception(e)
ret_val = action_result.set_status(phantom.APP_ERROR,
"General error occurred while making REST call: {0}".format(err_msg))
else:
try:
response_json = r.json()
ret_val = phantom.APP_SUCCESS
except Exception as e:
err_msg = self._get_error_message_from_exception(e)
ret_val = action_result.set_status(phantom.APP_ERROR,
"Unable to parse JSON response. Error: {0}".format(err_msg))
return (ret_val, response_json, status_code)
def _check_apikey(self, action_result):
self.save_progress("Testing API key")
ret_val, response_json, status_code = self._make_rest_call(
action_result,
"get",
API_KEY_CHECK_URL,
headers=self._headers
)
if phantom.is_fail(ret_val):
self.save_progress("API key check Failed")
return ret_val
if response_json is None:
self.save_progress("No response from API")
return action_result.set_status(phantom.APP_ERROR, "No response from API")
elif response_json.get("message") == "pong":
self.save_progress("Validated API Key")
self.debug_print("Validated API Key")
return phantom.APP_SUCCESS
else:
self.save_progress("Invalid response from API")
try:
response_json = json.dumps(response_json)
            except Exception:
return action_result.set_status(phantom.APP_ERROR, "Invalid response from API")
return action_result.set_status(phantom.APP_ERROR, "Invalid response from API: %s" % response_json)
def _test_connectivity(self, param):
action_result = self.add_action_result(ActionResult(dict(param)))
ret_val = self._check_apikey(action_result)
if phantom.is_fail(ret_val):
self.save_progress("Test Connectivity Failed")
return ret_val
self.save_progress("Test Connectivity Passed")
return action_result.set_status(phantom.APP_SUCCESS)
def _lookup_ip(self, param):
action_result = self.add_action_result(ActionResult(dict(param)))
ret_val = self._check_apikey(action_result)
if phantom.is_fail(ret_val):
return ret_val
ret_val, response_json, status_code = self._make_rest_call(
action_result,
"get",
LOOKUP_IP_URL.format(ip=param["ip"]),
headers=self._headers
)
if phantom.is_fail(ret_val):
return ret_val
result_data = {}
action_result.add_data(result_data)
result_data.update(response_json)
try:
result_data["visualization"] = VISUALIZATION_URL.format(ip=result_data["ip"])
if result_data["code"] in CODES:
result_data["code_meaning"] = CODES[result_data["code"]]
else:
result_data["code_meaning"] = "This code is unmapped"
except KeyError:
return action_result.set_status(phantom.APP_ERROR, "Error occurred while processing API response")
return action_result.set_status(phantom.APP_SUCCESS)
def _ip_reputation(self, param):
action_result = self.add_action_result(ActionResult(dict(param)))
ret_val = self._check_apikey(action_result)
if phantom.is_fail(ret_val):
return ret_val
ret_val, response_json, status_code = self._make_rest_call(
action_result,
"get",
IP_REPUTATION_URL.format(ip=param["ip"]),
headers=self._headers
)
if phantom.is_fail(ret_val):
return ret_val
result_data = {}
action_result.add_data(result_data)
result_data.update(response_json)
try:
result_data["visualization"] = VISUALIZATION_URL.format(ip=result_data["ip"])
except KeyError:
return action_result.set_status(phantom.APP_ERROR, "Error occurred while processing API response")
return action_result.set_status(phantom.APP_SUCCESS)
def _gnql_query(self, param, is_poll=False, action_result=None):
if not is_poll:
action_result = self.add_action_result(ActionResult(dict(param)))
ret_val = self._check_apikey(action_result)
if phantom.is_fail(ret_val):
if is_poll:
return ret_val, None
else:
return ret_val
first_flag = True
remaining_results_flag = True
scroll_token = ""
full_response = {}
size = param["size"]
# Validate 'size' action parameter
ret_val, size = self._validate_integer(action_result, size, SIZE_ACTION_PARAM)
if phantom.is_fail(ret_val):
if is_poll:
return action_result.get_status(), None
else:
return action_result.get_status()
while remaining_results_flag:
if first_flag:
ret_val, response_json, status_code = self._make_rest_call(
action_result,
"get",
GNQL_QUERY_URl,
headers=self._headers,
params=(('query', param["query"]),
('size', size))
                )
                if phantom.is_fail(ret_val) or response_json is None:
                    break  # fall through to the is_fail check below
                full_response.update(response_json)
if "scroll" in full_response:
scroll_token = full_response["scroll"]
if "complete" in full_response or len(full_response["data"]) >= size:
remaining_results_flag = False
elif "message" in full_response:
if full_response["message"] == "no results":
remaining_results_flag = False
first_flag = False
if remaining_results_flag:
ret_val, response_json, status_code = self._make_rest_call(
action_result,
"get",
GNQL_QUERY_URl,
headers=self._headers,
params=(('query', param["query"]),
('size', size),
('scroll', scroll_token))
                )
                if phantom.is_fail(ret_val) or response_json is None:
                    break  # fall through to the is_fail check below
                full_response["complete"] = response_json["complete"]
if "scroll" in response_json:
full_response["scroll"] = response_json["scroll"]
for item in response_json["data"]:
full_response["data"].append(item)
if "scroll" in full_response:
scroll_token = full_response["scroll"]
if "complete" in full_response or len(full_response["data"]) >= size:
remaining_results_flag = False
elif "message" in full_response:
if full_response["message"] == "no results":
remaining_results_flag = False
else:
remaining_results_flag = True
if phantom.is_fail(ret_val):
if is_poll:
return ret_val, None
else:
return ret_val
result_data = {}
action_result.add_data(result_data)
try:
for entry in full_response["data"]:
entry["visualization"] = VISUALIZATION_URL.format(ip=entry["ip"])
except KeyError:
error_msg = "Error occurred while processing API response"
if is_poll:
return action_result.set_status(phantom.APP_ERROR, error_msg), None
else:
return action_result.set_status(phantom.APP_ERROR, error_msg)
result_data.update(full_response)
if is_poll:
return ret_val, result_data
else:
return action_result.set_status(phantom.APP_SUCCESS)
def _lookup_ips(self, param):
action_result = self.add_action_result(ActionResult(dict(param)))
ret_val = self._check_apikey(action_result)
if phantom.is_fail(ret_val):
return ret_val
try:
ips = [x.strip() for x in param["ips"].split(",")]
ips = list(filter(None, ips))
if not ips:
return action_result.set_status(phantom.APP_ERROR, INVALID_COMMA_SEPARATED_VALUE_ERR_MSG.format(key='ips'))
ips = ",".join(ips)
ips_string = requote_uri(ips)
except Exception as e:
err = self._get_error_message_from_exception(e)
err_msg = "Error occurred while processing 'ips' action parameter. {0}".format(err)
return action_result.set_status(phantom.APP_ERROR, err_msg)
ret_val, response_json, status_code = self._make_rest_call(
action_result,
"get",
LOOKUP_IPS_URL.format(ips=ips_string),
headers=self._headers
)
if phantom.is_fail(ret_val):
return ret_val
result_data = []
action_result.add_data(result_data)
try:
for result in response_json:
if result["code"] in CODES:
result["code_meaning"] = CODES[result["code"]]
else:
result["code_meaning"] = "This code is unmapped"
result["visualization"] = VISUALIZATION_URL.format(ip=result["ip"])
result_data.append(result)
return action_result.set_status(phantom.APP_SUCCESS)
except Exception as e:
err = self._get_error_message_from_exception(e)
err_msg = "Error occurred while processing results: {0}".format(err)
return action_result.set_status(phantom.APP_ERROR, err_msg)
def _process_query(self, data):
# spawn container for every item returned
if data["count"] > 0:
try:
for entry in data["data"]:
ip = entry["ip"]
self.save_progress("Processing IP address {}".format(ip))
container = {
"custom_fields": {},
"data": {},
"name": "",
"description": "Container added by GreyNoise",
"label": self.get_config().get("ingest", {}).get("container_label"),
"sensitivity": "amber",
"source_data_identifier": "",
"tags": entry["tags"],
}
if entry["classification"] == "malicious":
container["severity"] = "high"
else:
container["severity"] = "low"
artifact_cef = {
'ip': entry['ip'],
'classification': entry['classification'],
'first_seen': entry['first_seen'],
'last_seen': entry['last_seen'],
'actor': entry['actor'],
'organization': entry['metadata']['organization'],
'asn': entry['metadata']['asn']
}
if entry['metadata']['country']:
artifact_cef['country'] = entry['metadata']['country']
if entry['metadata']['city']:
artifact_cef['city'] = entry['metadata']['city']
container["artifacts"] = [{
"cef": artifact_cef,
"description": "Artifact added by GreyNoise",
"label": container["label"],
"name": "GreyNoise Query Language Entry",
"source_data_identifier": container["source_data_identifier"],
"severity": container["severity"]
}]
container["name"] = "GreyNoise Query Language Entry"
ret_val, container_creation_msg, container_id = self.save_container(container)
if phantom.is_fail(ret_val):
self.save_progress("Error occurred while saving the container")
self.debug_print(container_creation_msg)
continue
self.save_progress("Created %s" % container_id)
except Exception as e:
err = self._get_error_message_from_exception(e)
err_msg = "Error occurred while processing query data. {}".format(err)
self.debug_print(err_msg)
return phantom.APP_ERROR
return phantom.APP_SUCCESS
else:
self.save_progress("No results matching your GNQL query were found")
return phantom.APP_SUCCESS
def _on_poll(self, param):
action_result = self.add_action_result(ActionResult(dict(param)))
if self.is_poll_now():
self.save_progress('Due to the nature of the API, the '
'artifact limits imposed by POLL NOW are '
'ignored. As a result POLL NOW will simply '
'create a container for each artifact.')
config = self.get_config()
param["query"] = config.get("on_poll_query")
if self.is_poll_now():
param["size"] = param.get(phantom.APP_JSON_CONTAINER_COUNT, 25)
else:
on_poll_size = config.get("on_poll_size", 25)
# Validate 'on_poll_size' config parameter
ret_val, on_poll_size = self._validate_integer(action_result, on_poll_size, ONPOLL_SIZE_CONFIG_PARAM)
if phantom.is_fail(ret_val):
return action_result.get_status()
param["size"] = on_poll_size
if param["query"] == "Please refer to the documentation":
self.save_progress("Default on poll query unchanged, please enter a valid GNQL query")
return action_result.set_status(phantom.APP_ERROR, "Default on poll query unchanged")
ret_val, data = self._gnql_query(param, is_poll=True, action_result=action_result)
if phantom.is_fail(ret_val):
return action_result.get_status()
ret_val = self._process_query(data)
if phantom.is_fail(ret_val):
return action_result.set_status(phantom.APP_ERROR, "Failed to process the query")
else:
return action_result.set_status(phantom.APP_SUCCESS)
def handle_action(self, param):
ret_val = phantom.APP_SUCCESS
action = self.get_action_identifier()
if action == "test_connectivity":
ret_val = self._test_connectivity(param)
elif action == "lookup_ip":
ret_val = self._lookup_ip(param)
elif action == "ip_reputation":
ret_val = self._ip_reputation(param)
elif action == "gnql_query":
ret_val = self._gnql_query(param)
elif action == "lookup_ips":
ret_val = self._lookup_ips(param)
elif action == "on_poll":
ret_val = self._on_poll(param)
return ret_val
def initialize(self):
"""Initialize the Phantom integration."""
self._state = self.load_state()
config = self.get_config()
self._api_key = config['api_key']
app_json = self.get_app_json()
self._app_version = app_json["app_version"]
self._headers = {
"Accept": "application/json",
"key": self._api_key,
"User-Agent": "greynoise-phantom-integration-v{0}".format(self._app_version)
}
return phantom.APP_SUCCESS
def finalize(self):
"""Finalize the Phantom integration."""
# Save the state, this data is saved across actions and app upgrades
self.save_state(self._state)
return phantom.APP_SUCCESS
if __name__ == "__main__":
import pudb
import argparse
pudb.set_trace()
argparser = argparse.ArgumentParser()
argparser.add_argument("input_test_json", help="Input Test JSON file")
argparser.add_argument("-u", "--username", help="username", required=False)
argparser.add_argument("-p", "--password", help="password", required=False)
args = argparser.parse_args()
session_id = None
username = args.username
password = args.password
if username is not None and password is None:
# User specified a username but not a password, so ask
import getpass
password = getpass.getpass("Password: ")
if username and password:
login_url = BaseConnector._get_phantom_base_url() + "login"
try:
print("Accessing the Login page")
r = requests.get(login_url, verify=False)
csrftoken = r.cookies["csrftoken"]
data = dict()
data["username"] = username
data["password"] = password
data["csrfmiddlewaretoken"] = csrftoken
headers = dict()
headers["Cookie"] = "csrftoken=" + csrftoken
headers["Referer"] = login_url
print("Logging into Platform to get the session id")
r2 = requests.post(login_url, verify=False, data=data, headers=headers)
session_id = r2.cookies["sessionid"]
except Exception as e:
print("Unable to get session id from the platform. Error: " + str(e))
exit(1)
with open(args.input_test_json) as f:
in_json = f.read()
in_json = json.loads(in_json)
print(json.dumps(in_json, indent=4))
connector = GreyNoiseConnector()
connector.print_progress_message = True
if session_id is not None:
in_json["user_session_token"] = session_id
connector._set_csrf_info(csrftoken, headers["Referer"])
ret_val = connector._handle_action(json.dumps(in_json), None)
print(json.dumps(json.loads(ret_val), indent=4))
exit(0)
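# --- Hedged invocation sketch (values illustrative only) ---
#   python greynoise_connector.py action_run.json -u admin
# where action_run.json is a Phantom action-run JSON whose action identifier
# is one of those dispatched in handle_action (e.g. "lookup_ip").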
| 38.856637
| 123
| 0.579029
| 2,452
| 21,954
| 4.902529
| 0.132545
| 0.063888
| 0.037434
| 0.041927
| 0.444555
| 0.412362
| 0.384827
| 0.377256
| 0.355711
| 0.333417
| 0
| 0.002717
| 0.329325
| 21,954
| 564
| 124
| 38.925532
| 0.813706
| 0.034709
| 0
| 0.401766
| 0
| 0
| 0.123858
| 0.004733
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039735
| false
| 0.01766
| 0.028698
| 0.004415
| 0.183223
| 0.024283
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
28ff68d107d4e01cf5ece21ad9bb66128f102b8f
| 373
|
py
|
Python
|
src/pgbackup/pgdump.py
|
narbutas/pgbackup
|
2bc65dc9c4cdba135e0ae68c71d034de50fddda8
|
[
"Apache-2.0"
] | null | null | null |
src/pgbackup/pgdump.py
|
narbutas/pgbackup
|
2bc65dc9c4cdba135e0ae68c71d034de50fddda8
|
[
"Apache-2.0"
] | null | null | null |
src/pgbackup/pgdump.py
|
narbutas/pgbackup
|
2bc65dc9c4cdba135e0ae68c71d034de50fddda8
|
[
"Apache-2.0"
] | null | null | null |
import subprocess
import sys
def dump(url):
try:
return subprocess.Popen(['pg_dump', url], stdout=subprocess.PIPE)
except OSError as err:
print(f"Error: {err}")
sys.exit(1)
def dump_file_name(url, timestamp=None):
db_name = url.split('/')[-1]
db_name = db_name.split('?')[0]
if timestamp:
return f"{db_name}-{timestamp}.sql"
else:
return f"{db_name}.sql"
| 21.941176
| 67
| 0.686327
| 60
| 373
| 4.133333
| 0.516667
| 0.120968
| 0.072581
| 0.104839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009346
| 0.13941
| 373
| 17
| 68
| 21.941176
| 0.76324
| 0
| 0
| 0
| 0
| 0
| 0.157754
| 0.066845
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.133333
| 0
| 0.466667
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e900f1fbad104966ef7247511d53bc745e2f6385
| 1,190
|
py
|
Python
|
e2_s13.py
|
iansantana00/Python-Course
|
43852aa64c93099342ab4765b0fe8729a959449e
|
[
"MIT"
] | 2
|
2022-01-13T15:55:58.000Z
|
2022-02-11T23:18:34.000Z
|
e2_s13.py
|
iansantana00/Python-Course
|
43852aa64c93099342ab4765b0fe8729a959449e
|
[
"MIT"
] | null | null | null |
e2_s13.py
|
iansantana00/Python-Course
|
43852aa64c93099342ab4765b0fe8729a959449e
|
[
"MIT"
] | null | null | null |
numero_vogal = 0
espaço = 0
numero_consoante = 0
contador = 0
escrita = 0
arquivo = input('Enter the name of your file (.txt): ')
with open(arquivo, 'w', encoding='utf-8') as texto:
while escrita != 'sair':
        escrita = input('Type ("sair" to quit): ')
texto.write(escrita)
texto.write('\n')
contador += 1
with open(arquivo, encoding='utf-8') as texto:
file = texto.read()
file.split('\n')
for vogal in file:
if vogal in ('a', 'A', 'e', 'E', 'i', 'I', 'o', 'O', 'u', 'U', 'á', 'Á', 'é', 'É', 'í', 'Í', 'ó', 'Ó', 'ú',
'Ú', 'Â', 'â', 'ã', 'Ã', 'Õ', 'õ', 'ô', 'Ô', 'ê', 'Ê'):
numero_vogal += 1
for consoante in file:
if consoante in ('Q', 'q', 'W', 'w', 'R', 'r', 'T', 't', 'Y', 'y', 'P', 'p', 'S', 's', 'D', 'F', 'f', 'g',
'G', 'h', 'H', 'J', 'j', 'K', 'k', 'L', 'l', 'Ç', 'ç', 'Z', 'z', 'X', 'x', 'C', 'c', 'V', 'v',
'B', 'b', 'N', 'n', 'M', 'm'):
numero_consoante += 1
# "sair" is written to the file too: subtract its line, its 2 vowels (a, i)
# and its 2 consonants (s, r) from the totals
print(f'The number of lines in the text is {contador - 1}')
print(f'The number of vowels is {numero_vogal - 2}')
print(f'The number of consonants is {numero_consoante - 2}')
| 30.512821
| 117
| 0.443697
| 182
| 1,190
| 2.868132
| 0.412088
| 0.063218
| 0.04023
| 0.074713
| 0.162835
| 0.061303
| 0
| 0
| 0
| 0
| 0
| 0.015421
| 0.291597
| 1,190
| 38
| 118
| 31.315789
| 0.603796
| 0
| 0
| 0
| 0
| 0
| 0.231771
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e903dc912d5bbd81aedab3090527461e1da894a1
| 2,843
|
py
|
Python
|
mammoth/ensembl.py
|
hbc/mammoth_code
|
2e6909514e8ff232981ea2cb03f078257bc5c847
|
[
"MIT"
] | 1
|
2017-05-22T01:18:13.000Z
|
2017-05-22T01:18:13.000Z
|
mammoth/ensembl.py
|
hbc/mammoth_code
|
2e6909514e8ff232981ea2cb03f078257bc5c847
|
[
"MIT"
] | null | null | null |
mammoth/ensembl.py
|
hbc/mammoth_code
|
2e6909514e8ff232981ea2cb03f078257bc5c847
|
[
"MIT"
] | null | null | null |
"""ensembl interaction function"""
import os
import requests, sys
import yaml
import logging
import gffutils
from collections import defaultdict
import mammoth.logger as mylog
server = "http://rest.ensembl.org{ext}"
ext = "/sequence/id/{id}?type=cds"
prot = "/sequence/id/{id}?type=protein"
sequence = "/sequence/region/elephant/{chr}:{start}..{end}:{strand}?"
def query_sequence(chr, start, end, strand):
r = requests.get(server.format(ext=sequence.format(**locals())), headers={ "Content-Type" : "text/plain"})
if not r.ok:
r.raise_for_status()
return None
    return yaml.safe_load(r.text)
def query_exon(id):
r = requests.get(server.format(ext=ext.format(id=id)), headers={ "Content-Type" : "application/json"})
if not r.ok:
r.raise_for_status()
return None
    return yaml.safe_load(r.text)
def query_prot(id):
r = requests.get(server.format(ext=prot.format(id=id)), headers={ "Content-Type" : "application/json"})
if not r.ok:
r.raise_for_status()
return None
    return yaml.safe_load(r.text)
def _get_db(db):
    # 'db_file' was undefined here; use the argument that was passed in
    return gffutils.FeatureDB(db)
def _convert_to_db(db):
out = "%s.db" % db
if os.path.exists(out):
return gffutils.FeatureDB(out)
gffutils.create_db(db, disable_infer_transcripts=True, disable_infer_genes=True, dbfn=out)
return gffutils.FeatureDB(out)
def get_genes(db):
db = _convert_to_db(db)
genome = defaultdict(dict)
exons_pos = defaultdict(dict)
for gene in db.features_of_type("gene"):
if "gene_name" not in gene.attributes:
continue
if gene.attributes["gene_biotype"][0] == "protein_coding":
exon_seen = set()
for tx in db.children(gene, featuretype='transcript', order_by='start'):
if tx.attributes["transcript_biotype"][0] == "protein_coding":
# txs.add(tx["transcript_id"])
exons = dict()
for e in db.children(tx, featuretype='exon', order_by='start'):
if e.attributes['exon_id'][0] not in exon_seen:
exons.update({int(e.attributes['exon_number'][0]): e.attributes['exon_id'][0]})
exons_pos.update({e.attributes['exon_id'][0]: {'chrom': e.chrom,
'start': e.start,
'end': e.end,
'strand': e.strand}})
exon_seen.add(e.attributes['exon_id'][0])
genome[gene.attributes["gene_name"][0]].update({tx.attributes["transcript_id"][0]: {'size': abs(tx.end-tx.start),
'exons': exons}})
return genome, exons_pos
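# --- Hedged usage sketch (needs network access to rest.ensembl.org; the
# coordinates below are illustrative, not a known elephant locus) ---
if __name__ == "__main__":
    print(query_sequence(chr=1, start=100000, end=100100, strand=1))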
| 38.418919
| 133
| 0.570172
| 353
| 2,843
| 4.458924
| 0.291785
| 0.015248
| 0.047649
| 0.043202
| 0.302414
| 0.219822
| 0.202668
| 0.16582
| 0.16582
| 0.16582
| 0
| 0.004455
| 0.289483
| 2,843
| 73
| 134
| 38.945205
| 0.774752
| 0.020401
| 0
| 0.233333
| 0
| 0
| 0.146508
| 0.040317
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.116667
| 0.016667
| 0.383333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e90a508ef0e30d0bdd4948ae4a031308ac6c728e
| 10,317
|
py
|
Python
|
pag_demo.py
|
Topaz1618/MeowFile
|
33878abfb552128368ad6bbf5396d45f21906ce3
|
[
"MIT"
] | null | null | null |
pag_demo.py
|
Topaz1618/MeowFile
|
33878abfb552128368ad6bbf5396d45f21906ce3
|
[
"MIT"
] | null | null | null |
pag_demo.py
|
Topaz1618/MeowFile
|
33878abfb552128368ad6bbf5396d45f21906ce3
|
[
"MIT"
] | null | null | null |
__copyright__ = """ Copyright (c) 2021 HangYan. """
__license__ = 'MIT license'
__version__ = '1.0'
__author__ = 'topaz1668@gmail.com'
from models import conn_db, UploadFiles
from sqlalchemy import func, distinct, or_, and_
import datetime
from datetime import timedelta
import time
import math
def string_to_ts(str_time):
try:
if not isinstance(str_time, str):
str_time = str(str_time)
ts = time.mktime(time.strptime(str_time, "%Y-%m-%d %H:%M:%S"))
return ts
except ValueError as e:
print("Catch error: ", e)
return 0
def ts_to_string(ts):
if not isinstance(ts, float):
ts = float(ts)
time_array = time.localtime(ts)
str_time = time.strftime("%Y-%m-%d %H:%M:%S", time_array)
return str_time
# def page_limit():
# """ 10+ ms """
# session = conn_db()
# print(f"Before: {time.time()}")
# order_obj = session.query(ShopOrder).filter(
# ShopOrder.user_id == 1,
# ShopOrder.is_deleted == 0,
# ).all()
# print(f"After1: {time.time()}")
#
# print(len(order_obj))
# print(f"After2: {time.time()}")
#
#
# def page_limit_scalar():
# """ 5 ms """
# session = conn_db()
# print(f"Before: {time.time()}")
# total = session.query(func.count(distinct(ShopOrder.id))).filter(
# ShopOrder.user_id == 1,
# ShopOrder.is_deleted == 0,
# ).scalar()
#
# print(f"After: {time.time()} {total}")
#
# total_page = total / PAGE_LIMIT
# total_page = math.ceil(total_page)
#
# print("Total page: ", total_page)
# return total, total_page
#
#
# def slice_data(total, current_page=1):
# print(f"Current page: {current_page}")
# session = conn_db()
# start = (current_page -1) * PAGE_LIMIT
# end = total if PAGE_LIMIT * current_page > total else PAGE_LIMIT * current_page
# order_obj_list = session.query(ShopOrder).filter(
# ShopOrder.user_id == 1,
# ShopOrder.is_deleted == 0,
# )[start:end]
#
# for i in order_obj_list:
# print(i.id)
#
#
# def get_all():
# session = conn_db()
# order_obj_list = session.query(ShopOrder).filter(
# ShopOrder.user_id == 1,
# ).all()
#
# for i in order_obj_list:
# print(i.id)
#
#
# def order_by_colum():
# session = conn_db()
# results = session.query(ShopGoods).filter(ShopGoods.is_delete==0).order_by(ShopGoods.goods_price.desc()).all() # high to low
# # results = session.query(ShopGoods).filter(ShopGoods.is_delete==0).order_by(ShopGoods.goods_price).all() # low to high
#
# for i in results:
# print(i.goods_price)
#
# print(results)
#
#
# def order_by_join():
# session = conn_db()
# before = time.time()
# total = session.query(func.count(distinct(ShopGoods.id))).filter(
# or_(*[ShopGoods.menu_path == name for name in ["Actor"]]),
# ).scalar()
# print("Total: ", total)
#
# results = session.query(ShopGoods).filter(
# or_(*[ShopGoods.menu_path == name for name in ["Clothes", ]]),
# ).order_by(ShopGoods.goods_price.desc())[0:3] # high to low
#
# # goods_list_obj = session.query(ShopGoods).filter(
# # or_(*[ShopGoods.goods_name == name for name in filter_list])).order_by(
# # ShopGoods.goods_price.desc())[start:end]
#
# after = time.time()
# for i in results:
# print(i.goods_price, i.goods_name)
#
# print(results, after - before)
#
#
# def order_by_or():
# session = conn_db()
# results = session.query(ShopMainMenu).filter(
# or_(
# ShopMainMenu.id == 1,
# ShopMainMenu.id == 2)).all()
#
# for i in results:
# print(i.name)
#
# print(results)
#
#
# def get_by_negate():
# # TEST_USER = ["15600803270", "15612345678", "15600000000", "15600809876", "15600800080","15600801111","15611111111","15612111111","15711111111","15600000001","15600000002","15600000003","15600802222","15611119999", "18310703270", "18310700909", "18434471028", "17747121395", "18622606402", "18610404330", "18582045352", "18262676236" ]
# # TEST_USER = ["15600803270", "15612345678", "18310703270", "18434471028",]
# session = conn_db()
# # total = session.query(func.count(distinct(ShopUser.id))).filter(
# # *[ShopUser.phonenum != name for name in TEST_USER]
# # ).scalar()
# #
# # session.close()
# # print("all data", total)
#
#
# def get_avg():
# TEST_USER = [
# "15600803270",
# "15612345678",
# "18310703270",
# "18434471028",
# "15600801111",
# "17747121395",
# "15600802222",
# "18622606402",
# # "18610404330",
# # "18582045352",
# # "18262676236",
# ]
#
# session = conn_db()
# access_sum = session.query(func.sum(distinct(ShopUser.access_times))).filter(
# *[ShopUser.phonenum != name for name in TEST_USER]
# ).scalar()
#
# total = session.query(func.count(distinct(ShopUser.id))).filter(
# *[ShopUser.phonenum != name for name in TEST_USER]
# ).scalar()
#
# access_time_avg = 0
# if total != 0:
# access_time_avg = round(access_sum / total, 2)
#
#
# session.close()
# print("all data", total)
#
#
# def test_about_cut_value():
# session = conn_db()
# start = 0
# end = 2
# uid = 1
# myitems_list_obj = session.query(ShopPersonalItems).filter(
# ShopPersonalItems.uid == 1,
# )[start:end]
# # print(myitems_list_obj)
#
# for myitems_obj in myitems_list_obj:
# print(myitems_obj.id)
#
#
# def or_and_toghter():
# TEST_USER = [
# "15600803270",
# "15612345678",
# ]
# old_users_list = ["15612345678", "15101231234", "15101231236"]
# session = conn_db()
# usage_amount = session.query(func.count(distinct(ShopUser.id))).filter(
# and_(
# *[ShopUser.phonenum != name for name in TEST_USER],
# or_(
# *[ShopUser.phonenum == name for name in old_users_list],
# ShopUser.access_times > 0,
# ))
# ).scalar()
#
# statistics_users_obj= session.query(ShopUser).filter(
# and_(*[ShopUser.phonenum != name for name in TEST_USER],
# or_(
# *[ShopUser.phonenum == name for name in old_users_list],
# ShopUser.access_times > 0,
# ))
# ).all()
#
# # for statistics_obj in statistics_users_obj:
# # print(statistics_obj.id, type(statistics_obj.access_times))
# # print("!!!!!", usage_amount)
#
# day_time = datetime.date.today()
#
# today_usage_amount = session.query(func.count(distinct(ShopUser.id))).filter(
# *[ShopUser.phonenum != name for name in TEST_USER],
# ShopUser.last_access_time > day_time
# ).scalar()
#
# print(">>>", today_usage_amount)
#
# today_usage_amount = session.query(ShopUser).filter(
# *[ShopUser.phonenum != name for name in TEST_USER],
# ShopUser.last_access_time > day_time
# ).all()
#
# for i in today_usage_amount:
# print("!!!", i.last_access_time)
#
#
# def test_about_or():
# session = conn_db()
# TEST_USER = ["15600803270"]
# utc_time = datetime.datetime.utcnow()
#
# # internal_user_amount = session.query(func.count(ShopMember.id)).filter(
# # ShopMember.senior_expire_time >= utc_time + timedelta(days=100*12*30),
# # ).scalar()
#
#
# internal_user_amount = session.query(ShopMember.id).filter(
# ShopMember.senior_expire_time >= utc_time + timedelta(days=1 * 12 * 30),
# ).join(ShopUser).filter(
# or_(*[ShopUser.phonenum == name for name in TEST_USER])
# ).scalar()
#
# member_list_obj = session.query(ShopMember).filter(
# ShopMember.senior_expire_time >= utc_time + timedelta(days=130 * 12 * 30)
# ).all()
#
# uid_list = []
# for member_obj in member_list_obj:
# uid_list.append(member_obj.id)
#
# user_list_obj = session.query(ShopUser).filter(
# or_(
# *[ShopUser.phonenum == name for name in TEST_USER],
# *[ShopUser.id == id for id in uid_list],
# )
# ).all()
#
#
# # print("!!!", user_list_obj)
#
# for i in user_list_obj:
# print(">>> ", i)
#
# # internal_user_amount = session.query(ShopMember).filter(
# # ShopMember.senior_expire_time > utc_time + timedelta(days=10*12*30),
# # ).all()
# #
# # for i in internal_user_amount:
# # print(i.uid, i.senior_expire_time)
#
# print("count", internal_user_amount)
#
# def tog():
# session = conn_db()
# TEST_USER = ["15600803270"]
# utc_time = datetime.datetime.utcnow()
#
# user_list_obj = session.query(ShopUser).filter(
# *[ShopUser.phonenum != name for name in TEST_USER]
# ).join(ShopMember).filter(
# ShopMember.senior_expire_time >= utc_time + timedelta(days=30 * 12 * 100),
# ).order_by(ShopUser.id.desc())[0:10]
#
# for i in user_list_obj:
# print(i.phonenum)
def show_all_data():
session = conn_db()
file_obj_list = session.query(UploadFiles).filter(
UploadFiles.is_intranet == True,
UploadFiles.is_delete == False,
).all()
utc_time = datetime.datetime.utcnow()
for file_obj in file_obj_list:
if utc_time - file_obj.upload_time > timedelta(days=1):
print(f"file name: {file_obj.filename} Time: {file_obj.upload_time}" )
def show_desc_data():
session = conn_db()
file_obj_list = session.query(UploadFiles).filter(
UploadFiles.is_intranet == True,
UploadFiles.is_delete == False,
).order_by(UploadFiles.id.desc()).all()
utc_time = datetime.datetime.utcnow()
for file_obj in file_obj_list:
# if utc_time - file_obj.upload_time > timedelta(days=1):
print(f"file name: {file_obj.filename} Time: {file_obj.upload_time}" )
if __name__ == "__main__":
PAGE_LIMIT = 12
# total, total_page = page_limit_scalar()
# or_and_toghter()
# get_all()
# slice_data(total)
# for i in range(1, 7):
# slice_data(total, i)
# order_by_colum()
# order_by_join()
# order_by_or()
# get_by_negate()
# get_avg()
# test_about_cut_value()
# a = None
# string_to_ts(a)
# test_about_or()
# tog()
show_desc_data()
| 29.226629
| 342
| 0.602113
| 1,243
| 10,317
| 4.747385
| 0.152051
| 0.052872
| 0.033045
| 0.033045
| 0.543298
| 0.523471
| 0.463989
| 0.447382
| 0.429758
| 0.3818
| 0
| 0.072094
| 0.24038
| 10,317
| 353
| 343
| 29.226629
| 0.680873
| 0.773287
| 0
| 0.297872
| 0
| 0
| 0.116164
| 0.02175
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.12766
| 0
| 0.276596
| 0.06383
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e90aae947ee6b59303ae1471afa7007b7d9e535a
| 4,490
|
py
|
Python
|
test/orb.py
|
Tythos/oyb
|
0653c4fa24c73f4f2cb2d1c1a29d318f6e9cbd79
|
[
"MIT"
] | 1
|
2017-08-05T16:16:32.000Z
|
2017-08-05T16:16:32.000Z
|
test/orb.py
|
Tythos/oyb
|
0653c4fa24c73f4f2cb2d1c1a29d318f6e9cbd79
|
[
"MIT"
] | null | null | null |
test/orb.py
|
Tythos/oyb
|
0653c4fa24c73f4f2cb2d1c1a29d318f6e9cbd79
|
[
"MIT"
] | null | null | null |
"""
"""
import datetime
import unittest
import numpy
from math import pi
import oyb
from oyb import earth, anomaly
class ClassTests(unittest.TestCase):
def test_default(self):
o = oyb.Orbit()
def test_args(self):
o = oyb.Orbit(a_m=1.064e7, e=0.42607, i_rad=39.687*pi/180, O_rad=130.32*pi/180, w_rad=42.373*pi/180, M_rad=4.2866)
def test_example4p3(self):
rEci_m = numpy.array([-6.045e6, -3.490e6, 2.5e6])
vEci_mps = numpy.array([-3.457e3, 6.618e3, 2.533e3])
o = oyb.Orbit.fromRV(rEci_m, vEci_mps)
h_m2ps = o.getAngMom()
tht_rad = anomaly.mean2true(o.M_rad, o.e)
T_s = o.getPeriod()
self.assertTrue(abs(h_m2ps - 5.831e10) / h_m2ps < 1e-3)
self.assertTrue(abs(o.i_rad - 153.2 * pi / 180) / o.i_rad < 1e-3)
self.assertTrue(abs(o.O_rad - 255.3 * pi / 180) / o.O_rad < 1e-3)
self.assertTrue(abs(o.e - 0.1712) / o.e < 1e-3)
self.assertTrue(abs(o.w_rad - 20.07 * pi / 180) / o.w_rad < 1e-3)
self.assertTrue(abs(tht_rad - 28.45 * pi / 180) / tht_rad < 1e-3)
self.assertTrue(abs(T_s - 2.278 * 3600) / T_s < 1e-3)
def test_example2p8(self):
o = oyb.Orbit.fromHTht(1.545e6, 126 * pi / 180, 8.52e5, 58 * pi / 180)
hPer_m, hApo_m = o.getShape()
T_s = o.getPeriod()
self.assertTrue(abs(o.a_m - 7.593e6) / o.a_m < 1e-3)
self.assertTrue(abs(o.e - 0.08164) / o.e < 1e-3)
self.assertTrue(abs(hPer_m - 5.955e5) / hPer_m < 1e-3)
self.assertTrue(abs(T_s - 1.829 * 3600) / T_s < 1e-3)
class FrameTests(unittest.TestCase):
def test_pqw(self):
o = oyb.Orbit(e=0.5, M_rad=0.5*pi)
rPqw_m = o.getRpqw()
def test_example4p7mod(self):
e = 0.4
a_m = 8e10 / (earth.mu_m3ps2 * (1 - e**2))
M_rad = anomaly.true2mean(30 * pi / 180, e)
o = oyb.Orbit(a_m=a_m, e=e, i_rad=30*pi/180, O_rad=40*pi/180, w_rad=60*pi/180, M_rad=M_rad)
rEci_m = o.getReci()
class J2Tests(unittest.TestCase):
def test_raan(self):
o = oyb.MeanJ2(a_m=6.718e6, e=8.931e-3, i_rad=51.43*pi/180)
dRaan_degpday = o.getRaanRate() * 180/pi * 86400
self.assertTrue(abs(dRaan_degpday - 5.181) / dRaan_degpday < 1e-3)
def test_aop(self):
o = oyb.MeanJ2(a_m=6.718e6, e=8.931e-3, i_rad=51.43*pi/180)
dAop_degpday = o.getAopRate() * 180/pi * 86400
self.assertTrue(abs(dAop_degpday - 3.920) / dAop_degpday < 1e-3)
def test_example4p9(self):
o = oyb.MeanJ2.fromSunSync(100 * 60)
self.assertTrue(abs(o.a_m - (7.5863e5 + earth.eqRad_m)) / o.a_m < 1e-3)
self.assertTrue(abs(o.i_rad - 98.43 * pi / 180) / o.i_rad < 1e-3)
def test_example4p10(self):
o = oyb.MeanJ2.fromConstAop(3 * 3600)
shape = o.getShape()
self.assertTrue(abs(shape[0] - 5.215e5) / shape[0] < 1e-3)
self.assertTrue(abs(shape[1] - 7.842e6) / shape[1] < 1e-3)
def test_example4p11(self):
rEci_m = numpy.array([-3.67e6, -3.87e6, 4.4e6])
vEci_mps = numpy.array([4.7e3, -7.4e3, 1e3])
o = oyb.MeanJ2.fromRV(rEci_m, vEci_mps)
rEciNew_m = o.getReci(o.tEpoch_dt + datetime.timedelta(4))
rNew_m = rEciNew_m.dot(rEciNew_m)**0.5
drEci_m = rEciNew_m - numpy.array([9.672e6, 4.32e6, -8.691e6])
self.assertTrue(drEci_m.dot(drEci_m)**0.5 / rNew_m < 1e-3)
class PropertyTests(unittest.TestCase):
def setUp(self):
hPer_km = 400
hApo_km = 4000
self.o = oyb.Orbit()
self.o.setShape(1e3 * hPer_km, 1e3 * hApo_km)
def test_a(self):
self.assertTrue(abs(self.o.e - 0.2098) / self.o.e < 1e-3)
def test_b(self):
h_m2ps = self.o.getAngMom()
self.assertTrue(abs(h_m2ps - 5.7172e10) / h_m2ps < 1e-3)
def test_cd(self):
vPer_mps, vApo_mps = self.o.getShapeVel()
self.assertTrue(abs(vPer_mps - 8.435e3) / vPer_mps < 1e-3)
self.assertTrue(abs(vApo_mps - 5.509e3) / vApo_mps < 1e-3)
def test_e(self):
self.assertTrue(abs(self.o.a_m - 8.578e6) / self.o.a_m < 1e-3)
def test_f(self):
T_s = self.o.getPeriod()
self.assertTrue(abs(T_s - 2.196 * 3600) / T_s < 1e-3)
def test_g(self):
rTaa_m = self.o.getTaaRad()
self.assertTrue(abs(rTaa_m - 8.387e6) / rTaa_m < 1e-3)
if __name__ == '__main__':
unittest.main()
| 38.376068
| 122
| 0.57951
| 753
| 4,490
| 3.293493
| 0.231076
| 0.141129
| 0.164516
| 0.082258
| 0.328629
| 0.25
| 0.175806
| 0.075
| 0.053226
| 0.033871
| 0
| 0.137534
| 0.264811
| 4,490
| 116
| 123
| 38.706897
| 0.613753
| 0
| 0
| 0.042553
| 0
| 0
| 0.001785
| 0
| 0
| 0
| 0
| 0
| 0.265957
| 1
| 0.191489
| false
| 0
| 0.06383
| 0
| 0.297872
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e90be1f05a4443793696e6d766c9b0e422e47832
| 11,656
|
py
|
Python
|
src/python/WMComponent/JobArchiver/JobArchiverPoller.py
|
hufnagel/WMCore
|
b150cc725b68fc1cf8e6e0fa07c826226a4421fa
|
[
"Apache-2.0"
] | 1
|
2015-02-05T13:43:46.000Z
|
2015-02-05T13:43:46.000Z
|
src/python/WMComponent/JobArchiver/JobArchiverPoller.py
|
hufnagel/WMCore
|
b150cc725b68fc1cf8e6e0fa07c826226a4421fa
|
[
"Apache-2.0"
] | 1
|
2016-10-13T14:57:35.000Z
|
2016-10-13T14:57:35.000Z
|
src/python/WMComponent/JobArchiver/JobArchiverPoller.py
|
hufnagel/WMCore
|
b150cc725b68fc1cf8e6e0fa07c826226a4421fa
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
The actual jobArchiver algorithm
"""
import logging
import os
import os.path
import shutil
import tarfile
import threading
from Utils.IteratorTools import grouper
from Utils.Timers import timeFunction
from WMCore.DAOFactory import DAOFactory
from WMCore.JobStateMachine.ChangeState import ChangeState
from WMCore.Services.ReqMgrAux.ReqMgrAux import isDrainMode
from WMCore.WMBS.Fileset import Fileset
from WMCore.WMBS.Job import Job
from WMCore.WMException import WMException
from WMCore.WorkQueue.WorkQueueExceptions import WorkQueueNoMatchingElements
from WMCore.WorkQueue.WorkQueueUtils import queueFromConfig
from WMCore.WorkerThreads.BaseWorkerThread import BaseWorkerThread
class JobArchiverPollerException(WMException):
"""
_JobArchiverPollerException_
The Exception handler for the job archiver.
"""
class JobArchiverPoller(BaseWorkerThread):
"""
Polls for Error Conditions, handles them
"""
def __init__(self, config):
"""
Initialise class members
"""
BaseWorkerThread.__init__(self)
self.config = config
self.changeState = ChangeState(self.config)
myThread = threading.currentThread()
self.daoFactory = DAOFactory(package="WMCore.WMBS",
logger=myThread.logger,
dbinterface=myThread.dbi)
self.loadAction = self.daoFactory(classname="Jobs.LoadFromIDWithWorkflow")
# Variables
self.numberOfJobsToCluster = getattr(self.config.JobArchiver,
"numberOfJobsToCluster", 1000)
self.numberOfJobsToArchive = getattr(self.config.JobArchiver,
"numberOfJobsToArchive", 10000)
try:
self.logDir = getattr(config.JobArchiver, 'logDir',
os.path.join(config.JobArchiver.componentDir, 'logDir'))
if not os.path.isdir(self.logDir):
os.makedirs(self.logDir)
except Exception as ex:
msg = "Unhandled exception while setting up logDir!\n"
msg += str(ex)
logging.exception(msg)
raise JobArchiverPollerException(msg)
self.tier0Mode = hasattr(config, "Tier0Feeder")
try:
if not self.tier0Mode:
self.workQueue = queueFromConfig(self.config)
except Exception as ex:
msg = "Could not load workQueue"
msg += str(ex)
logging.error(msg)
# raise JobArchiverPollerException(msg)
return
def setup(self, parameters):
"""
Load DB objects required for queries
"""
return
def terminate(self, params):
"""
_terminate_
This function terminates the job after a final pass
"""
logging.debug("terminating. doing one more pass before we die")
self.algorithm(params)
return
@timeFunction
def algorithm(self, parameters=None):
"""
        Performs the archiveJobs method, looking for each type of failure
        and dealing with it as desired.
"""
try:
self.archiveJobs()
self.pollForClosable()
self.markInjected()
except WMException:
myThread = threading.currentThread()
if getattr(myThread, 'transaction', None) is not None \
and getattr(myThread.transaction, 'transaction', None) is not None:
myThread.transaction.rollback()
raise
except Exception as ex:
myThread = threading.currentThread()
msg = "Caught exception in JobArchiver\n"
msg += str(ex)
msg += "\n\n"
if getattr(myThread, 'transaction', None) is not None \
and getattr(myThread.transaction, 'transaction', None) is not None:
myThread.transaction.rollback()
raise JobArchiverPollerException(msg)
return
def archiveJobs(self):
"""
_archiveJobs_
archiveJobs will handle the master task of looking for finished jobs,
and running the code that cleans them out.
"""
doneList = self.findFinishedJobs()
logging.info("Found %i finished jobs to archive", len(doneList))
jobCounter = 0
for slicedList in grouper(doneList, 10000):
self.cleanWorkArea(slicedList)
successList = []
failList = []
killList = []
for job in slicedList:
if job["outcome"] == "success":
successList.append(job)
elif job["outcome"] == "killed":
killList.append(job)
else:
failList.append(job)
myThread = threading.currentThread()
myThread.transaction.begin()
self.changeState.propagate(successList, "cleanout", "success")
self.changeState.propagate(failList, "cleanout", "exhausted")
self.changeState.propagate(killList, "cleanout", "killed")
myThread.transaction.commit()
jobCounter += len(slicedList)
logging.info("Successfully archived %d jobs out of %d.", jobCounter, len(doneList))
def findFinishedJobs(self):
"""
_findFinishedJobs_
        Will actually, surprisingly, find finished jobs (i.e., jobs successful, exhausted, or killed)
"""
jobList = []
jobListAction = self.daoFactory(classname="Jobs.GetAllJobs")
jobList1 = jobListAction.execute(state="success", limitRows=self.numberOfJobsToArchive)
jobList2 = jobListAction.execute(state="exhausted", limitRows=self.numberOfJobsToArchive)
jobList3 = jobListAction.execute(state="killed", limitRows=self.numberOfJobsToArchive)
jobList.extend(jobList1)
jobList.extend(jobList2)
jobList.extend(jobList3)
if len(jobList) == 0:
# Then nothing is ready
return []
# Put together a list of job IDs
binds = []
for jobID in jobList:
binds.append({"jobid": jobID})
results = self.loadAction.execute(jobID=binds)
if not isinstance(results, list):
results = [results]
doneList = []
for entry in results:
# One job per entry
tmpJob = Job(id=entry['id'])
tmpJob.update(entry)
doneList.append(tmpJob)
return doneList
def cleanWorkArea(self, doneList):
"""
_cleanWorkArea_
        Upon workQueue realizing that a subscription is done, everything
        regarding those jobs is cleaned up.
"""
for job in doneList:
# print "About to clean cache for job %i" % (job['id'])
self.cleanJobCache(job)
return
def cleanJobCache(self, job):
"""
_cleanJobCache_
Clears out any files still sticking around in the jobCache,
tars up the contents and sends them off
"""
cacheDir = job['cache_dir']
if not cacheDir or not os.path.isdir(cacheDir):
msg = "Could not find jobCacheDir %s" % (cacheDir)
logging.error(msg)
return
cacheDirList = os.listdir(cacheDir)
if cacheDirList == []:
os.rmdir(cacheDir)
return
# Now we need to set up a final destination
try:
# Label all directories by workflow
# Workflow better have a first character
workflow = job['workflow']
firstCharacter = workflow[0]
jobFolder = 'JobCluster_%i' \
% (int(job['id'] / self.numberOfJobsToCluster))
logDir = os.path.join(self.logDir, firstCharacter,
workflow, jobFolder)
if not os.path.exists(logDir):
os.makedirs(logDir)
except Exception as ex:
msg = "Exception while trying to make output logDir\n"
msg += str("logDir: %s\n" % (logDir))
msg += str(ex)
logging.error(msg)
raise JobArchiverPollerException(msg)
# Otherwise we have something in there
try:
tarName = 'Job_%i.tar.bz2' % (job['id'])
with tarfile.open(name=os.path.join(logDir, tarName), mode='w:bz2') as tarball:
for fileName in cacheDirList:
fullFile = os.path.join(cacheDir, fileName)
try:
tarball.add(name=fullFile, arcname='Job_%i/%s' % (job['id'], fileName))
except IOError:
logging.error('Cannot read %s, skipping', fullFile)
except Exception as ex:
msg = "Exception while opening and adding to a tarfile\n"
msg += "Tarfile: %s\n" % os.path.join(logDir, tarName)
msg += str(ex)
logging.error(msg)
logging.debug("cacheDirList: %s", cacheDirList)
raise JobArchiverPollerException(msg)
try:
shutil.rmtree('%s' % (cacheDir), ignore_errors=True)
except Exception as ex:
msg = "Error while removing the old cache dir.\n"
msg += "CacheDir: %s\n" % cacheDir
msg += str(ex)
logging.error(msg)
raise JobArchiverPollerException(msg)
return
def markInjected(self):
"""
_markInjected_
Mark any workflows that have been fully injected as injected
"""
if self.tier0Mode:
logging.debug("Component will not check workflows for injection status")
return
myThread = threading.currentThread()
getAction = self.daoFactory(classname="Workflow.GetInjectedWorkflows")
markAction = self.daoFactory(classname="Workflow.MarkInjectedWorkflows")
result = getAction.execute()
# Check each result to see if it is injected:
injected = []
for name in result:
try:
if self.workQueue.getWMBSInjectionStatus(name, isDrainMode(self.config)):
injected.append(name)
except WorkQueueNoMatchingElements:
# workflow not known - free to cleanup
injected.append(name)
except Exception as ex:
logging.exception("Injection status checking failed, investigate: %s", str(ex))
logging.info("Found %d workflows to mark as injected", len(injected))
# Now, mark as injected those that returned True
if len(injected) > 0:
myThread.transaction.begin()
markAction.execute(names=injected, injected=True)
myThread.transaction.commit()
return
def pollForClosable(self):
"""
_pollForClosable_
Search WMBS for filesets that can be closed and mark them as closed.
"""
myThread = threading.currentThread()
myThread.transaction.begin()
closableFilesetDAO = self.daoFactory(classname="Fileset.ListClosable")
closableFilesets = closableFilesetDAO.execute()
logging.info("Found %d filesets to be closed", len(closableFilesets))
for closableFileset in closableFilesets:
openFileset = Fileset(id=closableFileset)
openFileset.load()
logging.debug("Closing fileset %s", openFileset.name)
openFileset.markOpen(False)
myThread.transaction.commit()
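# --- Config knobs read above (summary sketch, not authoritative docs) ---
# config.JobArchiver.logDir                  destination for Job_<id>.tar.bz2 tarballs
# config.JobArchiver.numberOfJobsToCluster   jobs grouped per JobCluster_<n> directory (default 1000)
# config.JobArchiver.numberOfJobsToArchive   rows fetched per job state each poll (default 10000)
# config.Tier0Feeder (presence)              enables tier0Mode and skips workqueue injection checks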
| 34.081871
| 99
| 0.591455
| 1,115
| 11,656
| 6.156951
| 0.295964
| 0.033212
| 0.017334
| 0.019374
| 0.117407
| 0.098762
| 0.074727
| 0.064239
| 0.064239
| 0.055936
| 0
| 0.0038
| 0.322666
| 11,656
| 341
| 100
| 34.181818
| 0.865738
| 0.127316
| 0
| 0.283721
| 0
| 0
| 0.109093
| 0.013063
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0.004651
| 0.07907
| 0
| 0.190698
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e90c9d97a14172ce328d8d9a5973b099b111668f
| 5,127
|
py
|
Python
|
mnist/mnist_dist.py
|
vibhatha/PytorchExamples
|
df356f120d6eef69a94586af93bff75af307582d
|
[
"Apache-2.0"
] | 3
|
2021-04-11T05:09:00.000Z
|
2021-08-11T09:58:53.000Z
|
mnist/mnist_dist.py
|
vibhatha/PytorchExamples
|
df356f120d6eef69a94586af93bff75af307582d
|
[
"Apache-2.0"
] | 4
|
2021-03-12T21:51:01.000Z
|
2021-03-14T16:03:13.000Z
|
mnist/mnist_dist.py
|
vibhatha/PytorchExamples
|
df356f120d6eef69a94586af93bff75af307582d
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import argparse
from math import ceil
from random import Random
from socket import socket
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
import os
import torch.distributed as dist
from torch.multiprocessing import Process
import numpy as np
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
""" Dataset partitioning helper """
class Partition(object):
def __init__(self, data, index):
self.data = data
self.index = index
def __len__(self):
return len(self.index)
def __getitem__(self, index):
data_idx = self.index[index]
return self.data[data_idx]
class DataPartitioner(object):
def __init__(self, data, sizes=[0.7, 0.2, 0.1], seed=1234):
self.data = data
self.partitions = []
rng = Random()
rng.seed(seed)
data_len = len(data)
indexes = [x for x in range(0, data_len)]
rng.shuffle(indexes)
for frac in sizes:
part_len = int(frac * data_len)
self.partitions.append(indexes[0:part_len])
indexes = indexes[part_len:]
def use(self, partition):
return Partition(self.data, self.partitions[partition])
""" Partitioning MNIST """
def partition_dataset():
print("Data Loading")
dataset = datasets.MNIST('./data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
#print(type(dataset), dataset.data)
size = dist.get_world_size()
bsz = int(128 / float(size))
partition_sizes = [1.0 / size for _ in range(size)]
print("Partition Sizes {}".format(partition_sizes))
partition = DataPartitioner(dataset, partition_sizes)
partition = partition.use(dist.get_rank())
train_set = torch.utils.data.DataLoader(partition,
batch_size=bsz,
shuffle=True)
return train_set, bsz
""" Gradient averaging. """
def average_gradients(model):
size = float(dist.get_world_size())
for param in model.parameters():
        dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)  # ReduceOp replaces the deprecated reduce_op alias
param.grad.data /= size
""" Distributed Synchronous SGD Example """
def run(rank, size):
if (rank == 0):
print("Run Fn")
torch.manual_seed(1234)
train_set, bsz = partition_dataset()
print("Data Points Per Rank {} of Size {}".format(len(train_set.dataset), size))
model = Net()
optimizer = optim.SGD(model.parameters(),
lr=0.01, momentum=0.5)
num_batches = ceil(len(train_set.dataset) / float(bsz))
if (rank == 0):
print("Started Training")
total_data = len(train_set)
epochs = 10
total_steps = epochs * total_data
for epoch in range(10):
epoch_loss = 0.0
count = 0
for data, target in train_set:
# print(
# "Data Size {}({},{}) of Rank {} : target {}, {}".format(data.shape, (data[0].numpy().dtype), type(data),
# rank, target, len(target)))
#print(data[0],target[0])
count = count + 1
result = '{0:.4g}'.format((count / float(total_steps)) * 100.0)
print("Progress {}% \r".format(result), end='\r')
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
epoch_loss += loss.item()
#print(epoch_loss)
loss.backward()
average_gradients(model)
optimizer.step()
if (rank == 0):
print('Rank ', dist.get_rank(), ', epoch ',
epoch, ': ', epoch_loss / num_batches)
def init_processes(rank, size, fn, backend='tcp'):
""" Initialize the distributed environment. """
dist.init_process_group(backend, rank=rank, world_size=size)
fn(rank, size)
if __name__ == "__main__":
world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
world_rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
print(world_rank, world_size)
init_processes(world_rank, world_size, run, backend='mpi')
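# --- Hedged launch sketch (assumes an MPI-enabled PyTorch build) ---
# Rank and world size come from Open MPI environment variables, so start the
# script under mpirun, e.g.:
#   mpirun -np 4 python mnist_dist.py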
| 29.635838
| 122
| 0.577921
| 639
| 5,127
| 4.477308
| 0.280125
| 0.005592
| 0.008389
| 0.012583
| 0.041244
| 0.026564
| 0.009088
| 0
| 0
| 0
| 0
| 0.028865
| 0.29725
| 5,127
| 172
| 123
| 29.80814
| 0.765196
| 0.062415
| 0
| 0.072581
| 0
| 0
| 0.039666
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08871
| false
| 0
| 0.129032
| 0.016129
| 0.282258
| 0.072581
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e90e71723fb83c3e9db45cf94c16cac0b3962eb2
| 1,218
|
py
|
Python
|
common/es/one_scripts.py
|
ltxhh/course
|
45c8e4e436d9f20effccc7ed0844dfd07d8348b1
|
[
"Apache-2.0"
] | null | null | null |
common/es/one_scripts.py
|
ltxhh/course
|
45c8e4e436d9f20effccc7ed0844dfd07d8348b1
|
[
"Apache-2.0"
] | null | null | null |
common/es/one_scripts.py
|
ltxhh/course
|
45c8e4e436d9f20effccc7ed0844dfd07d8348b1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2022/4/12 13:43
# @Author : linyaxuan
# @File : one_scripts.py
# @Software : PyCharm
"""
Import data from the database into Elasticsearch.
"""
import pymysql
import traceback
from elasticsearch import Elasticsearch
def get_db_data():
    # Open the database connection (host / user / password / database name)
    db = pymysql.connect(host="127.0.0.1", port=3306, user="root", password="linyaxuan666",
                         database="course", charset='utf8')
    # Create a cursor object with the cursor() method
cursor = db.cursor()
sql = "SELECT * FROM tb_course"
    # Execute the SQL query with execute()
cursor.execute(sql)
    # Fetch all result rows
results = cursor.fetchall()
    # Close the database connection
db.close()
return results
def insert_data_to_es():
es = Elasticsearch("http://47.94.58.100:9200/")
    es.indices.delete(index='course', ignore_unavailable=True)  # don't fail if the index doesn't exist yet
try:
i = -1
for row in get_db_data():
print(row)
print(row[1], row[2])
i += 1
es.index(index='course', body={
'id': i,
'title': row[1],
'desc': row[2],
})
    except Exception:
        error = traceback.format_exc()
        print("Error: unable to fetch data", error)
if __name__ == "__main__":
insert_data_to_es()
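Indexing one document per HTTP call is slow for larger tables; a bulk variant is sketched below (elasticsearch.helpers.bulk is the standard helper, but the action layout here is illustrative only):

# from elasticsearch import helpers
# actions = ({'_index': 'course', '_source': {'id': i, 'title': row[1], 'desc': row[2]}}
#            for i, row in enumerate(get_db_data()))
# helpers.bulk(es, actions)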
| 23.882353
| 85
| 0.559113
| 149
| 1,218
| 4.42953
| 0.624161
| 0.015152
| 0.027273
| 0.042424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052448
| 0.295567
| 1,218
| 51
| 86
| 23.882353
| 0.716783
| 0.183908
| 0
| 0
| 0
| 0
| 0.149284
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0.032258
| 0.096774
| 0
| 0.193548
| 0.096774
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e910c8bb7a93d643dfe5883064380eb1ced0913d
| 1,343
|
py
|
Python
|
doubleRedirect.py
|
ebraminio/DeltaBot
|
14d427ca644c4e842f72802a0e07adcaecda7097
|
[
"CC0-1.0"
] | 10
|
2016-08-09T21:28:27.000Z
|
2021-12-23T17:22:04.000Z
|
doubleRedirect.py
|
ebraminio/DeltaBot
|
14d427ca644c4e842f72802a0e07adcaecda7097
|
[
"CC0-1.0"
] | 9
|
2016-12-31T10:48:11.000Z
|
2020-07-22T20:52:06.000Z
|
doubleRedirect.py
|
ebraminio/DeltaBot
|
14d427ca644c4e842f72802a0e07adcaecda7097
|
[
"CC0-1.0"
] | 11
|
2017-01-24T15:51:57.000Z
|
2022-02-10T00:35:18.000Z
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# licensed under CC-Zero: https://creativecommons.org/publicdomain/zero/1.0
import pywikibot
from pywikibot.data import api
import re
site = pywikibot.Site('wikidata', 'wikidata')
site.login()
repo = site.data_repository()
def redirect(fromId, toId):
# get token
params = {
'action': 'query',
'meta': 'tokens'
}
req = api.Request(site=site, **params)
data = req.submit()
# create redirect
params3 = {
'action': 'wbcreateredirect',
'from': fromId,
'to': toId,
'bot': 1,
'token': data['query']['tokens']['csrftoken']
}
req3 = api.Request(site=site, **params3)
data3 = req3.submit()
def main():
params = {
'action': 'query',
'list': 'querypage',
'qppage': 'DoubleRedirects',
'qplimit': 5000
}
req = api.Request(site=site, **params)
data = req.submit()
for m in data['query']['querypage']['results']:
try:
if m['ns'] == 0:
item1 = pywikibot.ItemPage(repo, m['title'])
                item2 = item1.getRedirectTarget().getRedirectTarget().getID()  # follow A -> B -> C to find the final target
redirect(m['title'], item2)
        except Exception:
            pass  # skip items that fail to resolve instead of aborting the whole batch
if __name__ == "__main__":
main()
| 24.87037
| 78
| 0.527923
| 135
| 1,343
| 5.185185
| 0.533333
| 0.042857
| 0.06
| 0.077143
| 0.114286
| 0.114286
| 0.114286
| 0.114286
| 0.114286
| 0
| 0
| 0.019459
| 0.311243
| 1,343
| 53
| 79
| 25.339623
| 0.737297
| 0.102755
| 0
| 0.195122
| 0
| 0
| 0.162162
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0.02439
| 0.073171
| 0
| 0.121951
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e914dc16d8c9fee0bbb11e912b41acdddd08ad05
| 1,237
|
py
|
Python
|
leetcode/permutation.py
|
huynonstop/solving-everything
|
21c7c32f9e482e1e88d5ec8a03f8815d28f7ef39
|
[
"MIT"
] | null | null | null |
leetcode/permutation.py
|
huynonstop/solving-everything
|
21c7c32f9e482e1e88d5ec8a03f8815d28f7ef39
|
[
"MIT"
] | null | null | null |
leetcode/permutation.py
|
huynonstop/solving-everything
|
21c7c32f9e482e1e88d5ec8a03f8815d28f7ef39
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
return permute_unique(nums)
# https://leetcode.com/problems/permutations-ii/discuss/18602/9-line-python-solution-with-1-line-to-handle-duplication-beat-99-of-others-%3A-)
def permute_unique(nums):
rs = []
nums.sort()
def dfs(left_nums, path):
if not left_nums:
rs.append(path)
return
for i in range(len(left_nums)):
            if i > 0 and left_nums[i] == left_nums[i - 1]:  # compare within left_nums; the original checked nums by mistake
continue
dfs(left_nums[:i] + left_nums[i+1:],
path + [left_nums[i]])
dfs(nums, [])
return rs
def permute_unique(nums):  # alternative backtracking version (note: this redefines the recursive version above)
n = len(nums)
rs = []
used = [False] * n
t = []
nums.sort()
def backtrack():
if len(t) == n:
rs.append(t[:])
return
for i in range(n):
if used[i]:
continue
            if i > 0 and nums[i] == nums[i - 1] and used[i - 1]:  # guard i > 0 first so used[-1] is never consulted
continue
used[i] = True
t.append(nums[i])
backtrack()
used[i] = False
t.pop()
backtrack()
return rs
permute_unique([1, 1, 2])
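As a quick sanity check (a sketch added here, not part of the original file), the duplicate-aware search should yield each distinct ordering exactly once:

result = permute_unique([1, 1, 2])
assert sorted(result) == [[1, 1, 2], [1, 2, 1], [2, 1, 1]]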
| 22.089286
| 142
| 0.501213
| 163
| 1,237
| 3.742331
| 0.361963
| 0.065574
| 0.083607
| 0.065574
| 0.134426
| 0.078689
| 0.078689
| 0.078689
| 0.078689
| 0
| 0
| 0.02402
| 0.36055
| 1,237
| 55
| 143
| 22.490909
| 0.747156
| 0.113177
| 0
| 0.365854
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121951
| false
| 0
| 0.02439
| 0.02439
| 0.292683
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e91d0411a8febadb09ca20268e15414ccded8163
| 1,543
|
py
|
Python
|
pedurma/proofreading.py
|
Esukhia/pedurma
|
334b5957db30f514d396bd9defc9e9381f5b290b
|
[
"MIT"
] | null | null | null |
pedurma/proofreading.py
|
Esukhia/pedurma
|
334b5957db30f514d396bd9defc9e9381f5b290b
|
[
"MIT"
] | null | null | null |
pedurma/proofreading.py
|
Esukhia/pedurma
|
334b5957db30f514d396bd9defc9e9381f5b290b
|
[
"MIT"
] | 1
|
2021-11-04T07:04:05.000Z
|
2021-11-04T07:04:05.000Z
|
from pedurma.pecha import ProofreadNotePage
from pedurma.utils import from_yaml
def get_note_page_img_link(text_id, pg_num, repo_path):
text_meta = from_yaml((repo_path / text_id / "meta.yml"))
image_grp_id = text_meta.get("img_grp_id", "")
img_link = f"https://iiif.bdrc.io/bdr:{image_grp_id}::{image_grp_id}{int(pg_num):04}.jpg/full/max/0/default.jpg"
return img_link
def get_note_page(text_id, cur_pg_num, repo_path=None):
manual_note = (
repo_path / text_id / "manual_notes" / f"{cur_pg_num:04}.txt"
).read_text(encoding="utf-8")
google_note = (
repo_path / text_id / "google_notes" / f"{cur_pg_num:04}.txt"
).read_text(encoding="utf-8")
img_link = get_note_page_img_link(text_id, cur_pg_num, repo_path)
page = ProofreadNotePage(
manual=manual_note, google=google_note, img_link=img_link, page_num=cur_pg_num
)
return page
def get_note_pages(text_id, repo_path):
note_pages = []
page_paths = list((repo_path / text_id / "google_notes").iterdir())
page_paths.sort()
for page_path in page_paths:
page_num = int(page_path.stem)
note_pages.append(get_note_page(text_id, page_num, repo_path))
return note_pages
def update_note_page(text_id, page: ProofreadNotePage, repo_path=None):
new_manual_note_page = page.manual
cur_pg_num = page.page_num
(repo_path / text_id / "manual_notes" / f"{cur_pg_num:04}.txt").write_text(
new_manual_note_page, encoding="utf-8"
)
print(f"INFO: {cur_pg_num} updated")
| 35.068182
| 116
| 0.706416
| 253
| 1,543
| 3.905138
| 0.245059
| 0.066802
| 0.064777
| 0.07085
| 0.32996
| 0.271255
| 0.225709
| 0.138664
| 0.138664
| 0.138664
| 0
| 0.009382
| 0.171095
| 1,543
| 43
| 117
| 35.883721
| 0.763096
| 0
| 0
| 0.058824
| 0
| 0.029412
| 0.169799
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.058824
| 0
| 0.264706
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e91ff99a3728e01c9518fdfe79d256b14ae28af1
| 353
|
py
|
Python
|
DataBase Sqlite3/NoteMeilheur.py
|
otmanabdoun/IHM-Python
|
624e961c2f6966b98bf2c1bc4dd276b812954ba1
|
[
"Apache-2.0"
] | 3
|
2021-12-08T10:34:55.000Z
|
2022-01-17T21:02:40.000Z
|
NoteMeilheur.py
|
otmanabdoun/IHM-Python
|
624e961c2f6966b98bf2c1bc4dd276b812954ba1
|
[
"Apache-2.0"
] | null | null | null |
NoteMeilheur.py
|
otmanabdoun/IHM-Python
|
624e961c2f6966b98bf2c1bc4dd276b812954ba1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 3 04:38:07 2021
@author: User
"""
import sqlite3
connexion = sqlite3.connect("dbM2IQL.db")
curseur = connexion.cursor()
curseur.execute("""SELECT e.Nom, c.note FROM Etudiant as e INNER JOIN
CF as c ON e.id = c.fk_etudiant
ORDER BY c.note DESC LIMIT 1""")
print(curseur.fetchone())
connexion.close()
| 25.214286
| 70
| 0.651558
| 56
| 353
| 4.089286
| 0.767857
| 0.043668
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05694
| 0.203966
| 353
| 14
| 71
| 25.214286
| 0.758007
| 0.206799
| 0
| 0
| 0
| 0
| 0.496154
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e924f0db03f4f2a8c126f7c109a518852a2aa24a
| 6,850
|
py
|
Python
|
ProcessingData/get_gp-bias.py
|
gomes-lab/SARA_ScienceAdvances
|
61848d1c92a66bd58c8c195e5b2bb250ef8efb51
|
[
"MIT"
] | 1
|
2022-01-13T12:17:29.000Z
|
2022-01-13T12:17:29.000Z
|
ProcessingData/get_gp-bias.py
|
gomes-lab/SARA_ScienceAdvances
|
61848d1c92a66bd58c8c195e5b2bb250ef8efb51
|
[
"MIT"
] | null | null | null |
ProcessingData/get_gp-bias.py
|
gomes-lab/SARA_ScienceAdvances
|
61848d1c92a66bd58c8c195e5b2bb250ef8efb51
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Script to extract the gp bias features from microscopy images
"""
import sys
import json
import os
import copy as cp
import numpy as np
import glob
from PIL import Image  # used in get_filename(); this import was missing
import matplotlib.pyplot as plt
import matplotlib
from numpy.polynomial import polynomial
import offsets as GS
from probability_dist import *
import data_storage as ds
import zone as LSA_Zone
from os import listdir
from matplotlib import cm
from collections import OrderedDict
import seaborn as sns
import itertools
#Set color schemes
cmaps = OrderedDict()
cmaps['Qualitative'] = ['Pastel1', 'Pastel2', 'Paired', 'Accent',
'Dark2', 'Set1', 'Set2', 'Set3',
'tab10', 'tab20', 'tab20b', 'tab20c']
plt.rcParams["image.cmap"] = "Set1"
plt.rcParams['axes.prop_cycle'] = plt.cycler(color=plt.cm.Set1.colors)
palette = itertools.cycle(sns.color_palette("muted"))
palette = sns.color_palette("muted")
def list_files(directory, extension):
return [f for f in listdir(directory) if f.endswith('.' + extension)]
def convert_bias_parameters(bias_parameters, center):
"""
    Converts the sum-of-Gaussians parameters into a format that the GP can
interpret
"""
bias_parameters_new = []
for b in bias_parameters:
std = b[2] * 0.5 * np.sqrt(2.*np.log(2.))
bb = (b[0] * b[4], b[1] - center, std, b[3])
bias_parameters_new.append(bb)
return bias_parameters_new
def get_img_filename(pos, image_error, bx = 1., by = 1.):
"""
Convert position to a filename
"""
lsa = ds.LSA()
stripe = {}
stripe["x"] = pos[0]
stripe["y"] = pos[1]
if image_error:
stripe["x"] = round(pos[0]/bx)
stripe["y"] = round(pos[1]/by)
stripe["dwell"] = 0.
stripe["Tpeak"] = 0.
fn = lsa.image_name(stripe)
fn = fn[:9]
return fn
def get_filename(pos, img_dir, bx = 1., by = 1.):
"""
    Load the image file recorded at the given stripe position.
"""
fn = get_img_filename(pos, image_error = True, bx = bx, by = by)
fn += "*.bmp"
    if img_dir:  # img_dir is a parameter, so the original locals() check was always true
fn = os.path.join(img_dir, fn)
img_fn = glob.glob(fn)
if len(img_fn) > 0:
img_fn = sorted(img_fn)[0]
img = Image.open(img_fn)
mode = img.mode
if mode == "RGB":
r, g, b = img.split()
img = Image.merge("RGB", (b, g, r))
return img, img_fn
rescaling_datas = []
img_dir = "Bi2O3/Images/"
files = list_files(img_dir, "bmp")
exclude = []
for f in files[:]:
if f in exclude:
continue
rescaling_data = {}
#Parse information from the filename
meta_img = {}
fn_meta = f.split("_")
#The last part is the temperature in C
meta_img["Tpeak"] = float(fn_meta[-1].split(".")[0])
    #The second-to-last part is the dwell time in microseconds
meta_img["dwell"] = float(fn_meta[-2])
meta_img["logtau"] = np.log10(float(fn_meta[-2]))
meta_img["pos"] = [float(fn_meta[0][1:])*2, float(fn_meta[1])*5]
meta_img["filename"] = f
pos = meta_img["pos"]
img, img_fn = get_filename(pos, img_dir, bx = 2., by = 5.)
plt_out = img_fn.replace("bmp", "png").replace("b", "aa")
zone = LSA_Zone.zone()
img_spec_offset = GS.img_spec_offset()
img_spec_offset.scale = 0.00092 #Scaling of pixels in mm
img_spec_offset.scale_imgcam = 0.0006680932 #Scaling of pixels in mm for imaging camera
img_spec_offset.offset = 0 #Offset of the spectrometer with respect to the image center in pixels.
img_spec_offset.offsety = 0 #Offset of the spectrometer with respect to the image center in pixels.
img_spec_offset.img_shift = img_spec_offset.offset * img_spec_offset.scale #The amount of shift along the x-axis in mm of the spectrum with respect to image
img_spec_offset.offset_global = [0., 0.]
zone.pos = pos
pd = probability_dist()
img, img_center_px, img_info, img_data, img_peaks = zone.image_from_file(img_fn, img_spec_offset)
if abs(img_center_px - zone.img_width * 0.5) > zone.img_width*0.1:
img_center_px = 0.5 * zone.img_width
img_center = zone.img_xdomain[0] + img_center_px/zone.img_width * (zone.img_xdomain[1] - zone.img_xdomain[0])
spec_center = img_center
peaks = np.array(img_peaks)
n_dense = 800
zone.spec_xdomain = [img_center-1.75, img_center+1.75]
x_plot = np.linspace(zone.spec_xdomain[0], zone.spec_xdomain[1], n_dense).reshape(-1,1)
dist_peaks, dist_lsa, dist_peaks_lsa, bias_parameters, LSA_width = pd.get_img_bias(peaks, img_center, spec_center, x_plot, lsa_frac = 1.)
bias_parameter_centered = convert_bias_parameters(bias_parameters, img_center)
#Convolve the uncertainty and the prior distribution
dist_sum_peaks = pd.sum(dist_peaks,"SumPeaks",1.)
dist_sum_peaks_lsa = pd.sum(dist_peaks_lsa,"SumPeaks",1.)
    # Plot on three separate axes (two subplots plus a twinx overlay)
fig, axes = plt.subplots(nrows=2, sharex=True)
axes = axes.tolist()
axes[0].set_ylabel("Rescaling (a.u.)")
axes[1].set_ylabel("y pos (mm)")
axes[1].set_xlabel("x pos (mm)")
w1 = zone.img_xdomain[0] - img_center
w2 = zone.img_xdomain[1] - img_center
h1 = zone.img_ydomain[0] - 0.5 * (zone.img_ydomain[0] + zone.img_ydomain[1])
h2 = zone.img_ydomain[1] - 0.5 * (zone.img_ydomain[0] + zone.img_ydomain[1])
l1, = axes[0].plot(x_plot - img_center, dist_lsa, color=palette[3], label = "LSA bias")
axes[0].yaxis.set_ticks([])
axes.append(axes[0].twinx())
l2, = axes[2].plot(x_plot - img_center, dist_sum_peaks['dist'], color=palette[4], label = "RGB bias")
axes[2].yaxis.set_ticks([])
plt.legend([l1, l2],["LSA bias", "RGB bias"], loc = 'upper right', frameon=False)
    # Size of the image in pixels (size of original image)
width, height = img.size
# Setting the points for cropped image
left = 0
top = height/2
right = width
bottom = height
# Cropped image of above dimension
img = img.crop((left, top, right, bottom))
width, height = img.size
im = axes[1].imshow(img, extent=[w1,w2,h1,h2], aspect = 'auto')
axes[1].set_xlim([-0.55, 0.55])
for bias_i in bias_parameter_centered[:-1]:
axes[1].axvline(x=bias_i[1], ymin = (h2), ymax = 2.2*h2,
color=palette[8], linewidth = 1.0)
title_str = "Dwell "+str(meta_img["dwell"])+"\u03bcs, Tpeak "+str(meta_img["Tpeak"])+"℃"
plt.title(title_str)
plt.savefig(plt_out, format='png')
plt.close(fig)
rescaling_data["meta_data"] = meta_img
rescaling_data["rescaling_parameters"] = bias_parameter_centered
rescaling_datas.append(rescaling_data)
# Serializing json
json_object = json.dumps(rescaling_datas, indent = 4)
# Writing to json
with open("bias.json", "w") as outfile:
outfile.write(json_object)
| 35.492228
| 163
| 0.646277
| 1,053
| 6,850
| 4.026591
| 0.265907
| 0.026415
| 0.033726
| 0.008491
| 0.167217
| 0.123821
| 0.049057
| 0.049057
| 0.049057
| 0.049057
| 0
| 0.029478
| 0.217518
| 6,850
| 192
| 164
| 35.677083
| 0.761194
| 0.132555
| 0
| 0.014286
| 0
| 0
| 0.065088
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.128571
| 0.007143
| 0.185714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e925522b3d3915457215980e5bca266c8fd2ff38
| 2,448
|
py
|
Python
|
monitoring/automation/monitor.py
|
shane0/flask-website-monitor
|
39031b9207c97baef4b10a792e038f241bcdc857
|
[
"MIT"
] | 1
|
2017-04-13T05:29:15.000Z
|
2017-04-13T05:29:15.000Z
|
monitoring/automation/monitor.py
|
shane0/flask-website-monitor
|
39031b9207c97baef4b10a792e038f241bcdc857
|
[
"MIT"
] | 1
|
2017-04-12T23:44:58.000Z
|
2017-04-12T23:44:58.000Z
|
monitoring/automation/monitor.py
|
shane0/flask-website-monitor
|
39031b9207c97baef4b10a792e038f241bcdc857
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A website monitor.
"""
import sys
import traceback
import requests
import re
import json
import datetime
DEFAULT_CONFIG_FILE = 'config.json'
def check():
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,ja;q=0.2',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'
}
try:
config_file = DEFAULT_CONFIG_FILE #if not (args.config) else args.config
with open(config_file) as config_data:
config = json.load(config_data)
    except (OSError, json.JSONDecodeError):
        return 'Fix your config.'
websites = config["websites"]
results = []
class Result:
def __init__(self, site, url, status):
self.site = site
self.url = url
self.status = status
        def __str__(self):
            return '%-8s %-25s %-45s' % (self.status, self.site, self.url)  # the original referenced bare names, raising NameError
def to_html(self):
color = 'green' if self.status == 'OK' else 'red'
return '''<tr style="height: 30px;">
<td style="text-align: center; color: %s">%s</td>
<td>%s</td>
<td><a href="%s">%s</a></td>
</tr>''' % (color, self.status, self.site, self.url, self.url)
now = datetime.datetime.now()
print(now)
for site in sorted(websites):
url = websites[site]
try:
res = requests.get(websites[site], headers=headers)
status = 'OK' if res.status_code == 200 else res.status_code
        except requests.RequestException:
status = 'TIMEOUT'
result = Result(site, url, status)
results.append(result)
print(result)
body = "<h3>Site Monitor - %s</h3>" % now
body += '<table class="table" >'
body += '''<thead><tr>
<th style="width: 15%%">STATUS</th>
<th style="width: 30%%">SITE</th>
<th style="width: 55%%">URL</th>
</tr></thead>'''
body_str = ''.join([r.to_html() for r in sorted(results, key=lambda rst: rst.site)])
body += '<tbody>%s</tbody>' % body_str
body += '</table>'
# test write to file
# f = open('result.html', 'w')
# f.write(body)
# f.close()
print(body)
return body
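A hypothetical driver (mirroring the commented-out snippet above) that writes the rendered table to disk:

if __name__ == '__main__':
    html = check()
    with open('result.html', 'w') as f:
        f.write(html)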
| 28.137931
| 126
| 0.541258
| 324
| 2,448
| 4.021605
| 0.42284
| 0.00921
| 0.027629
| 0.021489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032221
| 0.290033
| 2,448
| 86
| 127
| 28.465116
| 0.717491
| 0.069853
| 0
| 0.066667
| 0
| 0.05
| 0.335544
| 0.06145
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.1
| 0.016667
| 0.25
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e92912ace35fc868f85b6a3bdb13260570590334
| 412
|
py
|
Python
|
Chapter03/c3_27_datadotworld_1.py
|
andrewjcoxon/Hands-On-Data-Science-with-Anaconda
|
82504a059ecd284b3599fa9af2b3eb6bbd6e28f3
|
[
"MIT"
] | 25
|
2018-06-25T16:21:09.000Z
|
2022-02-08T09:28:29.000Z
|
Hands-On-Data-Science-with-Anaconda-master/Hands-On-Data-Science-with-Anaconda-master/Chapter03/c3_27_datadotworld_1.py
|
manual123/Nacho-Jupyter-Notebooks
|
e75523434b1a90313a6b44e32b056f63de8a7135
|
[
"MIT"
] | null | null | null |
Hands-On-Data-Science-with-Anaconda-master/Hands-On-Data-Science-with-Anaconda-master/Chapter03/c3_27_datadotworld_1.py
|
manual123/Nacho-Jupyter-Notebooks
|
e75523434b1a90313a6b44e32b056f63de8a7135
|
[
"MIT"
] | 17
|
2018-06-15T02:55:30.000Z
|
2022-03-09T15:24:42.000Z
|
"""
Name : c3_27_datadotworld_1.py
Book : Hands-on Data Science with Anaconda
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan and James Yan
Date : 1/15/2018
email : yany@canisius.edu
paulyxy@hotmail.com
"""
import datadotworld as dw
dataset = 'jonloyens/an-intro-to-dataworld-dataset'
data = dw.load_dataset(dataset, force_update=True)
list(data.dataframes)  # the loaded object is data; dataset is just the ID string
| 27.466667
| 51
| 0.694175
| 56
| 412
| 5.017857
| 0.839286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033846
| 0.211165
| 412
| 15
| 52
| 27.466667
| 0.830769
| 0.565534
| 0
| 0
| 0
| 0
| 0.246835
| 0.246835
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e92a3ce5abab1bfe02516472d0fc6c56a482d48d
| 15,964
|
py
|
Python
|
strutil.py
|
IloveKanade/k3fmt
|
13a81562b9fc706dbf7fc05fcae130260bc2551d
|
[
"MIT"
] | null | null | null |
strutil.py
|
IloveKanade/k3fmt
|
13a81562b9fc706dbf7fc05fcae130260bc2551d
|
[
"MIT"
] | 3
|
2021-08-06T07:24:40.000Z
|
2022-03-23T06:58:36.000Z
|
strutil.py
|
IloveKanade/k3fmt
|
13a81562b9fc706dbf7fc05fcae130260bc2551d
|
[
"MIT"
] | 1
|
2021-08-04T08:41:33.000Z
|
2021-08-04T08:41:33.000Z
|
import re
import os
import errno
import string
import subprocess
import k3color
listtype = (tuple, list)
invisible_chars = ''.join(map(chr, list(range(0, 32))))
invisible_chars_re = re.compile('[%s]' % re.escape(invisible_chars))
def break_line(linestr, width):
lines = linestr.splitlines()
rst = []
space = ' '
if isinstance(linestr, k3color.Str):
space = k3color.Str(' ')
for line in lines:
words = line.split(' ')
buf = words[0]
for word in words[1:]:
if len(word) + len(buf) + 1 > width:
rst.append(buf)
buf = word
else:
buf += space + word
if buf != '':
rst.append(buf)
return rst
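# Illustrative case (hypothetical): break_line('aa bb cc', 5) -> ['aa bb', 'cc']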
def line_pad(linestr, padding=''):
"""
:param linestr: multiple line string with `\n` as line separator.
:param padding: left padding string to add before each line.
It could also be a callable object that returns a string.
This is useful when creating dynamic padding.
:return: multiple line string with `\n` as line separator, with left padding added.
"""
lines = linestr.split("\n")
if type(padding) in (str, bytes):
lines = [padding + x for x in lines]
elif callable(padding):
lines = [padding(x) + x for x in lines]
lines = "\n".join(lines)
return lines
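# Illustrative cases (hypothetical): line_pad('a\nb', '  ') -> '  a\n  b';
# line_pad('a\nb', lambda line: '> ') -> '> a\n> b'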
def _to_str(y):
if isinstance(y, k3color.Str):
pass
elif isinstance(y, int):
y = str(y)
elif isinstance(y, listtype):
y = str(y)
return y
def struct_repr(data, key=None):
"""
Render primitive or composite data to a structural representation string list.
:param data: a number, string, list or dict to render to a structural representation.
:param key: is a callable that is used to sort dict keys. It is used in sort: `keys.sort(key=key)`.
:return: a list of string.
Render a data to a multi-line structural(yaml-like) representation.
a = {
1: 3,
'x': {1:4, 2:5},
'l': [1, 2, 3],
}
for l in struct_repr(a):
        print(l)
"""
# Output:
# 1 : 3
# l : - 1
# - 2
# - 3
# x : 1 : 4
# 2 : 5
if type(data) in listtype:
if len(data) == 0:
return ['[]']
max_width = 0
elt_lines = []
for elt in data:
sublines = struct_repr(elt)
sublines_max_width = max([len(x) for x in sublines])
if max_width < sublines_max_width:
max_width = sublines_max_width
elt_lines.append(sublines)
lines = []
for sublines in elt_lines:
# - subline[0]
# subline[1]
# ...
lines.append('- ' + sublines[0].ljust(max_width))
for l in sublines[1:]:
lines.append(' ' + l.ljust(max_width))
return lines
elif type(data) == dict:
if len(data) == 0:
return ['{}']
max_k_width = 0
max_v_width = 0
kvs = []
for k, v in data.items():
k = utf8str(k)
sublines = struct_repr(v)
sublines_max_width = max([len(x) for x in sublines])
if max_k_width < len(k):
max_k_width = len(k)
if max_v_width < sublines_max_width:
max_v_width = sublines_max_width
kvs.append((k, sublines))
kvs.sort(key=key)
lines = []
for k, sublines in kvs:
# foo : sub-0
# sub-1
# b : sub-0
# sub-0
lines.append(k.rjust(max_k_width) + ' : ' +
sublines[0].ljust(max_v_width))
for l in sublines[1:]:
lines.append(' '.rjust(max_k_width) +
' ' + l.ljust(max_v_width))
return lines
else:
data = filter_invisible_chars(data)
return [utf8str(data)]
def filter_invisible_chars(data):
"""
Filters invisible characters in a string or a unicode object
:param data: a string or unicode object to filter invisible characters
:return: a filtered string or unicode object
"""
# from pykit.strutil import filter_invisible_chars
# cases = [
# "1273883926293937729\000\001\031",
# "\x00\x01\x02\x03\x04\005",
# u"1122299299299299292",
# u"\x00\x01\x02\x03\x04\005",
# ]
#
# rst = []
# for case in cases:
# rst.append(strutil.filter_invisible_chars(case))
#
# for r in rst:
# print(r)
# '1273883926293937729'
# ''
# u'1122299299299299292'
# u''
if type(data) not in (bytes, str):
return data
return invisible_chars_re.sub('', data)
def _get_key_and_headers(keys, rows):
if keys is None:
if len(rows) == 0:
keys = []
else:
r0 = rows[0]
if type(r0) == dict:
keys = list(r0.keys())
keys.sort()
elif type(r0) in listtype:
keys = [i for i in range(len(r0))]
else:
keys = ['']
_keys = []
column_headers = []
for k in keys:
if type(k) not in listtype:
k = [k, k]
_keys.append(k[0])
column_headers.append(str(k[1]))
return _keys, column_headers
def utf8str(s):
if isinstance(s, bytes):
return str(s, "utf-8")
return str(s)
def format_line(items, sep=' ', aligns=''):
"""
It formats a list in a multi row manner.
It is compatible with colored string such as those created with `strutil.blue("blue-text")`.
:param items: elements in a line.
Each element could be a `string` or a `list` of `string`.
If it is a `list` of `string`, it would be rendered as a multi-row
element.
:param sep: specifies the separator between each element in a line.
By default it is a single space `" "`.
:param aligns: specifies alignment for each element.
- `l` for left-align.
- `r` for right-align.
If no alignment specified for i-th element, it will be aligned to right by default.
:return: formatted string.
format a line with multi-row columns.
"""
# items = [ 'name:',
# [ 'John',
# 'j is my nick'],
# [ 'age:' ],
# [ 26, ],
# [ 'experience:' ],
# [ '2000 THU',
# '2006 sina',
# '2010 other'
# ],
# ]
# format_line(items, sep=' | ', aligns = 'llllll')
#
# outputs:
# name: | John | age: | 26 | experience: | 2000 THU
# | j is my nick | | | | 2006 sina
# | | | | | 2010 other
aligns = [x for x in aligns] + [''] * len(items)
aligns = aligns[:len(items)]
aligns = ['r' if x == 'r' else x for x in aligns]
items = [(x if type(x) in listtype else [x])
for x in items]
items = [[_to_str(y)
for y in x]
for x in items]
maxHeight = max([len(x) for x in items] + [0])
def max_width(x):
return max([y.__len__()
for y in x] + [0])
widths = [max_width(x) for x in items]
items = [(x + [''] * maxHeight)[:maxHeight]
for x in items]
lines = []
for i in range(maxHeight):
line = []
for j in range(len(items)):
width = widths[j]
elt = items[j][i]
actualWidth = elt.__len__()
elt = utf8str(elt)
if actualWidth < width:
padding = ' ' * (width - actualWidth)
if aligns[j] == 'l':
elt = elt + padding
else:
elt = padding + elt
line.append(elt)
line = sep.join(line)
lines.append(line)
return "\n".join(lines)
def format_table(rows,
keys=None,
colors=None,
sep=' | ',
row_sep=None):
"""
Render a list of data into a table.
Number of rows is `len(rows)`.
Number of columns is `len(rows[0])`.
:param rows: list of items to render.
Element of list can be number, string, list or dict.
:param keys: specifies indexes(for list) or keys(for dict) to render.
It is a list.
Indexes or keys those are not in this list will not be rendered.
It can also be used to specify customized column headers, if element in
list is a 2-element tuple or list:
:param colors: specifies the color for each column.
It is a list of color values in number or color name strings.
If length of `colors` is smaller than the number of columns(the number of
indexes of a list, or keys of a dict), the colors are repeated for columns
after.
:param sep: specifies char to separate rows.
By default it is None, it means do not add line separator.
:param row_sep: specifies column separator char.
By default it is `" | "`.
:return: a list of string.
"""
keys, column_headers = _get_key_and_headers(keys, rows)
colors = _get_colors(colors, len(keys))
    # element of lns is a multi-column line
# lns = [
# # line 1
# [
# # column 1 of line 1
# ['name:', # row 1 of column 1 of line 1
# 'foo', # row 2 of column 1 of line 1
# ],
#
# # column 2 of line 1
# ['school:',
# 'foo',
# 'bar',
# ],
# ],
# ]
# headers
lns = [
[[a + ': ']
for a in column_headers]
]
for row in rows:
if row_sep is not None:
lns.append([[None] for k in keys])
if type(row) == dict:
ln = [struct_repr(row.get(k, ''))
for k in keys]
elif type(row) in listtype:
ln = [struct_repr(row[int(k)])
if len(row) > int(k) else ''
for k in keys]
else:
ln = [struct_repr(row)]
lns.append(ln)
def get_max_width(cols):
return max([len(utf8str(c[0]))
for c in cols] + [0])
max_widths = [get_max_width(cols) for cols in zip(*lns)]
rows = []
for row in lns:
ln = []
for i in range(len(max_widths)):
color = colors[i]
w = max_widths[i]
ln.append([k3color.Str(x.ljust(w), color)
if x is not None else row_sep * w
for x in row[i]])
rows.append(format_line(ln, sep=sep))
return rows
def _get_colors(colors, col_n):
if colors is None:
colors = []
colors = colors or ([None] * col_n)
while len(colors) < col_n:
colors.extend(colors)
colors = colors[:col_n]
return colors
def _findquote(line, quote):
if len(quote) == 0:
return -1, -1, []
i = 0
n = len(line)
escape = []
while i < n:
if line[i] == '\\':
escape.append(i)
i += 2
continue
if line[i] in quote:
quote_s = i - len(escape)
j = i
i += 1
while i < n and line[i] != line[j]:
if line[i] == '\\':
escape.append(i)
i += 2
continue
i += 1
if i < n:
quote_e = i - len(escape)
return quote_s, quote_e, escape
else:
return quote_s, -1, escape
i += 1
return -1, -1, escape
def tokenize(line, sep=None, quote='"\'', preserve=False):
"""
:param line: the line to tokenize.
:param sep: is None or a non-empty string separator to tokenize with.
If sep is None, runs of consecutive whitespace are regarded as a single
separator, and the result will contain no empty strings at the start or end
if the string has leading or trailing whitespace. Consequently, splitting
an empty string or a string consisting of just whitespace with a None
separator returns `[]`. Just like `str.split(None)`.
By default, `sep` is None.
    :param quote: Every character in `quote` is regarded as a quote. Add a `\` prefix to make
an exception. Segment between the same quotes is preserved.
By default, `quote` is `'"\''`.
:param preserve: preserve the quote itself if `preserve` is `True`.
By default, `preserve` is `False`.
:return: a list of string.
"""
if sep == quote:
        raise ValueError('sep and quote must be different')
if sep is None:
if len(line) == 0:
return []
line = line.strip()
rst = ['']
n = len(line)
i = 0
while i < n:
quote_s, quote_e, escape = _findquote(line[i:], quote)
if len(escape) > 0:
lines = []
x = 0
for e in escape:
lines.append(line[x:i + e])
x = i + e + 1
lines.append(line[x:])
line = ''.join(lines)
n = len(line)
if quote_s < 0:
sub = n
else:
sub = i + quote_s
if i < sub:
sub_rst = line[i:sub].split(sep)
if sep is None:
if line[sub - 1] in string.whitespace:
sub_rst.append('')
if line[i] in string.whitespace:
sub_rst.insert(0, '')
head = rst.pop()
sub_rst[0] = head + sub_rst[0]
rst += sub_rst
if quote_s < 0:
break
# discard incomplete
# 'a b"c' -> ['a']
if quote_e < 0:
rst.pop()
break
head = rst.pop()
if preserve:
head += line[i + quote_s:i + quote_e + 1]
else:
head += line[i + quote_s + 1:i + quote_e]
rst.append(head)
i += quote_e + 1
return rst
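# Illustrative cases (hypothetical, derived from the docstring above):
#   tokenize('a "b c" d')                 -> ['a', 'b c', 'd']
#   tokenize('a "b c" d', preserve=True)  -> ['a', '"b c"', 'd']
#   tokenize('a b"c')                     -> ['a']   # incomplete quote is discarded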
def parse_colon_kvs(data):
data = tokenize(data, quote='"\'')
ret = {}
for buf in data:
if ':' not in buf:
raise ValueError('invalid arguments, arguments'
'need key-val like: "k:v"')
k, v = buf.split(':', 1)
ret[k] = v
return ret
def page(lines, max_lines=10, control_char=True, pager=('less',)):
"""
Display `lines` of string in console, with a pager program (`less`) if too many
lines.
    It could be used in an interactive tool to display large content.
It output strings directly to stdout.
:param lines: is `list` of lines to display.
:param max_lines: specifies the max lines not to use a pager.
By default it is 10 lines.
:param control_char: specifies if to interpret controlling chars, such as color char in terminal.
:param pager: specifies the program as a pager.
It is a list of command and argument.
By default it is `('less',)`.
:return: Nothing
"""
if len(lines) > max_lines:
pp = {'stdin': subprocess.PIPE,
'stdout': None,
'stderr': None}
cmd_pager = list(pager)
if control_char:
if pager == ('less',):
cmd_pager += ['-r']
subproc = subprocess.Popen(cmd_pager,
close_fds=True,
cwd='./',
**pp)
try:
            out, err = subproc.communicate('\n'.join(lines).encode("utf-8"))  # .encode() already yields bytes
except IOError as e:
            if e.errno == errno.EPIPE:  # e[0] indexing worked in Python 2 only
pass
else:
raise
subproc.wait()
else:
os.write(1, bytes(('\n'.join(lines) + "\n").encode("utf-8")))
| 26.084967
| 103
| 0.5057
| 2,051
| 15,964
| 3.862019
| 0.163335
| 0.01515
| 0.00909
| 0.008837
| 0.144552
| 0.072844
| 0.035602
| 0.035602
| 0.01818
| 0.010605
| 0
| 0.025446
| 0.38211
| 15,964
| 611
| 104
| 26.12766
| 0.777575
| 0.330055
| 0
| 0.205212
| 0
| 0
| 0.017958
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052117
| false
| 0.006515
| 0.019544
| 0.006515
| 0.153094
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e92ba6f82fbd7b5de0f238a51cd87521f2ccd146
| 16,920
|
py
|
Python
|
camera.py
|
Euclideon/udSDKPython
|
a82157ab6382fda6291bdcca9ec2a51203b95b2a
|
[
"MIT"
] | 4
|
2020-09-03T05:35:15.000Z
|
2021-11-08T04:31:55.000Z
|
camera.py
|
Euclideon/udSDKPython
|
a82157ab6382fda6291bdcca9ec2a51203b95b2a
|
[
"MIT"
] | 1
|
2020-08-18T06:49:21.000Z
|
2020-08-18T06:49:21.000Z
|
camera.py
|
Euclideon/udSDKPython
|
a82157ab6382fda6291bdcca9ec2a51203b95b2a
|
[
"MIT"
] | 1
|
2020-09-11T07:52:32.000Z
|
2020-09-11T07:52:32.000Z
|
import logging
import math
import numpy as np
import pyglet
import udSDK
logger = logging.getLogger(__name__)
class Camera():
"""
Base camera class for Euclideon udSDK Python Sample
This sets the default behaviour for a perspective camera
    Stores the state of the camera, and provides functions for modifying
that state
    User input is passed from the UDViewport object via
the set_{}Pressed functions (for mapped functions)
Mouse Input is passed through the on_mouse_drag function
This is intended to be subclassed for custom camera behaviour
"""
def __init__(self, renderTarget: udSDK.udRenderTarget):
self.normalSpeed = 0.3
self.fastSpeed = 1
self.moveSpeed = self.normalSpeed
self.moveVelocity = [0, 0, 0]
self.matrix = np.identity(4)
self._view = renderTarget
self.position = [0, 0, 0]
self.nearPlane = 0.01
self.farPlane = 2
self.FOV = 60
#booleans indicating button activation
self.forwardPressed = False
self.backPressed = False
self.rightPressed = False
self.leftPressed = False
self.upPressed = False
self.downPressed = False
self.shiftPressed = False
self.ctrlPressed = False
self.zoomInPressed = False
self.zoomOutPressed = False
self.theta = 0
self.phi = 0
self.zoom = 1
self.mouseSensitivity = 1 / 100
self.camRotation = [0, 0, 0]
self.lookAtTarget = [0, 0, 0]
self.rotationMatrix = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
self.facingDirection = [0, 1, 0]
self.rotationAxis = np.array([0,0,1])
self.tangentVector = np.array([0,1,0])
self._projectionMatrix = []
self.controlString = """
W,S,A,D: Move\n
E: Move up\n
C: Move Down\n
Click + drag: Look around\n
Shift (Hold): Increase speed\n
O: Zoom in\n
P: Zoom out\n
"""
def on_cast(self):
"""
To be called when this class is converted to from another Camera derived class
ensures that appropriate variables are set in lieu of __init__being called without
resetting all variables
Returns
-------
"""
pass
@property
def position(self):
return self.__position
@position.setter
def position(self, newposition):
self.__position = tuple(newposition)
self.matrix[3, :3] = newposition
self._view.SetMatrix(udSDK.udRenderTargetMatrix.Camera, self.matrix.flatten())
def get_controls_string(self):
return self.controlString
def get_view_vertices(self):
"""
Returns
-------
the extents of the viewing volume projected onto 2d space
"""
#TODO make this correctly display the location of near and far plane
rat = np.tan(self.FOV/2/180*np.pi)/self.farPlane
nearLeft = [-self.nearPlane * rat, self.nearPlane/self.farPlane]
farLeft = [-self.farPlane * rat, self.farPlane/self.farPlane]
nearRight = [self.nearPlane * rat, self.nearPlane/self.farPlane]
farRight = [self.farPlane * rat, self.farPlane/self.farPlane]
return [farLeft, nearLeft, nearRight, farRight]
def set_forwardPressed(self, val:bool):
self.forwardPressed = val
def set_backPressed(self, val):
self.backPressed = val
def set_rightPressed(self, val):
self.rightPressed = val
def set_leftPressed(self, val):
self.leftPressed = val
def set_upPressed(self, val):
self.upPressed = val
def set_downPressed(self, val):
self.downPressed = val
def set_shiftPressed(self, val):
self.shiftPressed = val
def set_ctrlPressed(self, val):
self.ctrlPressed = val
def set_zoomInPressed(self, val):
self.zoomInPressed = val
def set_zoomOutPressed(self, val):
self.zoomOutPressed = val
def reset_projection(self):
self.set_projection_perspective()
def on_key_press(self, symbol, modifiers):
"""
Defined for passing key presses not mapped using the key bindings in the view port
override subclasses
Parameters
----------
symbol
modifiers
Returns
-------
"""
pass
def on_key_release(self, symbol, modifiers):
pass
def rotate_polar(self, vec, dtheta, dphi):
"""
        takes a change in polar coordinates and updates the camera rotation
based on it
Returns
-------
        a copy of vector vec rotated by dtheta in the xy plane and by dphi in inclination
"""
r = math.sqrt(vec[0]**2+vec[1]**2+vec[2]**2)
theta = math.atan2(vec[1], vec[0])
phi = math.acos(vec[2]/r)
#prevent rotation such that the vector is pointing directly up or down
thresh = 0.1
if abs(phi + dphi) < thresh or abs(phi + dphi - math.pi) < thresh:
dphi = 0
xprime = r * math.sin(phi+dphi)*math.cos(theta+dtheta)
yprime = r * math.sin(phi+dphi) * math.sin(theta + dtheta)
zprime = r * math.cos(phi+dphi)
self.phi = phi
self.theta = theta
return [xprime, yprime, zprime]
def set_projection_perspective(self, near=None, far=None, FOV=None):
if near is None:
near = self.nearPlane
if far is None:
far = self.farPlane
if FOV is None:
FOV = self.FOV
else:
self.FOV = FOV
FOV = FOV/180*np.pi
e = 1/np.tan(FOV/2)
a = self._view.height/self._view.width
self._projectionMatrix = \
[
e*a, 0, 0, 0,
0, 0, (far+near)/(far-near), 1,
0, e, 0, 0,
0, 0, -(2*far*near)/(far-near), 0
]
self._view.SetMatrix(udSDK.udRenderTargetMatrix.Projection, self._projectionMatrix)
def set_projection_ortho(self, left, right, top, bottom, near, far):
self._projectionMatrix = \
[
2/(right-left), 0, 0, 0,
0, 0, 2/(far-near), 0,
0, 2/(top - bottom), 0, 0,
-(right+left)/(right-left), -(top+bottom)/(top-bottom), -(far+near)/(far-near), 1
]
self._view.SetMatrix(udSDK.udRenderTargetMatrix.Projection, self._projectionMatrix)
def set_rotation(self, x=0, y=-5, z=0, roll=0, pitch=0, yaw=0):
"""
        Sets the camera matrix to have a rotation of yaw, pitch, roll
Parameters
----------
x
y
z
roll
pitch
yaw
Returns
-------
"""
sy = math.sin(yaw)
cy = math.cos(yaw)
sp = math.sin(pitch)
cp = math.cos(pitch)
sr = math.sin(roll)
cr = math.cos(roll)
self.matrix = np.array([
[cy*cp, cy*sp*sr-sy*cr, cy*sp*cr+sy*sr, 0],
[sy*cp, sy*sp*sr+cy*cr, sy*sp*cr-cy*sr, 0],
[-sp, cp*sr, cp*cr, 0],
[x, y, z, 1]
])
self.rotationMatrix = self.matrix[:3, :3]
self._view.SetMatrix(udSDK.udRenderTargetMatrix.Camera, self.matrix.flatten())
def axisAngle(self, axis, theta):
#cTheta = np.dot(np.array([0,1,0]), dPoint) / np.linalg.norm(dPoint)
#theta = np.arccos(cTheta)
cTheta = np.cos(theta)
sTheta = np.sin(theta)
self.matrix = np.array(
[
                [cTheta + axis[0] ** 2 * (1 - cTheta), axis[0] * axis[1] * (1 - cTheta) - axis[2] * sTheta, axis[0] * axis[2] * (1 - cTheta) + axis[1] * sTheta, 0],  # Rodrigues rotation; the + axis[1]*sTheta term was missing
[axis[1] * axis[0] * (1 - cTheta) + axis[2] * sTheta, cTheta + axis[1] ** 2 * (1 - cTheta), axis[1] * axis[2] * (1 - cTheta) - axis[0] * sTheta, 0],
[axis[2] * axis[0] * (1 - cTheta) - axis[1] * sTheta, axis[2] * axis[1] * (1 - cTheta) + axis[0] * sTheta, cTheta + axis[2] ** 2 * (1 - cTheta), 0],
[self.position[0], self.position[1], self.position[2], 1]
]
)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
vec = self.rotate_polar(self.facingDirection,dx/100,dy/100)
self.look_direction(np.array(vec))
def look_at(self, lookAtPoint=None, cameraPosition=None):
"""
faces the camera at point2, positions the camera at point1
Parameters
----------
cameraPosition: position of the camera
lookAtPoint: x, y, z tuple to face the camera towards
"""
if cameraPosition is None:
cameraPosition = self.position
else:
self.position = cameraPosition
if lookAtPoint is None:
lookAtPoint = self.lookAtTarget
if not np.array_equal(lookAtPoint, cameraPosition):
#calculate our axis of rotation based on the distance between these points
dPoint = np.array(lookAtPoint) - np.array(cameraPosition)
else:
dPoint = np.array([1, 1, 0])
self.look_direction(dPoint)
def look_direction(self, dPoint: np.array):
"""
Points the camera in the direction vector dPoint
assumes that the tangent vector has a z value of zero (i.e. no roll)
Parameters
----------
dPoint
Returns
-------
"""
tangent = [0, 0, 0]
if dPoint[1] != 0:
tangent[0] = (dPoint[0]-np.sqrt(dPoint[0]**2+4*dPoint[1]**2))/(2*dPoint[1])
elif dPoint[2]>0:
tangent[0] = 1
else:
tangent[0] = -1
tangent[1] = 1-tangent[0]**2
tangent = -np.array(tangent)
tangent = tangent / np.sqrt(tangent.dot(tangent))
forward = dPoint/np.sqrt(dPoint.dot(dPoint))
axis = np.cross(tangent, forward)
axis = axis / np.sqrt(axis.dot(axis))
self.matrix = np.array(
[
[tangent[0], tangent[1], tangent[2], 0],
[forward[0], forward[1], forward[2], 0],
[axis[0], axis[1], axis[2], 0],
[self.position[0], self.position[1], self.position[2], 1]
]
)
self.rotationAxis = axis
self.tangentVector = tangent
self.rotationMatrix = self.matrix[:3, :3]
self.facingDirection = np.array([0,1,0]).dot(self.rotationMatrix).tolist()
self._view.SetMatrix(udSDK.udRenderTargetMatrix.Camera, self.matrix.flatten())
def update_move_direction(self):
"""
updates the velocity and projection based on what keys have been pressed since the last call
"""
self.moveVelocity = [0, 0, 0]# in local coordinates
if self.shiftPressed:
self.moveSpeed = self.fastSpeed
else:
self.moveSpeed = self.normalSpeed
if self.forwardPressed:
self.moveVelocity[1] += self.moveSpeed
if self.backPressed:
self.moveVelocity[1] -= self.moveSpeed
if self.rightPressed:
self.moveVelocity[0] += self.moveSpeed
if self.leftPressed:
self.moveVelocity[0] -= self.moveSpeed
if self.upPressed:
self.moveVelocity[2] += self.moveSpeed
if self.downPressed:
self.moveVelocity[2] -= self.moveSpeed
if self.zoomInPressed:
self.zoom += 1
if self.zoomOutPressed and self.zoom>1:
self.zoom -= 1
self.mouseSensitivity = 0.1/self.zoom
self.set_projection_perspective(self.nearPlane, self.farPlane, self.zoom)
self.moveVelocity = np.array(self.moveVelocity).dot(self.rotationMatrix).tolist()
def update_position(self, dt):
self.update_move_direction()
newposition = [0, 0, 0]
newposition[0] = self.position[0] + self.moveVelocity[0] * dt
newposition[1] = self.position[1] + self.moveVelocity[1] * dt
newposition[2] = self.position[2] + self.moveVelocity[2] * dt
self.position = newposition
class OrthoCamera(Camera):
def __init__(self, renderTarget):
super().__init__(renderTarget)
self.FOV = 90
def on_cast(self):
self.controlString = """
Ortho Camera (experimental):
W,S,A,D: Move\n
E: Move up\n
C: Move Down\n
Click + drag: Look around\n
Shift (Hold): Increase speed\n
O: Zoom in\n
P: Zoom out\n
"""
self.FOV = 90
def update_move_direction(self):
super().update_move_direction()
self.moveVelocity[2] = 0
v = np.array(self.moveVelocity)
mag = np.sqrt(v.dot(v))
if mag != 0:
self.moveVelocity = (v/mag).tolist()
if self.upPressed:
self.moveVelocity[2] += self.moveSpeed
if self.downPressed:
self.moveVelocity[2] -= self.moveSpeed
def update_position(self, dt):
super().update_position(dt)
ar = self._view.width/self._view.height
zoom = np.exp(self.zoom)
viewWidth = 100/self.zoom
self.mouseSensitivity = 0.1/ zoom
self.set_projection_ortho(-ar/2*viewWidth, ar/2*viewWidth, 1/ar/2*viewWidth, -1/ar/2*viewWidth, self.nearPlane, self.farPlane)
def reset_projection(self):
pass
class MapCamera(OrthoCamera):
"""
Orthographic camera that follows a target and remains a set height above it
"""
def __init__(self, renderTarget, target, elevation):
super().__init__(renderTarget)
self.target = target
self.elevation = elevation
class DefaultTarget(object):
def __init__(self):
self.position = [0, 0, 0]
def on_cast(self):
pass
#here we override the default control behaviour of the camera
def update_move_direction(self):
pass
def on_mouse_drag(self, *args, **kwargs):
pass
def update_position(self, dt):
self.position = [self.target.position[0], self.target.position[1], self.target.position[2]+self.elevation]
self.look_direction(np.array([0, 0, -1]))
ar = self._view.width/self._view.height
zoom = self.zoom
self.set_projection_ortho(-ar/2*self.position[2]/zoom, ar/2*self.position[2]/zoom, 1/ar/2*self.position[2]/zoom, -1/ar/2*self.position[2]/zoom,self.nearPlane,self.farPlane)
class OrbitCamera(Camera):
"""
Movement of this camera is relative to a fixed point in space
"""
def on_cast(self):
self.controlString = """
Orbit Camera (experimental):
W,S,A,D: Move\n
E: Move up\n
C: Move Down\n
Click + drag: Move rotation Centre\n
Shift (Hold): Increase speed\n
O: Zoom in\n
P: Zoom out\n
"""
def update_move_direction(self):
self.look_at()
super(OrbitCamera, self).update_move_direction()
#self.moveVelocity = np.array(self.moveVelocity).dot(self.rotationMatrix).tolist()
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
horiz = dx * self.tangentVector * self.mouseSensitivity
vert = dy * self.rotationAxis * self.mouseSensitivity
if not self.ctrlPressed:
self.lookAtTarget = self.lookAtTarget + horiz + vert
else:
self.position = self.position - horiz - vert
class PerspectiveCamera(OrbitCamera):
def update_position(self, dt):
#self.facingDirection = np.array([0, 1, 0]).dot(self.rotationMatrix).tolist()
for i in range(3):
self.lookAtTarget[i] = self.position[i] + self.facingDirection[i]
super().update_position(dt)
class TrackCamera(Camera):
def update_position(self, dt):
self.lookAtTarget[1] += 0.0001
super().update_position(dt)
self.look_at()
class RecordCamera(Camera):
"""
A camera class for manual generation and replay of flythroughs of models
the user defines a set of waypoints by pressing space when the camera is positioned at
the desired locations
Pressing enter will replay the path
Backspace will delete the most recently added waypoint
"""
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)  # fixed: __init was a typo for __init__
self.on_cast()
def on_cast(self):
self.controlString = """
Recording Camera:
W,S,A,D: Move\n
E: Move up\n
C: Move Down\n
Click + drag: Look around\n
Shift (Hold): Increase speed\n
O: Zoom in\n
P: Zoom out\n
Space: Record Position as Waypoint\n
Backspace: Remove Last Waypoint\n
Enter: Play back recorded path\n"""
try:
self.waypoints
except AttributeError:
self.waypoints = []
self.replayInd = 0
self.replaying = False
def on_key_press(self, symbol, modifiers):
if symbol == pyglet.window.key.SPACE:
self.waypoints.append(self.position)
if symbol == pyglet.window.key.ENTER:
try:
self.position = self.waypoints[0]
except IndexError:
return
self.replaying = True
self.replayInd = 1
if symbol == pyglet.window.key.BACKSPACE:
self.waypoints.pop()
def update_move_direction(self):
try:
self.replaying
except AttributeError:
self.replaying = False
if not self.replaying:
super().update_move_direction()
return
#here we linearly interpolate the path and face the camera direction
#ddir = dir + np.array(self.lookAtTarget)-np.array(self.position)
        #turn the facing direction toward the direction of travel
dir = np.array(self.waypoints[self.replayInd]) - np.array(self.position)
mag = np.linalg.norm(dir) #how far away from the waypoint we are
ddir = dir/mag - np.array(self.facingDirection)
dir = dir/mag * self.moveSpeed #dir is now the velocity we want the camera to travel in
self.look_direction(np.array(self.facingDirection) + ddir / 10)
self.moveVelocity = (dir).tolist()
if abs(mag) < self.moveSpeed:
#we are as close as we can get in a single step to the waypoint
if self.replayInd+1 < len(self.waypoints):
#self.position = self.waypoints[self.replayInd]
#move to the next waypoint
self.replayInd += 1
else:
#end the replay
self.replaying = False
self.moveVelocity = [0, 0, 0]
return
#self.look_at(self.waypoints[self.replayInd+1])
| 29.32409
| 176
| 0.642317
| 2,326
| 16,920
| 4.6092
| 0.171539
| 0.006902
| 0.005317
| 0.015017
| 0.280011
| 0.210335
| 0.187389
| 0.136275
| 0.130118
| 0.130118
| 0
| 0.022629
| 0.232151
| 16,920
| 576
| 177
| 29.375
| 0.802571
| 0.177896
| 0
| 0.317585
| 0
| 0
| 0.068487
| 0
| 0
| 0
| 0
| 0.001736
| 0
| 1
| 0.128609
| false
| 0.018373
| 0.013123
| 0.005249
| 0.181102
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e932fb4ec343373146508adfa905b3c8915cb66b
| 4,831
|
py
|
Python
|
train.py
|
ppujol76/-Pere_Transformers
|
e267bcc6559c998accaed647cacbff253031f8b0
|
[
"MIT"
] | null | null | null |
train.py
|
ppujol76/-Pere_Transformers
|
e267bcc6559c998accaed647cacbff253031f8b0
|
[
"MIT"
] | null | null | null |
train.py
|
ppujol76/-Pere_Transformers
|
e267bcc6559c998accaed647cacbff253031f8b0
|
[
"MIT"
] | 1
|
2021-06-21T08:40:18.000Z
|
2021-06-21T08:40:18.000Z
|
import torch
import os
from model.visualization import Visualization
from panel.main import tensorboard_panel
from torch.utils.data.dataset import Subset
import random
import numpy as np
def write_on_tensorboard(epoch:int, loss:int, bleu:int, image, expected_captions, generated_captions):
tensorboard_panel.add_sentences_comparison(epoch,expected_captions[0],generated_captions[0])
tensorboard_panel.add_loss(epoch,loss)
tensorboard_panel.add_bleu(epoch,bleu)
tensorboard_panel.add_image(epoch,image,expected_captions[0],generated_captions[0])
def split_subsets(dataset,train_percentage=0.8,all_captions=True):
"""
Performs the split of the dataset into Train and Test
"""
if all_captions==True:
# Get a list of all indexes in the dataset and convert to a numpy array
all_indexes = np.array([*range(0,len(dataset))])
# Reshape the array so we can shuffle indexes in chunks of 5
all_indexes_mat = all_indexes.reshape(-1,5)
np.random.shuffle(all_indexes_mat)
all_indexes_shuffled = all_indexes_mat.flatten()
# Get the number of images for train and the rest are for test
num_train_imgs = int(len(all_indexes_shuffled)/5*train_percentage)
# Create the subsets for train and test
train_split = Subset(dataset,all_indexes_shuffled[0:num_train_imgs*5].tolist())
test_split = Subset(dataset,all_indexes_shuffled[num_train_imgs*5:].tolist())
else:
all_first_index = [*range(0,len(dataset),5)]
random.shuffle(all_first_index)
num_train_imgs = int(len(all_first_index)*train_percentage)
train_split = Subset(dataset,all_first_index[0:num_train_imgs])
test_split = Subset(dataset,all_first_index[num_train_imgs:])
return train_split,test_split
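# Quick sketch (hypothetical numbers): with 100 captions (20 images x 5 captions)
# and train_percentage=0.8, whole 5-caption blocks stay together:
#   train, test = split_subsets(dataset, train_percentage=0.8)
#   len(train), len(test)  -> (80, 20)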
def train_single_epoch(epoch, model, train_loader, optimizer, criterion, device,scheduler):
"""
Train single epoch
"""
model.train()
for i, batch in enumerate(iter(train_loader)):
        # If we only want to train on a single batch:
# if i==0:
# batch1 = batch
# img, target = batch1
img, target = batch
img, target = img.to(device), target.to(device)
optimizer.zero_grad()
output = model(img, target)
output = output.permute(1,2,0)
loss = criterion(output[:,:,:-1], target[:,1:]) # target[:,1:])
print(i, loss.item())
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=0.25)
optimizer.step()
        # This is for the Exponential scheduler, which needs a step() each time you want to decay the lr by gamma.
# if (i+1)%10 == 0:
# scheduler.step()
# print(optimizer.param_groups[0]['lr'])
candidate_corpus = [model.vocab.generate_caption(torch.argmax(output[0].transpose(1, 0), dim=-1))]
reference_corpus = [model.vocab.generate_caption(target[0, 1:])]
bleu = 0
# bleu = bleu_score(candidate_corpus, reference_corpus)
print('--------------------------------------------------------------------------------------------------')
print('--------------------------------------------------------------------------------------------------')
print(f'Epoch {epoch} batch: {i} loss: {loss.item()}')
print('--------------------------------------------------------------------------------------------------')
print(candidate_corpus[0])
print(reference_corpus[0])
print('--------------------------------------------------------------------------------------------------')
        # Commented out because it raises a CUDA error
# write_on_tensorboard(i+(epoch*len(train_loader)),loss.item(),bleu,img[0],reference_corpus,candidate_corpus)
def evaluate(model,test_loader, vocab, device,criterion):
model.eval()
    total_loss = 0.
    n = 0  # token counter; it was never initialised, so `n +=` below raised NameError
    #device= 'cpu'
with torch.no_grad():
for idx, batch in enumerate(iter(test_loader)):
img, target = batch
img = img.to(device)
target = target.to(device)
for i in range(img.shape[0]):
sentence = model.inference(image=img[i].unsqueeze(0),vocab=vocab)
alphas = model.forward(image=img[i].unsqueeze(0), vocab=vocab)[1]
caption = ' '.join(sentence)
Visualization.plot_attention((img[0]), sentence, alphas) # showing expected and plotting attention
                total_loss += target.numel()*criterion(sentence,target).item()  # note: most criteria expect logits, not a decoded sentence
n += target.numel()
return total_loss / n, caption
def save_model(model, epoch):
"""
Function to save current model
"""
filename = os.path.join('model','checkpoints','Epoch_'+str(epoch)+'_model_state.pth')
model_state = {
'epoch':epoch,
'model':model.state_dict()
}
torch.save(model_state, filename)
def train(num_epochs, model, train_loader,test_loader, optimizer, criterion, device,log_interval,vocab,scheduler):
"""
Executes model training. Saves model to a file every 5 epoch.
"""
for epoch in range(1,num_epochs+1):
train_single_epoch(epoch, model, train_loader,optimizer, criterion, device, scheduler)
scheduler.step()
if epoch % 5 == 0:
save_model(model, epoch)
| 35.262774
| 114
| 0.673981
| 661
| 4,831
| 4.751891
| 0.291982
| 0.031837
| 0.022923
| 0.026743
| 0.201528
| 0.149316
| 0.063037
| 0.044572
| 0.044572
| 0.044572
| 0
| 0.01303
| 0.126268
| 4,831
| 136
| 115
| 35.522059
| 0.731106
| 0.195819
| 0
| 0.075
| 0
| 0
| 0.126599
| 0.102323
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.0875
| 0
| 0.1875
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e933799d41eabf2ce3d0578ad558fcf9ab8d220d
| 2,251
|
py
|
Python
|
views/probabilidade.py
|
pxcx/ambar-backend
|
350baabb492e4fbc1002ea851d1cef4fc999b81a
|
[
"MIT"
] | null | null | null |
views/probabilidade.py
|
pxcx/ambar-backend
|
350baabb492e4fbc1002ea851d1cef4fc999b81a
|
[
"MIT"
] | null | null | null |
views/probabilidade.py
|
pxcx/ambar-backend
|
350baabb492e4fbc1002ea851d1cef4fc999b81a
|
[
"MIT"
] | null | null | null |
from flask import jsonify
from sqlalchemy import func
from datetime import datetime, date
from models.previsao import Previsao, db
def configure(app):
    # /probabilidade - returns the overall probability of rain
# - inicio (YYYY-MM-DD)
# - fim (YYYY-MM-DD)
@app.route('/probabilidade/<inicio>/<fim>', methods=['GET'])
def probabilidade(inicio, fim):
try:
            # convert the parameters to datetime
inicio = datetime.strptime(inicio,'%Y-%m-%d')
fim = datetime.strptime(fim,'%Y-%m-%d')
            # total number of registered cities
totalCidades = db.session.query(func.count(Previsao.cidade).label('total_cidades')).\
filter(Previsao.date >= date(inicio.year, inicio.month, inicio.day)).\
filter(Previsao.date <= date(fim.year, fim.month, fim.day)).\
group_by(Previsao.cidade).\
first()
totalCidades = totalCidades.total_cidades
            # fetch the rain probability for each day
probabilidadeList = db.session.query(Previsao.date, Previsao.chuva_probabilidade).\
filter(Previsao.date >= date(inicio.year, inicio.month, inicio.day)).\
filter(Previsao.date <= date(fim.year, fim.month, fim.day)).\
all()
            # format the output
pa = 1/totalCidades
aux = {}
for i in probabilidadeList:
pb = i.chuva_probabilidade/100
if str(i.date) in aux:
aux[str(i.date)] = aux[str(i.date)] + pb*(pb*pa)/pa
else:
aux[str(i.date)] = pb*(pb*pa)/pa
out = 0
for key,val in aux.items():
if out > 0:
out = out + val*(val*pa)/pa
else:
out = val*(val*pa)/pa
return jsonify({'probabilidade_chuva': out})
except KeyError as e:
return jsonify({'error': 'O paramêtro "'+str(e)+'" não foi enviado.'})
except Exception as e:
return jsonify({'error': str(e)})
# Note: `app` is only defined inside configure(), so running this module
# directly would raise a NameError here.
if __name__ == "__main__":
    app.run(debug=True)
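Note that the accumulation term used above simplifies algebraically: pa cancels, so pb*(pb*pa)/pa equals pb**2 for any nonzero pa. A standalone check with synthetic values (three hypothetical cities):

# Standalone check of the accumulation term used above: pa cancels,
# so pb * (pb * pa) / pa == pb ** 2 for any pa != 0.
pa = 1 / 3  # hypothetical value for three registered cities
for pb in (0.1, 0.5, 0.9):
    assert abs(pb * (pb * pa) / pa - pb ** 2) < 1e-12
print("each accumulation term reduces to pb**2, independent of pa")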
e937f0e5ec885071b7daceb7fa5456d999a1e95f | 293 | py | Python | scripts/makeNegativesList.py | jccaicedo/localization-agent | d280acf355307b74e68dca9ec80ab293f0d18642 | ["MIT"] | 8 stars (2016-11-20 to 2020-12-09) | 45 issues (2015-05-04 to 2017-07-17) | 9 forks (2016-11-20 to 2020-09-01)
import sys, os
import utils as cu

params = cu.loadParams('fullList positivesList output')
full = [x for x in open(params['fullList'])]
# Use a set for O(1) membership tests instead of scanning a list
positives = set(open(params['positivesList']))
out = open(params['output'], 'w')
for r in full:
    if r not in positives:
        out.write(r)
out.close()
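The same filtering step can be written with context managers so the file handles are closed deterministically; a minimal sketch with placeholder file names (loadParams is bypassed here):

# A modernized sketch of the same set-difference filter; the file names
# are placeholders, and lines are compared verbatim, newline included.
with open('full.txt') as f:
    full = f.readlines()
with open('positives.txt') as f:
    positives = set(f)  # one entry per line
with open('negatives.txt', 'w') as out:
    out.writelines(line for line in full if line not in positives)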
e93a77efc359563f0911c10f45a8c7e3f5ed8fd4 | 1,354 | py | Python | tests/test_model.py | alexdawn/rollinghub | 6043c12520d7e0b0596f28c166616c1014e1f870 | ["MIT"] | 11 issues (2019-08-18 to 2022-03-21)
import pytest
from rollinghub.db import get_db
def test_index(client, auth):
response = client.get('/')
assert b"Log In" in response.data
assert b"Register" in response.data
auth.login()
response = client.get('/')
assert b'Log Out' in response.data
assert b'test title' in response.data
assert b'by testman on 1900-01-01' in response.data
assert b'href="/1/update"' in response.data
@pytest.mark.parametrize('path', (
'/create',
'/1/update',
'/1/delete',
))
def test_login_required(client, path):
response = client.post(path)
assert response.headers['Location'] == 'http://localhost/auth/login'
def test_author_required(app, client, auth):
# change the model author to another user
with app.app_context():
db, cur = get_db()
cur.execute('UPDATE model SET author_id = 2 WHERE id = 1')
db.commit()
auth.login()
# current user can't modify other user's post
assert client.post('/1/update').status_code == 403
assert client.post('/1/delete').status_code == 403
# current user doesn't see edit link
assert b'href="/1/update"' not in client.get('/').data
@pytest.mark.parametrize('path', (
'/2/update',
'/2/delete',
))
def test_exists_required(client, auth, path):
auth.login()
assert client.post(path).status_code == 404
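These tests rely on client and auth fixtures defined in a conftest.py that is not shown. A minimal sketch of what such a conftest might look like, following the Flask-tutorial layout these tests mirror (the create_app factory name and the login credentials are assumptions; database seeding is omitted):

# tests/conftest.py - sketch of the fixtures assumed by the tests above
import pytest
from rollinghub import create_app  # assumed application factory

@pytest.fixture
def app():
    app = create_app({'TESTING': True})
    yield app

@pytest.fixture
def client(app):
    return app.test_client()

class AuthActions:
    def __init__(self, client):
        self._client = client

    def login(self, username='testman', password='test'):
        # credentials are assumed to match the seeded test user
        return self._client.post(
            '/auth/login',
            data={'username': username, 'password': password})

    def logout(self):
        return self._client.get('/auth/logout')

@pytest.fixture
def auth(client):
    return AuthActions(client)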
e93be486b0635edc83619c16da55bfa370ed7c0e | 19,672 | py | Python | openpype/hosts/unreal/plugins/load/load_camera.py | Tilix4/OpenPype | 8909bd890170880aa7ec8b673abaa25a9bdf40f2 | ["MIT"] | 1 star (2022-02-08) | also zafrs/OpenPype @ 4b8e7e1ed002fc55b31307efdea70b0feaed474f
# -*- coding: utf-8 -*-
"""Load camera from FBX."""
from pathlib import Path
import unreal
from unreal import EditorAssetLibrary
from unreal import EditorLevelLibrary
from unreal import EditorLevelUtils
from openpype.pipeline import (
AVALON_CONTAINER_ID,
legacy_io,
)
from openpype.hosts.unreal.api import plugin
from openpype.hosts.unreal.api import pipeline as unreal_pipeline
class CameraLoader(plugin.Loader):
"""Load Unreal StaticMesh from FBX"""
families = ["camera"]
label = "Load Camera"
representations = ["fbx"]
icon = "cube"
color = "orange"
def _get_data(self, asset_name):
asset_doc = legacy_io.find_one({
"type": "asset",
"name": asset_name
})
return asset_doc.get("data")
def _set_sequence_hierarchy(
self, seq_i, seq_j, min_frame_j, max_frame_j
):
tracks = seq_i.get_master_tracks()
track = None
for t in tracks:
if t.get_class() == unreal.MovieSceneSubTrack.static_class():
track = t
break
if not track:
track = seq_i.add_master_track(unreal.MovieSceneSubTrack)
subscenes = track.get_sections()
subscene = None
for s in subscenes:
if s.get_editor_property('sub_sequence') == seq_j:
subscene = s
break
if not subscene:
subscene = track.add_section()
subscene.set_row_index(len(track.get_sections()))
subscene.set_editor_property('sub_sequence', seq_j)
subscene.set_range(
min_frame_j,
max_frame_j + 1)
def _import_camera(
self, world, sequence, bindings, import_fbx_settings, import_filename
):
ue_version = unreal.SystemLibrary.get_engine_version().split('.')
ue_major = int(ue_version[0])
ue_minor = int(ue_version[1])
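        # The Sequencer FBX import entry point was renamed between UE 4.26
        # and 4.27 (import_fbx -> import_level_sequence_fbx), which is why
        # the call is gated on the engine version below.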
if ue_major == 4 and ue_minor <= 26:
unreal.SequencerTools.import_fbx(
world,
sequence,
bindings,
import_fbx_settings,
import_filename
)
elif (ue_major == 4 and ue_minor >= 27) or ue_major == 5:
unreal.SequencerTools.import_level_sequence_fbx(
world,
sequence,
bindings,
import_fbx_settings,
import_filename
)
else:
raise NotImplementedError(
f"Unreal version {ue_major} not supported")
def load(self, context, name, namespace, data):
"""
Load and containerise representation into Content Browser.
This is two step process. First, import FBX to temporary path and
then call `containerise()` on it - this moves all content to new
directory and then it will create AssetContainer there and imprint it
with metadata. This will mark this path as container.
Args:
context (dict): application context
name (str): subset name
namespace (str): in Unreal this is basically path to container.
This is not passed here, so namespace is set
by `containerise()` because only then we know
real path.
data (dict): Those would be data to be imprinted. This is not used
now, data are imprinted by `containerise()`.
Returns:
list(str): list of container content
"""
# Create directory for asset and avalon container
hierarchy = context.get('asset').get('data').get('parents')
root = "/Game/OpenPype"
hierarchy_dir = root
hierarchy_dir_list = []
for h in hierarchy:
hierarchy_dir = f"{hierarchy_dir}/{h}"
hierarchy_dir_list.append(hierarchy_dir)
asset = context.get('asset').get('name')
suffix = "_CON"
if asset:
asset_name = "{}_{}".format(asset, name)
else:
asset_name = "{}".format(name)
tools = unreal.AssetToolsHelpers().get_asset_tools()
# Create a unique name for the camera directory
unique_number = 1
if EditorAssetLibrary.does_directory_exist(f"{hierarchy_dir}/{asset}"):
asset_content = EditorAssetLibrary.list_assets(
f"{root}/{asset}", recursive=False, include_folder=True
)
# Get highest number to make a unique name
folders = [a for a in asset_content
if a[-1] == "/" and f"{name}_" in a]
f_numbers = []
for f in folders:
# Get number from folder name. Splits the string by "_" and
# removes the last element (which is a "/").
f_numbers.append(int(f.split("_")[-1][:-1]))
f_numbers.sort()
if not f_numbers:
unique_number = 1
else:
unique_number = f_numbers[-1] + 1
asset_dir, container_name = tools.create_unique_asset_name(
f"{hierarchy_dir}/{asset}/{name}_{unique_number:02d}", suffix="")
asset_path = Path(asset_dir)
asset_path_parent = str(asset_path.parent.as_posix())
container_name += suffix
EditorAssetLibrary.make_directory(asset_dir)
        # Create the map for the shot and the map hierarchy. If the maps
        # already exist, they are reused.
h_dir = hierarchy_dir_list[0]
h_asset = hierarchy[0]
master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map"
if not EditorAssetLibrary.does_asset_exist(master_level):
EditorLevelLibrary.new_level(f"{h_dir}/{h_asset}_map")
level = f"{asset_path_parent}/{asset}_map.{asset}_map"
if not EditorAssetLibrary.does_asset_exist(level):
EditorLevelLibrary.new_level(f"{asset_path_parent}/{asset}_map")
EditorLevelLibrary.load_level(master_level)
EditorLevelUtils.add_level_to_world(
EditorLevelLibrary.get_editor_world(),
level,
unreal.LevelStreamingDynamic
)
EditorLevelLibrary.save_all_dirty_levels()
EditorLevelLibrary.load_level(level)
        # Get all the sequences in the hierarchy, creating any that
        # don't exist yet.
sequences = []
frame_ranges = []
i = 0
for h in hierarchy_dir_list:
root_content = EditorAssetLibrary.list_assets(
h, recursive=False, include_folder=False)
existing_sequences = [
EditorAssetLibrary.find_asset_data(asset)
for asset in root_content
if EditorAssetLibrary.find_asset_data(
asset).get_class().get_name() == 'LevelSequence'
]
if not existing_sequences:
scene = tools.create_asset(
asset_name=hierarchy[i],
package_path=h,
asset_class=unreal.LevelSequence,
factory=unreal.LevelSequenceFactoryNew()
)
asset_data = legacy_io.find_one({
"type": "asset",
"name": h.split('/')[-1]
})
id = asset_data.get('_id')
start_frames = []
end_frames = []
elements = list(
legacy_io.find({"type": "asset", "data.visualParent": id}))
for e in elements:
start_frames.append(e.get('data').get('clipIn'))
end_frames.append(e.get('data').get('clipOut'))
elements.extend(legacy_io.find({
"type": "asset",
"data.visualParent": e.get('_id')
}))
min_frame = min(start_frames)
max_frame = max(end_frames)
scene.set_display_rate(
unreal.FrameRate(asset_data.get('data').get("fps"), 1.0))
scene.set_playback_start(min_frame)
scene.set_playback_end(max_frame)
sequences.append(scene)
frame_ranges.append((min_frame, max_frame))
else:
for e in existing_sequences:
sequences.append(e.get_asset())
frame_ranges.append((
e.get_asset().get_playback_start(),
e.get_asset().get_playback_end()))
i += 1
EditorAssetLibrary.make_directory(asset_dir)
cam_seq = tools.create_asset(
asset_name=f"{asset}_camera",
package_path=asset_dir,
asset_class=unreal.LevelSequence,
factory=unreal.LevelSequenceFactoryNew()
)
# Add sequences data to hierarchy
for i in range(0, len(sequences) - 1):
self._set_sequence_hierarchy(
sequences[i], sequences[i + 1],
frame_ranges[i + 1][0], frame_ranges[i + 1][1])
data = self._get_data(asset)
cam_seq.set_display_rate(
unreal.FrameRate(data.get("fps"), 1.0))
cam_seq.set_playback_start(0)
cam_seq.set_playback_end(data.get('clipOut') - data.get('clipIn') + 1)
self._set_sequence_hierarchy(
sequences[-1], cam_seq,
data.get('clipIn'), data.get('clipOut'))
settings = unreal.MovieSceneUserImportFBXSettings()
settings.set_editor_property('reduce_keys', False)
if cam_seq:
self._import_camera(
EditorLevelLibrary.get_editor_world(),
cam_seq,
cam_seq.get_bindings(),
settings,
self.fname
)
# Create Asset Container
unreal_pipeline.create_container(
container=container_name, path=asset_dir)
data = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"asset": asset,
"namespace": asset_dir,
"container_name": container_name,
"asset_name": asset_name,
"loader": str(self.__class__.__name__),
"representation": context["representation"]["_id"],
"parent": context["representation"]["parent"],
"family": context["representation"]["context"]["family"]
}
unreal_pipeline.imprint(
"{}/{}".format(asset_dir, container_name), data)
EditorLevelLibrary.save_all_dirty_levels()
EditorLevelLibrary.load_level(master_level)
asset_content = EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=True
)
for a in asset_content:
EditorAssetLibrary.save_asset(a)
return asset_content
def update(self, container, representation):
ar = unreal.AssetRegistryHelpers.get_asset_registry()
root = "/Game/OpenPype"
asset_dir = container.get('namespace')
context = representation.get("context")
hierarchy = context.get('hierarchy').split("/")
h_dir = f"{root}/{hierarchy[0]}"
h_asset = hierarchy[0]
master_level = f"{h_dir}/{h_asset}_map.{h_asset}_map"
EditorLevelLibrary.save_current_level()
filter = unreal.ARFilter(
class_names=["LevelSequence"],
package_paths=[asset_dir],
recursive_paths=False)
sequences = ar.get_assets(filter)
filter = unreal.ARFilter(
class_names=["World"],
package_paths=[str(Path(asset_dir).parent.as_posix())],
recursive_paths=True)
maps = ar.get_assets(filter)
# There should be only one map in the list
EditorLevelLibrary.load_level(maps[0].get_full_name())
level_sequence = sequences[0].get_asset()
display_rate = level_sequence.get_display_rate()
playback_start = level_sequence.get_playback_start()
playback_end = level_sequence.get_playback_end()
sequence_name = f"{container.get('asset')}_camera"
# Get the actors in the level sequence.
objs = unreal.SequencerTools.get_bound_objects(
unreal.EditorLevelLibrary.get_editor_world(),
level_sequence,
level_sequence.get_bindings(),
unreal.SequencerScriptingRange(
has_start_value=True,
has_end_value=True,
inclusive_start=level_sequence.get_playback_start(),
exclusive_end=level_sequence.get_playback_end()
)
)
# Delete actors from the map
for o in objs:
if o.bound_objects[0].get_class().get_name() == "CineCameraActor":
actor_path = o.bound_objects[0].get_path_name().split(":")[-1]
actor = EditorLevelLibrary.get_actor_reference(actor_path)
EditorLevelLibrary.destroy_actor(actor)
# Remove the Level Sequence from the parent.
# We need to traverse the hierarchy from the master sequence to find
# the level sequence.
root = "/Game/OpenPype"
namespace = container.get('namespace').replace(f"{root}/", "")
ms_asset = namespace.split('/')[0]
filter = unreal.ARFilter(
class_names=["LevelSequence"],
package_paths=[f"{root}/{ms_asset}"],
recursive_paths=False)
sequences = ar.get_assets(filter)
master_sequence = sequences[0].get_asset()
sequences = [master_sequence]
parent = None
sub_scene = None
for s in sequences:
tracks = s.get_master_tracks()
subscene_track = None
for t in tracks:
if t.get_class() == unreal.MovieSceneSubTrack.static_class():
subscene_track = t
break
if subscene_track:
sections = subscene_track.get_sections()
for ss in sections:
if ss.get_sequence().get_name() == sequence_name:
parent = s
sub_scene = ss
# subscene_track.remove_section(ss)
break
sequences.append(ss.get_sequence())
# Update subscenes indexes.
i = 0
for ss in sections:
ss.set_row_index(i)
i += 1
if parent:
break
assert parent, "Could not find the parent sequence"
EditorAssetLibrary.delete_asset(level_sequence.get_path_name())
settings = unreal.MovieSceneUserImportFBXSettings()
settings.set_editor_property('reduce_keys', False)
tools = unreal.AssetToolsHelpers().get_asset_tools()
new_sequence = tools.create_asset(
asset_name=sequence_name,
package_path=asset_dir,
asset_class=unreal.LevelSequence,
factory=unreal.LevelSequenceFactoryNew()
)
new_sequence.set_display_rate(display_rate)
new_sequence.set_playback_start(playback_start)
new_sequence.set_playback_end(playback_end)
sub_scene.set_sequence(new_sequence)
self._import_camera(
EditorLevelLibrary.get_editor_world(),
new_sequence,
new_sequence.get_bindings(),
settings,
str(representation["data"]["path"])
)
data = {
"representation": str(representation["_id"]),
"parent": str(representation["parent"])
}
unreal_pipeline.imprint(
"{}/{}".format(asset_dir, container.get('container_name')), data)
EditorLevelLibrary.save_current_level()
asset_content = EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=False)
for a in asset_content:
EditorAssetLibrary.save_asset(a)
EditorLevelLibrary.load_level(master_level)
def remove(self, container):
path = Path(container.get("namespace"))
parent_path = str(path.parent.as_posix())
ar = unreal.AssetRegistryHelpers.get_asset_registry()
filter = unreal.ARFilter(
class_names=["LevelSequence"],
package_paths=[f"{str(path.as_posix())}"],
recursive_paths=False)
sequences = ar.get_assets(filter)
if not sequences:
raise Exception("Could not find sequence.")
world = ar.get_asset_by_object_path(
EditorLevelLibrary.get_editor_world().get_path_name())
filter = unreal.ARFilter(
class_names=["World"],
package_paths=[f"{parent_path}"],
recursive_paths=True)
maps = ar.get_assets(filter)
# There should be only one map in the list
if not maps:
raise Exception("Could not find map.")
map = maps[0]
EditorLevelLibrary.save_all_dirty_levels()
EditorLevelLibrary.load_level(map.get_full_name())
# Remove the camera from the level.
actors = EditorLevelLibrary.get_all_level_actors()
for a in actors:
if a.__class__ == unreal.CineCameraActor:
EditorLevelLibrary.destroy_actor(a)
EditorLevelLibrary.save_all_dirty_levels()
EditorLevelLibrary.load_level(world.get_full_name())
# There should be only one sequence in the path.
sequence_name = sequences[0].asset_name
# Remove the Level Sequence from the parent.
# We need to traverse the hierarchy from the master sequence to find
# the level sequence.
root = "/Game/OpenPype"
namespace = container.get('namespace').replace(f"{root}/", "")
ms_asset = namespace.split('/')[0]
filter = unreal.ARFilter(
class_names=["LevelSequence"],
package_paths=[f"{root}/{ms_asset}"],
recursive_paths=False)
sequences = ar.get_assets(filter)
master_sequence = sequences[0].get_asset()
sequences = [master_sequence]
parent = None
for s in sequences:
tracks = s.get_master_tracks()
subscene_track = None
for t in tracks:
if t.get_class() == unreal.MovieSceneSubTrack.static_class():
subscene_track = t
break
if subscene_track:
sections = subscene_track.get_sections()
for ss in sections:
if ss.get_sequence().get_name() == sequence_name:
parent = s
subscene_track.remove_section(ss)
break
sequences.append(ss.get_sequence())
# Update subscenes indexes.
i = 0
for ss in sections:
ss.set_row_index(i)
i += 1
if parent:
break
assert parent, "Could not find the parent sequence"
EditorAssetLibrary.delete_directory(str(path.as_posix()))
# Check if there isn't any more assets in the parent folder, and
# delete it if not.
asset_content = EditorAssetLibrary.list_assets(
parent_path, recursive=False, include_folder=True
)
if len(asset_content) == 0:
EditorAssetLibrary.delete_directory(parent_path)
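The unique-number scan in load() (folders named <name>_NN/) is self-contained enough to isolate as a pure function, which makes it testable outside the Unreal Editor; a minimal sketch with synthetic folder paths (the helper name is an assumption):

def next_unique_number(asset_content, name):
    # Mirror the scan in load(): keep folder paths that end with "/" and
    # contain f"{name}_", parse the number after the last "_", and return
    # highest + 1 (or 1 when no such folder exists yet).
    folders = [a for a in asset_content if a[-1] == "/" and f"{name}_" in a]
    f_numbers = sorted(int(f.split("_")[-1][:-1]) for f in folders)
    return f_numbers[-1] + 1 if f_numbers else 1

# Synthetic example (paths are placeholders):
content = ["/Game/OpenPype/sh010/camera_01/", "/Game/OpenPype/sh010/camera_02/"]
assert next_unique_number(content, "camera") == 3
assert next_unique_number([], "camera") == 1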