hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3227a055c835557ad7f0f841ab6676069d791695
| 10,965
|
py
|
Python
|
verify/imagenet.py
|
CAS-LRJ/DeepPAC
|
75059572c23474d32a762aca5640f4d799fd992a
|
[
"Apache-2.0"
] | null | null | null |
verify/imagenet.py
|
CAS-LRJ/DeepPAC
|
75059572c23474d32a762aca5640f4d799fd992a
|
[
"Apache-2.0"
] | null | null | null |
verify/imagenet.py
|
CAS-LRJ/DeepPAC
|
75059572c23474d32a762aca5640f4d799fd992a
|
[
"Apache-2.0"
] | null | null | null |
import torch
from torchvision import transforms
from PIL import Image
import numpy as np
import math
from sklearn.linear_model import LinearRegression
from .grid import Grid, grid_split
import torch.backends.cudnn as cudnn
'''
Global Constants:
TASK_NAME: Name of the verification task (deprecated)
PATH: The path of the model file. (Initialized in imagenet_verify)
mean, stdvar: The normalization parameters of the data. (Initialized in imagenet_verify, default mean=(0.4914,0.4822,0.4465) stdvar=(0.2023,0.1994,0.2010))
delta: The radius of the L-inf Ball. (Initialized in imagenet_verify, default 4/255)
significance, error: The significance and the error rate of the PAC-Model. (Initialized in imagenet_verify, default 0.01 and 0.001)
final_samples: The number of samples needed to calculate the final margin. (Initialized in imagenet_verify, default 1600, according to defualt error rate and significance)
Batchsize: The batchsize of sampling procedure. (Initialized in imagenet_verify, defualt 200)
device: Which device to be utilised by Pytorch. (Initialized in imagenet_verify, default 'cuda')
model: The Pytorch Network to be verified. (Initialized in imagenet_verify)
pretrans: The torchvision transform to process the image. (Resize and Tensorize)
normalization_trans: The normalization transform to normalize the data. (Initialized in imagenet_verify)
sampling_budget: The sampling limit for each stepwise splitting. (Initialized in imagenet_verify)
init_grid: The Grid for Imagenet Data (224*224)
Functions:
grid_batch_sample: Grid-based Sampling for Scenario Optimization (Untargetted)
scenario_optimization: Main Verification Function (Focused Learning, Stepwise-Splitting)
imagenet_verify: Entry Function
'''
# Preprocessing applied to every input image: resize to the 224x224
# ImageNet resolution and convert to a [0, 1] float tensor.
pretrans = transforms.Compose([transforms.Resize((224, 224)),
                               transforms.ToTensor(),
                               ])
# ImageNet channel-wise normalization statistics (RGB); may be overridden
# in imagenet_verify via command-line args.
mean = (0.485, 0.456, 0.406)
stdvar = (0.229, 0.224, 0.225)
normalization_trans = transforms.Normalize(mean, stdvar)
# Sampling limit for each stepwise-splitting refinement round.
sampling_budget = 20000
# Radius of the L-inf ball around the input image (default 4/255).
delta = 4/255
# Error rate and significance level of the PAC model.
error = 1e-2
significance = 1e-3
# Batch size used by the sampling procedure.
Batchsize = 200
device = 'cuda'
# Single grid covering the whole 224x224 image.
init_grid = [Grid(0, 0, 224, 224)]
# Default model checkpoint path (overridden in imagenet_verify).
PATH = './models/imagenet_linf_4.pth'
def grid_batch_sample(grid_list, n_sample, batch_num, lower, upper, model, fixed_coeff=None, label=0):
    """Grid-based sampling for scenario optimization (untargeted).

    Draws at least ``n_sample`` uniform samples from the box [lower, upper],
    evaluates the model's untargeted score difference on each, and builds the
    per-grid regression features.

    Parameters
    ----------
    grid_list : list of Grid
        Grids whose per-channel pixel sums become regression features.
    n_sample : int
        Minimum number of samples (rounded up to whole batches).
    batch_num : int
        Samples per batch.
    lower, upper : np.ndarray
        Element-wise bounds of the sampling box, shape (3, H, W).
    model : torch.nn.Module
        Network under verification (put into eval mode here).
    fixed_coeff : np.ndarray or None
        Frozen coefficients of the partially-learned affine model; ``None``
        means no coefficients are frozen yet (treated as all zeros).  The
        original code crashed on the documented default of ``None``.
    label : int
        Ground-truth class index.

    Returns
    -------
    (features, results, fixed_features) : tuple of np.ndarray
        Stacked grid-sum features, score differences, and the fixed-part
        contribution for every sample.
    """
    global normalization_trans, device
    feature_final = []
    result_final = []
    fixed_features = []
    # Fix: honor the default instead of raising AttributeError on None.
    if fixed_coeff is None:
        fixed_coeff = np.zeros_like(lower)
    # Round the requested sample count up to an integral number of batches.
    n_iter = math.ceil(n_sample / batch_num)
    model.eval()
    flat_fixed = fixed_coeff.reshape(-1)  # hoisted loop invariant
    for step in range(n_iter):  # renamed: `iter` shadowed the builtin
        samples = np.random.uniform(lower, upper, (batch_num,) + lower.shape)
        samples_ = normalization_trans(
            torch.tensor(samples)).float().to(device)
        with torch.no_grad():
            results_ = model(samples_).cpu().detach().numpy()
        # Untargeted score difference: best rival class minus the true label.
        results_ = np.max(np.delete(results_, label, 1),
                          1) - results_[:, label]
        result_final.append(results_.reshape(batch_num, -1))
        # Contribution of the frozen coefficients (affine model's fixed part).
        fixed_result_i = (samples.reshape(batch_num, -1) @
                          flat_fixed).reshape((batch_num, -1))
        fixed_features.append(fixed_result_i)
        # Per-grid, per-channel pixel sums are the regression features.
        feature_iter_i = []
        for grid in grid_list:
            for channel in range(3):
                grid_data = samples[:, channel, grid.leftup_x:grid.rightdown_x,
                                    grid.leftup_y:grid.rightdown_y]
                # Single reduction over both spatial axes replaces the two
                # chained keepdims sums of the original.
                grid_sum = np.sum(grid_data, axis=(1, 2)).reshape(-1, 1)
                feature_iter_i.append(grid_sum)
        feature_final.append(np.hstack(feature_iter_i))
    # Merge the per-batch results.
    feature_final = np.vstack(feature_final)
    result_final = np.vstack(result_final)
    fixed_features = np.vstack(fixed_features)
    return feature_final, result_final, fixed_features
def scenario_optimization(image, label):
    """Main verification routine (focused learning + stepwise splitting).

    Learns an affine PAC model of the network's untargeted score difference
    over the L-inf ball around ``image``, then checks whether the learned
    model's maximum (plus the empirical margin) can be positive.

    Returns
    -------
    int
        1 if PAC-model robust, 0 if a counter-example is confirmed,
        2 if only a potential (unconfirmed) counter-example exists.
    """
    global significance, error, init_grid, model, sampling_budget, delta
    global pretrans, normalization_trans, Batchsize, final_samples
    # Split into 7x7 small grids (32x32 split)
    grid_list = grid_split(init_grid, 32, 32)
    img = pretrans(image)
    img_np = img.detach().numpy()
    # Calculate the Lower and Upper Bounds of the L-inf ball, clipped to the
    # valid [0, 1] pixel range.
    img_upper = np.clip(img_np+delta, 0., 1.)
    img_lower = np.clip(img_np-delta, 0., 1.)
    # Coefficients frozen so far; starts empty (all zero).
    fixed_coeff = np.zeros((3, 224, 224))
    # Grid Refinement Procedure: each round freezes unimportant grids and
    # splits the important ones finer.
    n_refine = 5
    for refine_step in range(n_refine):
        print('Stepwise Spliting #', refine_step, 'Start')
        print('Sampling... (%d samples)' % sampling_budget)
        features, scores, fixed_constant = grid_batch_sample(
            grid_list, sampling_budget, Batchsize, img_lower, img_upper, model, fixed_coeff, label)
        print('Constructing Template...')
        # Linear regression constructs the coarse model; the already-frozen
        # part is subtracted from the target so only live grids are fit.
        reg = LinearRegression(fit_intercept=True).fit(
            features, scores-fixed_constant)
        coeff = np.array(reg.coef_).reshape(-1, 3)
        # Use the per-grid L2 norm of (R,G,B) coefficients as importance.
        coeff_l2 = np.sqrt(np.sum(coeff*coeff, axis=1))
        coeff_l2_index = np.argsort(coeff_l2)
        # Bottom 75% (by importance) get frozen; top 25% get split further.
        coeff_l2_index_low = coeff_l2_index[:math.ceil(
            len(coeff_l2_index)*0.75)]
        coeff_l2_index_high = coeff_l2_index[math.ceil(
            len(coeff_l2_index)*0.75):]
        # Freeze the less important grids: write their learned coefficients
        # into fixed_coeff so later rounds focus on the rest.
        for index in coeff_l2_index_low:
            grid = grid_list[index]
            fixed_coeff[0, grid.leftup_x:grid.rightdown_x,
                        grid.leftup_y:grid.rightdown_y] = coeff[index, 0]
            fixed_coeff[1, grid.leftup_x:grid.rightdown_x,
                        grid.leftup_y:grid.rightdown_y] = coeff[index, 1]
            fixed_coeff[2, grid.leftup_x:grid.rightdown_x,
                        grid.leftup_y:grid.rightdown_y] = coeff[index, 2]
        # Split each remaining (important) grid 2x2 for the next round.
        print('Spliting...')
        grid_list_i = []
        for index in coeff_l2_index_high:
            grid_list_i.append(grid_list[index])
        grid_list = grid_split(grid_list_i, 2, 2)
        del features, scores, fixed_constant
    # Last step: fit and freeze the remaining grids.
    print('Last Step...')
    features, scores, fixed_constant = grid_batch_sample(
        grid_list, sampling_budget, Batchsize, img_lower, img_upper, model, fixed_coeff, label)
    reg = LinearRegression(fit_intercept=True).fit(
        features, scores-fixed_constant)
    intercept = reg.intercept_
    coeff = np.array(reg.coef_).reshape(-1, 3)
    for index in range(len(coeff)):
        grid = grid_list[index]
        fixed_coeff[0, grid.leftup_x:grid.rightdown_x,
                    grid.leftup_y:grid.rightdown_y] = coeff[index, 0]
        fixed_coeff[1, grid.leftup_x:grid.rightdown_x,
                    grid.leftup_y:grid.rightdown_y] = coeff[index, 1]
        fixed_coeff[2, grid.leftup_x:grid.rightdown_x,
                    grid.leftup_y:grid.rightdown_y] = coeff[index, 2]
    del features, scores, fixed_constant
    # Calculate the empirical margin of the learned affine model on fresh
    # samples (final_samples sized by the PAC bound).
    features, scores, fixed_constant = grid_batch_sample(
        init_grid, final_samples, Batchsize, img_lower, img_upper, model, fixed_coeff, label)
    eps_max = np.max(np.abs(scores-fixed_constant-intercept))
    print('Margin: ', eps_max)
    del features, scores, fixed_constant
    safe = True
    unsafe = False
    # Maximum of the learned affine model over the box: negative coefficients
    # take the lower bound, positive ones the upper bound (boolean masks give
    # 1-D vectors, so @ is a dot product); zero coefficients contribute 0.
    val_max = fixed_coeff[fixed_coeff < 0]@img_lower[fixed_coeff < 0] + \
        fixed_coeff[fixed_coeff > 0]@img_upper[fixed_coeff > 0] + \
        intercept+eps_max
    print('Evaluated Delta Max Value: ', val_max)
    if val_max > 0:
        print('Potential Counter-example Found')
        safe = False
        # Build the maximizing input and check it against the real network.
        ce = np.zeros_like(img_lower)
        ce[fixed_coeff <= 0] = img_lower[fixed_coeff <= 0]
        ce[fixed_coeff > 0] = img_upper[fixed_coeff > 0]
        with torch.no_grad():
            # NOTE(review): unlike the sampling path, no .float() cast here —
            # the tensor is float64; confirm the model accepts doubles.
            ce = normalization_trans(torch.tensor(ce).unsqueeze(0)).to(device)
            scores = model(ce)[0]
            print('True Label: ', torch.argmax(scores), 'Score: ', torch.max(
                scores), 'Original Label:', label, 'Scores: ', scores[label])
            if torch.argmax(scores) != label:
                unsafe = True
                # NOTE(review): message typo 'Conter-example' kept verbatim
                # (runtime string; not altered in a documentation pass).
                print('Conter-example Confirmed')
    if safe:
        print('Network is PAC-model robust with error rate',
              error, 'and confidence level', 1-significance)
        return 1
    elif unsafe:
        print('Unsafe. Adversarial Example Found.')
        return 0
    print('Unknown. Potential Counter-Example exists.')
    return 2
def imagenet_verify(model_class, args):
    """Entry function: configure globals from ``args`` and run verification.

    Parameters
    ----------
    model_class : callable
        Zero-argument constructor of the network to verify.
    args : argparse.Namespace-like
        Expected attributes: model, radius, epsilon, eta, batchsize, image,
        gpu; optional: mean, std, budget, label.

    Returns
    -------
    int or None
        scenario_optimization's verdict (1 robust / 0 unsafe / 2 unknown),
        or None when verification raised an exception.
    """
    global delta, PATH, error, significance, final_samples, normalization_trans, mean, stdvar, dataset, device, model, Batchsize, sampling_budget
    PATH = args.model
    delta = args.radius/255.
    error = args.epsilon
    significance = args.eta
    Batchsize = args.batchsize
    image_path = args.image
    # Scenario-optimization sample bound, rounded up to whole batches.
    final_samples = math.ceil(2/error*(math.log(1/significance)+1))
    final_samples = math.ceil(final_samples/Batchsize)*Batchsize
    model = model_class()
    model.load_state_dict(torch.load(PATH))
    # Fix: getattr without a default raises AttributeError when the option is
    # absent; also use `is not None` instead of `!= None`.
    if getattr(args, 'mean', None) is not None:
        mean = args.mean
    if getattr(args, 'std', None) is not None:
        stdvar = args.std
    if getattr(args, 'budget', None) is not None:
        sampling_budget = args.budget
    normalization_trans = transforms.Normalize(mean, stdvar)
    if not args.gpu:
        device = 'cpu'
    # Deterministic sampling / cuDNN for reproducible verification runs.
    np.random.seed(0)
    if device == 'cuda':
        cudnn.deterministic = True
        cudnn.benchmark = False
    model = model.to(device)
    model.eval()
    image = Image.open(image_path).convert('RGB')
    if getattr(args, 'label', None) is not None:
        label = args.label
    else:
        # No label supplied: use the model's own prediction as ground truth.
        with torch.no_grad():
            label = int(torch.argmax(model(normalization_trans(
                pretrans(image)).unsqueeze(0).to(device))[0]).cpu())
    print('True Label: ', label)
    try:
        print('Verification Radius(L-inf): ', args.radius)
        print('Mean: ', mean)
        print('Std: ', stdvar)
        return scenario_optimization(image, label)
    except Exception as err:
        # Best-effort reporting: verification failure is logged, not raised.
        print('Error: Verification Failed')
        print(err)
| 43.685259
| 188
| 0.639216
| 1,388
| 10,965
| 4.865994
| 0.201729
| 0.032573
| 0.031093
| 0.039976
| 0.277169
| 0.236601
| 0.183447
| 0.171898
| 0.14584
| 0.139177
| 0
| 0.02526
| 0.263475
| 10,965
| 250
| 189
| 43.86
| 0.811045
| 0.055814
| 0
| 0.180851
| 0
| 0
| 0.059803
| 0.003329
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015957
| false
| 0
| 0.042553
| 0
| 0.085106
| 0.101064
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3228d6088055f54b7b82121a3d3e109e936942b3
| 1,623
|
py
|
Python
|
setup.py
|
cakebread/musubi
|
5b5f1bdf65fe07c14ff7bb2252c278f6ca0c903c
|
[
"BSD-2-Clause"
] | 5
|
2015-05-18T13:18:26.000Z
|
2020-01-14T08:24:08.000Z
|
setup.py
|
cakebread/musubi
|
5b5f1bdf65fe07c14ff7bb2252c278f6ca0c903c
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
cakebread/musubi
|
5b5f1bdf65fe07c14ff7bb2252c278f6ca0c903c
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
"""Packaging script for musubi, a command-line DNSBL checker and MX toolkit."""
PROJECT = 'musubi'
VERSION = '0.2'
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, find_packages
try:
    # Fix: use a context manager so the README handle is closed instead of
    # leaked; behavior on a missing file is unchanged (fallback text).
    with open('README.rst', 'rt') as readme:
        long_description = readme.read()
except IOError:
    long_description = 'Uh oh, we may need a new hard drive.'
setup(
    name=PROJECT,
    version=VERSION,
    description='Musubi is a command-line DNSBL checker and MX toolkit.',
    long_description=long_description,
    author='Rob Cakebread',
    author_email='cakebread@gmail.com',
    url='https://github.com/cakebread/musubi',
    download_url='https://github.com/cakebread/musubi/tarball/master',
    classifiers=['Development Status :: 3 - Alpha',
                 'License :: OSI Approved :: BSD License',
                 'Programming Language :: Python',
                 'Programming Language :: Python :: 2',
                 'Programming Language :: Python :: 2.7',
                 'Intended Audience :: Developers',
                 'Environment :: Console',
                 ],
    platforms=['Any'],
    scripts=[],
    provides=[],
    install_requires=['requests', 'dnspython', 'IPy', 'distribute',
                      'cliff', 'cliff-tablib', 'gevent', 'greenlet'],
    namespace_packages=[],
    packages=find_packages(),
    include_package_data=True,
    entry_points={
        'console_scripts': [
            'musubi = musubi.main:main'
        ],
        'musubi.cli': [
            'ips = musubi.ips:GetIPs',
            'mx = musubi.mx:GetMX',
            'spf = musubi.spf:GetSPF',
            'scan = musubi.scan:Scan',
        ],
    },
    zip_safe=False,
)
| 29.509091
| 73
| 0.590265
| 166
| 1,623
| 5.662651
| 0.63253
| 0.06383
| 0.079787
| 0.03617
| 0.068085
| 0.068085
| 0
| 0
| 0
| 0
| 0
| 0.005059
| 0.269254
| 1,623
| 54
| 74
| 30.055556
| 0.787521
| 0.012323
| 0
| 0.06383
| 0
| 0
| 0.408864
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.042553
| 0
| 0.042553
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3229164df79c432f6f7ad72e86350bc6d3ce6e18
| 1,048
|
py
|
Python
|
airflow_ml_dags/images/airflow-preprocess/preprocess.py
|
made-ml-in-prod-2021/holyketzer
|
f693f2d5fce8cced03873e2b89cbe10617996c64
|
[
"MIT"
] | null | null | null |
airflow_ml_dags/images/airflow-preprocess/preprocess.py
|
made-ml-in-prod-2021/holyketzer
|
f693f2d5fce8cced03873e2b89cbe10617996c64
|
[
"MIT"
] | 2
|
2021-05-21T09:09:23.000Z
|
2021-06-05T08:13:40.000Z
|
airflow_ml_dags/images/airflow-preprocess/preprocess.py
|
made-ml-in-prod-2021/holyketzer
|
f693f2d5fce8cced03873e2b89cbe10617996c64
|
[
"MIT"
] | null | null | null |
import os
import pandas as pd
import click
from datetime import date
@click.command("preprocess")
@click.option("--input-dir")
@click.option("--output-dir")
@click.option("--mode")
def preprocess(input_dir: str, output_dir, mode):
    """Read a CSV from input_dir, derive features, and write it to output_dir.

    mode == "data": adds FirstLength/LastLength columns to data.csv.
    mode == "target": converts Birthdate into an integer Age in target.csv.
    Any other mode raises ValueError.
    """
    if mode == "data":
        frame = pd.read_csv(os.path.join(input_dir, "data.csv"))
        frame["FirstLength"] = frame["First"].apply(len)
        frame["LastLength"] = frame["Last"].apply(len)
        out_name = "data.csv"
    elif mode == "target":
        frame = pd.read_csv(os.path.join(input_dir, "target.csv"))
        today = date.today()

        def _age(born):
            # Full years elapsed; subtract one if the birthday hasn't
            # occurred yet this year.
            return today.year - born.year - (
                (today.month, today.day) < (born.month, born.day))

        frame["Age"] = pd.to_datetime(frame["Birthdate"]).apply(_age)
        frame.drop(columns=["Birthdate"], inplace=True)
        out_name = "target.csv"
    else:
        raise ValueError(f"unknown mode: '{mode}'")
    os.makedirs(output_dir, exist_ok=True)
    frame.to_csv(os.path.join(output_dir, out_name), index=False)


if __name__ == '__main__':
    preprocess()
| 29.942857
| 101
| 0.621183
| 140
| 1,048
| 4.514286
| 0.414286
| 0.050633
| 0.042722
| 0.061709
| 0.098101
| 0.098101
| 0.098101
| 0.098101
| 0.098101
| 0
| 0
| 0
| 0.204198
| 1,048
| 34
| 102
| 30.823529
| 0.757794
| 0
| 0
| 0
| 0
| 0
| 0.158397
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.142857
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32298c15e29bc9b924d33fac9a984d4c8170430a
| 581
|
py
|
Python
|
estrutura_while/barra-de-progresso.py
|
BEp0/Estudos_de_Python
|
da32a01d3f4462b3e6b1b6035106895afe9c7627
|
[
"MIT"
] | 1
|
2021-02-15T19:14:44.000Z
|
2021-02-15T19:14:44.000Z
|
estrutura_while/barra-de-progresso.py
|
BEp0/Estudos_de_Python
|
da32a01d3f4462b3e6b1b6035106895afe9c7627
|
[
"MIT"
] | null | null | null |
estrutura_while/barra-de-progresso.py
|
BEp0/Estudos_de_Python
|
da32a01d3f4462b3e6b1b6035106895afe9c7627
|
[
"MIT"
] | null | null | null |
from time import sleep
from sys import stdout
def barra(v):
    """Draw an animated textual progress bar with ``v`` dash segments.

    Prints '[ ', then one '-' every 0.1 s (flushed immediately so the
    animation is visible), then ' ]'.  ``v`` is truncated to an int.
    """
    steps = int(v)
    print('[ ', end='')
    # Fix: the loop variable no longer shadows the parameter, and the
    # placeholder-free f-string f'-' is a plain literal.
    for _ in range(steps):
        print('-', end='', flush=True)
        sleep(0.1)
    print(' ]', end='\n')
def calcularNotas():
    """Prompt for two grades, animate a bar sized by the floored mean,
    then print the exact mean.  Returns None (print's return value)."""
    soma = 0
    largura = 0  # bar width: floored half of the running total
    for i in range(2):
        nota = float(input(f'\n{i + 1}º nota : '))
        soma += nota
        largura = soma // 2
    print('\nCALCULANDO: ', end='\b')
    barra(largura)
    return print(f'MÉDIA FOI: {soma / 2}')
def main():
    """Run the grade-average demo once, pause, and print a closing banner."""
    calcularNotas()
    sleep(1)
    print('\n__FIM__\n')


if __name__ == '__main__':
    main()
| 17.088235
| 50
| 0.504303
| 85
| 581
| 3.305882
| 0.447059
| 0.042705
| 0.05694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027295
| 0.306368
| 581
| 33
| 51
| 17.606061
| 0.669975
| 0
| 0
| 0
| 0
| 0
| 0.141136
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.08
| 0
| 0.24
| 0.24
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
322c0212f8148c0b38508aaf2672d99f9c4007b4
| 8,524
|
py
|
Python
|
src/apodeixi/text_layout/tests_unit/test_column_layout.py
|
ChateauClaudia-Labs/apodeixi
|
dd668e210e92cabc2682ad3049781c06e58e3101
|
[
"MIT"
] | null | null | null |
src/apodeixi/text_layout/tests_unit/test_column_layout.py
|
ChateauClaudia-Labs/apodeixi
|
dd668e210e92cabc2682ad3049781c06e58e3101
|
[
"MIT"
] | null | null | null |
src/apodeixi/text_layout/tests_unit/test_column_layout.py
|
ChateauClaudia-Labs/apodeixi
|
dd668e210e92cabc2682ad3049781c06e58e3101
|
[
"MIT"
] | null | null | null |
import sys as _sys
import pandas as _pd
from apodeixi.testing_framework.a6i_unit_test import ApodeixiUnitTest
from apodeixi.util.formatting_utils import DictionaryFormatter
from apodeixi.util.a6i_error import ApodeixiError, FunctionalTrace
from apodeixi.text_layout.column_layout import ColumnWidthCalculator
class Test_ColumnWidthCalculator(ApodeixiUnitTest):
    """Regression tests for ColumnWidthCalculator.

    Each test loads an input CSV, runs the calculator, saves the actual
    output next to the expected files, and compares analysis DataFrame,
    explanations, and result dictionary against the expected artifacts.
    """

    def setUp(self):
        super().setUp()

    def test_sparse_layout(self):
        self._shell_test_case('test_sparse_layout', viewport_width=50, viewport_height=40, max_word_length=20)

    def test_thick_layout(self):
        self._shell_test_case('test_thick_layout', viewport_width=100, viewport_height=40, max_word_length=20)

    def _shell_test_case(self, name, viewport_width, viewport_height, max_word_length):
        """Shared driver: run the calculator for test `name` and compare all
        generated artifacts (csv, explain, results, comparison) against the
        expected files saved under the test's expected-data folder.
        """
        # File naming convention: <name>_{INPUT,OUTPUT,EXPECTED}[...]
        INPUT_FOLDER = self.input_data
        INPUT_FILE = name + '_INPUT.csv'
        OUTPUT_FOLDER = self.output_data
        OUTPUT_FILE = name + '_OUTPUT.csv'
        EXPECTED_FOLDER = self.expected_data
        EXPECTED_FILE = name + '_EXPECTED.csv'
        OUTPUT_COMPARISON_FILE = name + '_comparison_OUTPUT.txt'
        EXPECTED_COMPARISON_FILE = name + '_comparison_EXPECTED.txt'
        OUTPUT_EXPLAIN_FILE = name + '_explain_OUTPUT.txt'
        EXPECTED_EXPLAIN_FILE = name + '_explain_EXPECTED.txt'
        OUTPUT_RESULTS_FILE = name + '_results_OUTPUT.txt'
        EXPECTED_RESULTS_FILE = name + '_results_EXPECTED.txt'
        try:
            root_trace = FunctionalTrace(parent_trace=None, path_mask=self._path_mask).doing("Testing computation of column widths")
            data_df = self.load_csv(root_trace, INPUT_FOLDER + '/' + INPUT_FILE)
            calc = ColumnWidthCalculator(data_df=data_df,
                                         viewport_width=viewport_width,
                                         viewport_height=viewport_height,
                                         max_word_length=max_word_length)
            result_dict = calc.calc(root_trace)
            output_df = calc.analysis_df
            output_explain = '\n'.join(calc.explanations)
            # Save DataFrame, explain and results in case the assertion below fails, so that we can do
            # a visual comparison of OUTPUT vs EXPECTED csv files
            output_df.to_csv(OUTPUT_FOLDER + '/' + OUTPUT_FILE)
            with open(OUTPUT_FOLDER + '/' + OUTPUT_EXPLAIN_FILE, 'w') as file:
                file.write(output_explain)
            # Make results readable by creating a pretty-printed dictionary dump
            result_nice = DictionaryFormatter().dict_2_nice(parent_trace=root_trace, a_dict=result_dict)
            with open(OUTPUT_FOLDER + '/' + OUTPUT_RESULTS_FILE, 'w') as file:
                file.write(result_nice)
            # Load the output we just saved, which we'll use for regression comparison since in Pandas the act of loading will
            # slightly change formats and we want to apply the same such changes as were applied to the expected output,
            # to avoid frivolous differences that don't deserve to cause this test to fail
            loaded_output_df = self.load_csv(root_trace, OUTPUT_FOLDER + '/' + OUTPUT_FILE)
            # Now load the expected output.
            expected_df = self.load_csv(root_trace, EXPECTED_FOLDER + '/' + EXPECTED_FILE)
            check, comparison_dict = self._compare_dataframes(df1=loaded_output_df,
                                                              df1_name="output",
                                                              df2=expected_df,
                                                              df2_name="expected")
            df_comparison_nice = DictionaryFormatter().dict_2_nice(parent_trace=root_trace,
                                                                   a_dict=comparison_dict,
                                                                   flatten=True)
            with open(OUTPUT_FOLDER + '/' + OUTPUT_COMPARISON_FILE, 'w') as file:
                file.write(df_comparison_nice)
            with open(EXPECTED_FOLDER + '/' + EXPECTED_COMPARISON_FILE, 'r') as file:
                expected_df_comparison = file.read()
            with open(EXPECTED_FOLDER + '/' + EXPECTED_EXPLAIN_FILE, 'r') as file:
                expected_explain = file.read()
            with open(EXPECTED_FOLDER + '/' + EXPECTED_RESULTS_FILE, 'r') as file:
                expected_result = file.read()
        except ApodeixiError as ex:
            print(ex.trace_message())
            # NOTE(review): deliberate hard failure after printing the trace;
            # self.fail(...) would convey intent better than assertTrue(1==2).
            self.assertTrue(1 == 2)
        self.assertEqual(df_comparison_nice, expected_df_comparison)
        self.assertTrue(check)
        self.assertEqual(output_explain, expected_explain)
        self.assertEqual(result_nice, expected_result)

    def _compare_dataframes(self, df1, df2, df1_name, df2_name):
        '''
        Helper method used in lieu of dataframe.equals, which fails for spurious reasons.
        Under this method's policy, two dataframes are equal if they have the same columns, indices, and are
        point-wise equal.
        Method returns two things: a boolean result of the comparison, and a dictionary to pin point where there are
        differences, if any
        '''
        # Prepare an explanation of where the dataframes differ, if they do differ. This visibility helps with debugging
        comparison_dict = {}
        cols_1 = set(df1.columns)
        cols_2 = set(df2.columns)
        # Ensure determinism with sort
        common_cols = list(cols_1.intersection(cols_2))
        common_cols.sort()
        missing_in_1 = list(cols_2.difference(cols_1))
        missing_in_1.sort()
        missing_in_2 = list(cols_1.difference(cols_2))
        missing_in_2.sort()
        comparison_dict[df1_name + ' shape'] = str(df1.shape)
        comparison_dict[df2_name + ' shape'] = str(df2.shape)
        if len(missing_in_1) > 0:
            comparison_dict[df1_name + ' missing columns'] = '\n'.join(missing_in_1)
        if len(missing_in_2) > 0:
            comparison_dict[df2_name + ' missing columns'] = '\n'.join(missing_in_2)
        # Initialize true until proven false
        check = True
        if not df1.index.equals(df2.index):
            check = False
        else:  # Compare element by element for the common_cols
            cell_dict = {}
            for row in df1.iterrows():
                row1_nb = row[0]
                row1_data = row[1]
                # NOTE(review): df2.iloc[row1_nb] treats the row label as a
                # position — assumes a default RangeIndex; confirm inputs.
                for col in common_cols:  # use common_cols that is a deterministic list
                    val1 = row1_data[col]
                    val2 = df2.iloc[row1_nb][col]
                    if val1 != val2:
                        # NOTE(review): NaN != NaN, so NaN cells always count
                        # as differing under this policy.
                        check = False
                        coords = col + '.row' + str(row1_nb)
                        cell_dict[coords] = "values differ"
                        cell_dict[coords + '.' + df1_name] = str(val1)
                        cell_dict[coords + '.' + df2_name] = str(val2)
            comparison_dict['elt-by-elt comparison'] = cell_dict
        if check:
            comparison_dict['Result of elt-by-elt comparison'] = "Everything matches"
        return check, comparison_dict
if __name__ == "__main__":
    # execute only if run as a script
    def main(args):
        T = Test_ColumnWidthCalculator()
        T.setUp()
        what_to_do = args[1]
        if what_to_do == 'sparse_layout':
            # Fix: the class defines test_sparse_layout; the previously
            # called test_small_text does not exist (AttributeError).
            T.test_sparse_layout()
    main(_sys.argv)
| 51.660606
| 141
| 0.530737
| 876
| 8,524
| 4.88242
| 0.268265
| 0.032733
| 0.015198
| 0.009119
| 0.170447
| 0.119242
| 0.089783
| 0.042553
| 0.026654
| 0.026654
| 0
| 0.014303
| 0.40122
| 8,524
| 165
| 142
| 51.660606
| 0.823668
| 0.134209
| 0
| 0.018018
| 0
| 0
| 0.057569
| 0.012033
| 0
| 0
| 0
| 0
| 0.045045
| 1
| 0.054054
| false
| 0
| 0.054054
| 0
| 0.126126
| 0.009009
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
322e21d79121fc682dbbeaf19bfb0822ed607a7a
| 4,236
|
py
|
Python
|
pru/db/geo/geo_admin.py
|
euctrl-pru/rt-python
|
da5d0040e250bd159845a0d43bf0b73eab368863
|
[
"MIT"
] | null | null | null |
pru/db/geo/geo_admin.py
|
euctrl-pru/rt-python
|
da5d0040e250bd159845a0d43bf0b73eab368863
|
[
"MIT"
] | null | null | null |
pru/db/geo/geo_admin.py
|
euctrl-pru/rt-python
|
da5d0040e250bd159845a0d43bf0b73eab368863
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2018 Via Technology Ltd. All Rights Reserved.
# Consult your license regarding permissions and restrictions.
#
"""
Administration operations for the geo db.
"""
import os
import socket
import time
from pru.db.geo.geo_init import load_airspace, remove_all_sectors, tear_down
from pru.db.geo.geo_init import load_airports, remove_all_airports
from pru.db.geo.geo_init import load_user_airspace, remove_all_user_defined_sectors
from pru.db.common_init import create as create_db, DB_TYPE_GEO
from pru.db.geo.geo_init import create as create_geo_db
from pru.logger import logger
import pru.db.context as ctx
log = logger(__name__)
def remove_geo_db():
    """Remove the geo db: delete all sectors, airports, and user-defined
    sectors, then tear the schema down (same order as before)."""
    for cleanup in (remove_all_sectors,
                    remove_all_airports,
                    remove_all_user_defined_sectors,
                    tear_down):
        cleanup()
def create_geo_database():
    """Create the geo database, blocking until the DB server is reachable.

    Polls the configured host/port every 5 seconds until a TCP connection
    succeeds, then creates the database and its tables.

    Returns
    -------
    bool
        True on success; False if either creation step fails (previously an
        implicit None, which is equally falsy).
    """
    log.info("Starting to create the geo db")
    log.info("Waiting for the database to be ready")
    log.info(f"Testing connection on host: {ctx.geo_db_hostname} and port {ctx.geo_db_port}")
    # We need to sleep and retry until the db wakes up.  Fix: create a fresh
    # socket per attempt and always close it — a socket whose connect()
    # failed is not reliably reusable, and the old code leaked it on success.
    while True:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((ctx.geo_db_hostname, int(ctx.geo_db_port)))
            break
        except socket.error:
            log.debug("Database not ready..")
            time.sleep(5)  # 5 seconds between tests
        finally:
            s.close()
    log.info("Geo database is now ready.")
    if create_db(DB_TYPE_GEO):
        if create_geo_db():
            log.info("Geo database creation is complete.")
            return True
        log.info("Failed to make the airspace db, could not create the tables.")
    else:
        log.info("Failed to make the airspace db, could not create the database.")
    return False
def initialise_airspace(sector_file_path, reset=False):
    """Load a sectors file (csv or geojson) into the geo db.

    reset=True removes all existing sectors first; reset=False appends the
    new sectors (note: this is not an update).

    Returns True on success, or a (False, message) tuple when the file path
    does not exist.
    """
    connection = ctx.get_connection(ctx.CONTEXT, ctx.DB_USER)
    context = ctx.CONTEXT
    # Guard clause: bail out early on a missing file.
    if not os.path.exists(sector_file_path):
        return (False, "Path not found " + sector_file_path)
    if reset:
        remove_all_sectors()
    load_airspace(sector_file_path, context, connection)
    return True
def initialise_airports(airports_file_path, reset=False):
    """Load an airports file (must be csv) into the geo db.

    reset=True removes all existing airports first; reset=False appends the
    new airports (note: this is not an update).

    Returns True on success, or a (False, message) tuple when the file path
    does not exist.
    """
    connection = ctx.get_connection(ctx.CONTEXT, ctx.DB_USER)
    context = ctx.CONTEXT
    # Guard clause: bail out early on a missing file.
    if not os.path.exists(airports_file_path):
        return (False, "Path not found " + airports_file_path)
    if reset:
        remove_all_airports()
    load_airports(airports_file_path, context, connection)
    return True
def initialise_user_airspace(user_sector_file_path, reset=False):
    """
    Load the user sectors file at the given path into the user sectors table.

    The file may be csv or geojson.

    Args:
        user_sector_file_path: Path to the user sectors file.
        reset: If True, remove all existing user-defined sectors before
               loading. If False, add these sectors to the user sectors
               table (note: this is not an update).

    Returns:
        True if we succeeded.
        A tuple of (False, message) if we fail.
    """
    # Validate the path before acquiring a database connection, so a bad
    # path does not open (and abandon) a connection.
    if not os.path.exists(user_sector_file_path):
        return (False, "Path not found " + user_sector_file_path)
    connection = ctx.get_connection(ctx.CONTEXT, ctx.DB_USER)
    context = ctx.CONTEXT
    if reset:
        remove_all_user_defined_sectors()
    load_user_airspace(user_sector_file_path, context, connection)
    return True
| 31.377778
| 93
| 0.674929
| 619
| 4,236
| 4.449111
| 0.216478
| 0.043573
| 0.040668
| 0.017429
| 0.603486
| 0.553377
| 0.533043
| 0.502179
| 0.470588
| 0.470588
| 0
| 0.001892
| 0.25118
| 4,236
| 134
| 94
| 31.61194
| 0.86633
| 0.308074
| 0
| 0.352941
| 0
| 0
| 0.140733
| 0.007617
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073529
| false
| 0
| 0.147059
| 0
| 0.323529
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
322f9af92fcd6688ac16683be314d7931fa1f2eb
| 4,040
|
py
|
Python
|
tests/test_autogeometry.py
|
fabiommendes/easymunk
|
420dfc4a006997c47887f6876876249674feb3cd
|
[
"MIT"
] | 1
|
2021-07-02T11:59:07.000Z
|
2021-07-02T11:59:07.000Z
|
tests/test_autogeometry.py
|
fabiommendes/easymunk
|
420dfc4a006997c47887f6876876249674feb3cd
|
[
"MIT"
] | null | null | null |
tests/test_autogeometry.py
|
fabiommendes/easymunk
|
420dfc4a006997c47887f6876876249674feb3cd
|
[
"MIT"
] | 1
|
2022-01-14T20:18:35.000Z
|
2022-01-14T20:18:35.000Z
|
from typing import List, Tuple
import easymunk as a
from easymunk import BB, Vec2d
class TestAutoGeometry:
    """Tests for easymunk's auto-geometry helpers: closedness checks,
    polyline simplification, convex hulls/decomposition and marching."""
    def test_is_closed(self) -> None:
        """A polyline is closed only when its last point repeats the first."""
        not_closed: List[Tuple[float, float]] = [(0, 0), (1, 1), (0, 1)]
        closed: List[Tuple[float, float]] = [(0, 0), (1, 1), (0, 1), (0, 0)]
        assert not a.is_closed(not_closed)
        assert a.is_closed(closed)
    def test_simplify_curves(self) -> None:
        """simplify_curves drops the near-collinear point (5, 11) at tolerance 1."""
        p1: List[Tuple[float, float]] = [(0, 0), (0, 10), (5, 11), (10, 10), (0, 10)]
        expected = [(0, 0), (0, 10), (10, 10), (0, 10)]
        actual = a.simplify_curves(p1, 1)
        assert actual == expected
    def test_simplify_vertexes(self) -> None:
        """simplify_vertexes removes the same near-collinear vertex as above."""
        p1: List[Tuple[float, float]] = [(0, 0), (0, 10), (5, 11), (10, 10), (0, 10)]
        expected = [(0, 0), (0, 10), (10, 10), (0, 10)]
        actual = a.simplify_vertexes(p1, 1)
        assert actual == expected
    def test_to_convex_hull(self) -> None:
        """The interior point (5, 5) is excluded; the hull is closed (first point repeated)."""
        p1: List[Tuple[float, float]] = [(0, 0), (0, 10), (5, 5), (10, 10), (10, 0)]
        expected = [(0, 0), (10, 0), (10, 10), (0, 10), (0, 0)]
        actual = a.to_convex_hull(p1, 1)
        assert actual == expected
    def test_convex_decomposition(self) -> None:
        """Decompose a concave polygon into convex pieces (result order is unstable)."""
        # TODO: Use a more complicated polygon as test case
        p1: List[Tuple[float, float]] = [
            (0, 0),
            (5, 0),
            (10, 10),
            (20, 20),
            (5, 5),
            (0, 10),
            (0, 0),
        ]
        expected = [
            [(5.0, 5.0), (6.25, 2.5), (20.0, 20.0), (5.0, 5.0)],
            [(0.0, 0.0), (5.0, 0.0), (6.25, 2.5), (5.0, 5.0), (0.0, 10.0), (0.0, 0.0)],
        ]
        actual = a.convex_decomposition(p1, 0.1)
        actual.sort(key=len)
        # TODO: The result of convex_decomposition is not stable between
        # environments, so we cant have this assert here.
        # assert actual == expected
    def test_march_soft(self) -> None:
        """march_soft traces the ASCII image; soft marching interpolates diagonals."""
        img = [
            "  xx   ",
            "  xx   ",
            "  xx   ",
            "  xx   ",
            "  xx   ",
            "  xxxxx",
            "  xxxxx",
        ]
        def sample_func(point: Tuple[float, float]) -> float:
            # 1 inside the 'x' region of the image, 0 outside.
            x = int(point[0])
            y = int(point[1])
            if img[y][x] == "x":
                return 1
            return 0
        pl_set = a.march_soft(BB(0, 0, 6, 6), 7, 7, 0.5, sample_func)
        expected = [
            [
                (1.5, 6.0),
                (1.5, 5.0),
                (1.5, 4.0),
                (1.5, 3.0),
                (1.5, 2.0),
                (1.5, 1.0),
                (1.5, 0.0),
            ],
            [
                (3.5, 0.0),
                (3.5, 1.0),
                (3.5, 2.0),
                (3.5, 3.0),
                (3.5, 4.0),
                (4.0, 4.5),
                (5.0, 4.5),
                (6.0, 4.5),
            ],
        ]
        assert list(pl_set) == expected
    def test_march_hard(self) -> None:
        """march_hard on the same image keeps axis-aligned steps (extra corner at (3.5, 4.5))."""
        img = [
            "  xx   ",
            "  xx   ",
            "  xx   ",
            "  xx   ",
            "  xx   ",
            "  xxxxx",
            "  xxxxx",
        ]
        def sample_func(point: Tuple[float, float]) -> float:
            # 1 inside the 'x' region of the image, 0 outside.
            x = int(point[0])
            y = int(point[1])
            if img[y][x] == "x":
                return 1
            return 0
        actual = list(a.march_hard(BB(0, 0, 6, 6), 7, 7, 0.5, sample_func))
        expected = [
            [
                (1.5, 6.0),
                (1.5, 5.0),
                (1.5, 4.0),
                (1.5, 3.0),
                (1.5, 2.0),
                (1.5, 1.0),
                (1.5, 0.0),
            ],
            [
                (3.5, 0.0),
                (3.5, 1.0),
                (3.5, 2.0),
                (3.5, 3.0),
                (3.5, 4.0),
                (3.5, 4.5),
                (4.0, 4.5),
                (5.0, 4.5),
                (6.0, 4.5),
            ],
        ]
        assert actual == expected
| 28.652482
| 87
| 0.366832
| 531
| 4,040
| 2.728814
| 0.150659
| 0.048309
| 0.026915
| 0.078675
| 0.57902
| 0.545204
| 0.536922
| 0.458937
| 0.458937
| 0.458937
| 0
| 0.146297
| 0.455198
| 4,040
| 140
| 88
| 28.857143
| 0.51204
| 0.04604
| 0
| 0.630252
| 0
| 0
| 0.025981
| 0
| 0
| 0
| 0
| 0.007143
| 0.058824
| 1
| 0.07563
| false
| 0
| 0.02521
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32306c14bb390e41af15482d3244081bad57ece0
| 13,144
|
py
|
Python
|
darshan-util/pydarshan/darshan/backend/cffi_backend.py
|
gaocegege/darshan
|
2d54cd8ec96d26db23e9ca421df48d2031a4c55e
|
[
"mpich2"
] | null | null | null |
darshan-util/pydarshan/darshan/backend/cffi_backend.py
|
gaocegege/darshan
|
2d54cd8ec96d26db23e9ca421df48d2031a4c55e
|
[
"mpich2"
] | null | null | null |
darshan-util/pydarshan/darshan/backend/cffi_backend.py
|
gaocegege/darshan
|
2d54cd8ec96d26db23e9ca421df48d2031a4c55e
|
[
"mpich2"
] | null | null | null |
# -*- coding: utf-8 -*-
import cffi
import ctypes
import numpy as np
import pandas as pd
from darshan.api_def_c import load_darshan_header
from darshan.discover_darshan import find_utils
from darshan.discover_darshan import check_version
# Load the darshan C API declarations and register them with cffi.
API_def_c = load_darshan_header()
ffi = cffi.FFI()
ffi.cdef(API_def_c)
# Handle to the darshan-util shared library, resolved by find_utils.
libdutil = None
libdutil = find_utils(ffi, libdutil)
def log_open(filename):
    """
    Open a darshan logfile.
    Args:
        filename (str): Path to a darshan log file
    Return:
        log handle
    """
    # The C API expects a bytes path.
    handle = libdutil.darshan_log_open(filename.encode())
    return {"handle": handle, 'modules': None, 'name_records': None}
def log_close(log):
    """
    Close the logfile and release allocated memory.
    """
    handle = log['handle']
    libdutil.darshan_log_close(handle)
def log_get_job(log):
    """
    Return a dictionary with information about the current job.
    """
    jobrec = ffi.new("struct darshan_job *")
    libdutil.darshan_log_get_job(log['handle'], jobrec)
    job = {
        'uid': jobrec[0].uid,
        'start_time': jobrec[0].start_time,
        'end_time': jobrec[0].end_time,
        'nprocs': jobrec[0].nprocs,
        'jobid': jobrec[0].jobid,
    }
    # Metadata is a newline-separated list of "key=value" pairs; the split
    # leaves a trailing empty entry which is dropped.
    mstr = ffi.string(jobrec[0].metadata).decode("utf-8")
    job['metadata'] = dict(kv.split('=', maxsplit=1) for kv in mstr.split('\n')[:-1])
    return job
def log_get_exe(log):
    """
    Get details about the executable (path and arguments).
    Args:
        log: handle returned by darshan.open
    Return:
        string: executable path and arguments
    """
    # 4096-byte buffer for the C API to fill in.
    buf = ffi.new("char[]", 4096)
    libdutil.darshan_log_get_exe(log['handle'], buf)
    return ffi.string(buf).decode("utf-8")
def log_get_mounts(log):
    """
    Return a list of (mount_path, mount_type) tuples recorded for the log.
    Args:
        log: handle returned by darshan.open
    """
    mnts = ffi.new("struct darshan_mnt_info **")
    cnt = ffi.new("int *")
    libdutil.darshan_log_get_mounts(log['handle'], mnts, cnt)
    return [
        (ffi.string(mnts[0][i].mnt_path).decode("utf-8"),
         ffi.string(mnts[0][i].mnt_type).decode("utf-8"))
        for i in range(cnt[0])
    ]
def log_get_modules(log):
    """
    Return a dictionary containing available modules including information
    about the contents available for each module in the current log.
    Args:
        log: handle returned by darshan.open
    Return:
        dict: Modules with additional info for current log.
    """
    # use cached module index if already present
    # (identity check: `!= None` is non-idiomatic and can misfire on
    # objects overriding __eq__)
    if log['modules'] is not None:
        return log['modules']
    modules = {}
    mods = ffi.new("struct darshan_mod_info **")
    cnt = ffi.new("int *")
    libdutil.darshan_log_get_modules(log['handle'], mods, cnt)
    for i in range(0, cnt[0]):
        modules[ffi.string(mods[0][i].name).decode("utf-8")] = \
                {'len': mods[0][i].len, 'ver': mods[0][i].ver, 'idx': mods[0][i].idx}
    # add to cache
    log['modules'] = modules
    return modules
def log_get_name_records(log):
    """
    Return a dictionary resolving hash to string (typically a filepath).
    Args:
        log: handle returned by darshan.open
    Return:
        dict: the name records
    """
    # use cached name_records if already present
    # (identity check instead of the original non-idiomatic `!= None`)
    if log['name_records'] is not None:
        return log['name_records']
    name_records = {}
    nrecs = ffi.new("struct darshan_name_record **")
    cnt = ffi.new("int *")
    libdutil.darshan_log_get_name_records(log['handle'], nrecs, cnt)
    for i in range(0, cnt[0]):
        name_records[nrecs[0][i].id] = ffi.string(nrecs[0][i].name).decode("utf-8")
    # add to cache
    log['name_records'] = name_records
    return name_records
def log_lookup_name_records(log, ids=None):
    """
    Resolve a set of hashes to their name record strings (typically filepaths).
    Args:
        log: handle returned by darshan.open
        ids: list of hash values (numbers) to resolve
    Return:
        dict: the name records
    """
    # Avoid a mutable default argument (`ids=[]`), which is shared
    # across calls; `None` sentinel preserves the empty-list default.
    if ids is None:
        ids = []
    name_records = {}
    # Build a C array of record ids to pass as the filter whitelist.
    whitelist = (ctypes.c_ulonglong * len(ids))(*ids)
    whitelist_cnt = len(ids)
    whitelistp = ffi.from_buffer(whitelist)
    nrecs = ffi.new("struct darshan_name_record **")
    cnt = ffi.new("int *")
    libdutil.darshan_log_get_filtered_name_records(log['handle'], nrecs, cnt, ffi.cast("darshan_record_id *", whitelistp), whitelist_cnt)
    for i in range(0, cnt[0]):
        name_records[nrecs[0][i].id] = ffi.string(nrecs[0][i].name).decode("utf-8")
    # NOTE(review): this replaces the full name_records cache with the
    # filtered subset — confirm callers of log_get_name_records expect that.
    log['name_records'] = name_records
    return name_records
def log_get_dxt_record(log, mod_name, mod_type, reads=True, writes=True, mode='dict'):
    """
    Returns a dictionary holding a dxt darshan log record.
    Args:
        log: Handle returned by darshan.open
        mod_name (str): Name of the Darshan module
        mod_type (str): String containing the C type
        mode (str): 'dict' (default) keeps segments as lists of dicts;
                    'pandas' converts them to DataFrames.
    Return:
        dict: generic log record, or None if no record is available
    Example:
        The typical darshan log record provides two arrays, one for integer counters
        and one for floating point counters:
        >>> darshan.log_get_dxt_record(log, "DXT_POSIX", "struct dxt_file_record **")
        {'rank': 0, 'read_count': 11, 'read_segments': array([...]), ...}
    """
    # NOTE(review): the `reads` and `writes` parameters are currently
    # unused — confirm whether segment filtering was intended.
    modules = log_get_modules(log)
    rec = {}
    buf = ffi.new("void **")
    r = libdutil.darshan_log_get_record(log['handle'], modules[mod_name]['idx'], buf)
    if r < 1:
        return None
    filerec = ffi.cast(mod_type, buf)
    rec['id'] = filerec[0].base_rec.id
    rec['rank'] = filerec[0].base_rec.rank
    rec['hostname'] = ffi.string(filerec[0].hostname).decode("utf-8")
    wcnt = filerec[0].write_count
    rcnt = filerec[0].read_count
    rec['write_count'] = wcnt
    rec['read_count'] = rcnt
    # Segments are laid out in memory directly after the fixed-size record:
    # write segments first, then read segments.
    size_of = ffi.sizeof("struct dxt_file_record")
    segments = ffi.cast("struct segment_info *", buf[0] + size_of)

    def _segment(idx):
        # Convert one C segment_info entry into a plain dict.
        return {
            "offset": segments[idx].offset,
            "length": segments[idx].length,
            "start_time": segments[idx].start_time,
            "end_time": segments[idx].end_time,
        }

    rec['write_segments'] = [_segment(i) for i in range(wcnt)]
    rec['read_segments'] = [_segment(wcnt + i) for i in range(rcnt)]
    if mode == "pandas":
        rec['read_segments'] = pd.DataFrame(rec['read_segments'])
        rec['write_segments'] = pd.DataFrame(rec['write_segments'])
    return rec
def log_get_generic_record(log, mod_name, mod_type, mode='numpy'):
    """
    Returns a dictionary holding a generic darshan log record.
    Args:
        log: Handle returned by darshan.open
        mod_name (str): Name of the Darshan module
        mod_type (str): String containing the C type
        mode (str): 'numpy' (default) returns counter arrays; 'dict' returns
                    name->value dicts; 'pandas' returns one-row DataFrames.
    Return:
        dict: generic log record, or None if no record is available
    Example:
        The typical darshan log record provides two arrays, one for integer counters
        and one for floating point counters:
        >>> darshan.log_get_generic_record(log, "POSIX", "struct darshan_posix_file **")
        {'counters': array([...], dtype=int64), 'fcounters': array([...])}
    """
    modules = log_get_modules(log)
    rec = {}
    buf = ffi.new("void **")
    r = libdutil.darshan_log_get_record(log['handle'], modules[mod_name]['idx'], buf)
    if r < 1:
        # No (more) records available for this module.
        return None
    rbuf = ffi.cast(mod_type, buf)
    rec['id'] = rbuf[0].base_rec.id
    rec['rank'] = rbuf[0].base_rec.rank
    clst = []
    # Copy the C integer counter array into a Python list, then numpy.
    for i in range(0, len(rbuf[0].counters)):
        clst.append(rbuf[0].counters[i])
    rec['counters'] = np.array(clst, dtype=np.int64)
    cdict = dict(zip(counter_names(mod_name), rec['counters']))
    flst = []
    # Same for the floating point counters.
    for i in range(0, len(rbuf[0].fcounters)):
        flst.append(rbuf[0].fcounters[i])
    rec['fcounters'] = np.array(flst, dtype=np.float64)
    fcdict = dict(zip(fcounter_names(mod_name), rec['fcounters']))
    if mode == "dict":
        # NOTE(review): key is 'fcounter' here but 'fcounters' in the pandas
        # branch and numpy mode — looks like a typo, but changing it would
        # break callers; confirm before renaming.
        rec = {'counters': cdict, 'fcounter': fcdict}
    if mode == "pandas":
        rec = {
            'counters': pd.DataFrame(cdict, index=[0]),
            'fcounters': pd.DataFrame(fcdict, index=[0])
        }
    return rec
def counter_names(mod_name, fcnts=False):
    """
    Returns a list of available counter names for the module.
    By default only integer counter names are listed, unless fcnts is set to
    true in which case only the floating point counter names are listed.
    Args:
        mod_name (str): Name of the module to return counter names.
        fcnts (bool): Switch to request floating point counters instead of integer. (Default: False)
    Return:
        list: Counter names as strings, or None if the module is unknown.
    """
    if mod_name == 'MPI-IO':
        mod_name = 'MPIIO'
    prefix = "f_" if fcnts else ""
    # The C library exposes e.g. posix_counter_names / posix_f_counter_names,
    # terminated by the <MOD>_(F_)NUM_INDICES sentinel entry.
    end = "{0}_{1}NUM_INDICES".format(mod_name.upper(), prefix.upper())
    var_name = "{0}_{1}counter_names".format(mod_name.lower(), prefix.lower())
    # Hoisted out of the loop and replaces the original bare `except:`,
    # which also swallowed KeyboardInterrupt/SystemExit.
    var = getattr(libdutil, var_name, None)
    if not var:
        return None
    names = []
    i = 0
    while True:
        name = ffi.string(var[i]).decode("utf-8")
        if name == end:
            break
        names.append(name)
        i += 1
    return names
def fcounter_names(mod_name):
    """
    Returns a list of available floating point counter names for the module.
    Args:
        mod_name (str): Name of the module to return counter names.
    Return:
        list: Available floating point counter names as strings.
    """
    return counter_names(mod_name, fcnts=True)
def log_get_bgq_record(log):
    """
    Returns a darshan log record for BG/Q.
    Args:
        log: handle returned by darshan.open
    Return:
        dict: generic log record, or None if no record is available.
    """
    return log_get_generic_record(log, "BG/Q", "struct darshan_bgq_record **")
def log_get_hdf5_file_record(log):
    """
    Returns a darshan log record for an HDF5 file.
    Args:
        log: handle returned by darshan.open
    Return:
        dict: generic log record, or None if no record is available.
    """
    return log_get_generic_record(log, "H5F", "struct darshan_hdf5_file **")
def log_get_hdf5_dataset_record(log):
    """
    Returns a darshan log record for an HDF5 dataset.
    Args:
        log: handle returned by darshan.open
    Return:
        dict: generic log record, or None if no record is available.
    """
    return log_get_generic_record(log, "H5D", "struct darshan_hdf5_dataset **")
def log_get_lustre_record(log):
    """
    Returns a darshan log record for Lustre.
    Args:
        log: handle returned by darshan.open
    Return:
        dict: record with 'id', 'rank', 'counters' (numpy int64 array) and
        'ost_ids' (numpy int64 array), or None if no record is available.
    """
    modules = log_get_modules(log)
    rec = {}
    buf = ffi.new("void **")
    r = libdutil.darshan_log_get_record(log['handle'], modules['LUSTRE']['idx'], buf)
    if r < 1:
        return None
    rbuf = ffi.cast("struct darshan_lustre_record **", buf)
    rec['id'] = rbuf[0].base_rec.id
    rec['rank'] = rbuf[0].base_rec.rank
    clst = [rbuf[0].counters[i] for i in range(len(rbuf[0].counters))]
    rec['counters'] = np.array(clst, dtype=np.int64)
    cdict = dict(zip(counter_names('LUSTRE'), rec['counters']))
    # The stripe-width counter gives the number of OST id entries.
    # (Original code only printed the ids, left ostlst empty, then called
    # undefined sys.exit() and referenced undefined `mode`/`fcdict`.)
    ostlst = [rbuf[0].ost_ids[i] for i in range(cdict['LUSTRE_STRIPE_WIDTH'])]
    rec['ost_ids'] = np.array(ostlst, dtype=np.int64)
    return rec
def log_get_mpiio_record(log):
    """
    Returns a darshan log record for MPI-IO.
    Args:
        log: handle returned by darshan.open
    Returns:
        dict: log record, or None if no record is available.
    """
    return log_get_generic_record(log, "MPI-IO", "struct darshan_mpiio_file **")
def log_get_pnetcdf_record(log):
    """
    Returns a darshan log record for PnetCDF.
    Args:
        log: handle returned by darshan.open
    Returns:
        dict: log record, or None if no record is available.
    """
    return log_get_generic_record(log, "PNETCDF", "struct darshan_pnetcdf_file **")
def log_get_posix_record(log):
    """
    Returns a darshan log record for POSIX.
    Args:
        log: handle returned by darshan.open
    Returns:
        dict: log record, or None if no record is available.
    """
    return log_get_generic_record(log, "POSIX", "struct darshan_posix_file **")
def log_get_stdio_record(log):
    """
    Returns a darshan log record for STDIO.
    Args:
        log: handle returned by darshan.open
    Returns:
        dict: log record, or None if no record is available.
    """
    return log_get_generic_record(log, "STDIO", "struct darshan_stdio_file **")
| 24.295749
| 137
| 0.614197
| 1,785
| 13,144
| 4.371989
| 0.146218
| 0.029216
| 0.017299
| 0.040364
| 0.537032
| 0.47104
| 0.445284
| 0.440928
| 0.390441
| 0.380958
| 0
| 0.009684
| 0.253652
| 13,144
| 540
| 138
| 24.340741
| 0.785831
| 0.295268
| 0
| 0.337838
| 0
| 0
| 0.142028
| 0.002437
| 0
| 0
| 0
| 0.001852
| 0
| 1
| 0.09009
| false
| 0
| 0.031532
| 0
| 0.238739
| 0.009009
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32363a369f2abd8123a3c352cf5267f2cd8f6e3e
| 882
|
py
|
Python
|
pluggklockan.py
|
Vforsh03/Pluggklockan
|
845dbe82476ad3ecd8664b7cd99ce74311b92830
|
[
"MIT"
] | null | null | null |
pluggklockan.py
|
Vforsh03/Pluggklockan
|
845dbe82476ad3ecd8664b7cd99ce74311b92830
|
[
"MIT"
] | null | null | null |
pluggklockan.py
|
Vforsh03/Pluggklockan
|
845dbe82476ad3ecd8664b7cd99ce74311b92830
|
[
"MIT"
] | null | null | null |
import time
def countdown(time_sec, to_do):
    """Count down time_sec seconds, printing MM:SS in place, then print the to-do list."""
    remaining = time_sec
    while remaining:
        minutes, seconds = divmod(remaining, 60)
        # '\r' rewrites the same terminal line each second.
        print('{:02d}:{:02d}'.format(minutes, seconds), end='\r')
        time.sleep(1)
        remaining -= 1
    if remaining == 0:
        print("Det här har du att göra: ")
        for item in to_do:
            print(item)
def main():
    """Interactively build a to-do list, then run countdown timers until it is empty."""
    to_do = []
    item_count = int(input("Hur många saker ska du lägga till på listan?: "))
    for _ in range(item_count):
        to_do.append(input("Sak: "))
    while to_do:
        seconds = int(input("Hur många sekunder vill du tima: "))
        countdown(seconds, to_do)
        to_do.remove(input("Vilken sak vill du ta bort? "))
        print(to_do)
    if not to_do:
        print("Du har inget att göra, gör vad fan du vill")
# Run the interactive timer only when executed as a script.
if __name__ == "__main__":
    main()
| 25.941176
| 73
| 0.538549
| 127
| 882
| 3.559055
| 0.456693
| 0.079646
| 0.048673
| 0.070796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018644
| 0.331066
| 882
| 33
| 74
| 26.727273
| 0.747458
| 0
| 0
| 0
| 0
| 0
| 0.237927
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.038462
| 0
| 0.115385
| 0.192308
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32364b003eb60db5ffb76e4251c347561207ed8b
| 1,397
|
py
|
Python
|
gallery/views.py
|
mkbeh/Site-Nordic-Walking-
|
ba98f41db09ed448ecc4db175f65ef4fa2d64979
|
[
"MIT"
] | null | null | null |
gallery/views.py
|
mkbeh/Site-Nordic-Walking-
|
ba98f41db09ed448ecc4db175f65ef4fa2d64979
|
[
"MIT"
] | 8
|
2021-04-08T21:57:55.000Z
|
2022-03-12T00:50:38.000Z
|
gallery/views.py
|
mkbeh/Site-Nordic-Walking-
|
ba98f41db09ed448ecc4db175f65ef4fa2d64979
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, get_object_or_404
from django.views.decorators.cache import cache_page
from .models import PhotoAlbum, VideoAlbum
from blog.utils import get_pagination_page
def albums_list(request):
    """Render a paginated list of photo or video albums, chosen by URL path."""
    specific = {
        'photo': (PhotoAlbum, 'Фото альбомы'),
        'video': (VideoAlbum, 'Видео альбомы'),
    }
    # The album kind is the second path segment, e.g. /gallery/photo/...
    requested_kind = request.path.split('/')[2]
    album_obj, album_type = specific.get(requested_kind)
    albums = album_obj.objects.all().order_by('-created')
    page = get_pagination_page(request, albums)
    context = {'albums': page.object_list, 'page': page, 'album_type': album_type}
    return render(request, 'gallery/album.html', context)
@cache_page(10*60)
def album_detail(request, album_type, album_name):
    """Render the detail page for one photo or video album (cached 10 minutes)."""
    specific = {'photo': (PhotoAlbum, 50), 'video': (VideoAlbum, 4)}
    album_obj, num_pages = specific.get(album_type)
    obj = get_object_or_404(album_obj, name=album_name)
    is_photo = album_type == 'photo'
    files = obj.images_set.all() if is_photo else obj.videos_set.all()
    template = 'gallery/photo_detail.html' if is_photo else 'gallery/video_detail.html'
    page = get_pagination_page(request, files, num_pages)
    context = {'album_name': album_name, 'files': page.object_list,
               'page': page, 'total_files': len(files)}
    return render(request, template, context)
| 32.488372
| 105
| 0.689334
| 182
| 1,397
| 5.016484
| 0.335165
| 0.078861
| 0.07448
| 0.030668
| 0.243154
| 0.063527
| 0
| 0
| 0
| 0
| 0
| 0.012324
| 0.186829
| 1,397
| 42
| 106
| 33.261905
| 0.791373
| 0
| 0
| 0.125
| 0
| 0
| 0.1267
| 0.035791
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3236d1e8e71e93e12b492398d92736947474b9fb
| 2,134
|
py
|
Python
|
test/test_post.py
|
enjoy233/zhihu-py3
|
bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc
|
[
"MIT"
] | 1,321
|
2015-02-16T13:19:42.000Z
|
2022-03-25T15:03:58.000Z
|
test/test_post.py
|
fru1tw4ter/zhihu-py3
|
bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc
|
[
"MIT"
] | 64
|
2015-07-03T12:30:08.000Z
|
2022-03-01T00:55:50.000Z
|
test/test_post.py
|
fru1tw4ter/zhihu-py3
|
bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc
|
[
"MIT"
] | 551
|
2015-02-22T11:21:40.000Z
|
2022-03-25T13:22:13.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals
import unittest
import os
import json
from zhihu import Post
from test_utils import TEST_DATA_PATH
class ColumnTest(unittest.TestCase):
    """Tests for zhihu.Post against a saved column post fixture."""
    @classmethod
    def setUpClass(cls):
        # Build a Post from cached JSON (soup) and load the expected
        # markdown export from disk, so no network access is needed.
        url = 'http://zhuanlan.zhihu.com/xiepanda/20202275'
        post_path = os.path.join(TEST_DATA_PATH, 'column_post.json')
        with open(post_path, 'r') as f:
            post_json = json.load(f)
        post_saved_path = os.path.join(TEST_DATA_PATH, 'post.md')
        with open(post_saved_path, 'rb') as f:
            cls.post_saved = f.read()
        cls.post = Post(url)
        cls.post.soup = post_json
        # Expected attribute values extracted from the fixture.
        cls.expected = {'column_in_name': 'xiepanda', 'slug': 20202275,
                        'column_name': '谢熊猫出没注意',
                        'author_name': '谢熊猫君', 'author_id': 'xiepanda',
                        'title': '为了做一个称职的吃货,他决定连着吃一百天转基因食物',
                        'upvote_num': 963, 'comment_num': 199}
    def test_column_in_name(self):
        """column_in_name is the column slug from the URL."""
        self.assertEqual(self.expected['column_in_name'],
                         self.post.column_in_name)
    def test_slug(self):
        """slug is the numeric post id from the URL."""
        self.assertEqual(self.expected['slug'], self.post.slug)
    def test_author(self):
        """author exposes both display name and id."""
        self.assertEqual(self.expected['author_name'], self.post.author.name)
        self.assertEqual(self.expected['author_id'], self.post.author.id)
    def test_title(self):
        self.assertEqual(self.expected['title'], self.post.title)
    def test_upvote_num(self):
        self.assertEqual(self.expected['upvote_num'], self.post.upvote_num)
    def test_comment_num(self):
        self.assertEqual(self.expected['comment_num'], self.post.comment_num)
    def test_save(self):
        """Saving the post reproduces the reference markdown byte-for-byte."""
        save_name = 'post_save'
        self.post.save(filepath=TEST_DATA_PATH, filename=save_name)
        post_saved_path = os.path.join(TEST_DATA_PATH, save_name + '.md')
        with open(post_saved_path, 'rb') as f:
            post_saved = f.read()
        # Clean up the generated file before asserting.
        os.remove(post_saved_path)
        self.assertEqual(self.post_saved, post_saved)
| 34.419355
| 77
| 0.638238
| 279
| 2,134
| 4.637993
| 0.258065
| 0.062597
| 0.117465
| 0.146059
| 0.295981
| 0.170015
| 0.117465
| 0.097372
| 0.097372
| 0
| 0
| 0.01476
| 0.238051
| 2,134
| 61
| 78
| 34.983607
| 0.781058
| 0.02015
| 0
| 0.044444
| 0
| 0
| 0.131163
| 0.011967
| 0
| 0
| 0
| 0
| 0.177778
| 1
| 0.177778
| false
| 0
| 0.133333
| 0
| 0.333333
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
323ae527f5aea6328f8ca830f729b3e6114a8c51
| 503
|
py
|
Python
|
algorithm implement (python)/mergesort.py
|
yedkk/algorithm-design
|
433b70e8302ec91b74542e9144dd93fdb5b0f8d3
|
[
"MIT"
] | 2
|
2021-06-01T02:31:06.000Z
|
2021-06-01T02:39:45.000Z
|
algorithm implement (python)/mergesort.py
|
yedkk/algorithm-design
|
433b70e8302ec91b74542e9144dd93fdb5b0f8d3
|
[
"MIT"
] | null | null | null |
algorithm implement (python)/mergesort.py
|
yedkk/algorithm-design
|
433b70e8302ec91b74542e9144dd93fdb5b0f8d3
|
[
"MIT"
] | null | null | null |
def getArray():
    """Read a line "n x1 x2 ..." from stdin; return [x1, x2, ...] as ints (n is skipped)."""
    tokens = input().strip().split(' ')[1:]
    return [int(token) for token in tokens]
def merge(s1, s2):
    """Merge two sorted lists into a single sorted list."""
    n1, n2 = len(s1), len(s2)
    i = j = 0
    merged = []
    while i < n1 or j < n2:
        # Take from s1 while it has elements and its head is strictly smaller
        # (ties go to s2, matching the original comparison).
        if i < n1 and (j >= n2 or s1[i] < s2[j]):
            merged.append(s1[i])
            i += 1
        else:
            merged.append(s2[j])
            j += 1
    return merged
def output(s):
    """Print "<count> <elements...>" without a trailing newline."""
    print('{} {}'.format(len(s), ' '.join(str(x) for x in s)), end='')
# Script entry: read two sorted arrays from stdin, merge, and print the result.
s1 = getArray()
s2 = getArray()
s = merge(s1, s2)
output(s)
| 13.236842
| 48
| 0.508946
| 90
| 503
| 2.844444
| 0.377778
| 0.082031
| 0.078125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095109
| 0.26839
| 503
| 37
| 49
| 13.594595
| 0.600543
| 0
| 0
| 0.142857
| 0
| 0
| 0.005964
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0
| 0
| 0.178571
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
323b7d2cb5ec3fee745d90ccfecbe50bdd67fcc2
| 1,276
|
py
|
Python
|
src/CSVtoJSON.py
|
CloudSevenConsulting/DustyDynamo
|
335e9a2efc71ccf42cf9dfc7c13fcf62cd5d9453
|
[
"MIT"
] | null | null | null |
src/CSVtoJSON.py
|
CloudSevenConsulting/DustyDynamo
|
335e9a2efc71ccf42cf9dfc7c13fcf62cd5d9453
|
[
"MIT"
] | null | null | null |
src/CSVtoJSON.py
|
CloudSevenConsulting/DustyDynamo
|
335e9a2efc71ccf42cf9dfc7c13fcf62cd5d9453
|
[
"MIT"
] | null | null | null |
import csv
import json
from pprint import pprint
import os
# Ticker symbols whose <symbol>.csv files are converted to <symbol>.json.
stockData = ['RIO']

for i in range(0, len(stockData)):
    fieldnames = ("NetworkTime", "StockID", "Open", "High", "Low", "Close", "Adj Close", "Volume")
    # Context managers guarantee both files are closed even on error;
    # the original handles (csvfile, data) were never closed.
    with open(stockData[i] + '.csv', 'r') as csvfile, \
            open(stockData[i] + '.json', 'w') as data:
        reader = csv.DictReader(csvfile, fieldnames)
        data.write('[\n')
        for row in reader:
            data.write('{ \n' \
                + '"MoteTimestamp": "%s",' % row['NetworkTime'] \
                + '\n"MoteID": %s,' % row['StockID'] \
                + '\n "StockData":{' \
                + '\n "OpenPrice": %s,' % row['Open'] \
                + '\n "HighPrice": %s,' % row['High'] \
                + '\n "LowPrice": %s,' % row['Low'] \
                + '\n "ClosePrice": %s,' % row['Close'] \
                + '\n "Adj Close": %s,' % row['Adj Close'] \
                + '\n "VolumeNumber": %s' % row['Volume'] \
                + '\n }' \
                + '\n},\n'
            )
    # Strip the 3 trailing bytes after the last record, then close the array.
    # NOTE(review): seek(-3) assumes the file ends with ",\r\n" (CRLF line
    # endings); on LF-only platforms this also removes the final '}' —
    # confirm the target platform before relying on the output.
    with open(stockData[i] + '.json', 'rb+') as filehandle:
        filehandle.seek(-3, os.SEEK_END)
        filehandle.truncate()
    with open(stockData[i] + '.json', 'a') as filehandle:
        filehandle.write("\n]")
| 29.674419
| 95
| 0.462382
| 133
| 1,276
| 4.428571
| 0.368421
| 0.054329
| 0.095076
| 0.091681
| 0.091681
| 0.091681
| 0
| 0
| 0
| 0
| 0
| 0.002323
| 0.325235
| 1,276
| 42
| 96
| 30.380952
| 0.681765
| 0
| 0
| 0
| 0
| 0
| 0.259812
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
323d0642bd0b2e71b6ea4028021ab212c0e0889f
| 700
|
py
|
Python
|
core/api.py
|
rastos/Mi-Fit-and-Zepp-workout-exporter
|
e05dd7321b71dff6a4e2f4794d0d66d4eee2cbfa
|
[
"MIT"
] | 13
|
2021-04-13T14:27:58.000Z
|
2022-02-09T18:32:37.000Z
|
core/api.py
|
rastos/Mi-Fit-and-Zepp-workout-exporter
|
e05dd7321b71dff6a4e2f4794d0d66d4eee2cbfa
|
[
"MIT"
] | 3
|
2021-06-03T20:27:34.000Z
|
2021-06-04T06:24:18.000Z
|
core/api.py
|
rastos/Mi-Fit-and-Zepp-workout-exporter
|
e05dd7321b71dff6a4e2f4794d0d66d4eee2cbfa
|
[
"MIT"
] | 2
|
2021-06-03T20:29:54.000Z
|
2021-08-13T22:28:59.000Z
|
import requests
class Api:
    """Thin client for the Huami (Mi Fit / Zepp) sport-run HTTP API."""

    def __init__(self, token):
        # App token used to authenticate every request.
        self.token = token

    def _get(self, url, params):
        """Issue an authenticated GET and return the decoded JSON body."""
        response = requests.get(url, headers={
            'apptoken': self.token
        }, params=params)
        response.raise_for_status()
        return response.json()

    def get_history(self):
        """Fetch the workout history listing."""
        return self._get(
            'https://api-mifit-de2.huami.com/v1/sport/run/history.json',
            {'source': 'run.mifit.huami.com'},
        )

    def get_detail(self, track_id, source):
        """Fetch the detail record for one workout track."""
        return self._get(
            'https://api-mifit-de2.huami.com/v1/sport/run/detail.json',
            {'trackid': track_id, 'source': source},
        )
| 24.137931
| 95
| 0.547143
| 84
| 700
| 4.416667
| 0.380952
| 0.097035
| 0.06469
| 0.091644
| 0.571429
| 0.571429
| 0.38814
| 0.247978
| 0.247978
| 0.247978
| 0
| 0.008214
| 0.304286
| 700
| 28
| 96
| 25
| 0.753593
| 0
| 0
| 0.47619
| 0
| 0.095238
| 0.238571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.047619
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
323de0cd069365ae5cc57c4534ae993e3a17cc39
| 7,616
|
py
|
Python
|
Server/Python/tests/dbsserver_t/unittests/web_t/DBSMigrateModel_t.py
|
vkuznet/DBS
|
14df8bbe8ee8f874fe423399b18afef911fe78c7
|
[
"Apache-2.0"
] | 8
|
2015-08-14T04:01:32.000Z
|
2021-06-03T00:56:42.000Z
|
Server/Python/tests/dbsserver_t/unittests/web_t/DBSMigrateModel_t.py
|
yuyiguo/DBS
|
14df8bbe8ee8f874fe423399b18afef911fe78c7
|
[
"Apache-2.0"
] | 162
|
2015-01-07T21:34:47.000Z
|
2021-10-13T09:42:41.000Z
|
Server/Python/tests/dbsserver_t/unittests/web_t/DBSMigrateModel_t.py
|
yuyiguo/DBS
|
14df8bbe8ee8f874fe423399b18afef911fe78c7
|
[
"Apache-2.0"
] | 16
|
2015-01-22T15:27:29.000Z
|
2021-04-28T09:23:28.000Z
|
#!/usr/bin/env python
"""
DBS 3 Migrate REST model unittests
The DBS3 Migration Service must be stopped before executing the unittest. In addition, take care
that no instance is running on the same DB. Else the single unittests can happen to fail due to
race conditions with DBS3 Migration Service.
"""
from dbsserver_t.utils.DBSRestApi import DBSRestApi
from dbsserver_t.utils.DBSDataProvider import DBSBlockDataProvider, create_child_data_provider
from dbsserver_t.utils.TestTools import expectedFailure
from itertools import chain
import os
import socket
import unittest
class DBSMigrateModel_t(unittest.TestCase):
    """Ordered integration tests for the DBS migration REST API.

    NOTE(review): the test methods rely on unittest's alphabetical execution
    order (test_01 ... test_99) and share state through class attributes, so
    individual tests are not independently runnable.
    """
    # Class-level so fixtures and collected request ids survive across the
    # per-method TestCase instances that unittest creates.
    _data_provider = None
    _saved_data = {}
    def __init__(self, methodName='runTest'):
        super(DBSMigrateModel_t, self).__init__(methodName)
        # Guard for runners that instantiate the case without calling
        # setUpClass first; _data_provider doubles as the "initialized" flag.
        if not self._data_provider:
            self.setUpClass()
    @classmethod
    def setUpClass(cls):
        # Baseline single-block data set.
        cls._data_provider = DBSBlockDataProvider(num_of_blocks=1, num_of_files=10, num_of_runs=10, num_of_lumis=10)
        ### According to https://svnweb.cern.ch/trac/CMSDMWM/ticket/4068, blocks and dataset migration should use
        ### separate input data. _independent(_child)_data_provider will provide them.
        cls._independent_data_provider = DBSBlockDataProvider(num_of_blocks=5, num_of_files=10, num_of_runs=10,
                                                              num_of_lumis=10)
        cls._parent_data_provider = DBSBlockDataProvider(num_of_blocks=1, num_of_files=10,
                                                         num_of_runs=10, num_of_lumis=10)
        # Child providers derive data with parentage links back to the
        # corresponding parent/independent providers.
        cls._child_data_provider = create_child_data_provider(cls._parent_data_provider)
        cls._independent_child_data_provider = create_child_data_provider(cls._independent_data_provider)
        config = os.environ['DBS_TEST_CONFIG']
        service = os.environ.get("DBS_TEST_SERVICE", "DBSMigrate")
        #Use one specific database cms_dbs3_local3@int2r for migration unittests when migration_test=True
        cls._migrate_api = DBSRestApi(config, service, migration_test=True)
        # Migration source URL points at this host's DBSWriter instance.
        cls._migration_url = 'https://%s/dbs/dev/global/DBSWriter' % (socket.getfqdn())
        cls._writer_api = DBSRestApi(config, 'DBSWriter')
    def setUp(self):
        # No per-test setup; all fixtures are class level.
        pass
    @expectedFailure
    def test_01_migration_removal(self):
        """test01: Clean-up old migration requests. Test to remove migration requests between different DBS instances\n"""
        for status in sorted(self._migrate_api.list('status'), key=lambda status: status['migration_request_id']):
            data = {'migration_rqst_id': status['migration_request_id']}
            # Statuses 0, 3, 9 are removable (presumably pending/failed
            # states — verify against DBS migration status codes) and only
            # by the request's creator; anything else must raise.
            if status['migration_status'] in (0, 3, 9) and status['create_by'] == os.getlogin():
                self._migrate_api.insert('remove', data)
            else:
                self.assertRaises(Exception, self._migrate_api.insert, 'remove', data)
    def test_02_migration_request(self):
        """test02: Negative test to request a migration between different DBS instances before injecting data.
        This is a negative test because the block was not inserted into the source DB.\n"""
        for block_name in (block['block']['block_name'] for block in self._child_data_provider.block_dump()):
            toMigrate = {'migration_url' : self._migration_url,
                         'migration_input' : block_name}
            self.assertRaises(Exception, self._migrate_api.insert, 'submit', toMigrate)
    def test_03_insert_data_to_migrate(self):
        """test03: Insert data to migrate into source DBS instance. This is has to be done for the next several tests.\n"""
        # Inject every generated block into the writer instance so the later
        # submit/status/remove tests have real source data.
        for block in chain(self._data_provider.block_dump(),
                           self._independent_data_provider.block_dump(),
                           self._parent_data_provider.block_dump(),
                           self._child_data_provider.block_dump(),
                           self._independent_child_data_provider.block_dump()):
            self._writer_api.insert('bulkblocks', block)
    def test_04_migration_request(self):
        """test04: Test to request a migration between different DBS instances by block.\n"""
        for block_name in (block['block']['block_name'] for block in self._child_data_provider.block_dump()):
            toMigrate = {'migration_url' : self._migration_url,
                         'migration_input' : block_name}
            result = self._migrate_api.insert('submit', toMigrate)
            # Remember ids/inputs for the status and removal tests below.
            self._saved_data.setdefault('migration_rqst_ids', []).append(result['migration_details']['migration_request_id'])
            self._saved_data.setdefault('migration_inputs', []).append(block_name)
    def test_05_migration_request(self):
        """test05: Test to request a migration between different DBS instances by dataset.\n"""
        datasets = set((block['dataset']['dataset']
                        for block in chain(self._child_data_provider.block_dump(),
                                           self._independent_child_data_provider.block_dump())))
        for dataset in datasets:
            toMigrate = {'migration_url' : self._migration_url,
                         'migration_input' : dataset}
            result = self._migrate_api.insert('submit', toMigrate)
            self._saved_data.setdefault('migration_rqst_ids', []).append(result['migration_details']['migration_request_id'])
    def test_06_migration_status(self):
        """test06: Test to check the status of an ongoing migration between different DBS instances by id. \n"""
        status = self._migrate_api.list('status')
        self.assertTrue(isinstance(status, list))
        # Each saved request id must resolve to exactly one status entry.
        for migration_rqst_id in self._saved_data['migration_rqst_ids']:
            status = self._migrate_api.list('status', migration_rqst_id)
            self.assertEqual(len(status), 1)
    def test_07_migration_status(self):
        """test07: Test to check the status of an ongoing migration between different DBS instances by block. \n"""
        for migration_input in self._saved_data['migration_inputs']:
            status = self._migrate_api.list('status', block_name=migration_input)
            self.assertEqual(len(status), 1)
    def test_08_migration_status(self):
        """test08: Test to check the status of an ongoing migration between different DBS instances by dataset. \n"""
        # Dataset name is the part of the block name before the '#'.
        datasets = set((block_name.split('#', 1)[0] for block_name in self._saved_data['migration_inputs']))
        for dataset in datasets:
            status = self._migrate_api.list('status', dataset=dataset)
            # A dataset can map to several blocks, hence >= 1 rather than == 1.
            self.assertTrue(len(status)>=1)
    def test_09_migration_removal(self):
        "test09: Test to remove a pending migration request between different DBS instances. \n"
        for migration_rqst_id in self._saved_data['migration_rqst_ids']:
            data = {'migration_rqst_id': migration_rqst_id}
            self._migrate_api.insert('remove', data)
    def test_99_save_data_to_disk(self):
        """test99: Save data to disk to re-use data for migration server unittests. \n"""
        self._data_provider.save('migration_unittest_data.pkl')
        self._independent_data_provider.save('migration_unittest_independent_data.pkl')
        self._parent_data_provider.save('migration_unittest_parent_data.pkl')
        self._independent_child_data_provider.save('migration_unittest_independent_child_data.pkl')
        self._child_data_provider.save('migration_unittest_child_data.pkl')
if __name__ == "__main__":
    # Build the ordered migration suite and run it with per-test output.
    loader = unittest.TestLoader()
    migration_suite = loader.loadTestsFromTestCase(DBSMigrateModel_t)
    unittest.TextTestRunner(verbosity=2).run(migration_suite)
| 55.591241
| 125
| 0.689207
| 950
| 7,616
| 5.211579
| 0.235789
| 0.065441
| 0.048071
| 0.038174
| 0.498283
| 0.45102
| 0.35003
| 0.322965
| 0.280347
| 0.267219
| 0
| 0.013414
| 0.216912
| 7,616
| 136
| 126
| 56
| 0.816734
| 0.207064
| 0
| 0.2
| 0
| 0
| 0.142833
| 0.029495
| 0
| 0
| 0
| 0
| 0.063158
| 1
| 0.136842
| false
| 0.010526
| 0.073684
| 0
| 0.242105
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
323e018247ff04ecd6fd2937c2a4145cd45afc55
| 844
|
py
|
Python
|
setup.py
|
sgang007/audio_chat_client
|
e2c1caf6ec1a781be0d22f516e55434099514da1
|
[
"MIT"
] | null | null | null |
setup.py
|
sgang007/audio_chat_client
|
e2c1caf6ec1a781be0d22f516e55434099514da1
|
[
"MIT"
] | null | null | null |
setup.py
|
sgang007/audio_chat_client
|
e2c1caf6ec1a781be0d22f516e55434099514da1
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
# from distutils.core import setup
# import py2exe
# import sys
import os
# Removing os.link forces setuptools to copy files instead of hard-linking
# them; hard links fail on some filesystems (e.g. VirtualBox/Vagrant shared
# folders) — TODO confirm this workaround is still needed.
del os.link
# sys.setrecursionlimit(5000)
# Runtime dependencies are maintained in requirements.txt, one per line.
with open('requirements.txt') as f:
    required = f.read().splitlines()
def readme():
    """Return the contents of README.md, used as the package long description."""
    with open('README.md') as readme_file:
        contents = readme_file.read()
    return contents
# Package metadata and entry points for the varta-chat audio chat client.
setup(name='varta-chat',
      version='1.0',
      description='Audio Chat framework',
      long_description=readme(),  # README.md contents, read above
      url='https://github.com/sgang007/audio_chat_client',
      author='Shubhojyoti Ganguly',
      author_email='shubho.important@gmail.com',
      license='MIT',
      packages=find_packages(),
      install_requires=required,  # parsed from requirements.txt above
      entry_points={
          'console_scripts': [
              # Installs a `varta` console command that starts the client.
              'varta = client.__main__:key_listener',
          ]
      },
      include_package_data=True,
      zip_safe=True)
| 23.444444
| 58
| 0.64455
| 99
| 844
| 5.323232
| 0.686869
| 0.041746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015432
| 0.232227
| 844
| 35
| 59
| 24.114286
| 0.79784
| 0.100711
| 0
| 0
| 0
| 0
| 0.267905
| 0.071618
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.12
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
323e28eb5aa06c996913613c2bfc7c17a0e85d7c
| 2,334
|
py
|
Python
|
kglib/tests/end_to_end/kgcn/diagnosis_debug.py
|
graknlabs/research
|
ae3ee07106739efd10f0627058210038ab5956d3
|
[
"Apache-2.0"
] | 13
|
2018-09-25T13:29:08.000Z
|
2018-12-10T11:04:38.000Z
|
kglib/tests/end_to_end/kgcn/diagnosis_debug.py
|
graknlabs/research
|
ae3ee07106739efd10f0627058210038ab5956d3
|
[
"Apache-2.0"
] | 23
|
2018-09-17T20:31:44.000Z
|
2018-12-14T11:21:52.000Z
|
kglib/tests/end_to_end/kgcn/diagnosis_debug.py
|
graknlabs/research
|
ae3ee07106739efd10f0627058210038ab5956d3
|
[
"Apache-2.0"
] | 1
|
2018-09-25T15:56:32.000Z
|
2018-09-25T15:56:32.000Z
|
#
# Copyright (C) 2021 Vaticle
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import sys
import unittest
from kglib.kgcn_tensorflow.examples.diagnosis.diagnosis import diagnosis_example
class TestDiagnosisExampleDebug(unittest.TestCase):
    """
    A copy of the end-to-end test for local debugging. Requires a TypeDB server to be started in the background
    manually. Run with:
    bazel test //kglib/tests/end_to_end:diagnosis --test_output=streamed --spawn_strategy=standalone --action_env=PATH --test_arg=--<path/to/your/typedb/directory>
    """
    def setUp(self):
        # Arguments are appended by the bazel py_test definition; they are
        # popped off the end of sys.argv in reverse order: TypeDB binary
        # path last, then seed-data path, then schema path.
        self._typedb_binary_location = sys.argv.pop()
        base_dir = os.getenv("TEST_SRCDIR") + "/" + os.getenv("TEST_WORKSPACE")
        self._data_file_location = base_dir + sys.argv.pop()
        self._schema_file_location = base_dir + sys.argv.pop()
    def test_learning_is_done(self):
        # solveds_tr / solveds_ge are per-epoch training and generalisation
        # metrics (presumably fraction solved — verify in diagnosis_example).
        # The final values must show real learning (>= 0.7) without being
        # suspiciously perfect (<= 0.99).
        solveds_tr, solveds_ge = diagnosis_example(self._typedb_binary_location,
                                                   schema_file_path=self._schema_file_location,
                                                   seed_data_file_path=self._data_file_location)
        self.assertGreaterEqual(solveds_tr[-1], 0.7)
        self.assertLessEqual(solveds_tr[-1], 0.99)
        self.assertGreaterEqual(solveds_ge[-1], 0.7)
        self.assertLessEqual(solveds_ge[-1], 0.99)
if __name__ == "__main__":
    # This handles the fact that additional arguments that are supplied by our py_test definition
    # https://stackoverflow.com/a/38012249
    # Passing a dummy argv stops unittest.main from trying to parse bazel's
    # extra command-line arguments (they are consumed in setUp instead).
    unittest.main(argv=['ignored-arg'])
| 42.436364
| 163
| 0.707798
| 317
| 2,334
| 5.037855
| 0.492114
| 0.03757
| 0.018785
| 0.020038
| 0.072636
| 0.072636
| 0.036318
| 0
| 0
| 0
| 0
| 0.01619
| 0.206084
| 2,334
| 54
| 164
| 43.222222
| 0.845656
| 0.51928
| 0
| 0
| 0
| 0
| 0.041783
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
324140adbf8ce6a27b7f51c371562021ff506dae
| 1,668
|
py
|
Python
|
python/math_utils.py
|
PROrock/codin-game-puzzles
|
a0444719f9a629fc97b1da6f175ecd462a9ff59b
|
[
"MIT"
] | 1
|
2021-06-16T02:33:57.000Z
|
2021-06-16T02:33:57.000Z
|
python/math_utils.py
|
PROrock/codin-game-puzzles
|
a0444719f9a629fc97b1da6f175ecd462a9ff59b
|
[
"MIT"
] | null | null | null |
python/math_utils.py
|
PROrock/codin-game-puzzles
|
a0444719f9a629fc97b1da6f175ecd462a9ff59b
|
[
"MIT"
] | null | null | null |
def signum(x):
    """Return the sign of x: 1 if positive, -1 if negative, 0 otherwise."""
    if x > 0:
        return 1
    # Note: any value that is neither > 0 nor < 0 (including NaN) yields 0,
    # matching the original fall-through behavior.
    return -1 if x < 0 else 0
# Backport of math.isclose from Python 3.5 (PEP 485) - probably not needed
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Return True when a and b are within the relative/absolute tolerance."""
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
def gcd(a, b):
    """Greatest common divisor of a and b (signs are ignored)."""
    a_mag = abs(a)
    b_mag = abs(b)
    return _gcd_internal(a_mag, b_mag)
def _gcd_internal(a, b):
"""Greatest common divisor internal"""
# Impl. notes: Euler algorithm, both a and b are not negative
# There exists faster algorithm (which uses division by 2, which is faster)
# -> Stein's algorithm https://en.wikipedia.org/wiki/Binary_GCD_algorithm
# print a, b
if a == b:
return a
if b == 1:
return 1
if a == 0 or b == 0:
return max(a, b)
return gcd(b, a % b)
def combinations_generator(n, k):
    """Generates all combinations of list of length n with k ones (lexicographically sorted).

    Yields 0/1 lists of length ``n`` containing exactly ``k`` ones, starting
    from the list with all ones packed at the right end.
    Storing only one indices and creating the combination list might be more
    performant.

    Fixes vs. the original: ``copy`` was an undefined name (no import in this
    module) and ``xrange`` does not exist on Python 3; both caused NameError
    at first iteration.
    """
    combination = [1 if i >= n - k else 0 for i in range(n)]
    while True:
        yield combination
        # Slice-copy so the list already handed to the caller is not mutated
        # while computing the successor.
        combination = combination[:]
        # get first one with zero before it
        one_indices = [idx for idx, value in enumerate(combination) if value]
        for one_idx_idx, one_idx in enumerate(one_indices):
            combination[one_idx] = 0
            if one_idx > 0 and one_idx - 1 != one_indices[one_idx_idx - 1]:
                # Move this one left by one and re-pack all ones passed so
                # far immediately to its left.
                for i in range(one_idx_idx + 1):
                    combination[one_idx - i - 1] = 1
                break
        else:
            # all combinations generated, breaking
            break
| 32.076923
| 93
| 0.607914
| 253
| 1,668
| 3.909091
| 0.391304
| 0.016178
| 0.0273
| 0.020222
| 0.068756
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02209
| 0.294365
| 1,668
| 51
| 94
| 32.705882
| 0.818182
| 0.343525
| 0
| 0.066667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0.033333
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3247a207cdb1e57a605f9bb8949d6c37632fda73
| 3,707
|
py
|
Python
|
pymt/grids/map.py
|
mwtoews/pymt
|
81a8469b0d0d115d21186ec1d1c9575690d51850
|
[
"MIT"
] | null | null | null |
pymt/grids/map.py
|
mwtoews/pymt
|
81a8469b0d0d115d21186ec1d1c9575690d51850
|
[
"MIT"
] | null | null | null |
pymt/grids/map.py
|
mwtoews/pymt
|
81a8469b0d0d115d21186ec1d1c9575690d51850
|
[
"MIT"
] | null | null | null |
#! /bin/env python
"""
Examples
========
**Rectilinear**
Create a rectilinear grid that is 2x3::
(0) --- (1) --- (2)
| | |
| | |
| [0] | [1] |
| | |
| | |
(3) --- (4) --- (5)
Numbers in parens are node IDs, and numbers in square brackets are
cell IDs.
>>> g = RectilinearMap ([0, 2], [0, 1, 2])
>>> g.get_x ()
array([ 0., 1., 2., 0., 1., 2.])
>>> g.get_y ()
array([ 0., 0., 0., 2., 2., 2.])
Node 1 is shared by both cell 0, and 1; node 5 only is part of cell 1.
>>> g.get_shared_cells (1)
[0, 1]
>>> g.get_shared_cells (5)
[1]
Point (.5, 1.) is contained only within cell 0.
>>> g.is_in_cell (.5, 1., 0)
True
>>> g.is_in_cell (.5, 1., 1)
False
Point (1., 1.) is on a border and so is contained by both cells.
>>> g.is_in_cell (1, 1., 0)
True
>>> g.is_in_cell (1, 1., 1)
True
"""
from shapely.geometry import Point, asLineString, asPoint, asPolygon
from pymt.grids import (
Rectilinear,
Structured,
UniformRectilinear,
Unstructured,
UnstructuredPoints,
)
class UnstructuredMap(Unstructured):
    """Unstructured grid extended with point->cell and point-in-cell queries.

    On construction builds two lookup structures:
    - ``_point``: point id -> list of ids of the cells that use that point;
    - ``_polys``: one shapely geometry per cell (polygon, line or point,
      depending on how many nodes the cell has).

    NOTE(review): the shapely ``asPolygon``/``asLineString``/``asPoint``
    adapters are fed ``zip(...)`` objects; under Python 3 ``zip`` returns an
    iterator, so this likely relies on Python 2 semantics — confirm before
    porting.
    """
    name = "Unstructured"
    def __init__(self, *args, **kwargs):
        super(UnstructuredMap, self).__init__(*args, **kwargs)
        # Invert the connectivity array: for every point, record which cells
        # reference it. _offset marks the end of each cell's node list.
        self._point = {}
        last_offset = 0
        for (cell_id, offset) in enumerate(self._offset):
            cell = self._connectivity[last_offset:offset]
            last_offset = offset
            for point_id in cell:
                try:
                    self._point[point_id].append(cell_id)
                except KeyError:
                    # First cell seen for this point.
                    self._point[point_id] = [cell_id]
        (point_x, point_y) = (self.get_x(), self.get_y())
        # Build one geometry per cell, degrading by node count:
        # 3+ nodes -> polygon, 2 -> line segment, 1 -> point.
        self._polys = []
        last_offset = 0
        for (cell_id, offset) in enumerate(self._offset):
            cell = self._connectivity[last_offset:offset]
            last_offset = offset
            (x, y) = (point_x.take(cell), point_y.take(cell))
            if len(x) > 2:
                self._polys.append(asPolygon(zip(x, y)))
            elif len(x) == 2:
                self._polys.append(asLineString(zip(x, y)))
            else:
                self._polys.append(asPoint(zip(x, y)))
    def get_shared_cells(self, point_id):
        """
        Parameters
        ----------
        point_id: int
            ID of a point in the grid.
        Returns
        -------
        ndarray of int
            Indices to cells that share a given node.
        """
        return self._point[point_id]
    def is_in_cell(self, x, y, cell_id):
        """Check if a point is in a cell.
        Parameters
        ----------
        x: float
            x-coordinate of point to check.
        y: float
            y-coordinate of point to check.
        cell_id: int
            ID of the cell in the grid.
        Returns
        -------
        bool
            True if the point (x, y) is contained in the cell.
        """
        # ``touches`` makes points on a cell border count as inside, so a
        # shared edge belongs to both neighboring cells (see module doctest).
        pt = Point((x, y))
        return self._polys[cell_id].contains(pt) or self._polys[cell_id].touches(pt)
class UnstructuredPointsMap(UnstructuredPoints):
    """Point-cloud variant of the map interface: there are no cells at all,
    so both queries have trivial constant answers."""

    name = "UnstructuredPoints"

    def get_shared_cells(self, point_id):  # pylint: disable=no-self-use
        """A bare point belongs to no cells, so the list is always empty."""
        return []

    def is_in_cell(self, x, y, cell_id):  # pylint: disable=no-self-use
        """With no cells, no coordinate can ever be inside one."""
        return False
class StructuredMap(Structured, UnstructuredMap):
    # Structured grid combined with the point/cell query behavior.
    name = "Structured"
class RectilinearMap(Rectilinear, UnstructuredMap):
    # Rectilinear grid combined with the point/cell query behavior.
    name = "Rectilinear"
class UniformRectilinearMap(UniformRectilinear, UnstructuredMap):
    # Uniform rectilinear grid combined with the point/cell query behavior.
    name = "UniformRectilinear"
if __name__ == "__main__":
    import doctest
    # NORMALIZE_WHITESPACE lets the array examples in the module docstring
    # match despite formatting/whitespace differences in printed output.
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| 23.916129
| 84
| 0.555705
| 468
| 3,707
| 4.228632
| 0.252137
| 0.027287
| 0.024255
| 0.018191
| 0.279434
| 0.235472
| 0.195048
| 0.123295
| 0.123295
| 0.100051
| 0
| 0.022683
| 0.310224
| 3,707
| 154
| 85
| 24.071429
| 0.751271
| 0.362018
| 0
| 0.218182
| 0
| 0
| 0.035566
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.054545
| 0.036364
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3247e08ee12a7d9414679491f0b3e5ad060be2e8
| 27,447
|
py
|
Python
|
jogo.py
|
AdamastorLinsFrancaNetto/jogo-academic-journey
|
ad312d255abe23e243ba39732e972cf45f092b08
|
[
"MIT"
] | null | null | null |
jogo.py
|
AdamastorLinsFrancaNetto/jogo-academic-journey
|
ad312d255abe23e243ba39732e972cf45f092b08
|
[
"MIT"
] | null | null | null |
jogo.py
|
AdamastorLinsFrancaNetto/jogo-academic-journey
|
ad312d255abe23e243ba39732e972cf45f092b08
|
[
"MIT"
] | null | null | null |
import pygame
from conteudo import Conteudo, Nave, Tiro
import random
class Jogo:
    def __init__(self):
        """Create every sprite, HUD element and progression counter.

        Coordinate convention visible below: y = 960 is just below the
        screen, negative y is above it; sprites slide in from those
        positions.
        """
        # Two copies of the scrolling background; the second starts one
        # screen above (y = -960) so the scroll can wrap seamlessly.
        self.fundo1 = Conteudo("arquivos/espaco.png", 0, 0)
        self.fundo2 = Conteudo("arquivos/espaco.png", 0, -960)
        self.nave = Nave("arquivos/nave1.png",630,750)
        # Commander sprites that slide in from below for each dialogue stage.
        self.comando = Conteudo("arquivos/comando1.png", 40, 960)
        self.comandoo = Conteudo("arquivos/comandoo1.png", 40, 960)
        self.comandooo = Conteudo("arquivos/comandooo1.png", 40, 900)
        self.comandoooo = Conteudo("arquivos/comandoooo1.png", 40, 960)
        # Dialogue panels shown beside the commander.
        self.dialogo1 = Conteudo("arquivos/dialogo1.png", 330, 120)
        self.dialogo2 = Conteudo("arquivos/dialogo4.png", 330, 120)
        self.dialogo3 = Conteudo("arquivos/dialogo6.png", 330, 120)
        self.dialogo4 = Conteudo("arquivos/dialogo8.png", 330, 120)
        self.dialogo5 = Conteudo("arquivos/dialogo9.png", 330, 120)
        # Armour (hull) HUD pips, from full (5) down to 1.
        self.armadura5 = Conteudo("arquivos/armadura5.png", 10, 10)
        self.armadura4 = Conteudo("arquivos/armadura4.png", 10, 10)
        self.armadura3 = Conteudo("arquivos/armadura3.png", 10, 10)
        self.armadura2 = Conteudo("arquivos/armadura2.png", 10, 10)
        self.armadura1 = Conteudo("arquivos/armadura1.png", 10, 10)
        # "Discernimento" stat HUD, levels 0-5.
        self.discernimento0 = Conteudo("arquivos/disc0.png", 820, 10)
        self.discernimento1 = Conteudo("arquivos/disc1.png", 820, 10)
        self.discernimento2 = Conteudo("arquivos/disc2.png", 820, 10)
        self.discernimento3 = Conteudo("arquivos/disc3.png", 820, 10)
        self.discernimento4 = Conteudo("arquivos/disc4.png", 820, 10)
        self.discernimento5 = Conteudo("arquivos/disc5.png", 820, 10)
        # Enemy (pr*) and allied (pb*) planets that scroll down into view.
        self.planetainimigo = Conteudo("arquivos/pr1.png", 910, -320)
        self.planetaaliado = Conteudo("arquivos/pb1.png", 10, -600)
        # "Resiliencia" stat HUD, levels 0-5.
        self.resiliencia0 = Conteudo("arquivos/resili0.png", 824, 56)
        self.resiliencia1 = Conteudo("arquivos/resili1.png", 824, 56)
        self.resiliencia2 = Conteudo("arquivos/resili2.png", 824, 56)
        self.resiliencia3 = Conteudo("arquivos/resili3.png", 824, 56)
        self.resiliencia4 = Conteudo("arquivos/resili4.png", 824, 56)
        self.resiliencia5 = Conteudo("arquivos/resili5.png", 824, 56)
        # Medal panel and the three medals earned during progression.
        self.condecoracoes = Conteudo("arquivos/condecoracoes.png", 1010, 755)
        self.condecoracao1 = Conteudo("arquivos/condecoracao1.png", 1010, 790)
        self.condecoracao2 = Conteudo("arquivos/condecoracao2.png", 1100, 790)
        self.condecoracao3 = Conteudo("arquivos/condecoracao3.png", 1190, 790)
        # "Destreza" stat HUD, levels 0-5.
        self.destreza0 = Conteudo("arquivos/destreza0.png", 821, 104)
        self.destreza1 = Conteudo("arquivos/destreza1.png", 821, 104)
        self.destreza2 = Conteudo("arquivos/destreza2.png", 821, 104)
        self.destreza3 = Conteudo("arquivos/destreza3.png", 821, 104)
        self.destreza4 = Conteudo("arquivos/destreza4.png", 821, 104)
        self.destreza5 = Conteudo("arquivos/destreza5.png", 821, 104)
        # Asteroid-phase obstacles (g*/r) and the player's shot sprite.
        self.gggg = Conteudo("arquivos/gggg1.png", 1000, -230)
        self.ggg = Conteudo("arquivos/ggg1.png", 700, -180)
        self.gg = Conteudo("arquivos/gg1.png", 400, -130)
        self.g = Conteudo("arquivos/g1.png", 100, -100)
        self.r = Conteudo("arquivos/r.png", 600, -50)
        self.tiro = Tiro("arquivos/x1.png", -100,-100)
        # Small asteroids for the planet phase (aste*) and the asteroid
        # phase (aste11/22/33).
        self.aste1 = Conteudo("arquivos/aste1.png", 840, -50)
        self.aste2 = Conteudo("arquivos/aste2.png", 640, -120)
        self.aste3 = Conteudo("arquivos/aste3.png", 440, -190)
        self.aste11 = Conteudo("arquivos/aste11.png", 840, -50)
        self.aste22 = Conteudo("arquivos/aste22.png", 640, -120)
        self.aste33 = Conteudo("arquivos/aste33.png", 440, -190)
        # Barrier waves: three segments each, at x = 0 / 427 / 854.
        self.p1 = Conteudo("arquivos/p1.png", 0, -40)
        self.p2 = Conteudo("arquivos/p2.png", 427, -40)
        self.p3 = Conteudo("arquivos/p3.png", 854, -40)
        self.i1 = Conteudo("arquivos/i1.png", 0, -40)
        self.i2 = Conteudo("arquivos/i2.png", 427, -40)
        self.i3 = Conteudo("arquivos/i3.png", 854, -40)
        self.w1 = Conteudo("arquivos/w1.png", 0, -40)
        self.w2 = Conteudo("arquivos/w2.png", 427, -40)
        self.w3 = Conteudo("arquivos/w3.png", 854, -40)
        self.f1 = Conteudo("arquivos/f1.png", 0, -40)
        self.f2 = Conteudo("arquivos/f2.png", 427, -40)
        self.f3 = Conteudo("arquivos/f3.png", 854, -40)
        self.d1 = Conteudo("arquivos/d1.png", 0, -40)
        self.d2 = Conteudo("arquivos/d2.png", 427, -40)
        self.d3 = Conteudo("arquivos/d3.png", 854, -40)
        # End-of-game screen.
        self.fim = Conteudo("arquivos/fim.png", 0, 0)
        # Progression flags and counters that gate the one-shot stage
        # transitions used throughout the update methods below.
        self.boleana_dialogo = False
        self.mudar_cena = False
        self.foi = False
        self.contagem_resili = 0
        self.contagem_destre = 0
        self.contagem_dialogo1 = 1
        self.contagem_paliados = 1
        self.contagem_pinimigos = 1
        self.inicio_asteroides = 0
        self.contagem_gggg = 1
        self.contagem_ggg = 1
        self.contagem_gg = 1
        self.contagem_g = 1
        self.contagem_r = 1
        self.contagem_barreira = 0
        self.inicio = 0
        self.inicio1 = 0
        self.final = 0
    def draw(self, tela):
        """Render the frame onto ``tela``.

        The call order defines the z-order: background first, then ship and
        actors, then HUD. Most sprites are drawn conditionally based on the
        progression counters set up in __init__.
        """
        self.fundo1.draw(tela)
        self.fundo2.draw(tela)
        self.tiro.draw(tela)
        self.nave.draw(tela)
        self.comando.draw(tela)
        # Later commanders appear only after the relevant stage is complete.
        if self.nave.contagem_discernimento == 5 and self.contagem_paliados == 6:
            self.comandoo.draw(tela)
        if self.nave.contagem_resiliencia == 5 and self.contagem_gggg == 6:
            self.comandooo.draw(tela)
        if self.inicio == 1:
            self.comandoooo.draw(tela)
        # y == 370 is the commanders' fully-slid-in resting position; only
        # then is the matching dialogue panel shown.
        if self.comandoooo.personagens.rect[1] == 370:
            self.dialogo4.draw(tela)
        if self.inicio1 == 1:
            self.condecoracao3.draw(tela)
        if self.contagem_dialogo1 == 9:
            self.dialogo5.draw(tela)
        if self.final == 1:
            self.fim.draw(tela)
        # Armour pips; depleted ones are kill()ed elsewhere so drawing all
        # five here is harmless.
        self.armadura5.draw(tela)
        self.armadura4.draw(tela)
        self.armadura3.draw(tela)
        self.armadura2.draw(tela)
        self.armadura1.draw(tela)
        # Planet phase starts once the intro dialogue reaches stage 3.
        if self.contagem_dialogo1 == 3:
            self.planetaaliado.draw(tela)
        if self.contagem_dialogo1 == 3:
            self.planetainimigo.draw(tela)
        self.aste1.draw(tela)
        self.aste2.draw(tela)
        self.aste3.draw(tela)
        # Stat HUDs: draw the sprite matching the current stat level.
        if self.contagem_dialogo1 == 3:
            self.discernimento0.draw(tela)
        if self.nave.contagem_discernimento == 1:
            self.discernimento1.draw(tela)
        if self.nave.contagem_discernimento == 2:
            self.discernimento2.draw(tela)
        if self.nave.contagem_discernimento == 3:
            self.discernimento3.draw(tela)
        if self.nave.contagem_discernimento == 4:
            self.discernimento4.draw(tela)
        if self.nave.contagem_discernimento == 5:
            self.discernimento5.draw(tela)
        if self.nave.contagem_resiliencia == 1:
            self.resiliencia1.draw(tela)
        if self.nave.contagem_resiliencia == 2:
            self.resiliencia2.draw(tela)
        if self.nave.contagem_resiliencia == 3:
            self.resiliencia3.draw(tela)
        if self.nave.contagem_resiliencia == 4:
            self.resiliencia4.draw(tela)
        if self.nave.contagem_resiliencia == 5:
            self.resiliencia5.draw(tela)
        if self.nave.contagem_destreza == 1:
            self.destreza1.draw(tela)
        if self.nave.contagem_destreza == 2:
            self.destreza2.draw(tela)
        if self.nave.contagem_destreza == 3:
            self.destreza3.draw(tela)
        if self.nave.contagem_destreza == 4:
            self.destreza4.draw(tela)
        if self.nave.contagem_destreza == 5:
            self.destreza5.draw(tela)
        # Dialogue panels appear while their commander is fully on screen.
        if self.comando.personagens.rect[1] == 370:
            self.dialogo1.draw(tela)
        if self.comandoo.personagens.rect[1] == 370:
            self.dialogo2.draw(tela)
        if self.comandooo.personagens.rect[1] == 370:
            self.dialogo3.draw(tela)
        # Medal panel plus earned medals and newly unlocked stat HUDs.
        if self.contagem_resili == 1:
            self.condecoracoes.draw(tela)
            self.condecoracao1.draw(tela)
            self.resiliencia0.draw(tela)
        if self.contagem_destre == 1:
            self.condecoracao2.draw(tela)
            self.destreza0.draw(tela)
        # Asteroid phase actors.
        if self.inicio_asteroides == 1:
            self.gggg.draw(tela)
            self.ggg.draw(tela)
            self.gg.draw(tela)
            self.g.draw(tela)
            self.r.draw(tela)
            self.aste11.draw(tela)
            self.aste22.draw(tela)
            self.aste33.draw(tela)
        if self.nave.contagem_resiliencia == 6:
            self.comandooo.draw(tela)
        # Barrier waves: p -> i -> w -> f -> d as contagem_barreira advances.
        if self.contagem_dialogo1 == 7:
            self.p1.draw(tela)
            self.p2.draw(tela)
            self.p3.draw(tela)
        if self.contagem_barreira == 1:
            self.i1.draw(tela)
            self.i2.draw(tela)
            self.i3.draw(tela)
        if self.contagem_barreira == 2:
            self.w1.draw(tela)
            self.w2.draw(tela)
            self.w3.draw(tela)
        if self.contagem_barreira == 3:
            self.f1.draw(tela)
            self.f2.draw(tela)
            self.f3.draw(tela)
        if self.contagem_barreira == 4:
            self.d1.draw(tela)
            self.d2.draw(tela)
            self.d3.draw(tela)
    def atualizacoes(self):
        """Per-frame update: animations, spawning, collisions and movement.

        Order matters: sprite animations first, then spawning/movement, then
        every collision pairing, then the HUD/stage bookkeeping helpers.
        """
        self.movimento_fundo()
        # Frame-flip animations for the animated sprites.
        self.nave.animacoes("nave", 2, 2)
        self.comando.animacoes("comando", 2, 2)
        self.comandoo.animacoes("comandoo", 2, 2)
        self.comandooo.animacoes("comandooo", 2, 2)
        self.comandoooo.animacoes("comandoooo", 2, 2)
        self.tiro.animacoes("x",2,2)
        self.planetas_inimigos()
        self.planetas_aliados()
        # Ship vs. planet-phase obstacles.
        self.nave.colisao_planetas(self.planetainimigo.group, "planetainimigos")
        self.nave.colisao_planetas(self.aste1.group, "aste1")
        self.nave.colisao_planetas(self.aste2.group, "aste2")
        self.nave.colisao_planetas(self.aste3.group, "aste3")
        self.nave.colisao_planetas(self.planetaaliado.group, "planetaaliados")
        # Shot vs. planet-phase obstacles.
        self.tiro.colisao_tiro(self.planetainimigo.group, "planetainimigos")
        self.tiro.colisao_tiro(self.planetaaliado.group, "planetaaliados")
        self.tiro.colisao_tiroast1(self.aste1.group, "aste1")
        self.tiro.colisao_tiroast1(self.aste2.group, "aste2")
        self.tiro.colisao_tiroast1(self.aste3.group, "aste3")
        # Ship vs. asteroid-phase obstacles.
        self.nave.colisao_asteroides(self.gggg.group, "gggg")
        self.nave.colisao_asteroides(self.ggg.group, "ggg")
        self.nave.colisao_asteroides(self.gg.group, "gg")
        self.nave.colisao_asteroides(self.g.group, "g")
        self.nave.colisao_asteroides(self.r.group, "r")
        self.nave.colisao_asteroides(self.aste11.group, "aste11")
        self.nave.colisao_asteroides(self.aste22.group, "aste22")
        self.nave.colisao_asteroides(self.aste33.group, "aste33")
        # Shot vs. asteroid-phase obstacles.
        self.tiro.colisao_tiroo(self.gggg.group, "gggg")
        self.tiro.colisao_tiroo(self.ggg.group, "ggg")
        self.tiro.colisao_tiroo(self.gg.group, "gg")
        self.tiro.colisao_tiroo(self.g.group, "g")
        self.tiro.colisao_tiroast2(self.aste11.group, "aste11")
        self.tiro.colisao_tiroast2(self.aste22.group, "aste22")
        self.tiro.colisao_tiroast2(self.aste33.group, "aste33")
        # Ship vs. every barrier segment of every wave.
        self.nave.colisao_barreira(self.p1.group, "p1")
        self.nave.colisao_barreira(self.p2.group, "p2")
        self.nave.colisao_barreira(self.p3.group, "p3")
        self.nave.colisao_barreira(self.i1.group, "i1")
        self.nave.colisao_barreira(self.i2.group, "i2")
        self.nave.colisao_barreira(self.i3.group, "i3")
        self.nave.colisao_barreira(self.w1.group, "w1")
        self.nave.colisao_barreira(self.w2.group, "w2")
        self.nave.colisao_barreira(self.w3.group, "w3")
        self.nave.colisao_barreira(self.f1.group, "f1")
        self.nave.colisao_barreira(self.f2.group, "f2")
        self.nave.colisao_barreira(self.f3.group, "f3")
        self.nave.colisao_barreira(self.d1.group, "d1")
        self.nave.colisao_barreira(self.d2.group, "d2")
        self.nave.colisao_barreira(self.d3.group, "d3")
        # Shot vs. every barrier segment of every wave.
        self.tiro.colisao_barreirat(self.p1.group, "p1")
        self.tiro.colisao_barreirat(self.p2.group, "p2")
        self.tiro.colisao_barreirat(self.p3.group, "p3")
        self.tiro.colisao_barreirat(self.i1.group, "i1")
        self.tiro.colisao_barreirat(self.i2.group, "i2")
        self.tiro.colisao_barreirat(self.i3.group, "i3")
        self.tiro.colisao_barreirat(self.w1.group, "w1")
        self.tiro.colisao_barreirat(self.w2.group, "w2")
        self.tiro.colisao_barreirat(self.w3.group, "w3")
        self.tiro.colisao_barreirat(self.f1.group, "f1")
        self.tiro.colisao_barreirat(self.f2.group, "f2")
        self.tiro.colisao_barreirat(self.f3.group, "f3")
        self.tiro.colisao_barreirat(self.d1.group, "d1")
        self.tiro.colisao_barreirat(self.d2.group, "d2")
        self.tiro.colisao_barreirat(self.d3.group, "d3")
        # HUD refresh and stage progression.
        self.quantidade_armadura()
        self.quantidade_disernimento()
        self.quantidade_resiliencia()
        self.quantidade_destreza()
        self.movimento_primeira()
        self.movimento_segunda()
        self.movimento_terceira()
        self.asteroides()
        self.disparado()
        self.barreira()
        self.movimento_quarta()
    def movimento_primeira(self):
        """Slide the first commander in after ENTER, then out after dialogue 3.

        contagem_enter is bumped on arrival so the slide-in (and the intro
        recording) triggers exactly once.
        """
        if self.nave.contagem_enter == 1:
            self.comando.personagens.rect[1] -= 3
            if self.comando.personagens.rect[1] <= 370:
                # Snap to the resting position and play the intro voice-over.
                self.comando.personagens.rect[1] = 370
                self.nave.contagem_enter += 1
                pygame.mixer.init()
                self.som_dialogo = pygame.mixer.Sound("arquivos/gravacao1.mpeg")
                self.som_dialogo.play()
        if self.contagem_dialogo1 == 3:
            # Slide back down off-screen and remove the sprite.
            self.comando.personagens.rect[1] += 6
            if self.comando.personagens.rect[1] >= 960:
                self.comando.personagens.rect[1] = 960
                self.comando.personagens.kill()
    def movimento_segunda(self):
        """Slide the second commander in once the planet phase is complete.

        Arrival bumps contagem_dialogo1, which also makes the entry condition
        false on subsequent frames (one-shot trigger).
        """
        if self.nave.contagem_discernimento == 5 and self.contagem_paliados == 6 and self.contagem_dialogo1 == 3:
            self.comandoo.personagens.rect[1] -= 3
            if self.comandoo.personagens.rect[1] <= 370:
                self.comandoo.personagens.rect[1] = 370
                # Award the resilience medal and advance the dialogue stage.
                self.contagem_resili += 1
                self.contagem_dialogo1 += 1
                pygame.mixer.init()
                self.som_dialogo = pygame.mixer.Sound("arquivos/gravacao3.mpeg")
                self.som_dialogo.play()
        if self.contagem_dialogo1 == 5:
            # Slide back down off-screen and remove the sprite.
            self.comandoo.personagens.rect[1] += 6
            if self.comandoo.personagens.rect[1] >= 960:
                self.comandoo.personagens.rect[1] = 960
                self.comandoo.personagens.kill()
    def movimento_terceira(self):
        """Slide the third commander in once the asteroid phase is complete.

        Mirrors movimento_segunda: arrival plays the recording, awards the
        dexterity medal and advances the dialogue stage (one-shot trigger).
        """
        if self.nave.contagem_resiliencia == 5 and self.contagem_gggg == 6 and self.contagem_dialogo1 == 5:
            self.comandooo.personagens.rect[1] -= 3
            if self.comandooo.personagens.rect[1] <= 370:
                self.comandooo.personagens.rect[1] = 370
                pygame.mixer.init()
                self.som_dialogo = pygame.mixer.Sound("arquivos/gravacao4.mpeg")
                self.som_dialogo.play()
                self.contagem_destre += 1
                self.contagem_dialogo1 += 1
        if self.contagem_dialogo1 == 7:
            # Slide back down off-screen and remove the sprite.
            self.comandooo.personagens.rect[1] += 6
            if self.comandooo.personagens.rect[1] >= 960:
                self.comandooo.personagens.rect[1] = 960
                self.comandooo.personagens.kill()
def movimento_fundo(self):
self.fundo1.personagens.rect[1] += 4
self.fundo2.personagens.rect[1] += 4
if self.fundo1.personagens.rect[1] >= 960:
self.fundo1.personagens.rect[1] = 0
if self.fundo2.personagens.rect[1] >= 0:
self.fundo2.personagens.rect[1] = -960
def quantidade_armadura(self):
if self.nave.contagem_armadura == 4:
self.armadura5.personagens.kill()
if self.nave.contagem_armadura == 3:
self.armadura4.personagens.kill()
if self.nave.contagem_armadura == 2:
self.armadura3.personagens.kill()
if self.nave.contagem_armadura == 1:
self.armadura2.personagens.kill()
if self.nave.contagem_armadura == 0:
self.mudar_cena = True
def quantidade_disernimento(self):
if self.nave.contagem_discernimento == 1:
self.discernimento0.personagens.kill()
if self.nave.contagem_discernimento == 2:
self.discernimento1.personagens.kill()
if self.nave.contagem_discernimento == 3:
self.discernimento2.personagens.kill()
if self.nave.contagem_discernimento == 4:
self.discernimento3.personagens.kill()
if self.nave.contagem_discernimento == 5:
self.discernimento4.personagens.kill()
def quantidade_resiliencia(self):
if self.nave.contagem_resiliencia == 1:
self.resiliencia0.personagens.kill()
if self.nave.contagem_resiliencia == 2:
self.resiliencia1.personagens.kill()
if self.nave.contagem_resiliencia == 3:
self.resiliencia2.personagens.kill()
if self.nave.contagem_resiliencia == 4:
self.resiliencia3.personagens.kill()
if self.nave.contagem_resiliencia == 5:
self.resiliencia4.personagens.kill()
def quantidade_destreza(self):
if self.nave.contagem_destreza == 1:
self.destreza0.personagens.kill()
if self.nave.contagem_destreza == 2:
self.destreza1.personagens.kill()
if self.nave.contagem_destreza == 3:
self.destreza2.personagens.kill()
if self.nave.contagem_destreza == 4:
self.destreza3.personagens.kill()
if self.nave.contagem_destreza == 5:
self.destreza4.personagens.kill()
    def dialogo(self, event):
        """Advance the dialogue by one panel per keypad-ENTER press.

        KEYDOWN/KEYUP toggle boleana_dialogo so each press advances exactly
        once per event.
        """
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_KP_ENTER:
                self.boleana_dialogo = True
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_KP_ENTER:
                self.boleana_dialogo = False
        if self.boleana_dialogo:
            # Swap the current panel for the next numbered dialogo image.
            self.dialogo1.personagens.kill()
            self.contagem_dialogo1 +=1
            self.dialogo1 = Conteudo("arquivos/dialogo" + str(self.contagem_dialogo1) + ".png", 330, 120)
            if self.contagem_dialogo1 <= 2:
                pygame.mixer.init()
                self.som_dialogo = pygame.mixer.Sound("arquivos/gravacao2.mpeg")
                self.som_dialogo.play()
            print("Nº dialogo:", self.contagem_dialogo1)
            if self.contagem_dialogo1 == 5:
                # Reaching stage 5 ends the second dialogue and starts the
                # asteroid phase.
                self.dialogo2.personagens.kill()
                self.inicio_asteroides = 1
    def planetas_inimigos(self):
        """Scroll enemy planets and small asteroids; respawn them at random
        x positions above the screen until five enemy planets have passed.

        Only runs after the first commander is fully off-screen (y == 960).
        """
        if self.comando.personagens.rect[1] == 960 and self.contagem_pinimigos <= 5:
            self.planetainimigo.personagens.rect[1] += 6
            self.aste1.personagens.rect[1] += 7
            self.aste2.personagens.rect[1] += 7
            self.aste3.personagens.rect[1] += 7
        # Each asteroid that leaves the bottom is removed and, while the
        # wave is still running, respawned above at a random x.
        if self.aste1.personagens.rect[1] >= 960 and self.contagem_pinimigos <= 5:
            self.aste1.personagens.kill()
            if self.contagem_pinimigos <= 4:
                self.aste1 = Conteudo("arquivos/aste1.png", random.randrange(50, 1000), -50)
        if self.aste2.personagens.rect[1] >= 960 and self.contagem_pinimigos <= 5:
            self.aste2.personagens.kill()
            if self.contagem_pinimigos <= 4:
                self.aste2 = Conteudo("arquivos/aste2.png", random.randrange(50, 1000), -120)
        if self.aste3.personagens.rect[1] >= 960 and self.contagem_pinimigos <= 5:
            self.aste3.personagens.kill()
            if self.contagem_pinimigos <= 4:
                self.aste3 = Conteudo("arquivos/aste3.png", random.randrange(50, 1000), -190)
        if self.planetainimigo.personagens.rect[1] >= 960 and self.contagem_pinimigos <= 5:
            # Spawn the next numbered enemy planet (pr2, pr3, ...) until the
            # counter passes 5.
            self.planetainimigo.personagens.kill()
            self.contagem_pinimigos += 1
            if self.contagem_pinimigos <= 5:
                self.planetainimigo = Conteudo("arquivos/pr" + str(self.contagem_pinimigos) + ".png", random.randrange(50, 900), -320)
def planetas_aliados(self):
    """Scroll the allied planet down while the command ship holds the bottom.

    When the planet leaves the screen it is killed, the wave counter is
    advanced, and the next pb<N>.png sprite is spawned above the top.
    """
    if self.comando.personagens.rect[1] == 960 and self.contagem_paliados <= 5:
        self.planetaaliado.personagens.rect[1] += 6
    if self.planetaaliado.personagens.rect[1] >= 960 and self.contagem_paliados <= 5:
        self.planetaaliado.personagens.kill()
        self.contagem_paliados += 1
        if self.contagem_paliados <= 5:
            self.planetaaliado = Conteudo("arquivos/pb%d.png" % self.contagem_paliados, random.randrange(50, 900), -440)
def asteroides(self):
    """Scroll the asteroid-phase sprites and recycle them as they exit.

    Debris sprites (gggg/ggg/gg/g/r) fall at 4 px per call, the three
    aste* sprites at 7 px.  Sprites past the bottom edge are killed and
    respawned above the top while their wave counters allow it.
    """
    if self.inicio_asteroides == 1:
        for nome in ("gggg", "ggg", "gg", "g", "r"):
            getattr(self, nome).personagens.rect[1] += 4
        for nome in ("aste11", "aste22", "aste33"):
            getattr(self, nome).personagens.rect[1] += 7
    # Small asteroids: respawn with the same image while the gggg wave allows.
    for nome, altura in (("aste11", -50), ("aste22", -120), ("aste33", -190)):
        sprite = getattr(self, nome)
        if sprite.personagens.rect[1] >= 960 and self.contagem_gggg <= 4:
            sprite.personagens.kill()
            if self.contagem_gggg <= 4:
                setattr(self, nome, Conteudo("arquivos/%s.png" % nome, random.randrange(50, 1000), altura))
    # Debris sprites: each has its own counter and a numbered image per wave.
    for nome, altura in (("gggg", -230), ("ggg", -180), ("gg", -130), ("g", -100)):
        sprite = getattr(self, nome)
        contador = "contagem_" + nome
        if sprite.personagens.rect[1] >= 960 and getattr(self, contador) <= 5:
            sprite.personagens.kill()
            atual = getattr(self, contador) + 1
            setattr(self, contador, atual)
            if atual <= 5:
                setattr(self, nome, Conteudo("arquivos/%s%d.png" % (nome, atual), random.randrange(50, 1000), altura))
    # The r sprite always reuses the same image.
    if self.r.personagens.rect[1] >= 960 and self.contagem_r <= 5:
        self.r.personagens.kill()
        self.contagem_r += 1
        if self.contagem_r <= 5:
            self.r = Conteudo("arquivos/r.png", random.randrange(50, 1000), -50)
def disparado(self):
    """Move the player's shot upward and recycle it when it leaves play.

    While the trigger (self.tiro.tiro) is set, the shot is snapped to the
    ship's nose and starts travelling; once it reaches the top edge it is
    killed, parked off-screen, and a fresh Tiro sprite is created.
    """
    if self.tiro.tiro:
        # Snap the shot to the ship's nose before it starts travelling.
        self.tiro.personagens.rect[1] = (self.nave.personagens.rect[1] + 30)
        self.tiro.personagens.rect[0] = (self.nave.personagens.rect[0] + 62)
        self.foi = True
    if self.foi:
        if self.tiro.personagens.rect[1] >= 0:
            self.tiro.personagens.rect[1] -= 15
        # BUGFIX: was `== 0`; a shot whose start height was not a multiple
        # of 15 jumped past zero and was never killed or recycled.
        if self.tiro.personagens.rect[1] <= 0:
            self.tiro.personagens.kill()
            self.tiro.personagens.rect[1] = -200
            self.tiro.personagens.rect[0] = -200
            self.foi = False
            if self.tiro.tiro:
                self.tiro = Tiro("arquivos/x1.png", self.nave.personagens.rect[0] + 62, self.nave.personagens.rect[1] + 30)
def barreira(self):
    """Scroll the five barrier waves (p, i, w, f, d) one after another.

    Each wave of three sprites falls 3 px per call; when its lead sprite
    passes the bottom edge the whole wave is killed and contagem_barreira
    advances to release the next wave.  The last wave sets self.inicio.
    """
    if self.contagem_dialogo1 == 7:
        self.p1.personagens.rect[1] += 3
        self.p2.personagens.rect[1] += 3
        self.p3.personagens.rect[1] += 3
        if self.p1.personagens.rect[1] >= 960:
            self.p1.personagens.kill()
            self.p2.personagens.kill()
            self.p3.personagens.kill()
            self.contagem_barreira = 1
    if self.contagem_barreira == 1:
        self.i1.personagens.rect[1] += 3
        self.i2.personagens.rect[1] += 3
        self.i3.personagens.rect[1] += 3
        if self.i1.personagens.rect[1] >= 960:
            self.i1.personagens.kill()
            self.i2.personagens.kill()
            self.i3.personagens.kill()
            self.contagem_barreira = 2
    if self.contagem_barreira == 2:
        self.w1.personagens.rect[1] += 3
        self.w2.personagens.rect[1] += 3
        self.w3.personagens.rect[1] += 3
        if self.w1.personagens.rect[1] >= 960:
            self.w1.personagens.kill()
            self.w2.personagens.kill()
            self.w3.personagens.kill()
            self.contagem_barreira = 3
    if self.contagem_barreira == 3:
        self.f1.personagens.rect[1] += 3
        self.f2.personagens.rect[1] += 3
        self.f3.personagens.rect[1] += 3
        # BUGFIX: this wave tested f2 while every other wave tests its
        # first sprite (p1/i1/w1/d1); made it consistent by testing f1.
        if self.f1.personagens.rect[1] >= 960:
            self.f1.personagens.kill()
            self.f2.personagens.kill()
            self.f3.personagens.kill()
            self.contagem_barreira = 4
    if self.contagem_barreira == 4:
        self.d1.personagens.rect[1] += 3
        self.d2.personagens.rect[1] += 3
        self.d3.personagens.rect[1] += 3
        if self.d1.personagens.rect[1] >= 960:
            self.contagem_barreira = 5
            self.d1.personagens.kill()
            self.d2.personagens.kill()
            self.d3.personagens.kill()
            self.inicio = 1
def movimento_quarta(self):
    """Drive the fourth-stage cutscene of the big command ship sprite.

    NOTE(review): once the sprite is clamped at y == 370 (and likewise
    while contagem_dialogo1 == 9), pygame.mixer.Sound(...).play() is
    re-triggered on every call — confirm whether the recordings are meant
    to restart each frame or should play only once.
    """
    if self.inicio == 1:
        # Rise until clamped at y == 370, then play recording 5.
        self.comandoooo.personagens.rect[1] -= 3
        if self.comandoooo.personagens.rect[1] <= 370:
            self.comandoooo.personagens.rect[1] = 370
            pygame.mixer.init()
            self.som_dialogo = pygame.mixer.Sound("arquivos/gravacao5.mpeg")
            self.som_dialogo.play()
            self.inicio1 = 1
    if self.contagem_dialogo1 == 8:
        self.dialogo4.personagens.kill()
    if self.contagem_dialogo1 == 9:
        # Recording 6 accompanies dialogue screen 9.
        pygame.mixer.init()
        self.som_dialogo = pygame.mixer.Sound("arquivos/gravacao6.mpeg")
        self.som_dialogo.play()
        self.dialogo5.personagens.kill()
    if self.contagem_dialogo1 == 10:
        # Descend off-screen, then remove the sprite and flag the finale.
        self.comandoooo.personagens.rect[1] += 6
        if self.comandoooo.personagens.rect[1] >= 960:
            self.comandoooo.personagens.rect[1] = 960
            self.comandoooo.personagens.kill()
            self.final = 1
| 47.651042
| 134
| 0.603709
| 3,302
| 27,447
| 4.944579
| 0.071169
| 0.045569
| 0.087217
| 0.045201
| 0.624793
| 0.398359
| 0.307099
| 0.168494
| 0.108716
| 0.082134
| 0
| 0.066041
| 0.266805
| 27,447
| 575
| 135
| 47.733913
| 0.745279
| 0
| 0
| 0.152174
| 0
| 0
| 0.07102
| 0.024742
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032609
| false
| 0
| 0.005435
| 0
| 0.039855
| 0.001812
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32485d3d2f97d8719c9ad7891c585aced9f9c6ac
| 1,308
|
py
|
Python
|
xpresso/binders/dependants.py
|
adriangb/xpresso
|
43fcc360f7b19c00e0b78480f96390bcb4d28053
|
[
"MIT"
] | 75
|
2022-01-18T02:17:57.000Z
|
2022-03-24T02:30:04.000Z
|
xpresso/binders/dependants.py
|
adriangb/xpresso
|
43fcc360f7b19c00e0b78480f96390bcb4d28053
|
[
"MIT"
] | 73
|
2022-01-18T03:01:27.000Z
|
2022-03-27T16:41:38.000Z
|
xpresso/binders/dependants.py
|
adriangb/xpresso
|
43fcc360f7b19c00e0b78480f96390bcb4d28053
|
[
"MIT"
] | 3
|
2022-01-18T22:47:06.000Z
|
2022-01-25T02:03:53.000Z
|
import inspect
import typing
from di.api.dependencies import CacheKey
from di.dependant import Dependant, Marker
from xpresso._utils.typing import Protocol
from xpresso.binders.api import SupportsExtractor, SupportsOpenAPI
# Covariant result type produced when a marker is registered on a parameter.
T = typing.TypeVar("T", covariant=True)


class SupportsMarker(Protocol[T]):
    """Structural protocol for markers.

    Anything with a ``register_parameter`` method that binds itself to an
    ``inspect.Parameter`` and returns a ``T`` satisfies this protocol.
    """

    def register_parameter(self, param: inspect.Parameter) -> T:
        ...
class Binder(Dependant[typing.Any]):
    """A resolved parameter binding.

    Couples the extractor that pulls the parameter's value out of the
    connection with the object that provides its OpenAPI metadata.  The
    dependant is constructed with ``call=extractor.extract`` and
    ``scope="connection"``.
    """

    def __init__(
        self,
        *,
        openapi: SupportsOpenAPI,
        extractor: SupportsExtractor,
    ) -> None:
        super().__init__(call=extractor.extract, scope="connection")
        self.openapi = openapi
        self.extractor = extractor

    @property
    def cache_key(self) -> CacheKey:
        # Binders are keyed by their extractor: two binders sharing an
        # extractor are treated as the same dependency for caching.
        return self.extractor
class BinderMarker(Marker):
    """Marker pairing an extractor marker with an OpenAPI marker.

    At parameter-registration time both markers are bound to the same
    parameter and the results are wrapped in a :class:`Binder`.
    """

    def __init__(
        self,
        *,
        extractor_marker: SupportsMarker[SupportsExtractor],
        openapi_marker: SupportsMarker[SupportsOpenAPI],
    ) -> None:
        self.extractor_marker = extractor_marker
        self.openapi_marker = openapi_marker

    def register_parameter(self, param: inspect.Parameter) -> Binder:
        """Bind both markers to *param* and return the resulting Binder."""
        bound_openapi = self.openapi_marker.register_parameter(param)
        bound_extractor = self.extractor_marker.register_parameter(param)
        return Binder(openapi=bound_openapi, extractor=bound_extractor)
| 26.693878
| 70
| 0.683486
| 131
| 1,308
| 6.625954
| 0.328244
| 0.074885
| 0.065668
| 0.0553
| 0.103687
| 0.103687
| 0.103687
| 0
| 0
| 0
| 0
| 0
| 0.2263
| 1,308
| 48
| 71
| 27.25
| 0.857708
| 0
| 0
| 0.216216
| 0
| 0
| 0.00841
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135135
| false
| 0
| 0.162162
| 0.054054
| 0.432432
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3249b98ec0603abf9f97a5033a897bd1e2965b76
| 440
|
py
|
Python
|
Cisco/Python/Modulo_3/for/exercicio1.py
|
ThiagoKS-7/Python_Essencials_1_cisco
|
a417747e873f69bb307c4d36205797b191b5b45a
|
[
"MIT"
] | null | null | null |
Cisco/Python/Modulo_3/for/exercicio1.py
|
ThiagoKS-7/Python_Essencials_1_cisco
|
a417747e873f69bb307c4d36205797b191b5b45a
|
[
"MIT"
] | null | null | null |
Cisco/Python/Modulo_3/for/exercicio1.py
|
ThiagoKS-7/Python_Essencials_1_cisco
|
a417747e873f69bb307c4d36205797b191b5b45a
|
[
"MIT"
] | null | null | null |
def main():
    """Count to five, one "Mississippi" per second, then announce the end."""
    import time
    # One line per second, hide-and-seek style: "<n> Mississippi".
    for segundo in range(5):
        # BUGFIX: the original printed "Mississipi"; the exercise text in
        # this file asks for the word "Mississippi".
        print(f'{segundo + 1} Mississippi')
        time.sleep(1)
    print("Ready or not, here i come!")
# Run the countdown only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| 27.5
| 89
| 0.584091
| 65
| 440
| 3.830769
| 0.630769
| 0.084337
| 0.072289
| 0.104418
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013289
| 0.315909
| 440
| 16
| 90
| 27.5
| 0.813953
| 0.456818
| 0
| 0
| 0
| 0
| 0.236364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.25
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3252c61f7a71dbc22f9e4a1f7ba0cf98c90f9ea0
| 8,931
|
py
|
Python
|
pytorch-transformers-extensions/examples/run_inference.py
|
deepchatterjeevns/nlp_projects
|
8ea4a846138da0bcee2970907ea3340b1cdc74cb
|
[
"MIT"
] | 21
|
2019-07-25T08:39:56.000Z
|
2020-12-14T09:59:06.000Z
|
pytorch-transformers-extensions/examples/run_inference.py
|
deepchatterjeevns/nlp_projects
|
8ea4a846138da0bcee2970907ea3340b1cdc74cb
|
[
"MIT"
] | 1
|
2019-08-05T03:23:54.000Z
|
2019-08-05T03:24:39.000Z
|
pytorch-transformers-extensions/examples/run_inference.py
|
deepchatterjeevns/nlp_projects
|
8ea4a846138da0bcee2970907ea3340b1cdc74cb
|
[
"MIT"
] | 15
|
2019-07-31T13:37:14.000Z
|
2021-09-28T19:01:27.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Running inference for sequence classification on various datasets (Bert, XLM, XLNet)."""
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import numpy as np
from scipy.special import softmax
import torch
from torch.utils.data import (DataLoader, SequentialSampler, TensorDataset)
from tqdm import tqdm, trange
from pytorch_transformers import (WEIGHTS_NAME, BertConfig,
BertForSequenceClassification, BertTokenizer,
XLMConfig, XLMForSequenceClassification,
XLMTokenizer, XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer)
from utils_dataset import (compute_metrics, convert_examples_to_features,
output_modes, processors, InputExample)
# Module-level logger for this script.
logger = logging.getLogger(__name__)
# All pretrained checkpoint shortcut names known to the three config classes.
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig)), ())
# Maps the --model_type CLI value to its (config, model, tokenizer) classes.
MODEL_CLASSES = {
    'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
    'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
}
def inference(args, model, tokenizer, prefix=""):
    """Run sentiment inference on the single example built from args.text.

    Builds a one-example dataset via load_example, feeds it through the
    model, and logs whether the text is classified negative (class 0) or
    positive (class 1) with a softmax confidence percentage.
    """
    inf_task = args.task_name
    inf_dataset = load_example(args, inf_task, tokenizer)
    inf_sampler = SequentialSampler(inf_dataset)
    inf_dataloader = DataLoader(inf_dataset, sampler=inf_sampler, batch_size=1)

    # Inference!
    logger.info("***** Running inference {} *****".format(prefix))
    # eval() is loop-invariant; set it once instead of per batch.
    model.eval()
    for batch in tqdm(inf_dataloader, desc="Inferencing"):
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1],
                      'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM don't use segment_ids
                      'labels': batch[3]}
            outputs = model(**inputs)
            # Loss is not needed for inference; only the logits are used.
            _, logits = outputs[:2]
        pred_arr = logits.detach().cpu().numpy()
        logger.info("pred_arr: %s", pred_arr)
        pred_prob = np.squeeze(softmax(pred_arr, axis=1))
        logger.info("[0]: %s, [1]: %s", pred_prob[0], pred_prob[1])
        if args.output_mode == "classification":
            pred = np.argmax(pred_arr, axis=1)
        elif args.output_mode == "regression":
            pred = np.squeeze(pred_arr)
        if pred == 0:
            logger.info("Text is negative with confidence: %d ", pred_prob[0] * 100)
        else:
            logger.info("Text is positive with confidence: %d ", pred_prob[1] * 100)
def load_example(args, task, tokenizer):
    """Build a one-example TensorDataset from args.text for *task*.

    The single example is labelled '1' as a placeholder; features are
    produced with the xlnet-specific layout tweaks when applicable.
    """
    processor = processors[task]()
    output_mode = output_modes[task]
    logger.info("Creating features from input")
    label_list = processor.get_labels()
    examples = [InputExample(guid=0, text_a=args.text, text_b=None, label='1')]
    is_xlnet = args.model_type in ['xlnet']
    features = convert_examples_to_features(examples, label_list, args.max_seq_length, tokenizer, output_mode,
                                            cls_token_at_end=bool(is_xlnet),  # xlnet has a cls token at the end
                                            cls_token=tokenizer.cls_token,
                                            sep_token=tokenizer.sep_token,
                                            cls_token_segment_id=2 if is_xlnet else 1,
                                            pad_on_left=bool(is_xlnet),  # pad on the left for xlnet
                                            pad_token_segment_id=4 if is_xlnet else 0)
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    if output_mode == "classification":
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
    elif output_mode == "regression":
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
    return TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
def main():
    """Parse CLI arguments, load the pretrained model/tokenizer and run inference on --text."""
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--model_type", default=None, type=str, required=True,
                        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
    parser.add_argument("--task_name", default=None, type=str, required=True,
                        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
    parser.add_argument("--text", default="None", type=str, required=True,
                        help="text to analyze")
    ## Other parameters
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after tokenization. Sequences longer "
                             "than this will be truncated, sequences shorter will be padded.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
    args = parser.parse_args()
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    # Setup logging
    logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt = '%m/%d/%Y %H:%M:%S',
                        level = logging.INFO)
    logger.warning("device: %s, ", args.device)
    # Prepare task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)
    # Resolve the (config, model, tokenizer) classes from --model_type.
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
    model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
    model.to(args.device)
    logger.info("Inference parameters %s", args)
    # Inference
    inference(args, model, tokenizer)

# Entry point when executed as a script.
if __name__ == "__main__":
    main()
| 46.759162
| 163
| 0.665659
| 1,150
| 8,931
| 4.97913
| 0.28087
| 0.022005
| 0.032658
| 0.013098
| 0.166958
| 0.131855
| 0.108977
| 0.053091
| 0.036326
| 0.036326
| 0
| 0.006226
| 0.226738
| 8,931
| 190
| 164
| 47.005263
| 0.822908
| 0.128765
| 0
| 0.015625
| 0
| 0
| 0.155604
| 0
| 0.023438
| 0
| 0
| 0
| 0
| 1
| 0.023438
| false
| 0
| 0.09375
| 0
| 0.125
| 0.015625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3256173ee4e9a424745cf36c9f1ac6cf9bf2bc08
| 7,872
|
py
|
Python
|
tools/table.py
|
asterick/minimon.js
|
4876544525eb1bfef1b81a12807e7ba37cdd4949
|
[
"0BSD"
] | 5
|
2019-04-25T00:19:56.000Z
|
2020-09-02T01:24:40.000Z
|
tools/table.py
|
asterick/minimon.js
|
4876544525eb1bfef1b81a12807e7ba37cdd4949
|
[
"0BSD"
] | 6
|
2020-05-23T23:17:59.000Z
|
2022-02-17T21:50:46.000Z
|
tools/table.py
|
asterick/minimon.js
|
4876544525eb1bfef1b81a12807e7ba37cdd4949
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/env python3
# ISC License
#
# Copyright (c) 2019, Bryon Vandiver
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from json import dumps
import os
import csv
# Opcode-table CSV shipped alongside this script.
CSV_LOCATION = os.path.join(os.path.abspath(os.path.dirname(__file__)), 's1c88.csv')
# Generated handler names for the base opcode table and the two
# extension tables dispatched via the 0xCE / 0xCF prefix bytes.
op0s, op1s, op2s = [None] * 0x100, [None] * 0x100, [None] * 0x100
# Condition-code mnemonic -> C expression over the emulated flag register.
CONDITIONS = {
    'C': 'cpu.reg.flag.c',
    'NC': '!cpu.reg.flag.c',
    'Z': 'cpu.reg.flag.z',
    'NZ': '!cpu.reg.flag.z',
    'V': 'cpu.reg.flag.v',
    'NV': '!cpu.reg.flag.v',
    'M': 'cpu.reg.flag.n',
    'P': '!cpu.reg.flag.n',
    'LT': 'cpu.reg.flag.n != cpu.reg.flag.v',
    'LE': '(cpu.reg.flag.n != cpu.reg.flag.v) || cpu.reg.flag.z',
    'GT': '(cpu.reg.flag.n == cpu.reg.flag.v) && !cpu.reg.flag.z',
    'GE': 'cpu.reg.flag.n == cpu.reg.flag.v',
    'F0': 'cpu.reg.flag.f0',
    'F1': 'cpu.reg.flag.f1',
    'F2': 'cpu.reg.flag.f2',
    'F3': 'cpu.reg.flag.f3',
    'NF0': '!cpu.reg.flag.f0',
    'NF1': '!cpu.reg.flag.f1',
    'NF2': '!cpu.reg.flag.f2',
    'NF3': '!cpu.reg.flag.f3',
}
# Operand mnemonic -> (size_bits, fetched, indirect, accessor name).
# A size of -1 defers to the operation's default size (see format()'s
# max() over operand sizes); `fetched` operands become dataN temporaries,
# `indirect` ones additionally go through a calc_* address helper.
ARGUMENTS = {
    'A': (8, False, False, 'a'),
    'B': (8, False, False, 'b'),
    'L': (8, False, False, 'l'),
    'H': (8, False, False, 'h'),
    'BR': (8, False, False, 'br'),
    'SC': (8, False, False, 'sc'),
    'EP': (8, False, False, 'ep'),
    'XP': (8, False, False, 'xp'),
    'YP': (8, False, False, 'yp'),
    'NB': (8, False, False, 'nb'),
    'BA': (16, False, False, 'ba'),
    'HL': (16, False, False, 'hl'),
    'IX': (16, False, False, 'ix'),
    'IY': (16, False, False, 'iy'),
    'SP': (16, False, False, 'sp'),
    'PC': (16, False, False, 'pc'),
    '#nn': (8, True, False, 'imm8'),
    'rr': (8, True, False, 'imm8'),
    '#mmnn': (16, True, False, 'imm16'),
    'qqrr': (16, True, False, 'imm16'),
    '[kk]': (16, True, True, 'vect'),  # Special
    '[hhll]': (-1, True, True, 'ind16'),
    '[HL]': (-1, True, True, 'absHL'),
    '[IX]': (-1, True, True, 'absIX'),
    '[IY]': (-1, True, True, 'absIY'),
    '[BR:ll]': (-1, True, True, 'absBR'),
    '[SP+dd]': (-1, True, True, 'indDSP'),
    '[IX+dd]': (-1, True, True, 'indDIX'),
    '[IY+dd]': (-1, True, True, 'indDIY'),
    '[IX+L]': (-1, True, True, 'indIIX'),
    '[IY+L]': (-1, True, True, 'indIIY'),
}
# Operation mnemonic -> (default_size_bits, access direction per operand).
OPERATIONS = {
    'INC': (8, 'ReadWrite'),
    'DEC': (8, 'ReadWrite'),
    'SLA': (8, 'ReadWrite'),
    'SLL': (8, 'ReadWrite'),
    'SRA': (8, 'ReadWrite'),
    'SRL': (8, 'ReadWrite'),
    'RL': (8, 'ReadWrite'),
    'RLC': (8, 'ReadWrite'),
    'RR': (8, 'ReadWrite'),
    'RRC': (8, 'ReadWrite'),
    'CPL': (8, 'ReadWrite'),
    'NEG': (8, 'ReadWrite'),
    'LD': (8, 'Write', 'Read'),
    'ADD': (8, 'ReadWrite', 'Read'),
    'ADC': (8, 'ReadWrite', 'Read'),
    'SUB': (8, 'ReadWrite', 'Read'),
    'SBC': (8, 'ReadWrite', 'Read'),
    'AND': (8, 'ReadWrite', 'Read'),
    'OR': (8, 'ReadWrite', 'Read'),
    'XOR': (8, 'ReadWrite', 'Read'),
    'CP': (8, 'Read', 'Read'),
    'BIT': (8, 'Read', 'Read'),
    'CALL': (16, 'Read'),
    'CARS': (8, 'Read'),
    'CARL': (16, 'Read'),
    'JRS': (8, 'Read'),
    'JRL': (16, 'Read'),
    'JP': (8, 'Read'),
    'INT': (8, 'Read'),
    'RETE': (8,),
    'PUSH': (-1, 'Read'),
    'POP': (-1, 'Write'),
    'EX': (-1, 'ReadWrite', 'ReadWrite'),
    'SWAP': (8, 'ReadWrite')
}
def get_name(*args):
    """Build the generated C function identifier: inst_<parts joined by _>.

    Falsy parts (e.g. a missing condition code) are skipped; everything
    is lower-cased.
    """
    pieces = [part.lower() for part in args if part]
    return "inst_" + "_".join(pieces)
def format_arg(i, siz, mem, ind, nam):
    """Return the C expression for operand *i*: a fetched dataN temporary
    when the operand was loaded from memory/immediate, otherwise the
    register accessor."""
    return ("data%i" % i) if mem else ("cpu.reg.%s" % nam)
def format(cycles, op, *args):
    """Emit the C function for one opcode row and return its name.

    cycles : "taken" or "taken,skipped" cycle counts from the CSV row.
    op     : instruction mnemonic, looked up in OPERATIONS.
    args   : optional condition code followed by operand mnemonics.

    Rows whose operation/operand combination is not described by the
    tables fall through to a clock_* stub that defers to a hand-written
    implementation.
    """
    condition = None
    cycles, skipped = [int(c) for c in cycles.split(",") * 2][:2]
    if len(args) > 0 and args[0] in CONDITIONS:
        condition, args = args[0], args[1:]
    try:
        ops = OPERATIONS[op]
        args = [ARGUMENTS[arg] for arg in args if arg]
        default_size, directions = ops[0], ops[1:]
        # Effective size: widest operand, but never below the op default.
        if len(args) >= 1:
            size = max(default_size, *[s for s, i, m, n in args])
        else:
            size = default_size
        name = get_name(op, condition, *[n for s, i, m, n in args])
        print ("static int %s(Machine::State& cpu) {" % name)
        for i, (siz, mem, ind, nam) in enumerate(args):
            if ind:
                print ("\tconst auto addr%i = calc_%s(cpu);" % (i, nam))
                safety = "" if "Write" in directions[i] else "const "
                if "Read" in directions[i]:
                    print ("\t%suint%i_t data%i = cpu_read%s(cpu, addr%i);" % (safety, size, i, size, i))
                else:
                    print ("\tuint%i_t data%i;" % (size, i))
            elif mem:
                print ("\tconst uint%i_t data%i = cpu_imm%i(cpu);" % (size, i, siz))
        if condition:
            # Condition not met: emit the early-out path with the skipped cycle count.
            print ("\tif (!(%s)) {" % CONDITIONS[condition])
            print ("\t\tcpu.reg.cb = cpu.reg.nb;")
            print ("\t\treturn %i;" % skipped)
            print ("\t}")
        print ("\top_%s%i(%s);" % (op.lower(), size, ', '.join(['cpu'] + [format_arg(i, *a) for i, a in enumerate(args)])))
        block = False
        for i, (siz, mem, ind, nam) in enumerate(args):
            if ind and "Write" in directions[i]:
                print ("\tcpu_write%s(cpu, data%i, addr%i);" % (size, i, i))
            # Writes to SC/NB (and RETE below) emit the IRQ-blocking epilogue.
            if nam in ['sc', 'nb'] and "Write" in directions[i]:
                block = True
        if block or op == 'RETE':
            print ("\treturn %i + inst_advance(cpu); // Block IRQs" % cycles)
        else:
            print ("\treturn %i;" % cycles)
        print ("}\n")
        return name
    except Exception:
        # BUGFIX: narrowed the original bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer swallowed; any table miss still
        # falls back to a hand-written clock_* stub.
        name = get_name(op, condition, *args)
        print ("int clock_%s(Machine::State& cpu) {" % name)
        print ("\t%s(cpu);" % name)
        print ("\treturn %i;" % cycles)
        print ("}\n")
        return "clock_%s" % name
# Generate switch table
def dump_table(instructions, indent):
    """Print a C `case` line per generated handler, plus a default case
    that routes undefined opcodes to inst_undefined."""
    for opcode, handler in enumerate(instructions):
        if handler:
            print ("%scase 0x%02X: return %s(cpu);" % (indent, opcode, handler))
    print ("%sdefault: return inst_undefined(cpu);" % indent)
# Read the opcode CSV (one row per opcode with three table variants:
# base, 0xCE-prefixed, 0xCF-prefixed) and generate a handler per defined op.
with open(CSV_LOCATION, 'r') as csvfile:
    spamreader = csv.reader(csvfile)
    next(spamreader)  # skip the header row
    for row in spamreader:
        code, cycles0, op0, arg0_1, arg0_2, cycles1, op1, arg1_1, arg1_2, cycles2, op2, arg2_1, arg2_2 = row
        code = int(code, 16)
        if op0 != 'undefined':
            op0s[code] = format(cycles0, op0, arg0_1, arg0_2)
        if op1 != 'undefined':
            op1s[code] = format(cycles1, op1, arg1_1, arg1_2)
        if op2 != 'undefined':
            op2s[code] = format(cycles2, op2, arg2_1, arg2_2)
# Emit the top-level dispatcher: base table with nested switches for the
# two extension prefixes.
print ("int inst_advance(Machine::State& cpu) {")
print ("\tswitch (cpu_imm8(cpu)) {")
dump_table(op0s, '\t')
print ("\tcase 0xCE:")
print ("\t\tswitch (cpu_imm8(cpu)) {")
dump_table(op1s, '\t\t')
print ("\t\t}")
print ("\tcase 0xCF:")
print ("\t\tswitch (cpu_imm8(cpu)) {")
dump_table(op2s, '\t\t')
print ("\t\t}")
print ("\t}")
print ("}")
| 32.528926
| 123
| 0.506225
| 1,067
| 7,872
| 3.689784
| 0.268978
| 0.042672
| 0.06604
| 0.016764
| 0.178054
| 0.137668
| 0.084836
| 0.062484
| 0.035052
| 0.035052
| 0
| 0.03328
| 0.282393
| 7,872
| 241
| 124
| 32.6639
| 0.663657
| 0.102769
| 0
| 0.086957
| 0
| 0.01087
| 0.264001
| 0.007644
| 0
| 0
| 0.003381
| 0
| 0
| 1
| 0.021739
| false
| 0
| 0.016304
| 0.005435
| 0.065217
| 0.163043
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
325927f14aed5b03fe28e7161da22ac9db1b0f2b
| 15,364
|
py
|
Python
|
test_log.py
|
erkooi/desp_tools
|
2bea2e44591ceeeb62cbfe163b4635a3157f6582
|
[
"Apache-2.0"
] | null | null | null |
test_log.py
|
erkooi/desp_tools
|
2bea2e44591ceeeb62cbfe163b4635a3157f6582
|
[
"Apache-2.0"
] | null | null | null |
test_log.py
|
erkooi/desp_tools
|
2bea2e44591ceeeb62cbfe163b4635a3157f6582
|
[
"Apache-2.0"
] | null | null | null |
###############################################################################
#
# Copyright (C) 2012
# ASTRON (Netherlands Institute for Radio Astronomy) <http://www.astron.nl/>
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
"""Test logging utilities
* Provide logging with standardized prefixes:
. time : self, if notime = 0
. verbosity level : self, if noVLevel = 0
. test case ID : self, if noTestId = 0
. message text : argument msgString, the actual text to log
* All append_log statements that have verbosity level equal or lower than the
test case verbosity level will get logged.
* The logging gets output to the stdio and to a file if a file name is provided.
* It is also possible to append other files to the test logging file.
* Best practise is to use the following verbosity levels for the append_log
argument:
-v 0 Log test result
-v 1 Log test title
-v 2 Log errors
-v 3 Log info
-v 4 Log error details
-v 5 Log info details
-v 6 Log debug
-v 7 Log debug details
"""
################################################################################
# System imports
import sys
import time
import common as cm
################################################################################
# Functions
class Testlog:
    """Test logger that prefixes messages with time, verbosity level and
    test/section IDs, echoing to stdout and optionally to a log file."""

    # Conventional verbosity levels for append_log (see module docstring).
    V_RESULT = 0
    V_TITLE = 1
    V_ERRORS = 2
    V_INFO = 3
    V_ERROR_DETAILS = 4
    V_INFO_DETAILS = 5
    V_DEBUG = 6
    V_DEBUG_DETAILS = 7

    # Class-level default: no log file until __init__ supplies a name.
    _logName = None
def __init__(self, verbosity=11, testId='', sectionId='', logName=None):
self.verbosity = verbosity # Verbosity threshold used by append_log() to decide whether to log the input string or not
self._testId = testId # Test ID that optionally gets used as prefix in append_log line
self._sectionId = sectionId # Section ID that optionally gets used as prefix in append_log line
self._logName = logName # Name for the file that will contain the append_log
if self._logName != None:
try:
self._logFile = open(self._logName,'w')
except IOError:
print('ERROR : Can not open log file %s' % self._logName)
def __del__(self):
if self._logName != None:
self.close_log()
def close_log(self):
if self._logName != None:
self._logFile.close()
# The testId can should remain fixed at __init__, but the user can change the sectionId during the execution
def set_section_id(self, sectionId):
self._sectionId = sectionId
def verbose_levels(self):
return "0=result; 1=title; 2=errors; 3=info; 4=error details; 5=info details; 6=debug; 7=debug details"
# Print the message string and append it to the test log file in the Testlog style
def append_log(self, vLevel, msgString, noTime=0, noVLevel=0, noTestId=0, noSectionId=0):
if vLevel <= self.verbosity:
txt = ''
if noTime == 0:
t = time.localtime()
txt = txt + '[%d:%02d:%02d %02d:%02d:%02d]' % (t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
if noVLevel == 0:
txt = txt + ' - (%d) ' % vLevel
if noTestId == 0:
txt = txt + self._testId
if noSectionId == 0:
txt = txt + self._sectionId
txt = txt + msgString
print(txt)
#sys.stdout.flush()
if self._logName != None:
self._logFile.write(txt + '\n')
# Print the repeat message string at regular intervals and append it to the test log file in the Testlog style
def append_log_rep(self, vLevel, rep, nofRep, nofLog=5, noTime=0, noVLevel=0, noTestId=0, noSectionId=0):
if nofRep < nofLog:
logInterval = 1
else:
logInterval = nofRep//nofLog
if rep%logInterval==0 or rep==nofRep-1:
self.append_log(3, 'Rep-%d' % rep)
# Print the contents of an array to the test log file
def append_log_data(self, vLevel, prefixStr, data, radix='dec', dataWidth=8, nofColumns=16, rulers=False, noTime=0, noVLevel=0, noTestId=0, noSectionId=0):
    """Log *data* as rows of nofColumns formatted values.

    radix selects the element format ('uns'/'dec' decimal, 'hex' hex);
    rulers adds a column header row and a row-index prefix per line.

    NOTE(review): with rulers=True, rowStr already begins with prefixStr
    yet each append_log call passes `prefixStr + rowStr`, so the prefix
    appears twice on data rows — confirm whether that is intended.
    """
    if vLevel <= self.verbosity:
        r = 0                               # current row index (rulers mode)
        columnWidth = dataWidth + 1  # use 1 space between columns
        if rulers:
            # Column ruler: "Col:" followed by the column indices.
            rowStr = 'Col:'
            for i in range(nofColumns):
                rowStr += '%*d' % (columnWidth, i)
            self.append_log(vLevel, prefixStr + rowStr, noTime, noVLevel, noTestId, noSectionId)
            self.append_log(vLevel, prefixStr + 'Row:', noTime, noVLevel, noTestId, noSectionId)
            rowStr = prefixStr + ('%-4d' % r)
        else:
            rowStr = prefixStr
        k = 0                               # column position within the row
        # Make sure data is a list, otherwise the following fails
        if cm.depth(data) == 0:
            data = cm.listify(data)
        n = len(data)
        for i in range(n):
            if radix == 'uns': rowStr += ' %*d' % (dataWidth, data[i])
            if radix == 'dec': rowStr += ' %*d' % (dataWidth, data[i])
            if radix == 'hex': rowStr += ' %0*x' % (dataWidth, data[i])
            if k < nofColumns - 1:
                k = k + 1
            else:
                # Row complete: flush it and start the next one.
                self.append_log(vLevel, prefixStr + rowStr, noTime, noVLevel, noTestId, noSectionId)
                rowStr = prefixStr
                r = r + 1
                if rulers:
                    rowStr += ('%-4d' % r)
                k = 0
        # Flush any partially filled final row.
        if k != 0:
            self.append_log(vLevel, prefixStr + rowStr, noTime, noVLevel, noTestId, noSectionId)
def data_to_string(self, data, dataWidth=4, dataLeft=False, fractionWidth=2, fractionExponent=False):
"""Print data to string with length dataWidth + 1 white space
Default print the data as %s string to support any type
If the data is float or complex then print it using fraction notation when
fractionExponent=False or using exponent notation when fractionExponent=True.
The fractionWidth specifies the width of the floating point value.
The data is printed left or right aligned dependent on dataLeft.
For all data types the returned data string has length dataWidth + 1 for a
white space such that it can be used as a fixed size element string when
printing a row of data on a line.
. data = the data, can be float complex or other e.g. int, string, tuple
. dataWidth = width of the printed data string
. dataLeft = when True then left align the data in the printed data string, else right align
. fractionWidth = width of the fraction in case of float data
. fractionExponent = when True print exponent in case of float data, else only print fraction
"""
if isinstance(data, float):
# Log in float format
if fractionExponent:
dataStr = '%.*e' % (fractionWidth, data) # log data as float with exponent
else:
dataStr = '%.*f' % (fractionWidth, data) # log data as float
elif isinstance(data, complex):
# Log in complex float format
if fractionExponent:
dataStr = '%.*e,' % (fractionWidth, data.real) # log data real part as float with exponent
dataStr += '%.*ej' % (fractionWidth, data.imag) # log data imag part as float with exponent
else:
dataStr = '%.*f,' % (fractionWidth, data.real) # log data real part as float
dataStr += '%.*fj' % (fractionWidth, data.imag) # log data imag part as float
else:
# Default log data as string
dataStr = '%s' % str(data) # the data can be any type that fits %s e.g. int, string, tuple
# the explicite conversion by str() is needed for tuple
# Left or right align the dataStr within dataWidth
if dataLeft:
dataStr = '%-*s ' % (dataWidth, dataStr)
else:
dataStr = '%*s ' % (dataWidth, dataStr)
return dataStr
def append_log_one_dimensional_list(self, vLevel, name, L, prefixStr='', dataWidth=4, dataLeft=False, fractionWidth=0, fractionExponent=False, colIndices=None):
"""Log list L[col] in one row with index labels
. vLevel = verbosity level
. name = name, title of the list
. L = the one dimensional list
. prefixStr = prefix string that is printed before every line, can e.g. be used for grep
. dataWidth = of data in column, see self.data_to_string
. dataLeft = of data in column, see self.data_to_string
. fractionWidth = of data in column, see self.data_to_string
. fractionExponent = of data in column, see self.data_to_string
. colIndices = when None then log counter index, else use index from list
Remarks:
. This append_log_one_dimensional_list is similar to using append_log_data with nofColumns=len(L)
. This append_log_one_dimensional_list is similar to append_log_two_dimensional_list with 1 row.
"""
if vLevel <= self.verbosity:
self.append_log(vLevel, '') # start with newline
self.append_log(vLevel, prefixStr + '%s:' % name)
nof_cols = len(L)
# Print row with column indices
if colIndices == None:
colIndices = list(range(nof_cols))
col_index_str = '. index : '
for col in colIndices:
col_index_str += '%*d ' % (dataWidth, col)
self.append_log(vLevel, prefixStr + col_index_str)
# Print row with data
line_str = '. value : '
uniqueL = cm.unique(L)
if len(uniqueL)==1:
line_str += 'all ' + self.data_to_string(uniqueL[0], dataWidth, dataLeft, fractionWidth, fractionExponent)
else:
for col in range(nof_cols):
line_str += self.data_to_string(L[col], dataWidth, dataLeft, fractionWidth, fractionExponent)
self.append_log(vLevel, prefixStr + '%s' % line_str)
self.append_log(vLevel, '') # end with newline
    def append_log_two_dimensional_list(self, vLevel, name, A, prefixStr='', transpose=False, reverseCols=False, reverseRows=False,
                                        dataWidth=4, dataLeft=False, fractionWidth=0, fractionExponent=False, colIndices=None, rowIndices=None):
        """
        Log two dimensional list A[row][col] per row with index labels
        . vLevel = verbosity level
        . name = name, title of the list
        . A = the two dimensional list
        . prefixStr = prefix string that is printed before every line, can e.g. be used for grep
        . transpose = when true transpose(A) to log rows as columns and columns as rows
        . reverseCols = when true reverse the order of the columns
        . reverseRows = when true reverse the order of the rows
        . dataWidth = of data in column, see self.data_to_string
        . dataLeft = of data in column, see self.data_to_string
        . fractionWidth = of data in column, see self.data_to_string
        . fractionExponent = of data in column, see self.data_to_string
        . colIndices = when None then log counter index, else use index from list
        . rowIndices = when None then log counter index, else use index from list (can be text index)
        Remarks:
        . The example recipy for making a two dimensional list of the form A[rows][cols] is:
            A = [], row=[], row.append(element) for all cols, A.append(row) for all rows
            or use cm.create_multidimensional_list([Number of rows][Number of cols])
        . Rows where every element is equal are collapsed to 'all <value>'.
        . Output is framed by a leading and a trailing blank line.
        """
        if vLevel <= self.verbosity:
            self.append_log(vLevel, '')  # start with newline
            self.append_log(vLevel, prefixStr + '%s:' % name)
            # Apply the optional view transformations before printing.
            if transpose:
                #print name, transpose
                A = cm.transpose(A)
            if reverseRows:
                A = cm.reverse_rows_ud(A)
            if reverseCols:
                A = cm.reverse_cols_lr(A)
            nof_rows = len(A)
            nof_cols = len(A[0])  # assumes A is rectangular (all rows same length)
            self.append_log(vLevel, prefixStr + 'col :')
            # Print row with column indices
            if colIndices == None:
                colIndices = list(range(nof_cols))
            if rowIndices == None:
                rowIndices = list(range(nof_rows))
                rowIndexLength = 6  # default row_str prefix length
            else:
                rowIndexLength = 3 + len(str(rowIndices[-1]))  # use last row index string for row_str prefix length
            # Indent the column header so indices line up with the data cells.
            col_index_str = ' ' * rowIndexLength
            for col in colIndices:
                col_index_str += '%*d ' % (dataWidth, col)
            self.append_log(vLevel, prefixStr + col_index_str)
            self.append_log(vLevel, prefixStr + 'row :')
            # For each row print row index and row with data
            for ri,row in enumerate(rowIndices):
                row_str = '%3s : ' % row  # row index, log index as string to support also text index
                uniqueRow = cm.unique(A[ri])
                if len(uniqueRow)==1:
                    # All elements in this row are identical: collapse the output.
                    row_str += 'all ' + self.data_to_string(uniqueRow[0], dataWidth, dataLeft, fractionWidth, fractionExponent)
                else:
                    for col in range(nof_cols):
                        row_str += self.data_to_string(A[ri][col], dataWidth, dataLeft, fractionWidth, fractionExponent)
                self.append_log(vLevel, prefixStr + '%s' % row_str)
            self.append_log(vLevel, '')  # end with newline
# Read the contents of a file and append that to the test log file
def append_log_file(self, vLevel, fileName):
try:
appFile = open(fileName,'r')
self.append_log(vLevel,appFile.read(),1,1,1,1)
appFile.close()
except IOError:
self.append_log(vLevel,'ERROR : Can not open file %s' % fileName)
| 48.466877
| 164
| 0.570034
| 1,894
| 15,364
| 4.536431
| 0.187434
| 0.036662
| 0.028748
| 0.039804
| 0.42237
| 0.385242
| 0.342877
| 0.329842
| 0.304353
| 0.261639
| 0
| 0.010481
| 0.329341
| 15,364
| 316
| 165
| 48.620253
| 0.82337
| 0.402044
| 0
| 0.309942
| 0
| 0.005848
| 0.041408
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.017544
| 0.005848
| 0.157895
| 0.011696
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
325b56ca169aa22d3b3e5e502acb535b1e7a8a46
| 868
|
py
|
Python
|
subaudible/subparse.py
|
RobbieClarken/subaudible
|
f22bdec90693727b36eff426e96d6960387fb94d
|
[
"MIT"
] | null | null | null |
subaudible/subparse.py
|
RobbieClarken/subaudible
|
f22bdec90693727b36eff426e96d6960387fb94d
|
[
"MIT"
] | null | null | null |
subaudible/subparse.py
|
RobbieClarken/subaudible
|
f22bdec90693727b36eff426e96d6960387fb94d
|
[
"MIT"
] | null | null | null |
import re
def parse_srt(line_iter):
    """
    Parses SubRip text into caption dicts.
    Args:
        line_iter: An iterator that yields lines of a SubRip file.
    Yields:
        dict: Caption dicts with `start`, `end` and `text` keys.
    """
    line_iter = iter(line.rstrip('\r\n') for line in line_iter)
    while True:
        # Fix: stop cleanly at end of input. Under PEP 479 (Python 3.7+) a
        # StopIteration escaping next() inside a generator is re-raised as
        # RuntimeError, so the bare next() crashed on exhausted input.
        try:
            next(line_iter)  # Skip counter
        except StopIteration:
            return
        start, end = parse_time_line(next(line_iter))
        # Caption text runs until the first empty line.
        text = '\n'.join(iter(line_iter.__next__, ''))
        yield {'start': start, 'end': end, 'text': text}
def parse_time_line(line):
    """Split a SubRip 'start --> end' timing line into two second counts.

    Returns a lazy iterable of two floats (start, end), suitable for
    tuple-unpacking by the caller.
    """
    timestamps = line.split('-->')
    return (parse_time(stamp) for stamp in timestamps)
def parse_time(time_str):
    """Convert a SubRip timestamp ('HH:MM:SS,mmm' or 'HH:MM:SS.mmm') to seconds.

    Returns the timestamp as a float number of seconds.
    Raises AttributeError when the string contains no valid timestamp.
    """
    time_str = time_str.replace(',', '.')
    # Fix: raw string (avoids the invalid-escape '\d' deprecation) and an
    # escaped dot — the original bare '.' matched any character.
    match = re.search(r'(\d\d):(\d\d):(\d\d)\.(\d\d\d)', time_str)
    h, m, s, ms = (int(group) for group in match.groups())
    return 3600 * h + 60 * m + s + 1e-3 * ms
| 27.125
| 67
| 0.59447
| 136
| 868
| 3.617647
| 0.426471
| 0.03252
| 0.042683
| 0.04878
| 0.018293
| 0.018293
| 0.018293
| 0.018293
| 0
| 0
| 0
| 0.01214
| 0.240783
| 868
| 31
| 68
| 28
| 0.734446
| 0.221198
| 0
| 0
| 0
| 0
| 0.080871
| 0.045101
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.066667
| 0.066667
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
325b89ab7374be326978f10a334f001191bd3ead
| 1,971
|
py
|
Python
|
application/models/basemodel.py
|
ahmedsadman/festive
|
e0e739f126de2e8368014398f5c928c410098da5
|
[
"MIT"
] | 2
|
2020-10-19T23:26:23.000Z
|
2020-10-20T02:14:10.000Z
|
application/models/basemodel.py
|
ahmedsadman/fest-management-api
|
e0e739f126de2e8368014398f5c928c410098da5
|
[
"MIT"
] | null | null | null |
application/models/basemodel.py
|
ahmedsadman/fest-management-api
|
e0e739f126de2e8368014398f5c928c410098da5
|
[
"MIT"
] | 1
|
2021-08-04T15:45:29.000Z
|
2021-08-04T15:45:29.000Z
|
from sqlalchemy import func
from application import db
from application.helpers.error_handlers import ServerError
class BaseModel(db.Model):
    """Abstract base model with shared persistence helpers (save/delete/find)."""
    # No table is created for this class itself; subclasses map to tables.
    __abstract__ = True
    def save(self):
        """save the item to database

        Raises ServerError (wrapping the original exception) on failure.
        """
        try:
            db.session.add(self)
            db.session.commit()
        except Exception as e:
            raise ServerError(message="Failed to save the item", error=e)
    def delete(self):
        """delete the item from database

        Raises ServerError (wrapping the original exception) on failure.
        """
        try:
            db.session.delete(self)
            db.session.commit()
        except Exception as e:
            raise ServerError(message="Deletion failed", error=e)
    @classmethod
    def find_by_id(cls, id):
        # Returns the entity with the given id, or None when it does not exist.
        return cls.query.filter_by(id=id).first()
    @classmethod
    def find_query(cls, _filter):
        """Build the query with the given level one filters (filters that has
        direct match with entity attributes, not any nested relationship).
        Returns 'query' object"""
        query = cls.query
        # Types whose values must NOT be lowercased (func.lower breaks on them).
        exclude_lower = [int, bool]
        for attr, value in _filter.items():
            # func.lower doesn't work for INT/BOOL types in some production
            # databases, so this should be properly handled
            # ex: lower(event.id) won't work because event.id is INT type
            # So the logic is, whenever the passed 'value' in this scope is
            # INT, it means
            # we don't need to lower anything. Just compare the vanilla value
            _attr = getattr(cls, attr)
            # Apply case-insensitive comparison only for non-int/bool values.
            _attr = (
                _attr if (type(value) in exclude_lower) else func.lower(_attr)
            )
            _value = (
                value if (type(value) in exclude_lower) else func.lower(value)
            )
            query = query.filter(_attr == _value)
        return query
    @classmethod
    def find(cls, _filter):
        """find all entities by given filter"""
        return cls.find_query(_filter).all()
| 33.982759
| 78
| 0.597666
| 247
| 1,971
| 4.672065
| 0.437247
| 0.031196
| 0.046794
| 0.034662
| 0.169844
| 0.169844
| 0.169844
| 0.169844
| 0.169844
| 0.103986
| 0
| 0
| 0.316591
| 1,971
| 57
| 79
| 34.578947
| 0.856719
| 0.281583
| 0
| 0.243243
| 0
| 0
| 0.027656
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135135
| false
| 0
| 0.081081
| 0.027027
| 0.351351
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
325fc49ee449fcf77d594c853f23436486f7b300
| 2,711
|
py
|
Python
|
tests/io/s3/test_s3_fetcher.py
|
ToucanToco/PeaKina
|
afaeec65d9b136d42331f140c3048d27bcddb6b1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/io/s3/test_s3_fetcher.py
|
ToucanToco/PeaKina
|
afaeec65d9b136d42331f140c3048d27bcddb6b1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/io/s3/test_s3_fetcher.py
|
ToucanToco/PeaKina
|
afaeec65d9b136d42331f140c3048d27bcddb6b1
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Any, Dict
import boto3
import pytest
from s3fs import S3FileSystem
from peakina.io.s3.s3_fetcher import S3Fetcher
@pytest.fixture
def s3_fetcher(s3_endpoint_url):
    """Fixture: an S3Fetcher pointed at the test S3 endpoint."""
    return S3Fetcher(client_kwargs={"endpoint_url": s3_endpoint_url})
def test_s3_fetcher_open(s3_fetcher):
    """open() on an s3:// URL yields the raw file bytes."""
    dirpath = "s3://accessKey1:verySecretKey1@mybucket"
    filepath = f"{dirpath}/0_0.csv"
    with s3_fetcher.open(filepath) as f:
        assert f.read() == b"a,b\n0,0\n0,1"
def test_s3_fetcher_listdir(s3_fetcher, mocker):
    """listdir() lists bucket entries; mtime() works for files, is None for
    directories, and must not fall back to the s3_mtime helper."""
    s3_mtime_mock = mocker.patch("peakina.io.s3.s3_fetcher.s3_mtime")
    dirpath = "s3://accessKey1:verySecretKey1@mybucket"
    assert s3_fetcher.listdir(dirpath) == [
        "0_0.csv",
        "0_1.csv",
        "mydir",
    ]
    assert s3_fetcher.mtime(f"{dirpath}/0_0.csv") > 0
    assert s3_fetcher.mtime(f"{dirpath}/mydir") is None
    # The listdir-backed cache should answer mtime without calling s3_mtime.
    s3_mtime_mock.assert_not_called()
def test_s3_fetcher_mtime(s3_fetcher):
    """mtime() of an existing object returns a positive timestamp."""
    dirpath = "s3://accessKey1:verySecretKey1@mybucket"
    filepath = f"{dirpath}/0_0.csv"
    assert s3_fetcher.mtime(filepath) > 0
def test_s3_fetcher_open_retry(s3_fetcher, mocker):
    """open() retries after invalidating the filesystem cache when the first
    attempt fails, and logs the failure without leaking credentials."""
    session = boto3.session.Session()
    s3_client = session.client(
        service_name="s3",
        aws_access_key_id="accessKey1",
        aws_secret_access_key="verySecretKey1",
        endpoint_url=s3_endpoint_url,
    )
    dirpath = "s3://accessKey1:verySecretKey1@mybucket"
    filepath = f"{dirpath}/for_retry_0_0.csv"
    s3_client.upload_file("tests/fixtures/for_retry_0_0.csv", "mybucket", "for_retry_0_0.csv")
    class S3FileSystemThatFailsOpen(S3FileSystem):  # type:ignore[misc]
        # Fails the first open(); succeeds only after invalidate_cache()
        # has been called, simulating a stale-cache error.
        def __init__(self, key: str, secret: str, client_kwargs: Dict[str, Any]) -> None:
            super().__init__(key=key, secret=secret, client_kwargs=client_kwargs)
            self.invalidated_cache = False
        def open(self, path, mode="rb", block_size=None, cache_options=None, **kwargs):
            if not self.invalidated_cache:
                raise Exception("argh!")
            return super().open(path, mode, block_size, cache_options, **kwargs)
        def invalidate_cache(self, path=None):
            self.invalidated_cache = True
    mocker.patch("peakina.io.s3.s3_utils.s3fs.S3FileSystem", S3FileSystemThatFailsOpen)
    logger_mock = mocker.patch("peakina.io.s3.s3_utils.logger")
    with s3_fetcher.open(filepath) as f:
        # ensure logger doesn't log credentials
        logger_mock.warning.assert_called_once_with(
            "could not open mybucket/for_retry_0_0.csv: argh!"
        )
        assert f.read() == b"a,b\n0,0\n0,1"
    s3_client.delete_object(Bucket="mybucket", Key="tests/fixtures/for_retry_0_0.csv")
| 33.8875
| 94
| 0.693471
| 375
| 2,711
| 4.741333
| 0.266667
| 0.086052
| 0.025309
| 0.028121
| 0.431384
| 0.3009
| 0.245782
| 0.134421
| 0.102362
| 0.102362
| 0
| 0.040326
| 0.185909
| 2,711
| 79
| 95
| 34.316456
| 0.765292
| 0.020288
| 0
| 0.175439
| 0
| 0
| 0.220882
| 0.141726
| 0
| 0
| 0
| 0
| 0.140351
| 1
| 0.140351
| false
| 0
| 0.087719
| 0.017544
| 0.280702
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32638416d54a115fde42bba19086c99e40948e61
| 802
|
py
|
Python
|
backend/events/tests/test_views.py
|
trfoss/parrot
|
2f120ee1ab82368f85b2b5a7f1c45afc26aa8963
|
[
"BSD-2-Clause"
] | 5
|
2019-02-25T02:24:51.000Z
|
2019-04-21T00:56:43.000Z
|
backend/events/tests/test_views.py
|
trfoss/parrot
|
2f120ee1ab82368f85b2b5a7f1c45afc26aa8963
|
[
"BSD-2-Clause"
] | 51
|
2019-02-06T03:36:27.000Z
|
2021-06-10T21:11:24.000Z
|
backend/events/tests/test_views.py
|
trfoss/parrot
|
2f120ee1ab82368f85b2b5a7f1c45afc26aa8963
|
[
"BSD-2-Clause"
] | 7
|
2019-02-06T04:37:10.000Z
|
2019-03-28T07:52:26.000Z
|
"""
backend/events/tests/test_views.py
Tests for the events page views. We use the test client. Read more at
https://docs.djangoproject.com/en/2.1/topics/testing/tools/
"""
import json
from django.test import TestCase
class EventsPageViewTests(TestCase):
    """Events page view tests for route /events/data
    """
    # Database fixtures loaded before each test.
    fixtures = [
        'event.json',
        'team.json',
        'teammember.json',
    ]
    def test_events_data(self):
        """Test route /events/data
        - it returns status code 200
        - it returns a non-empty list
        """
        response = self.client.get('/events/data')
        self.assertEqual(response.status_code, 200)
        # Response body is JSON: a non-empty list of events.
        obj = json.loads(response.content)
        self.assertTrue(isinstance(obj, list))
        self.assertTrue(len(obj) > 0)
| 26.733333
| 69
| 0.63591
| 102
| 802
| 4.960784
| 0.588235
| 0.079051
| 0.059289
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014851
| 0.244389
| 802
| 29
| 70
| 27.655172
| 0.820132
| 0.377805
| 0
| 0
| 0
| 0
| 0.100877
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 1
| 0.071429
| false
| 0
| 0.142857
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3265c12d40cc56aa2b76c483dff904dc52c43391
| 11,333
|
py
|
Python
|
myfunds/web/views/crypto/views.py
|
anzodev/myfunds
|
9f6cda99f443cec064d15d7ff7780f297cbdfe10
|
[
"MIT"
] | null | null | null |
myfunds/web/views/crypto/views.py
|
anzodev/myfunds
|
9f6cda99f443cec064d15d7ff7780f297cbdfe10
|
[
"MIT"
] | null | null | null |
myfunds/web/views/crypto/views.py
|
anzodev/myfunds
|
9f6cda99f443cec064d15d7ff7780f297cbdfe10
|
[
"MIT"
] | null | null | null |
import csv
import io
from datetime import datetime
import peewee as pw
from flask import Blueprint
from flask import g
from flask import make_response
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from wtforms import Form
from wtforms import IntegerField
from wtforms import validators as vals
from myfunds.core.constants import CryptoDirection
from myfunds.core.models import CryptoActionLog
from myfunds.core.models import CryptoBalance
from myfunds.core.models import CryptoCurrency
from myfunds.core.models import CryptoTransaction
from myfunds.core.models import db_proxy
from myfunds.modules import cmc
from myfunds.web import ajax
from myfunds.web import auth
from myfunds.web import notify
from myfunds.web import utils
from myfunds.web.constants import DATETIME_FORMAT
from myfunds.web.forms import AddCryptoBalanceForm
from myfunds.web.forms import AddCyptoTransactionForm
from myfunds.web.forms import DeleteCryptoBalanceForm
from myfunds.web.forms import UpdateCryptoBalanceQuantityForm
# Fiat currency used for valuations and its subunit precision (cents).
USD_CODE = "USD"
USD_PRECISION = 2
# Crypto amounts are stored with 8 fractional digits (satoshi-like subunits).
CRYPTO_PRECISION = 8
# Flask blueprint for all /crypto views.
bp = Blueprint("crypto", __name__, template_folder="templates")
@bp.route("/crypto")
@auth.login_required
def index():
currencies = CryptoCurrency.select().order_by(CryptoCurrency.symbol)
balances = (
CryptoBalance.select()
.join(CryptoCurrency)
.where(CryptoBalance.account == g.authorized_account)
.order_by(CryptoBalance.name, CryptoCurrency.symbol)
)
investments = (
CryptoTransaction.select(
pw.fn.COUNT(CryptoTransaction.id),
pw.fn.SUM(CryptoTransaction.amount),
)
.where(
(CryptoTransaction.account == g.authorized_account)
& (CryptoTransaction.direction == CryptoDirection.INVESTMENT)
)
.scalar(as_tuple=True)
)
if investments[1] is None:
investments = None
fixed_profit = (
CryptoTransaction.select(
pw.fn.COUNT(CryptoTransaction.id),
pw.fn.SUM(CryptoTransaction.amount),
)
.where(
(CryptoTransaction.account == g.authorized_account)
& (CryptoTransaction.direction == CryptoDirection.FIXED_PROFIT)
)
.scalar(as_tuple=True)
)
if fixed_profit[1] is None:
fixed_profit = None
amount_pattern = utils.make_amount_pattern(8)
return render_template(
"crypto/view.html",
currencies=currencies,
investments=investments,
fixed_profit=fixed_profit,
balances=balances,
amount_pattern=amount_pattern,
)
@bp.route("/crypto/balances/new", methods=["POST"])
@auth.login_required
def new_balance():
redirect_url = url_for("crypto.index")
form = AddCryptoBalanceForm(request.form)
utils.validate_form(form, redirect_url)
name = form.name.data
currency_id = form.currency_id.data
currency = CryptoCurrency.get_or_none(id=currency_id)
if currency is None:
notify.error("Currency not found.")
return redirect(redirect_url)
balance = CryptoBalance.create(
account=g.authorized_account,
currency=currency,
name=name,
quantity=0,
)
notify.info(f"New balance '{balance.name}' was created.")
return redirect(redirect_url)
@bp.route("/crypto/balances/delete", methods=["POST"])
@auth.login_required
def delete_balance():
redirect_url = url_for("crypto.index")
form = DeleteCryptoBalanceForm(request.form)
utils.validate_form(form, redirect_url)
balance_id = form.balance_id.data
balance = CryptoBalance.get_or_none(id=balance_id, account=g.authorized_account)
if balance is None:
notify.error("Balance not found.")
return redirect(redirect_url)
balance.delete_instance()
notify.info(f"Balance '{balance.name}' was deleted.")
return redirect(redirect_url)
@bp.route("/crypto/balances/update-quantity", methods=["POST"])
@auth.login_required
def update_quantity():
redirect_url = url_for("crypto.index")
form = UpdateCryptoBalanceQuantityForm(request.form)
form.quantity.validators.append(
vals.Regexp(utils.make_amount_pattern(CRYPTO_PRECISION))
)
utils.validate_form(form, redirect_url)
action = form.action.data
balance_id = form.balance_id.data
quantity = utils.amount_to_subunits(form.quantity.data, CRYPTO_PRECISION)
balance = CryptoBalance.get_or_none(id=balance_id, account=g.authorized_account)
if balance is None:
notify.error("Balance not found.")
return redirect(redirect_url)
quantity_before = balance.quantity
if action == "set":
balance.quantity = quantity
elif action == "add":
balance.quantity += quantity
else:
balance.quantity -= quantity
if balance.quantity < 0:
notify.error("Balance quantity can't be less then zero.")
return redirect(redirect_url)
with db_proxy.atomic():
CryptoActionLog.create(
account=g.authorized_account,
message=(
f"{action.capitalize()} {form.quantity.data} {balance.currency.symbol} "
f"for {balance.name} ({balance.id}), "
f"before: {utils.make_hrf_amount(quantity_before, CRYPTO_PRECISION)}, "
f"after: {utils.make_hrf_amount(balance.quantity, CRYPTO_PRECISION)}."
),
created_at=datetime.now(),
)
balance.save()
notify.info("Balance quantity was updated.")
return redirect(redirect_url)
@bp.route("/crypto/invest", methods=["POST"])
@auth.login_required
def invest():
redirect_url = url_for("crypto.index")
quantity_validator = vals.Regexp(utils.make_amount_pattern(CRYPTO_PRECISION))
price_validator = vals.Regexp(utils.make_amount_pattern(USD_PRECISION))
form = AddCyptoTransactionForm(request.form)
form.quantity.validators.append(quantity_validator)
form.price.validators.append(price_validator)
utils.validate_form(form, redirect_url)
currency_id = form.currency_id.data
quantity = form.quantity.data
price = form.price.data
amount = round(float(quantity) * float(price), USD_PRECISION)
currency = CryptoCurrency.get_or_none(id=currency_id)
if currency is None:
notify.error("Currency not found.")
return redirect(redirect_url)
with db_proxy.atomic():
creation_time = datetime.now()
CryptoTransaction.create(
account=g.authorized_account,
direction=CryptoDirection.INVESTMENT,
symbol=currency.symbol,
quantity=utils.amount_to_subunits(quantity, CRYPTO_PRECISION),
price=utils.amount_to_subunits(price, USD_PRECISION),
amount=utils.amount_to_subunits(amount, USD_PRECISION),
created_at=creation_time,
)
CryptoActionLog.create(
account=g.authorized_account,
message=(
f"Invest ${amount}, bought {quantity} {currency.symbol} by ${price}."
),
created_at=creation_time,
)
notify.info("New investment was added.")
return redirect(redirect_url)
@bp.route("/crypto/fix-profit", methods=["POST"])
@auth.login_required
def fix_profit():
redirect_url = url_for("crypto.index")
quantity_validator = vals.Regexp(utils.make_amount_pattern(CRYPTO_PRECISION))
price_validator = vals.Regexp(utils.make_amount_pattern(USD_PRECISION))
form = AddCyptoTransactionForm(request.form)
form.quantity.validators.append(quantity_validator)
form.price.validators.append(price_validator)
utils.validate_form(form, redirect_url)
currency_id = form.currency_id.data
quantity = form.quantity.data
price = form.price.data
amount = round(float(quantity) * float(price), USD_PRECISION)
currency = CryptoCurrency.get_or_none(id=currency_id)
if currency is None:
notify.error("Currency not found.")
return redirect(redirect_url)
with db_proxy.atomic():
creation_time = datetime.now()
CryptoTransaction.create(
account=g.authorized_account,
direction=CryptoDirection.FIXED_PROFIT,
symbol=currency.symbol,
quantity=utils.amount_to_subunits(quantity, CRYPTO_PRECISION),
price=utils.amount_to_subunits(price, USD_PRECISION),
amount=utils.amount_to_subunits(amount, USD_PRECISION),
created_at=creation_time,
)
CryptoActionLog.create(
account=g.authorized_account,
message=(
f"Fix profit ${amount}, sell {quantity} {currency.symbol} by ${price}."
),
created_at=creation_time,
)
notify.info("New profit fix was added.")
return redirect(redirect_url)
@bp.route("/ajax/balances-values")
@ajax.ajax_endpoint
@auth.login_required
def ajax_balances_values():
balances = (
CryptoBalance.select()
.join(CryptoCurrency)
.where(CryptoBalance.account == g.authorized_account)
)
currencies_ids = [i.currency.cmc_id for i in balances]
prices = cmc.fetch_prices(currencies_ids, USD_CODE)
data = {}
for b in balances:
price, amount = prices.get(b.currency.cmc_id), None
if price is not None:
amount = round(
float(utils.make_hrf_amount(b.quantity, CRYPTO_PRECISION)) * price,
USD_PRECISION,
)
data[int(b.id)] = {"price": price, "amount": amount}
return data
class ActionsFilterForm(Form):
    """Query-string form for paginating the action log (both optional)."""
    offset = IntegerField(validators=[vals.Optional()])
    limit = IntegerField(validators=[vals.Optional()])
@bp.route("/crypto/actions")
@auth.login_required
def actions():
filter_form = ActionsFilterForm(request.args)
utils.validate_form(filter_form, url_for("crypto.actions"), error_notify=None)
offset = filter_form.offset.data or 0
limit = filter_form.limit.data or 10
filters = {"offset": offset, "limit": limit}
limit_plus_one = limit + 1
query = (
CryptoActionLog.select()
.where(CryptoActionLog.account == g.authorized_account)
.order_by(CryptoActionLog.created_at.desc())
.offset(offset)
.limit(limit_plus_one)
)
actions = list(query)[:limit]
has_prev = offset > 0
has_next = len(query) == limit_plus_one
return render_template(
"crypto/actions.html",
filters=filters,
actions=actions,
has_prev=has_prev,
has_next=has_next,
)
@bp.route("/crypto/actions/export")
@auth.login_required
def export_actions():
actions = (
CryptoActionLog.select()
.where(CryptoActionLog.account == g.authorized_account)
.order_by(CryptoActionLog.created_at.desc())
)
buffer = io.StringIO()
csvwriter = csv.writer(buffer, delimiter=";", quoting=csv.QUOTE_ALL)
csvwriter.writerow(["Time", "Message"])
for i in actions.iterator():
csvwriter.writerow([i.created_at.strftime(DATETIME_FORMAT), i.message])
res = make_response(buffer.getvalue())
res.headers["Content-Disposition"] = "attachment; filename=actions.csv"
res.headers["Content-type"] = "text/csv"
return res
| 29.667539
| 88
| 0.682344
| 1,292
| 11,333
| 5.81192
| 0.157121
| 0.030763
| 0.03356
| 0.046611
| 0.557464
| 0.513118
| 0.471434
| 0.459449
| 0.395659
| 0.395659
| 0
| 0.001349
| 0.215212
| 11,333
| 381
| 89
| 29.745407
| 0.842928
| 0
| 0
| 0.432432
| 0
| 0
| 0.096532
| 0.019677
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030405
| false
| 0
| 0.101351
| 0
| 0.192568
| 0.006757
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32665f5e99814a1ca419ee599a7bb327ba8ffbf0
| 9,115
|
py
|
Python
|
src/modeci_mdf/interfaces/pytorch/mod_torch_builtins.py
|
29riyasaxena/MDF
|
476e6950d0f14f29463eb4f6e3be518dfb2160a5
|
[
"Apache-2.0"
] | 12
|
2021-01-18T20:38:21.000Z
|
2022-03-29T15:01:10.000Z
|
src/modeci_mdf/interfaces/pytorch/mod_torch_builtins.py
|
29riyasaxena/MDF
|
476e6950d0f14f29463eb4f6e3be518dfb2160a5
|
[
"Apache-2.0"
] | 101
|
2020-12-14T15:23:07.000Z
|
2022-03-31T17:06:19.000Z
|
src/modeci_mdf/interfaces/pytorch/mod_torch_builtins.py
|
29riyasaxena/MDF
|
476e6950d0f14f29463eb4f6e3be518dfb2160a5
|
[
"Apache-2.0"
] | 15
|
2020-12-04T22:37:14.000Z
|
2022-03-31T09:48:03.000Z
|
"""
Wrap commonly-used torch builtins in nn.Module subclass
for easier automatic construction of script
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class argmax(torch.nn.Module):
    """Module wrapper around torch.argmax (index over the flattened input)."""

    def __init__(self):
        super().__init__()

    def forward(self, A):
        # Index of the largest element of the flattened tensor.
        return A.argmax()
class argmin(torch.nn.Module):
    """Module wrapper around torch.argmin (index over the flattened input)."""

    def __init__(self):
        super().__init__()

    def forward(self, A):
        # Index of the smallest element of the flattened tensor.
        return A.argmin()
class matmul(torch.nn.Module):
    """Matrix product of A with the transpose of B."""

    def __init__(self):
        super().__init__()

    def forward(self, A, B):
        # NOTE: B is transposed before the product (A @ B.T), as in the
        # original contract of this wrapper.
        return A @ B.T
class add(torch.nn.Module):
    """Elementwise (broadcasting) addition of two tensors."""

    def __init__(self):
        super().__init__()

    def forward(self, A, B):
        return A.add(B)
class sin(torch.nn.Module):
    """Elementwise sine."""

    def __init__(self):
        super().__init__()

    def forward(self, A):
        return A.sin()
class cos(torch.nn.Module):
    """Elementwise cosine."""

    def __init__(self):
        super().__init__()

    def forward(self, A):
        return A.cos()
class abs(torch.nn.Module):
    """Elementwise absolute value (shadows the builtin name on purpose)."""

    def __init__(self):
        super().__init__()

    def forward(self, A):
        return A.abs()
class flatten(torch.nn.Module):
    """Flatten the input to shape (1, N), keeping a leading batch-like dim."""

    def __init__(self):
        super().__init__()

    def forward(self, A):
        return A.reshape(1, -1)
class clip(torch.nn.Module):
    """Clamp all elements of A into [min_val, max_val]."""

    def __init__(self):
        super().__init__()

    def forward(self, A, min_val, max_val):
        return A.clamp(min_val, max_val)
class shape(torch.nn.Module):
    """Return the input's shape as a 1-D int64 tensor."""

    def __init__(self):
        super().__init__()

    def forward(self, A):
        dims = list(A.shape)
        return torch.tensor(dims, dtype=torch.int64)
class det(torch.nn.Module):
    """Determinant of a (batch of) square matrix."""

    def __init__(self):
        super().__init__()

    def forward(self, A):
        return A.det()
class And(torch.nn.Module):
    """Elementwise logical AND; strictly-positive entries count as True."""

    def __init__(self):
        super().__init__()

    def forward(self, A, B):
        return (A > 0) & (B > 0)
class Or(torch.nn.Module):
    """Elementwise logical OR; strictly-positive entries count as True."""

    def __init__(self):
        super().__init__()

    def forward(self, A, B):
        return (A > 0) | (B > 0)
class Xor(torch.nn.Module):
    """Elementwise logical XOR; strictly-positive entries count as True."""

    def __init__(self):
        super().__init__()

    def forward(self, A, B):
        return (A > 0) ^ (B > 0)
class concat(torch.nn.Module):
    """Concatenate a sequence of tensors along the given axis."""

    def __init__(self):
        super().__init__()

    def forward(self, A, axis=0):
        # A is a sequence of tensors, not a single tensor.
        return torch.cat(A, dim=axis)
class ceil(torch.nn.Module):
    """Elementwise ceiling."""

    def __init__(self):
        super().__init__()

    def forward(self, A):
        return A.ceil()
class floor(torch.nn.Module):
    """Elementwise floor."""

    def __init__(self):
        super().__init__()

    def forward(self, A):
        return A.floor()
class bitshift(torch.nn.Module):
    """Elementwise bit shift; DIR selects the direction ("RIGHT" or left)."""

    def __init__(self, DIR):
        super().__init__()
        self.dir = DIR

    def forward(self, A, B):
        # Cast both operands to int64 before shifting.
        lhs = A.to(torch.int64)
        rhs = B.to(torch.int64)
        if self.dir == "RIGHT":
            return lhs >> rhs
        return lhs << rhs
class conv(torch.nn.Module):
    """2D convolution with ONNX-style auto_pad handling.

    auto_pad: "NOTSET" (use explicit `pads`), "VALID" (no padding), or
    "SAME_UPPER"/"SAME_LOWER" (pad so the output roughly matches
    ceil(input/stride); the extra pixel of an odd padding amount goes to the
    upper or lower side respectively).
    Fixes vs. the original: `pads` and `strides` were read in forward()
    without being stored/qualified (NameError); one SAME_LOWER branch used
    float division (`/ 2`); and the already-padded input was passed to
    F.conv2d together with padding=self.pads, which both double-pads and is
    an invalid 4-tuple argument for conv2d. Padding is now applied once via
    F.pad and conv2d runs with padding=0.
    """

    def __init__(
        self,
        auto_pad="NOTSET",
        kernel_shape=None,
        group=1,
        strides=[1, 1],
        dilations=[1, 1],
        pads=[0, 0, 0, 0],
    ):
        super().__init__()
        self.group = group
        self.auto_pad = auto_pad
        self.strides = tuple(strides)
        self.dilations = tuple(dilations)
        self.kernel_shape = kernel_shape
        # Fix: store the explicit pads (the original never kept them, so the
        # NOTSET path crashed with a NameError in forward()).
        self.pads = tuple(pads)

    def _same_pads(self, A, lower):
        """Padding 4-tuple for SAME_UPPER/SAME_LOWER given input A (NCHW).

        NOTE(review): kept the original layout where the H-derived amounts
        occupy the first two slots of the F.pad tuple — confirm against the
        intended dim ordering of torch.nn.functional.pad.
        """
        d1 = int(
            torch.ceil(torch.tensor(A.shape[2], dtype=torch.float32) / self.strides[0]).item()
        )
        d2 = int(
            torch.ceil(torch.tensor(A.shape[3], dtype=torch.float32) / self.strides[1]).item()
        )
        if lower:
            # Odd remainder is padded on the lower (leading) side.
            return (d1 // 2 + d1 % 2, d1 // 2, d2 // 2 + d2 % 2, d2 // 2)
        # Odd remainder is padded on the upper (trailing) side.
        return (d1 // 2, d1 // 2 + d1 % 2, d2 // 2, d2 // 2 + d2 % 2)

    def forward(self, A, W, B=None):
        if self.auto_pad == "VALID":
            self.pads = (0, 0, 0, 0)
        elif self.auto_pad == "SAME_UPPER":
            self.pads = self._same_pads(A, lower=False)
        elif self.auto_pad == "SAME_LOWER":
            self.pads = self._same_pads(A, lower=True)
        # else NOTSET: keep the explicit pads from __init__.
        A = F.pad(A, self.pads)
        # Input is already padded above, so conv2d itself pads nothing.
        return F.conv2d(
            A,
            W,
            bias=B,
            stride=self.strides,
            padding=0,
            dilation=self.dilations,
            groups=self.group,
        )
class elu(torch.nn.Module):
    """ONNX Elu: f(x) = x for x > 0, alpha * (exp(x) - 1) otherwise."""

    def __init__(self, alpha=1.0):
        super().__init__()
        # Saturation coefficient for the negative branch.
        self.alpha = alpha

    def forward(self, A):
        # Use the functional form instead of constructing a throwaway
        # nn.ELU module on every call, as the original did.
        return torch.nn.functional.elu(A.to(torch.float32), alpha=self.alpha)
class hardsigmoid(torch.nn.Module):
    """ONNX HardSigmoid: clip(alpha * x + beta, 0, 1)."""

    def __init__(self, alpha=0.2, beta=0.5):
        super().__init__()
        self.alpha = alpha
        self.beta = beta

    def forward(self, A):
        scaled = A.to(torch.float32) * self.alpha + self.beta
        return scaled.clamp(0, 1)
class hardswish(torch.nn.Module):
    """ONNX HardSwish: x * clip(x / 6 + 1/2, 0, 1)."""

    def __init__(self):
        super().__init__()
        self.alpha = 1.0 / 6
        self.beta = 0.5

    def forward(self, A):
        gate = (A.to(torch.float32) * self.alpha + self.beta).clamp(0, 1)
        # Note: A keeps its own dtype here; the product promotes as needed.
        return A * gate
class hardmax(torch.nn.Module):
    """ONNX-style Hardmax: one-hot of the first maximal element along ``axis``.

    As in the original implementation, the one-hot encoding is appended as
    the LAST dimension of the result (``one_hot`` semantics), regardless of
    which axis the maximum was taken over.
    """

    def __init__(self, axis=-1):
        super().__init__()
        self.axis = axis

    def forward(self, A):
        A = A.to(torch.float32)
        # BUG FIX: resolve a negative axis locally instead of mutating
        # self.axis, which corrupted state across calls whose inputs have
        # different ranks.
        axis = self.axis + A.dim() if self.axis < 0 else self.axis
        # torch.argmax returns the index of the FIRST maximal value, which
        # reproduces the original min-over-tied-indices tie-breaking without
        # the hand-built index tensor.
        first_max = torch.argmax(A, dim=axis)
        return torch.nn.functional.one_hot(first_max, A.shape[axis])
class compress(torch.nn.Module):
    """ONNX Compress: keep the slices of ``A`` whose mask entry in ``B`` is true.

    With ``axis`` set, selection happens along that axis; otherwise ``A`` is
    flattened first and selection is element-wise.
    """

    def __init__(self, axis=None):
        # BUG FIX: initialise the Module machinery BEFORE assigning
        # attributes; nn.Module.__setattr__ relies on __init__ having run.
        super().__init__()
        self.axis = axis

    def forward(self, A, B):
        # Indices of the truthy mask entries (the original's extra `!= 0`
        # on an already-bool tensor was a no-op).
        idx = B.to(torch.bool).nonzero().reshape(-1)
        if self.axis is not None:
            return torch.index_select(A, self.axis, idx)
        return torch.index_select(A.reshape(-1), 0, idx)
# TODO: Many more to be implemented
__all__ = [
    "argmax",
    "argmin",
    "matmul",
    "add",
    "sin",
    "cos",
    "abs",
    "flatten",
    "clip",
    "shape",
    "det",
    "And",
    "Or",
    "Xor",
    "concat",
    "ceil",
    "floor",
    "bitshift",
    "conv",
    "elu",
    "hardsigmoid",
    "hardswish",
    "hardmax",  # defined above but was missing from the public API list
    "compress",
]
| 23.798956
| 88
| 0.501042
| 1,139
| 9,115
| 3.763828
| 0.115891
| 0.055983
| 0.072778
| 0.089573
| 0.595055
| 0.524843
| 0.503149
| 0.470725
| 0.461628
| 0.446699
| 0
| 0.037545
| 0.360066
| 9,115
| 382
| 89
| 23.861257
| 0.697411
| 0.032913
| 0
| 0.458182
| 0
| 0
| 0.017837
| 0
| 0
| 0
| 0
| 0.002618
| 0
| 1
| 0.174545
| false
| 0
| 0.010909
| 0.072727
| 0.367273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
326698864c4df87b158debf66bd86b994c325aa0
| 8,068
|
py
|
Python
|
taf/testlib/snmphelpers.py
|
stepanandr/taf
|
75cb85861f8e9703bab7dc6195f3926b8394e3d0
|
[
"Apache-2.0"
] | 10
|
2016-12-16T00:05:58.000Z
|
2018-10-30T17:48:25.000Z
|
taf/testlib/snmphelpers.py
|
stepanandr/taf
|
75cb85861f8e9703bab7dc6195f3926b8394e3d0
|
[
"Apache-2.0"
] | 40
|
2017-01-04T23:07:05.000Z
|
2018-04-16T19:52:02.000Z
|
taf/testlib/snmphelpers.py
|
stepanandr/taf
|
75cb85861f8e9703bab7dc6195f3926b8394e3d0
|
[
"Apache-2.0"
] | 23
|
2016-12-30T05:03:53.000Z
|
2020-04-01T08:40:24.000Z
|
# Copyright (c) 2011 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``snmphelpers.py``
`SNMP specific helpers functions`
"""
import sys
import os
import shutil
import tarfile
from subprocess import Popen, PIPE
import pytest
import paramiko as paramiko
from . import helpers
from . import loggers
# NOTE: module loggers are created inside the functions below via loggers.module_logger()
def is_mibs_folder_empty(path):
    """Check whether the MIBs folder is empty of ONS MIB files.

    Args:
        path(str): path to MIBs folder

    Returns:
        bool: True if no ONS MIB is present (or the folder does not exist),
        False otherwise

    Examples::

        is_mibs_folder_empty(path)

    """
    if os.path.exists(path):
        for file_n in os.listdir(path):
            # Stop at the first ONS MIB instead of scanning the whole folder.
            if 'ONS' in file_n or "ons" in file_n:
                return False
    return True
def clear_mibs_folder(path):
    """Delete the MIBS folder with all ONS mibs inside it, if it exists.

    Args:
        path(str): path to MIBs folder

    Examples::

        clear_mibs_folder(path)

    """
    if not os.path.exists(path):
        return
    shutil.rmtree(path)
def get_remote_file(hostname, port, username, password, remotepath, localpath):
    """Download a file from a remote host to the local machine over SFTP.

    Args:
        hostname(str): Remote IP-address
        port(int): Remote SSH port
        username(str): Remote host username for authentication
        password(str): Remote host password for authentication
        remotepath(str): Remote file to download location path
        localpath(str): Local path to save remote file

    Examples::

        get_remote_file(host, port, username, password, tar_remotepath, tar_localpath)

    """
    ssh_transport = paramiko.Transport((hostname, port))
    ssh_transport.connect(username=username, password=password)
    sftp_client = paramiko.SFTPClient.from_transport(ssh_transport)
    try:
        sftp_client.get(remotepath=remotepath, localpath=localpath)
    finally:
        # Always release the SFTP session and the underlying transport.
        sftp_client.close()
        ssh_transport.close()
def untar_file(tar_path, untar_path):
    """Unpack a tar archive into *untar_path* and delete the archive.

    Args:
        tar_path(str): Path to tar file
        untar_path(str): Path where to unpack

    Examples::

        untar_file(tar_localpath, mib_path_txt)

    """
    # A stale regular file named 'mibs' would collide with the extracted tree.
    old_folder = os.path.join(untar_path, 'mibs')
    if os.path.isfile(old_folder):
        os.remove(old_folder)
    # Context manager guarantees the archive handle is closed even if
    # extraction fails (the original leaked it on error).
    with tarfile.open(tar_path) as tar:
        # NOTE(review): extractall on an untrusted archive permits path
        # traversal; the tarball here comes from a configured build host.
        tar.extractall(untar_path)
    os.remove(tar_path)
def file_convert(mib_txt_path, mib_py_path):
    """Convert .txt MIB to .py.

    Two passes through external libsmi tools: 'smidump' produces a plain
    python MIB (imported once to harvest module/node names into
    helpers.MIBS_DICT), then 'libsmi2pysnmp' produces the pysnmp-format
    module that is written to *mib_py_path*.

    Args:
        mib_txt_path(str): Full path to .txt MIB.
        mib_py_path(str): Full path to .py MIB

    Returns:
        str: the generated .py MIB name, or "Fail" when smidump produced
        no output.

    Examples::

        file_convert(mib_txt_path, mib_py_path)

    """
    mod_logger_snmp = loggers.module_logger(name=__name__)
    # translate .txt mib into python format using 3rd party tools 'smidump'
    smidump = Popen(['smidump', '-k', '-f', 'python', mib_txt_path], stdout=PIPE)
    list_stdout = smidump.communicate()[0]
    if len(list_stdout) == 0:
        # smidump produced nothing: signal failure to the caller
        return "Fail"
    # create tmp directory for filling MIBs dictionary
    mib_path_tmp = os.path.join(mib_py_path, 'tmp')
    if not os.path.exists(mib_path_tmp):
        os.makedirs(mib_path_tmp)
    # added tmp path into sys.path for imports converted MIB's
    sys.path.append(mib_path_tmp)
    # get file without extension
    file_name = os.path.splitext(os.path.basename(mib_txt_path))[0]
    # create .py name
    temp_file_name = "{0}.py".format(file_name)
    # create .tmp file path for imports
    temp_file_path = os.path.join(mib_path_tmp, temp_file_name)
    # save and import converted MIB's
    with open(temp_file_path, "ab") as a:
        a.write(list_stdout)
    temp_module = __import__(os.path.splitext(os.path.basename(mib_txt_path))[0])
    # update helpers.MIBS_DICT with MIB data
    if "moduleName" in list(temp_module.MIB.keys()) and "nodes" in list(temp_module.MIB.keys()):
        helpers.MIBS_DICT.update({temp_module.MIB["moduleName"]: list(temp_module.MIB["nodes"].keys())})
    # clear tmp file path
    sys.path.remove(mib_path_tmp)
    os.remove(temp_file_path)
    # translate MIB from .py into pysnmp format using 3rd party tools 'libsmi2pysnmp'
    pipe = Popen(['libsmi2pysnmp', '--no-text'], stdout=PIPE, stdin=PIPE)
    # NOTE(review): communicate() returns a (stdout, stderr) tuple; the loop
    # below iterates over both members and skips the None stderr entry.
    stdout = pipe.communicate(input=list_stdout)
    # get MIB name from itself, add .py and save it.
    mib_name = "{0}.py".format(temp_module.MIB["moduleName"])
    mib_py_path = os.path.join(mib_py_path, mib_name)
    mod_logger_snmp.debug("Convert %s to %s" % (file_name, temp_file_name))
    with open(mib_py_path, 'a') as py_file:
        for string in stdout:
            if string is not None:
                str_dict = string.decode('utf-8').split('\n')
                for each_str in str_dict:
                    # ModuleCompliance lines are kept only when they also
                    # mention ObjectGroup; bare "...Compliance)" lines are
                    # dropped; everything else is written through.
                    if "ModuleCompliance" in each_str:
                        if "ObjectGroup" in each_str:
                            py_file.write(each_str + '\n')
                    elif "Compliance)" in each_str:
                        pass
                    else:
                        py_file.write(each_str + '\n')
    return mib_name
def convert_to_py(txt_dir_path, py_dir_path):
    """Convert every .txt MIB found under *txt_dir_path*/MIB to a .py MIB.

    Each MIB gets up to three conversion attempts; the temporary source
    trees are removed afterwards.

    Args:
        txt_dir_path(str): Path to dir with .txt MIB's.
        py_dir_path(str): Path to dir with .py MIB's

    Examples::

        convert_to_py(mib_path_tmp, mib_path)

    """
    mod_logger_snmp = loggers.module_logger(name=__name__)
    txt_dir_path = os.path.join(txt_dir_path, "MIB")
    mod_logger_snmp.debug("Converts .txt MIB's to .py")
    os.environ['SMIPATH'] = txt_dir_path
    retry_count = 3
    for mib in os.listdir(txt_dir_path):
        mib_txt_path = os.path.join(txt_dir_path, mib)
        for attempt in range(1, retry_count + 1):
            mib_py = file_convert(mib_txt_path, py_dir_path)
            if mib_py in os.listdir(py_dir_path):
                mod_logger_snmp.debug("Converted MIB %s is present at %s" % (mib, py_dir_path))
                break
            mod_logger_snmp.debug("Converted MIB %s is not present at %s" % (mib, py_dir_path))
            if attempt == retry_count:
                mod_logger_snmp.debug("Can not convert %s" % (mib, ))
    shutil.rmtree(txt_dir_path)
    shutil.rmtree(os.path.join(py_dir_path, "tmp"))
def create_mib_folder(config, path, env):
    """Creates MIB folder.

    Downloads the MIB tarball described by *config* from the remote host,
    unpacks it under *path* and converts the .txt MIBs to .py.

    Args:
        config(dict): Configuration dictionary.
        path(str): Path to MIB folder.
        env(Environment): Environment object.

    Examples::

        create_mib_folder(config, path, env)

    """
    if config is None:
        pytest.fail("UI settings not fount in environment configuration.")
    host = config['host']
    port = int(config['port'])
    username = config['username']
    password = config['password']
    tar_folder = config['tar_remotepath']
    tar_file = os.path.split(tar_folder)[1]
    branch = env.env_prop['switchppVersion']
    # snmp_path may be absent on the switch hardware object; default to None
    platform = getattr(getattr(env.switch[1], 'hw', None), 'snmp_path', None)
    # NOTE: the remote path template is expanded from the LOCAL VARIABLE
    # NAMES above (host, branch, platform, ...) -- do not rename them.
    tar_remotepath = tar_folder.format(**locals())
    if not os.path.exists(path):
        os.makedirs(path)
    tar_localpath = os.path.join(path, tar_file)
    mib_path_tmp = os.path.join(path, 'tmp')
    if not os.path.exists(mib_path_tmp):
        os.makedirs(mib_path_tmp)
    mib_path_txt = os.path.join(path, 'txt')
    if not os.path.exists(mib_path_txt):
        os.makedirs(mib_path_txt)
    get_remote_file(host, port, username, password, tar_remotepath, tar_localpath)
    untar_file(tar_localpath, mib_path_txt)
    convert_to_py(mib_path_txt, path)
| 29.992565
| 104
| 0.649603
| 1,145
| 8,068
| 4.375546
| 0.20786
| 0.026347
| 0.01996
| 0.015569
| 0.244711
| 0.199601
| 0.162475
| 0.125349
| 0.079441
| 0.061876
| 0
| 0.004763
| 0.24529
| 8,068
| 268
| 105
| 30.104478
| 0.818033
| 0.318295
| 0
| 0.10084
| 0
| 0
| 0.081055
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0.042017
| 0.084034
| 0
| 0.168067
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32675e661c420861aca3a72ce984ac5043cdeab4
| 2,868
|
py
|
Python
|
elexon_api/utils.py
|
GiorgioBalestrieri/elexon_api_tool
|
5b271e9d4a52dec5585a232833a699b8392ee6b0
|
[
"MIT"
] | 4
|
2019-06-07T11:14:46.000Z
|
2021-04-01T14:15:14.000Z
|
elexon_api/utils.py
|
GiorgioBalestrieri/elexon_api_tool
|
5b271e9d4a52dec5585a232833a699b8392ee6b0
|
[
"MIT"
] | null | null | null |
elexon_api/utils.py
|
GiorgioBalestrieri/elexon_api_tool
|
5b271e9d4a52dec5585a232833a699b8392ee6b0
|
[
"MIT"
] | 6
|
2019-02-28T20:24:26.000Z
|
2021-03-30T18:08:23.000Z
|
import os
from pathlib import Path
import pandas as pd
from collections import defaultdict
from typing import Dict, List
from .config import REQUIRED_D, API_KEY_FILENAME
import logging

# Library-style logging: expose a module logger but stay silent unless the
# embedding application configures its own handlers.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def get_required_parameters(service_code: str) -> List[str]:
    """Return the parameters that are mandatory for the given service."""
    return REQUIRED_D[service_code]
def _get_path_to_module() -> Path:
"""Get path to this module."""
return Path(os.path.realpath(__file__)).parent
def get_api_key_path(filename=API_KEY_FILENAME) -> Path:
    """Return the full path of the API key file, located next to this module."""
    return _get_path_to_module() / filename
class ElexonAPIException(Exception):
    """Error raised by the Elexon API wrapper."""
def extract_df(r_dict: dict) -> pd.DataFrame:
    """Extract DataFrame from dictionary.

    Tries a plain DataFrame build first; a response with a single scalar
    item needs an explicit index, so retry with ``index=[0]`` on failure.

    Parameters
    ----------
    r_dict
        Obtained from response through xmltodict.
    """
    r_body = r_dict['responseBody']
    r_items_list = r_body['responseList']['item']
    try:
        return pd.DataFrame(r_items_list)
    except Exception:
        # best-effort first pass; the retry below handles scalar items
        logger.warning("Failed to create DataFrame.", exc_info=True)
    try:
        return pd.DataFrame(r_items_list, index=[0])
    except Exception:
        logger.error("Failed to create DataFrame.")
        raise
def extract_df_by_record_type(r_dict: dict) -> Dict[str,pd.DataFrame]:
    """Build one DataFrame per ``recordType`` found in the response items."""
    items: List[dict] = r_dict['responseBody']['responseList']['item']
    by_type = split_list_of_dicts(items, 'recordType')
    return {record_type: pd.DataFrame(rows) for record_type, rows in by_type.items()}
def split_list_of_dicts(dict_list: List[dict], key: str) -> Dict[str,List[dict]]:
    """Split a list of dictionaries into multiple lists based on a specific key.

    Output lists are stored in a dictionary keyed by the value found under
    *key*; input order is preserved within each group.

    Example:
        >>> dict_list = [
            {"recordType": "a", "foo": 1, "bar": 1},
            {"recordType": "b", "foo": 2, "bar": 2},
            {"recordType": "b", "foo": 3, "bar": 3},
        ]
        >>> split_list_of_dicts(dict_list, 'recordType')
        {
            "a": [
                {"recordType": "a", "foo": 1, "bar": 1},
            ],
            "b": [
                {"recordType": "b", "foo": 2, "bar": 2},
                {"recordType": "b", "foo": 3, "bar": 3},
            ]
        }
    """
    grouped = defaultdict(list)
    for entry in dict_list:
        grouped[entry[key]].append(entry)
    return grouped
| 25.380531
| 81
| 0.540098
| 328
| 2,868
| 4.509146
| 0.335366
| 0.027045
| 0.02975
| 0.032454
| 0.183908
| 0.151454
| 0.093306
| 0.093306
| 0.051386
| 0.051386
| 0
| 0.006871
| 0.340307
| 2,868
| 113
| 82
| 25.380531
| 0.774841
| 0.360181
| 0
| 0.1
| 0
| 0
| 0.074442
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0.025
| 0.175
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
326881582afe0e7d4f36578fa52df6c3b487641d
| 1,608
|
py
|
Python
|
relative_connectivity_of_subgraphs.py
|
doberse/RRI
|
e2fdc085d8040efc230a25eec670dd6839cbf1f7
|
[
"MIT"
] | null | null | null |
relative_connectivity_of_subgraphs.py
|
doberse/RRI
|
e2fdc085d8040efc230a25eec670dd6839cbf1f7
|
[
"MIT"
] | null | null | null |
relative_connectivity_of_subgraphs.py
|
doberse/RRI
|
e2fdc085d8040efc230a25eec670dd6839cbf1f7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Relative connectivity (RC) of subgraphs of the BC-related RRI network.

Nodes are added in decreasing degree order; after each addition the script
records the fraction of accumulated nodes that lie in the largest connected
component.
"""
import networkx as nx
import pandas as pd

# --- Adjacency lists: other nodes connected by one node ----------------------
# `with` guarantees the handles are closed even if parsing fails.
with open('input_data/BC-related_RRI_network.txt') as net_file:
    net_lines = net_file.readlines()
rna_pairs = []          # kept from the original script (not used below)
node_to_nodes = {}
for line in net_lines:
    fields = line.strip().split('\t')
    pair = sorted(fields[0:2])
    rna_pairs.append((pair[0], pair[1]))
    for i, j in ((0, 1), (1, 0)):
        node_to_nodes.setdefault(pair[i], []).append(pair[j])

# --- Dictionary of Node No. -> display name ----------------------------------
no2node = {}
with open('input_data/RRI_node.csv') as node_file:
    node_file.readline()  # skip the CSV header row
    for line in node_file:
        fields = line.strip().split(',')
        no2node[fields[0]] = '~'.join(fields[1:7])

# --- Sort nodes by node degree (descending) ----------------------------------
node_degree = {k: len(v) for k, v in node_to_nodes.items()}
df = pd.DataFrame(node_degree, index=['Degree'])
df = df.sort_values(by='Degree', axis=1, ascending=False)
nodes = df.columns.values

# --- Compute the relative connectivity of subgraphs --------------------------
G = nx.Graph()
node_G = []
with open('RC_in_BC-related_RRI_network.csv', 'w') as out_file:
    out_file.write('Node,No.,Relative connectivity\n')
    for k, node_key in enumerate(nodes, start=1):
        node_G.append(node_key)
        G.add_node(node_key)  # add the node to the subgraph
        # Build the neighbour set ONCE per node; the original rebuilt it
        # inside every inner membership test.
        neighbours = set(node_to_nodes[node_key])
        for node in node_G:
            if node in neighbours:
                G.add_edge(node_key, node)  # add the edge to the subgraph
        largest_component = max(nx.connected_components(G), key=len)
        out_file.write(no2node[node_key] + ',' + str(k) + ','
                       + str(len(largest_component) / float(len(node_G))) + '\n')
| 26.360656
| 99
| 0.625622
| 276
| 1,608
| 3.485507
| 0.32971
| 0.043659
| 0.080042
| 0.046778
| 0.060291
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014937
| 0.208955
| 1,608
| 60
| 100
| 26.8
| 0.741352
| 0.121891
| 0
| 0.08
| 0
| 0
| 0.107887
| 0.068452
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.04
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
326dd27e7ff223645c2d0bf5d397fdea5ed20af2
| 2,632
|
py
|
Python
|
src/piotr/cmdline/fs.py
|
orangecms/piotr
|
f892ce6eaaa08ea81eb01943a388b64fbf3ccc44
|
[
"MIT"
] | 47
|
2021-07-02T08:39:02.000Z
|
2021-11-08T22:21:39.000Z
|
src/piotr/cmdline/fs.py
|
orangecms/piotr
|
f892ce6eaaa08ea81eb01943a388b64fbf3ccc44
|
[
"MIT"
] | 2
|
2021-07-08T09:25:30.000Z
|
2021-07-12T10:06:51.000Z
|
src/piotr/cmdline/fs.py
|
orangecms/piotr
|
f892ce6eaaa08ea81eb01943a388b64fbf3ccc44
|
[
"MIT"
] | 5
|
2021-07-08T08:29:17.000Z
|
2021-10-18T13:35:11.000Z
|
"""
FS commandline module.
Allows to:
- list host filesystems
- remove a specific host filesystem
- add a specific host filesystem
"""
from os.path import basename
from piotr.cmdline import CmdlineModule, module, command
from piotr.user import UserDirectory as ud
from piotr.util import confirm
@module('fs', 'List, add, remove Piotr host filesystems')
class FsModule(CmdlineModule):
    """Command-line module exposing list/remove/add operations on the host
    filesystems registered in the Piotr user directory (``ud``)."""

    def __init__(self):
        super().__init__()

    @command('List available host filesystems')
    def list(self, options):
        """
        List available FSs.
        """
        self.title(' Installed host filesystems:')
        print('')
        count = 0
        for fs in ud.get().getHostFilesystems():
            # One line per filesystem: bold file name, then the metadata
            # (version/platform/cpu/endianness/type) in a second column.
            fs_line = (self.term.bold + '{fs:<40}' + self.term.normal + \
                '{extra:<40}').format(
                fs=' > %s' % fs['file'],
                extra='(version {version}, platform: {platform}, cpu: {cpu} ({endian}), type: {fstype})'.format(
                    version=fs['version'],
                    platform=fs['platform'],
                    cpu=fs['cpu'],
                    fstype=fs['type'],
                    endian='little-endian' if fs['endian']=='little' else 'big-endian'
                )
            )
            print(fs_line)
            count += 1
        print('')
        print(' %d filesystem(s) available' % count)
        print('')

    @command('Remove a specific filesystem', ['fs name'])
    def remove(self, options):
        """
        Remove filesystem from our repository.

        Expects options[0] to be the name of the target filesystem to remove.
        """
        if len(options) >= 1:
            # Ask for confirm
            if confirm('Are you sure to remove this filesystem'):
                # Remove kernel by name
                if ud.get().removeHostFs(options[0]):
                    print('Filesystem %s successfully removed.' % options[0])
                else:
                    self.error('An error occurred while removing host filesystem.')
        else:
            self.important(' You must provide a host filesystem name to remove.')

    @command('Add a specific host filesystem', ['path'])
    def add(self, options):
        """
        Add kernel to our kernel repository.

        Expects options[0] to be the path of the filesystem file to import.
        """
        if len(options) >= 1:
            if ud.get().addHostFs(options[0]):
                print('Host filesystem successfully added to our registry.')
            else:
                self.error('An error occurred while importing host filesystem.')
        else:
            self.important(' You must provide a filesystem file to add.')
| 32.9
| 112
| 0.549392
| 287
| 2,632
| 5.003484
| 0.341463
| 0.068245
| 0.027159
| 0.04805
| 0.14624
| 0.110028
| 0.110028
| 0.064067
| 0.064067
| 0
| 0
| 0.00681
| 0.330547
| 2,632
| 79
| 113
| 33.316456
| 0.808173
| 0.1269
| 0
| 0.183673
| 0
| 0.020408
| 0.307414
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081633
| false
| 0
| 0.142857
| 0
| 0.244898
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
327639bba2a2aa36c47d30fbf67b64ee714db74b
| 2,975
|
py
|
Python
|
RNAstructure_Source/RNAstructure_python_interface/Error_handling.py
|
mayc2/PseudoKnot_research
|
33e94b84435d87aff3d89dbad970c438ac173331
|
[
"MIT"
] | null | null | null |
RNAstructure_Source/RNAstructure_python_interface/Error_handling.py
|
mayc2/PseudoKnot_research
|
33e94b84435d87aff3d89dbad970c438ac173331
|
[
"MIT"
] | null | null | null |
RNAstructure_Source/RNAstructure_python_interface/Error_handling.py
|
mayc2/PseudoKnot_research
|
33e94b84435d87aff3d89dbad970c438ac173331
|
[
"MIT"
] | null | null | null |
#automated error checking for RNAstructure python interface
from __future__ import print_function
import inspect
from functools import wraps
from collections import defaultdict
# Module-wide switch: set True to trace the decoration/error-checking flow.
debug = False
class StructureError(Exception):
    """Raised for RNA structure related failures (error codes 6-9)."""
class RNAstructureInternalError(Exception):
    """Raised for internal RNAstructure failures (error codes 14, 21, 22)."""
# Map RNAstructure error codes to Python exception types; unknown codes
# fall back to RuntimeError via the defaultdict factory.
_CODE_GROUPS = (
    (IOError, (1, 2, 13)),
    (IndexError, (3, 4)),
    (EnvironmentError, (5,)),
    (StructureError, (6, 7, 8, 9)),
    (ValueError, (10, 11, 12, 15, 16, 17, 18, 19, 20, 23, 24, 25, 26)),
    (RNAstructureInternalError, (14, 21, 22)),
)
lookup_exceptions = defaultdict(lambda: RuntimeError)
for _exc, _codes in _CODE_GROUPS:
    for _code in _codes:
        lookup_exceptions[_code] = _exc
def check_for_errors(method):
    """Decorator: after calling *method*, read the object's RNAstructure
    error code, reset it, and raise the mapped exception (see
    ``lookup_exceptions``) when the code is nonzero."""
    @wraps(method)
    def RNAstructure_error_checker(self, *args, **kwargs):
        if debug:
            print("checking for errors in %s" % method.__name__)
        result = method(self, *args, **kwargs)
        code = self.GetErrorCode()
        self.ResetError()
        if code != 0:
            message = "Error in %s: " % method.__name__ + self.GetErrorMessage(code)
            raise lookup_exceptions[code](message)
        return result
    return RNAstructure_error_checker
def check_for_init_errors(method):
    """Decorator for constructors: raise RuntimeError when the object
    reports a nonzero error code after *method* runs.

    NOTE(review): unlike ``check_for_errors`` this neither resets the error
    state nor maps codes through ``lookup_exceptions``; ``self.__name__``
    is also unusual for an instance -- confirm the SWIG proxy provides it.
    """
    @wraps(method)
    def RNAstructure_error_checker(self, *args):
        if debug:
            print("checking for errors in %s" % method.__name__)
        result = method(self, *args)
        code = self.GetErrorCode()
        if code != 0:
            raise RuntimeError("Error in call to %s.%s: " % (self.__name__, method.__name__) +
                               self.GetErrorMessage(code))
        return result
    return RNAstructure_error_checker
def is_init(method):
    """Return True when *method* is a bound method named ``__init__``.

    The original carried a dead ``if inspect.ismethod(method): pass``
    debug stub, removed here.
    """
    return inspect.ismethod(method) and method.__name__ == "__init__"
def not_excluded(method):
    """Return True for methods that should be wrapped by the error checker.

    Dunder/bookkeeping methods and the raw error-inspection API must stay
    undecorated (the checker itself calls them).
    """
    # frozenset: O(1) membership instead of rebuilding and scanning a list
    excluded = frozenset([
        "__repr__", "__setattr__", "__getattr__", "__str__", "__init__",
        "<lambda>", "swig_repr",
        "GetErrorCode", "GetErrorMessage", "GetErrorMessageString",
        "ResetError", "fromFile", "fromString",
    ])
    result = inspect.ismethod(method) and method.__name__ not in excluded
    if inspect.ismethod(method):
        if debug:
            print("checking if", method.__name__, "should be excluded: ", result)
    return result
def decorate_methods(decorator, methodtype):
    """Class-decorator factory: wrap every member of a class that matches
    the ``inspect`` predicate *methodtype* with *decorator*."""
    def decorate(cls):
        for name, _member in inspect.getmembers(cls, methodtype):
            if debug:
                print("decorating %s!" % name)
            setattr(cls, name, decorator(getattr(cls, name)))
        return cls
    return decorate
| 35.416667
| 110
| 0.621176
| 299
| 2,975
| 5.913043
| 0.347826
| 0.039593
| 0.054299
| 0.033937
| 0.263575
| 0.263575
| 0.263575
| 0.218326
| 0.218326
| 0.218326
| 0
| 0.022482
| 0.282353
| 2,975
| 83
| 111
| 35.843373
| 0.805621
| 0.019496
| 0
| 0.233766
| 0
| 0
| 0.095336
| 0.007202
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103896
| false
| 0.038961
| 0.051948
| 0
| 0.285714
| 0.064935
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3276b79a61cf27161c545de376944d5851538c10
| 52,691
|
py
|
Python
|
Src/si_figs.py
|
jomimc/FoldAsymCode
|
1896e5768e738bb5d1921a3f4c8eaf7f66c06be9
|
[
"MIT"
] | 1
|
2020-10-07T14:24:06.000Z
|
2020-10-07T14:24:06.000Z
|
Src/si_figs.py
|
jomimc/FoldAsymCode
|
1896e5768e738bb5d1921a3f4c8eaf7f66c06be9
|
[
"MIT"
] | null | null | null |
Src/si_figs.py
|
jomimc/FoldAsymCode
|
1896e5768e738bb5d1921a3f4c8eaf7f66c06be9
|
[
"MIT"
] | null | null | null |
from collections import defaultdict, Counter
from itertools import product, permutations
from glob import glob
import json
import os
from pathlib import Path
import pickle
import sqlite3
import string
import sys
import time
import matplotlib as mpl
from matplotlib import colors
from matplotlib import pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.lines import Line2D
import matplotlib.patches as mpatches
from multiprocessing import Pool
import numpy as np
import pandas as pd
from palettable.colorbrewer.qualitative import Paired_12
from palettable.colorbrewer.diverging import PuOr_5, RdYlGn_6, PuOr_10, RdBu_10
from palettable.scientific.diverging import Cork_10
from scipy.spatial import distance_matrix, ConvexHull, convex_hull_plot_2d
from scipy.stats import linregress, pearsonr, lognorm
import seaborn as sns
import svgutils.compose as sc
import asym_io
from asym_io import PATH_BASE, PATH_ASYM, PATH_ASYM_DATA
import asym_utils as utils
import folding_rate
import paper_figs
import structure
# Output locations for the SI figures and their cached intermediate data.
PATH_FIG = PATH_ASYM.joinpath("Figures")
PATH_FIG_DATA = PATH_FIG.joinpath("Data")

# Shared palette: helix/sheet colours drawn from a diverging map, plus two
# extra hues (used for coil/disorder in fig1).
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[2]
c_sheet = custom_cmap[10]
col = [c_helix, c_sheet, "#CB7CE6", "#79C726"]
####################################################################
### SI Figures
####################################################################
### FIG 1
def fig1(df, nx=3, ny=3, N=50):
    """SI Fig 1: secondary-structure probability near the N and C termini
    for the nx*ny most common SCOP folds (the single most common fold is
    skipped by the ``[1:]`` slice). Writes si1.pdf.

    Args:
        df: per-chain dataframe with a CF (SCOP fold id) column and the
            SEQ_PDB2/SS_PDB2 columns consumed by
            utils.pdb_end_stats_disorder_N_C -- TODO confirm schema.
        nx, ny: subplot grid shape.
        N: presumably residues profiled from each end (passed through) --
           verify against utils.
    """
    fig, ax = plt.subplots(nx,ny, figsize=(12,12))
    ax = ax.reshape(ax.size)
    fig.subplots_adjust(hspace=.5)
    lbls = ['Helix', 'Sheet', 'Coil', 'Disorder']
    # one character per structure class; order matches lbls and col
    cat = 'HS.D'
    scop_desc = {row[1]:row[2] for row in pd.read_csv(PATH_BASE.joinpath('SCOP/scop-des-latest.txt')).itertuples()}
    # folds sorted by frequency, most common first; drop the top one
    CF_count = sorted(df.CF.value_counts().items(), key=lambda x:x[1], reverse=True)[1:]
    # panels whose titles are emphasised in the figure
    bold_idx = [0, 1, 2, 6, 8]
    for i in range(nx*ny):
        cf_id, count = CF_count[i]
        countN, countC = utils.pdb_end_stats_disorder_N_C(df.loc[df.CF==cf_id], N=N, s1='SEQ_PDB2', s2='SS_PDB2')
        base = np.zeros(len(countN['S']), dtype=float)
        # total counts per position, summed over all structure classes
        Yt = np.array([[sum(p.values()) for p in countN[s]] for s in cat]).sum(axis=0)
        X = np.arange(base.size)
        for j, s in enumerate(cat):
            YN = np.array([sum(p.values()) for p in countN[s]])
            YC = np.array([sum(p.values()) for p in countC[s]])
            # solid = N terminus, dotted = C terminus
            ax[i].plot(YN/Yt, '-', c=col[j], label=f"{s} N")
            ax[i].plot(YC/Yt, ':', c=col[j], label=f"{s} C")
        if i in bold_idx:
            ax[i].set_title(f"{scop_desc[int(cf_id)][:40]}\nTotal sequences: {count}", fontweight='bold')
        else:
            ax[i].set_title(f"{scop_desc[int(cf_id)][:40]}\nTotal sequences: {count}")
        ax[i].set_xlabel('Sequence distance from ends')
        if not i%3:
            ax[i].set_ylabel('Secondary\nstructure\nprobability')
    # shared legend: line style encodes terminus, colour encodes class
    handles = [Line2D([0], [0], ls=ls, c=c, label=l) for ls, c, l in zip(['-', '--'], ['k']*2, ['N', 'C'])] + \
        [Line2D([0], [0], ls='-', c=c, label=l) for l, c in zip(lbls, col)]
    ax[1].legend(handles=handles, bbox_to_anchor=(1.40, 1.45), frameon=False,
                 ncol=6, columnspacing=1.5, handlelength=2.0)
    fig.savefig(PATH_FIG.joinpath("si1.pdf"), bbox_inches='tight')
####################################################################
### FIG 2
def fig2():
    """SI Fig 2: regression of log folding rate (log_kf) against log
    sequence length and log contact order for the PFDB entries flagged
    'use'; prints the Pearson correlations and writes si2.pdf."""
    pfdb = asym_io.load_pfdb()
    fig, ax = plt.subplots(1,2, figsize=(10,5))
    fig.subplots_adjust(wspace=0.3)
    X1 = np.log10(pfdb.loc[pfdb.use, 'L'])
    X2 = np.log10(pfdb.loc[pfdb.use, 'CO'])
    Y = pfdb.loc[pfdb.use, 'log_kf']
    sns.regplot(X1, Y, ax=ax[0])
    sns.regplot(X2, Y, ax=ax[1])
    # report the correlations the regression lines visualise
    print(pearsonr(X1, Y))
    print(pearsonr(X2, Y))
    ax[0].set_ylabel(r'$\log_{10} k_f$')
    ax[1].set_ylabel(r'$\log_{10} k_f$')
    ax[0].set_xlabel(r'$\log_{10}$ Sequence Length')
    ax[1].set_xlabel(r'$\log_{10}$ Contact Order')
    fs = 14
    # panel letters A, B
    for i, b in zip([0,1], list('ABCDEFGHI')):
        ax[i].text( -0.10, 1.05, b, transform=ax[i].transAxes, fontsize=fs)
    fig.savefig(PATH_FIG.joinpath("si2.pdf"), bbox_inches='tight')
####################################################################
### FIG 3
def fig3(pdb, Y='S_ASYM'):
    """SI Fig 3: distributions of log10(R) (REL_RATE) for the best fit and
    the lo/hi 95% CI parameterisations; prints summary fractions to the
    console. Writes si3.pdf.

    Args:
        pdb: protein dataframe accepted by
             folding_rate.get_folding_translation_rates.
        Y: unused in this figure -- kept for signature parity with fig4.
    """
    LO = folding_rate.get_folding_translation_rates(pdb.copy(), which='lo')
    HI = folding_rate.get_folding_translation_rates(pdb.copy(), which='hi')
    fig, ax = plt.subplots()
    lbls = ['Fit', r"$95\% CI$", r"$95\% CI$"]
    for i, d in enumerate([pdb, LO, HI]):
        # console summary of how R is distributed for this parameterisation
        print(f"{i}: frac R less than 0 = {utils.R_frac_1(d)}")
        print(f"{i}: Euk frac (.1 < R < 10) = {utils.R_frac_2(d, k=5)}")
        print(f"{i}: Prok frac (.1 < R < 10) = {utils.R_frac_2(d, k=10)}")
        print(f"{i}: frac R faster than 'speed-limit' = {utils.R_frac_3(d)}")
        print(f"{i}: frac R slower than 20 minutes = {utils.R_frac_4(d)}")
        print()
        sns.distplot(d['REL_RATE'], label=lbls[i], color=col[i])
    ax.legend(loc='best', frameon=False)
    ax.set_xlim(-6, 6)
    ax.set_xlabel(r'$\log_{10}R$')
    ax.set_ylabel('Density')
    fig.savefig(PATH_FIG.joinpath("si3.pdf"), bbox_inches='tight')
####################################################################
### FIG 4
def fig4(pdb, Y='S_ASYM'):
    """SI Fig 4: ridge plots of helix/sheet asymmetry per log10(R) decile
    for the lo and hi 95% CI parameterisations (rows 1-2), with the
    bootstrap N-terminal enrichment bars alongside, plus the (c1, c2)
    metric space (bottom row, via plot_metric_space). Writes si4.pdf.

    Args:
        pdb: protein dataframe accepted by
             folding_rate.get_folding_translation_rates.
        Y: overwritten inside the loops -- kept for signature parity.
    """
    LO = folding_rate.get_folding_translation_rates(pdb.copy(), which='lo')
    HI = folding_rate.get_folding_translation_rates(pdb.copy(), which='hi')
    # For the results using only 2-state proteins...
    # HI = folding_rate.get_folding_translation_rates(pdb.copy(), which='best', only2s=True)
    fig = plt.figure(figsize=(8,10.5))
    gs = GridSpec(5,12, wspace=0.5, hspace=0.0, height_ratios=[1,0.5,1,0.5,1.5])
    # six ridge/enrichment panels (2 rows x 3 cols) + two metric-space panels
    ax = [fig.add_subplot(gs[i*2,j*4:(j+1)*4]) for i in [0,1] for j in [0,1,2]] + \
        [fig.add_subplot(gs[4,:5]), fig.add_subplot(gs[4,7:])]
    X = np.arange(10)
    width = .35
    ttls = [r'$\alpha$ Helix', r'$\beta$ Sheet']
    lbls = [r'$E_{\alpha}$', r'$E_{\beta}$']
    custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
    c_helix = custom_cmap[0]
    c_sheet = custom_cmap[12]
    col = [c_helix, c_sheet]
    bins = np.linspace(-0.20, 0.20, 80)
    width = np.diff(bins[:2])
    X = bins[:-1] + width * 0.5
    # `mid` splits the histogram at its centre bin; `sep` is the vertical
    # offset between stacked decile histograms
    mid = 39
    sep = 0.05
    for k, pdb in enumerate([LO, HI]):
        # decile boundaries of REL_RATE; each protein is tagged with its decile
        quantiles = pdb['REL_RATE'].quantile(np.arange(0,1.1,.1)).values
        pdb['quant'] = pdb['REL_RATE'].apply(lambda x: utils.assign_quantile(x, quantiles))
        enrich_data = pickle.load(open(PATH_FIG_DATA.joinpath("fig3_enrich.pickle"), 'rb'))
        for i, Y in enumerate(['H_ASYM', 'S_ASYM']):
            for j in range(len(quantiles)-1):
                hist, bins = np.histogram(pdb.loc[pdb.quant==j, Y], bins=bins)
                hist = hist / hist.sum()
                # one normalised histogram per decile, stacked with offset
                # sep*j; halves are coloured differently on each side of 0
                if i:
                    ax[k*3+i].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
                    ax[k*3+i].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color=col[i], alpha=.5)
                else:
                    ax[k*3+i].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color=col[i], alpha=.5)
                    ax[k*3+i].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
                ax[k*3+i].plot(X[:mid], (hist/hist.sum()+sep*j)[:mid], '-', c='k', alpha=.5)
                ax[k*3+i].plot(X[-mid:], (hist/hist.sum()+sep*j)[-mid:], '-', c='k', alpha=.5)
            # bootstrap N-terminal enrichment summary (mean with 95% CI bars)
            mean = np.mean(enrich_data[Y[0]], axis=0)
            lo = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.025, axis=0))
            hi = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.975, axis=0))
            ax[k*3+2].barh([sep*j+(i+.7)*sep/3 for j in range(10)], mean, sep/3, xerr=(lo, hi), color=col[i], ec='k', alpha=.5, label=lbls[i], error_kw={'lw':.8})
        ax[k*3+2].plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
        for i in [0,2]:
            ax[k*3+i].set_yticks(np.arange(len(quantiles))*sep)
            ax[k*3+i].set_yticklabels([round(x,1) for x in quantiles])
        for i in range(2):
            ax[k*3+i].spines['top'].set_visible(False)
            ax[k*3+i].spines['right'].set_visible(False)
        for i in range(1,3):
            ax[k*3+i].spines['left'].set_visible(False)
            ax[k*3+i].spines['top'].set_visible(False)
        for i in range(3):
            ax[k*3+i].set_ylim(0-sep/4, (0.5+sep/4)*1.05)
        ax[k*3+1].set_yticks([])
        ax[k*3+2].yaxis.set_label_position('right')
        ax[k*3+2].yaxis.tick_right()
        ax[k*3+0].set_xlabel(r"asym$_{\alpha}$")
        ax[k*3+1].set_xlabel(r"asym$_{\beta}$")
        ax[k*3+0].set_ylabel(r'$\log_{10}R$')
        ax[k*3+2].set_xlabel('N terminal\nEnrichment')
    plot_metric_space(fig, ax[6:])
    fs = 14
    # panel letters on the first panel of each row
    for i, b in zip([0,3,6], list('ABCDEFGHI')):
        ax[i].text( -0.20, 1.05, b, transform=ax[i].transAxes, fontsize=fs)
    fig.savefig(PATH_FIG.joinpath("si4.pdf"), bbox_inches='tight')
def get_ci_index(X, Y):
    """Return the indices where both X and Y fall inside their own
    central 95% interval (2.5th-97.5th percentile)."""
    x_lo, x_hi = np.quantile(X, [0.025, 0.975])
    y_lo, y_hi = np.quantile(Y, [0.025, 0.975])
    inside = (X >= x_lo) & (X <= x_hi) & (Y >= y_lo) & (Y <= y_hi)
    return np.where(inside)[0]
def plot_hull(boot_fit, patt, ax='', c='k', lw=1):
    """Draw the convex hull of the bootstrap points lying inside both 95%
    CIs, using line style *patt*; draws on *ax* when one is supplied,
    otherwise on the current pyplot figure."""
    keep = get_ci_index(*boot_fit[:, :2].T)
    pts = boot_fit[keep].copy()
    hull = ConvexHull(np.array([boot_fit[keep, 1], boot_fit[keep, 0]]).T)
    # the default '' sentinel means "no axes given"; fall back to pyplot
    target = plt if isinstance(ax, str) else ax
    for simplex in hull.simplices:
        target.plot(pts[simplex, 1], pts[simplex, 0], patt, c=c, lw=lw)
def plot_metric_space(fig, ax):
    """Plot the R_max metric over the (c1, c2) parameter grid for helices and
    sheets, overlaying convex hulls of bootstrapped fits and the best-fit point.

    ax : sequence of at least two Axes (one per secondary-structure class).
    """
    fit = pickle.load(open(PATH_FIG_DATA.joinpath("boot_fit_met.pickle"), 'rb'))['AA']
    boot_fit = pickle.load(open(PATH_FIG_DATA.joinpath("boot_fit_param.pickle"), 'rb'))
    boot_fit_0 = pickle.load(open(PATH_FIG_DATA.joinpath("boot_fit_param_useall.pickle"), 'rb'))
    X, Y = np.meshgrid(fit["c1"], fit["c2"])
    cmap = colors.ListedColormap(sns.diverging_palette(230, 22, s=100, l=47, n=8))
    bounds = np.linspace(-2, 2, 9)
    norm = colors.BoundaryNorm(bounds, cmap.N)
    im = []
    ttls = ['Helices', 'Sheets']
    for i in range(2):
        # fit['met'][:,:,i]: metric surface for helices (i=0) / sheets (i=1)
        im = ax[i].contourf(X, Y, fit['met'][:,:,i], bounds, cmap=cmap, vmin=-2, vmax=2, norm=norm)
        cbar = fig.colorbar(im, ax=ax[i], fraction=0.046, pad=0.04, norm=norm, boundaries=bounds, ticks=bounds)
        cbar.set_label(r"$R_{\mathregular{max}}$", labelpad=-5)
        ax[i].set_xlabel('A')
        ax[i].set_xlim(X.min(), X.max())
        ax[i].set_ylabel('B')
        ax[i].set_ylim(Y.max(), Y.min())
        ax[i].invert_yaxis()
        # Square panels regardless of the data ranges
        ax[i].set_aspect((np.max(X)-np.min(X))/(np.max(Y)-np.min(Y)))
        ax[i].set_title(ttls[i])
    col = ['k', '#79C726']
    # Hulls for the two bootstrap ensembles (filtered vs "use-all") on both panels
    for i, boofi in enumerate([boot_fit, boot_fit_0]):
        for j in range(2):
            for bf, p in zip(boofi, ['-', ':']):
                plot_hull(bf, p, ax[j], c=col[i])
    # Best fit and its CI end-points (hard-coded from the fitting procedure)
    c1 = [13.77, -6.07]
    c1a = [11.36553036, -4.87716477]
    c1b = [16.17819934, -7.27168306]
    patt = ['*', 'o', 'o']
    lbls = ['Fit', r"$95\% CI$", r"$95\% CI$"]
    col = "#CB7CE6"
    for i in range(2):
        for coef, p, l in zip([c1, c1a, c1b], patt, lbls):
            ax[i].plot([coef[0]], [coef[1]], p, label=l, fillstyle='none', ms=10, c=col, mew=2)
        ax[i].legend(loc='best', frameon=False)
####################################################################
### FIG 5
def fig5():
    """SI figure 5: bootstrapped domain-position distributions of sequence
    length (top panel) and contact order (bottom panel), saved to si5.pdf.
    """
    fig, axes = plt.subplots(2, 1)
    fig.subplots_adjust(hspace=0.3)
    # x positions for each panel: left bin edge shifted by one bin width
    xvals = []
    for edges in (np.arange(0, 620, 20), np.arange(0, 61, 2.0)):
        xvals.append(edges[:-1] + np.diff(edges[:2]))
    yellows = sns.diverging_palette(5, 55, s=95, l=77, n=13)
    pinks = sns.diverging_palette(5, 55, s=70, l=52, n=13)
    line_col = [yellows[12], pinks[0]]
    fill_col = [yellows[10], pinks[3]]
    data = []
    for key in ('aa', 'smco'):
        with open(PATH_FIG_DATA.joinpath(f"dom_{key}_dist_boot.pickle"), 'rb') as fh:
            data.append(pickle.load(fh))
    for j, a in enumerate(axes):
        for i in (1, 2):
            # mean curve plus bootstrap confidence band for domain position i
            mean, lo, hi = (np.array(v) for v in data[j][f"pos{i}"])
            a.plot(xvals[j], mean, '--', c=line_col[i-1], label=f'position {i}')
            a.fill_between(xvals[j], lo, hi, color=fill_col[i-1], alpha=0.5)
    axes[0].set_xlabel('Sequence Length')
    axes[1].set_xlabel('Contact Order')
    for a in axes:
        a.set_ylabel('Density')
    axes[0].legend(loc='upper right', frameon=False)
    fig.savefig(PATH_FIG.joinpath("si5.pdf"), bbox_inches='tight')
####################################################################
### FIG 6
def fig6(X='REL_RATE', Y='S_ASYM'):
    """SI figure 6: N-terminal enrichment per log10(R) decile for two-domain
    proteins, one panel per domain position; saved to si6.pdf.

    X, Y are accepted for signature compatibility; the plotted data come from
    the pre-computed "dom_pos_boot.pickle" bootstrap file.
    """
    fig, ax = plt.subplots(1,2, figsize=(10,4))
    fig.subplots_adjust(hspace=0.7, wspace=0.3)
    sep = 0.40
    col = Paired_12.hex_colors[5]
    ttls = [f"Position {i}" for i in range(1,3)]
    dom_pos_boot = pickle.load(open(PATH_FIG_DATA.joinpath("dom_pos_boot.pickle"), 'rb'))
    custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
    c_helix = custom_cmap[2]
    c_sheet = custom_cmap[11]
    # Overrides the single colour above: helix, sheet, plus two accent colours
    col = [c_helix, c_sheet, "#CB7CE6", "#79C726"]
    # ttls = ["Two-domain", "Three-domain"]
    xlbls = [r'$E_{\alpha}$', r'$E_{\beta}$']
    for i in range(2):
        # dom_pos_boot[2]: results for two-domain proteins, keyed by position
        for j, (pos, dat) in enumerate(dom_pos_boot[2].items()):
            # dat[0]: bootstrap quantile edges; dat[1]: bootstrap enrichment samples
            quantiles = dat[0].mean(axis=0)
            mean = dat[1][:,i,:].mean(axis=0)
            # 95% CI half-widths around the bootstrap mean
            lo = np.abs(np.quantile(dat[1][:,i,:], 0.025, axis=0) - mean)
            hi = np.abs(np.quantile(dat[1][:,i,:], 0.975, axis=0) - mean)
            ax[j].bar(np.arange(10)+(i+1)*sep, mean, sep, yerr=(lo, hi), color=col[i], label=xlbls[i], alpha=0.7, error_kw={'lw':.8})
            ax[j].set_xticks(np.arange(len(quantiles)))
            ax[j].set_xticklabels(np.round(quantiles, 1), rotation=90)
        ax[i].spines['top'].set_visible(False)
        ax[i].spines['right'].set_visible(False)
        ax[i].set_title(ttls[i], loc='left')
        ax[i].set_xlabel(r'$\log_{10}R$')
        # ax[i,k].set_ylabel('N terminal\nEnrichment')
        ax[i].set_ylabel("N Terminal Enrichment")
    ax[0].legend(bbox_to_anchor=(1.17, 1.12), frameon=False, ncol=3)
    fig.savefig(PATH_FIG.joinpath("si6.pdf"), bbox_inches='tight')
####################################################################
### FIG 7
def fig7(pdb, Y='D_ASYM'):
    """SI figure 7: N-terminal enrichment of the asymmetry column Y, binned by
    deciles of log10(R), sequence length and contact order (rows), for the full
    sample and for eukaryotes/prokaryotes (columns); saved to si7.pdf.
    """
    fig, ax = plt.subplots(3,3, figsize=(12,8))
    fig.subplots_adjust(hspace=0.5, wspace=0.5)
    sep = 0.05
    col = Paired_12.hex_colors[7]
    xlbls = [r'$\log_{10} R$', 'Sequence Length', 'Contact Order']
    ttls = ['Full sample', 'Eukaryotes', 'Prokaryotes']
    # k_trans==5 / ==10 select eukaryotic / prokaryotic translation rates
    for k, df in enumerate([pdb, pdb.loc[pdb.k_trans==5], pdb.loc[pdb.k_trans==10]]):
        for i, X in enumerate(['REL_RATE', 'AA_PDB', 'CO']):
            quantiles = df[X].quantile(np.arange(0,1.1,.1)).values
            # NOTE(review): assigning to a column of a .loc slice may trigger
            # pandas SettingWithCopyWarning — confirm intended.
            df['quant'] = df[X].apply(lambda x: utils.assign_quantile(x, quantiles))
            ratio = []
            for j in range(len(quantiles)-1):
                # Fraction of proteins with negative / positive asymmetry in decile j;
                # max(1, ...) guards against empty deciles
                left = len(df.loc[(df.quant==j)&(df[Y]<0)]) / max(1, len(df.loc[(df.quant==j)]))
                right = len(df.loc[(df.quant==j)&(df[Y]>0)]) / max(1, len(df.loc[(df.quant==j)]))
                ratio.append((right - left))
            # print(ratio)
            ax[i,k].bar([sep*j+sep/2 for j in range(10)], ratio, sep/2, color=[col if r > 0 else 'grey' for r in ratio], alpha=.5)
            ax[i,k].set_xticks(np.arange(len(quantiles))*sep)
            if i == 1:
                # Sequence lengths are integers
                ax[i,k].set_xticklabels([int(x) for x in quantiles], rotation=90)
            else:
                ax[i,k].set_xticklabels([round(x,1) for x in quantiles], rotation=90)
            ax[i,k].set_xlabel(xlbls[i])
            ax[i,k].set_ylabel('N terminal\nEnrichment')
        ax[0,k].set_title(ttls[k])
    fig.savefig(PATH_FIG.joinpath("si7.pdf"), bbox_inches='tight')
####################################################################
### FIG 8
def fig8(df_pdb):
    """SI figure 8: theoretical speed-up from cotranslational folding as a
    function of log10(R) (bottom), with secondary-structure diagrams of three
    example proteins (top, via fig8a); saved to si8.pdf.
    """
    fig = plt.figure()
    gs = GridSpec(2,1, wspace=0.0, height_ratios=[.5,1])
    # ax[0]: bottom (speed-up curves); ax[1]: top (structure diagrams)
    ax = [fig.add_subplot(gs[1,0]), fig.add_subplot(gs[0,0])]
    X = np.arange(-3, 3, 0.01)
    # Speed-up with no ribosome overhead
    Y = np.array([(10**x + 1)/max(10**x, 1) for x in X])
    # Speed-up with ribosome overhead of 0.3 * translation time
    Y2 = (1+10**X) / np.array([max(1, 10**x+30./100.) for x in X])
    ax[0].plot(X, Y, '-', label=r"$\tau_{ribo}=0$")
    ax[0].plot(X, Y2, ':', label=r"$\tau_{ribo}=0.3\tau_{trans}$")
    lbls = ['1ILO', '2OT2', '3BID']
    patt = ['o', 's', '^']
    for l, p in zip(lbls, patt):
        # Pre-computed (log10 R, speed-up) points for each example PDB entry
        X, Y = np.load(PATH_FIG_DATA.joinpath(f"{l}.npy"))
        ax[0].plot(X, Y, p, label=l, alpha=0.5, mec='k', ms=7)
    ax[0].set_xlim(-2.3, 2.3)
    ax[0].set_ylim(1, 2.05)
    ax[0].set_xlabel(r'$\log_{10} R$')
    ax[0].set_ylabel("Speed-up")
    ax[0].spines['top'].set_visible(False)
    ax[0].spines['right'].set_visible(False)
    ax[0].legend(loc='upper right', frameon=False, bbox_to_anchor=(1.05, 1.00), ncol=1, labelspacing=.1)
    fig8a(df_pdb, ax[1])
    fig.savefig(PATH_FIG.joinpath("si8.pdf"), bbox_inches='tight')
def fig8a(df_pdb, ax):
    """Draw linear secondary-structure diagrams (helix/sheet/coil segments as
    horizontal bars) for three example proteins on the given Axes.
    """
    lbls = ['2OT2', '1ILO', '3BID']
    # Row indices of the three example proteins in df_pdb
    idx = [98212, 19922, 127370]
    SS = df_pdb.loc[idx, 'SS_PDB2'].values
    custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
    # Per-SS-type styling: '.'=coil, 'D'=disorder, 'H'=helix, 'S'=sheet
    col_key = {'.':'grey', 'D':'grey', 'H':custom_cmap[3], 'S':custom_cmap[9]}
    ec_key = {'.':'grey', 'D':'grey', 'H':custom_cmap[1], 'S':custom_cmap[11]}
    wid_key = {'.':0.1, 'D':0.1, 'H':0.3, 'S':0.3}
    lw_key = {'.':0.7, 'D':0.7, 'H':1.5, 'S':1.5}
    for i, ss in enumerate(SS):
        left = 0.
        for j, strand in enumerate(new_figs.generate_strand(ss)):
            # Each strand is a run of identical SS characters; style by its type
            s = strand[0]
            ax.barh([i], [len(strand)], wid_key[s], left=[left], color=col_key[s], ec=ec_key[s], linewidth=lw_key[s])
            # Small gap between consecutive segments
            left += len(strand) + 0.20
    ax.annotate("N", xy=(-0.01, 1.0), xycoords='axes fraction')
    ax.annotate("C", xy=(0.59, 1.0), xycoords='axes fraction')
    for pos in ['left', 'right', 'top', 'bottom']:
        ax.spines[pos].set_visible(False)
    col = np.array(custom_cmap)[[3,9,1,11]]
    ax.legend(handles=[mpatches.Patch(fc=c1, ec=c2, label=l) for c1, c2, l in zip(col[:2], col[2:], ['Helix', 'Sheet'])],
              loc='upper right', frameon=False, ncol=1, bbox_to_anchor=(0.95, 1.10))
    ax.set_xticks([])
    ax.set_yticks(range(3))
    ax.set_yticklabels(lbls)
    ax.tick_params(axis='y', which='major', length=0, pad=10)
####################################################################
### FIG 9
def fig9(pdb, s='S'):
    """SI figure 9: solvent-accessibility (RSA) profiles near the termini for
    several protein subsets and secondary-structure selections; saved to si9.pdf.

    RSA tercile cut-offs are cached in RSA_quantiles.pickle and recomputed only
    if the cache is missing.
    """
    # Keep entries with usable RSA whose SS string and RSA array lengths agree
    pdb = pdb.loc[(pdb.USE_RSA)]
    pdb = pdb.loc[(pdb.SS_PDB2.str.len()==pdb.RSA.apply(len))]
    path = PATH_FIG_DATA.joinpath("RSA_quantiles.pickle")
    if path.exists():
        quantiles, euk_quantiles, prok_quantiles = pickle.load(open(path, 'rb'))
    else:
        # Terciles (1/3, 2/3, 3/3) of all finite per-residue RSA values
        quantiles = [np.quantile([x for y in pdb['RSA'] for x in y if np.isfinite(x)], x/3) for x in range(1,4)]
        euk_quantiles = [np.quantile([x for y in pdb.loc[pdb.k_trans==5, 'RSA'] for x in y if np.isfinite(x)], x/3) for x in range(1,4)]
        prok_quantiles = [np.quantile([x for y in pdb.loc[pdb.k_trans==10, 'RSA'] for x in y if np.isfinite(x)], x/3) for x in range(1,4)]
        pickle.dump([quantiles, euk_quantiles, prok_quantiles], open(path, 'wb'))
    print(quantiles)
    # fig, ax = plt.subplots(4,3, figsize=(8,8))
    # fig.subplots_adjust(wspace=0.5)
    fig = plt.figure(figsize=(12,9))
    gs = GridSpec(5,3, wspace=0.3, height_ratios=[1,1,1,1,1])
    # Six panel pairs: (probability, asymmetry) stacked vertically
    ax = [fig.add_subplot(gs[j,i]) for i in range(3) for j in [0,1]] + \
         [fig.add_subplot(gs[j,i]) for i in range(3) for j in [3,4]]
    print("All proteins, all SS")
    fig9a(pdb['RSA'], pdb['SS_PDB2'], quantiles, ax[:2], s='SH.D')
    print("euk proteins, all ss")
    fig9a(pdb.loc[pdb.k_trans==5, 'RSA'], pdb.loc[pdb.k_trans==5, 'SS_PDB2'], euk_quantiles, ax[2:4], s='SH.D')
    print("Prok proteins, all SS")
    fig9a(pdb.loc[pdb.k_trans==10, 'RSA'], pdb.loc[pdb.k_trans==10, 'SS_PDB2'], prok_quantiles, ax[4:6], s='SH.D')
    print("Euk proteins, only SHC")
    fig9a(pdb.loc[pdb.k_trans==5, 'RSA'], pdb.loc[pdb.k_trans==5, 'SS_PDB2'], euk_quantiles, ax[6:8], s='SH.')
    print("Euk proteins, only S")
    fig9a(pdb.loc[pdb.k_trans==5, 'RSA'], pdb.loc[pdb.k_trans==5, 'SS_PDB2'], euk_quantiles, ax[8:10], s='S')
    print("Prok proteins, only S")
    fig9a(pdb.loc[pdb.k_trans==10, 'RSA'], pdb.loc[pdb.k_trans==10, 'SS_PDB2'], prok_quantiles, ax[10:12], s='S')
    ttls = ['All proteins\nAll residues', 'Eukaryotic proteins\nAll residues', 'Prokaryotic proteins\nAll residues',
            'Eukaryotic proteins\nHelix, sheet and coil', 'Eukaryotic proteins\nOnly Sheets', 'Prokaryotic proteins\nOnly Sheets']
    col = np.array(list(Paired_12.hex_colors))[[0,2,4,6]]
    lbls = ['Buried', 'Middle', 'Exposed']
    ax[0].set_ylabel('Solvent accessibility\nprobability')
    ax[1].set_ylabel('Solvent accessibility\nasymmetry\n$\\log_2 (N / C)$')
    ax[6].set_ylabel('Solvent accessibility\nprobability')
    ax[7].set_ylabel('Solvent accessibility\nasymmetry\n$\\log_2 (N / C)$')
    # Combined legend: line style encodes terminus, colour encodes burial class
    handles = [Line2D([0], [0], ls=ls, c=c, label=l) for ls, c, l in zip(['-', '--'], ['k']*2, ['N', 'C'])] + \
              [Line2D([0], [0], ls='-', c=c, label=l) for l, c in zip(lbls, col)]
    ax[8].legend(handles=handles, bbox_to_anchor=(1.30, 1.85), frameon=False,
                 ncol=5, columnspacing=1.5, handlelength=2.0, labelspacing=2.0)
    for i, a in enumerate(ax):
        if i % 2:
            # Odd panels (asymmetry) carry the shared x axis
            ax[i].set_xticks(range(0, 60, 10))
            ax[i].set_xlabel('Sequence distance from ends')
        else:
            ax[i].set_xticks([])
            ax[i].set_title(ttls[i//2])
        ax[i].set_xlim(0, 50)
    fig.savefig(PATH_FIG.joinpath("si9.pdf"), bbox_inches='tight')
def fig9a(rsa_list, ss_list, quantiles, ax, s='S'):
    """Plot solvent-accessibility profiles near the N and C termini.

    rsa_list, ss_list : per-protein RSA arrays and secondary-structure strings.
    quantiles : RSA cut-offs separating Buried / Middle / Exposed residues.
    ax : pair of Axes; ax[0] gets N/C probability curves, ax[1] the
        log2(N/C) asymmetry per burial class.
    s : secondary-structure characters to include (forwarded as ss_key).
    """
    cat = 'BME'  # Buried / Middle / Exposed
    countN, countC = utils.sheets_rsa_seq_dist(rsa_list, ss_list, quantiles, ss_key=s)
    col = np.array(list(Paired_12.hex_colors))[[0,2,4,6]]
    base = np.zeros(len(countN[cat[0]]), dtype=float)
    # Position-wise totals over all burial categories, used to normalise counts
    YtN = np.array(list(countN.values())).sum(axis=0)
    YtC = np.array(list(countC.values())).sum(axis=0)
    X = np.arange(base.size)
    # FIX: loop variable renamed from `s`, which shadowed the ss_key parameter
    for i, burial in enumerate(cat):
        YN = countN[burial]
        YC = countC[burial]
        ax[0].plot(YN/YtN, '-', c=col[i], label=f"{burial} N")
        ax[0].plot(YC/YtC, ':', c=col[i], label=f"{burial} C")
        ax[1].plot(np.log2(YN/YC*YtC/YtN), '-', c=col[i], label=f"{burial}")
        # Report the N/C enrichment ratio over the first 20 positions
        print(burial, np.round((np.sum(YN[:20]) / np.sum(YtN[:20])) / (np.sum(YC[:20]) / np.sum(YtC[:20])), 2))
    ax[1].plot([0]*base.size, ':', c='k')
    ax[0].set_ylim(0,1)
    ax[1].set_ylim(-1,1)
    for a in ax:
        a.set_xlim(X[0], X[-1])
####################################################################
### FIG 10
def fig10(pdb):
    """SI figure 10: ACPro-based folding-rate analyses — log10(R) distributions,
    enrichment panels (fig10b), and L-vs-kf regressions; saved to si10.pdf.
    """
    pfdb = asym_io.load_pfdb()
    acpro = asym_io.load_acpro()
    fig = plt.figure(figsize=(12,9))
    gs = GridSpec(3,7, wspace=0.0, width_ratios=[5,0.2,5,0.4,3,1.0,6], height_ratios=[1,.3,1])
    ax = [fig.add_subplot(gs[2,i*2]) for i in range(4)] + \
         [fig.add_subplot(gs[0,0:3]), fig.add_subplot(gs[0,5:])]
    # sns.distplot(pdb.ln_kf, ax=ax[5], label='PDB - PFDB fit', hist=False)
    pdb = pdb.copy()
    # Refit folding rate against length using the ACPro data set
    coef = folding_rate.linear_fit(np.log10(acpro['L']), acpro['log_kf']).params
    pdb['ln_kf'] = folding_rate.pred_fold(np.log10(pdb.AA_PDB), coef)
    pdb = utils.get_rel_rate(pdb)
    fig10a(fig, ax[4])
    fig10b(fig, ax[:4], pdb)
    # sns.distplot(pdb.ln_kf, ax=ax[5], label='PDB - ACPro fit', hist=False)
    # sns.distplot(pfdb.log_kf, ax=ax[5], label='PFDB data', kde=False, norm_hist=True)
    # sns.distplot(acpro["ln kf"], ax=ax[5], label='KDB data', kde=False, norm_hist=True)
    # FIX: target ax[5] explicitly rather than relying on the implicit
    # "current axes" — the labels and legend below are set on ax[5].
    sns.regplot(np.log10(acpro['L']), acpro['log_kf'], label='ACPro data', scatter_kws={"alpha":0.5}, ax=ax[5])
    sns.regplot(np.log10(pfdb.loc[pfdb.use, 'L']), pfdb.loc[pfdb.use, 'log_kf'], label='PFDB data', scatter_kws={"alpha":0.5}, ax=ax[5])
    ax[5].legend(loc='best', frameon=False)
    ax[5].set_xlabel(r"$\log_{10}L$")
    ax[5].set_ylabel(r"$\log_{10}k_f$")
    fs = 14
    for i, b in zip([4,5,0,2,3], list('ABCDEFGHI')):
        ax[i].text( -0.20, 1.16, b, transform=ax[i].transAxes, fontsize=fs)
    fig.savefig(PATH_FIG.joinpath("si10.pdf"), bbox_inches='tight')
def fig10a(fig, ax):
    """Plot bootstrapped log10(R) distributions (ACPro-based) for the full
    sample and the prokaryote/eukaryote subsets on the given Axes.
    """
    with open(PATH_FIG_DATA.joinpath("R_dist_acpro.pickle"), 'rb') as fh:
        dist = pickle.load(fh)
    palette = sns.diverging_palette(230, 22, s=100, l=47, n=13)
    colours = [palette[2], palette[10], "#CB7CE6", "#79C726"]
    grid = dist['grid']
    for colour, key, label in zip(colours, ['All', 'Prok', 'Euk'],
                                  ['All', 'Prokaryotes', 'Eukaryotes']):
        # Each entry holds (mean curve, lower CI, upper CI)
        mean_curve, lo_curve, hi_curve = dist[key][0], dist[key][1], dist[key][2]
        ax.plot(grid, mean_curve, '-', c=colour, label=label)
        ax.fill_between(grid, lo_curve, hi_curve, color=colour, alpha=0.5)
    # Vertical guide at R = 1 (log10 R = 0)
    ax.plot([0, 0], [0, 0.60], ':', c='k', alpha=0.7)
    ax.set_xlabel(r'$\log_{10} R$')
    ax.set_ylabel('Density')
    ax.set_xticks(np.arange(-6, 5, 2))
    ax.set_xlim(-7, 2)
    ax.set_ylim(0, 0.60)
    ax.legend(loc='upper center', bbox_to_anchor=(0.55, 1.17), frameon=False, ncol=3, columnspacing=2)
    for side in ('top', 'right'):
        ax.spines[side].set_visible(False)
def fig10b(fig, ax, pdb, Y='S_ASYM'):
    """Plot stacked asymmetry histograms per log10(R) decile (ax[0], ax[1]),
    N-terminal enrichment bars (ax[2]), and eukaryote/prokaryote enrichment
    comparison (ax[3]), using the ACPro-based enrichment bootstrap files.
    """
    ft = 12
    X = np.arange(10)
    width = .35
    ttls = [r'$\alpha$ Helix', r'$\beta$ Sheet']
    lbls = [r'$E_{\alpha}$', r'$E_{\beta}$']
    # col = np.array(Paired_12.hex_colors)[[1,5]]
    custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
    c_helix = custom_cmap[0]
    c_sheet = custom_cmap[12]
    col = [c_helix, c_sheet]
    bins = np.linspace(-0.20, 0.20, 80)
    # X/width redefined: bin centres and bin width of the histograms
    width = np.diff(bins[:2])
    X = bins[:-1] + width * 0.5
    mid = 39  # index splitting the histogram into negative/positive halves
    sep = 0.05  # vertical offset between stacked decile histograms
    enrich_data = pickle.load(open(PATH_FIG_DATA.joinpath("fig3_enrich_acpro.pickle"), 'rb'))
    quantiles = enrich_data['edges'].mean(axis=0)
    for i, Y in enumerate(['H_ASYM', 'S_ASYM']):
        for j in range(len(quantiles)-1):
            hist, bins = np.histogram(pdb.loc[pdb.quant==j, Y], bins=bins)
            hist = hist / hist.sum()
            # total = len(pdb)/10
            # left = len(pdb.loc[(pdb.quant==j)&(pdb[Y]<0)]) / total
            # right = len(pdb.loc[(pdb.quant==j)&(pdb[Y]>0)]) / total
            # print(Y, j, ''.join([f"{x:6.3f}" for x in [left, right, left/right, right / left]]))
            # Highlight the positive half for sheets (i=1), negative for helices (i=0)
            if i:
                ax[i].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
                ax[i].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color=col[i], alpha=.5)
            else:
                ax[i].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color=col[i], alpha=.5)
                ax[i].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
            ax[i].plot(X[:mid], (hist/hist.sum()+sep*j)[:mid], '-', c='k', alpha=.5)
            ax[i].plot(X[-mid:], (hist/hist.sum()+sep*j)[-mid:], '-', c='k', alpha=.5)
        # Bootstrap mean and 95% CI of enrichment; key 'H' or 'S' from column name
        mean = np.mean(enrich_data[Y[0]], axis=0)
        lo = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.025, axis=0))
        hi = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.975, axis=0))
        ax[2].barh([sep*j+(i+.7)*sep/3 for j in range(10)], mean, sep/3, xerr=(lo, hi), color=col[i], ec='k', alpha=.5, label=lbls[i], error_kw={'lw':.8})
    ax[2].plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
    ax[0].set_yticks(np.arange(len(quantiles))*sep)
    ax[0].set_yticklabels([round(x,1) for x in quantiles])
    ax[2].legend(loc='upper center', ncol=2, columnspacing=1.5, frameon=False,
                 bbox_to_anchor=(0.52, 1.15))
    for i, t in zip([0,1], ttls):
        ax[i].set_title(t)
        ax[i].set_xlim(-.15, .15)
        ax[i].set_xticks([-.1, 0, .1])
    for i in range(3):
        ax[i].spines['top'].set_visible(False)
        ax[i].spines['right'].set_visible(False)
        ax[i].set_ylim(0-sep/4, 0.5+sep)
    for i in [1,2]:
        ax[i].spines['left'].set_visible(False)
        ax[i].set_yticks([])
    ax[0].set_xlabel(r"asym$_{\alpha}$")
    ax[1].set_xlabel(r"asym$_{\beta}$")
    ax[0].set_ylabel(r'$\log_{10}R$')
    ax[2].set_xlabel('N terminal\nEnrichment')
    # Fourth panel: kingdom comparison, viruses excluded
    pdb = pdb.loc[pdb.OC!='Viruses']
    X = np.arange(10)
    # Reuses the final loop values of i/sep from above to position the bars
    X = np.array([sep*j+(i+.7)*sep/3 for j in range(10)])
    width = .175
    ttls = ['Eukaryote ', 'Prokaryote ']
    lbls = [r'$E_{\alpha}$', r'$E_{\beta}$']
    custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
    col = [custom_cmap[i] for i in [3, 9, 0, 12]]
    paths = [f"fig3_enrich_{a}_acpro.pickle" for a in ['eukaryote', 'prokaryote']]
    for i, path in enumerate(paths):
        enrich_data = pickle.load(open(PATH_FIG_DATA.joinpath(path), 'rb'))
        for j, Y in enumerate(['H_ASYM', 'S_ASYM']):
            # adjust = (j - 1 + i*2)*width
            # Horizontal offset so the four bar series do not overlap
            adjust = (j*2 - 4.0 + i)*(sep/5)
            mean = np.mean(enrich_data[Y[0]], axis=0)
            lo = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.025, axis=0))
            hi = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.975, axis=0))
            print(i, Y, max(np.abs(mean)))
            ax[3].barh(X+adjust, mean, sep/5.0, ec='k', xerr=(lo, hi), color=col[i*2+j],
                       label=ttls[i]+lbls[j], lw=0.001, error_kw={'lw':.2})
    ax[3].plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
    ax[3].set_yticks(np.arange(len(quantiles))*sep)
    ax[3].set_ylabel(r'$\log_{10} R$')
    ax[3].set_yticklabels([round(x,1) for x in quantiles])
    ax[3].set_xlabel('N terminal\nEnrichment')
    ax[3].set_xlim(-.42, .42)
    ax[3].set_ylim(0-sep/4, 0.5+sep)
    ax[3].spines['top'].set_visible(False)
    ax[3].spines['left'].set_visible(False)
    handles = [mpatches.Patch([], [], color=col[j*2+i], label=ttls[j]+lbls[i]) for i in [0,1] for j in [1,0]]
    ax[3].legend(handles=handles, bbox_to_anchor=(1.05, 1.25), frameon=False,
                 loc='upper right', ncol=2, columnspacing=1.0, handlelength=1.5)
    ax[3].yaxis.set_label_position('right')
    ax[3].yaxis.tick_right()
####################################################################
### FIG 11
def fig11(pdb, X='AA_PDB', Y='CO', w=.1, ax='', fig=''):
    """SI figure 11: mean helix/sheet asymmetry on a (sequence length x contact
    order) quantile grid, one row per CO configuration; saved to si11.pdf.

    ax/fig : pass existing Axes/Figure, or leave as the string sentinel '' to
    create a new 4x2 figure.
    """
    if isinstance(ax, str):
        fig, ax = plt.subplots(4,2, figsize=(9,12))
        fig.subplots_adjust(wspace=0.0, hspace=0.65)
    # ax = ax.reshape(ax.size)
    # Alternative contact-order definitions, one column per configuration
    pdb_CO = np.load(PATH_FIG_DATA.joinpath("pdb_config_CO.npy"))[:,:,0]
    df = pdb.copy()
    q = np.arange(w,1+w,w)
    lbls = ['Helix', 'Sheet']
    # cb_lbl = [r"$E_{\alpha}$", r"$E_{\beta}$"]
    cb_lbl = [r"$asym_{\alpha}$", r"$asym_{\beta}$"]
    vmax = 0.03
    vmin = -vmax
    for j, co in enumerate(pdb_CO.T):
        df['CO'] = co
        # Quantile bin edges for both grid axes (min prepended as first edge)
        quant1 = [df[X].min()] + list(df[X].quantile(q).values)
        quant2 = [df[Y].min()] + list(df[Y].quantile(q).values)
        for i, Z in enumerate(['H_ASYM', 'S_ASYM']):
            mean = []
            for l1, h1 in zip(quant1[:-1], quant1[1:]):
                for l2, h2 in zip(quant2[:-1], quant2[1:]):
                    # Mean asymmetry of proteins in this 2-d quantile cell
                    samp = df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2), Z]
                    mean.append(samp.mean())
                    # left = len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)&(df[Z]<0)])
                    # right = len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)&(df[Z]>0)])
                    # tot = max(len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)]), 1)
                    # mean.append((right - left)/tot)
            cmap = sns.diverging_palette(230, 22, s=100, l=47, as_cmap=True)
            norm = colors.BoundaryNorm([vmin, vmax], cmap.N)
            bounds = np.linspace(vmin, vmax, 3)
            im = ax[j,i].imshow(np.array(mean).reshape(q.size, q.size).T, cmap=cmap, vmin=vmin, vmax=vmax)
            cbar = fig.colorbar(im, cmap=cmap, ticks=bounds, ax=ax[j,i], fraction=0.046, pad=0.04)
            cbar.set_label(cb_lbl[i], labelpad=-5)
            ax[j,i].set_title(lbls[i])
            ax[j,i].set_xticks(np.arange(q.size+1)-0.5)
            ax[j,i].set_yticks(np.arange(q.size+1)-0.5)
            ax[j,i].set_xticklabels([int(x) for x in quant1], rotation=90)
            ax[j,i].set_yticklabels([int(round(x,0)) for x in quant2])
    for a in ax.ravel():
        a.invert_yaxis()
        a.set_xlabel('Sequence Length')
        a.set_ylabel('Contact Order')
        a.tick_params(axis='both', which='major', direction='in')
    fs = 14
    for i, b in zip(range(4), list('ABCDEFGHI')):
        ax[i,0].text( -0.20, 1.16, b, transform=ax[i,0].transAxes, fontsize=fs)
    fig.savefig(PATH_FIG.joinpath("si11.pdf"), bbox_inches='tight')
def fig12(pdb, X='REL_RATE', Y='S_ASYM', w=0.1):
    """SI figure 12: asymmetry sign-bias per log10(R) decile at three asymmetry
    thresholds (rows), as difference (left) and log-ratio (right); si12.pdf.
    """
    fig = plt.figure(figsize=(8,12))
    gs = GridSpec(3,2, wspace=0.4, hspace=0.5, width_ratios=[1,1])
    ax_all = [[fig.add_subplot(gs[j,i]) for i in [0,1]] for j in range(3)]
    custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
    c_helix = custom_cmap[2]
    c_sheet = custom_cmap[10]
    col = [c_helix, c_sheet]
    bins = np.linspace(-0.20, 0.20, 80)
    width = np.diff(bins[:2])
    mid = 39
    sep = 0.05
    lbls = ['Sheet', 'Helix']
    quantiles = pdb[X].quantile(np.arange(0,1+w,w)).values
    # print(np.round(quantiles, 2))
    # NOTE(review): mutates the caller's DataFrame by adding a 'quant' column
    pdb['quant'] = pdb[X].apply(lambda x: utils.assign_quantile(x, quantiles))
    # pdb['quant'] = np.random.choice(pdb['quant'], len(pdb), replace=False)
    for ax, threshold in zip(ax_all, [0, 0.025, 0.05]):
        print(f"threshold = {threshold}")
        for i, Y in enumerate(['S_ASYM', 'H_ASYM']):
            ratio1 = []
            ratio2 = []
            lefts = []
            rights = []
            for j in range(len(quantiles)-1):
                hist, bins = np.histogram(pdb.loc[pdb.quant==j, Y], bins=bins)
                hist = hist / hist.sum()
                # Fraction of decile-j proteins beyond the +/- threshold;
                # max(..., 1) guards against empty deciles
                left = len(pdb.loc[(pdb.quant==j)&(pdb[Y]<-threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
                right = len(pdb.loc[(pdb.quant==j)&(pdb[Y]>threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
                lefts.append(left)
                rights.append(right)
                ratio1.append((right - left))
                ratio2.append(np.log2(right / left))
                print(Y, j, left, right)
            xgrid = [sep*j+(i+1.0)*sep/3 for j in range(len(quantiles)-1)]
            ax[0].barh(xgrid, ratio1, sep/3, color=col[i], alpha=.5)
            ax[1].barh(xgrid, ratio2, sep/3, color=col[i], alpha=.5)
        ax[0].set_xticks(np.arange(-0.3, 0.4, 0.1))
        for a in ax:
            a.set_yticks(np.arange(len(quantiles))*sep)
            a.set_yticklabels([round(x,1) for x in quantiles])
            a.plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
            a.spines['top'].set_visible(False)
            a.spines['right'].set_visible(False)
            a.set_ylim(0, 0.5)
            a.set_ylabel(r'$\log_{10}R$')
        ax[0].set_xlim(-0.35, 0.35)
        ax[1].set_xlim(-1.50, 1.50)
        # x labels show the threshold used in this row
        ax[0].set_xlabel(r'$P(\mathregular{{asym}} \geq {0}) - P(\mathregular{{asym}} \leq -{0})$'.format(*[threshold]*2))
        ax[1].set_xlabel(r'$\log_{{2}} \frac{{P(\mathregular{{asym}} \geq {0})}}{{P(\mathregular{{asym}} \leq -{0})}} $'.format(*[threshold]*2))
    fig.savefig(PATH_FIG.joinpath("si12.pdf"), bbox_inches='tight')
def fig13(df, X='AA_PDB', Y='CO', w=.1, ax='', fig=''):
    """SI figure 13: mean helix/sheet asymmetry on a (length x contact order)
    quantile grid, plus a sample-count panel; saved to si13.pdf.

    ax/fig : pass existing Axes/Figure, or leave as the string sentinel ''
    to create a new 1x3 figure.
    """
    if isinstance(ax, str):
        fig, ax = plt.subplots(1,3, figsize=(15,4))
        fig.subplots_adjust(wspace=0.5)
    q = np.arange(w,1+w,w)
    # Quantile bin edges for both grid axes (min prepended as first edge)
    quant1 = [df[X].min()] + list(df[X].quantile(q).values)
    quant2 = [df[Y].min()] + list(df[Y].quantile(q).values)
    lbls = ['Helix', 'Sheet']
    cb_lbl = [r"$asym_{\alpha}$", r"$asym_{\beta}$"]
    vmax = 0.03
    vmin = -vmax
    count = []
    for i, Z in enumerate(['H_ASYM', 'S_ASYM']):
        mean = []
        for l1, h1 in zip(quant1[:-1], quant1[1:]):
            for l2, h2 in zip(quant2[:-1], quant2[1:]):
                samp = df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2), Z]
                mean.append(samp.mean())
                # left = len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)&(df[Z]<0)])
                # right = len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)&(df[Z]>0)])
                # tot = max(len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)]), 1)
                # mean.append((right - left)/tot)
                # Cell counts collected only on the first pass (same for both Z)
                if not i:
                    count.append(len(samp))
                    # print(len(samp))
        mean = np.array(mean).reshape(q.size, q.size)
        # Idempotent on the second pass: count is already a (q, q) array
        count = np.array(count).reshape(q.size, q.size)
        cmap = sns.diverging_palette(230, 22, s=100, l=47, as_cmap=True)
        norm = colors.BoundaryNorm([vmin, vmax], cmap.N)
        bounds = np.linspace(vmin, vmax, 3)
        im = ax[i].imshow(mean.T, cmap=cmap, vmin=vmin, vmax=vmax)
        cbar = fig.colorbar(im, cmap=cmap, ticks=bounds, ax=ax[i], fraction=0.046, pad=0.04)
        cbar.set_label(cb_lbl[i], labelpad=-5)
        ax[i].set_title(lbls[i])
        ax[i].set_xticks(np.arange(q.size+1)-0.5)
        ax[i].set_yticks(np.arange(q.size+1)-0.5)
        ax[i].set_xticklabels([int(x) for x in quant1], rotation=90)
        ax[i].set_yticklabels([int(round(x,0)) for x in quant2])
    # Third panel: number of proteins per grid cell
    for i in [2]:
        cmap = plt.cm.Greys
        # norm = colors.BoundaryNorm([-.04, .04], cmap.N)
        # bounds = np.linspace(-.04, .04, 5)
        im = ax[i].imshow(np.array(count).reshape(q.size, q.size).T, cmap=cmap, vmin=0)
        cbar = fig.colorbar(im, cmap=cmap, ax=ax[i], fraction=0.046, pad=0.04)
        cbar.set_label('Count')
        ax[i].set_title('Distribution')
        ax[i].set_xticks(np.arange(q.size+1)-0.5)
        ax[i].set_yticks(np.arange(q.size+1)-0.5)
        ax[i].set_xticklabels([int(x) for x in quant1], rotation=90)
        ax[i].set_yticklabels([int(round(x,0)) for x in quant2])
    for a in ax:
        a.invert_yaxis()
        a.set_xlabel('Sequence Length')
        a.set_ylabel('Contact Order')
        a.tick_params(axis='both', which='major', direction='in')
    fs = 14
    for i, b in zip([0,1,2], list('ABCDEFGHI')):
        ax[i].text( -0.20, 1.05, b, transform=ax[i].transAxes, fontsize=fs)
    fig.savefig(PATH_FIG.joinpath("si13.pdf"), bbox_inches='tight')
def scop_ss():
    """SI figure 14: secondary-structure probability (top) and N/C asymmetry
    (bottom) near the termini from SCOP-independent bootstrap data; si14.pdf.
    """
    fig, ax = plt.subplots(2,1)
    cat = 'HS.D'  # Helix, Sheet, coil ('.'), Disorder
    N = 50
    X = np.arange(50)
    Nboot, Cboot, asym, enrich_edges, enrich_vals = pickle.load(open(PATH_FIG_DATA.joinpath(f"pdb_scop_indep.pickle"), 'rb'))
    data = [Nboot, Cboot, asym]
    custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
    c_helix = custom_cmap[2]
    c_sheet = custom_cmap[10]
    col = [c_helix, c_sheet, "#CB7CE6", "#79C726"]
    lbls = ['Helix', 'Sheet', 'Coil', 'Disorder']
    for j, s in enumerate(cat):
        # N-terminus: solid; C-terminus: dashed; /4 normalises the bootstrap sums
        ax[0].plot(X, data[0][s]['mean']/4, '-', c=col[j], label=f"{s} N")
        ax[0].fill_between(X, data[0][s]['hi']/4, data[0][s]['lo']/4, color="grey", label=f"{s} N", alpha=0.5)
        ax[0].plot(X, data[1][s]['mean']/4, '--', c=col[j], label=f"{s} N")
        ax[0].fill_between(X, data[1][s]['hi']/4, data[1][s]['lo']/4, color="grey", label=f"{s} N", alpha=0.2)
        # Mean asymmetry overall, and over the first/second 20 positions
        print(s, round(np.mean(data[2][s]['mean']), 2), round(np.mean(data[2][s]['mean'][:20]), 2), round(np.mean(data[2][s]['mean'][20:]), 2))
        ax[1].plot(X, np.log2(data[2][s]['mean']), '-', c=col[j], label=lbls[j])
        ax[1].fill_between(X, np.log2(data[2][s]['hi']), np.log2(data[2][s]['lo']), color="grey", label=f"{s} N", alpha=0.2)
    ax[1].set_ylim(-1, 1.3)
    ax[1].plot([0]*50, '-', c='k')
    ax[1].set_yticks(np.arange(-1,1.5,0.5))
    ax[0].set_ylim(0, 0.6)
    ax[1].set_xlabel('Sequence distance from ends')
    ax[0].set_ylabel('Secondary structure\nprobability')
    ax[1].set_ylabel('Structural asymmetry\n$\\log_2 (N / C)$')
    fs = 14
    for i, b in zip([0,1], list('ABCDEFGHI')):
        ax[i].text( -0.10, 1.05, b, transform=ax[i].transAxes, fontsize=fs)
    fig.savefig(PATH_FIG.joinpath("si14.pdf"), bbox_inches='tight')
def percentage_asym(x):
    """Convert a log2 asymmetry value into a signed percentage difference.

    E.g. x = 1 (two-fold N excess) -> 100; x = -1 -> -100; x = 0 -> 0.
    """
    sign = np.sign(x)
    return sign * (100 * 2 ** abs(x) - 100)
def fig15():
    """SI figure 15: secondary-structure probability (top), log2 N/C asymmetry
    (middle) and percentage asymmetry per 20-residue window (bottom) over the
    first 100 positions; saved to si15.pdf.
    """
    fig, ax = plt.subplots(3,1, figsize=(10,10))
    cat = 'HS.D'  # Helix, Sheet, coil ('.'), Disorder
    N = 100
    X = np.arange(N)
    Nboot, Cboot, asym, = pickle.load(open(PATH_FIG_DATA.joinpath(f"pdb_ss_max_asym.pickle"), 'rb'))
    data = [Nboot, Cboot, asym]
    custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
    c_helix = custom_cmap[2]
    c_sheet = custom_cmap[10]
    col = [c_helix, c_sheet, "#CB7CE6", "#79C726"]
    lbls = ['Helix', 'Sheet', 'Coil', 'Disorder']
    X2 = np.arange(5)
    for j, s in enumerate(cat):
        # N-terminus: solid; C-terminus: dashed; /2 normalises the bootstrap sums
        ax[0].plot(X, data[0][s]['mean']/2, '-', c=col[j], label=f"{s} N")
        ax[0].fill_between(X, data[0][s]['hi']/2, data[0][s]['lo']/2, color="grey", label=f"{s} N", alpha=0.5)
        ax[0].plot(X, data[1][s]['mean']/2, '--', c=col[j], label=f"{s} N")
        ax[0].fill_between(X, data[1][s]['hi']/2, data[1][s]['lo']/2, color="grey", label=f"{s} N", alpha=0.2)
        # Mean asymmetry in consecutive 20-residue windows
        for k in range(5):
            print(s, round(np.mean(data[2][s]['mean']), 2), round(np.mean(data[2][s]['mean'][k*20:(k+1)*20]), 2))
        ax[1].plot(X, np.log2(data[2][s]['mean']), '-', c=col[j], label=lbls[j])
        ax[1].fill_between(X, np.log2(data[2][s]['hi']), np.log2(data[2][s]['lo']), color="grey", label=f"{s} N", alpha=0.2)
        # Bottom panel only for helix and sheet
        if s in 'HS':
            Y2 = [percentage_asym(np.log2(data[2][s]['mean'])[k*20:(k+1)*20].mean()) for k in range(5)]
            ax[2].bar(X2, Y2, 0.5, color=col[j], label=lbls[j], ec='k')
    ax[1].set_ylim(-1.5, 2.0)
    ax[1].plot([0]*100, '-', c='k')
    ax[2].plot([0]*5, '-', c='k')
    ax[1].set_yticks(np.arange(-1,2.5,0.5))
    ax[0].set_ylim(0, 0.6)
    ax[2].set_xticks(np.arange(5))
    ax[2].set_xticklabels([f"{i*20} - {(i+1)*20}" for i in range(5)])
    ax[0].set_xlabel('Sequence distance from ends')
    ax[1].set_xlabel('Sequence distance from ends')
    ax[2].set_xlabel('Sequence distance from ends')
    ax[0].set_ylabel('Secondary structure\nprobability')
    ax[1].set_ylabel('Structural asymmetry\n$\\log_2 (N / C)$')
    ax[2].set_ylabel('Percentage asymmetry')
    fs = 14
    for i, b in zip([0,1,2], list('ABCDEFGHI')):
        ax[i].text( -0.10, 1.05, b, transform=ax[i].transAxes, fontsize=fs)
    fig.savefig(PATH_FIG.joinpath("si15.pdf"), bbox_inches='tight')
def oligomer(pdb, X='REL_RATE', Y='S_ASYM', w=0.1):
    """SI figure 16: asymmetry sign-bias per log10(R) decile, compared between
    monomers and oligomers (NPROT); saved to si16.pdf and oligomers.png.
    """
    pdb = pdb.copy()
    fig = plt.figure(figsize=(8,8))
    gs = GridSpec(2,2, wspace=0.4, hspace=0.5, width_ratios=[1,1])
    ax_all = [[fig.add_subplot(gs[j,i]) for i in [0,1]] for j in range(2)]
    custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
    c_helix = custom_cmap[2]
    c_sheet = custom_cmap[10]
    col = [c_helix, c_sheet]
    bins = np.linspace(-0.20, 0.20, 80)
    width = np.diff(bins[:2])
    mid = 39
    sep = 0.05
    threshold = 0
    lbls = [r'$E_{\beta}$', r'$E_{\alpha}$']
    ttls = ['Monomers', 'Oligomers']
    # One row per subset: single-chain vs multi-chain proteins
    for ax, idx, ttl in zip(ax_all, [pdb.NPROT==1, pdb.NPROT>1], ttls):
        quantiles = pdb.loc[idx, X].quantile(np.arange(0,1+w,w)).values
        pdb['quant'] = pdb.loc[idx, X].apply(lambda x: utils.assign_quantile(x, quantiles))
        for i, Y in enumerate(['S_ASYM', 'H_ASYM']):
            ratio1 = []
            ratio2 = []
            lefts = []
            rights = []
            for j in range(len(quantiles)-1):
                hist, bins = np.histogram(pdb.loc[(idx)&(pdb.quant==j), Y], bins=bins)
                hist = hist / hist.sum()
                # Fraction of decile-j proteins below/above the threshold;
                # max(..., 1) guards against empty deciles
                left = len(pdb.loc[(idx)&(pdb.quant==j)&(pdb[Y]<-threshold)]) / max(len(pdb.loc[(idx)&(pdb.quant==j)]), 1)
                right = len(pdb.loc[(idx)&(pdb.quant==j)&(pdb[Y]>threshold)]) / max(len(pdb.loc[(idx)&(pdb.quant==j)]), 1)
                lefts.append(left)
                rights.append(right)
                ratio1.append((right - left))
                ratio2.append(np.log2(right / left))
            xgrid = [sep*j+(i+1.0)*sep/3 for j in range(len(quantiles)-1)]
            ax[0].barh(xgrid, ratio1, sep/3, color=col[i], alpha=.5, label=lbls[i])
            ax[1].barh(xgrid, ratio2, sep/3, color=col[i], alpha=.5)
        ax[0].set_xticks(np.arange(-0.3, 0.4, 0.1))
        for a in ax:
            a.set_yticks(np.arange(len(quantiles))*sep)
            a.set_yticklabels([round(x,1) for x in quantiles])
            a.plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
            a.spines['top'].set_visible(False)
            a.spines['right'].set_visible(False)
            a.set_ylim(0, 0.5)
            a.set_ylabel(r'$\log_{10}R$')
            a.set_title(f"{ttl}, N={np.sum(idx)}")
        ax[0].set_xlim(-0.35, 0.35)
        ax[1].set_xlim(-1.50, 1.50)
        ax[0].set_xlabel(r'$P(\mathregular{{asym}} \geq {0}) - P(\mathregular{{asym}} \leq -{0})$'.format(*[threshold]*2))
        ax[1].set_xlabel(r'$\log_{{2}} \frac{{P(\mathregular{{asym}} \geq {0})}}{{P(\mathregular{{asym}} \leq -{0})}} $'.format(*[threshold]*2))
    ax[0].legend(loc='upper center', ncol=2, columnspacing=3, frameon=False,
                 bbox_to_anchor=(1.20, 1.20))
    fig.savefig(PATH_FIG.joinpath("si16.pdf"), bbox_inches='tight')
    fig.savefig(PATH_FIG.joinpath("oligomers.png"), bbox_inches='tight')
def scop2(X='REL_RATE', Y='S_ASYM', w=0.1):
    """Plot bootstrapped N-terminal enrichment per log10(R) decile from the
    SCOP-independent data set (helix and sheet side by side).

    X, Y, w are accepted for signature compatibility; the plotted data come
    from the pre-computed "pdb_scop_indep.pickle" file.
    """
    fig, ax = plt.subplots(figsize=(10,6))
    # Last two pickle entries: bootstrap quantile edges and enrichment samples
    edges, data = pickle.load(open(PATH_FIG_DATA.joinpath("pdb_scop_indep.pickle"), 'rb'))[3:]
    edges = edges[0]
    sep = 0.05
    # FIX: `col` was referenced below without ever being defined (NameError).
    # Define the helix/sheet colours consistently with the other enrichment
    # panels in this module.
    custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
    col = [custom_cmap[2], custom_cmap[10]]
    lbls = [r'$E_{\alpha}$', r'$E_{\beta}$']
    for i, Y in enumerate(['H_ASYM', 'S_ASYM']):
        # Bootstrap mean and 95% CI half-widths of the enrichment per decile
        mean = np.mean(data[:,i], axis=0)
        lo = np.abs(mean - np.quantile(data[:,i], 0.025, axis=0))
        hi = np.abs(mean - np.quantile(data[:,i], 0.975, axis=0))
        ax.barh([sep*j+(i+.7)*sep/3 for j in range(10)], mean, sep/3, xerr=(lo, hi), color=col[i], ec='k', alpha=.5, label=lbls[i], error_kw={'lw':.8})
    ax.plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
    ax.set_yticks(np.arange(len(edges))*sep)
    ax.set_yticklabels([round(x,1) for x in edges])
    ax.legend(loc='upper center', ncol=2, columnspacing=3, frameon=False,
              bbox_to_anchor=(0.52, 1.06))
    ax.set_xlim(-.38, .38)
    ax.set_xticks(np.arange(-.3, .4, .1))
# To create this figure, you need to download the complete
# Human and E. coli proteomes at:
# https://alphafold.ebi.ac.uk/download
# and then change the code so that "base" points to the
# folder that contains the downloaded ".pdb" files
def disorder_proteome(N=100):
    """Plot disorder probability against sequence distance from the N and C
    termini for the Human and E. coli AlphaFold proteomes (SI Fig 17).

    Args:
        N: number of terminal residues to profile from each end.

    The per-proteome profiles are cached as .npy files; computing them the
    first time requires the downloaded AlphaFold .pdb files (see the
    comment above this function).
    """
    fig, ax = plt.subplots(1,2, figsize=(12,4))
    lbls = ["Human", "Ecoli"]
    ttls = ["Human", "E. coli"]
    for i, l in enumerate(lbls):
        path = PATH_FIG_DATA.joinpath(f"alphafold_{l}.npy")
        if not path.exists():
            base = PATH_BASE.joinpath(f"AlphaFold/{l}")
            countN = np.zeros(N, float)  # disorder counts from the N terminus
            countC = np.zeros(N, float)  # disorder counts from the C terminus
            tot = np.zeros(N, float)     # proteins covering each offset
            with Pool(50) as pool:
                # chunksize=10 for the worker pool; one disorder profile
                # per .pdb structure.
                dis = list(pool.imap_unordered(utils.get_disorder_from_conf, base.glob("*pdb"), 10))
            for d in dis:
                # Only count up to half the protein length so the two ends
                # never overlap in short proteins.
                n = min(int(len(d)/2), N)
                countN[:n] = countN[:n] + d[:n]
                countC[:n] = countC[:n] + d[-n:][::-1]
                tot[:n] = tot[:n] + 1
            fracN = countN / tot
            fracC = countC / tot
            np.save(path, np.array([fracN, fracC]))
        else:
            fracN, fracC = np.load(path)
        ax[i].plot(np.arange(N)+1, fracN, '-', label='N')
        ax[i].plot(np.arange(N)+1, fracC, '--', label='C')
        ax[i].set_title(ttls[i])
        ax[i].set_xlabel("Sequence distance from ends")
        ax[i].set_ylabel("Disorder probability")
        ax[i].set_ylim(0, 1)
        ax[i].legend(loc='best', frameon=False)
    fig.savefig(PATH_FIG.joinpath("si17.pdf"), bbox_inches='tight')
def kfold_vs_ss():
    """Scatter + per-class regression of log folding rate vs log sequence
    length for the protein-folding kinetics database (SI Fig 18)."""
    pfdb = asym_io.load_pfdb()
    fig, ax = plt.subplots(figsize=(8,8))
    for c in pfdb.Class.unique():
        X = np.log10(pfdb.loc[pfdb.Class==c, 'L'])
        Y = pfdb.loc[pfdb.Class==c, 'log_kf']
        # seaborn >= 0.12 removed positional data arguments, and regplot
        # draws on the *current* axes unless told otherwise -- pass x/y as
        # keywords and target this figure's axes explicitly.
        sns.regplot(x=X, y=Y, label=c, ax=ax)
    ax.set_xlabel(r"$\log_{10}$ Sequence Length")
    ax.set_ylabel(r"$\log_{10} k_f$")
    ax.legend(loc='best', frameon=False)
    fig.savefig(PATH_FIG.joinpath("si18.pdf"), bbox_inches='tight')
def hbond_asym(pdb, Xl='REL_RATE', Y='hb_asym', w=0.1):
    """Ridge plot of hydrogen-bond-length asymmetry distributions per
    folding-rate quantile, with N-terminal enrichment bars (SI Fig 19).

    Args:
        pdb: DataFrame with columns Xl and Y, plus a precomputed 'quant'
            column labelling each row's quantile bin.
        Xl: column used to define the quantile bins.
        Y: asymmetry column to histogram.
        w: quantile width (0.1 -> deciles).
    """
    fig = plt.figure(figsize=(9,6))
    gs = GridSpec(1,2, wspace=0.2, hspace=0.0, width_ratios=[1,.3])
    ax = [fig.add_subplot(gs[i]) for i in [0,1]]
    col = np.array(Paired_12.hex_colors)[[1,3]]
    bins = np.linspace(-0.20, 0.20, 80)
    width = np.diff(bins[:2])
    X = bins[:-1] + width * 0.5  # bin centres (79 bins)
    mid = 39   # bins on each side of the central bin
    sep = 0.05 # vertical offset between stacked histograms
    quantiles = pdb[Xl].quantile(np.arange(0,1+w,w)).values
    ratio = []
    lefts = []   # NOTE(review): collected but never used below
    rights = []  # NOTE(review): collected but never used below
    threshold = 0.00
    for j in range(len(quantiles)-1):
        hist, bins = np.histogram(pdb.loc[pdb.quant==j, Y], bins=bins)
        hist = hist / hist.sum()
        # Fraction of entries below / above the (zero) threshold;
        # max(..., 1) guards against empty quantile bins.
        left = len(pdb.loc[(pdb.quant==j)&(pdb[Y]<-threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
        right = len(pdb.loc[(pdb.quant==j)&(pdb[Y]>threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
        lefts.append(left)
        rights.append(right)
        ratio.append((right - left))
        # Grey left half and coloured right half of each histogram, offset
        # vertically by quantile index.  hist is already normalised above,
        # so the extra /hist.sum() is a no-op.
        ax[0].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
        ax[0].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color=col[0], alpha=.5)
        ax[0].plot(X[:mid], (hist/hist.sum()+sep*j)[:mid], '-', c='k', alpha=.5)
        ax[0].plot(X[-mid:], (hist/hist.sum()+sep*j)[-mid:], '-', c='k', alpha=.5)
    ax[0].set_yticks(np.arange(len(quantiles))*sep)
    ax[0].set_yticklabels([round(x,1) for x in quantiles])
    # Side panel: N-terminal enrichment (right minus left fraction).
    ax[1].barh([sep*j+sep/2 for j in range(len(quantiles)-1)], ratio, sep/2, color=[col[0] if r > 0 else 'grey' for r in ratio], alpha=.5)
    ax[1].plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
    ax[0].spines['top'].set_visible(False)
    ax[0].spines['right'].set_visible(False)
    ax[1].spines['top'].set_visible(False)
    ax[1].spines['right'].set_visible(False)
    ax[1].spines['left'].set_visible(False)
    ax[1].set_yticks([])
    for a in ax:
        a.set_ylim(0, 0.60)
    ax[0].set_xlabel('Asymmetry in mean hydrogen bond length')
    ax[0].set_ylabel(r'$\log_{10}R$')
    ax[1].set_xlabel('N terminal enrichment')
    fig.savefig(PATH_FIG.joinpath("si19.pdf"), bbox_inches='tight')
def hyd_asym(pdb, Xl='REL_RATE', Y='hyd_asym', w=0.1):
    """Ridge plot of hydrophobicity asymmetry distributions per folding-rate
    quantile, with N-terminal enrichment bars (SI Fig 20).

    Args:
        pdb: DataFrame with columns Xl and Y, plus a precomputed 'quant'
            column labelling each row's quantile bin.
        Xl: column used to define the quantile bins.
        Y: asymmetry column to histogram.
        w: quantile width (0.1 -> deciles).
    """
    fig = plt.figure(figsize=(9,6))
    gs = GridSpec(1,2, wspace=0.2, hspace=0.0, width_ratios=[1,.3])
    ax = [fig.add_subplot(gs[i]) for i in [0,1]]
    col = np.array(Paired_12.hex_colors)[[1,3]]
    bins = np.linspace(-4.5, 4.5, 80)
    width = np.diff(bins[:2])
    X = bins[:-1] + width * 0.5  # bin centres (79 bins)
    mid = 39   # bins on each side of the central bin
    sep = 0.05 # vertical offset between stacked histograms
    quantiles = pdb[Xl].quantile(np.arange(0,1+w,w)).values
    ratio = []
    lefts = []   # NOTE(review): collected but never used below
    rights = []  # NOTE(review): collected but never used below
    threshold = 0.00
    for j in range(len(quantiles)-1):
        hist, bins = np.histogram(pdb.loc[pdb.quant==j, Y], bins=bins)
        hist = hist / hist.sum()
        # Fraction of entries below / above the (zero) threshold;
        # max(..., 1) guards against empty quantile bins.
        left = len(pdb.loc[(pdb.quant==j)&(pdb[Y]<-threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
        right = len(pdb.loc[(pdb.quant==j)&(pdb[Y]>threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
        lefts.append(left)
        rights.append(right)
        ratio.append((right - left))
        # Grey left half and coloured right half of each histogram, offset
        # vertically by quantile index.  hist is already normalised above,
        # so the extra /hist.sum() is a no-op.
        ax[0].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
        ax[0].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color=col[0], alpha=.5)
        ax[0].plot(X[:mid], (hist/hist.sum()+sep*j)[:mid], '-', c='k', alpha=.5)
        ax[0].plot(X[-mid:], (hist/hist.sum()+sep*j)[-mid:], '-', c='k', alpha=.5)
    ax[0].set_yticks(np.arange(len(quantiles))*sep)
    ax[0].set_yticklabels([round(x,1) for x in quantiles])
    # Side panel: N-terminal enrichment (right minus left fraction).
    ax[1].barh([sep*j+sep/2 for j in range(len(quantiles)-1)], ratio, sep/2, color=[col[0] if r > 0 else 'grey' for r in ratio], alpha=.5)
    ax[1].plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
    ax[0].spines['top'].set_visible(False)
    ax[0].spines['right'].set_visible(False)
    ax[1].spines['top'].set_visible(False)
    ax[1].spines['right'].set_visible(False)
    ax[1].spines['left'].set_visible(False)
    ax[1].set_yticks([])
    for a in ax:
        a.set_ylim(0, 0.60)
    ax[0].set_xlabel('Asymmetry in mean hydrophobicity')
    ax[0].set_ylabel(r'$\log_{10}R$')
    ax[1].set_xlabel('N terminal enrichment')
    fig.savefig(PATH_FIG.joinpath("si20.pdf"), bbox_inches='tight')
| 42.085463
| 162
| 0.557723
| 8,901
| 52,691
| 3.21065
| 0.073475
| 0.008713
| 0.007768
| 0.012492
| 0.698194
| 0.622227
| 0.570929
| 0.537756
| 0.493317
| 0.460004
| 0
| 0.055077
| 0.209523
| 52,691
| 1,251
| 163
| 42.119105
| 0.631053
| 0.042721
| 0
| 0.397089
| 0
| 0.006237
| 0.103354
| 0.014935
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030146
| false
| 0
| 0.034304
| 0.00104
| 0.066528
| 0.02183
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
327857254668f20b13612c825f93043e95b1c5c9
| 3,449
|
py
|
Python
|
test_beam_search.py
|
slegroux/slgBeam
|
733049ad4a97f582bc169623941cfbdf3efea207
|
[
"Apache-2.0"
] | null | null | null |
test_beam_search.py
|
slegroux/slgBeam
|
733049ad4a97f582bc169623941cfbdf3efea207
|
[
"Apache-2.0"
] | null | null | null |
test_beam_search.py
|
slegroux/slgBeam
|
733049ad4a97f582bc169623941cfbdf3efea207
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# (c) 2020 Sylvain Le Groux <slegroux@ccrma.stanford.edu>
import pytest
from pytest import approx
import numpy as np
import torch
from IPython import embed
from beam_search import Tokenizer, Score, BeamSearch
@pytest.fixture(scope='module')
def data():
    """Module-scoped fixture: a toy 2-frame probability matrix over the
    symbols 'a', 'b', '-' and a real RNN output matrix loaded from CSV,
    each paired with a BeamSearch instance."""
    # Each CSV row ends with ';', producing an empty last column; drop it.
    mat = torch.Tensor(np.genfromtxt('data/rnnOutput.csv',delimiter=';')[:,: -1])
    # mat = mat.unsqueeze(0)
    # Character alphabet for the full RNN output.
    classes = ' !"#&\'()*+,-./0123456789:;?ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_'
    # Toy example: rows are time frames, columns correspond to 'a', 'b', '-'.
    mat_prob = np.array([[0.2, 0.0, 0.8],
                         [0.4, 0.0, 0.6]])
    syms = 'ab-'
    bs = BeamSearch(syms, mat_prob)
    bs2 = BeamSearch(classes, mat)
    data = {'probs': mat_prob, 'syms': syms, 'bs': bs, 'mat': mat, 'classes': classes, 'bs2': bs2}
    return(data)
def test_data(data):
assert data['probs'].shape == (2,3)
assert data['mat'].shape == (100, 80)
assert len(data['classes']) == 79
def test_tokenizer(data):
    """Round-trip char <-> index on both the toy and the full alphabet."""
    for alphabet, ch, idx in ((data['syms'], 'b', 1),
                              (data['classes'], 'Y', 51)):
        tok = Tokenizer(alphabet)
        assert tok.char2int(ch) == idx
        assert tok.int2char(idx) == ch
def test_score(data):
    """Score lookups against known matrix entries."""
    toy_score = Score(Tokenizer(data['syms']), data['probs'])
    assert toy_score(1, '-') == 0.6
    full_score = Score(Tokenizer(data['classes']), data['mat'])
    assert float(full_score(0, ' ')) == approx(float(0.946499))
def test_init(data):
    """Initial path sets and scores for the toy and full searchers."""
    b, nb, s_b, s_nb = data['bs'].init_paths()
    assert (b, nb) == ({''}, {'a', 'b'})
    assert s_b == {'-': 0.8}
    assert s_nb == {'a': 0.2, 'b': 0.0}
    assert data['bs2'].init_paths()[0] == {''}
def test_prune(data):
    """Pruning keeps only the top-scoring blank/non-blank paths."""
    searcher = data['bs']
    kept_b, kept_nb = searcher.prune_paths(
        {''}, {'a', 'b'}, {'-': 0.2}, {'a': 0.1, 'b': 0.3}, 2)
    assert kept_b == {''}
    assert kept_nb == {'b'}
    print(searcher.score_b, searcher.score_nb)
def test_extend_blank(data):
    """Extend pruned paths with the blank symbol at t=1."""
    searcher = data['bs']
    b0, nb0, sb0, snb0 = searcher.init_paths()
    print("init:", b0, nb0, sb0, snb0)
    # prune_paths also initialises the searcher's global blank/non-blank state
    kept_b, kept_nb = searcher.prune_paths(b0, nb0, sb0, snb0, 2)
    print("Pruned: ", kept_b, kept_nb)
    print(searcher.score_b, searcher.score_nb)
    ext_b, ext_scores = searcher.extend_with_blank(kept_b, kept_nb, 1)
    print(ext_b, ext_scores)
def test_extend_syms(data):
    """Extend pruned paths with non-blank symbols at t=1."""
    searcher = data['bs']
    b0, nb0, sb0, snb0 = searcher.init_paths()
    print("init:", b0, nb0, sb0, snb0)
    # prune_paths also initialises the searcher's global blank/non-blank state
    kept_b, kept_nb = searcher.prune_paths(b0, nb0, sb0, snb0, 2)
    print("Pruned: ", kept_b, kept_nb)
    print(searcher.score_b, searcher.score_nb)
    ext_nb, ext_scores = searcher.extend_with_symbol(kept_b, kept_nb, 1)
    print(ext_nb, ext_scores)
def test_merge(data):
    """Merge blank and symbol extensions into one path set."""
    searcher = data['bs']
    initial = searcher.init_paths()
    kept_b, kept_nb = searcher.prune_paths(*initial, 2)
    ext_b, score_b = searcher.extend_with_blank(kept_b, kept_nb, 1)
    ext_nb, score_nb = searcher.extend_with_symbol(kept_b, kept_nb, 1)
    searcher.merge_paths(ext_b, ext_nb, score_b, score_nb)
def test_decode(data):
    """Full decode on both the toy and the real matrix (smoke test)."""
    for key, beam_width in (('bs', 2), ('bs2', 1)):
        print("decoded: ", data[key].decode(beam_width))
| 33.813725
| 98
| 0.632647
| 573
| 3,449
| 3.544503
| 0.162304
| 0.039389
| 0.044313
| 0.054161
| 0.464796
| 0.402757
| 0.373215
| 0.351059
| 0.339242
| 0.339242
| 0
| 0.034446
| 0.19194
| 3,449
| 101
| 99
| 34.148515
| 0.694295
| 0.057988
| 0
| 0.37037
| 0
| 0
| 0.056121
| 0
| 0
| 0
| 0
| 0
| 0.197531
| 1
| 0.123457
| false
| 0
| 0.074074
| 0
| 0.197531
| 0.135802
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
327872875221fcfb18f1db81613c4a83884de390
| 3,404
|
py
|
Python
|
src/main/python/hydra/kafkatest/maxrate_test.py
|
bopopescu/hydra
|
ec0793f8c1f49ceb93bf1f1a9789085b68d55f08
|
[
"Apache-2.0"
] | 10
|
2016-05-28T15:56:43.000Z
|
2018-01-03T21:30:58.000Z
|
src/main/python/hydra/kafkatest/maxrate_test.py
|
bopopescu/hydra
|
ec0793f8c1f49ceb93bf1f1a9789085b68d55f08
|
[
"Apache-2.0"
] | 17
|
2016-06-06T22:15:28.000Z
|
2020-07-22T20:28:12.000Z
|
src/main/python/hydra/kafkatest/maxrate_test.py
|
bopopescu/hydra
|
ec0793f8c1f49ceb93bf1f1a9789085b68d55f08
|
[
"Apache-2.0"
] | 5
|
2016-06-01T22:01:44.000Z
|
2020-07-22T20:12:49.000Z
|
__author__ = 'annyz'
from pprint import pprint, pformat  # NOQA
import logging
import os
import sys
from datetime import datetime
from hydra.lib import util
from hydra.kafkatest.runtest import RunTestKAFKA
from hydra.lib.boundary import Scanner
from optparse import OptionParser
# Shared module-level logger for the max-rate suite below.
l = util.createlogger('runSuitMaxRate', logging.INFO)
class RunSuitMaxRate(object):
    """Drive a KAFKA scale test suite: for each client count, scan for the
    maximum sustainable message rate and, if messages were dropped there,
    the highest no-drop rate.

    NOTE: the constructor runs the whole suite and calls sys.exit(0) when
    done, preserving the original command-line behaviour.
    """

    def __init__(self, options):
        l.info(" Starting Max Rate ....")
        pwd = os.getcwd()
        fname = 'kafkasuit.test.log'
        # Use a context manager so the log handle is always closed (the
        # original leaked it); mode 'w' already truncates, so the explicit
        # truncate() call was redundant.
        with open(pwd + '/' + fname, 'w') as ofile:
            ofile.write('Starting at :' + datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '\n')
        # setattr(options, 'test_duration', 15)
        setattr(options, 'msg_batch', 100)
        setattr(options, 'msg_rate', 10000)
        setattr(options, 'keep_running', False)
        setattr(options, 'acks', 0)
        setattr(options, 'linger_ms', 0)
        setattr(options, 'consumer_max_buffer_size', 0)
        self.first_test = None
        # Parameters: client counts to sweep; 10 clients per sub app.
        client_set = [30, 60, 120, 240, 480, 960, 1920, 3840, 7680, 10000]
        for client_count in client_set:
            setattr(options, 'total_sub_apps', int(client_count / 10))
            if not self.first_test:
                # First iteration: create the runner and start the app server.
                runner = RunTestKAFKA(options, None)
                self.first_test = runner
                self.first_test.start_appserver()
            else:
                # Keep the old runner, but rescale the app.
                runner.set_options(options)
                runner.scale_sub_app()
            # Coarser scan steps for small client counts.
            if client_count < 50:
                scanner = Scanner(runner.run, 30000)
            elif client_count < 200:
                scanner = Scanner(runner.run, 10000)
            else:
                scanner = Scanner(runner.run, 500)
            (status, rate, drop) = scanner.find_max_rate()
            l.info("Found for Client Count %d Max message Rate %d with drop %f" %
                   (client_count, rate, drop))
            if drop != 0:
                # Messages were dropped at the max rate; search downwards for
                # the highest rate with no drops.
                l.info("Searching for no-drop rate")
                scanner_drop = Scanner(runner.run, rate / 2)
                (status, step_cnt, nodrop, nodrop_rate) = scanner_drop.search(0.5, 0.01)
                l.info("Found for Client Count %d Max message Rate %d with no drop (%f)" %
                       (client_count, nodrop_rate, nodrop))
            else:
                nodrop_rate = rate
            # Delete all launched apps once the required drop is achieved for this set
            runner.delete_all_launched_apps()
        self.first_test.stop_appserver()
        l.info("TestSuite Completed.")
        sys.exit(0)
def Run(argv):  # NOQA
    """Parse command-line options and launch the max-rate test suite."""
    usage = ('python %prog --c_pub --c_sub'
             ' --test_duration=<time to run test> --msg_batch=<msg burst batch before sleep>')
    parser = OptionParser(description='kafka scale maxrate test master',
                          version="0.1", usage=usage)
    # Declare the three options from a small table instead of repeating
    # add_option boilerplate.
    for flag, opt_type, default in (('test_duration', 'int', 15),
                                    ('msg_batch', 'int', 100),
                                    ('config_file', 'string', 'hydra.ini')):
        parser.add_option("--" + flag, dest=flag, type=opt_type, default=default)
    options = parser.parse_args()[0]
    RunSuitMaxRate(options)
    return True
| 38.247191
| 94
| 0.595476
| 414
| 3,404
| 4.73913
| 0.413043
| 0.057085
| 0.033129
| 0.035168
| 0.044852
| 0.044852
| 0.044852
| 0.044852
| 0.044852
| 0.044852
| 0
| 0.033638
| 0.292597
| 3,404
| 88
| 95
| 38.681818
| 0.781146
| 0.050235
| 0
| 0.042857
| 0
| 0
| 0.177985
| 0.013953
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.128571
| 0
| 0.185714
| 0.014286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
327a4fc033970cf2fec138ab6d2ea6fa9e580d97
| 1,574
|
py
|
Python
|
map_report.py
|
porcpine1967/aoe2stats
|
52965e437b8471753186ba1fc34cb773807eb496
|
[
"MIT"
] | null | null | null |
map_report.py
|
porcpine1967/aoe2stats
|
52965e437b8471753186ba1fc34cb773807eb496
|
[
"MIT"
] | null | null | null |
map_report.py
|
porcpine1967/aoe2stats
|
52965e437b8471753186ba1fc34cb773807eb496
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
""" Writes out map popularity of last two pools."""
from datetime import datetime, timedelta
from utils.map_pools import map_type_filter, pools
from utils.tools import execute_sql, last_time_breakpoint, map_name_lookup
# Template query: two format slots for the time-window bounds, one for an
# optional extra map-type filter clause, and one for the team size; filled
# in via str.format inside run().
SQL = """SELECT map_type, COUNT(*) as cnt
FROM matches
WHERE started BETWEEN {:0.0f} AND {:0.0f}
{}
AND team_size = {}
GROUP BY map_type
ORDER BY cnt DESC"""
def run():
    """ Run the report: print side-by-side map popularity percentages for
    the two most recent map pools, for 1v1 and team games."""
    # Map-type id -> human-readable name.
    map_names = map_name_lookup()
    # The two most recent pools.
    weeks = pools()[-2:]
    for size in (1, 2):
        print("TEAM" if size > 1 else "1v1")
        week_infos = []
        for idx, week in enumerate(weeks):
            week_info = []
            # Pool labels are YYYYMMDD strings.
            year = int(week[:4])
            month = int(week[4:6])
            day = int(week[6:])
            # Two-week window starting at the pool's breakpoint.
            start = last_time_breakpoint(datetime(year, month, day))
            end = start + timedelta(days=14)
            sql = SQL.format(
                start.timestamp(), end.timestamp(), map_type_filter(week, size), size
            )
            total = 0
            for map_type, count in execute_sql(sql):
                week_info.append((map_names[map_type], count,))
                total += count
            hold = []
            for name, count in week_info:
                hold.append("{:17}: {:4.1f}%".format(name, 100.0 * count / total))
            week_infos.append(hold)
        print("{:^24} {:^24}".format(*weeks))
        # NOTE(review): assumes both pools yielded the same number of maps;
        # an uneven pool would raise IndexError on week_infos[1][idx].
        for idx in range(len(week_infos[0])):
            print("{} {}".format(week_infos[0][idx], week_infos[1][idx]))
# Script entry point.
if __name__ == "__main__":
    run()
| 32.122449
| 85
| 0.560991
| 209
| 1,574
| 4.043062
| 0.401914
| 0.049704
| 0.042604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028907
| 0.296696
| 1,574
| 48
| 86
| 32.791667
| 0.734417
| 0.051461
| 0
| 0
| 0
| 0
| 0.136486
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.076923
| 0
| 0.102564
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
327cb6d4121abb0fa5a0265759fdf829da140dce
| 6,303
|
py
|
Python
|
tempdb/postgres.py
|
runfalk/tempdb
|
a19f7568db1795025c9ec8adfd84a9544f9a6966
|
[
"MIT"
] | 2
|
2021-01-17T00:01:14.000Z
|
2021-01-18T09:26:56.000Z
|
tempdb/postgres.py
|
runfalk/tempdb
|
a19f7568db1795025c9ec8adfd84a9544f9a6966
|
[
"MIT"
] | null | null | null |
tempdb/postgres.py
|
runfalk/tempdb
|
a19f7568db1795025c9ec8adfd84a9544f9a6966
|
[
"MIT"
] | null | null | null |
import getpass
import os
import platform
import psycopg2
import sys
import tempfile
from glob import glob
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT, quote_ident
from subprocess import check_output, PIPE, Popen
from time import sleep
from ._compat import ustr
from .utils import is_executable, Uri, Version
__all__ = [
"PostgresFactory",
"PostgresCluster",
]
class PostgresFactory(object):
    """Factory that creates and loads Postgres clusters using a given set
    of Postgres binaries.

    Args:
        pg_bin_dir: directory containing the initdb and postgres binaries.
        superuser: cluster superuser name; defaults to the current OS user.

    Raises:
        ValueError: if initdb or postgres cannot be found in pg_bin_dir.
    """

    def __init__(self, pg_bin_dir, superuser=None):
        # Temporary value until the first time we request it
        self._version = None
        self.initdb = os.path.join(pg_bin_dir, "initdb")
        if not is_executable(self.initdb):
            raise ValueError(
                "Unable to find initdb command in {}".format(pg_bin_dir)
            )
        self.postgres = os.path.join(pg_bin_dir, "postgres")
        if not is_executable(self.postgres):
            raise ValueError(
                "Unable to find postgres command in {}".format(pg_bin_dir)
            )
        if superuser is None:
            superuser = getpass.getuser()
        self.superuser = superuser

    @property
    def version(self):
        # Lazily determined server version. get_version is defined elsewhere
        # in this module (not visible here) -- presumably it inspects the
        # postgres binary; TODO confirm.
        if self._version is None:
            self._version = get_version(self.postgres)
        return self._version

    def init_cluster(self, data_dir=None):
        """
        Create a postgres cluster that trusts all incoming connections.
        This is great for testing, but a horrible idea for production usage.

        :param data_dir: Directory to create cluster in. This directory will
                         be automatically created if necessary.
        :return: Path to the created cluster that can be used by load_cluster()
        """
        if data_dir is None:
            data_dir = tempfile.mkdtemp()
        # If the target directory is not empty we don't want to risk wiping it
        # NOTE(review): os.listdir raises FileNotFoundError when data_dir does
        # not exist yet, despite the docstring's "automatically created" claim;
        # confirm intended behaviour for caller-supplied paths.
        if os.listdir(data_dir):
            raise ValueError((
                "The given data directory {} is not empty. A new cluster will "
                "not be created."
            ).format(data_dir))
        check_output([
            self.initdb,
            "-U", self.superuser,
            "-A", "trust",
            data_dir
        ])
        return data_dir

    def create_temporary_cluster(self):
        """Create and load a throwaway cluster with durability guards off."""
        data_dir = self.init_cluster()
        # Since we know this database should never be loaded again we disable
        # safe guards Postgres has to prevent data corruption
        return self.load_cluster(
            data_dir,
            is_temporary=True,
            fsync=False,
            full_page_writes=False,
        )

    def load_cluster(self, data_dir, is_temporary=False, **params):
        """Start a server for an existing cluster; extra keyword params are
        passed through as server configuration settings."""
        uri = Uri(
            scheme="postgresql",
            user=self.superuser,
            host=data_dir,
            params=params,
        )
        return PostgresCluster(self.postgres, uri, is_temporary)
class PostgresCluster(object):
    """A running Postgres server process bound to a UNIX socket directory.

    Args:
        postgres_bin: path to the postgres executable.
        uri: connection URI whose host is the cluster/socket directory.
        is_temporary: when True, the data directory is removed by close().

    Raises:
        ValueError: if uri.host is not a UNIX socket directory path.
    """

    def __init__(self, postgres_bin, uri, is_temporary=False):
        if uri.host is None or not uri.host.startswith("/"):
            msg = "{!r} doesn't point to a UNIX socket directory"
            raise ValueError(msg.format(uri))
        self.uri = uri
        self.is_temporary = is_temporary
        self.returncode = None  # set by close() once the server has exited
        cmd = [
            postgres_bin,
            "-D", uri.host,
            "-k", uri.host,
            # Empty listen_addresses disables TCP; connections go through
            # the UNIX socket only.
            "-c", "listen_addresses=",
        ]
        # Add additional configuration from kwargs
        for k, v in uri.params.items():
            if isinstance(v, bool):
                v = "on" if v else "off"
            cmd.extend(["-c", "{}={}".format(k, v)])
        # Start cluster
        self.process = Popen(
            cmd,
            stdout=PIPE,
            stderr=PIPE,
        )
        # Wait for a ".s.PGSQL.<id>" file to appear before continuing
        # NOTE(review): loops forever if the server fails to start; consider
        # a timeout plus a self.process.poll() check.
        while not glob(os.path.join(uri.host, ".s.PGSQL.*")):
            sleep(0.1)
        # Superuser connection
        self.conn = psycopg2.connect(
            ustr(self.uri.replace(database="postgres"))
        )
        self.conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)

    def __del__(self):
        # Best-effort shutdown in case the caller forgot to close().
        self.close()

    def iter_databases(self):
        """Yield the names of all databases except the default/template ones."""
        with self.conn.cursor() as c:
            default_databases = {"postgres", "template0", "template1"}
            c.execute("SELECT datname FROM pg_database")
            for name, in c:
                if name not in default_databases:
                    yield name

    def create_database(self, name, template=None):
        """Create a database (optionally from a template) and return a handle.

        Raises:
            KeyError: if a database with this name already exists.
        """
        if name in self.iter_databases():
            raise KeyError("The database {!r} already exists".format(name))
        with self.conn.cursor() as c:
            # quote_ident protects against injection through identifiers.
            sql = "CREATE DATABASE {}".format(quote_ident(name, c))
            if template is not None:
                sql += " TEMPLATE {}".format(quote_ident(template, c))
            c.execute(sql)
        return PostgresDatabase(self, self.uri.replace(database=name))

    def get_database(self, name):
        """Return a handle to an existing database.

        Raises:
            KeyError: if no database with this name exists.
        """
        if name not in self.iter_databases():
            raise KeyError("The database {!r} doesn't exist".format(name))
        return PostgresDatabase(self, self.uri.replace(database=name))

    def close(self):
        """Shut down the server; remove the data directory if temporary.

        Safe to call multiple times.
        """
        if self.process is None:
            return
        # Kill all connections but this control connection. This prevents
        # the server waiting for connections to close indefinitely
        with self.conn.cursor() as c:
            c.execute("""
                SELECT pg_terminate_backend(pid)
                FROM pg_stat_activity
                WHERE pid != pg_backend_pid()
            """)
        self.conn.close()
        self.process.terminate()
        self.returncode = self.process.wait()
        # Remove temporary clusters when closing
        if self.is_temporary:
            for path, dirs, files in os.walk(self.uri.host, topdown=False):
                for f in files:
                    os.remove(os.path.join(path, f))
                for d in dirs:
                    os.rmdir(os.path.join(path, d))
            os.rmdir(self.uri.host)
        self.process = None
class PostgresDatabase(object):
    """Handle to a single database within a running cluster."""

    def __init__(self, cluster, uri):
        # Keep a reference to the owning cluster so it stays alive as long
        # as this handle does.
        self.cluster = cluster
        self.uri = uri

    @property
    def dsn(self):
        """Connection string for this database."""
        return ustr(self.uri)
| 30.597087
| 79
| 0.58369
| 750
| 6,303
| 4.778667
| 0.3
| 0.023438
| 0.011161
| 0.01423
| 0.122489
| 0.095703
| 0.055246
| 0.055246
| 0.055246
| 0
| 0
| 0.001645
| 0.324766
| 6,303
| 205
| 80
| 30.746341
| 0.840461
| 0.137712
| 0
| 0.075342
| 0
| 0
| 0.11287
| 0.004656
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089041
| false
| 0.013699
| 0.082192
| 0.006849
| 0.246575
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
327ee9780e46ebbfd9024596b22934ad7011175f
| 426
|
py
|
Python
|
nymph/modules/tool.py
|
smilelight/nymph
|
c8da2211f7a8f58d1c6d327b243e419ed9e64ead
|
[
"Apache-2.0"
] | 1
|
2020-08-10T00:58:14.000Z
|
2020-08-10T00:58:14.000Z
|
nymph/modules/tool.py
|
smilelight/nymph
|
c8da2211f7a8f58d1c6d327b243e419ed9e64ead
|
[
"Apache-2.0"
] | null | null | null |
nymph/modules/tool.py
|
smilelight/nymph
|
c8da2211f7a8f58d1c6d327b243e419ed9e64ead
|
[
"Apache-2.0"
] | 1
|
2021-07-03T07:06:41.000Z
|
2021-07-03T07:06:41.000Z
|
# -*- coding: utf-8 -*-
import pandas as pd
def save_dict_to_csv(dict_data: dict, csv_path: str):
    """Write a nested dict of rows to CSV and return it as a DataFrame.

    Args:
        dict_data: mapping of row label -> {column name -> value}; column
            order is taken from the first row.
        csv_path: destination path for the CSV file.

    Returns:
        The DataFrame that was written, indexed by the outer keys.
    """
    indexes = list(dict_data.keys())
    # Column order comes from the first row; empty input yields an empty
    # frame instead of the IndexError the original raised.
    columns = list(next(iter(dict_data.values())).keys()) if dict_data else []
    # Look values up by column name so rows whose dicts were built in a
    # different key order still land under the right columns (the original
    # relied on each dict's insertion order and could silently misalign).
    data = [[dict_data[row][column] for column in columns] for row in dict_data]
    pd_data = pd.DataFrame(data, index=indexes, columns=columns)
    pd_data.to_csv(csv_path, encoding='utf8')
    return pd_data
| 30.428571
| 64
| 0.666667
| 67
| 426
| 4.029851
| 0.462687
| 0.148148
| 0.088889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008646
| 0.185446
| 426
| 13
| 65
| 32.769231
| 0.769452
| 0.049296
| 0
| 0
| 0
| 0
| 0.009926
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
328065cc7a0c80c52a732c0213b03b1281db7d57
| 1,035
|
py
|
Python
|
Python/rockpaperscissors/rockpaperscissors.py
|
rvrheenen/OpenKattis
|
7fd59fcb54e86cdf10f56c580c218c62e584f391
|
[
"MIT"
] | 12
|
2016-10-03T20:43:43.000Z
|
2021-06-12T17:18:42.000Z
|
Python/rockpaperscissors/rockpaperscissors.py
|
rvrheenen/OpenKattis
|
7fd59fcb54e86cdf10f56c580c218c62e584f391
|
[
"MIT"
] | null | null | null |
Python/rockpaperscissors/rockpaperscissors.py
|
rvrheenen/OpenKattis
|
7fd59fcb54e86cdf10f56c580c218c62e584f391
|
[
"MIT"
] | 10
|
2017-11-14T19:56:37.000Z
|
2021-02-02T07:39:57.000Z
|
# Rock-paper-scissors tally (Kattis "rockpaperscissors").
# Reads tournaments until a line with a single token, then prints each
# player's loss fraction.  Fixes vs the original: leftover debug prints
# (a run of blank lines and the raw win list) corrupted the output, and
# per-line input() was too slow -- stdin is now read in one bulk call.
import sys


def main():
    """Process all tournaments from stdin and print loss fractions."""
    lines = sys.stdin.read().splitlines()
    beats = {("scissors", "paper"), ("paper", "rock"), ("rock", "scissors")}
    out = []
    li = 0
    first_run = True
    while li < len(lines):
        header = lines[li].split()
        li += 1
        # A line with a single token terminates the input.
        if len(header) == 1:
            break
        # Blank line between consecutive tournaments.
        if first_run:
            first_run = False
        else:
            out.append("")
        n_players, n_games = int(header[0]), int(header[1])
        wins = [0] * n_players
        losses = [0] * n_players
        # Each of the n_games rounds has C(n_players, 2) matches.
        n_matches = n_games * n_players * (n_players - 1) // 2
        for _ in range(n_matches):
            p1, move1, p2, move2 = lines[li].split()
            li += 1
            if move1 == move2:
                continue
            if (move1, move2) in beats:
                wins[int(p1) - 1] += 1
                losses[int(p2) - 1] += 1
            else:
                wins[int(p2) - 1] += 1
                losses[int(p1) - 1] += 1
        for won, lost in zip(wins, losses):
            played = won + lost
            # "-" for players whose every match was a draw.
            out.append("-" if played == 0 else "%.3f" % (lost / played))
    print("\n".join(out))


if __name__ == "__main__":
    main()
| 32.34375
| 147
| 0.510145
| 142
| 1,035
| 3.65493
| 0.338028
| 0.023121
| 0.028902
| 0.030829
| 0.013487
| 0.013487
| 0
| 0
| 0
| 0
| 0
| 0.044733
| 0.330435
| 1,035
| 32
| 148
| 32.34375
| 0.704185
| 0.026087
| 0
| 0.1
| 0
| 0
| 0.052632
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3280c700cb467b6fd44a96a8f003a083cb2e0a5f
| 9,460
|
py
|
Python
|
monitorcontrol/monitor_control.py
|
klwlau/monitorcontrol
|
92d07c7a93585de14551ba1f1dd8bb3a009c4842
|
[
"MIT"
] | null | null | null |
monitorcontrol/monitor_control.py
|
klwlau/monitorcontrol
|
92d07c7a93585de14551ba1f1dd8bb3a009c4842
|
[
"MIT"
] | null | null | null |
monitorcontrol/monitor_control.py
|
klwlau/monitorcontrol
|
92d07c7a93585de14551ba1f1dd8bb3a009c4842
|
[
"MIT"
] | null | null | null |
###############################################################################
# Copyright 2019 Alex M.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
from . import vcp
import sys
from typing import Type, List, Union, Iterable
class Monitor:
    """
    A physical monitor attached to a Virtual Control Panel (VCP).

    Generated with :py:meth:`get_monitors()` or
    :py:meth:`iterate_monitors()`.

    Args:
        vcp: virtual control panel for the monitor
    """

    #: Power modes and their integer values.
    POWER_MODES = {
        "on": 0x01,
        "standby": 0x02,
        "suspend": 0x03,
        "off_soft": 0x04,
        "off_hard": 0x05,
    }

    def __init__(self, vcp: Type[vcp.VCP]):
        self.vcp = vcp
        # Per-instance cache of feature-code maximums; filled lazily by
        # _get_code_maximum.
        self.code_maximum = {}

    def __enter__(self):
        """Context-manager entry: open the VCP connection."""
        self.open()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: close the VCP connection."""
        self.close()

    def open(self):
        """
        Opens the connection to the VCP.

        Raises:
            VCPError: failed to open VCP
        """
        self.vcp.open()

    def close(self):
        """
        Closes the connection to the VCP.

        Raises:
            VCPError: failed to close VCP
        """
        self.vcp.close()

    def _get_code_maximum(self, code: Type[vcp.VCPCode]) -> int:
        """
        Gets the maximum values for a given code, and caches in the
        class dictionary if not already found.

        Args:
            code: feature code definition class

        Returns:
            maximum value for the given code

        Raises:
            TypeError: code is write only
        """
        if not code.readable:
            raise TypeError(f"code is not readable: {code.name}")

        if code.value in self.code_maximum:
            return self.code_maximum[code.value]
        else:
            _, maximum = self.vcp.get_vcp_feature(code.value)
            self.code_maximum[code.value] = maximum
            return maximum

    def _set_vcp_feature(self, code: Type[vcp.VCPCode], value: int):
        """
        Sets the value of a feature on the virtual control panel.

        Args:
            code: feature code definition class
            value: feature value

        Raises:
            TypeError: code is read only
            ValueError: value is greater than the maximum allowable
            VCPError: failed to get VCP feature
        """
        if code.type == "ro":
            raise TypeError(f"cannot write read-only code: {code.name}")
        elif code.type == "rw":
            maximum = self._get_code_maximum(code)
            if value > maximum:
                raise ValueError(
                    f"value of {value} exceeds code maximum of {maximum}"
                )
        # write-only ("wo") codes skip the maximum check and are set directly
        self.vcp.set_vcp_feature(code.value, value)

    def _get_vcp_feature(self, code: Type[vcp.VCPCode]) -> int:
        """
        Gets the value of a feature from the virtual control panel.

        Args:
            code: feature code definition class

        Returns:
            current feature value

        Raises:
            TypeError: code is write only
            VCPError: failed to get VCP feature
        """
        if code.type == "wo":
            raise TypeError(f"cannot read write-only code: {code.name}")

        # the VCP also reports the maximum; only the current value is needed
        current, maximum = self.vcp.get_vcp_feature(code.value)
        return current

    @property
    def luminance(self) -> int:
        """
        Gets the monitors back-light luminance.

        Returns:
            current luminance value

        Raises:
            VCPError: failed to get luminance from the VCP
        """
        code = vcp.get_vcp_code_definition("image_luminance")
        return self._get_vcp_feature(code)

    @luminance.setter
    def luminance(self, value: int):
        """
        Sets the monitors back-light luminance.

        Args:
            value: new luminance value (typically 0-100)

        Raises:
            ValueError: luminance outside of valid range
            VCPError: failed to set luminance in the VCP
        """
        # NOTE(review): the original docstring flagged this as "not
        # implemented or checked" -- _set_vcp_feature only enforces the
        # VCP-reported maximum, not a lower bound; confirm the range check.
        code = vcp.get_vcp_code_definition("image_luminance")
        self._set_vcp_feature(code, value)

    @property
    def contrast(self) -> int:
        """
        Gets the monitors contrast.

        Returns:
            current contrast value

        Raises:
            VCPError: failed to get contrast from the VCP
        """
        code = vcp.get_vcp_code_definition("image_contrast")
        return self._get_vcp_feature(code)

    @contrast.setter
    def contrast(self, value: int):
        """
        Sets the monitors back-light contrast.

        Args:
            value: new contrast value (typically 0-100)

        Raises:
            ValueError: contrast outside of valid range
            VCPError: failed to set contrast in the VCP
        """
        code = vcp.get_vcp_code_definition("image_contrast")
        self._set_vcp_feature(code, value)

    @property
    def power_mode(self) -> int:
        """
        The monitor power mode.

        When used as a getter this returns the integer value of the
        monitor power mode.

        When used as a setter an integer value or a power mode
        string from :py:attr:`Monitor.POWER_MODES` may be used.

        Raises:
            VCPError: failed to get or set the power mode
            ValueError: set power state outside of valid range
            KeyError: set power mode string is invalid
        """
        code = vcp.get_vcp_code_definition("display_power_mode")
        return self._get_vcp_feature(code)

    @power_mode.setter
    def power_mode(self, value: Union[int, str]):
        # accept either a mode name from POWER_MODES or a raw integer value
        if isinstance(value, str):
            mode_value = Monitor.POWER_MODES[value]
        elif isinstance(value, int):
            mode_value = value
        else:
            raise TypeError("unsupported mode type: " + repr(type(value)))

        if mode_value not in Monitor.POWER_MODES.values():
            raise ValueError(f"cannot set reserved mode value: {mode_value}")

        code = vcp.get_vcp_code_definition("display_power_mode")
        self._set_vcp_feature(code, mode_value)
def get_vcps() -> List[Type[vcp.VCP]]:
    """
    Discovers virtual control panels.

    This function should not be used directly in most cases; use
    :py:meth:`get_monitors()` or :py:meth:`iterate_monitors()` to
    get monitors with VCPs.

    Returns:
        List of VCPs in a closed state.

    Raises:
        NotImplementedError: not implemented for your operating system
        VCPError: failed to list VCPs
    """
    # Guard clause: bail out early on unsupported platforms.
    supported = sys.platform == "win32" or sys.platform.startswith("linux")
    if not supported:
        raise NotImplementedError(f"not implemented for {sys.platform}")
    return vcp.get_vcps()
def get_monitors() -> List[Monitor]:
    """
    Creates a list of all monitors.

    Returns:
        List of monitors in a closed state.

    Raises:
        NotImplementedError: not implemented for your operating system
        VCPError: failed to list VCPs

    Example:
        Setting the power mode of all monitors to standby::

            for monitor in get_monitors():
                try:
                    monitor.open()
                    monitor.power_mode = "standby"
                except VCPError:
                    print("uh-oh")
                    raise
                finally:
                    monitor.close()

        Setting all monitors to the maximum brightness using the
        context manager::

            for monitor in get_monitors():
                with monitor as m:
                    m.luminance = 100
    """
    return list(map(Monitor, get_vcps()))
def iterate_monitors() -> Iterable[Monitor]:
    """
    Iterates through all monitors, opening and closing the VCP for
    each monitor in turn.

    Yields:
        Monitor in an open state.

    Raises:
        NotImplementedError: not implemented for this platform
        VCPError: failed to list VCPs

    Example:
        Setting all monitors to the maximum brightness::

            for monitor in iterate_monitors():
                monitor.luminance = 100
    """
    for handle in get_vcps():
        mon = Monitor(handle)
        with mon:
            yield mon
| 30.031746
| 79
| 0.595455
| 1,137
| 9,460
| 4.85752
| 0.226913
| 0.015209
| 0.034764
| 0.014123
| 0.380952
| 0.319392
| 0.266341
| 0.227413
| 0.151729
| 0.091979
| 0
| 0.005852
| 0.313531
| 9,460
| 314
| 80
| 30.127389
| 0.844626
| 0.502537
| 0
| 0.186813
| 0
| 0
| 0.111233
| 0
| 0
| 0
| 0.005479
| 0
| 0
| 1
| 0.186813
| false
| 0
| 0.032967
| 0
| 0.340659
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
328135201e01cdb2208c77c5703c4b619db0d327
| 6,201
|
py
|
Python
|
algorithms/vae.py
|
ENSP-AI-Mentoring/machine-learning-algorithms
|
d53d5342f79d08066e158228cab6240872f61f72
|
[
"Apache-2.0"
] | 1
|
2021-11-14T19:46:46.000Z
|
2021-11-14T19:46:46.000Z
|
algorithms/vae.py
|
ENSP-AI-Mentoring/machine-learning-algorithms
|
d53d5342f79d08066e158228cab6240872f61f72
|
[
"Apache-2.0"
] | null | null | null |
algorithms/vae.py
|
ENSP-AI-Mentoring/machine-learning-algorithms
|
d53d5342f79d08066e158228cab6240872f61f72
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from tqdm import tqdm
class VAE:
    """Plain-tensor Variational Auto-Encoder (Kingma & Welling, 2013).

    One tanh hidden layer in the encoder and one in the decoder; the
    decoder output passes through a sigmoid, so inputs are expected to
    lie in [0, 1] (Bernoulli reconstruction likelihood). Parameters are
    kept in dictionaries of leaf tensors rather than an nn.Module.
    """

    def __init__(
        self,
        train_data,
        test_data,
        in_dim,
        encoder_width,
        decoder_width,
        latent_dim,
        device=None,
    ):
        """
        Args:
            train_data: iterable dataset of tensors (flattenable to in_dim)
            test_data: dataset used by the reconstruction test
            in_dim: flattened input dimensionality
            encoder_width: width of the encoder hidden layer
            decoder_width: width of the decoder hidden layer
            latent_dim: dimensionality of the latent space
            device: torch.device to use; when None, CUDA is selected if
                available, otherwise CPU
        """
        self.name = "VAE"
        if device is None:
            # BUG FIX: the original evaluated `if device` inside this
            # branch, where `device` is known to be None, so CUDA was
            # never selected; check actual CUDA availability instead.
            device = (
                torch.device("cuda")
                if torch.cuda.is_available()
                else torch.device("cpu")
            )
        self.device = device
        self.latent_dim = latent_dim
        self.encoder_width = encoder_width
        self.decoder_width = decoder_width
        self.in_dim = in_dim
        # Initialize encoder/decoder weights and biases.
        self.weights, self.biases = self.init_vae_params(
            in_dim, encoder_width, decoder_width, latent_dim
        )
        # Remember one example's shape so test2() can un-flatten samples.
        self.train_data = train_data
        data = next(iter(train_data))
        self.example_size = data.size()
        self.test_data = test_data

    def train(self, batch_size, max_epoch, lr, weight_decay):
        """Train the VAE with Adam.

        Args:
            batch_size: mini-batch size (incomplete batches are dropped)
            max_epoch: number of passes over the training data
            lr: learning rate
            weight_decay: Adam weight decay

        Returns:
            np.ndarray with the per-step loss history.
        """
        optimizer = self._get_optimizer(lr, weight_decay)
        hist_loss = []
        train_dataloader = DataLoader(
            self.train_data, batch_size, shuffle=True, drop_last=True, num_workers=0
        )
        # Warm-up forward pass on one batch. Its result is unused (the
        # original comment claimed it printed the initial loss, which it
        # never did); kept so RNG consumption order stays unchanged.
        data = next(iter(train_dataloader))
        Xground = data.view((batch_size, -1)).to(self.device)
        loss = self._vae_loss(Xground)
        tk = tqdm(range(max_epoch))
        for epoch in tk:
            for data in train_dataloader:
                Xground = data.view((batch_size, -1)).to(self.device)
                optimizer.zero_grad()
                loss = self._vae_loss(Xground)
                # Backward propagate and take one optimizer step.
                loss.backward()
                optimizer.step()
                hist_loss.append(loss.item())
                tk.set_postfix({"val_loss": hist_loss[-1], "epoch": epoch})
        return np.array(hist_loss)

    def test1(self, batch_size):
        """Data reconstruction test.

        Encodes one random test batch, samples latents with the
        reparameterization trick and decodes them.

        Returns:
            (original_batch, reconstructed_batch) with matching shapes.
        """
        test_dataloader = DataLoader(
            self.test_data, batch_size, shuffle=True, drop_last=True, num_workers=0
        )
        data = next(iter(test_dataloader))
        Xground = data.view((batch_size, -1)).to(self.device)
        z_mean, z_logstd = self._encoding(Xground)
        epsi = torch.randn(z_logstd.size()).to(self.device)
        z_star = z_mean + torch.exp(0.5 * z_logstd) * epsi
        Xstar = self._decoding(z_star)
        # BUG FIX: _decoding() already ends with a sigmoid; the original
        # applied torch.sigmoid() a second time here, squashing every
        # reconstruction into the (0.5, 0.73) range.
        Xstar = Xstar.view(data.size())
        return data, Xstar

    def test2(self, batch_size):
        """Distribution-transformation test: decode `batch_size` random
        standard-normal latent vectors into artificial samples shaped
        like the training examples."""
        Z = torch.randn((batch_size, self.latent_dim)).to(self.device)
        Xstar = self._decoding(Z).view((-1, *self.example_size))
        return Xstar

    def _vae_loss(self, Xground):
        """Negative ELBO: -(E[log p(x|z)] - KL(q(z|x) || p(z))).

        Note: despite its name, `z_logstd` is treated as a log-*variance*
        throughout (the reparameterization uses exp(0.5 * z_logstd)).
        """
        z_mean, z_logstd = self._encoding(Xground)
        # Analytic *negative* KL divergence of q(z|x) from N(0, I), per sample.
        neg_kl = 0.5 * torch.sum(
            1 + z_logstd - z_mean ** 2 - torch.exp(z_logstd), dim=1
        )
        # Reparameterization trick: z = mu + sigma * eps.
        epsi = torch.randn(z_logstd.size()).to(self.device)
        z_star = z_mean + torch.exp(0.5 * z_logstd) * epsi
        Xstar = self._decoding(z_star)
        # Bernoulli log-likelihood of the data under the decoder output;
        # the 1e-12 terms guard the logs against exact zeros.
        llh_loss = Xground * torch.log(1e-12 + Xstar) + (1 - Xground) * torch.log(
            1e-12 + 1 - Xstar
        )
        llh_loss = torch.sum(llh_loss, dim=1)
        # Minimizing this maximizes the ELBO.
        return -torch.mean(neg_kl + llh_loss)

    def _get_optimizer(self, lr, weight_decay):
        """Build an Adam optimizer over every weight and bias tensor."""
        opt_params = [
            {"params": tensor, "lr": lr}
            for tensor in list(self.weights.values()) + list(self.biases.values())
        ]
        return Adam(opt_params, lr=lr, weight_decay=weight_decay)

    def _encoding(self, X):
        """Encoder forward pass (Kingma, supplementary C.2).

        Returns:
            (mean, log-variance) of q(z|x), each (batch, latent_dim).
        """
        hidden = torch.tanh(
            torch.matmul(X, self.weights["encoder_hidden_w"])
            + self.biases["encoder_hidden_b"]
        )
        mean_output = (
            torch.matmul(hidden, self.weights["latent_mean_w"])
            + self.biases["latent_mean_b"]
        )
        logstd_output = (
            torch.matmul(hidden, self.weights["latent_std_w"])
            + self.biases["latent_std_b"]
        )
        return mean_output, logstd_output

    def _decoding(self, Z):
        """Decoder forward pass: latent vector -> sigmoid output in (0, 1)."""
        hidden = torch.tanh(
            torch.matmul(Z, self.weights["decoder_hidden_w"])
            + self.biases["decoder_hidden_b"]
        )
        Xstar = (
            torch.matmul(hidden, self.weights["decoder_out_w"])
            + self.biases["decoder_out_b"]
        )
        return torch.sigmoid(Xstar)

    def init_vae_params(self, in_dim, encoder_width, decoder_width, latent_dim):
        """Build Xavier-initialized weight/bias dictionaries for the
        encoder, the two latent heads and the decoder."""
        weights = {
            "encoder_hidden_w": self.xavier_init(in_dim, encoder_width),
            "latent_mean_w": self.xavier_init(encoder_width, latent_dim),
            "latent_std_w": self.xavier_init(encoder_width, latent_dim),
            "decoder_hidden_w": self.xavier_init(latent_dim, decoder_width),
            "decoder_out_w": self.xavier_init(decoder_width, in_dim),
        }
        biases = {
            "encoder_hidden_b": self.xavier_init(1, encoder_width),
            "latent_mean_b": self.xavier_init(1, latent_dim),
            "latent_std_b": self.xavier_init(1, latent_dim),
            "decoder_hidden_b": self.xavier_init(1, decoder_width),
            "decoder_out_b": self.xavier_init(1, in_dim),
        }
        return weights, biases

    def xavier_init(self, in_d, out_d):
        """Sample a trainable (in_d, out_d) tensor from
        N(0, 2 / (in_d + out_d)) — Xavier/Glorot initialization."""
        xavier_stddev = np.sqrt(2.0 / (in_d + out_d))
        return torch.normal(
            size=(in_d, out_d),
            mean=0.0,
            std=xavier_stddev,
            requires_grad=True,
            device=self.device,
        )
| 31.8
| 94
| 0.588776
| 777
| 6,201
| 4.442728
| 0.185328
| 0.028679
| 0.040556
| 0.021727
| 0.348204
| 0.275492
| 0.237833
| 0.182213
| 0.110371
| 0.110371
| 0
| 0.008555
| 0.302532
| 6,201
| 194
| 95
| 31.963918
| 0.789595
| 0.067247
| 0
| 0.161972
| 0
| 0
| 0.055401
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070423
| false
| 0
| 0.035211
| 0
| 0.176056
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32837c01862960b0796752083e66eefb2afb0c24
| 1,244
|
py
|
Python
|
qfig.py
|
mth1haha/BlockchainQueueingNetwork
|
611dc84b857efbec22edfe5f3a1bb8f7052a39aa
|
[
"Apache-2.0"
] | 1
|
2021-11-30T08:22:43.000Z
|
2021-11-30T08:22:43.000Z
|
qfig.py
|
mth1haha/BlockchainQueueingNetwork
|
611dc84b857efbec22edfe5f3a1bb8f7052a39aa
|
[
"Apache-2.0"
] | null | null | null |
qfig.py
|
mth1haha/BlockchainQueueingNetwork
|
611dc84b857efbec22edfe5f3a1bb8f7052a39aa
|
[
"Apache-2.0"
] | 1
|
2020-11-25T08:48:25.000Z
|
2020-11-25T08:48:25.000Z
|
import simpy as sp
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats, integrate
def client(env, lamda, q, tic):
    """simpy process: Poisson job source.

    Generates jobs with exponentially distributed inter-arrival times of
    mean 1/lamda, puts each job into store `q`, and records its arrival
    time in `tic`.
    """
    mean_interarrival = 1 / lamda
    while True:
        delay = np.random.exponential(mean_interarrival)
        yield env.timeout(delay)
        q.put('job')
        tic.append(env.now)
def server(env, alpha, mu1, mu2, q, toc):
    """simpy process: single server with a two-branch service time.

    Waits for a job from store `q`, then serves it with an exponential
    time of mean 1/mu1 (probability alpha) or 1/mu2 (otherwise), and
    records the departure time in `toc`.
    """
    service_means = (1 / mu1, 1 / mu2)
    while True:
        yield q.get()
        use_first = np.random.uniform() < alpha
        mean_service = service_means[0] if use_first else service_means[1]
        yield env.timeout(np.random.exponential(mean_service))
        toc.append(env.now)
# --- Simulation parameters ---
lamda = 75  # arrival rate of the Poisson job source
alpha = 0.333  # probability that a job takes the first service branch
mu1 = 370  # service rate of branch 1
mu2 = 370*(0.666)  # service rate of branch 2 (slower)
num_bins = 50  # bins for the commented-out histogram plot at the bottom
runtime = 1000  # how long to run the simulation
tic = []  # timestamp at which each job enters the system
toc = []  # timestamp at which each job leaves the system

# Wire up the simpy environment: one arrival process feeding a shared
# store, one server process draining it; then run until `runtime`.
env = sp.Environment()
q = sp.Store(env)
env.process(client(env, lamda, q, tic))
env.process(server(env, alpha, mu1, mu2, q, toc))
env.run(until=runtime)

# Inter-departure times between consecutive completed jobs.
# NOTE(review): `a` is never used afterwards, and `l` is only needed by
# the commented-out subsampling line below.
l = len(tic)
a = toc
b = toc
#b = toc[0:l:40]
histdata = [b[i] - b[i-1] for i in range(1, len(b))]
# Plot the inter-departure-time distribution with an exponential fit.
sns.distplot(histdata, kde=False, fit=stats.expon)
plt.xlabel("inter departure time (s)")
plt.xlim(0,0.15)
#plt.ylim(0,100)
plt.savefig('dist1.png')
plt.show()
#plt.hist(histdata, num_bins)
#plt.show()
| 20.393443
| 52
| 0.619775
| 203
| 1,244
| 3.788177
| 0.458128
| 0.041612
| 0.035111
| 0.078023
| 0.109233
| 0.062419
| 0.062419
| 0
| 0
| 0
| 0
| 0.053236
| 0.229904
| 1,244
| 60
| 53
| 20.733333
| 0.749478
| 0.07717
| 0
| 0.086957
| 0
| 0
| 0.031607
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.108696
| 0
| 0.152174
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
328b211073d9f2b0d84385aebf512b9639d8569d
| 1,133
|
py
|
Python
|
application/utils/data_transfer_objects.py
|
charles-crawford/sentiment
|
38cfd6af1cc81ad1858621a182cd76dc3e5f04db
|
[
"MIT"
] | null | null | null |
application/utils/data_transfer_objects.py
|
charles-crawford/sentiment
|
38cfd6af1cc81ad1858621a182cd76dc3e5f04db
|
[
"MIT"
] | null | null | null |
application/utils/data_transfer_objects.py
|
charles-crawford/sentiment
|
38cfd6af1cc81ad1858621a182cd76dc3e5f04db
|
[
"MIT"
] | null | null | null |
from flask_restx.fields import String, Boolean, Raw, List, Float, Nested
class DataTransferObjects:
    """Registers the Flask-RESTX API models (DTOs) used for request and
    response marshalling on a given namespace."""

    def __init__(self, ns):
        """
        Args:
            ns: flask_restx namespace the models are registered on
        """
        self.ns = ns
        # Common HTTP status -> description map for response documentation.
        self.general_responses = {200: 'OK',
                                  404: "Resource not found",
                                  400: "Bad Request",
                                  500: "Internal Server Error"}
        # Single free-text payload.
        self.plain_text = self.ns.model('plain_text', {
            'plain_text': String(example='some sample text')
        })
        # Batch payload: a list of texts.
        self.text_list = self.ns.model('text_list', {
            'text_list': List(String(), example=['This is the first sentence.', 'This is the second sentence.'])
        })
        # One predicted label with its confidence score.
        self.label = self.ns.model('label', {
            'value': String(example='POSITIVE'),
            'confidence': Float(example=.9)
        })
        # Prediction for a single text: the text plus its labels.
        self.prediction = self.ns.model('prediction', {
            'text': String(example='some sample text'),
            'labels': List(Nested(self.label))
        })
        # Response wrapper for batch predictions.
        self.predictions = self.ns.model('predictions', {
            'predictions': List(Nested(self.prediction))
        })
| 33.323529
| 112
| 0.529568
| 116
| 1,133
| 5.068966
| 0.448276
| 0.071429
| 0.093537
| 0.071429
| 0.105442
| 0.105442
| 0
| 0
| 0
| 0
| 0
| 0.017241
| 0.33451
| 1,133
| 33
| 113
| 34.333333
| 0.762599
| 0
| 0
| 0.2
| 0
| 0
| 0.218005
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.04
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
329003760fc6877a5fb340f8c2de344d9c2c4d3e
| 13,284
|
py
|
Python
|
grover.py
|
raulillo82/TFG-Fisica-2021
|
8acfd748c7f49ea294606a9c185227927ec2e256
|
[
"MIT"
] | null | null | null |
grover.py
|
raulillo82/TFG-Fisica-2021
|
8acfd748c7f49ea294606a9c185227927ec2e256
|
[
"MIT"
] | null | null | null |
grover.py
|
raulillo82/TFG-Fisica-2021
|
8acfd748c7f49ea294606a9c185227927ec2e256
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
'''
* Copyright (C) 2021 Raúl Osuna Sánchez-Infante
*
* This software may be modified and distributed under the terms
* of the MIT license. See the LICENSE.txt file for details.
'''
##################
#Needed libraries#
##################
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
import qiskit as q
import sys
from qiskit.visualization import plot_histogram
from qiskit.providers.ibmq import least_busy
from random import getrandbits
'''
Grover's algorithm. Intro
'''
#######################
#Functions definitions#
#######################
'''
Usage function
calling the program with "-h" or "--help" will display the help without returning an error (help was intended)
calling the program with no options or wrong ones will display the same help but return an error
Please bear in mind that some combination of options are simply ignored, see the text of this function itself
'''
def usage():
    """Print the command-line help text, then terminate.

    Exits with status 0 when the user explicitly asked for help
    (-h/--help as the sole argument), with status 1 otherwise.
    """
    print("Usage: " + str((sys.argv)[0]) + " i j k l")
    print("i: Number of qubits (2 or 3, will yield error if different)")
    print("j: Number of solutions (only taken into account if i=3, otherwise ignored). Can only be 1 or 2, will yield error otherwise")
    print("k: Number of iterations (only taken into account for i=3 and j=1, othwerise ignored). Can only be 1 or 2, will yield error otherwise")
    print("l: Perform computations in real quantum hardware, can only be 0 (no) or 1 (yes), will yield error otherwise")
    help_requested = len(sys.argv) == 2 and str(sys.argv[1]) in ("-h", "--help")
    exit(0 if help_requested else 1)
'''
Check whether parameter is an integer
'''
def is_intstring(s):
    """Return True when `s` parses as a base-10 integer, False otherwise."""
    try:
        int(s)
    except ValueError:
        return False
    return True
'''
Initialization:
Simply apply an H gate to every qubit
'''
def initialize():
    """Validate command-line arguments and build the initial circuit.

    Requires sys.argv[1] to be 2 or 3 (qubit count) and all further
    arguments to be integers; otherwise exits via usage()/sys.exit.

    Returns:
        A QuantumCircuit with an H gate applied to every qubit, followed
        by a barrier (uniform superposition, ready for the oracle).
    """
    if len(sys.argv) == 1:
        print("No arguments given")
        usage()
    elif (
        len(sys.argv) > 5
        or str(sys.argv[1]) == "-h"
        or str(sys.argv[1]) == "--help"
        or not is_intstring(sys.argv[1])
        or (int(sys.argv[1]) != 2 and int(sys.argv[1]) != 3)
    ):
        usage()
    else:
        for arg in sys.argv[2:]:
            if not is_intstring(arg):
                sys.exit("All arguments must be integers. Exit.")
        # BUG FIX: sys.argv entries are strings, but QuantumCircuit takes
        # an integer qubit count (the rest of the program consistently
        # uses int(sys.argv[1])) — convert explicitly.
        qc = q.QuantumCircuit(int(sys.argv[1]))
        # Apply an H-gate to all qubits to create the uniform superposition.
        for i in range(qc.num_qubits):
            qc.h(i)
        qc.barrier()
        return qc
'''
Implement multi controlled Z-gate, easy to reutilize
'''
def mctz(qc):
    """Apply a multi-controlled-Z with qubits 0 and 1 as controls and
    qubit 2 as target, built as H(2) . MCT . H(2)."""
    controls = list(range(2))
    qc.h(2)
    qc.mct(controls, 2)
    qc.h(2)
'''
Oracle metaimplementation
This function will simply call one of the possibles oracles functions
'''
def oracle (qc):
    """Apply a randomly-generated phase oracle to `qc` (and, for the
    3-qubit single-solution case, the interleaved diffusion steps).

    The target bit string(s) are drawn with getrandbits(); the variant is
    selected from sys.argv: argv[1] = qubit count, argv[2] = number of
    marked states (3-qubit case only), argv[3] = iteration count (3-qubit
    single-solution case only). Any unsupported combination exits via
    usage().
    """
    #Generate some random bits and implement the oracle accordingly with the result
    bits=getrandbits(qc.num_qubits)
    #2 qubits
    if int((sys.argv)[1]) == 2:
        print("Random bits to search for are (decimal representation): " + str(bits))
        oracle_2_qubits(qc,bits)
    #3 qubits
    elif int((sys.argv)[1]) == 3:
        #Single solution
        if int((sys.argv)[2]) == 1:
            '''
            Explanation:
            less than sqrt(N) iterations will be needed (so will need to "floor" (truncate) the result)
            As 2 < sqrt(8) < 3 --> n=2 for 100% prob. With n=1, p=0.78125=78,125%
            In the classical case, p=1/4=25% (single query followed by a random guess: 1/8 + 7/8 · 1/7 = 1/4 = 25%)
            Classical results with two runs, p=1/8+7/8·1/7+6/8·1/6= 1/4 + 1/8 = 3/8 = 0.375 = 37,5%
            '''
            print("Random bits to search for are (decimal representation): " + str(bits))
            #Check whether 1 or 2 iterations were requested
            if (int((sys.argv)[3]) == 1) or (int((sys.argv)[3]) == 2):
                iterations = int((sys.argv)[3])
                for i in range(iterations):
                    oracle_3_qubits_single_solution(qc,bits)
                    # NOTE(review): this uses the module-level
                    # `grover_circuit` global instead of the `qc`
                    # parameter; they are the same object at the single
                    # call site, but this breaks reuse — confirm.
                    diffusion(grover_circuit)
            #For any other case, wrong arguments were used, exit
            else:
                usage()
        #2 possible solutions
        elif int((sys.argv)[2]) == 2:
            '''
            Explanation:
            less than sqrt(N/M) times (M=2 different results to look for) will be needed (so will need to "floor" (truncate) the result)
            As sqrt(8/2) = 2 --> n=1 for a theoretical 100% prob. In the classical case, 13/28 = 46,4%
            '''
            #A list instead of a single element will be used, initialize it with the previous value as first element
            bits=[bits]
            #Generate the second element, also randomly
            bits.append(getrandbits(qc.num_qubits))
            #Elements have to be different, regenerate as many times as needed till different
            while bits[0] == bits[1]:
                bits[1]=getrandbits(3)
            #When done, sort the list of random bits. Order does not matter for our upcoming permutations
            bits.sort()
            print("Random bits to search for are (decimal representation): " + str(bits[0]) + " and " + str(bits[1]))
            oracle_3_qubits_2_solutions(qc,bits)
        #Algorithm only implemented for 1 or 2 possible solution(s), exit if something different requested
        else:
            usage()
    #Algorithm only implemented for 1 or 2 qubits, exit if something different requested
    else:
        usage()
'''
Oracle implementation for 2 qubits.
Simply a controlled-Z gate (cz in qiskit).
For qubits different to 1, an x-gate is needed before and after the cz-gate
'''
def oracle_2_qubits(qc,bits):
    """Phase oracle marking the 2-qubit basis state `bits` (0..3 —
    callers draw it with getrandbits(2)).

    CZ flips the phase of |11> only, so each qubit whose bit in `bits`
    is 0 is wrapped in X gates to map the marked state onto |11> and
    back again.
    """
    flipped = [i for i in range(2) if not (bits >> i) & 1]
    for i in flipped:
        qc.x(i)
    qc.cz(0, 1)
    for i in flipped:
        qc.x(i)
    qc.barrier()
'''
Oracle implementation for 3 qubits and single solution.
Reference for oracles: https://www.nature.com/articles/s41467-017-01904-7 (table 1)
'''
def oracle_3_qubits_single_solution(qc,bits):
    """Phase oracle marking the single 3-qubit basis state `bits`
    (0..7 — callers draw it with getrandbits(3)).

    Each qubit whose bit in `bits` is 0 is wrapped in X gates, mapping
    the marked state onto |111>, where mctz() applies the phase flip.
    Reference for the oracles:
    https://www.nature.com/articles/s41467-017-01904-7 (table 1)
    """
    flipped = [i for i in range(3) if not (bits >> i) & 1]
    for i in flipped:
        qc.x(i)
    mctz(qc)
    for i in flipped:
        qc.x(i)
    qc.barrier()
'''
Oracle implementation for 3 qubits and two possible solutions.
Reference for oracles: https://www.nature.com/articles/s41467-017-01904-7 (table 2)
'''
def oracle_3_qubits_2_solutions(qc,bits):
    """Phase oracle flipping the sign of the two 3-qubit basis states
    bits[0] < bits[1] (each in 0..7; `bits` is sorted by the caller).

    Each pair applies the Z/CZ pattern from table 2 of
    https://www.nature.com/articles/s41467-017-01904-7; the gates realize
    the multilinear expansion of the two-state indicator function.
    """
    if (bits[0] == 0 and bits[1] == 1):
        for i in range(1,3):
            qc.z(i)
        qc.cz(1, 2)
    elif (bits[0] == 0 and bits[1] == 2):
        for i in range(0, 3, 2):
            qc.z(i)
        qc.cz(0, 2)
    elif (bits[0] == 0 and bits[1] == 3):
        for i in range(3):
            qc.z(i)
        qc.cz(1, 2)
        qc.cz(0, 2)
    elif (bits[0] == 0 and bits[1] == 4):
        for i in range(2):
            qc.z(i)
        qc.cz(0, 1)
    elif (bits[0] == 0 and bits[1] == 5):
        for i in range(3):
            qc.z(i)
        qc.cz(1, 2)
        qc.cz(0, 1)
    elif (bits[0] == 0 and bits[1] == 6):
        for i in range(3):
            qc.z(i)
        qc.cz(0, 2)
        qc.cz(0, 1)
    elif (bits[0] == 0 and bits[1] == 7):
        for i in range(3):
            qc.z(i)
        qc.cz(1, 2)
        qc.cz(0, 2)
        qc.cz(0, 1)
    elif (bits[0] == 1 and bits[1] == 2):
        for i in range(2):
            qc.z(i)
        qc.cz(1, 2)
        qc.cz(0, 2)
    elif (bits[0] == 1 and bits[1] == 3):
        qc.z(0)
        qc.cz(0, 2)
    elif (bits[0] == 1 and bits[1] == 4):
        for i in range(0, 3, 2):
            qc.z(i)
        qc.cz(1, 2)
        qc.cz(0, 1)
    elif (bits[0] == 1 and bits[1] == 5):
        qc.z(0)
        qc.cz(0, 1)
    elif (bits[0] == 1 and bits[1] == 6):
        qc.z(0)
        qc.cz(1, 2)
        qc.cz(0, 2)
        qc.cz(0, 1)
    elif (bits[0] == 1 and bits[1] == 7):
        qc.z(0)
        qc.cz(0, 2)
        qc.cz(0, 1)
    elif (bits[0] == 2 and bits[1] == 3):
        qc.z(1)
        qc.cz(1, 2)
    elif (bits[0] == 2 and bits[1] == 4):
        for i in range(1,3):
            qc.z(i)
        qc.cz(0, 2)
        qc.cz(0, 1)
    elif (bits[0] == 2 and bits[1] == 5):
        qc.z(1)
        qc.cz(1, 2)
        qc.cz(0, 2)
        qc.cz(0, 1)
    elif (bits[0] == 2 and bits[1] == 6):
        qc.z(1)
        qc.cz(0, 1)
    elif (bits[0] == 2 and bits[1] == 7):
        qc.z(1)
        qc.cz(1, 2)
        qc.cz(0, 1)
    elif (bits[0] == 3 and bits[1] == 4):
        qc.z(2)
        qc.cz(1, 2)
        qc.cz(0, 2)
        qc.cz(0, 1)
    elif (bits[0] == 3 and bits[1] == 5):
        qc.cz(0, 2)
        qc.cz(0, 1)
    elif (bits[0] == 3 and bits[1] == 6):
        qc.cz(1, 2)
        qc.cz(0, 1)
    elif (bits[0] == 3 and bits[1] == 7):
        qc.cz(0, 1)
    elif (bits[0] == 4 and bits[1] == 5):
        qc.z(2)
        qc.cz(1, 2)
    elif (bits[0] == 4 and bits[1] == 6):
        qc.z(2)
        qc.cz(0, 2)
    elif (bits[0] == 4 and bits[1] == 7):
        qc.z(2)
        qc.cz(1, 2)
        qc.cz(0, 2)
    elif (bits[0] == 5 and bits[1] == 6):
        qc.cz(1, 2)
        qc.cz(0, 2)
    elif (bits[0] == 5 and bits[1] == 7):
        qc.cz(0, 2)
    elif (bits[0] == 6 and bits[1] == 7):
        qc.cz(1, 2)
    qc.barrier()
'''
Diffusion operator: Flip sign and amplify
For 2 qubits, simply apply H and Z to each qubit, then cz, and then apply H again to each qubit:
'''
def diffusion(qc):
    """Grover diffusion operator (inversion about the mean).

    For 2 qubits: H and Z on each qubit, CZ, then H on each qubit.
    For 3 qubits: H+X on each qubit, a multi-controlled Z via mctz(),
    then X+H on each qubit (standard |s>-reflection construction).
    """
    n = qc.num_qubits
    if n == 2:
        for i in (0, 1):
            qc.h(i)
        for i in (0, 1):
            qc.z(i)
        qc.cz(0,1)
        for i in (0, 1):
            qc.h(i)
    elif n == 3:
        for i in range(3):
            qc.h(i)
            qc.x(i)
        # Multi-controlled Z (H . MCT . H on the target qubit).
        mctz(qc)
        qc.barrier()
        for i in range(3):
            qc.x(i)
            qc.h(i)
'''
Add measurements and plot the quantum circuit:
'''
def measure(qc):
    """Append a measurement on every qubit and render the circuit
    diagram with matplotlib (shown by a later plt.show())."""
    qc.measure_all()
    qc.draw('mpl')
    plt.draw()
    plt.title("Quantum Circuit")
'''
Generate results from quantum simulator (no plotting)
'''
def results_qsim(qc):
    """Execute `qc` on the local Aer QASM simulator with 1024 shots.

    Returns:
        The simulator job object (no plotting is done here).
    """
    backend = q.Aer.get_backend('qasm_simulator')
    job = q.execute(qc, backend, shots = 1024)
    return job
'''
Generate results from real quantum hardware (no plotting)
'''
def results_qhw(qc):
    """Execute `qc` on the least busy real IBM Quantum backend.

    Requires IBMQ credentials to already be stored locally (~/.qiskit).
    Transpiles the circuit for the chosen device, submits it, and blocks
    while monitoring the job until completion.

    Returns:
        The hardware job object (no plotting is done here).
    """
    '''
    #Only needed if credentials are not stored (e.g., deleted and regeneration is needed)
    token='XXXXXXXX' #Use token from ibm quantum portal if needed to enable again, should be stored under ~/.qiskit directory
    q.IBMQ.save_account(token)
    '''
    provider = q.IBMQ.load_account()
    provider = q.IBMQ.get_provider()
    # Pick an operational, non-simulator backend with at least 3 qubits.
    device = q.providers.ibmq.least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 3 and
                                                           not x.configuration().simulator and x.status().operational==True))
    print("Running on current least busy device: ", device)
    transpiled_grover_circuit = q.transpile(qc, device, optimization_level=3)
    qobj = q.assemble(transpiled_grover_circuit)
    job = device.run(qobj)
    # Poll the job status every 2 seconds until it finishes.
    q.tools.monitor.job_monitor(job, interval=2)
    return job
'''
Plot results
'''
def draw_job(job, title):
    """Render a histogram of the measurement counts of a finished `job`,
    titled `title` (figure is shown by a later plt.show())."""
    counts = job.result().get_counts()
    plot_histogram(counts)
    plt.draw()
    plt.title(title)
##############################
#End of functions definitions#
##############################

################################
#Program actually starts here!!#
################################

# Initialization: validate arguments and put every qubit into superposition.
grover_circuit = initialize()
# Generate the oracle randomly according to the command line arguments.
oracle(grover_circuit)
# Diffusion. For the 3-qubit single-solution case, oracle() has already
# interleaved a diffusion step with each Grover iteration, so skip it here.
# BUG FIX: guard the sys.argv[2] access so a 2-qubit run with fewer
# arguments does not crash with IndexError.
if not (len(sys.argv) > 2 and int(sys.argv[1]) == 3 and int(sys.argv[2]) == 1):
    diffusion(grover_circuit)
# Add measurements and draw the circuit.
measure(grover_circuit)
# Generate results in the simulator and plot them.
job_sim = results_qsim(grover_circuit)
draw_job(job_sim, "Quantum simulator output")
# Optionally run on real quantum hardware (argument l == 1).
# BUG FIX: guard the sys.argv[4] access so the script does not crash with
# IndexError when fewer than four arguments were supplied.
if len(sys.argv) > 4 and int(sys.argv[4]) == 1:
    plt.show(block=False)
    plt.draw()
    # Next line needed for keeping computations in background while still
    # seeing the previous plots.
    plt.pause(0.001)
    # Generate results in real quantum hardware and plot them as well.
    job_qhw = results_qhw(grover_circuit)
    draw_job(job_qhw, "Quantum hardware output")
# Keep plots active when done till they're closed; used for explanations
# during presentations.
plt.show()
| 29.851685
| 187
| 0.546522
| 2,078
| 13,284
| 3.464389
| 0.179981
| 0.029449
| 0.025698
| 0.033616
| 0.373246
| 0.317405
| 0.301014
| 0.247118
| 0.217947
| 0.211557
| 0
| 0.054229
| 0.300361
| 13,284
| 444
| 188
| 29.918919
| 0.720034
| 0.141448
| 0
| 0.577703
| 0
| 0.010135
| 0.089647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043919
| false
| 0
| 0.023649
| 0
| 0.084459
| 0.033784
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3294741b0f8e1bf0eeabf4019d19a68a63e99c23
| 1,419
|
py
|
Python
|
tests/bind_tests/diagram_tests/strategies.py
|
lycantropos/voronoi
|
977e0b3e5eff2dd294e2e6ce1a8030c763e86233
|
[
"MIT"
] | null | null | null |
tests/bind_tests/diagram_tests/strategies.py
|
lycantropos/voronoi
|
977e0b3e5eff2dd294e2e6ce1a8030c763e86233
|
[
"MIT"
] | null | null | null |
tests/bind_tests/diagram_tests/strategies.py
|
lycantropos/voronoi
|
977e0b3e5eff2dd294e2e6ce1a8030c763e86233
|
[
"MIT"
] | null | null | null |
from hypothesis import strategies
from hypothesis_geometry import planar
from tests.bind_tests.hints import (BoundCell,
BoundDiagram,
BoundEdge,
BoundVertex)
from tests.bind_tests.utils import (bound_source_categories,
to_bound_multipoint,
to_bound_multisegment)
from tests.strategies import (doubles,
integers_32,
sizes)
from tests.utils import to_maybe
# Reusable Hypothesis strategies for the bound (C-extension) Voronoi
# diagram types.
booleans = strategies.booleans()
coordinates = doubles  # vertex coordinates are plain doubles
# Diagram with no cells, edges or vertices.
empty_diagrams = strategies.builds(BoundDiagram)
source_categories = strategies.sampled_from(bound_source_categories)
cells = strategies.builds(BoundCell, sizes,
                          source_categories)
vertices = strategies.builds(BoundVertex, coordinates, coordinates)
# The start vertex is optional (to_maybe) — presumably for edges without
# a finite endpoint; confirm against BoundEdge's constructor.
edges = strategies.builds(BoundEdge, to_maybe(vertices), cells,
                          booleans, booleans)
cells_lists = strategies.lists(cells)
edges_lists = strategies.lists(edges)
vertices_lists = strategies.lists(vertices)
# Full diagrams assembled from independently-drawn component lists.
diagrams = strategies.builds(BoundDiagram, cells_lists, edges_lists,
                             vertices_lists)
# Planar geometries converted into the bound representation.
multipoints = planar.multipoints(integers_32).map(to_bound_multipoint)
multisegments = planar.multisegments(integers_32).map(to_bound_multisegment)
| 44.34375
| 76
| 0.653982
| 132
| 1,419
| 6.80303
| 0.287879
| 0.089087
| 0.066815
| 0.040089
| 0.044543
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005911
| 0.284708
| 1,419
| 31
| 77
| 45.774194
| 0.878818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.206897
| 0
| 0.206897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32955f3ecdc5ec46e6e7127a3ed57f1411af2c54
| 2,381
|
py
|
Python
|
apps/blog/serializers.py
|
yc19890920/dble_fastapi_blog
|
dd9b8984d849df893d4fea270e8b75ac12d01241
|
[
"Apache-2.0"
] | null | null | null |
apps/blog/serializers.py
|
yc19890920/dble_fastapi_blog
|
dd9b8984d849df893d4fea270e8b75ac12d01241
|
[
"Apache-2.0"
] | 2
|
2021-03-31T19:56:46.000Z
|
2021-04-30T21:19:15.000Z
|
apps/blog/serializers.py
|
yc19890920/dble_fastapi_blog
|
dd9b8984d849df893d4fea270e8b75ac12d01241
|
[
"Apache-2.0"
] | null | null | null |
"""
@Author: YangCheng
@contact: 1248644045@qq.com
@Software: Y.C
@Time: 2020/7/21 15:22
"""
from typing import List
from pydantic import BaseModel, Field
from tortoise import Tortoise
from tortoise.contrib.pydantic import pydantic_model_creator, pydantic_queryset_creator
from lib.tortoise.pydantic import json_encoders
from .models import Tag, Category, Article
# Resolve forward references between the blog models so the pydantic
# creators below can build complete (relation-aware) schemas.
Tortoise.init_models(["apps.blog.models"], "models")
class PydanticResponse(BaseModel):
    """Base shape of paginated list responses: page index, page size and
    total item count. Concrete responses add a `results` field."""
    index: int  # current page index
    limit: int  # page size
    total: int  # total number of matching items
# -*- tag -*-
# Tag create/update
TagCreateRequest = pydantic_model_creator(
    Tag, name="TagCreateRequest", exclude_readonly=True
)
# BUG FIX: the creation-response model was built from the Category model
# (copy-paste from the Category section below); build it from Tag.
TagCreateResponse = pydantic_model_creator(
    Tag, name="TagCreateResponse", exclude=["articles"]
)
TagCreateResponse.Config.json_encoders = json_encoders
# Tag List
TagListSerializer = pydantic_queryset_creator(
    Tag, name="TagListSerializer", exclude=["articles"]
)
class TagListResponse(PydanticResponse):
    """Paginated list of tags."""
    results: List[TagListSerializer]
class TagResponse(BaseModel):
    """Minimal tag representation (id and name only)."""
    id: int
    name: str
# -*- Category -*-
# Category create/update
CategoryCreateRequest = pydantic_model_creator(
    Category, name="CategoryCreateRequest", exclude_readonly=True
)
# Response model for created/updated categories; "articles" is excluded —
# presumably to keep the back-relation out of the payload; confirm.
CategoryCreateResponse = pydantic_model_creator(
    Category, name="CategoryCreateResponse", exclude=("articles",)
)
CategoryCreateResponse.Config.json_encoders = json_encoders
# Category List
CategoryListSerializer = pydantic_queryset_creator(
    Category, name="CategoryListSerializer", exclude=("articles",)
)
class CategoryListResponse(PydanticResponse):
    """Paginated list of categories."""
    results: List[CategoryListSerializer]
# -*- Article -*-
# Article create/update
class ArticleCreateRequest(BaseModel):
    """Payload for creating (or updating) an article."""
    title: str = Field(..., description="Title")
    content: str = Field(..., description="Content")
    abstract: str = None  # optional abstract, defaults to None
    # NOTE(review): description="Content" below looks like a copy-paste
    # from the field above — probably meant "Status"; confirm.
    status: str = Field(default="publish", description="Content")
    category_id: int = Field(..., description="category_id")
    tags: List[int] = Field(..., description="tag_id list")
# Response model for created articles (no fields excluded).
ArticleCreateResponse = pydantic_model_creator(
    Article, name="ArticleCreateResponse"
)
ArticleCreateResponse.Config.json_encoders = json_encoders
ArticleListSerializer = pydantic_queryset_creator(
    Article, name="ArticleListSerializer"
)
# Article List
class ArticleListResponse(PydanticResponse):
    """Paginated list of articles.

    NOTE(review): unlike the Tag/Category list responses, `results` uses
    ArticleCreateResponse rather than ArticleListSerializer — verify
    which was intended before relying on the schema.
    """
    results: List[ArticleCreateResponse]
| 25.063158
| 87
| 0.761025
| 237
| 2,381
| 7.506329
| 0.320675
| 0.047218
| 0.067454
| 0.047218
| 0.104553
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01015
| 0.131037
| 2,381
| 94
| 88
| 25.329787
| 0.849686
| 0.099118
| 0
| 0
| 0
| 0
| 0.121653
| 0.050258
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.481481
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
329a1a34027b83c6621340af222a98c0d43067e0
| 1,102
|
py
|
Python
|
Python/image_analysis_centerlines/analysis_example.py
|
fromenlab/guides
|
ac9831265f8219d5b5a8ee3a441fc77c7ae4fe3b
|
[
"MIT"
] | null | null | null |
Python/image_analysis_centerlines/analysis_example.py
|
fromenlab/guides
|
ac9831265f8219d5b5a8ee3a441fc77c7ae4fe3b
|
[
"MIT"
] | null | null | null |
Python/image_analysis_centerlines/analysis_example.py
|
fromenlab/guides
|
ac9831265f8219d5b5a8ee3a441fc77c7ae4fe3b
|
[
"MIT"
] | null | null | null |
from skimage import img_as_bool, io, color, morphology
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Testing process

# Import test images as boolean (black & white) masks.
one = img_as_bool(color.rgb2gray(io.imread('1.jpg')))
cross = img_as_bool(color.rgb2gray(io.imread('cross.jpg')))
grid = img_as_bool(color.rgb2gray(io.imread('grid.jpg')))

# Get the skeleton (1-pixel-wide centerline) of each mask.
one_skel = morphology.skeletonize(one)
cross_skel = morphology.skeletonize(cross)
grid_skel = morphology.skeletonize(grid)

# Get the medial axis together with the distance transform (distance of
# each pixel to the nearest background pixel, i.e. the local half-width).
one_med, one_med_distance = morphology.medial_axis(one, return_distance=True)
cross_med, cross_med_distance = morphology.medial_axis(cross, return_distance=True)
grid_med, grid_med_distance = morphology.medial_axis(grid, return_distance=True)

# Distance values sampled along the skeleton of "1.jpg".
# NOTE(review): the cross/grid skeletons and medial axes computed above
# are never used below.
one_skel_distance = one_med_distance*one_skel

# Data processing for "1.jpg": tabulate (y, x) skeleton coordinates with
# the corresponding local-width values in a single DataFrame.
one_skel_nonzero = one_skel_distance.nonzero()
trans = np.transpose(one_skel_nonzero)
df_coords = pd.DataFrame(data = trans, columns = ["y", "x"])
df_dist = pd.DataFrame(data = one_skel_distance[one_skel_nonzero])
combined = pd.concat([df_coords, df_dist], axis=1)
| 34.4375
| 83
| 0.791289
| 170
| 1,102
| 4.864706
| 0.3
| 0.067715
| 0.043531
| 0.050786
| 0.221282
| 0.108827
| 0.108827
| 0
| 0
| 0
| 0
| 0.006036
| 0.098004
| 1,102
| 32
| 84
| 34.4375
| 0.825956
| 0.098004
| 0
| 0
| 0
| 0
| 0.024292
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.210526
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
329a5ba2f15a3280c3c7c2b2a6a0114abcec0cf9
| 485
|
py
|
Python
|
resources/settings.py
|
Miriel-py/Room-Wizard
|
83d86fe8e8fed8bb073b38465cd0e97b1a6113b8
|
[
"MIT"
] | null | null | null |
resources/settings.py
|
Miriel-py/Room-Wizard
|
83d86fe8e8fed8bb073b38465cd0e97b1a6113b8
|
[
"MIT"
] | null | null | null |
resources/settings.py
|
Miriel-py/Room-Wizard
|
83d86fe8e8fed8bb073b38465cd0e97b1a6113b8
|
[
"MIT"
] | null | null | null |
# global_data.py
import os

from dotenv import load_dotenv

# Load environment variables from the .env file so the bot token and
# debug flag can be read below.
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
DEBUG_MODE = os.getenv('DEBUG_MODE')

# Repository root: two directory levels above this file.
_THIS_FILE = os.path.abspath(__file__)
BOT_DIR = os.path.dirname(os.path.dirname(_THIS_FILE))
DB_FILE = os.path.join(BOT_DIR, 'database/room_wizard_db.db')
LOG_FILE = os.path.join(BOT_DIR, 'logs/discord.log')

# Guild ids used for development/test deployments.
DEV_GUILDS = [730115558766411857]

# Embed color
EMBED_COLOR = 0x6C48A7
DEFAULT_FOOTER = 'Just pinning things.'
| 24.25
| 69
| 0.764948
| 78
| 485
| 4.5
| 0.512821
| 0.08547
| 0.074074
| 0.08547
| 0.205128
| 0.11396
| 0
| 0
| 0
| 0
| 0
| 0.053364
| 0.11134
| 485
| 20
| 70
| 24.25
| 0.761021
| 0.131959
| 0
| 0
| 0
| 0
| 0.203349
| 0.062201
| 0
| 0
| 0.019139
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
329eec6934c9b0ff2824d0ffd01a1902dae80850
| 1,767
|
py
|
Python
|
detection_algorithms/temporal_anomaly_detection/model_def.py
|
hanahs-deepfake-detection/detection-algorithms
|
6d7ec53eaf333adb10a1aba448f80fceaf7722be
|
[
"MIT"
] | null | null | null |
detection_algorithms/temporal_anomaly_detection/model_def.py
|
hanahs-deepfake-detection/detection-algorithms
|
6d7ec53eaf333adb10a1aba448f80fceaf7722be
|
[
"MIT"
] | null | null | null |
detection_algorithms/temporal_anomaly_detection/model_def.py
|
hanahs-deepfake-detection/detection-algorithms
|
6d7ec53eaf333adb10a1aba448f80fceaf7722be
|
[
"MIT"
] | null | null | null |
"""
Model Definition
"""
from tensorflow import keras
from tensorflow.keras.applications import ResNet101V2
from tensorflow.keras.layers import (
BatchNormalization, Conv2D, Dense, Dropout, Flatten, LSTM, MaxPool2D,
TimeDistributed, Lambda
)
import tensorflow as tf
from .spatial_transformer.bilinear_sampler import BilinearSampler
def gen_model(batch_size, video_frames):
    """Build the temporal anomaly-detection model.

    A TimeDistributed conv stack predicts 6 affine parameters per frame
    (spatial transformer head), frames are resampled to 224x224 by the
    bilinear sampler, passed per frame through ResNet101V2, then two LSTMs
    and a sigmoid head produce one score per clip.

    Args:
        batch_size: fixed batch size baked into the input spec.
        video_frames: number of frames per input clip.

    Returns:
        An uncompiled ``keras.Model`` mapping
        (batch, video_frames, 384, 512, 3) -> (batch, 1).
    """
    inputs = keras.Input((video_frames, 384, 512, 3), batch_size=batch_size)

    # Localization network. Zero-initialized weights plus an identity-affine
    # bias ([1,0,0,0,1,0]) make the transformer start out as a no-op.
    features = TimeDistributed(Conv2D(32, kernel_size=(3, 3), activation='relu'))(inputs)
    features = TimeDistributed(MaxPool2D())(features)
    features = TimeDistributed(Conv2D(32, kernel_size=(3, 3), activation='relu'))(features)
    features = TimeDistributed(MaxPool2D())(features)
    features = TimeDistributed(Flatten())(features)
    features = TimeDistributed(Dense(64, activation='tanh', kernel_initializer='zeros'))(features)
    features = TimeDistributed(Dropout(0.5))(features)
    features = TimeDistributed(Dense(6, activation='tanh', kernel_initializer='zeros',
        bias_initializer=lambda shape, dtype=None: tf.constant(
            [1, 0, 0, 0, 1, 0], tf.float32
        )))(features)

    # Concatenate affine parameters with the flattened raw frames so the
    # sampler receives both, then resample each frame to 224x224.
    features = Lambda(lambda ls: tf.concat([ls[0], tf.reshape(ls[1],
        (batch_size, video_frames, -1))], -1))([features, inputs])
    features = TimeDistributed(BilinearSampler(input_shape=(batch_size, 384, 512, 3),
        output_shape=(batch_size, 224, 224, 3)))(features)

    # Per-frame CNN features; weights=None trains the backbone from scratch.
    backbone = ResNet101V2(include_top=False, weights=None)
    features = TimeDistributed(backbone)(features)
    features = TimeDistributed(Flatten())(features)

    # Temporal aggregation and binary classification head.
    features = LSTM(32, return_sequences=True)(features)
    features = LSTM(32)(features)
    features = Dense(10, activation='relu')(features)
    features = BatchNormalization()(features)
    outputs = Dense(1, activation='sigmoid')(features)
    return keras.Model(inputs=inputs, outputs=outputs)
| 42.071429
| 84
| 0.654782
| 220
| 1,767
| 5.163636
| 0.322727
| 0.022887
| 0.104754
| 0.035211
| 0.242077
| 0.178697
| 0.088028
| 0.088028
| 0.088028
| 0.088028
| 0
| 0.04745
| 0.200905
| 1,767
| 41
| 85
| 43.097561
| 0.757082
| 0.009055
| 0
| 0.114286
| 0
| 0
| 0.021228
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.142857
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
329f8f1e2538fb2f56b719613eee2ed54216347d
| 4,884
|
py
|
Python
|
osspeak/platforms/windows.py
|
OSSpeak/OSSpeak
|
327c38a37684165f87bf8d76ab2ca135b43b8ab7
|
[
"MIT"
] | 1
|
2020-03-17T10:24:41.000Z
|
2020-03-17T10:24:41.000Z
|
osspeak/platforms/windows.py
|
OSSpeak/OSSpeak
|
327c38a37684165f87bf8d76ab2ca135b43b8ab7
|
[
"MIT"
] | 12
|
2016-09-28T05:16:00.000Z
|
2020-11-27T22:32:40.000Z
|
osspeak/platforms/windows.py
|
OSSpeak/OSSpeak
|
327c38a37684165f87bf8d76ab2ca135b43b8ab7
|
[
"MIT"
] | null | null | null |
'''
Collection of Windows-specific I/O functions
'''
import msvcrt
import time
import ctypes
from platforms import winconstants, winclipboard
# Module-level aliases for the user32 window-enumeration API.
EnumWindows = ctypes.windll.user32.EnumWindows
# Callback signature for EnumWindows: BOOL (*)(int*, int*)
EnumWindowsProc = ctypes.WINFUNCTYPE(ctypes.c_bool, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))
# Wide-char (W) variants: titles are returned as Python str
GetWindowText = ctypes.windll.user32.GetWindowTextW
GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW
IsWindowVisible = ctypes.windll.user32.IsWindowVisible
def flush_io_buffer():
    """Drain any pending console keystrokes, echoing them to stdout."""
    while msvcrt.kbhit():
        key = msvcrt.getch()
        print(key.decode('utf8'), end='')
def close_active_window():
    """Ask the current foreground window to close by posting WM_CLOSE."""
    user32 = ctypes.windll.user32
    user32.PostMessageA(user32.GetForegroundWindow(), winconstants.WM_CLOSE, 0, 0)
def get_active_window_name():
    """Return the title of the current foreground window."""
    return get_window_title(ctypes.windll.user32.GetForegroundWindow())
def maximize_active_window():
    """Maximize the current foreground window (3 == SW_MAXIMIZE)."""
    user32 = ctypes.windll.user32
    user32.ShowWindow(user32.GetForegroundWindow(), 3)
def minimize_active_window():
    """Minimize the current foreground window (6 == SW_MINIMIZE)."""
    user32 = ctypes.windll.user32
    user32.ShowWindow(user32.GetForegroundWindow(), 6)
def get_window_title(hwnd):
    """Return the window title for *hwnd* as a Python string."""
    size = GetWindowTextLength(hwnd) + 1  # +1 for the terminating NUL
    buff = ctypes.create_unicode_buffer(size)
    GetWindowText(hwnd, buff, size)
    return buff.value
def get_matching_windows(title_list):
    """Return {lowercased title: hwnd} for every visible window whose title
    contains all of the names in *title_list*."""
    matches = {}

    def collect(hwnd, lParam):
        # Always return True so enumeration continues through all windows.
        if not IsWindowVisible(hwnd):
            return True
        title = get_window_title(hwnd).lower()
        if all(name in title for name in title_list):
            matches[title] = hwnd
        return True

    EnumWindows(EnumWindowsProc(collect), 0)
    return matches
def activate_window(title, position=1):
    """Bring a window matching *title* to the foreground.

    :param title: iterable of lowercase substrings; a window matches when its
        title contains all of them (see get_matching_windows).
    :param position: 1-based pick among matches sorted by title length.
    :return: True if the window ended up in the foreground, False otherwise.
    :raises IndexError: when fewer than *position* windows match.
    """
    if position > 0:
        position -= 1
    matches = get_matching_windows(title)
    sorted_keys = list(sorted(matches.keys(), key=len))
    key = sorted_keys[position]
    hwnd = matches[key]
    # magic incantations to activate window consistently
    IsIconic = ctypes.windll.user32.IsIconic
    ShowWindow = ctypes.windll.user32.ShowWindow
    GetForegroundWindow = ctypes.windll.user32.GetForegroundWindow
    GetWindowThreadProcessId = ctypes.windll.user32.GetWindowThreadProcessId
    BringWindowToTop = ctypes.windll.user32.BringWindowToTop
    AttachThreadInput = ctypes.windll.user32.AttachThreadInput
    SetForegroundWindow = ctypes.windll.user32.SetForegroundWindow
    SystemParametersInfo = ctypes.windll.user32.SystemParametersInfoA
    # Restore a minimized window before trying to focus it.
    if IsIconic(hwnd):
        ShowWindow(hwnd, winconstants.SW_RESTORE)
    if GetForegroundWindow() == hwnd:
        return True
    # Strategy 1: attach our input queue to the foreground thread's queue so
    # SetForegroundWindow succeeds despite focus-stealing restrictions.
    ForegroundThreadID = GetWindowThreadProcessId(GetForegroundWindow(), None)
    ThisThreadID = GetWindowThreadProcessId(hwnd, None)
    if AttachThreadInput(ThisThreadID, ForegroundThreadID, True):
        BringWindowToTop(hwnd)
        SetForegroundWindow(hwnd)
        AttachThreadInput(ThisThreadID, ForegroundThreadID, False)
        if GetForegroundWindow() == hwnd:
            return True
    # Strategy 2: temporarily zero the system foreground-lock timeout.
    timeout = ctypes.c_int()
    zero = ctypes.c_int(0)
    SystemParametersInfo(winconstants.SPI_GETFOREGROUNDLOCKTIMEOUT, 0, ctypes.byref(timeout), 0)
    # BUG FIX: this statement was previously a bare tuple — the
    # SystemParametersInfo call was missing — so the lock timeout was never
    # actually cleared and this fallback could not work.
    SystemParametersInfo(winconstants.SPI_SETFOREGROUNDLOCKTIMEOUT, 0, ctypes.byref(zero), winconstants.SPIF_SENDCHANGE)
    BringWindowToTop(hwnd)
    SetForegroundWindow(hwnd)
    # Restore the user's original timeout.
    SystemParametersInfo(winconstants.SPI_SETFOREGROUNDLOCKTIMEOUT, 0, ctypes.byref(timeout), winconstants.SPIF_SENDCHANGE)
    if GetForegroundWindow() == hwnd:
        return True
    return False
def get_mouse_location():
    """Return the current cursor position as an (x, y) tuple."""
    point = winconstants.POINT()
    ctypes.windll.user32.GetCursorPos(ctypes.byref(point))
    return point.x, point.y
def mouse_click(button, direction, number):
    """Emit *number* repetitions of the mouse events for button/direction."""
    event_nums = get_mouse_event_nums(button, direction)
    for _ in range(number):
        for event in event_nums:
            ctypes.windll.user32.mouse_event(event, 0, 0, 0, 0)
def mouse_move(x=None, y=None, relative=False):
    """Move the cursor to (x, y), or offset by (x, y) when relative=True.

    Omitted coordinates default to the current position in absolute mode
    and to a zero offset in relative mode.
    """
    startx, starty = get_mouse_location()
    if relative:
        dx = 0 if x is None else x
        dy = 0 if y is None else y
        ctypes.windll.user32.SetCursorPos(startx + dx, starty + dy)
    else:
        newx = startx if x is None else x
        newy = starty if y is None else y
        ctypes.windll.user32.SetCursorPos(newx, newy)
def get_clipboard_contents():
    """Return the current clipboard text via the winclipboard helpers."""
    read_clipboard = winclipboard.init_windows_clipboard()[1]
    return read_clipboard()
def set_clipboard_contents(text):
    """Copy str(text) onto the Windows clipboard."""
    write_clipboard = winclipboard.init_windows_clipboard()[0]
    return write_clipboard(str(text))
def get_mouse_event_nums(button, direction):
    """Translate a button/direction pair into win32 mouse_event flag lists.

    Returns None for unsupported combinations (matching the original's
    implicit fall-through).
    """
    return {
        ('left', 'down'): [2],
        ('left', 'up'): [4],
        ('left', 'both'): [2, 4],
        ('right', 'down'): [8],
        ('right', 'up'): [16],
        ('right', 'both'): [8, 16],
    }.get((button, direction))
| 37.282443
| 123
| 0.719287
| 559
| 4,884
| 6.159213
| 0.255814
| 0.080163
| 0.120244
| 0.053732
| 0.250363
| 0.118211
| 0.069997
| 0.069997
| 0.069997
| 0.069997
| 0
| 0.020025
| 0.182023
| 4,884
| 131
| 124
| 37.282443
| 0.841802
| 0.019656
| 0
| 0.148148
| 0
| 0
| 0.010667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.138889
| false
| 0
| 0.037037
| 0.018519
| 0.296296
| 0.009259
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32a0d30f56c4a1916c5ad0aef5a7b50495e1860b
| 715
|
py
|
Python
|
sudokusolver/common/messenger.py
|
Blondberg/SudokuSolver
|
4a6f1f927d41f7a39a953b9784b28d570edf1f09
|
[
"MIT"
] | null | null | null |
sudokusolver/common/messenger.py
|
Blondberg/SudokuSolver
|
4a6f1f927d41f7a39a953b9784b28d570edf1f09
|
[
"MIT"
] | null | null | null |
sudokusolver/common/messenger.py
|
Blondberg/SudokuSolver
|
4a6f1f927d41f7a39a953b9784b28d570edf1f09
|
[
"MIT"
] | null | null | null |
# messenger.py - contains functions to create different kinds of messages like info or error

# ANSI escape codes used to color terminal output, e.g.
# print(BCOLORS['WARNING'] + "Warning: ..." + BCOLORS['ENDC'])
BCOLORS = {
    'HEADER': '\033[95m',
    'OKBLUE': '\033[94m',
    'OKGREEN': '\033[92m',
    'WARNING': '\033[93m',
    'FAIL': '\033[91m',
    'ENDC': '\033[0m',
    'BOLD': '\033[1m',
    'UNDERLINE': '\033[4m'
}


def _print_colored(color_key, message):
    """Print *message* wrapped in the named ANSI color, resetting afterwards."""
    print(BCOLORS[color_key] + message + BCOLORS['ENDC'])


def info(message):
    """Print an informational (blue) message."""
    _print_colored('OKBLUE', message)


def action(message):
    """Print an action/progress (green) message."""
    _print_colored('OKGREEN', message)


def error(message):
    """Print an error (red) message."""
    _print_colored('FAIL', message)
| 23.833333
| 111
| 0.633566
| 87
| 715
| 5.206897
| 0.528736
| 0.10596
| 0.125828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063574
| 0.186014
| 715
| 29
| 112
| 24.655172
| 0.714777
| 0.348252
| 0
| 0
| 0
| 0
| 0.297826
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0
| 0
| 0.1875
| 0.1875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32a23291b7486cbc9a87ce5a914dd735071b20e4
| 554
|
py
|
Python
|
test.py
|
w0w/miniPFC
|
63b1bf608de03efada2a1b57c0370b6a7c2bf1ad
|
[
"MIT"
] | null | null | null |
test.py
|
w0w/miniPFC
|
63b1bf608de03efada2a1b57c0370b6a7c2bf1ad
|
[
"MIT"
] | null | null | null |
test.py
|
w0w/miniPFC
|
63b1bf608de03efada2a1b57c0370b6a7c2bf1ad
|
[
"MIT"
] | null | null | null |
import json
import RPi.GPIO as GPIO
from modules.sensor import getTempC, getHumidity


def loadConfig():
    """Load the pin-number mapping from config/pin.json (relative to CWD)."""
    with open('./config/pin.json') as data_file:
        data = json.load(data_file)
    return data


# pin numbers from the config; evaluated once at import time
currentPins = loadConfig().values()


def bootActuators():
    '''Assumes that pi is booting and set off all the relays'''
    GPIO.setmode(GPIO.BOARD)  # physical board pin numbering
    for i, p in enumerate(currentPins):
        GPIO.setup(p, GPIO.OUT)
        # NOTE(review): driving HIGH while printing 'turned off' suggests
        # active-low relay boards — confirm against the wiring.
        GPIO.output(p, GPIO.HIGH)
        print(p, GPIO.input(p))
    print('Actuators turned off')


# runs at import: switch all relays off as soon as the Pi boots
bootActuators()
| 25.181818
| 63
| 0.66426
| 76
| 554
| 4.815789
| 0.644737
| 0.040984
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.220217
| 554
| 22
| 64
| 25.181818
| 0.847222
| 0.095668
| 0
| 0
| 0
| 0
| 0.074597
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.1875
| 0
| 0.375
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32a426fd1c9efac97183a6c708ae91ac77c14062
| 1,170
|
py
|
Python
|
example.py
|
clagraff/habu
|
28d05c2fa2204b26177bbaed969648b92b89c735
|
[
"MIT"
] | null | null | null |
example.py
|
clagraff/habu
|
28d05c2fa2204b26177bbaed969648b92b89c735
|
[
"MIT"
] | null | null | null |
example.py
|
clagraff/habu
|
28d05c2fa2204b26177bbaed969648b92b89c735
|
[
"MIT"
] | null | null | null |
import json
import habu
def do_req(uri, *args, **kwargs):
    """Fake request function returning canned HAL responses keyed by URI.

    Extra positional/keyword arguments are accepted to match the real
    request signature but are ignored.
    """
    root = {
        "_links": {
            "people": {"href": "/people"},
            "animals": {"href": "/animals"},
        }
    }
    people = {
        "_links": {
            "self": {"href": "/products"}
        },
        "_embedded": {
            "people": [
                {"_links": {"self": {"href": "/people/clagraff"}}, "name": "Curtis", "age": 22}
            ]
        },
        "total": 1,
    }
    curtis = {
        "_links": {
            "self": {"href": "/people/clagraff"}
        },
        "name": "Curtis",
        "age": 22,
    }
    route_data = {
        "/": root,
        "/people": people,
        "/people/clagraff": curtis,
    }
    return route_data[uri]
def main():
    """Demo: browse the canned HAL API served by do_req through habu."""
    # Wire habu to the in-memory responder instead of real HTTP.
    habu.set_request_func(do_req)
    api = habu.enter("/")
    people = api.people()
    print("There are %i people" % people.total)
    # NOTE(review): 'embedded' presumably maps to the '_embedded' HAL key —
    # verify against the habu library.
    for person in people.embedded.people:
        print("Hi! I am %s and I am %i years old" % (person.name, person.age))
    # Enter a resource directly by URI instead of following links.
    curtis = habu.enter("/people/clagraff")
    print(curtis)


if __name__ == "__main__":
    main()
| 23.4
| 105
| 0.417949
| 105
| 1,170
| 4.47619
| 0.438095
| 0.119149
| 0.082979
| 0.080851
| 0.178723
| 0.178723
| 0.178723
| 0.178723
| 0.178723
| 0
| 0
| 0.007194
| 0.405983
| 1,170
| 49
| 106
| 23.877551
| 0.669065
| 0
| 0
| 0.073171
| 0
| 0
| 0.232479
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.04878
| 0
| 0.121951
| 0.073171
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32a62b611ae086d7c010dc8106960f0f8f3738b2
| 1,162
|
py
|
Python
|
notify_tweet.py
|
mkaraki/WatchTweets
|
9b0a4ef66e38311453fff99d02091758b1bd0df5
|
[
"MIT"
] | null | null | null |
notify_tweet.py
|
mkaraki/WatchTweets
|
9b0a4ef66e38311453fff99d02091758b1bd0df5
|
[
"MIT"
] | 1
|
2022-01-26T18:03:15.000Z
|
2022-01-26T18:03:35.000Z
|
notify_tweet.py
|
mkaraki/WatchTweets
|
9b0a4ef66e38311453fff99d02091758b1bd0df5
|
[
"MIT"
] | null | null | null |
import json
import os
import requests
from dotenv import load_dotenv
# You have to configure in this file to notify other services
def notifyHandler(tweet):
    """Fan a tweet out to every configured notification target."""
    notifyDiscord(tweet)
def notifyDiscord(tweet, find_user_info=False):
    """Post *tweet* as an embed to the webhook in DISCORD_WEBHOOK_URL.

    NOTE(review): find_user_info is accepted but never used — kept only for
    interface compatibility; presumably a planned feature.
    """
    msg = tweet['text']
    # Replace t.co short links with their expanded originals.
    if 'entities' in tweet and 'urls' in tweet['entities']:
        for url in tweet['entities']['urls']:
            msg = msg.replace(url['url'], url['expanded_url'])
    embed = {
        'description': msg,
        'author': {
            'name': tweet['author_id'],
            'url': 'https://twitter.com/intent/user?user_id=' + tweet['author_id'],
        },
        'title': 'Tweet',
        'url': 'https://twitter.com/intent/like?tweet_id=' + tweet['id'],
        'footer': {
            'text': 'Twitter',
            'icon_url': 'http://github.com/twitter.png',
        },
        'timestamp': tweet['created_at'],
    }
    payload = json.dumps({'embeds': [embed]})
    requests.post(os.getenv('DISCORD_WEBHOOK_URL'), payload,
                  headers={'Content-Type': 'application/json'})
# Load .env at import so DISCORD_WEBHOOK_URL is available when notifying.
load_dotenv(override=True)
| 26.409091
| 87
| 0.553356
| 129
| 1,162
| 4.883721
| 0.534884
| 0.031746
| 0.04127
| 0.057143
| 0.07619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.288296
| 1,162
| 43
| 88
| 27.023256
| 0.76179
| 0.050775
| 0
| 0.0625
| 0
| 0
| 0.286104
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32ac15da27e5771cb19e9b355fd09244b1a2fee3
| 561
|
py
|
Python
|
misprogs/sensor_Luz_LCD.py
|
dacocube/CursoGalileo
|
1dac903031d9ff61174cb0c5e00e3f3795ea60de
|
[
"Apache-2.0"
] | null | null | null |
misprogs/sensor_Luz_LCD.py
|
dacocube/CursoGalileo
|
1dac903031d9ff61174cb0c5e00e3f3795ea60de
|
[
"Apache-2.0"
] | null | null | null |
misprogs/sensor_Luz_LCD.py
|
dacocube/CursoGalileo
|
1dac903031d9ff61174cb0c5e00e3f3795ea60de
|
[
"Apache-2.0"
] | null | null | null |
import signal
import sys
import time
import pyupm_grove as grove
import pyupm_i2clcd as lcd
def interruptHandler(signal, frame):
    # SIGINT handler: exit cleanly on Ctrl-C.
    sys.exit(0)


if __name__ == '__main__':
    signal.signal(signal.SIGINT, interruptHandler)
    # Jhd1313m1(bus, lcd_address, rgb_address) — Grove RGB LCD on I2C bus 0.
    myLcd = lcd.Jhd1313m1(0, 0x3E, 0x62)
    # Grove light sensor on analog pin 0.
    sensorluz = grove.GroveLight(0)
    # Backlight color components (R, G, B).
    coloR = 255
    colorG = 200
    colorB = 100
    myLcd.setColor(coloR, colorG, colorB)
    # read the input and print, waiting 1/2 seconds between reading
    while True:
        valorSensor = sensorluz.value()
        myLcd.setCursor(0, 0)
        myLcd.write('%6d' % valorSensor)
        time.sleep(0.5)
    # NOTE(review): unreachable — the loop above never exits except via SIGINT.
    del sensorluz
| 20.777778
| 63
| 0.761141
| 82
| 561
| 5.085366
| 0.634146
| 0.052758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061856
| 0.135472
| 561
| 26
| 64
| 21.576923
| 0.797938
| 0.108734
| 0
| 0
| 0
| 0
| 0.022044
| 0
| 0
| 0
| 0.016032
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.238095
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32b26100558c8d0079fd4f055056d994cd62c099
| 9,553
|
py
|
Python
|
clustviz/clarans.py
|
barbarametzler/ClustViz
|
a460e1ffb5195dfe1e12bca106366901d169a690
|
[
"MIT"
] | 6
|
2019-11-14T11:22:54.000Z
|
2020-03-01T09:14:21.000Z
|
clustviz/clarans.py
|
barbarametzler/ClustViz
|
a460e1ffb5195dfe1e12bca106366901d169a690
|
[
"MIT"
] | 2
|
2020-07-21T07:49:07.000Z
|
2021-04-06T16:16:09.000Z
|
clustviz/clarans.py
|
barbarametzler/ClustViz
|
a460e1ffb5195dfe1e12bca106366901d169a690
|
[
"MIT"
] | 5
|
2020-07-14T15:22:00.000Z
|
2022-03-19T19:45:32.000Z
|
import random
from typing import Tuple, Dict, Any
import scipy
import itertools
import graphviz
import numpy as np
import pandas as pd
from clustviz.pam import plot_pam
from pyclustering.utils import euclidean_distance_square
from pyclustering.cluster.clarans import clarans as clarans_pyclustering
class clarans(clarans_pyclustering):
    # NOTE(review): this subclass deliberately reuses the parent's class name
    # "clarans", so name-mangled private attributes such as
    # self.__pointer_data resolve to _clarans__pointer_data and therefore
    # match the fields set by the pyclustering parent. Renaming this class
    # would break every self.__... access below.

    def process(self, plotting: bool = False):
        """!
        @brief Performs cluster analysis in line with rules of CLARANS algorithm.

        @param[in] plotting (bool): if True, plot the clustering produced by
                   each local search (and the final result) with plot_pam.

        @return (clarans) Returns itself (CLARANS instance).

        @see get_clusters()
        @see get_medoids()

        """
        random.seed()

        # loop for a numlocal number of times
        for _ in range(0, self.__numlocal):
            print("numlocal: ", _)
            # set (current) random medoids
            self.__current = random.sample(
                range(0, len(self.__pointer_data)), self.__number_clusters
            )

            # update clusters in line with random allocated medoids
            self.__update_clusters(self.__current)

            # optimize configuration
            self.__optimize_configuration()

            # obtain cost of current cluster configuration and compare it with the best obtained
            estimation = self.__calculate_estimation()
            if estimation < self.__optimal_estimation:
                print(
                    "Better configuration found with medoids: {0} and cost: {1}".format(
                        self.__current[:], estimation
                    )
                )
                self.__optimal_medoids = self.__current[:]
                self.__optimal_estimation = estimation

                if plotting is True:
                    self.__update_clusters(self.__optimal_medoids)
                    plot_pam(
                        self.__pointer_data,
                        dict(zip(self.__optimal_medoids, self.__clusters)),
                    )
            else:
                print(
                    "Configuration found does not improve current best one because its cost is {0}".format(
                        estimation
                    )
                )
                if plotting is True:
                    self.__update_clusters(self.__current[:])
                    plot_pam(
                        self.__pointer_data,
                        dict(zip(self.__current[:], self.__clusters)),
                    )

        # leave the instance holding the best clustering found
        self.__update_clusters(self.__optimal_medoids)

        if plotting is True:
            print("FINAL RESULT:")
            plot_pam(
                self.__pointer_data,
                dict(zip(self.__optimal_medoids, self.__clusters)),
            )

        return self

    def __optimize_configuration(self):
        """!
        @brief Finds quasi-optimal medoids and updates in line with them clusters in line with algorithm's rules.

        Randomly swaps one medoid for a non-medoid candidate; a swap is kept
        whenever its total cost delta is negative, and the neighbor counter
        restarts. Stops after __maxneighbor consecutive non-improving tries.
        """
        index_neighbor = 0
        counter = 0
        while index_neighbor < self.__maxneighbor:
            # get random current medoid that is to be replaced
            current_medoid_index = self.__current[
                random.randint(0, self.__number_clusters - 1)
            ]
            current_medoid_cluster_index = self.__belong[current_medoid_index]

            # get new candidate to be medoid
            candidate_medoid_index = random.randint(
                0, len(self.__pointer_data) - 1
            )

            while candidate_medoid_index in self.__current:
                candidate_medoid_index = random.randint(
                    0, len(self.__pointer_data) - 1
                )

            candidate_cost = 0.0
            for point_index in range(0, len(self.__pointer_data)):
                if point_index not in self.__current:
                    # get non-medoid point and its medoid
                    point_cluster_index = self.__belong[point_index]
                    point_medoid_index = self.__current[point_cluster_index]

                    # get other medoid that is nearest to the point (except current and candidate)
                    other_medoid_index = self.__find_another_nearest_medoid(
                        point_index, current_medoid_index
                    )
                    other_medoid_cluster_index = self.__belong[
                        other_medoid_index
                    ]

                    # for optimization calculate all required distances
                    # from the point to current medoid
                    distance_current = euclidean_distance_square(
                        self.__pointer_data[point_index],
                        self.__pointer_data[current_medoid_index],
                    )

                    # from the point to candidate median
                    distance_candidate = euclidean_distance_square(
                        self.__pointer_data[point_index],
                        self.__pointer_data[candidate_medoid_index],
                    )

                    # from the point to nearest (own) medoid
                    distance_nearest = float("inf")
                    # NOTE(review): the second comparison checks a point index
                    # against a *cluster* index — looks inconsistent; this
                    # mirrors the upstream pyclustering code, verify there.
                    if (point_medoid_index != candidate_medoid_index) and (
                        point_medoid_index != current_medoid_cluster_index
                    ):
                        distance_nearest = euclidean_distance_square(
                            self.__pointer_data[point_index],
                            self.__pointer_data[point_medoid_index],
                        )

                    # apply rules for cost calculation
                    if point_cluster_index == current_medoid_cluster_index:
                        # case 1:
                        if distance_candidate >= distance_nearest:
                            candidate_cost += (
                                distance_nearest - distance_current
                            )

                        # case 2:
                        else:
                            candidate_cost += (
                                distance_candidate - distance_current
                            )
                    elif point_cluster_index == other_medoid_cluster_index:
                        # case 3 ('nearest medoid' is the representative object of that cluster and object is more
                        # similar to 'nearest' than to 'candidate'):
                        if distance_candidate > distance_nearest:
                            pass

                        # case 4:
                        else:
                            candidate_cost += (
                                distance_candidate - distance_nearest
                            )

            if candidate_cost < 0:
                counter += 1
                # set candidate that has won
                self.__current[
                    current_medoid_cluster_index
                ] = candidate_medoid_index

                # recalculate clusters
                self.__update_clusters(self.__current)

                # reset iterations and starts investigation from the begining
                index_neighbor = 0
            else:
                index_neighbor += 1

        print("Medoid set changed {0} times".format(counter))
def compute_cost_clarans(data: pd.DataFrame, _cur_choice: list) -> Tuple[float, Dict[Any, list]]:
    """
    A function to compute the configuration cost. (modified from that of CLARA)

    Every row (medoids included) is assigned to its nearest medoid by
    Euclidean distance; the cost is the sum of those nearest distances.

    :param data: The input dataframe.
    :param _cur_choice: The current set of medoid choices.
    :return: The total configuration cost, the medoids.
    """
    medoids = {medoid: [] for medoid in _cur_choice}
    total_cost = 0.0
    for row_index in list(data.index):
        best_medoid, best_cost = -1, np.inf
        for medoid in medoids:
            # Euclidean distance between the row and the medoid row.
            dist = np.linalg.norm(data.loc[medoid] - data.loc[row_index])
            if dist < best_cost:
                best_medoid, best_cost = medoid, dist
        medoids[best_medoid].append(row_index)
        total_cost += best_cost
    return total_cost, medoids
def plot_tree_clarans(data: pd.DataFrame, k: int) -> None:
    """
    plot G_{k,n} as in the paper of CLARANS; only to use with small input data.

    Each node is one possible medoid set (a k-combination of row indices)
    labelled with its configuration cost; two nodes are connected when they
    share k-1 elements (i.e. differ by a single medoid swap).

    :param data: input DataFrame.
    :param k: number of points in each combination (possible set of medoids).
    """
    n = len(data)
    num_points = int(scipy.special.binom(n, k))
    num_neigh = k * (n - k)

    # guard: the graph grows combinatorially, refuse to draw large ones
    if (num_points > 50) or (num_neigh > 10):
        print(
            "Either graph nodes are more than 50 or neighbors are more than 10, the graph would be too big"
        )
        return

    # all possibile combinations of k elements from input data
    name_nodes = list(itertools.combinations(list(data.index), k))

    dot = graphviz.Digraph(comment="Clustering")

    # draw nodes, also adding the configuration cost
    for i in range(num_points):
        tot_cost, meds = compute_cost_clarans(data, list(name_nodes[i]))
        tc = round(tot_cost, 3)
        dot.node(str(name_nodes[i]), str(name_nodes[i]) + ": " + str(tc))

    # only connect nodes if they have k-1 common elements
    for i in range(num_points):
        for j in range(num_points):
            if i != j:
                if (
                    len(set(list(name_nodes[i])) & set(list(name_nodes[j])))
                    == k - 1
                ):
                    dot.edge(str(name_nodes[i]), str(name_nodes[j]))

    graph = graphviz.Source(dot)  # .view()
    # NOTE(review): display() is not imported anywhere in this module — it is
    # the IPython builtin, so this call only works inside a notebook; outside
    # IPython it raises NameError. Confirm intended usage.
    display(graph)
| 36.185606
| 114
| 0.539098
| 980
| 9,553
| 4.968367
| 0.234694
| 0.036147
| 0.040049
| 0.022592
| 0.240501
| 0.177449
| 0.121586
| 0.111316
| 0.104539
| 0.084822
| 0
| 0.006726
| 0.39307
| 9,553
| 263
| 115
| 36.323194
| 0.833046
| 0.185596
| 0
| 0.231707
| 0
| 0
| 0.038522
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0.006098
| 0.060976
| 0
| 0.109756
| 0.036585
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32b489e63deb6a7323ecb9996f33d06edac172bd
| 1,507
|
py
|
Python
|
bin/demo_findit_backup_url.py
|
cariaso/metapub
|
bfa361dd6e5de8ee0859e596d490fb478f7dcfba
|
[
"Apache-2.0"
] | 28
|
2019-09-09T08:12:31.000Z
|
2021-12-17T00:09:14.000Z
|
bin/demo_findit_backup_url.py
|
cariaso/metapub
|
bfa361dd6e5de8ee0859e596d490fb478f7dcfba
|
[
"Apache-2.0"
] | 33
|
2019-11-07T05:36:04.000Z
|
2022-01-29T01:14:57.000Z
|
bin/demo_findit_backup_url.py
|
cariaso/metapub
|
bfa361dd6e5de8ee0859e596d490fb478f7dcfba
|
[
"Apache-2.0"
] | 10
|
2019-09-09T10:04:05.000Z
|
2021-06-08T16:00:14.000Z
|
from __future__ import absolute_import, print_function, unicode_literals
import os
import requests
from metapub.findit import FindIt
from metapub.exceptions import *
from requests.packages import urllib3
urllib3.disable_warnings()
OUTPUT_DIR = 'findit'
CURL_TIMEOUT = 4000
def try_request(url):
    """Return True if GET *url* answers with an OK status and a PDF payload.

    :param url: the URL to probe.
    :return: True when the response status is acceptable and the
        Content-Type header mentions 'pdf'; False otherwise.
    """
    # verify=False means it ignores bad SSL certs
    OK_STATUS_CODES = [200, 301, 302, 307]
    response = requests.get(url, stream=True, timeout=CURL_TIMEOUT, verify=False)
    try:
        if response.status_code in OK_STATUS_CODES:
            # BUG FIX: headers.get('content-type') can be None, and
            # None.find(...) raised AttributeError; default to ''.
            content_type = response.headers.get('content-type') or ''
            if content_type.find('pdf') > -1:
                return True
        return False
    finally:
        # stream=True keeps the connection open until explicitly closed.
        response.close()
def try_backup_url(pmid):
    """Resolve *pmid* with FindIt and print whether its url and backup_url
    actually serve a PDF (one line per attempt)."""
    source = FindIt(pmid=pmid)
    if not source.pma:
        # no PubMed article record for this id; nothing to test
        return
    if source.url:
        print(pmid, source.pma.journal, source.url, try_request(source.url))
    else:
        print(pmid, source.pma.journal, source.reason)
    try:
        if source.backup_url is not None:
            print(pmid, source.pma.journal, source.backup_url, try_request(source.backup_url))
        else:
            print(pmid, source.pma.journal, "no backup url")
    except Exception as err:
        # backup_url resolution can raise for odd journals; log and continue
        print(pmid, '%r' % err)
if __name__ == '__main__':
    import sys
    try:
        start_pmid = int(sys.argv[1])
    # BUG FIX: int() raises ValueError (not TypeError) for a non-numeric
    # string, so bad input previously crashed instead of printing usage.
    except (IndexError, ValueError) as err:
        print("Supply a pubmed ID as the starting point for this script.")
        sys.exit()
    # probe the next 1000 PMIDs starting from the one supplied
    for pmid in range(start_pmid, start_pmid + 1000):
        try_backup_url(pmid)
| 28.433962
| 98
| 0.666224
| 205
| 1,507
| 4.717073
| 0.44878
| 0.055843
| 0.062048
| 0.074457
| 0.136505
| 0.136505
| 0.066184
| 0
| 0
| 0
| 0
| 0.020924
| 0.238885
| 1,507
| 52
| 99
| 28.980769
| 0.822145
| 0.028534
| 0
| 0.1
| 0
| 0
| 0.069178
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.175
| 0
| 0.3
| 0.175
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32b5c206b4bd2dca61a6557018af529be9b8ba2f
| 3,939
|
py
|
Python
|
kgcnn/layers/conv/dmpnn_conv.py
|
the16thpythonist/gcnn_keras
|
27d794095b684333d93149c825d84b85df8c30ff
|
[
"MIT"
] | 47
|
2021-03-10T10:15:42.000Z
|
2022-03-14T00:53:40.000Z
|
kgcnn/layers/conv/dmpnn_conv.py
|
the16thpythonist/gcnn_keras
|
27d794095b684333d93149c825d84b85df8c30ff
|
[
"MIT"
] | 36
|
2021-05-06T15:06:51.000Z
|
2022-03-02T13:06:16.000Z
|
kgcnn/layers/conv/dmpnn_conv.py
|
the16thpythonist/gcnn_keras
|
27d794095b684333d93149c825d84b85df8c30ff
|
[
"MIT"
] | 11
|
2021-04-05T02:14:27.000Z
|
2022-03-02T03:25:52.000Z
|
import tensorflow as tf
from kgcnn.layers.base import GraphBaseLayer
from kgcnn.layers.gather import GatherNodesOutgoing, GatherNodesIngoing
from kgcnn.layers.pooling import PoolingLocalEdges
from kgcnn.layers.modules import LazySubtract
@tf.keras.utils.register_keras_serializable(package='kgcnn', name='DMPNNGatherEdgesPairs')
class DMPNNGatherEdgesPairs(GraphBaseLayer):
    """Gather edge pairs that also works for invalid indices given a certain pair, i.e. if a edge does not have its
    reverse counterpart in the edge indices list.

    This class is used in `DMPNN <https://pubs.acs.org/doi/full/10.1021/acs.jcim.9b00237>`_ .
    """

    def __init__(self, **kwargs):
        """Initialize layer."""
        super(DMPNNGatherEdgesPairs, self).__init__(**kwargs)
        self.gather_layer = GatherNodesIngoing()

    def build(self, input_shape):
        """Build layer."""
        super(DMPNNGatherEdgesPairs, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Forward pass.

        Args:
            inputs (list): [edges, pair_index]

                - edges (tf.RaggedTensor): Node embeddings of shape (batch, [M], F)
                - pair_index (tf.RaggedTensor): Edge indices referring to edges of shape (batch, [M], 1)

        Returns:
            list: Gathered edge embeddings that match the reverse edges of shape (batch, [M], F) for selection_index.
        """
        self.assert_ragged_input_rank(inputs)
        edges, pair_index = inputs
        # Missing reverse edges are marked with negative indices: clamp them
        # to 0 so the gather stays in-bounds...
        index_corrected = tf.RaggedTensor.from_row_splits(
            tf.where(pair_index.values >= 0, pair_index.values, tf.zeros_like(pair_index.values)),
            pair_index.row_splits, validate=self.ragged_validate)
        edges_paired = self.gather_layer([edges, index_corrected], **kwargs)
        # ...then zero out the rows that were gathered from a clamped (i.e.
        # originally invalid) index, so missing pairs contribute nothing.
        edges_corrected = tf.RaggedTensor.from_row_splits(
            tf.where(pair_index.values >= 0, edges_paired.values, tf.zeros_like(edges_paired.values)),
            edges_paired.row_splits, validate=self.ragged_validate)
        return edges_corrected
@tf.keras.utils.register_keras_serializable(package='kgcnn', name='DMPNNPPoolingEdgesDirected')
class DMPNNPPoolingEdgesDirected(GraphBaseLayer):
    """Pooling of edges for around a target node as defined by
    `DMPNN <https://pubs.acs.org/doi/full/10.1021/acs.jcim.9b00237>`_ . This slightly different than the normal node
    aggregation from message passing like networks. Requires edge pairs for this implementation.
    """

    def __init__(self, **kwargs):
        """Initialize layer."""
        super(DMPNNPPoolingEdgesDirected, self).__init__(**kwargs)
        self.pool_edge_1 = PoolingLocalEdges(pooling_method="sum")
        self.gather_edges = GatherNodesOutgoing()
        self.gather_pairs = DMPNNGatherEdgesPairs()
        self.subtract_layer = LazySubtract()

    def build(self, input_shape):
        """Build layer."""
        super(DMPNNPPoolingEdgesDirected, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Forward pass.

        Args:
            inputs: [nodes, edges, edge_index, edge_reverse_pair]

                - nodes (tf.RaggedTensor): Node embeddings of shape (batch, [N], F)
                - edges (tf.RaggedTensor): Edge or message embeddings of shape (batch, [M], F)
                - edge_index (tf.RaggedTensor): Edge indices referring to nodes of shape (batch, [M], 2)
                - edge_reverse_pair (tf.RaggedTensor): Pair mappings for reverse edges (batch, [M], 1)

        Returns:
            tf.RaggedTensor: Edge embeddings of shape (batch, [M], F)
        """
        n, ed, edi, edp = inputs
        # Directed message update: sum all incoming messages per node,
        # broadcast the sums back onto outgoing edges, then subtract each
        # edge's reverse-edge message so an edge never echoes itself back.
        pool_edge_receive = self.pool_edge_1([n, ed, edi], **kwargs)  # Sum pooling of all edges
        ed_new = self.gather_edges([pool_edge_receive, edi], **kwargs)
        ed_not = self.gather_pairs([ed, edp], **kwargs)
        out = self.subtract_layer([ed_new, ed_not], **kwargs)
        return out
| 43.766667
| 117
| 0.67276
| 477
| 3,939
| 5.381551
| 0.289308
| 0.049085
| 0.032723
| 0.030386
| 0.354889
| 0.340475
| 0.293728
| 0.201792
| 0.172965
| 0.131671
| 0
| 0.010081
| 0.219345
| 3,939
| 89
| 118
| 44.258427
| 0.824715
| 0.369383
| 0
| 0.15
| 0
| 0
| 0.026258
| 0.020569
| 0
| 0
| 0
| 0
| 0.025
| 1
| 0.15
| false
| 0
| 0.125
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32b877d4916dd5d40bd6976997b7ef7d01823785
| 349
|
py
|
Python
|
api/admin.py
|
jchmura/suchary-django
|
af2e8a62d222fd6eb18f29af95c23ab098ccc2a6
|
[
"MIT"
] | null | null | null |
api/admin.py
|
jchmura/suchary-django
|
af2e8a62d222fd6eb18f29af95c23ab098ccc2a6
|
[
"MIT"
] | 2
|
2021-03-19T21:54:17.000Z
|
2021-06-10T19:20:12.000Z
|
api/admin.py
|
jchmura/suchary-django
|
af2e8a62d222fd6eb18f29af95c23ab098ccc2a6
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from api.models import Device
class DeviceAdmin(admin.ModelAdmin):
    """Django admin configuration for the Device model."""

    # Columns shown in the admin change-list view.
    list_display = ['android_id', 'alias', 'model', 'os_version', 'version', 'created', 'last_seen', 'active']
    # Sidebar filter on the active flag.
    list_filter = ['active']
    # Fields searched by the admin search box.
    search_fields = ['registration_id', 'android_id', 'alias']


# Register Device with its customized admin options.
admin.site.register(Device, DeviceAdmin)
| 26.846154
| 110
| 0.713467
| 42
| 349
| 5.738095
| 0.690476
| 0.074689
| 0.116183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13467
| 349
| 12
| 111
| 29.083333
| 0.798013
| 0
| 0
| 0
| 0
| 0
| 0.272206
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32bd83533b8a10d702670e0618e12d21f2714992
| 712
|
py
|
Python
|
f8a_jobs/handlers/flow.py
|
sawood14012/fabric8-analytics-jobs
|
a7d850dfef5785144676b9a3b4e29942161e5347
|
[
"Apache-2.0"
] | 5
|
2017-05-04T11:22:31.000Z
|
2018-08-24T16:12:30.000Z
|
f8a_jobs/handlers/flow.py
|
sawood14012/fabric8-analytics-jobs
|
a7d850dfef5785144676b9a3b4e29942161e5347
|
[
"Apache-2.0"
] | 325
|
2017-05-03T08:44:03.000Z
|
2021-12-13T21:03:49.000Z
|
f8a_jobs/handlers/flow.py
|
sawood14012/fabric8-analytics-jobs
|
a7d850dfef5785144676b9a3b4e29942161e5347
|
[
"Apache-2.0"
] | 28
|
2017-05-02T05:09:32.000Z
|
2021-03-11T09:42:34.000Z
|
"""Schedule multiple flows of a type."""
from .base import BaseHandler
class FlowScheduling(BaseHandler):
    """Schedule multiple flows of a type."""

    def execute(self, flow_name, flow_arguments):
        """Schedule one flow run per argument set, expanding filter queries.

        :param flow_name: flow name that should be scheduled
        :param flow_arguments: a list of flow arguments per flow
        """
        for node_args in flow_arguments:
            if not self.is_filter_query(node_args):
                self.run_selinon_flow(flow_name, node_args)
                continue
            # A filter query stands for many runs — schedule each expansion.
            for expanded_args in self.expand_filter_query(node_args):
                self.run_selinon_flow(flow_name, expanded_args)
| 33.904762
| 76
| 0.651685
| 94
| 712
| 4.734043
| 0.425532
| 0.089888
| 0.141573
| 0.155056
| 0.305618
| 0.305618
| 0
| 0
| 0
| 0
| 0
| 0
| 0.273876
| 712
| 20
| 77
| 35.6
| 0.860735
| 0.345506
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32bdf6c9f66952e90bfd46bcfa58f2ec034c3c0d
| 1,032
|
py
|
Python
|
mako/stats/notifier.py
|
zer0tonin/mako
|
12420056e13e1acd333e686537d5ebc909450620
|
[
"MIT"
] | null | null | null |
mako/stats/notifier.py
|
zer0tonin/mako
|
12420056e13e1acd333e686537d5ebc909450620
|
[
"MIT"
] | 1
|
2021-06-02T04:22:46.000Z
|
2021-06-02T04:22:46.000Z
|
mako/stats/notifier.py
|
zer0tonin/mako
|
12420056e13e1acd333e686537d5ebc909450620
|
[
"MIT"
] | null | null | null |
import logging

logger = logging.getLogger(__name__)


class Notifier:
    """Collects queued level-up notifications from redis, per guild."""

    def __init__(self, redis):
        # Async redis client (aioredis-style API: isscan / lpop / zscore).
        self.redis = redis

    async def notify_guilds(self):
        """Gather pending notifications across every known guild."""
        guilds_set = "guilds"
        logger.debug("Scanning {}".format(guilds_set))
        collected = []
        async for guild_id in self.redis.isscan(guilds_set):
            collected += await self.notify_guild(guild_id)
        return collected

    async def notify_guild(self, guild_id):
        """Drain one guild's notify queue into (guild, user, level) tuples."""
        notify_list = "guilds:{}:notify".format(guild_id)
        level_zset = "guilds:{}:levels".format(guild_id)
        collected = []
        logger.debug("Popping {} queue".format(notify_list))
        while True:
            user_id = await self.redis.lpop(notify_list)
            if user_id is None:
                break
            logger.debug("Accessing {} zset for user: {}".format(level_zset, user_id))
            level = await self.redis.zscore(level_zset, user_id)
            collected.append((guild_id, user_id, level))
        return collected
| 30.352941
| 86
| 0.631783
| 130
| 1,032
| 4.761538
| 0.323077
| 0.087237
| 0.067851
| 0.048465
| 0.109855
| 0.109855
| 0.109855
| 0.109855
| 0
| 0
| 0
| 0
| 0.255814
| 1,032
| 33
| 87
| 31.272727
| 0.80599
| 0
| 0
| 0.25
| 0
| 0
| 0.092054
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.041667
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32c304191982cf35da8aed8e53fd875c3bef3ba2
| 1,505
|
py
|
Python
|
PageObjectModel/Test/addAndEditionData.py
|
lblaszkowski/Arena
|
61f924bc7c3994ec7714fe68f60b02b35ccd286b
|
[
"Apache-2.0"
] | null | null | null |
PageObjectModel/Test/addAndEditionData.py
|
lblaszkowski/Arena
|
61f924bc7c3994ec7714fe68f60b02b35ccd286b
|
[
"Apache-2.0"
] | null | null | null |
PageObjectModel/Test/addAndEditionData.py
|
lblaszkowski/Arena
|
61f924bc7c3994ec7714fe68f60b02b35ccd286b
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from selenium import webdriver
from PageObjectModel.Pages.addAndEditionDataPage import AddAndEditionData_Page
from time import sleep
url = 'https://buggy-testingcup.pgs-soft.com/'
class AddAndEditionDataPage(unittest.TestCase):
    """UI test: fill in and save the add/edit data form of the demo app.

    Fix: the original duplicated the whole maximize/get start-up sequence
    in both browser branches and used chained ``== or ==`` comparisons;
    the driver is now chosen once and the common start-up runs once.
    """

    def setUp(self, browser="mozilla", task="task_3"):
        """Start the requested browser and open the task page.

        :param browser: "chrome"/"ch" for Chrome, "mozilla"/"ff" for Firefox
        :param task: path fragment appended to the base url
        :raises Exception: when the browser alias is unknown
        :return: the started webdriver instance
        """
        if browser in ("chrome", "ch"):
            self.driver = webdriver.Chrome(executable_path=r'../Drivers/ChromeDrive_74/chromedriver.exe')
        elif browser in ("mozilla", "ff"):
            self.driver = webdriver.Firefox(executable_path=r'../Drivers/FirefoxDrive_24/geckodriver.exe')
        else:
            print("Brak przeglądarki")
            raise Exception("Brak przeglądarki")
        # Common start-up, previously duplicated per browser branch.
        self.driver.maximize_window()
        self.driver.get(url + task)
        return self.driver

    def tearDown(self):
        """Close the window and end the webdriver session."""
        self.driver.close()
        self.driver.quit()

    def test_AddAndEditionData(self):
        """Walk through the edit form and save a record."""
        AddandEditionData = AddAndEditionData_Page(self.driver)
        AddandEditionData.menuButtonClick()
        AddandEditionData.dropdownMenuClick()
        AddandEditionData.editFile()
        AddandEditionData.fieldName("Jan")
        AddandEditionData.fieldSurname("Nowak")
        AddandEditionData.fieldNotes("Testowy napis")
        AddandEditionData.fieldPhone("10981234098")
        AddandEditionData.fieldImage()
        AddandEditionData.saveButton()
| 32.717391
| 106
| 0.67907
| 142
| 1,505
| 7.126761
| 0.528169
| 0.098814
| 0.037549
| 0.043478
| 0.092885
| 0.092885
| 0.092885
| 0.092885
| 0.092885
| 0.092885
| 0
| 0.013594
| 0.21794
| 1,505
| 45
| 107
| 33.444444
| 0.846219
| 0
| 0
| 0.121212
| 0
| 0
| 0.145333
| 0.056
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.121212
| 0
| 0.272727
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32c57ec480ef32335403cba14fba78c713f0eb97
| 741
|
py
|
Python
|
azext_script/compilers/az/handlers/HDInsight.py
|
yorek/adl
|
d9da1b7d46c71415e38a6efe5b1c8d45b02b3704
|
[
"MIT"
] | null | null | null |
azext_script/compilers/az/handlers/HDInsight.py
|
yorek/adl
|
d9da1b7d46c71415e38a6efe5b1c8d45b02b3704
|
[
"MIT"
] | 1
|
2018-10-15T05:51:38.000Z
|
2018-10-15T05:51:38.000Z
|
azext_script/compilers/az/handlers/HDInsight.py
|
yorek/adl
|
d9da1b7d46c71415e38a6efe5b1c8d45b02b3704
|
[
"MIT"
] | 1
|
2018-10-18T18:41:02.000Z
|
2018-10-18T18:41:02.000Z
|
from .Generic import GenericHandler
class HDInsightHandler(GenericHandler):
    """Handler that turns HDInsight statements into `az hdinsight` calls."""

    azure_object = "hdinsight"

    def execute(self):
        """Assemble the command, run the generic handler, persist context."""
        full_name = self.get_full_resource_name()
        self.add_context_parameter("resource-group", "group")
        if full_name == "hdinsight" and self.action == "create":
            self.add_context_parameter("location", "location")
            if 'storage account' in self.context:
                # The account name from context needs the blob endpoint suffix.
                account = self.context["storage account"] + ".blob.core.windows.net"
                self.add_parameter("storage-account", account)
        result = super(HDInsightHandler, self).execute()
        self.save_to_context()
        return result
| 30.875
| 70
| 0.618084
| 75
| 741
| 5.92
| 0.506667
| 0.189189
| 0.063063
| 0.103604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.279352
| 741
| 23
| 71
| 32.217391
| 0.831461
| 0
| 0
| 0
| 0
| 0
| 0.17027
| 0.02973
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32c6c31592e8107e78ef2bb52771dcffacd50781
| 393
|
py
|
Python
|
html_mining/twitter.py
|
sourceperl/sandbox
|
bbe1be52c3e51906a8ec94411c4df6a95dcbb39c
|
[
"MIT"
] | null | null | null |
html_mining/twitter.py
|
sourceperl/sandbox
|
bbe1be52c3e51906a8ec94411c4df6a95dcbb39c
|
[
"MIT"
] | null | null | null |
html_mining/twitter.py
|
sourceperl/sandbox
|
bbe1be52c3e51906a8ec94411c4df6a95dcbb39c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Scrape the public @ThePSF twitter page and print the tweet texts."""
import requests
from bs4 import BeautifulSoup

# NOTE(review): an empty User-Agent presumably keeps twitter serving the
# legacy server-rendered HTML — confirm this still works.
r = requests.get("https://twitter.com/ThePSF", headers={"User-Agent": ""})
if r.status_code == 200:
    s = BeautifulSoup(r.content, "html.parser")
    # extract tweets: one <p class="tweet-text"> per tweet
    l_tw = [p.text.strip() for p in s.find_all("p", attrs={"class": "tweet-text"})]
    print(l_tw)
| 23.117647
| 74
| 0.62341
| 58
| 393
| 4.137931
| 0.775862
| 0.0375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018692
| 0.183206
| 393
| 16
| 75
| 24.5625
| 0.728972
| 0.147583
| 0
| 0
| 0
| 0
| 0.189759
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
32c80a80f478110db9183291633d248502cd65ad
| 590
|
py
|
Python
|
warehouse_labeling_machines/libs/utils.py
|
sdg97/warehouse_labeling_machines
|
3650b9fb2d3fef85ee01925acf0a9266dafe746a
|
[
"Apache-2.0"
] | null | null | null |
warehouse_labeling_machines/libs/utils.py
|
sdg97/warehouse_labeling_machines
|
3650b9fb2d3fef85ee01925acf0a9266dafe746a
|
[
"Apache-2.0"
] | null | null | null |
warehouse_labeling_machines/libs/utils.py
|
sdg97/warehouse_labeling_machines
|
3650b9fb2d3fef85ee01925acf0a9266dafe746a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import decimal
import multiprocessing
import random
def roundDecimal(v):
    """Round a Decimal to two places with commercial (half-up) rounding.

    Rounding a Decimal turns out to be more involved than expected;
    quantizing against '0.01' with an explicit rounding mode is the
    reliable way to do it.
    """
    two_places = decimal.Decimal('0.01')
    return v.quantize(two_places, rounding=decimal.ROUND_HALF_UP)
def maybeStart(startCb, debug):
    """Occasionally invoke *startCb*.

    Each time a worker restarts, the callback is executed with probability
    1/workers (workers = 2*cpu_count + 1), so on average one restart per
    worker pool triggers it. Never runs in debug mode.
    """
    if debug:
        return
    worker_count = multiprocessing.cpu_count() * 2 + 1
    if random.randrange(worker_count) == 0:
        startCb()
| 21.851852
| 78
| 0.666102
| 75
| 590
| 5.2
| 0.72
| 0.020513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015317
| 0.225424
| 590
| 26
| 79
| 22.692308
| 0.838074
| 0.366102
| 0
| 0
| 0
| 0
| 0.011834
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.272727
| 0
| 0.636364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
086749fe086bfe8b53982e2dc76e87c1e91b6cc7
| 1,596
|
py
|
Python
|
code/p3.py
|
OscarFlores-IFi/CDINP19
|
7fb0cb6ff36b9a10bcfa0772b172c5e49996df48
|
[
"MIT"
] | null | null | null |
code/p3.py
|
OscarFlores-IFi/CDINP19
|
7fb0cb6ff36b9a10bcfa0772b172c5e49996df48
|
[
"MIT"
] | null | null | null |
code/p3.py
|
OscarFlores-IFi/CDINP19
|
7fb0cb6ff36b9a10bcfa0772b172c5e49996df48
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 11 09:18:37 2019

@author: if715029
"""

import pandas as pd
import numpy as np
import sklearn.metrics as skm
import scipy.spatial.distance as sc

#%% Read the data (movie-rating survey exported to Excel)
data = pd.read_excel('../data/Test de películas(1-16).xlsx', encoding='latin_1')

#%% Select the rating columns (my style)
# NOTE(review): DataFrame.append is deprecated in modern pandas —
# pd.concat is the replacement; kept as-is here.
pel = pd.DataFrame()
for i in range((len(data.T)-5)//3):
    pel = pel.append(data.iloc[:,6+i*3])
pel = pel.T
print(pel)

#%% Select the rating columns (Riemann's style): every 3rd column from 6 to 242
csel = np.arange(6,243,3)
cnames = list(data.columns.values[csel])
datan = data[cnames]

#%% Averages per movie (axis=0) and per user (axis=1)
movie_prom = datan.mean(axis=0)
user_prom = datan.mean(axis=1)

#%% Binarize the ratings (liked = rating >= 3)
datan = datan.copy()
datan[datan<3] = 0
datan[datan>=3] = 1

#%% Pairwise similarity-index distances between users
#D1 = sc.pdist(datan,'hamming') # hamming == matching
D1 = sc.pdist(datan,'jaccard')
D1 = sc.squareform(D1)
#D2 = sc.pdist(data_b,'jaccard') # hamming == matching
#D2 = sc.squareform(D2)
# Similarity = 1 - Jaccard distance.
Isim1 = 1-D1

#%% Pick one user and rank all users by similarity to them
user = 1
Isim_user = Isim1[user]
Isim_user_sort = np.sort(Isim_user)
indx_user = np.argsort(Isim_user)

#%% Movie recommendation, approach 1: from the single most similar user
# (indx_user[-1] is the user themself, so take the second-to-last entry.)
USER = datan.loc[user]
USER_sim = datan.loc[indx_user[-2]]
indx_recomend1 = (USER_sim==1)&(USER==0)
recomend1 = list(USER.index[indx_recomend1])

#%% Movie recommendation, approach 2: majority vote of the 5 most similar users
USER = datan.loc[user]
USER_sim = np.mean(datan.loc[indx_user[-6:-1]],axis = 0)
USER_sim[USER_sim<=.5]=0
USER_sim[USER_sim>.5]=1
indx_recomend2 = (USER_sim==1)&(USER==0)
recomend2 = list(USER.index[indx_recomend2])
| 21.863014
| 80
| 0.697368
| 260
| 1,596
| 4.184615
| 0.419231
| 0.051471
| 0.012868
| 0.03125
| 0.095588
| 0.071691
| 0
| 0
| 0
| 0
| 0
| 0.051338
| 0.133459
| 1,596
| 72
| 81
| 22.166667
| 0.735358
| 0.308271
| 0
| 0.057143
| 0
| 0
| 0.046468
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.114286
| 0
| 0.114286
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0867a27f2b0a9d65b0fbacf348d77dfbc3427264
| 1,187
|
py
|
Python
|
itao/utils/qt_logger.py
|
MaxChangInnodisk/itao
|
b0745eb48bf67718ef00db566c4cc19896d903a7
|
[
"MIT"
] | null | null | null |
itao/utils/qt_logger.py
|
MaxChangInnodisk/itao
|
b0745eb48bf67718ef00db566c4cc19896d903a7
|
[
"MIT"
] | null | null | null |
itao/utils/qt_logger.py
|
MaxChangInnodisk/itao
|
b0745eb48bf67718ef00db566c4cc19896d903a7
|
[
"MIT"
] | null | null | null |
import logging


class CustomLogger:
    """Factory for a logger that writes to both the console and a file."""

    def __init__(self):
        pass

    def create_logger(self, name='dev', log_file='itao.log', write_mode='w'):
        """Create and configure the logger called *name* (default 'dev').

        Attaches a stream handler and a UTF-8 file handler, both sharing
        the same timestamped format, and sets the level to DEBUG.
        """
        logger = logging.getLogger(name)
        logger.setLevel(logging.DEBUG)

        formatter = logging.Formatter(
            "%(asctime)s %(levelname)-.4s %(message)s",
            "%m-%d %H:%M:%S")

        console_handler = logging.StreamHandler()
        logfile_handler = logging.FileHandler(log_file, write_mode, 'utf-8')
        console_handler.setFormatter(formatter)
        logfile_handler.setFormatter(formatter)

        logger.addHandler(console_handler)
        logger.addHandler(logfile_handler)

        logger.info('Create Logger: {}'.format(name))
        return logger

    def get_logger(self, name='dev', log_file='itao.log', write_mode='w'):
        """Return the named logger, creating/configuring it only once."""
        logger = logging.getLogger(name)
        if logger.hasHandlers():
            return logger
        return self.create_logger(name, log_file, write_mode)
| 34.911765
| 97
| 0.615838
| 134
| 1,187
| 5.298507
| 0.380597
| 0.067606
| 0.039437
| 0.047887
| 0.188732
| 0.188732
| 0.188732
| 0.188732
| 0.188732
| 0.188732
| 0
| 0.002299
| 0.26706
| 1,187
| 34
| 97
| 34.911765
| 0.813793
| 0.077506
| 0
| 0.095238
| 0
| 0
| 0.096993
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.047619
| 0.047619
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08687783aacc944c351fc37618c9c87ef69b3d6b
| 2,296
|
py
|
Python
|
scripts/ndvi_diff.py
|
hkfrei/pythonRemoteSensing
|
c8681d859313ee5ad01e5b9753f8c43462268624
|
[
"MIT"
] | 1
|
2019-12-18T21:54:22.000Z
|
2019-12-18T21:54:22.000Z
|
scripts/ndvi_diff.py
|
hkfrei/pythonRemoteSensing
|
c8681d859313ee5ad01e5b9753f8c43462268624
|
[
"MIT"
] | null | null | null |
scripts/ndvi_diff.py
|
hkfrei/pythonRemoteSensing
|
c8681d859313ee5ad01e5b9753f8c43462268624
|
[
"MIT"
] | 1
|
2020-07-01T16:44:21.000Z
|
2020-07-01T16:44:21.000Z
|
import numpy
import rasterio
import gdal
print('all modules imported')
# path to the folder with the ndvi rasters
base_path = "/Users/hk/Downloads/gaga/"
# shapefile with forest mask
forest_mask = base_path + "waldmaske_wgs84.shp"
# initialize the necessary rasters for the ndvi calculation.
ndvi_2017 = rasterio.open(base_path + "ndvi_17.tiff", driver="GTiff")
ndvi_2018 = rasterio.open(base_path + "ndvi_18.tiff", driver="GTiff")
# print out metadata about the ndvi's
print(ndvi_2018.count) # number of raster bands
print(ndvi_2017.count) # number of raster bands
print(ndvi_2018.height) # column count
print(ndvi_2018.dtypes) # data type of the raster e.g. ('float64',)
print(ndvi_2018.crs) # projection of the raster e.g. EPSG:32632
print("calculate ndvi difference")
# this is will give us an array of values, not an actual raster image.
ndvi_diff_array = numpy.subtract(ndvi_2018.read(1), ndvi_2017.read(1))
print("reclassify")
# reclassify
ndvi_diff_reclass_array = numpy.where(
ndvi_diff_array <= -0.05, 1, 9999.0
)
# create a new (empty) raster for the "original" diff
ndvi_diff_image = rasterio.open(base_path + "ndvi_diff.tif", "w", driver="Gtiff", width=ndvi_2018.width,
height=ndvi_2018.height, count=1, crs=ndvi_2018.crs, transform=ndvi_2018.transform,
dtype='float64')
# create a new (empty) raster for the reclassified diff
ndvi_diff_reclass_image = rasterio.open(base_path + "ndvi_reclass_diff.tif", "w", driver="Gtiff", width=ndvi_2018.width,
height=ndvi_2018.height, count=1, crs=ndvi_2018.crs,
transform=ndvi_2018.transform, dtype='float64')
# write the ndvi's to raster
ndvi_diff_image.write(ndvi_diff_array.astype("float64"), 1)
ndvi_diff_reclass_image.write(ndvi_diff_reclass_array.astype("float64"), 1)
ndvi_diff_image.close()
ndvi_diff_reclass_image.close()
# extract forest areas
# Make sure to add correct Nodata and Alpha values. They have to match the reclassified values.
warp_options = gdal.WarpOptions(cutlineDSName=forest_mask, cropToCutline=True, dstNodata=9999, dstAlpha=9999)
gdal.Warp(base_path + "change_masked.tif", base_path + "ndvi_reclass_diff.tif", options=warp_options)
print("finished")
| 41.745455
| 120
| 0.726916
| 339
| 2,296
| 4.731563
| 0.351032
| 0.069825
| 0.037406
| 0.049875
| 0.354738
| 0.308603
| 0.218204
| 0.143392
| 0.143392
| 0.143392
| 0
| 0.058547
| 0.166812
| 2,296
| 54
| 121
| 42.518519
| 0.779927
| 0.275261
| 0
| 0
| 0
| 0
| 0.153613
| 0.04068
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0.28125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0869fc3b1af3273cc468fc0da2d162910f894bff
| 3,610
|
py
|
Python
|
studio/model.py
|
NunoEdgarGFlowHub/studio
|
42b221892a81535842ff25cbbcc434d6422a19e5
|
[
"Apache-2.0"
] | null | null | null |
studio/model.py
|
NunoEdgarGFlowHub/studio
|
42b221892a81535842ff25cbbcc434d6422a19e5
|
[
"Apache-2.0"
] | null | null | null |
studio/model.py
|
NunoEdgarGFlowHub/studio
|
42b221892a81535842ff25cbbcc434d6422a19e5
|
[
"Apache-2.0"
] | null | null | null |
"""Data providers."""
import os
try:
# try-except statement needed because
# pip module is not available in google app engine
import pip
except ImportError:
pip = None
import yaml
import six
from .artifact_store import get_artifact_store
from .http_provider import HTTPProvider
from .firebase_provider import FirebaseProvider
from .s3_provider import S3Provider
from .gs_provider import GSProvider
from . import logs
def get_config(config_file=None):
    """Load the studio configuration from the first existing candidate path.

    Search order: the explicit *config_file* (must exist if given), then
    ``~/.studioml/config.yaml``, then the packaged ``default_config.yaml``.
    The winning file is parsed as YAML and every string value has
    environment variables expanded in place (recursively through dicts).

    :param config_file: optional explicit path to a user config file
    :return: dict with the parsed configuration
    :raises ValueError: if *config_file* is given but missing, or if none
        of the candidate paths exist
    """
    config_paths = []
    if config_file:
        if not os.path.exists(config_file):
            raise ValueError('User config file {} not found'
                             .format(config_file))
        config_paths.append(os.path.expanduser(config_file))
    config_paths.append(os.path.expanduser('~/.studioml/config.yaml'))
    config_paths.append(
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "default_config.yaml"))
    for path in config_paths:
        if not os.path.exists(path):
            continue
        with(open(path)) as f:
            # NOTE(review): yaml.load without an explicit Loader is
            # deprecated and unsafe on untrusted input — consider
            # yaml.safe_load; confirm no config relies on custom tags.
            config = yaml.load(f.read())

        def replace_with_env(config):
            # Expand $VARS in string values, recursing into nested dicts.
            for key, value in six.iteritems(config):
                if isinstance(value, six.string_types):
                    config[key] = os.path.expandvars(value)
                elif isinstance(value, dict):
                    replace_with_env(value)

        replace_with_env(config)
        # First existing path wins — return immediately.
        return config

    raise ValueError('None of the config paths {} exits!'
                     .format(config_paths))
def get_db_provider(config=None, blocking_auth=True):
    """Instantiate the database provider selected by the configuration.

    :param config: configuration dict; loaded via get_config() when falsy
    :param blocking_auth: forwarded to the store/provider constructors
    :return: a provider instance matching config['database']['type']
    :raises ValueError: for an unknown database type
    """
    if not config:
        config = get_config()
    verbose = parse_verbosity(config.get('verbose'))
    logger = logs.getLogger("get_db_provider")
    logger.setLevel(verbose)
    logger.debug('Choosing db provider with config:')
    logger.debug(config)

    # Optional artifact store, only when a storage section is present.
    artifact_store = None
    if 'storage' in config.keys():
        artifact_store = get_artifact_store(
            config['storage'],
            blocking_auth=blocking_auth,
            verbose=verbose)

    assert 'database' in config.keys()
    db_config = config['database']
    db_type = db_config['type'].lower()
    if db_type == 'firebase':
        return FirebaseProvider(
            db_config,
            blocking_auth,
            verbose=verbose,
            store=artifact_store)
    if db_type == 'http':
        return HTTPProvider(db_config,
                            verbose=verbose,
                            blocking_auth=blocking_auth)
    if db_type == 's3':
        return S3Provider(db_config,
                          verbose=verbose,
                          store=artifact_store,
                          blocking_auth=blocking_auth)
    if db_type == 'gs':
        return GSProvider(db_config,
                          verbose=verbose,
                          store=artifact_store,
                          blocking_auth=blocking_auth)
    raise ValueError('Unknown type of the database ' + db_config['type'])
def parse_verbosity(verbosity=None):
    """Translate a verbosity setting into a numeric logging level.

    None and the string 'True' both mean 'info'. Named levels
    ('debug', 'info', 'warn', 'error', 'crit') map to the standard
    numeric values; anything else is coerced with int().
    """
    if verbosity is None or verbosity == 'True':
        return parse_verbosity('info')
    named_levels = {
        'debug': 10,
        'info': 20,
        'warn': 30,
        'error': 40,
        'crit': 50
    }
    if isinstance(verbosity, six.string_types) and \
            verbosity in named_levels.keys():
        return named_levels[verbosity]
    return int(verbosity)
| 28.88
| 77
| 0.591967
| 396
| 3,610
| 5.217172
| 0.277778
| 0.058083
| 0.029042
| 0.046467
| 0.188771
| 0.135528
| 0.135528
| 0.135528
| 0.093901
| 0.061955
| 0
| 0.005645
| 0.313019
| 3,610
| 124
| 78
| 29.112903
| 0.827419
| 0.027978
| 0
| 0.145833
| 0
| 0
| 0.082524
| 0.006568
| 0
| 0
| 0
| 0
| 0.010417
| 1
| 0.041667
| false
| 0
| 0.114583
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
086b6939a15a14e2ba2c7a9bf78818444b385782
| 7,310
|
py
|
Python
|
extendPlugins/minecraft.py
|
f88af65a/XyzB0ts
|
21a557288877b24f337f16002d8bb72b155f2551
|
[
"MIT"
] | 4
|
2021-10-17T11:54:07.000Z
|
2022-03-18T13:10:11.000Z
|
extendPlugins/minecraft.py
|
f88af65a/XyzB0ts
|
21a557288877b24f337f16002d8bb72b155f2551
|
[
"MIT"
] | null | null | null |
extendPlugins/minecraft.py
|
f88af65a/XyzB0ts
|
21a557288877b24f337f16002d8bb72b155f2551
|
[
"MIT"
] | 1
|
2021-10-16T09:51:25.000Z
|
2021-10-16T09:51:25.000Z
|
import asyncio
import json
import socket
import time
from botsdk.util.BotPlugin import BotPlugin
from botsdk.util.Error import printTraceBack
def getMcRequestData(ip, port):
    """Build the Minecraft (Java edition) handshake + status request packet.

    The payload is the protocol handshake (protocol -1 as VarInt, host,
    port, next-state 1) followed by the status request; the packet is
    prefixed with its length byte.
    """
    host = ip.encode("utf8")
    payload = b"".join([
        b"\x00\xff\xff\xff\xff\x0f",
        bytes([len(host)]),
        host,
        port.to_bytes(2, byteorder="big"),
        b"\x01\x01\x00",
    ])
    return bytes([len(payload) - 2]) + payload
def getVarInt(b):
    """Decode a little-endian base-128 VarInt from its raw bytes.

    Continuation bits (bit 7) are masked off; the 7-bit groups are
    combined most-significant-last, i.e. the bytes are processed in
    reverse order.
    """
    value = 0
    for byte in reversed(list(b)):
        value = (value << 7) | (byte & 0x7F)
    return value
class plugin(BotPlugin):
    # Class docstring doubles as the user-visible usage line; kept verbatim.
    "/[mcbe/mcpe] ip [端口]"

    def onLoad(self):
        # Register the /mc (Java edition) and /mcbe (Bedrock) commands for
        # both supported bot backends.
        self.name = "minecraft"
        self.addTarget("GroupMessage", "mc", self.getMc)
        self.addTarget("GroupMessage", "mcbe", self.getBe)
        self.addTarget("GROUP:1", "mc", self.getMc)
        self.addTarget("GROUP:1", "mcbe", self.getBe)
        self.addBotType("Mirai")
        self.addBotType("Kaiheila")
        self.canDetach = True

    async def getMc(self, request):
        # Query a Minecraft Java-edition server's status and report it.
        "/mc ip [端口]不写默认25565"
        data = request.getFirstTextSplit()
        serverIp = None
        serverPort = 25565
        if len(data) < 2:
            await request.sendMessage("缺少参数\n/mc ip [端口]不写默认25565")
            return
        if len(data) >= 2:
            serverIp = data[1]
        if len(data) >= 3:
            # Validate the optional port argument (0..65535).
            if not (data[2].isnumeric()
                    and int(data[2]) >= 0
                    and int(data[2]) <= 65535):
                request.sendMessage("端口有误")
                return
            serverPort = int(data[2])
        # Set up a TCP socket in non-blocking mode for use with the loop.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(0)
            loop = asyncio.get_event_loop()
            # Connect
            try:
                await loop.sock_connect(sock, (serverIp, serverPort))
            except Exception:
                await request.sendMessage("连接失败")
                return
            requestData = getMcRequestData(serverIp, serverPort)
            # Send the handshake + status request
            try:
                await loop.sock_sendall(sock, requestData)
            except Exception:
                await request.sendMessage("请求发送失败")
                return
            # Receive, with a 2-second overall deadline: keep reading until
            # the VarInt length prefix says the frame is complete.
            responseData = bytes()
            breakFlag = True
            dataSize = 10000000
            stime = time.time()
            while time.time() - stime <= 2 and breakFlag:
                for i in range(0, len(responseData)):
                    # First byte with bit 7 clear ends the length VarInt.
                    if int(responseData[i]) & 128 == 0:
                        dataSize = getVarInt(responseData[0:i + 1]) + i + 1
                        break
                if len(responseData) == dataSize:
                    breakFlag = False
                    break
                rdata = await loop.sock_recv(sock, 10240)
                if len(rdata) == 0:
                    await request.sendMessage("接受请求时连接断开")
                    return -1
                responseData += rdata
                await asyncio.sleep(0)
            # Strip the frame-length VarInt plus the packet-id byte...
            for i in range(0, len(responseData)):
                if int(responseData[i]) & 128 == 0:
                    responseData = responseData[i + 2:]
                    break
            # ...then the JSON-payload-length VarInt, leaving raw JSON.
            for i in range(0, len(responseData)):
                if int(responseData[i]) & 128 == 0:
                    responseData = responseData[i + 1:]
                    break
            responseData = json.loads(responseData)
            # The MOTD may be plain "text" and/or a list of "extra" parts.
            description = ""
            if "text" in responseData["description"]:
                description = responseData["description"]["text"]
            if "extra" in responseData["description"]:
                for i in responseData["description"]["extra"]:
                    if "text" in i:
                        description += i["text"]
            try:
                printData = "信息:{0}\n版本:{1}\n人数:{2}/{3}".format(
                    description, responseData["version"]["name"],
                    responseData["players"]["online"],
                    responseData["players"]["max"])
                # Optional "playerlist" keyword appends the online players.
                if "playerlist" in data:
                    printData += "\n在线玩家:\n"
                    for i in range(0, len(responseData["players"]["sample"])):
                        printData += (responseData
                                      ["players"]["sample"][i]["name"])
                        if i != len(responseData["players"]["sample"]) - 1:
                            printData += "\n"
                await request.sendMessage(printData)
            except Exception:
                await request.sendMessage("解析过程中出错")
                printTraceBack()

    async def getBe(self, request):
        # Query a Minecraft Bedrock-edition server via its UDP ping.
        "/mcbe ip [端口]不写默认19132"
        data = request.getFirstTextSplit()
        serverIp = None
        serverPort = 19132
        if len(data) < 2:
            await request.sendMessage("缺少参数\n/mcbe ip [端口]不写默认19132")
            return
        if len(data) >= 2:
            serverIp = data[1]
        if len(data) == 3:
            # Validate the optional port argument (0..65535).
            if not (data[2].isnumeric()
                    and int(data[2]) >= 0
                    and int(data[2]) <= 65535):
                request.sendMessage("端口有误")
                return
            serverPort = int(data[2])
        # Set up a UDP socket in non-blocking mode.
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
            sock.settimeout(0)
            loop = asyncio.get_event_loop()
            # Connect
            try:
                await loop.sock_connect(sock, (serverIp, serverPort))
            except Exception:
                await request.sendMessage("连接失败")
                return
            # RakNet unconnected-ping packet (fixed magic bytes).
            requestData = (b"\x01"
                           + b"\x00" * 8
                           + b"\x00\xff\xff\x00\xfe\xfe\xfe"
                           + b"\xfe\xfd\xfd\xfd\xfd\x12\x34\x56\x78"
                           + b"\x00" * 8)
            # Send
            try:
                await loop.sock_sendall(sock, requestData)
            except Exception:
                await request.sendMessage("请求发送失败")
                return
            # Receive a single datagram, retrying within a 2-second window.
            responseData = bytes()
            breakFlag = True
            stime = time.time()
            while time.time() - stime <= 2 and breakFlag:
                try:
                    responseData = await loop.sock_recv(sock, 10240)
                except Exception:
                    responseData = b""
                if len(responseData) == 0:
                    sock.close()
                    await request.sendMessage("接收过程中连接断开")
                    return
                breakFlag = False
                await asyncio.sleep(0)
            # Skip the 35-byte header; the rest is a ';'-separated status line.
            responseData = responseData[35:].decode()
            responseData = responseData.split(";")
            printData = ""
            try:
                printData += f"服务器名:{responseData[1]}\n"
                printData += f"人数:{responseData[4]}/{responseData[5]}\n"
                printData += f"游戏模式:{responseData[8]}\n"
                printData += (
                    f"版本:{responseData[0]} {responseData[2]} {responseData[3]}"
                )
                await request.sendMessage(printData)
            except Exception:
                await request.sendMessage("解析过程中出错")
                printTraceBack()
def handle(*args, **kwargs):
    """Factory hook: the bot framework calls this to obtain a plugin instance."""
    instance = plugin(*args, **kwargs)
    return instance
| 36.733668
| 79
| 0.477291
| 687
| 7,310
| 5.056769
| 0.232897
| 0.072539
| 0.079447
| 0.046632
| 0.48129
| 0.467473
| 0.423719
| 0.415947
| 0.415947
| 0.39407
| 0
| 0.037037
| 0.409029
| 7,310
| 198
| 80
| 36.919192
| 0.76713
| 0.008071
| 0
| 0.466292
| 0
| 0
| 0.097742
| 0.027808
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022472
| false
| 0
| 0.033708
| 0.005618
| 0.134831
| 0.078652
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
086ccdd01316fbb3c32c9928ed64ba2001cd4f5d
| 2,583
|
py
|
Python
|
main.py
|
brpaz/ulauncher-dockerhub
|
22e646bda40328373a4d90fa0aece2cac0187a42
|
[
"MIT"
] | 3
|
2020-09-04T07:56:47.000Z
|
2022-01-05T13:19:25.000Z
|
main.py
|
brpaz/ulauncher-dockerhub
|
22e646bda40328373a4d90fa0aece2cac0187a42
|
[
"MIT"
] | null | null | null |
main.py
|
brpaz/ulauncher-dockerhub
|
22e646bda40328373a4d90fa0aece2cac0187a42
|
[
"MIT"
] | null | null | null |
""" Main Module """
import logging
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.DoNothingAction import DoNothingAction
from ulauncher.api.shared.action.HideWindowAction import HideWindowAction
from ulauncher.api.shared.action.OpenUrlAction import OpenUrlAction
from dockerhub.client import Client
logger = logging.getLogger(__name__)
class DockerHubExtension(Extension):
    """ Main extension class: wires the Docker Hub client into ulauncher. """

    def __init__(self):
        """ Initializes the extension """
        super(DockerHubExtension, self).__init__()
        # Client used to query the Docker Hub search API.
        self.dockerhub = Client()
        self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())

    def search_repositories(self, query):
        """ Shows a list of DockerHub repositories matching *query*.

        Returns a RenderResultListAction holding at most 8 result items,
        a "keep typing" prompt for queries shorter than 3 characters, or
        a "no results" item when the search comes back empty.
        """
        if len(query) < 3:
            # Too short to search — prompt instead of issuing a request.
            return RenderResultListAction([
                ExtensionResultItem(
                    icon='images/icon.png',
                    name='Keep typing to search on Docker Hub ...',
                    highlightable=False,
                    on_enter=DoNothingAction())
            ])

        repos = self.dockerhub.search_repos(query)
        items = []

        if not repos:
            return RenderResultListAction([
                ExtensionResultItem(
                    icon="images/icon.png",
                    name="No results found matching your criteria",
                    highlightable=False,
                    on_enter=HideWindowAction())
            ])

        # Cap the list at 8 entries; Enter opens the repo page.
        for repo in repos[:8]:
            items.append(
                ExtensionResultItem(icon='images/icon.png',
                                    name="%s 🟊 %s" %
                                    (repo["name"], repo["stars"]),
                                    description=repo["description"],
                                    on_enter=OpenUrlAction(repo["url"])))

        return RenderResultListAction(items)
class KeywordQueryEventListener(EventListener):
    """ Listener that handles the user input """

    # pylint: disable=unused-argument,no-self-use
    def on_event(self, event, extension):
        """ Handles the event: forward the typed query to the extension. """
        query = event.get_argument() or ""
        return extension.search_repositories(query)
# Script entry point: instantiate the extension and hand control to ulauncher.
if __name__ == '__main__':
    DockerHubExtension().run()
| 34.905405
| 85
| 0.622145
| 233
| 2,583
| 6.781116
| 0.381974
| 0.065823
| 0.081013
| 0.083544
| 0.182278
| 0.111392
| 0.086076
| 0.086076
| 0
| 0
| 0
| 0.001085
| 0.286101
| 2,583
| 73
| 86
| 35.383562
| 0.855206
| 0.080527
| 0
| 0.163265
| 0
| 0
| 0.068862
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0
| 0.204082
| 0
| 0.387755
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0871266d4d435da659b3d90a1e0729b53c28c39c
| 2,448
|
py
|
Python
|
game/gamesrc/objects/character.py
|
ranka47/battle-of-hogwarts
|
e7b2265ebe5661249dd28e472c49b74c1bbcdf23
|
[
"BSD-3-Clause"
] | 2
|
2019-02-24T00:20:47.000Z
|
2020-04-24T15:50:31.000Z
|
game/gamesrc/objects/character.py
|
ranka47/battle-of-hogwarts
|
e7b2265ebe5661249dd28e472c49b74c1bbcdf23
|
[
"BSD-3-Clause"
] | null | null | null |
game/gamesrc/objects/character.py
|
ranka47/battle-of-hogwarts
|
e7b2265ebe5661249dd28e472c49b74c1bbcdf23
|
[
"BSD-3-Clause"
] | 1
|
2019-01-05T15:51:37.000Z
|
2019-01-05T15:51:37.000Z
|
"""
Template for Characters
Copy this module up one level and name it as you like, then
use it as a template to create your own Character class.
To make new logins default to creating characters
of your new type, change settings.BASE_CHARACTER_TYPECLASS to point to
your new class, e.g.
settings.BASE_CHARACTER_TYPECLASS = "game.gamesrc.objects.mychar.MyChar"
Note that objects already created in the database will not notice
this change, you have to convert them manually e.g. with the
@typeclass command.
"""
from ev import Character as DefaultCharacter
from ev import Script
import random
class Character(DefaultCharacter):
    """
    The Character is like any normal Object (see example/object.py for
    a list of properties and methods), except it actually implements
    some of its hook methods to do some work:
    at_basetype_setup - always assigns the default_cmdset to this object type
    (important!)sets locks so character cannot be picked up
    and its commands only be called by itself, not anyone else.
    (to change things, use at_object_creation() instead)
    at_after_move - launches the "look" command
    at_post_puppet(player) - when Player disconnects from the Character, we
    store the current location, so the "unconnected" character
    object does not need to stay on grid but can be given a
    None-location while offline.
    at_pre_puppet - just before Player re-connects, retrieves the character's
    old location and puts it back on the grid with a "charname
    has connected" message echoed to the room
    """

    def at_object_creation(self):
        """Initialise score, health, will, house and per-creature kill counters."""
        self.db.score = 0
        self.db.health_max = 100
        self.db.health = self.db.health_max
        self.db.will = 100
        self.db.respawns = 0
        # Sort the new character into a random Hogwarts house.
        # (random.choice replaces the manual randint-index idiom.)
        houses = ["Gryffindor", "Hufflepuff", "Slytherin", "Ravenclaw"]
        self.db.house = random.choice(houses)
        # Kill/encounter counters, one per creature type.
        self.db.dementors = 0
        self.db.spiders = 0
        self.db.willow = 0
        self.db.rodents = 0
        self.db.boggart = 0
        self.db.parallax = 0
        self.db.dragon = 0

    def respawn(self):
        """Restore health and will after death, at the cost of 50 score points."""
        self.msg("You lost a life and respawn with all your default powers")
        self.db.health = self.db.health_max
        self.db.score -= 50
        self.db.will = 100
        self.db.respawns += 1
| 38.25
| 79
| 0.663807
| 353
| 2,448
| 4.546742
| 0.504249
| 0.071028
| 0.03053
| 0.028037
| 0.071028
| 0.071028
| 0.071028
| 0.041122
| 0.041122
| 0
| 0
| 0.012878
| 0.270425
| 2,448
| 64
| 80
| 38.25
| 0.885778
| 0.607843
| 0
| 0.16
| 0
| 0
| 0.10694
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.12
| 0
| 0.24
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0873053669c5a9be614101baec79eda2eb276cb9
| 3,170
|
py
|
Python
|
lesson5/lesson5_task4.py
|
nekdfl/GB-python-developer
|
ca3f34bac2a92a930779f89357941bfa9634b3d4
|
[
"MIT"
] | null | null | null |
lesson5/lesson5_task4.py
|
nekdfl/GB-python-developer
|
ca3f34bac2a92a930779f89357941bfa9634b3d4
|
[
"MIT"
] | null | null | null |
lesson5/lesson5_task4.py
|
nekdfl/GB-python-developer
|
ca3f34bac2a92a930779f89357941bfa9634b3d4
|
[
"MIT"
] | null | null | null |
"""
Создать (не программно) текстовый файл со следующим содержимым:
One — 1
Two — 2
Three — 3
Four — 4
Необходимо написать программу, открывающую файл на чтение и считывающую построчно данные.
При этом английские числительные должны заменяться на русские.
Новый блок строк должен записываться в новый текстовый файл.
"""
def readfile(filepath):
    """Return the entire text content of *filepath*."""
    with open(filepath, 'r') as handle:
        contents = handle.read()
    return contents
def make_dict(task2_data, delimiter=" - "):
    """Parse lines of the form "<word><delimiter><number>" into {number: word}.

    Blank lines are skipped.  Raises RuntimeError for a line with the wrong
    number of fields and ValueError for a malformed value (messages include
    the 1-based line number).
    """
    mapping = {}
    for line_no, raw_line in enumerate(task2_data.split("\n"), start=1):
        if raw_line == "":
            continue
        try:
            parts = raw_line.split(delimiter)
            if len(parts) != 2:
                raise RuntimeError(f"Ошибка ввода данных. Неверное количество аргументов в строке {line_no}.")
            word, number = parts
            mapping[number] = word
        except ValueError as err:
            raise ValueError(f"Неверный формат числа в строке {line_no}. Ошибка {err}")
    return mapping
def translate(en_dict, ru_dict):
    """Return {key: ru_dict[key]} for every key present in *en_dict*.

    Raises KeyError when a key from en_dict has no entry in ru_dict
    (the caller handles this).  The stray ``pass`` and the manual
    accumulation loop were replaced with a dict comprehension.
    """
    return {key: ru_dict[key] for key in en_dict}
def write_dict(filepath, dict, delimeter):
    """Write *dict* to *filepath* as "<value><delimeter><key>" lines, then echo the file.

    NOTE(review): the parameter names shadow the builtin ``dict`` and misspell
    "delimiter"; they are kept unchanged for backward compatibility with any
    keyword-argument callers.  The stray ``pass`` was removed.
    """
    lines = []
    for key in dict.keys():
        lines.append(dict[key] + delimeter + key)
    with open(filepath, 'w+') as f:
        f.writelines("\n".join(lines))
        # Rewind so the freshly written content can be echoed back.
        f.seek(0)
        print(f"содержимое выходного файла {filepath}\n{f.read()}")
def full_variant():
    """Full pipeline: read the input file, parse it, translate, write the output."""
    infile_name = "task4_data_in.txt"
    outfile_name = "task4_data_out.txt"
    ru_dict = {'1': 'Один', '2': 'Два', '3': 'Три', '4': 'Четыре'}
    try:
        task2_data = readfile(infile_name)
    except IOError as e:
        print(f"Ошибка работы с файлом: {e}")
        # BUG FIX: execution previously fell through with task2_data undefined,
        # crashing with NameError on the next statement; abort instead.
        exit(4)
    try:
        file_data_dict = make_dict(task2_data)
    except ValueError as e:
        print(f"{e}")
        exit(1)
    except RuntimeError as e:
        print(f"{e}")
        exit(2)
    try:
        resdict = translate(file_data_dict, ru_dict)
        # print(resdict)
    except KeyError as e:
        print(f"В словаре переводчика нет значения для {e}")
        exit(3)
    write_dict(outfile_name, resdict, " - ")
    print("Программа завершена")
def short_variant():
    """Compact variant: scan the input file for known digit keys and write Russian lines."""
    infile_name = "task4_data_in.txt"
    outfile_name = "task4_data_out.txt"
    ru_dict = {'1': 'Один', '2': 'Два', '3': 'Три', '4': 'Четыре'}
    en_dict = {'1': 'one', '2': 'Two', '3': 'Three', '4': 'Four'}
    delimeter = " - "
    collected = []
    with open(infile_name, "r") as source:
        for text_line in source:
            # A line contributes one output line per digit key it mentions.
            collected.extend(
                ru_dict[digit] + delimeter + digit
                for digit in en_dict
                if text_line.count(digit)
            )
    with open(outfile_name, "w+") as target:
        target.writelines("\n".join(collected))
        target.seek(0)
        print(f"содержимое выходного файла {outfile_name}\n{target.read()}")
if __name__ == "__main__":
    # main()
    # Run the compact implementation by default.
    short_variant()
| 26.864407
| 111
| 0.582334
| 412
| 3,170
| 4.351942
| 0.34466
| 0.023424
| 0.029002
| 0.020078
| 0.147239
| 0.147239
| 0.131623
| 0.092582
| 0.092582
| 0.092582
| 0
| 0.014641
| 0.288959
| 3,170
| 117
| 112
| 27.094017
| 0.779059
| 0.131861
| 0
| 0.207792
| 0
| 0
| 0.173421
| 0.018985
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077922
| false
| 0.025974
| 0
| 0
| 0.116883
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0876136eb46ef1d30f09dbd0eff572dd1e4a0144
| 28,812
|
py
|
Python
|
generator.py
|
jimstorch/DGGen
|
cdecbc4bfa491a634aac370de05b21bb6f6cf8e1
|
[
"Apache-2.0"
] | 19
|
2016-12-04T12:43:43.000Z
|
2022-01-25T01:00:24.000Z
|
generator.py
|
jimstorch/DGGen
|
cdecbc4bfa491a634aac370de05b21bb6f6cf8e1
|
[
"Apache-2.0"
] | 9
|
2017-01-04T16:33:00.000Z
|
2021-11-16T06:02:16.000Z
|
generator.py
|
jimstorch/DGGen
|
cdecbc4bfa491a634aac370de05b21bb6f6cf8e1
|
[
"Apache-2.0"
] | 7
|
2016-12-04T12:43:47.000Z
|
2022-02-04T13:10:58.000Z
|
#!/usr/bin/env python3
import argparse
import csv
import datetime
import json
import logging
import os
import sys
import warnings
from collections import defaultdict
from copy import copy
from dataclasses import dataclass
from itertools import islice, cycle, chain
from random import randint, shuffle, choice, sample
from textwrap import shorten, wrap
from typing import List, Any, Dict, Tuple
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfgen import canvas
# Module-level configuration and constants.
script_name = os.path.basename(sys.argv[0])
description = """
Generate characters for the Delta Green pen-and-paper roleplaying game from Arc Dream Publishing.
"""
__version__ = "1.4"
logger = logging.getLogger(script_name)
TEXT_COLOR = (0, 0.1, 0.5)  # RGB fractions used for all text drawn on the sheets
DEFAULT_FONT = "Special Elite"  # registered from data/SpecialElite.ttf at PDF setup
MONTHS = ("JAN", "FEB", "MAR", "APR", "MAY", "JUN", "JUL", "AUG", "SEP", "OCT", "NOV", "DEC")
SUGGESTED_BONUS_CHANCE = 75  # percent chance each suggested bonus skill is applied
def main():
    """Parse options, generate characters for each profession, write the PDF."""
    options = get_options()
    init_logger(options.verbosity)
    logger.debug(options)
    data = load_data(options)
    # With equipment each character needs a front and a back page.
    pages_per_sheet = 2 if options.equip else 1
    professions = [data.professions[options.type]] if options.type else data.professions.values()
    p = Need2KnowPDF(options.output, professions, pages_per_sheet=pages_per_sheet)
    for profession in professions:
        label = generate_label(profession)
        p.bookmark(label)
        # Alternate female/male until the requested count is reached.
        for sex in islice(
            cycle(["female", "male"]), options.count or profession["number_to_generate"]
        ):
            c = Need2KnowCharacter(
                data=data,
                sex=sex,
                profession=profession,
                label_override=options.label,
                employer_override=options.employer,
            )
            if options.equip:
                c.equip(profession.get("equipment-kit", None))
                c.print_footnotes()
            p.add_page(c.d)
            if pages_per_sheet >= 2:
                p.add_page_2(c.e)
    p.save_pdf()
    logger.info("Wrote %s", options.output)
class Need2KnowCharacter(object):
    """A single generated agent: demographics, stats, skills and equipment.

    Results accumulate in two dicts mapping sheet-field name -> value:
    ``d`` (front page) and ``e`` (equipment / back page), which
    Need2KnowPDF renders onto the character-sheet images.
    """

    # Point-buy stat arrays offered alongside one freshly rolled array.
    statpools = [
        [13, 13, 12, 12, 11, 11],
        [15, 14, 12, 11, 10, 10],
        [17, 14, 13, 10, 10, 8],
    ]

    # Baseline skill ratings every agent starts with.
    DEFAULT_SKILLS = {
        "accounting": 10,
        "alertness": 20,
        "athletics": 30,
        "bureaucracy": 10,
        "criminology": 10,
        "disguise": 10,
        "dodge": 30,
        "drive": 20,
        "firearms": 20,
        "first aid": 10,
        "heavy machinery": 10,
        "history": 10,
        "humint": 10,
        "melee weapons": 30,
        "navigate": 10,
        "occult": 10,
        "persuade": 20,
        "psychotherapy": 10,
        "ride": 10,
        "search": 20,
        "stealth": 10,
        "survival": 10,
        "swim": 20,
        "unarmed combat": 40,
    }

    # Pool of skills eligible for random +20 boosts.
    BONUS = [
        "accounting",
        "alertness",
        "anthropology",
        "archeology",
        "art1",
        "artillery",
        "athletics",
        "bureaucracy",
        "computer science",
        "craft1value",
        "criminology",
        "demolitions",
        "disguise",
        "dodge",
        "drive",
        "firearms",
        "first aid",
        "forensics",
        "heavy machinery",
        "heavy weapons",
        "history",
        "humint",
        "law",
        "medicine",
        "melee weapons",
        "militaryscience1value",
        "navigate",
        "occult",
        "persuade",
        "pharmacy",
        "pilot1value",
        "psychotherapy",
        "ride",
        "science1value",
        "search",
        "sigint",
        "stealth",
        "surgery",
        "survival",
        "swim",
        "unarmed combat",
        "language1",
    ]

    def __init__(self, data, sex, profession, label_override=None, employer_override=None):
        """Generate a complete character.

        Args:
            data: loaded ``Data`` bundle (name lists, towns, distinguishing features).
            sex: "male" or "female"; selects the given-name list.
            profession: one profession definition dict from professions.json.
            label_override: replaces the profession label when set.
            employer_override: replaces the employer string when set.
        """
        self.data = data
        self.profession = profession
        self.sex = sex
        # Hold all dictionaries
        self.d = {}
        self.e = {}
        # Each distinct footnote text lazily receives the next marker symbol.
        self.footnotes = defaultdict(
            iter(
                ["*", "†", "‡", "§", "¶", "**", "††", "‡‡", "§§", "¶¶", "***", "†††", "‡‡‡", "§§§"]
            ).__next__
        )
        self.generate_demographics(label_override, employer_override)
        self.generate_stats()
        self.generate_derived_attributes()
        self.generate_skills()

    def generate_demographics(self, label_override, employer_override):
        """Fill name, sex checkbox, profession, employer, nationality and age."""
        if self.sex == "male":
            self.d["male"] = "X"
            self.d["name"] = (
                choice(self.data.family_names).upper() + ", " + choice(self.data.male_given_names)
            )
        else:
            self.d["female"] = "X"
            self.d["name"] = (
                choice(self.data.family_names).upper() + ", " + choice(self.data.female_given_names)
            )
        self.d["profession"] = label_override or self.profession["label"]
        self.d["employer"] = employer_override or ", ".join(
            e
            for e in [self.profession.get("employer", ""), self.profession.get("division", "")]
            if e
        )
        self.d["nationality"] = "(U.S.A.) " + choice(self.data.towns)
        self.d["age"] = "%d (%s %d)" % (randint(24, 55), choice(MONTHS), (randint(1, 28)))

    def generate_stats(self):
        """Pick a stat array (point-buy or 4d6-drop-lowest rolled) and assign randomly."""
        rolled = [[sum(sorted([randint(1, 6) for _ in range(4)])[1:]) for _ in range(6)]]
        pool = choice(self.statpools + rolled)
        shuffle(pool)
        for score, stat in zip(
            pool, ["strength", "constitution", "dexterity", "intelligence", "power", "charisma"]
        ):
            self.d[stat] = score
            self.d[f"{stat}_x5"] = score * 5
            self.d[f"{stat}_distinguishing"] = self.distinguishing(stat, score)

    def generate_derived_attributes(self):
        """Compute HP, WP, SAN, breaking point and damage bonus from the stats."""
        self.d["hitpoints"] = int(round((self.d["strength"] + self.d["constitution"]) / 2.0))
        self.d["willpower"] = self.d["power"]
        self.d["sanity"] = self.d["power"] * 5
        self.d["breaking point"] = self.d["power"] * 4
        self.damage_bonus = ((self.d["strength"] - 1) >> 2) - 2
        self.d["damage bonus"] = "DB=%d" % self.damage_bonus

    def generate_skills(self):
        """Layer default, fixed-professional, possible-professional and bonus skills."""
        # Default skills
        self.d.update(self.DEFAULT_SKILLS)
        # Professional skills
        self.d.update(self.profession["skills"]["fixed"])
        # BUG FIX: random.sample() requires a sequence (dict views raise
        # TypeError on Python 3.11+), so materialise the items first.
        for skill, score in sample(
            list(self.profession["skills"].get("possible", {}).items()),
            self.profession["skills"].get("possible-count", 0),
        ):
            self.d[skill] = score
        for i in range(self.profession["bonds"]):
            self.d[f"bond{i}"] = self.d["charisma"]
        # Bonus skills
        self.generate_bonus_skills(self.profession)

    def generate_bonus_skills(self, profession):
        """Apply eight +20 boosts, preferring the profession's suggested skills."""
        # Suggested skills each pass a percentage check; shuffled generic
        # skills pad the list so eight boosts can always be applied.
        bonus_skills = [
            s
            for s in profession["skills"].get("bonus", [])
            if randint(1, 100) <= SUGGESTED_BONUS_CHANCE
        ] + sample(self.BONUS, len(self.BONUS))
        bonuses_applied = 0
        while bonuses_applied < 8:
            skill = bonus_skills.pop(0)
            boosted = self.d.get(skill, 0) + 20
            if boosted <= 80:  # skills are capped at 80
                self.d[skill] = boosted
                bonuses_applied += 1
                logger.debug("%s, boosted %s to %s", self, skill, boosted)
            else:
                logger.info(
                    "%s, Skipped boost - %s already at %s", self, skill, self.d.get(skill, 0)
                )

    def __str__(self):
        return ", ".join(
            [
                self.d.get(i)
                for i in ("name", "profession", "employer", "department")
                if self.d.get(i)
            ]
        )

    def distinguishing(self, field, value):
        """Return a random distinguishing feature for a (stat, score) pair, or ""."""
        return choice(self.data.distinguishing.get((field, value), [""]))

    def equip(self, kit_name=None):
        """Fill the back-page dict ``e`` with weapons and gear from the named kit."""
        weapons = [self.data.weapons["unarmed"]]
        if kit_name:
            kit = self.data.kits[kit_name]
            weapons += self.build_weapon_list(kit["weapons"])
            gear = []
            for item in kit["armour"] + kit["gear"]:
                notes = (
                    (" ".join(self.store_footnote(n) for n in item["notes"]) + " ")
                    if "notes" in item
                    else ""
                )
                text = notes + (self.data.armour[item["type"]] if "type" in item else item["text"])
                gear.append(text)
            wrapped_gear = list(chain(*[wrap(item, 55, subsequent_indent=" ") for item in gear]))
            if len(wrapped_gear) > 22:  # sheet has 22 gear lines
                logger.warning("Too much gear - truncated.")
            for i, line in enumerate(wrapped_gear):
                self.e[f"gear{i}"] = line
        if len(weapons) > 7:  # sheet has 7 weapon rows
            logger.warning("Too many weapons %s - truncated.", weapons)
        for i, weapon in enumerate(weapons[:7]):
            self.equip_weapon(i, weapon)

    def build_weapon_list(self, weapons_to_add):
        """Recursively resolve a kit's weapon spec (type / one-of / both entries)."""
        result = []
        for weapon_to_add in weapons_to_add:
            if "type" in weapon_to_add:
                weapon = copy(self.data.weapons.get(weapon_to_add["type"], None))
                if weapon:
                    if "notes" in weapon_to_add:
                        weapon["notes"] = weapon_to_add["notes"]
                    # An optional "chance" percentage gates inclusion.
                    result += (
                        [weapon]
                        if "chance" not in weapon_to_add
                        or weapon_to_add["chance"] >= randint(1, 100)
                        else []
                    )
                else:
                    logger.error("Unknown weapon type %s", weapon_to_add["type"])
            elif "one-of" in weapon_to_add:
                result += self.build_weapon_list([choice(weapon_to_add["one-of"])])
            elif "both" in weapon_to_add:
                result += self.build_weapon_list(w for w in weapon_to_add["both"])
            else:
                logger.error("Don't understand weapon %r", weapon_to_add)
        return result

    def equip_weapon(self, slot, weapon):
        """Fill one weapon row (``weapon{slot}_*`` fields) from a weapon dict."""
        self.e[f"weapon{slot}"] = shorten(weapon["name"], 15, placeholder="…")
        roll = int(self.d.get(weapon["skill"], 0) + (weapon["bonus"] if "bonus" in weapon else 0))
        self.e[f"weapon{slot}_roll"] = f"{roll}%"
        if "base-range" in weapon:
            self.e[f"weapon{slot}_range"] = weapon["base-range"]
        if "ap" in weapon:
            self.e[f"weapon{slot}_ap"] = f"{weapon['ap']}"
        if "lethality" in weapon:
            lethality = weapon["lethality"]
            lethality_note_indicator = (
                self.store_footnote(lethality["special"]) if "special" in lethality else None
            )
            self.e[f"weapon{slot}_lethality"] = (
                f"{lethality['rating']}%" if lethality["rating"] else ""
            ) + (f" {lethality_note_indicator}" if lethality_note_indicator else "")
        if "ammo" in weapon:
            self.e[f"weapon{slot}_ammo"] = f"{weapon['ammo']}"
        if "kill-radius" in weapon:
            self.e[f"weapon{slot}_kill_radius"] = f"{weapon['kill-radius']}"
        if "notes" in weapon:
            self.e[f"weapon{slot}_note"] = " ".join(self.store_footnote(n) for n in weapon["notes"])
        if "damage" in weapon:
            damage = weapon["damage"]
            damage_note_indicator = (
                self.store_footnote(damage["special"]) if "special" in damage else None
            )
            if "dice" in damage:
                damage_modifier = (damage["modifier"] if "modifier" in damage else 0) + (
                    self.damage_bonus if "db-applies" in damage and damage["db-applies"] else 0
                )
                damage_roll = f"{damage['dice']}D{damage['die-type']}" + (
                    f"{damage_modifier:+d}" if damage_modifier else ""
                )
            else:
                damage_roll = ""
            self.e[f"weapon{slot}_damage"] = damage_roll + (
                f" {damage_note_indicator}" if damage_note_indicator else ""
            )

    def print_footnotes(self):
        """Render collected footnotes into the sheet's 12 note lines."""
        notes = list(
            chain(
                *[
                    wrap(f"{pointer} {note}", 40, subsequent_indent=" ")
                    for (note, pointer) in list(self.footnotes.items())
                ]
            )
        )
        if len(notes) > 12:
            logger.warning("Too many footnotes - truncated.")
        for i, note in enumerate(notes[:12]):
            self.e[f"note{i}"] = note

    def store_footnote(self, note):
        """Returns indicator character"""
        return self.footnotes[note] if note else None
class Need2KnowPDF(object):
    """Renders generated character dicts onto the official sheet backgrounds."""

    # Location of form fields in Points (1/72 inch) - 0,0 is bottom-left - and font size
    field_xys = {
        # Personal Data
        "name": (75, 693, 11),
        "profession": (343, 693, 11),
        "employer": (75, 665, 11),
        "nationality": (343, 665, 11),
        "age": (185, 640, 11),
        "birthday": (200, 640, 11),
        "male": (98, 639, 11),
        "female": (76, 639, 11),
        # Statistical Data
        "strength": (136, 604, 11),
        "constitution": (136, 586, 11),
        "dexterity": (136, 568, 11),
        "intelligence": (136, 550, 11),
        "power": (136, 532, 11),
        "charisma": (136, 514, 11),
        "strength_x5": (172, 604, 11),
        "constitution_x5": (172, 586, 11),
        "dexterity_x5": (172, 568, 11),
        "intelligence_x5": (172, 550, 11),
        "power_x5": (172, 532, 11),
        "charisma_x5": (172, 514, 11),
        "strength_distinguishing": (208, 604, 11),
        "constitution_distinguishing": (208, 586, 11),
        "dexterity_distinguishing": (208, 568, 11),
        "intelligence_distinguishing": (208, 550, 11),
        "power_distinguishing": (208, 532, 11),
        "charisma_distinguishing": (208, 514, 11),
        "damage bonus": (555, 200, 11),
        "hitpoints": (195, 482, 11),
        "willpower": (195, 464, 11),
        "sanity": (195, 446, 11),
        "breaking point": (195, 428, 11),
        "bond0": (512, 604, 11),
        "bond1": (512, 586, 11),
        "bond2": (512, 568, 11),
        "bond3": (512, 550, 11),
        # Applicable Skill Sets
        "accounting": (200, 361, 11),
        "alertness": (200, 343, 11),
        "anthropology": (200, 325, 11),
        "archeology": (200, 307, 11),
        "art1": (200, 289, 11),
        "art2": (200, 281, 11),
        "artillery": (200, 253, 11),
        "athletics": (200, 235, 11),
        "bureaucracy": (200, 217, 11),
        "computer science": (200, 200, 11),
        "craft1label": (90, 185, 9),
        "craft1value": (200, 185, 9),
        "craft2label": (90, 177, 9),
        "craft2value": (200, 177, 9),
        "craft3label": (90, 169, 9),
        "craft3value": (200, 169, 9),
        "craft4label": (90, 161, 9),
        "craft4value": (200, 161, 9),
        "criminology": (200, 145, 11),
        "demolitions": (200, 127, 11),
        "disguise": (200, 109, 11),
        "dodge": (200, 91, 11),
        "drive": (200, 73, 11),
        "firearms": (200, 54, 11),
        "first aid": (361, 361, 11),
        "forensics": (361, 343, 11),
        "heavy machinery": (361, 325, 11),
        "heavy weapons": (361, 307, 11),
        "history": (361, 289, 11),
        "humint": (361, 270, 11),
        "law": (361, 253, 11),
        "medicine": (361, 235, 11),
        "melee weapons": (361, 217, 11),
        "militaryscience1value": (361, 199, 11),
        "militaryscience1label": (327, 199, 11),
        "militaryscience2value": (361, 186, 11),
        "militaryscience2label": (327, 186, 11),
        "navigate": (361, 163, 11),
        "occult": (361, 145, 11),
        "persuade": (361, 127, 11),
        "pharmacy": (361, 109, 11),
        "pilot1value": (361, 91, 9),
        "pilot1label": (290, 91, 9),
        "pilot2value": (361, 83, 9),
        "pilot2label": (290, 83, 9),
        "psychotherapy": (361, 54, 11),
        "ride": (521, 361, 11),
        "science1label": (442, 347, 9),
        "science1value": (521, 347, 9),
        "science2label": (442, 340, 9),
        "science2value": (521, 340, 9),
        "science3label": (442, 333, 9),
        "science3value": (521, 333, 9),
        "science4label": (442, 326, 9),
        "science4value": (521, 326, 9),
        "search": (521, 307, 11),
        "sigint": (521, 289, 11),
        "stealth": (521, 270, 11),
        "surgery": (521, 253, 11),
        "survival": (521, 235, 11),
        "swim": (521, 217, 11),
        "unarmed combat": (521, 200, 11),
        "unnatural": (521, 181, 11),
        "language1": (521, 145, 11),
        "language2": (521, 127, 11),
        "language3": (521, 109, 11),
        "skill1": (521, 91, 11),
        "skill2": (521, 73, 11),
        "skill3": (521, 54, 11),
        # 2nd page
        "weapon0": (85, 480, 11),
        "weapon0_roll": (175, 480, 11),
        "weapon0_range": (215, 480, 11),
        "weapon0_damage": (270, 480, 11),
        "weapon0_ap": (345, 480, 11),
        "weapon0_lethality": (410, 480, 11),
        "weapon0_kill_radius": (462, 480, 11),
        "weapon0_ammo": (525, 480, 11),
        "weapon0_note": (560, 480, 11),
        "weapon1": (85, 461, 11),
        "weapon1_roll": (175, 461, 11),
        "weapon1_range": (215, 461, 11),
        "weapon1_damage": (270, 461, 11),
        "weapon1_ap": (345, 461, 11),
        "weapon1_lethality": (410, 461, 11),
        "weapon1_kill_radius": (462, 461, 11),
        "weapon1_ammo": (525, 461, 11),
        "weapon1_note": (560, 461, 11),
        "weapon2": (85, 442, 11),
        "weapon2_roll": (175, 442, 11),
        "weapon2_range": (215, 442, 11),
        "weapon2_damage": (270, 442, 11),
        "weapon2_ap": (345, 442, 11),
        "weapon2_lethality": (410, 442, 11),
        "weapon2_kill_radius": (462, 442, 11),
        "weapon2_ammo": (525, 442, 11),
        "weapon2_note": (560, 442, 11),
        "weapon3": (85, 423, 11),
        "weapon3_roll": (175, 423, 11),
        "weapon3_range": (215, 423, 11),
        "weapon3_damage": (270, 423, 11),
        "weapon3_ap": (345, 423, 11),
        "weapon3_lethality": (410, 423, 11),
        "weapon3_kill_radius": (462, 423, 11),
        "weapon3_ammo": (525, 423, 11),
        "weapon3_note": (560, 423, 11),
        "weapon4": (85, 404, 11),
        "weapon4_roll": (175, 404, 11),
        "weapon4_range": (215, 404, 11),
        "weapon4_damage": (270, 404, 11),
        "weapon4_ap": (345, 404, 11),
        "weapon4_lethality": (410, 404, 11),
        "weapon4_kill_radius": (462, 404, 11),
        "weapon4_ammo": (525, 404, 11),
        "weapon4_note": (560, 404, 11),
        "weapon5": (85, 385, 11),
        "weapon5_roll": (175, 385, 11),
        "weapon5_range": (215, 385, 11),
        "weapon5_damage": (270, 385, 11),
        "weapon5_ap": (345, 385, 11),
        "weapon5_lethality": (410, 385, 11),
        "weapon5_kill_radius": (462, 385, 11),
        "weapon5_ammo": (525, 385, 11),
        "weapon5_note": (560, 385, 11),
        "weapon6": (85, 366, 11),
        "weapon6_roll": (175, 366, 11),
        "weapon6_range": (215, 366, 11),
        "weapon6_damage": (270, 366, 11),
        "weapon6_ap": (345, 366, 11),
        "weapon6_lethality": (410, 366, 11),
        "weapon6_kill_radius": (465, 366, 11),
        "weapon6_ammo": (525, 366, 11),
        "weapon6_note": (560, 366, 11),
        "gear0": (75, 628, 8),
        "gear1": (75, 618, 8),
        "gear2": (75, 608, 8),
        "gear3": (75, 598, 8),
        "gear4": (75, 588, 8),
        "gear5": (75, 578, 8),
        "gear6": (75, 568, 8),
        "gear7": (75, 558, 8),
        "gear8": (75, 548, 8),
        "gear9": (75, 538, 8),
        "gear10": (75, 528, 8),
        "gear11": (323, 628, 8),
        "gear12": (323, 618, 8),
        "gear13": (323, 608, 8),
        "gear14": (323, 598, 8),
        "gear15": (323, 588, 8),
        "gear16": (323, 578, 8),
        "gear17": (323, 568, 8),
        "gear18": (323, 558, 8),
        "gear19": (323, 548, 8),
        "gear20": (323, 538, 8),
        "gear21": (323, 528, 8),
        "note0": (50, 40, 8),
        "note1": (50, 30, 8),
        "note2": (50, 20, 8),
        "note3": (50, 10, 8),
        "note4": (240, 40, 8),
        "note5": (240, 30, 8),
        "note6": (240, 20, 8),
        "note7": (240, 10, 8),
        "note8": (410, 40, 8),
        "note9": (410, 30, 8),
        "note10": (410, 20, 8),
        "note11": (410, 10, 8),
    }

    # Fields that also get a multiplier
    x5_stats = ["strength", "constitution", "dexterity", "intelligence", "power", "charisma"]

    def __init__(self, filename, professions, pages_per_sheet=1):
        """Open the output canvas, register fonts, and emit a TOC when needed."""
        self.filename = filename
        self.pages_per_sheet = pages_per_sheet
        self.c = canvas.Canvas(self.filename)
        # Set US Letter in points
        self.c.setPageSize((612, 792))
        self.c.setAuthor("https://github.com/jimstorch/DGGen")
        self.c.setTitle("Delta Green Agent Roster")
        self.c.setSubject("Pre-generated characters for the Delta Green RPG")
        # Register Custom Fonts
        pdfmetrics.registerFont(TTFont("Special Elite", "data/SpecialElite.ttf"))
        pdfmetrics.registerFont(TTFont("OCRA", "data/OCRA.ttf"))
        # A single profession needs no table of contents.
        if len(professions) > 1:
            self.generate_toc(professions, pages_per_sheet)

    def generate_toc(self, professions, pages_per_sheet):
        """Build a clickable Table of Contents on page 1"""
        self.bookmark("Table of Contents")
        self.c.setFillColorRGB(0, 0, 0)
        self.c.setFont("OCRA", 10)
        now = datetime.datetime.utcnow().isoformat() + "Z"
        self.c.drawString(150, 712, "DGGEN DTG " + now)
        self.c.drawString(150, 700, "CLASSIFIED/DG/NTK//")
        self.c.drawString(150, 688, "SUBJ ROSTER/ACTIVE/NOCELL/CONUS//")
        top = 650
        pagenum = 2
        for count, profession in enumerate(professions):
            label = generate_label(profession)
            # Dotted leader: label padded to 40 chars, page number right-aligned.
            chapter = "{:.<40}".format(shorten(label, 37, placeholder="")) + "{:.>4}".format(
                pagenum
            )
            self.c.drawString(150, top - self.line_drop(count), chapter)
            self.c.linkAbsolute(
                label,
                label,
                (145, (top - 6) - self.line_drop(count), 470, (top + 18) - self.line_drop(count)),
            )
            pagenum += profession["number_to_generate"] * pages_per_sheet
        if pages_per_sheet == 1:
            chapter = "{:.<40}".format("Blank Character Sheet Second Page") + "{:.>4}".format(
                pagenum + profession["number_to_generate"]
            )
            # NOTE(review): line_drop(pagenum) offsets by the *page number*, unlike
            # the loop above which offsets by the TOC line count — confirm intended.
            self.c.drawString(150, top - self.line_drop(pagenum), chapter)
            self.c.linkAbsolute(
                "Back Page",
                "Back Page",
                (
                    145,
                    (top - 6) - self.line_drop(pagenum),
                    470,
                    (top + 18) - self.line_drop(pagenum),
                ),
            )
        self.c.showPage()

    @staticmethod
    def line_drop(count, linesize=22):
        """Vertical offset in points for the count-th TOC line."""
        return count * linesize

    def bookmark(self, text):
        """Add a named bookmark/outline entry at the current page."""
        self.c.bookmarkPage(text)
        self.c.addOutlineEntry(text, text)

    def draw_string(self, x, y, size, text):
        """Draw *text* at (x, y) in the default font, size and text colour."""
        self.c.setFont(DEFAULT_FONT, size)
        self.c.setFillColorRGB(*TEXT_COLOR)
        self.c.drawString(x, y, str(text))

    def fill_field(self, field, value):
        """Draw *value* at the sheet position registered for *field*."""
        try:
            x, y, s = self.field_xys[field]
            self.draw_string(x, y, s, str(value))
        except KeyError:
            logger.error("Unknown field %s", field)

    def add_page(self, d):
        """Render the front page: background image plus every field in *d*."""
        # Add background. ReportLab will cache it for repeat
        self.c.drawImage("data/Character Sheet NO BACKGROUND FRONT.jpg", 0, 0, 612, 792)
        for key in d:
            self.fill_field(key, d[key])
        # Tell ReportLab we're done with current page
        self.c.showPage()

    def add_page_2(self, e):
        """Render the equipment/back page: background image plus every field in *e*."""
        # Add background. ReportLab will cache it for repeat
        self.c.drawImage("data/Character Sheet NO BACKGROUND BACK.jpg", 0, 0, 612, 792)
        for key in e:
            self.fill_field(key, e[key])
        # Tell ReportLab we're done with current page
        self.c.showPage()

    def save_pdf(self):
        """Append a blank back page (single-page mode only) and write the file."""
        if self.pages_per_sheet == 1:
            self.bookmark("Back Page")
            self.c.drawImage("data/Character Sheet NO BACKGROUND BACK.jpg", 0, 0, 612, 792)
            self.c.showPage()
        self.c.save()
def generate_label(profession):
    """Join the profession's non-empty label, employer and division with ", "."""
    parts = [
        profession.get("label", ""),
        profession.get("employer", ""),
        profession.get("division", ""),
    ]
    return ", ".join(part for part in parts if part)
def get_options():
    """Get options and arguments from argv string."""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "-v",
        "--verbosity",
        action="count",
        default=0,
        help="specify up to three times to increase verbosity, "
        "i.e. -v to see warnings, -vv for information messages, or -vvv for debug messages.",
    )
    parser.add_argument("-V", "--version", action="version", version=__version__)
    parser.add_argument(
        "-o",
        "--output",
        action="store",
        # Default filename embeds the generation timestamp.
        default=f"DeltaGreenPregen-{datetime.datetime.now() :%Y-%m-%d-%H-%M}.pdf",
        help="Output PDF file. Defaults to %(default)s.",
    )
    parser.add_argument(
        "-t", "--type", action="store", help=f"Select single profession to generate."
    )
    parser.add_argument("-l", "--label", action="store", help="Override profession label.")
    parser.add_argument(
        "-c",
        "--count",
        type=int,
        action="store",
        help="Generate this many characters of each profession.",
    )
    parser.add_argument(
        "-e", "--employer", action="store", help="Set employer for all generated characters."
    )
    # Equipment is on by default; -u switches the "equip" flag off.
    parser.add_argument(
        "-u",
        "--unequipped",
        action="store_false",
        dest="equip",
        help="Don't generate equipment.",
        default=True,
    )
    data = parser.add_argument_group(title="Data", description="Data file locations")
    data.add_argument(
        "--professions",
        action="store",
        default="data/professions.json",
        help="Data file for professions - defaults to %(default)s",
    )
    return parser.parse_args()
@dataclass
class Data:
    """Bundle of all external data files loaded by load_data()."""

    male_given_names: List[str]
    female_given_names: List[str]
    family_names: List[str]
    towns: List[str]
    professions: Dict[str, Any]  # parsed professions.json
    kits: Dict[str, Any]  # equipment kits from equipment.json
    weapons: Dict[str, Any]  # weapon definitions from equipment.json
    armour: Dict[str, Any]  # armour descriptions from equipment.json
    distinguishing: Dict[Tuple[str, int], List[str]]  # (stat, score) -> feature texts
def load_data(options):
    """Read the name lists, JSON data files and distinguishing-features CSV into a Data bundle."""

    def read_lines(path):
        # One entry per line.
        with open(path) as handle:
            return handle.read().splitlines()

    male_given_names = read_lines("data/boys1986.txt")
    female_given_names = read_lines("data/girls1986.txt")
    family_names = read_lines("data/surnames.txt")
    towns = read_lines("data/towns.txt")
    with open(options.professions) as handle:
        professions = json.load(handle)
    with open("data/equipment.json") as handle:
        equipment = json.load(handle)
    # Expand each CSV row's inclusive score range into per-(stat, score) lists.
    distinguishing = {}
    with open("data/distinguishing-features.csv") as handle:
        for row in csv.DictReader(handle):
            for score in range(int(row["from"]), int(row["to"]) + 1):
                distinguishing.setdefault((row["statistic"], score), []).append(
                    row["distinguishing"]
                )
    return Data(
        male_given_names=male_given_names,
        female_given_names=female_given_names,
        family_names=family_names,
        towns=towns,
        professions=professions,
        kits=equipment["kits"],
        weapons=equipment["weapons"],
        armour=equipment["armour"],
        distinguishing=distinguishing,
    )
def init_logger(verbosity, stream=sys.stdout):
    """Initialize logger and warnings according to verbosity argument.

    Verbosity levels of 0-3 supported: ERROR, WARNING, INFO, DEBUG.
    """
    debug = verbosity > 2
    if debug:
        level = logging.DEBUG
        log_format = (
            "%(asctime)s %(levelname)-8s %(name)s %(module)s.py:%(funcName)s():%(lineno)d %(message)s"
        )
    else:
        level = [logging.ERROR, logging.WARNING, logging.INFO][verbosity]
        log_format = "%(message)s"
    logging.basicConfig(level=level, format=log_format, stream=stream)
    if not debug:
        # Below debug verbosity, silence Python warnings as well.
        warnings.filterwarnings("ignore")
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| 34.797101
| 103
| 0.530508
| 3,317
| 28,812
| 4.509798
| 0.208321
| 0.011699
| 0.009559
| 0.00722
| 0.128217
| 0.078548
| 0.061167
| 0.051006
| 0.036099
| 0.031018
| 0
| 0.097958
| 0.313342
| 28,812
| 827
| 104
| 34.839178
| 0.656945
| 0.025198
| 0
| 0.050754
| 0
| 0.002743
| 0.20271
| 0.021715
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.024691
| 0.005487
| 0.096022
| 0.002743
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
087670710e46b9499b04f22d8a01fa0767bf4b47
| 9,093
|
py
|
Python
|
tests/test_remote.py
|
bcyran/philipstv
|
6037724d5fab0b72265c2de2c0441a64f6e00c00
|
[
"MIT"
] | null | null | null |
tests/test_remote.py
|
bcyran/philipstv
|
6037724d5fab0b72265c2de2c0441a64f6e00c00
|
[
"MIT"
] | null | null | null |
tests/test_remote.py
|
bcyran/philipstv
|
6037724d5fab0b72265c2de2c0441a64f6e00c00
|
[
"MIT"
] | null | null | null |
from typing import Union
from unittest.mock import Mock, create_autospec
import pytest
from pytest import MonkeyPatch
from philipstv import PhilipsTVAPI, PhilipsTVPairer, PhilipsTVRemote, PhilipsTVRemoteError
from philipstv.model import (
AllChannels,
AmbilightColor,
AmbilightColors,
AmbilightLayer,
AmbilightPower,
AmbilightPowerValue,
AmbilightTopology,
Application,
ApplicationComponent,
ApplicationIntent,
Applications,
Channel,
ChannelID,
ChannelList,
ChannelShort,
CurrentChannel,
CurrentVolume,
DeviceInfo,
InputKey,
InputKeyValue,
PowerState,
PowerStateValue,
SetChannel,
Volume,
)
# Shared fixture data: a two-channel channel list used by channel tests.
CHANNELS = AllChannels(
    version=1,
    id="all",
    list_type="MixedSources",
    medium="mixed",
    operator="OPER",
    install_country="Poland",
    channel=[
        Channel(
            ccid=35,
            preset="1",
            name="Polsat HD",
            onid=1537,
            tsid=24,
            sid=2403,
            service_type="audio_video",
            type="DVB_C",
            logo_version=33,
        ),
        Channel(
            ccid=40,
            preset="3",
            name="TVN HD",
            onid=666,
            tsid=24,
            sid=2403,
            service_type="audio_video",
            type="DVB_C",
            logo_version=33,
        ),
    ],
)
# Two example installed applications for application-listing/launch tests.
APPLICATION_SPOTIFY = Application(
    intent=ApplicationIntent(
        component=ApplicationComponent(
            package_name="com.spotify.tv.android",
            class_name="com.spotify.tv.android.SpotifyTVActivity",
        ),
        action="android.intent.action.MAIN",
    ),
    label="Spotify",
    order=0,
    id="com.spotify.tv.android.SpotifyTVActivity-com.spotify.tv.android",
    type="app",
)
APPLICATION_NETFLIX = Application(
    intent=ApplicationIntent(
        component=ApplicationComponent(
            package_name="com.netflix.ninja",
            class_name="com.netflix.ninja.MainActivity",
        ),
        action="android.intent.action.MAIN",
    ),
    label="Netflix",
    order=0,
    id="com.netflix.ninja.MainActivity-com.netflix.ninja",
    type="app",
)
APPLICATIONS = Applications(
    version=0,
    applications=[APPLICATION_SPOTIFY, APPLICATION_NETFLIX],
)
@pytest.fixture
def api_mock() -> Mock:
    """Autospecced PhilipsTVAPI instance mock; spec_set rejects unknown attributes."""
    return create_autospec(PhilipsTVAPI, spec_set=True, instance=True)  # type: ignore
def test_host(api_mock: Mock) -> None:
    """The remote exposes the underlying API's host unchanged."""
    api_mock.host = "192.168.0.66"

    remote_host = PhilipsTVRemote(api_mock).host

    assert remote_host == "192.168.0.66"
def test_auth(api_mock: Mock) -> None:
    """Credentials set on the remote are readable back and propagate to the API.

    Fix: the parameter was annotated `PhilipsTVAPI`, but the `api_mock` fixture
    returns a `Mock` — every sibling test annotates it as `Mock`.
    """
    expected_credentials = ("<key>", "<secret>")
    remote = PhilipsTVRemote(api_mock)
    remote.auth = expected_credentials
    assert remote.auth == expected_credentials
    assert api_mock.auth == expected_credentials
def test_pair(api_mock: Mock, monkeypatch: MonkeyPatch) -> None:
    """Pairing forwards the PIN callback and builds a DeviceInfo with the given id."""
    pairer_mock = create_autospec(PhilipsTVPairer)
    pairer_mock.return_value = pairer_mock  # the patched constructor returns the mock itself
    monkeypatch.setattr("philipstv.remote.PhilipsTVPairer", pairer_mock)

    def fake_callback() -> str:
        return "str"

    given_id = "<id>"
    PhilipsTVRemote(api_mock).pair(fake_callback, given_id)

    pairer_mock.pair.assert_called_once_with(fake_callback)
    passed_device_info = pairer_mock.call_args.args[1]
    assert isinstance(passed_device_info, DeviceInfo)
    assert passed_device_info.id == given_id
def test_pair_no_id(api_mock: Mock, monkeypatch: MonkeyPatch) -> None:
    """Without an explicit id, pairing generates a random 16-char alphanumeric one."""
    pairer_mock = create_autospec(PhilipsTVPairer)
    pairer_mock.return_value = pairer_mock
    monkeypatch.setattr("philipstv.remote.PhilipsTVPairer", pairer_mock)

    PhilipsTVRemote(api_mock).pair(lambda: "str")

    generated_info = pairer_mock.call_args.args[1]
    assert isinstance(generated_info, DeviceInfo)
    assert len(generated_info.id) == 16
    assert generated_info.id.isalnum()
def test_get_power(api_mock: Mock) -> None:
    """A STANDBY power state maps to boolean False."""
    api_mock.get_powerstate.return_value = PowerState(powerstate=PowerStateValue.STANDBY)
    assert PhilipsTVRemote(api_mock).get_power() is False
def test_set_power(api_mock: Mock) -> None:
    """Turning power on sends an ON powerstate payload to the API."""
    remote = PhilipsTVRemote(api_mock)
    remote.set_power(True)
    api_mock.set_powerstate.assert_called_once_with(PowerState(powerstate=PowerStateValue.ON))
def test_get_volume(api_mock: Mock) -> None:
    """Only the current volume level is reported, not the mute/min/max fields."""
    api_mock.get_volume.return_value = CurrentVolume(muted=False, current=15, min=0, max=60)
    assert PhilipsTVRemote(api_mock).get_volume() == 15
def test_set_volume(api_mock: Mock) -> None:
    """Setting the volume sends an unmuted Volume payload."""
    remote = PhilipsTVRemote(api_mock)
    remote.set_volume(20)
    api_mock.set_volume.assert_called_once_with(Volume(current=20, muted=False))
def test_get_current_channel(api_mock: Mock) -> None:
    """The current channel is resolved to its display name."""
    current = CurrentChannel(
        channel=ChannelShort(ccid=5, preset="10", name="TVN HD"),
        channel_list=ChannelList(id="allcab", version="1"),
    )
    api_mock.get_current_channel.return_value = current
    assert PhilipsTVRemote(api_mock).get_current_channel() == "TVN HD"
@pytest.mark.parametrize(
    "channel_input, expected",
    [
        (1, SetChannel(channel=ChannelID(ccid=35))),
        ("Polsat HD", SetChannel(channel=ChannelID(ccid=35))),
        (3, SetChannel(channel=ChannelID(ccid=40))),
        ("TVN HD", SetChannel(channel=ChannelID(ccid=40))),
    ],
)
def test_set_channel(api_mock: Mock, channel_input: Union[int, str], expected: SetChannel) -> None:
    """Channels can be selected by preset number or by name, and the list is cached.

    Fixes: `Union` was used without being imported (NameError when this module
    is collected), and the parameter shadowed the `input` builtin.
    """
    api_mock.get_all_channels.return_value = CHANNELS
    remote = PhilipsTVRemote(api_mock)
    remote.set_channel(channel_input)
    api_mock.set_channel.assert_called_once_with(expected)
    # A second call must reuse the cached channel list instead of re-fetching it.
    remote.set_channel(channel_input)
    api_mock.get_all_channels.assert_called_once()
def test_set_channel_error(api_mock: Mock) -> None:
    """Selecting a channel that is not in the channel list raises PhilipsTVRemoteError."""
    # Fix: the channel list is served by `get_all_channels` (as in the other
    # channel tests); the original stubbed `get_current_channel` by mistake,
    # so the lookup never saw the CHANNELS fixture.
    api_mock.get_all_channels.return_value = CHANNELS
    with pytest.raises(PhilipsTVRemoteError):
        PhilipsTVRemote(api_mock).set_channel("random channel")
def test_get_all_channels(api_mock: Mock) -> None:
    """All channels come back as a preset-number -> name mapping."""
    api_mock.get_all_channels.return_value = CHANNELS
    channels = PhilipsTVRemote(api_mock).get_all_channels()
    assert channels == {1: "Polsat HD", 3: "TVN HD"}
def test_input_key(api_mock: Mock) -> None:
    """Key presses are wrapped in an InputKey payload."""
    remote = PhilipsTVRemote(api_mock)
    remote.input_key(InputKeyValue.STANDBY)
    api_mock.input_key.assert_called_once_with(InputKey(key=InputKeyValue.STANDBY))
def test_get_ambilight_power(api_mock: Mock) -> None:
    """An OFF ambilight power state maps to boolean False."""
    api_mock.get_ambilight_power.return_value = AmbilightPower(power=AmbilightPowerValue.OFF)
    assert PhilipsTVRemote(api_mock).get_ambilight_power() is False
def test_set_ambilight_power(api_mock: Mock) -> None:
    """Enabling ambilight sends an ON power payload."""
    remote = PhilipsTVRemote(api_mock)
    remote.set_ambilight_power(True)
    api_mock.set_ambilight_power.assert_called_once_with(
        AmbilightPower(power=AmbilightPowerValue.ON)
    )
def test_set_ambilight_color(api_mock: Mock) -> None:
    """A single color is forwarded to the cached-color endpoint unchanged."""
    color = AmbilightColor(r=0, g=69, b=255)
    PhilipsTVRemote(api_mock).set_ambilight_color(color)
    api_mock.set_ambilight_cached.assert_called_once_with(color)
def test_set_ambilight_color_sides(api_mock: Mock) -> None:
    """Per-side colors are expanded to one color entry per ambilight point.

    Each side's single color is replicated across every point of that side as
    reported by the TV's ambilight topology, all inside the "layer1" layer.
    """
    left_color = AmbilightColor(r=255, g=0, b=0)
    top_color = AmbilightColor(r=0, g=255, b=0)
    right_color = AmbilightColor(r=0, g=0, b=255)
    bottom_color = AmbilightColor(r=125, g=0, b=125)
    # One layer; 2 points on left/right, 3 on top/bottom.
    topology = AmbilightTopology(layers=1, left=2, top=3, right=2, bottom=3)
    api_mock.get_ambilight_topology.return_value = topology
    PhilipsTVRemote(api_mock).set_ambilight_color(
        left=left_color, top=top_color, right=right_color, bottom=bottom_color
    )
    # Expected payload: point index (as a string) -> side color, per side.
    api_mock.set_ambilight_cached.assert_called_once_with(
        AmbilightColors(
            __root__={
                "layer1": AmbilightLayer(
                    left={str(point): left_color for point in range(topology.left)},
                    top={str(point): top_color for point in range(topology.top)},
                    right={str(point): right_color for point in range(topology.right)},
                    bottom={str(point): bottom_color for point in range(topology.bottom)},
                )
            }
        )
    )
def test_get_applications(api_mock: Mock) -> None:
    """Installed applications are listed by their labels, in order."""
    api_mock.get_applications.return_value = APPLICATIONS
    labels = PhilipsTVRemote(api_mock).get_applications()
    assert labels == ["Spotify", "Netflix"]
@pytest.mark.parametrize(
    "app, expected",
    [
        ("Spotify", APPLICATION_SPOTIFY),
        ("Netflix", APPLICATION_NETFLIX),
    ],
)
def test_launch_application(api_mock: Mock, app: str, expected: Application) -> None:
    """Applications launch by label and the application list is fetched only once.

    Fix: the parametrized values are `Application` instances, so the `expected`
    annotation now says `Application` instead of `ApplicationIntent`.
    """
    api_mock.get_applications.return_value = APPLICATIONS
    remote = PhilipsTVRemote(api_mock)
    remote.launch_application(app)
    api_mock.launch_application.assert_called_once_with(expected)
    # A second launch must reuse the cached application list.
    remote.launch_application(app)
    api_mock.get_applications.assert_called_once()
def test_launch_application_error(api_mock: Mock) -> None:
    """Launching an application with an unknown label raises PhilipsTVRemoteError."""
    api_mock.get_applications.return_value = APPLICATIONS
    remote = PhilipsTVRemote(api_mock)
    with pytest.raises(PhilipsTVRemoteError):
        remote.launch_application("whatever")
| 29.144231
| 94
| 0.698119
| 1,083
| 9,093
| 5.598338
| 0.174515
| 0.073891
| 0.036286
| 0.03711
| 0.505195
| 0.369949
| 0.280719
| 0.241959
| 0.179119
| 0.12535
| 0
| 0.015448
| 0.195535
| 9,093
| 311
| 95
| 29.237942
| 0.813397
| 0.00132
| 0
| 0.25431
| 0
| 0
| 0.066637
| 0.035136
| 0
| 0
| 0
| 0
| 0.107759
| 1
| 0.094828
| false
| 0
| 0.025862
| 0.008621
| 0.12931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0879ba08e89fa5f242f50ddb01acf847e7896d29
| 9,612
|
py
|
Python
|
a2t/src/test_runner.py
|
syeda-khurrath/fabric8-analytics-common
|
421f7e27869c5695ed73b51e6422e097aba00108
|
[
"Apache-2.0"
] | null | null | null |
a2t/src/test_runner.py
|
syeda-khurrath/fabric8-analytics-common
|
421f7e27869c5695ed73b51e6422e097aba00108
|
[
"Apache-2.0"
] | 4
|
2019-05-20T08:27:47.000Z
|
2019-05-20T08:29:57.000Z
|
a2t/src/test_runner.py
|
codeready-analytics/fabric8-analytics-common
|
a763c5534d601f2f40a0f02c02914c49ea23669d
|
[
"Apache-2.0"
] | 1
|
2020-10-05T21:12:44.000Z
|
2020-10-05T21:12:44.000Z
|
"""Implementation of benchmarks.
Copyright (c) 2019 Red Hat Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from random import randint
from fastlog import log
from time import time
from queue import Queue
from threading import Thread
from report_generator import generate_csv_report
from component_generator import ComponentGenerator
from setup import parse_tags
# directory containing test results
# NOTE(review): relative to the current working directory; presumably it must
# exist before reports are written — confirm against generate_csv_report.
RESULT_DIRECTORY = "test_results"
def check_number_of_results(queue_size, component_analysis_count, stack_analysis_count):
    """Warn when fewer results were collected than analyses were started.

    Analyses that end with an HTTP error (4xx, 5xx) never push a result onto
    the queue, so the queue may hold fewer items than the number of worker
    threads started from the CLI parameters. Each stack analysis is expected
    to contribute two results; each component analysis one.
    """
    log.info("queue size: {size}".format(size=queue_size))
    expected = component_analysis_count + 2 * stack_analysis_count
    if queue_size == expected:
        return
    log.warning("Warning: {expected} results expected, but only {got} is presented".format(
        expected=expected, got=queue_size))
    log.warning("This means that {n} analysis ends with error or exception".format(
        n=expected - queue_size))
def prepare_component_generators(python_payload, maven_payload, npm_payload):
    """Prepare all required component generators for selected payload types."""
    factory = ComponentGenerator()
    selected = []
    # Generators are created for every ecosystem; only the enabled ones are
    # returned, in the fixed order pypi, maven, npm.
    for enabled, ecosystem in ((python_payload, "pypi"),
                               (maven_payload, "maven"),
                               (npm_payload, "npm")):
        generator = factory.generator_for_ecosystem(ecosystem)
        if enabled:
            selected.append(generator)
    return selected
def initialize_generators(generators):
    """Advance all generators by one random offset so runs don't start at item #1."""
    offset = randint(10, 100)
    for _ in range(offset):
        for generator in generators:
            next(generator)
def component_analysis_benchmark(queue, threads, component_analysis, thread_count,
                                 python_payload, maven_payload, npm_payload):
    """Component analysis benchmark.

    Starts `thread_count` workers, each analysing a random E/P/V triple drawn
    from the enabled ecosystem generators. Threads are appended to `threads`;
    results are expected on `queue` (pushed by `component_analysis.start`).
    """
    generators = prepare_component_generators(python_payload, maven_payload, npm_payload)
    initialize_generators(generators)
    for worker_index in range(thread_count):
        generator = generators[randint(0, len(generators) - 1)]
        ecosystem, component, version = next(generator)
        with log.indent():
            log.info("Component analysis for E/P/V {} {} {}".format(ecosystem, component, version))
        worker = Thread(target=component_analysis.start,
                        args=(worker_index, ecosystem, component, version, queue))
        worker.start()
        threads.append(worker)
        # skip some items
        for _ in range(randint(5, 25)):
            next(generator)
def stack_analysis_benchmark(queue, threads, stack_analysis, thread_count,
                             python_payload, maven_payload, npm_payload):
    """Stack analysis benchmark.

    Starts `thread_count` workers, each running one stack analysis for a
    randomly chosen manifest file. Threads are appended to `threads`; results
    are expected on `queue` (pushed by `stack_analysis.start`).

    NOTE(review): the *_payload flags are accepted but unused here — manifests
    are picked from the full list regardless of the ecosystem selection.
    """
    # TODO: read automagically from the filelist
    manifests = (
        ("maven", "clojure_1_6_0.xml"),
        ("maven", "clojure_1_7_0.xml"),
        ("maven", "clojure_1_8_0.xml"),
        ("maven", "clojure_junit.xml"),
        ("pypi", "click_6_star.txt"),
        ("pypi", "array_split.txt"),
        ("pypi", "fastlog_urllib_requests.txt"),
        ("pypi", "requests_latest.txt"),
        ("pypi", "numpy_latest.txt"),
        ("pypi", "flask_latest.txt"),
        ("pypi", "scipy_latest.txt"),
        ("pypi", "pygame_latest.txt"),
        ("pypi", "pyglet_latest.txt"),
        ("pypi", "dash_latest.txt"),
        ("pypi", "pudb_latest.txt"),
        ("pypi", "pytest_latest.txt"),
        ("pypi", "numpy_1_11_0.txt"),
        ("pypi", "numpy_1_12_0.txt"),
        ("pypi", "numpy_1_16_2.txt"),
        ("pypi", "numpy_1_16_3.txt"),
        ("pypi", "numpy_scipy.txt"),
        ("pypi", "pytest_2_0_0.txt"),
        ("pypi", "pytest_2_0_1.txt"),
        ("pypi", "pytest_3_2_2.txt"),
        ("pypi", "requests_2_20_0.txt"),
        ("pypi", "requests_2_20_1.txt"),
        ("pypi", "requests_2_21_0.txt"),
        ("pypi", "scipy_1_1_0.txt"),
        ("pypi", "scipy_1_2_0.txt"),
        ("pypi", "scipy_1_2_1.txt"),
        ("npm", "array.json"),
        ("npm", "dependency_array.json"),
        ("npm", "dependency_emitter_component.json"),
        ("npm", "dependency_jquery.json"),
        ("npm", "dependency_jquery_react.json"),
        ("npm", "dependency_lodash.json"),
        ("npm", "dependency_lodash_react_jquery.json"),
        ("npm", "dependency_react.json"),
        ("npm", "dependency_to_function.json"),
        ("npm", "dependency_to_function_vue_array.json"),
        ("npm", "dependency_underscore.json"),
        ("npm", "dependency_underscore_react_jquery.json"),
        ("npm", "dependency_vue.json"),
        ("npm", "dependency_vue_to_function.json"),
        ("npm", "empty.json"),
        ("npm", "jquery.json"),
        ("npm", "lodash.json"),
        ("npm", "mocha.json"),
        ("npm", "no_requirements.json"),
        ("npm", "underscore.json"),
        ("npm", "wisp.json"),
    )
    for t in range(thread_count):
        # pick a random manifest for this worker
        manifest_idx = randint(0, len(manifests) - 1)
        manifest = manifests[manifest_idx]
        with log.indent():
            log.info("Stack analysis")
        ecosystem = manifest[0]
        manifest_file = manifest[1]
        # `t` is reused: the worker index goes into args first, then the name
        # is rebound to the Thread object itself.
        t = Thread(target=stack_analysis.start,
                   args=(t, ecosystem, manifest_file, queue))
        t.start()
        threads.append(t)
def wait_for_all_threads(threads):
    """Block until every worker thread in `threads` has finished."""
    log.info("Waiting for all threads to finish")
    for worker in threads:
        worker.join()
    log.success("Done")
def run_test(cfg, test, i, component_analysis, stack_analysis):
    """Run one selected test.

    `test` is one row read from the tests CSV file; `i` is the 1-based test
    number used for logging only. Spawns the configured number of component
    and stack analyses, waits for all workers, checks the result count, and
    writes a CSV report named after the test.
    """
    test_name = test["Name"]
    log.info("Starting test #{n} with name '{desc}'".format(n=i, desc=test_name))
    with log.indent():
        start = time()
        threads = []
        queue = Queue()
        with log.indent():
            component_analysis_count = int(test["Component analysis"])
            stack_analysis_count = int(test["Stack analysis"])
            python_payload = test["Python payload"] in ("Yes", "yes")
            maven_payload = test["Maven payload"] in ("Yes", "yes")
            npm_payload = test["NPM payload"] in ("Yes", "yes")
            component_analysis_benchmark(queue, threads, component_analysis,
                                         component_analysis_count,
                                         python_payload, maven_payload, npm_payload)
            stack_analysis_benchmark(queue, threads, stack_analysis,
                                     stack_analysis_count,
                                     python_payload, maven_payload, npm_payload)
        wait_for_all_threads(threads)
        queue_size = queue.qsize()
        check_number_of_results(queue_size, component_analysis_count, stack_analysis_count)
        end = time()
        # TODO: use better approach to join paths
        filename = RESULT_DIRECTORY + "/" + test_name.replace(" ", "_") + ".csv"
        # Fix: the format string had lost its '{filename}' placeholder, so the
        # log message never showed the actual report path.
        log.info("Generating test report into file '{filename}'".format(filename=filename))
        generate_csv_report(queue, test, start, end, end - start, filename)
def run_all_loaded_tests(cfg, tests, component_analysis, stack_analysis):
    """Run all tests read from CSV file, numbering them from 1."""
    for test_number, test in enumerate(tests, start=1):
        run_test(cfg, test, test_number, component_analysis, stack_analysis)
def run_tests_with_tags(cfg, tests, tags, component_analysis, stack_analysis):
    """Run the CSV-loaded tests whose tag set contains all tags in `tags`.

    The test counter is incremented only for tests that actually run, so
    skipped tests do not consume a number.
    """
    executed = 1
    for test in tests:
        # run only when every requested tag appears in the test's own tags
        if tags <= parse_tags(test["Tags"]):
            run_test(cfg, test, executed, component_analysis, stack_analysis)
            executed += 1
        else:
            log.info("Skipping test #{n} with name '{desc}'".format(n=executed, desc=test["Name"]))
def no_tests(tests):
    """Predicate: True when no tests are loaded (None or an empty sequence)."""
    # `not tests` already covers both None and an empty sequence; the original
    # `or len(tests) == 0` clause was redundant.
    return not tests
def start_tests(cfg, tests, tags, component_analysis, stack_analysis):
    """Start all tests using the already loaded configuration."""
    log.info("Run tests")
    with log.indent():
        # refuse to continue with nothing to run
        if no_tests(tests):
            log.error("No tests loaded!")
            sys.exit(-1)
        if len(tests) == 1:
            log.success("Loaded 1 test")
        else:
            log.success("Loaded {n} tests".format(n=len(tests)))
        if tags:
            run_tests_with_tags(cfg, tests, tags, component_analysis, stack_analysis)
        else:
            run_all_loaded_tests(cfg, tests, component_analysis, stack_analysis)
| 37.546875
| 99
| 0.634415
| 1,209
| 9,612
| 4.837055
| 0.224152
| 0.029925
| 0.037791
| 0.04617
| 0.350205
| 0.233755
| 0.197503
| 0.161936
| 0.15236
| 0.112175
| 0
| 0.011998
| 0.24563
| 9,612
| 255
| 100
| 37.694118
| 0.794511
| 0.168435
| 0
| 0.168539
| 0
| 0
| 0.212343
| 0.046668
| 0
| 0
| 0
| 0.003922
| 0
| 1
| 0.061798
| false
| 0
| 0.050562
| 0
| 0.123596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
087e86827c6cc73f03d6554fcf8f36b2777a11b4
| 1,221
|
py
|
Python
|
win/python/CAO/calcClient.py
|
kioto/ORiN2Sample
|
a7a9007b696fdd3ab29f1ec5cededc59b232fae2
|
[
"MIT"
] | null | null | null |
win/python/CAO/calcClient.py
|
kioto/ORiN2Sample
|
a7a9007b696fdd3ab29f1ec5cededc59b232fae2
|
[
"MIT"
] | null | null | null |
win/python/CAO/calcClient.py
|
kioto/ORiN2Sample
|
a7a9007b696fdd3ab29f1ec5cededc59b232fae2
|
[
"MIT"
] | null | null | null |
import win32com.client
import time
class CalcClient(object):
    """Calculator client talking to a blackboard provider through the ORiN2 CAO engine (COM)."""

    def __init__(self):
        # Create the CAO engine (COM object) and open its first workspace.
        self._eng = win32com.client.Dispatch('CAO.CaoEngine')
        self._ws = self._eng.Workspaces(0)
        self._ctrl = self._ws.AddController('bb1', 'CaoProv.Blackboard')
        # Register the blackboard variables used by the request/response protocol.
        self._var_cmd = self._ctrl.AddVariable('cmd')
        self._var_val1 = self._ctrl.AddVariable('val1')
        self._var_val2 = self._ctrl.AddVariable('val2')
        self._var_res = self._ctrl.AddVariable('res')
        self._var_ack = self._ctrl.AddVariable('ack')

    def calc(self, cmd_str, val1, val2):
        """Send one calculation request and print its result.

        Operands are written first; writing the command then triggers the
        calculation on the server side.
        """
        print(f'calc({cmd_str}, {val1}, {val2})')
        self._var_val1.Value = val1
        self._var_val2.Value = val2
        self._var_cmd.Value = cmd_str  # writing the command starts the calculation
        # Busy-wait for completion.
        # NOTE(review): loops forever if 'ack' never becomes True — confirm
        # whether a timeout is needed.
        while True:
            if self._var_ack.Value is True:
                break
            time.sleep(0.1)
        res = self._var_res.Value
        print(' = ', res)
        time.sleep(1)
if __name__ == '__main__':
    # Exercise each supported operation once with the same operand pair.
    client = CalcClient()
    for operation in ('ADD', 'SUB', 'MUL', 'DIV'):
        client.calc(operation, 123, 567)
| 29.780488
| 73
| 0.564292
| 150
| 1,221
| 4.293333
| 0.36
| 0.108696
| 0.147516
| 0.055901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052817
| 0.302211
| 1,221
| 41
| 74
| 29.780488
| 0.703052
| 0.027027
| 0
| 0
| 0
| 0
| 0.091783
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.166667
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08804de9d3324b167c6447b69cc226552d4b7bbe
| 282
|
py
|
Python
|
Mundo-1/exercicio-05.py
|
FRafaelPA/Praticando-Python
|
d8a46beceeae2ac20acf4c63f86a32cba537c896
|
[
"MIT"
] | null | null | null |
Mundo-1/exercicio-05.py
|
FRafaelPA/Praticando-Python
|
d8a46beceeae2ac20acf4c63f86a32cba537c896
|
[
"MIT"
] | null | null | null |
Mundo-1/exercicio-05.py
|
FRafaelPA/Praticando-Python
|
d8a46beceeae2ac20acf4c63f86a32cba537c896
|
[
"MIT"
] | null | null | null |
'''
Read an integer and print both its predecessor and its successor.
'''
number = int(input('Entre com um valor: '))
predecessor = number - 1
successor = number + 1
print('o antecessor do número {} é {} e seu sucessor é {}'.format(number, predecessor, successor))
| 23.5
| 93
| 0.673759
| 47
| 282
| 4.042553
| 0.574468
| 0.115789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008889
| 0.202128
| 282
| 12
| 94
| 23.5
| 0.835556
| 0.329787
| 0
| 0
| 0
| 0
| 0.38674
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0886c3adb37d4bb2d284b34954bef308daf23bd3
| 508
|
py
|
Python
|
001-Python-basico/008-desafio-pratico.py
|
clebertonf/Python-course
|
a57f405cbd27f96e0cb61128df31e9249c79a962
|
[
"MIT"
] | null | null | null |
001-Python-basico/008-desafio-pratico.py
|
clebertonf/Python-course
|
a57f405cbd27f96e0cb61128df31e9249c79a962
|
[
"MIT"
] | null | null | null |
001-Python-basico/008-desafio-pratico.py
|
clebertonf/Python-course
|
a57f405cbd27f96e0cb61128df31e9249c79a962
|
[
"MIT"
] | null | null | null |
from datetime import date
# Current year, captured once at import time.
year_current_date = date.today().year
def get_info(name, age, height, weight):
    """Print a person's age, height, weight, BMI and (approximate) birth year."""
    birth_year = year_current_date - age
    bmi = round(weight / (height ** 2), 2)
    print(f"{name} tem {age} anos, {height} de altura e pesa {weight} KG.")
    print(f"O IMC do {name} é: {bmi}")
    print(f"{name} nasceu em {birth_year}")
get_info("Cleberton", 28, 1.69, 75)
# The function receives some information via parameters and prints the birth
# year and BMI inside a few customized sentences.
| 28.222222
| 83
| 0.687008
| 80
| 508
| 4.2625
| 0.6125
| 0.052786
| 0.087977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021739
| 0.185039
| 508
| 17
| 84
| 29.882353
| 0.801932
| 0.222441
| 0
| 0
| 0
| 0.111111
| 0.313776
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.222222
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08873554c1a8d8174ca6425485bfe2a0d0880e6a
| 2,306
|
py
|
Python
|
tests/components/speedtestdotnet/test_init.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
tests/components/speedtestdotnet/test_init.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
tests/components/speedtestdotnet/test_init.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for SpeedTest integration."""
from unittest.mock import patch
import speedtest
from openpeerpower import config_entries
from openpeerpower.components import speedtestdotnet
from openpeerpower.setup import async_setup_component
from tests.common import MockConfigEntry
async def test_setup_with_config(opp):
    """Importing YAML config should set up the integration."""
    domain_options = {
        speedtestdotnet.CONF_SERVER_ID: "1",
        speedtestdotnet.CONF_MANUAL: True,
        speedtestdotnet.CONF_SCAN_INTERVAL: "00:01:00",
    }
    with patch("speedtest.Speedtest"):
        setup_ok = await async_setup_component(
            opp, speedtestdotnet.DOMAIN, {speedtestdotnet.DOMAIN: domain_options}
        )
    assert setup_ok
async def test_successful_config_entry(opp):
    """A config entry should load and forward setup to the sensor platform."""
    entry = MockConfigEntry(domain=speedtestdotnet.DOMAIN, data={})
    entry.add_to_opp(opp)
    with patch("speedtest.Speedtest"), patch(
        "openpeerpower.config_entries.ConfigEntries.async_forward_entry_setup",
        return_value=True,
    ) as forward_entry_setup:
        await opp.config_entries.async_setup(entry.entry_id)
    assert entry.state is config_entries.ConfigEntryState.LOADED
    first_forward_args = forward_entry_setup.mock_calls[0][1]
    assert first_forward_args == (entry, "sensor")
async def test_setup_failed(opp):
    """A config retrieval error should leave the entry in SETUP_RETRY."""
    entry = MockConfigEntry(domain=speedtestdotnet.DOMAIN, data={})
    entry.add_to_opp(opp)
    failing_speedtest = patch(
        "speedtest.Speedtest", side_effect=speedtest.ConfigRetrievalError
    )
    with failing_speedtest:
        await opp.config_entries.async_setup(entry.entry_id)
    assert entry.state is config_entries.ConfigEntryState.SETUP_RETRY
async def test_unload_entry(opp):
    """Unloading the entry should remove all integration data."""
    entry = MockConfigEntry(domain=speedtestdotnet.DOMAIN, data={})
    entry.add_to_opp(opp)
    with patch("speedtest.Speedtest"):
        await opp.config_entries.async_setup(entry.entry_id)
        assert await opp.config_entries.async_unload(entry.entry_id)
        await opp.async_block_till_done()
    assert entry.state is config_entries.ConfigEntryState.NOT_LOADED
    assert speedtestdotnet.DOMAIN not in opp.data
| 28.825
| 82
| 0.717259
| 268
| 2,306
| 5.958955
| 0.276119
| 0.073262
| 0.030056
| 0.067627
| 0.365686
| 0.349405
| 0.349405
| 0.319975
| 0.319975
| 0.319975
| 0
| 0.004849
| 0.195143
| 2,306
| 79
| 83
| 29.189873
| 0.855603
| 0.013877
| 0
| 0.320755
| 0
| 0
| 0.077184
| 0.03301
| 0
| 0
| 0
| 0
| 0.132075
| 1
| 0
| false
| 0
| 0.113208
| 0
| 0.113208
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0888b580bb9eb1968da656fe5efb329d6602a748
| 616
|
py
|
Python
|
case/xpath.py
|
xierensong/learnPython
|
33f9891d8a8ed39772ff9bcbeb1e5cff6f3b5455
|
[
"MIT"
] | null | null | null |
case/xpath.py
|
xierensong/learnPython
|
33f9891d8a8ed39772ff9bcbeb1e5cff6f3b5455
|
[
"MIT"
] | null | null | null |
case/xpath.py
|
xierensong/learnPython
|
33f9891d8a8ed39772ff9bcbeb1e5cff6f3b5455
|
[
"MIT"
] | 1
|
2018-10-11T08:20:44.000Z
|
2018-10-11T08:20:44.000Z
|
import requests
from lxml import etree
if __name__ == '__main__':
    # Identify as a desktop browser for the request.
    headers = {"User-Agent": 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
    url = 'https://www.apache.org/dist/ant/'
    sourceHTML = requests.get(url, headers=headers)
    selector = etree.HTML(sourceHTML.text)
    # Anchors with an href inside the first <pre> element of the listing page.
    folder_list = selector.xpath('//pre[position()=1]/a[@href]')
    for elmt in folder_list:
        href_TT = elmt.get('href')
        print('href_TT ', href_TT)
        # Fix: use endswith() instead of indexing the last character, which
        # raised IndexError for an empty href value.
        if href_TT.endswith('/'):
            print('folder_list', elmt.attrib)
| 41.066667
| 144
| 0.63474
| 89
| 616
| 4.213483
| 0.640449
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058943
| 0.201299
| 616
| 15
| 145
| 41.066667
| 0.703252
| 0
| 0
| 0
| 0
| 0.076923
| 0.349026
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
088ddea79b72540b919336ee600c90b0505ded86
| 5,132
|
py
|
Python
|
jelm/tests/unit/test_jelm_class.py
|
endremborza/jelm
|
6916bbd4ceb909ad3350c56d3a149bdb97671489
|
[
"MIT"
] | null | null | null |
jelm/tests/unit/test_jelm_class.py
|
endremborza/jelm
|
6916bbd4ceb909ad3350c56d3a149bdb97671489
|
[
"MIT"
] | null | null | null |
jelm/tests/unit/test_jelm_class.py
|
endremborza/jelm
|
6916bbd4ceb909ad3350c56d3a149bdb97671489
|
[
"MIT"
] | null | null | null |
import pytest
from jelm import Jelm, Node, Edge
from jelm.tests.network_case_set_class import NetwokCaseTemplate
def test_eq(jelm_pair_case: NetwokCaseTemplate):
    """Equality holds across the case pair and fails against foreign types."""
    jelm_pair_case.evaluate_fun(non_altering_function=lambda network: network)
    # comparisons against unrelated types must be falsy, not raise
    assert not (10 == Jelm())
    assert not ("fing" == Jelm())
def test_jelm_repr(jelm_pair_case: NetwokCaseTemplate):
    """repr() must mention the format name and the node count."""

    def check_representation(network: Jelm):
        text = network.__repr__()
        assert "jelm" in text
        assert str(len(network.nodes.keys())) in text
        return network

    jelm_pair_case.evaluate_fun(non_altering_function=check_representation)
def test_neighbors(jelm_pair_case: NetwokCaseTemplate):
    """Neighbor maps must be symmetric; target/source maps must be consistent."""

    def check_symmetry(network: Jelm):
        for node_id, node in network.nodes.items():
            # the undirected neighbor map must point back to this node
            for other_id in node.neighbors.keys():
                assert node_id in network.get_node(other_id).neighbors.keys()
            # every target must list this node among its sources
            for target_id in node.target_neighbors.keys():
                assert node_id in network.get_node(target_id).source_neighbors.keys()
        return network

    jelm_pair_case.evaluate_fun(non_altering_function=check_symmetry)
def test_add_node_as_object_w_cases(jelm_pair_case: NetwokCaseTemplate):
    """Adding a node from a raw dict succeeds, or the node survives the expected ValueError."""

    def alter(el: Jelm):
        el.add_object({"type": "node", "id": "n10"})
        return el

    def verify(el: Jelm):
        assert isinstance(el.get_node("n10"), Node)

    def on_exception(el: Jelm, exc):
        assert isinstance(exc, ValueError)
        assert isinstance(el.get_node("n10"), Node)

    jelm_pair_case.evaluate_fun(
        altering_function=alter,
        assert_alteration=verify,
        catch_alteration_exception=on_exception,
    )
def test_add_edge_as_object_w_cases(jelm_pair_case: NetwokCaseTemplate):
    """Adding an edge from a raw dict wires both neighbor maps, or raises KeyError for missing endpoints."""

    def alter(el: Jelm):
        el.add_object({"type": "edge", "source": "n1", "target": "n2"})
        return el

    def verify(el: Jelm):
        source = el.get_node("n1")
        assert "n2" in source.neighbors.keys()
        assert "n1" in el.get_node("n2").neighbors
        assert "n2" in source.target_neighbors.keys()

    def on_exception(el: Jelm, exc):
        assert isinstance(exc, KeyError)
        # at least one endpoint must be absent from the network
        assert ("n1" not in el.nodes.keys()) or ("n2" not in el.nodes.keys())

    jelm_pair_case.evaluate_fun(
        altering_function=alter,
        assert_alteration=verify,
        catch_alteration_exception=on_exception,
    )
def test_add_edge_jelm_object_w_cases(jelm_pair_case: NetwokCaseTemplate):
    """Adding an Edge instance records it — including its id — in both neighbor maps."""

    def alter(el: Jelm):
        el.add_object(Edge(source="n1", target="n2", id="fing"))
        return el

    def verify(el: Jelm):
        source = el.get_node("n1")
        assert "n2" in source.neighbors.keys()
        assert "n1" in el.get_node("n2").neighbors
        assert "n2" in source.target_neighbors.keys()
        # the edge id must be preserved on the stored edge objects
        assert "fing" in [edge.id for edge in source.neighbors["n2"]]

    def on_exception(el: Jelm, exc):
        assert isinstance(exc, KeyError)
        assert ("n1" not in el.nodes.keys()) or ("n2" not in el.nodes.keys())

    jelm_pair_case.evaluate_fun(
        altering_function=alter,
        assert_alteration=verify,
        catch_alteration_exception=on_exception,
    )
def test_init():
    """Jelm can be built from metadata+objects, metadata+nodes, or raw dicts, comparing by content."""
    el = Jelm(metadata={"author": "John Doe"}, objects=[])
    assert isinstance(el.objects, list)
    assert isinstance(el.metadata, dict)
    # same metadata, empty content: equal regardless of construction path
    assert el == Jelm(metadata={"author": "John Doe"}, nodes={})
    # differing metadata: not equal
    assert not (el == Jelm())
    from_nodes = Jelm(nodes={"id1": Node(id="n1")})
    from_objects = Jelm(objects=[{"type": "node", "id": "n1"}])
    assert from_nodes == from_objects
def test_init_w_cases(jelm_pair_case: NetwokCaseTemplate):
    """Rebuilding a network from its own metadata and nodes yields an equal network."""

    def rebuild(el):
        clone = Jelm(metadata=el.metadata, nodes=el.nodes)
        assert clone == el
        return clone

    jelm_pair_case.evaluate_fun(non_altering_function=rebuild)
def test_add_object():
    """add_object accepts raw dicts and model instances and rejects malformed input."""
    el = Jelm()
    el.add_object({"type": "node", "id": "n1"})
    el.add_object(Node(id="n2"))
    el.add_object({"type": "edge", "source": "n1", "target": "n2"})
    el.add_object(Node(id="n3", attributes={"priority": "low"}))
    # malformed payloads: missing type, unknown type, not a mapping at all
    for bad_object in ({"no": "type"}, {"type": "wrong"}, 10):
        with pytest.raises(ValueError):
            el.add_object(bad_object)
    el.add_edge("n3", "n2")
    el.add_node("n4", {"order": "latest"})
    # the stored objects are parsed into more than one model type
    assert len({type(o) for o in el.objects}) > 1
    assert isinstance(el.objects[0], Node)
    assert isinstance(el.objects[2], Edge)
def test_iter():
    """Iterating a Jelm yields parsed Node/Edge instances in insertion order."""
    el = Jelm(
        metadata={"author": "John Doe"},
        objects=[
            {"type": "node", "id": "n1"},
            {"type": "node", "id": "n2"},
            {"type": "edge", "source": "n1", "target": "n2"},
        ],
    )
    parsed = list(el)
    assert all(isinstance(obj, Node) for obj in parsed[:2])
    assert all(isinstance(obj, Edge) for obj in parsed[2:])
| 25.034146
| 77
| 0.639517
| 726
| 5,132
| 4.242424
| 0.140496
| 0.031169
| 0.054545
| 0.068182
| 0.647078
| 0.516558
| 0.502273
| 0.356494
| 0.271429
| 0.237662
| 0
| 0.016153
| 0.227981
| 5,132
| 204
| 78
| 25.156863
| 0.761232
| 0
| 0
| 0.223141
| 0
| 0
| 0.055144
| 0
| 0
| 0
| 0
| 0
| 0.305785
| 1
| 0.181818
| false
| 0
| 0.024793
| 0
| 0.256198
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
088f0e150b58a95dbcc3bacf169c6bdc57e4eedc
| 6,582
|
py
|
Python
|
trajectory_prediction/evaluation.py
|
libai2019/dataset-api
|
2f793821864f32bd210c17060a70682488bb74e0
|
[
"Apache-2.0"
] | 385
|
2018-07-02T22:21:25.000Z
|
2022-03-28T13:12:47.000Z
|
trajectory_prediction/evaluation.py
|
libai2019/dataset-api
|
2f793821864f32bd210c17060a70682488bb74e0
|
[
"Apache-2.0"
] | 102
|
2018-08-01T10:40:40.000Z
|
2022-03-16T10:32:44.000Z
|
trajectory_prediction/evaluation.py
|
libai2019/dataset-api
|
2f793821864f32bd210c17060a70682488bb74e0
|
[
"Apache-2.0"
] | 98
|
2018-07-12T18:36:42.000Z
|
2022-03-20T04:38:03.000Z
|
'''
Evaluation code for trajectory prediction.
We record the objects in the last frame of every sequence in test dataset as considered objects, which is stored in considered_objects.txt.
We compare the error between your predicted locations in the next 3s(six positions) and the ground truth for these considered objects.
To run this script, make sure that your results are in required format.
'''
import os
import argparse
import numpy as np
def evaluation(frame_data_result, frame_data_gt, consider_peds):
    """Compare predicted and ground-truth trajectories, print and return the
    weighted displacement errors.

    Args:
        frame_data_result: per-frame lists of predicted [id, type, x, y] rows
        frame_data_gt: per-frame lists of ground-truth [id, type, x, y] rows
        consider_peds: per-sequence lists of object ids to evaluate

    Returns:
        (WSADE, ADEv, ADEp, ADEb, WSFDE, FDEv, FDEp, FDEb)
    """
    # defined length of predicted trajectory (six positions = 3 s, per the
    # module docstring)
    predict_len = 6
    # the counter for testing sequences
    sequence_count = 0
    # weighted coefficient for vehicles, pedestrians, bicyclists respectively
    vehicle_coe = 0.2
    pedestrian_coe = 0.58
    bicycle_coe = 0.22
    # error charged for missing considered objects
    miss_error = 100
    # record displacement error for three types of objects
    vehicle_error = []
    pedestrian_error = []
    bicycle_error = []
    # record final displacement error for three types of objects
    # NOTE(review): these names are rebound to scalar means further down
    vehicle_final_error = []
    pedestrian_final_error = []
    bicycle_final_error = []
    # walk the frames in chunks of predict_len; one chunk per test sequence
    for i in range(0, len(frame_data_result) - predict_len + 1, predict_len):
        current_consider_ped = consider_peds[sequence_count]
        sequence_count = sequence_count + 1
        for j in range(i, i + predict_len):
            for ped_gt in frame_data_gt[j]:
                # only the objects listed for this sequence are evaluated
                if current_consider_ped.count(int(ped_gt[0])):
                    # ignore unknown objects (type 5)
                    if ped_gt[1] == 5:
                        continue
                    # error will be large if missing considered objects
                    error = miss_error
                    for ped_res in frame_data_result[j]:
                        if int(ped_res[0]) == int(ped_gt[0]):
                            error = distance([ped_gt[2], ped_gt[3]], [ped_res[2], ped_res[3]])
                            break
                    # distribute the error to different types of objects
                    # (types 1/2 = vehicle, 3 = pedestrian, 4 = bicyclist)
                    if ped_gt[1] == 1 or ped_gt[1] == 2:
                        vehicle_error.append(error)
                        if j == i + predict_len - 1:
                            vehicle_final_error.append(error)
                    elif ped_gt[1] == 3:
                        pedestrian_error.append(error)
                        if j == i + predict_len - 1:
                            pedestrian_final_error.append(error)
                    elif ped_gt[1] == 4:
                        bicycle_error.append(error)
                        if j == i + predict_len - 1:
                            bicycle_final_error.append(error)
    # the mean error for objects
    # NOTE(review): raises ZeroDivisionError if a class has no samples at all
    vehicle_mean_error = sum(vehicle_error) / len(vehicle_error)
    pedestrian_mean_error = sum(pedestrian_error) / len(pedestrian_error)
    bicycle_mean_error = sum(bicycle_error) / len(bicycle_error)
    # the final error for objects (the lists are rebound to scalar means here)
    vehicle_final_error = sum(vehicle_final_error) / len(vehicle_final_error)
    pedestrian_final_error = sum(pedestrian_final_error) / len(pedestrian_final_error)
    bicycle_final_error = sum(bicycle_final_error) / len(bicycle_final_error)
    # weighted sum of mean error
    WSADE = vehicle_mean_error * vehicle_coe + pedestrian_mean_error * pedestrian_coe + bicycle_mean_error * bicycle_coe
    # weighted sum of final error
    WSFDE = vehicle_final_error * vehicle_coe + pedestrian_final_error * pedestrian_coe + bicycle_final_error * bicycle_coe
    print('WSADE:', WSADE)
    print('ADEv, ADEp, ADEb:', vehicle_mean_error, pedestrian_mean_error, bicycle_mean_error)
    print('WSFDE:', WSFDE)
    print('FDEv, FDEp, FDEb:',vehicle_final_error, pedestrian_final_error, bicycle_final_error)
    return (WSADE, vehicle_mean_error, pedestrian_mean_error, bicycle_mean_error,
            WSFDE, vehicle_final_error, pedestrian_final_error, bicycle_final_error)
def readConsiderObjects(filename):
    """Load the considered-object ids for each test sequence.

    Args:
        filename: path to a text file with one whitespace-separated list of
            integer object ids per line (one line per sequence).

    Returns:
        A list with one list of int ids per sequence.
    """
    print('Load file: ', filename)
    # load considered objects of each sequence
    consider_peds = []
    with open(filename, 'r') as file_to_read:
        for lines in file_to_read:
            stripped = lines.strip()
            # skip blank lines instead of crashing on int('')
            if not stripped:
                continue
            # BUG FIX: the original appended the lazy `map` object (Python 3),
            # which has no .count() and broke evaluation(); materialize ints.
            consider_peds.append([int(tok) for tok in stripped.split(" ")])
    return consider_peds
def readTrajectory(filename):
    """Load a trajectory file and group its rows by frame.

    Each non-empty line must contain five numbers: timestamp, object id,
    object type, x, y.  Rows are grouped by timestamp in order of first
    appearance; the rows of one frame are assumed contiguous in the file.

    Returns:
        A list with one entry per frame; each entry is a list of
        [id, type, x, y] records (all floats).
    """
    print('Load file: ',filename)
    raw_data = []
    # load all the data in the file
    with open(filename, 'r') as file_to_read:
        for lines in file_to_read:
            if not lines.strip():
                continue
            # renamed to avoid shadowing the builtins `id` and `type`
            timestamp, obj_id, obj_type, x, y = [float(v) for v in lines.split()]
            raw_data.append((timestamp, obj_id, obj_type, x, y))
    # frame list in order of first appearance; the `seen` set makes the
    # membership test O(1) instead of the original O(n) list.count() scan
    frameList = []
    seen = set()
    for row in raw_data:
        if row[0] not in seen:
            seen.add(row[0])
            frameList.append(row[0])
    counter = 0
    frame_data = []
    for frame in frameList:
        pedsInFrame = []
        # Extract all objects in the current frame (rows are contiguous)
        for r in range(counter, len(raw_data)):
            row = raw_data[r]
            if row[0] == frame:
                pedsInFrame.append([row[1], row[2], row[3], row[4]])
                counter += 1
            else:
                break
        frame_data.append(pedsInFrame)
    return frame_data
def distance(pos1, pos2):
    """Euclidean distance between two 2-D points given as [x, y] pairs."""
    dx = pos1[0] - pos2[0]
    dy = pos1[1] - pos2[1]
    return np.sqrt(dx * dx + dy * dy)
def main():
    """Parse CLI arguments, load all inputs and run the evaluation."""
    parser = argparse.ArgumentParser(
        description='Evaluation self localization.')
    parser.add_argument('--gt_dir', default='./test_eval_data/prediction_gt.txt',
                        help='the dir of ground truth')
    parser.add_argument('--object_file', default='./test_eval_data/considered_objects.txt',
                        help='the dir of considered objects')
    parser.add_argument('--res_file', default='./test_eval_data/prediction_result.txt',
                        help='the dir of results')
    args = parser.parse_args()
    # load predictions, ground truth and the considered-object ids
    frame_data_result = readTrajectory(args.res_file)
    frame_data_gt = readTrajectory(args.gt_dir)
    consider_peds = readConsiderObjects(args.object_file)
    # Do evaluation
    evaluation(frame_data_result, frame_data_gt, consider_peds)
if __name__ == '__main__':
main()
| 36.364641
| 139
| 0.628836
| 827
| 6,582
| 4.758162
| 0.2237
| 0.066074
| 0.034562
| 0.027446
| 0.282084
| 0.209911
| 0.191105
| 0.191105
| 0.153748
| 0.038628
| 0
| 0.012553
| 0.285931
| 6,582
| 180
| 140
| 36.566667
| 0.824681
| 0.17244
| 0
| 0.147826
| 0
| 0
| 0.059095
| 0.020499
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.026087
| 0.008696
| 0.104348
| 0.052174
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0891695cf058c07ea805662895cf40325fd7ce37
| 2,561
|
py
|
Python
|
shellfoundry/commands/install_command.py
|
p-sherratt/shellfoundry
|
d1f35a31123b9e701c801345fb633b6fda5420b7
|
[
"Apache-2.0"
] | null | null | null |
shellfoundry/commands/install_command.py
|
p-sherratt/shellfoundry
|
d1f35a31123b9e701c801345fb633b6fda5420b7
|
[
"Apache-2.0"
] | 1
|
2021-03-25T23:21:02.000Z
|
2021-03-25T23:21:02.000Z
|
shellfoundry/commands/install_command.py
|
p-sherratt/shellfoundry
|
d1f35a31123b9e701c801345fb633b6fda5420b7
|
[
"Apache-2.0"
] | null | null | null |
# !/usr/bin/python
# -*- coding: utf-8 -*-
import click
import os
try:
# Python 2.x version
from urllib2 import HTTPError, URLError
except:
# Python 3.x version
from urllib.error import HTTPError, URLError
from shellfoundry.exceptions import FatalError
from shellfoundry.utilities.config_reader import Configuration, CloudShellConfigReader
from shellfoundry.utilities.installer import ShellInstaller
from shellfoundry.utilities.shell_config_reader import ShellConfigReader
from shellfoundry.utilities.shell_package import ShellPackage
from shellfoundry.utilities.shell_package_installer import ShellPackageInstaller
class InstallCommandExecutor(object):
    """Installs the shell found in the current working directory into CloudShell."""

    def __init__(self, cloudshell_config_reader=None, installer=None, shell_config_reader=None,
                 shell_package_installer=None):
        # Collaborators default to the production implementations; tests can
        # inject fakes through the keyword arguments.
        self.cloudshell_config_reader = cloudshell_config_reader or Configuration(CloudShellConfigReader())
        self.installer = installer or ShellInstaller()
        self.shell_config_reader = shell_config_reader or ShellConfigReader()
        self.shell_package_installer = shell_package_installer or ShellPackageInstaller()

    def install(self):
        """Install the shell in the current directory, dispatching on its kind."""
        current_path = os.getcwd()
        shell_package = ShellPackage(current_path)
        if shell_package.is_layer_one():
            # L1 shells cannot be installed through shellfoundry at all
            click.secho("Installing a L1 shell directly via shellfoundry is not supported. "
                        "Please follow the L1 shell import procedure described in help.quali.com.", fg="yellow")
        else:
            if shell_package.is_tosca():
                self.shell_package_installer.install(current_path)
            else:
                self._install_old_school_shell()
            click.secho('Successfully installed shell', fg='green')

    def _install_old_school_shell(self):
        """Install a pre-TOSCA shell, translating transport errors to FatalError.

        Raises:
            FatalError: on authentication failure (HTTP 401), connection
                failure, or any other error reported by the installer.
        """
        error = None
        try:
            cloudshell_config = self.cloudshell_config_reader.read()
            shell_config = self.shell_config_reader.read()
            self.installer.install(shell_config.name, cloudshell_config)
        except HTTPError as e:
            if e.code == 401:
                raise FatalError('Login to CloudShell failed. Please verify the credentials in the config')
            # non-401 HTTP errors are reported through the generic path below
            error = str(e)
        except URLError:
            raise FatalError('Connection to CloudShell Server failed. Please make sure it is up and running properly.')
        except Exception as e:
            error = str(e)
        if error:
            raise FatalError("Failed to install shell. CloudShell responded with: '{}'".format(error))
| 43.40678
| 119
| 0.705193
| 289
| 2,561
| 6.058824
| 0.363322
| 0.068532
| 0.071388
| 0.051399
| 0.042262
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004539
| 0.225693
| 2,561
| 58
| 120
| 44.155172
| 0.878467
| 0.029676
| 0
| 0.12766
| 0
| 0
| 0.157661
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0
| 0.234043
| 0
| 0.319149
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0896e29401ea1989cb26ef01107f5729035c11a7
| 4,405
|
py
|
Python
|
app/__main__.py
|
pablohawz/tfg-Scan-Paint-clone
|
056cd50d9e4274620cf085a41ed9d326e16dd47b
|
[
"MIT"
] | null | null | null |
app/__main__.py
|
pablohawz/tfg-Scan-Paint-clone
|
056cd50d9e4274620cf085a41ed9d326e16dd47b
|
[
"MIT"
] | null | null | null |
app/__main__.py
|
pablohawz/tfg-Scan-Paint-clone
|
056cd50d9e4274620cf085a41ed9d326e16dd47b
|
[
"MIT"
] | null | null | null |
# This Python file uses the following encoding: utf-8
from app.package.views.Calibrate_view import CalibrateView
from app.package.controllers.Calibrate_controller import CalibrateController
from app.package.models.Calibrate_model import CalibrateModel
import sys
import matplotlib
from PySide2.QtWidgets import QApplication
from PySide2 import QtCore
from .package.models.NewProjectModel import NewProjectModel
from .package.models.DataAcquisitionModel import DataAcquisitionModel
from .package.models.DisplayResultsModel import DisplayResultsModel
from .package.controllers.Navigator import Navigator
from .package.controllers.NewProjectController import NewProjectController
from .package.controllers.DataAcquisitionController import (
DataAcquisitionController)
from .package.controllers.DisplayResultsController import (
DisplayResultsController)
from .package.views.MainWindow import MainWindow
from .package.views.NewProjectView import NewProjectView
from .package.views.DataAcquisitionView import DataAcquisitionView
from .package.views.DisplayResultsView import DisplayResultsView
class App(QApplication):
    """Qt application object wiring together models, controllers and views (MVC)."""

    # Dictionary mapping view names to view instances
    # (class-level, shared by all App instances)
    views = {}

    @staticmethod
    def log(msg: str) -> None:
        # lightweight console logger tagged with the application name
        print(f'[App] {msg}')

    def __init__(self, args):
        super(App, self).__init__(args)
        # the Navigator emits view names; route them to change_view
        self.navigator = Navigator()
        self.navigator.navigator.connect(self.change_view)
        # MODELS
        self.new_project_model = NewProjectModel()
        self.data_acquisition_model = DataAcquisitionModel()
        self.display_results_model = DisplayResultsModel()
        self.calibrate_model = CalibrateModel()
        # CONTROLLERS (each bound to its model and the shared navigator)
        self.new_project_controller = NewProjectController(
            self.new_project_model, self.navigator)
        self.data_acquisition_controller = DataAcquisitionController(
            self.data_acquisition_model, self.navigator)
        self.display_results_controller = DisplayResultsController(
            self.display_results_model, self.navigator)
        self.calibrate_controller = CalibrateController(
            self.calibrate_model, self.navigator)
        # VIEWS (each bound to its model and controller)
        self.main_view = MainWindow(None, self.navigator)
        self.new_project_view = NewProjectView(
            self.new_project_model, self.new_project_controller)
        self.data_acquisition_view = DataAcquisitionView(
            self.data_acquisition_model, self.data_acquisition_controller)
        self.display_results_view = DisplayResultsView(
            self.display_results_model, self.display_results_controller)
        self.calibrate_view = CalibrateView(
            self.calibrate_model, self.calibrate_controller)
        # register the views under the names used for navigation
        self.views['main_view'] = self.main_view
        self.views['new_project'] = self.new_project_view
        self.views['data_acquisition'] = self.data_acquisition_view
        self.views['display_results'] = self.display_results_view
        self.views['calibrate'] = self.calibrate_view
        # start on the "new project" screen
        self.change_view('new_project')

    @QtCore.Slot(str)
    def change_view(self, name_view, closeOthers=True):
        """Open the view registered as `name_view`, optionally closing the rest.

        Raises:
            Exception: if `name_view` is not a key of `views`.
        """
        self.log(f'Navigating to {name_view}')
        _view = self.views.get(name_view)
        if _view is None:
            raise Exception(f'{name_view} is not part of Views dictionary.')
        if closeOthers:
            self.log('closing other views...')
            for view in self.views:
                if view != name_view:
                    self.views.get(view).close()
        _view.open()
# Keep a reference to the default hook and install a replacement.
# NOTE(review): presumably installed because Qt event loops can swallow
# exceptions raised in slots -- confirm intent with the author.
sys._excepthook = sys.excepthook


def exception_hook(exctype, value, traceback):
    # Report the exception, delegate to the saved default hook, then
    # terminate with a non-zero status so failures are visible to callers.
    print(exctype, value, traceback)
    sys._excepthook(exctype, value, traceback)
    sys.exit(1)


sys.excepthook = exception_hook
def main():
    # AA_ShareOpenGLContexts must be set before the QApplication is
    # constructed (it is read during construction).
    QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
    app = App([])
    # run the Qt event loop and propagate its exit code to the shell
    sys.exit(app.exec_())


# Selects matplotlib's Tk backend at import time, before the __main__ guard
# below invokes main().
matplotlib.use('tkagg')
if __name__ == "__main__":
main()
# if __name__ == "__main__":
# import cProfile
# cProfile.run('main()', 'output.dat')
# import pstats
# from pstats import SortKey
# with open("output_time.dat", "w") as f:
# p = pstats.Stats("output.dat", stream=f)
# p.sort_stats("time").print_stats()
# with open("output_calls.dat", "w") as f:
# p = pstats.Stats("output.dat", stream=f)
# p.sort_stats("calls").print_stats()
| 33.120301
| 76
| 0.711691
| 481
| 4,405
| 6.297297
| 0.247401
| 0.039947
| 0.032354
| 0.018818
| 0.081215
| 0.029713
| 0.029713
| 0.029713
| 0.029713
| 0.029713
| 0
| 0.001132
| 0.197503
| 4,405
| 132
| 77
| 33.371212
| 0.855728
| 0.119864
| 0
| 0
| 0
| 0
| 0.048187
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061728
| false
| 0
| 0.222222
| 0
| 0.308642
| 0.024691
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
089a04fda175104b7a74e5689381760d2e0c8310
| 1,513
|
py
|
Python
|
PyEEA/analysis/SimulationAnalysisEngine.py
|
ThomasJFR/PyEEA
|
7927ee5ff1de8d3cf9e9654899ea4c2c0284519c
|
[
"MIT"
] | 1
|
2020-06-15T03:16:06.000Z
|
2020-06-15T03:16:06.000Z
|
PyEEA/analysis/SimulationAnalysisEngine.py
|
ThomasJFR/PyEEA
|
7927ee5ff1de8d3cf9e9654899ea4c2c0284519c
|
[
"MIT"
] | 1
|
2020-06-19T04:56:21.000Z
|
2020-06-19T04:56:21.000Z
|
PyEEA/analysis/SimulationAnalysisEngine.py
|
ThomasJFR/PyEEA
|
7927ee5ff1de8d3cf9e9654899ea4c2c0284519c
|
[
"MIT"
] | null | null | null |
from numpy.random import standard_normal
from numbers import Number
def simulation_analysis(project, sim_dict, iterations=250, valuator=None):
    """
    Purpose:
        Analyses the effects of uncertainty of a system by performing a Monte Carlo simulation.
    Args:
        project: An instance of Project to perform the simulation on
        sim_dict: A dict where the key is the name of the cashflow to simulate and the value
        is either a number defining the standard deviation for the cashflow as a percentage, or a
        function defining some way to modify the cashflow by an amount
        iterations: number of simulation runs to perform
        valuator: callable used to value the project on each run; defaults to project.npw
    Returns:
        A list with one valuation per iteration.
    Raises:
        TypeError: if the effective valuator is not callable.
    """
    # Make every sim_dict value a callable, converting numbers to stdev functions
    for key in sim_dict:
        if isinstance(sim_dict[key], Number):
            stdev = sim_dict[key]

            # BUG FIX: bind the current stdev as a default argument. A plain
            # closure late-binds `stdev`, so with several numeric entries every
            # generated function would use the stdev of the *last* numeric key.
            def std_dist(amt, stdev=stdev):
                return amt * stdev * standard_normal()

            sim_dict[key] = std_dist
    valuator = valuator or project.npw
    if not callable(valuator):
        # BUG FIX: the original *returned* the TypeError instead of raising it,
        # so the error was silently handed back as a value.
        raise TypeError("Valuator must be a callable construct!")
    # Perform the simulation
    valuations = []
    for _ in range(iterations):
        with project as p:
            for key in sim_dict:
                sim_fun = sim_dict[key]
                n_cashflows = len(p[key])
                for n in range(n_cashflows):
                    cf = p[key][n]
                    cf.amount += sim_fun(cf.amount)
            valuations.append(valuator())
    return valuations
| 35.186047
| 110
| 0.61996
| 197
| 1,513
| 4.664975
| 0.431472
| 0.060936
| 0.043526
| 0.023939
| 0.032644
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002921
| 0.321216
| 1,513
| 42
| 111
| 36.02381
| 0.891918
| 0.364177
| 0
| 0.086957
| 0
| 0
| 0.04126
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.086957
| 0.043478
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08a37f1f4c2faa26bde495db95f37f4816d7caf0
| 12,652
|
py
|
Python
|
dh/network/__init__.py
|
dhaase-de/dh-python-dh
|
40b04407e5f67ec261f559263718ec2b2588dabb
|
[
"MIT"
] | null | null | null |
dh/network/__init__.py
|
dhaase-de/dh-python-dh
|
40b04407e5f67ec261f559263718ec2b2588dabb
|
[
"MIT"
] | null | null | null |
dh/network/__init__.py
|
dhaase-de/dh-python-dh
|
40b04407e5f67ec261f559263718ec2b2588dabb
|
[
"MIT"
] | null | null | null |
"""
Tools for network communication.
"""
import abc
import io
import json
import socket
import struct
import sys
import time
import zlib
import dh.ejson
import dh.utils
# NumPy is only needed for some parts and is optional
try:
import numpy as np
except ImportError as e:
_NUMPY_ERROR = e
else:
_NUMPY_ERROR = None
###
#%% socket message types
###
class SocketMessageType(abc.ABC):
    """
    Base class providing `send()` and `recv()` methods for sending and
    receiving (higher-level) messages via the socket `socket`.
    """

    @abc.abstractmethod
    def send(self, socket, x):
        # Serialize `x` and write it to `socket`; implemented by subclasses.
        pass

    @abc.abstractmethod
    def recv(self, socket):
        # Read one message from `socket` and return the deserialized value;
        # implemented by subclasses.
        pass
class ByteSocketMessageType(SocketMessageType):
    """
    Class providing methods for sending and receiving byte *messages* of up to
    4 GiB in size via a given socket.

    Each message has a fixed-length (four byte) header, specifying the length
    of the message content. Thus, calls to `send()` and `recv()` always
    ensure that the entire message is being sent/received.

    If `compress` is `True`, messages are compressed before sending and
    decompressed after receiving. This reduces the network load but costs more
    time. The value for `compress` must be the same for both the server and the
    client.
    """

    def __init__(self, compress=False):
        self._compress = compress

    def _recvn(self, socket, byteCount):
        """
        Receive and return a fixed number of `byteCount` bytes from the socket.
        Returns `None` if the peer closes the connection before enough bytes
        have arrived.
        """
        b = io.BytesIO()
        while True:
            currentByteCount = b.getbuffer().nbytes
            if currentByteCount >= byteCount:
                break
            packet = socket.recv(byteCount - currentByteCount)
            if len(packet) == 0:
                # connection closed mid-message
                return None
            b.write(packet)
        return b.getvalue()

    def send(self, socket, b):
        if self._compress:
            b = zlib.compress(b)
        # ">I" = big-endian unsigned 32-bit length header (hence the 4 GiB cap)
        header = struct.pack(">I", int(len(b)))
        socket.sendall(header + b)

    def recv(self, socket):
        header = self._recvn(socket, 4)
        if header is None:
            return None
        length = struct.unpack(">I", header)[0]
        b = self._recvn(socket, length)
        # BUG FIX: if the connection dies after the header, `b` is None and
        # zlib.decompress(None) would raise TypeError; report the truncated
        # message as None, consistent with the missing-header case.
        if b is None:
            return None
        if self._compress:
            b = zlib.decompress(b)
        return b
class NumpySocketMessageType(ByteSocketMessageType):
    """
    Class providing `send()` and `recv()` methods for sending and receiving
    NumPy ndarray objects via the given socket.
    """

    def __init__(self, *args, **kwargs):
        # NumPy is an optional dependency of this module; fail at
        # construction time if the module-level import failed.
        if _NUMPY_ERROR is not None:
            raise _NUMPY_ERROR
        super().__init__(*args, **kwargs)

    def send(self, socket, x):
        # serialize via np.save into an in-memory buffer (pickle disabled)
        b = io.BytesIO()
        np.save(file=b, arr=x, allow_pickle=False, fix_imports=False)
        super().send(socket, b.getvalue())

    def recv(self, socket):
        # deserialize the received byte message back into an ndarray
        b = io.BytesIO(super().recv(socket))
        return np.load(file=b, allow_pickle=False, fix_imports=False)
class JsonSocketMessageType(ByteSocketMessageType):
    """
    Class providing `send()` and `recv()` methods for sending and receiving
    JSON-serializable objects via the given socket.
    """

    def send(self, socket, x):
        # serialize to ASCII-safe JSON, then ship as a byte message
        payload = json.dumps(x, ensure_ascii=True).encode("ascii")
        super().send(socket, payload)

    def recv(self, socket):
        # receive the raw byte message and decode it back into an object
        payload = super().recv(socket)
        return json.loads(payload.decode("ascii"))
class ExtendedJsonSocketMessageType(ByteSocketMessageType):
    """
    Class providing `send()` and `recv()` methods for sending and receiving
    JSON-serializable (with extended range of supported types, see
    `dh.ejson`) objects via the given socket.

    .. seealso:: `dh.ejson`.
    """

    def send(self, socket, x):
        # dh.ejson serializes types plain JSON cannot handle
        j = dh.ejson.dumps(x)
        b = bytes(j, "ascii")
        super().send(socket, b)

    def recv(self, socket):
        b = super().recv(socket)
        j = b.decode("ascii")
        x = dh.ejson.loads(j)
        return x
###
#%% extended socket with support for multiple message types
###
class MessageSocket():
    """
    This is a wrapper class for `socket.socket` which supports the methods
    `msend()` and `mrecv()`, which send/receive entire (higher-level) messages.

    For both methods, the `messageType` argument must be an instance of the
    class `SocketMessageType`.

    Note: in this context, 'message' means a high-level, user-defined object,
    not the 'message' used in the context of `socket.socket.recvmsg` and
    `socket.socket.sendmsg`.
    """

    def __init__(self, socket):
        # the wrapped low-level socket object
        self._socket = socket

    def msend(self, messageType, x):
        # delegate serialization and transmission to the message type
        messageType.send(self._socket, x)

    def mrecv(self, messageType):
        # delegate reception and deserialization to the message type
        return messageType.recv(self._socket)
###
#%% socket servers/clients
###
class SocketServer(abc.ABC):
    """
    Simple socket server which accepts connections on the specified `host`
    and `port` and communicates with the client as specified in
    `communicate()`.

    See http://stackoverflow.com/a/19742674/1913780 for an explanation of
    `nodelay`.
    """

    def __init__(self, host="", port=7214, backlog=5, nodelay=True):
        print("Creating socket...")
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # allow quick restarts on the same port
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if nodelay:
            # disable Nagle's algorithm to reduce latency for small messages
            self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        print("Binding socket to {}:{}...".format(host if len(host) > 0 else "*", port))
        self._socket.bind((host, port))
        self._backlog = backlog
        self._nodelay = nodelay

    def _print(self, text):
        # timestamped console logging
        print("[{}] {}".format(dh.utils.dtstr(compact=False), text))

    def run(self):
        """Serve forever: accept one connection at a time, delegate to
        `communicate()`, log exceptions and continue with the next client."""
        self._socket.listen(self._backlog)
        while True:
            self._print("Waiting for connection...")
            sys.stdout.flush()
            (connectionSocket, connectionAddress) = self._socket.accept()
            self._print("Accepted connection from {}:{}".format(connectionAddress[0], connectionAddress[1]))
            t0 = time.time()
            try:
                self.communicate(MessageSocket(connectionSocket))
                # NOTE(review): the socket is not closed when communicate()
                # raises -- confirm whether that leak is intentional
                connectionSocket.close()
            except Exception as e:
                self._print("** {}: {}".format(type(e).__name__, e))
            self._print("Finished request from {}:{} after {} ms".format(connectionAddress[0], connectionAddress[1], dh.utils.around((time.time() - t0) * 1000.0)))

    @abc.abstractmethod
    def communicate(self, socket):
        """
        Implements the entire communication happening for one connection with a
        client via high-level socket messages (see `SocketMessageType`).

        Counterpart of `SocketClient.communicate`. See specific client/server
        implementations for examples.
        """
        pass
class SocketClient(abc.ABC):
    """
    Simple socket client which connects to the server on the specified `host`
    and `port` each time `query()` is called. The communication with the server
    is specified in `communicate()`.

    See http://stackoverflow.com/a/19742674/1913780 for an explanation of
    `nodelay`.
    """

    def __init__(self, host, port=7214, nodelay=True):
        self._host = host
        self._port = port
        self._nodelay = nodelay

    def query(self, *args, **kwargs):
        """Open a fresh connection, run `communicate()`, close the connection
        and return its result."""
        # establish connection with the server
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if self._nodelay:
            # disable Nagle's algorithm to reduce latency for small messages
            self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self._socket.connect((self._host, self._port))
        # actual communication, keep result
        result = self.communicate(MessageSocket(self._socket), *args, **kwargs)
        # close connection
        self._socket.shutdown(socket.SHUT_RDWR)
        self._socket.close()
        return result

    @abc.abstractmethod
    def communicate(self, socket, *args, **kwargs):
        """
        Implements the entire communication happening for one connection with a
        server via high-level socket messages (see `SocketMessageType`).

        Counterpart of `SocketServer.communicate`. See specific client/server
        implementations for examples.
        """
        pass
class ImageProcessingServer(SocketServer):
    """
    Special case of `SocketServer` which accepts a NumPy array and JSON-encoded
    parameters and returns a NumPy array. The counterpart is the
    `ImageProcessingClient` class.

    To specify the processing behavior, sub-class this class and implement
    the static method `process(data, params)`.
    """

    def communicate(self, socket):
        # receive input image and parameters
        data = socket.mrecv(NumpySocketMessageType())
        params = socket.mrecv(JsonSocketMessageType())
        # process
        try:
            result = self.process(data=data, params=params)
        except Exception as e:
            self._print("** {}: {}".format(type(e).__name__, e))
            # an empty uint8 image signals the failure to the client
            result = np.zeros(shape=(0, 0), dtype="uint8")
        # send result image
        socket.msend(NumpySocketMessageType(), result)

    @staticmethod
    @abc.abstractmethod
    def process(data, params):
        """
        This function specifies the processing behavior of this server and must
        be implemented by the user.
        """
        pass
class ImageProcessingClient(SocketClient):
    """
    Special case of `SocketClient` which sends a NumPy array and JSON-encoded
    parameters and receives a NumPy array. The counterpart is the
    `ImageProcessingServer` class.

    The processing behavior is specified by sub-classing
    `ImageProcessingServer` and implementing the static method
    `process(data, params)`.
    """

    def communicate(self, socket, data, params):
        # ship the input image followed by its JSON parameters
        arrayMessage = NumpySocketMessageType()
        socket.msend(arrayMessage, data)
        socket.msend(JsonSocketMessageType(), params)
        # the server answers with the processed image
        return socket.mrecv(arrayMessage)

    def process(self, data, params):
        """
        Just another name for the `query` method (to better show the connection
        to the server's `process` method).
        """
        return self.query(data=data, params=params)
class ImageProcessingServer2(SocketServer):
    """
    Special case of `SocketServer` which accepts a NumPy array and JSON-encoded
    parameters and returns a NumPy array plus a JSON-encodable object. The
    counterpart is the `ImageProcessingClient2` class.

    To specify the processing behavior, sub-class this class and implement
    the static method `process(data, params)`.
    """

    def communicate(self, socket):
        # receive input image and parameters
        data = socket.mrecv(NumpySocketMessageType())
        params = socket.mrecv(JsonSocketMessageType())
        # process
        try:
            (result, info) = self.process(data=data, params=params)
        except Exception as e:
            self._print("** {}: {}".format(type(e).__name__, e))
            # an empty uint8 image plus a None info signals the failure
            result = np.zeros(shape=(0, 0), dtype="uint8")
            info = None
        # send result image and info
        socket.msend(NumpySocketMessageType(), result)
        socket.msend(JsonSocketMessageType(), info)

    @staticmethod
    @abc.abstractmethod
    def process(data, params):
        """
        This function specifies the processing behavior of this server and must
        be implemented by the user.
        """
        pass
class ImageProcessingClient2(SocketClient):
    """
    Special case of `SocketClient` which sends a NumPy array and JSON-encoded
    parameters and receives a NumPy array and a JSON-encoded object. The
    counterpart is the `ImageProcessingServer2` class.

    The processing behavior is specified by sub-classing
    `ImageProcessingServer` and implementing the static method
    `process(data, params)`.
    """

    def communicate(self, socket, data, params):
        # send input image and parameters
        socket.msend(NumpySocketMessageType(), data)
        socket.msend(JsonSocketMessageType(), params)
        # receive result image and the accompanying info object
        result = socket.mrecv(NumpySocketMessageType())
        info = socket.mrecv(JsonSocketMessageType())
        return (result, info)

    def process(self, data, params):
        """
        Just another name for the `query` method (to better show the connection
        to the server's `process` method).
        """
        return self.query(data=data, params=params)
| 30.858537
| 163
| 0.641084
| 1,467
| 12,652
| 5.462849
| 0.207226
| 0.041178
| 0.010981
| 0.017969
| 0.518343
| 0.481283
| 0.44597
| 0.441228
| 0.441228
| 0.426504
| 0
| 0.007221
| 0.255691
| 12,652
| 409
| 164
| 30.933985
| 0.843793
| 0.373064
| 0
| 0.437158
| 0
| 0
| 0.028552
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.169399
| false
| 0.032787
| 0.076503
| 0.005464
| 0.382514
| 0.054645
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08a7afeb8a1abc10ec91968f8b8eddea6a7e071a
| 16,361
|
py
|
Python
|
qtable/engine.py
|
ihgazni2/qtable
|
269bb1052d7c7aeeae4d0b1024746fae38870c40
|
[
"MIT"
] | null | null | null |
qtable/engine.py
|
ihgazni2/qtable
|
269bb1052d7c7aeeae4d0b1024746fae38870c40
|
[
"MIT"
] | null | null | null |
qtable/engine.py
|
ihgazni2/qtable
|
269bb1052d7c7aeeae4d0b1024746fae38870c40
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import elist.elist as elel
import edict.edict as eded
import tlist.tlist as tltl
import copy
__all__ = [
'_append_col',
'_append_cols',
'_append_row',
'_append_rows',
'_cn2clocs',
'_col',
'_cols',
'_columns_map',
'_crop',
'_get_clocs',
'_get_rlocs',
'_getitem',
'_index_map',
'_insert_col',
'_insert_cols',
'_insert_row',
'_insert_rows',
'_ltd_index_first',
'_ltd_index_last',
'_name2ilocs',
'_prepend_col',
'_prepend_cols',
'_prepend_row',
'_prepend_rows',
'_reindex_cols',
'_reindex_rows',
'_rename_cols',
'_rename_rows',
'_repl_col',
'_repl_cols',
'_repl_row',
'_repl_rows',
'_rmcol',
'_rmcols',
'_rmrow',
'_rmrows',
'_rn2rlocs',
'_row',
'_rows',
'_setitem',
'_subtb',
'_swapcol',
'_swaprow',
'_transpose',
'_fliplr',
'_flipud'
]
#all operations will generate a new Qtable(copy.deepcopy), and will not change the original Qtable
#columns col-names-list no-duplicate-names-permitted
#index rowname-names-list no-duplicate-names-permitted
#df pd.DataFrame
def _index_map(df):
    """Return a dict mapping integer position -> row label of `df`."""
    return elel.ivdict(list(df.index))
def _columns_map(df):
    """Return a dict mapping integer position -> column label of `df`."""
    return elel.ivdict(list(df.columns))
def _name2ilocs(rowname,colname,**kwargs):
    """Return (row-ilocs, col-ilocs) of every row/column named `rowname`/`colname`.

    Uses precomputed 'index_map'/'columns_map' from kwargs when present,
    otherwise derives them from kwargs['DF'].  Duplicated names yield
    several positions each.
    """
    if('index_map' in kwargs):
        index_map = kwargs['index_map']
    else:
        df = kwargs['DF']
        index_map = _index_map(df)
    if('columns_map' in kwargs):
        columns_map = kwargs['columns_map']
    else:
        df = kwargs['DF']
        columns_map = _columns_map(df)
    # search the label lists for every occurrence of the requested names
    kl,vl = eded.d2kvlist(index_map)
    rlocs = elel.indexes_all(vl,rowname)
    kl,vl = eded.d2kvlist(columns_map)
    clocs = elel.indexes_all(vl,colname)
    return((rlocs,clocs))
# index_map = _index_map(df)
# columns_map = _columns_map(df)
# _getitem(df,rowname,colname,rloc=0,cloc=0)
# rloc relative-row-position
# cloc relative-col-position
def _getitem(df,rowname,colname,*args,**kwargs):
    """Select the sub-frame of all cells named (rowname, colname).

    With two positional args (rloc, cloc) a single cell of that sub-frame is
    returned instead.  Requires 'index_map' and 'columns_map' in kwargs.
    NOTE(review): passing exactly one positional arg raises IndexError.
    """
    rlocs,clocs = _name2ilocs(rowname,colname,index_map=kwargs['index_map'],columns_map=kwargs['columns_map'])
    rslt = df.iloc[rlocs,clocs]
    args = list(args)
    if(args.__len__()==0):
        pass
    else:
        # relative position inside the duplicate-name sub-frame
        rloc = args[0]
        cloc = args[1]
        rslt = rslt.iloc[rloc,cloc]
    return(rslt)
def _setitem(df,rowname,colname,value,*args,**kwargs):
    """Assign `value` to the cells named (rowname, colname), in place.

    Without positional args the whole duplicate-name sub-frame is set; with
    two args (rloc, cloc) only that single cell is set.  Requires
    'index_map' and 'columns_map' in kwargs.
    NOTE(review): passing exactly one positional arg raises IndexError.
    """
    rlocs,clocs = _name2ilocs(rowname,colname,index_map=kwargs['index_map'],columns_map=kwargs['columns_map'])
    rslt = df.iloc[rlocs,clocs]
    args = list(args)
    if(args.__len__()==0):
        rslt = value
    else:
        rloc = args[0]
        cloc = args[1]
        rslt.iloc[rloc,cloc] = value
    # write the modified sub-frame back into df
    df.iloc[rlocs,clocs] = rslt
#rn ---------------------rowname
def _rn2rlocs(rowname,**kwargs):
    """Return the sorted integer locations of every row named `rowname`.

    Uses kwargs['index_map'] when provided, otherwise derives it from
    kwargs['DF'].
    """
    if('index_map' in kwargs):
        index_map = kwargs['index_map']
    else:
        df = kwargs['DF']
        index_map = _index_map(df)
    kl,vl = eded.d2kvlist(index_map)
    rlocs = elel.indexes_all(vl,rowname)
    rlocs.sort()
    return(rlocs)
def _row(df,rowname,*args,**kwargs):
    """Select all rows named `rowname`; positional args pick a subset of the
    duplicate occurrences."""
    rlocs = _rn2rlocs(rowname,**kwargs)
    args = list(args)
    if(args.__len__()==0):
        pass
    else:
        # keep only the requested occurrences
        rlocs = elel.select_seqs(rlocs,args)
    return(df.iloc[rlocs])
#cn ---------------------colname
def _cn2clocs(colname,**kwargs):
    """Return the sorted integer locations of every column named `colname`.

    Uses kwargs['columns_map'] when provided, otherwise derives it from
    kwargs['DF'].
    """
    if('columns_map' in kwargs):
        columns_map = kwargs['columns_map']
    else:
        df = kwargs['DF']
        columns_map = _columns_map(df)
    kl,vl = eded.d2kvlist(columns_map)
    clocs = elel.indexes_all(vl,colname)
    clocs.sort()
    return(clocs)
def _col(df,colname,*args,**kwargs):
    """Select all columns named `colname`; positional args pick a subset of
    the duplicate occurrences."""
    clocs = _cn2clocs(colname,**kwargs)
    args = list(args)
    if(args.__len__()==0):
        pass
    else:
        # keep only the requested occurrences
        clocs = elel.select_seqs(clocs,args)
    return(df.iloc[:,clocs])
def _get_rlocs(rownames,**kwargs):
    """Collect and sort the integer row locations of every name in `rownames`.

    Duplicate row names contribute all of their positions.
    """
    rlocs = []
    # iterate the names directly instead of the original
    # `range(rownames.__len__())` index loop
    for rowname in rownames:
        rlocs = elel.concat(rlocs, _rn2rlocs(rowname, **kwargs))
    rlocs.sort()
    return(rlocs)
def _get_clocs(colnames,**kwargs):
    """Collect and sort the integer column locations of every name in
    `colnames`.

    Duplicate column names contribute all of their positions.
    """
    clocs = []
    # iterate the names directly instead of the original
    # `range(colnames.__len__())` index loop
    for colname in colnames:
        clocs = elel.concat(clocs, _cn2clocs(colname, **kwargs))
    clocs.sort()
    return(clocs)
def _rows(df,*rownames,**kwargs):
    """Select all rows whose names are in `rownames`.

    Names may be passed either variadically or as a single list argument.
    """
    rownames = list(rownames)
    if(isinstance(rownames[0],list)):
        # a single list argument was passed
        rownames = rownames[0]
    else:
        pass
    rlocs = _get_rlocs(rownames,**kwargs)
    return(df.iloc[rlocs])
def _cols(df,*colnames,**kwargs):
    """Select all columns whose names are in `colnames`.

    Names may be passed either variadically or as a single list argument.
    """
    colnames = list(colnames)
    if(isinstance(colnames[0],list)):
        # a single list argument was passed
        colnames = colnames[0]
    else:
        pass
    clocs = _get_clocs(colnames,**kwargs)
    return(df.iloc[:,clocs])
def _subtb(df,rownames,colnames,**kwargs):
    """Select the sub-table spanned by the given row and column names
    (names are de-duplicated first)."""
    rownames = elel.uniqualize(rownames)
    colnames = elel.uniqualize(colnames)
    rlocs = _get_rlocs(rownames,**kwargs)
    clocs = _get_clocs(colnames,**kwargs)
    return(df.iloc[rlocs,clocs])
def _ltd_index_first(ltd,value):
for i in range(ltd.__len__()):
if(ltd[i] == value):
return(i)
else:
pass
raise ValueError("value not exist")
def _ltd_index_last(ltd,value):
for i in range(ltd.__len__()-1,-1,-1):
if(ltd[i] == value):
return(i)
else:
pass
raise ValueError("value not exist")
def _crop(df, top, left, bot, right, **kwargs):
    """Crop *df* to the rectangle bounded by the named rows/columns (inclusive).

    Requires kwargs['index_map'] and kwargs['columns_map'].
    """
    imd = kwargs['index_map']
    cmd = kwargs['columns_map']
    r0 = _ltd_index_first(imd, top)
    r1 = _ltd_index_last(imd, bot)
    c0 = _ltd_index_first(cmd, left)
    c1 = _ltd_index_last(cmd, right)
    rownames = list(df.index[r0:r1 + 1])
    colnames = list(df.columns[c0:c1 + 1])
    return _subtb(df, rownames, colnames, **kwargs)
def _swapcol(df, colname1, colname2, *args, **kwargs):
    """Swap the positions of two named columns.

    Optional positional args pick which occurrence of each (possibly
    duplicated) name to swap; both default to the first occurrence.
    """
    df = copy.deepcopy(df)
    clocs1 = _cn2clocs(colname1, **kwargs)
    clocs2 = _cn2clocs(colname2, **kwargs)
    picks = list(args)
    which1 = picks[0] if len(picks) >= 1 else 0
    which2 = picks[1] if len(picks) >= 2 else 0
    order = elel.init_range(0, len(df.columns), 1)
    order = elel.iswap(order, clocs1[which1], clocs2[which2])
    return df.iloc[:, order]
def _reindex_cols(df, *columns, **kwargs):
    """Reorder columns by name; kwargs['whiches'] selects among duplicate
    names (default: first occurrence of each)."""
    df = copy.deepcopy(df)
    names = list(columns)
    if isinstance(names[0], list):
        names = names[0]
    clocs_array = [_cn2clocs(name, **kwargs) for name in names]
    whiches = kwargs['whiches'] if 'whiches' in kwargs else elel.init(len(clocs_array), 0)
    clocs = elel.batexec(lambda cl, w: cl[w], clocs_array, whiches)
    return df.iloc[:, clocs]
def _swaprow(df,rowname1,rowname2,*args,**kwargs):
    """Swap the positions of two named rows.

    Optional positional args pick which occurrence of each (possibly
    duplicated) row name to swap; both default to the first occurrence.
    """
    df = copy.deepcopy(df)
    rlocs1 = _rn2rlocs(rowname1,**kwargs)
    rlocs2 = _rn2rlocs(rowname2,**kwargs)
    args = list(args)
    if(args.__len__()==0):
        which1 = 0
        which2 = 0
    elif(args.__len__()==1):
        which1 = args[0]
        which2 = 0
    else:
        which1 = args[0]
        which2 = args[1]
    rloc1 = rlocs1[which1]
    rloc2 = rlocs2[which2]
    # BUG FIX: build the identity permutation over ROW positions.  The
    # original used df.columns.__len__() (copied from _swapcol), which
    # produced a wrong-length permutation for non-square frames.
    rlocs = elel.init_range(0,df.index.__len__(),1)
    rlocs = elel.iswap(rlocs,rloc1,rloc2)
    return(df.iloc[rlocs])
def _reindex_rows(df, *index, **kwargs):
    """Reorder rows by name; kwargs['whiches'] selects among duplicate
    names (default: first occurrence of each)."""
    df = copy.deepcopy(df)
    names = list(index)
    if isinstance(names[0], list):
        names = names[0]
    rlocs_array = [_rn2rlocs(name, **kwargs) for name in names]
    whiches = kwargs['whiches'] if 'whiches' in kwargs else elel.init(len(rlocs_array), 0)
    rlocs = elel.batexec(lambda rl, w: rl[w], rlocs_array, whiches)
    return df.iloc[rlocs]
def _rmcol(df, colname, *args, **kwargs):
    """Drop the named column; optional args pick which duplicate occurrences
    to drop (default: all of them)."""
    df = copy.deepcopy(df)
    clocs = _cn2clocs(colname, **kwargs)
    whiches = list(args) if args else elel.init_range(0, len(clocs), 1)
    doomed = elel.select_seqs(clocs, whiches)
    keep = elel.select_seqs_not(elel.init_range(0, len(df.columns), 1), doomed)
    return df.iloc[:, keep]
def _rmcols(df, *colnames, **kwargs):
    """Drop several named columns; kwargs['whiches'] picks one occurrence per
    name, otherwise every occurrence of every name is removed."""
    df = copy.deepcopy(df)
    names = list(colnames)
    if isinstance(names[0], list):
        names = names[0]
    clocs_array = [_cn2clocs(name, **kwargs) for name in names]
    if 'whiches' in kwargs:
        doomed = elel.batexec(lambda cl, w: cl[w], clocs_array, kwargs['whiches'])
    else:
        # default: remove every occurrence
        doomed = elel.concat(*clocs_array)
    keep = elel.select_seqs_not(elel.init_range(0, len(df.columns), 1), doomed)
    return df.iloc[:, keep]
def _rmrow(df, rowname, *args, **kwargs):
    """Drop the named row; optional args pick which duplicate occurrences
    to drop (default: all of them)."""
    df = copy.deepcopy(df)
    rlocs = _rn2rlocs(rowname, **kwargs)
    whiches = list(args) if args else elel.init_range(0, len(rlocs), 1)
    doomed = elel.select_seqs(rlocs, whiches)
    keep = elel.select_seqs_not(elel.init_range(0, len(df.index), 1), doomed)
    return df.iloc[keep]
def _rmrows(df, *rownames, **kwargs):
    """Drop several named rows; kwargs['whiches'] picks one occurrence per
    name, otherwise every occurrence of every name is removed."""
    df = copy.deepcopy(df)
    names = list(rownames)
    if isinstance(names[0], list):
        names = names[0]
    rlocs_array = [_rn2rlocs(name, **kwargs) for name in names]
    if 'whiches' in kwargs:
        doomed = elel.batexec(lambda rl, w: rl[w], rlocs_array, kwargs['whiches'])
    else:
        # default: remove every occurrence
        doomed = elel.concat(*rlocs_array)
    keep = elel.select_seqs_not(elel.init_range(0, len(df.index), 1), doomed)
    return df.iloc[keep]
def _insert_col(df,pos,*args,**kwargs):
df = copy.deepcopy(df)
if(isinstance(pos,int)):
pass
else:
clocs = _cn2clocs(pos,**kwargs)
if('which' in kwargs):
which = kwargs['which']
else:
which = 0
pos = clocs[which] + 1
args = list(args)
if(args.__len__() == 1):
colname = list(args[0].keys())[0]
values = list(args[0].values())[0]
else:
colname = args[0]
if(isinstance(args[1],list)):
values = args[1]
else:
values = args[1:]
####
####
df.insert(pos,colname,values,kwargs['allow_duplicates'])
return(df)
def _insert_cols(df,pos,*args,**kwargs):
    """Insert several columns into a copy of *df*, starting at *pos*.

    pos: int position, or a column name (insertion starts right AFTER the
         matched column; kwargs['which'] picks among duplicate matches).
    args: one {name: values} dict, or an alternating name,values-list
          sequence, or a flat stream chunked into (name, v0..vn) groups.
    kwargs['allow_duplicates'] is forwarded to DataFrame.insert (default False).
    """
    df = copy.deepcopy(df)
    if(isinstance(pos,int)):
        pass
    else:
        clocs = _cn2clocs(pos,**kwargs)
        which = kwargs['which'] if ('which' in kwargs) else 0
        pos = clocs[which] + 1    # insert right after the matched column
    args = list(args)
    if(isinstance(args[0],dict)):
        kl,vl = eded.d2kvlist(args[0])
    else:
        if(isinstance(args[1],list)):
            kl = elel.select_evens(args)
            vl = elel.select_odds(args)
        else:
            # flat stream: chunk into groups of (name + one value per row)
            kl,vl = elel.brkl2kvlist(args,df.index.__len__()+1)
    # FIX: tolerate a missing 'allow_duplicates' kwarg instead of raising
    # KeyError; pandas' own default is False.
    allow_dup = kwargs.get('allow_duplicates',False)
    for i in range(kl.__len__()):
        df.insert(pos+i,kl[i],vl[i],allow_duplicates=allow_dup)
    return(df)
def _insert_row(df, pos, *args, **kwargs):
    """Insert one row: delegate to _insert_col on the transpose."""
    return _insert_col(df.T, pos, *args, **kwargs).T
def _insert_rows(df, pos, *args, **kwargs):
    """Insert several rows: delegate to _insert_cols on the transpose."""
    return _insert_cols(df.T, pos, *args, **kwargs).T
def _append_col(df, *args, **kwargs):
    """Insert a column at the right edge of *df*."""
    return _insert_col(df, len(df.columns), *args, **kwargs)
def _append_cols(df, *args, **kwargs):
    """Insert several columns at the right edge of *df*."""
    return _insert_cols(df, len(df.columns), *args, **kwargs)
def _append_row(df, *args, **kwargs):
    """Insert a row at the bottom of *df*."""
    return _insert_row(df, len(df.index), *args, **kwargs)
def _append_rows(df, *args, **kwargs):
    """Insert several rows at the bottom of *df*."""
    return _insert_rows(df, len(df.index), *args, **kwargs)
def _prepend_col(df, *args, **kwargs):
    """Insert a column at the left edge of *df*."""
    return _insert_col(df, 0, *args, **kwargs)
def _prepend_cols(df, *args, **kwargs):
    """Insert several columns at the left edge of *df*."""
    return _insert_cols(df, 0, *args, **kwargs)
def _prepend_row(df, *args, **kwargs):
    """Insert a row at the top of *df*."""
    return _insert_row(df, 0, *args, **kwargs)
def _prepend_rows(df, *args, **kwargs):
    """Insert several rows at the top of *df*."""
    return _insert_rows(df, 0, *args, **kwargs)
def _rename_cols(df,*colnames):
df = copy.deepcopy(df)
colnames = list(colnames)
if(isinstance(colnames[0],list)):
colnames = colnames[0]
else:
pass
df.columns = colnames
return(df)
def _rename_rows(df,*rownames):
df = copy.deepcopy(df)
rownames = list(rownames)
if(isinstance(rownames[0],list)):
rownames = rownames[0]
else:
pass
df.index = rownames
return(df)
def _repl_col(df, pos, *args, **kwargs):
    """Replace the column at *pos* (int position or column name) with a new
    column, by inserting the replacement just after it and dropping it.

    args: one {colname: values} dict, or (colname, values_list), or
    (colname, v0, v1, ...).  kwargs['allow_duplicates'] is required and is
    forwarded to DataFrame.insert.
    """
    df = copy.deepcopy(df)
    if isinstance(pos, int):
        pos = pos + 1
    else:
        clocs = _cn2clocs(pos, **kwargs)
        which = kwargs['which'] if 'which' in kwargs else 0
        pos = clocs[which] + 1
    args = list(args)
    if len(args) == 1:
        # single-dict form: {colname: values}
        colname = list(args[0].keys())[0]
        values = list(args[0].values())[0]
    else:
        colname = args[0]
        values = args[1] if isinstance(args[1], list) else args[1:]
    df.insert(pos, colname, values, kwargs['allow_duplicates'])
    # drop the original column, which now sits just before the insertion
    old = pos - 1
    keep = elel.init_range(0, len(df.columns), 1)
    keep.remove(old)
    return df.iloc[:, keep]
def _repl_cols(df,poses,*args,**kwargs):
    """Replace several columns at once.

    poses: a list of int column positions, or a list of column names
    (resolved through _cn2clocs).
    args: the replacement columns — one {name: values} dict, or an
    alternating name/values-list sequence, or a flat value stream.
    kwargs: 'whiches' picks one occurrence per duplicated name; without it
    every occurrence of every named column is replaced.
    'allow_duplicates' is forwarded to DataFrame.insert (required key).
    """
    df = copy.deepcopy(df)
    args = list(args)
    # Unpack replacement column names (kl) and value lists (vl).
    if(isinstance(args[0],dict)):
        kl,vl = eded.d2kvlist(args[0])
    else:
        if(isinstance(args[1],list)):
            kl = elel.select_evens(args)
            vl = elel.select_odds(args)
        else:
            # Flat stream: chunk into (name + one value per row) groups.
            kl,vl = elel.brkl2kvlist(args,df.index.__len__()+1)
    if(isinstance(poses[0],int)):
        pass
    else:
        # Resolve column names to integer positions.
        colnames = poses
        clocs_array = []
        for i in range(colnames.__len__()):
            clocs = _cn2clocs(colnames[i],**kwargs)
            clocs_array.append((clocs,i))
        if("whiches" in kwargs):
            # One occurrence per name, chosen by the parallel 'whiches' list.
            whiches = kwargs['whiches']
            clocs_array = elel.mapv(clocs_array,lambda ele:ele[0])
            clocs = elel.batexec(lambda clocs,which:clocs[which],clocs_array,whiches)
            poses = clocs
        else:
            #by default replace all
            # Expand each name's kl/vl entry once per occurrence, then sort
            # the three parallel lists by target position.
            nkl = []
            nvl = []
            nclocs = []
            for i in range(clocs_array.__len__()):
                clocs = clocs_array[i][0]
                index = clocs_array[i][1]
                tmpkl = elel.init(clocs.__len__(),kl[i])
                tmpvl = elel.init(clocs.__len__(),vl[i])
                nkl = elel.concat(nkl,tmpkl)
                nvl = elel.concat(nvl,tmpvl)
                nclocs = elel.concat(nclocs,clocs)
            #batsort
            poses = nclocs
            kl,vl = elel.batsorted(nclocs,nkl,nvl)
    # Shift to insert-after positions; for each target, insert the
    # replacement just after it and then drop the original column.
    poses = elel.mapv(poses,lambda pos:pos+1)
    poses.sort()
    for i in range(0,poses.__len__()):
        pos = poses[i]
        df.insert(pos,kl[i],vl[i],kwargs['allow_duplicates'])
        pos = pos -1
        all_clocs = elel.init_range(0,df.columns.__len__(),1)
        all_clocs.remove(pos)
        df = df.iloc[:,all_clocs]
    return(df)
def _repl_row(df, pos, *args, **kwargs):
    """Replace one row: delegate to _repl_col on the transpose."""
    return _repl_col(df.T, pos, *args, **kwargs).T
def _repl_rows(df, poses, *args, **kwargs):
    """Replace several rows: delegate to _repl_cols on the transpose."""
    return _repl_cols(df.T, poses, *args, **kwargs).T
def _transpose(df):
df = copy.deepcopy(df)
df = df.T
return(df)
def _fliplr(df, **kwargs):
    """Mirror *df* left-right by reversing its column order."""
    reversed_columns = list(df.columns)[::-1]
    return _reindex_cols(df, reversed_columns, **kwargs)
def _flipud(df, **kwargs):
    """Mirror *df* top-bottom by reversing its row order."""
    reversed_index = list(df.index)[::-1]
    return _reindex_rows(df, reversed_index, **kwargs)
| 27.40536
| 110
| 0.603753
| 2,150
| 16,361
| 4.351628
| 0.077209
| 0.038478
| 0.020522
| 0.025652
| 0.670051
| 0.594912
| 0.540188
| 0.521377
| 0.492946
| 0.448055
| 0
| 0.015329
| 0.242406
| 16,361
| 596
| 111
| 27.451342
| 0.739492
| 0.032883
| 0
| 0.591954
| 0
| 0
| 0.049693
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088123
| false
| 0.030651
| 0.011494
| 0.007663
| 0.099617
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08ab2c42e46cc085323887951f27802509bc2c01
| 1,131
|
py
|
Python
|
pythonDesafios/desafio058.py
|
mateusdev7/desafios-python
|
6160ddc84548c7af7f5775f9acabe58238f83008
|
[
"MIT"
] | null | null | null |
pythonDesafios/desafio058.py
|
mateusdev7/desafios-python
|
6160ddc84548c7af7f5775f9acabe58238f83008
|
[
"MIT"
] | null | null | null |
pythonDesafios/desafio058.py
|
mateusdev7/desafios-python
|
6160ddc84548c7af7f5775f9acabe58238f83008
|
[
"MIT"
] | null | null | null |
from random import randint
from time import sleep

# Guessing game: the computer draws a number in [0, 10] and counts how many
# attempts the player needs to hit it.  Loops until the player chooses 0.
opcao = 123  # sentinel so the menu loop runs at least once
while opcao != 0:
    print('-=-' * 20)
    print('Vou pensar em um número entre 0 e 10, quer tentar adivinhar?')
    print('-=-' * 20)
    print('\n[ 1 ] Sim [ 0 ] Não')
    opcao = int(input('Escolha uma das opções acima\n>'))
    if opcao == 1:
        computador = randint(0, 10)  # the computer draws a number from 0 to 10
        cont = 0  # FIX: reset per game; previously attempts accumulated across rounds
        usuario = int(input('\nEscolha um número entre 0 e 10: ').strip())
        cont += 1
        while usuario != computador:
            if usuario < computador:
                print('Mais... Tente novamente')
            else:
                print('Menos... Tente novamente')
            usuario = int(input('Insira outro número: '))
            cont += 1
        # The while loop only exits on a correct guess, so the redundant
        # `if usuario == computador` check was removed.
        print('\nPARABÉNS. Você ACERTOU!!!')
        # FIX: typos in the message ("quantide"/"tentivas").
        print('Calculando a quantidade de tentativas necessárias...')
        sleep(1)
        print('-=-' * 15)
        print(f'Você precisou de {cont} tentativa(s) para acertar.')
        print('-=-' * 15)
    elif opcao == 0:
        print('Você saiu do jogo.')
| 35.34375
| 78
| 0.535809
| 137
| 1,131
| 4.423358
| 0.50365
| 0.039604
| 0.036304
| 0.046205
| 0.056106
| 0.056106
| 0
| 0
| 0
| 0
| 0
| 0.042272
| 0.330681
| 1,131
| 32
| 79
| 35.34375
| 0.758256
| 0.035367
| 0
| 0.2
| 0
| 0
| 0.338532
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0.4
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08ab8c8ec2777c51be6f0455ab77ed9f159c8995
| 1,896
|
py
|
Python
|
FeatureEngineeringPy_DataScience/demo153_rarecategories.py
|
mahnooranjum/Programming_DataScience
|
f7a4215d4615b3f8460c3a1944a585628cf6930d
|
[
"MIT"
] | null | null | null |
FeatureEngineeringPy_DataScience/demo153_rarecategories.py
|
mahnooranjum/Programming_DataScience
|
f7a4215d4615b3f8460c3a1944a585628cf6930d
|
[
"MIT"
] | null | null | null |
FeatureEngineeringPy_DataScience/demo153_rarecategories.py
|
mahnooranjum/Programming_DataScience
|
f7a4215d4615b3f8460c3a1944a585628cf6930d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Demo153_RareCategories.ipynb

## Rare Categories
- Labels
- The number of labels in the dataset are different
- __high cardinality__ refers to uniqueness of data values
- The lower the cardinality, the more duplicated elements in a column
- A column with the lowest possible cardinality would have the same value for every row
- Highly cardinal variables dominate tree based algorithms
- Labels may only be present in the training data set, but not in the test data set
- Labels may appear in the test set that were not present in the training set

__Tree methods are biased towards variables with many labels__
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from google.colab import drive

drive.mount('/content/gdrive')

data = pd.read_csv("gdrive/My Drive/Colab Notebooks/FeatureEngineering/train.csv")

# Cardinality of each categorical column.
cat_cols = ['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked']
for i in cat_cols:
    print('Number of categories in the variable {}: {}'.format(i, len(data[i].unique())))
print('Total rows: {}'.format(len(data)))

data['Sex'].value_counts()

# Coarser cabin categories: deck letter and the character after it.
data['Cabin_processed'] = data['Cabin'].astype(str).str[0]
data['Cabin_processed_X'] = data['Cabin'].astype(str).str[1]

cat_cols = ['Sex', 'Embarked', 'Cabin_processed']
for i in cat_cols:
    sns.catplot(x=i, kind='count', data=data)

data['Cabin_processed'].value_counts() / len(data)

for i in cat_cols:
    sns.catplot(x=i, data=data, hue='Survived', kind='count', palette="ch:.25")

"""### Transform Rare Labels"""

_temp = pd.Series(data['Cabin_processed'].value_counts() / len(data))
_temp.sort_values(ascending=False)
_temp

_temp = pd.Series(data['Cabin_processed'].value_counts() / len(data))
_temp

# FIX: _labels was used below without ever being defined (NameError at
# runtime).  Define it as the categories whose relative frequency is below
# 5% — TODO confirm the intended rarity threshold.
_labels = _temp[_temp < 0.05].index
for i in _labels:
    data['Cabin_processed'].replace(i, 'rare', inplace=True)

_temp = pd.Series(data['Cabin_processed'].value_counts() / len(data))
_temp
| 26.704225
| 88
| 0.728903
| 289
| 1,896
| 4.650519
| 0.432526
| 0.060268
| 0.09375
| 0.068452
| 0.220982
| 0.18006
| 0.18006
| 0.153274
| 0.153274
| 0.116071
| 0
| 0.004884
| 0.136076
| 1,896
| 71
| 89
| 26.704225
| 0.815629
| 0.341245
| 0
| 0.310345
| 0
| 0
| 0.273927
| 0.031353
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.172414
| 0
| 0.172414
| 0.068966
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08af904e7f82a923beed7c2fa65793eb9bf02793
| 878
|
py
|
Python
|
popbl_servicesapp/flask_app/order/application/api_client.py
|
xetxezarreta/master-popbl1
|
253880b9ba358f63f666893cdbbffe7391fcd096
|
[
"MIT"
] | null | null | null |
popbl_servicesapp/flask_app/order/application/api_client.py
|
xetxezarreta/master-popbl1
|
253880b9ba358f63f666893cdbbffe7391fcd096
|
[
"MIT"
] | 1
|
2021-06-02T00:57:11.000Z
|
2021-06-02T00:57:11.000Z
|
popbl_servicesapp/flask_app/order/application/api_client.py
|
xetxezarreta/master-popbl1
|
253880b9ba358f63f666893cdbbffe7391fcd096
|
[
"MIT"
] | null | null | null |
import requests
import json
from os import environ
from .models import Order, Piece
from .BLConsul import BLConsul
# Gateway (HAProxy) endpoint, read from the environment.
GATEWAY_PORT = environ.get("HAPROXY_PORT")
GATEWAY_ADDRESS = environ.get("HAPROXY_IP")
# Service identifiers; AUTH_SERVICE is used below for the Consul lookup.
# NOTE(review): the other three are not referenced in the visible code —
# presumably consumed elsewhere in this module.
MACHINE_SERVICE = "machine"
PAYMENT_SERVICE = "payment"
DELIVERY_SERVICE = "delivery"
AUTH_SERVICE = "auth"
# NOTE(review): read from env but unused here — confirm it is consumed elsewhere.
CA_CERT = environ.get("RABBITMQ_CA_CERT")
# Shared Consul client singleton.
consul = BLConsul.get_instance()
class ApiClient:
    @staticmethod
    def auth_get_pubkey():
        """Look up the auth service in Consul and fetch its public key.

        Returns the key as UTF-8 bytes on HTTP 200, otherwise None.
        """
        service = consul.get_service(AUTH_SERVICE)
        print("CONSUL RESPONSE {}".format(service))
        host = service['Address']
        port = str(service['Port'])
        url = "http://{}:{}/{}/pubkey".format(host, port, AUTH_SERVICE)
        # NOTE(review): verify=False disables TLS certificate checks — confirm
        # this is intentional for the deployment environment.
        response = requests.get(url, verify=False)
        if response.status_code == 200:
            payload = json.loads(response.content)
            return payload["publicKey"].encode("utf-8")
| 29.266667
| 100
| 0.693622
| 109
| 878
| 5.385321
| 0.46789
| 0.068143
| 0.057922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005556
| 0.179954
| 878
| 29
| 101
| 30.275862
| 0.809722
| 0
| 0
| 0
| 0
| 0
| 0.146925
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.208333
| 0
| 0.333333
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08b08e4c091db6970d8bd9b3e8f858f92dfeb9ac
| 2,569
|
py
|
Python
|
polya/modules/congruence_closure_module.py
|
holtzermann17/polya
|
6d611bf47185249a96f4cf7ee9b3884bc70a15ac
|
[
"Apache-2.0"
] | 24
|
2015-01-01T18:21:40.000Z
|
2021-08-29T01:56:14.000Z
|
polya/modules/congruence_closure_module.py
|
holtzermann17/polya
|
6d611bf47185249a96f4cf7ee9b3884bc70a15ac
|
[
"Apache-2.0"
] | 1
|
2018-09-06T17:53:13.000Z
|
2018-09-07T13:57:39.000Z
|
polya/modules/congruence_closure_module.py
|
holtzermann17/polya
|
6d611bf47185249a96f4cf7ee9b3884bc70a15ac
|
[
"Apache-2.0"
] | 4
|
2017-02-08T15:04:09.000Z
|
2021-05-02T15:13:05.000Z
|
####################################################################################################
#
# congruence_closure_module.py
#
# Authors:
# Jeremy Avigad
# Rob Lewis
#
# This module maintains a union-find structure for terms in Blackboard, which is currently only used
# for congruence closure. It should perhaps be integrated differently into Blackboard.
#
# Contains a set for each equality class (up to constant multiples) of terms, and tracks which terms
# appear as arguments to which function terms.
#
####################################################################################################
import polya.main.terms as terms
import polya.main.messages as messages
import polya.util.timer as timer
import fractions
import itertools
class CongClosureModule:
    def __init__(self):
        # Stateless: all working data lives on the blackboard passed to
        # update_blackboard.
        pass

    def update_blackboard(self, B):
        """
        Checks the blackboard B for function terms with equal arguments, and asserts that the
        function terms are equal.
        """
        def eq_func_terms(f1, f2):
            """
            Returns true if f1 and f2 have the same name and arity, and all args are equal.
            """
            if f1.func_name != f2.func_name or len(f1.args) != len(f2.args):
                return False
            for i in range(len(f1.args)):
                arg1, arg2 = f1.args[i], f2.args[i]
                if arg1.coeff == 0:
                    # A zero-coefficient argument matches iff the other side
                    # is also zero (by coefficient or known equal to 0 in B).
                    eq = B.implies(arg2.term.index, terms.EQ, 0, 0) or arg2.coeff == 0
                else:
                    # c1*t1 == c2*t2  iff  B knows  t1 == (c2/c1)*t2.
                    eq = B.implies(arg1.term.index, terms.EQ,
                                   fractions.Fraction(arg2.coeff, arg1.coeff), arg2.term.index)
                if not eq:
                    return False
            return True

        timer.start(timer.CCM)
        messages.announce_module('congruence closure module')
        # Bucket the indices of all function terms by function symbol name.
        func_classes = {}
        for i in (d for d in range(B.num_terms) if isinstance(B.term_defs[d], terms.FuncTerm)):
            name = B.term_defs[i].func_name
            func_classes[name] = func_classes.get(name, []) + [i]
        # Within each bucket, assert equality for every congruent pair.
        for name in func_classes:
            tinds = func_classes[name]
            for (i, j) in itertools.combinations(tinds, 2):
                # ti and tj are function terms with the same symbols. check if they're equal.
                f1, f2 = B.term_defs[i], B.term_defs[j]
                if eq_func_terms(f1, f2):
                    B.assert_comparison(terms.IVar(i) == terms.IVar(j))
        timer.stop(timer.CCM)

    def get_split_weight(self, B):
        # This module never suggests case splits.
        return None
| 36.7
| 100
| 0.54963
| 323
| 2,569
| 4.28483
| 0.386997
| 0.03974
| 0.026012
| 0.028902
| 0.021676
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015899
| 0.289996
| 2,569
| 70
| 101
| 36.7
| 0.742873
| 0.256131
| 0
| 0.054054
| 0
| 0
| 0.015281
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 1
| 0.108108
| false
| 0.027027
| 0.135135
| 0.027027
| 0.378378
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08b3ea49c776eba1ca9a6e036f7a93721ad3e46b
| 3,280
|
py
|
Python
|
build.py
|
Jackcava/mappingToFHIR
|
3189b55121a50ee1c4734227cde6da58ed6cb576
|
[
"MIT"
] | null | null | null |
build.py
|
Jackcava/mappingToFHIR
|
3189b55121a50ee1c4734227cde6da58ed6cb576
|
[
"MIT"
] | null | null | null |
build.py
|
Jackcava/mappingToFHIR
|
3189b55121a50ee1c4734227cde6da58ed6cb576
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import csv
def buildPat(row, key):
    """Map a FHIR Patient field path to its value taken from a source row.

    Returns None for keys this mapper does not handle.
    """
    # Keys whose value is a single row attribute, looked up lazily.
    attr_by_key = {
        "extension.valueAddress.city": "A01_DESC_LUOGO_NASCITA",
        "identifier.value": "A01_ID_PERSONA",
        "name.family": "A01_COGNOME",
        "name.given": "A01_NOME",
        "contact.telecom.emailvalue": "A02_EMAIL",
        "contact.telecom.phonevalue": "A02_NUM_TELEFONO1",
        "contact.telecom.phonevalue2": "A02_NUM_TELEFONO2",
    }
    if key in attr_by_key:
        return getattr(row, attr_by_key[key])
    if key == "gender":
        return {'M': 'male', 'F': 'female'}.get(row.A01_SESSO, 'unknown')
    if key == "birthDate":
        born = row.A01_DATA_NASCITA
        return born[:10] if isinstance(born, str) else born.strftime("%Y-%m-%d")
    # NOTE(review): contact 1 accepts "PAPA'" but not "PAPA", while contact 2
    # accepts both — preserved as-is; confirm the asymmetry is intentional.
    if key == "contact.relationship.coding.code":
        desc = row.A02_DESC_TELEFONO1
        if desc in ("MAMMA", "PAPA'", "MADRE", "PADRE"):
            return 'PRN'
        return {"ZIA": 'AUNT', "ZIO": 'UNCLE'}.get(desc, '')
    if key == "contact.relationship.coding.display":
        desc = row.A02_DESC_TELEFONO1
        if desc in ("MAMMA", "PAPA'", "MADRE", "PADRE"):
            return 'parent'
        return {"ZIA": 'aunt', "ZIO": 'uncle'}.get(desc, '')
    if key == "contact.relationship.coding.code2":
        desc = row.A02_DESC_TELEFONO2
        if desc in ("MAMMA", "PAPA'", "PAPA", "MADRE", "PADRE"):
            return 'PRN'
        return {"ZIA": 'AUNT', "ZIO": 'UNCLE'}.get(desc, '')
    if key == "contact.relationship.coding.display2":
        desc = row.A02_DESC_TELEFONO2
        if desc in ("MAMMA", "PAPA'", "PAPA", "MADRE", "PADRE"):
            return 'parent'
        return {"ZIA": 'aunt', "ZIO": 'uncle'}.get(desc, '')
    return None
def buildCond(row, key):
    """Map a FHIR Condition field path to its value taken from a source row.

    Returns None for unmatched keys, and for guarded keys whose
    precondition on the row does not hold.
    """
    if key in ("extension.valueDateTime", "recordedDate"):
        # Both keys carry the registration date in the same format.
        reg = row.DT_REGISTRAZIONE
        return reg[:10] if isinstance(reg, str) else reg.strftime("%Y-%m-%d")
    if key == "bodySite.coding.code":
        if row.TITOLO_LIV2 == "Sottosede":
            return row.CODICE_LIV2
        return None
    if key == "bodySite.text":
        if row.TITOLO_LIV2 == "Sottosede":
            return row.DESC_LIV2
        return None
    if key == "stage.summary.text":
        if row.TITOLO_LIV2 == "Stadio":
            # e.g. "Stadio II" -> "II"
            return row.CODICE_LIV2.split()[1]
        return None
    if key == "subject.reference":
        return "Patient/" + row.ID_PAZIENTE
    if key == "description":
        return row.DESC_LIV2
    return None
| 34.893617
| 78
| 0.576524
| 383
| 3,280
| 4.785901
| 0.245431
| 0.06874
| 0.065466
| 0.061102
| 0.608838
| 0.518276
| 0.508456
| 0.47245
| 0.47245
| 0.454992
| 0
| 0.034273
| 0.297256
| 3,280
| 93
| 79
| 35.268817
| 0.760954
| 0
| 0
| 0.505495
| 0
| 0
| 0.197866
| 0.080793
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021978
| false
| 0
| 0.032967
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08b53ae263a1ae583483ba9e1d84efca2906ad4a
| 2,109
|
py
|
Python
|
sources-filter-list.py
|
kerberizer/wikimedia-scripts
|
18b78d5cc0042d5efcb355a65f4309fb4ae97eaf
|
[
"CC0-1.0"
] | null | null | null |
sources-filter-list.py
|
kerberizer/wikimedia-scripts
|
18b78d5cc0042d5efcb355a65f4309fb4ae97eaf
|
[
"CC0-1.0"
] | null | null | null |
sources-filter-list.py
|
kerberizer/wikimedia-scripts
|
18b78d5cc0042d5efcb355a65f4309fb4ae97eaf
|
[
"CC0-1.0"
] | 1
|
2016-07-31T07:26:33.000Z
|
2016-07-31T07:26:33.000Z
|
#!/usr/bin/env python3
import locale
import sys
from datetime import datetime as dt
import pywikibot as pwb
def main(argv):
    """Rebuild the on-wiki 'sources filter' site list from abuse filter #12.

    With a trailing '--dump' argument, print the extracted site list to
    stdout instead of saving wiki pages.
    """
    dump_only = False
    if len(argv) > 1:
        if argv.pop() == '--dump':
            dump_only = True
        else:
            print('Error: Unrecognized option.', file=sys.stderr)
            sys.exit(1)
    wik = pwb.Site(code='bg', fam='wikipedia')
    # Query only abuse filter id 12 and fetch just its pattern text.
    params = {
        'action': 'query',
        'format': 'json',
        'list': 'abusefilters',
        'formatversion': '2',
        'abfstartid': '12',
        'abfendid': '12',
        'abfprop': 'pattern',
    }
    pattern = pwb.data.api.Request(
        site=wik,
        parameters=params
    ).submit()['query']['abusefilters'][0]['pattern']
    # Keep lines whose chars 2-4 are '\b (i.e. quoted \b-anchored regexes),
    # strip the 5-char prefix and 4-char suffix, and unescape the dots.
    # NOTE(review): tightly coupled to the filter's exact line syntax.
    site_list = [_[5:][:-4].replace('\\.', '.') for _ in pattern.splitlines() if _[2:5] == "'\\b"]
    site_list.sort()
    if dump_only:
        for site in site_list:
            print('* {}'.format(site))
    else:
        list_page_name = 'Уикипедия:Патрульори/СФИН'
        list_page = pwb.Page(wik, list_page_name)
        lnum_page = pwb.Page(wik, list_page_name + '/N')  # site count subpage
        lupd_page = pwb.Page(wik, list_page_name + '/U')  # last-updated subpage
        list_page.text = '{{' + list_page_name + '/H}}\n'
        site_index = ''
        # Group the sites under a capitalized first-letter heading.
        for site in site_list:
            if site[0] != site_index:
                list_page.text += '\n<h3> {} </h3>\n'.format(site[0].capitalize())
                site_index = site[0]
            list_page.text += '* {}\n'.format(site)
        list_page.text += '\n{{' + list_page_name + '/F}}'
        lnum_page.text = str(len(site_list))
        # Switch to Bulgarian locale for the month name, restore afterwards.
        locale.setlocale(locale.LC_TIME, 'bg_BG.UTF-8')
        lupd_page.text = dt.now().strftime('%H:%M на %e %B %Y').lower()
        locale.resetlocale(locale.LC_TIME)
        list_page.save(summary='Бот: актуализация', quiet=True)
        lnum_page.save(summary='Бот: актуализация', quiet=True)
        lupd_page.save(summary='Бот: актуализация', quiet=True)
# Entry point: pass the raw argv through; main() handles option parsing.
if __name__ == '__main__':
    main(sys.argv)
# vim: set ts=4 sts=4 sw=4 tw=100 et:
| 31.477612
| 98
| 0.543385
| 266
| 2,109
| 4.12406
| 0.406015
| 0.087511
| 0.065634
| 0.038286
| 0.208751
| 0.177758
| 0.177758
| 0
| 0
| 0
| 0
| 0.016678
| 0.289237
| 2,109
| 66
| 99
| 31.954545
| 0.715143
| 0.027027
| 0
| 0.075472
| 0
| 0
| 0.157073
| 0.012195
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018868
| false
| 0
| 0.075472
| 0
| 0.09434
| 0.037736
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08c039eccfb3500006401f61d37873f932777364
| 1,120
|
py
|
Python
|
douyin/hot/trend.py
|
miaotiaotech/DouYin
|
e996ad99ce27e0d13f2856c497fd4b4f05f95b56
|
[
"MIT"
] | 657
|
2018-10-24T16:58:04.000Z
|
2022-03-15T03:58:04.000Z
|
douyin/hot/trend.py
|
1997lw/DouYin
|
5859f4db5258ad10926fddaa2b4074c85581d419
|
[
"MIT"
] | 15
|
2018-10-30T09:40:11.000Z
|
2020-08-09T13:58:31.000Z
|
douyin/hot/trend.py
|
1997lw/DouYin
|
5859f4db5258ad10926fddaa2b4074c85581d419
|
[
"MIT"
] | 249
|
2018-10-25T07:12:14.000Z
|
2022-02-21T07:49:58.000Z
|
from douyin.utils import fetch
from douyin.config import hot_trend_url, common_headers
from douyin.utils.tranform import data_to_music, data_to_topic
from douyin.structures.hot import HotTrend
from douyin.utils.common import parse_datetime
# define trend query params
# Shared/mutable: trend() below adds a 'cursor' key to page through results.
query = {
    'version_code': '2.9.1',
    'count': '10',
}
def trend():
    """
    Yield successive pages of the hot-trend feed, 10 entries per page.

    Infinite generator: each iteration fetches one page, converts the
    hot-topic ('热门话题') and hot-music ('热门音乐') categories into
    structure objects, and yields them wrapped in a HotTrend.
    :return: generator of HotTrend
    """
    offset = 0
    while True:
        # advance the paging cursor before each fetch
        query['cursor'] = str(offset)
        # NOTE(review): verify=False presumably disables TLS verification in
        # fetch — confirm this is intentional.
        result = fetch(hot_trend_url, headers=common_headers, params=query, verify=False)
        category_list = result.get('category_list')
        datetime = parse_datetime(result.get('extra', {}).get('now'))
        final = []
        for item in category_list:
            # process per category
            if item.get('desc') == '热门话题':
                final.append(data_to_topic(item.get('challenge_info', {})))
            if item.get('desc') == '热门音乐':
                final.append(data_to_music(item.get('music_info', {})))
        yield HotTrend(datetime=datetime, data=final, offset=offset, count=int(query.get('count')))
        offset += 10
| 32
| 99
| 0.633036
| 142
| 1,120
| 4.838028
| 0.429577
| 0.07278
| 0.065502
| 0.037846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009335
| 0.234821
| 1,120
| 34
| 100
| 32.941176
| 0.792299
| 0.065179
| 0
| 0
| 0
| 0
| 0.093567
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.208333
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
08c1a85992031481a6829f933c45c2206c709fa4
| 288
|
py
|
Python
|
hashing/hashing.py
|
subhamsagar524/Learn-Blockchain
|
316f30ed9d43f6ab806ca87b9b83c0237ef69828
|
[
"MIT"
] | null | null | null |
hashing/hashing.py
|
subhamsagar524/Learn-Blockchain
|
316f30ed9d43f6ab806ca87b9b83c0237ef69828
|
[
"MIT"
] | null | null | null |
hashing/hashing.py
|
subhamsagar524/Learn-Blockchain
|
316f30ed9d43f6ab806ca87b9b83c0237ef69828
|
[
"MIT"
] | 1
|
2020-03-13T06:32:46.000Z
|
2020-03-13T06:32:46.000Z
|
# SHA-256 demo: hash a user-supplied word and print its hex digest.
import hashlib

word = input("Enter the word for Hashing: ")
digest = hashlib.sha256(word.encode()).hexdigest()
print("Hashed with 256 bit: ")
print(digest)
| 20.571429
| 45
| 0.704861
| 41
| 288
| 4.902439
| 0.560976
| 0.099502
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025974
| 0.197917
| 288
| 13
| 46
| 22.153846
| 0.844156
| 0.288194
| 0
| 0
| 0
| 0
| 0.262032
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|