Dataset schema (field: dtype):

hexsha: string | size: int64 | ext: string | lang: string
max_stars_repo_path: string | max_stars_repo_name: string | max_stars_repo_head_hexsha: string | max_stars_repo_licenses: list | max_stars_count: int64 | max_stars_repo_stars_event_min_datetime: string | max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string | max_issues_repo_name: string | max_issues_repo_head_hexsha: string | max_issues_repo_licenses: list | max_issues_count: int64 | max_issues_repo_issues_event_min_datetime: string | max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string | max_forks_repo_name: string | max_forks_repo_head_hexsha: string | max_forks_repo_licenses: list | max_forks_count: int64 | max_forks_repo_forks_event_min_datetime: string | max_forks_repo_forks_event_max_datetime: string
content: string | avg_line_length: float64 | max_line_length: int64 | alphanum_fraction: float64

Quality-signal columns (41, in this order): qsc_code_num_words_quality_signal (int64); qsc_code_num_chars, qsc_code_mean_word_length, qsc_code_frac_words_unique, qsc_code_frac_chars_top_2grams, qsc_code_frac_chars_top_3grams, qsc_code_frac_chars_top_4grams, qsc_code_frac_chars_dupe_5grams, qsc_code_frac_chars_dupe_6grams, qsc_code_frac_chars_dupe_7grams, qsc_code_frac_chars_dupe_8grams, qsc_code_frac_chars_dupe_9grams, qsc_code_frac_chars_dupe_10grams, qsc_code_frac_chars_replacement_symbols, qsc_code_frac_chars_digital, qsc_code_frac_chars_whitespace, qsc_code_size_file_byte, qsc_code_num_lines, qsc_code_num_chars_line_max, qsc_code_num_chars_line_mean, qsc_code_frac_chars_alphabet, qsc_code_frac_chars_comments, qsc_code_cate_xml_start, qsc_code_frac_lines_dupe_lines, qsc_code_cate_autogen, qsc_code_frac_lines_long_string, qsc_code_frac_chars_string_length, qsc_code_frac_chars_long_word_length, qsc_code_frac_lines_string_concat, qsc_code_cate_encoded_data, qsc_code_frac_chars_hex_words, qsc_code_frac_lines_prompt_comments, qsc_code_frac_lines_assert, qsc_codepython_cate_ast, qsc_codepython_frac_lines_func_ratio, qsc_codepython_frac_lines_pass, qsc_codepython_frac_lines_import, qsc_codepython_frac_lines_simplefunc, qsc_codepython_score_lines_no_logic, qsc_codepython_frac_lines_print, each with the _quality_signal suffix (float64), except qsc_codepython_cate_var_zero_quality_signal (bool).

The same 41 column names then repeat without the _quality_signal suffix: all int64, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are typed null (all-null columns).

effective: string | hits: int64
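For orientation, here is how a consumer might load such rows and filter on the quality signals; a minimal sketch, assuming the dump is available as JSON Lines (the file name is hypothetical; field names come from the schema above):

    import pandas as pd

    df = pd.read_json("code_rows.jsonl", lines=True)
    keep = df[
        (df["lang"] == "Python")
        & (df["alphanum_fraction"].between(0.25, 0.9))
        & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.4)
    ]
    print(len(keep), "rows kept of", len(df))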
---
hexsha: e026df5666a9c260f8a2d313e1edc3eee3cad4f7 | size: 9,053 | ext: py | lang: Python
max_stars / max_issues / max_forks (identical): repo dummyxyz1/re_counterfactual_generative @ 4dda8e17a1123a564d60be82c17e9589155fb2e2 | path code/counterfactual_generative_networks-main/imagenet/train_cgn.py | licenses ["MIT"] | count null | event datetimes null
content:
import os
from datetime import datetime
from os.path import join
import pathlib
from tqdm import tqdm
import argparse

import torch
from torch import nn, optim
from torch.autograd import Variable
import torchvision
from torchvision.transforms import Pad
from torchvision.utils import make_grid
import repackage
repackage.up()

from imagenet.models import CGN
from imagenet.config import get_cfg_defaults
from shared.losses import *
from utils import Optimizers
from inception_score import *


def save_sample_sheet(cgn, u_fixed, sample_path, ep_str):
    cgn.eval()
    dev = u_fixed.to(cgn.get_device())
    ys = [15, 251, 330, 382, 385, 483, 559, 751, 938, 947, 999]

    to_save = []
    with torch.no_grad():
        for y in ys:
            # generate
            y_vec = cgn.get_class_vec(y, sz=1)
            inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation)
            x_gt, mask, premask, foreground, background, bg_mask = cgn(inp)
            x_gen = mask * foreground + (1 - mask) * background

            # build class grid
            to_plot = [premask, foreground, background, x_gen, x_gt]
            grid = make_grid(torch.cat(to_plot).detach().cpu(),
                             nrow=len(to_plot), padding=2, normalize=True)

            # add unnormalized mask
            mask = Pad(2)(mask[0].repeat(3, 1, 1)).detach().cpu()
            grid = torch.cat([mask, grid], 2)

            # save to disk
            to_save.append(grid)
            del to_plot, mask, premask, foreground, background, x_gen, x_gt

    # save the image
    path = join(sample_path, 'cls_sheet_' + ep_str + '.png')
    torchvision.utils.save_image(torch.cat(to_save, 1), path)
    cgn.train()


def save_sample_single(cgn, u_fixed, sample_path, ep_str):
    cgn.eval()
    dev = u_fixed.to(cgn.get_device())
    ys = [15, 251, 330, 382, 385, 483, 559, 751, 938, 947, 999]

    with torch.no_grad():
        for y in ys:
            # generate
            y_vec = cgn.get_class_vec(y, sz=1)
            inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation)
            _, mask, premask, foreground, background, _ = cgn(inp)
            x_gen = mask * foreground + (1 - mask) * background

            # save images
            path = join(sample_path, f'{y}_1_premask_' + ep_str + '.png')
            torchvision.utils.save_image(premask, path, normalize=True)
            path = join(sample_path, f'{y}_2_mask_' + ep_str + '.png')
            torchvision.utils.save_image(mask, path, normalize=True)
            path = join(sample_path, f'{y}_3_texture_' + ep_str + '.png')
            torchvision.utils.save_image(foreground, path, normalize=True)
            path = join(sample_path, f'{y}_4_bgs_' + ep_str + '.png')
            torchvision.utils.save_image(background, path, normalize=True)
            path = join(sample_path, f'{y}_5_gen_ims_' + ep_str + '.png')
            torchvision.utils.save_image(x_gen, path, normalize=True)

    cgn.train()


def fit(cfg, cgn, opts, losses):
    inception_score_val = list()

    # total number of episodes, accounting for batch accumulation
    episodes = cfg.TRAIN.EPISODES
    episodes *= cfg.TRAIN.BATCH_ACC

    # directories for experiments
    time_str = datetime.now().strftime("%Y_%m_%d_%H_%M")
    if cfg.WEIGHTS_PATH:
        weights_path = str(pathlib.Path(cfg.WEIGHTS_PATH).parent)
        start_ep = int(pathlib.Path(cfg.WEIGHTS_PATH).stem[3:])
        sample_path = weights_path.replace('weights', 'samples')
        ep_range = (start_ep, start_ep + episodes)
    else:
        model_path = join('imagenet', 'experiments',
                          f'cgn_{time_str}_{cfg.MODEL_NAME}')
        weights_path = join(model_path, 'weights')
        sample_path = join(model_path, 'samples')
        pathlib.Path(weights_path).mkdir(parents=True, exist_ok=True)
        pathlib.Path(sample_path).mkdir(parents=True, exist_ok=True)
        ep_range = (0, episodes)

    # fixed noise sample
    u_fixed_path = join('imagenet', 'experiments', 'u_fixed.pt')
    if not os.path.isfile(u_fixed_path) or cfg.LOG.SAMPLED_FIXED_NOISE:
        u_fixed = cgn.get_noise_vec()
        torch.save(u_fixed, u_fixed_path)
    else:
        u_fixed = torch.load(u_fixed_path)

    # training loop
    cgn.train()
    L_l1, L_perc, L_binary, L_mask, L_text, L_bg = losses
    save_samples = save_sample_single if cfg.LOG.SAVE_SINGLES else save_sample_sheet
    pbar = tqdm(range(*ep_range))
    for i, ep in enumerate(pbar):
        x_gt, mask, premask, foreground, background, background_mask = cgn()
        x_gen = mask * foreground + (1 - mask) * background

        # losses
        losses_g = {}
        losses_g['l1'] = L_l1(x_gen, x_gt)
        losses_g['perc'] = L_perc(x_gen, x_gt)
        losses_g['binary'] = L_binary(mask)
        losses_g['mask'] = L_mask(mask)
        losses_g['perc_text'] = L_text(x_gt, mask, foreground)
        losses_g['bg'] = L_bg(background_mask)

        # backprop
        losses_g = {k: v.mean() for k, v in losses_g.items()}
        g_loss = sum(losses_g.values())
        g_loss.backward()

        if (i + 1) % cfg.TRAIN.BATCH_ACC == 0:
            opts.step(['shape', 'bg', 'texture'])

        # saving
        if not i % cfg.LOG.SAVE_ITER:
            ep_str = f'ep_{ep:07}'
            save_samples(cgn, u_fixed, sample_path, ep_str)
            torch.save(cgn.state_dict(), join(weights_path, ep_str + '.pth'))

        # logging
        if cfg.LOG.LOSSES:
            msg = ''.join([f"[{k}: {v:.3f}]" for k, v in losses_g.items()])
            pbar.set_description(msg)

        # calculate Inception Score
        if cfg.LOG.INCEPTION_SCORE:
            score, score_std = inception_score(x_gen)
            inception_score_val.append(score)


def main(cfg):
    # model init
    cgn = CGN(
        batch_sz=cfg.TRAIN.BATCH_SZ,
        truncation=cfg.MODEL.TRUNCATION,
        pretrained=True,
    )
    print("------CGN-------")
    print(cgn)
    if cfg.WEIGHTS_PATH:
        weights = torch.load(cfg.WEIGHTS_PATH)
        weights = {k.replace('module.', ''): v for k, v in weights.items()}
        cgn.load_state_dict(weights)

    # optimizers
    opts = Optimizers()
    opts.set('shape', cgn.f_shape, cfg.LR.SHAPE)
    opts.set('texture', cgn.f_text, cfg.LR.TEXTURE)
    opts.set('bg', cgn.f_bg, cfg.LR.BG)

    # losses
    L_l1 = ReconstructionLoss(mode='l1', loss_weight=cfg.LAMBDA.L1)
    L_perc = PerceptualLoss(style_wgts=cfg.LAMBDA.PERC)
    L_binary = BinaryLoss(loss_weight=cfg.LAMBDA.BINARY)
    L_mask = MaskLoss(loss_weight=cfg.LAMBDA.MASK)
    L_text = PercLossText(style_wgts=cfg.LAMBDA.TEXT)
    L_bg = BackgroundLoss(loss_weight=cfg.LAMBDA.BG)
    losses = (L_l1, L_perc, L_binary, L_mask, L_text, L_bg)

    # push to device and train
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    cgn = cgn.to(device)
    losses = (l.to(device) for l in losses)
    fit(cfg, cgn, opts, losses)


def merge_args_and_cfg(args, cfg):
    cfg.MODEL_NAME = args.model_name
    cfg.WEIGHTS_PATH = args.weights_path

    cfg.LOG.SAMPLED_FIXED_NOISE = args.sampled_fixed_noise
    cfg.LOG.SAVE_SINGLES = args.save_singles
    cfg.LOG.SAVE_ITER = args.save_iter
    cfg.LOG.LOSSES = args.log_losses
    cfg.LOG.INCEPTION_SCORE = True

    cfg.TRAIN.EPISODES = args.episodes
    cfg.TRAIN.BATCH_SZ = args.batch_sz
    cfg.TRAIN.BATCH_ACC = args.batch_acc

    cfg.MODEL.TRUNCATION = args.truncation
    return cfg


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default='tmp',
                        help='Weights and samples will be saved under experiments/model_name')
    parser.add_argument('--weights_path', default='',
                        help='provide path to continue training')
    parser.add_argument('--sampled_fixed_noise', default=False, action='store_true',
                        help='If you want a different noise vector than provided in the repo')
    parser.add_argument('--save_singles', default=False, action='store_true',
                        help='Save single images instead of sheets')
    parser.add_argument('--truncation', type=float, default=1.0,
                        help='Truncation value for noise sampling')
    parser.add_argument('--episodes', type=int, default=300,
                        help="We don't do dataloading, hence, one episode = one gradient update.")
    parser.add_argument('--batch_sz', type=int, default=1,
                        help='Batch size, use in conjunction with batch_acc')
    parser.add_argument('--batch_acc', type=int, default=4000,
                        help='pseudo_batch_size = batch_acc * batch_sz')
    parser.add_argument('--save_iter', type=int, default=4000,
                        help='Save samples/weights every n iterations')
    parser.add_argument('--log_losses', default=False, action='store_true',
                        help='Print out losses')
    args = parser.parse_args()

    cfg = get_cfg_defaults()
    cfg = merge_args_and_cfg(args, cfg)

    print(cfg)
    main(cfg)
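From the argparse definition above, a typical invocation looks like the following; a sketch, with the run name and hyperparameter values chosen for illustration:

    # one optimizer step per batch_acc episodes; samples and weights land under
    # imagenet/experiments/cgn_<timestamp>_my_run/
    #   python imagenet/train_cgn.py --model_name my_run --episodes 300 \
    #       --batch_sz 1 --batch_acc 8 --save_iter 1000 --log_losses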
avg_line_length: 37.720833 | max_line_length: 98 | alphanum_fraction: 0.629515
qsc_*_quality_signal values (41, schema order): 1,262 | 9,053 | 4.290808 | 0.209984 | 0.016621 | 0.031394 | 0.019945 | 0.294552 | 0.24229 | 0.195568 | 0.117452 | 0.117452 | 0.075716 | 0 | 0.016157 | 0.247984 | 9,053 | 239 | 99 | 37.878661 | 0.77923 | 0.036121 | 0 | 0.132597 | 0 | 0 | 0.10672 | 0.008501 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027624 | false | 0 | 0.099448 | 0 | 0.132597 | 0.016575
suffix-less qsc_* values (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
effective: 1 | hits: 0
---
hexsha: e02908fac191deeaa9eb04515ee51d7d466320c5 | size: 1,695 | ext: py | lang: Python
max_stars / max_issues / max_forks (identical): repo matthieucan/shorturl @ a7f7fab61e8b23b352590797ca4959ed166c865e | path url.py | licenses ["WTFPL"]
max_stars_count: 1 (2018-10-19T01:57:29.000Z – 2018-10-19T01:57:29.000Z) | max_issues_count: null | max_forks_count: null
content:
def base_conv(n, input_base=10, output_base=10):
    """
    Converts a number n from base input_base to base output_base.
    The following symbols are used to represent numbers:
    0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ
    n can be an int if input_base <= 10, and a string otherwise.
    The result will be a string.
    """
    numbers = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

    ## base 10 conversion
    n = str(n)
    size = len(n)
    baseten = 0
    for i in range(size):
        baseten += numbers.index(n[i]) * input_base ** (size - 1 - i)

    ## base output_base conversion
    # find the largest power m such that output_base**m <= baseten
    max_power = 0
    while output_base ** (max_power + 1) <= baseten:
        max_power += 1

    result = ""
    for i in range(max_power + 1):
        # integer division, so that coeff is a valid index into `numbers`
        coeff = baseten // (output_base ** (max_power - i))
        baseten -= coeff * (output_base ** (max_power - i))
        result += numbers[coeff]
    return result


if __name__ == "__main__":
    assert(base_conv(10) == "10")
    assert(base_conv(42) == "42")
    assert(base_conv(5673576) == "5673576")
    assert(base_conv(10, input_base=2) == "2")
    assert(base_conv(101010, input_base=2) == "42")
    assert(base_conv(43, input_base=10, output_base=2) == "101011")
    assert(base_conv(256**3 - 1, input_base=10, output_base=16) == "ffffff")
    assert(base_conv("d9bbb9d0ceabf", input_base=16, output_base=8) ==
           "154673563503165277")
    assert(base_conv("154673563503165277", input_base=8, output_base=10) ==
           "3830404793297599")
    assert(base_conv(0, input_base=3, output_base=50) == "0")
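As a sanity check, base_conv agrees with Python's built-in base parser where the two overlap; a small sketch (int(s, base) handles bases up to 36, i.e. only the lowercase half of the symbol table above):

    assert int("d9bbb9d0ceabf", 16) == int("154673563503165277", 8)
    assert base_conv("ffffff", input_base=16, output_base=10) == str(0xffffff)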
avg_line_length: 36.06383 | max_line_length: 78 | alphanum_fraction: 0.640708
qsc_*_quality_signal values (41, schema order): 224 | 1,695 | 4.638393 | 0.321429 | 0.084697 | 0.134745 | 0.049086 | 0.097209 | 0 | 0 | 0 | 0 | 0 | 0 | 0.119507 | 0.234808 | 1,695 | 46 | 79 | 36.847826 | 0.681573 | 0.215339 | 0 | 0 | 0 | 0 | 0.125581 | 0.048062 | 0 | 0 | 0 | 0 | 0.344828 | 1 | 0.034483 | false | 0 | 0 | 0 | 0.068966 | 0
suffix-less qsc_* values (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
effective: 1 | hits: 0
---
hexsha: e02960393a5a94bda69769c1a73c609b148e700d | size: 13,612 | ext: py | lang: Python
max_stars / max_issues / max_forks (identical): repo codecrap/qtt @ 39a8bf21f7bcab94940a66f4d553a14bf34f82b0 | path src/qtt/qiskit/passes.py | licenses ["MIT"] | count null | event datetimes null
content:
import logging
from typing import Dict, List, Optional

import numpy as np
import qiskit
from qiskit.circuit import Barrier, Delay, Reset
from qiskit.circuit.library import (CRXGate, CRYGate, CRZGate, CZGate,
                                    PhaseGate, RXGate, RYGate, RZGate, U1Gate,
                                    U2Gate, U3Gate, UGate)
from qiskit.circuit.library.standard_gates import (CU1Gate, RZZGate, SdgGate,
                                                   SGate, TdgGate, TGate,
                                                   ZGate)
from qiskit.circuit.quantumcircuit import QuantumCircuit
from qiskit.converters.circuit_to_dag import circuit_to_dag
from qiskit.dagcircuit import DAGCircuit
from qiskit.transpiler.basepasses import TransformationPass

logger = logging.getLogger(__name__)


class RemoveSmallRotations(TransformationPass):
    """Return a circuit with small rotation gates removed."""

    def __init__(self, epsilon: float = 0, modulo2pi=False):
        """Remove all small rotations from a circuit

        Args:
            epsilon: Threshold for rotation angle to be removed
            modulo2pi: If True, then rotations that are multiples of 2pi are removed as well
        """
        super().__init__()
        self.epsilon = epsilon
        self._empty_dag1 = qiskit.converters.circuit_to_dag(QuantumCircuit(1))
        self._empty_dag2 = qiskit.converters.circuit_to_dag(QuantumCircuit(2))
        self.mod2pi = modulo2pi

    def run(self, dag: DAGCircuit) -> DAGCircuit:
        """Run the pass on `dag`.

        Args:
            dag: input dag.
        Returns:
            Output dag with small rotations removed
        """
        def modulo_2pi(x):
            x = float(x)
            return np.mod(x + np.pi, 2 * np.pi) - np.pi

        for node in dag.op_nodes():
            if isinstance(node.op, (PhaseGate, RXGate, RYGate, RZGate)):
                if node.op.is_parameterized():
                    # for parameterized gates we do not optimize
                    pass
                else:
                    phi = float(node.op.params[0])
                    if self.mod2pi:
                        phi = modulo_2pi(phi)
                    if np.abs(phi) <= self.epsilon:
                        dag.substitute_node_with_dag(node, self._empty_dag1)
            elif isinstance(node.op, (CRXGate, CRYGate, CRZGate)):
                if node.op.is_parameterized():
                    # for parameterized gates we do not optimize
                    pass
                else:
                    phi = float(node.op.params[0])
                    if self.mod2pi:
                        phi = modulo_2pi(phi)
                    if np.abs(phi) <= self.epsilon:
                        dag.substitute_node_with_dag(node, self._empty_dag2)
        return dag


class RemoveDiagonalGatesAfterInput(TransformationPass):
    """Remove diagonal gates (including diagonal 2Q gates) at the start of a circuit.

    Transpiler pass to remove diagonal gates (like RZ, T, Z, etc.) at the start of a circuit,
    including diagonal 2Q gates. Nodes after a reset are also included.
    """

    def run(self, dag):
        """Run the RemoveDiagonalGatesBeforeMeasure pass on `dag`.

        Args:
            dag (DAGCircuit): the DAG to be optimized.
        Returns:
            DAGCircuit: the optimized DAG.
        """
        diagonal_1q_gates = (RZGate, ZGate, TGate, SGate, TdgGate, SdgGate, U1Gate)
        diagonal_2q_gates = (CZGate, CRZGate, CU1Gate, RZZGate)

        nodes_to_remove = set()
        for input_node in dag.input_map.values():
            try:
                successor = next(dag.quantum_successors(input_node))
            except StopIteration:
                continue
            if successor.type == "op" and isinstance(successor.op, diagonal_1q_gates):
                nodes_to_remove.add(successor)

            def valid_predecessor(s):
                """ Return True if node is a valid predecessor for removal """
                if s.type == 'in':
                    return True
                if s.type == "op" and isinstance(s.op, Reset):
                    return True
                return False

            if successor.type == "op" and isinstance(successor.op, diagonal_2q_gates):
                predecessors = dag.quantum_predecessors(successor)
                if all(valid_predecessor(s) for s in predecessors):
                    nodes_to_remove.add(successor)

        for node_to_remove in nodes_to_remove:
            dag.remove_op_node(node_to_remove)

        return dag


class DecomposeU(TransformationPass):
    """ Decompose U gates into elementary rotations Rx, Ry, Rz

    The U gates are decomposed using McKay decomposition.
    """

    def __init__(self, verbose=0):
        """
        Args:
            verbose: verbosity level
        """
        super().__init__()
        self._subdags = []
        self.verbose = verbose
        self.initial_layout = None

    def ugate_replacement_circuit(self, ugate):
        qc = QuantumCircuit(1)
        if isinstance(ugate, (U3Gate, UGate)):
            theta, phi, lam = ugate.params
            if theta == np.pi / 2:
                # a u2 gate
                qc.rz(lam - np.pi / 2, 0)
                qc.rx(np.pi / 2, 0)
                qc.rz(phi + np.pi / 2, 0)
            else:
                # from https://arxiv.org/pdf/1707.03429.pdf
                qc.rz(lam, 0)
                qc.rx(np.pi / 2, 0)
                qc.rz(theta + np.pi, 0)
                qc.rx(np.pi / 2, 0)
                qc.rz(phi + np.pi, 0)
        elif isinstance(ugate, U2Gate):
            phi, lam = ugate.params
            qc.rz(lam - np.pi / 2, 0)
            qc.rx(np.pi / 2, 0)
            qc.rz(phi + np.pi / 2, 0)
        elif isinstance(ugate, (U1Gate, PhaseGate)):
            lam, = ugate.params
            qc.rz(lam, 0)
        else:
            raise Exception(f'unknown gate type {ugate}')
        return qc

    def run(self, dag: DAGCircuit) -> DAGCircuit:
        """Run the Decompose pass on `dag`.

        Args:
            dag: input DAG.
        Returns:
            Output DAG where ``U`` gates have been decomposed.
        """
        # Walk through the DAG and expand each node if required
        for node in dag.op_nodes():
            if isinstance(node.op, (PhaseGate, U1Gate, U2Gate, U3Gate, UGate)):
                subdag = circuit_to_dag(self.ugate_replacement_circuit(node.op))
                dag.substitute_node_with_dag(node, subdag)
        return dag


class DecomposeCX(TransformationPass):
    """ Decompose CX into CZ and single qubit rotations
    """

    def __init__(self, mode: str = 'ry'):
        """
        Args:
            mode: decomposition mode, 'ry' (default) or Hadamard-based
        """
        super().__init__()
        self._subdags: List = []
        self.initial_layout = None
        self.gate = qiskit.circuit.library.CXGate

        self.decomposition = QuantumCircuit(2)
        if mode == 'ry':
            self.decomposition.ry(-np.pi / 2, 1)
            self.decomposition.cz(0, 1)
            self.decomposition.ry(np.pi / 2, 1)
        else:
            self.decomposition.h(1)
            self.decomposition.cz(0, 1)
            self.decomposition.h(1)

        self._dag = circuit_to_dag(self.decomposition)

    def run(self, dag: DAGCircuit) -> DAGCircuit:
        """Run the Decompose pass on `dag`.

        Args:
            dag: input dag.
        Returns:
            output dag where ``CX`` was expanded.
        """
        # Walk through the DAG and expand each non-basis node
        for node in dag.op_nodes(self.gate):
            dag.substitute_node_with_dag(node, self._dag)
        return dag


class SequentialPass(TransformationPass):
    """Adds barriers between gates to make the circuit sequential."""

    def run(self, dag):
        new_dag = DAGCircuit()
        for qreg in dag.qregs.values():
            new_dag.add_qreg(qreg)
        for creg in dag.cregs.values():
            new_dag.add_creg(creg)

        for node in dag.op_nodes():
            new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
            logger.info(f'SequentialPass: adding node {node.name}')
            if node.name in ['barrier', 'measure']:
                continue
            new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])

        return new_dag


class LinearTopologyParallelPass(TransformationPass):
    """Adds barriers to enforce a linear topology

    The barriers are placed between gates such that no two-qubit gates are executed
    at the same time and only single-qubit gates on non-neighboring qubits can
    be executed in parallel. It assumes a linear topology."""

    def run(self, dag):
        new_dag = DAGCircuit()
        for qreg in dag.qregs.values():
            new_dag.add_qreg(qreg)
        for creg in dag.cregs.values():
            new_dag.add_creg(creg)

        for ii, layer in enumerate(dag.layers()):
            gates_1q = []
            gates_2q = []
            other_gates = []
            for node in layer['graph'].op_nodes():
                if len(node.qargs) == 2:
                    gates_2q.append(node)
                elif len(node.qargs) == 1:
                    gates_1q.append(node)
                else:
                    logging.info(f'layer {ii}: other type of node {node}')
                    other_gates.append(node)

            even = []
            odd = []
            for node in gates_1q:
                if node.qargs[0].index % 2 == 0:
                    even.append(node)
                else:
                    odd.append(node)

            logging.info(
                f'layer {ii}: 2q gates {len(gates_2q)}, even {len(even)} odd {len(odd)}, other {len(other_gates)}')

            if len(even) > 0:
                for node in even:
                    new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
                if not isinstance(node.op, Barrier):
                    new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])

            if len(odd) > 0:
                for node in odd:
                    new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
                if not isinstance(node.op, Barrier):
                    new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])

            for node in gates_2q:
                new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
                if not isinstance(node.op, Barrier):
                    new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])

            for node in other_gates:
                new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
                if not isinstance(node.op, Barrier):
                    new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])

        return new_dag


class DelayPass(TransformationPass):
    """Adds delay gates when the qubits are idle.

    For every layer of the circuit it finds the gate that
    lasts the longest and applies appropriate delays on the
    other qubits.
    """

    def __init__(self, gate_durations: Dict[str, float], delay_quantum: Optional[float] = None):
        """
        Args:
            gate_durations: Gate durations in units of dt
        """
        super().__init__()
        self.gate_durations = gate_durations
        self.delay_quantum = delay_quantum

    def add_delay_to_dag(self, duration, dag, qargs, cargs):
        if self.delay_quantum:
            number_of_delays = int(duration / self.delay_quantum)
            for ii in range(number_of_delays):
                dag.apply_operation_back(Delay(self.delay_quantum), qargs, cargs)
        else:
            dag.apply_operation_back(Delay(duration), qargs, cargs)

    @staticmethod
    def _determine_delay_target_qubits(dag, layer):
        """ Determine qubits in the specified layer which require a delay gate """
        partition = layer['partition']
        lst = list(dag.qubits)
        for el in partition:
            for q in el:
                if q in lst:
                    lst.remove(q)
        return lst

    def run(self, dag):
        new_dag = DAGCircuit()
        for qreg in dag.qregs.values():
            new_dag.add_qreg(qreg)
        for creg in dag.cregs.values():
            new_dag.add_creg(creg)

        for layer_idx, layer in enumerate(dag.layers()):
            max_duration = 0
            durations = {}
            for node in layer['graph'].op_nodes():
                if node.name in self.gate_durations:
                    max_duration = max(max_duration, self.gate_durations[node.name])
                    for q in node.qargs:
                        durations[q] = self.gate_durations[node.name]
                else:
                    logger.info(f'layer {layer_idx}, could not find duration for node {node.name}')
                new_dag.apply_operation_back(node.op, node.qargs, node.cargs)

            partition = layer['partition']
            if len(partition) == 0:
                continue

            lst = DelayPass._determine_delay_target_qubits(dag, layer)
            logger.info(f'layer: {layer_idx}: lst {lst}, durations {durations}')
            for el in lst:
                logger.info(f'apply_operation_back: {[el]}')
                self.add_delay_to_dag(max_duration, new_dag, [el], [])
            for q in durations:
                if max_duration - durations[q] > 0:
                    self.add_delay_to_dag(max_duration - durations[q], new_dag, [q], [])

        return new_dag
avg_line_length: 36.591398 | max_line_length: 115 | alphanum_fraction: 0.563914
qsc_*_quality_signal values (41, schema order): 1,622 | 13,612 | 4.580148 | 0.176326 | 0.028268 | 0.033921 | 0.036748 | 0.399515 | 0.351595 | 0.312559 | 0.28833 | 0.265042 | 0.249832 | 0 | 0.011152 | 0.341243 | 13,612 | 371 | 116 | 36.690027 | 0.81733 | 0.149868 | 0 | 0.4125 | 0 | 0.004167 | 0.035193 | 0.001881 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0.05 | 0.045833 | 0 | 0.195833 | 0
suffix-less qsc_* values (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
effective: 1 | hits: 0
---
hexsha: e029ad3e92c68df36a0c0c69723e696b156c5364 | size: 5,616 | ext: py | lang: Python
max_stars / max_issues / max_forks (identical): repo JonathanAlis/IAFNNESTA @ 6845bed7e41a162a60e65d709f37cf975c8c8a4e | path IAFNNESTA.py | licenses ["MIT"]
max_stars_count: 3 (2021-05-13T05:51:42.000Z – 2022-02-06T13:36:52.000Z) | max_issues_count: null | max_forks_count: 1 (2022-02-06T13:36:39.000Z – 2022-02-06T13:36:39.000Z)
content:
def help():
    return '''
    Isotropic-Anisotropic Filtering Norm Nesterov Algorithm

    Solves the filtering norm minimization + quadratic term problem
    using Nesterov's algorithm, with continuation:

        argmin_x || iaFN(x) ||_1/2  subject to  ||b - Ax||_2^2 < delta

    If no filter is provided, solves the L1 problem.
    Continuation is performed by sequentially applying Nesterov's algorithm
    with a decreasing sequence of values of mu0 >= mu >= muf.
    The observation matrix A must be a projector (non-projectors are not implemented yet).

    Inputs:
    IAFNNESTA(b,                  # observed data, an m x 1 array
        A=identity, At=identity,  # measurement matrix and adjoint (either a matrix or function handles)
        muf=0.0001,               # final mu value, smaller leads to higher accuracy
        delta,                    # l2 error bound; enforces how close the variable
                                  # must fit the observations b, i.e. || y - Ax ||_2 <= delta
                                  # If delta = 0, enforces y = Ax
                                  # delta = sqrt(m + 2*sqrt(2*m))*sigma, where sigma = std(noise)
        L1w=1, L2w=0,             # weights of the L1 (anisotropic) and L2 (isotropic) norms
        verbose=0,                # whether to print internal steps
        maxit=1000,               # maximum iterations of the inner loop
        x0=[],                    # initial solution; if not provided, will be At(b)
        U=identity, Ut=identity,  # analysis/synthesis operators
        stopTest=1,               # stopTest == 1: stop when the relative change in the
                                  # objective function is less than TolVar
                                  # stopTest == 2: stop when the l_infinity norm of the
                                  # difference in the xk variable is less than TolVar
        TolVar=1e-5,              # tolerance for the stopping criteria
        AAtinv=[],                # not implemented
        normU=1,                  # if U is provided, this should be norm(U)
        H=[], Ht=[])              # filter operations in sparse matrix form;
                                  # also accepts the string 'tv' as input,
                                  # in which case it calculates the tv norm

    Outputs:
    return xk,                    # estimated x, the reconstructed signal
        niter,                    # number of iterations
        residuals                 # first column is the residual at every step,
                                  # second column is the value of f_mu at every step
    '''


import IAFNNesterov
import numpy as np
from scipy import sparse
import fil2mat


def identity(x):
    return x


def IAFNNESTA(b, sig_size=0, A=identity, At=identity, muf=0.0001, delta=0,
              L1w=1, L2w=0, verbose=0, MaxIntIter=5, maxit=1000, x0=[],
              U=identity, Ut=identity, stopTest=1, TolVar=1e-5, AAtinv=[],
              normU=1, H=[]):
    if delta < 0:
        raise Exception('Delta must not be negative')

    if not callable(A):  # A is a matrix: wrap it and its adjoint in handles.
        # Keep a reference first; rebinding A inside its own lambda would be self-referential.
        A_mat = A
        A = lambda x: np.matmul(A_mat, x)
        At = lambda x: np.matmul(np.transpose(A_mat), x)

    b = b.reshape((-1, 1))
    Atb = At(b)
    if sig_size == 0:
        sig_size = Atb.shape

    if callable(AAtinv):
        AtAAtb = At(AAtinv(b))
    else:
        if len(AAtinv) > 0:
            AAtinv_mat = AAtinv  # same self-reference pitfall as with A above
            AAtinv = lambda x: np.matmul(AAtinv_mat, x)
            AtAAtb = At(AAtinv(b))
        else:  # default
            AtAAtb = Atb
            AAtinv = identity

    if len(x0) == 0:
        x0 = AtAAtb

    if len(H) == 0:
        Hf = identity
        Hft = identity
    else:
        if not sparse.issparse(H):
            if isinstance(H, str):
                if H == 'tv':
                    hs = []
                    hs.append(np.array([[1, -1]]))
                    hs.append(np.array([[1], [-1]]))
                    H, _, _, _ = fil2mat.fil2mat(hs, sig_size)
                else:
                    print('H not recognized. Must be a sparse matrix, a list of filters or the string tv')
            else:
                # list of filters:
                H, _, _, _ = fil2mat.fil2mat(H, sig_size)
        #print(H.shape)
        #print(H)
        #print(type(H))
        Ht = H.transpose()
        Hf = lambda x: H @ x
        Hft = lambda x: Ht @ x

    HU = lambda x: Hf(U(x))
    UtHt = lambda x: Ut(Hft(x))

    typemin = ''
    if L1w > 0:
        typemin += "iso"
    if L2w > 0:
        typemin += "aniso"
    typemin += 'tropic '
    if callable(H):
        typemin += 'filtering norm '

    mu0 = 0
    if L1w > 0:
        mu0 += L1w * 0.9 * np.max(np.linalg.norm(HU(x0), 1))
    if L2w > 0:
        mu0 += L2w * 0.9 * np.max(np.linalg.norm(HU(x0), 2))

    niter = 0
    Gamma = np.power(muf / mu0, 1 / MaxIntIter)
    mu = mu0
    Gammat = np.power(TolVar / 0.1, 1 / MaxIntIter)
    TolVar = 0.1

    for i in range(MaxIntIter):
        mu = mu * Gamma
        TolVar = TolVar * Gammat
        if verbose > 0:
            print("\tBeginning %s Minimization; mu = %g\n" % (typemin, mu))
        xk, niter_int, res = IAFNNesterov.IAFNNesterov(b, A=A, At=At, mu=mu, delta=delta, L1w=L1w, L2w=L2w,
                                                       verbose=verbose, maxit=maxit, x0=x0, U=U, Ut=Ut,
                                                       stopTest=stopTest, TolVar=TolVar, AAtinv=AAtinv,
                                                       normU=normU, H=Hf, Ht=Hft)
        xplug = xk
        niter = niter_int + niter
        if i == 0:
            residuals = res
        else:
            residuals = np.vstack((residuals, res))

    return xk.reshape(sig_size)


if __name__ == "__main__":
    print(help())
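The docstring above gives the noise-driven choice of delta; a minimal call sketch for the identity-operator case (sizes and noise level are illustrative, not from the original):

    import numpy as np

    m = 256
    sigma = 0.01
    x_true = np.zeros((m, 1))
    x_true[::32] = 1.0                                # sparse test signal
    b = x_true + sigma * np.random.randn(m, 1)        # noisy observation
    delta = np.sqrt(m + 2 * np.sqrt(2 * m)) * sigma   # rule from the docstring
    x_hat = IAFNNESTA(b, delta=delta, H='tv')         # TV-filtered recovery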
avg_line_length: 35.770701 | max_line_length: 215 | alphanum_fraction: 0.51834
qsc_*_quality_signal values (41, schema order): 710 | 5,616 | 4.060563 | 0.322535 | 0.016996 | 0.009365 | 0.015609 | 0.04093 | 0.027749 | 0.015956 | 0.015956 | 0.015956 | 0 | 0 | 0.03298 | 0.379095 | 5,616 | 156 | 216 | 36 | 0.793806 | 0.016026 | 0 | 0.10084 | 0 | 0.033613 | 0.518029 | 0.01214 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02521 | false | 0 | 0.033613 | 0.016807 | 0.092437 | 0.033613
suffix-less qsc_* values (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
effective: 1 | hits: 0
---
hexsha: e029f3704209eae0d9983e10eec83eadf0c6a288 | size: 6,952 | ext: py | lang: Python
max_stars / max_issues / max_forks (identical): repo pfw/hypatia @ 407cd62e4817c85188aa6abdf204c5aaff5ab570 | path hypatia/util/__init__.py | licenses ["ZPL-2.1"] | count null | event datetimes null
content:
import itertools

import BTrees
from persistent import Persistent
from ZODB.broken import Broken
from zope.interface import implementer

_marker = object()

from .. import exc
from ..interfaces import (
    IResultSet,
    STABLE,
)


@implementer(IResultSet)
class ResultSet(object):
    """Implements :class:`hypatia.interfaces.IResultSet`"""

    family = BTrees.family64

    def __init__(self, ids, numids, resolver, sort_type=None):
        self.ids = ids  # only guaranteed to be iterable, not sliceable
        self.numids = numids
        self.resolver = resolver
        self.sort_type = sort_type

    def __len__(self):
        return self.numids

    def sort(
        self, index, reverse=False, limit=None, sort_type=None, raise_unsortable=True
    ):
        if sort_type is None:
            sort_type = self.sort_type

        ids = self.ids
        if not hasattr(ids, "__len__"):
            # indexes have no obligation to be able to sort generators
            ids = list(ids)
            self.ids = ids

        ids = index.sort(
            self.ids,
            reverse=reverse,
            limit=limit,
            sort_type=sort_type,
            raise_unsortable=raise_unsortable,
        )

        numids = self.numids
        if limit:
            numids = min(numids, limit)

        return self.__class__(ids, numids, self.resolver, sort_type=STABLE)

    def first(self, resolve=True):
        # return the first object or None
        resolver = self.resolver
        if resolver is None or not resolve:
            for id_ in self.ids:
                # if self.ids is not a list or a tuple, allow this result set
                # to be iterated after first() is called and allow first() to
                # be idempotent
                if not hasattr(self.ids, "__len__"):
                    self.ids = itertools.chain([id_], self.ids)
                return id_
        else:
            for id_ in self.ids:
                # if self.ids is not a list or a tuple, allow this result set
                # to be iterated after first() is called and allow first() to
                # be idempotent
                if not hasattr(self.ids, "__len__"):
                    self.ids = itertools.chain([id_], self.ids)
                return resolver(id_)

    def one(self, resolve=True):
        if self.numids == 1:
            return self.first(resolve=resolve)
        if self.numids > 1:
            raise exc.MultipleResults(self)
        else:
            raise exc.NoResults(self)

    def _resolve_all(self, resolver):
        for id_ in self.ids:
            yield resolver(id_)

    def all(self, resolve=True):
        resolver = self.resolver
        if resolver is None or not resolve:
            return self.ids
        else:
            return self._resolve_all(resolver)

    def __iter__(self):
        return iter(self.all())

    def intersect(self, docids):
        """Intersect this resultset with a sequence of docids or
        another resultset. Returns a new ResultSet."""
        # NB: we can't use an intersection function here because
        # self.ids may be a generator
        if isinstance(docids, ResultSet):
            docids = docids.ids
        filtered_ids = [x for x in self.ids if x in docids]
        return self.__class__(filtered_ids, len(filtered_ids), self.resolver)


class BaseIndexMixin(object):
    """Mixin class for indexes that implements common behavior"""

    family = BTrees.family64

    def discriminate(self, obj, default):
        """See interface IIndexInjection"""
        if callable(self.discriminator):
            value = self.discriminator(obj, _marker)
        else:
            value = getattr(obj, self.discriminator, _marker)

        if value is _marker:
            return default

        if isinstance(value, Persistent):
            raise ValueError("Catalog cannot index persistent object %s" % value)

        if isinstance(value, Broken):
            raise ValueError("Catalog cannot index broken object %s" % value)

        return value

    def reindex_doc(self, docid, obj):
        """See interface IIndexInjection"""
        self.unindex_doc(docid)
        self.index_doc(docid, obj)

    def indexed_count(self):
        """See IIndexedDocuments"""
        return len(self.indexed())

    def not_indexed_count(self):
        """See IIndexedDocuments"""
        return len(self.not_indexed())

    def docids(self):
        """See IIndexedDocuments"""
        not_indexed = self.not_indexed()
        indexed = self.indexed()
        if len(not_indexed) == 0:
            return self.family.IF.Set(indexed)
        elif len(indexed) == 0:
            return not_indexed
        indexed = self.family.IF.Set(indexed)
        return self.family.IF.union(not_indexed, indexed)

    def docids_count(self):
        """See IIndexedDocuments"""
        return len(self.docids())

    def apply_intersect(self, query, docids):
        """Default apply_intersect implementation"""
        result = self.apply(query)
        if docids is None:
            return result
        return self.family.IF.weightedIntersection(result, docids)[1]

    def _negate(self, apply_func, *args, **kw):
        positive = apply_func(*args, **kw)
        all = self.docids()
        if len(positive) == 0:
            return all
        return self.family.IF.difference(all, positive)

    def qname(self):
        # used in query representations; __name__ should be set by
        # catalog __setitem__ but if it's not, we fall back to a generic
        # representation
        return getattr(
            self,
            "__name__",
            str(self),
        )

    def resultset_from_query(self, query, names=None, resolver=None):
        # default resultset factory; meant to be overridden by systems that
        # have a default resolver. NB: although the default implementation
        # below does not access "self" (so it would appear that this could be
        # turned into a classmethod or staticmethod), subclasses that override
        # may expect self, so this is a plain method.
        docids = query._apply(names)
        numdocs = len(docids)
        return ResultSet(docids, numdocs, resolver)

    def flush(self, *arg, **kw):
        """Hookable by upstream systems"""
        pass


class RichComparisonMixin(object):
    # Stolen from http://www.voidspace.org.uk/python/recipebook.shtml#comparison

    def __eq__(self, other):
        raise NotImplementedError("Equality not implemented")

    def __lt__(self, other):
        raise NotImplementedError("Less than not implemented")

    def __ne__(self, other):
        return not self.__eq__(other)

    def __gt__(self, other):
        return not (self.__lt__(other) or self.__eq__(other))

    def __le__(self, other):
        return self.__eq__(other) or self.__lt__(other)

    def __ge__(self, other):
        return self.__eq__(other) or self.__gt__(other)
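RichComparisonMixin derives !=, >, <=, and >= from __eq__ and __lt__ alone, so a subclass only needs to supply those two; a small sketch (the class name is hypothetical):

    class Version(RichComparisonMixin):
        def __init__(self, n):
            self.n = n

        def __eq__(self, other):
            return self.n == other.n

        def __lt__(self, other):
            return self.n < other.n

    assert Version(1) < Version(2) <= Version(2) != Version(3)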
avg_line_length: 31.174888 | max_line_length: 85 | alphanum_fraction: 0.60889
qsc_*_quality_signal values (41, schema order): 829 | 6,952 | 4.926417 | 0.254524 | 0.032566 | 0.014691 | 0.01763 | 0.199804 | 0.158668 | 0.158668 | 0.148384 | 0.108717 | 0.108717 | 0 | 0.002061 | 0.302215 | 6,952 | 222 | 86 | 31.315315 | 0.839827 | 0.205121 | 0 | 0.131944 | 0 | 0 | 0.028666 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.180556 | false | 0.006944 | 0.048611 | 0.048611 | 0.451389 | 0
suffix-less qsc_* values (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
effective: 1 | hits: 0
---
hexsha: e02ae313e5c6ccbda99f1c423609cc20c6a48485 | size: 483 | ext: py | lang: Python
max_stars / max_issues / max_forks (identical): repo LeConstellationniste/DiscordFramework @ 24d4b9b7cb0a21d3cec9d5362ab0828c5e15a3af | path examples/example_without_CommandSet/my_listeners.py | licenses ["CC0-1.0"]
max_stars_count: 1 (2021-01-27T14:55:03.000Z – 2021-01-27T14:55:03.000Z) | max_issues_count: null | max_forks_count: null
content:
import asyncio
import discord


# Just with a function to add to the bot.
async def on_message(message):
    if not message.author.bot:
        await message.channel.send(f"{message.author.mention} a envoyé un message!")


# A Listener already created with the function
from discordEasy.objects import Listener


async def on_message(message):
    if not message.author.bot:
        await message.channel.send(f"{message.author.mention} a envoyé un message!")

listener_on_message = Listener(on_message)
avg_line_length: 28.411765 | max_line_length: 78 | alphanum_fraction: 0.784679
qsc_*_quality_signal values (41, schema order): 75 | 483 | 4.986667 | 0.413333 | 0.096257 | 0.053476 | 0.090909 | 0.561497 | 0.561497 | 0.561497 | 0.561497 | 0.561497 | 0.561497 | 0 | 0 | 0.130435 | 483 | 17 | 79 | 28.411765 | 0.890476 | 0.173913 | 0 | 0.6 | 0 | 0 | 0.2267 | 0.120907 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.3 | 0 | 0.3 | 0
suffix-less qsc_* values (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
effective: 1 | hits: 0
---
hexsha: e02beca3eabc9ebe9a2e1d16196b54fbf1a8bc1b | size: 4,024 | ext: py | lang: Python
max_stars / max_issues / max_forks (identical): repo robgon-art/pianonet @ 8d8a827bc8d310b8ce3f66259bbdf72648e9ca32 | path pianonet/serving/app.py | licenses ["MIT"]
max_stars_count: 14 (2020-09-01T11:16:28.000Z – 2021-05-02T18:04:21.000Z) | max_issues_count: 5 (2020-11-13T18:46:05.000Z – 2022-02-10T01:16:13.000Z) | max_forks_count: 3 (2020-09-02T15:05:00.000Z – 2021-05-02T18:04:24.000Z)
content:
import os
import random

from flask import Flask, request, send_from_directory
from werkzeug.utils import secure_filename

from pianonet.core.pianoroll import Pianoroll
from pianonet.model_inspection.performance_from_pianoroll import get_performance_from_pianoroll

app = Flask(__name__)

base_path = "/app/"
# base_path = "/Users/angsten/PycharmProjects/pianonet"

performances_path = os.path.join(base_path, 'data', 'performances')


def get_random_midi_file_name():
    """
    Get a random midi file name that will not ever collide.
    """
    return str(random.randint(0, 10000000000000000000)) + ".midi"


def get_performance_path(midi_file_name):
    """
    Returns the full path to a performance midi file given a file name.
    """
    return os.path.join(performances_path, midi_file_name)


@app.route('/')
def alive():
    return 'OK'


@app.route('/performances/', methods=['GET'])
def get_performance():
    """
    Returns the requested performance as a midi file.

    Expected query string is 'midi_file_name', such as 1234.midi
    """
    performance_midi_file_name = request.args.get('midi_file_name')
    performance_midi_file_name = secure_filename(performance_midi_file_name)
    print(performance_midi_file_name)

    if performance_midi_file_name is None:
        return {"http_code": 400, "code": "BadRequest", "message": "midi_file_name not found in request."}

    midi_file_path = get_performance_path(performance_midi_file_name)
    if not os.path.exists(midi_file_path):
        return {
            "http_code": 404,
            "code": "Not Found",
            "message": "midi_file " + performance_midi_file_name + " not found."
        }

    with open(midi_file_path, 'rb') as midi_file:
        return send_from_directory(performances_path, performance_midi_file_name)


@app.route('/create-performance', methods=['POST'])
def performance():
    """
    Expects post form data as follows:

        seed_midi_file_data: Midi file that forms the seed for a performance, as a string encoding like "8,2,3,4,5..."
        seconds_to_generate: Number of seconds of new notes to generate
        model_complexity: Quality of model to use, one of ['low', 'medium', 'high', 'highest']
    """
    seed_midi_file_data = request.form.get('seed_midi_file_data')
    if seed_midi_file_data is None:
        return {"http_code": 400, "code": "BadRequest", "message": "seed_midi_file_data not found in request."}
    else:
        seed_midi_file_int_array = [int(x) for x in seed_midi_file_data.split(',')]
        frame = bytearray()
        for i in seed_midi_file_int_array:
            frame.append(i)
        saved_seed_midi_file_path = os.path.join(base_path, 'data', 'seeds', get_random_midi_file_name())
        with open(saved_seed_midi_file_path, 'wb') as midi_file:
            midi_file.write(frame)

    seconds_to_generate = request.form.get('seconds_to_generate')
    if seconds_to_generate is None:
        return {"http_code": 400, "code": "BadRequest", "message": "seconds_to_generate not found in request."}
    else:
        seconds_to_generate = float(seconds_to_generate)

    model_complexity = request.form.get('model_complexity', 'low')
    if model_complexity == 'low':
        model_name = "micro_1"
    else:
        model_name = "r9p0_3500kparams_approx_9_blocks_model"
    model_path = os.path.join(base_path, 'models', model_name)

    input_pianoroll = Pianoroll(saved_seed_midi_file_path, use_custom_multitrack=True)
    input_pianoroll.trim_silence_off_ends()

    final_pianoroll = get_performance_from_pianoroll(
        pianoroll_seed=input_pianoroll,
        num_time_steps=int(48 * seconds_to_generate),
        model_path=model_path,
    )

    midi_file_name = get_random_midi_file_name()
    midi_file_path = get_performance_path(midi_file_name)
    final_pianoroll.save_to_midi_file(midi_file_path)

    return {"http_code": 200, "code": "Success", "message": "", "midi_file_name": midi_file_name}


if __name__ == '__main__':
    app.run(host='0.0.0.0')
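A client-side view of the two routes above; a sketch using `requests` (host and port assume Flask's default `app.run` settings, and the seed file path is hypothetical):

    import requests

    with open('seed.midi', 'rb') as f:
        seed = ','.join(str(b) for b in f.read())

    r = requests.post('http://localhost:5000/create-performance',
                      data={'seed_midi_file_data': seed,
                            'seconds_to_generate': 10,
                            'model_complexity': 'low'})
    midi_name = r.json()['midi_file_name']

    midi = requests.get('http://localhost:5000/performances/',
                        params={'midi_file_name': midi_name})
    open(midi_name, 'wb').write(midi.content)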
avg_line_length: 31.685039 | max_line_length: 115 | alphanum_fraction: 0.706511
qsc_*_quality_signal values (41, schema order): 551 | 4,024 | 4.794918 | 0.266788 | 0.13626 | 0.095382 | 0.069644 | 0.273656 | 0.133989 | 0.067373 | 0.047691 | 0 | 0 | 0 | 0.018037 | 0.187127 | 4,024 | 126 | 116 | 31.936508 | 0.789667 | 0.146123 | 0 | 0.043478 | 0 | 0 | 0.155403 | 0.011313 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072464 | false | 0 | 0.086957 | 0.014493 | 0.289855 | 0.014493
suffix-less qsc_* values (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
effective: 1 | hits: 0
---
hexsha: e02cef0666c1161f8f7f1e91555b80b350dae71e | size: 4,965 | ext: py | lang: Python
max_stars / max_issues / max_forks (identical): repo rafalbigaj/epidemic-model-visualization @ 35829180b5a53697b336e8615d854a21b3395f59 | path app.py | licenses ["Apache-2.0"] | count null | event datetimes null
content:
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import logging
import json
import os
import pandas as pd
from datetime import datetime
from datetime import timedelta
from urllib import parse
import requests

logger = logging.getLogger(__name__)

external_stylesheets = [dbc.themes.DARKLY]

is_cf_instance = os.environ.get('CF_INSTANCE_GUID', '') != ''
port = int(os.environ.get('PORT', 8050))
host = os.environ.get('CF_INSTANCE_INTERNAL_IP', '127.0.0.1')

wml_api_key = os.environ['WML_API_KEY']
wml_scoring_url = os.environ['WML_SCORING_URL']
url = parse.urlparse(wml_scoring_url)
wml_base_url = url._replace(path='').geturl()
wml_instance_id = url.path.split('/')[3]

logger.setLevel(logging.INFO if is_cf_instance else logging.DEBUG)
logger.info('Starting %s server: %s:%d', 'CF' if is_cf_instance else 'local', host, port)
logger.info('WML URL: %s', wml_base_url)
logger.info('WML instance ID: %s', wml_instance_id)

wml_credentials = {
    "apikey": wml_api_key,
    "instance_id": wml_instance_id,
    "url": wml_base_url,
}

iam_token_endpoint = 'https://iam.cloud.ibm.com/identity/token'


def _get_token():
    data = {
        'grant_type': 'urn:ibm:params:oauth:grant-type:apikey',
        'apikey': wml_credentials['apikey']
    }
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    response = requests.post(iam_token_endpoint, data=data, headers=headers)
    return response.json()['access_token']


def score(token, algorithm, start_date, country, predict_range, s, i, r):
    headers = {'Authorization': 'Bearer ' + token}
    payload = {
        "fields": ["algorithm", "start_date", "country", "predict_range", "S0", "I0", "R0"],
        "values": [[algorithm, start_date.strftime('%-m/%-d/%y'), country, predict_range, s, i, r]]
    }
    logger.info('Scoring with payload: %s', json.dumps(payload))
    response = requests.post(wml_scoring_url, json=payload, headers=headers)
    if response.status_code == 200:
        result = response.json()
    else:
        raise Exception('Scoring error [{}]: {}'.format(response.status_code, response.text))
    n_days = len(result['values'])
    index = [(start_date + timedelta(days=i)).strftime('%d/%m/%y') for i in range(n_days)]
    return pd.DataFrame(result['values'], columns=result['fields'], index=index)


def serve_layout():
    token = _get_token()
    # predict_range = 14
    # sir_result = score(token, 'SIR', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10)
    # logistic_result = score(token, 'LOGISTIC', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10)
    calibration_result = score(token, 'CALIBRATION', datetime(2020, 1, 22), 'Poland', 40, 10_000, 20, 10)
    # days = list(sir_result.index)
    days = list(calibration_result.index)

    calibration_result['ActualChange'] = calibration_result['Actual'] - calibration_result['Actual'].shift(1, fill_value=0)
    calibration_result['PredictedChange'] = calibration_result['Predicted'] - calibration_result['Predicted'].shift(1, fill_value=0)

    fig = make_subplots(specs=[[{"secondary_y": True}]])
    fig.add_trace(
        go.Bar(x=days, y=calibration_result['PredictedChange'], name='Predicted Change', opacity=0.5),
        secondary_y=True,
    )
    fig.add_trace(
        go.Bar(x=days, y=calibration_result['ActualChange'], name='Actual Change', opacity=0.5),
        secondary_y=True,
    )
    fig.add_trace(
        go.Scatter(x=days, y=calibration_result['Predicted'], name='Calibration'),
        secondary_y=False,
    )
    fig.add_trace(
        go.Scatter(x=days, y=calibration_result['Actual'], name='Actual', mode="markers", marker=dict(size=8)),
        secondary_y=False,
    )
    fig.update_layout(
        title="Prediction of confirmed cases for Poland",
        template="plotly_dark",
        height=900
    )
    fig.update_xaxes(title_text="Date")
    fig.update_yaxes(title_text="Total confirmed cases", secondary_y=False, range=[0, 6000])
    fig.update_yaxes(title_text="New cases per day", secondary_y=True, range=[0, 1000])

    # fig = go.Figure(
    #     data=[
    #         go.Scatter(x=days, y=sir_result['I'], name='SIR'),
    #         go.Scatter(x=days, y=logistic_result['I'], name='Logistic'),
    #     ],
    #     layout=go.Layout(
    #         title="COVID19 infected prediction in Poland",
    #         template="plotly_dark",
    #         height=600
    #     )
    # )

    return html.Div(children=[
        html.H1(children='COVID-19 Predictions with Watson Machine Learning'),
        dcc.Graph(
            id='example-graph',
            figure=fig
        )
    ])


app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = serve_layout

if __name__ == '__main__':
    app.run_server(debug=(not is_cf_instance), port=port, host=host)
avg_line_length: 34.006849 | max_line_length: 132 | alphanum_fraction: 0.67291
qsc_*_quality_signal values (41, schema order): 663 | 4,965 | 4.828054 | 0.321267 | 0.06373 | 0.011246 | 0.016245 | 0.207748 | 0.130272 | 0.099344 | 0.099344 | 0.099344 | 0.099344 | 0 | 0.02381 | 0.179456 | 4,965 | 145 | 133 | 34.241379 | 0.761905 | 0.110574 | 0 | 0.080808 | 0 | 0 | 0.186364 | 0.021364 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.141414 | 0 | 0.20202 | 0
suffix-less qsc_* values (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
effective: 1 | hits: 0
---
hexsha: e02e814aa08f31a0fd4f302fa151aca0b7af7756 | size: 984 | ext: py | lang: Python
max_stars / max_issues / max_forks (identical): repo Commonists/pageview-api @ 39e8b3c3c82f64a500e3dd4f306451c81c7e31b7 | path setup.py | licenses ["MIT"]
max_stars_count: 21 (2015-12-02T12:06:38.000Z – 2022-02-11T16:16:06.000Z) | max_issues_count: 3 (2016-04-19T19:56:25.000Z – 2020-08-27T09:52:42.000Z) | max_forks_count: 6 (2017-10-27T15:39:51.000Z – 2020-12-17T02:11:52.000Z)
content:
#!/usr/bin/python
# -*- coding: latin-1 -*-

"""Setup script."""

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

try:
    import pageviewapi
    version = pageviewapi.__version__
except ImportError:
    version = 'Undefined'

classifiers = [
    'Development Status :: 4 - Beta',
    'Environment :: Console',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Utilities'
]

packages = ['pageviewapi']
requires = ['requests', 'attrdict']

setup(
    name='pageviewapi',
    version=version,
    author='Commonists',
    author_email='ps.huard@gmail.com',
    url='http://github.com/Commonists/pageview-api',
    description='Wikimedia Pageview API client',
    long_description=open('README.md').read(),
    license='MIT',
    packages=packages,
    install_requires=requires,
    classifiers=classifiers
)
avg_line_length: 22.883721 | max_line_length: 52 | alphanum_fraction: 0.670732
qsc_*_quality_signal values (41, schema order): 99 | 984 | 6.59596 | 0.676768 | 0.082695 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002509 | 0.190041 | 984 | 42 | 53 | 23.428571 | 0.816813 | 0.054878 | 0 | 0.121212 | 0 | 0 | 0.390033 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.151515 | 0 | 0.151515 | 0
suffix-less qsc_* values (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
effective: 1 | hits: 0
---
hexsha: e030b341c624d43cef697abc742e82664391c682 | size: 416 | ext: py | lang: Python
max_stars / max_issues / max_forks (identical): repo juby-gif/assignment1 @ 3d39478fdc371e80a546caac545561145afbb080 | path task1b.py | licenses ["BSD-3-Clause"] | count null | event datetimes null
content:
# a2_t1b.py
# This program converts Celsius to Kelvin and Fahrenheit to Celsius


def c_to_k(c):
    k = c + 273.15  # formula to convert Celsius to Kelvin
    return k


def f_to_c(f):
    fa = (f - 32) * 5 / 9  # formula to convert Fahrenheit to Celsius
    return fa


c = 25.0
f = 100.0
k = c_to_k(c)
fa = f_to_c(f)
print("Celsius of " + str(c) + " is " + str(k) + " in Kelvin")
print("Fahrenheit of " + str(f) + " is " + str(fa) + " in Celsius")
avg_line_length: 24.470588 | max_line_length: 67 | alphanum_fraction: 0.605769
qsc_*_quality_signal values (41, schema order): 82 | 416 | 2.963415 | 0.378049 | 0.032922 | 0.131687 | 0.148148 | 0.197531 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057325 | 0.245192 | 416 | 16 | 68 | 26 | 0.716561 | 0.305288 | 0 | 0 | 0 | 0 | 0.192982 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0 | 0.333333 | 0.166667
suffix-less qsc_* values (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
effective: 1 | hits: 0
---
hexsha: e032bc66a6f5b0a211c59ba883502067921d3427 | size: 2,961 | ext: py | lang: Python
max_stars / max_issues / max_forks (identical): repo goodreferences/ElasticQuery @ 579e387c5a7c1cbbeab999050c0d2faa80ded821 | path tests/test_dsl.py | licenses ["MIT"] | count null | event datetimes null
content:
# ElasticQuery
# File: tests/test_dsl.py
# Desc: tests for ElasticQuery DSL objects (Filter, Query, Aggregate)

from os import path
from unittest import TestCase

from jsontest import JsonTest

from elasticquery import Query, Aggregate, Suggester
from elasticquery.exceptions import (
    NoQueryError, NoAggregateError, NoSuggesterError,
    MissingArgError
)
from .util import assert_equal

CLASS_NAMES = {
    '_query': Query
}


def _test_query(self, query, test_name, test_data):
    method = getattr(query, test_name)

    def parse_arg(arg):
        if isinstance(arg, list):
            return [parse_arg(a) for a in arg]
        else:
            return (
                CLASS_NAMES[arg](arg, {})
                if (isinstance(arg, basestring) and arg.startswith('_'))
                else arg
            )

    args = test_data.get('args', [])
    args = parse_arg(args)

    kwargs = test_data.get('kwargs', {})
    kwargs = {
        k: parse_arg(v)
        for k, v in kwargs.iteritems()
    }

    output = method(*args, **kwargs).dict()
    assert_equal(self, output, test_data['output'])


class TestQueries(TestCase):
    __metaclass__ = JsonTest

    jsontest_files = path.join('tests', 'queries')
    jsontest_function = lambda self, test_name, test_data: (
        _test_query(self, Query, test_name, test_data)
    )


class TestAggregates(TestCase):
    __metaclass__ = JsonTest

    jsontest_files = path.join('tests', 'aggregates')
    jsontest_function = lambda self, test_name, test_data: (
        _test_query(self, Aggregate, test_name, test_data)
    )


class TestSuggesters(TestCase):
    __metaclass__ = JsonTest

    jsontest_files = path.join('tests', 'suggesters')
    jsontest_function = lambda self, test_name, test_data: (
        _test_query(self, Suggester, test_name, test_data)
    )


class TestFails(TestCase):
    def test_no_query(self):
        with self.assertRaises(NoQueryError):
            Query.doesnotexist()

    def test_no_aggregate(self):
        with self.assertRaises(NoAggregateError):
            Aggregate.doesnotexist()

    def test_no_suggester(self):
        with self.assertRaises(NoSuggesterError):
            Suggester.doesnotexist()

    def test_missing_arg(self):
        with self.assertRaises(MissingArgError):
            Query.term(None)

    def test_invalid_arg(self):
        # Test passing not a list
        with self.assertRaises(ValueError):
            Query.bool(must=set())

        # And now an invalid list
        with self.assertRaises(ValueError):
            Query.bool(must=[None])

        # And now an invalid list
        with self.assertRaises(ValueError):
            Query.bool(must=[Aggregate.terms('test', 'test')])

        # And now an invalid list
        with self.assertRaises(ValueError):
            Query.range('field', gte=['error'])

        # Empty list should be OK/ignored
        Query.bool(must=[])
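`_test_query` pulls `args`, `kwargs`, and `output` out of each JSON test case, keyed by the DSL method name it exercises; a plausible case under tests/queries/ might look like the following (a sketch only; the exact file layout depends on the `jsontest` package):

    {
        "term": {
            "args": ["user", "kimchy"],
            "output": {"term": {"user": "kimchy"}}
        }
    }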
avg_line_length: 26.675676 | max_line_length: 72 | alphanum_fraction: 0.646066
qsc_*_quality_signal values (41, schema order): 339 | 2,961 | 5.448378 | 0.277286 | 0.043313 | 0.086627 | 0.060639 | 0.371955 | 0.323768 | 0.323768 | 0.323768 | 0.185707 | 0.185707 | 0 | 0 | 0.251266 | 2,961 | 110 | 73 | 26.918182 | 0.833108 | 0.078352 | 0 | 0.136986 | 0 | 0 | 0.030515 | 0 | 0 | 0 | 0 | 0 | 0.136986 | 1 | 0.09589 | false | 0 | 0.082192 | 0 | 0.383562 | 0
suffix-less qsc_* values (schema order): all 0, except qsc_code_frac_words_unique = null and qsc_code_frac_lines_string_concat = null
effective: 1 | hits: 0
e036c8bce2480207e7560bdb8a009054bcbca43d
| 1,333
|
py
|
Python
|
Task/Parallel-calculations/Python/parallel-calculations-2.py
|
LaudateCorpus1/RosettaCodeData
|
9ad63ea473a958506c041077f1d810c0c7c8c18d
|
[
"Info-ZIP"
] | 1
|
2018-11-09T22:08:38.000Z
|
2018-11-09T22:08:38.000Z
|
Task/Parallel-calculations/Python/parallel-calculations-2.py
|
seanwallawalla-forks/RosettaCodeData
|
9ad63ea473a958506c041077f1d810c0c7c8c18d
|
[
"Info-ZIP"
] | null | null | null |
Task/Parallel-calculations/Python/parallel-calculations-2.py
|
seanwallawalla-forks/RosettaCodeData
|
9ad63ea473a958506c041077f1d810c0c7c8c18d
|
[
"Info-ZIP"
] | 1
|
2018-11-09T22:08:40.000Z
|
2018-11-09T22:08:40.000Z
|
import multiprocessing
# ========== #Python3 - concurrent
from math import floor, sqrt
numbers = [
112272537195293,
112582718962171,
112272537095293,
115280098190773,
115797840077099,
1099726829285419]
# numbers = [33, 44, 55, 275]
def lowest_factor(n, _start=3):
if n % 2 == 0:
return 2
search_max = int(floor(sqrt(n))) + 1
for i in range(_start, search_max, 2):
if n % i == 0:
return i
return n
def prime_factors(n, lowest):
pf = []
while n > 1:
pf.append(lowest)
n //= lowest
lowest = lowest_factor(n, max(lowest, 3))
return pf
# ========== #Python3 - concurrent
def prime_factors_of_number_with_lowest_prime_factor(numbers):
pool = multiprocessing.Pool(processes=5)
    factors = pool.map(lowest_factor, numbers)
    low_factor, number = max((l, f) for l, f in zip(factors, numbers))
    all_factors = prime_factors(number, low_factor)
    return number, all_factors
if __name__ == '__main__':
print('For these numbers:')
print('\n '.join(str(p) for p in numbers))
number, all_factors = prime_factors_of_number_with_lowest_prime_factor(numbers)
print(' The one with the largest minimum prime factor is {}:'.format(number))
print(' All its prime factors in order are: {}'.format(all_factors))
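# Worked example (not in the original task code): for n = 33, lowest_factor(33)
# scans 3, 5 and returns 3; prime_factors(33, 3) then walks 33 -> 11 -> 1 and
# yields [3, 11]. The pool.map call above simply applies lowest_factor to every
# entry of `numbers` in parallel before the serial factorisation of the winner.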
| 28.361702
| 84
| 0.650413
| 177
| 1,333
| 4.689266
| 0.39548
| 0.072289
| 0.031325
| 0.048193
| 0.115663
| 0.115663
| 0.115663
| 0.115663
| 0.115663
| 0
| 0
| 0.108738
| 0.227307
| 1,333
| 46
| 85
| 28.978261
| 0.697087
| 0.068267
| 0
| 0
| 0
| 0
| 0.105178
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.055556
| 0
| 0.277778
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e037f80102198e6c3f910c89e80dfa13f614bfb4
| 1,109
|
py
|
Python
|
BigData/sparkTask/test.py
|
Rainstyd/rainsty
|
9a0d5f46c20faf909c4194f315fb9960652cffc6
|
[
"Apache-2.0"
] | 1
|
2020-03-25T01:13:35.000Z
|
2020-03-25T01:13:35.000Z
|
BigData/sparkTask/test.py
|
Rainstyed/rainsty
|
f74e0ccaf16d1871c9d1870bd8a7c8a63243fcf5
|
[
"Apache-2.0"
] | 1
|
2022-01-06T23:49:21.000Z
|
2022-01-06T23:49:21.000Z
|
BigData/sparkTask/test.py
|
rainstyd/rainsty
|
9a0d5f46c20faf909c4194f315fb9960652cffc6
|
[
"Apache-2.0"
] | 1
|
2020-03-20T08:48:36.000Z
|
2020-03-20T08:48:36.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author: rainsty
@file: test.py
@time: 2020-01-04 18:36:57
@description:
"""
import os
from pyspark.sql import SparkSession
os.environ['JAVA_HOME'] = '/root/jdk'
os.environ['SPARK_HOME'] = '/root/spark'
os.environ['PYTHON_HOME'] = "/root/python"
os.environ['PYSPARK_PYTHON'] = "/usr/bin/python"
os.environ['SPARK_MASTER_IP'] = 'rainsty'
def create_spark_context():
sc = SparkSession.builder \
.appName("TestSparkSession") \
.master("spark://rainsty:7077") \
.config('spark.executor.num', '1')\
.config('spark.executor.memory', '512m')\
.config("spark.executor.cores", '1')\
.config('spark.cores.max', '1')\
.config('spark.driver.memory', '512m') \
.getOrCreate()
return sc
logFile = "/root/spark/README.md"
spark = create_spark_context()
logData = spark.read.text(logFile).cache()
numAs = logData.filter(logData.value.contains('a')).count()
numBs = logData.filter(logData.value.contains('b')).count()
print("Lines with a: %i, lines with b: %i" % (numAs, numBs))
spark.stop()
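# Usage note (an assumption, not part of the original script): run this file
# directly with `python test.py` on the master host, or ship it with
# `spark-submit --master spark://rainsty:7077 test.py`. Both filter() calls
# are lazy; README.md is only read when the two count() actions execute.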
| 24.108696
| 60
| 0.640216
| 142
| 1,109
| 4.929577
| 0.507042
| 0.064286
| 0.081429
| 0.071429
| 0.094286
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030011
| 0.158702
| 1,109
| 45
| 61
| 24.644444
| 0.720257
| 0.103697
| 0
| 0
| 0
| 0
| 0.314721
| 0.04264
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.08
| 0
| 0.16
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e039092d052960d2f6c3a01770cd6d300e7b630a
| 8,810
|
py
|
Python
|
json_codegen/generators/python3_marshmallow/object_generator.py
|
expobrain/json-schema-codegen
|
e22b386333c6230e5d6f5984fd947fdd7b947e82
|
[
"MIT"
] | 21
|
2018-06-15T16:08:57.000Z
|
2022-02-11T16:16:11.000Z
|
json_codegen/generators/python3_marshmallow/object_generator.py
|
expobrain/json-schema-codegen
|
e22b386333c6230e5d6f5984fd947fdd7b947e82
|
[
"MIT"
] | 14
|
2018-08-09T18:02:19.000Z
|
2022-01-24T18:04:17.000Z
|
json_codegen/generators/python3_marshmallow/object_generator.py
|
expobrain/json-schema-codegen
|
e22b386333c6230e5d6f5984fd947fdd7b947e82
|
[
"MIT"
] | 4
|
2018-11-30T18:19:10.000Z
|
2021-11-18T04:04:36.000Z
|
import ast
from json_codegen.generators.python3_marshmallow.utils import Annotations, class_name
class ObjectGenerator(object):
@staticmethod
def _get_property_name(node_assign):
name = node_assign.targets[0]
return name.id
@staticmethod
def _nesting_class(node_assign):
for node in ast.walk(node_assign):
if isinstance(node, ast.Call):
if node.func.attr == "Nested":
return class_name(node.args[0].id)
@staticmethod
def _non_primitive_nested_list(node_assign):
if node_assign.value.func.attr == "List":
return (
len(node_assign.value.args) > 0 and node_assign.value.args[0].func.attr == "Nested"
)
else:
return False
@staticmethod
def _init_non_primitive_nested_class(node_assign, object_, prop):
"""
        If the nested list is non-primitive, initialise its sub-classes in a
        list comprehension; if the nested list is primitive, we can simply get
        it and Marshmallow will do the type marshalling.
"""
return ast.ListComp(
elt=ast.Call(
func=ast.Name(id=ObjectGenerator._nesting_class(node_assign)),
args=[ast.Name(id="el")],
keywords=[],
),
generators=[
ast.comprehension(
target=ast.Name(id="el"),
iter=ast.Call(
func=ast.Attribute(value=ast.Name(id=object_), attr="get"),
args=[ast.Str(s=prop), ast.Dict(keys=[], values=[])],
keywords=[],
),
ifs=[],
is_async=0,
)
],
)
@staticmethod
def _get_key_from_object(object_, prop):
return ast.Call(
func=ast.Attribute(value=ast.Name(id=object_), attr="get"),
args=[ast.Str(s=prop)],
keywords=[],
)
@staticmethod
def _hint_required_property(node_assign, value, object_, prop):
for node in ast.walk(node_assign):
if isinstance(node, ast.keyword):
if "required" in node.arg:
value = ast.Subscript(
value=ast.Name(id=object_), slice=ast.Index(value=ast.Str(s=prop))
)
return value
@staticmethod
def _get_default_for_property(node_assign, value, object_, prop):
for node in ast.walk(node_assign):
if isinstance(node, ast.keyword) and node.arg == "required":
return value
for node in ast.walk(node_assign):
if isinstance(node, ast.keyword) and node.arg == "default":
default_value = [
keyword.value
for keyword in node_assign.value.keywords
if keyword.arg == "default"
][0]
value.args.append(default_value)
return value
else:
return value
@staticmethod
def assign_property(node_assign, object_):
"""
Required property -> self.prop = parent_dict["prop"]
Optional property -> self.prop = parent_dict.get("prop")
        Primitive nested list -> self.prop = parent_dict.get("prop")
        Non-primitive nested list -> self.props = [PropertyClass(el) for el in parent_dict.get('props', {})]
"""
prop = ObjectGenerator._get_property_name(node_assign)
if ObjectGenerator._non_primitive_nested_list(node_assign):
value = ObjectGenerator._init_non_primitive_nested_class(node_assign, object_, prop)
else:
# Assign the property as self.prop = table.get("prop")
value = ObjectGenerator._get_key_from_object(object_, prop)
# If the property is required, assign as self.prop = table["prop"]
value = ObjectGenerator._hint_required_property(node_assign, value, object_, prop)
value = ObjectGenerator._get_default_for_property(node_assign, value, object_, prop)
return ast.AnnAssign(
target=ast.Attribute(value=ast.Name(id="self"), attr=prop),
value=value,
simple=0,
annotation=Annotations(node_assign).type,
)
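    # Illustration (not part of the original module): for a required schema
    # property "age", the AST built above unparses to roughly
    #     self.age: <annotation> = object_["age"]
    # while an optional non-primitive list "tags" becomes
    #     self.tags: <annotation> = [Tag(el) for el in object_.get('tags', {})]
    # ("age", "tags" and Tag are hypothetical names; the annotation comes from
    # Annotations(node_assign).type).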
@staticmethod
def construct_class(schema):
name = class_name(schema.name)
name_lower = name.lower()
# Bundle function arguments and keywords
fn_arguments = ast.arguments(
args=[
ast.arg(arg="self", annotation=None),
ast.arg(arg=name_lower, annotation=ast.Name(id="dict")),
],
vararg=None,
kwarg=None,
kwonlyargs=[],
kw_defaults=[],
defaults=[],
)
fn_body = [
ObjectGenerator.assign_property(node, name_lower)
for node in schema.body
if isinstance(node, ast.Assign)
]
# pass if no Assign nodes
if len(fn_body) == 0:
fn_body = [ast.Pass()]
# Generate class constructor
class_body = [
ast.FunctionDef(
name="__init__", args=fn_arguments, body=fn_body, decorator_list=[], returns=None
),
ObjectGenerator._construct_to_("json")(schema),
ObjectGenerator._construct_to_("dict")(schema),
ObjectGenerator.construct_from_json(schema),
]
return ast.ClassDef(name=name, bases=[], body=class_body, decorator_list=[], keywords=[])
@staticmethod
def _construct_to_(output):
if output == "json":
method = "dumps"
elif output == "dict":
method = "dump"
else:
raise NotImplementedError("Only deserialisation to json or dict supported")
def _construct_to_helper(schema):
fn_args = ast.arguments(
args=[ast.arg(arg="self", annotation=None)],
vararg=None,
kwonlyargs=[],
kw_defaults=[],
kwarg=None,
defaults=[],
)
fn_body = [
ast.Return(
value=ast.Attribute(
value=ast.Call(
func=ast.Attribute(
value=ast.Call(
func=ast.Name(id=schema.name),
args=[],
keywords=[
ast.keyword(
arg="strict", value=ast.NameConstant(value=True)
)
],
),
attr=method,
),
args=[ast.Name(id="self")],
keywords=[],
),
attr="data",
)
)
]
return ast.FunctionDef(
name=f"to_{output}", args=fn_args, body=fn_body, decorator_list=[], returns=None
)
return _construct_to_helper
@staticmethod
def construct_from_json(schema):
fn_args = ast.arguments(
args=[
ast.arg(arg="json", annotation=ast.Name(id="str")),
ast.arg(arg="only", annotation=None),
],
vararg=None,
kwonlyargs=[],
kw_defaults=[],
kwarg=None,
defaults=[ast.NameConstant(value=None)],
)
fn_body = [
ast.Return(
ast.Attribute(
value=ast.Call(
func=ast.Attribute(
value=ast.Call(
func=ast.Name(id=schema.name),
args=[],
keywords=[
ast.keyword(arg="strict", value=ast.NameConstant(value=True)),
ast.keyword(arg="only", value=ast.Name(id="only")),
],
),
attr="loads",
),
args=[ast.Name(id="json")],
keywords=[],
),
attr="data",
)
)
]
return ast.FunctionDef(
name="from_json",
args=fn_args,
body=fn_body,
decorator_list=[ast.Name(id="staticmethod")],
returns=None,
)
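# End-to-end illustration (hypothetical usage; `schema` stands for the parsed
# blueprint node this generator expects, and ast.unparse needs Python 3.9+):
#
#     tree = ObjectGenerator.construct_class(schema)
#     print(ast.unparse(tree))
#
# which should print a class exposing __init__, to_json, to_dict and from_json.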
| 34.414063
| 108
| 0.484222
| 827
| 8,810
| 4.975816
| 0.169287
| 0.055893
| 0.032807
| 0.023815
| 0.409964
| 0.37497
| 0.323451
| 0.293074
| 0.263183
| 0.196598
| 0
| 0.001743
| 0.413848
| 8,810
| 255
| 109
| 34.54902
| 0.79512
| 0.075596
| 0
| 0.402913
| 0
| 0
| 0.029182
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058252
| false
| 0.004854
| 0.009709
| 0.004854
| 0.145631
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e03c2a58883f30a7a78a6973c7fd5ce571d96bba
| 1,746
|
py
|
Python
|
result2gaofentype/pkl2txt_ggm.py
|
G-Naughty/Fine-grained-OBB-Detection
|
8c82c4c178f0b6bba077ff9d906a81bf8e04789c
|
[
"Apache-2.0"
] | 2
|
2022-02-06T07:45:03.000Z
|
2022-03-11T14:18:32.000Z
|
result2gaofentype/pkl2txt_ggm.py
|
G-Naughty/Fine-grained-OBB-Detection
|
8c82c4c178f0b6bba077ff9d906a81bf8e04789c
|
[
"Apache-2.0"
] | null | null | null |
result2gaofentype/pkl2txt_ggm.py
|
G-Naughty/Fine-grained-OBB-Detection
|
8c82c4c178f0b6bba077ff9d906a81bf8e04789c
|
[
"Apache-2.0"
] | null | null | null |
import BboxToolkit as bt
import pickle
import copy
import numpy as np
path1="/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/dets.pkl"
path2="/home/hnu1/GGM/OBBDetection/data/FaIR1M/test/annfiles/ori_annfile.pkl"#
with open(path2,'rb') as f: #/home/disk/FAIR1M_1000_split/val/annfiles/ori_annfile.pkl
data2 = pickle.load(f)
with open(path1,'rb') as f:
obbdets = pickle.load(f)
polydets=copy.deepcopy(obbdets)
for i in range(len(obbdets)):
for j in range(len(obbdets[0][1])):
data=obbdets[i][1][j]
if data.size!= 0:
polys=[]
for k in range(len(data)):
poly = bt.obb2poly(data[k][0:5])
poly=np.append(poly,data[k][5])
polys.append(poly)
else:
polys=[]
polydets[i][1][j]=polys
savepath="/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/result_txt/"
for i in range(len(polydets)):
txtfile=savepath+polydets[i][0]+".txt"
f = open(txtfile, "w")
for j in range(len(polydets[0][1])):
if polydets[i][1][j]!=[]:
for k in range(len(polydets[i][1][j])):
f.write(str(polydets[i][1][j][k][0])+" "+
str(polydets[i][1][j][k][1])+" "+
str(polydets[i][1][j][k][2])+" "+
str(polydets[i][1][j][k][3])+" "+
str(polydets[i][1][j][k][4])+" "+
str(polydets[i][1][j][k][5])+" "+
str(polydets[i][1][j][k][6])+" "+
str(polydets[i][1][j][k][7])+" "+
str(data2["cls"][j])+" "+
str(polydets[i][1][j][k][8])+"\n")
f.close()
| 40.604651
| 95
| 0.512027
| 248
| 1,746
| 3.552419
| 0.298387
| 0.029512
| 0.044268
| 0.14983
| 0.38252
| 0.287174
| 0.133939
| 0.133939
| 0.133939
| 0.133939
| 0
| 0.037873
| 0.289233
| 1,746
| 43
| 96
| 40.604651
| 0.672039
| 0.032646
| 0
| 0.04878
| 0
| 0
| 0.148697
| 0.135071
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.097561
| 0
| 0.097561
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e03e3fafddd8bfe7f29e435a8b1b27b522698dbd
| 938
|
py
|
Python
|
initializer_3d.py
|
HarperCallahan/taichi_ferrofluid
|
6113f6c7d9d9d612b6dadc500cf91b576c2d05ea
|
[
"MIT"
] | null | null | null |
initializer_3d.py
|
HarperCallahan/taichi_ferrofluid
|
6113f6c7d9d9d612b6dadc500cf91b576c2d05ea
|
[
"MIT"
] | null | null | null |
initializer_3d.py
|
HarperCallahan/taichi_ferrofluid
|
6113f6c7d9d9d612b6dadc500cf91b576c2d05ea
|
[
"MIT"
] | null | null | null |
import taichi as ti
import utils
from apic_extension import *
@ti.data_oriented
class Initializer3D: # tmp initializer
def __init__(self, res, x0, y0, z0, x1, y1, z1):
self.res = res
self.x0 = int(res * x0)
self.y0 = int(res * y0)
self.z0 = int(res * z0)
self.x1 = int(res * x1)
self.y1 = int(res * y1)
self.z1 = int(res * z1)
@ti.kernel
def init_kernel(self, cell_type : ti.template()):
for i, j, k in cell_type:
if i >= self.x0 and i <= self.x1 and \
j >= self.y0 and j <= self.y1 and \
k >= self.z0 and k <= self.z1:
cell_type[i, j, k] = utils.FLUID
def init_scene(self, simulator):
self.init_kernel(simulator.cell_type)
dx = simulator.dx
simulator.level_set.initialize_with_aabb((self.x0 * dx, self.y0 * dx, self.z0 * dx), (self.x1 * dx, self.y1 * dx, self.z1 * dx))
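# A minimal driver sketch (assumed usage mirroring the signatures above;
# `simulator` is a hypothetical object exposing cell_type, dx and level_set):
#
#     init = Initializer3D(64, 0.1, 0.1, 0.1, 0.5, 0.5, 0.5)
#     init.init_scene(simulator)  # marks the box [x0,x1]x[y0,y1]x[z0,z1] as FLUID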
| 31.266667
| 136
| 0.557569
| 146
| 938
| 3.472603
| 0.308219
| 0.071006
| 0.011834
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048062
| 0.312367
| 938
| 29
| 137
| 32.344828
| 0.737985
| 0.015991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e03ebf0129e76590fab9b3f72a3301cc3f5c22ca
| 1,265
|
py
|
Python
|
copy_block_example.py
|
MilesCranmer/bifrost_paper
|
654408cd7e34e7845cee58100fe459e1422e4859
|
[
"MIT"
] | null | null | null |
copy_block_example.py
|
MilesCranmer/bifrost_paper
|
654408cd7e34e7845cee58100fe459e1422e4859
|
[
"MIT"
] | null | null | null |
copy_block_example.py
|
MilesCranmer/bifrost_paper
|
654408cd7e34e7845cee58100fe459e1422e4859
|
[
"MIT"
] | null | null | null |
from copy import deepcopy
import bifrost as bf
from bifrost.pipeline import TransformBlock
from bifrost.ndarray import copy_array
class CopyBlock(TransformBlock):# $\tikzmark{block-start}$
"""Copy the input ring to output ring"""
def __init__(self, iring, space):
super(CopyBlock, self).__init__(iring)
self.orings = [self.create_ring(space=space)]
def on_sequence(self, iseq):
return deepcopy(iseq.header)
def on_data(self, ispan, ospan):
copy_array(ospan.data, ispan.data)#$\tikzmark{block-end}$
def copy_block(iring, space):
return CopyBlock(iring, space)
bc = bf.BlockChainer()
bc.blocks.read_wav(['hey_jude.wav'], gulp_nframe=4096)
bc.custom(copy_block)(space='cuda')# $\tikzmark{gpu-start}$
bc.views.split_axis('time', 256, label='fine_time')
bc.blocks.fft(axes='fine_time', axis_labels='freq')
bc.blocks.detect(mode='scalar')
bc.blocks.transpose(['time', 'pol', 'freq'])#$\tikzmark{gpu-end}$
bc.blocks.copy(space='system')
bc.blocks.quantize('i8')
bc.blocks.write_sigproc()
pipeline = bf.get_default_pipeline()# $\tikzmark{pipeline-start}$
pipeline.shutdown_on_signals()
pipeline.run()#$\tikzmark{pipeline-end}$
| 30.853659
| 98
| 0.674308
| 166
| 1,265
| 4.975904
| 0.463855
| 0.067797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007678
| 0.176285
| 1,265
| 40
| 99
| 31.625
| 0.785029
| 0.196838
| 0
| 0
| 0
| 0
| 0.066667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.148148
| false
| 0
| 0.148148
| 0.074074
| 0.407407
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e041875337916a4d8560bbab0e0b68edca74373b
| 13,929
|
py
|
Python
|
src/solutions/common/integrations/cirklo/api.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
src/solutions/common/integrations/cirklo/api.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
src/solutions/common/integrations/cirklo/api.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import cloudstorage
import logging
from babel.dates import format_datetime
from datetime import datetime
from google.appengine.ext import ndb, deferred, db
from typing import List
from xlwt import Worksheet, Workbook, XFStyle
from mcfw.cache import invalidate_cache
from mcfw.consts import REST_TYPE_TO
from mcfw.exceptions import HttpBadRequestException, HttpForbiddenException, HttpNotFoundException
from mcfw.restapi import rest
from mcfw.rpc import returns, arguments
from rogerthat.bizz.gcs import get_serving_url
from rogerthat.bizz.service import re_index_map_only
from rogerthat.consts import FAST_QUEUE
from rogerthat.models import ServiceIdentity
from rogerthat.models.settings import ServiceInfo
from rogerthat.rpc import users
from rogerthat.rpc.users import get_current_session
from rogerthat.utils import parse_date
from rogerthat.utils.service import create_service_identity_user
from shop.models import Customer
from solutions import translate
from solutions.common.bizz import SolutionModule, broadcast_updates_pending
from solutions.common.bizz.campaignmonitor import send_smart_email_without_check
from solutions.common.consts import OCA_FILES_BUCKET
from solutions.common.dal import get_solution_settings
from solutions.common.integrations.cirklo.cirklo import get_city_id_by_service_email, whitelist_merchant, \
list_whitelisted_merchants, list_cirklo_cities
from solutions.common.integrations.cirklo.models import CirkloCity, CirkloMerchant, SignupLanguageProperty, \
SignupMails, CirkloAppInfo
from solutions.common.integrations.cirklo.to import CirkloCityTO, CirkloVoucherListTO, CirkloVoucherServiceTO, \
WhitelistVoucherServiceTO
from solutions.common.restapi.services import _check_is_city
def _check_permission(city_sln_settings):
if SolutionModule.CIRKLO_VOUCHERS not in city_sln_settings.modules:
raise HttpForbiddenException()
if len(city_sln_settings.modules) != 1:
_check_is_city(city_sln_settings.service_user)
@rest('/common/vouchers/cities', 'get', silent_result=True)
@returns([dict])
@arguments(staging=bool)
def api_list_cirklo_cities(staging=False):
return list_cirklo_cities(staging)
@rest('/common/vouchers/services', 'get', silent_result=True)
@returns(CirkloVoucherListTO)
@arguments()
def get_cirklo_vouchers_services():
city_service_user = users.get_current_user()
city_sln_settings = get_solution_settings(city_service_user)
_check_permission(city_sln_settings)
to = CirkloVoucherListTO()
to.total = 0
to.results = []
to.cursor = None
to.more = False
cirklo_city = CirkloCity.get_by_service_email(city_service_user.email())
if not cirklo_city:
return to
cirklo_merchants = list_whitelisted_merchants(cirklo_city.city_id)
cirklo_dict = {}
cirklo_emails = []
for merchant in cirklo_merchants:
if merchant['email'] in cirklo_emails:
logging.error('Duplicate found %s', merchant['email'])
continue
cirklo_emails.append(merchant['email'])
cirklo_dict[merchant['email']] = merchant
qry = CirkloMerchant.list_by_city_id(cirklo_city.city_id) # type: List[CirkloMerchant]
osa_merchants = []
for merchant in qry:
if merchant.service_user_email:
osa_merchants.append(merchant)
else:
cirklo_merchant = cirklo_dict.get(merchant.data['company']['email'])
if cirklo_merchant:
if merchant.data['company']['email'] in cirklo_emails:
cirklo_emails.remove(merchant.data['company']['email'])
if not merchant.whitelisted:
merchant.whitelisted = True
merchant.put()
elif merchant.whitelisted:
merchant.whitelisted = False
merchant.put()
whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None
merchant_registered = 'shopInfo' in cirklo_merchant if cirklo_merchant else False
to.results.append(
CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'Cirklo signup'))
if osa_merchants:
customer_to_get = [Customer.create_key(merchant.customer_id) for merchant in osa_merchants]
customers_dict = {customer.id: customer for customer in db.get(customer_to_get)}
info_keys = [ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT)
for merchant in osa_merchants]
models = ndb.get_multi(info_keys)
for service_info, merchant in zip(models, osa_merchants):
customer = customers_dict[merchant.customer_id]
if not customer.service_user:
merchant.key.delete()
continue
cirklo_merchant = cirklo_dict.get(customer.user_email)
should_save = False
if cirklo_merchant:
if customer.user_email in cirklo_emails:
cirklo_emails.remove(customer.user_email)
if not merchant.whitelisted:
merchant.whitelisted = True
should_save = True
elif merchant.whitelisted:
merchant.whitelisted = False
should_save = True
if should_save:
merchant.put()
service_identity_user = create_service_identity_user(customer.service_user)
deferred.defer(re_index_map_only, service_identity_user)
whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None
merchant_registered = 'shopInfo' in cirklo_merchant if cirklo_merchant else False
service_to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'OSA signup')
service_to.populate_from_info(service_info, customer)
to.results.append(service_to)
for email in cirklo_emails:
cirklo_merchant = cirklo_dict[email]
to.results.append(CirkloVoucherServiceTO.from_cirklo_info(cirklo_merchant))
return to
@rest('/common/vouchers/services/whitelist', 'put', type=REST_TYPE_TO)
@returns(CirkloVoucherServiceTO)
@arguments(data=WhitelistVoucherServiceTO)
def whitelist_voucher_service(data):
city_service_user = users.get_current_user()
city_sln_settings = get_solution_settings(city_service_user)
_check_permission(city_sln_settings)
cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) # type: CirkloCity
if not cirklo_city:
raise HttpNotFoundException('No cirklo settings found.')
is_cirklo_only_merchant = '@' not in data.id
if is_cirklo_only_merchant:
merchant = CirkloMerchant.create_key(long(data.id)).get() # type: CirkloMerchant
language = merchant.get_language()
else:
merchant = CirkloMerchant.create_key(data.id).get()
language = get_solution_settings(users.User(merchant.service_user_email)).main_language
if data.accepted:
email_id = cirklo_city.get_signup_accepted_mail(language)
if not email_id:
            raise HttpBadRequestException('City settings aren\'t fully set up yet.')
whitelist_merchant(cirklo_city.city_id, data.email)
deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1,
_queue=FAST_QUEUE)
else:
email_id = cirklo_city.get_signup_accepted_mail(language)
if not email_id:
            raise HttpBadRequestException('City settings aren\'t fully set up yet.')
deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1,
_queue=FAST_QUEUE)
whitelist_date = datetime.now().isoformat() + 'Z' if data.accepted else None
if not is_cirklo_only_merchant:
if data.accepted:
merchant.whitelisted = True
else:
merchant.denied = True
merchant.put()
service_info = ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT).get()
customer = Customer.get_by_id(merchant.customer_id) # type: Customer
if data.accepted:
service_identity_user = create_service_identity_user(customer.service_user)
deferred.defer(re_index_map_only, service_identity_user)
to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'OSA signup')
to.populate_from_info(service_info, customer)
return to
else:
if data.accepted:
merchant.whitelisted = True
else:
merchant.denied = True
merchant.put()
return CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'Cirklo signup')
@rest('/common/vouchers/cirklo', 'get')
@returns(CirkloCityTO)
@arguments()
def api_vouchers_get_cirklo_settings():
service_user = users.get_current_user()
city = CirkloCity.get_by_service_email(service_user.email())
return CirkloCityTO.from_model(city)
@rest('/common/vouchers/cirklo', 'put')
@returns(CirkloCityTO)
@arguments(data=CirkloCityTO)
def api_vouchers_save_cirklo_settings(data):
service_user = users.get_current_user()
if not get_current_session().shop:
lang = get_solution_settings(service_user).main_language
raise HttpForbiddenException(translate(lang, 'no_permission'))
other_city = CirkloCity.get_by_service_email(service_user.email()) # type: CirkloCity
if not data.city_id:
if other_city:
other_city.key.delete()
return CirkloCityTO.from_model(None)
key = CirkloCity.create_key(data.city_id)
city = key.get()
if not city:
city = CirkloCity(key=key, service_user_email=service_user.email())
elif city.service_user_email != service_user.email():
raise HttpBadRequestException('City id %s is already in use by another service' % data.city_id)
if other_city and other_city.key != key:
other_city.key.delete()
invalidate_cache(get_city_id_by_service_email, service_user.email())
city.logo_url = data.logo_url
city.signup_enabled = data.signup_enabled
city.signup_logo_url = data.signup_logo_url
city.signup_names = None
city.signup_mail = SignupMails.from_to(data.signup_mail)
if data.signup_name_nl and data.signup_name_fr:
city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl,
fr=data.signup_name_fr)
elif data.signup_name_nl:
city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl,
fr=data.signup_name_nl)
elif data.signup_name_fr:
city.signup_names = SignupLanguageProperty(nl=data.signup_name_fr,
fr=data.signup_name_fr)
og_info = city.app_info and city.app_info.to_dict()
info = CirkloAppInfo(enabled=data.app_info.enabled,
title=data.app_info.title,
buttons=data.app_info.buttons)
sln_settings = get_solution_settings(service_user)
if info.to_dict() != og_info and not sln_settings.ciklo_vouchers_only():
city.app_info = info
sln_settings.updates_pending = True
sln_settings.put()
broadcast_updates_pending(sln_settings)
city.put()
return CirkloCityTO.from_model(city)
@rest('/common/vouchers/cirklo/export', 'post')
@returns(dict)
@arguments()
def api_export_cirklo_services():
service_user = users.get_current_user()
city_sln_settings = get_solution_settings(service_user)
_check_permission(city_sln_settings)
all_services = get_cirklo_vouchers_services()
if all_services.cursor:
raise NotImplementedError()
book = Workbook(encoding='utf-8')
sheet = book.add_sheet('Cirklo') # type: Worksheet
language = city_sln_settings.main_language
sheet.write(0, 0, translate(language, 'reservation-name'))
sheet.write(0, 1, translate(language, 'Email'))
sheet.write(0, 2, translate(language, 'address'))
sheet.write(0, 3, translate(language, 'Phone number'))
sheet.write(0, 4, translate(language, 'created'))
sheet.write(0, 5, translate(language, 'merchant_registered'))
date_format = XFStyle()
date_format.num_format_str = 'dd/mm/yyyy'
row = 0
for service in all_services.results:
row += 1
sheet.write(row, 0, service.name)
sheet.write(row, 1, service.email)
sheet.write(row, 2, service.address)
sheet.write(row, 3, service.phone_number)
sheet.write(row, 4, parse_date(service.creation_date), date_format)
sheet.write(row, 5, translate(language, 'Yes') if service.merchant_registered else translate(language, 'No'))
date = format_datetime(datetime.now(), format='medium', locale='en_GB')
gcs_path = '/%s/tmp/cirklo/export-cirklo-%s.xls' % (OCA_FILES_BUCKET, date.replace(' ', '-'))
content_type = 'application/vnd.ms-excel'
with cloudstorage.open(gcs_path, 'w', content_type=content_type) as gcs_file:
book.save(gcs_file)
deferred.defer(cloudstorage.delete, gcs_path, _countdown=86400)
return {
'url': get_serving_url(gcs_path),
}
| 42.858462
| 120
| 0.710604
| 1,715
| 13,929
| 5.510204
| 0.172012
| 0.031429
| 0.022011
| 0.010053
| 0.367725
| 0.314921
| 0.280529
| 0.248571
| 0.225291
| 0.188677
| 0
| 0.0037
| 0.204537
| 13,929
| 324
| 121
| 42.990741
| 0.849188
| 0.051619
| 0
| 0.301115
| 0
| 0
| 0.047395
| 0.016531
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026022
| false
| 0
| 0.115242
| 0.003717
| 0.174721
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e042a55525baf01a1dd738c8dd3863fa44f09d50
| 1,624
|
py
|
Python
|
aplpy/tests/test_grid.py
|
nbrunett/aplpy
|
f5d128faf3568adea753d52c11ba43014d25d90a
|
[
"MIT"
] | null | null | null |
aplpy/tests/test_grid.py
|
nbrunett/aplpy
|
f5d128faf3568adea753d52c11ba43014d25d90a
|
[
"MIT"
] | null | null | null |
aplpy/tests/test_grid.py
|
nbrunett/aplpy
|
f5d128faf3568adea753d52c11ba43014d25d90a
|
[
"MIT"
] | 1
|
2018-02-26T03:04:19.000Z
|
2018-02-26T03:04:19.000Z
|
import matplotlib
matplotlib.use('Agg')
import numpy as np
from astropy.tests.helper import pytest
from .. import FITSFigure
def test_grid_addremove():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.remove_grid()
f.add_grid()
f.close()
def test_grid_showhide():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.hide()
f.grid.show()
f.close()
def test_grid_spacing():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_xspacing(1.)
f.grid.set_xspacing('tick')
with pytest.raises(ValueError):
f.grid.set_xspacing('auto')
f.grid.set_yspacing(2.)
f.grid.set_yspacing('tick')
with pytest.raises(ValueError):
f.grid.set_yspacing('auto')
f.close()
def test_grid_color():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_color('black')
f.grid.set_color('#003344')
f.grid.set_color((1.0, 0.4, 0.3))
f.close()
def test_grid_alpha():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_alpha(0.0)
f.grid.set_alpha(0.3)
f.grid.set_alpha(1.0)
f.close()
def test_grid_linestyle():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_linestyle('solid')
f.grid.set_linestyle('dashed')
f.grid.set_linestyle('dotted')
f.close()
def test_grid_linewidth():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_linewidth(0)
f.grid.set_linewidth(2)
f.grid.set_linewidth(5)
f.close()
| 20.049383
| 39
| 0.618842
| 255
| 1,624
| 3.780392
| 0.2
| 0.103734
| 0.149378
| 0.074689
| 0.529046
| 0.40249
| 0.40249
| 0.40249
| 0.323651
| 0.323651
| 0
| 0.040094
| 0.216749
| 1,624
| 80
| 40
| 20.3
| 0.717767
| 0
| 0
| 0.484375
| 0
| 0
| 0.029557
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109375
| false
| 0
| 0.0625
| 0
| 0.171875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0435a8bdb5ad3ee4a83d670d6af34fbe9094657
| 12,910
|
py
|
Python
|
vz.py
|
ponyatov/vz
|
f808dd0dca9b6aa7a3e492d2ee0797ab96cd23a1
|
[
"MIT"
] | null | null | null |
vz.py
|
ponyatov/vz
|
f808dd0dca9b6aa7a3e492d2ee0797ab96cd23a1
|
[
"MIT"
] | null | null | null |
vz.py
|
ponyatov/vz
|
f808dd0dca9b6aa7a3e492d2ee0797ab96cd23a1
|
[
"MIT"
] | null | null | null |
import os, sys
class Object:
## @name constructor
def __init__(self, V):
self.value = V
self.nest = []
def box(self, that):
if isinstance(that, Object): return that
if isinstance(that, str): return S(that)
raise TypeError(['box', type(that), that])
## @name dump / string
def test(self): return self.dump(test=True)
def __repr__(self): return self.dump(test=False)
    def dump(self, cycle=[], depth=0, prefix='', test=False):
        # head
        def pad(depth): return '\n' + '\t' * depth
        ret = pad(depth) + self.head(prefix, test)
        # subtree: recurse into nested children one level deeper
        # (completing the stubbed "# subtree" step; the cycle parameter is
        # threaded through unchanged, as in the original signature)
        for i in self:
            ret += i.dump(cycle, depth + 1, test=test)
        return ret
def head(self, prefix='', test=False):
gid = '' if test else f' @{id(self):x}'
return f'{prefix}<{self.tag()}:{self.val()}>{gid}'
def __format__(self, spec=''):
if not spec: return self.val()
raise TypeError(['__format__', spec])
def tag(self): return self.__class__.__name__.lower()
def val(self): return f'{self.value}'
## @name operator
def __iter__(self):
return iter(self.nest)
def __floordiv__(self, that):
self.nest.append(self.box(that)); return self
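# Example of the tree-composition operator above (illustrative, not in the
# original source): `Object('a') // 'b' // Object('c')` boxes the bare string
# into S('b') via Object.box, appends both children to .nest, and returns the
# parent object so that // calls chain left to right.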
class Primitive(Object):
pass
class S(Primitive):
def __init__(self, V=None, end=None, pfx=None, sfx=None):
super().__init__(V)
self.end = end; self.pfx = pfx; self.sfx = sfx
def gen(self, to, depth=0):
ret = ''
if self.pfx is not None:
ret += f'{to.tab*depth}{self.pfx}\n'
if self.value is not None:
ret += f'{to.tab*depth}{self.value}\n'
for i in self:
ret += i.gen(to, depth + 1)
if self.end is not None:
ret += f'{to.tab*depth}{self.end}\n'
if self.sfx is not None:
ret += f'{to.tab*depth}{self.sfx}\n'
return ret
class Sec(S):
def gen(self, to, depth=0):
ret = ''
if self.pfx is not None:
ret += f'{to.tab*depth}{self.pfx}\n' if self.pfx else '\n'
if self.nest and self.value is not None:
ret += f'{to.tab*depth}{to.comment} \\ {self}\n'
for i in self:
ret += i.gen(to, depth + 0)
if self.nest and self.value is not None:
ret += f'{to.tab*depth}{to.comment} / {self}\n'
if self.sfx is not None:
ret += f'{to.tab*depth}{self.sfx}\n' if self.pfx else '\n'
return ret
class IO(Object):
def __init__(self, V):
super().__init__(V)
self.path = V
class Dir(IO):
def __floordiv__(self, that):
assert isinstance(that, IO)
that.path = f'{self.path}/{that.path}'
return super().__floordiv__(that)
def sync(self):
try: os.mkdir(self.path)
except FileExistsError: pass
for i in self: i.sync()
class File(IO):
def __init__(self, V, ext='', tab=' ' * 4, comment='#'):
super().__init__(V + ext)
self.top = Sec(); self.bot = Sec()
self.tab = tab; self.comment = comment
def sync(self):
with open(self.path, 'w') as F:
F.write(self.top.gen(self))
for i in self: F.write(i.gen(self))
F.write(self.bot.gen(self))
class giti(File):
def __init__(self, V='.gitignore'):
super().__init__(V)
self.bot // f'!{self}'
class Makefile(File):
def __init__(self, V='Makefile'):
super().__init__(V, tab='\t')
class pyFile(File):
def __init__(self, V, ext='.py'):
super().__init__(V, ext)
class jsonFile(File):
def __init__(self, V, ext='.json', comment='//'):
super().__init__(V, ext, comment=comment)
class Meta(Object): pass
class Class(Meta):
def __init__(self, C, sup=[]):
assert callable(C)
super().__init__(C.__name__)
self.clazz = C; self.sup = sup
def gen(self, to, depth=0):
ret = S(f'class {self}:', pfx='') // 'pass'
return ret.gen(to, depth)
class Project(Meta):
def __init__(self, V=None, title='', about=''):
if not V: V = os.getcwd().split('/')[-1]
super().__init__(V)
#
self.TITLE = title if title else f'{self}'
self.ABOUT = about
self.AUTHOR = 'Dmitry Ponyatov'
self.EMAIL = 'dponyatov@gmail.com'
self.GITHUB = 'https://github.com/ponyatov'
self.YEAR = 2020
self.LICENSE = 'All rights reserved'
self.COPYRIGHT = f'(c) {self.AUTHOR} <{self.EMAIL}> {self.YEAR} {self.LICENSE}'
#
self.dirs()
self.mk()
self.src()
self.vscode()
self.apt()
def apt(self):
self.apt = File('apt', '.txt'); self.d // self.apt
self.apt \
// 'git make curl' // 'code meld' \
// 'python3 python3-venv' \
// 'build-essential g++'
def vscode(self):
self.vscode = Dir('.vscode'); self.d // self.vscode
self.settings()
self.tasks()
def settings(self):
self.settings = jsonFile('settings'); self.vscode // self.settings
#
def multi(key, cmd):
return (S('{', '},')
// f'"command": "multiCommand.{key}",'
// (S('"sequence": [', ']')
// '"workbench.action.files.saveAll",'
// (S('{"command": "workbench.action.terminal.sendSequence",')
// f'"args": {{"text": "\\u000D {cmd} \\u000D"}}}}'
)))
self.multi = \
(Sec('multi')
// (S('"multiCommand.commands": [', '],')
// multi('f11', 'make meta')
// multi('f12', 'make all')
))
#
self.files = (Sec()
// f'"{self}/**":true,'
)
self.exclude = \
(Sec()
// (S('"files.exclude": {', '},') // self.files))
self.watcher = \
(Sec()
// (S('"files.watcherExclude": {', '},') // self.files))
self.assoc = \
(Sec()
// (S('"files.associations": {', '},')))
self.files = (Sec('files', pfx='')
// self.exclude
// self.watcher
// self.assoc)
#
self.editor = (Sec('editor', pfx='')
// '"editor.tabSize": 4,'
// '"editor.rulers": [80],'
// '"workbench.tree.indent": 32,'
)
#
self.settings \
// (S('{', '}')
// self.multi
// self.files
// self.editor)
def tasks(self):
self.tasks = jsonFile('tasks'); self.vscode // self.tasks
def task(clazz, cmd):
return (S('{', '},')
// f'"label": "{clazz}: {cmd}",'
// f'"type": "shell",'
// f'"command": "make {cmd}",'
// f'"problemMatcher": []'
)
self.tasks \
// (S('{', '}')
// '"version": "2.0.0",'
// (S('"tasks": [', ']')
// task('project', 'install')
// task('project', 'update')
// task('git', 'dev')
// task('git', 'shadow')
))
def src(self):
self.py()
self.test()
self.config()
def config(self):
self.config = pyFile('config'); self.d // self.config
self.config \
// f"{'SECURE_KEY':<11} = {os.urandom(0x22)}" \
// f"{'HOST':<11} = '127..0.0.1'" \
// f"{'PORT':<11} = 12345"
def py(self):
self.py = pyFile(f'{self}'); self.d // self.py
self.py \
// 'import os, sys'
for i in [Object, S, Sec, IO, Dir, File, Meta, Class, Project]:
self.py // Class(i)
self.py // Class(Primitive, [Object])
self.py \
// S('Project().sync()', pfx='')
def test(self):
self.test = pyFile(f'test_{self}'); self.d // self.test
self.test \
// 'import pytest' \
// f'from {self} import *' \
// 'def test_any(): assert True'
def dirs(self):
self.d = Dir(f'{self}'); self.giti = giti(); self.d // self.giti
self.giti.top // '*~' // '*.swp' // '*.log'; self.giti.top.sfx = ''
self.giti // f'/{self}/' // '/__pycache__/'
self.giti.bot.pfx = ''
#
self.bin = Dir('bin'); self.d // self.bin
def mk(self):
self.mk = Makefile(); self.d // self.mk
#
self.mk.var = Sec('var', pfx=''); self.mk // self.mk.var
self.mk.var \
// f'{"MODULE":<11} = $(notdir $(CURDIR))' \
// f'{"OS":<11} = $(shell uname -s)' \
// f'{"CORES":<11} = $(shell grep processor /proc/cpuinfo | wc -l)'
#
self.mk.dir = Sec('dir', pfx=''); self.mk // self.mk.dir
self.mk.dir \
// f'{"CWD":<11} = $(CURDIR)' \
// f'{"BIN":<11} = $(CWD)/bin' \
// f'{"DOC":<11} = $(CWD)/doc' \
// f'{"LIB":<11} = $(CWD)/lib' \
// f'{"SRC":<11} = $(CWD)/src' \
// f'{"TMP":<11} = $(CWD)/tmp'
#
self.mk.tool = Sec('tool', pfx=''); self.mk // self.mk.tool
self.mk.tool \
// f'CURL = curl -L -o' \
// f'PY = $(shell which python3)' \
// f'PYT = $(shell which pytest)' \
// f'PEP = $(shell which autopep8)'
#
self.mk.package = Sec('package', pfx=''); self.mk // self.mk.package
self.mk.package \
// f'SYSLINUX_VER = 6.0.3'
#
self.mk.src = Sec('src', pfx=''); self.mk // self.mk.src
self.mk.src \
// f'Y += $(MODULE).py test_$(MODULE).py' \
// f'P += config.py' \
// f'S += $(Y)'
#
self.mk.cfg = Sec('cfg', pfx=''); self.mk // self.mk.cfg
self.mk.cfg \
// f'PEPS = E26,E302,E305,E401,E402,E701,E702'
#
self.mk.all = Sec('all', pfx=''); self.mk // self.mk.all
self.mk.all \
// (S('meta: $(Y)', pfx='.PHONY: meta')
// '$(MAKE) test'
// '$(PY) $(MODULE).py'
// '$(PEP) --ignore=$(PEPS) --in-place $?')
self.mk.all \
// (S('test: $(Y)', pfx='\n.PHONY: test')
// '$(PYT) test_$(MODULE).py')
#
self.mk.rule = Sec('rule', pfx=''); self.mk // self.mk.rule
#
self.mk.doc = Sec('doc', pfx=''); self.mk // self.mk.doc
self.mk.doc \
// S('doc: doc/pyMorphic.pdf', pfx='.PHONY: doc')
self.mk.doc \
// (S('doc/pyMorphic.pdf:')
// '$(CURL) $@ http://www.diva-portal.org/smash/get/diva2:22296/FULLTEXT01.pdf')
#
self.mk.install = Sec('install', pfx=''); self.mk // self.mk.install
self.mk.install // '.PHONY: install update'
self.mk.install \
// (S('install: $(OS)_install doc')
// '$(MAKE) test'
)
self.mk.install \
// (S('update: $(OS)_update doc')
// '$(MAKE) test'
)
self.mk.install \
// (S('Linux_install Linux_update:',
pfx='.PHONY: Linux_install Linux_update')
// 'sudo apt update'
// 'sudo apt install -u `cat apt.txt`')
#
self.mk.merge = Sec('merge', pfx=''); self.mk // self.mk.merge
self.mk.merge \
// 'SHADOW ?= ponymuck'
self.mk.merge \
// 'MERGE = Makefile .gitignore README.md apt.txt $(S)' \
// 'MERGE += .vscode bin doc lib src tmp'
self.mk.merge \
// (S('dev:', pfx='\n.PHONY: dev')
// 'git push -v'
// 'git checkout $@'
// 'git checkout $(SHADOW) -- $(MERGE)'
)
self.mk.merge \
// (S('shadow:', pfx='\n.PHONY: shadow')
// 'git push -v'
// 'git checkout $(SHADOW)'
)
self.mk.merge \
// (S('release:', pfx='\n.PHONY: release')
)
self.mk.merge \
// (S('zip:', pfx='\n.PHONY: zip')
)
def sync(self):
self.readme()
self.d.sync()
def readme(self):
self.readme = File('README', '.md'); self.d // self.readme
self.readme \
// f'#  `{self}`' // f'## {self.TITLE}'
self.readme \
// '' // self.COPYRIGHT // '' // f'github: {self.GITHUB}/{self}'
self.readme // self.ABOUT
Project(
title='ViZual language environment',
about='''
* object (hyper)graph interpreter
'''
).sync()
| 32.849873
| 96
| 0.449109
| 1,494
| 12,910
| 3.792503
| 0.182731
| 0.059301
| 0.022944
| 0.025415
| 0.174373
| 0.106601
| 0.090364
| 0.077833
| 0.077833
| 0.072361
| 0
| 0.012408
| 0.35701
| 12,910
| 392
| 97
| 32.933673
| 0.67016
| 0.005035
| 0
| 0.178462
| 0
| 0.006154
| 0.226868
| 0.0401
| 0
| 0
| 0.000312
| 0
| 0.009231
| 1
| 0.126154
| false
| 0.012308
| 0.012308
| 0.024615
| 0.209231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0448da70febec0759bc638d5a460760c3964480
| 402
|
py
|
Python
|
tcpserver.py
|
justforbalance/CSnet
|
c1e049f63d245c5d464a2d6e9aa7d3daf15bf2b6
|
[
"MIT"
] | null | null | null |
tcpserver.py
|
justforbalance/CSnet
|
c1e049f63d245c5d464a2d6e9aa7d3daf15bf2b6
|
[
"MIT"
] | null | null | null |
tcpserver.py
|
justforbalance/CSnet
|
c1e049f63d245c5d464a2d6e9aa7d3daf15bf2b6
|
[
"MIT"
] | null | null | null |
from socket import *
serverPort = 12001
serverSocket = socket(AF_INET, SOCK_STREAM)
serverSocket.bind(('', serverPort))
serverSocket.listen(1)
print("the server is ready to receive")
while True:
    connectionSocket, addr = serverSocket.accept()
sentence = connectionSocket.recv(1024).decode()
sentence = sentence.upper()
connectionSocket.send(sentence.encode())
connectionSocket.close()
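# A matching client sketch (standard-library socket API only; the host/port
# assume the server above is running on the same machine):
#
#     from socket import socket, AF_INET, SOCK_STREAM
#     clientSocket = socket(AF_INET, SOCK_STREAM)
#     clientSocket.connect(('127.0.0.1', 12001))
#     clientSocket.send('hello'.encode())
#     print(clientSocket.recv(1024).decode())  # -> 'HELLO'
#     clientSocket.close()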
| 33.5
| 51
| 0.753731
| 44
| 402
| 6.840909
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0.129353
| 402
| 12
| 52
| 33.5
| 0.831429
| 0
| 0
| 0
| 0
| 0
| 0.074442
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e045d172e3aa9769db37dd0c8977af6b2b83dca1
| 10,889
|
py
|
Python
|
armi/reactor/tests/test_zones.py
|
youngmit/armi
|
67688e4e67d2a217dfc7b1ccfa64028c20b57a5b
|
[
"Apache-2.0"
] | null | null | null |
armi/reactor/tests/test_zones.py
|
youngmit/armi
|
67688e4e67d2a217dfc7b1ccfa64028c20b57a5b
|
[
"Apache-2.0"
] | null | null | null |
armi/reactor/tests/test_zones.py
|
youngmit/armi
|
67688e4e67d2a217dfc7b1ccfa64028c20b57a5b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for Zones"""
import copy
import unittest
import armi
from armi import settings
from armi.reactor import assemblies
from armi.reactor import blueprints
from armi.reactor import geometry
from armi.reactor import grids
from armi.reactor import reactors
from armi.reactor import zones
from armi.reactor.flags import Flags
from armi.reactor.tests import test_reactors
from armi.utils import pathTools
from armi.settings.fwSettings import globalSettings
THIS_DIR = pathTools.armiAbsDirFromName(__name__)
class Zone_TestCase(unittest.TestCase):
def setUp(self):
bp = blueprints.Blueprints()
geom = geometry.SystemLayoutInput()
geom.symmetry = "third core periodic"
r = reactors.Reactor(settings.getMasterCs(), bp)
r.add(reactors.Core("Core", settings.getMasterCs(), geom))
r.core.spatialGrid = grids.hexGridFromPitch(1.0)
aList = []
for ring in range(10):
a = assemblies.HexAssembly("fuel")
a.spatialLocator = r.core.spatialGrid[ring, 1, 0]
a.parent = r.core
aList.append(a)
self.aList = aList
def test_addAssemblyLocations(self):
zone = zones.Zone("TestZone")
zone.addAssemblyLocations(self.aList)
for a in self.aList:
self.assertIn(a.getLocation(), zone)
self.assertRaises(RuntimeError, zone.addAssemblyLocations, self.aList)
def test_iteration(self):
locs = [a.getLocation() for a in self.aList]
zone = zones.Zone("TestZone")
zone.addAssemblyLocations(self.aList)
for aLoc in zone:
self.assertIn(aLoc, locs)
# loop twice to make sure it iterates nicely.
for aLoc in zone:
self.assertIn(aLoc, locs)
def test_addRing(self):
zone = zones.Zone("TestZone")
zone.addRing(5)
self.assertIn("A5003", zone)
self.assertNotIn("A6002", zone)
zone.addRing(6, 3, 9)
self.assertIn("A6003", zone)
self.assertIn("A6009", zone)
self.assertNotIn("A6002", zone)
self.assertNotIn("A6010", zone)
class Zones_InReactor(unittest.TestCase):
def setUp(self):
self.o, self.r = test_reactors.loadTestReactor()
def test_buildRingZones(self):
o, r = self.o, self.r
cs = o.cs
cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
cs["ringZones"] = []
zonez = zones.buildZones(r.core, cs)
self.assertEqual(len(list(zonez)), 1)
self.assertEqual(9, r.core.numRings)
cs["ringZones"] = [5, 8]
zonez = zones.buildZones(r.core, cs)
self.assertEqual(len(list(zonez)), 2)
zone = zonez["ring-1"]
self.assertEqual(len(zone), (5 * (5 - 1) + 1))
zone = zonez["ring-2"]
        # Note that the actual number of rings in the reactor model is 9. Even though we
        # asked for the last zone to go to 8, the zone engine should bump it out. Not
        # sure if this is behavior that we want to preserve, but at least it's being
        # tested properly now.
self.assertEqual(len(zone), (9 * (9 - 1) + 1) - (5 * (5 - 1) + 1))
cs["ringZones"] = [5, 7, 8]
zonez = zones.buildZones(r.core, cs)
self.assertEqual(len(list(zonez)), 3)
zone = zonez["ring-3"]
self.assertEqual(len(zone), 30) # rings 8 and 9. See above comment
def test_removeZone(self):
o, r = self.o, self.r
cs = o.cs
cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
cs["ringZones"] = [5, 8]
# produce 2 zones, with the names ringzone0 and ringzone1
daZones = zones.buildZones(r.core, cs)
daZones.removeZone("ring-1")
# The names list should only house the only other remaining zone now
self.assertEqual(["ring-2"], daZones.names)
# if indexed like a dict, the zones object should give a key error from the removed zone
with self.assertRaises(KeyError):
daZones["ring-1"]
# Ensure we can still iterate through our zones object
for name in daZones.names:
aZone = daZones[name]
def test_findZoneAssemblyIsIn(self):
cs = self.o.cs
cs["ringZones"] = [5, 7, 8]
daZones = zones.buildZones(self.r.core, cs)
for zone in daZones:
a = self.r.core.getAssemblyWithStringLocation(zone.locList[0])
aZone = daZones.findZoneAssemblyIsIn(a)
self.assertEqual(aZone, zone)
# lets test if we get a none and a warning if the assembly does not exist in a zone
a = self.r.core.getAssemblyWithStringLocation(
daZones[daZones.names[0]].locList[0]
) # get assem from first zone
daZones.removeZone(
daZones.names[0]
) # remove a zone to ensure that our assem does not have a zone anymore
self.assertEqual(daZones.findZoneAssemblyIsIn(a), None)
class Zones_InRZReactor(unittest.TestCase):
def test_splitZones(self):
# Test to make sure that we can split a zone containing control and fuel assemblies.
# Also test that we can separate out assemblies with differing numbers of blocks.
o, r = test_reactors.loadTestReactor(inputFileName="partisnTestReactor.yaml")
cs = o.cs
cs["splitZones"] = False
cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
cs["ringZones"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
diverseZone = "ring-4"
r.core.buildZones(cs)
daZones = r.core.zones
# lets make one of the assemblies have an extra block
zoneLocations = daZones.getZoneLocations(diverseZone)
originalAssemblies = r.core.getLocationContents(
zoneLocations, assemblyLevel=True
)
fuel = [a for a in originalAssemblies if a.hasFlags(Flags.FUEL)][0]
newBlock = copy.deepcopy(fuel[-1])
fuel.add(newBlock)
# should contain a zone for every ring zone
# we only want one ring zone for this test, containing assemblies of different types.
zoneTup = tuple(daZones.names)
for zoneName in zoneTup:
if zoneName != diverseZone:
daZones.removeZone(zoneName)
# this should split diverseZone into multiple zones by nodalization type.
cs["splitZones"] = True
zones.splitZones(r.core, cs, daZones)
# test to make sure that we split the ring zone correctly
self.assertEqual(len(daZones["ring-4-primary-control-5"]), 2)
self.assertEqual(len(daZones["ring-4-middle-fuel-5"]), 3)
self.assertEqual(len(daZones["ring-4-middle-fuel-6"]), 1)
def test_createHotZones(self):
# Test to make sure createHotZones identifies the highest p/f location in a zone
# Test to make sure createHotZones can remove the peak assembly from that zone and place it in a new zone
# Test that the power in the old zone and the new zone is conserved.
# Test that if a hot zone can not be created from a single assembly zone.
o, r = test_reactors.loadTestReactor(inputFileName="partisnTestReactor.yaml")
cs = o.cs
cs["splitZones"] = False
cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
cs["ringZones"] = [9] # build one giant zone
r.core.buildZones(cs)
daZones = r.core.zones
originalassemblies = []
originalPower = 0.0
peakZonePFRatios = []
# Create a single assembly zone to verify that it will not create a hot zone
single = zones.Zone("single")
daZones.add(single)
aLoc = r.core.getFirstAssembly(Flags.FUEL).getLocation()
single.append(aLoc)
# Set power and flow.
# Also gather channel peak P/F ratios, assemblies and power.
for zone in daZones:
powerToFlow = []
zoneLocations = daZones.getZoneLocations(zone.name)
assems = r.core.getLocationContents(zoneLocations, assemblyLevel=True)
power = 300.0
flow = 300.0
for a in assems:
a.getFirstBlock().p.power = power
assemblyPower = a.calcTotalParam("power")
a[-1].p.THmassFlowRate = flow
powerToFlow.append(assemblyPower / a[-1].p.THmassFlowRate)
originalPower += assemblyPower
originalassemblies.append(a)
power += 1
flow -= 1
peakZonePFRatios.append(max(powerToFlow))
daZones = zones.createHotZones(r.core, daZones)
# Test that the hot zones have the peak P/F from the host channels
i = 0
for zone in daZones:
if zone.hotZone:
hotAssemLocation = daZones.getZoneLocations(zone.name)
hotAssem = r.core.getLocationContents(
hotAssemLocation, assemblyLevel=True
)[0]
self.assertEqual(
peakZonePFRatios[i],
hotAssem.calcTotalParam("power") / hotAssem[-1].p.THmassFlowRate,
)
i += 1
powerAfterHotZoning = 0.0
assembliesAfterHotZoning = []
# Check that power is conserved and that we did not lose any assemblies
for zone in daZones:
locs = daZones.getZoneLocations(zone.name)
assems = r.core.getLocationContents(locs, assemblyLevel=True)
for a in assems:
assembliesAfterHotZoning.append(a)
powerAfterHotZoning += a.calcTotalParam("power")
self.assertEqual(powerAfterHotZoning, originalPower)
self.assertEqual(len(assembliesAfterHotZoning), len(originalassemblies))
# check that the original zone with 1 channel has False for hotzone
self.assertEqual(single.hotZone, False)
# check that we have the correct number of hot and normal zones.
hotCount = 0
normalCount = 0
for zone in daZones:
if zone.hotZone:
hotCount += 1
else:
normalCount += 1
self.assertEqual(hotCount, 1)
self.assertEqual(normalCount, 2)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Zones_InReactor.test_buildRingZones']
unittest.main()
| 39.740876
| 113
| 0.629718
| 1,332
| 10,889
| 5.119369
| 0.246246
| 0.016131
| 0.026397
| 0.018478
| 0.237865
| 0.189471
| 0.162047
| 0.162047
| 0.104414
| 0.087696
| 0
| 0.017314
| 0.27863
| 10,889
| 273
| 114
| 39.886447
| 0.850796
| 0.24162
| 0
| 0.239796
| 0
| 0
| 0.04813
| 0.008529
| 0
| 0
| 0
| 0
| 0.153061
| 1
| 0.05102
| false
| 0
| 0.071429
| 0
| 0.137755
| 0.010204
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e046ccaf1594be44b4bc74501cfe08b79d45a1d7
| 490
|
py
|
Python
|
Examples/WorkingWithOutlookMSGs/CreateAndSaveOutlookNote.py
|
Muzammil-khan/Aspose.Email-Python-Dotnet
|
04ca3a6f440339f3ddf316218f92d15d66f24e7e
|
[
"MIT"
] | 5
|
2019-01-28T05:17:12.000Z
|
2020-04-14T14:31:34.000Z
|
Examples/WorkingWithOutlookMSGs/CreateAndSaveOutlookNote.py
|
Muzammil-khan/Aspose.Email-Python-Dotnet
|
04ca3a6f440339f3ddf316218f92d15d66f24e7e
|
[
"MIT"
] | 1
|
2019-01-28T16:07:26.000Z
|
2021-11-25T10:59:52.000Z
|
Examples/WorkingWithOutlookMSGs/CreateAndSaveOutlookNote.py
|
Muzammil-khan/Aspose.Email-Python-Dotnet
|
04ca3a6f440339f3ddf316218f92d15d66f24e7e
|
[
"MIT"
] | 6
|
2018-07-16T14:57:34.000Z
|
2020-08-30T05:59:52.000Z
|
import aspose.email.mapi.msg as msg
from aspose.email.mapi import MapiNote, NoteSaveFormat, NoteColor
def run():
dataDir = "Data/"
#ExStart: CreateAndSaveOutlookNote
note3 = MapiNote()
note3.subject = "Blue color note"
note3.body = "This is a blue color note";
note3.color = NoteColor.YELLOW
note3.height = 500
note3.width = 500
note3.save(dataDir + "CreateAndSaveOutlookNote_out.msg", NoteSaveFormat.MSG)
#ExEnd: CreateAndSaveOutlookNote
if __name__ == '__main__':
run()
| 25.789474
| 77
| 0.746939
| 60
| 490
| 5.95
| 0.583333
| 0.061625
| 0.084034
| 0.10084
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031026
| 0.144898
| 490
| 18
| 78
| 27.222222
| 0.821002
| 0.130612
| 0
| 0
| 0
| 0
| 0.200472
| 0.075472
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.153846
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e04830a8bb6dffa22a3b7aa461ea3221561a26cd
| 6,114
|
py
|
Python
|
nonebot/internal/adapter/template.py
|
mobyw/nonebot2
|
36663f1a8a51bd89f4a60110047e73719adcc73d
|
[
"MIT"
] | null | null | null |
nonebot/internal/adapter/template.py
|
mobyw/nonebot2
|
36663f1a8a51bd89f4a60110047e73719adcc73d
|
[
"MIT"
] | null | null | null |
nonebot/internal/adapter/template.py
|
mobyw/nonebot2
|
36663f1a8a51bd89f4a60110047e73719adcc73d
|
[
"MIT"
] | null | null | null |
import functools
from string import Formatter
from typing import (
    TYPE_CHECKING,
    Any,
    Set,
    Dict,
    List,
    Type,
    Tuple,
    Union,
    Generic,
    Mapping,
    TypeVar,
    Callable,
    Optional,
    Sequence,
    cast,
    overload,
)

if TYPE_CHECKING:
    from .message import Message, MessageSegment

TM = TypeVar("TM", bound="Message")
TF = TypeVar("TF", str, "Message")

FormatSpecFunc = Callable[[Any], str]
FormatSpecFunc_T = TypeVar("FormatSpecFunc_T", bound=FormatSpecFunc)


class MessageTemplate(Formatter, Generic[TF]):
    """Message template formatter.

    Arguments:
        template: the template
        factory: message type factory, defaults to `str`
    """

    @overload
    def __init__(
        self: "MessageTemplate[str]", template: str, factory: Type[str] = str
    ) -> None:
        ...

    @overload
    def __init__(
        self: "MessageTemplate[TM]", template: Union[str, TM], factory: Type[TM]
    ) -> None:
        ...

    def __init__(self, template, factory=str) -> None:
        self.template: TF = template
        self.factory: Type[TF] = factory
        self.format_specs: Dict[str, FormatSpecFunc] = {}

    def add_format_spec(
        self, spec: FormatSpecFunc_T, name: Optional[str] = None
    ) -> FormatSpecFunc_T:
        name = name or spec.__name__
        if name in self.format_specs:
            raise ValueError(f"Format spec {name} already exists!")
        self.format_specs[name] = spec
        return spec

    def format(self, *args, **kwargs):
        """Build a message object from the template and the given arguments."""
        return self._format(args, kwargs)

    def format_map(self, mapping: Mapping[str, Any]) -> TF:
        """Build a message object from the template and a mapping; useful when
        field names are not valid identifiers."""
        return self._format([], mapping)

    def _format(self, args: Sequence[Any], kwargs: Mapping[str, Any]) -> TF:
        msg = self.factory()
        if isinstance(self.template, str):
            msg += self.vformat(self.template, args, kwargs)
        elif isinstance(self.template, self.factory):
            template = cast("Message[MessageSegment]", self.template)
            for seg in template:
                msg += self.vformat(str(seg), args, kwargs) if seg.is_text() else seg
        else:
            raise TypeError("template must be a string or instance of Message!")

        return msg  # type:ignore

    def vformat(
        self, format_string: str, args: Sequence[Any], kwargs: Mapping[str, Any]
    ) -> TF:
        used_args = set()
        result, _ = self._vformat(format_string, args, kwargs, used_args, 2)
        self.check_unused_args(list(used_args), args, kwargs)
        return result

    def _vformat(
        self,
        format_string: str,
        args: Sequence[Any],
        kwargs: Mapping[str, Any],
        used_args: Set[Union[int, str]],
        recursion_depth: int,
        auto_arg_index: int = 0,
    ) -> Tuple[TF, int]:
        if recursion_depth < 0:
            raise ValueError("Max string recursion exceeded")

        results: List[Any] = [self.factory()]

        for (literal_text, field_name, format_spec, conversion) in self.parse(
            format_string
        ):
            # output the literal text
            if literal_text:
                results.append(literal_text)

            # if there's a field, output it
            if field_name is not None:
                # this is some markup, find the object and do the formatting

                # handle arg indexing when empty field_names are given.
                if field_name == "":
                    if auto_arg_index is False:
                        raise ValueError(
                            "cannot switch from manual field specification to "
                            "automatic field numbering"
                        )
                    field_name = str(auto_arg_index)
                    auto_arg_index += 1
                elif field_name.isdigit():
                    if auto_arg_index:
                        raise ValueError(
                            "cannot switch from manual field specification to "
                            "automatic field numbering"
                        )
                    # disable auto arg incrementing, if it gets
                    # used later on, then an exception will be raised
                    auto_arg_index = False

                # given the field_name, find the object it references
                # and the argument it came from
                obj, arg_used = self.get_field(field_name, args, kwargs)
                used_args.add(arg_used)

                assert format_spec is not None

                # do any conversion on the resulting object
                obj = self.convert_field(obj, conversion) if conversion else obj

                # expand the format spec, if needed
                format_control, auto_arg_index = self._vformat(
                    format_spec,
                    args,
                    kwargs,
                    used_args,
                    recursion_depth - 1,
                    auto_arg_index,
                )

                # format the object and append to the result
                formatted_text = self.format_field(obj, str(format_control))
                results.append(formatted_text)

        return functools.reduce(self._add, results), auto_arg_index

    def format_field(self, value: Any, format_spec: str) -> Any:
        formatter: Optional[FormatSpecFunc] = self.format_specs.get(format_spec)
        if formatter is None and not issubclass(self.factory, str):
            segment_class: Type["MessageSegment"] = self.factory.get_segment_class()
            method = getattr(segment_class, format_spec, None)
            if callable(method) and not cast(str, method.__name__).startswith("_"):
                formatter = getattr(segment_class, format_spec)
        return (
            super().format_field(value, format_spec)
            if formatter is None
            else formatter(value)
        )

    def _add(self, a: Any, b: Any) -> Any:
        try:
            return a + b
        except TypeError:
            return a + str(b)
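A quick check of the formatter above using the plain `str` factory, which needs no `Message` class:

# Minimal usage sketch for MessageTemplate with the default str factory.
template = MessageTemplate("Hello, {name}! You have {count} new messages.")
print(template.format(name="Alice", count=3))
# -> Hello, Alice! You have 3 new messages.

# format_map helps when a field name is not a valid Python identifier:
odd = MessageTemplate("{user-id} logged in")
print(odd.format_map({"user-id": 42}))
# -> 42 logged in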
| 33.048649
| 85
| 0.56248
| 666
| 6,114
| 4.995496
| 0.244745
| 0.033063
| 0.032462
| 0.013526
| 0.151488
| 0.113616
| 0.097385
| 0.097385
| 0.085963
| 0.085963
| 0
| 0.001257
| 0.349199
| 6,114
| 184
| 86
| 33.228261
| 0.834883
| 0.095846
| 0
| 0.102941
| 0
| 0
| 0.067664
| 0.004195
| 0
| 0
| 0
| 0
| 0.007353
| 1
| 0.080882
| false
| 0
| 0.029412
| 0
| 0.183824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e048b527992db2f1543fe57b684fc1f640173519
| 328
|
py
|
Python
|
python_Project/Day_16-20/test_2.py
|
Zzz-ww/Python-prac
|
c97f2c16b74a2c1df117f377a072811cc596f98b
|
[
"MIT"
] | null | null | null |
python_Project/Day_16-20/test_2.py
|
Zzz-ww/Python-prac
|
c97f2c16b74a2c1df117f377a072811cc596f98b
|
[
"MIT"
] | null | null | null |
python_Project/Day_16-20/test_2.py
|
Zzz-ww/Python-prac
|
c97f2c16b74a2c1df117f377a072811cc596f98b
|
[
"MIT"
] | null | null | null |
"""
嵌套的列表的坑
"""
names = ['关羽', '张飞', '赵云', '马超', '黄忠']
courses = ['语文', '数学', '英语']
# 录入五个学生三门课程的成绩
scores = [[None] * len(courses) for _ in range(len(names))]
for row, name in enumerate(names):
for col, course in enumerate(courses):
scores[row][col] = float(input(f'请输入{name}的{course}的成绩:'))
print(scores)
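The "pitfall" the docstring refers to is worth making explicit; a short sketch of what goes wrong when rows are built with `*` instead of the comprehension used above:

# Multiplying a nested list copies references to the SAME inner list,
# so every "row" aliases one object.
bad = [[None] * 3] * 5
bad[0][0] = 100
print(bad[0] is bad[1])  # True -> shared row; writing to one row changes all

# The comprehension used above creates an independent inner list per row:
good = [[None] * 3 for _ in range(5)]
good[0][0] = 100
print(good[0] is good[1])  # False -> rows are independent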
| 25.230769
| 66
| 0.591463
| 46
| 328
| 4.195652
| 0.652174
| 0.082902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185976
| 328
| 13
| 67
| 25.230769
| 0.722846
| 0.067073
| 0
| 0
| 0
| 0
| 0.12709
| 0.073579
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e04ce14b43e2b6f0784e3b17efec18f6e25f76d2
| 1,897
|
py
|
Python
|
lib/core/parse/cmdline.py
|
vikas-kundu/phonedict
|
6795cab0024e792340c43d95552162a985b891f6
|
[
"MIT"
] | null | null | null |
lib/core/parse/cmdline.py
|
vikas-kundu/phonedict
|
6795cab0024e792340c43d95552162a985b891f6
|
[
"MIT"
] | null | null | null |
lib/core/parse/cmdline.py
|
vikas-kundu/phonedict
|
6795cab0024e792340c43d95552162a985b891f6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# coded by Vikas Kundu https://github.com/vikas-kundu
# -------------------------------------------
import sys
import getopt
import time

import config
from lib.core.parse import banner
from lib.core import util
from lib.core import installer


def options():
    argv = sys.argv[1:]
    try:
        # Long options that take a value need a trailing '=' for getopt;
        # without it, values passed to --mode, --task etc. are rejected.
        opts, args = getopt.getopt(argv, 'm:t:c:o:n:whi',
                                   ['mode=', 'task=', 'country=', 'output=',
                                    'number=', 'wizard', 'help', 'install'])
        if (len(sys.argv) == 9) or (len(sys.argv) == 2):
            pass
        else:
            print("Error! Some parameter is missing, please check!")
            time.sleep(2)
            banner.usage()
            sys.exit()
    except getopt.GetoptError as err:
        print(err)
        banner.usage()
        sys.exit(2)

    for (o, a) in opts:
        if o in ('-i', '--install'):
            if not util.packages_check():
                installer.start_install()
            else:
                print("Packages already installed!")
            sys.exit()
        elif o in ('-w', '--wizard'):
            config.wizard = True
        elif o in ('-h', '--help'):
            banner.usage()
            sys.exit()
        elif o in ('-m', '--mode'):
            config.str_mode = str(a)
        elif o in ('-t', '--task'):
            config.str_task = str(a)
        elif o in ('-c', '--country'):
            config.str_country = str(a.lower().strip('"\''))
        elif o in ('-o', '--output'):
            config.str_output = str(a.strip('"\''))
        elif o in ('-n', '--number'):
            config.str_number = str(a.strip('"\''))
        else:
            print("Something went wrong with argument parsing!")
            time.sleep(2)
            banner.usage()
            sys.exit()
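A self-contained check of the getopt behavior the fix above relies on; the argv values are hypothetical:

import getopt

# Long options that expect a value must end with '=' in the long-option list;
# without the '=', getopt treats '--mode' as a bare flag and raises
# GetoptError when a value is supplied.
argv = ['--mode', 'sms', '-c', 'us']
opts, args = getopt.getopt(argv, 'm:t:c:o:n:whi',
                           ['mode=', 'task=', 'country=', 'output=',
                            'number=', 'wizard', 'help', 'install'])
print(opts)  # [('--mode', 'sms'), ('-c', 'us')]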
| 28.313433
| 129
| 0.461255
| 216
| 1,897
| 4.018519
| 0.421296
| 0.02765
| 0.056452
| 0.082949
| 0.12212
| 0.064516
| 0.064516
| 0
| 0
| 0
| 0
| 0.006552
| 0.356352
| 1,897
| 66
| 130
| 28.742424
| 0.704341
| 0.072746
| 0
| 0.265306
| 0
| 0
| 0.150977
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020408
| false
| 0.020408
| 0.142857
| 0
| 0.163265
| 0.081633
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e04da5eb604fc61099ea52110ba3398380247444
| 2,660
|
py
|
Python
|
shoutcast_api/shoutcast_request.py
|
scls19fr/shoutcast_api
|
89a9e826b82411ae5f24ea28e1b1cb22eaaa0890
|
[
"MIT"
] | 6
|
2020-03-03T06:07:31.000Z
|
2021-11-24T19:20:12.000Z
|
shoutcast_api/shoutcast_request.py
|
scls19fr/shoutcast_api
|
89a9e826b82411ae5f24ea28e1b1cb22eaaa0890
|
[
"MIT"
] | 6
|
2020-11-17T20:30:30.000Z
|
2020-11-22T04:09:36.000Z
|
shoutcast_api/shoutcast_request.py
|
scls19fr/shoutcast_api
|
89a9e826b82411ae5f24ea28e1b1cb22eaaa0890
|
[
"MIT"
] | 1
|
2020-11-17T20:11:38.000Z
|
2020-11-17T20:11:38.000Z
|
import xmltodict
import json
from .models import Tunein
from .utils import _init_session
from .Exceptions import APIException

base_url = 'http://api.shoutcast.com'
tunein_url = 'http://yp.shoutcast.com/{base}?id={id}'
tuneins = [Tunein('/sbin/tunein-station.pls'), Tunein('/sbin/tunein-station.m3u'), Tunein('/sbin/tunein-station.xspf')]


def call_api_xml(endpoint, params=None, session=None):
    session = _init_session(session)
    request_url = "{}{}".format(base_url, endpoint)
    response = session.get(request_url, params=params)
    if response.status_code == 200:
        response_as_dict = xmltodict.parse(response.content)
        api_response = response_as_dict.get('response')
        if api_response:
            api_status_code = int(api_response.get('statusCode'))
            # Mirror call_api_json below and only raise on a non-200 API
            # status. (As originally written this branch raised
            # unconditionally, rejecting successful responses as well.)
            if api_status_code != 200:
                message = "statusText:{}, statusDetailText:{}".format(
                    api_response.get('statusText'), api_response.get('statusDetailText')
                )
                raise APIException(message, code=api_status_code)
        return response_as_dict
    raise APIException(response.content, code=response.status_code)


def call_api_json(endpoint, params=None, session=None):
    session = _init_session(session)
    request_url = "{}{}".format(base_url, endpoint)
    response = session.get(request_url, params=params)
    if response.status_code == 200:
        json_response = json.loads(response.content.decode('utf-8'))
        api_response = json_response.get('response')
        api_status_code = int(api_response.get('statusCode'))
        if api_status_code != 200:
            message = "statusText:{}, statusDetailText:{}".format(
                api_response.get('statusText'), api_response.get('statusDetailText', '')
            )
            raise APIException(message, code=api_status_code)
        return json_response.get('response')['data']
    raise APIException(response.reason, code=response.status_code)


def call_api_tunein(station_id: int, session=None):
    session = _init_session(session)
    # tuneins[2] is the xspf variant defined above
    url = tunein_url.format(base=tuneins[2], id=station_id)
    response = session.get(url)
    if response.status_code == 200:
        api_response = xmltodict.parse(response.content.decode('utf-8'))
        return api_response
    raise APIException(response.reason, code=response.status_code)


def call_api_tunein_any(base: Tunein, station_id: int, session=None):
    session = _init_session(session)
    url = tunein_url.format(base=base, id=station_id)
    response = session.get(url)
    if response.status_code == 200:
        return response.content.decode('utf-8')
    raise APIException(response.reason, code=response.status_code)
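A hedged usage sketch of the JSON helper; the endpoint path and the `k` (developer key) parameter are assumptions for illustration, not confirmed parts of this module, and a network connection is required:

# Hypothetical call; 'k' and '/station/top500' are assumed, not verified.
try:
    data = call_api_json('/station/top500', params={'k': 'YOUR_DEV_KEY', 'f': 'json'})
    print(type(data))  # whatever the API returns under response['data']
except APIException as exc:
    print('API error:', exc)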
| 38.550725
| 119
| 0.697368
| 327
| 2,660
| 5.455657
| 0.17737
| 0.07287
| 0.080717
| 0.049327
| 0.68722
| 0.645179
| 0.645179
| 0.627242
| 0.597534
| 0.543722
| 0
| 0.009166
| 0.179699
| 2,660
| 68
| 120
| 39.117647
| 0.808433
| 0
| 0
| 0.433962
| 0
| 0
| 0.122556
| 0.027444
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075472
| false
| 0
| 0.09434
| 0
| 0.245283
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0530a4b979886c9eec477ba716b7cb1d54f44a5
| 12,101
|
py
|
Python
|
xdl/utils/prop_limits.py
|
mcrav/xdl
|
c120a1cf50a9b668a79b118700930eb3d60a9298
|
[
"MIT"
] | null | null | null |
xdl/utils/prop_limits.py
|
mcrav/xdl
|
c120a1cf50a9b668a79b118700930eb3d60a9298
|
[
"MIT"
] | null | null | null |
xdl/utils/prop_limits.py
|
mcrav/xdl
|
c120a1cf50a9b668a79b118700930eb3d60a9298
|
[
"MIT"
] | null | null | null |
"""Prop limits are used to validate the input given to xdl elements. For
example, a volume property should be a positive number, optionally followed by
volume units. The prop limit is used to check that input supplied is valid for
that property.
"""
import re
from typing import List, Optional
class PropLimit(object):
"""Convenience class for storing prop limit. A prop limit is essentially a
regex for validating the input to a given prop. For example, checking
appropriate units are used or a value is within a certain range.
Either ``regex`` or ``enum`` must be given when instantiating. If ``enum``
is given it will override whatever is given for ``regex`` and ``hint``.
``hint`` and ``default`` are both optional, but recommended, at least when
using ``regex`` not ``enum``.
Arguments:
regex (str): Regex pattern that should match with valid values and not
match with invalid values.
hint (str): Useful hint for what valid value should look like, e.g.
"Volume should be a number followed by volume units, e.g. '5 mL'."
default (str): Default valid value. Should use standard units of the
quantity involved, e.g. for volume, '0 mL'.
enum (List[str]): List of values that the prop can take. This is used
to automatically generate a regex from the list of allowed values.
"""
def __init__(
self,
regex: Optional[str] = None,
hint: Optional[str] = '',
default: Optional[str] = '',
enum: Optional[List[str]] = [],
):
if not regex and not enum:
raise ValueError(
'Either `regex` or `enum` argument must be given.')
self.default = default
# If enum given generate regex from this
self.enum = enum
if enum:
if not regex:
self.regex = self.generate_enum_regex()
else:
self.regex = regex
if not hint:
self.hint = self.generate_enum_hint()
else:
self.hint = hint
# Otherwise just set regex as attribute
else:
self.regex = regex
self.hint = hint
def validate(self, value: str) -> bool:
"""Validate given value against prop limit regex.
Args:
value (str): Value to validate against prop limit.
Returns:
bool: True if the value matches the prop limit, otherwise False.
"""
return re.match(self.regex, value) is not None
def generate_enum_regex(self) -> str:
"""Generate regex from :py:attr:`enum`. Regex will match any of the
items in :py:attr:`enum`.
Returns:
str: Regex that will match any of the strings in the :py:attr:`enum`
list.
"""
regex = r'('
for item in self.enum:
regex += item + r'|'
regex = regex[:-1] + r')'
return regex
def generate_enum_hint(self) -> str:
"""Generate hint from :py:attr:`enum`. Hint will list all items in
:py:attr:`enum`.
Returns:
str: Hint listing all items in :py:attr:`enum`.
"""
s = 'Expecting one of '
for item in self.enum[:-1]:
s += f'"{item}", '
s = s[:-2] + f' or "{self.enum[-1]}".'
return s
##################
# Regex patterns #
##################
#: Pattern to match a positive or negative float,
#: e.g. '0', '-1', '1', '-10.3', '10.3', '0.0' would all be matched by this
#: pattern.
FLOAT_PATTERN: str = r'([-]?[0-9]+(?:[.][0-9]+)?)'
#: Pattern to match a positive float,
#: e.g. '0', 1', '10.3', '0.0' would all be matched by this pattern, but not
#: '-10.3' or '-1'.
POSITIVE_FLOAT_PATTERN: str = r'([0-9]+(?:[.][0-9]+)?)'
#: Pattern to match boolean strings, specifically matching 'true' and 'false'
#: case insensitvely.
BOOL_PATTERN: str = r'(false|False|true|True)'
#: Pattern to match all accepted volumes units case insensitvely, or empty string.
VOLUME_UNITS_PATTERN: str = r'(l|L|litre|litres|liter|liters|ml|mL|cm3|cc|milliltre|millilitres|milliliter|milliliters|cl|cL|centiltre|centilitres|centiliter|centiliters|dl|dL|deciltre|decilitres|deciliter|deciliters|ul|uL|μl|μL|microlitre|microlitres|microliter|microliters)?'
#: Pattern to match all accepted mass units, or empty string.
MASS_UNITS_PATTERN: str = r'(g|gram|grams|kg|kilogram|kilograms|mg|milligram|milligrams|ug|μg|microgram|micrograms)?'
#: Pattern to match all accepted temperature units, or empty string.
TEMP_UNITS_PATTERN: str = r'(°C|K|F)?'
#: Pattern to match all accepted time units, or empty string.
TIME_UNITS_PATTERN = r'(days|day|h|hr|hrs|hour|hours|m|min|mins|minute|minutes|s|sec|secs|second|seconds)?'
#: Pattern to match all accepted pressure units, or empty string.
PRESSURE_UNITS_PATTERN = r'(mbar|bar|torr|Torr|mmhg|mmHg|atm|Pa|pa)?'
#: Pattern to match all accepted rotation speed units, or empty string.
ROTATION_SPEED_UNITS_PATTERN = r'(rpm|RPM)?'
#: Pattern to match all accepted length units, or empty string.
DISTANCE_UNITS_PATTERN = r'(nm|µm|mm|cm|m|km)?'
#: Pattern to match all accepted mol units, or empty string.
MOL_UNITS_PATTERN = r'(mmol|mol)?'
###############
# Prop limits #
###############
def generate_quantity_units_pattern(
quantity_pattern: str,
units_pattern: str,
hint: Optional[str] = '',
default: Optional[str] = ''
) -> PropLimit:
"""
Convenience function to generate PropLimit object for different quantity
types, i.e. for variations on the number followed by unit pattern.
Args:
quantity_pattern (str): Pattern to match the number expected. This will
typically be ``POSITIVE_FLOAT_PATTERN`` or ``FLOAT_PATTERN``.
units_pattern (str): Pattern to match the units expected or empty
string. Empty string is matched as not including units is allowed
as in this case standard units are used.
hint (str): Hint for the prop limit to tell the user what correct input
should look like in the case of an errror.
default (str): Default value for the prop limit, should use standard
units for the prop involved.
"""
return PropLimit(
regex=r'^((' + quantity_pattern + r'[ ]?'\
+ units_pattern + r'$)|(^' + quantity_pattern + r'))$',
hint=hint,
default=default
)
# NOTE: It is important here that defaults use the standard unit for that
# quantity type as XDL app uses this to add in default units.
#: Prop limit for volume props.
VOLUME_PROP_LIMIT: PropLimit = PropLimit(
regex=r'^(all|(' + POSITIVE_FLOAT_PATTERN + r'[ ]?'\
+ VOLUME_UNITS_PATTERN + r')|(' + POSITIVE_FLOAT_PATTERN + r'))$',
hint='Expecting number followed by standard volume units, e.g. "5.5 mL"',
default='0 mL',
)
#: Prop limit for mass props.
MASS_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
MASS_UNITS_PATTERN,
hint='Expecting number followed by standard mass units, e.g. "2.3 g"',
default='0 g'
)
#: Prop limit for mol props.
MOL_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
MOL_UNITS_PATTERN,
hint='Expecting number followed by mol or mmol, e.g. "2.3 mol".',
default='0 mol',
)
#: Prop limit for temp props.
TEMP_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
FLOAT_PATTERN,
TEMP_UNITS_PATTERN,
hint='Expecting number in degrees celsius or number followed by standard temperature units, e.g. "25", "25°C", "298 K".',
default='25°C',
)
#: Prop limit for time props.
TIME_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
TIME_UNITS_PATTERN,
hint='Expecting number followed by standard time units, e.g. "15 mins", "3 hrs".',
default='0 secs'
)
#: Prop limit for pressure props.
PRESSURE_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
PRESSURE_UNITS_PATTERN,
hint='Expecting number followed by standard pressure units, e.g. "50 mbar", "1 atm".',
default='1013.25 mbar'
)
#: Prop limit for rotation speed props.
ROTATION_SPEED_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
ROTATION_SPEED_UNITS_PATTERN,
hint='Expecting RPM value, e.g. "400 RPM".',
default='400 RPM',
)
#: Prop limit for wavelength props.
WAVELENGTH_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(
POSITIVE_FLOAT_PATTERN,
DISTANCE_UNITS_PATTERN,
hint='Expecting wavelength, e.g. "400 nm".',
default='400 nm'
)
#: Prop limit for any props requiring a positive integer such as ``repeats``.
#: Used if no explicit property is given and prop type is ``int``.
POSITIVE_INT_PROP_LIMIT: PropLimit = PropLimit(
r'[0-9]+',
hint='Expecting positive integer value, e.g. "3"',
default='1',
)
#: Prop limit for any props requiring a positive float. Used if no explicit
#: prop type is given and prop type is ``float``.
POSITIVE_FLOAT_PROP_LIMIT: PropLimit = PropLimit(
regex=POSITIVE_FLOAT_PATTERN,
hint='Expecting positive float value, e.g. "3", "3.5"',
default='0',
)
#: Prop limit for any props requiring a boolean value. Used if no explicit prop
#: type is given and prop type is ``bool``.
BOOL_PROP_LIMIT: PropLimit = PropLimit(
BOOL_PATTERN,
hint='Expecting one of "false" or "true".',
default='false',
)
#: Prop limit for ``WashSolid`` ``stir`` prop. This is a special case as the
#: value can be ``True``, ``False`` or ``'solvent'``.
WASH_SOLID_STIR_PROP_LIMIT: PropLimit = PropLimit(
r'(' + BOOL_PATTERN + r'|solvent)',
enum=['true', 'solvent', 'false'],
hint='Expecting one of "true", "false" or "solvent".',
default='True'
)
#: Prop limit for ``Separate`` ``purpose`` prop. One of 'extract' or 'wash'.
SEPARATION_PURPOSE_PROP_LIMIT: PropLimit = PropLimit(enum=['extract', 'wash'])
#: Prop limit for ``Separate`` ``product_phase`` prop. One of 'top' or 'bottom'.
SEPARATION_PRODUCT_PHASE_PROP_LIMIT: PropLimit = PropLimit(enum=['top', 'bottom'])
#: Prop limit for ``Add`` ``purpose`` prop. One of 'neutralize', 'precipitate',
#: 'dissolve', 'basify', 'acidify' or 'dilute'.
ADD_PURPOSE_PROP_LIMIT = PropLimit(
enum=[
'neutralize',
'precipitate',
'dissolve',
'basify',
'acidify',
'dilute',
]
)
#: Prop limit for ``HeatChill`` ``purpose`` prop. One of 'control-exotherm',
#: 'reaction' or 'unstable-reagent'.
HEATCHILL_PURPOSE_PROP_LIMIT = PropLimit(
enum=['control-exotherm', 'reaction', 'unstable-reagent']
)
#: Prop limit for ``Stir`` ``purpose`` prop. 'dissolve' is only option.
STIR_PURPOSE_PROP_LIMIT = PropLimit(
enum=['dissolve']
)
#: Prop limit for ``Reagent`` ``role`` prop. One of 'solvent', 'reagent',
#: 'catalyst', 'substrate', 'acid', 'base' or 'activating-agent'.
REAGENT_ROLE_PROP_LIMIT = PropLimit(
enum=[
'solvent',
'reagent',
'catalyst',
'substrate',
'acid',
'base',
'activating-agent'
]
)
#: Prop limit for ``Component`` ``component_type`` prop. One of 'reactor',
#: 'filter', 'separator', 'rotavap' or 'flask'.
COMPONENT_TYPE_PROP_LIMIT: PropLimit = PropLimit(
enum=['reactor', 'filter', 'separator', 'rotavap', 'flask']
)
#: Pattern matching a float of value 100, e.g. '100', '100.0', '100.000' would
#: all be matched.
_hundred_float: str = r'(100(?:[.][0]+)?)'
#: Pattern matching any float between 10.000 and 99.999.
_ten_to_ninety_nine_float: str = r'([0-9][0-9](?:[.][0-9]+)?)'
#: Pattern matching any float between 0 and 9.999.
_zero_to_ten_float: str = r'([0-9](?:[.][0-9]+)?)'
#: Pattern matching float between 0 and 100. Used for percentages.
PERCENT_RANGE_PROP_LIMIT: PropLimit = PropLimit(
r'^(' + _hundred_float + '|'\
+ _ten_to_ninety_nine_float + '|' + _zero_to_ten_float + ')$',
hint='Expecting number from 0-100 representing a percentage, e.g. "50", "8.5".',
default='0',
)
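A few spot checks of the limits defined above; this is plain usage of the module, nothing assumed beyond it:

print(VOLUME_PROP_LIMIT.validate('5.5 mL'))               # True
print(VOLUME_PROP_LIMIT.validate('all'))                  # True
print(TEMP_PROP_LIMIT.validate('-78 °C'))                 # True, negative temps allowed
print(SEPARATION_PURPOSE_PROP_LIMIT.validate('wash'))     # True
# Caveat observable in the code: enum-generated regexes are not end-anchored
# and re.match only anchors at the start, so prefixed strings pass too:
print(SEPARATION_PURPOSE_PROP_LIMIT.validate('washing'))  # also True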
| 35.591176
| 277
| 0.650029
| 1,661
| 12,101
| 4.627333
| 0.198073
| 0.055035
| 0.046838
| 0.031616
| 0.288967
| 0.175904
| 0.136482
| 0.10994
| 0.080926
| 0.080926
| 0
| 0.015435
| 0.218329
| 12,101
| 339
| 278
| 35.696165
| 0.796807
| 0.441203
| 0
| 0.109195
| 0
| 0.028736
| 0.288471
| 0.090353
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028736
| false
| 0
| 0.011494
| 0
| 0.068966
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0531fdc3eeb8a1247c13837ac5c2a532816fd2e
| 3,884
|
py
|
Python
|
dit/utils/bindargs.py
|
leoalfonso/dit
|
e7d5f680b3f170091bb1e488303f4255eeb11ef4
|
[
"BSD-3-Clause"
] | 1
|
2021-03-15T08:51:42.000Z
|
2021-03-15T08:51:42.000Z
|
dit/utils/bindargs.py
|
leoalfonso/dit
|
e7d5f680b3f170091bb1e488303f4255eeb11ef4
|
[
"BSD-3-Clause"
] | null | null | null |
dit/utils/bindargs.py
|
leoalfonso/dit
|
e7d5f680b3f170091bb1e488303f4255eeb11ef4
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Provides usable args and kwargs from inspect.getcallargs.
For Python 3.3 and above, this module is unnecessary and can be achieved using
features from PEP 362:
http://www.python.org/dev/peps/pep-0362/
For example, to override a parameter of some function:
>>> import inspect
>>> def func(a, b=1, c=2, d=3):
... return a, b, c, d
...
>>> def override_c(*args, **kwargs):
... sig = inspect.signature(override)
... ba = sig.bind(*args, **kwargs)
... ba['c'] = 10
... return func(*ba.args, *ba.kwargs)
...
>>> override_c(0, c=3)
(0, 1, 10, 3)
Also useful:
http://www.python.org/dev/peps/pep-3102/
"""
import sys
import inspect
from inspect import getcallargs
try:
from inspect import getfullargspec
except ImportError:
# Python 2.X
from collections import namedtuple
from inspect import getargspec
FullArgSpec = namedtuple('FullArgSpec',
'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
def getfullargspec(f):
args, varargs, varkw, defaults = getargspec(f)
kwonlyargs = []
kwonlydefaults = None
annotations = getattr(f, '__annotations__', {})
return FullArgSpec(args, varargs, varkw, defaults,
kwonlyargs, kwonlydefaults, annotations)
def bindcallargs_leq32(_fUnCtIoN_, *args, **kwargs):
"""Binds arguments and keyword arguments to a function or method.
Returns a tuple (bargs, bkwargs) suitable for manipulation and passing
to the specified function.
`bargs` consists of the bound args, varargs, and kwonlyargs from
getfullargspec. `bkwargs` consists of the bound varkw from getfullargspec.
Both can be used in a call to the specified function. Any default
parameter values are included in the output.
Examples
--------
>>> def func(a, b=3, *args, **kwargs):
... pass
>>> bindcallargs(func, 5)
((5, 3), {})
>>> bindcallargs(func, 5, 4, 3, 2, 1, hello='there')
((5, 4, 3, 2, 1), {'hello': 'there'})
>>> args, kwargs = bindcallargs(func, 5)
>>> kwargs['b'] = 5 # overwrite default value for b
>>> func(*args, **kwargs)
"""
# It is necessary to choose an unlikely variable name for the function.
# The reason is that any kwarg by the same name will cause a TypeError
# due to multiple values being passed for that argument name.
func = _fUnCtIoN_
callargs = getcallargs(func, *args, **kwargs)
spec = getfullargspec(func)
# Construct all args and varargs and use them in bargs
bargs = [callargs[arg] for arg in spec.args]
if spec.varargs is not None:
bargs.extend(callargs[spec.varargs])
bargs = tuple(bargs)
# Start with kwonlyargs.
bkwargs = {kwonlyarg: callargs[kwonlyarg] for kwonlyarg in spec.kwonlyargs}
# Add in kwonlydefaults for unspecified kwonlyargs only.
# Since keyword only arguements aren't allowed in python2, and we
# don't support python 3.0, 3.1, 3.2, this should never be executed:
if spec.kwonlydefaults is not None: # pragma: no cover
bkwargs.update({k: v for k, v in spec.kwonlydefaults.items()
if k not in bkwargs})
# Add in varkw.
if spec.varkw is not None:
bkwargs.update(callargs[spec.varkw])
return bargs, bkwargs
def bindcallargs_geq33(_fUnCtIoN_, *args, **kwargs):
# Should match functionality of bindcallargs_32 for Python > 3.3.
sig = inspect.signature(_fUnCtIoN_)
ba = sig.bind(*args, **kwargs)
# Add in all default values
for param in sig.parameters.values():
if param.name not in ba.arguments:
ba.arguments[param.name] = param.default
return ba.args, ba.kwargs
if sys.version_info[0:2] < (3,3):
bindcallargs = bindcallargs_leq32
else:
bindcallargs = bindcallargs_geq33
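A small usage sketch of the public alias; a plain signature keeps both code paths predictable (note that on the Python 3 path, a parameter like `*args` that is absent from the bound arguments gets `Parameter.empty` assigned as its "default", so functions without varargs behave most reliably):

def g(a, b=3, c='x'):
    return a, b, c

bargs, bkwargs = bindcallargs(g, 5)
print(bargs, bkwargs)  # (5, 3, 'x') {}

# Override the bound default for b before the real call:
bargs = bargs[:1] + (7,) + bargs[2:]
print(g(*bargs, **bkwargs))  # (5, 7, 'x')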
| 31.072
| 79
| 0.65036
| 512
| 3,884
| 4.894531
| 0.339844
| 0.035914
| 0.020351
| 0.028731
| 0.106145
| 0.090982
| 0.090982
| 0.05826
| 0.05826
| 0
| 0
| 0.022004
| 0.239444
| 3,884
| 124
| 80
| 31.322581
| 0.826337
| 0.514933
| 0
| 0
| 0
| 0
| 0.054372
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.162791
| 0
| 0.302326
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0543e59c4fcb122d63759114f58b779ede6cdce
| 540
|
py
|
Python
|
graph/articulation_points.py
|
fujihiraryo/library
|
cdb01e710219d7111f890d09f89531916dd03533
|
[
"MIT"
] | null | null | null |
graph/articulation_points.py
|
fujihiraryo/library
|
cdb01e710219d7111f890d09f89531916dd03533
|
[
"MIT"
] | 4
|
2020-12-16T10:00:00.000Z
|
2021-02-12T12:51:50.000Z
|
graph/articulation_points.py
|
fujihiraryo/python-kyopro-library
|
cdb01e710219d7111f890d09f89531916dd03533
|
[
"MIT"
] | null | null | null |
from depth_first_search import DFS


def articulation_points(graph):
    n = len(graph)
    dfs = DFS(graph)
    order = [None] * n
    for i, x in enumerate(dfs.preorder):
        order[x] = i
    lower = order[:]
    for x in dfs.preorder[::-1]:
        for y in graph[x]:
            if y == dfs.parent[x]:
                continue
            lower[x] = min(lower[x], lower[y])
    if len(dfs.children[0]) > 1:
        yield 0
    for x in range(1, n):
        if any(order[x] <= lower[y] for y in dfs.children[x]):
            yield x
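The DFS helper is not shown here; a minimal stand-in exposing the attributes this function relies on (`preorder`, `parent`, `children`), plus a small check, is sketched below. The stub's interface is an assumption about the imported helper, and the recursive DFS matters: the tree must be a true DFS tree so that non-tree edges are back edges only.

class DFS:  # minimal stand-in for the imported helper (assumed interface)
    def __init__(self, graph):
        n = len(graph)
        self.parent = [None] * n
        self.preorder = []
        self.children = [[] for _ in range(n)]
        visited = [False] * n

        def visit(x):
            visited[x] = True
            self.preorder.append(x)
            for y in graph[x]:
                if not visited[y]:
                    self.parent[y] = x
                    self.children[x].append(y)
                    visit(y)

        visit(0)

# Path graph 0-1-2-3: removing vertex 1 or 2 disconnects it.
graph = [[1], [0, 2], [1, 3], [2]]
print(list(articulation_points(graph)))  # [1, 2]

# Triangle 0-1-2: no articulation points.
print(list(articulation_points([[1, 2], [0, 2], [0, 1]])))  # []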
| 25.714286
| 62
| 0.522222
| 83
| 540
| 3.361446
| 0.385542
| 0.032258
| 0.043011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014045
| 0.340741
| 540
| 20
| 63
| 27
| 0.769663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e055245acd2ad8d01c1ab4aacd02a9a0e3b9e3b6
| 1,558
|
py
|
Python
|
database.py
|
AndreAngelucci/popcorn_time_bot
|
710b77b59d6c62569c1bf6984c7cf9adac8ea840
|
[
"MIT"
] | null | null | null |
database.py
|
AndreAngelucci/popcorn_time_bot
|
710b77b59d6c62569c1bf6984c7cf9adac8ea840
|
[
"MIT"
] | 1
|
2021-06-02T00:39:42.000Z
|
2021-06-02T00:39:42.000Z
|
database.py
|
AndreAngelucci/popcorn_time_bot
|
710b77b59d6c62569c1bf6984c7cf9adac8ea840
|
[
"MIT"
] | null | null | null |
import pymongo
from conf import Configuracoes


class Mongo_Database:
    """ Singleton holding the MongoDB connection """
    _instancia = None

    def __new__(cls, *args, **kwargs):
        if not cls._instancia:
            # object.__new__ takes no extra arguments in Python 3
            cls._instancia = super(Mongo_Database, cls).__new__(cls)
        return cls._instancia

    def __init__(self):
        # read the connection string from the config file
        string_conexao = Configuracoes().get_config("database", "string_connection")
        assert (string_conexao != ""), "Connection string not defined"
        try:
            self.mongo_client = pymongo.MongoClient(string_conexao)
            self.collection_filmes = self.mongo_client["popcorn_time"]["filmes"]
            self.collection_tweets = self.mongo_client["twitter_log"]["tweets"]
        except Exception:
            raise Exception("Could not connect to the database.")
        print("Connected to", string_conexao)

    def grava_filmes(self, lista_filmes):
        # check whether each movie already exists;
        # if not, store it and add it to the list of new movies
        novos = []
        try:
            for filme in lista_filmes:
                if self.collection_filmes.count_documents({"_id": filme["_id"]}) == 0:
                    self.collection_filmes.insert_one(filme)
                    novos.append(filme)
        finally:
            return novos

    def grava_tweet(self, tweet_info):
        # store the tweet responses
        self.collection_tweets.insert_one(tweet_info)
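A hedged usage sketch; it assumes conf.Configuracoes provides a valid "string_connection" and that a MongoDB instance is reachable:

db_a = Mongo_Database()
db_b = Mongo_Database()
print(db_a is db_b)  # True -> singleton: both names share one connection

# First call stores the document; a second identical call returns [].
novos = db_a.grava_filmes([{"_id": 1, "titulo": "Exemplo"}])
print(len(novos))  # 1 on first run, 0 afterwards (already stored)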
| 39.948718
| 88
| 0.617458
| 179
| 1,558
| 5.122905
| 0.47486
| 0.076336
| 0.049073
| 0.034896
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000903
| 0.288832
| 1,558
| 38
| 89
| 41
| 0.826715
| 0.129012
| 0
| 0.068966
| 0
| 0
| 0.104677
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 1
| 0.137931
| false
| 0
| 0.068966
| 0
| 0.344828
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0573523b4d451bef7e8afb67ef1d49c8d3db2d3
| 1,051
|
py
|
Python
|
Other_Python/Kernel_Methods/matrix_operations.py
|
Romit-Maulik/Tutorials-Demos-Practice
|
a58ddc819f24a16f7059e63d7f201fc2cd23e03a
|
[
"MIT"
] | null | null | null |
Other_Python/Kernel_Methods/matrix_operations.py
|
Romit-Maulik/Tutorials-Demos-Practice
|
a58ddc819f24a16f7059e63d7f201fc2cd23e03a
|
[
"MIT"
] | null | null | null |
Other_Python/Kernel_Methods/matrix_operations.py
|
Romit-Maulik/Tutorials-Demos-Practice
|
a58ddc819f24a16f7059e63d7f201fc2cd23e03a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 22 14:36:48 2020

@author: matth
"""
import autograd.numpy as np


# %% Kernel operations

# Returns the squared norm of the pairwise difference
def norm_matrix(matrix_1, matrix_2):
    norm_square_1 = np.sum(np.square(matrix_1), axis=1)
    norm_square_1 = np.reshape(norm_square_1, (-1, 1))
    norm_square_2 = np.sum(np.square(matrix_2), axis=1)
    norm_square_2 = np.reshape(norm_square_2, (-1, 1))
    d1 = matrix_1.shape
    d2 = matrix_2.shape
    if d1[1] != d2[1]:
        matrix_1 = np.transpose(matrix_1)
    inner_matrix = np.matmul(matrix_1, np.transpose(matrix_2))
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
    norm_diff = -2 * inner_matrix + norm_square_1 + np.transpose(norm_square_2)
    return norm_diff


# Returns the pairwise inner product
def inner_matrix(matrix_1, matrix_2):
    d1 = matrix_1.shape
    d2 = matrix_2.shape
    if d1[1] != d2[1]:
        matrix_1 = np.transpose(matrix_1)
    return np.matmul(matrix_1, np.transpose(matrix_2))


if __name__ == '__main__':
    print('This is the matrix operations file')
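A quick numerical check of norm_matrix against a direct elementwise computation; plain numpy is enough here:

import numpy as onp  # plain numpy suffices for the check

a = onp.array([[0.0, 0.0], [1.0, 1.0]])
b = onp.array([[1.0, 0.0], [0.0, 2.0], [3.0, 3.0]])
d2 = norm_matrix(a, b)  # (2, 3) matrix of squared pairwise distances
direct = onp.array([[onp.sum((x - y) ** 2) for y in b] for x in a])
print(onp.allclose(d2, direct))  # True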
| 25.02381
| 79
| 0.676499
| 172
| 1,051
| 3.854651
| 0.296512
| 0.116139
| 0.090498
| 0.108597
| 0.440422
| 0.28356
| 0.28356
| 0.28356
| 0.184012
| 0.184012
| 0
| 0.068884
| 0.198858
| 1,051
| 42
| 80
| 25.02381
| 0.718527
| 0.164605
| 0
| 0.380952
| 0
| 0
| 0.048387
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.047619
| 0
| 0.238095
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0576a003dfb918c45d8ae2afa80c98a64287387
| 2,371
|
py
|
Python
|
cors/resources/cors-makeheader.py
|
meyerweb/wpt
|
f04261533819893c71289614c03434c06856c13e
|
[
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
cors/resources/cors-makeheader.py
|
meyerweb/wpt
|
f04261533819893c71289614c03434c06856c13e
|
[
"BSD-3-Clause"
] | 7,642
|
2018-05-28T09:38:03.000Z
|
2022-03-31T20:55:48.000Z
|
cors/resources/cors-makeheader.py
|
meyerweb/wpt
|
f04261533819893c71289614c03434c06856c13e
|
[
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
import json

from wptserve.utils import isomorphic_decode

def main(request, response):
    origin = request.GET.first(b"origin", request.headers.get(b'origin') or b'none')

    if b"check" in request.GET:
        token = request.GET.first(b"token")
        value = request.server.stash.take(token)
        if value is not None:
            if request.GET.first(b"check", None) == b"keep":
                request.server.stash.put(token, value)
            body = u"1"
        else:
            body = u"0"
        return [(b"Content-Type", b"text/plain")], body

    if origin != b'none':
        response.headers.set(b"Access-Control-Allow-Origin", origin)
    if b'origin2' in request.GET:
        response.headers.append(b"Access-Control-Allow-Origin", request.GET.first(b'origin2'))

    # Preflight
    if b'headers' in request.GET:
        response.headers.set(b"Access-Control-Allow-Headers", request.GET.first(b'headers'))
    if b'credentials' in request.GET:
        response.headers.set(b"Access-Control-Allow-Credentials", request.GET.first(b'credentials'))
    if b'methods' in request.GET:
        response.headers.set(b"Access-Control-Allow-Methods", request.GET.first(b'methods'))

    code_raw = request.GET.first(b'code', None)
    if code_raw:
        code = int(code_raw)
    else:
        code = None
    if request.method == u'OPTIONS':
        # Override the response code if we're in a preflight and it's asked
        if b'preflight' in request.GET:
            code = int(request.GET.first(b'preflight'))
        # Log that the preflight actually happened if we have an ident
        if b'token' in request.GET:
            request.server.stash.put(request.GET[b'token'], True)

    if b'location' in request.GET:
        if code is None:
            code = 302
        if code >= 300 and code < 400:
            response.headers.set(b"Location", request.GET.first(b'location'))

    headers = {}
    for name, values in request.headers.items():
        if len(values) == 1:
            headers[isomorphic_decode(name)] = isomorphic_decode(values[0])
        else:
            # I have no idea, really
            headers[name] = values

    headers[u'get_value'] = isomorphic_decode(request.GET.first(b'get_value', b''))

    body = json.dumps(headers)

    if code:
        return (code, b"StatusText"), [], body
    else:
        return body
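A hedged client-side sketch for exercising this handler; the host, port, and path are assumptions about a locally running wptserve instance, not part of the handler itself:

import requests

r = requests.get(
    'http://localhost:8000/cors/resources/cors-makeheader.py',
    params={'origin': 'http://example.test'},
)
print(r.headers.get('Access-Control-Allow-Origin'))  # http://example.test
print(r.json()['get_value'])  # '' unless a get_value query param is passed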
| 33.871429
| 100
| 0.619148
| 329
| 2,371
| 4.43465
| 0.261398
| 0.13708
| 0.113091
| 0.120631
| 0.191912
| 0.126114
| 0.126114
| 0.100754
| 0.100754
| 0.100754
| 0
| 0.008451
| 0.251371
| 2,371
| 69
| 101
| 34.362319
| 0.813521
| 0.065795
| 0
| 0.078431
| 0
| 0
| 0.162822
| 0.064224
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.039216
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e057de6d96dbc248f4a0c02caf3e3c52ad4ff136
| 1,053
|
py
|
Python
|
device_osc_grid.py
|
wlfyit/PiLightsLib
|
98e39af45f05d0ee44e2f166de5b654d58df33ae
|
[
"MIT"
] | null | null | null |
device_osc_grid.py
|
wlfyit/PiLightsLib
|
98e39af45f05d0ee44e2f166de5b654d58df33ae
|
[
"MIT"
] | null | null | null |
device_osc_grid.py
|
wlfyit/PiLightsLib
|
98e39af45f05d0ee44e2f166de5b654d58df33ae
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from pythonosc import osc_bundle_builder
from pythonosc import osc_message_builder
from pythonosc import udp_client

from .device import DeviceObj


# OSC Grid Object
class OSCGrid(DeviceObj):
    def __init__(self, name, width, height, ip, port, bri=1):
        DeviceObj.__init__(self, name, "osc_grid", width, height)
        self.buffer = []
        self.brightness = bri
        self.osc = udp_client.SimpleUDPClient(ip, port)

    def set(self, r, g, b, x=0, y=0):
        DeviceObj.set(self, r, g, b, x, y)
        # Set Pixel
        builder = osc_message_builder.OscMessageBuilder(address="/light/{0}/{1}/color".format(x, y))
        builder.add_arg(r)
        builder.add_arg(g)
        builder.add_arg(b)
        self.buffer.append(builder.build())

    def show(self):
        DeviceObj.show(self)
        # Update Display
        bundle = osc_bundle_builder.OscBundleBuilder(0)
        for m in self.buffer:
            bundle.add_content(m)
        self.osc.send(bundle.build())
        self.buffer.clear()
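A hedged usage sketch; it assumes an OSC server listening on 127.0.0.1:9000 and that DeviceObj's set/show hooks accept these arguments:

grid = OSCGrid("wall", width=8, height=8, ip="127.0.0.1", port=9000)
grid.set(255, 0, 0, x=0, y=0)  # queue a red pixel at (0, 0)
grid.set(0, 0, 255, x=7, y=7)  # queue a blue pixel at (7, 7)
grid.show()                    # send both messages as one OSC bundle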
| 24.488372
| 100
| 0.636277
| 142
| 1,053
| 4.556338
| 0.415493
| 0.061824
| 0.088099
| 0.068006
| 0.034003
| 0.034003
| 0
| 0
| 0
| 0
| 0
| 0.008838
| 0.247863
| 1,053
| 42
| 101
| 25.071429
| 0.808081
| 0.058879
| 0
| 0
| 0
| 0
| 0.028369
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e05894d94e1647d1250203e64a76b21248195718
| 1,274
|
py
|
Python
|
test.py
|
iron-io/iron_cache_python
|
f68f5a5e216e3189397ffd7d243de0d53bf7c764
|
[
"BSD-2-Clause"
] | 3
|
2015-08-01T13:30:16.000Z
|
2021-03-22T10:25:57.000Z
|
test.py
|
iron-io/iron_cache_python
|
f68f5a5e216e3189397ffd7d243de0d53bf7c764
|
[
"BSD-2-Clause"
] | 1
|
2015-06-02T08:53:44.000Z
|
2015-06-02T09:59:17.000Z
|
test.py
|
iron-io/iron_cache_python
|
f68f5a5e216e3189397ffd7d243de0d53bf7c764
|
[
"BSD-2-Clause"
] | 3
|
2015-05-12T18:13:52.000Z
|
2016-09-08T20:43:40.000Z
|
from iron_cache import *
import unittest
import requests


class TestIronCache(unittest.TestCase):
    def setUp(self):
        self.cache = IronCache("test_cache")

    def test_get(self):
        self.cache.put("test_item", "testing")
        item = self.cache.get("test_item")
        self.assertEqual(item.value, "testing")

    def test_delete(self):
        self.cache.put("test_item", "will be deleted")
        self.cache.delete("test_item")
        self.assertRaises(requests.exceptions.HTTPError,
                          self.cache.get, "test_item")

    def test_increment(self):
        self.cache.put("test_item", 2)
        self.cache.increment("test_item")
        item = self.cache.get("test_item")
        self.assertEqual(item.value, 3)
        self.cache.increment("test_item", amount=42)
        item = self.cache.get("test_item")
        self.assertEqual(item.value, 45)

    def test_decrement(self):
        self.cache.put("test_item", 100)
        self.cache.decrement("test_item")
        item = self.cache.get("test_item")
        self.assertEqual(item.value, 99)
        self.cache.decrement("test_item", amount=98)
        item = self.cache.get("test_item")
        self.assertEqual(item.value, 1)


if __name__ == '__main__':
    unittest.main()
| 31.073171
| 56
| 0.631868
| 161
| 1,274
| 4.819876
| 0.254658
| 0.185567
| 0.092784
| 0.123711
| 0.592784
| 0.453608
| 0.329897
| 0.329897
| 0.329897
| 0.329897
| 0
| 0.014315
| 0.232339
| 1,274
| 40
| 57
| 31.85
| 0.779141
| 0
| 0
| 0.151515
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.151515
| false
| 0
| 0.090909
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e05b4851d3707561c8c65e7a4b20ce903889be85
| 1,550
|
py
|
Python
|
src/sv-pipeline/04_variant_resolution/scripts/merge_RdTest_genotypes.py
|
leipzig/gatk-sv
|
96566cbbaf0f8f9c8452517b38eea1e5dd6ed33a
|
[
"BSD-3-Clause"
] | 76
|
2020-06-18T21:31:43.000Z
|
2022-03-02T18:42:58.000Z
|
src/sv-pipeline/04_variant_resolution/scripts/merge_RdTest_genotypes.py
|
iamh2o/gatk-sv
|
bf3704bd1d705339577530e267cd4d1b2f77a17f
|
[
"BSD-3-Clause"
] | 195
|
2020-06-22T15:12:28.000Z
|
2022-03-28T18:06:46.000Z
|
src/sv-pipeline/04_variant_resolution/scripts/merge_RdTest_genotypes.py
|
iamh2o/gatk-sv
|
bf3704bd1d705339577530e267cd4d1b2f77a17f
|
[
"BSD-3-Clause"
] | 39
|
2020-07-03T06:47:18.000Z
|
2022-03-03T03:47:25.000Z
|
#!/usr/bin/env python

import argparse

DELIMITER = "\t"


def merge(genotypes_filename, gq_filename, merged_filename):
    with open(genotypes_filename, "r") as genotypes, open(gq_filename, "r") as gq, open(merged_filename, "w") as merged:
        # Integrity check: do the files have the same columns?
        genotypes_header = genotypes.readline().rstrip().split(DELIMITER)
        gq_header = gq.readline().rstrip().split(DELIMITER)
        if not genotypes_header == gq_header:
            raise ValueError("The files do not have the same number/order of columns")
        n_cols = len(gq_header)

        for genotypes_line, gq_line in zip(genotypes, gq):
            x = genotypes_line.rstrip().split(DELIMITER)
            y = gq_line.rstrip().split(DELIMITER)
            # Check if lines in the files are in the correct order.
            if not x[0:4] == y[0:4]:
                raise ValueError(f"The lines in the files are not in the same order; "
                                 f"expected the following lines to match.\n{x[0:4]}\n{y[0:4]}")
            h = DELIMITER.join(x[0:4])
            for i in range(4, n_cols):
                merged.write(DELIMITER.join([h, gq_header[i], x[i], y[i]]) + "\n")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('genotypes')
    parser.add_argument('GQ')
    parser.add_argument('fout')
    args = parser.parse_args()
    merge(args.genotypes, args.GQ, args.fout)
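A self-contained check of merge() with two tiny TSV files written to a temp directory; the column names are illustrative:

import os
import tempfile

gt = "chr\tstart\tend\tid\tsampleA\tsampleB\nchr1\t1\t100\tv1\t0\t1\n"
gq = "chr\tstart\tend\tid\tsampleA\tsampleB\nchr1\t1\t100\tv1\t99\t88\n"

d = tempfile.mkdtemp()
paths = [os.path.join(d, n) for n in ("gt.tsv", "gq.tsv", "merged.tsv")]
for p, content in zip(paths[:2], (gt, gq)):
    with open(p, "w") as f:
        f.write(content)

merge(*paths)
print(open(paths[2]).read())
# chr1  1  100  v1  sampleA  0  99
# chr1  1  100  v1  sampleB  1  88   (tab-separated)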
| 36.046512
| 120
| 0.627742
| 210
| 1,550
| 4.466667
| 0.361905
| 0.010661
| 0.085288
| 0.059701
| 0.03838
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009483
| 0.251613
| 1,550
| 42
| 121
| 36.904762
| 0.799138
| 0.079355
| 0
| 0
| 0
| 0.037037
| 0.132022
| 0.018258
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.037037
| 0
| 0.074074
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e05cac875b2516b4ba7c777d72d8ac768173cf38
| 3,091
|
py
|
Python
|
crawling/sns/main.py
|
CSID-DGU/2021-2-OSSP2-TwoRolless-2
|
e9381418e3899d8e1e78415e9ab23b73b4f30a95
|
[
"MIT"
] | null | null | null |
crawling/sns/main.py
|
CSID-DGU/2021-2-OSSP2-TwoRolless-2
|
e9381418e3899d8e1e78415e9ab23b73b4f30a95
|
[
"MIT"
] | null | null | null |
crawling/sns/main.py
|
CSID-DGU/2021-2-OSSP2-TwoRolless-2
|
e9381418e3899d8e1e78415e9ab23b73b4f30a95
|
[
"MIT"
] | 1
|
2021-10-15T05:19:20.000Z
|
2021-10-15T05:19:20.000Z
|
import tweepy
import traceback
import time
import pymongo
from tweepy import OAuthHandler
from pymongo import MongoClient
from pymongo.cursor import CursorType

twitter_consumer_key = ""
twitter_consumer_secret = ""
twitter_access_token = ""
twitter_access_secret = ""

auth = OAuthHandler(twitter_consumer_key, twitter_consumer_secret)
auth.set_access_token(twitter_access_token, twitter_access_secret)
api = tweepy.API(auth)


def crawllTwit(snsname, findtag):
    account = snsname
    tweets = api.user_timeline(screen_name=account, count=100, include_rts=False, exclude_replies=True, tweet_mode='extended')

    snsList = []
    snsTime = []
    url = []
    pic = []
    i = 0
    for tweet in tweets:
        flag = tweet.full_text.find(findtag)
        if flag >= 0:
            # Keep the text up to the first embedded link
            ttp = tweet.full_text.split("https://")
            gong = ""
            count = 0
            for slist in ttp:
                if count == (len(ttp) - 1):
                    break
                gong = gong + slist
                count += 1
            snsList.append(gong)
            snsTime.append(tweet.created_at)
            tmp = f"https://twitter.com/{tweet.user.screen_name}/status/{tweet.id}"
            url.append(tmp)
            i += 1
            media = tweet.entities.get('media', [])
            if len(media) > 0:
                pic.append(media[0]['media_url'])
            else:
                pic.append("")

    j = 0
    while j < len(snsList):
        if j == 10:
            break
        # Unescape HTML entities left in the tweet text
        snsList[j] = snsList[j].replace('&lt;', '<')
        snsList[j] = snsList[j].replace('&gt;', '>')
        snsList[j] = snsList[j].replace('▶️', ' ⇒ ')
        j += 1

    mydb = my_client['TwoRolless']
    mycol = mydb['sns']
    for k in range(0, len(snsList)):
        if k == 15:
            break
        x = mycol.insert_one(
            {
                "tag": findtag,
                "time": snsTime[k],
                "text": snsList[k],
                "img": pic[k],
                "url": url[k]
            }
        )


conn_str = ""
my_client = pymongo.MongoClient(conn_str)

if __name__ == '__main__':
    while True:
        print("cycles start")
        mydb = my_client['TwoRolless']
        mycol = mydb['sns']
        mycol.delete_many({})  # Collection.remove() was removed in PyMongo 4
        crawllTwit("@m_thelastman", "더라스트맨")
        crawllTwit("@Musical_NarGold", "나르치스와_골드문트")
        crawllTwit("@rndworks", "더데빌")
        crawllTwit("@ninestory9", "엘리펀트송")
        crawllTwit("@companyrang", "쿠로이저택엔누가살고있을까")
        crawllTwit("@companyrang", "난쟁이들")
        crawllTwit("@page1company", "곤투모로우")
        crawllTwit("@HONGcompany", "더모먼트")
        crawllTwit("@orchardmusical", "칠칠")
        crawllTwit("@livecorp2011", "팬레터")
        crawllTwit("@shownote", "젠틀맨스가이드")
        crawllTwit("@od_musical", "지킬앤하이드")
        crawllTwit("@kontentz", "엔딩노트")
        crawllTwit("@i_seensee", "빌리")
        crawllTwit("@doublek_ent", "은하철도의")
        crawllTwit("@Insight_Since96", "뱀파이어아더")
        print("cycle end")
        print("sleep 30 seconds")
        time.sleep(30)
        print("sleep end")
| 29.438095
| 126
| 0.547072
| 325
| 3,091
| 5.061538
| 0.44
| 0.029179
| 0.032827
| 0.043769
| 0.175684
| 0.133739
| 0.041337
| 0
| 0
| 0
| 0
| 0.014085
| 0.310903
| 3,091
| 104
| 127
| 29.721154
| 0.756808
| 0
| 0
| 0.075269
| 0
| 0
| 0.155613
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010753
| false
| 0
| 0.075269
| 0
| 0.086022
| 0.043011
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e05cbd467aaeb3118a784785e85a274a27c23842
| 698
|
py
|
Python
|
demos/interactive-classifier/config.py
|
jepabe/Demo_earth2
|
ab20c3a9114904219688b16f8a1273e68927e6f9
|
[
"Apache-2.0"
] | 1,909
|
2015-04-22T20:18:22.000Z
|
2022-03-31T13:42:03.000Z
|
demos/interactive-classifier/config.py
|
jepabe/Demo_earth2
|
ab20c3a9114904219688b16f8a1273e68927e6f9
|
[
"Apache-2.0"
] | 171
|
2015-09-24T05:49:49.000Z
|
2022-03-14T00:54:50.000Z
|
demos/interactive-classifier/config.py
|
jepabe/Demo_earth2
|
ab20c3a9114904219688b16f8a1273e68927e6f9
|
[
"Apache-2.0"
] | 924
|
2015-04-23T05:43:18.000Z
|
2022-03-28T12:11:31.000Z
|
#!/usr/bin/env python
"""Handles Earth Engine service account configuration."""
import ee
# The service account email address authorized by your Google contact.
# Set up a service account as described in the README.
EE_ACCOUNT = 'your-service-account-id@developer.gserviceaccount.com'
# The private key associated with your service account in Privacy Enhanced
# Email format (.pem suffix). To convert a private key from the RSA format
# (.p12 suffix) to .pem, run the openssl command like this:
# openssl pkcs12 -in downloaded-privatekey.p12 -nodes -nocerts > privatekey.pem
EE_PRIVATE_KEY_FILE = 'privatekey.pem'
EE_CREDENTIALS = ee.ServiceAccountCredentials(EE_ACCOUNT, EE_PRIVATE_KEY_FILE)
| 41.058824
| 79
| 0.787966
| 101
| 698
| 5.356436
| 0.574257
| 0.12939
| 0.066543
| 0.05915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009917
| 0.133238
| 698
| 16
| 80
| 43.625
| 0.884298
| 0.683381
| 0
| 0
| 0
| 0
| 0.320574
| 0.253589
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e061aa108e5ec8060888f9dff1215ff5763d024a
| 2,847
|
py
|
Python
|
projects/scocen/cmd_components_simple.py
|
mikeireland/chronostar
|
fcf37614e1d145f3a5e265e54512bf8cd98051a0
|
[
"MIT"
] | 4
|
2018-05-28T11:05:42.000Z
|
2021-05-14T01:13:11.000Z
|
projects/scocen/cmd_components_simple.py
|
mikeireland/chronostar
|
fcf37614e1d145f3a5e265e54512bf8cd98051a0
|
[
"MIT"
] | 13
|
2019-08-14T07:30:24.000Z
|
2021-11-08T23:44:29.000Z
|
projects/scocen/cmd_components_simple.py
|
mikeireland/chronostar
|
fcf37614e1d145f3a5e265e54512bf8cd98051a0
|
[
"MIT"
] | 4
|
2016-04-21T08:25:26.000Z
|
2021-02-25T06:53:52.000Z
|
"""
Plot CMDs for each component.
"""
import numpy as np
from astropy.table import Table
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.ion()
# Pretty plots
from fig_settings import *
############################################
# Some things are the same for all the plotting scripts and we put
# this into a single library to avoid confusion.
import scocenlib as lib
data_filename = lib.data_filename
comps_filename = lib.comps_filename
compnames = lib.compnames
colors = lib.colors
############################################
# Minimal probability required for membership
pmin_membership = 0.5
############################################
# how to split subplots
grid = [5, 5]
# CMD limits
xlim = [-1, 5]
ylim = [17, -3]
############################################
# Read data
try:
tab = tab0
comps = comps0
except:
tab0 = Table.read(data_filename)
Gmag = tab0['phot_g_mean_mag'] - 5 * np.log10(1.0 / (tab0['parallax'] * 1e-3) / 10) # tab['parallax'] in micro arcsec
tab0['Gmag'] = Gmag
comps0 = Table.read(comps_filename)
tab = tab0
comps = comps0
# Main sequence parametrization
# fitpar for pmag, rpmag
fitpar = [0.17954163, -2.48748376, 12.9279348, -31.35434182, 38.31330583, -12.25864507]
poly = np.poly1d(fitpar)
x = np.linspace(1, 4, 100)
y = poly(x)
m = y > 4
yms = y[m]
xms = x[m]
def plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim):
ax.plot(xms, yms, c='brown', label='Median main sequence', linewidth=1)
ax.plot(xms, yms - 1, c='brown', label='1 mag above the median', linewidth=1, linestyle='--')
ax.plot(xms, yms - 1.5, c='brown', label='1.5 mag above the median', linewidth=1, linestyle='--')
ax.axvline(x=0.369, linewidth=0.5, color='k') # F
ax.axvline(x=0.767, linewidth=0.5, color='k') # G
ax.axvline(x=0.979, linewidth=0.5, color='k') # K
ax.axvline(x=1.848, linewidth=0.5, color='k') # M
ax.set_xlim(xlim[0], xlim[1])
ax.set_ylim(ylim[0], ylim[1])
return ax
print('Plotting %d components.'%len(comps))
fig=plt.figure()
for i, c in enumerate(comps):
ax = fig.add_subplot(grid[0], grid[1], i+1) # TODO: adjust this if needed
comp_ID = c['comp_ID']
col=tab['membership%s'%comp_ID]
mask = col > pmin_membership
t=tab[mask]
if len(t)>100:
alpha=0.5
else:
alpha=1
t.sort('membership%s'%comp_ID)
#~ t.reverse()
#~ ax.scatter(t['bp_rp'], t['Gmag'], s=1, c='k', alpha=alpha)
ax.scatter(t['bp_rp'], t['Gmag'], s=1, c=t['membership%s'%comp_ID], alpha=1, vmin=0.5, vmax=1, cmap=cm.jet)
ax=plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim)
age=c['Age']
ax.set_title('%s (%.2f$\pm$%.2f Myr %s) %d'%(comp_ID, age, c['Crossing_time'], c['Age_reliable'], len(t)))
#~ plt.tight_layout()
plt.show()
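A standalone check of the absolute-magnitude conversion used when building the Gmag column above; the numbers are illustrative:

import numpy as np

# Distance modulus: M = m - 5*log10(d / 10 pc), with d = 1/parallax["] parsecs.
parallax_mas = 7.0                       # a star at roughly 143 pc
d_pc = 1.0 / (parallax_mas * 1e-3)       # mas -> arcsec, then d = 1/plx
m_app = 10.0                             # apparent G magnitude
M_abs = m_app - 5 * np.log10(d_pc / 10)
print(round(d_pc, 1), round(M_abs, 2))   # 142.9 4.23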
| 26.858491
| 122
| 0.601686
| 444
| 2,847
| 3.774775
| 0.38964
| 0.008353
| 0.023866
| 0.038186
| 0.182578
| 0.127685
| 0.127685
| 0.127685
| 0.082339
| 0.026253
| 0
| 0.062366
| 0.183351
| 2,847
| 105
| 123
| 27.114286
| 0.658495
| 0.162979
| 0
| 0.065574
| 0
| 0
| 0.113199
| 0
| 0
| 0
| 0
| 0.009524
| 0
| 1
| 0.016393
| false
| 0
| 0.098361
| 0
| 0.131148
| 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e06418cb46f2f01ccc35fc22e565190b30c821ed
| 16,478
|
py
|
Python
|
curlypiv/synthetics/microsig.py
|
sean-mackenzie/curlypiv
|
21c96c1bb1ba2548c4d5bebb389eb66ff58f851d
|
[
"MIT"
] | null | null | null |
curlypiv/synthetics/microsig.py
|
sean-mackenzie/curlypiv
|
21c96c1bb1ba2548c4d5bebb389eb66ff58f851d
|
[
"MIT"
] | 1
|
2021-06-14T17:24:43.000Z
|
2021-06-14T17:24:43.000Z
|
curlypiv/synthetics/microsig.py
|
sean-mackenzie/curlypiv
|
21c96c1bb1ba2548c4d5bebb389eb66ff58f851d
|
[
"MIT"
] | null | null | null |
# microsig
"""
Author: Maximilliano Rossi
More detail about the MicroSIG can be found at:
Website:
https://gitlab.com/defocustracking/microsig-python
Publication:
Rossi M, Synthetic image generator for defocusing and astigmatic PIV/PTV, Meas. Sci. Technol., 31, 017003 (2020)
DOI:10.1088/1361-6501/ab42bb.
"""
import numpy as np
import imageio
import tkinter as tk
import os
from os import listdir
from os.path import isfile, basename, join, isdir
import sys
import glob
# import time as tm
from tkinter import filedialog
# ----- code adapted by Sean MacKenzie ------
# 2.0 define class
class CurlypivMicrosigCollection(object):
def __init__(self, testSetup, synCol, use_gui=False,
use_internal_setting=False, setting_file=None,
use_internal_data=False, data_files=None,
to_internal_sequence=False, destination_folder=None,
output_dtype='np.uint16'):
if not isinstance(testSetup, object):
raise ValueError("{} must be a CurlypivTestSetup class object".format(testSetup))
if not isinstance(synCol, object):
raise ValueError("{} must be a CurlypivSyntheticCollection class object".format(synCol))
valid_output_dtype = ['np.uint16', 'np.uint8']
if output_dtype not in valid_output_dtype:
raise ValueError("{} must be one of {}".format(output_dtype, valid_output_dtype))
self.testSetup = testSetup
self.synCol = synCol
self.use_gui = use_gui
self.output_dtype = output_dtype
if self.use_gui:
run()
else:
if use_internal_setting:
self.setting_file = self.synCol.microsigSetup
else:
if not isinstance(setting_file, str):
raise ValueError("{} must be a filepath to microsig settings text file".format(setting_file))
self.setting_file = os.path.abspath(setting_file)
if use_internal_data:
raise ValueError("script to use internal data still in development")
else:
if not isinstance(data_files, str):
raise ValueError("{} must be a filepath to particle location text files".format(data_files))
all_files = glob.glob(data_files + '/*.txt')
save_files = []
for ff in [f for f in all_files if f.endswith('.txt')]:
save_files.append(ff)
save_files.sort()
self.data_files = save_files
if to_internal_sequence:
raise ValueError("script to use internal data still in development")
else:
if not isinstance(destination_folder, str):
raise ValueError("{} must be a filepath to write output images".format(destination_folder))
self.destination_folder = os.path.abspath(destination_folder)
self.generate()
def generate(self):
# %%
mic = {}
f = open(self.setting_file)
for x in f:
words = x.split()
mic[words[0]] = float(words[2])
mic['pixel_dim_x'] = int(mic['pixel_dim_x'])
mic['pixel_dim_y'] = int(mic['pixel_dim_y'])
mic['n_rays'] = int(mic['n_rays'])
# %%
ii = 0;
ii_tot = len(self.data_files)
for data in self.data_files:
ii = ii + 1
print('creating image {0} of {1} ...'.format(ii, ii_tot))
P = np.genfromtxt(data)
if len(P.shape) == 1:
P = np.array([P])
head, tail = os.path.split(data)
I = take_image(mic, P)
if self.output_dtype == 'np.uint16':
imageio.imwrite(os.path.join(self.destination_folder, (tail[:-3] + 'tif')),
np.uint16(I))
elif self.output_dtype == 'np.uint8':
imageio.imwrite(os.path.join(self.destination_folder, (tail[:-3] + 'tif')),
np.uint8(I))
print('done!')
# %%
def sorter(f):
sorting = int(f[:-4])
return sorting
def run():
# %%
root = tk.Tk()
root.attributes('-topmost', True)
root.withdraw()
setting_file = filedialog.askopenfilenames(
title="Select settings file", parent=root,
filetypes=(("txt files", "*.txt"), ("all files", "*.*")))
if not setting_file:
sys.exit('input file not valid')
data_files = filedialog.askopenfilenames(
title="Select data file(s)", parent=root,
filetypes=(("txt files", "*.txt"), ("all files", "*.*")))
    if not data_files:
        sys.exit('input file not valid')
    destination_folder = filedialog.askdirectory(
        title="Select destination folder", parent=root)
    if not destination_folder:
        sys.exit('destination folder not valid')
# %%
mic = {}
    with open(setting_file[0]) as f:
        for x in f:
            words = x.split()
            mic[words[0]] = float(words[2])
mic['pixel_dim_x'] = int(mic['pixel_dim_x'])
mic['pixel_dim_y'] = int(mic['pixel_dim_y'])
mic['n_rays'] = int(mic['n_rays'])
# %%
    ii = 0
ii_tot = len(data_files)
for data in data_files:
ii = ii + 1
print('creating image {0} of {1} ...'.format(ii, ii_tot))
P = np.genfromtxt(data)
if len(P.shape) == 1:
P = np.array([P])
head, tail = os.path.split(data)
        I = take_image(mic, P)
        # write the image (this step was missing in the GUI path, which
        # otherwise selected a destination folder but never used it)
        imageio.imwrite(os.path.join(destination_folder, (tail[:-3] + 'tif')),
                        np.uint16(I))
print('done!')
# %%
def take_image(mic, P):
    # NOTE: x and xp here represent light fields and should not be confused
    # with particle image coordinates, which are represented by P
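    # Added note on the particle matrix P (inferred from the branches below):
    #   P[:, 0:3] -> x, y, z image coordinates
    #   P[:, 3]   -> particle diameter
    #   P[:, 4:7] -> eccentricity, alpha, beta (ellipsoidal particles only)
    #   P[:, -1]  -> intensity factor k_id (when P has 5 or 8 columns)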
    I = np.zeros((mic['pixel_dim_y'], mic['pixel_dim_x']))
dp_s = np.unique(P[:, 3])
if P.shape[1] == 5 or P.shape[1] == 8:
k_id = P[:, -1]
else:
k_id = np.ones(P.shape[0])
if P.shape[1] <= 5 and dp_s.size == 1:
n_points = int(np.round(mic['points_per_pixel'] * 2 * np.pi *
(dp_s * mic['magnification'] / mic['pixel_size']) ** 2))
xp = create_particle(dp_s, n_points, mic['n_rays'])
for ii in range(0, P.shape[0]):
Id = image_spherical(mic, xp, P[ii, 0:3])
I = I + Id * k_id[ii]
elif P.shape[1] <= 5 and dp_s.size != 1:
for ii in range(0, P.shape[0]):
n_points = int(np.round(mic['points_per_pixel'] * 2 * np.pi *
(P[ii, 3] * mic['magnification'] / mic['pixel_size']) ** 2))
xp = create_particle(P[ii, 3], n_points, mic['n_rays'])
Id = image_spherical(mic, xp, P[ii, 0:3])
I = I + Id * k_id[ii]
elif P.shape[1] >= 7:
for ii in range(0, P.shape[0]):
n_points = int(np.round(mic['points_per_pixel'] * 2 * np.pi *
(P[ii, 3] * mic['magnification'] / mic['pixel_size']) ** 2))
ecc = P[ii, 4]
if ecc > 1:
                # area ellipsoid / area sphere
fact = 1 / 2 * (1 + ecc / np.sqrt(1 - 1 / ecc ** 2)
* np.arcsin(np.sqrt(1 - 1 / ecc ** 2)))
n_points = int(np.round(fact * n_points))
elif ecc < 1:
                # area ellipsoid / area sphere
fact = 1 / 2 * (1 + ecc ** 2 / np.sqrt(1 - ecc ** 2)
* np.arctan(np.sqrt(1 - ecc ** 2)))
n_points = int(np.round(fact * n_points))
xp = create_ellipsoid(P[ii, 3:7], n_points, mic['n_rays'])
            Id = image_spherical(mic, xp, P[ii, 0:3])
I = I + Id * k_id[ii]
I = I * mic['gain']
if mic['background_mean'] != 0:
I = I + mic['background_mean']
if mic['background_noise'] != 0:
Irand = np.random.normal(0, mic['background_noise'],
(mic['pixel_dim_y'], mic['pixel_dim_x']))
I = I + np.round(Irand)
# I = np.round(I+random('norm',0,mic.background_noise,...
# mic.pixel_dim_y,mic.pixel_dim_x));
return I
# %%
def image_spherical(mic, xp, P1):
# take image of a particle with a spherical lens
# NOTE: x and xp represent here light fields and should not be confused$
# with particle image coordinates which are represented by P1
lens_radius = (np.tan(np.arcsin(mic['numerical_aperture']))
* (1 + 1 / mic['magnification']) * mic['focal_length'])
# distance lens-ccd
    dCCD = -mic['focal_length'] * (mic['magnification'] + 1)
    # distance particle-lens
    dPART = P1[2] + mic['focal_length'] * (1 / mic['magnification'] + 1)
# linear transformation from the object plane to the lens plane
T2 = np.array([[1, 0, dPART, 0],
[0, 1, 0, dPART],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# light field right before the lens
x = np.linalg.inv(T2) @ xp
# remove rays outside of the lens aperture
ind = x[0, :] ** 2 + x[1, :] ** 2 <= lens_radius ** 2
x = x[:, ind]
# transformation of the light field with spherical lens
    a = x[0, :]
    b = x[1, :]
    c = x[2, :]
    d = x[3, :]
# radius of curvature of the lens
rk = mic['focal_length'] * (mic['ri_lens'] / mic['ri_medium'] - 1) * 2
dum = a * 0
# refraction medium-lens
    # ray-vector before the lens
Vr = np.vstack((1 + dum, c, d))
Vr = (Vr / np.tile(np.sqrt(sum(Vr ** 2)), (3, 1)))
# normal-vector to the lens surface
Vl = np.vstack((rk + dum, a, b))
Vl = (Vl / np.tile(np.sqrt(sum(Vl ** 2)), (3, 1)))
# tangent-vector to the lens surface
Vrot = np.cross(Vr, Vl, axisa=0, axisb=0)
Vrot = np.cross(Vrot, Vl, axisa=1, axisb=0).transpose()
Vrot = Vrot / np.tile(np.sqrt(sum(Vrot ** 2)), (3, 1))
# angle after snell-law correction
vx = np.sum(Vr * Vl, axis=0) # dot product!
vy = np.sum(Vr * Vrot, axis=0) # dot product!
th11 = np.arcsin(mic['ri_medium'] / mic['ri_lens'] *
np.sin(np.arctan(vy / vx)))
# new ray-vector inside the lens
Vr11 = (Vl * np.tile(np.cos(th11), (3, 1)) +
Vrot * np.tile(np.sin(th11), (3, 1)))
Vr = Vr11 / np.tile(Vr11[0, :], (3, 1))
# refraction lens-medium
# normal-vector to the lens surface
Vl2 = np.vstack((Vl[0, :], -Vl[1:, :]))
# tangent-vector to the lens surface
Vrot = np.cross(Vr, Vl2, axisa=0, axisb=0)
Vrot = np.cross(Vrot, Vl2, axisa=1, axisb=0).transpose()
Vrot = Vrot / np.tile(np.sqrt(sum(Vrot ** 2)), (3, 1))
# angle after snell-law correction
vx = np.sum(Vr * Vl2, axis=0) # dot product!
vy = np.sum(Vr * Vrot, axis=0) # dot product!
th11 = np.arcsin(mic['ri_lens'] / mic['ri_medium'] *
np.sin(np.arctan(vy / vx)))
# new ray-vector outside the lens
Vr11 = (Vl2 * np.tile(np.cos(th11), (3, 1)) +
Vrot * np.tile(np.sin(th11), (3, 1)))
Vr = Vr11 / np.tile(Vr11[0, :], (3, 1))
# light field after the spherical lens
x[2, :] = Vr[1, :]
x[3, :] = Vr[2, :]
if mic['cyl_focal_length'] == 0:
# linear transformation from the lens plane to the ccd plane
T1 = np.array([[1, 0, -dCCD, 0],
[0, 1, 0, -dCCD],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# light field at the ccd plane
xs = np.linalg.inv(T1) @ x
else:
# # linear transformation from the lens plane to the cyl_lens plane
T1c = np.array([[1, 0, -dCCD * 1 / 3, 0],
[0, 1, 0, -dCCD * 1 / 3],
[0, 0, 1, 0],
[0, 0, 0, 1]])
# # light field at the cylindrical lens plane
xc = np.linalg.inv(T1c) @ x
# # light field after the cylindrical lens plane
Tc = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[-1 / mic['cyl_focal_length'], 0, 1, 0],
[0, 0, 0, 1]])
xc_a = np.linalg.inv(Tc) @ xc
        # # linear transformation from the cyl_lens plane to the ccd plane
        T1 = np.array([[1, 0, -dCCD * 2 / 3, 0],
                       [0, 1, 0, -dCCD * 2 / 3],
                       [0, 0, 1, 0],
                       [0, 0, 0, 1]])
        # # light field at the ccd plane
xs = np.linalg.inv(T1) @ xc_a
# transform the position in pixel units
X = np.round(xs[0, :] / mic['pixel_size'] + P1[0])
Y = np.round(xs[1, :] / mic['pixel_size'] + P1[1])
# remove rays outside the CCD
ind = np.all([X > 0, X <= mic['pixel_dim_x'], Y > 0, Y <= mic['pixel_dim_y'],
X.imag == 0, Y.imag == 0], axis=0)
# count number of rays in each pixel
countXY = np.sort(Y[ind] + (X[ind] - 1) * mic['pixel_dim_y'])
indi, ia = np.unique(countXY, return_index=True)
nCounts = np.hstack((ia[1:], countXY.size + 1)) - ia
# prepare image
I = np.zeros((mic['pixel_dim_y'], mic['pixel_dim_x']))
Ifr = I.flatten('F')
Ifr[indi.astype(int) - 1] = nCounts
I = Ifr.reshape(mic['pixel_dim_y'], mic['pixel_dim_x'], order='F')
return I
# %%
def create_particle(D, Ns, Nr):
R = D / 2
V = spiral_sphere(Ns)
V[0:2, V[0, :] > 0] = -V[0:2, V[0, :] > 0]
x = R * V[0, :]
y = R * V[1, :]
z = R * V[2, :]
V0 = spiral_sphere(Nr + 2)
V0 = V0[:, 1:-1]
u = np.tile(x, (Nr, 1))
v = np.tile(y, (Nr, 1))
s = u * 0
t = u * 0
phs = np.random.uniform(-np.pi, np.pi, z.size)
cs = np.cos(phs)
sn = np.sin(phs)
for k in range(0, Ns):
Rot = np.array([[cs[k], -sn[k], 0],
[sn[k], cs[k], 0], [0, 0, 1]])
Vr = Rot @ V0
Vr[0, :] = -abs(Vr[0, :])
s[:, k] = Vr[1, :] / Vr[0, :]
t[:, k] = Vr[2, :] / Vr[0, :]
u[:, k] = y[k] - s[:, k] * x[k]
v[:, k] = z[k] - t[:, k] * x[k]
xp = np.vstack((u.flatten('F'), v.flatten('F'),
s.flatten('F'), t.flatten('F')))
return xp
# %%
def create_ellipsoid(Deab, Ns, Nr):
    D = Deab[0]
    ecc = Deab[1]
    alpha = Deab[2]
    beta = Deab[3]
R = D / 2
V = spiral_sphere(Ns)
V = R * V
V[2, :] = V[2, :] * ecc
R_beta = np.array([[np.cos(beta), 0, np.sin(beta)],
[0, 1, 0],
[-np.sin(beta), 0, np.cos(beta)]])
R_alpha = np.array([[np.cos(alpha), -np.sin(alpha), 0],
[np.sin(alpha), np.cos(alpha), 0],
[0, 0, 1]])
Vf = R_alpha @ (R_beta @ V)
ii1 = (Vf[1, :] == np.min(Vf[1, :])).nonzero()[0][0]
ii2 = (Vf[1, :] == np.max(Vf[1, :])).nonzero()[0][0]
ii3 = (Vf[2, :] == np.min(Vf[2, :])).nonzero()[0][0]
ii4 = (Vf[2, :] == np.max(Vf[2, :])).nonzero()[0][0]
Vdum = Vf[:, [ii1, ii2, ii3, ii4]]
A = np.c_[Vdum[1, :], Vdum[2, :], np.ones(Vdum.shape[1])]
C, _, _, _ = np.linalg.lstsq(A, Vdum[0, :], rcond=None)
V1dum = C[0] * Vf[1, :] + C[1] * Vf[2, :] + C[2]
ind = (Vf[0, :] - V1dum) < 0
x = Vf[0, ind]
y = Vf[1, ind]
z = Vf[2, ind]
Ns = z.size
V0 = spiral_sphere(Nr + 2)
V0 = V0[:, 1:-1]
u = np.tile(x, (Nr, 1))
v = np.tile(y, (Nr, 1))
s = u * 0
t = u * 0
phs = np.random.uniform(-np.pi, np.pi, z.size)
cs = np.cos(phs)
sn = np.sin(phs)
for k in range(0, Ns):
Rot = np.array([[cs[k], -sn[k], 0],
[sn[k], cs[k], 0], [0, 0, 1]])
Vr = Rot @ V0
Vr[0, :] = -abs(Vr[0, :])
s[:, k] = Vr[1, :] / Vr[0, :]
t[:, k] = Vr[2, :] / Vr[0, :]
u[:, k] = y[k] - s[:, k] * x[k]
v[:, k] = z[k] - t[:, k] * x[k]
xp = np.vstack((u.flatten('F'), v.flatten('F'),
s.flatten('F'), t.flatten('F')))
return xp
# %%
def spiral_sphere(N):
gr = (1 + np.sqrt(5)) / 2 # golden ratio
ga = 2 * np.pi * (1 - 1 / gr) # golden angle
ind_p = np.arange(0, N) # particle (i.e., point sample) index
    # latitude chosen so that the point index is proportional to the surface
    # area between 0 and lat
    lat = np.arccos(1 - 2 * ind_p / (N - 1))
lon = ind_p * ga # position particles at even intervals along longitude
# Convert from spherical to Cartesian co-ordinates
x = np.sin(lat) * np.cos(lon)
y = np.sin(lat) * np.sin(lon)
z = np.cos(lat)
V = np.vstack((x, y, z))
return V
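# %%
# Added usage sketch (not part of the original microsig code): visualize the
# quasi-uniform point distribution produced by spiral_sphere. The function
# name _demo_spiral_sphere and the matplotlib dependency are assumptions for
# illustration only.
def _demo_spiral_sphere(n_points=500):
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers '3d')
    V = spiral_sphere(n_points)
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    ax.scatter(V[0, :], V[1, :], V[2, :], s=2)
    plt.show()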
# %%
if __name__ == '__main__':
run()
| 32.956
| 119
| 0.506615
| 2,403
| 16,478
| 3.384519
| 0.158136
| 0.009591
| 0.028403
| 0.01623
| 0.486045
| 0.458502
| 0.434526
| 0.430714
| 0.391369
| 0.363089
| 0
| 0.04022
| 0.327042
| 16,478
| 499
| 120
| 33.022044
| 0.693209
| 0.139641
| 0
| 0.397015
| 0
| 0
| 0.094134
| 0.001915
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026866
| false
| 0
| 0.026866
| 0
| 0.074627
| 0.01194
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e06b5b33923a9795875422db89edadd2030423bd
| 292
|
py
|
Python
|
working/tkinter_widget/test.py
|
songdaegeun/school-zone-enforcement-system
|
b5680909fd5a348575563534428d2117f8dc2e3f
|
[
"MIT"
] | null | null | null |
working/tkinter_widget/test.py
|
songdaegeun/school-zone-enforcement-system
|
b5680909fd5a348575563534428d2117f8dc2e3f
|
[
"MIT"
] | null | null | null |
working/tkinter_widget/test.py
|
songdaegeun/school-zone-enforcement-system
|
b5680909fd5a348575563534428d2117f8dc2e3f
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import threading
def test():
    while True:
        img1 = cv2.imread('captured car1.jpg')
        if img1 is None:  # imread returns None when the file cannot be read
            raise FileNotFoundError('captured car1.jpg not found')
        print("{}".format(img1.shape))
        print("{}".format(img1))
        cv2.imshow('asd', img1)
        cv2.waitKey(1)
t1 = threading.Thread(target=test)
t1.start()
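# Added note: HighGUI calls such as cv2.imshow/cv2.waitKey are not reliably
# thread-safe on every platform. A minimal main-thread variant (hypothetical,
# for illustration; defined but not called here):
def main_thread_variant():
    img1 = cv2.imread('captured car1.jpg')
    if img1 is None:
        raise FileNotFoundError('captured car1.jpg not found')
    while True:
        cv2.imshow('asd', img1)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break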
| 18.25
| 44
| 0.60274
| 39
| 292
| 4.512821
| 0.641026
| 0.119318
| 0.170455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058296
| 0.236301
| 292
| 15
| 45
| 19.466667
| 0.730942
| 0
| 0
| 0
| 0
| 0
| 0.082474
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.25
| 0
| 0.333333
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e06beb7e97ea00b98e3ff8423b4c33335a68172e
| 7,856
|
py
|
Python
|
ceilometer/compute/virt/hyperv/utilsv2.py
|
aristanetworks/ceilometer
|
8776b137f82f71eef1241bcb1600de10c1f77394
|
[
"Apache-2.0"
] | 2
|
2015-09-07T09:15:26.000Z
|
2015-09-30T02:13:23.000Z
|
ceilometer/compute/virt/hyperv/utilsv2.py
|
aristanetworks/ceilometer
|
8776b137f82f71eef1241bcb1600de10c1f77394
|
[
"Apache-2.0"
] | null | null | null |
ceilometer/compute/virt/hyperv/utilsv2.py
|
aristanetworks/ceilometer
|
8776b137f82f71eef1241bcb1600de10c1f77394
|
[
"Apache-2.0"
] | 1
|
2019-09-16T02:11:41.000Z
|
2019-09-16T02:11:41.000Z
|
# Copyright 2013 Cloudbase Solutions Srl
#
# Author: Claudiu Belu <cbelu@cloudbasesolutions.com>
# Alessandro Pilotti <apilotti@cloudbasesolutions.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import sys
if sys.platform == 'win32':
import wmi
from oslo.config import cfg
from ceilometer.compute.virt import inspector
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class HyperVException(inspector.InspectorException):
pass
class UtilsV2(object):
_VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
_PROC_SETTING = 'Msvm_ProcessorSettingData'
_SYNTH_ETH_PORT = 'Msvm_SyntheticEthernetPortSettingData'
_ETH_PORT_ALLOC = 'Msvm_EthernetPortAllocationSettingData'
_PORT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData'
_STORAGE_ALLOC = 'Msvm_StorageAllocationSettingData'
_VS_SETTING_DATA = 'Msvm_VirtualSystemSettingData'
_METRICS_ME = 'Msvm_MetricForME'
_BASE_METRICS_VALUE = 'Msvm_BaseMetricValue'
_CPU_METRIC_NAME = 'Aggregated Average CPU Utilization'
_NET_IN_METRIC_NAME = 'Filtered Incoming Network Traffic'
_NET_OUT_METRIC_NAME = 'Filtered Outgoing Network Traffic'
# Disk metrics are supported from Hyper-V 2012 R2
_DISK_RD_METRIC_NAME = 'Disk Data Read'
_DISK_WR_METRIC_NAME = 'Disk Data Written'
def __init__(self, host='.'):
if sys.platform == 'win32':
self._init_hyperv_wmi_conn(host)
self._init_cimv2_wmi_conn(host)
self._host_cpu_info = None
def _init_hyperv_wmi_conn(self, host):
self._conn = wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
def _init_cimv2_wmi_conn(self, host):
self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
def get_host_cpu_info(self):
if not self._host_cpu_info:
host_cpus = self._conn_cimv2.Win32_Processor()
self._host_cpu_info = (host_cpus[0].MaxClockSpeed, len(host_cpus))
return self._host_cpu_info
def get_all_vms(self):
vms = [(v.ElementName, v.Name) for v in
self._conn.Msvm_ComputerSystem(['ElementName', 'Name'],
Caption="Virtual Machine")]
return vms
def get_cpu_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
cpu_sd = self._get_vm_resources(vm, self._PROC_SETTING)[0]
cpu_metrics_def = self._get_metric_def(self._CPU_METRIC_NAME)
cpu_metric_aggr = self._get_metrics(vm, cpu_metrics_def)
cpu_used = 0
if cpu_metric_aggr:
cpu_used = long(cpu_metric_aggr[0].MetricValue)
return (cpu_used,
int(cpu_sd.VirtualQuantity),
long(vm.OnTimeInMilliseconds))
def get_vnic_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
ports = self._get_vm_resources(vm, self._ETH_PORT_ALLOC)
vnics = self._get_vm_resources(vm, self._SYNTH_ETH_PORT)
metric_def_in = self._get_metric_def(self._NET_IN_METRIC_NAME)
metric_def_out = self._get_metric_def(self._NET_OUT_METRIC_NAME)
for port in ports:
vnic = [v for v in vnics if port.Parent == v.path_()][0]
metric_value_instances = self._get_metric_value_instances(
port.associators(wmi_result_class=self._PORT_ACL_SET_DATA),
self._BASE_METRICS_VALUE)
metric_values = self._sum_metric_values_by_defs(
metric_value_instances, [metric_def_in, metric_def_out])
yield {
'rx_mb': metric_values[0],
'tx_mb': metric_values[1],
'element_name': vnic.ElementName,
'address': vnic.Address
}
def get_disk_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
metric_def_r = self._get_metric_def(self._DISK_RD_METRIC_NAME)
metric_def_w = self._get_metric_def(self._DISK_WR_METRIC_NAME)
disks = self._get_vm_resources(vm, self._STORAGE_ALLOC)
for disk in disks:
metric_values = self._get_metric_values(
disk, [metric_def_r, metric_def_w])
            # This is e.g. the VHD file location
if disk.HostResource:
host_resource = disk.HostResource[0]
yield {
# Values are in megabytes
'read_mb': metric_values[0],
'write_mb': metric_values[1],
'instance_id': disk.InstanceID,
'host_resource': host_resource
}
def _sum_metric_values(self, metrics):
tot_metric_val = 0
for metric in metrics:
tot_metric_val += long(metric.MetricValue)
return tot_metric_val
def _sum_metric_values_by_defs(self, element_metrics, metric_defs):
metric_values = []
for metric_def in metric_defs:
if metric_def:
metrics = self._filter_metrics(element_metrics, metric_def)
metric_values.append(self._sum_metric_values(metrics))
else:
# In case the metric is not defined on this host
metric_values.append(0)
return metric_values
def _get_metric_value_instances(self, elements, result_class):
instances = []
for el in elements:
associators = el.associators(wmi_result_class=result_class)
if associators:
instances.append(associators[0])
return instances
def _get_metric_values(self, element, metric_defs):
element_metrics = element.associators(
wmi_association_class=self._METRICS_ME)
return self._sum_metric_values_by_defs(element_metrics, metric_defs)
def _lookup_vm(self, vm_name):
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if n == 0:
raise inspector.InstanceNotFoundException(
_('VM %s not found on Hyper-V') % vm_name)
elif n > 1:
raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
else:
return vms[0]
def _get_metrics(self, element, metric_def):
return self._filter_metrics(
element.associators(
wmi_association_class=self._METRICS_ME), metric_def)
def _filter_metrics(self, all_metrics, metric_def):
return [v for v in all_metrics if
v.MetricDefinitionId == metric_def.Id]
def _get_metric_def(self, metric_def):
metric = self._conn.CIM_BaseMetricDefinition(ElementName=metric_def)
if metric:
return metric[0]
def _get_vm_setting_data(self, vm):
vm_settings = vm.associators(
wmi_result_class=self._VS_SETTING_DATA)
# Avoid snapshots
return [s for s in vm_settings if
s.VirtualSystemType == self._VIRTUAL_SYSTEM_TYPE_REALIZED][0]
def _get_vm_resources(self, vm, resource_class):
setting_data = self._get_vm_setting_data(vm)
return setting_data.associators(wmi_result_class=resource_class)
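# Added usage sketch (hypothetical, not part of the original module): list the
# VMs on the local Hyper-V host and print their CPU metrics. Only meaningful
# on win32, where the wmi module is imported above.
if __name__ == '__main__':
    utils = UtilsV2()
    for element_name, name in utils.get_all_vms():
        cpu_used, cpu_count, uptime_ms = utils.get_cpu_metrics(element_name)
        print('%s: %s (aggregated CPU) on %s vCPUs, up %s ms' % (
            element_name, cpu_used, cpu_count, uptime_ms))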
| 37.056604
| 78
| 0.66777
| 995
| 7,856
| 4.901508
| 0.257286
| 0.042444
| 0.018659
| 0.019684
| 0.157064
| 0.11421
| 0.046135
| 0.046135
| 0.046135
| 0.02276
| 0
| 0.008355
| 0.253437
| 7,856
| 211
| 79
| 37.232227
| 0.823188
| 0.126782
| 0
| 0.062069
| 0
| 0
| 0.088261
| 0.03791
| 0
| 0
| 0
| 0
| 0
| 1
| 0.124138
| false
| 0.006897
| 0.041379
| 0.013793
| 0.365517
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e06fed7cfa54e3e815b314104d5c76b1f273336e
| 1,126
|
py
|
Python
|
src/cli.py
|
cajones314/avocd2019
|
268e03c5d1bb5b3e14459b831916bb7846f40def
|
[
"MIT"
] | null | null | null |
src/cli.py
|
cajones314/avocd2019
|
268e03c5d1bb5b3e14459b831916bb7846f40def
|
[
"MIT"
] | null | null | null |
src/cli.py
|
cajones314/avocd2019
|
268e03c5d1bb5b3e14459b831916bb7846f40def
|
[
"MIT"
] | null | null | null |
# system
from io import IOBase, StringIO
import os
# 3rd party
import click
# internal
from days import DayFactory
# import logging
# logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG)
# ch = logging.StreamHandler()
# logger.addHandler(ch)
@click.group(invoke_without_command=True)
@click.option('-d', '--day', required=True, type=click.IntRange(1, 31), metavar="<1..31>", help="Day you want to select.")
@click.option('-p', '--puzzle', required=True, type=click.IntRange(1, 2), metavar="<1|2>", help="Puzzle you want to run.")
@click.option('-i', '--input', required=True, type=click.Path(exists=True), help="Path to puzzle data.")
def cli(day: int, puzzle: int, input: str):
filename = os.path.join(input, f"{day:02}_puzzle_{puzzle}.txt")
if os.path.exists(filename):
input_stream = open(filename, "r")
else:
input_stream = StringIO('')
avocd = DayFactory(day, input_stream)
try:
print(avocd.run(puzzle))
except NotImplementedError:
print(f"Puzzle {puzzle} for day {day} not implemented.")
if __name__ == "__main__":
# pylint: disable=no-value-for-parameter
cli()
| 28.15
| 122
| 0.69627
| 159
| 1,126
| 4.811321
| 0.490566
| 0.043137
| 0.062745
| 0.082353
| 0.078431
| 0.078431
| 0
| 0
| 0
| 0
| 0
| 0.013333
| 0.134103
| 1,126
| 39
| 123
| 28.871795
| 0.771282
| 0.175844
| 0
| 0
| 0
| 0
| 0.203704
| 0.030501
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.190476
| 0
| 0.238095
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0709fa966341538c2d49529de984d39878ed846
| 3,885
|
py
|
Python
|
RPI/yolov5/algorithm/planner/algorithms/hybrid_astar/draw/draw.py
|
Aditya239233/MDP
|
87491e1d67e547c11f4bdd5d784d120473429eae
|
[
"MIT"
] | 4
|
2022-01-14T15:06:43.000Z
|
2022-01-18T14:45:04.000Z
|
RPI/yolov5/algorithm/planner/algorithms/hybrid_astar/draw/draw.py
|
Aditya239233/MDP
|
87491e1d67e547c11f4bdd5d784d120473429eae
|
[
"MIT"
] | null | null | null |
RPI/yolov5/algorithm/planner/algorithms/hybrid_astar/draw/draw.py
|
Aditya239233/MDP
|
87491e1d67e547c11f4bdd5d784d120473429eae
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import math
from algorithm.planner.utils.car_utils import Car_C
PI = np.pi
class Arrow:
def __init__(self, x, y, theta, L, c):
angle = np.deg2rad(30)
d = 0.3 * L
w = 2
x_start = x
y_start = y
x_end = x + L * np.cos(theta)
y_end = y + L * np.sin(theta)
theta_hat_L = theta + PI - angle
theta_hat_R = theta + PI + angle
x_hat_start = x_end
x_hat_end_L = x_hat_start + d * np.cos(theta_hat_L)
x_hat_end_R = x_hat_start + d * np.cos(theta_hat_R)
y_hat_start = y_end
y_hat_end_L = y_hat_start + d * np.sin(theta_hat_L)
y_hat_end_R = y_hat_start + d * np.sin(theta_hat_R)
plt.plot([x_start, x_end], [y_start, y_end], color=c, linewidth=w)
plt.plot([x_hat_start, x_hat_end_L],
[y_hat_start, y_hat_end_L], color=c, linewidth=w)
plt.plot([x_hat_start, x_hat_end_R],
[y_hat_start, y_hat_end_R], color=c, linewidth=w)
class Car:
def __init__(self, x, y, yaw, w, L):
theta_B = PI + yaw
xB = x + L / 4 * np.cos(theta_B)
yB = y + L / 4 * np.sin(theta_B)
theta_BL = theta_B + PI / 2
theta_BR = theta_B - PI / 2
x_BL = xB + w / 2 * np.cos(theta_BL) # Bottom-Left vertex
y_BL = yB + w / 2 * np.sin(theta_BL)
x_BR = xB + w / 2 * np.cos(theta_BR) # Bottom-Right vertex
y_BR = yB + w / 2 * np.sin(theta_BR)
x_FL = x_BL + L * np.cos(yaw) # Front-Left vertex
y_FL = y_BL + L * np.sin(yaw)
x_FR = x_BR + L * np.cos(yaw) # Front-Right vertex
y_FR = y_BR + L * np.sin(yaw)
plt.plot([x_BL, x_BR, x_FR, x_FL, x_BL],
[y_BL, y_BR, y_FR, y_FL, y_BL],
linewidth=1, color='black')
Arrow(x, y, yaw, L / 2, 'black')
def draw_car(x, y, yaw, steer, color='black', extended_car=True):
if extended_car:
car = np.array([[-Car_C.RB, -Car_C.RB, Car_C.RF, Car_C.RF, -Car_C.RB, Car_C.ACTUAL_RF, Car_C.ACTUAL_RF, -Car_C.ACTUAL_RB, -Car_C.ACTUAL_RB],
[Car_C.W / 2, -Car_C.W / 2, -Car_C.W / 2, Car_C.W / 2, Car_C.W / 2, Car_C.W/2, -Car_C.W/2, -Car_C.W/2, Car_C.W/2]])
else:
car = np.array([[-Car_C.RB, -Car_C.RB, Car_C.RF, Car_C.RF, -Car_C.RB],
[Car_C.W / 2, -Car_C.W / 2, -Car_C.W / 2, Car_C.W / 2, Car_C.W / 2]])
wheel = np.array([[-Car_C.TR, -Car_C.TR, Car_C.TR, Car_C.TR, -Car_C.TR],
[Car_C.TW / 4, -Car_C.TW / 4, -Car_C.TW / 4, Car_C.TW / 4, Car_C.TW / 4]])
rlWheel = wheel.copy()
rrWheel = wheel.copy()
frWheel = wheel.copy()
flWheel = wheel.copy()
Rot1 = np.array([[math.cos(yaw), -math.sin(yaw)],
[math.sin(yaw), math.cos(yaw)]])
Rot2 = np.array([[math.cos(steer), math.sin(steer)],
[-math.sin(steer), math.cos(steer)]])
frWheel = np.dot(Rot2, frWheel)
flWheel = np.dot(Rot2, flWheel)
frWheel += np.array([[Car_C.WB], [-Car_C.WD / 2]])
flWheel += np.array([[Car_C.WB], [Car_C.WD / 2]])
rrWheel[1, :] -= Car_C.WD / 2
rlWheel[1, :] += Car_C.WD / 2
frWheel = np.dot(Rot1, frWheel)
flWheel = np.dot(Rot1, flWheel)
rrWheel = np.dot(Rot1, rrWheel)
rlWheel = np.dot(Rot1, rlWheel)
car = np.dot(Rot1, car)
frWheel += np.array([[x], [y]])
flWheel += np.array([[x], [y]])
rrWheel += np.array([[x], [y]])
rlWheel += np.array([[x], [y]])
car += np.array([[x], [y]])
plt.plot(car[0, :], car[1, :], color)
plt.plot(frWheel[0, :], frWheel[1, :], color)
plt.plot(rrWheel[0, :], rrWheel[1, :], color)
plt.plot(flWheel[0, :], flWheel[1, :], color)
plt.plot(rlWheel[0, :], rlWheel[1, :], color)
Arrow(x, y, yaw, Car_C.WB * 0.8, color)
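# Added usage sketch (hypothetical, not part of the original module): draw one
# car at the origin with a 30-degree heading and 10 degrees of steer. Assumes
# the Car_C geometry constants imported above are available in this repo.
if __name__ == '__main__':
    plt.figure()
    draw_car(0.0, 0.0, np.deg2rad(30.0), np.deg2rad(10.0))
    plt.axis('equal')
    plt.show()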
| 33.491379
| 148
| 0.53565
| 687
| 3,885
| 2.79476
| 0.117904
| 0.095833
| 0.036458
| 0.04375
| 0.38125
| 0.307813
| 0.247396
| 0.231771
| 0.183854
| 0.163021
| 0
| 0.022521
| 0.291377
| 3,885
| 115
| 149
| 33.782609
| 0.6749
| 0.019305
| 0
| 0
| 0
| 0
| 0.003943
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035294
| false
| 0
| 0.047059
| 0
| 0.105882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e072653c74adbd64f985b81e9b674ad50e5a700a
| 27,779
|
py
|
Python
|
aws_deploy/ecs/helper.py
|
jmsantorum/aws-deploy
|
f117cff3a5440ee42470feaa2a83263c3212cf10
|
[
"BSD-3-Clause"
] | null | null | null |
aws_deploy/ecs/helper.py
|
jmsantorum/aws-deploy
|
f117cff3a5440ee42470feaa2a83263c3212cf10
|
[
"BSD-3-Clause"
] | null | null | null |
aws_deploy/ecs/helper.py
|
jmsantorum/aws-deploy
|
f117cff3a5440ee42470feaa2a83263c3212cf10
|
[
"BSD-3-Clause"
] | 1
|
2021-08-05T12:07:11.000Z
|
2021-08-05T12:07:11.000Z
|
import json
import re
from datetime import datetime
from json.decoder import JSONDecodeError
import click
from boto3.session import Session
from boto3_type_annotations.ecs import Client
from botocore.exceptions import ClientError, NoCredentialsError
from dateutil.tz.tz import tzlocal
from dictdiffer import diff
JSON_LIST_REGEX = re.compile(r'^\[.*\]$')
LAUNCH_TYPE_EC2 = 'EC2'
LAUNCH_TYPE_FARGATE = 'FARGATE'
def read_env_file(container_name, file):
env_vars = []
try:
with open(file) as f:
for line in f:
if line.startswith('#') or not line.strip() or '=' not in line:
continue
key, value = line.strip().split('=', 1)
env_vars.append((container_name, key, value))
except Exception as e:
raise EcsTaskDefinitionCommandError(str(e))
return tuple(env_vars)
class EcsClient(object):
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None, region_name=None,
profile_name=None):
session = Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
region_name=region_name,
profile_name=profile_name
)
self.boto: Client = session.client('ecs')
self.events = session.client('events')
def describe_services(self, cluster_name, service_name):
return self.boto.describe_services(
cluster=cluster_name,
services=[service_name]
)
def describe_task_definition(self, task_definition_arn):
try:
return self.boto.describe_task_definition(
taskDefinition=task_definition_arn,
include=[
'TAGS',
]
)
except ClientError:
raise UnknownTaskDefinitionError(
u'Unknown task definition arn: %s' % task_definition_arn
)
def list_tasks(self, cluster_name, service_name):
return self.boto.list_tasks(
cluster=cluster_name,
serviceName=service_name
)
def describe_tasks(self, cluster_name, task_arns):
return self.boto.describe_tasks(cluster=cluster_name, tasks=task_arns)
def register_task_definition(self, family, containers, volumes, role_arn,
execution_role_arn, tags, additional_properties):
if tags:
additional_properties['tags'] = tags
return self.boto.register_task_definition(
family=family,
containerDefinitions=containers,
volumes=volumes,
taskRoleArn=role_arn,
executionRoleArn=execution_role_arn,
**additional_properties
)
def deregister_task_definition(self, task_definition_arn):
return self.boto.deregister_task_definition(
taskDefinition=task_definition_arn
)
def update_service(self, cluster, service, desired_count, task_definition):
if desired_count is None:
return self.boto.update_service(
cluster=cluster,
service=service,
taskDefinition=task_definition
)
return self.boto.update_service(
cluster=cluster,
service=service,
desiredCount=desired_count,
taskDefinition=task_definition
)
def run_task(self, cluster, task_definition, count, started_by, overrides,
launchtype='EC2', subnets=(), security_groups=(),
public_ip=False, platform_version=None):
if launchtype == LAUNCH_TYPE_FARGATE:
if not subnets or not security_groups:
msg = 'At least one subnet (--subnet) and one security ' \
'group (--securitygroup) definition are required ' \
'for launch type FARGATE'
raise TaskPlacementError(msg)
network_configuration = {
"awsvpcConfiguration": {
"subnets": subnets,
"securityGroups": security_groups,
"assignPublicIp": "ENABLED" if public_ip else "DISABLED"
}
}
if platform_version is None:
platform_version = 'LATEST'
return self.boto.run_task(
cluster=cluster,
taskDefinition=task_definition,
count=count,
startedBy=started_by,
overrides=overrides,
launchType=launchtype,
networkConfiguration=network_configuration,
platformVersion=platform_version,
)
return self.boto.run_task(
cluster=cluster,
taskDefinition=task_definition,
count=count,
startedBy=started_by,
overrides=overrides
)
def update_rule(self, cluster, rule, task_definition):
target = self.events.list_targets_by_rule(Rule=rule)['Targets'][0]
target['Arn'] = task_definition.arn.partition('task-definition')[0] + 'cluster/' + cluster
target['EcsParameters']['TaskDefinitionArn'] = task_definition.arn
self.events.put_targets(Rule=rule, Targets=[target])
return target['Id']
class EcsService(dict):
def __init__(self, cluster, service_definition=None, **kwargs):
self._cluster = cluster
super(EcsService, self).__init__(service_definition, **kwargs)
def set_task_definition(self, task_definition):
self[u'taskDefinition'] = task_definition.arn
@property
def cluster(self):
return self._cluster
@property
def name(self):
return self.get(u'serviceName')
@property
def task_definition(self):
return self.get(u'taskDefinition')
@property
def desired_count(self):
return self.get(u'desiredCount')
@property
def deployment_created_at(self):
for deployment in self.get(u'deployments'):
if deployment.get(u'status') == u'PRIMARY':
return deployment.get(u'createdAt')
return datetime.now()
@property
def deployment_updated_at(self):
for deployment in self.get(u'deployments'):
if deployment.get(u'status') == u'PRIMARY':
return deployment.get(u'updatedAt')
return datetime.now()
@property
def errors(self):
return self.get_warnings(
since=self.deployment_updated_at
)
@property
def older_errors(self):
return self.get_warnings(
since=self.deployment_created_at,
until=self.deployment_updated_at
)
def get_warnings(self, since=None, until=None):
since = since or self.deployment_created_at
until = until or datetime.now(tz=tzlocal())
errors = {}
for event in self.get(u'events'):
if u'unable' not in event[u'message']:
continue
if since < event[u'createdAt'] < until:
errors[event[u'createdAt']] = event[u'message']
return errors
class EcsTaskDefinition(object):
def __init__(self, containerDefinitions, volumes, family, revision, status, taskDefinitionArn,
requiresAttributes=None, taskRoleArn=None, executionRoleArn=None, compatibilities=None, tags=None,
**kwargs):
self.containers = containerDefinitions
self.volumes = volumes
self.family = family
self.revision = revision
self.status = status
self.arn = taskDefinitionArn
self.requires_attributes = requiresAttributes or {}
self.role_arn = taskRoleArn or ''
self.execution_role_arn = executionRoleArn or ''
self.tags = tags
self.additional_properties = kwargs
self._diff = []
# the compatibilities parameter is returned from the ECS API, when
# describing a task, but may not be included, when registering a new
# task definition. Just storing it for now.
self.compatibilities = compatibilities
@property
def container_names(self):
for container in self.containers:
yield container['name']
@property
def images(self):
for container in self.containers:
yield container['name'], container['image']
@property
def family_revision(self):
return f'{self.family}:{self.revision}'
@property
def updated(self) -> bool:
return self._diff != []
@property
def diff(self):
return self._diff
def show_diff(self, show_diff: bool = False):
if show_diff:
click.secho('Task definition modified:')
for d in self._diff:
click.secho(f' {str(d)}', fg='blue')
click.secho('')
def diff_raw(self, task_b):
containers_a = {c['name']: c for c in self.containers}
containers_b = {c['name']: c for c in task_b.containers}
requirements_a = sorted([r['name'] for r in self.requires_attributes])
requirements_b = sorted([r['name'] for r in task_b.requires_attributes])
for container in containers_a:
containers_a[container]['environment'] = {e['name']: e['value'] for e in
containers_a[container].get('environment', {})}
for container in containers_b:
containers_b[container]['environment'] = {e['name']: e['value'] for e in
containers_b[container].get('environment', {})}
for container in containers_a:
containers_a[container]['secrets'] = {e['name']: e['valueFrom'] for e in
containers_a[container].get('secrets', {})}
for container in containers_b:
containers_b[container]['secrets'] = {e['name']: e['valueFrom'] for e in
containers_b[container].get('secrets', {})}
composite_a = {
'containers': containers_a,
'volumes': self.volumes,
'requires_attributes': requirements_a,
'role_arn': self.role_arn,
'execution_role_arn': self.execution_role_arn,
'compatibilities': self.compatibilities,
'additional_properties': self.additional_properties,
}
composite_b = {
'containers': containers_b,
'volumes': task_b.volumes,
'requires_attributes': requirements_b,
'role_arn': task_b.role_arn,
'execution_role_arn': task_b.execution_role_arn,
'compatibilities': task_b.compatibilities,
'additional_properties': task_b.additional_properties,
}
return list(diff(composite_a, composite_b))
def get_overrides(self):
override = dict()
overrides = []
for diff in self.diff:
if override.get('name') != diff.container:
override = dict(name=diff.container)
overrides.append(override)
if diff.field == 'command':
override['command'] = self.get_overrides_command(diff.value)
elif diff.field == 'environment':
override['environment'] = self.get_overrides_env(diff.value)
elif diff.field == 'secrets':
override['secrets'] = self.get_overrides_secrets(diff.value)
return overrides
@staticmethod
def parse_command(command):
if re.match(JSON_LIST_REGEX, command):
try:
return json.loads(command)
except JSONDecodeError as e:
raise EcsTaskDefinitionCommandError(
f"command should be valid JSON list. Got following command: {command} resulting in error: {str(e)}"
)
return command.split()
@staticmethod
def get_overrides_command(command):
return EcsTaskDefinition.parse_command(command)
@staticmethod
def get_overrides_env(env):
return [{"name": e, "value": env[e]} for e in env]
@staticmethod
def get_overrides_secrets(secrets):
return [{"name": s, "valueFrom": secrets[s]} for s in secrets]
def get_tag(self, key):
for tag in self.tags:
if tag['key'] == key:
return tag['value']
return None
def set_tag(self, key: str, value: str):
if key and value:
done = False
for tag in self.tags:
if tag['key'] == key:
if tag['value'] != value:
diff = EcsTaskDefinitionDiff(
container=None,
field=f"tags['{key}']",
value=value,
old_value=tag['value']
)
self._diff.append(diff)
tag['value'] = value
done = True
break
if not done:
diff = EcsTaskDefinitionDiff(container=None, field=f"tags['{key}']", value=value, old_value=None)
self._diff.append(diff)
self.tags.append({'key': key, 'value': value})
def set_images(self, tag=None, **images):
self.validate_container_options(**images)
for container in self.containers:
if container['name'] in images:
new_image = images[container['name']]
diff = EcsTaskDefinitionDiff(
container=container['name'],
field='image',
value=new_image,
old_value=container['image']
)
self._diff.append(diff)
container['image'] = new_image
elif tag:
image_definition = container['image'].rsplit(':', 1)
new_image = f'{image_definition[0]}:{tag.strip()}'
# check if tag changes
if new_image != container['image']:
diff = EcsTaskDefinitionDiff(
container=container['name'],
field='image',
value=new_image,
old_value=container['image']
)
self._diff.append(diff)
container['image'] = new_image
def set_commands(self, **commands):
self.validate_container_options(**commands)
for container in self.containers:
if container['name'] in commands:
new_command = commands[container['name']]
diff = EcsTaskDefinitionDiff(
container=container['name'],
field='command',
value=new_command,
old_value=container.get('command')
)
self._diff.append(diff)
container['command'] = self.parse_command(new_command)
def set_environment(self, environment_list, exclusive=False, env_file=((None, None),)):
environment = {}
if None not in env_file[0]:
for env in env_file:
line = read_env_file(env[0], env[1])
environment_list = line + environment_list
for env in environment_list:
environment.setdefault(env[0], {})
environment[env[0]][env[1]] = env[2]
self.validate_container_options(**environment)
for container in self.containers:
if container['name'] in environment:
self.apply_container_environment(
container=container,
new_environment=environment[container['name']],
exclusive=exclusive,
)
elif exclusive is True:
self.apply_container_environment(
container=container,
new_environment={},
exclusive=exclusive,
)
def apply_container_environment(self, container, new_environment, exclusive=False):
environment = container.get('environment', {})
old_environment = {env['name']: env['value'] for env in environment}
if exclusive is True:
merged = new_environment
else:
merged = old_environment.copy()
merged.update(new_environment)
if old_environment == merged:
return
diff = EcsTaskDefinitionDiff(
container=container['name'],
field='environment',
value=merged,
old_value=old_environment
)
self._diff.append(diff)
container['environment'] = [
{"name": e, "value": merged[e]} for e in merged
]
def set_secrets(self, secrets_list, exclusive=False):
secrets = {}
for secret in secrets_list:
secrets.setdefault(secret[0], {})
secrets[secret[0]][secret[1]] = secret[2]
self.validate_container_options(**secrets)
for container in self.containers:
if container['name'] in secrets:
self.apply_container_secrets(
container=container,
new_secrets=secrets[container['name']],
exclusive=exclusive,
)
elif exclusive is True:
self.apply_container_secrets(
container=container,
new_secrets={},
exclusive=exclusive,
)
def apply_container_secrets(self, container, new_secrets, exclusive=False):
secrets = container.get('secrets', {})
old_secrets = {secret['name']: secret['valueFrom'] for secret in secrets}
if exclusive is True:
merged = new_secrets
else:
merged = old_secrets.copy()
merged.update(new_secrets)
if old_secrets == merged:
return
diff = EcsTaskDefinitionDiff(
container=container['name'],
field='secrets',
value=merged,
old_value=old_secrets
)
self._diff.append(diff)
container['secrets'] = [
{"name": s, "valueFrom": merged[s]} for s in merged
]
def validate_container_options(self, **container_options):
for container_name in container_options:
if container_name not in self.container_names:
raise UnknownContainerError(f'Unknown container: {container_name}')
def set_role_arn(self, role_arn):
if role_arn:
diff = EcsTaskDefinitionDiff(
container=None,
field='role_arn',
value=role_arn,
old_value=self.role_arn
)
self.role_arn = role_arn
self._diff.append(diff)
def set_execution_role_arn(self, execution_role_arn):
if execution_role_arn:
diff = EcsTaskDefinitionDiff(
container=None,
field='execution_role_arn',
value=execution_role_arn,
old_value=self.execution_role_arn
)
self.execution_role_arn = execution_role_arn
self._diff.append(diff)
class EcsTaskDefinitionDiff(object):
def __init__(self, container, field, value, old_value):
self.container = container
self.field = field
self.value = value
self.old_value = old_value
def __repr__(self):
if self.field == 'environment':
return '\n'.join(self._get_environment_diffs(
self.container,
self.value,
self.old_value,
))
elif self.field == 'secrets':
return '\n'.join(self._get_secrets_diffs(
self.container,
self.value,
self.old_value,
))
elif self.container:
return f'Changed {self.field} of container "{self.container}" to: "{self.value}" (was: "{self.old_value}")'
else:
return f'Changed {self.field} to: "{self.value}" (was: "{self.old_value}")'
@staticmethod
def _get_environment_diffs(container, env, old_env):
diffs = []
for name, value in env.items():
old_value = old_env.get(name)
if value != old_value or value and not old_value:
message = f'Changed environment "{name}" of container "{container}" to: "{value}"'
diffs.append(message)
for old_name in old_env.keys():
if old_name not in env.keys():
message = f'Removed environment "{old_name}" of container "{container}"'
diffs.append(message)
return diffs
@staticmethod
def _get_secrets_diffs(container, secrets, old_secrets):
diffs = []
for name, value in secrets.items():
old_value = old_secrets.get(name)
if value != old_value or not old_value:
message = f'Changed secret "{name}" of container "{container}" to: "{value}"'
diffs.append(message)
for old_name in old_secrets.keys():
if old_name not in secrets.keys():
message = f'Removed secret "{old_name}" of container "{container}"'
diffs.append(message)
return diffs
class EcsAction(object):
def __init__(self, client: EcsClient, cluster_name: str, service_name: str):
self._client = client
self._cluster_name = cluster_name
self._service_name = service_name
try:
if service_name:
self._service = self.get_service()
except IndexError:
raise EcsConnectionError(
u'An error occurred when calling the DescribeServices '
u'operation: Service not found.'
)
except ClientError as e:
raise EcsConnectionError(str(e))
except NoCredentialsError:
raise EcsConnectionError(
u'Unable to locate credentials. Configure credentials '
u'by running "aws configure".'
)
def get_service(self):
services_definition = self._client.describe_services(
cluster_name=self._cluster_name,
service_name=self._service_name
)
return EcsService(
cluster=self._cluster_name,
service_definition=services_definition[u'services'][0]
)
def get_current_task_definition(self, service):
return self.get_task_definition(service.task_definition)
def get_task_definition(self, task_definition):
task_definition_payload = self._client.describe_task_definition(
task_definition_arn=task_definition
)
task_definition = EcsTaskDefinition(
tags=task_definition_payload.get('tags', None),
**task_definition_payload[u'taskDefinition']
)
return task_definition
def update_task_definition(self, task_definition):
response = self._client.register_task_definition(
family=task_definition.family,
containers=task_definition.containers,
volumes=task_definition.volumes,
role_arn=task_definition.role_arn,
execution_role_arn=task_definition.execution_role_arn,
tags=task_definition.tags,
additional_properties=task_definition.additional_properties
)
new_task_definition = EcsTaskDefinition(**response[u'taskDefinition'])
return new_task_definition
def deregister_task_definition(self, task_definition):
self._client.deregister_task_definition(task_definition.arn)
def update_service(self, service, desired_count=None):
response = self._client.update_service(
cluster=service.cluster,
service=service.name,
desired_count=desired_count,
task_definition=service.task_definition
)
return EcsService(self._cluster_name, response[u'service'])
def is_deployed(self, service):
if len(service[u'deployments']) != 1:
return False
running_tasks = self._client.list_tasks(
cluster_name=service.cluster,
service_name=service.name
)
if not running_tasks[u'taskArns']:
return service.desired_count == 0
running_count = self.get_running_tasks_count(
service=service,
task_arns=running_tasks[u'taskArns']
)
return service.desired_count == running_count
def get_running_tasks_count(self, service, task_arns):
running_count = 0
tasks_details = self._client.describe_tasks(
cluster_name=self._cluster_name,
task_arns=task_arns
)
for task in tasks_details[u'tasks']:
arn = task[u'taskDefinitionArn']
status = task[u'lastStatus']
if arn == service.task_definition and status == u'RUNNING':
running_count += 1
return running_count
@property
def client(self):
return self._client
@property
def service(self):
return self._service
@property
def cluster_name(self):
return self._cluster_name
@property
def service_name(self):
return self._service_name
class DeployAction(EcsAction):
def deploy(self, task_definition):
try:
self._service.set_task_definition(task_definition)
return self.update_service(self._service)
except ClientError as e:
raise EcsError(str(e))
class ScaleAction(EcsAction):
def scale(self, desired_count):
try:
return self.update_service(self._service, desired_count)
except ClientError as e:
raise EcsError(str(e))
class RunAction(EcsAction):
def __init__(self, client, cluster_name):
super(RunAction, self).__init__(client, cluster_name, None)
self._client = client
self._cluster_name = cluster_name
self.started_tasks = []
def run(self, task_definition, count, started_by, launchtype, subnets,
security_groups, public_ip, platform_version):
try:
result = self._client.run_task(
cluster=self._cluster_name,
task_definition=task_definition.family_revision,
count=count,
started_by=started_by,
overrides=dict(containerOverrides=task_definition.get_overrides()),
launchtype=launchtype,
subnets=subnets,
security_groups=security_groups,
public_ip=public_ip,
platform_version=platform_version,
)
self.started_tasks = result['tasks']
return True
except ClientError as e:
raise EcsError(str(e))
class UpdateAction(EcsAction):
def __init__(self, client):
super(UpdateAction, self).__init__(client, None, None)
class DiffAction(EcsAction):
def __init__(self, client):
super(DiffAction, self).__init__(client, None, None)
class EcsError(Exception):
pass
class EcsConnectionError(EcsError):
pass
class UnknownContainerError(EcsError):
pass
class TaskPlacementError(EcsError):
pass
class UnknownTaskDefinitionError(EcsError):
pass
class EcsTaskDefinitionCommandError(EcsError):
pass
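# Added usage sketch (hypothetical, not part of the original module): redeploy
# a service with a new image tag. The cluster/service names and region are
# placeholders, not values from this repository.
def redeploy(cluster_name='my-cluster', service_name='my-service', tag='latest'):
    client = EcsClient(region_name='us-east-1')
    action = DeployAction(client, cluster_name, service_name)
    task_definition = action.get_current_task_definition(action.service)
    task_definition.set_images(tag=tag)
    new_task_definition = action.update_task_definition(task_definition)
    return action.deploy(new_task_definition)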
| 34.767209
| 119
| 0.585586
| 2,825
| 27,779
| 5.536991
| 0.105841
| 0.059967
| 0.017389
| 0.010357
| 0.341772
| 0.266206
| 0.222478
| 0.187572
| 0.141926
| 0.093211
| 0
| 0.001385
| 0.32431
| 27,779
| 798
| 120
| 34.810777
| 0.831966
| 0.006984
| 0
| 0.26015
| 0
| 0.004511
| 0.077991
| 0.003843
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103759
| false
| 0.009023
| 0.015038
| 0.031579
| 0.231579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e07447362c2cd948e8959b2a92a8309441af1ece
| 3,715
|
py
|
Python
|
sbm.py
|
emmaling27/networks-research
|
be209e2b653a1fe9eec480a94538d59104e4aa23
|
[
"MIT"
] | null | null | null |
sbm.py
|
emmaling27/networks-research
|
be209e2b653a1fe9eec480a94538d59104e4aa23
|
[
"MIT"
] | null | null | null |
sbm.py
|
emmaling27/networks-research
|
be209e2b653a1fe9eec480a94538d59104e4aa23
|
[
"MIT"
] | null | null | null |
import networkx as nx
from scipy.special import comb
import attr
@attr.s
class Count(object):
"""Count class with monochromatic and bichromatic counts"""
n = attr.ib()
monochromatic = attr.ib(default=0)
bichromatic = attr.ib(default=0)
def count_edge(self, u, v):
if (u < self.n / 2) != (v < self.n / 2):
self.bichromatic += 1
else:
self.monochromatic += 1
class SBM():
"""SBM class with predicted numbers of wedges and local bridges and actual counts"""
def __init__(self, n, p, q, seed=0):
self.n = n
self.p = p
self.q = q
self.g = nx.generators.community.stochastic_block_model(
[int(self.n / 2), int(self.n / 2)],
[[p, q], [q, p]],
seed=seed)
def is_bichromatic(self, u, v):
return (u < self.n / 2) != (v < self.n / 2)
def get_bichromatic_fraction(self):
bichromatic = 0
for (x, y) in self.g.edges():
if self.is_bichromatic(x, y):
bichromatic += 1
return bichromatic / len(self.g.edges())
def is_local_bridge(self, u, v):
return not set(self.g.neighbors(u)).intersection(set(self.g.neighbors(v)))
def count_local_bridges(self):
monochromatic, bichromatic = 0, 0
for (u, v) in self.g.edges():
if self.is_local_bridge(u, v):
if self.is_bichromatic(u, v):
bichromatic += 1
else:
monochromatic += 1
return monochromatic, bichromatic
def _count_possible_edges(self, local_bridge):
count = Count(self.n)
for u in range(self.n):
for v in range(u+1, self.n):
if not self.g.has_edge(u, v) and \
(self.is_local_bridge(u, v) == local_bridge):
count.count_edge(u, v)
return count
def count_possible_local_bridges(self):
return self._count_possible_edges(local_bridge=True)
def count_possible_closures(self):
return self._count_possible_edges(local_bridge=False)
def count_wedges(self):
count = Count(self.n)
for v in self.g.nodes():
sorted_neighbors = sorted(self.g.neighbors(v))
for i in range(len(sorted_neighbors)):
for j in range(i + 1, len(sorted_neighbors)):
if not self.g.has_edge(sorted_neighbors[i], sorted_neighbors[j]):
count.count_edge(sorted_neighbors[i], sorted_neighbors[j])
return count
def predicted_wedges(self):
return Count(
self.n,
monochromatic=3 * 2 * comb(self.n/2, 3) * self.p**2 * (1-self.p) \
+ self.n * comb(self.n/2, 2) * self.q**2 * (1-self.p),
bichromatic=2 * self.n * comb(self.n/2, 2) * self.p * self.q * (1-self.q)
)
def predicted_local_bridges(self):
return Count(
self.n,
monochromatic=2 * (1-self.p) * comb(self.n/2, 2) * (1-self.p**2)**(self.n/2-2) * (1-self.q**2)**(self.n/2),
bichromatic=(1-self.q) * (self.n/2) ** 2 * (1-self.p*self.q)**(self.n-2)
)
def predicted_possible_closures(self):
return Count(
self.n,
monochromatic=2 * (1-self.p) * comb(self.n/2, 2) * (1 - (1-self.p**2)**(self.n/2-2) * (1-self.q**2)**(self.n/2)),
bichromatic=(1-self.q) * (self.n/2) ** 2 * (1 - (1-self.p*self.q)**(self.n-2))
)
def predicted_possible_edges(self):
return Count(
self.n,
monochromatic=2 * (1-self.p) * comb(self.n/2, 2),
bichromatic=(1-self.q) * (self.n/2) ** 2
)
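# Added usage sketch (hypothetical, not part of the original module): compare
# observed wedge counts against the closed-form predictions for a small SBM.
if __name__ == '__main__':
    sbm = SBM(n=100, p=0.3, q=0.05, seed=1)
    print('observed: ', sbm.count_wedges())
    print('predicted:', sbm.predicted_wedges())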
| 35.04717
| 125
| 0.54105
| 528
| 3,715
| 3.698864
| 0.143939
| 0.084485
| 0.064516
| 0.035842
| 0.395801
| 0.366615
| 0.317972
| 0.260625
| 0.181772
| 0.169483
| 0
| 0.02902
| 0.313594
| 3,715
| 106
| 126
| 35.04717
| 0.736863
| 0.035532
| 0
| 0.183908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16092
| false
| 0
| 0.034483
| 0.091954
| 0.390805
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0745cd9bd4ca77f2c09e9dd6bb425b9d75991b3
| 4,516
|
py
|
Python
|
src/data/graph/ops/anagram_transform_op.py
|
PhilHarnish/forge
|
663f19d759b94d84935c14915922070635a4af65
|
[
"MIT"
] | 2
|
2020-08-18T18:43:09.000Z
|
2020-08-18T20:05:59.000Z
|
src/data/graph/ops/anagram_transform_op.py
|
PhilHarnish/forge
|
663f19d759b94d84935c14915922070635a4af65
|
[
"MIT"
] | null | null | null |
src/data/graph/ops/anagram_transform_op.py
|
PhilHarnish/forge
|
663f19d759b94d84935c14915922070635a4af65
|
[
"MIT"
] | null | null | null |
from typing import Callable, Collection, Iterable, List, Union
from data.anagram import anagram_iter
from data.graph import _op_mixin, bloom_mask, bloom_node, bloom_node_reducer
Transformer = Callable[['bloom_node.BloomNode'], 'bloom_node.BloomNode']
_SPACE_MASK = bloom_mask.for_alpha(' ')
def merge_fn(
host: 'bloom_node.BloomNode',
sources: List['bloom_node.BloomNode'],
extra: list,
whitelist: Collection = None,
blacklist: Collection = None,
**kwargs) -> None:
del kwargs
assert len(sources) == 1
exit_node = sources[0]
assert len(extra) == 1
state = _normalize_state(exit_node, extra[0])
children = list(state)
# TODO: Need a cleaner way to inject and rerun these nodes.
if len(children) == 1:
host.op = _op_mixin.Op(_op_mixin.OP_IDENTITY, children)
else:
host.op = _op_mixin.Op(_op_mixin.OP_ADD, children)
# HACK: This duplicates BloomNode._expand, essentially.
for key, reduced in bloom_node_reducer.reduce(
host, whitelist=whitelist, blacklist=blacklist):
host.link(key, reduced)
class _AnagramTransformIndex(object):
"""Singleton object used during anagram traversal."""
def __init__(
self,
exit_node: 'bloom_node.BloomNode',
root: anagram_iter.AnagramIter) -> None:
self._exit_node = exit_node
reference = bloom_node.BloomNode()
reference.distance(0)
reference.weight(1, True)
reference_choice_paths = {}
for choice, _ in root.available():
reference_choice_paths[choice] = choice(reference)
self._reference_choice_paths = reference_choice_paths
self._child_cache = {}
def iter(
self,
anagrams: anagram_iter.AnagramIter,
) -> Iterable['bloom_node.BloomNode']:
for child_choice, child_anagrams in anagrams.items():
key = (child_choice, child_anagrams)
if key not in self._child_cache:
self._child_cache[key] = self._make_child(child_choice, child_anagrams)
yield self._child_cache[key]
def _make_child(
self,
choice: Transformer,
anagrams: anagram_iter.AnagramIter) -> 'bloom_node.BloomNode':
children = list(anagrams.available())
if not children:
return choice(self._exit_node)
elif len(children) == 1:
child_choice, child_duplicates = children[0]
node = self._exit_node
while child_duplicates:
node = child_choice(node)
child_duplicates -= 1
return choice(node)
# Compute requirements from exits.
node = self._exit_node // _AnagramState(self, anagrams)
node.provide_mask = self._exit_node.provide_mask
node.require_mask = self._exit_node.require_mask
node.lengths_mask = self._exit_node.lengths_mask
node.annotate({'anagrams': anagrams})
node.max_weight = self._exit_node.max_weight
nodes_with_spaces = []
for child_choice, child_duplicates in children:
path = self._reference_choice_paths[child_choice]
if path.require_mask and path.require_mask & _SPACE_MASK:
nodes_with_spaces.append(path)
node.provide_mask |= path.provide_mask
node.require_mask |= path.require_mask
node.lengths_mask = bloom_mask.lengths_product(
node.lengths_mask, path.lengths_mask, duplicates=child_duplicates)
if nodes_with_spaces:
# Distance and provide masks should be correct. Reset required values.
# Any route to any of the spaces is now okay but 1+ must be taken.
node.require_mask = bloom_mask.REQUIRE_NOTHING
for node_with_spaces in nodes_with_spaces:
# Only require what all node_with_spaces require.
node.require_mask &= node_with_spaces.require_mask
return choice(node)
class _AnagramState(object):
def __init__(
self,
index: _AnagramTransformIndex,
anagrams: anagram_iter.AnagramIter):
self._index = index
self._anagrams = anagrams
def __iter__(self) -> Iterable['bloom_node.BloomNode']:
yield from self._index.iter(self._anagrams)
def __repr__(self) -> str:
return '_AnagramState(%s)' % self._anagrams
__str__ = __repr__
def _normalize_state(
exit_node: 'bloom_node.BloomNode',
index: Union[Iterable, anagram_iter.AnagramIter]) -> _AnagramState:
if isinstance(index, _AnagramState):
return index
# `index` is an iterable list of ???, one-by-one these will be taken as a
# route to the `exit_node`.
initial_anagrams = anagram_iter.from_choices(index)
index = _AnagramTransformIndex(exit_node, initial_anagrams)
return _AnagramState(index, initial_anagrams)
| 35.84127
| 79
| 0.717449
| 579
| 4,516
| 5.267703
| 0.24525
| 0.039344
| 0.059016
| 0.014426
| 0.066885
| 0.015738
| 0.015738
| 0.015738
| 0
| 0
| 0
| 0.003005
| 0.189548
| 4,516
| 125
| 80
| 36.128
| 0.830328
| 0.104739
| 0
| 0.098039
| 0
| 0
| 0.051117
| 0
| 0
| 0
| 0
| 0.008
| 0.019608
| 1
| 0.078431
| false
| 0
| 0.029412
| 0.009804
| 0.196078
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0756c223fe0a2644bdda0e4b367139a612e5089
| 943
|
py
|
Python
|
setup.py
|
gibsonMatt/stacks-pairwise
|
8f3cde603c2bfed255f6c399557e9332072886fb
|
[
"MIT"
] | null | null | null |
setup.py
|
gibsonMatt/stacks-pairwise
|
8f3cde603c2bfed255f6c399557e9332072886fb
|
[
"MIT"
] | null | null | null |
setup.py
|
gibsonMatt/stacks-pairwise
|
8f3cde603c2bfed255f6c399557e9332072886fb
|
[
"MIT"
] | null | null | null |
import pathlib
import os
from setuptools import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# specify requirements of your package here
REQUIREMENTS = ['biopython', 'numpy', 'pandas']
setup(name='stacksPairwise',
version='0.0.0',
      description='Calculate pairwise divergence (pairwise pi) from Stacks `samples.fa` output file',
long_description=README,
long_description_content_type="text/markdown",
url='https://github.com/gibsonmatt/stacks-pairwise',
author='Matt Gibson',
author_email='matthewjsgibson@gmail.com',
license='MIT',
packages=['stacksPairwise'],
install_requires=REQUIREMENTS,
entry_points={
"console_scripts": [
"stacksPairwise=stacksPairwise.__main__:main"
]
},
keywords='genetics genotyping sequencing Stacks'
)
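# Usage sketch (illustrative only; assumes the standard setuptools workflow):
#
#   pip install .
#   stacksPairwise ...   # console script wired to stacksPairwise.__main__:main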
| 29.46875
| 100
| 0.694592
| 103
| 943
| 6.194175
| 0.669903
| 0.00627
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003937
| 0.191941
| 943
| 31
| 101
| 30.419355
| 0.833333
| 0.110286
| 0
| 0
| 0
| 0
| 0.398802
| 0.081437
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e0774173b092651de83171acaf096405634f72ae
| 2,536
|
py
|
Python
|
projects/tutorials/object_nav_ithor_dagger_then_ppo_one_object.py
|
klemenkotar/dcrl
|
457be7af1389db37ec12e165dfad646e17359162
|
[
"MIT"
] | 18
|
2021-06-09T04:50:47.000Z
|
2022-02-04T22:56:56.000Z
|
projects/tutorials/object_nav_ithor_dagger_then_ppo_one_object.py
|
klemenkotar/dcrl
|
457be7af1389db37ec12e165dfad646e17359162
|
[
"MIT"
] | null | null | null |
projects/tutorials/object_nav_ithor_dagger_then_ppo_one_object.py
|
klemenkotar/dcrl
|
457be7af1389db37ec12e165dfad646e17359162
|
[
"MIT"
] | 4
|
2021-06-09T06:20:25.000Z
|
2022-03-13T03:11:17.000Z
|
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from allenact.algorithms.onpolicy_sync.losses import PPO
from allenact.algorithms.onpolicy_sync.losses.imitation import Imitation
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
from projects.tutorials.object_nav_ithor_ppo_one_object import (
ObjectNavThorPPOExperimentConfig,
)
class ObjectNavThorDaggerThenPPOExperimentConfig(ObjectNavThorPPOExperimentConfig):
"""A simple object navigation experiment in THOR.
Training with DAgger and then PPO.
"""
@classmethod
def tag(cls):
return "ObjectNavThorDaggerThenPPO"
@classmethod
def training_pipeline(cls, **kwargs):
dagger_steps = int(1e4)
ppo_steps = int(1e6)
lr = 2.5e-4
num_mini_batch = 2 if not torch.cuda.is_available() else 6
update_repeats = 4
num_steps = 128
metric_accumulate_interval = cls.MAX_STEPS * 10 # Log every 10 max length tasks
save_interval = 10000
gamma = 0.99
use_gae = True
gae_lambda = 1.0
max_grad_norm = 0.5
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=metric_accumulate_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={
"ppo_loss": PPO(clip_decay=LinearDecay(ppo_steps), **PPOConfig),
"imitation_loss": Imitation(), # We add an imitation loss.
},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(
loss_names=["imitation_loss"],
teacher_forcing=LinearDecay(
startp=1.0, endp=0.0, steps=dagger_steps,
),
max_stage_steps=dagger_steps,
),
PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps,),
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
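# --- Illustration (a sketch, not allenact's implementation) -----------------
# The schedule assumed above: teacher forcing starts at startp=1.0 and decays
# linearly to endp=0.0 over the DAgger stage, after which training continues
# on the PPO loss alone.
def linear_decay_value(startp: float, endp: float, steps: int, step: int) -> float:
    frac = min(max(step / float(steps), 0.0), 1.0)  # clamp progress to [0, 1]
    return startp + (endp - startp) * frac

assert linear_decay_value(1.0, 0.0, 10000, 0) == 1.0
assert linear_decay_value(1.0, 0.0, 10000, 5000) == 0.5
assert linear_decay_value(1.0, 0.0, 10000, 10000) == 0.0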
| 34.27027
| 88
| 0.635252
| 273
| 2,536
| 5.619048
| 0.413919
| 0.031291
| 0.043025
| 0.05867
| 0.078227
| 0.078227
| 0
| 0
| 0
| 0
| 0
| 0.018364
| 0.291404
| 2,536
| 73
| 89
| 34.739726
| 0.835281
| 0.054811
| 0
| 0.079365
| 0
| 0
| 0.033179
| 0.01092
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031746
| false
| 0
| 0.126984
| 0.015873
| 0.206349
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e077592087a48a19c044b7ca66417c720c7d2548
| 12,328
|
py
|
Python
|
BioCAT/src/Calculating_scores.py
|
DanilKrivonos/BioCAT-nrp-BIOsynthesis-Caluster-Analyzing-Tool
|
d58d330e3e11380c0c917a0ad9c12a51447f1624
|
[
"MIT"
] | 4
|
2021-04-16T14:42:47.000Z
|
2021-06-11T14:29:35.000Z
|
BioCAT/src/Calculating_scores.py
|
DanilKrivonos/BioCAT-nrp-BIOsynthesis-Caluster-Analyzing-Tool
|
d58d330e3e11380c0c917a0ad9c12a51447f1624
|
[
"MIT"
] | 3
|
2021-07-23T09:30:59.000Z
|
2021-11-07T17:40:59.000Z
|
BioCAT/src/Calculating_scores.py
|
DanilKrivonos/BioCAT-nrp-BIOsynthesis-Caluster-Analyzing-Tool
|
d58d330e3e11380c0c917a0ad9c12a51447f1624
|
[
"MIT"
] | 1
|
2022-02-27T17:19:50.000Z
|
2022-02-27T17:19:50.000Z
|
from numpy import array
from pickle import load
from pandas import read_csv
import os
from BioCAT.src.Combinatorics import multi_thread_shuffling, multi_thread_calculating_scores, make_combine, get_score, get_max_aminochain, skipper
# Importing random forest model
modelpath = os.path.dirname(os.path.abspath(__file__)) + '/RFC.dump'
Rf = load(open(modelpath, 'rb'))
# The function generates lists of shuffled matrices
def make_shuffle_matrix(matrix, cpu, iterat):
"""
The function generates collections of shuffled matrices.
Parameters
----------
matrix : pandas DataFrame
PSSM profile.
cpu : int
Number of threads used.
iterat : int
Number of iterations of shuffling.
Returns
-------
module_shuffling_matrix : list
List of matrix, shuffled by module.
substrate_shuffling_matrix : list
List of matrix, shuffled by substrate.
"""
module_shuffling_matrix = multi_thread_shuffling(matrix, ShufflingType='module', iterations=iterat, threads=cpu)
substrate_shuffling_matrix = multi_thread_shuffling(matrix, ShufflingType='substrate', iterations=iterat, threads=cpu)
return module_shuffling_matrix, substrate_shuffling_matrix
# The function finds the sequence with the maximum possible value resulting from alignment
def get_MaxSeq(matrix, variant_seq):
"""
The function builds the monomer sequence with the maximum possible alignment score.
Parameters
----------
matrix : pandas DataFrame
PSSM profile.
variant_seq : list
Variant of core peptide chain.
Returns
-------
shuffled_scores : list
List of scores for shuffled matrix.
"""
MaxSeq = []
subs = matrix.keys()[1: ]
# Find the sequence which has the maximum alignment score
for idx in matrix.index:
MAX_value = max(list(matrix.iloc[idx][1:]))
for key in subs:
if matrix[key][idx] == MAX_value:
MaxSeq.append(key) # If two monomers have the same value
break
# Making two variants of MaxSeq
MaxSeq_full = MaxSeq.copy()
MaxSeq_nan = MaxSeq.copy()
for max_sub_idx in range(len(MaxSeq)):
if variant_seq[max_sub_idx] == 'nan':
MaxSeq_nan[max_sub_idx] = 'nan' # Adding nan to MaxSeq
return MaxSeq_full, MaxSeq_nan
# The function gives an information about clusters
def get_cluster_info(table, BGC_ID, target_file):
"""
The function returns information about a cluster.
Parameters
----------
table : pandas DataFrame
Table with meta information about NRPS clusters.
BGC_ID : str
PSSM cluster ID.
target_file : str
PSSM profile file name.
Returns
-------
Name : str
Cluster ID.
Coord_cluster : str
Coordinate of cluster.
strand : str
Strand of cluster.
"""
for ind in table[table['ID'].str.contains(BGC_ID)].index:
Name = table[table['ID'].str.contains(target_file.split('.')[0].split('_A_')[1])]['Name'][ind]
Coord_cluster = table['Coordinates of cluster'][ind]
strand = table['Gen strand'][ind]
break
return Name, Coord_cluster, strand
# Calculate scores
def calculate_scores(variant_seq, matrix, substrate_shuffling_matrix, module_shuffling_matrix, cpu, iterat):
"""
Calculating scores.
Parameters
----------
variant_seq : list
Variant of core peptide chain.
matrix : pandas DataFrame
PSSM profile.
substrate_shuffling_matrix : list
List of matrix, shuffled by substrate.
module_shuffling_matrix : list
List of matrix, shuffled by module.
cpu : int
Number of threads used.
iterat : int
Number of iterations of shuffling.
Returns
-------
Sln_score : float
Mln_score : float
Slt_score : float
Mlt_score : float
Sdn_score : float
Mdn_score : float
Sdt_score : float
Mdt_score : float
Scores, which calculated with shuffling matrix by different variants.
M - module shuffling S - substrate shuffling
l - logarithmic transformation of score d - raw score
n - MaxSeq with nan replacement t - MaxSeq without nan replacement
Relative_score : float
Relative score (Probability of target class)
Binary : float
Binary score of cluster matching.
"""
# Finding the sequence with the maximum possible value resulting from alignment
MaxSeq_full, MaxSeq_nan = get_MaxSeq(matrix, variant_seq)
# Calculating shuffled scores
Sln_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, substrate_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu))
Mln_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, module_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu))
Slt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, substrate_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu))
Mlt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, module_shuffling_matrix, type_value='log', iterations=iterat, threads=cpu))
Sdn_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, substrate_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu))
Mdn_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_nan, module_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu))
Sdt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, substrate_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu))
Mdt_shuffled_score = array(multi_thread_calculating_scores(MaxSeq_full, module_shuffling_matrix, type_value=None, iterations=iterat, threads=cpu))
# Calculating scores for target sequence
log_target_score = get_score(variant_seq, matrix, type_value='log')
non_log_target_score = get_score(variant_seq, matrix, type_value=None)
# Calculating features scores
Sln_score = len(Sln_shuffled_score[Sln_shuffled_score < log_target_score])/len(Sln_shuffled_score)
Mln_score = len(Mln_shuffled_score[Mln_shuffled_score < log_target_score])/len(Mln_shuffled_score)
Slt_score = len(Slt_shuffled_score[Slt_shuffled_score < log_target_score])/len(Slt_shuffled_score)
Mlt_score = len(Mlt_shuffled_score[Mlt_shuffled_score < log_target_score])/len(Mlt_shuffled_score)
Sdn_score = len(Sdn_shuffled_score[Sdn_shuffled_score < non_log_target_score])/len(Sdn_shuffled_score)
Mdn_score = len(Mdn_shuffled_score[Mdn_shuffled_score < non_log_target_score])/len(Mdn_shuffled_score)
Sdt_score = len(Sdt_shuffled_score[Sdt_shuffled_score < non_log_target_score])/len(Sdt_shuffled_score)
Mdt_score = len(Mdt_shuffled_score[Mdt_shuffled_score < non_log_target_score])/len(Mdt_shuffled_score)
# Calculating Relative score
Relative_score = round(Rf.predict_proba([[Sln_score, Mln_score,
Sdn_score, Mdn_score,
Sdt_score, Mdt_score,
Slt_score, Mlt_score
]])[0][1], 3)
Binary = Rf.predict([[Sln_score, Mln_score,
Sdn_score, Mdn_score,
Sdt_score, Mdt_score,
Slt_score, Mlt_score
]])[0]
return Sln_score, Mln_score, Slt_score, Mlt_score, Sdn_score, Mdn_score, Sdt_score, Mdt_score, Relative_score, Binary
def give_results(tsv_out, folder, files, table, ID, PeptideSeq, skip, cpu, iterat):
"""
The function calculates scores for every cluster and records the results.
Parameters
----------
tsv_out : dict
Empty dictionary for adding results.
folder : str
Path to PSSMs.
files : list
List of PSSMs.
table : pandas DataFrame
Table with meta information about NRPS clusters.
ID : str
Name of substance.
PeptideSeq : dict
Core peptide chains for different biosynthesis types (e.g. A, B, or C).
skip : int
Number of presumptive skips.
cpu : int
Number of threads used.
iterat : int
Number of iterations of shuffling.
Returns
-------
tsv_out : dict
Full dictionary for adding results.
"""
for target_file in files:
try:
BGC_ID = target_file.split('.')[0].split('_A_')[1]
except IndexError:
continue
if '_A_' not in target_file:
continue
Name, Coord_cluster, strand = get_cluster_info(table, BGC_ID, target_file) # Getting information about cluster
BGC = read_csv(folder + target_file, sep='\t')
# Skipping mode
if skip == 0:
BGC = [BGC]
else:
BGC = skipper(BGC, skip)
for matrix in BGC:
# Check quality of matrix
if len(matrix) == 1:
continue
check = 0
values = matrix.drop(matrix.columns[0], axis=1).values
for i in values:
if not any(i):  # the row contains only zeros
check += 1
if check == len(values):  # every row is zero: the matrix holds only unrecognized monomers
continue
# Generating shuffling matrix
module_shuffling_matrix, substrate_shuffling_matrix = make_shuffle_matrix(matrix, cpu, iterat)
for BS_type in PeptideSeq:  # for every biosynthesis profile pathway
if PeptideSeq[BS_type] is None:  # if the sequence contains only nan monomers
continue
if len(PeptideSeq[BS_type]) == 0:  # if there is no variant for this type
continue
# Check correctness of PeptideSeq
length_max = get_max_aminochain(PeptideSeq[BS_type])
EPs = make_combine(PeptideSeq[BS_type], length_max, matrix, delta=3)
if EPs is None: # If the sequence length can't be scaled to the cluster size
continue
for variant_seq in EPs:
Sln_score, Mln_score, Slt_score, Mlt_score, Sdn_score, Mdn_score, Sdt_score, Mdt_score, Relative_score, Binary = calculate_scores(variant_seq, matrix, substrate_shuffling_matrix, module_shuffling_matrix, cpu, iterat)
# Recording results into the output dictionary
tsv_out['Chromosome ID'].append(Name)
tsv_out['Coordinates of cluster'].append(Coord_cluster)
tsv_out['Strand'].append(strand)
tsv_out['Substance'].append(ID)
tsv_out['BGC ID'].append(BGC_ID)
tsv_out['Putative linearized NRP sequence'].append('--'.join(variant_seq))
tsv_out['Biosynthesis profile'].append('Type {}'.format(BS_type))
tsv_out['Sln score'].append(Sln_score) # substrate shuffling, log score, nan kept in the best possible sequence
tsv_out['Mln score'].append(Mln_score) # module shuffling, log score, nan kept in the best possible sequence
tsv_out['Sdn score'].append(Sdn_score) # substrate shuffling, raw score, nan kept in the best possible sequence
tsv_out['Mdn score'].append(Mdn_score) # module shuffling, raw score, nan kept in the best possible sequence
tsv_out['Sdt score'].append(Sdt_score) # substrate shuffling, raw score, full best possible sequence
tsv_out['Mdt score'].append(Mdt_score) # module shuffling, raw score, full best possible sequence
tsv_out['Slt score'].append(Slt_score) # substrate shuffling, log score, full best possible sequence
tsv_out['Mlt score'].append(Mlt_score) # module shuffling, log score, full best possible sequence
tsv_out['Relative score'].append(Relative_score) #Final score
tsv_out['Binary'].append(Binary) #Binary value
return tsv_out
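# --- Illustration (synthetic data, not part of the BioCAT pipeline) ---------
# Each feature score in calculate_scores() is an empirical quantile: the
# fraction of shuffled-matrix scores that fall below the target score.
def empirical_score(shuffled_scores, target_score):
    shuffled = array(shuffled_scores)  # numpy `array` imported at the top
    return len(shuffled[shuffled < target_score]) / len(shuffled)

assert empirical_score([1.0, 2.0, 3.0, 4.0], 3.5) == 0.75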
| 42.510345
| 236
| 0.649903
| 1,487
| 12,328
| 5.149294
| 0.169469
| 0.054329
| 0.030168
| 0.033956
| 0.539637
| 0.474468
| 0.438684
| 0.372731
| 0.325454
| 0.319054
| 0
| 0.002117
| 0.271901
| 12,328
| 289
| 237
| 42.657439
| 0.850936
| 0.332414
| 0
| 0.127119
| 0
| 0
| 0.039943
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042373
| false
| 0
| 0.042373
| 0
| 0.127119
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e07835355388fff9c6902a335662f753bb73c86c
| 14,599
|
py
|
Python
|
Template.py
|
rainshen49/citadel-trading-comp
|
3c3b6464f548d4920f46b5f5cd113ebc4a1d08a5
|
[
"MIT"
] | 2
|
2018-12-11T03:33:06.000Z
|
2021-09-21T01:12:58.000Z
|
Template.py
|
rainshen49/citadel-trading-comp
|
3c3b6464f548d4920f46b5f5cd113ebc4a1d08a5
|
[
"MIT"
] | null | null | null |
Template.py
|
rainshen49/citadel-trading-comp
|
3c3b6464f548d4920f46b5f5cd113ebc4a1d08a5
|
[
"MIT"
] | null | null | null |
import signal
import requests
import time
from math import floor
shutdown = False
MAIN_TAKER = 0.0065
MAIN_MAKER = 0.002
ALT_TAKER = 0.005
ALT_MAKER = 0.0035
TAKER = (MAIN_TAKER + ALT_TAKER)*2
MAKER = MAIN_MAKER + ALT_MAKER
TAKEMAIN = MAIN_TAKER - ALT_MAKER
TAKEALT = ALT_TAKER - MAIN_MAKER
BUFFER = 0.01
NaN = float('nan')
class ApiException(Exception):
pass
class Book(object):
def __init__(self, sym, json):
global NaN
self.sym = sym
self.json = json
# could be cached
self.bids = self.json['bids']
self.asks = self.json['asks']
self.ask_price = 1
self.asks_quantity_left = 0
self.bid_price = 1
self.bids_quantity_left = 0
if self.bids:
self.bid_price = self.bids[0]['price']
if self.asks:
self.ask_price = self.asks[0]['price']
def bids_room(self):
if self.bids:
quantity = sum([b['quantity']
for b in self.bids if b['price'] == self.bid_price])
filled = sum([b['quantity_filled']
for b in self.bids if b['price'] == self.bid_price])
return quantity - filled
else:
return 0
def asks_room(self):
if self.asks:
quantity = sum([b['quantity']
for b in self.asks if b['price'] == self.ask_price])
filled = sum([b['quantity_filled']
for b in self.asks if b['price'] == self.ask_price])
return quantity - filled
else:
return 0
class Limits(dict):
def __init__(self, json):
self.update(json)
self.gross_limit = int(json['gross_limit'])
self.net_limit = int(json['net_limit'])
self.gross = int(json['gross'])
self.net = int(json['net'])
class OHLC(dict):
def __init__(self, sym, json):
self.sym = sym
self.update(json)
self.tick = json['tick']
self.open = json['open']
self.high = json['high']
self.low = json['low']
self.close = json['close']
class Shock(dict):
def __init__(self, news, currtick):
self.ticker = news['ticker']
self.elapsed = currtick - news['tick']
headline = news['headline']
try:
self.amount = float(headline[-6:].replace('$', ''))
except ValueError:
self.amount = 0
class Session(object):
def __init__(self, url, key):
self.url = url
self.key = key
self.tick = -1
def __enter__(self):
self.session = requests.Session()
self.session.headers.update({'X-API-Key': self.key})
return self
def __exit__(self, type, value, traceback):
self.session.close()
def get_tick(self):
while True:
resp = self.session.get(self.url + '/v1/case', params=None)
if not resp.ok:
raise ApiException('could not get tick: ' + str(resp))
json = resp.json()
if json['status'] == 'STOPPED' or shutdown:
return False
if json['tick'] != self.tick:
self.tick = json['tick']
print('.', self.tick)
return True
# this timer is unnecessary, network latency should be enough
time.sleep(0.1)
def get_book(self, sym):
resp = self.session.get(
self.url + '/v1/securities/book', params={'ticker': sym})
if not resp.ok:
raise ApiException('could not get book: ' + str(resp))
return Book(sym, resp.json())
def send_order(self, sym, side, price, size):
resp = self.session.post(self.url + '/v1/orders', params={
'ticker': sym, 'type': 'LIMIT', 'action': side, 'quantity': size, 'price': price})
if resp.ok:
print('sent order', side, sym, size, '@', price)
else:
print('failed to send order', side, sym,
size, '@', price, ':', resp.text)
def getLimit(self):
resp = self.session.get(self.url+'/v1/limits')
if not resp.ok:
raise ApiException('could not get limit: '+str(resp))
return Limits(resp.json()[0])
def getSecurities(self, sym=None):
if sym is None:
resp = self.session.get(self.url+'/v1/securities')
else:
resp = self.session.get(
self.url+'/v1/securities', params={'ticker': sym})
if not resp.ok:
raise ApiException('could not get position: '+str(resp))
json = resp.json()
return {sec['ticker']: {k: sec[k] for k in [
"position",
"vwap",
"nlv",
"last",
"bid",
"bid_size",
"ask",
"ask_size",
"unrealized",
"realized"
]} for sec in json}
def get_OHLC(self, sym, ticks=50):
resp = self.session.get(
self.url + '/v1/securities/history', params={'ticker': sym,'limit':ticks})
if not resp.ok:
raise ApiException('could not get OHLC: ' + str(resp))
return [OHLC(sym, ohlc) for ohlc in resp.json()]
def buy(self, sym, price, size):
self.send_order(sym, 'BUY', price, size)
def sell(self, sym, price, size):
self.send_order(sym, 'SELL', price, size)
def send_market(self, sym, side, size):
resp = self.session.post(self.url + '/v1/orders', params={
'ticker': sym, 'type': 'MARKET', 'action': side, 'quantity': size})
if resp.ok:
json = resp.json()
print('market order', side, sym, size, '@', json['vwap'])
return json['vwap']
else:
print('failed to send order', side, sym,
size, '@Market:', resp.text)
return 0
def buyM(self, sym, size):
return self.send_market(sym, 'BUY', size)
def sellM(self, sym, size):
return self.send_market(sym, 'SELL', size)
def getNews(self):
resp = self.session.get(self.url + '/v1/news', params={'limit': 10})
if not resp.ok:
raise ApiException('failed to get news', resp.text)
else:
json = resp.json()
# only care about recent news
return [Shock(news, self.tick) for news in json if news['tick'] > self.tick-4]
def getTrader(self):
resp = self.session.get(self.url + '/v1/trader')
if not resp.ok:
raise ApiException('failed to get trader info', resp.text)
else:
json = resp.json()
return json
def main():
# price does change in every tick
# check position
# plain arbitrage
# index arbitrage
# shock handling
# wave riding
# pairTickers = [('WMT-M', 'WMT-A'), ('CAT-M', 'CAT-A'), ('MMM-M', 'MMM-A')]
with Session('http://localhost:9998', 'VHK3DEDE') as session:
while session.get_tick():
try:
shock_runner(session)
exchange_arbitrage(session, "WMT-M", "WMT-A")
exchange_arbitrage(session, "CAT-M", "CAT-A")
exchange_arbitrage(session, "MMM-M", "MMM-A")
index_arbitrage(session, ['WMT', 'MMM', 'CAT'])
except Exception as ex:
print("error", str(ex))
# trader = session.getTrader()
# print(trader['nlv'])
# TODO: position cleaner: try to reduce gross position loss-free
# TODO: implement range runner for the last x ticks
def avg(arr):
return sum(arr)/float(len(arr))
def window_trend(left,right):
leftavg = avg(left)
rightavg = avg(right)
if rightavg > leftavg:
return 1
elif rightavg < leftavg:
return -1
else:
return 0
def splitarr(arr):
n = len(arr)
left = arr[:n//2]
right = arr[n//2:]
return left,right
def wwindow_trend(prices):
left, right = splitarr(prices)
trend = window_trend(left,right)
lleft, lright = splitarr(left)
rleft, rright = splitarr(right)
trendl = window_trend(lleft,lright)
trendr = window_trend(rleft,rright)
return trend + trendl + trendr
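# --- Illustration (synthetic prices, not exchange data) ---------------------
# wwindow_trend votes at three window scales, so a monotone series scores the
# extreme values +3 or -3:
def _demo_wwindow_trend():
    rising = list(range(20))
    assert wwindow_trend(rising) == 3
    assert wwindow_trend(rising[::-1]) == -3

_demo_wwindow_trend()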
def trend_runner(session, ticker):
if session.tick<20:
return
# short term trend
prices = session.get_OHLC(ticker, 20)
highs = [price.high for price in prices]
lows = [price.low for price in prices]
highTrend = wwindow_trend(highs)
lowTrend = wwindow_trend(lows)
if highTrend+lowTrend < -4:
# volatile, but no trend
session.buyM(ticker,1000)
if highTrend+lowTrend > 4:
session.sellM(ticker,1000)
print(ticker,"short hightrend",highTrend,"lowtrend",lowTrend)
if session.tick<100:
return
prices = session.get_OHLC(ticker, 100)
highs = [price.high for price in prices]
lows = [price.low for price in prices]
highTrend = wwindow_trend(highs)
lowTrend = wwindow_trend(lows)
# grown too much
if highTrend+lowTrend < -4:
# volatile, but no trend
session.sellM(ticker,1000)
# dropped too much
if highTrend+lowTrend > 4:
session.buyM(ticker,1000)
print(ticker,"long hightrend",highTrend,"lowtrend",lowTrend)
def shock_runner(session):
shocks = session.getNews()
quantity = 50000
for shock in sorted(shocks, key=lambda s: s.elapsed):
Mticker = shock.ticker+"-M"
Aticker = shock.ticker+"-A"
if shock.elapsed < 2:
if shock.amount > MAIN_TAKER + BUFFER*2:
session.buyM(Mticker, quantity)
session.buyM(Aticker, quantity)
elif - shock.amount > MAIN_TAKER + BUFFER*2:
session.sellM(Mticker, quantity)
session.sellM(Aticker, quantity)
print('shock', shock.ticker, shock.amount)
if shock.elapsed == 2:
if shock.amount > MAIN_TAKER + BUFFER*2:
session.sellM(Mticker, quantity)
session.sellM(Aticker, quantity)
elif - shock.amount > MAIN_TAKER + BUFFER*2:
session.buyM(Mticker, quantity)
session.buyM(Aticker, quantity)
print('post shock', shock.ticker, shock.amount)
TAKER4 = MAIN_TAKER * 5
def index_arbitrage(session, tickers):
secs = session.getSecurities()
ETF = secs['ETF']
etfBid = ETF['bid']
etfAsk = ETF['ask']
bestBids = {}
bestBidsQ = {}
bestAsks = {}
bestAsksQ = {}
for ticker in tickers:
tickerM = ticker+"-M"
tickerA = ticker+"-A"
Mticker = secs[tickerM]
Aticker = secs[tickerA]
Mbid = Mticker['bid']
Abid = Aticker['bid']
Mask = Mticker['ask']
Aask = Aticker['ask']
if Mbid >= Abid:
bestBids[tickerM] = Mbid
bestBidsQ[tickerM] = Mticker['bid_size']
else:
bestBids[tickerA] = Abid
bestBidsQ[tickerA] = Aticker['bid_size']
if Mask <= Aask:
bestAsks[tickerM] = Mask
bestAsksQ[tickerM] = Mticker['ask_size']
else:
bestAsks[tickerA] = Aask
bestAsksQ[tickerA] = Aticker['ask_size']
compositBid = sum(bestBids.values())
compositBidQ = min(bestBidsQ.values())
compositAsk = sum(bestAsks.values())
compositAskQ = min(bestAsksQ.values())
boughtprice = 0
soldprice = 0
if etfBid - compositAsk > TAKER4+BUFFER:
quantity = ETF['bid_size'] if ETF['bid_size'] < compositAskQ else compositAskQ
if quantity == 0:
return
quantity = min([quantity, 50000])
soldprice = session.sellM('ETF', quantity)
for ticker in bestAsks:
boughtprice += session.buyM(ticker, quantity)
print('Plan ETF', etfBid, 'Stocks', compositAsk)
print('Actual ETF', soldprice, 'Stocks', boughtprice)
elif compositBid - etfAsk > TAKER4+BUFFER:
quantity = ETF['ask_size'] if ETF['ask_size'] < compositBidQ else compositBidQ
if quantity == 0:
return
quantity = min([quantity, 50000])
for ticker in bestBids:
soldprice += session.sellM(ticker, quantity)
boughtprice = session.buyM('ETF', quantity)
print('Plan Stocks', compositBid, 'ETF', etfAsk)
print('Actual Stocks', soldprice, 'ETF', boughtprice)
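# --- Illustration (synthetic quotes, not exchange data) ---------------------
# The composite quote above prices the basket leg by leg at the best venue,
# and caps tradable size at the thinnest leg:
_demo_asks = {'WMT': 100.0, 'CAT': 200.0, 'MMM': 150.0}
_demo_asks_q = {'WMT': 500, 'CAT': 300, 'MMM': 800}
assert sum(_demo_asks.values()) == 450.0   # composite ask
assert min(_demo_asks_q.values()) == 300   # size of the thinnest leg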
# TODO: send limit orders and use market to cover unfilled ones after
def exchange_arbitrage(session, mticker, aticker):
global NaN
mbook = session.get_book(mticker)
masks_room = mbook.asks_room()
mbids_room = mbook.bids_room()
abook = session.get_book(aticker)
aasks_room = abook.asks_room()
abids_room = abook.bids_room()
# a lot of room, make market orders
if mbook.bid_price - abook.ask_price > TAKER+BUFFER*2:
quantity = aasks_room if aasks_room < mbids_room else mbids_room
quantity = min([quantity, 50000])
session.sellM(mbook.sym, quantity)
session.buyM(abook.sym, quantity)
elif abook.bid_price - mbook.ask_price > TAKER+BUFFER*2:
quantity = masks_room if masks_room < abids_room else abids_room  # this leg sells A and buys M, so use A's bid room and M's ask room
quantity = min([quantity, 50000])
session.sellM(abook.sym, quantity)
session.buyM(mbook.sym, quantity)
# only a little room, make limit orders
if mbook.bid_price - abook.ask_price > BUFFER:
quantity = aasks_room if aasks_room < mbids_room else mbids_room
quantity = min([quantity, 50000])
session.sell(mbook.sym, mbook.bid_price, quantity)
session.buy(abook.sym, abook.ask_price, quantity)
elif abook.bid_price - mbook.ask_price > BUFFER:
quantity = masks_room if masks_room < abids_room else abids_room  # this leg sells A and buys M, so use A's bid room and M's ask room
quantity = min([quantity, 50000])
session.sell(abook.sym, abook.bid_price, quantity)
session.buy(mbook.sym, mbook.ask_price, quantity)
def sigint(signum, frame):
global shutdown
signal.signal(signal.SIGINT, signal.SIG_DFL)
shutdown = True
if __name__ == '__main__':
signal.signal(signal.SIGINT, sigint)
main()
| 33.407323
| 116
| 0.558052
| 1,713
| 14,599
| 4.66725
| 0.166375
| 0.011382
| 0.018762
| 0.018011
| 0.353096
| 0.316573
| 0.306316
| 0.295184
| 0.219262
| 0.174359
| 0
| 0.014465
| 0.32283
| 14,599
| 436
| 117
| 33.483945
| 0.794255
| 0.046784
| 0
| 0.298295
| 0
| 0
| 0.075723
| 0.001635
| 0
| 0
| 0
| 0.002294
| 0
| 1
| 0.090909
| false
| 0.002841
| 0.011364
| 0.008523
| 0.196023
| 0.039773
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e079004173a435849592703f1baaf8e8d87ed079
| 9,131
|
py
|
Python
|
workflows/workflow.py
|
sunnyfloyd/panderyx
|
82f03625159833930ff044a43a6619ab710ff159
|
[
"MIT"
] | null | null | null |
workflows/workflow.py
|
sunnyfloyd/panderyx
|
82f03625159833930ff044a43a6619ab710ff159
|
[
"MIT"
] | null | null | null |
workflows/workflow.py
|
sunnyfloyd/panderyx
|
82f03625159833930ff044a43a6619ab710ff159
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import Optional, Union
from tools import tools
from exceptions import workflow_exceptions
class Workflow:
"""A class to represent a workflow.
The Workflow class provides a set of methods to manage the state of the workflow.
It allows for tool insertions, removals and modifications.
When the workflow is run, the data flow is built and each tool linked to the
workflow instance is executed in a determined order. Tool outputs are then
consolidated into JSON format.
"""
TOOL_CHOICES = {
"generic": tools.GenericTool,
"large_generic": tools.LargeGenericTool,
"input": tools.InputTool,
}
def __init__(self) -> None:
"""Initializes Workflow class with root tool.
The workflow is initialized with a root tool whose tool ID is `0`. `_root`
points to the root tool directly.
"""
self._root = tools.RootTool(id=0)
self._tools = {0: self._root}
self._used_ids = {0}
def insert_tool(
self,
tool_choice: str,
input_ids: Optional[Union[list[int], int]] = None,
output_ids: Optional[Union[list[int], int]] = None,
coordinates: Optional[tuple[int, int]] = None,
) -> tools.Tool:
"""Inserts a new tool to the current workflow.
Args:
tool_choice (str): determines what tool is created (based on the
available choices defined within the Workflow class).
input_ids (list[int], int]): starting input or inputs for the tool
identified by their IDs. Defaults to None.
output_ids (list[int], int): starting output or outputs for the tool
identified by their IDs. Defaults to None.
coordinates (tuple[int, int]): coordinates for the tool on canvas.
Defaults to None.
Raises:
workflow_exceptions.ToolNotAvailable: indicates that provided string
does not refer to an available tool from the Workflow class.
Returns:
tools.Tool: instance of a Tool's class.
"""
try:
tool_class = self.TOOL_CHOICES[tool_choice]
except KeyError:
raise workflow_exceptions.ToolNotAvailable
next_id = self._get_next_tool_id()
tool = tool_class(id=next_id)
self._tools[next_id] = tool
self._add_tool_id(next_id)
if input_ids is not None:
self.add_tool_input(tool_id=tool.id, input_ids=input_ids)
if output_ids is not None:
output_ids = self._clean_tool_ids(output_ids)
for output_id in output_ids:
self.add_tool_input(tool_id=output_id, input_ids=tool.id)
if coordinates is not None:
self.set_tool_coordinates(tool_id=tool.id, coordinates=coordinates)
return tool
def remove_tool(self, tool_ids: Union[list[int], int]) -> None:
"""Removes existing tool from the current workflow.
Removes the tool from the workflow and updates inputs and outputs of the
linked tool instances.
Args:
tool_ids (list[int], int): tool ID or IDs that ought to be removed.
Raises:
workflow_exceptions.RootCannotBeDeleted: indicates that selected
tool for removal is a root which cannot be deleted.
"""
tool_ids = self._clean_tool_ids(tool_ids)
for tool_id in tool_ids:
tool = self._get_tool_by_id(tool_id)
if tool.is_root:
raise workflow_exceptions.RootCannotBeDeleted
# remove tool from linked tools' inputs
tool_outputs = tool.outputs
for output_id in tool_outputs:
self.remove_tool_input(tool_id=output_id, input_ids=tool.id)
# remove tool from linked tools' outputs
tool_inputs = tool.inputs
for input_id in tool_inputs:
self.remove_tool_input(tool_id=tool.id, input_ids=input_id)
del self._tools[tool_id]
def add_tool_input(
self, tool_id: int, input_ids: Union[list[int], int]
) -> tools.Tool:
"""Adds new input(s) for the tool existing in the current workflow.
Args:
tool_id (int): tool ID to which input(s) should be added.
input_ids (list[int], int]): input(s) to be added to the tool
identified by their IDs.
Returns:
tools.Tool: instance of a Tool's class.
"""
tool = self._get_tool_by_id(tool_id)
input_ids = self._clean_tool_ids(input_ids)
for input_id in input_ids:
tool.add_input(input_id)
self._tools[input_id].add_output(tool_id)
return tool
def remove_tool_input(
self, tool_id: int, input_ids: Union[list[int], int]
) -> tools.Tool:
"""Removes input(s) from the tool existing in the current workflow.
Args:
tool_id (int): tool ID from which input(s) should be removed.
input_ids (list[int], int]): input(s) to be removed from the tool
identified by their IDs.
Returns:
tools.Tool: instance of a Tool's class.
"""
tool = self._get_tool_by_id(tool_id)
input_ids = self._clean_tool_ids(input_ids)
for input_id in input_ids:
tool.remove_input(input_id)
self._tools[input_id].remove_output(tool_id)
return tool
def set_tool_config(self, tool_id: int, data: dict) -> tools.Tool:
"""Sets tool's config to passed data dict.
Args:
tool_id (int): tool ID for which config should be set.
data (dict): dict of parameters for given tool.
Returns:
tools.Tool: instance of a Tool's class.
"""
tool = self._get_tool_by_id(tool_id)
tool.config = data
return tool
def set_tool_coordinates(
self, tool_id: int, coordinates: Optional[tuple[int, int]] = None
) -> tools.Tool:
"""Sets (x, y) coordinates for the tool existing in the current workflow.
If no coordinates are passed to this method, default coordinates will be
calculated using `_get_default_coordinates()` internal method.
Args:
tool_id (int): tool ID for which coordinates are to be set.
coordinates (tuple[int, int]): tuple of (x, y) coordinates.
Defaults to None.
Returns:
tools.Tool: instance of a Tool's class.
"""
# I need to decide where to put a check if coordinates will fit a canvas
tool = self._get_tool_by_id(tool_id)
coordinates = (
coordinates if coordinates is not None else self._get_default_coordinates()
)
tool.coordinates = coordinates
return tool
def _get_default_coordinates(self) -> tuple[int, int]:
# might require more sophisticated logic in the future
return (0, 0)
def _get_tool_by_id(self, tool_id: int) -> tools.Tool:
"""Returns an instance of a Tool class selected by its ID.
Args:
tool_id (int): tool ID.
Raises:
workflow_exceptions.ToolDoesNotExist: indicates that for provided ID
there is no tool in this workflow.
Returns:
tools.Tool: instance of a Tool's class.
"""
try:
tool = self._tools[tool_id]
except KeyError:
raise workflow_exceptions.ToolDoesNotExist
return tool
def _clean_tool_ids(self, tool_ids: Union[list[int], int]) -> list[int]:
"""Returns a validated list of tool ID(s).
Checks whether passed tool ID(s) exist in the current workflow
and returns the list of tool IDs. If at least one of the provided tool
IDs is not found, it raises an exception.
Args:
tool_ids (list[int], int): tool ID(s) to be cleaned.
Raises:
workflow_exceptions.ToolDoesNotExist: indicates that at least one of
the provided tool IDs is not present in the current workflow.
Returns:
list[int]: list of checked tool IDs.
"""
cleaned_tool_ids = (
list(set(tool_ids)) if isinstance(tool_ids, list) else [tool_ids]
)
if any(tool_id not in self._tools for tool_id in cleaned_tool_ids):
raise workflow_exceptions.ToolDoesNotExist
return cleaned_tool_ids
def _add_tool_id(self, tool_id: int) -> None:
"""Adds an ID to the used ID pool.
Args:
tool_id (int): ID to be added to the used ID pool.
"""
self._used_ids.add(tool_id)
def _get_next_tool_id(self) -> int:
"""Returns a next available ID to be used for a tool instance.
Returns:
int: next available tool ID.
"""
return max(self._used_ids) + 1
def _build_flow(self) -> None:
raise NotImplementedError
def __len__(self) -> int:
return len(self._tools) - 1
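# --- Usage sketch (illustrative; assumes the `tools` package imported above
# provides the classes listed in TOOL_CHOICES) -------------------------------
#
#   wf = Workflow()
#   source = wf.insert_tool("input", coordinates=(0, 0))
#   sink = wf.insert_tool("generic", input_ids=source.id)
#   assert len(wf) == 2      # the root tool is excluded from __len__
#   wf.remove_tool(sink.id)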
| 33.818519
| 87
| 0.61461
| 1,221
| 9,131
| 4.411957
| 0.15561
| 0.05569
| 0.022276
| 0.019491
| 0.430295
| 0.317616
| 0.278819
| 0.247633
| 0.194728
| 0.164841
| 0
| 0.001273
| 0.311685
| 9,131
| 269
| 88
| 33.944238
| 0.855847
| 0.432045
| 0
| 0.257143
| 0
| 0
| 0.005623
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.038095
| 0.019048
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0eb2fde0bae97bffa51893b405703a8d74ef6c29
| 14,826
|
py
|
Python
|
PLM/options.py
|
vtta2008/pipelineTool
|
2431d2fc987e3b31f2a6a63427fee456fa0765a0
|
[
"Apache-2.0"
] | 7
|
2017-12-22T02:49:58.000Z
|
2018-05-09T05:29:06.000Z
|
PLM/options.py
|
vtta2008/pipelineTool
|
2431d2fc987e3b31f2a6a63427fee456fa0765a0
|
[
"Apache-2.0"
] | null | null | null |
PLM/options.py
|
vtta2008/pipelineTool
|
2431d2fc987e3b31f2a6a63427fee456fa0765a0
|
[
"Apache-2.0"
] | 3
|
2019-03-11T21:54:52.000Z
|
2019-11-25T11:23:17.000Z
|
# -*- coding: utf-8 -*-
"""
Script Name:
Author: Do Trinh/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
""" Import """
import os
from PySide2.QtWidgets import (QFrame, QStyle, QAbstractItemView, QSizePolicy, QLineEdit, QPlainTextEdit,
QGraphicsItem, QGraphicsView, QGraphicsScene, QRubberBand, QCalendarWidget, )
from PySide2.QtCore import QEvent, QSettings, QSize, Qt, QDateTime
from PySide2.QtGui import QColor, QPainter, QFont, QTextCursor
SingleSelection = QCalendarWidget.SingleSelection
NoSelection = QCalendarWidget.NoSelection
SingleLetterDay = QCalendarWidget.SingleLetterDayNames
ShortDay = QCalendarWidget.ShortDayNames
LongDay = QCalendarWidget.LongDayNames
NoHoriHeader = QCalendarWidget.NoHorizontalHeader
NoVertHeader = QCalendarWidget.NoVerticalHeader
IsoWeekNum = QCalendarWidget.ISOWeekNumbers
SelectMode = QCalendarWidget.SelectionMode
HoriHeaderFm = QCalendarWidget.HorizontalHeaderFormat
VertHeaderFm = QCalendarWidget.VerticalHeaderFormat
DayOfWeek = Qt.DayOfWeek
Sunday = Qt.Sunday
Monday = Qt.Monday
Tuesday = Qt.Tuesday
Wednesday = Qt.Wednesday
Thursday = Qt.Thursday
Friday = Qt.Friday
Saturday = Qt.Saturday
ICONSIZE = 32
ICONBUFFER = -1
BTNTAGSIZE = QSize(87, 20)
TAGBTNSIZE = QSize(87-1, 20-1)
BTNICONSIZE = QSize(ICONSIZE, ICONSIZE)
ICONBTNSIZE = QSize(ICONSIZE+ICONBUFFER, ICONSIZE+ICONBUFFER)
DAMG_LOGO_COLOR = QColor(0, 114, 188, 255)
# Basic color
GlobalColor = Qt.GlobalColor
WHITE = QColor(Qt.white)
LIGHTGRAY = QColor(Qt.lightGray)
GRAY = QColor(Qt.gray)
DARKGRAY = QColor(Qt.darkGray)
BLACK = QColor(Qt.black)
RED = QColor(Qt.red)
GREEN = QColor(Qt.green)
BLUE = QColor(Qt.blue)
DARKRED = QColor(Qt.darkRed)
DARKGREEN = QColor(Qt.darkGreen)
DARKBLUE = QColor(Qt.darkBlue)
CYAN = QColor(Qt.cyan)
MAGENTA = QColor(Qt.magenta)
YELLOW = QColor(Qt.yellow)
DARKCYAN = QColor(Qt.darkCyan)
DARKMAGENTA = QColor(Qt.darkMagenta)
DARKYELLOW = QColor(Qt.darkYellow)
# Dark Palette color
Color_BACKGROUND_LIGHT = QColor('#505F69')
COLOR_BACKGROUND_NORMAL = QColor('#32414B')
COLOR_BACKGROUND_DARK = QColor('#19232D')
COLOR_FOREGROUND_LIGHT = QColor('#F0F0F0')
COLOR_FOREGROUND_NORMAL = QColor('#AAAAAA')
COLOR_FOREGROUND_DARK = QColor('#787878')
COLOR_SELECTION_LIGHT = QColor('#148CD2')
COLOR_SELECTION_NORMAL = QColor('#1464A0')
COLOR_SELECTION_DARK = QColor('#14506E')
# Nice color
blush = QColor(246, 202, 203, 255)
petal = QColor(247, 170, 189, 255)
petunia = QColor(231, 62, 151, 255)
deep_pink = QColor(229, 2, 120, 255)
melon = QColor(241, 118, 110, 255)
pomegranate = QColor(178, 27, 32, 255)
poppy_red = QColor(236, 51, 39, 255)
orange_red = QColor(240, 101, 53, 255)
olive = QColor(174, 188, 43, 255)
spring = QColor(227, 229, 121, 255)
yellow = QColor(255, 240, 29, 255)
mango = QColor(254, 209, 26, 255)
cantaloupe = QColor(250, 176, 98, 255)
tangelo = QColor(247, 151, 47, 255)
burnt_orange = QColor(236, 137, 36, 255)
bright_orange = QColor(242, 124, 53, 255)
moss = QColor(176, 186, 39, 255)
sage = QColor(212, 219, 145, 255)
apple = QColor(178, 215, 140, 255)
grass = QColor(111, 178, 68, 255)
forest = QColor(69, 149, 62, 255)
peacock = QColor(21, 140, 167, 255)
teal = QColor(24, 157, 193, 255)
aqua = QColor(153, 214, 218, 255)
violet = QColor(55, 52, 144, 255)
deep_blue = QColor(15, 86, 163, 255)
hydrangea = QColor(150, 191, 229, 255)
sky = QColor(139, 210, 244, 255)
dusk = QColor(16, 102, 162, 255)
midnight = QColor(14, 90, 131, 255)
seaside = QColor(87, 154, 188, 255)
poolside = QColor(137, 203, 225, 255)
eggplant = QColor(86, 5, 79, 255)
lilac = QColor(222, 192, 219, 255)
chocolate = QColor(87, 43, 3, 255)
blackout = QColor(19, 17, 15, 255)
stone = QColor(125, 127, 130, 255)
gravel = QColor(181, 182, 185, 255)
pebble = QColor(217, 212, 206, 255)
sand = QColor(185, 172, 151, 255)
ignoreARM = Qt.IgnoreAspectRatio
scrollAsNeed = Qt.ScrollBarAsNeeded
scrollOff = Qt.ScrollBarAlwaysOff
scrollOn = Qt.ScrollBarAlwaysOn
SiPoMin = QSizePolicy.Minimum # Size policy
SiPoMax = QSizePolicy.Maximum
SiPoExp = QSizePolicy.Expanding
SiPoPre = QSizePolicy.Preferred
SiPoIgn = QSizePolicy.Ignored
frameStyle = QFrame.Sunken | QFrame.Panel
center = Qt.AlignCenter # Alignment
right = Qt.AlignRight
left = Qt.AlignLeft
top = Qt.AlignTop
bottom = Qt.AlignBottom
hori = Qt.Horizontal
vert = Qt.Vertical
dockL = Qt.LeftDockWidgetArea # Docking area
dockR = Qt.RightDockWidgetArea
dockT = Qt.TopDockWidgetArea
dockB = Qt.BottomDockWidgetArea
dockAll = Qt.AllDockWidgetAreas
datetTimeStamp = QDateTime.currentDateTime().toString("hh:mm - dd MMMM yy") # datestamp
PRS = dict(password = QLineEdit.Password, center = center , left = left , right = right,
spmax = SiPoMax , sppre = SiPoPre, spexp = SiPoExp, spign = SiPoIgn,
expanding = QSizePolicy.Expanding, spmin = SiPoMin,)
# -------------------------------------------------------------------------------------------------------------
""" Event """
NO_WRAP = QPlainTextEdit.NoWrap
NO_FRAME = QPlainTextEdit.NoFrame
ELIDE_RIGHT = Qt.ElideRight
ELIDE_NONE = Qt.ElideNone
# -------------------------------------------------------------------------------------------------------------
""" Window state """
StateNormal = Qt.WindowNoState
StateMax = Qt.WindowMaximized
StateMin = Qt.WindowMinimized
State_Selected = QStyle.State_Selected
# -------------------------------------------------------------------------------------------------------------
""" Nodegraph setting variables """
ASPEC_RATIO = Qt.KeepAspectRatio
SMOOTH_TRANS = Qt.SmoothTransformation
SCROLLBAROFF = Qt.ScrollBarAlwaysOff # Scrollbar
SCROLLBARON = Qt.ScrollBarAlwaysOn
SCROLLBARNEED = Qt.ScrollBarAsNeeded
WORD_WRAP = Qt.TextWordWrap
INTERSECT_ITEM_SHAPE = Qt.IntersectsItemShape
CONTAIN_ITEM_SHAPE = Qt.ContainsItemShape
MATCH_EXACTLY = Qt.MatchExactly
DRAG_ONLY = QAbstractItemView.DragOnly
# -------------------------------------------------------------------------------------------------------------
""" UI flags """
ITEMENABLE = Qt.ItemIsEnabled
ITEMMOVEABLE = QGraphicsItem.ItemIsMovable
ITEMSENDGEOCHANGE = QGraphicsItem.ItemSendsGeometryChanges
ITEMSCALECHANGE = QGraphicsItem.ItemScaleChange
ITEMPOSCHANGE = QGraphicsItem.ItemPositionChange
DEVICECACHE = QGraphicsItem.DeviceCoordinateCache
SELECTABLE = QGraphicsItem.ItemIsSelectable
MOVEABLE = QGraphicsItem.ItemIsMovable
FOCUSABLE = QGraphicsItem.ItemIsFocusable
PANEL = QGraphicsItem.ItemIsPanel
NOINDEX = QGraphicsScene.NoIndex # Scene
RUBBER_DRAG = QGraphicsView.RubberBandDrag # Viewer
RUBBER_REC = QRubberBand.Rectangle
POS_CHANGE = QGraphicsItem.ItemPositionChange
NODRAG = QGraphicsView.NoDrag
NOFRAME = QGraphicsView.NoFrame
ANCHOR_NO = QGraphicsView.NoAnchor
ANCHOR_UNDERMICE = QGraphicsView.AnchorUnderMouse
ANCHOR_CENTER = QGraphicsView.AnchorViewCenter
CACHE_BG = QGraphicsView.CacheBackground
UPDATE_VIEWRECT = QGraphicsView.BoundingRectViewportUpdate
UPDATE_FULLVIEW = QGraphicsView.FullViewportUpdate
UPDATE_SMARTVIEW = QGraphicsView.SmartViewportUpdate
UPDATE_BOUNDINGVIEW = QGraphicsView.BoundingRectViewportUpdate
UPDATE_MINIMALVIEW = QGraphicsView.MinimalViewportUpdate
STAY_ON_TOP = Qt.WindowStaysOnTopHint
STRONG_FOCUS = Qt.StrongFocus
SPLASHSCREEN = Qt.SplashScreen
FRAMELESS = Qt.FramelessWindowHint
CUSTOMIZE = Qt.CustomizeWindowHint
CLOSEBTN = Qt.WindowCloseButtonHint
MINIMIZEBTN = Qt.WindowMinimizeButtonHint
AUTO_COLOR = Qt.AutoColor
# -------------------------------------------------------------------------------------------------------------
""" Drawing """
ANTIALIAS = QPainter.Antialiasing # Painter
ANTIALIAS_TEXT = QPainter.TextAntialiasing
ANTIALIAS_HIGH_QUALITY = QPainter.HighQualityAntialiasing
SMOOTH_PIXMAP_TRANSFORM = QPainter.SmoothPixmapTransform
NON_COSMETIC_PEN = QPainter.NonCosmeticDefaultPen
NO_BRUSH = Qt.NoBrush # Brush
NO_PEN = Qt.NoPen # Pen
ROUND_CAP = Qt.RoundCap
ROUND_JOIN = Qt.RoundJoin
PATTERN_SOLID = Qt.SolidPattern # Pattern
LINE_SOLID = Qt.SolidLine # Line
LINE_DASH = Qt.DashLine
LINE_DOT = Qt.DotLine
LINE_DASH_DOT = Qt.DashDotDotLine
TRANSPARENT = Qt.transparent
TRANSPARENT_MODE = Qt.TransparentMode
# -------------------------------------------------------------------------------------------------------------
""" Meta Object """
QUEUEDCONNECTION = Qt.QueuedConnection
# -------------------------------------------------------------------------------------------------------------
""" Keyboard and cursor """
TEXT_BOLD = QFont.Bold
TEXT_NORMAL = QFont.Normal
MONO_SPACE = QFont.Monospace
TEXT_MENEOMIC = Qt.TextShowMnemonic
KEY_PRESS = QEvent.KeyPress
KEY_RELEASE = QEvent.KeyRelease
KEY_ALT = Qt.Key_Alt
KEY_DEL = Qt.Key_Delete
KEY_TAB = Qt.Key_Tab
KEY_SHIFT = Qt.Key_Shift
KEY_CTRL = Qt.Key_Control
KEY_BACKSPACE = Qt.Key_Backspace
KEY_ENTER = Qt.Key_Enter
KEY_RETURN = Qt.Key_Return
KEY_F = Qt.Key_F
KEY_S = Qt.Key_S
ALT_MODIFIER = Qt.AltModifier
CTRL_MODIFIER = Qt.ControlModifier
SHIFT_MODIFIER = Qt.ShiftModifier
NO_MODIFIER = Qt.NoModifier
CLOSE_HAND_CUSOR = Qt.ClosedHandCursor
SIZEF_CURSOR = Qt.SizeFDiagCursor
windows = os.name == 'nt'  # comparison, not chained assignment
DMK = Qt.AltModifier if windows else CTRL_MODIFIER
MOUSE_LEFT = Qt.LeftButton
MOUSE_RIGHT = Qt.RightButton
MOUSE_MIDDLE = Qt.MiddleButton
NO_BUTTON = Qt.NoButton
ARROW_NONE = Qt.NoArrow # Cursor
CURSOR_ARROW = Qt.ArrowCursor
CURSOR_SIZEALL = Qt.SizeAllCursor
MOVE_OPERATION = QTextCursor.MoveOperation
MOVE_ANCHOR = QTextCursor.MoveMode.MoveAnchor
KEEP_ANCHOR = QTextCursor.MoveMode.KeepAnchor
ACTION_MOVE = Qt.MoveAction # Action
ignoreARM = Qt.IgnoreAspectRatio
# -------------------------------------------------------------------------------------------------------------
""" Set number """
RELATIVE_SIZE = Qt.RelativeSize # Size
INI = QSettings.IniFormat
NATIVE = QSettings.NativeFormat
INVALID = QSettings.InvalidFormat
SYS_SCOPE = QSettings.SystemScope
USER_SCOPE = QSettings.UserScope
# -------------------------------------------------------------------------------------------------------------
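# Usage sketch (illustrative only): the aliases above are meant to shorten
# Qt call sites, e.g.
#
#   label.setAlignment(center)              # instead of Qt.AlignCenter
#   line_edit.setEchoMode(PRS['password'])  # instead of QLineEdit.Password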
# Created by Trinh Do on 5/6/2020 - 3:13 AM
# © 2017 - 2020 DAMGteam. All rights reserved
| 43.994065
| 114
| 0.475651
| 1,091
| 14,826
| 6.351971
| 0.511457
| 0.019625
| 0.008081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058014
| 0.391947
| 14,826
| 337
| 115
| 43.994065
| 0.710593
| 0.096587
| 0
| 0.008547
| 0
| 0
| 0.006299
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.004274
| 0.017094
| 0
| 0.017094
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0eb3ad476194898d48e135372f34d1ee69bc79d8
| 2,509
|
py
|
Python
|
Crawling/ssafyCrawling.py
|
Nyapy/FMTG
|
dcf0a35dbbcd50d5bc861b04ac0db41d27e57b6e
|
[
"MIT"
] | null | null | null |
Crawling/ssafyCrawling.py
|
Nyapy/FMTG
|
dcf0a35dbbcd50d5bc861b04ac0db41d27e57b6e
|
[
"MIT"
] | null | null | null |
Crawling/ssafyCrawling.py
|
Nyapy/FMTG
|
dcf0a35dbbcd50d5bc861b04ac0db41d27e57b6e
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import sys
import time
import urllib.request
import os
sys.stdin = open('idpwd.txt')
site = input()
id = input()
pwd = input()
# Absolute path to the web driver used by selenium
chromedriver = r'C:\Webdriver\chromedriver.exe'  # raw string keeps the backslashes literal
# Hook the previously installed chromedriver up to selenium's webdriver
driver = webdriver.Chrome(chromedriver)
# Crawl the target page with the driver
driver.get(site)
driver.find_element_by_name('userId').send_keys(id)
driver.find_element_by_name('userPwd').send_keys(pwd)
driver.find_element_by_class_name('form-btn').click()
driver.set_window_size(1600, 800)
driver.find_element_by_xpath("//a[@href='/edu/lectureroom/openlearning/openLearningList.do']/span").click()
# driver.find_element_by_id('searchContNm').send_keys('aps')
#
# driver.find_element_by_xpath("//button[@onclick='fnSearch();']").click()
driver.find_elements_by_xpath("//*[contains(text(), '5기_B반_Java(1)')]")[0].click()
driver.find_element_by_xpath("//span[@class='file-name']").click()
driver.switch_to.window(driver.window_handles[1])
print(driver.find_elements_by_xpath("//button[@title='다음 페이지']")[0].get_attribute('disabled'))
# driver.find_elements_by_xpath("//button[@title='마지막 페이지']")[0].click()
# print(driver.find_elements_by_xpath("//button[@title='다음 페이지']")[0].get_attribute('disabled'))
# Fetching the URL + practicing the find function
# pre = driver.current_url
# find = pre.find('/index.html')
# url = pre[:find]
# src = driver.find_element_by_class_name("background").get_attribute('src')
# print(src)
## Paging to the next page
# for i in driver.find_elements_by_xpath("//button[@title='다음 페이지']"):
# print(i)
cnt = 1
# url = driver.find_elements_by_class_name("background")[-1].get_attribute('src')
# print(url)
# urllib.request.urlretrieve(url, '123.jpg')
# os.system("curl " + url + " > test.jpg")
time.sleep(2)
driver.get_screenshot_as_file("hi.png")
# for i in driver.find_elements_by_class_name("background"):
# time.sleep(2)
# print(i.get_attribute('style'))
# i.screenshot(str(cnt)+'.png')
# cnt += 1
while 1:
time.sleep(0.4)
driver.save_screenshot('APS/C/'+str(cnt)+'.png')
# print(driver.find_element_by_class_name("background").get_attribute('src'))
# driver.find_element_by_class_name("background").screenshot(str(cnt)+'.png')
driver.find_elements_by_xpath("//button[@title='다음 페이지']")[0].click()
cnt += 1
if driver.find_elements_by_xpath("//button[@title='다음 페이지']")[0].get_attribute('disabled') == 'disabled':
break
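# Hardening sketch (not in the original): ensure the browser is shut down even
# if the capture loop above raises, e.g. by wrapping it as
#
#   try:
#       ...  # pagination loop
#   finally:
#       driver.quit()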
| 32.166667
| 109
| 0.719012
| 369
| 2,509
| 4.661247
| 0.341463
| 0.110465
| 0.098837
| 0.110465
| 0.437209
| 0.337209
| 0.32093
| 0.247674
| 0.223837
| 0.2
| 0
| 0.012362
| 0.09725
| 2,509
| 77
| 110
| 32.584416
| 0.74702
| 0.461538
| 0
| 0
| 0
| 0
| 0.230886
| 0.092354
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1875
| 0
| 0.1875
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0eb4432b0091105498b6cde85c1c9de8fc2676cc
| 1,433
|
py
|
Python
|
100days/day95/StringIO_demo.py
|
chainren/python-learn
|
5e48e96c4bb212806b9ae0954fdb368abdcf9ba3
|
[
"Apache-2.0"
] | null | null | null |
100days/day95/StringIO_demo.py
|
chainren/python-learn
|
5e48e96c4bb212806b9ae0954fdb368abdcf9ba3
|
[
"Apache-2.0"
] | 16
|
2020-02-12T03:09:30.000Z
|
2022-03-12T00:08:59.000Z
|
100days/day95/StringIO_demo.py
|
chainren/python-learn
|
5e48e96c4bb212806b9ae0954fdb368abdcf9ba3
|
[
"Apache-2.0"
] | null | null | null |
from io import StringIO
# Create a StringIO object, write to it, and read back its in-memory content
f = StringIO()
f.write('Python-100')
content = f.getvalue() # read back what was written (renamed from `str` to avoid shadowing the builtin)
print('String written to memory: %s' % content)
f.write('\n') # append more content
f.write('keep it up for 100 days')
f.close() # close the buffer
f1 = StringIO('Python-100' + '\n' + 'keep it up for 100 days')
# read the content
print(f1.read())
f1.close()
# Hypothetical crawler data output function outputData()
def outputData():
dataOne = 'I am crawler data No. 1\n'
dataTwo = 'I am crawler data No. 2\n'
dataThree = 'I am crawler data No. 3'
data = dataOne + dataTwo + dataThree
return data
# dataStr holds the crawler data string
dataStr = outputData()
# 1. Write the content returned by outputData() into memory
dataIO = StringIO(dataStr)
# 1.1 Print the data StringIO holds in memory
print('1.1 Data written to memory:\n%s' % dataIO.getvalue())
# 1.2 Print the written data line by line, variant one
print('1.2 Line-by-line output, variant one:')
for data in dataIO.readlines():
print(data.strip('\n')) # strip the trailing newline from each line
# 1.3 Print the written data line by line, variant two
# Because of the previous read, the file pointer now sits at the end of the
# data, so we need to move it back to the start
print('After the previous read, the file pointer is at position: %d' % dataIO.tell())
# Move the file pointer back to the start for the demonstration below
dataIO.seek(0)
print('1.3 Line-by-line output, variant two:')
for data in dataIO:
print(data.strip('\n'))
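# --- Self-contained illustration of the pointer behaviour above -------------
buf = StringIO('ab\ncd\n')
assert buf.read() == 'ab\ncd\n'  # reading moves the pointer to the end
assert buf.tell() == 6
buf.seek(0)                      # rewind before reading again
assert buf.readlines() == ['ab\n', 'cd\n']
buf.close()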
| 18.61039
| 47
| 0.673412
| 187
| 1,433
| 5.160428
| 0.358289
| 0.02487
| 0.027979
| 0.046632
| 0.58342
| 0.58342
| 0.58342
| 0.58342
| 0.58342
| 0.58342
| 0
| 0.035533
| 0.175157
| 1,433
| 77
| 48
| 18.61039
| 0.78088
| 0.251221
| 0
| 0.589744
| 0
| 0
| 0.20038
| 0.021842
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.025641
| 0
| 0.128205
| 0.25641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0eb4f1bf9aa917694ffc04ea836799d3bd9b4710
| 2,751
|
py
|
Python
|
tests/test_cli.py
|
Nate1729/FinPack
|
d76fd5e6538298d5596d5b0f7d3be2bc6520c431
|
[
"Apache-2.0"
] | 1
|
2022-01-28T20:05:22.000Z
|
2022-01-28T20:05:22.000Z
|
tests/test_cli.py
|
Nate1729/FinPack
|
d76fd5e6538298d5596d5b0f7d3be2bc6520c431
|
[
"Apache-2.0"
] | 30
|
2021-11-22T19:07:54.000Z
|
2021-12-18T03:00:47.000Z
|
tests/test_cli.py
|
Nate1729/FinPack
|
d76fd5e6538298d5596d5b0f7d3be2bc6520c431
|
[
"Apache-2.0"
] | 2
|
2021-12-13T20:27:52.000Z
|
2021-12-17T18:39:40.000Z
|
"""Contains tests for finpack/core/cli.py
"""
__copyright__ = "Copyright (C) 2021 Matt Ferreira"
import os
import unittest
from importlib import metadata
from docopt import docopt
from finpack.core import cli
class TestCli(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.DATA_DIR = "temp"
os.mkdir(cls.DATA_DIR)
@classmethod
def tearDownClass(cls):
os.rmdir(cls.DATA_DIR)
def test_version_option(self):
argv = ["--version"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["--version"])
def test_init_no_options(self):
argv = ["init"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["init"])
def test_init_with_filepath_option(self):
argv = ["init", "--filepath=temp/data.csv"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["init"])
self.assertEqual(args["--filepath"], "temp/data.csv")
def test_init_with_sample_dataset_option(self):
argv = ["init", "--sample-dataset"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["init"])
self.assertTrue(args["--sample-dataset"])
def test_init_with_overwrite_option(self):
argv = ["init", "--overwrite"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["init"])
self.assertTrue(args["--overwrite"])
def test_balsheet_no_option(self):
argv = ["balsheet"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
def test_balsheet_with_filepath_option(self):
argv = ["balsheet", "--filepath=temp/data2.csv"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
self.assertEqual(args["--filepath"], "temp/data2.csv")
def test_balsheet_with_levels_default(self):
argv = ["balsheet"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
self.assertEqual(args["--levels"], "3")
def test_balsheet_with_levels_option(self):
argv = ["balsheet", "--levels=2"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
self.assertEqual(args["--levels"], "2")
def test_balsheet_with_date_default(self):
argv = ["balsheet"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
self.assertEqual(args["--date"], "today")
def test_balsheet_with_date_option(self):
argv = ["balsheet", "--date=2021-12-01"]
args = docopt(cli.__doc__, argv=argv)
self.assertTrue(args["balsheet"])
self.assertEqual(args["--date"], "2021-12-01")
| 25.238532
| 62
| 0.623773
| 324
| 2,751
| 5.012346
| 0.185185
| 0.112069
| 0.144089
| 0.108374
| 0.576355
| 0.461207
| 0.461207
| 0.461207
| 0.461207
| 0.435345
| 0
| 0.011748
| 0.226463
| 2,751
| 108
| 63
| 25.472222
| 0.75141
| 0.013813
| 0
| 0.393939
| 0
| 0
| 0.149298
| 0.018108
| 0
| 0
| 0
| 0
| 0.287879
| 1
| 0.19697
| false
| 0
| 0.075758
| 0
| 0.287879
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0eb8ddc2c0219670903c4425de4ca4b63a33f316
| 10,124
|
py
|
Python
|
recipe_engine/internal/commands/__init__.py
|
Acidburn0zzz/luci
|
d8993f4684839b58f5f966dd6273d1d8fd001eae
|
[
"Apache-2.0"
] | 1
|
2021-04-24T04:03:01.000Z
|
2021-04-24T04:03:01.000Z
|
recipe_engine/internal/commands/__init__.py
|
Acidburn0zzz/luci
|
d8993f4684839b58f5f966dd6273d1d8fd001eae
|
[
"Apache-2.0"
] | null | null | null |
recipe_engine/internal/commands/__init__.py
|
Acidburn0zzz/luci
|
d8993f4684839b58f5f966dd6273d1d8fd001eae
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""This package houses all subcommands for the recipe engine.
See implementation_details.md for the expectations of the modules in this
directory.
"""
import argparse
import errno
import logging
import os
import pkgutil
import sys
if sys.version_info >= (3, 5): # we're running Python 3.5 or newer
OS_WALK = os.walk
else:
# From vpython
from scandir import walk as OS_WALK
# pylint: disable=wrong-import-position
from .. import simple_cfg
from ..recipe_deps import RecipeDeps
from ..recipe_module_importer import RecipeModuleImporter
LOG = logging.getLogger(__name__)
# This incantation finds all loadable submodules of ourself. The
# `prefix=__name__` bit is so that these modules get loaded with the correct
# import names, i.e.
#
# recipe_engine.internal.commands.<submodule>
#
# If omitted, then these submodules can get double loaded as both:
#
# <submodule> AND
# recipe_engine.internal.commands.<submodule>
#
# Which can both interfere with the global python module namespace, and lead to
# strange errors when doing type assertions (since all data in these modules
# will be loaded under two different names; classes will fail isinstance checks
# even though they are "the same").
_COMMANDS = [
loader.find_module(module_name).load_module(module_name)
for (loader, module_name, _) in pkgutil.walk_packages(
__path__, prefix=__name__+'.')
if '.' not in module_name[len(__name__)+1:]
]
# Order all commands by an optional __cmd_priority__ field, and then by module
# name.
_COMMANDS.sort(
key=lambda mod: (
not hasattr(mod, '__cmd_priority__'), # modules defining priority first
getattr(mod, '__cmd_priority__', None), # actual priority
mod.__name__ # name
))
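# For example (illustrative only): a module defining __cmd_priority__ = 0
# sorts before one without the attribute, because its key (False, 0, name)
# orders before (True, None, name).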
# Now actually set these commands on ourself so that 'mock' works correctly.
#
# This is needed to allow some tests to pass (though it may be worth adjusting
# those tests later so they no longer need it; just delete this function and
# see which tests fail to find the dependencies on this behavior).
def _patch_our_attrs():
self = sys.modules[__name__]
self.__all__ = [mod.__name__[len(__name__)+1:] for mod in _COMMANDS]
for modname, mod in zip(self.__all__, _COMMANDS):
setattr(self, modname, mod)
_patch_our_attrs()
def _check_recipes_cfg_consistency(recipe_deps):
"""Checks all recipe.cfg files for the loaded recipe_deps and logs
inconsistent dependencies.
Args:
recipe_deps (RecipeDeps) - The loaded+fetched recipe deps
for the current run.
"""
actual = recipe_deps.main_repo.simple_cfg.deps
# For every repo we loaded
for repo_name in actual:
required_deps = recipe_deps.repos[repo_name].simple_cfg.deps
for req_repo_name, req_spec in required_deps.items():
# If this depends on something we didn't load, log an error.
if req_repo_name not in actual:
LOG.error(
'%r depends on %r, but your recipes.cfg is missing an '
'entry for this.', repo_name, req_repo_name)
continue
actual_spec = actual[req_repo_name]
if req_spec.revision == actual_spec.revision:
# They match, it's all good.
continue
LOG.warning(
'recipes.cfg depends on %r @ %s, but %r depends on version %s.',
req_repo_name, actual_spec.revision, repo_name, req_spec.revision)
def _cleanup_pyc(recipe_deps):
"""Removes any .pyc files from the recipes/recipe_module directories.
Args:
* recipe_deps (RecipeDeps) - The loaded recipe dependencies.
"""
for repo in recipe_deps.repos.values():
for to_walk in (repo.recipes_dir, repo.modules_dir):
for root, _dirs, files in OS_WALK(to_walk):
for fname in files:
if not fname.endswith('.pyc'):
continue
try:
to_clean = os.path.join(root, fname)
LOG.info('cleaning %r', to_clean)
os.unlink(to_clean)
except OSError as ex:
# If multiple things are cleaning pyc's at the same time this can
# race. Fortunately we only care that SOMETHING deleted the pyc :)
if ex.errno != errno.ENOENT:
raise
def _common_post_process(args):
# TODO(iannucci): We should always do logging.basicConfig() (probably with
# logging.WARNING), even if no verbose is passed. However we need to be
# careful as this could cause issues with spurious/unexpected output.
# Once the recipe engine is on native build.proto, this should be safe to
# do.
if args.verbose > 0:
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
if args.verbose > 1:
logging.getLogger().setLevel(logging.DEBUG)
else:
# Prevent spurious "No handlers could be found for ..." stderr messages.
# Once we always set a basicConfig (per TODO above), this can go away as
# well.
logging.root.manager.emittedNoHandlerWarning = True
if args.pid_file:
try:
with open(args.pid_file, 'w') as pid_file:
pid_file.write('%d\n' % os.getpid())
except Exception:
logging.exception("unable to write pidfile")
args.recipe_deps = RecipeDeps.create(
args.main_repo_path,
args.repo_override,
args.proto_override,
)
_check_recipes_cfg_consistency(args.recipe_deps)
# Allows:
# import RECIPE_MODULES.repo_name.module_name.submodule
sys.meta_path = [RecipeModuleImporter(args.recipe_deps)] + sys.meta_path
_cleanup_pyc(args.recipe_deps)
# Remove flags that subcommands shouldn't use; everything from this point on
# should ONLY use args.recipe_deps.
del args.main_repo_path
del args.verbose
del args.repo_override
def _add_common_args(parser):
class _RepoOverrideAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
tokens = values.split('=', 1)
if len(tokens) != 2:
raise ValueError('Override must have the form: repo=path')
repo_name, path = tokens
override_dict = getattr(namespace, self.dest)
if repo_name in override_dict:
raise ValueError('An override is already defined for [%s] (%s)' % (
repo_name, override_dict[repo_name]))
path = os.path.abspath(os.path.expanduser(path))
if not os.path.isdir(path):
raise ValueError('Override path [%s] is not a directory' % (path,))
override_dict[repo_name] = path
def _package_to_main_repo(value):
try:
value = os.path.abspath(value)
except Exception as ex: # pylint: disable=broad-except
parser.error(
'--package %r could not be converted to absolute path: %r' % (
value, ex,))
recipes_cfg_rel = simple_cfg.RECIPES_CFG_LOCATION_REL
if not value.endswith(recipes_cfg_rel):
parser.error('--package must end with %r.' % (recipes_cfg_rel,))
# We know the arg ends with 'infra/config/recipes.cfg', so chop those
# elements off the path to get the path to the recipe repo root.
for _ in simple_cfg.RECIPES_CFG_LOCATION_TOKS:
value = os.path.dirname(value)
return value
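# For example (illustrative, assuming RECIPES_CFG_LOCATION_REL is
# 'infra/config/recipes.cfg' as the comment above suggests):
# --package /src/myrepo/infra/config/recipes.cfg yields the main repo
# path /src/myrepo.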
# TODO(iannucci): change --package to --repo-path and avoid having recipes.py
# pass the path to the recipes.cfg. This is preferable because the location of
# recipes.cfg MUST be discovered for recipe dependencies; the RepoSpec
# protobuf doesn't specify where the recipes.cfg is in the dependency repos
# (nor can it, even if it was dynamic; this would be a nightmare to maintain,
# and the autoroller would need to discover it automatically ANYWAY. If we
# allow it to be relocatable, the engine needs to be able to discover it, in
# which case the minimal information is still 'repo root').
parser.add_argument(
'--package',
dest='main_repo_path', type=_package_to_main_repo, required=True,
help='Path to recipes.cfg of the recipe repo to operate on.')
parser.add_argument(
'--verbose', '-v', action='count',
help='Increase logging verbosity.')
parser.add_argument('-O', '--repo-override', metavar='ID=PATH',
action=_RepoOverrideAction, default={},
help="Override a repo's repository path with a local one.")
parser.add_argument('--pid-file', metavar='PATH',
help=(
'Absolute path to a file where the engine should write its pid. '
'Path must be absolute and not exist.'))
def _proto_override_abspath(value):
try:
value = os.path.abspath(value)
except Exception as ex: # pylint: disable=broad-except
parser.error(
'--proto-override %r could not be converted to absolute path: %r' % (
value, ex,))
return value
# Override the location of the folder containing the `PB` module. This should
# only be used for recipe bundles, so we don't bother giving it a shortform
# option, and suppress the option's help to avoid confusing users.
parser.add_argument(
'--proto-override', type=_proto_override_abspath, help=argparse.SUPPRESS)
parser.set_defaults(
postprocess_func=lambda error, args: None,
)
def parse_and_run():
"""Parses the command line and runs the chosen subcommand.
Returns the command's return value (either int or None, suitable as input to
`os._exit`).
"""
parser = argparse.ArgumentParser(
description='Interact with the recipe system.')
_add_common_args(parser)
subp = parser.add_subparsers(dest='command')
for module in _COMMANDS:
description = module.__doc__
helplines = []
for line in description.splitlines():
line = line.strip()
if not line:
break
helplines.append(line)
module.add_arguments(subp.add_parser(
module.__name__.split('.')[-1], # use module's short name
formatter_class=argparse.RawDescriptionHelpFormatter,
help=' '.join(helplines),
description=description,
))
args = parser.parse_args()
_common_post_process(args)
args.postprocess_func(parser.error, args)
return args.func(args)
| 35.152778
| 80
| 0.697452
| 1,424
| 10,124
| 4.785112
| 0.308287
| 0.022014
| 0.014382
| 0.010566
| 0.072791
| 0.046962
| 0.037276
| 0.037276
| 0.037276
| 0.037276
| 0
| 0.002132
| 0.212465
| 10,124
| 287
| 81
| 35.275261
| 0.852502
| 0.367641
| 0
| 0.145455
| 0
| 0
| 0.13239
| 0
| 0
| 0
| 0
| 0.003484
| 0
| 1
| 0.054545
| false
| 0
| 0.066667
| 0
| 0.145455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0eb8efd29824103fb230c6103a6e3a8b1b30a534
| 7,295
|
py
|
Python
|
openfl/pipelines/stc_pipeline.py
|
sarthakpati/openfl
|
8edebfd565d94f709a7d7f06d9ee38a7975c066e
|
[
"Apache-2.0"
] | null | null | null |
openfl/pipelines/stc_pipeline.py
|
sarthakpati/openfl
|
8edebfd565d94f709a7d7f06d9ee38a7975c066e
|
[
"Apache-2.0"
] | null | null | null |
openfl/pipelines/stc_pipeline.py
|
sarthakpati/openfl
|
8edebfd565d94f709a7d7f06d9ee38a7975c066e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""STCPipelinemodule."""
import numpy as np
import gzip as gz
from .pipeline import TransformationPipeline, Transformer
class SparsityTransformer(Transformer):
"""A transformer class to sparsify input data."""
def __init__(self, p=0.01):
"""Initialize.
Args:
p (float): sparsity ratio (Default=0.01)
"""
self.lossy = True
self.p = p
def forward(self, data, **kwargs):
"""Sparsify data and pass over only non-sparsified elements by reducing the array size.
Args:
data: an numpy array from the model tensor_dict
Returns:
condensed_data: an numpy array being sparsified.
metadata: dictionary to store a list of meta information.
"""
metadata = {'int_list': list(data.shape)}
# sparsification
data = data.astype(np.float32)
flatten_data = data.flatten()
n_elements = flatten_data.shape[0]
k_op = int(np.ceil(n_elements * self.p))
topk, topk_indices = self._topk_func(flatten_data, k_op)
#
condensed_data = topk
sparse_data = np.zeros(flatten_data.shape)
sparse_data[topk_indices] = topk
nonzero_element_bool_indices = sparse_data != 0.0
metadata['bool_list'] = list(nonzero_element_bool_indices)
return condensed_data, metadata
# return sparse_data, metadata
def backward(self, data, metadata, **kwargs):
"""Recover data array with the right shape and numerical type.
Args:
data: a numpy array with non-zero values.
metadata: dictionary containing information for recovering the original data array.
Returns:
recovered_data: a numpy array with the original shape.
"""
data = data.astype(np.float32)
data_shape = metadata['int_list']
nonzero_element_bool_indices = list(metadata['bool_list'])
recovered_data = np.zeros(data_shape).reshape(-1).astype(np.float32)
recovered_data[nonzero_element_bool_indices] = data
recovered_data = recovered_data.reshape(data_shape)
return recovered_data
@staticmethod
def _topk_func(x, k):
"""Select top k values.
Args:
x: an numpy array to be sorted out for top-k components.
k: k most maximum values.
Returns:
topk_mag: components with top-k values.
indices: indices of the top-k components.
"""
# quick sort as default on magnitude
idx = np.argsort(np.abs(x))
# sorted order, the right most is the largest magnitude
length = x.shape[0]
start_idx = length - k
# get the top k magnitude
topk_mag = np.asarray(x[idx[start_idx:]])
indices = np.asarray(idx[start_idx:])
if min(topk_mag) - 0 < 10e-8: # avoid zeros
topk_mag = topk_mag + 10e-8
return topk_mag, indices
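# Illustrative round trip (doctest-style; not part of the original module,
# values assume the implementation above):
# >>> st = SparsityTransformer(p=0.5)
# >>> condensed, meta = st.forward(np.array([[1.0, -4.0], [0.5, 3.0]]))
# >>> st.backward(condensed, meta).shape
# (2, 2)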
class TernaryTransformer(Transformer):
"""A transformer class to ternerize input data."""
def __init__(self):
"""Initialize."""
self.lossy = True
def forward(self, data, **kwargs):
"""Ternerize data into positive mean value, negative mean value and zero value.
Args:
data: an flattened numpy array
Returns:
int_data: an numpy array being terneraized.
metadata: dictionary to store a list of meta information.
"""
# ternarization, data is sparse and flattened
mean_topk = np.mean(np.abs(data))
out_ = np.where(data > 0.0, mean_topk, 0.0)
out = np.where(data < 0.0, -mean_topk, out_)
int_array, int2float_map = self._float_to_int(out)
metadata = {'int_to_float': int2float_map}
return int_array, metadata
def backward(self, data, metadata, **kwargs):
"""Recover data array back to the original numerical type.
Args:
data: a numpy array with non-zero values.
metadata: dictionary containing information for recovering the original data array.
Returns:
data: a numpy array with the original numerical type.
"""
# TODO
import copy
data = copy.deepcopy(data)  # copy so the caller's array is not mutated in place
int2float_map = metadata['int_to_float']
for key in int2float_map:
indices = data == key
data[indices] = int2float_map[key]
return data
@staticmethod
def _float_to_int(np_array):
"""Create look-up table for conversion between floating and integer types.
Args:
np_array: a numpy array of floats to be mapped to integer codes.
Returns:
int_array: a numpy array of integer codes with the same shape as np_array.
int_to_float_map: dictionary mapping each integer code back to its float value.
"""
flatten_array = np_array.reshape(-1)
unique_value_array = np.unique(flatten_array)
int_array = np.zeros(flatten_array.shape, dtype=int)  # plain int: np.int was removed in NumPy 1.24
int_to_float_map = {}
float_to_int_map = {}
# create table
for idx, u_value in enumerate(unique_value_array):
int_to_float_map.update({idx: u_value})
float_to_int_map.update({u_value: idx})
# assign to the integer array
indices = np.where(flatten_array == u_value)
int_array[indices] = idx
int_array = int_array.reshape(np_array.shape)
return int_array, int_to_float_map
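# Illustrative behavior (doctest-style; not part of the original module):
# >>> TernaryTransformer._float_to_int(np.array([0.5, -0.5, 0.5]))
# (array([1, 0, 1]), {0: -0.5, 1: 0.5})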
class GZIPTransformer(Transformer):
"""A transformer class to losslessly compress data."""
def __init__(self):
"""Initialize."""
self.lossy = False
def forward(self, data, **kwargs):
"""Compress data into numpy of float32.
Args:
data: an numpy array with non-zero values
Returns:
compressed_bytes :
metadata: dictionary to contain information for recovering back to original data array
"""
bytes_ = data.astype(np.float32).tobytes()
compressed_bytes = gz.compress(bytes_)
metadata = {}
return compressed_bytes, metadata
def backward(self, data, metadata, **kwargs):
"""Decompress data into numpy of float32.
Args:
data: an numpy array with non-zero values
metadata: dictionary to contain information for recovering back to original data array
Returns:
data:
"""
decompressed_bytes_ = gz.decompress(data)
data = np.frombuffer(decompressed_bytes_, dtype=np.float32)
return data
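# Illustrative lossless round trip (doctest-style; not part of the original
# module):
# >>> gzt = GZIPTransformer()
# >>> blob, meta = gzt.forward(np.ones(3))
# >>> gzt.backward(blob, meta)
# array([1., 1., 1.], dtype=float32)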
class STCPipeline(TransformationPipeline):
"""A pipeline class to compress data lossly using sparsity and ternerization methods."""
def __init__(self, p_sparsity=0.01, n_clusters=6, **kwargs):
"""Initialize a pipeline of transformers.
Args:
p_sparsity (float): Sparsity factor (Default=0.01)
n_clusters (int): Number of K-Means clusters; not used by this pipeline (Default=6)
Returns:
Data compression transformer pipeline object
"""
# instantiate each transformer
self.p = p_sparsity
transformers = [SparsityTransformer(self.p), TernaryTransformer(), GZIPTransformer()]
super(STCPipeline, self).__init__(transformers=transformers, **kwargs)
| 33.159091
| 99
| 0.622207
| 878
| 7,295
| 4.995444
| 0.223235
| 0.02508
| 0.02736
| 0.029184
| 0.312814
| 0.21637
| 0.206794
| 0.181943
| 0.170999
| 0.149567
| 0
| 0.012004
| 0.291981
| 7,295
| 219
| 100
| 33.310502
| 0.837173
| 0.376422
| 0
| 0.183908
| 0
| 0
| 0.014595
| 0
| 0
| 0
| 0
| 0.004566
| 0
| 1
| 0.137931
| false
| 0
| 0.045977
| 0
| 0.321839
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ebe32fa6550f0c6be308f3edf45681f0583afc5
| 730
|
py
|
Python
|
scripts/compare.py
|
SnoozeTime/nes
|
4d60562c59e175485eb3dff043c0c78473034cdb
|
[
"Unlicense"
] | 1
|
2022-01-07T02:00:36.000Z
|
2022-01-07T02:00:36.000Z
|
scripts/compare.py
|
SnoozeTime/nes
|
4d60562c59e175485eb3dff043c0c78473034cdb
|
[
"Unlicense"
] | 6
|
2020-12-12T03:21:55.000Z
|
2022-02-18T11:22:28.000Z
|
scripts/compare.py
|
SnoozeTime/nes
|
4d60562c59e175485eb3dff043c0c78473034cdb
|
[
"Unlicense"
] | 1
|
2018-12-02T20:42:10.000Z
|
2018-12-02T20:42:10.000Z
|
import sys
def load_log_sp(filename):
data = []
with open(filename) as f:
for line in f.readlines():
tokens = line.split(" ")
spidx = line.find("SP:")
endidx = line.find(' ', spidx)
data.append((line[0:4], line[spidx+3:endidx]))
return data
if __name__ == "__main__":
mylog = sys.argv[1]
correctlog = sys.argv[2]
mylog_sp = load_log_sp(mylog)
correctlog_sp = load_log_sp(correctlog)
for (i, ((nb1, sp1), (nb2, sp2))) in enumerate(zip(mylog_sp, correctlog_sp)):
print('{} {} - {} vs {}'.format(
nb1, nb2, sp1, sp2))
if sp1.lower() != sp2.lower() or int(nb1.lower(),16) != int(nb2.lower(), 16):
break
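# Example invocation (hypothetical log file names):
#   python compare.py my_emulator.log known_good.log
# This prints the first four characters of each line (the address/counter
# field) and the SP value from both logs side by side, and stops at the
# first line where they diverge.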
| 30.416667
| 85
| 0.545205
| 98
| 730
| 3.877551
| 0.5
| 0.055263
| 0.071053
| 0.057895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040153
| 0.283562
| 730
| 23
| 86
| 31.73913
| 0.686424
| 0
| 0
| 0
| 0
| 0
| 0.039726
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.15
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ebf6e6f4a1667f2d0b5238c117fa44dfca6f7c4
| 10,203
|
py
|
Python
|
tercer_modelo.py
|
nahuelalmeira/deepLearning
|
f1fcd06f5735c8be9272b0c8392b1ae467c08582
|
[
"MIT"
] | null | null | null |
tercer_modelo.py
|
nahuelalmeira/deepLearning
|
f1fcd06f5735c8be9272b0c8392b1ae467c08582
|
[
"MIT"
] | null | null | null |
tercer_modelo.py
|
nahuelalmeira/deepLearning
|
f1fcd06f5735c8be9272b0c8392b1ae467c08582
|
[
"MIT"
] | null | null | null |
"""Exercise 1
Usage:
$ CUDA_VISIBLE_DEVICES=2 python practico_1_train_petfinder.py --dataset_dir ../ --epochs 30 --dropout 0.1 0.1 --hidden_layer_sizes 200 100
To know which GPU to use, you can check it with the command
$ nvidia-smi
"""
import argparse
import os
import mlflow
import pickle
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers, models
import warnings
warnings.filterwarnings("ignore")
from auxiliary import process_features, load_dataset, build_columns, log_dir_name
TARGET_COL = 'AdoptionSpeed'
def read_args():
parser = argparse.ArgumentParser(
description='Training a MLP on the petfinder dataset')
# Here you have some examples of classifier parameters. You can add
# more arguments or change these if you need to.
parser.add_argument('--experiment_name', type=str, default='Base model',
help='Name of the experiment, used in mlflow.')
parser.add_argument('--dataset_dir', default='../petfinder_dataset', type=str,
help='Directory with the training and test files.')
parser.add_argument('--hidden_layer_sizes', nargs='+', default=[100], type=int,
help='Number of hidden units of each hidden layer.')
parser.add_argument('--epochs', default=50, type=int,
help='Number of epochs to train.')
parser.add_argument('--dropout', nargs='+', default=[0.5], type=float,
help='Dropout ratio for every layer.')
parser.add_argument('--batch_size', type=int, default=32,
help='Number of instances in each batch.')
parser.add_argument('--learning_rate', default=1e-3, type=float,
help='Learning rate.')
args = parser.parse_args()
assert len(args.hidden_layer_sizes) == len(args.dropout)
return args
def print_args(args):
print('-------------------------------------------')
print('PARAMS ------------------------------------')
print('-------------------------------------------')
print('--experiment_name ', args.experiment_name)
print('--dataset_dir ', args.dataset_dir)
print('--epochs ', args.epochs)
print('--hidden_layer_sizes', args.hidden_layer_sizes)
print('--dropout ', args.dropout)
print('--batch_size ', args.batch_size)
print('--learning_rate ', args.learning_rate)
print('-------------------------------------------')
def main():
args = read_args()
print_args(args)
experiment_name = args.experiment_name
batch_size = args.batch_size
learning_rate = args.learning_rate
hidden_layer_sizes = args.hidden_layer_sizes
dropout = args.dropout  # parsed and logged below, but not applied to any layer in this model
epochs = args.epochs
### Output directory
dir_name = log_dir_name(args)
print()
print(dir_name)
print()
output_dir = os.path.join('experiments', experiment_name, dir_name)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
dataset, dev_dataset, test_dataset = load_dataset(args.dataset_dir)
nlabels = dataset[TARGET_COL].unique().shape[0]
columns = [
'Gender', 'Color1', 'Vaccinated', 'Dewormed',
'Breed1',
'Age', 'Fee', 'Quantity']
one_hot_columns, embedded_columns, numeric_columns = build_columns(dataset, columns)
# TODO (optional) put these three types of columns in the same dictionary with "column types"
X_train, y_train = process_features(dataset, one_hot_columns, numeric_columns, embedded_columns)
direct_features_input_shape = (X_train['direct_features'].shape[1],)
X_dev, y_dev = process_features(dev_dataset, one_hot_columns, numeric_columns, embedded_columns)
###########################################################################################################
### TODO: Shuffle train dataset - Done
###########################################################################################################
shuffle_len = X_train['direct_features'].shape[0]
train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train)).shuffle(shuffle_len).batch(batch_size)
###########################################################################################################
dev_ds = tf.data.Dataset.from_tensor_slices((X_dev, y_dev)).batch(batch_size)
test_ds = tf.data.Dataset.from_tensor_slices(process_features(
test_dataset, one_hot_columns, numeric_columns, embedded_columns, test=True)[0]).batch(batch_size)
###########################################################################################################
### TODO: Build the Keras model - Done
###########################################################################################################
tf.keras.backend.clear_session()
# Add one input and one embedding for each embedded column
embedding_layers = []
inputs = []
for embedded_col, max_value in embedded_columns.items():
input_layer = layers.Input(shape=(1,), name=embedded_col)
inputs.append(input_layer)
# Define the embedding layer
embedding_size = int(max_value / 4)
embedding_layers.append(
tf.squeeze(layers.Embedding(input_dim=max_value, output_dim=embedding_size)(input_layer), axis=-2))
print('Adding embedding of size {} for layer {}'.format(embedding_size, embedded_col))
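# For instance (illustrative): an embedded column whose max_value is 40 gets
# an embedding of size 40 // 4 = 10.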
# Add the direct features already calculated
direct_features_input = layers.Input(shape=direct_features_input_shape, name='direct_features')
inputs.append(direct_features_input)
# Concatenate everything together
features = layers.concatenate(embedding_layers + [direct_features_input])
denses = []
dense1 = layers.Dense(hidden_layer_sizes[0], activation='relu')(features)
denses.append(dense1)
if len(hidden_layer_sizes) > 1:
for hidden_layer_size in hidden_layer_sizes[1:]:
dense = layers.Dense(hidden_layer_size, activation='relu')(denses[-1])
denses.append(dense)
output_layer = layers.Dense(nlabels, activation='softmax')(denses[-1])  # feed the output from the deepest hidden layer so all hidden layers are used
model = models.Model(inputs=inputs, outputs=output_layer)
###########################################################################################################
###########################################################################################################
### TODO: Fit the model - Done
###########################################################################################################
mlflow.set_experiment(experiment_name)
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
metrics=['accuracy'])
logdir = "logs/scalars/" + dir_name
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
with mlflow.start_run(nested=True):
# Log model hyperparameters first
mlflow.log_param('hidden_layer_size', hidden_layer_sizes)
mlflow.log_param('dropout', dropout)
mlflow.log_param('embedded_columns', embedded_columns)
mlflow.log_param('one_hot_columns', one_hot_columns)
mlflow.log_param('numeric_columns', numeric_columns) # Not using these yet
mlflow.log_param('epochs', epochs)
mlflow.log_param('batch_size', batch_size)
mlflow.log_param('learning_rate', learning_rate)
# Train
history = model.fit(train_ds, epochs=epochs,
validation_data=dev_ds,
callbacks=[tensorboard_callback])
#######################################################################################################
### TODO: analyze history to see if model converges/overfits
#######################################################################################################
output_csv = os.path.join(output_dir, 'history.pickle')
with open(output_csv, 'bw') as f:
pickle.dump(history.history, f)
#######################################################################################################
#######################################################################################################
### TODO: Evaluate the model, calculating the metrics. - Done
#######################################################################################################
loss, accuracy = model.evaluate(dev_ds)
print("*** Dev loss: {} - accuracy: {}".format(loss, accuracy))
mlflow.log_metric('loss', loss)
mlflow.log_metric('accuracy', accuracy)
predictions = model.predict(test_ds)
#######################################################################################################
#######################################################################################################
### TODO: Convert predictions to classes - Done
#######################################################################################################
prediction_classes = np.argmax(predictions, axis=1)
#######################################################################################################
#######################################################################################################
### TODO: Save the results for submission - Done
#######################################################################################################
output_csv = os.path.join(output_dir, 'submit.csv')
submissions = pd.DataFrame(prediction_classes, columns=[TARGET_COL], index=test_dataset.PID)
submissions.to_csv(output_csv)
#######################################################################################################
###########################################################################################################
print('All operations completed')
if __name__ == '__main__':
main()
| 45.346667
| 138
| 0.51181
| 968
| 10,203
| 5.161157
| 0.269628
| 0.033026
| 0.035228
| 0.01201
| 0.1245
| 0.074059
| 0.074059
| 0.042234
| 0
| 0
| 0
| 0.005103
| 0.174066
| 10,203
| 224
| 139
| 45.549107
| 0.587754
| 0.095462
| 0
| 0.038168
| 0
| 0
| 0.161946
| 0.027039
| 0
| 0
| 0
| 0.004464
| 0.007634
| 1
| 0.022901
| false
| 0
| 0.083969
| 0
| 0.114504
| 0.145038
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ebfda6d11cf85e7a67d60d7c46e294592497198
| 7,576
|
py
|
Python
|
catpy/applications/export.py
|
catmaid/catpy
|
481d87591a6dfaedef2767dcddcbed7185ecc8b8
|
[
"MIT"
] | 5
|
2018-04-24T15:45:31.000Z
|
2021-06-18T17:38:07.000Z
|
catpy/applications/export.py
|
catmaid/catpy
|
481d87591a6dfaedef2767dcddcbed7185ecc8b8
|
[
"MIT"
] | 35
|
2017-05-12T21:49:54.000Z
|
2022-03-12T00:47:09.000Z
|
catpy/applications/export.py
|
catmaid/catpy
|
481d87591a6dfaedef2767dcddcbed7185ecc8b8
|
[
"MIT"
] | 4
|
2017-08-24T12:15:41.000Z
|
2019-10-13T01:05:34.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from pkg_resources import parse_version
from warnings import warn
from copy import deepcopy
import networkx as nx
from networkx.readwrite import json_graph
from catpy.applications.base import CatmaidClientApplication
NX_VERSION_INFO = parse_version(nx.__version__)._key[1]
err_msg = (
"Tried to treat the edge's source/target fields as indices into the list of nodes, but failed. "
"See issue #26 [1]. "
"Has CATMAID upgraded to networkx 2.x? [2]\n\n"
"[1]: https://github.com/catmaid/catpy/issues/26\n"
"[2]: https://github.com/catmaid/CATMAID/blob/master/django/requirements.txt"
)
def convert_nodelink_data(jso):
"""NetworkX serialises graphs differently in v1.x and v2.x.
This converts v1-style data (as emitted by CATMAID) to v2-style data.
See issue #26 https://github.com/catmaid/catpy/issues/26
Parameters
----------
jso : dict
Returns
-------
dict
"""
if NX_VERSION_INFO < (2, 0):
warn(
"You are converting networkx v1-style JSON (emitted by CATMAID) to v2-style JSON,"
" but you are using networkx v1"
)
out = deepcopy(jso)
for edge in out["links"]:
for label in ["source", "target"]:
try:
edge[label] = out["nodes"][edge[label]]["id"]
except (KeyError, IndexError):
raise RuntimeError(err_msg)
return out
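# Illustrative conversion (doctest-style; not part of the original module):
# >>> jso = {"nodes": [{"id": 10}, {"id": 20}],
# ...        "links": [{"source": 0, "target": 1}]}
# >>> convert_nodelink_data(jso)["links"]
# [{'source': 10, 'target': 20}]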
class ExportWidget(CatmaidClientApplication):
def get_swc(self, skeleton_id, linearize_ids=False):
"""
Get a single skeleton in SWC format.
Parameters
----------
skeleton_id : int or str
linearize_ids : bool
Returns
-------
str
"""
return self.get(
(self.project_id, "skeleton", skeleton_id, "swc"),
{"linearize_ids": "true" if linearize_ids else "false"},
)
def get_connector_archive(self, *args, **kwargs):
"""Not implemented: requires an async job"""
raise NotImplementedError("Requires an async job")
def get_treenode_archive(self, *args, **kwargs):
"""Not implemented: requires an async job"""
raise NotImplementedError("Requires an async job")
def get_networkx_dict(self, *skeleton_ids):
"""
Get the data for a networkx graph of the given skeletons in node-link format.
In networkx 1.x, as used by CATMAID and therefore returned by this method,
"source" and "target" in the dicts in "links" refer to nodes by their indices in the "nodes" array.
See ``convert_nodelink_data`` function to convert into networkx 2.x-compatible format.
https://networkx.readthedocs.io/en/networkx-1.11/reference/generated/networkx.readwrite.json_graph.node_link_data.html
Parameters
----------
skeleton_ids : array-like of (int or str)
Returns
-------
dict
"""
return self.post(
(self.project_id, "graphexport", "json"),
data={"skeleton_list": list(skeleton_ids)},
)
def get_networkx(self, *skeleton_ids):
"""
Get a networkx MultiDiGraph of the given skeletons.
Parameters
----------
skeleton_ids : array-like of (int or str)
Returns
-------
networkx.MultiDiGraph
"""
data = self.get_networkx_dict(*skeleton_ids)
if NX_VERSION_INFO >= (2, 0):
data = convert_nodelink_data(data)
return json_graph.node_link_graph(data, directed=True)
def get_neuroml(self, skeleton_ids, skeleton_inputs=tuple()):
"""
Get NeuroML v1.8.1 (level 3, NetworkML) for the given skeletons, possibly with their input synapses
constrained to another set of skeletons.
N.B. If len(skeleton_ids) > 1, skeleton_inputs will be ignored and only synapses within the first skeleton
set will be used in the model.
Parameters
----------
skeleton_ids : array-like
Skeletons whose NeuroML to return
skeleton_inputs : array-like, optional
If specified, only input synapses from these skeletons will be added to the NeuroML
Returns
-------
str
NeuroML output string
"""
data = {"skids": list(skeleton_ids)}
if skeleton_inputs:
if len(skeleton_ids) > 1:
warn(
"More than one skeleton ID was selected: ignoring skeleton input constraints"
)
else:
data["inputs"] = list(skeleton_inputs)
return self.post((self.project_id, "neuroml", "neuroml_level3_v181"), data=data)
def get_treenode_and_connector_geometry(self, *skeleton_ids):
"""
Get the treenode and connector information for the given skeletons. The returned dictionary will be of the form
{
"skeletons": {
skeleton_id1: {
"treenodes": {
treenode_id1: {
"location": [x, y, z],
"parent_id": id_of_parent_treenode
},
treenode_id2: ...
},
"connectors": {
connector_id1: {
"location": [x, y, z],
"presynaptic_to": [list, of, treenode, ids],
"postsynaptic_to": [list, of, treenode, ids]
},
connector_id2: ...
}
},
skeleton_id2: ...
}
}
Parameters
----------
skeleton_ids : array-like of (int or str)
Returns
-------
dict
"""
# todo: factor API call into MorphologyFetcher
skeletons = dict()
warnings = set()
relation_names = {0: "presnaptic_to", 1: "postsynaptic_to"}
for skeleton_id in skeleton_ids:
data = self.get(
"{}/{}/1/0/compact-skeleton".format(self.project_id, skeleton_id)
)
skeleton = {"treenodes": dict(), "connectors": dict()}
for treenode in data[0]:
skeleton["treenodes"][int(treenode[0])] = {
"location": treenode[3:6],
"parent_id": None if treenode[1] is None else int(treenode[1]),
}
for connector in data[1]:
# NOT the database relation ID
# {pre: 0, post: 1, gj: 2}
relation_number = connector[2]
if relation_number not in relation_names:
warnings.add(str(relation_number))  # record the unknown relation for the summary warning below
continue
conn_id = int(connector[1])
if conn_id not in skeleton["connectors"]:
skeleton["connectors"][conn_id] = {
rn: [] for rn in relation_names.values()
}
skeleton["connectors"][conn_id]["location"] = connector[3:6]
skeleton["connectors"][conn_id][relation_names[relation_number]].append(
connector[0]
)
skeletons[int(skeleton_id)] = skeleton
if warnings:
warn(
"Skeleton representations contained some unknown treenode->connector relation IDs:\n\t"
+ "\n\t".join(sorted(warnings))
)
return {"skeletons": skeletons}
| 31.566667
| 126
| 0.551082
| 827
| 7,576
| 4.912938
| 0.290206
| 0.037903
| 0.007876
| 0.017721
| 0.179424
| 0.137091
| 0.103126
| 0.086389
| 0.086389
| 0.086389
| 0
| 0.013274
| 0.343717
| 7,576
| 239
| 127
| 31.698745
| 0.803902
| 0.347413
| 0
| 0.054348
| 0
| 0.021739
| 0.205586
| 0.006102
| 0
| 0
| 0
| 0.004184
| 0
| 1
| 0.086957
| false
| 0
| 0.076087
| 0
| 0.23913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ec3a322173dd7c7c650f060b94c615e6cceb769
| 19,118
|
py
|
Python
|
release/scripts/modules/bl_i18n_utils/utils_spell_check.py
|
dvgd/blender
|
4eb2807db1c1bd2514847d182fbb7a3f7773da96
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
release/scripts/modules/bl_i18n_utils/utils_spell_check.py
|
dvgd/blender
|
4eb2807db1c1bd2514847d182fbb7a3f7773da96
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
release/scripts/modules/bl_i18n_utils/utils_spell_check.py
|
dvgd/blender
|
4eb2807db1c1bd2514847d182fbb7a3f7773da96
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2020-12-02T20:05:42.000Z
|
2020-12-02T20:05:42.000Z
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import enchant
import os
import pickle
import re
class SpellChecker:
"""
A basic spell checker.
"""
# These must be all lower case for comparisons
uimsgs = {
# OK words
"adaptively", "adaptivity",
"aren", # aren't
"betweens", # yuck! in-betweens!
"boolean", "booleans",
"chamfer",
"couldn", # couldn't
"decrement",
"derivate",
"deterministically",
"doesn", # doesn't
"duplications",
"effector",
"equi", # equi-angular, etc.
"fader",
"globbing",
"hasn", # hasn't
"hetero",
"hoc", # ad-hoc
"incompressible",
"indices",
"instantiation",
"iridas",
"isn", # isn't
"iterable",
"kyrgyz",
"latin",
"merchantability",
"mplayer",
"ons", # add-ons
"pong", # ping pong
"scalable",
"shadeless",
"shouldn", # shouldn't
"smoothen",
"spacings",
"teleport", "teleporting",
"vertices",
"wasn", # wasn't
# Merged words
"antialiasing", "antialias",
"arcsine", "arccosine", "arctangent",
"autoclip",
"autocomplete",
"autoexec",
"autoexecution",
"autogenerated",
"autolock",
"automasking",
"autoname",
"autopack",
"autosave",
"autoscale",
"autosmooth",
"autosplit",
"backface", "backfacing",
"backimage",
"backscattered",
"bandnoise",
"bindcode",
"bitdepth",
"bitflag", "bitflags",
"bitrate",
"blackbody",
"blendfile",
"blendin",
"bonesize",
"boundbox",
"boxpack",
"buffersize",
"builtin", "builtins",
"bytecode",
"chunksize",
"customdata",
"dataset", "datasets",
"de",
"deadzone",
"deconstruct",
"defocus",
"denoise", "denoised", "denoising", "denoiser",
"deselect", "deselecting", "deselection",
"despill", "despilling",
"dirtree",
"editcurve",
"editmesh",
"filebrowser",
"filelist",
"filename", "filenames",
"filepath", "filepaths",
"forcefield", "forcefields",
"fulldome", "fulldomes",
"fullscreen",
"gridline",
"hardlight",
"hemi",
"hostname",
"inbetween",
"inscatter", "inscattering",
"libdata",
"lightprobe", "lightprobes",
"lightless",
"lineset",
"linestyle", "linestyles",
"localview",
"lookup", "lookups",
"mathutils",
"micropolygon",
"midlevel",
"midground",
"mixdown",
"multi",
"multifractal",
"multiframe",
"multilayer",
"multipaint",
"multires", "multiresolution",
"multisampling",
"multiscatter",
"multitexture",
"multithreaded",
"multiuser",
"multiview",
"namespace",
"nodetree", "nodetrees",
"keyconfig",
"offscreen",
"online",
"playhead",
"popup", "popups",
"pre",
"precache", "precaching",
"precalculate",
"precomputing",
"prefetch",
"premultiply", "premultiplied",
"prepass",
"prepend",
"preprocess", "preprocessing",
"preseek",
"promillage",
"pushdown",
"raytree",
"readonly",
"realtime",
"reinject", "reinjected",
"rekey",
"remesh",
"reprojection", "reproject", "reprojecting",
"resize",
"restpose",
"retarget", "retargets", "retargeting", "retargeted",
"retiming",
"rigidbody",
"ringnoise",
"rolloff",
"runtime",
"scanline",
"screenshot", "screenshots",
"seekability",
"selfcollision",
"shadowbuffer", "shadowbuffers",
"singletexture",
"spellcheck", "spellchecking",
"startup",
"stateful",
"starfield",
"studiolight",
"subflare", "subflares",
"subframe", "subframes",
"subclass", "subclasses", "subclassing",
"subdirectory", "subdirectories", "subdir", "subdirs",
"subitem",
"submode",
"submodule", "submodules",
"subpath",
"subsize",
"substep", "substeps",
"targetless",
"textbox", "textboxes",
"tilemode",
"timestamp", "timestamps",
"timestep", "timesteps",
"todo",
"tradeoff",
"un",
"unassociate", "unassociated",
"unbake",
"unclosed",
"uncomment",
"unculled",
"undeformed",
"undistort", "undistorted", "undistortion",
"ungroup", "ungrouped",
"unhide",
"unindent",
"unkeyed",
"unlink", "unlinked",
"unmute",
"unphysical",
"unpremultiply",
"unprojected",
"unprotect",
"unreacted",
"unreferenced",
"unregister",
"unselect", "unselected", "unselectable",
"unsets",
"unshadowed",
"unspill",
"unstitchable", "unstitch",
"unsubdivided", "unsubdivide",
"untrusted",
"vectorscope",
"whitespace", "whitespaces",
"worldspace",
"workflow",
"workspace", "workspaces",
# Neologisms, slangs
"affectable",
"animatable",
"automagic", "automagically",
"blobby",
"blockiness", "blocky",
"collider", "colliders",
"deformer", "deformers",
"determinator",
"editability",
"effectors",
"expander",
"instancer",
"keyer",
"lacunarity",
"linkable",
"numerics",
"occluder", "occluders",
"overridable",
"passepartout",
"perspectively",
"pixelate",
"pointiness",
"polycount",
"polygonization", "polygonalization", # yuck!
"scalings",
"selectable", "selectability",
"shaper",
"smoothen", "smoothening",
"spherize", "spherized",
"stitchable",
"symmetrize",
"trackability",
"transmissivity",
"rasterized", "rasterization", "rasterizer",
"renderer", "renderers", "renderable", "renderability",
# Really bad!!!
"convertor",
"fullscr",
# Abbreviations
"aero",
"amb",
"anim",
"aov",
"app",
"bbox", "bboxes",
"bksp", # Backspace
"bool",
"calc",
"cfl",
"config", "configs",
"const",
"coord", "coords",
"degr",
"diff",
"dof",
"dupli", "duplis",
"eg",
"esc",
"expr",
"fac",
"fra",
"fract",
"frs",
"grless",
"http",
"init",
"irr", # Irradiance
"kbit", "kb",
"lang", "langs",
"lclick", "rclick",
"lensdist",
"loc", "rot", "pos",
"lorem",
"luma",
"mbs", # mouse button 'select'.
"mem",
"multicam",
"num",
"ok",
"orco",
"ortho",
"pano",
"persp",
"pref", "prefs",
"prev",
"param",
"premul",
"quad", "quads",
"quat", "quats",
"recalc", "recalcs",
"refl",
"sce",
"sel",
"spec",
"struct", "structs",
"subdiv",
"sys",
"tex",
"texcoord",
"tmr", # timer
"tri", "tris",
"udim", "udims",
"upres", # Upresolution
"usd",
"uv", "uvs", "uvw", "uw", "uvmap",
"ve",
"vec",
"vel", # velocity!
"vert", "verts",
"vis",
"vram",
"xor",
"xyz", "xzy", "yxz", "yzx", "zxy", "zyx",
"xy", "xz", "yx", "yz", "zx", "zy",
# General computer/science terms
"affine",
"albedo",
"anamorphic",
"anisotropic", "anisotropy",
"bitangent",
"boid", "boids",
"ceil",
"compressibility",
"curvilinear",
"equiangular",
"equisolid",
"euler", "eulers",
"fribidi",
"gettext",
"hashable",
"hotspot",
"interocular",
"intrinsics",
"irradiance",
"isosurface",
"jitter", "jittering", "jittered",
"keymap", "keymaps",
"lambertian",
"laplacian",
"metadata",
"msgfmt",
"nand", "xnor",
"normals",
"numpad",
"octahedral",
"octree",
"omnidirectional",
"opengl",
"openmp",
"parametrization",
"photoreceptor",
"poly",
"polyline", "polylines",
"probabilistically",
"pulldown", "pulldowns",
"quantized",
"quartic",
"quaternion", "quaternions",
"quintic",
"samplerate",
"sawtooth",
"scrollback",
"scrollbar",
"scroller",
"searchable",
"spacebar",
"subtractive",
"superellipse",
"tooltip", "tooltips",
"trackpad",
"tuple",
"unicode",
"viewport", "viewports",
"viscoelastic",
"vorticity",
"waveform", "waveforms",
"wildcard", "wildcards",
"wintab", # Some Windows tablet API
# General computer graphics terms
"anaglyph",
"bezier", "beziers",
"bicubic",
"bilinear",
"bindpose",
"binormal",
"blackpoint", "whitepoint",
"blinn",
"bokeh",
"catadioptric",
"centroid",
"chroma",
"chrominance",
"clearcoat",
"codec", "codecs",
"collada",
"compositing",
"crossfade",
"cubemap", "cubemaps",
"cuda",
"deinterlace",
"dropoff",
"duotone",
"dv",
"eigenvectors",
"emissive",
"equirectangular",
"fisheye",
"framerate",
"gimbal",
"grayscale",
"icosphere",
"inpaint",
"kerning",
"lightmap",
"linearlight",
"lossless", "lossy",
"luminance",
"mantaflow",
"matcap",
"midtones",
"mipmap", "mipmaps", "mip",
"ngon", "ngons",
"ntsc",
"nurb", "nurbs",
"perlin",
"phong",
"pinlight",
"qi",
"radiosity",
"raycasting",
"raytrace", "raytracing", "raytraced",
"refractions",
"remesher", "remeshing", "remesh",
"renderfarm",
"scanfill",
"shader", "shaders",
"shadowmap", "shadowmaps",
"softlight",
"specular", "specularity",
"spillmap",
"sobel",
"stereoscopy",
"texel",
"timecode",
"tonemap",
"toon",
"transmissive",
"vividlight",
"volumetrics",
"voronoi",
"voxel", "voxels",
"vsync",
"wireframe",
"zmask",
"ztransp",
# Blender terms
"audaspace",
"azone", # action zone
"backwire",
"bbone",
"bendy", # bones
"bmesh",
"breakdowner",
"bspline",
"bweight",
"colorband",
"datablock", "datablocks",
"despeckle",
"depsgraph",
"dopesheet",
"dupliface", "duplifaces",
"dupliframe", "dupliframes",
"dupliobject", "dupliob",
"dupligroup",
"duplivert",
"dyntopo",
"editbone",
"editmode",
"eevee",
"fcurve", "fcurves",
"fedge", "fedges",
"filmic",
"fluidsim",
"freestyle",
"enum", "enums",
"gizmogroup",
"gons", # N-Gons
"gpencil",
"idcol",
"keyframe", "keyframes", "keyframing", "keyframed",
"lookdev",
"luminocity",
"mathvis",
"metaball", "metaballs", "mball",
"metaelement", "metaelements",
"metastrip", "metastrips",
"movieclip",
"mpoly",
"mtex",
"nabla",
"navmesh",
"outliner",
"overscan",
"paintmap", "paintmaps",
"polygroup", "polygroups",
"poselib",
"pushpull",
"pyconstraint", "pyconstraints",
"qe", # keys...
"shaderfx", "shaderfxs",
"shapekey", "shapekeys",
"shrinkfatten",
"shrinkwrap",
"softbody",
"stucci",
"subdiv",
"subtype",
"sunsky",
"tessface", "tessfaces",
"texface",
"timeline", "timelines",
"tosphere",
"uilist",
"userpref",
"vcol", "vcols",
"vgroup", "vgroups",
"vinterlace",
"vse",
"wasd", "wasdqe", # keys...
"wetmap", "wetmaps",
"wpaint",
"uvwarp",
# UOC (Ugly Operator Categories)
"cachefile",
"paintcurve",
"ptcache",
"dpaint",
# Algorithm/library names
"ashikhmin", # Ashikhmin-Shirley
"arsloe", # Texel-Marsen-Arsloe
"beckmann",
"blackman", # Blackman-Harris
"blosc",
"burley", # Christensen-Burley
"catmull",
"catrom",
"chebychev",
"courant",
"cryptomatte", "crypto",
"embree",
"hosek",
"kutta",
"lennard",
"marsen", # Texel-Marsen-Arsloe
"mikktspace",
"minkowski",
"minnaert",
"moskowitz", # Pierson-Moskowitz
"musgrave",
"nayar",
"netravali",
"nishita",
"ogawa",
"oren",
"peucker", # Ramer-Douglas-Peucker
"pierson", # Pierson-Moskowitz
"preetham",
"prewitt",
"ramer", # Ramer-Douglas-Peucker
"runge",
"sobol",
"verlet",
"wilkie",
"worley",
# Acronyms
"aa", "msaa",
"ao",
"api",
"asc", "cdl",
"ascii",
"atrac",
"avx",
"bsdf",
"bssrdf",
"bw",
"ccd",
"cmd",
"cmos",
"cpus",
"ctrl",
"cw", "ccw",
"dev",
"djv",
"dpi",
"dvar",
"dx",
"eo",
"fh",
"fk",
"fov",
"fft",
"futura",
"fx",
"gfx",
"ggx",
"gl",
"glsl",
"gpl",
"gpu", "gpus",
"hc",
"hdc",
"hdr", "hdri", "hdris",
"hh", "mm", "ss", "ff", # hh:mm:ss:ff timecode
"hsv", "hsva", "hsl",
"id",
"ies",
"ior",
"itu",
"jonswap",
"lhs",
"lmb", "mmb", "rmb",
"kb",
"mocap",
"msgid", "msgids",
"mux",
"ndof",
"ppc",
"precisa",
"px",
"qmc",
"rdp",
"rgb", "rgba",
"rhs",
"rv",
"sdl",
"sl",
"smpte",
"ssao",
"ssr",
"svn",
"tma",
"ui",
"unix",
"vbo", "vbos",
"vr",
"wxyz",
"xr",
"ycc", "ycca",
"yrgb",
"yuv", "yuva",
# Blender acronyms
"bli",
"bpy",
"bvh",
"dbvt",
"dop", # BLI K-Dop BVH
"ik",
"nla",
"py",
"qbvh",
"rna",
"rvo",
"simd",
"sph",
"svbvh",
# Files types/formats
"avi",
"attrac",
"autocad",
"autodesk",
"bmp",
"btx",
"cineon",
"dpx",
"dwaa",
"dwab",
"dxf",
"eps",
"exr",
"fbx",
"fbxnode",
"ffmpeg",
"flac",
"gltf",
"gzip",
"ico",
"jpg", "jpeg", "jpegs",
"json",
"matroska",
"mdd",
"mkv",
"mpeg", "mjpeg",
"mtl",
"ogg",
"openjpeg",
"osl",
"oso",
"piz",
"png", "pngs",
"po",
"quicktime",
"rle",
"sgi",
"stl",
"svg",
"targa", "tga",
"tiff",
"theora",
"vorbis",
"vp9",
"wav",
"webm",
"xiph",
"xml",
"xna",
"xvid",
}
_valid_before = "(?<=[\\s*'\"`])|(?<=[a-zA-Z][/-])|(?<=^)"
_valid_after = "(?=[\\s'\"`.!?,;:])|(?=[/-]\\s*[a-zA-Z])|(?=$)"
_valid_words = "(?:{})(?:(?:[A-Z]+[a-z]*)|[A-Z]*|[a-z]*)(?:{})".format(_valid_before, _valid_after)
_split_words = re.compile(_valid_words).findall
@classmethod
def split_words(cls, text):
return [w for w in cls._split_words(text) if w]
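# Illustrative behavior (doctest-style; not part of the original module):
# >>> SpellChecker.split_words("auto-save works")
# ['auto', 'save', 'works']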
def __init__(self, settings, lang="en_US"):
self.settings = settings
self.dict_spelling = enchant.Dict(lang)
self.cache = set(self.uimsgs)
cache = self.settings.SPELL_CACHE
if cache and os.path.exists(cache):
with open(cache, 'rb') as f:
self.cache |= set(pickle.load(f))
def __del__(self):
cache = self.settings.SPELL_CACHE
if cache and os.path.exists(cache):
with open(cache, 'wb') as f:
pickle.dump(self.cache, f)
def check(self, txt):
ret = []
if txt in self.cache:
return ret
for w in self.split_words(txt):
w_lower = w.lower()
if w_lower in self.cache:
continue
if not self.dict_spelling.check(w):
ret.append((w, self.dict_spelling.suggest(w)))
else:
self.cache.add(w_lower)
if not ret:
self.cache.add(txt)
return ret
| 23.145278
| 103
| 0.437703
| 1,348
| 19,118
| 6.181751
| 0.807122
| 0.00864
| 0.00468
| 0.00684
| 0.029281
| 0.023761
| 0.016081
| 0.016081
| 0.016081
| 0.016081
| 0
| 0.001216
| 0.397583
| 19,118
| 825
| 104
| 23.173333
| 0.722324
| 0.080605
| 0
| 0.010485
| 0
| 0.001311
| 0.367117
| 0.002632
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005242
| false
| 0.002621
| 0.005242
| 0.001311
| 0.02228
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ec3b7be918911b5b776d40be78266905df319e1
| 7,175
|
py
|
Python
|
naslib/predictors/mlp.py
|
gmeyerlee/NASLib
|
21dbceda04cc1faf3d8b6dd391412a459218ef2b
|
[
"Apache-2.0"
] | null | null | null |
naslib/predictors/mlp.py
|
gmeyerlee/NASLib
|
21dbceda04cc1faf3d8b6dd391412a459218ef2b
|
[
"Apache-2.0"
] | null | null | null |
naslib/predictors/mlp.py
|
gmeyerlee/NASLib
|
21dbceda04cc1faf3d8b6dd391412a459218ef2b
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import os
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from naslib.utils.utils import AverageMeterGroup
from naslib.predictors.utils.encodings import encode
from naslib.predictors import Predictor
# NOTE: faster on CPU
device = torch.device("cpu")
print("device:", device)
def accuracy_mse(prediction, target, scale=100.0):
prediction = prediction.detach() * scale
target = (target) * scale
return F.mse_loss(prediction, target)
class FeedforwardNet(nn.Module):
def __init__(
self,
input_dims: int = 5,
num_layers: int = 3,
layer_width: list = [10, 10, 10],
output_dims: int = 1,
activation="relu",
):
super(FeedforwardNet, self).__init__()
assert (
len(layer_width) == num_layers
), "number of widths should be equal to the number of layers"
self.activation = eval("F." + activation)
all_units = [input_dims] + layer_width
self.layers = nn.ModuleList(
[nn.Linear(all_units[i], all_units[i + 1]) for i in range(num_layers)]
)
self.out = nn.Linear(all_units[-1], 1)
# make the init similar to the tf.keras version
for l in self.layers:
torch.nn.init.xavier_uniform_(l.weight)
torch.nn.init.zeros_(l.bias)
torch.nn.init.xavier_uniform_(self.out.weight)
torch.nn.init.zeros_(self.out.bias)
def forward(self, x):
for layer in self.layers:
x = self.activation(layer(x))
return self.out(x)
def basis_funcs(self, x):
for layer in self.layers:
x = self.activation(layer(x))
return x
class MLPPredictor(Predictor):
def __init__(
self,
encoding_type="adjacency_one_hot",
ss_type="nasbench201",
hpo_wrapper=False,
hparams_from_file=False
):
self.encoding_type = encoding_type
self.ss_type = ss_type
self.hpo_wrapper = hpo_wrapper
self.default_hyperparams = {
"num_layers": 20,
"layer_width": 20,
"batch_size": 32,
"lr": 0.001,
"regularization": 0.2,
}
self.hyperparams = None
self.hparams_from_file = hparams_from_file
def get_model(self, **kwargs):
predictor = FeedforwardNet(**kwargs)
return predictor
def fit(self, xtrain, ytrain, train_info=None, epochs=500, loss="mae", verbose=0):
if self.hparams_from_file and self.hparams_from_file not in ['False', 'None'] \
and os.path.exists(self.hparams_from_file):
self.hyperparams = json.load(open(self.hparams_from_file, 'rb'))['mlp']
print('loaded hyperparams from', self.hparams_from_file)
elif self.hyperparams is None:
self.hyperparams = self.default_hyperparams.copy()
num_layers = self.hyperparams["num_layers"]
layer_width = self.hyperparams["layer_width"]
batch_size = self.hyperparams["batch_size"]
lr = self.hyperparams["lr"]
regularization = self.hyperparams["regularization"]
self.mean = np.mean(ytrain)
self.std = np.std(ytrain)
if self.encoding_type is not None:
_xtrain = np.array(
[
encode(arch, encoding_type=self.encoding_type, ss_type=self.ss_type)
for arch in xtrain
]
)
else:
_xtrain = xtrain
_ytrain = np.array(ytrain)
X_tensor = torch.FloatTensor(_xtrain).to(device)
y_tensor = torch.FloatTensor(_ytrain).to(device)
train_data = TensorDataset(X_tensor, y_tensor)
data_loader = DataLoader(
train_data,
batch_size=batch_size,
shuffle=True,
drop_last=False,
pin_memory=False,
)
self.model = self.get_model(
input_dims=_xtrain.shape[1],
num_layers=num_layers,
layer_width=num_layers * [layer_width],
)
self.model.to(device)
optimizer = optim.Adam(self.model.parameters(), lr=lr, betas=(0.9, 0.99))
if loss == "mse":
criterion = nn.MSELoss().to(device)
elif loss == "mae":
criterion = nn.L1Loss().to(device)
self.model.train()
for e in range(epochs):
meters = AverageMeterGroup()
for b, batch in enumerate(data_loader):
optimizer.zero_grad()
input = batch[0].to(device)
target = batch[1].to(device)
prediction = self.model(input).view(-1)
loss_fn = criterion(prediction, target)
# add L1 regularization
params = torch.cat(
[
x[1].view(-1)
for x in self.model.named_parameters()
if x[0] == "out.weight"
]
)
loss_fn += regularization * torch.norm(params, 1)
loss_fn.backward()
optimizer.step()
mse = accuracy_mse(prediction, target)
meters.update(
{"loss": loss_fn.item(), "mse": mse.item()}, n=target.size(0)
)
if verbose and e % 100 == 0:
print("Epoch {}, {}, {}".format(e, meters["loss"], meters["mse"]))
train_pred = np.squeeze(self.query(xtrain))
train_error = np.mean(abs(train_pred - ytrain))
return train_error
def query(self, xtest, info=None, eval_batch_size=None):
if self.encoding_type is not None:
xtest = np.array(
[
encode(arch, encoding_type=self.encoding_type, ss_type=self.ss_type)
for arch in xtest
]
)
X_tensor = torch.FloatTensor(xtest).to(device)
test_data = TensorDataset(X_tensor)
eval_batch_size = len(xtest) if eval_batch_size is None else eval_batch_size
test_data_loader = DataLoader(
test_data, batch_size=eval_batch_size, pin_memory=False
)
self.model.eval()
pred = []
with torch.no_grad():
for _, batch in enumerate(test_data_loader):
prediction = self.model(batch[0].to(device)).view(-1)
pred.append(prediction.cpu().numpy())
pred = np.concatenate(pred)
return np.squeeze(pred)
def set_random_hyperparams(self):
if self.hyperparams is None:
params = self.default_hyperparams.copy()
else:
params = {
"num_layers": int(np.random.choice(range(5, 25))),
"layer_width": int(np.random.choice(range(5, 25))),
"batch_size": 32,
"lr": np.random.choice([0.1, 0.01, 0.005, 0.001, 0.0001]),
"regularization": 0.2,
}
self.hyperparams = params
return params
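# Minimal smoke test (doctest-style; not part of the original module, and it
# assumes naslib and torch are installed). With encoding_type=None, raw
# feature vectors are fed straight to the MLP, bypassing naslib's
# architecture encoders:
# >>> rng = np.random.RandomState(0)
# >>> predictor = MLPPredictor(encoding_type=None)
# >>> train_error = predictor.fit(rng.rand(32, 5), rng.rand(32), epochs=10)
# >>> predictor.query(rng.rand(4, 5)).shape
# (4,)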
| 32.466063
| 88
| 0.564739
| 845
| 7,175
| 4.620118
| 0.246154
| 0.027664
| 0.030738
| 0.029201
| 0.152152
| 0.089139
| 0.089139
| 0.0625
| 0.0625
| 0.0625
| 0
| 0.01762
| 0.327666
| 7,175
| 220
| 89
| 32.613636
| 0.791667
| 0.012125
| 0
| 0.110497
| 0
| 0
| 0.038255
| 0
| 0
| 0
| 0
| 0
| 0.005525
| 1
| 0.049724
| false
| 0
| 0.060773
| 0
| 0.160221
| 0.016575
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ec3f2a1fe20def9bc91ffbd4b3742d74abb33b3
| 1,301
|
py
|
Python
|
pythonforandroid/recipes/libx264/__init__.py
|
Joreshic/python-for-android
|
c60e02d2e32e31a3a754838c51e9242cbadcd9e8
|
[
"MIT"
] | 1
|
2019-09-03T13:44:06.000Z
|
2019-09-03T13:44:06.000Z
|
pythonforandroid/recipes/libx264/__init__.py
|
Joreshic/python-for-android
|
c60e02d2e32e31a3a754838c51e9242cbadcd9e8
|
[
"MIT"
] | null | null | null |
pythonforandroid/recipes/libx264/__init__.py
|
Joreshic/python-for-android
|
c60e02d2e32e31a3a754838c51e9242cbadcd9e8
|
[
"MIT"
] | 1
|
2018-11-15T07:58:30.000Z
|
2018-11-15T07:58:30.000Z
|
from pythonforandroid.toolchain import Recipe, shprint, current_directory, ArchARM
from os.path import exists, join, realpath
import sh
class LibX264Recipe(Recipe):
version = 'x264-snapshot-20170608-2245-stable' # using mirror url since can't use ftp
url = 'http://mirror.yandex.ru/mirrors/ftp.videolan.org/x264/snapshots/{version}.tar.bz2'
md5sum = 'adf3b87f759b5cc9f100f8cf99276f77'
def should_build(self, arch):
build_dir = self.get_build_dir(arch.arch)
return not exists(join(build_dir, 'lib', 'libx264.a'))
def build_arch(self, arch):
with current_directory(self.get_build_dir(arch.arch)):
env = self.get_recipe_env(arch)
configure = sh.Command('./configure')
shprint(configure,
'--cross-prefix=arm-linux-androideabi-',
'--host=arm-linux',
'--disable-asm',
'--disable-cli',
'--enable-pic',
'--disable-shared',
'--enable-static',
'--prefix={}'.format(realpath('.')),
_env=env)
shprint(sh.make, '-j4', _env=env)
shprint(sh.make, 'install', _env=env)
recipe = LibX264Recipe()
| 37.171429
| 93
| 0.583397
| 142
| 1,301
| 5.239437
| 0.542254
| 0.043011
| 0.032258
| 0.040323
| 0.112903
| 0.061828
| 0
| 0
| 0
| 0
| 0
| 0.052688
| 0.285165
| 1,301
| 34
| 94
| 38.264706
| 0.747312
| 0.027671
| 0
| 0
| 0
| 0.034483
| 0.248614
| 0.081552
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.172414
| 0
| 0.413793
| 0.137931
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ec517fad6215e10cf8fdc40288d6f1a4376050d
| 17,499
|
py
|
Python
|
apps/notifications/tests/test_views.py
|
SCiO-systems/qcat
|
8c2b8e07650bc2049420fa6de758fba7e50c2f28
|
[
"Apache-2.0"
] | null | null | null |
apps/notifications/tests/test_views.py
|
SCiO-systems/qcat
|
8c2b8e07650bc2049420fa6de758fba7e50c2f28
|
[
"Apache-2.0"
] | null | null | null |
apps/notifications/tests/test_views.py
|
SCiO-systems/qcat
|
8c2b8e07650bc2049420fa6de758fba7e50c2f28
|
[
"Apache-2.0"
] | null | null | null |
import logging
from unittest import mock
from unittest.mock import call
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.signing import Signer
from django.urls import reverse
from django.http import Http404
from django.test import RequestFactory
from braces.views import LoginRequiredMixin
from django.test import override_settings
from model_mommy import mommy
from apps.notifications.models import Log, StatusUpdate, MemberUpdate, ReadLog, \
ActionContextQuerySet
from apps.notifications.views import LogListView, LogCountView, ReadLogUpdateView, \
LogQuestionnairesListView, LogInformationUpdateCreateView, \
LogSubscriptionPreferencesView, SignedLogSubscriptionPreferencesView
from apps.qcat.tests import TestCase
class LogListViewTest(TestCase):
def setUp(self):
self.view = LogListView()
self.url_path = reverse('notification_partial_list')
self.request = RequestFactory().get(self.url_path)
self.user = {}
self.request.user = self.user
self.view_instance = self.setup_view(
view=self.view, request=self.request
)
member_add_log = mommy.make(
_model=Log,
id=8,
action=settings.NOTIFICATIONS_ADD_MEMBER
)
self.change_log = mommy.make(
_model=Log,
id=42,
action=settings.NOTIFICATIONS_CHANGE_STATUS
)
mommy.make(_model=StatusUpdate, log=self.change_log)
mommy.make(_model=MemberUpdate, log=member_add_log)
def get_view_with_get_querystring(self, param):
request = RequestFactory().get(
'{url}?{param}'.format(url=self.url_path, param=param)
)
request.user = self.user
return self.setup_view(view=self.view, request=request)
def test_force_login(self):
self.assertIsInstance(self.view_instance, LoginRequiredMixin)
def test_queryset_method(self):
self.assertEqual(
self.view_instance.queryset_method,
'user_log_list'
)
def test_queryset_method_pending(self):
self.assertEqual(
self.get_view_with_get_querystring('is_pending').queryset_method,
'user_pending_list'
)
def test_get_paginate_by(self):
self.assertEqual(
self.view_instance.get_paginate_by(None),
settings.NOTIFICATIONS_LIST_PAGINATE_BY
)
def test_get_paginate_by_teaser(self):
self.assertEqual(
self.get_view_with_get_querystring('is_teaser').get_paginate_by(None),
settings.NOTIFICATIONS_TEASER_PAGINATE_BY
)
@mock.patch('apps.notifications.views.Log.actions.user_log_list')
def test_get_queryset(self, mock_actions):
self.view_instance.get_queryset()
mock_actions.assert_called_once_with(user={})
@mock.patch('apps.notifications.views.Log.actions.user_pending_list')
def test_get_queryset_pending(self, mock_actions):
self.get_view_with_get_querystring('is_pending').get_queryset()
mock_actions.assert_called_once_with(user={})
@mock.patch.object(LogListView, 'add_user_aware_data')
def test_get_context_data_logs(self, mock_add_user_aware_data):
self.view_instance.object_list = 'foo'
self.view_instance.get_context_data()
mock_add_user_aware_data.assert_called_once_with('foo')
def _test_add_user_aware_data(self):
# for faster tests, mock all the elements; they are created here
# because this makes the tests more readable.
pth = 'apps.notifications.views.Log.actions'
with mock.patch('{}.read_id_list'.format(pth)) as read_id_list:
read_id_list.return_value = [42]
with mock.patch('{}.user_pending_list'.format(pth)) as pending:
pending.values_list.return_value = [8, 42]
logs = Log.objects.all()
return list(self.view_instance.add_user_aware_data(logs))
def test_add_user_aware_data_keys(self):
data_keys = self._test_add_user_aware_data()[0].keys()
for key in ['id', 'created', 'text', 'is_read', 'is_todo', 'edit_url']:
self.assertTrue(key in data_keys)
def test_add_user_aware_data_is_read(self):
data = self._test_add_user_aware_data()
# logs are ordered by creation date - 42 is the newer one
self.assertTrue(data[0]['is_read'])
def test_add_user_aware_data_is_not_read(self):
data = self._test_add_user_aware_data()
self.assertFalse(data[1]['is_read'])
#def test_add_user_aware_data_is_todo(self):
# data = self._test_add_user_aware_data()
# self.assertTrue(data[1]['is_todo'])
def test_add_user_aware_data_is_not_todo(self):
data = self._test_add_user_aware_data()
self.assertFalse(data[0]['is_todo'])
@override_settings(NOTIFICATIONS_ACTIONS={'foo': 'bar', 'result': '42'})
def test_statuses_in_context(self):
self.view_instance.object_list = []
context = self.view_instance.get_context_data()
self.assertDictEqual(
context['statuses'],
{'foo': 'bar', 'result': '42'}
)
@mock.patch('apps.notifications.views.Log.actions.user_log_list')
def test_status_filter_queryset(self, mock_user_log_list):
mock_user_log_list.return_value = []
self.assertEqual(
[], self.view_instance.get_queryset()
)
@mock.patch('apps.notifications.views.Log.actions.user_log_list')
def test_status_filter_queryset_for_status(self, mock_user_log_list):
mock_user_log_list.return_value = Log.objects.filter()
view = self.view
view.get_statuses = mock.MagicMock(return_value=[3])
view_instance = self.setup_view(
view=view, request=self.request
)
self.assertQuerysetEqual(
view_instance.get_queryset(),
[self.change_log.id],
transform=lambda item: item.id
)
def test_get_status_invalid(self):
request = RequestFactory().get('{}?statuses=foo'.format(self.url_path))
view = self.setup_view(self.view, request)
self.assertEqual(view.get_statuses(), [])
@override_settings(NOTIFICATIONS_ACTIONS={'2': 'bar'})
def test_get_status_invalid_config(self):
request = RequestFactory().get('{}?statuses=1'.format(self.url_path))
view = self.setup_view(self.view, request)
self.assertEqual(view.get_statuses(), [])
def test_get_status_valid(self):
request = RequestFactory().get('{}?statuses=1,2,3'.format(self.url_path))
view = self.setup_view(self.view, request)
self.assertEqual(view.get_statuses(), [1, 2, 3])
class ReadLogUpdateViewTest(TestCase):
def setUp(self):
self.view = ReadLogUpdateView()
self.request = RequestFactory().post(
reverse('notification_read'),
data={'user': 123, 'log': 'log', 'checked': 'true'}
)
self.user = mock.MagicMock(id=123)
self.request.user = self.user
self.view_instance = self.setup_view(view=self.view, request=self.request)
def test_validate_data_all_keys(self):
self.assertFalse(
self.view_instance.validate_data()
)
def test_validate_data_id_type(self):
self.assertFalse(
self.view_instance.validate_data(checked='1', log='1', user='foo')
)
def test_validate_data_invalid_user(self):
self.assertFalse(
self.view_instance.validate_data(checked='456', log='1', user='456')
)
def test_validate_data_valid(self):
self.assertTrue(
self.view_instance.validate_data(checked='1', log='1', user='123')
)
@mock.patch('apps.notifications.views.ReadLog.objects.update_or_create')
def test_post_valid_checked(self, mock_get_or_create):
self.view_instance.post(request=self.request)
mock_get_or_create.assert_called_once_with(
user_id='123', log_id='log', defaults={'is_read': True}
)
@mock.patch('apps.notifications.views.ReadLog.objects.update_or_create')
def test_post_valid_unchecked(self, mock_get_or_create):
request = RequestFactory().post(
reverse('notification_read'),
data={'user': 123, 'log': 'log', 'checked': 'false'}
)
self.view_instance.post(request=request)
mock_get_or_create.assert_called_once_with(
user_id='123', log_id='log', defaults={'is_read': False}
)
@mock.patch.object(ReadLogUpdateView, 'validate_data')
def test_post_invalid(self, mock_validate_data):
logging.disable(logging.CRITICAL)
mock_validate_data.return_value = False
with self.assertRaises(Http404):
self.view_instance.post(request=self.request)
class LogCountViewTest(TestCase):
def setUp(self):
super().setUp()
self.request = RequestFactory().get(reverse('notification_new_count'))
self.request.user = mommy.make(_model=get_user_model())
self.view = self.setup_view(view=LogCountView(), request=self.request)
mommy.make(
_model=Log,
catalyst=self.request.user,
action=settings.NOTIFICATIONS_CHANGE_STATUS,
_quantity=4
)
mommy.make(
_model=Log,
catalyst=self.request.user,
action=settings.NOTIFICATIONS_EDIT_CONTENT,
_quantity=2
)
@mock.patch('apps.notifications.views.Log.actions.only_unread_logs')
def test_get_unread_only(self, mock_only_unread_logs):
self.view.get(request=self.request)
mock_only_unread_logs.assert_called_once_with(
user=self.request.user
)
def test_log_count(self):
response = self.view.get(request=self.request)
self.assertEqual(response.content, b'4')
def test_log_count_one_read(self):
mommy.make(
_model=ReadLog,
log=Log.objects.filter(action=settings.NOTIFICATIONS_CHANGE_STATUS).first(),
user=self.request.user,
is_read=True
)
response = self.view.get(request=self.request)
self.assertEqual(response.content, b'3')
class LogQuestionnairesListViewTest(TestCase):
def setUp(self):
super().setUp()
self.request = RequestFactory().get(reverse('notification_questionnaire_logs'))
self.request.user = 'foo'
self.view = self.setup_view(view=LogQuestionnairesListView(), request=self.request)
@mock.patch.object(ActionContextQuerySet, 'user_log_list')
def test_get_questionnaire_logs(self, mock_user_log_list):
self.view.get_questionnaire_logs('foo')
mock_user_log_list.assert_called_once_with(user='foo')
@mock.patch.object(LogQuestionnairesListView, 'get_questionnaire_logs')
def test_get(self, mock_get_questionnaire_logs):
mock_get_questionnaire_logs.return_value = ['foo_1', 'foo_2', 'bar_3']
response = self.view.get(self.request)
self.assertEqual(
response.content, b'{"questionnaires": ["bar_3", "foo_1", "foo_2"]}'
)
class LogInformationUpdateCreateViewTest(TestCase):
def setUp(self):
super().setUp()
self.url = reverse('notification_inform_compiler')
self.view = LogInformationUpdateCreateView()
self.request = RequestFactory().get(self.url)
self.request.user = 'foo'
self.view = self.setup_view(view=self.view, request=self.request)
def test_get_compiler_query(self):
questionnaire = mock.MagicMock()
self.view.get_compiler(questionnaire)
self.assertEqual(
questionnaire.method_calls[0],
call.questionnairemembership_set.get(role='compiler')
)
def test_get_compiler(self):
sentinel = mock.sentinel
questionnaire = mock.MagicMock()
questionnaire.questionnairemembership_set.get.return_value = sentinel
self.assertEqual(
self.view.get_compiler(questionnaire),
sentinel.user
)
@mock.patch('apps.notifications.views.query_questionnaire')
def test_get_questionnaire(self, mock_query_questionnaire):
one_questionnaire = mock.MagicMock()
one_questionnaire.first = lambda : 'foo'
mock_query_questionnaire.return_value = one_questionnaire
self.assertEqual(
self.view.get_questionnaire('foo'), 'foo'
)
@mock.patch('apps.notifications.views.query_questionnaire')
def test_get_questionnaire_raises(self, mock_query_questionnaire):
not_exists = mock.MagicMock()
not_exists.exists = lambda : False
mock_query_questionnaire.return_value = not_exists
with self.assertRaises(Http404):
self.view.get_questionnaire('foo')
@mock.patch('apps.notifications.views.query_questionnaire')
def test_get_questionnaire_calls_filter(self, mock_query_questionnaire):
self.view.get_questionnaire('foo')
mock_query_questionnaire.assert_called_once_with(
identifier='foo', request=self.request
)
@override_settings(NOTIFICATIONS_FINISH_EDITING='setting')
@mock.patch.object(LogInformationUpdateCreateView, 'get_questionnaire')
@mock.patch.object(LogInformationUpdateCreateView, 'get_compiler')
def test_post(self, mock_get_compiler, mock_get_questionnaire):
compiler = mock.MagicMock()
mock_get_questionnaire.return_value = mock.sentinel.questionnaire
mock_get_compiler.return_value = compiler
request = RequestFactory().post(self.url, data={
'identifier': 'foo',
'message': 'bar'
})
with mock.patch('apps.notifications.views.InformationLog') as mock_create:
self.setup_view(view=self.view, request=self.request).post(request)
mock_create.assert_called_once_with(
action='setting',
questionnaire=mock.sentinel.questionnaire,
receiver=compiler,
sender='foo'
)
class LogSubscriptionPreferencesMixinTest(TestCase):
def setUp(self):
self.url = reverse('notification_preferences')
self.view = LogSubscriptionPreferencesView()
self.request = RequestFactory().get(self.url)
self.user = mommy.make(_model=get_user_model())
self.obj = self.user.mailpreferences
self.request.user = self.user
self.request._messages = mock.MagicMock()
self.view = self.setup_view(view=self.view, request=self.request)
self.view.object = self.obj
def test_get_initial(self):
self.obj.wanted_actions = 'some,thing,yay'
self.assertEqual(
['some', 'thing', 'yay'],
self.view.get_initial()['wanted_actions']
)
def test_get_form_valid_changed_language(self):
self.view.object = mock.MagicMock()
self.view.object.has_changed_language = False
form = mock.MagicMock()
form.changed_data = ['language']
self.view.form_valid(form)
self.assertTrue(self.view.object.has_changed_language)
def test_get_form_valid_message(self):
self.view.form_valid(mock.MagicMock())
self.assertTrue(self.request._messages.method_calls)
class SignedLogSubscriptionPreferencesViewTest(TestCase):
def setUp(self):
self.user = mommy.make(_model=get_user_model())
self.obj = self.user.mailpreferences
self.view = SignedLogSubscriptionPreferencesView()
self.request = RequestFactory().get(str(self.obj.get_signed_url()))
self.request._messages = mock.MagicMock()
self.view = self.setup_view(view=self.view, request=self.request)
self.view.object = self.obj
def test_get_success_url_signed(self):
mock_user = mock.MagicMock(return_value=self.user)
mock_user.is_authenticated = False
mock_user.id = self.user.id
self.request.user = mock_user
self.assertEqual(
self.view.get_success_url(),
self.obj.get_signed_url()
)
def test_get_success_url_user(self):
self.request.user = self.user
self.assertEqual(
self.view.get_success_url(),
reverse('notification_preferences')
)
def test_get_object_user(self):
self.request.user = self.user
self.assertEqual(
self.view.get_object(),
self.obj
)
def test_get_signed_object(self):
mock_user = mock.MagicMock(return_value=self.user)
mock_user.is_authenticated = False
mock_user.id = self.user.id
self.request.user = mock_user
self.view.kwargs['token'] = mock.MagicMock()
with mock.patch.object(Signer, 'unsign') as mock_unsign:
mock_unsign.return_value = self.obj.id
self.assertEqual(
self.view.get_object(), self.obj
)
mock_unsign.assert_called_with(self.view.kwargs['token'])
def test_get_signed_object_404(self):
mock_user = mock.MagicMock(return_value=self.user)
mock_user.is_authenticated = False
mock_user.id = self.user.id
self.request.user = mock_user
self.view.kwargs['token'] = mock.MagicMock()
with self.assertRaises(Http404):
self.view.get_object()
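# Hedged sketch of the setup_view() helper these tests lean on (defined in
# apps.qcat.tests in the real project); a minimal stand-in simply wires the
# request and URL args onto the class-based-view instance, in the spirit of
# Django's View.setup().
def setup_view(view, request, *args, **kwargs):
    view.request = request
    view.args = args
    view.kwargs = kwargs
    return view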
| 37.958785
| 91
| 0.66821
| 2,067
| 17,499
| 5.375907
| 0.106434
| 0.047516
| 0.021598
| 0.021598
| 0.553456
| 0.454824
| 0.37374
| 0.339093
| 0.313265
| 0.293647
| 0
| 0.006123
| 0.225327
| 17,499
| 460
| 92
| 38.041304
| 0.813588
| 0.016401
| 0
| 0.274151
| 0
| 0
| 0.082127
| 0.043824
| 0
| 0
| 0
| 0.002174
| 0.117493
| 1
| 0.138381
| false
| 0
| 0.039164
| 0
| 0.201044
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ec667f34cc8524a0bd9453e82114220e88aef5a
| 813
|
py
|
Python
|
photos/urls.py
|
charlesmugambi/Instagram
|
3a9dfc32c45bf9f221b22b7075ce31b1a16dcba7
|
[
"MIT"
] | null | null | null |
photos/urls.py
|
charlesmugambi/Instagram
|
3a9dfc32c45bf9f221b22b7075ce31b1a16dcba7
|
[
"MIT"
] | null | null | null |
photos/urls.py
|
charlesmugambi/Instagram
|
3a9dfc32c45bf9f221b22b7075ce31b1a16dcba7
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^image/$', views.add_image, name='upload_image'),
url(r'^profile/$', views.profile_info, name='profile'),
url(r'^update/$', views.profile_update, name='update'),
url(r'^comment/(?P<image_id>\d+)', views.comment, name='comment'),
url(r'^search/', views.search_results, name = 'search_results'),
url(r'^follow/(?P<user_id>\d+)', views.follow, name = 'follow'),
url(r'^unfollow/(?P<user_id>\d+)', views.unfollow, name='unfollow'),
url(r'^likes/(\d+)/$', views.like_images,name='likes')
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
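# Hedged sketch: the named capture groups above can be exercised directly
# with re, mirroring how Django pulls image_id out of the comment route.
import re
match = re.match(r'^comment/(?P<image_id>\d+)', 'comment/42')
print(match.group('image_id'))  # -> '42'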
| 42.789474
| 80
| 0.675277
| 117
| 813
| 4.581197
| 0.307692
| 0.067164
| 0.078358
| 0.067164
| 0.048507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120541
| 813
| 18
| 81
| 45.166667
| 0.74965
| 0
| 0
| 0
| 0
| 0
| 0.242312
| 0.093481
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.235294
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ec7068e816bc6b2d31f51831d9d75f6ffc1151c
| 11,247
|
py
|
Python
|
bread.py
|
vgfang/breadbot
|
e58807431945e6d4de8dfc6c4dc4c90caebf88ca
|
[
"MIT"
] | null | null | null |
bread.py
|
vgfang/breadbot
|
e58807431945e6d4de8dfc6c4dc4c90caebf88ca
|
[
"MIT"
] | null | null | null |
bread.py
|
vgfang/breadbot
|
e58807431945e6d4de8dfc6c4dc4c90caebf88ca
|
[
"MIT"
] | null | null | null |
import random
import math
from fractions import Fraction
from datetime import datetime
from jinja2 import Template
# empty class for passing to template engine
class Recipe:
def __init__(self):
return
# returns flour percent using flour type
def get_special_flour_percent(flourType: str, breadFlourPercent:int) -> int:
if flourType == 'Hard Red Whole Wheat' or flourType == 'Hard White Wheat':
percentages = [0,25,30,35,40,45,50]
percentages = list(filter(lambda x: 100-breadFlourPercent >= x, percentages))
return random.choice(percentages)
elif flourType == 'Rye' and breadFlourPercent >= 75:
percentages = [0,10,15,20]
percentages = list(filter(lambda x: 100-breadFlourPercent >= x, percentages))
return random.choice(percentages)
else:
percentages = [0,10,15,20,25,30]
percentages = list(filter(lambda x: 100-breadFlourPercent >= x, percentages))
return random.choice(percentages)
# returns multiplied spoon units from teaspoon fraction input, 3 tsp = 1 tbsp
def spoon_mult(tsp: Fraction, multiplier: float) -> str:
tsp *= Fraction(multiplier)
spoonString = ""
if tsp >= 3: # use tablespoons
tablespoons = int(tsp // 3)
remainder = (tsp % 3) / 3
if tablespoons != 0:
spoonString += f"{tablespoons} "
if remainder.numerator != 0:
spoonString += f"{remainder.numerator}/{remainder.denominator} "
return f"{spoonString}tbsp"
else:
teaspoons = int(tsp // 1)
remainder = tsp % 1
if teaspoons != 0:
spoonString += f"{teaspoons} "
if remainder.numerator != 0:
spoonString += f"{remainder.numerator}/{remainder.denominator} "
return f"{spoonString}tsp"
# returns amount given the type of flavoring (spices)
def get_flavor_amount(flavor: str, flourAmount: int) -> str:
colorsDict = {}
scale = 4 # floors to the 500g/scale for clean fractional multiplication
multiplier = math.floor(flourAmount/500*scale) / scale
# flavors in category
red = ('Cardamom', 'Nutmeg','Hazelnut','Almond','Lemon Extract','Peppermint')
blue = ('Cinnamon', 'Allspice')
green = ('Vanilla', 'Instant Coffee')
purple = ('Orange Zest', 'Lime Zest', 'Lemon Zest', 'Ginger')
orange = ('Lavender', 'Hojicha', 'Matcha', 'Earl Grey', 'Oolong')
# default possible teaspoon values list for flour = 500, 3 tsp = 1 tbsp
redAmt = list(map(Fraction, [1/4, 1/2]))
blueAmt = list(map(Fraction, [1/4, 1/2, 1]))
greenAmt = list(map(Fraction, [1/2, 1, 3/2]))
purpleAmt = list(map(Fraction, [2, 3, 9/2]))
orangeAmt = list(map(Fraction, [9]))
# random tablespoons
colorsDict[red] = list(map(lambda x: spoon_mult(x, multiplier), redAmt))
colorsDict[blue] = list(map(lambda x: spoon_mult(x, multiplier), blueAmt))
colorsDict[green] = list(map(lambda x: spoon_mult(x, multiplier), greenAmt))
colorsDict[purple] = list(map(lambda x: spoon_mult(x, multiplier), purpleAmt))
colorsDict[orange] = list(map(lambda x: spoon_mult(x, multiplier), orangeAmt))
for color in colorsDict.keys():
if flavor in color:
return random.choice(colorsDict[color])
# print("Error in Flavor Input: " + flavor)
return "get_flavor_amount wrong input"
# returns list of spices using number of spices
def get_spices(spicesNum: int) -> [str]:
spicesList = ['Cinnamon', 'Allspice', 'Cardamom', 'Nutmeg']
if spicesNum > len(spicesList):
print("WARNING: spicesNum exceeds spices of num")
return spicesList
if spicesNum == 1:
return random.sample(['Cinnamon', 'Cardamom'], 1)
return random.sample(spicesList, spicesNum)
# check if extract is nut
def is_nut(extract: str) -> bool:
nuts = ['Hazelnut','Almond']
return extract in nuts
# checks if extract1 and extract2 are both allowed based on zest/extract same flavor
def zest_extract_same_flavor(extract1: str, extract2: str) -> bool:
if extract1 == extract2:
return False
e1 = extract1.split(" ") # may need to change if new types are added
e2 = extract2.split(" ")
if len(e1) != 2 or len(e2) != 2:
return False
if e1[0]==e2[0] and 'Zest' in [e1[1],e2[1]] and 'Extract' in [e1[1],e2[1]]:
return True
return False
# return list of extracts using number of extracts
def get_extracts(extractsNum: int) -> [str]:
if extractsNum == 0:
return []
allowedExtracts = ['Vanilla', 'Hazelnut', 'Almond', 'Lemon Extract', 'Peppermint',
'Orange Zest', 'Lime Zest', 'Lemon Zest', 'Ginger']
# if more than one, vanilla must be included
currentExtracts = ['Vanilla']
allowedExtracts.remove('Vanilla')
extractsLeft = extractsNum-1
while extractsLeft > 0:
if len(allowedExtracts) <= 0:
print("Incorrecnt number of extracts")
return "Incorrecnt number of extracts"
newExtract = random.choice(allowedExtracts)
# one nut at a time
if True in map(is_nut, currentExtracts) and is_nut(newExtract):
allowedExtracts.remove(newExtract)
continue # skips decrement, try again
# no zest + extract combination of the same flavor
for currentExtract in currentExtracts:
exit = False
if zest_extract_same_flavor(currentExtract, newExtract):
allowedExtracts.remove(newExtract)
exit = True # skips decrement, try again
if exit:
continue
# passed constraints, remove it from allowed
currentExtracts.append(newExtract)
if newExtract in allowedExtracts:
allowedExtracts.remove(newExtract)
extractsLeft -= 1
return currentExtracts
# return percentage of enrichment
def get_enrichment_percent(enrichment: str) -> int:
if enrichment == 'Cream Cheese':
return 10
return 5
# return liquid percent from liquid type
def get_liquid_percent(liquidType: str) -> int:
if liquidType in ['Heavy Cream', 'Coconut Milk']:
return 13
elif liquidType in ['Cow Milk']:
return 63
# print("Error in liquidType input.")
return -1
# return fruit puree choice(s); fruit chance weighting omitted for now
def get_fruit_purees() -> [str]:
fruitPureesNum = random.randint(1,2)
fruitPureesChoices = ['Banana','Apple','Cherry','Strawberry','Fig','Mango']
return random.sample(fruitPureesChoices, fruitPureesNum)
# return fruit puree percent from 0-2 fruitPurees using random generation
def get_fruit_purees_percent(fruitPurees) -> [float]:
totalFruitPureePercent = random.choice([25,30,35,40,45,50])
fruitPureeNum = len(fruitPurees)
if fruitPureeNum == 1:
return [totalFruitPureePercent]
elif fruitPureeNum == 2:
firstPercent = random.randint(0,totalFruitPureePercent)
return [firstPercent, totalFruitPureePercent - firstPercent]
return [0]
# returns rounded gram conversion from percent, used in template
def to_g(flourMl, percent) -> int:
return round(flourMl * percent/100)
# takes filename and writes an html recipe file
def generate_recipe(breadname: str, filename: str, flourGramInput: int) -> str:
# ALL NUMERICAL VALUES REPRESENT PERCENTAGES
r = Recipe()
r.breadname = breadname
r.totalFlourGrams = flourGramInput
r.totalLiquidPercent = 63
r.preferment = random.choice(['Poolish', 'None'])
r.breadFlourPercent = random.choice([75, 50])
# FLOUR STYLE
r.breadShape = random.choice(['Pullman', 'Regular'])
# FLOUR TYPES
r.specialFlour = random.choice([
'Einkorn',
'Khorasan',
'Spelt',
'Emmer',
'Semolina (Durum)',
'Hard Red Whole Wheat',
'Regular Whole Wheat',
'Hard White Wheat',
'Rye'
])
r.specialFlourPercent = get_special_flour_percent(r.specialFlour, r.breadFlourPercent)
r.whiteFlourPercent = 100 - r.breadFlourPercent - r.specialFlourPercent
# SPICES/FLAVORING
spicesNum = random.randint(0,4)
r.spices = get_spices(spicesNum)
extractsNum = random.randint(0,3)
r.extracts = get_extracts(extractsNum)
teaList = ['Lavender', 'Hojicha', 'Matcha', 'Earl Grey', 'Oolong', 'Instant Coffee']
r.tea = random.choice(teaList)
# illegal with fruit purees and all extracts but ginger, almond, and hazelnut
# BASIC INGREDIENTS
r.sugar = random.choice(['Brown Sugar','White Sugar','Honey','Molasses'])
r.sugarPercent = random.choice([5,10,15])
r.salt = 'Table Salt'
r.saltPercent = random.choice([1,1.5,2])
r.yeast = random.choice(['Instant Yeast','Active Yeast'])
r.yeastPercent = 0.62
# ENRICHMENTS – All 5%, only one chosen
enrichmentList = ['Olive Oil','Butter','Cream Cheese','Coconut oil']
if r.tea == 'Instant Coffee':
enrichmentList.remove('Olive Oil')
r.enrichment = random.choice(enrichmentList)
r.enrichmentPercent = get_enrichment_percent(r.enrichment)
if r.enrichment == 'Cream Cheese':
r.totalLiquidPercent -= 5
# LIQUIDS
# cap total liquid at 60% when these sugars are used
if r.sugar in ['Honey', 'Molasses']:
r.totalLiquidPercent = 60
# cow milk only if there is no preferment
viableLiquids = ['Heavy Cream', 'Coconut Milk', 'Cow Milk']
if r.preferment != 'None':
viableLiquids.remove('Cow Milk')
r.liquid = random.choice(viableLiquids)
r.liquidPercent = get_liquid_percent(r.liquid)
## LIQUIDS - FRUIT PUREE
r.fruitPurees = []
r.fruitPureesPercent = []
if r.preferment != 'Poolish':
# fruit purees are included whenever there is no poolish,
# with a 5 percent sugar reduction to compensate
r.sugarPercent -= 5
r.fruitPurees = get_fruit_purees()
r.fruitPureesPercent = get_fruit_purees_percent(r.fruitPurees)
# account for cow milk
r.liquidPercent = min(r.liquidPercent, r.totalLiquidPercent - sum(r.fruitPureesPercent))
r.waterPercent = max(0, r.totalLiquidPercent - sum(r.fruitPureesPercent) - r.liquidPercent)
# BICOLOR ROLL
r.isBicolorRoll = False
if len(r.fruitPureesPercent) > 0 or r.tea in ['Lavender', 'Hojicha', 'Matcha', 'Earl Grey', 'Oolong']:
r.isBicolorRoll = random.choice([True,False])
# COCOA POWDER
r.cocoaPowderPercent = 0
cocoaPowderAllowedExtracts = ['Ginger', 'Almond', 'Hazelnut']
if r.fruitPurees == [] and any(not x in cocoaPowderAllowedExtracts for x in r.extracts): # allowed
if random.randint(0,2) == 0:
r.tea = '' # removes tea
r.cocoaPowderPercent = round(random.choice([5,10])/100 * r.whiteFlourPercent,1)
r.whiteFlourPercent = round(r.whiteFlourPercent - r.cocoaPowderPercent,1)
# WRITE FORMAT
time = datetime.now()
r.datetime = time.strftime('%A, %b %d %Y')
templateFile = open("./template.html")
templateString = templateFile.read()
## Conversion to grams from percentages
r.totalLiquidGrams = to_g(r.totalFlourGrams, r.totalLiquidPercent)
r.breadFlourGrams = to_g(r.totalFlourGrams, r.breadFlourPercent)
r.specialFlourGrams = to_g(r.totalFlourGrams, r.specialFlourPercent)
r.whiteFlourGrams = to_g(r.totalFlourGrams, r.whiteFlourPercent)
r.sugarGrams = to_g(r.totalFlourGrams, r.sugarPercent)
r.saltGrams = to_g(r.totalFlourGrams, r.saltPercent)
r.yeastGrams = to_g(r.totalFlourGrams, r.yeastPercent)
r.spicesAmt = list(map(lambda x: get_flavor_amount(x, r.totalFlourGrams), r.spices))
r.extractsAmt = list(map(lambda x: get_flavor_amount(x, r.totalFlourGrams), r.extracts))
r.teaAmt = get_flavor_amount(r.tea, r.totalFlourGrams)
r.enrichmentGrams = to_g(r.totalFlourGrams, r.enrichmentPercent)
r.waterGrams = to_g(r.totalFlourGrams, r.waterPercent)
r.liquidGrams = to_g(r.totalFlourGrams, r.liquidPercent)
r.fruitPureesGrams = list(map(lambda x: to_g(r.totalFlourGrams,x), r.fruitPureesPercent))
r.cocoaPowderGrams = round(r.cocoaPowderPercent/100 * r.totalFlourGrams)
template = Template(templateString)
htmlString = template.render(r = r)
outfile = open(f'{filename}', 'w')
outfile.write(htmlString)
outfile.close()
templateFile.close()
return htmlString
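# Hedged usage sketch: spoon_mult (defined above) carries the 3 tsp = 1 tbsp
# rule across the multiplication, so doubling 2 tsp lands in tablespoons.
from fractions import Fraction
print(spoon_mult(Fraction(2), 2.0))  # -> '1 1/3 tbsp' (4 tsp total)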
| 36.996711
| 103
| 0.727927
| 1,465
| 11,247
| 5.541297
| 0.251877
| 0.028086
| 0.027223
| 0.025745
| 0.166297
| 0.126386
| 0.100148
| 0.087337
| 0.066396
| 0.066396
| 0
| 0.021022
| 0.145639
| 11,247
| 304
| 104
| 36.996711
| 0.823707
| 0.156664
| 0
| 0.087719
| 0
| 0
| 0.126762
| 0.009539
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061404
| false
| 0
| 0.02193
| 0.008772
| 0.219298
| 0.008772
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ec7d9e291a15b37ad7f7b106420f6f50a25a3a0
| 1,248
|
py
|
Python
|
tutorial/test input.py
|
nataliapryakhina/FA_group3
|
3200464bc20d38a85af9ad3583a360db4ffb7f8d
|
[
"MIT"
] | null | null | null |
tutorial/test input.py
|
nataliapryakhina/FA_group3
|
3200464bc20d38a85af9ad3583a360db4ffb7f8d
|
[
"MIT"
] | null | null | null |
tutorial/test input.py
|
nataliapryakhina/FA_group3
|
3200464bc20d38a85af9ad3583a360db4ffb7f8d
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
from os import listdir
from tensorflow.keras.callbacks import ModelCheckpoint
dataDir = "./data/trainSmallFA/"
files = listdir(dataDir)
files.sort()
totalLength = len(files)
inputs = np.empty((len(files), 3, 64, 64))
targets = np.empty((len(files), 3, 64, 64))
for i, file in enumerate(files):
npfile = np.load(dataDir + file)
d = npfile['a']
inputs[i] = d[0:3] # inx, iny, mask
targets[i] = d[3:6] # p, velx, vely
# print("inputs shape = ", inputs.shape)
print(np.shape(targets[:, 1, :, :].flatten()))
maxvel = np.amax(np.sqrt(targets[:, 1, :, :]* targets[:, 1, :, :]
+ targets[:, 2, :, :]* targets[:, 2, :, :]))
print(maxvel)
targets[:, 1:3, :, :] /= maxvel
targets[:, 0, :, :] /= np.amax(targets[:, 0, :, :])
for input in inputs:
plt.figure(num=None, figsize=(20, 10), dpi=80, facecolor='w', edgecolor='k')
# predicted data
plt.subplot(331)
plt.title('x vel')
plt.imshow(input[0, :, :], cmap='jet') # vmin=-100,vmax=100, cmap='jet')
plt.colorbar()
plt.subplot(332)
plt.title('y vel')
plt.imshow(input[1, :, :], cmap='jet')
plt.colorbar()
plt.show()
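# Hedged, self-contained sketch of the normalization above on a toy array:
# velocity channels 1-2 are scaled by the max speed, pressure channel 0 by
# its own max, matching the targets[...] lines in the script.
import numpy as np
t = np.random.rand(5, 3, 8, 8)
maxvel = np.amax(np.sqrt(t[:, 1] ** 2 + t[:, 2] ** 2))
t[:, 1:3] /= maxvel
t[:, 0] /= np.amax(t[:, 0])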
| 30.439024
| 80
| 0.600962
| 177
| 1,248
| 4.237288
| 0.457627
| 0.042667
| 0.026667
| 0.04
| 0.109333
| 0.053333
| 0.053333
| 0
| 0
| 0
| 0
| 0.042786
| 0.194712
| 1,248
| 41
| 81
| 30.439024
| 0.703483
| 0.091346
| 0
| 0.058824
| 0
| 0
| 0.034544
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.176471
| 0
| 0.176471
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ec8d0b22163c94b04ce1660f7662d06d776efe5
| 2,781
|
py
|
Python
|
pepper/responder/brain.py
|
cltl/pepper
|
5d34fc5074473163aa9273016d89e5e2b8edffa9
|
[
"MIT"
] | 29
|
2018-01-20T08:51:42.000Z
|
2022-01-25T11:59:28.000Z
|
pepper/responder/brain.py
|
cltl/pepper
|
5d34fc5074473163aa9273016d89e5e2b8edffa9
|
[
"MIT"
] | 32
|
2018-09-20T13:09:34.000Z
|
2021-06-04T15:23:45.000Z
|
pepper/responder/brain.py
|
cltl/pepper
|
5d34fc5074473163aa9273016d89e5e2b8edffa9
|
[
"MIT"
] | 10
|
2018-10-25T02:45:21.000Z
|
2020-10-03T12:59:10.000Z
|
from pepper.framework import *
from pepper import logger
from pepper.language import Utterance
from pepper.language.generation.thoughts_phrasing import phrase_thoughts
from pepper.language.generation.reply import reply_to_question
from .responder import Responder, ResponderType
from pepper.language import UtteranceType
from pepper.knowledge import sentences, animations
from random import choice
import re
from typing import Optional, Union, Tuple, Callable
class BrainResponder(Responder):
def __init__(self):
self._log = logger.getChild(self.__class__.__name__)
@property
def type(self):
return ResponderType.Brain
@property
def requirements(self):
return [TextToSpeechComponent, BrainComponent]
def respond(self, utterance, app):
# type: (Utterance, Union[TextToSpeechComponent, BrainComponent]) -> Optional[Tuple[float, Callable]]
try:
utterance.analyze()
self._log.debug("TRIPLE: {}".format(utterance.triple))
if utterance.triple is not None:
brain_response_statement = []
brain_response_question = []
if utterance.type == UtteranceType.QUESTION:
brain_response_question = app.brain.query_brain(utterance)
reply = reply_to_question(brain_response_question)
self._log.info("REPLY to question: {}".format(reply))
else:
brain_response_statement = app.brain.update(utterance, reason_types=True) # Searches for types in dbpedia
reply = phrase_thoughts(brain_response_statement, True, True, True)
self._log.info("REPLY to statement: {}".format(reply))
if (isinstance(reply, str) or isinstance(reply, unicode)) and reply != "":
# Return Score and Response
# Make sure to not execute the response here, but just to return the response function
return 1.0, lambda: app.say(re.sub(r"[\s+_]", " ", reply))
elif brain_response_statement:
# Thank Human for the Data!
return 1.0, lambda: app.say("{} {}".format(choice([choice(sentences.THANK), choice(sentences.HAPPY)]),
choice(sentences.PARSED_KNOWLEDGE)), animations.HAPPY)
elif brain_response_question:
# Apologize to human for not knowing
return 1.0, lambda: app.say("{} {}".format(choice(sentences.SORRY),
choice(sentences.NO_ANSWER)), animations.ASHAMED)
except Exception as e:
self._log.error(e)
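# Hedged sketch of the responder contract used above: respond() yields an
# optional (score, thunk) pair and a dispatcher picks the best candidate
# before executing it; dispatch() is illustrative, not part of the source.
def dispatch(responders, utterance, app):
    candidates = [r.respond(utterance, app) for r in responders]
    candidates = [c for c in candidates if c is not None]
    if candidates:
        score, say = max(candidates, key=lambda c: c[0])
        say()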
| 41.507463
| 126
| 0.612729
| 287
| 2,781
| 5.780488
| 0.369338
| 0.062688
| 0.0434
| 0.025316
| 0.072333
| 0.050633
| 0.038577
| 0.038577
| 0
| 0
| 0
| 0.003086
| 0.300971
| 2,781
| 66
| 127
| 42.136364
| 0.850309
| 0.108234
| 0
| 0.044444
| 0
| 0
| 0.028306
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.244444
| 0.044444
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ecb2c7a8dccded4280171cf1a9314223cfca421
| 3,611
|
py
|
Python
|
tests/components/airthings/test_config_flow.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/airthings/test_config_flow.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/airthings/test_config_flow.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Test the Airthings config flow."""
from unittest.mock import patch
import airthings
from homeassistant import config_entries
from homeassistant.components.airthings.const import CONF_ID, CONF_SECRET, DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM
from tests.common import MockConfigEntry
TEST_DATA = {
CONF_ID: "client_id",
CONF_SECRET: "secret",
}
async def test_form(hass: HomeAssistant) -> None:
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with patch("airthings.get_token", return_value="test_token",), patch(
"homeassistant.components.airthings.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_DATA,
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == "Airthings"
assert result2["data"] == TEST_DATA
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass: HomeAssistant) -> None:
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"airthings.get_token",
side_effect=airthings.AirthingsAuthError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_DATA,
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass: HomeAssistant) -> None:
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"airthings.get_token",
side_effect=airthings.AirthingsConnectionError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_DATA,
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_error(hass: HomeAssistant) -> None:
"""Test we handle unknown error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"airthings.get_token",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_DATA,
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "unknown"}
async def test_flow_entry_already_exists(hass: HomeAssistant) -> None:
"""Test user input for config_entry that already exists."""
first_entry = MockConfigEntry(
domain="airthings",
data=TEST_DATA,
unique_id=TEST_DATA[CONF_ID],
)
first_entry.add_to_hass(hass)
with patch("airthings.get_token", return_value="token"):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=TEST_DATA
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
| 30.601695
| 84
| 0.675159
| 426
| 3,611
| 5.462441
| 0.197183
| 0.083799
| 0.058015
| 0.085088
| 0.529437
| 0.495917
| 0.453373
| 0.421573
| 0.421573
| 0.421573
| 0
| 0.004931
| 0.213791
| 3,611
| 117
| 85
| 30.863248
| 0.814723
| 0.008585
| 0
| 0.388235
| 0
| 0
| 0.11371
| 0.015398
| 0
| 0
| 0
| 0
| 0.164706
| 1
| 0
| false
| 0
| 0.082353
| 0
| 0.082353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ecc375d6cf3b58f62ba3d07d23244af90a9b759
| 1,036
|
py
|
Python
|
worker/main.py
|
Devalent/facial-recognition-service
|
342e31fa7d016992d938b0121b03f0e8fe776ea8
|
[
"MIT"
] | null | null | null |
worker/main.py
|
Devalent/facial-recognition-service
|
342e31fa7d016992d938b0121b03f0e8fe776ea8
|
[
"MIT"
] | null | null | null |
worker/main.py
|
Devalent/facial-recognition-service
|
342e31fa7d016992d938b0121b03f0e8fe776ea8
|
[
"MIT"
] | null | null | null |
from aiohttp import web
import base64
import io
import face_recognition
async def encode(request):
request_data = await request.json()
# Read base64 encoded image
url = request_data['image'].split(',')[1]
image = io.BytesIO(base64.b64decode(url))
# Load image data
np_array = face_recognition.load_image_file(image)
# Find face locations
locations = face_recognition.face_locations(np_array)
# Create face encodings
encodings = face_recognition.face_encodings(np_array, locations)
results = []
for i in range(len(locations)):
top, right, bottom, left = locations[i]
result = {
'x': left,
'y': top,
'width': right - left,
'height': bottom - top,
'encodings': encodings[i].tolist()
}
results.append(result)
return web.json_response(results)
def main():
app = web.Application()
app.router.add_post('/encode', encode)
web.run_app(app, host='0.0.0.0', port='3000')
main()
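# Hedged client sketch (not part of the worker): POST a base64 data-URL image
# to /encode with requests; the payload shape matches what encode() reads
# from request_data['image']. 'face.jpg' is a hypothetical local file.
import base64
import requests

with open('face.jpg', 'rb') as f:
    payload = 'data:image/jpeg;base64,' + base64.b64encode(f.read()).decode()
resp = requests.post('http://localhost:3000/encode', json={'image': payload})
print(resp.json())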
| 22.521739
| 68
| 0.625483
| 127
| 1,036
| 4.976378
| 0.480315
| 0.094937
| 0.060127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022021
| 0.254826
| 1,036
| 45
| 69
| 23.022222
| 0.796632
| 0.080116
| 0
| 0
| 0
| 0
| 0.048523
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.142857
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ece61d6db781e687c9a0cc4ff7c881e2a9a0b06
| 346
|
py
|
Python
|
project4/test/test_arm.py
|
XDZhelheim/CS205_C_CPP_Lab
|
f585fd685a51e19fddc9c582846547d34442c6ef
|
[
"MIT"
] | 3
|
2022-01-11T08:12:40.000Z
|
2022-03-27T08:15:45.000Z
|
project4/test/test_arm.py
|
XDZhelheim/CS205_C_CPP_Lab
|
f585fd685a51e19fddc9c582846547d34442c6ef
|
[
"MIT"
] | null | null | null |
project4/test/test_arm.py
|
XDZhelheim/CS205_C_CPP_Lab
|
f585fd685a51e19fddc9c582846547d34442c6ef
|
[
"MIT"
] | 2
|
2022-03-03T03:01:20.000Z
|
2022-03-27T08:16:02.000Z
|
import os
if __name__ == "__main__":
dims = ["32", "64", "128", "256", "512", "1024", "2048"]
for dim in dims:
os.system(
f"perf stat -e r11 -x, -r 10 ../matmul.out ../data/mat-A-{dim}.txt ../data/mat-B-{dim}.txt ./out/out-{dim}.txt 2>>res_arm.csv"
)
print(f"Finished {dim}")
print("Finished.")
| 26.615385
| 138
| 0.514451
| 53
| 346
| 3.188679
| 0.716981
| 0.106509
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100386
| 0.251445
| 346
| 13
| 139
| 26.615385
| 0.552124
| 0
| 0
| 0
| 0
| 0.111111
| 0.504323
| 0.132565
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.222222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ecf9572bf4b2d6c4df42c5a6542407de0db8c29
| 6,920
|
py
|
Python
|
jaxformer/hf/sample.py
|
salesforce/CodeGen
|
2ca076874ca2d26c2437df2968f6c43df92748bc
|
[
"BSD-3-Clause"
] | 105
|
2022-03-29T23:45:55.000Z
|
2022-03-31T23:57:14.000Z
|
jaxformer/hf/sample.py
|
salesforce/CodeGen
|
2ca076874ca2d26c2437df2968f6c43df92748bc
|
[
"BSD-3-Clause"
] | 2
|
2022-03-31T04:18:49.000Z
|
2022-03-31T17:58:09.000Z
|
jaxformer/hf/sample.py
|
salesforce/CodeGen
|
2ca076874ca2d26c2437df2968f6c43df92748bc
|
[
"BSD-3-Clause"
] | 6
|
2022-03-30T06:05:39.000Z
|
2022-03-31T21:01:27.000Z
|
# Copyright (c) 2022, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
import os
import re
import time
import random
import argparse
import torch
from transformers import GPT2TokenizerFast
from jaxformer.hf.codegen.modeling_codegen import CodeGenForCausalLM
########################################################################
# util
class print_time:
def __init__(self, desc):
self.desc = desc
def __enter__(self):
print(self.desc)
self.t = time.time()
def __exit__(self, type, value, traceback):
print(f'{self.desc} took {time.time()-self.t:.02f}s')
def set_env():
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
def set_seed(seed, deterministic=True):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = deterministic
torch.backends.cudnn.benchmark = not deterministic
# torch.use_deterministic_algorithms(deterministic)
def cast(model, fp16=True):
if fp16:
model.half()
return model
########################################################################
# model
def create_model(ckpt, fp16=True):
if fp16:
return CodeGenForCausalLM.from_pretrained(ckpt, revision='float16', torch_dtype=torch.float16, low_cpu_mem_usage=True)
else:
return CodeGenForCausalLM.from_pretrained(ckpt)
def create_tokenizer():
t = GPT2TokenizerFast.from_pretrained('gpt2')
t.max_model_input_sizes['gpt2'] = 1e20
return t
def include_whitespace(t, n_min=2, n_max=20, as_special_tokens=False):
t.add_tokens([' ' * n for n in reversed(range(n_min, n_max))], special_tokens=as_special_tokens)
return t
def include_tabs(t, n_min=2, n_max=20, as_special_tokens=False):
t.add_tokens(['\t' * n for n in reversed(range(n_min, n_max))], special_tokens=as_special_tokens)
return t
def create_custom_gpt2_tokenizer():
t = create_tokenizer()
t = include_whitespace(t=t, n_min=2, n_max=32, as_special_tokens=False)
t = include_tabs(t=t, n_min=2, n_max=10, as_special_tokens=False)
return t
########################################################################
# sample
def sample(
device,
model,
tokenizer,
context,
pad_token_id,
num_return_sequences=1,
temp=0.2,
top_p=0.95,
max_length_sample=128,
max_length=2048
):
input_ids = tokenizer(
context,
truncation=True,
padding=True,
max_length=max_length,
return_tensors='pt',
).input_ids
input_ids_len = input_ids.shape[1]
assert input_ids_len < max_length
with torch.no_grad():
input_ids = input_ids.to(device)
tokens = model.generate(
input_ids,
do_sample=True,
num_return_sequences=num_return_sequences,
temperature=temp,
max_length=input_ids_len + max_length_sample,
top_p=top_p,
pad_token_id=pad_token_id,
use_cache=True,
)
text = tokenizer.batch_decode(tokens[:, input_ids_len:, ...])
return text
def truncate(completion):
def find_re(string, pattern, start_pos):
m = pattern.search(string, start_pos)
return m.start() if m else -1
terminals = [
re.compile(r, re.MULTILINE)
for r in
[
'^#',
re.escape('<|endoftext|>'),
"^'''",
'^"""',
'\n\n\n'
]
]
prints = list(re.finditer('^print', completion, re.MULTILINE))
if len(prints) > 1:
completion = completion[:prints[1].start()]
defs = list(re.finditer('^def', completion, re.MULTILINE))
if len(defs) > 1:
completion = completion[:defs[1].start()]
start_pos = 0
terminals_pos = [pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1]
if len(terminals_pos) > 0:
return completion[:min(terminals_pos)]
else:
return completion
def test_truncate():
assert truncate('\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#') == '\nif len_a > len_b:\n result = a\nelse:\n result = b'
########################################################################
# main
def main():
# (0) constants
models_nl = ['codegen-350M-nl', 'codegen-2B-nl', 'codegen-6B-nl', 'codegen-16B-nl']
models_pl = ['codegen-350M-multi', 'codegen-2B-multi', 'codegen-6B-multi', 'codegen-16B-multi', 'codegen-350M-mono', 'codegen-2B-mono', 'codegen-6B-mono', 'codegen-16B-mono']
models = models_nl + models_pl
# (1) params
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, choices=models, default='codegen-350M-mono')
parser.add_argument('--device', type=str, default='cuda:0')
parser.add_argument('--rng-seed', type=int, default=42)
parser.add_argument('--rng-deterministic', type=bool, default=True)
parser.add_argument('--p', type=float, default=0.95)
parser.add_argument('--t', type=float, default=0.2)
parser.add_argument('--max-length', type=int, default=128)
parser.add_argument('--batch-size', type=int, default=1)
parser.add_argument('--no-fp16', action="store_false")
parser.add_argument('--pad', type=int, default=50256)
parser.add_argument('--context', type=str, default='def helloworld():')
args = parser.parse_args()
# (2) preamble
set_env()
set_seed(args.rng_seed, deterministic=args.rng_deterministic)
device = torch.device(args.device)
if device.type == "cpu":
args.no_fp16 = False
if args.model.startswith("codegen-16B"):
args.no_fp16 = True
ckpt = f'./checkpoints/{args.model}'
# (3) load
with print_time('loading parameters'):
model = create_model(ckpt=ckpt, fp16=args.no_fp16).to(device)
with print_time('loading tokenizer'):
if args.model in models_pl:
tokenizer = create_custom_gpt2_tokenizer()
else:
tokenizer = create_tokenizer()
tokenizer.padding_side = 'left'
tokenizer.pad_token = args.pad
# (4) sample
with print_time('sampling'):
completion = sample(device=device, model=model, tokenizer=tokenizer, context=args.context, pad_token_id=args.pad, num_return_sequences=args.batch_size, temp=args.t, top_p=args.p, max_length_sample=args.max_length)[0]
truncation = truncate(completion)
print('=' * 100)
print(completion)
print('=' * 100)
print(args.context+truncation)
print('=' * 100)
if __name__ == '__main__':
test_truncate()
main()
print('done.')
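# Hedged usage sketch: a typical CLI invocation, assuming the checkpoint has
# been downloaded to ./checkpoints/codegen-350M-mono beforehand:
#
#   python -m jaxformer.hf.sample --model codegen-350M-mono \
#       --context 'def helloworld():' --max-length 128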
| 27.244094
| 224
| 0.619509
| 884
| 6,920
| 4.645928
| 0.253394
| 0.024105
| 0.045532
| 0.005844
| 0.1215
| 0.073533
| 0.073533
| 0.068176
| 0.068176
| 0.068176
| 0
| 0.024032
| 0.212283
| 6,920
| 253
| 225
| 27.351779
| 0.729407
| 0.048988
| 0
| 0.0875
| 0
| 0.00625
| 0.112138
| 0.011787
| 0
| 0
| 0
| 0
| 0.0125
| 1
| 0.1
| false
| 0
| 0.05
| 0
| 0.225
| 0.09375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ed02b6b55177c4481e9ea0e870de71a75e2629f
| 12,734
|
py
|
Python
|
retrain_with_rotnet.py
|
ericdaat/self-label
|
7c12f834c7b6bd5bee2f7f165aab33d4c4e50b51
|
[
"MIT"
] | 440
|
2020-02-17T06:54:38.000Z
|
2022-03-24T09:32:13.000Z
|
retrain_with_rotnet.py
|
ericdaat/self-label
|
7c12f834c7b6bd5bee2f7f165aab33d4c4e50b51
|
[
"MIT"
] | 21
|
2020-02-28T06:40:20.000Z
|
2022-03-11T10:59:09.000Z
|
retrain_with_rotnet.py
|
ericdaat/self-label
|
7c12f834c7b6bd5bee2f7f165aab33d4c4e50b51
|
[
"MIT"
] | 53
|
2020-02-27T13:05:49.000Z
|
2022-03-07T02:33:01.000Z
|
import argparse
import warnings
warnings.simplefilter("ignore", UserWarning)
import files
from tensorboardX import SummaryWriter
import os
import numpy as np
import time
import torch
import torch.optim
import torch.nn as nn
import torch.utils.data
import torchvision
import torchvision.transforms as tfs
from data import DataSet,return_model_loader
from util import weight_init, write_conv, setup_runtime, AverageMeter, MovingAverage
def RotationDataLoader(image_dir, is_validation=False,
batch_size=256, crop_size=224, num_workers=4,shuffle=True):
normalize = tfs.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
transforms = tfs.Compose([
tfs.RandomResizedCrop(crop_size),
tfs.RandomGrayscale(p=0.2),
tfs.ColorJitter(0.4, 0.4, 0.4, 0.4),
tfs.RandomHorizontalFlip(),
tfs.Lambda(lambda img: torch.stack([normalize(tfs.ToTensor()(
tfs.functional.rotate(img, angle))) for angle in [0, 90, 180, 270]]
))
])
if is_validation:
dataset = DataSet(torchvision.datasets.ImageFolder(image_dir + '/val', transforms))
else:
dataset = DataSet(torchvision.datasets.ImageFolder(image_dir + '/train', transforms))
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=True,
drop_last=False
)
return loader
class Optimizer:
def __init__(self):
self.num_epochs = 30
self.lr = 0.05
self.lr_schedule = lambda epoch: (self.lr * (0.1 ** (epoch//args.lrdrop)))*(epoch<80) + (epoch>=80)*self.lr*(0.1**3)
self.momentum = 0.9
self.weight_decay = 10**(-5)
self.resume = True
self.checkpoint_dir = None
self.writer = None
self.K = args.ncl
self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.val_loader = RotationDataLoader(args.imagenet_path, is_validation=True,
batch_size=args.batch_size, num_workers=args.workers,shuffle=True)
def optimize_epoch(self, model, optimizer, loader, epoch, validation=False):
print(f"Starting epoch {epoch}, validation: {validation} " + "="*30)
loss_value = AverageMeter()
rotacc_value = AverageMeter()
# house keeping
if not validation:
model.train()
lr = self.lr_schedule(epoch)
for pg in optimizer.param_groups:
pg['lr'] = lr
else:
model.eval()
XE = torch.nn.CrossEntropyLoss().to(self.dev)
l_dl = 0 # len(loader)
now = time.time()
batch_time = MovingAverage(intertia=0.9)
for iter, (data, label, selected) in enumerate(loader):
now = time.time()
if not validation:
niter = epoch * len(loader.dataset) + iter*args.batch_size
data = data.to(self.dev)
mass = data.size(0)
where = np.arange(mass,dtype=int) * 4
data = data.view(mass * 4, 3, data.size(3), data.size(4))
rotlabel = torch.tensor(range(4)).view(-1, 1).repeat(mass, 1).view(-1).to(self.dev)
#################### train CNN ###########################################
if not validation:
final = model(data)
if args.onlyrot:
loss = torch.Tensor([0]).to(self.dev)
else:
if args.hc == 1:
loss = XE(final[0][where], self.L[selected])
else:
loss = torch.mean(torch.stack([XE(final[k][where], self.L[k, selected]) for k in range(args.hc)]))
rotloss = XE(final[-1], rotlabel)
pred = torch.argmax(final[-1], 1)
total_loss = loss + rotloss
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
correct = (pred == rotlabel).to(torch.float)
rotacc = correct.sum() / float(mass)
else:
final = model(data)
pred = torch.argmax(final[-1], 1)
correct = (pred == rotlabel.cuda()).to(torch.float)
rotacc = correct.sum() / float(mass)
total_loss = torch.Tensor([0])
loss = torch.Tensor([0])
rotloss = torch.Tensor([0])
rotacc_value.update(rotacc.item(), mass)
loss_value.update(total_loss.item(), mass)
batch_time.update(time.time() - now)
now = time.time()
print(
f"Loss: {loss_value.avg:03.3f}, RotAcc: {rotacc_value.avg:03.3f} | {epoch: 3}/{iter:05}/{l_dl:05} Freq: {mass / batch_time.avg:04.1f}Hz:",
end='\r', flush=True)
# every few iter logging
if (iter % args.logiter == 0):
if not validation:
print(niter, " Loss: {0:.3f}".format(loss.item()), flush=True)
with torch.no_grad():
if not args.onlyrot:
pred = torch.argmax(final[0][where], dim=1)
pseudoloss = XE(final[0][where], pred)
if not args.onlyrot:
self.writer.add_scalar('Pseudoloss', pseudoloss.item(), niter)
self.writer.add_scalar('lr', self.lr_schedule(epoch), niter)
self.writer.add_scalar('Loss', loss.item(), niter)
self.writer.add_scalar('RotLoss', rotloss.item(), niter)
self.writer.add_scalar('RotAcc', rotacc.item(), niter)
if iter > 0:
self.writer.add_scalar('Freq(Hz)', mass/(time.time() - now), niter)
# end of epoch logging
if self.writer and (epoch % self.log_interval == 0):
write_conv(self.writer, model, epoch)
if validation:
print('val Rot-Acc: ', rotacc_value.avg)
self.writer.add_scalar('val Rot-Acc', rotacc_value.avg, epoch)
files.save_checkpoint_all(self.checkpoint_dir, model, args.arch,
optimizer, self.L, epoch,lowest=False)
return {'loss': loss_value.avg}
def optimize(self, model, train_loader):
"""Perform full optimization."""
first_epoch = 0
model = model.to(self.dev)
self.optimize_times = [0]
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
weight_decay=self.weight_decay,
momentum=self.momentum,
lr=self.lr)
if self.checkpoint_dir is not None and self.resume:
self.L, first_epoch = files.load_checkpoint_all(self.checkpoint_dir, model=None, opt=None)
print('loaded from: ', self.checkpoint_dir,flush=True)
print('first five entries of L: ', self.L[:5], flush=True)
print('found first epoch to be', first_epoch, flush=True)
first_epoch = 0
self.optimize_times = [0]
self.L = self.L.cuda()
print("model.headcount ", model.headcount, flush=True)
#####################################################################################
# Perform optimization ##############################################################
lowest_loss = 1e9
epoch = first_epoch
while epoch < (self.num_epochs+1):
if not args.val_only:
m = self.optimize_epoch(model, optimizer, train_loader, epoch, validation=False)
if m['loss'] < lowest_loss:
lowest_loss = m['loss']
files.save_checkpoint_all(self.checkpoint_dir, model, args.arch,
optimizer, self.L, epoch, lowest=True)
else:
print('='*30 +' doing only validation ' + "="*30)
epoch = self.num_epochs
m = self.optimize_epoch(model, optimizer, self.val_loader, epoch, validation=True)
epoch += 1
print(f"Model optimization completed. Saving final model to {os.path.join(self.checkpoint_dir, 'model_final.pth.tar')}")
torch.save(model, os.path.join(self.checkpoint_dir, 'model_final.pth.tar'))
return model
def get_parser():
parser = argparse.ArgumentParser(description='Retrain with given labels combined with RotNet loss')
# optimizer
parser.add_argument('--epochs', default=90, type=int, metavar='N', help='number of epochs')
parser.add_argument('--batch-size', default=64, type=int, metavar='BS', help='batch size')
parser.add_argument('--lr', default=0.05, type=float, metavar='FLOAT', help='initial learning rate')
parser.add_argument('--lrdrop', default=30, type=int, metavar='INT', help='multiply LR by 0.1 every')
# architecture
parser.add_argument('--arch', default='alexnet', type=str, help='alexnet or resnet')
parser.add_argument('--archspec', default='big', type=str, help='big or small for alexnet ')
parser.add_argument('--ncl', default=1000, type=int, metavar='INT', help='number of clusters')
parser.add_argument('--hc', default=1, type=int, metavar='INT', help='number of heads')
    parser.add_argument('--init', default=False, action='store_true', help='initialize the network weights as in PyTorch 0.4')
    # what we do in this code
    parser.add_argument('--val-only', default=False, action='store_true', help='run only on the validation set')
    parser.add_argument('--onlyrot', default=False, action='store_true', help='train only the RotNet head')
# housekeeping
parser.add_argument('--data', default="Imagenet", type=str)
parser.add_argument('--device', default="0", type=str, metavar='N', help='GPU device')
parser.add_argument('--exp', default='./rot-retrain', metavar='DIR', help='path to result dirs')
    parser.add_argument('--workers', default=6, type=int, metavar='N', help='number of workers (default: 6)')
parser.add_argument('--imagenet-path', default='/home/ubuntu/data/imagenet', type=str, help='')
parser.add_argument('--comment', default='rot-retrain', type=str, help='comment for tensorboardX')
    parser.add_argument('--log-interval', default=1, type=int, metavar='INT', help='save checkpoints and logs every x epochs')
parser.add_argument('--logiter', default=200, type=int, metavar='INT', help='log every x-th batch')
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
name = "%s" % args.comment.replace('/', '_')
try:
args.device = [int(item) for item in args.device.split(',')]
except AttributeError:
args.device = [int(args.device)]
setup_runtime(seed=42, cuda_dev_id=args.device)
print(args, flush=True)
print()
    print(name, flush=True)
    writer = SummaryWriter('./runs/%s/%s' % (args.data, name))
writer.add_text('args', " \n".join(['%s %s' % (arg, getattr(args, arg)) for arg in vars(args)]))
    # Setup model and train_loader (the loader returned by return_model_loader is replaced below)
    print('Commencing!', flush=True)
    model, train_loader = return_model_loader(args)
    train_loader = RotationDataLoader(args.imagenet_path, is_validation=False,
                                      crop_size=224, batch_size=args.batch_size,
                                      num_workers=args.workers, shuffle=True)
# add additional head to the network for RotNet loss.
if args.arch == 'alexnet':
if args.hc == 1:
model.__setattr__("top_layer0", nn.Linear(4096, args.ncl))
model.top_layer = None
model.headcount = args.hc+1
model.__setattr__("top_layer%s" % args.hc, nn.Linear(4096, 4))
else:
        if args.hc == 1:
            model.__setattr__("top_layer0", nn.Linear(2048*int(args.archspec), args.ncl))
            model.top_layer = None
        # for ResNet, --archspec must be numeric (a width multiplier), unlike the AlexNet 'big'/'small' values
        model.headcount = args.hc + 1
        model.__setattr__("top_layer%s" % args.hc, nn.Linear(2048*int(args.archspec), 4))
if args.init:
for mod in model.modules():
mod.apply(weight_init)
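    # Hedged aside (not part of the original script): the RotNet head attached
    # above is just a 4-way linear classifier over trunk features. A minimal,
    # self-contained shape check, assuming AlexNet-style 4096-d fc features:
    def _rotnet_head_shape_check():
        feats = torch.rand(8, 4096)    # fake batch of trunk features
        rot_head = nn.Linear(4096, 4)  # one logit per rotation (0/90/180/270)
        assert rot_head(feats).shape == (8, 4)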
# Setup optimizer
o = Optimizer()
o.writer = writer
o.lr = args.lr
o.num_epochs = args.epochs
o.resume = True
o.log_interval = args.log_interval
o.checkpoint_dir = os.path.join(args.exp, 'checkpoints')
# Optimize
o.optimize(model, train_loader)
| 44.369338
| 154
| 0.569185
| 1,531
| 12,734
| 4.615284
| 0.214892
| 0.0242
| 0.045712
| 0.018823
| 0.219643
| 0.201953
| 0.145344
| 0.093122
| 0.082649
| 0.082649
| 0
| 0.020361
| 0.286477
| 12,734
| 286
| 155
| 44.524476
| 0.757319
| 0.023166
| 0
| 0.151515
| 0
| 0.008658
| 0.109091
| 0.014414
| 0.004329
| 0
| 0
| 0
| 0
| 1
| 0.021645
| false
| 0
| 0.064935
| 0
| 0.108225
| 0.060606
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ed057bb216be080ba95c6d1f2a7ce1ab1dfd4f5
| 1,341
|
py
|
Python
|
tests/vie.py
|
Jinwithyoo/han
|
931a271e56134dcc35029bf75260513b60884f6c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/vie.py
|
Jinwithyoo/han
|
931a271e56134dcc35029bf75260513b60884f6c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/vie.py
|
Jinwithyoo/han
|
931a271e56134dcc35029bf75260513b60884f6c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from tests import HangulizeTestCase
from hangulize.langs.vie import Vietnamese
class VietnameseTestCase(HangulizeTestCase):
""" http://korean.go.kr/09_new/dic/rule/rule_foreign_0218.jsp """
lang = Vietnamese()
def test_1st(self):
"""제1항
nh는 이어지는 모음과 합쳐서 한 음절로 적는다. 어말이나 자음 앞에서는
받침 ‘ㄴ' 으로 적되, 그 앞의 모음이 a인 경우에는 a와 합쳐 ‘아인'으로
적는다.
"""
self.assert_examples({
# u'Nha Trang': u'냐짱',
# u'Hô Chi Minh': u'호찌민',
# u'Thanh Hoa': u'타인호아',
# u'Đông Khanh': u'동카인',
})
def test_2nd(self):
"""제2항
qu는 이어지는 모음이 a일 경우에는 합쳐서 ‘꽈'로 적는다.
"""
self.assert_examples({
'Quang': '꽝',
# u'hat quan ho': u'핫꽌호',
'Quôc': '꾸옥',
'Quyên': '꾸옌',
})
def test_3rd(self):
"""제3항
y는 뒤따르는 모음과 합쳐서 한 음절로 적는다.
"""
self.assert_examples({
'yên': '옌',
'Nguyên': '응우옌',
})
def test_4th(self):
"""제4항
어중의 l이 모음 앞에 올 때에는 ‘ㄹㄹ'로 적는다.
다만, 인명의 성과 이름은 별개의 단어로 보아 이 규칙을 적용하지 않는다.
"""
self.assert_examples({
# u'klông put': u'끌롱쁫',
'Pleiku': '쁠래이꾸',
# u'Ha Long': u'할롱',
# u'My Lay': u'밀라이',
})
| 24.833333
| 69
| 0.463833
| 176
| 1,341
| 3.471591
| 0.698864
| 0.045827
| 0.11784
| 0.10311
| 0.042553
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018315
| 0.389262
| 1,341
| 54
| 70
| 24.833333
| 0.727717
| 0.38777
| 0
| 0.363636
| 0
| 0
| 0.060432
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ed195167a4ca32696adae9b1a096d1817a006fd
| 639
|
py
|
Python
|
src/smallestLetter/target.py
|
rajitbanerjee/leetcode
|
720fcdd88d371e2d6592ceec8370a6760a77bb89
|
[
"CC0-1.0"
] | null | null | null |
src/smallestLetter/target.py
|
rajitbanerjee/leetcode
|
720fcdd88d371e2d6592ceec8370a6760a77bb89
|
[
"CC0-1.0"
] | null | null | null |
src/smallestLetter/target.py
|
rajitbanerjee/leetcode
|
720fcdd88d371e2d6592ceec8370a6760a77bb89
|
[
"CC0-1.0"
] | 1
|
2021-04-28T18:17:55.000Z
|
2021-04-28T18:17:55.000Z
|
class Solution:
    def nextGreatestLetter(self, letters: list, target: str) -> str:
        # letters is sorted; wrap around when target is outside its range
        if target < letters[0] or target >= letters[-1]:
            return letters[0]
        # binary search for the leftmost letter strictly greater than target
        left, right = 0, len(letters) - 1
        while left < right:
            mid = left + (right - left) // 2
            if letters[mid] > target:
                right = mid
            else:
                left = mid + 1
        return letters[right]
if __name__ == '__main__':
letters = ["c", "f", "j"]
target = "a"
print(f"Input: letters = {letters}, target = {target}")
print(f"Output: {Solution().nextGreatestLetter(letters, target)}")
| 31.95
| 70
| 0.528951
| 71
| 639
| 4.647887
| 0.422535
| 0.081818
| 0.084848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016355
| 0.330203
| 639
| 19
| 71
| 33.631579
| 0.754673
| 0
| 0
| 0
| 0
| 0
| 0.176839
| 0.061033
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0
| 0
| 0.235294
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ed367645577a295c7ca8d2261bca85d6a1facb8
| 978
|
py
|
Python
|
matplotlib/gallery_python/pyplots/dollar_ticks.py
|
gottaegbert/penter
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
[
"MIT"
] | 13
|
2020-01-04T07:37:38.000Z
|
2021-08-31T05:19:58.000Z
|
matplotlib/gallery_python/pyplots/dollar_ticks.py
|
gottaegbert/penter
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
[
"MIT"
] | 3
|
2020-06-05T22:42:53.000Z
|
2020-08-24T07:18:54.000Z
|
matplotlib/gallery_python/pyplots/dollar_ticks.py
|
gottaegbert/penter
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
[
"MIT"
] | 9
|
2020-10-19T04:53:06.000Z
|
2021-08-31T05:20:01.000Z
|
"""
============
Dollar Ticks
============
Use a `~.ticker.FormatStrFormatter` to prepend dollar signs to y-axis labels.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# Fixing random state for reproducibility
np.random.seed(19680801)
fig, ax = plt.subplots()
ax.plot(100*np.random.rand(20))
formatter = ticker.FormatStrFormatter('$%1.2f')
ax.yaxis.set_major_formatter(formatter)
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_visible(False)
tick.label2.set_visible(True)
tick.label2.set_color('green')
plt.show()
#############################################################################
#
# ------------
#
# References
# """"""""""
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
import matplotlib
matplotlib.ticker
matplotlib.ticker.FormatStrFormatter
matplotlib.axis.Axis.set_major_formatter
matplotlib.axis.Axis.get_major_ticks
matplotlib.axis.Tick
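# A shorthand sketch (an aside; assumes Matplotlib >= 3.3, not part of the
# original example): passing a format string builds a ticker.StrMethodFormatter
# internally, giving the same dollar labels with less code:
#
#     ax.yaxis.set_major_formatter('${x:1.2f}')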
| 22.227273
| 77
| 0.673824
| 122
| 978
| 5.311475
| 0.52459
| 0.111111
| 0.052469
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020666
| 0.109407
| 978
| 43
| 78
| 22.744186
| 0.723307
| 0.290389
| 0
| 0
| 0
| 0
| 0.018303
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.210526
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ed3c5718d5548ba82fc7cde7bd8e347ef468e10
| 6,746
|
py
|
Python
|
Chibrary/utils.py
|
chiro2001/chibrary
|
da31ef80df394cfb260fbe2c1e675f28717fea3e
|
[
"MIT"
] | null | null | null |
Chibrary/utils.py
|
chiro2001/chibrary
|
da31ef80df394cfb260fbe2c1e675f28717fea3e
|
[
"MIT"
] | null | null | null |
Chibrary/utils.py
|
chiro2001/chibrary
|
da31ef80df394cfb260fbe2c1e675f28717fea3e
|
[
"MIT"
] | 1
|
2021-09-21T16:40:58.000Z
|
2021-09-21T16:40:58.000Z
|
import json
import re
from flask import request, abort, jsonify
from Chibrary import config
from Chibrary.config import logger
from Chibrary.exceptions import *
from functools import wraps
from urllib import parse
from Chibrary.server import db
def parse_url_query(url: str) -> dict:
if not url.lower().startswith('http://') \
and not url.lower().startswith('https://'):
return {}
query = url[url.rindex('/') + 1:]
if '?' not in query:
return {}
query = query[query.index('?') + 1:]
lines = query.split('&')
result = {}
for line in lines:
if line.count('=') != 1:
continue
key, val = line.split('=')
        # note the type coercion here: 'undefined' -> None, numeric strings -> int/float
if val == 'undefined':
val = None
else:
try:
val = int(val)
except ValueError:
try:
val = float(val)
except ValueError:
pass
if val is not None:
if type(val) is str:
result[key] = parse.unquote(val)
else:
result[key] = val
return result
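# Hedged aside (not in the original module): the standard library covers most
# of this parsing; parse_qsl percent-decodes values but does not coerce
# ints/floats the way parse_url_query above does.
def parse_url_query_stdlib(url: str) -> dict:
    return dict(parse.parse_qsl(parse.urlsplit(url).query))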
def form_url_query(url: str, data: dict):
# if not url.lower().startswith('http://') \
# and not url.lower().startswith('https://'):
# logger.warning('Provided wrong url %s !' % url)
# return url
# if len(data) == 0:
# return url
# query = '?'
# for key in data:
    # # special-case handling (?)
# if type(data[key]) is str and '/' in data[key]:
# query = query + parse.urlencode({key: data[key]}) + '&'
# else:
# query = query + key + '=' + parse.quote(str(data[key])) + '&'
# query = query[:-1]
# return url + query
    # gotcha here: urlencode encodes spaces as '+', but we need '%20'
return url + '?' + parse.urlencode(data).replace('+', '%20')
def remove_ids_dfs(data: dict):
if '_id' in data:
del data['_id']
for key in data:
if type(data[key]) is dict:
data[key] = remove_ids_dfs(data[key])
return data
"""
返回值格式:
{
code: ...,
message: ...,
data: ...,
}
"""
def make_result(code: int, message=None, data=None):
result = {
'code': code,
}
    # pick the message based on the code
if message is None:
try:
result['message'] = config.code[str(code)]
except ValueError:
logger.warning('Error code %s not found!' % code)
result['message'] = config.code['0']
else:
result['message'] = message
if data is not None:
        # all _id fields must be removed
data = remove_ids_dfs(data)
result['data'] = data
return result
def make_error_result(error):
return make_result(1, message=str(error))
def dump(data):
return json.dumps(data)
def check_args(args: dict, requirements: list):
for r in requirements:
if r not in args:
return False
return True
def format_file_size(size_by_bytes: int) -> str:
    units = ['B', 'KB', 'MB', 'GB', 'TB']
    # the final value should fall between 1 and 999
    index = 0
    unit = units[index]
    # stop at the largest unit so the index cannot run past the list
    # (the original checked the bound only after indexing, which could raise IndexError)
    while size_by_bytes > 1000 and index < len(units) - 1:
        index = index + 1
        unit = units[index]
        size_by_bytes = size_by_bytes / 1000
    if size_by_bytes > 20:
        return "%.0f%s" % (size_by_bytes, unit)
    return "%.2f%s" % (size_by_bytes, unit)
# the client sends an "Authorization: {token}" header
def login_check(f):
@wraps(f)
def decorated(*args, **kwargs):
headers = dict(request.headers)
if 'Authorization' not in headers:
return make_result(3) # login error
token = headers['Authorization']
if db.token_find_by_token(token) is None:
return make_result(3) # login error
return f(*args, **kwargs)
return decorated
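# Usage sketch for the decorator above (hypothetical Flask route, not part of
# this module):
#
#     @app.route('/api/me')
#     @login_check
#     def me():
#         return make_result(0, data={'me': 'ok'})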
# the client sends an "Authorization: {token}" header
def admin_check(f):
@wraps(f)
def decorated(*args, **kwargs):
headers = dict(request.headers)
if 'Authorization' not in headers:
return make_result(3) # login error
token = headers['Authorization']
token_data = db.token_find_by_token(token)
if token_data is None:
return make_result(3) # login error
        # a user level of 10 or above grants admin rights
user = db.user_find(username=token_data['username'])
if user is None:
            return make_result(3)  # login error; the token would not be effective anyway
if user['info']['level'] < 10:
return make_result(10) # No permission
return f(*args, **kwargs)
return decorated
# must be called inside a request; aborts the request if the user cannot be resolved
def get_user_from_headers():
headers = dict(request.headers)
if 'Authorization' not in headers:
abort(jsonify(make_result(3))) # login error
token = headers['Authorization']
token_data = db.token_find_by_token(token)
if token_data is None:
abort(jsonify(make_result(3))) # login error
    # a user level of 10 or above grants admin rights
user = db.user_find(username=token_data['username'])
if user is None:
        abort(jsonify(make_result(3)))  # login error; the token would not be effective anyway
return user
def check_admin_abort():
headers = dict(request.headers)
if 'Authorization' not in headers:
abort(jsonify(make_result(3))) # login error
token = headers['Authorization']
token_data = db.token_find_by_token(token)
if token_data is None:
abort(jsonify(make_result(3))) # login error
    # a user level of 10 or above grants admin rights
user = db.user_find(username=token_data['username'])
if user is None:
        abort(jsonify(make_result(3)))  # login error; the token would not be effective anyway
if user['info']['level'] < 10:
abort(jsonify(make_result(10))) # No permission
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
# try:
# import unicodedata
# unicodedata.numeric(s)
# return True
# except (TypeError, ValueError):
# pass
return False
# def url_check(url: str):
# url = url.lower()
# reg = "^(https|http|ftp|rtsp|mms)\\://?([a-zA-Z0-9\\.\\-]+(\\:[a-zA-Z0-9\\.&%\\$\\-]+)*@)?((25[0-5]|2" \
# "[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]" \
# "{1}[0-9]{1}|[1-9]|0)\\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\\.(25[0-5]|" \
# "2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])|([a-zA-Z0-9\\-]+\\.)*[a-zA-Z0-9\\-]+\\.[a-zA-Z]" \
# "{2,4})(\\:[0-9]+)?(/[^/][a-zA-Z0-9\\.\\,\\?\\'\\\\/\\+&%\\$\\=~_\\-@]*)*$"
# print(re.search(url, reg))
if __name__ == '__main__':
print(parse_url_query('http://blog.com/sss/ssss/s?wd=dsfa&a=fdsa&a=1&b=1.1&a=s'))
print(format_file_size(20250000))
# print(url_check('http://www.bilibili.com/'))
| 28.82906
| 116
| 0.554996
| 891
| 6,746
| 4.096521
| 0.189675
| 0.041096
| 0.033151
| 0.048219
| 0.430137
| 0.4
| 0.373699
| 0.372603
| 0.353973
| 0.353973
| 0
| 0.035582
| 0.279277
| 6,746
| 233
| 117
| 28.95279
| 0.715138
| 0.243848
| 0
| 0.456376
| 0
| 0.006711
| 0.065274
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.100671
| false
| 0.013423
| 0.060403
| 0.020134
| 0.328859
| 0.013423
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ed4e33e928545ea0125662f34b75db4ebefd622
| 897
|
py
|
Python
|
tests/mappers/fields/test_float_field.py
|
Arfey/aiohttp_admin2
|
2b3782389ec9e25809635811b76ef8111b27d8ba
|
[
"MIT"
] | 12
|
2021-10-15T11:48:12.000Z
|
2022-03-24T07:31:43.000Z
|
tests/mappers/fields/test_float_field.py
|
Arfey/aiohttp_admin2
|
2b3782389ec9e25809635811b76ef8111b27d8ba
|
[
"MIT"
] | 2
|
2021-12-29T16:31:05.000Z
|
2021-12-30T00:50:40.000Z
|
tests/mappers/fields/test_float_field.py
|
Arfey/aiohttp_admin2
|
2b3782389ec9e25809635811b76ef8111b27d8ba
|
[
"MIT"
] | null | null | null |
from aiohttp_admin2.mappers import Mapper
from aiohttp_admin2.mappers import fields
class FloatMapper(Mapper):
field = fields.FloatField()
def test_correct_float_type():
"""
    In this test we check successful conversion to the float type.
"""
mapper = FloatMapper({"field": 1})
mapper.is_valid()
assert mapper.data["field"] == 1.0
mapper = FloatMapper({"field": 2})
mapper.is_valid()
assert mapper.data["field"] == 2.0
mapper = FloatMapper({"field": -3})
mapper.is_valid()
assert mapper.data["field"] == -3.0
mapper = FloatMapper({"field": 0})
mapper.is_valid()
assert mapper.data["field"] == 0.0
def test_wrong_float_type():
"""
    In this test we check that validation fails when we receive a wrong float type.
"""
assert FloatMapper({"field": "string"}).is_valid() is False
assert FloatMapper({"field": []}).is_valid() is False
| 21.878049
| 66
| 0.645485
| 117
| 897
| 4.82906
| 0.316239
| 0.169912
| 0.155752
| 0.134513
| 0.438938
| 0.332743
| 0.332743
| 0
| 0
| 0
| 0
| 0.019718
| 0.208473
| 897
| 40
| 67
| 22.425
| 0.776056
| 0.128205
| 0
| 0.2
| 0
| 0
| 0.074567
| 0
| 0
| 0
| 0
| 0
| 0.3
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ed5587a827c8b8f54d7f90abf4042432f650675
| 1,163
|
py
|
Python
|
autotest/t038_test.py
|
jdlarsen-UA/flopy
|
bf2c59aaa689de186bd4c80685532802ac7149cd
|
[
"CC0-1.0",
"BSD-3-Clause"
] | 2
|
2021-09-06T01:08:58.000Z
|
2021-09-06T06:02:15.000Z
|
autotest/t038_test.py
|
jdlarsen-UA/flopy
|
bf2c59aaa689de186bd4c80685532802ac7149cd
|
[
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null |
autotest/t038_test.py
|
jdlarsen-UA/flopy
|
bf2c59aaa689de186bd4c80685532802ac7149cd
|
[
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null |
"""
Try to load all of the MODFLOW-USG examples in ../examples/data/mfusg_test.
These are the examples that are distributed with MODFLOW-USG.
"""
import os
import flopy
# make the working directory
tpth = os.path.join("temp", "t038")
if not os.path.isdir(tpth):
os.makedirs(tpth)
# build list of name files to try and load
usgpth = os.path.join("..", "examples", "data", "mfusg_test")
usg_files = []
for path, subdirs, files in os.walk(usgpth):
for name in files:
if name.endswith(".nam"):
usg_files.append(os.path.join(path, name))
#
def test_load_usg():
for fusg in usg_files:
d, f = os.path.split(fusg)
yield load_model, f, d
# function to load a MODFLOW-USG model and then write it back out
def load_model(namfile, model_ws):
m = flopy.modflow.Modflow.load(
namfile, model_ws=model_ws, version="mfusg", verbose=True, check=False
)
assert m, f"Could not load namefile {namfile}"
assert m.load_fail is False
m.change_model_ws(tpth)
m.write_input()
return
if __name__ == "__main__":
for fusg in usg_files:
d, f = os.path.split(fusg)
load_model(f, d)
| 25.844444
| 78
| 0.663801
| 186
| 1,163
| 4.010753
| 0.408602
| 0.048257
| 0.040214
| 0.0563
| 0.091153
| 0.091153
| 0.091153
| 0.091153
| 0.091153
| 0.091153
| 0
| 0.003289
| 0.215821
| 1,163
| 44
| 79
| 26.431818
| 0.814693
| 0.232158
| 0
| 0.142857
| 0
| 0
| 0.092971
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ed9a8ae3cb2f6c51bd79bc87c61d261f1d3fcce
| 3,488
|
py
|
Python
|
pyhanko_certvalidator/asn1_types.py
|
MatthiasValvekens/certvalidator
|
246c5075ecdb6d50b14c93fdc97a9d0470f84821
|
[
"MIT"
] | 4
|
2020-11-11T13:59:05.000Z
|
2022-03-13T14:06:10.000Z
|
pyhanko_certvalidator/asn1_types.py
|
MatthiasValvekens/certvalidator
|
246c5075ecdb6d50b14c93fdc97a9d0470f84821
|
[
"MIT"
] | 1
|
2020-11-11T11:29:37.000Z
|
2020-11-11T11:29:37.000Z
|
pyhanko_certvalidator/asn1_types.py
|
MatthiasValvekens/certvalidator
|
246c5075ecdb6d50b14c93fdc97a9d0470f84821
|
[
"MIT"
] | 2
|
2020-11-11T10:33:32.000Z
|
2022-03-13T14:06:11.000Z
|
from typing import Optional
from asn1crypto import core, x509, cms
__all__ = [
'Target', 'TargetCert', 'Targets', 'SequenceOfTargets',
'AttrSpec', 'AAControls'
]
class TargetCert(core.Sequence):
_fields = [
('target_certificate', cms.IssuerSerial),
('target_name', x509.GeneralName, {'optional': True}),
('cert_digest_info', cms.ObjectDigestInfo, {'optional': True})
]
class Target(core.Choice):
_alternatives = [
('target_name', x509.GeneralName, {'explicit': 0}),
('target_group', x509.GeneralName, {'explicit': 1}),
('target_cert', TargetCert, {'explicit': 2})
]
class Targets(core.SequenceOf):
_child_spec = Target
# Blame X.509...
class SequenceOfTargets(core.SequenceOf):
_child_spec = Targets
class AttrSpec(core.SequenceOf):
_child_spec = cms.AttCertAttributeType
class AAControls(core.Sequence):
_fields = [
('path_len_constraint', core.Integer, {'optional': True}),
('permitted_attrs', AttrSpec, {'optional': True, 'implicit': 0}),
('excluded_attrs', AttrSpec, {'optional': True, 'implicit': 1}),
('permit_unspecified', core.Boolean, {'default': True})
]
def accept(self, attr_id: cms.AttCertAttributeType) -> bool:
attr_id_str = attr_id.native
excluded = self['excluded_attrs'].native
if excluded is not None:
excluded = frozenset(excluded)
if excluded is not None and attr_id_str in excluded:
return False
permitted = self['permitted_attrs'].native
if permitted is not None:
permitted = frozenset(permitted)
if permitted is not None and attr_id_str in permitted:
return True
return bool(self['permit_unspecified'])
@classmethod
def read_extension_value(cls, cert: x509.Certificate) \
-> Optional['AAControls']:
# handle AA controls (not natively supported by asn1crypto, so
# not available as an attribute).
try:
return next(
ext['extn_value'].parsed
for ext in cert['tbs_certificate']['extensions']
if ext['extn_id'].native == 'aa_controls'
)
except StopIteration:
return None
def _make_tag_explicit(field_decl):
tag_dict = field_decl[2]
if 'explicit' in tag_dict:
return
tag_dict['explicit'] = tag_dict['implicit']
del tag_dict['implicit']
def _make_tag_implicit(field_decl):
tag_dict = field_decl[2]
if 'implicit' in tag_dict:
return
tag_dict['implicit'] = tag_dict['explicit']
del tag_dict['explicit']
# Deal with wbond/asn1crypto#218
_make_tag_explicit(cms.RoleSyntax._fields[1])
_make_tag_explicit(cms.SecurityCategory._fields[1])
# Deal with wbond/asn1crypto#220
_make_tag_implicit(cms.AttCertIssuer._alternatives[1])
# patch in attribute certificate extensions
# Note: unlike in Certomancer, we don't do this one conditionally, since
# we need the actual Python types to agree with what we export
ext_map = x509.ExtensionId._map
ext_specs = x509.Extension._oid_specs
ext_map['2.5.29.55'] = 'target_information'
ext_specs['target_information'] = SequenceOfTargets
ext_map['2.5.29.56'] = 'no_rev_avail'
ext_specs['no_rev_avail'] = core.Null
ext_map['1.3.6.1.5.5.7.1.6'] = 'aa_controls'
ext_specs['aa_controls'] = AAControls
ext_map['1.3.6.1.5.5.7.1.4'] = 'audit_identity'
ext_specs['audit_identity'] = core.OctetString
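# Usage sketch (hedged; `some_cert` is a hypothetical x509.Certificate, not
# part of this module):
#
#     aa = AAControls.read_extension_value(some_cert)
#     if aa is not None:
#         allowed = aa.accept(cms.AttCertAttributeType('role'))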
| 30.068966
| 73
| 0.663417
| 430
| 3,488
| 5.155814
| 0.351163
| 0.031574
| 0.016238
| 0.031123
| 0.144339
| 0.078484
| 0.058638
| 0.058638
| 0.01263
| 0.01263
| 0
| 0.026997
| 0.214163
| 3,488
| 115
| 74
| 30.330435
| 0.781831
| 0.097764
| 0
| 0.075
| 0
| 0
| 0.199617
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.025
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0eda0495743701a807a727479d2ba40e2e1b5552
| 910
|
py
|
Python
|
python/csv/csv_dict_writer.py
|
y2ghost/study
|
c5278611b0a732fe19e3d805c0c079e530b1d3b2
|
[
"MIT"
] | null | null | null |
python/csv/csv_dict_writer.py
|
y2ghost/study
|
c5278611b0a732fe19e3d805c0c079e530b1d3b2
|
[
"MIT"
] | null | null | null |
python/csv/csv_dict_writer.py
|
y2ghost/study
|
c5278611b0a732fe19e3d805c0c079e530b1d3b2
|
[
"MIT"
] | null | null | null |
import csv
def csv_dict_writer(path, headers, data):
with open(path, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, delimiter=',',
fieldnames=headers)
writer.writeheader()
for record in data:
writer.writerow(record)
if __name__ == '__main__':
data = '''book_title,author,publisher,pub_date,isbn
Python 101,Mike Driscoll, Mike Driscoll,2020,123456789
wxPython Recipes,Mike Driscoll,Apress,2018,978-1-4842-3237-8
Python Interviews,Mike Driscoll,Packt Publishing,2018,9781788399081'''
records = []
for line in data.splitlines():
records.append(line.strip().split(','))
headers = records.pop(0)
list_of_dicts = []
for row in records:
my_dict = dict(zip(headers, row))
list_of_dicts.append(my_dict)
csv_dict_writer('output_dict.csv', headers, list_of_dicts)
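    # Round-trip sketch (an aside, not part of the original snippet): reading
    # the file back with csv.DictReader yields one dict per row, keyed by the
    # header that csv_dict_writer just wrote.
    with open('output_dict.csv', newline='') as csvfile:
        for row in csv.DictReader(csvfile):
            print(row['book_title'], '-', row['author'])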
| 31.37931
| 74
| 0.650549
| 114
| 910
| 4.991228
| 0.578947
| 0.084359
| 0.057996
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072857
| 0.230769
| 910
| 28
| 75
| 32.5
| 0.74
| 0
| 0
| 0
| 0
| 0
| 0.289011
| 0.143956
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.045455
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0edaae48c98ecfaf21b42f1bc713fce970f11754
| 1,687
|
py
|
Python
|
models/cnn_layer.py
|
RobinRojowiec/intent-recognition-in-doctor-patient-interviews
|
b91c7a9f3ad70edd0f39b56e3219f48d1fcf2078
|
[
"Apache-2.0"
] | null | null | null |
models/cnn_layer.py
|
RobinRojowiec/intent-recognition-in-doctor-patient-interviews
|
b91c7a9f3ad70edd0f39b56e3219f48d1fcf2078
|
[
"Apache-2.0"
] | null | null | null |
models/cnn_layer.py
|
RobinRojowiec/intent-recognition-in-doctor-patient-interviews
|
b91c7a9f3ad70edd0f39b56e3219f48d1fcf2078
|
[
"Apache-2.0"
] | 1
|
2021-11-24T18:48:47.000Z
|
2021-11-24T18:48:47.000Z
|
import torch
import torch.nn as nn
from torch.nn.functional import max_pool1d
from utility.model_parameter import Configuration, ModelParameter
class CNNLayer(nn.Module):
def __init__(self, config: Configuration, vocab_size=30000, use_embeddings=True, embed_dim=-1, **kwargs):
super(CNNLayer, self).__init__()
# set parameters
self.max_seq_length = config.get_int(ModelParameter.MAX_LENGTH)
self.use_gpu = torch.cuda.is_available()
if embed_dim == -1:
self.embedding_dim = config.get_int(ModelParameter.EMBEDDING_SIZE)
else:
self.embedding_dim = embed_dim
self.max_length = config.get_int(ModelParameter.MAX_LENGTH)
self.use_embeddings = use_embeddings
self.conv_out_channels = config.get_int(ModelParameter.CHANNELS)
self.filter_sizes = [2]
# create and initialize layers
self.embedding = nn.Embedding(vocab_size, self.embedding_dim)
self.relu = nn.ReLU()
self.convolutions = nn.ModuleList(
[nn.Conv2d(1, self.conv_out_channels, (K, self.embedding_dim)) for K in self.filter_sizes])
self.dropout = nn.Dropout(0.3)
def get_output_length(self):
return len(self.filter_sizes) * self.conv_out_channels
def forward(self, samples, **kwargs):
encoded_samples = self.encode(samples)
return encoded_samples
def encode(self, samples):
x = self.embedding(samples)
x = x.unsqueeze(1)
x = [self.relu(conv(x)).squeeze(3) for conv in self.convolutions]
x = [max_pool1d(i, i.size(2)).squeeze(2) for i in x]
x = self.dropout(torch.cat(x, 1))
return x
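# Shape sketch (hedged; building a Configuration is project-specific, so this
# stays as comments): for a batch of token ids of shape (B, L),
#
#     layer = CNNLayer(config)      # config: a utility.model_parameter.Configuration
#     out = layer(token_ids)        # -> (B, len(filter_sizes) * conv_out_channels)
#
# which matches get_output_length().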
| 37.488889
| 109
| 0.670421
| 226
| 1,687
| 4.792035
| 0.336283
| 0.072022
| 0.044321
| 0.09603
| 0.088643
| 0.088643
| 0.088643
| 0.088643
| 0.088643
| 0
| 0
| 0.014559
| 0.226437
| 1,687
| 44
| 110
| 38.340909
| 0.815326
| 0.025489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.117647
| 0.029412
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0edcbb01b3b82f3bf4be9564d133e3829ce06411
| 4,429
|
py
|
Python
|
NLP programmes in Python/9.Text Clustering/kmeans.py
|
AlexandrosPlessias/NLP-Greek-Presentations
|
4ae9d635a777f24bae5238b9f195bd17d00040ea
|
[
"MIT"
] | null | null | null |
NLP programmes in Python/9.Text Clustering/kmeans.py
|
AlexandrosPlessias/NLP-Greek-Presentations
|
4ae9d635a777f24bae5238b9f195bd17d00040ea
|
[
"MIT"
] | null | null | null |
NLP programmes in Python/9.Text Clustering/kmeans.py
|
AlexandrosPlessias/NLP-Greek-Presentations
|
4ae9d635a777f24bae5238b9f195bd17d00040ea
|
[
"MIT"
] | null | null | null |
import nltk
import re
import csv
import string
import collections
import numpy as np
from nltk.corpus import wordnet
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import WordPunctTokenizer
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
""""Pre - Processing: tokenization, stopwords removal, remove words(with size 1), lower capitalization & lemmatization"""
def preprocessing(text):
# text = text.decode("utf8")
# remove punctuation
text = punctuation(text)
# remove extra spaces
text = re.sub(' +', ' ', text)
# tokenize into words
tokens = text.split(" ")
# remove number
tokens = [word for word in tokens if word.isalpha()]
# remove stopwords
stop = stopwords.words('english')
tokens = [token for token in tokens if token not in stop]
# remove words less than three letters
tokens = [word for word in tokens if len(word) >= 3]
# lower capitalization
tokens = [word.lower() for word in tokens]
# keep only real words
tokens = KeepRealWords(tokens)
# lemmatize
lmtzr = WordNetLemmatizer()
tokens = [lmtzr.lemmatize(word) for word in tokens]
# return only tokens with size over 1
if len(tokens) > 0:
preprocessed_text = " ".join(tokens)
return preprocessed_text
return None
def KeepRealWords(text):
wpt = WordPunctTokenizer()
only_recognized_words = []
for s in text:
tokens = wpt.tokenize(s)
if tokens: # check if empty string
for t in tokens:
if wordnet.synsets(t):
only_recognized_words.append(t) # only keep recognized words
return only_recognized_words
def punctuation(text):
translator = str.maketrans(string.punctuation, ' '*len(string.punctuation)) # map punctuation to space
return (text.translate(translator))
""""Read Data"""
# Open sms corpus.
sms_file = open('SMSSpamCollection.txt', encoding="utf8") # Check the structure of this file!
sms_data = []
sms_labels = []
# CSV Reader LABEL & DATA are separated by TAB.
csv_reader = csv.reader(sms_file,delimiter='\t')
# Store labels and data.
for line in csv_reader:
    sms_text = preprocessing(line[1])
    if sms_text is not None:
        # adding the sms label
        sms_labels.append(line[0])
        # adding the cleaned text produced by the preprocessing method
        sms_data.append(sms_text)
sms_file.close()
"""Sampling steps (70:30)"""
trainset_size = int(round(len(sms_data)*0.70))
# I chose this threshold for 70:30 train and test split.
print('The training set size for this classifier is ' + str(trainset_size) + '\n')
x_train = np.array([''.join(el) for el in sms_data[0:trainset_size]])  # train sms_data (70%).
y_train = np.array([el for el in sms_labels[0:trainset_size]])  # train sms_labels (70%).
x_test = np.array([''.join(el) for el in sms_data[trainset_size:]])  # test sms_data (30%); slicing from trainset_size avoids skipping one sample.
y_test = np.array([el for el in sms_labels[trainset_size:]])  # test sms_labels (30%).
"""We are building a TFIDF vectorizer here"""
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(min_df=2, ngram_range=(1, 2), stop_words='english', strip_accents='unicode', norm='l2')
X_train = vectorizer.fit_transform(x_train)
X_test = vectorizer.transform(x_test)
"""Text Clustering - K Means"""
from sklearn.cluster import KMeans, MiniBatchKMeans
print('--> Text Clustering - K Means')
true_k = 5
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1)
kmini = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1, init_size=1000, batch_size=1000, verbose=False) #verbose=opts.verbose
# we are using the same test,train data in TFIDF form as we did in text classification
km_model = km.fit(X_train)
print("For K-mean clustering ")
clustering = collections.defaultdict(list)
for idx, label in enumerate(km_model.labels_):
clustering[label].append(idx)
print(clustering)
kmini_model = kmini.fit(X_train)
print("For K-mean Mini batch clustering ")
clustering = collections.defaultdict(list)
for idx, label in enumerate(kmini_model.labels_):
clustering[label].append(idx)
print(clustering)
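# Optional inspection sketch (not in the original script): cluster sizes are
# usually easier to read than the full index lists printed above.
for label in sorted(clustering):
    print('cluster %d: %d documents' % (label, len(clustering[label])))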
| 34.069231
| 141
| 0.685482
| 609
| 4,429
| 4.866995
| 0.316913
| 0.018893
| 0.012146
| 0.020243
| 0.197368
| 0.163968
| 0.163968
| 0.11471
| 0.064103
| 0.045884
| 0
| 0.013976
| 0.208399
| 4,429
| 129
| 142
| 34.333333
| 0.831432
| 0.172499
| 0
| 0.076923
| 0
| 0
| 0.062768
| 0.00643
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.179487
| 0
| 0.269231
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0edd17d0b784bbe0102b923ddf6f8c3e0cea3855
| 7,304
|
py
|
Python
|
common/utils.py
|
paTRICK-swk/P-STMO
|
def1bff3fcc4f1e3b1dd69c8d3c2d77f412e3b75
|
[
"MIT"
] | 8
|
2022-03-16T02:55:45.000Z
|
2022-03-31T08:29:05.000Z
|
common/utils.py
|
paTRICK-swk/P-STMO
|
def1bff3fcc4f1e3b1dd69c8d3c2d77f412e3b75
|
[
"MIT"
] | 2
|
2022-03-24T23:29:23.000Z
|
2022-03-31T02:59:39.000Z
|
common/utils.py
|
paTRICK-swk/P-STMO
|
def1bff3fcc4f1e3b1dd69c8d3c2d77f412e3b75
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
import hashlib
from torch.autograd import Variable
import os
def deterministic_random(min_value, max_value, data):
digest = hashlib.sha256(data.encode()).digest()
raw_value = int.from_bytes(digest[:4], byteorder='little', signed=False)
return int(raw_value / (2 ** 32 - 1) * (max_value - min_value)) + min_value
def mpjpe_cal(predicted, target):
assert predicted.shape == target.shape
return torch.mean(torch.norm(predicted - target, dim=len(target.shape) - 1))
def test_calculation(predicted, target, action, error_sum, data_type, subject, MAE=False):
error_sum = mpjpe_by_action_p1(predicted, target, action, error_sum)
if not MAE:
error_sum = mpjpe_by_action_p2(predicted, target, action, error_sum)
return error_sum
def mpjpe_by_action_p1(predicted, target, action, action_error_sum):
assert predicted.shape == target.shape
batch_num = predicted.size(0)
frame_num = predicted.size(1)
dist = torch.mean(torch.norm(predicted - target, dim=len(target.shape) - 1), dim=len(target.shape) - 2)
if len(set(list(action))) == 1:
end_index = action[0].find(' ')
if end_index != -1:
action_name = action[0][:end_index]
else:
action_name = action[0]
action_error_sum[action_name]['p1'].update(torch.mean(dist).item()*batch_num*frame_num, batch_num*frame_num)
else:
for i in range(batch_num):
end_index = action[i].find(' ')
if end_index != -1:
action_name = action[i][:end_index]
else:
action_name = action[i]
action_error_sum[action_name]['p1'].update(torch.mean(dist[i]).item()*frame_num, frame_num)
return action_error_sum
def mpjpe_by_action_p2(predicted, target, action, action_error_sum):
assert predicted.shape == target.shape
num = predicted.size(0)
pred = predicted.detach().cpu().numpy().reshape(-1, predicted.shape[-2], predicted.shape[-1])
gt = target.detach().cpu().numpy().reshape(-1, target.shape[-2], target.shape[-1])
dist = p_mpjpe(pred, gt)
if len(set(list(action))) == 1:
end_index = action[0].find(' ')
if end_index != -1:
action_name = action[0][:end_index]
else:
action_name = action[0]
action_error_sum[action_name]['p2'].update(np.mean(dist) * num, num)
else:
for i in range(num):
end_index = action[i].find(' ')
if end_index != -1:
action_name = action[i][:end_index]
else:
action_name = action[i]
action_error_sum[action_name]['p2'].update(np.mean(dist), 1)
return action_error_sum
def p_mpjpe(predicted, target):
assert predicted.shape == target.shape
muX = np.mean(target, axis=1, keepdims=True)
muY = np.mean(predicted, axis=1, keepdims=True)
X0 = target - muX
Y0 = predicted - muY
normX = np.sqrt(np.sum(X0 ** 2, axis=(1, 2), keepdims=True))
normY = np.sqrt(np.sum(Y0 ** 2, axis=(1, 2), keepdims=True))
X0 /= normX
Y0 /= normY
H = np.matmul(X0.transpose(0, 2, 1), Y0)
U, s, Vt = np.linalg.svd(H)
V = Vt.transpose(0, 2, 1)
R = np.matmul(V, U.transpose(0, 2, 1))
sign_detR = np.sign(np.expand_dims(np.linalg.det(R), axis=1))
V[:, :, -1] *= sign_detR
s[:, -1] *= sign_detR.flatten()
R = np.matmul(V, U.transpose(0, 2, 1))
tr = np.expand_dims(np.sum(s, axis=1, keepdims=True), axis=2)
a = tr * normX / normY
t = muX - a * np.matmul(muY, R)
predicted_aligned = a * np.matmul(predicted, R) + t
return np.mean(np.linalg.norm(predicted_aligned - target, axis=len(target.shape) - 1), axis=len(target.shape) - 2)
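# Sanity sketch (an aside, not part of the original file): P-MPJPE aligns the
# prediction to the target with an optimal similarity transform, so a scaled
# and translated copy of the target should score ~0.
def _p_mpjpe_sanity_check():
    rng = np.random.RandomState(0)
    target = rng.randn(2, 17, 3)       # 2 frames, 17 joints, 3D
    predicted = 0.5 * target + 1.0     # scaled + translated copy of the target
    return p_mpjpe(predicted, target)  # per-frame errors, ~0 up to numerics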
def define_actions( action ):
actions = ["Directions","Discussion","Eating","Greeting",
"Phoning","Photo","Posing","Purchases",
"Sitting","SittingDown","Smoking","Waiting",
"WalkDog","Walking","WalkTogether"]
if action == "All" or action == "all" or action == '*':
return actions
    if action not in actions:
        raise ValueError("Unrecognized action: %s" % action)
return [action]
def define_error_list(actions):
error_sum = {}
error_sum.update({actions[i]: {'p1':AccumLoss(), 'p2':AccumLoss()} for i in range(len(actions))})
return error_sum
class AccumLoss(object):
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val
self.count += n
self.avg = self.sum / self.count
def get_varialbe(split, target):  # sic: the misspelled name is kept, since callers import it as-is
num = len(target)
var = []
if split == 'train':
for i in range(num):
temp = Variable(target[i], requires_grad=False).contiguous().type(torch.cuda.FloatTensor)
var.append(temp)
else:
for i in range(num):
temp = Variable(target[i]).contiguous().cuda().type(torch.cuda.FloatTensor)
var.append(temp)
return var
def print_error(data_type, action_error_sum, is_train):
mean_error_p1, mean_error_p2 = print_error_action(action_error_sum, is_train)
return mean_error_p1, mean_error_p2
def print_error_action(action_error_sum, is_train):
mean_error_each = {'p1': 0.0, 'p2': 0.0}
mean_error_all = {'p1': AccumLoss(), 'p2': AccumLoss()}
if is_train == 0:
print("{0:=^12} {1:=^10} {2:=^8}".format("Action", "p#1 mm", "p#2 mm"))
for action, value in action_error_sum.items():
if is_train == 0:
print("{0:<12} ".format(action), end="")
mean_error_each['p1'] = action_error_sum[action]['p1'].avg * 1000.0
mean_error_all['p1'].update(mean_error_each['p1'], 1)
mean_error_each['p2'] = action_error_sum[action]['p2'].avg * 1000.0
mean_error_all['p2'].update(mean_error_each['p2'], 1)
if is_train == 0:
print("{0:>6.2f} {1:>10.2f}".format(mean_error_each['p1'], mean_error_each['p2']))
if is_train == 0:
print("{0:<12} {1:>6.2f} {2:>10.2f}".format("Average", mean_error_all['p1'].avg, \
mean_error_all['p2'].avg))
return mean_error_all['p1'].avg, mean_error_all['p2'].avg
def save_model(previous_name, save_dir,epoch, data_threshold, model, model_name):
# if os.path.exists(previous_name):
# os.remove(previous_name)
torch.save(model.state_dict(),
'%s/%s_%d_%d.pth' % (save_dir, model_name, epoch, data_threshold * 100))
previous_name = '%s/%s_%d_%d.pth' % (save_dir, model_name, epoch, data_threshold * 100)
return previous_name
def save_model_new(save_dir,epoch, data_threshold, lr, optimizer, model, model_name):
# if os.path.exists(previous_name):
# os.remove(previous_name)
# torch.save(model.state_dict(),
# '%s/%s_%d_%d.pth' % (save_dir, model_name, epoch, data_threshold * 100))
torch.save({
'epoch': epoch,
'lr': lr,
'optimizer': optimizer.state_dict(),
'model_pos': model.state_dict(),
},
'%s/%s_%d_%d.pth' % (save_dir, model_name, epoch, data_threshold * 100))
| 32.035088
| 118
| 0.607065
| 1,036
| 7,304
| 4.084942
| 0.177606
| 0.043478
| 0.056238
| 0.028355
| 0.502599
| 0.453686
| 0.408554
| 0.331758
| 0.305293
| 0.278828
| 0
| 0.030025
| 0.238499
| 7,304
| 227
| 119
| 32.176211
| 0.730852
| 0.032859
| 0
| 0.298701
| 0
| 0
| 0.055043
| 0
| 0
| 0
| 0
| 0
| 0.025974
| 1
| 0.097403
| false
| 0
| 0.032468
| 0
| 0.220779
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0eddc7235cfdc03253ec66ce28f34006def0e26e
| 301
|
py
|
Python
|
warg_client/client/apis/controller/attack_controller.py
|
neel4os/warg-client
|
4d97904977a6f6865610afd04ca00ddfbad38ff9
|
[
"MIT"
] | null | null | null |
warg_client/client/apis/controller/attack_controller.py
|
neel4os/warg-client
|
4d97904977a6f6865610afd04ca00ddfbad38ff9
|
[
"MIT"
] | null | null | null |
warg_client/client/apis/controller/attack_controller.py
|
neel4os/warg-client
|
4d97904977a6f6865610afd04ca00ddfbad38ff9
|
[
"MIT"
] | null | null | null |
from subprocess import run
def perform_shutdown(body):
    # choose reboot (-r) or halt (-h) based on the request body
    _is_reboot = "-r" if body["reboot"] else "-h"
    time_to_shutdown = str(body['timeToShutdown'])
    result = run(["/sbin/shutdown", _is_reboot, time_to_shutdown])
    return body
| 23.153846
| 66
| 0.621262
| 38
| 301
| 4.631579
| 0.578947
| 0.136364
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.245847
| 301
| 12
| 67
| 25.083333
| 0.77533
| 0
| 0
| 0
| 0
| 0
| 0.126246
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ee18d6b7b8309b3efbe99ae9ad5cbadde515b83
| 1,136
|
py
|
Python
|
questions/serializers.py
|
aneumeier/questions
|
fe5451b70d85cd5203b4cb624103c1eb154587d9
|
[
"BSD-3-Clause"
] | null | null | null |
questions/serializers.py
|
aneumeier/questions
|
fe5451b70d85cd5203b4cb624103c1eb154587d9
|
[
"BSD-3-Clause"
] | null | null | null |
questions/serializers.py
|
aneumeier/questions
|
fe5451b70d85cd5203b4cb624103c1eb154587d9
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8
"""
:mod:`questions.serializers` -- serializers
"""
from rest_framework import serializers
from .models import Question, PossibleAnswer
from category.models import Category
class PossibleAnswerSerializer(serializers.ModelSerializer):
class Meta:
model = PossibleAnswer
fields = (
'id',
'possible_answer',
)
class QuestionSerializer(serializers.ModelSerializer):
category = serializers.StringRelatedField()
possible_answer = serializers.StringRelatedField(many=True)
class Meta:
model = Question
fields = (
'id',
'question',
'category',
'possible_answer',
'male_answer_count',
'female_answer_count',
'all_answer_count',
)
class CategorySerializer(serializers.ModelSerializer):
def count(self):
"""
{{ category.question_set.count }}
"""
return self.question_set.count()
class Meta:
model = Category
fields = (
'id',
'title',
)
| 21.846154
| 63
| 0.588028
| 93
| 1,136
| 7.053763
| 0.430108
| 0.118902
| 0.064024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001277
| 0.310739
| 1,136
| 51
| 64
| 22.27451
| 0.836526
| 0.101232
| 0
| 0.333333
| 0
| 0
| 0.110212
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.090909
| 0
| 0.393939
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ee1c1606231abe837f3edc4544d4485e01f3d4a
| 6,484
|
py
|
Python
|
mixcoatl/admin/api_key.py
|
zomGreg/mixcoatl
|
dd8d7e206682955b251d7f858fffee56b11df8c6
|
[
"Apache-2.0"
] | null | null | null |
mixcoatl/admin/api_key.py
|
zomGreg/mixcoatl
|
dd8d7e206682955b251d7f858fffee56b11df8c6
|
[
"Apache-2.0"
] | null | null | null |
mixcoatl/admin/api_key.py
|
zomGreg/mixcoatl
|
dd8d7e206682955b251d7f858fffee56b11df8c6
|
[
"Apache-2.0"
] | null | null | null |
"""
mixcoatl.admin.api_key
----------------------
Implements access to the DCM ApiKey API
"""
from mixcoatl.resource import Resource
from mixcoatl.decorators.lazy import lazy_property
from mixcoatl.decorators.validations import required_attrs
from mixcoatl.utils import uncamel, camelize, camel_keys, uncamel_keys
import json
class ApiKey(Resource):
"""An API key is an access key and secret key that provide API access into DCM."""
PATH = 'admin/ApiKey'
COLLECTION_NAME = 'apiKeys'
PRIMARY_KEY = 'access_key'
def __init__(self, access_key=None, endpoint=None, *args, **kwargs):
Resource.__init__(self, endpoint=endpoint)
self.__access_key = access_key
@property
def access_key(self):
"""The primary identifier of the `ApiKey`. Same as `DCM_ACCESS_KEY`"""
return self.__access_key
@lazy_property
def account(self):
"""`dict` - The account with which this API key is associated."""
return self.__account
@lazy_property
def activation(self):
"""`str` - The date and time when this key was activated."""
return self.__activation
@lazy_property
def expiration(self):
"""`str` - The date and time when this API key should automatically be made inactivate."""
return self.__expiration
@expiration.setter
def expiration(self, e):
self.__expiration = e
@lazy_property
def customer(self):
"""`dict` - The customer to whom this API key belongs."""
return self.__customer
@lazy_property
def customer_management_key(self):
"""`bool` - Identifies whether or not this key can be used across all customer accounts."""
return self.__customer_management_key
@lazy_property
def description(self):
"""`str` - A user-friendly description of this API key."""
return self.__description
@description.setter
def description(self, d):
self.__description = d
@lazy_property
def name(self):
"""`str` - The user-friendly name used to identify the key."""
return self.__name
@name.setter
def name(self, n):
self.__name = n
@lazy_property
def secret_key(self):
"""`str` - The secret part of this API key."""
return self.__secret_key
@lazy_property
def state(self):
"""`str` - The status of the key *(i.e. `ACTIVE`)*"""
return self.__state
@lazy_property
def system_management_key(self):
"""`bool` - Identifies if the key can be used for DCM system management functions"""
return self.__system_management_key
@lazy_property
def user(self):
"""`dict` - The user associated with this API key. Account-level keys return `{'user_id': -1}`"""
return self.__user
@required_attrs(['description', 'name'])
def create(self):
"""Call the API to generate an API key from the current instance of `ApiKey`"""
payload = {
'generateApiKey': [{'description': self.description, 'name': self.name}]}
s = self.post(data=json.dumps(payload))
if self.last_error is None:
self.__access_key = s['apiKeys'][0]['accessKey']
self.load()
else:
raise ApiKeyGenerationException(self.last_error)
def invalidate(self, reason='key deleted via mixcoatl'):
"""Call the API to invalidate the current instance of `ApiKey`
This is the same as deleting the api key
:param reason: the reason for invalidating the key
:type reason: str.
:returns: True
:raises: :class:`ApiKeyInvalidationException`
"""
params = {'reason': reason}
self.delete(params=params)
if self.last_error is None:
return True
else:
raise ApiKeyInvalidationException(self.last_error)
@classmethod
def generate_api_key(cls, key_name, description, expiration=None):
"""Generates a new API key
>>> ApiKey.generate_api_key('my-api-key', 'this is my api key')
{'access_key':'ABCDEFGHIJKL':....}
:param key_name: the name for the key
:type key_name: str.
:param description: the description for the key
:type description: str.
:param expiration: *unused for now*
:type expiration: str.
:returns: :class:`ApiKey`
:raises: :class:`ApiKeyGenerationException`
"""
a = cls()
a.name = key_name
a.description = description
a.create()
return a
@classmethod
def all(cls, keys_only=False, endpoint=None, **kwargs):
"""Get all api keys
.. note::
The keys used to make the request determine results visibility
:param keys_only: Only return `access_key` instead of `ApiKey` objects
:type keys_only: bool.
:param detail: The level of detail to return - `basic` or `extended`
:type detail: str.
:param account_id: Display all system keys belonging to `account_id`
:type account_id: int.
:param user_id: Display all keys belonging to `user_id`
:type user_id: int.
:returns: `list` - of :class:`ApiKey` or :attr:`access_key`
"""
if 'access_key' in kwargs:
r = Resource(cls.PATH + "/" + kwargs['access_key'], endpoint=endpoint)
params = {}
else:
r = Resource(cls.PATH, endpoint=endpoint)
if 'detail' in kwargs:
r.request_details = kwargs['detail']
else:
r.request_details = 'basic'
if 'account_id' in kwargs:
params = {'accountId': kwargs['account_id']}
elif 'user_id' in kwargs:
params = {'userId': kwargs['user_id']}
else:
params = {}
x = r.get(params=params)
if r.last_error is None:
if keys_only is True:
return [i[camelize(cls.PRIMARY_KEY)]
for i in x[cls.COLLECTION_NAME]]
else:
return [type(cls.__name__, (object,), i)
for i in uncamel_keys(x)[uncamel(cls.COLLECTION_NAME)]]
else:
raise ApiKeyException(r.last_error)
class ApiKeyException(BaseException):
pass
class ApiKeyGenerationException(ApiKeyException):
pass
class ApiKeyInvalidationException(ApiKeyException):
pass
| 31.173077
| 105
| 0.610734
| 772
| 6,484
| 4.968912
| 0.226684
| 0.023462
| 0.043014
| 0.01877
| 0.081856
| 0.037539
| 0.01512
| 0.01512
| 0
| 0
| 0
| 0.000431
| 0.284547
| 6,484
| 207
| 106
| 31.323672
| 0.826471
| 0.329272
| 0
| 0.236842
| 0
| 0
| 0.051231
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.175439
| false
| 0.026316
| 0.04386
| 0
| 0.421053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ee3d5ffc425ea5928ae83711b91532c1603b60f
| 7,589
|
py
|
Python
|
psdaq/psdaq/control_gui/QWTable.py
|
ZhenghengLi/lcls2
|
94e75c6536954a58c8937595dcac295163aa1cdf
|
[
"BSD-3-Clause-LBNL"
] | 16
|
2017-11-09T17:10:56.000Z
|
2022-03-09T23:03:10.000Z
|
psdaq/psdaq/control_gui/QWTable.py
|
ZhenghengLi/lcls2
|
94e75c6536954a58c8937595dcac295163aa1cdf
|
[
"BSD-3-Clause-LBNL"
] | 6
|
2017-12-12T19:30:05.000Z
|
2020-07-09T00:28:33.000Z
|
psdaq/psdaq/control_gui/QWTable.py
|
ZhenghengLi/lcls2
|
94e75c6536954a58c8937595dcac295163aa1cdf
|
[
"BSD-3-Clause-LBNL"
] | 25
|
2017-09-18T20:02:43.000Z
|
2022-03-27T22:27:42.000Z
|
"""Class :py:class:`QWTable` is a QTableView->QWidget for tree model
======================================================================
Usage ::
# Run test: python lcls2/psdaq/psdaq/control_gui/QWTable.py
from psdaq.control_gui.QWTable import QWTable
w = QWTable()
Created on 2019-03-28 by Mikhail Dubrovin
Re-designed after copy psana/graphqt/QWTable.py -> psdaq/control_gui/
"""
import logging
logger = logging.getLogger(__name__)
from PyQt5.QtWidgets import QTableView, QVBoxLayout, QAbstractItemView, QSizePolicy
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import Qt, QModelIndex
from psdaq.control_gui.QWIcons import icon
class QWTable(QTableView):
def __init__(self, **kwargs):
parent = kwargs.get('parent', None)
QTableView.__init__(self, parent)
self._name = self.__class__.__name__
icon.set_icons()
self.is_connected_item_changed = False
self._si_model = QStandardItemModel()
self.set_selection_mode()
self.fill_table_model(**kwargs) # defines self._si_model
self.setModel(self._si_model)
self.connect_control()
self.set_style()
def connect_control(self):
self.connect_item_selected_to(self.on_item_selected)
self.clicked.connect(self.on_click)
self.doubleClicked.connect(self.on_double_click)
self.connect_item_changed_to(self.on_item_changed)
#def __del__(self):
# QTableView.__del__(self) - it does not have __del__
def set_selection_mode(self, smode=QAbstractItemView.ExtendedSelection):
logger.debug('Set selection mode: %s'%smode)
self.setSelectionMode(smode)
def connect_item_changed_to(self, recipient):
self._si_model.itemChanged.connect(recipient)
self.is_connected_item_changed = True
def disconnect_item_changed_from(self, recipient):
if self.is_connected_item_changed:
self._si_model.itemChanged.disconnect(recipient)
self.is_connected_item_changed = False
def connect_item_selected_to(self, recipient):
self.selectionModel().currentChanged[QModelIndex, QModelIndex].connect(recipient)
def disconnect_item_selected_from(self, recipient):
#self.selectionModel().selectionChanged[QModelIndex, QModelIndex].disconnect(recipient)
self.selectionModel().currentChanged[QModelIndex, QModelIndex].disconnect(recipient)
def set_style(self):
self.setStyleSheet("QTableView::item:hover{background-color:#00FFAA;}")
#self.setSizePolicy(QSizePolicy::Preferred,QSizePolicy::Fixed)
self.set_exact_widget_size()
def set_exact_widget_size(self):
"""set window size exactly matching actual size of QTableView.
"""
self.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.resizeColumnsToContents()
self.setFixedSize(self.horizontalHeader().length()+self.verticalHeader().width(),\
self.verticalHeader().length()+self.horizontalHeader().height())
def fill_table_model(self, **kwargs):
self.clear_model()
self._si_model.setHorizontalHeaderLabels(['col0', 'col1', 'col2', 'col3', 'col4'])
self._si_model.setVerticalHeaderLabels(['row0', 'row1', 'row2', 'row3'])
for row in range(0, 4):
for col in range(0, 6):
item = QStandardItem("itemA %d %d"%(row,col))
item.setIcon(icon.icon_table)
item.setCheckable(True)
self._si_model.setItem(row,col,item)
if col==2: item.setIcon(icon.icon_folder_closed)
if col==3: item.setText('Some text')
#self._si_model.appendRow(item)
def clear_model(self):
rows,cols = self._si_model.rowCount(), self._si_model.columnCount()
self._si_model.removeRows(0, rows)
self._si_model.removeColumns(0, cols)
def selected_indexes(self):
return self.selectedIndexes()
def selected_items(self):
indexes = self.selectedIndexes()
return [self._si_model.itemFromIndex(i) for i in self.selectedIndexes()]
def getFullNameFromItem(self, item):
#item = self._si_model.itemFromIndex(ind)
ind = self._si_model.indexFromItem(item)
return self.getFullNameFromIndex(ind)
def getFullNameFromIndex(self, ind):
item = self._si_model.itemFromIndex(ind)
if item is None: return None
self._full_name = item.text()
self._getFullName(ind)
return self._full_name
def _getFullName(self, ind):
ind_par = self._si_model.parent(ind)
if(ind_par.column() == -1):
item = self._si_model.itemFromIndex(ind)
            self._full_name = '/' + self._full_name  # fixed: was assigned to self.full_name, a typo that dropped the leading '/'
#logger.debug('Item full name:' + self._full_name)
return self._full_name
else:
item_par = self._si_model.itemFromIndex(ind_par)
self._full_name = item_par.text() + '/' + self._full_name
self._getFullName(ind_par)
# def resizeEvent(self, e):
# logger.debug('resizeEvent')
# QTableView.resizeEvent(self, e)
def closeEvent(self, event): # if the x is clicked
logger.debug('closeEvent')
QTableView.closeEvent(self, event)
def on_click(self, index):
item = self._si_model.itemFromIndex(index)
msg = 'on_click: item in row:%02d text: %s' % (index.row(), item.text())
logger.debug(msg)
def on_double_click(self, index):
item = self._si_model.itemFromIndex(index)
msg = 'on_double_click: item in row:%02d text: %s' % (index.row(), item.text())
logger.debug(msg)
def on_item_selected(self, ind_sel, ind_desel):
#logger.debug("ind selected: ", ind_sel.row(), ind_sel.column())
#logger.debug("ind deselected: ", ind_desel.row(),ind_desel.column())
item = self._si_model.itemFromIndex(ind_sel)
logger.debug('on_item_selected: "%s" is selected' % (item.text() if item is not None else None))
#logger.debug('on_item_selected: %s' % self.getFullNameFromItem(item))
def on_item_changed(self, item):
state = ['UNCHECKED', 'TRISTATE', 'CHECKED'][item.checkState()]
logger.debug('abstract on_item_changed: "%s" at state %s' % (self.getFullNameFromItem(item), state))
def process_selected_items(self):
selitems = self.selected_items()
msg = '%d Selected items:' % len(selitems)
for i in selitems:
msg += '\n %s' % i.text()
logger.info(msg)
if __name__ == '__main__':
def keyPressEvent(self, e):
logger.info('keyPressEvent, key=%s' % e.key())
if e.key() == Qt.Key_Escape:
self.close()
elif e.key() == Qt.Key_S:
self.process_selected_items()
else:
logger.info('Keys:'\
'\n ESC - exit'\
'\n S - show selected items'\
'\n')
if __name__ == '__main__':
import sys
from PyQt5.QtWidgets import QApplication
logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s: %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG)
app = QApplication(sys.argv)
w = QWTable()
#w.setGeometry(100, 100, 700, 300)
w.setWindowTitle('QWTable')
w.move(100,50)
w.show()
app.exec_()
del w
del app
# EOF
| 32.431624
| 122
| 0.644617
| 893
| 7,589
| 5.235162
| 0.263158
| 0.029519
| 0.054118
| 0.04107
| 0.174759
| 0.133904
| 0.046631
| 0.046631
| 0.046631
| 0.046631
| 0
| 0.009221
| 0.228357
| 7,589
| 233
| 123
| 32.570815
| 0.789105
| 0.156542
| 0
| 0.103704
| 0
| 0
| 0.077382
| 0.007691
| 0
| 0
| 0
| 0
| 0
| 1
| 0.17037
| false
| 0
| 0.051852
| 0.007407
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ee3f4cb54a20c54630494d1b68aa8ef7ce66afa
| 1,948
|
py
|
Python
|
src/grailbase/mtloader.py
|
vadmium/grailbrowser
|
ca94e6db2359bcb16c0da256771550d1327c6d33
|
[
"CNRI-Python",
"CNRI-Jython"
] | 9
|
2015-03-23T23:21:42.000Z
|
2021-08-01T01:47:22.000Z
|
src/grailbase/mtloader.py
|
vadmium/grailbrowser
|
ca94e6db2359bcb16c0da256771550d1327c6d33
|
[
"CNRI-Python",
"CNRI-Jython"
] | null | null | null |
src/grailbase/mtloader.py
|
vadmium/grailbrowser
|
ca94e6db2359bcb16c0da256771550d1327c6d33
|
[
"CNRI-Python",
"CNRI-Jython"
] | 11
|
2015-03-23T23:22:22.000Z
|
2020-06-08T14:24:17.000Z
|
"""Extension loader for filetype handlers.
The extension objects provided by MIMEExtensionLoader objects have four
attributes: parse, embed, add_options, and update_settings. The first two
are used as handlers for supporting the MIME type as primary and embedded
resources. The last two are (currently) only used for printing.
"""
__version__ = '$Revision: 2.4 $'
from . import extloader
class MIMEExtensionLoader(extloader.ExtensionLoader):
def find(self, name):
new_name = name.replace("-", "_")
major, minor = tuple(new_name.split("/"))
if minor:
modname = "%s_%s" % (major, minor)
else:
modname = major
mod = self.find_module(modname)
ext = None
if not mod and modname != major:
ext = self.get(major + "/")
elif mod:
ext = MIMETypeExtension(name, mod, modname)
return ext
class MIMETypeExtension:
def __init__(self, type, mod, modname):
self.type = type
self.__load_attr(mod, "parse_" + modname, "parse")
self.__load_attr(mod, "embed_" + modname, "embed")
self.__load_attr(mod, "add_options")
self.__load_attr(mod, "update_settings")
def __repr__(self):
classname = self.__class__.__name__
modulename = self.__class__.__module__
if self.parse and self.embed:
flags = " [displayable, embeddable]"
elif self.embed:
flags = " [embeddable]"
elif self.parse:
flags = " [displayable]"
else:
# not very useful, now is it?
flags = ""
return "<%s.%s for %s%s>" % (modulename, classname, self.type, flags)
def __load_attr(self, mod, name, load_as=None):
load_as = load_as or name
if hasattr(mod, name):
v = getattr(mod, name)
else:
v = None
setattr(self, load_as, v)
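# Illustrative sketch of driving the loader, kept as comments because the
# ExtensionLoader constructor and application wiring are assumptions, not
# shown in this module:
#
#   loader = MIMEExtensionLoader(app.get_package("filetypes"))  # hypothetical wiring
#   ext = loader.get("text/html")          # -> MIMETypeExtension or None
#   if ext is not None and ext.parse:
#       handler = ext.parse                # handler for a primary resource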
| 31.934426
| 77
| 0.602669
| 230
| 1,948
| 4.852174
| 0.378261
| 0.035842
| 0.043011
| 0.053763
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001449
| 0.291581
| 1,948
| 60
| 78
| 32.466667
| 0.807246
| 0.181212
| 0
| 0.068182
| 0
| 0
| 0.089421
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.045455
| 0
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ee482f843ff11fa45eb748eba4af3c343f6b618
| 38,737
|
py
|
Python
|
eventstreams_sdk/adminrest_v1.py
|
IBM/eventstreams-python-sdk
|
cc898e6901c35d1b43e2be7d152c6d770d967b23
|
[
"Apache-2.0"
] | 2
|
2021-05-06T10:18:21.000Z
|
2021-09-17T05:19:57.000Z
|
eventstreams_sdk/eventstreams_sdk/adminrest_v1.py
|
IBM/eventstreams-python-sdk
|
cc898e6901c35d1b43e2be7d152c6d770d967b23
|
[
"Apache-2.0"
] | 1
|
2021-03-16T17:08:20.000Z
|
2021-03-18T18:13:49.000Z
|
eventstreams_sdk/eventstreams_sdk/adminrest_v1.py
|
IBM/eventstreams-python-sdk
|
cc898e6901c35d1b43e2be7d152c6d770d967b23
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# (C) Copyright IBM Corp. 2021.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# IBM OpenAPI SDK Code Generator Version: 3.25.0-2b3f843a-20210115-164628
"""
The administration REST API for IBM Event Streams on Cloud.
"""
from typing import Dict, List
import json
from ibm_cloud_sdk_core import BaseService, DetailedResponse
from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator
from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment
from ibm_cloud_sdk_core.utils import convert_model
from .common import get_sdk_headers
##############################################################################
# Service
##############################################################################
class AdminrestV1(BaseService):
"""The adminrest V1 service."""
DEFAULT_SERVICE_URL = 'https://adminrest.cloud.ibm.com'
DEFAULT_SERVICE_NAME = 'adminrest'
@classmethod
def new_instance(cls,
service_name: str = DEFAULT_SERVICE_NAME,
) -> 'AdminrestV1':
"""
Return a new client for the adminrest service using the specified
parameters and external configuration.
"""
authenticator = get_authenticator_from_environment(service_name)
service = cls(
authenticator
)
service.configure_service(service_name)
return service
def __init__(self,
authenticator: Authenticator = None,
) -> None:
"""
Construct a new client for the adminrest service.
:param Authenticator authenticator: The authenticator specifies the authentication mechanism.
Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md
about initializing the authenticator of your choice.
"""
BaseService.__init__(self,
service_url=self.DEFAULT_SERVICE_URL,
authenticator=authenticator)
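# Illustrative sketch: constructing the client with an explicit authenticator
# instead of new_instance()'s environment lookup. IAMAuthenticator ships with
# ibm_cloud_sdk_core; the API key and host below are placeholders.
#
#   from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
#   client = AdminrestV1(authenticator=IAMAuthenticator('<apikey>'))
#   client.set_service_url('https://<instance-host>')  # override the default URL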
#########################
# default
#########################
def create_topic(self,
*,
name: str = None,
partitions: int = None,
partition_count: int = None,
configs: List['ConfigCreate'] = None,
**kwargs
) -> DetailedResponse:
"""
Create a new topic.
Create a new topic.
:param str name: (optional) The name of topic to be created.
:param int partitions: (optional) The number of partitions.
:param int partition_count: (optional) The number of partitions; this field
takes precedence over 'partitions'. Default value is 1 if not specified.
:param List[ConfigCreate] configs: (optional) The config properties to be
set for the new topic.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if configs is not None:
configs = [convert_model(x) for x in configs]
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='create_topic')
headers.update(sdk_headers)
data = {
'name': name,
'partitions': partitions,
'partition_count': partition_count,
'configs': configs
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/admin/topics'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
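# Illustrative sketch: creating a topic with one config override, assuming a
# `client` built as shown above ('my-topic' is a placeholder name):
#
#   response = client.create_topic(
#       name='my-topic',
#       partitions=3,
#       configs=[ConfigCreate(name='retention.ms', value='86400000')],
#   )
#   print(response.get_status_code())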
def list_topics(self,
*,
topic_filter: str = None,
per_page: int = None,
page: int = None,
**kwargs
) -> DetailedResponse:
"""
Get a list of topics.
Returns a list containing information about all of the Kafka topics that are
defined for an instance of the Event Streams service. If there are currently no
topics defined then an empty list is returned.
:param str topic_filter: (optional) A filter to be applied to the topic
names. A simple filter can be specified as a string with asterisk (`*`)
wildcards representing 0 or more characters, e.g. `topic-name*` will filter
all topic names that begin with the string `topic-name` followed by any
character sequence. A more complex filter pattern can be used by
surrounding a regular expression in forward slash (`/`) delimiters, e.g.
`/topic-name.*/`.
:param int per_page: (optional) The number of topic names to return per page.
:param int page: (optional) The page number to be returned. The number 1
represents the first page. The default value is 1.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `List[TopicDetail]` result
"""
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_topics')
headers.update(sdk_headers)
params = {
'topic_filter': topic_filter,
'per_page': per_page,
'page': page
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/admin/topics'
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
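# Illustrative sketch: the two topic_filter forms described above, a '*'
# wildcard string and a '/'-delimited regular expression:
#
#   topics = client.list_topics(topic_filter='orders-*').get_result()
#   topics = client.list_topics(topic_filter='/orders-[0-9]+/').get_result()
#   for t in topics:
#       print(t['name'])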
def get_topic(self,
topic_name: str,
**kwargs
) -> DetailedResponse:
"""
Get detailed information on a topic.
Get detailed information on a topic.
:param str topic_name: The name of the topic to be described.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `TopicDetail` object
"""
if topic_name is None:
raise ValueError('topic_name must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_topic')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['topic_name']
path_param_values = self.encode_path_vars(topic_name)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/admin/topics/{topic_name}'.format(**path_param_dict)
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
def delete_topic(self,
topic_name: str,
**kwargs
) -> DetailedResponse:
"""
Delete a topic.
Delete a topic.
:param str topic_name: The name of the topic to be deleted.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if topic_name is None:
raise ValueError('topic_name must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='delete_topic')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['topic_name']
path_param_values = self.encode_path_vars(topic_name)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/admin/topics/{topic_name}'.format(**path_param_dict)
request = self.prepare_request(method='DELETE',
url=url,
headers=headers)
response = self.send(request)
return response
def update_topic(self,
topic_name: str,
*,
new_total_partition_count: int = None,
configs: List['ConfigUpdate'] = None,
**kwargs
) -> DetailedResponse:
"""
Increase the number of partitions and/or update one or more topic configuration parameters.
Increase the number of partitions and/or update one or more topic configuration
parameters.
:param str topic_name: The name of the topic to be updated.
:param int new_total_partition_count: (optional) The new total number of
partitions for the topic.
:param List[ConfigUpdate] configs: (optional) The config properties to be
updated for the topic. Valid config keys are 'cleanup.policy',
'retention.ms', 'retention.bytes', 'segment.bytes', 'segment.ms',
'segment.index.bytes'.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if topic_name is None:
raise ValueError('topic_name must be provided')
if configs is not None:
configs = [convert_model(x) for x in configs]
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='update_topic')
headers.update(sdk_headers)
data = {
'new_total_partition_count': new_total_partition_count,
'configs': configs
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['topic_name']
path_param_values = self.encode_path_vars(topic_name)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/admin/topics/{topic_name}'.format(**path_param_dict)
request = self.prepare_request(method='PATCH',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
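# Illustrative sketch: growing a placeholder topic to 6 partitions while
# resetting 'retention.ms' to its default via ConfigUpdate:
#
#   client.update_topic(
#       'my-topic',
#       new_total_partition_count=6,
#       configs=[ConfigUpdate(name='retention.ms', reset_to_default=True)],
#   )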
def get_mirroring_topic_selection(self,
**kwargs
) -> DetailedResponse:
"""
Get current topic selection for mirroring.
Get current topic selection for mirroring.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `MirroringTopicSelection` object
"""
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_mirroring_topic_selection')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/admin/mirroring/topic-selection'
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
def replace_mirroring_topic_selection(self,
*,
includes: List[str] = None,
**kwargs
) -> DetailedResponse:
"""
Replace topic selection for mirroring.
Replace topic selection for mirroring. This operation replaces the complete set of
mirroring topic selections.
:param List[str] includes: (optional)
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `MirroringTopicSelection` object
"""
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='replace_mirroring_topic_selection')
headers.update(sdk_headers)
data = {
'includes': includes
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/admin/mirroring/topic-selection'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
def get_mirroring_active_topics(self,
**kwargs
) -> DetailedResponse:
"""
Get topics that are being actively mirrored.
Get topics that are being actively mirrored.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `MirroringActiveTopics` object
"""
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_mirroring_active_topics')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/admin/mirroring/active-topics'
request = self.prepare_request(method='GET',
url=url,
headers=headers)
response = self.send(request)
return response
##############################################################################
# Models
##############################################################################
class ReplicaAssignmentBrokers():
"""
ReplicaAssignmentBrokers.
:attr List[int] replicas: (optional)
"""
def __init__(self,
*,
replicas: List[int] = None) -> None:
"""
Initialize a ReplicaAssignmentBrokers object.
:param List[int] replicas: (optional)
"""
self.replicas = replicas
@classmethod
def from_dict(cls, _dict: Dict) -> 'ReplicaAssignmentBrokers':
"""Initialize a ReplicaAssignmentBrokers object from a json dictionary."""
args = {}
if 'replicas' in _dict:
args['replicas'] = _dict.get('replicas')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ReplicaAssignmentBrokers object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'replicas') and self.replicas is not None:
_dict['replicas'] = self.replicas
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ReplicaAssignmentBrokers object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ReplicaAssignmentBrokers') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ReplicaAssignmentBrokers') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfigCreate():
"""
ConfigCreate.
:attr str name: (optional) The name of the config property.
:attr str value: (optional) The value for a config property.
"""
def __init__(self,
*,
name: str = None,
value: str = None) -> None:
"""
Initialize a ConfigCreate object.
:param str name: (optional) The name of the config property.
:param str value: (optional) The value for a config property.
"""
self.name = name
self.value = value
@classmethod
def from_dict(cls, _dict: Dict) -> 'ConfigCreate':
"""Initialize a ConfigCreate object from a json dictionary."""
args = {}
if 'name' in _dict:
args['name'] = _dict.get('name')
if 'value' in _dict:
args['value'] = _dict.get('value')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ConfigCreate object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = self.value
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ConfigCreate object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ConfigCreate') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ConfigCreate') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ConfigUpdate():
"""
ConfigUpdate.
:attr str name: (optional) The name of the config property.
:attr str value: (optional) The value for a config property.
:attr bool reset_to_default: (optional) When true, the value of the config
property is reset to its default value.
"""
def __init__(self,
*,
name: str = None,
value: str = None,
reset_to_default: bool = None) -> None:
"""
Initialize a ConfigUpdate object.
:param str name: (optional) The name of the config property.
:param str value: (optional) The value for a config property.
:param bool reset_to_default: (optional) When true, the value of the config
property is reset to its default value.
"""
self.name = name
self.value = value
self.reset_to_default = reset_to_default
@classmethod
def from_dict(cls, _dict: Dict) -> 'ConfigUpdate':
"""Initialize a ConfigUpdate object from a json dictionary."""
args = {}
if 'name' in _dict:
args['name'] = _dict.get('name')
if 'value' in _dict:
args['value'] = _dict.get('value')
if 'reset_to_default' in _dict:
args['reset_to_default'] = _dict.get('reset_to_default')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ConfigUpdate object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'value') and self.value is not None:
_dict['value'] = self.value
if hasattr(self, 'reset_to_default') and self.reset_to_default is not None:
_dict['reset_to_default'] = self.reset_to_default
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ConfigUpdate object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ConfigUpdate') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ConfigUpdate') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
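# Illustrative sketch: the model classes round-trip through plain dicts, which
# is how request payloads are assembled:
#
#   cu = ConfigUpdate(name='cleanup.policy', value='compact')
#   assert ConfigUpdate.from_dict(cu.to_dict()) == cu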
class MirroringActiveTopics():
"""
Topics that are being actively mirrored.
:attr List[str] active_topics: (optional)
"""
def __init__(self,
*,
active_topics: List[str] = None) -> None:
"""
Initialize a MirroringActiveTopics object.
:param List[str] active_topics: (optional)
"""
self.active_topics = active_topics
@classmethod
def from_dict(cls, _dict: Dict) -> 'MirroringActiveTopics':
"""Initialize a MirroringActiveTopics object from a json dictionary."""
args = {}
if 'active_topics' in _dict:
args['active_topics'] = _dict.get('active_topics')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a MirroringActiveTopics object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'active_topics') and self.active_topics is not None:
_dict['active_topics'] = self.active_topics
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this MirroringActiveTopics object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'MirroringActiveTopics') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'MirroringActiveTopics') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class MirroringTopicSelection():
"""
Mirroring topic selection payload.
:attr List[str] includes: (optional)
"""
def __init__(self,
*,
includes: List[str] = None) -> None:
"""
Initialize a MirroringTopicSelection object.
:param List[str] includes: (optional)
"""
self.includes = includes
@classmethod
def from_dict(cls, _dict: Dict) -> 'MirroringTopicSelection':
"""Initialize a MirroringTopicSelection object from a json dictionary."""
args = {}
if 'includes' in _dict:
args['includes'] = _dict.get('includes')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a MirroringTopicSelection object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'includes') and self.includes is not None:
_dict['includes'] = self.includes
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this MirroringTopicSelection object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'MirroringTopicSelection') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'MirroringTopicSelection') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ReplicaAssignment():
"""
ReplicaAssignment.
:attr int id: (optional) The ID of the partition.
:attr ReplicaAssignmentBrokers brokers: (optional)
"""
def __init__(self,
*,
id: int = None,
brokers: 'ReplicaAssignmentBrokers' = None) -> None:
"""
Initialize a ReplicaAssignment object.
:param int id: (optional) The ID of the partition.
:param ReplicaAssignmentBrokers brokers: (optional)
"""
self.id = id
self.brokers = brokers
@classmethod
def from_dict(cls, _dict: Dict) -> 'ReplicaAssignment':
"""Initialize a ReplicaAssignment object from a json dictionary."""
args = {}
if 'id' in _dict:
args['id'] = _dict.get('id')
if 'brokers' in _dict:
args['brokers'] = ReplicaAssignmentBrokers.from_dict(_dict.get('brokers'))
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ReplicaAssignment object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
if hasattr(self, 'brokers') and self.brokers is not None:
_dict['brokers'] = self.brokers.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ReplicaAssignment object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ReplicaAssignment') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ReplicaAssignment') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TopicConfigs():
"""
TopicConfigs.
:attr str cleanup_policy: (optional) The value of config property
'cleanup.policy'.
:attr str min_insync_replicas: (optional) The value of config property
'min.insync.replicas'.
:attr str retention_bytes: (optional) The value of config property
'retention.bytes'.
:attr str retention_ms: (optional) The value of config property 'retention.ms'.
:attr str segment_bytes: (optional) The value of config property
'segment.bytes'.
:attr str segment_index_bytes: (optional) The value of config property
'segment.index.bytes'.
:attr str segment_ms: (optional) The value of config property 'segment.ms'.
"""
def __init__(self,
*,
cleanup_policy: str = None,
min_insync_replicas: str = None,
retention_bytes: str = None,
retention_ms: str = None,
segment_bytes: str = None,
segment_index_bytes: str = None,
segment_ms: str = None) -> None:
"""
Initialize a TopicConfigs object.
:param str cleanup_policy: (optional) The value of config property
'cleanup.policy'.
:param str min_insync_replicas: (optional) The value of config property
'min.insync.replicas'.
:param str retention_bytes: (optional) The value of config property
'retention.bytes'.
:param str retention_ms: (optional) The value of config property
'retention.ms'.
:param str segment_bytes: (optional) The value of config property
'segment.bytes'.
:param str segment_index_bytes: (optional) The value of config property
'segment.index.bytes'.
:param str segment_ms: (optional) The value of config property
'segment.ms'.
"""
self.cleanup_policy = cleanup_policy
self.min_insync_replicas = min_insync_replicas
self.retention_bytes = retention_bytes
self.retention_ms = retention_ms
self.segment_bytes = segment_bytes
self.segment_index_bytes = segment_index_bytes
self.segment_ms = segment_ms
@classmethod
def from_dict(cls, _dict: Dict) -> 'TopicConfigs':
"""Initialize a TopicConfigs object from a json dictionary."""
args = {}
if 'cleanup.policy' in _dict:
args['cleanup_policy'] = _dict.get('cleanup.policy')
if 'min.insync.replicas' in _dict:
args['min_insync_replicas'] = _dict.get('min.insync.replicas')
if 'retention.bytes' in _dict:
args['retention_bytes'] = _dict.get('retention.bytes')
if 'retention.ms' in _dict:
args['retention_ms'] = _dict.get('retention.ms')
if 'segment.bytes' in _dict:
args['segment_bytes'] = _dict.get('segment.bytes')
if 'segment.index.bytes' in _dict:
args['segment_index_bytes'] = _dict.get('segment.index.bytes')
if 'segment.ms' in _dict:
args['segment_ms'] = _dict.get('segment.ms')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a TopicConfigs object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'cleanup_policy') and self.cleanup_policy is not None:
_dict['cleanup.policy'] = self.cleanup_policy
if hasattr(self, 'min_insync_replicas') and self.min_insync_replicas is not None:
_dict['min.insync.replicas'] = self.min_insync_replicas
if hasattr(self, 'retention_bytes') and self.retention_bytes is not None:
_dict['retention.bytes'] = self.retention_bytes
if hasattr(self, 'retention_ms') and self.retention_ms is not None:
_dict['retention.ms'] = self.retention_ms
if hasattr(self, 'segment_bytes') and self.segment_bytes is not None:
_dict['segment.bytes'] = self.segment_bytes
if hasattr(self, 'segment_index_bytes') and self.segment_index_bytes is not None:
_dict['segment.index.bytes'] = self.segment_index_bytes
if hasattr(self, 'segment_ms') and self.segment_ms is not None:
_dict['segment.ms'] = self.segment_ms
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this TopicConfigs object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'TopicConfigs') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'TopicConfigs') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class TopicDetail():
"""
TopicDetail.
:attr str name: (optional) The name of the topic.
:attr int partitions: (optional) The number of partitions.
:attr int replication_factor: (optional) The replication factor.
:attr int retention_ms: (optional) The value of config property 'retention.ms'.
:attr str cleanup_policy: (optional) The value of config property
'cleanup.policy'.
:attr TopicConfigs configs: (optional)
:attr List[ReplicaAssignment] replica_assignments: (optional) The replica
assignment of the topic.
"""
def __init__(self,
*,
name: str = None,
partitions: int = None,
replication_factor: int = None,
retention_ms: int = None,
cleanup_policy: str = None,
configs: 'TopicConfigs' = None,
replica_assignments: List['ReplicaAssignment'] = None) -> None:
"""
Initialize a TopicDetail object.
:param str name: (optional) The name of the topic.
:param int partitions: (optional) The number of partitions.
:param int replication_factor: (optional) The replication factor.
:param int retention_ms: (optional) The value of config property
'retention.ms'.
:param str cleanup_policy: (optional) The value of config property
'cleanup.policy'.
:param TopicConfigs configs: (optional)
:param List[ReplicaAssignment] replica_assignments: (optional) The replica
assignment of the topic.
"""
self.name = name
self.partitions = partitions
self.replication_factor = replication_factor
self.retention_ms = retention_ms
self.cleanup_policy = cleanup_policy
self.configs = configs
self.replica_assignments = replica_assignments
@classmethod
def from_dict(cls, _dict: Dict) -> 'TopicDetail':
"""Initialize a TopicDetail object from a json dictionary."""
args = {}
if 'name' in _dict:
args['name'] = _dict.get('name')
if 'partitions' in _dict:
args['partitions'] = _dict.get('partitions')
if 'replicationFactor' in _dict:
args['replication_factor'] = _dict.get('replicationFactor')
if 'retentionMs' in _dict:
args['retention_ms'] = _dict.get('retentionMs')
if 'cleanupPolicy' in _dict:
args['cleanup_policy'] = _dict.get('cleanupPolicy')
if 'configs' in _dict:
args['configs'] = TopicConfigs.from_dict(_dict.get('configs'))
if 'replicaAssignments' in _dict:
args['replica_assignments'] = [ReplicaAssignment.from_dict(x) for x in _dict.get('replicaAssignments')]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a TopicDetail object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'partitions') and self.partitions is not None:
_dict['partitions'] = self.partitions
if hasattr(self, 'replication_factor') and self.replication_factor is not None:
_dict['replicationFactor'] = self.replication_factor
if hasattr(self, 'retention_ms') and self.retention_ms is not None:
_dict['retentionMs'] = self.retention_ms
if hasattr(self, 'cleanup_policy') and self.cleanup_policy is not None:
_dict['cleanupPolicy'] = self.cleanup_policy
if hasattr(self, 'configs') and self.configs is not None:
_dict['configs'] = self.configs.to_dict()
if hasattr(self, 'replica_assignments') and self.replica_assignments is not None:
_dict['replicaAssignments'] = [x.to_dict() for x in self.replica_assignments]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this TopicDetail object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'TopicDetail') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'TopicDetail') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
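# Illustrative sketch: TopicDetail.from_dict() maps the service's camelCase
# JSON keys onto snake_case model attributes:
#
#   detail = TopicDetail.from_dict({
#       'name': 'my-topic',
#       'replicationFactor': 3,
#       'retentionMs': 86400000,
#   })
#   assert detail.replication_factor == 3 and detail.retention_ms == 86400000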
| 37.755361
| 115
| 0.596226
| 4,282
| 38,737
| 5.205044
| 0.074731
| 0.021716
| 0.021536
| 0.013999
| 0.689295
| 0.658471
| 0.631281
| 0.587536
| 0.559494
| 0.549847
| 0
| 0.002026
| 0.299249
| 38,737
| 1,025
| 116
| 37.792195
| 0.819046
| 0.324677
| 0
| 0.637343
| 0
| 0
| 0.115538
| 0.021401
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132855
| false
| 0
| 0.012567
| 0
| 0.29623
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ee595b8e0ae941415e84128e8515b5e48db04fe
| 2,013
|
py
|
Python
|
ml-scripts/dump-data-to-learn.py
|
thejoeejoee/SUI-MIT-VUT-2020-2021
|
aee307aa772c5a0e97578da5ebedd3e2cd39ab91
|
[
"MIT"
] | null | null | null |
ml-scripts/dump-data-to-learn.py
|
thejoeejoee/SUI-MIT-VUT-2020-2021
|
aee307aa772c5a0e97578da5ebedd3e2cd39ab91
|
[
"MIT"
] | null | null | null |
ml-scripts/dump-data-to-learn.py
|
thejoeejoee/SUI-MIT-VUT-2020-2021
|
aee307aa772c5a0e97578da5ebedd3e2cd39ab91
|
[
"MIT"
] | 1
|
2021-01-15T19:01:45.000Z
|
2021-01-15T19:01:45.000Z
|
#!/usr/bin/env python3
# Project: VUT FIT SUI Project - Dice Wars
# Authors:
# - Josef Kolář <xkolar71@stud.fit.vutbr.cz>
# - Dominik Harmim <xharmi00@stud.fit.vutbr.cz>
# - Petr Kapoun <xkapou04@stud.fit.vutbr.cz>
# - Jindřich Šesták <xsesta05@stud.fit.vutbr.cz>
# Year: 2020
# Description: Generates game configurations.
import random
import sys
from argparse import ArgumentParser
import time
from signal import signal, SIGCHLD
from utils import run_ai_only_game, BoardDefinition
parser = ArgumentParser(prog='Dice_Wars')
parser.add_argument('-p', '--port', help="Server port", type=int, default=5005)
parser.add_argument('-a', '--address', help="Server address", default='127.0.0.1')
procs = []
def signal_handler(signum, frame):
""" Handler for SIGCHLD signal that terminates server and clients. """
for p in procs:
try:
p.kill()
except ProcessLookupError:
pass
PLAYING_AIs = [
'xkolar71_orig',
'xkolar71_2',
'xkolar71_3',
'xkolar71_4',
]
def board_definitions():
while True:
random.seed(int(time.time()))
yield BoardDefinition(random.randint(1, 10 ** 10), random.randint(1, 10 ** 10), random.randint(1, 10 ** 10))
def main():
args = parser.parse_args()
signal(SIGCHLD, signal_handler)
boards_played = 0
try:
for board_definition in board_definitions():
boards_played += 1
run_ai_only_game(
args.port, args.address, procs, PLAYING_AIs,
board_definition,
fixed=random.randint(1, 10 ** 10),
client_seed=random.randint(1, 10 ** 10),
debug=True, logdir='logs',
)
print(f'Played {boards_played} games.', file=sys.stderr)
except (Exception, KeyboardInterrupt) as e:
sys.stderr.write("Breaking the tournament because of {}\n".format(repr(e)))
for p in procs:
p.kill()
raise
if __name__ == '__main__':
main()
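# Illustrative sketch: board_definitions() is an infinite generator, so a
# bounded sample (e.g. for a smoke test) has to be sliced explicitly:
#
#   from itertools import islice
#   for bd in islice(board_definitions(), 5):
#       print(bd)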
| 25.807692
| 116
| 0.623448
| 249
| 2,013
| 4.903614
| 0.493976
| 0.053235
| 0.05733
| 0.06552
| 0.07371
| 0.044226
| 0.044226
| 0.044226
| 0.044226
| 0.044226
| 0
| 0.040505
| 0.251863
| 2,013
| 77
| 117
| 26.142857
| 0.770252
| 0.193741
| 0
| 0.125
| 0
| 0
| 0.11505
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0.020833
| 0.125
| 0
| 0.1875
| 0.020833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ee5bd4b8f792f655e11610a4d7c25b151f76873
| 4,041
|
py
|
Python
|
testing/conftest.py
|
davidszotten/pdbpp
|
3d90d83902e1d19840d0419362a41c654f93251e
|
[
"BSD-3-Clause"
] | null | null | null |
testing/conftest.py
|
davidszotten/pdbpp
|
3d90d83902e1d19840d0419362a41c654f93251e
|
[
"BSD-3-Clause"
] | null | null | null |
testing/conftest.py
|
davidszotten/pdbpp
|
3d90d83902e1d19840d0419362a41c654f93251e
|
[
"BSD-3-Clause"
] | null | null | null |
import functools
import sys
from contextlib import contextmanager
import pytest
_orig_trace = None
def pytest_configure():
global _orig_trace
_orig_trace = sys.gettrace()
@pytest.fixture(scope="session", autouse=True)
def term():
"""Configure TERM for predictable output from Pygments."""
from _pytest.monkeypatch import MonkeyPatch
m = MonkeyPatch()
m.setenv("TERM", "xterm-256color")
yield m
m.undo()
# if _orig_trace and not hasattr(sys, "pypy_version_info"):
# Fails with PyPy2 (https://travis-ci.org/antocuni/pdb/jobs/509624590)?!
@pytest.fixture(autouse=True)
def restore_settrace(monkeypatch):
"""(Re)store sys.gettrace after test run.
This is required to re-enable coverage tracking.
"""
assert sys.gettrace() is _orig_trace
orig_settrace = sys.settrace
# Wrap sys.settrace to restore original tracing function (coverage)
# with `sys.settrace(None)`.
def settrace(func):
if func is None:
orig_settrace(_orig_trace)
else:
orig_settrace(func)
monkeypatch.setattr("sys.settrace", settrace)
yield
newtrace = sys.gettrace()
if newtrace is not _orig_trace:
sys.settrace(_orig_trace)
assert newtrace is None
@pytest.fixture(scope="session")
def _tmphome_path(tmpdir_factory):
return tmpdir_factory.mktemp("tmphome")
@pytest.fixture(autouse=sys.version_info < (3, 6))
def tmphome(request, monkeypatch):
"""Set up HOME in a temporary directory.
This ignores any real ~/.pdbrc.py then, and seems to be
required also with linecache on py27, where it would read contents from
~/.pdbrc?!.
"""
# Use tmpdir from testdir, if it is used.
if "testdir" in request.fixturenames:
tmpdir = request.getfixturevalue("testdir").tmpdir
else:
tmpdir = request.getfixturevalue("_tmphome_path")
monkeypatch.setenv("HOME", str(tmpdir))
monkeypatch.setenv("USERPROFILE", str(tmpdir))
with tmpdir.as_cwd():
yield tmpdir
@pytest.fixture(params=("pyrepl", "readline"), scope="session")
def readline_param(request):
from _pytest.monkeypatch import MonkeyPatch
m = MonkeyPatch()
if request.param == "pyrepl":
try:
import pyrepl.readline # noqa: F401
except ImportError as exc:
pytest.skip(msg="pyrepl not available: {}".format(exc))
m.setattr("fancycompleter.DefaultConfig.prefer_pyrepl", True)
else:
m.setattr("fancycompleter.DefaultConfig.prefer_pyrepl", False)
return request.param
@pytest.fixture
def monkeypatch_readline(request, monkeypatch, readline_param):
"""Patch readline to return given results."""
def inner(line, begidx, endidx):
if readline_param == "pyrepl":
readline = "pyrepl.readline"
else:
assert readline_param == "readline"
readline = "readline"
monkeypatch.setattr("%s.get_line_buffer" % readline, lambda: line)
monkeypatch.setattr("%s.get_begidx" % readline, lambda: begidx)
monkeypatch.setattr("%s.get_endidx" % readline, lambda: endidx)
return inner
@pytest.fixture
def monkeypatch_pdb_methods(monkeypatch):
def mock(method, *args, **kwargs):
print("=== %s(%s, %s)" % (method, args, kwargs))
for mock_method in ("set_trace", "set_continue"):
monkeypatch.setattr(
"pdb.pdb.Pdb.%s" % mock_method, functools.partial(mock, mock_method)
)
@pytest.fixture
def monkeypatch_importerror(monkeypatch):
@contextmanager
def cm(mocked_imports):
orig_import = __import__
def import_mock(name, *args):
if name in mocked_imports:
raise ImportError
return orig_import(name, *args)
with monkeypatch.context() as m:
if sys.version_info >= (3,):
m.setattr('builtins.__import__', import_mock)
else:
m.setattr('__builtin__.__import__', import_mock)
yield m
return cm
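# Illustrative sketch of a test driving monkeypatch_readline; `complete` is a
# hypothetical stand-in for whatever completion entry point is under test:
#
#   def test_tab_completion(monkeypatch_readline):
#       monkeypatch_readline("p foo.ba", 6, 8)
#       # complete("ba") would now observe the patched readline state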
| 28.0625
| 80
| 0.659985
| 473
| 4,041
| 5.477801
| 0.336152
| 0.027789
| 0.018526
| 0.031262
| 0.074875
| 0.074875
| 0.038595
| 0
| 0
| 0
| 0
| 0.006752
| 0.230389
| 4,041
| 143
| 81
| 28.258741
| 0.826367
| 0.156892
| 0
| 0.152174
| 0
| 0
| 0.118927
| 0.031595
| 0
| 0
| 0
| 0
| 0.032609
| 1
| 0.152174
| false
| 0
| 0.184783
| 0.01087
| 0.391304
| 0.01087
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ee60185bcf81d5e6fbf52b5b69fb40616c44fa1
| 1,279
|
py
|
Python
|
thing_gym_ros/envs/utils.py
|
utiasSTARS/thing-gym-ros
|
6e8a034ac0d1686f29bd29e2aaa63f39a5b188d4
|
[
"MIT"
] | 1
|
2021-12-25T01:10:32.000Z
|
2021-12-25T01:10:32.000Z
|
thing_gym_ros/envs/utils.py
|
utiasSTARS/thing-gym-ros
|
6e8a034ac0d1686f29bd29e2aaa63f39a5b188d4
|
[
"MIT"
] | null | null | null |
thing_gym_ros/envs/utils.py
|
utiasSTARS/thing-gym-ros
|
6e8a034ac0d1686f29bd29e2aaa63f39a5b188d4
|
[
"MIT"
] | null | null | null |
""" Various generic env utilties. """
def center_crop_img(img, crop_zoom):
""" crop_zoom is amount to "zoom" into the image. E.g. 2.0 would cut out half of the width,
half of the height, and only give the center. """
raw_height, raw_width = img.shape[:2]
center = raw_height // 2, raw_width // 2
crop_size = raw_height // crop_zoom, raw_width // crop_zoom
min_y, max_y = int(center[0] - crop_size[0] // 2), int(center[0] + crop_size[0] // 2)
min_x, max_x = int(center[1] - crop_size[1] // 2), int(center[1] + crop_size[1] // 2)
img_cropped = img[min_y:max_y, min_x:max_x]
return img_cropped
def crop_img(img, relative_corners):
""" relative_corners are floats between 0 and 1 designating where the corners of a crop box
should be ([[top_left_x, top_left_y], [bottom_right_x, bottom_right_y]]).
e.g. [[0, 0], [1, 1]] would be the full image, [[0.5, 0.5], [1, 1]] would be bottom right."""
rc = relative_corners
raw_height, raw_width = img.shape[:2]
top_left_pix = [int(rc[0][0] * raw_width), int(rc[0][1] * raw_height)]
bottom_right_pix = [int(rc[1][0] * raw_width), int(rc[1][1] * raw_height)]
img_cropped = img[top_left_pix[1]:bottom_right_pix[1], top_left_pix[0]:bottom_right_pix[0]]
return img_cropped
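# Quick self-check of both crops on a synthetic image; numpy is assumed to be
# available (the module itself does not import it).
if __name__ == '__main__':
    import numpy as np
    img = np.zeros((100, 200, 3), dtype=np.uint8)
    # center half in each dimension
    assert center_crop_img(img, 2.0).shape[:2] == (50, 100)
    # bottom-right quadrant
    assert crop_img(img, [[0.5, 0.5], [1, 1]]).shape[:2] == (50, 100)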
| 53.291667
| 97
| 0.6638
| 233
| 1,279
| 3.390558
| 0.253219
| 0.068354
| 0.037975
| 0.043038
| 0.202532
| 0.167089
| 0.167089
| 0
| 0
| 0
| 0
| 0.04023
| 0.183737
| 1,279
| 24
| 98
| 53.291667
| 0.716475
| 0.326818
| 0
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ee83db3e5e99371f123bcdb50f3fcc2018ce29b
| 4,947
|
py
|
Python
|
auto_nag/tests/test_round_robin.py
|
Mozilla-GitHub-Standards/f9c78643f5862cda82001d4471255ac29ef0c6b2c6171e2c1cbecab3d2fef4dd
|
28d999fcba9ad47d1dd0b2222880b71726ddd47c
|
[
"BSD-3-Clause"
] | null | null | null |
auto_nag/tests/test_round_robin.py
|
Mozilla-GitHub-Standards/f9c78643f5862cda82001d4471255ac29ef0c6b2c6171e2c1cbecab3d2fef4dd
|
28d999fcba9ad47d1dd0b2222880b71726ddd47c
|
[
"BSD-3-Clause"
] | null | null | null |
auto_nag/tests/test_round_robin.py
|
Mozilla-GitHub-Standards/f9c78643f5862cda82001d4471255ac29ef0c6b2c6171e2c1cbecab3d2fef4dd
|
28d999fcba9ad47d1dd0b2222880b71726ddd47c
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from mock import patch
from auto_nag.people import People
from auto_nag.round_robin import BadFallback, RoundRobin
class TestRoundRobin(unittest.TestCase):
config = {
'doc': 'The triagers need to have a \'Fallback\' entry.',
'triagers': {
'A B': {'bzmail': 'ab@mozilla.com'},
'C D': {'bzmail': 'cd@mozilla.com'},
'E F': {'bzmail': 'ef@mozilla.com'},
'Fallback': {'bzmail': 'gh@mozilla.com'},
},
'components': {'P1::C1': 'default', 'P2::C2': 'default', 'P3::C3': 'special'},
'default': {
'doc': 'All the dates are the duty end dates.',
'2019-02-21': 'A B',
'2019-02-28': 'C D',
'2019-03-07': 'E F',
},
'special': {
'doc': 'All the dates are the duty end dates.',
'2019-02-21': 'E F',
'2019-02-28': 'A B',
'2019-03-07': 'C D',
},
}
people = People(
[
{
'mail': 'gh@mozilla.com',
'cn': 'G H',
'ismanager': 'FALSE',
'title': 'nothing',
}
]
)
def mk_bug(self, pc):
p, c = pc.split('::')
return {
'product': p,
'component': c,
'triage_owner': 'ij@mozilla.com',
'triage_owner_detail': {'nick': 'ij'},
}
@staticmethod
def _get_nick(x, bzmail):
return bzmail.split('@')[0]
def test_get(self):
with patch.object(RoundRobin, 'get_nick', new=TestRoundRobin._get_nick):
rr = RoundRobin(
rr={'team': TestRoundRobin.config}, people=TestRoundRobin.people
)
assert rr.get(self.mk_bug('P1::C1'), '2019-02-17') == (
'ab@mozilla.com',
'ab',
)
assert rr.get(self.mk_bug('P2::C2'), '2019-02-17') == (
'ab@mozilla.com',
'ab',
)
assert rr.get(self.mk_bug('P3::C3'), '2019-02-17') == (
'ef@mozilla.com',
'ef',
)
assert rr.get(self.mk_bug('P1::C1'), '2019-02-24') == (
'cd@mozilla.com',
'cd',
)
assert rr.get(self.mk_bug('P2::C2'), '2019-02-24') == (
'cd@mozilla.com',
'cd',
)
assert rr.get(self.mk_bug('P3::C3'), '2019-02-24') == (
'ab@mozilla.com',
'ab',
)
assert rr.get(self.mk_bug('P1::C1'), '2019-02-28') == (
'cd@mozilla.com',
'cd',
)
assert rr.get(self.mk_bug('P2::C2'), '2019-02-28') == (
'cd@mozilla.com',
'cd',
)
assert rr.get(self.mk_bug('P3::C3'), '2019-02-28') == (
'ab@mozilla.com',
'ab',
)
assert rr.get(self.mk_bug('P1::C1'), '2019-03-05') == (
'ef@mozilla.com',
'ef',
)
assert rr.get(self.mk_bug('P2::C2'), '2019-03-05') == (
'ef@mozilla.com',
'ef',
)
assert rr.get(self.mk_bug('P3::C3'), '2019-03-05') == (
'cd@mozilla.com',
'cd',
)
assert rr.get(self.mk_bug('P1::C1'), '2019-03-08') == (
'gh@mozilla.com',
'gh',
)
assert rr.get(self.mk_bug('P2::C2'), '2019-03-08') == (
'gh@mozilla.com',
'gh',
)
assert rr.get(self.mk_bug('P3::C3'), '2019-03-08') == (
'gh@mozilla.com',
'gh',
)
assert rr.get(self.mk_bug('Foo::Bar'), '2019-03-01') == (
'ij@mozilla.com',
'ij',
)
def test_get_who_to_nag(self):
rr = RoundRobin(
rr={'team': TestRoundRobin.config}, people=TestRoundRobin.people
)
assert rr.get_who_to_nag('2019-02-25') == {}
assert rr.get_who_to_nag('2019-02-28') == {'gh@mozilla.com': ['']}
assert rr.get_who_to_nag('2019-03-05') == {'gh@mozilla.com': ['']}
assert rr.get_who_to_nag('2019-03-07') == {'gh@mozilla.com': ['']}
assert rr.get_who_to_nag('2019-03-10') == {'gh@mozilla.com': ['']}
with patch.object(RoundRobin, 'is_mozilla', return_value=False):
rr = RoundRobin(
rr={'team': TestRoundRobin.config}, people=TestRoundRobin.people
)
self.assertRaises(BadFallback, rr.get_who_to_nag, '2019-03-01')
| 31.711538
| 86
| 0.439256
| 573
| 4,947
| 3.699825
| 0.230366
| 0.122642
| 0.108962
| 0.113208
| 0.54434
| 0.54434
| 0.54434
| 0.535377
| 0.516981
| 0.486792
| 0
| 0.086928
| 0.381443
| 4,947
| 155
| 87
| 31.916129
| 0.605882
| 0.041641
| 0
| 0.290076
| 0
| 0
| 0.241554
| 0
| 0
| 0
| 0
| 0
| 0.167939
| 1
| 0.030534
| false
| 0
| 0.030534
| 0.007634
| 0.099237
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ee87adc70e779b9ff0da63b63fc29dd8e09baec
| 21,473
|
py
|
Python
|
scipy/weave/inline_tools.py
|
tacaswell/scipy
|
4d7e924a319299e39c9a9514e021fbfdfceb854e
|
[
"BSD-3-Clause"
] | 1
|
2017-01-18T20:32:35.000Z
|
2017-01-18T20:32:35.000Z
|
scipy/weave/inline_tools.py
|
tacaswell/scipy
|
4d7e924a319299e39c9a9514e021fbfdfceb854e
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/weave/inline_tools.py
|
tacaswell/scipy
|
4d7e924a319299e39c9a9514e021fbfdfceb854e
|
[
"BSD-3-Clause"
] | null | null | null |
# should re-write compiled functions to take a local and global dict
# as input.
from __future__ import absolute_import, print_function
import sys
import os
from . import ext_tools
from . import catalog
from . import common_info
from numpy.core.multiarray import _get_ndarray_c_version
ndarray_api_version = '/* NDARRAY API VERSION %x */' % (_get_ndarray_c_version(),)
# not an easy way for the user_path_list to come in here.
# the PYTHONCOMPILED environment variable offers the most hope.
function_catalog = catalog.catalog()
class inline_ext_function(ext_tools.ext_function):
# Some specialization is needed for inline extension functions
def function_declaration_code(self):
code = 'static PyObject* %s(PyObject*self, PyObject* args)\n{\n'
return code % self.name
def template_declaration_code(self):
code = 'template<class T>\n' \
'static PyObject* %s(PyObject*self, PyObject* args)\n{\n'
return code % self.name
def parse_tuple_code(self):
""" Create code block for PyArg_ParseTuple. Variable declarations
for all PyObjects are done also.
This code got a lot uglier when I added local_dict...
"""
declare_return = 'py::object return_val;\n' \
'int exception_occurred = 0;\n' \
'PyObject *py__locals = NULL;\n' \
'PyObject *py__globals = NULL;\n'
py_objects = ', '.join(self.arg_specs.py_pointers())
if py_objects:
declare_py_objects = 'PyObject ' + py_objects + ';\n'
else:
declare_py_objects = ''
py_vars = ' = '.join(self.arg_specs.py_variables())
if py_vars:
init_values = py_vars + ' = NULL;\n\n'
else:
init_values = ''
parse_tuple = 'if(!PyArg_ParseTuple(args,"OO:compiled_func",'\
'&py__locals,'\
'&py__globals))\n'\
' return NULL;\n'
return declare_return + declare_py_objects + \
init_values + parse_tuple
def arg_declaration_code(self):
"""Return the declaration code as a string."""
arg_strings = [arg.declaration_code(inline=1)
for arg in self.arg_specs]
return "".join(arg_strings)
def arg_cleanup_code(self):
"""Return the cleanup code as a string."""
arg_strings = [arg.cleanup_code() for arg in self.arg_specs]
return "".join(arg_strings)
def arg_local_dict_code(self):
"""Return the code to create the local dict as a string."""
arg_strings = [arg.local_dict_code() for arg in self.arg_specs]
return "".join(arg_strings)
def function_code(self):
from .ext_tools import indent
decl_code = indent(self.arg_declaration_code(),4)
cleanup_code = indent(self.arg_cleanup_code(),4)
function_code = indent(self.code_block,4)
# local_dict_code = indent(self.arg_local_dict_code(),4)
try_code = \
' try \n' \
' { \n' \
'#if defined(__GNUC__) || defined(__ICC)\n' \
' PyObject* raw_locals __attribute__ ((unused));\n' \
' PyObject* raw_globals __attribute__ ((unused));\n' \
'#else\n' \
' PyObject* raw_locals;\n' \
' PyObject* raw_globals;\n' \
'#endif\n' \
' raw_locals = py_to_raw_dict(py__locals,"_locals");\n' \
' raw_globals = py_to_raw_dict(py__globals,"_globals");\n' \
' /* argument conversion code */ \n' \
+ decl_code + \
' /* inline code */ \n' \
+ function_code + \
' /*I would like to fill in changed locals and globals here...*/ \n' \
' }\n'
catch_code = "catch(...) \n" \
"{ \n" + \
" return_val = py::object(); \n" \
" exception_occurred = 1; \n" \
"} \n"
return_code = " /* cleanup code */ \n" + \
cleanup_code + \
" if(!(PyObject*)return_val && !exception_occurred)\n" \
" {\n \n" \
" return_val = Py_None; \n" \
" }\n \n" \
" return return_val.disown(); \n" \
"} \n"
all_code = self.function_declaration_code() + \
indent(self.parse_tuple_code(),4) + \
try_code + \
indent(catch_code,4) + \
return_code
return all_code
def python_function_definition_code(self):
args = (self.name, self.name)
function_decls = '{"%s",(PyCFunction)%s , METH_VARARGS},\n' % args
return function_decls
class inline_ext_module(ext_tools.ext_module):
def __init__(self,name,compiler=''):
ext_tools.ext_module.__init__(self,name,compiler)
self._build_information.append(common_info.inline_info())
function_cache = {}
def inline(code,arg_names=[],local_dict=None, global_dict=None,
force=0,
compiler='',
verbose=0,
support_code=None,
headers=[],
customize=None,
type_converters=None,
auto_downcast=1,
newarr_converter=0,
**kw):
"""
Inline C/C++ code within Python scripts.
``inline()`` compiles and executes C/C++ code on the fly. Variables
in the local and global Python scope are also available in the
C/C++ code. Values are passed to the C/C++ code by assignment
much like variables passed are passed into a standard Python
function. Values are returned from the C/C++ code through a
special argument called return_val. Also, the contents of
mutable objects can be changed within the C/C++ code and the
changes remain after the C code exits and returns to Python.
inline has quite a few options as listed below. Also, the keyword
arguments for distutils extension modules are accepted to
specify extra information needed for compiling.
Parameters
----------
code : string
A string of valid C++ code. It should not specify a return
statement. Instead it should assign results that need to be
returned to Python in the `return_val`.
arg_names : [str], optional
A list of Python variable names that should be transferred from
Python into the C/C++ code. It defaults to an empty list.
local_dict : dict, optional
If specified, it is a dictionary of values that should be used as
the local scope for the C/C++ code. If local_dict is not
specified the local dictionary of the calling function is used.
global_dict : dict, optional
If specified, it is a dictionary of values that should be used as
the global scope for the C/C++ code. If `global_dict` is not
specified, the global dictionary of the calling function is used.
force : {0, 1}, optional
If 1, the C++ code is compiled every time inline is called. This
is really only useful for debugging, and probably only useful if
you're editing `support_code` a lot.
compiler : str, optional
The name of compiler to use when compiling. On windows, it
understands 'msvc' and 'gcc' as well as all the compiler names
understood by distutils. On Unix, it'll only understand the
values understood by distutils. (I should add 'gcc' though to
this).
On windows, the compiler defaults to the Microsoft C++ compiler.
If this isn't available, it looks for mingw32 (the gcc compiler).
On Unix, it'll probably use the same compiler that was used when
compiling Python. Cygwin's behavior should be similar.
verbose : {0,1,2}, optional
Specifies how much information is printed during the compile
phase of inlining code. 0 is silent (except on windows with msvc
where it still prints some garbage). 1 informs you when compiling
starts, finishes, and how long it took. 2 prints out the command
lines for the compilation process and can be useful if you're having
problems getting code to work. It's handy for finding the name of
the .cpp file if you need to examine it. verbose has no effect if
the compilation isn't necessary.
support_code : str, optional
A string of valid C++ code declaring extra code that might be
needed by your compiled function. This could be declarations of
functions, classes, or structures.
headers : [str], optional
A list of strings specifying header files to use when compiling
the code. The list might look like ``["<vector>","'my_header'"]``.
Note that the header strings need to be in a form than can be
pasted at the end of a ``#include`` statement in the C++ code.
customize : base_info.custom_info, optional
An alternative way to specify `support_code`, `headers`, etc. needed
by the function. See :mod:`scipy.weave.base_info` for more
details. (not sure this'll be used much).
type_converters : [type converters], optional
These guys are what convert Python data types to C/C++ data types.
If you'd like to use a different set of type conversions than the
default, specify them here. Look in the type conversions section
of the main documentation for examples.
auto_downcast : {1,0}, optional
This only affects functions that have numpy arrays as input
variables. Setting this to 1 will cause all floating-point values
to be cast as float instead of double if all the numpy arrays
are of type float. If even one of the arrays has type double or
double complex, all variables maintain their standard types.
newarr_converter : int, optional
Unused.
Other Parameters
----------------
Relevant :mod:`distutils` keywords. These are duplicated from Greg Ward's
:class:`distutils.extension.Extension` class for convenience:
sources : [string]
List of source filenames, relative to the distribution root
(where the setup script lives), in Unix form (slash-separated)
for portability. Source files may be C, C++, SWIG (.i),
platform-specific resource files, or whatever else is recognized
by the "build_ext" command as source for a Python extension.
.. note:: The `module_path` file is always prepended to this list.
include_dirs : [string]
List of directories to search for C/C++ header files (in Unix
form for portability).
define_macros : [(name : string, value : string|None)]
List of macros to define; each macro is defined using a 2-tuple,
where 'value' is either the string to define it to or None to
define it without a particular value (equivalent of "#define
FOO" in source or -DFOO on Unix C compiler command line).
undef_macros : [string]
List of macros to undefine explicitly.
library_dirs : [string]
List of directories to search for C/C++ libraries at link time.
libraries : [string]
List of library names (not filenames or paths) to link against.
runtime_library_dirs : [string]
List of directories to search for C/C++ libraries at run time
(for shared extensions, this is when the extension is loaded).
extra_objects : [string]
List of extra files to link with (e.g. object files not implied
by 'sources', static libraries that must be explicitly specified,
binary resource files, etc.)
extra_compile_args : [string]
Any extra platform- and compiler-specific information to use
when compiling the source files in 'sources'. For platforms and
compilers where "command line" makes sense, this is typically a
list of command-line arguments, but for other platforms it could
be anything.
extra_link_args : [string]
Any extra platform- and compiler-specific information to use
when linking object files together to create the extension (or
to create a new static Python interpreter). Similar
interpretation as for 'extra_compile_args'.
export_symbols : [string]
List of symbols to be exported from a shared extension. Not
used on all platforms, and not generally necessary for Python
extensions, which typically export exactly one symbol: "init" +
extension_name.
swig_opts : [string]
Any extra options to pass to SWIG if a source file has the .i
extension.
depends : [string]
List of files that the extension depends on.
language : string
Extension language (i.e. "c", "c++", "objc"). Will be detected
from the source extensions if not provided.
See Also
--------
distutils.extension.Extension : Describes additional parameters.
"""
# this grabs the local variables from the *previous* call
# frame -- that is the locals from the function that called
# inline.
global function_catalog
call_frame = sys._getframe().f_back
if local_dict is None:
local_dict = call_frame.f_locals
if global_dict is None:
global_dict = call_frame.f_globals
if force:
module_dir = global_dict.get('__file__',None)
func = compile_function(code,arg_names,local_dict,
global_dict,module_dir,
compiler=compiler,
verbose=verbose,
support_code=support_code,
headers=headers,
customize=customize,
type_converters=type_converters,
auto_downcast=auto_downcast,
**kw)
function_catalog.add_function(code,func,module_dir)
results = attempt_function_call(code,local_dict,global_dict)
else:
# 1. try local cache
try:
results = function_cache[code](local_dict, global_dict)
return results
except TypeError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise TypeError(msg)
except NameError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise NameError(msg)
except KeyError:
pass
# 2. try function catalog
try:
results = attempt_function_call(code,local_dict,global_dict)
# 3. build the function
except ValueError:
# compile the library
module_dir = global_dict.get('__file__',None)
func = compile_function(code,arg_names,local_dict,
global_dict,module_dir,
compiler=compiler,
verbose=verbose,
support_code=support_code,
headers=headers,
customize=customize,
type_converters=type_converters,
auto_downcast=auto_downcast,
**kw)
function_catalog.add_function(code,func,module_dir)
results = attempt_function_call(code,local_dict,global_dict)
return results
def attempt_function_call(code,local_dict,global_dict):
# we try 3 levels here -- a local cache first, then the
# catalog cache, and then persistent catalog.
#
global function_catalog
# 1. try local cache
try:
results = function_cache[code](local_dict, global_dict)
return results
except TypeError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise TypeError(msg)
except NameError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise NameError(msg)
except KeyError:
pass
# 2. try catalog cache.
function_list = function_catalog.get_functions_fast(code)
for func in function_list:
try:
results = func(local_dict, global_dict)
function_catalog.fast_cache(code,func)
function_cache[code] = func
return results
except TypeError as msg: # should specify argument types here.
# This should really have its own error type, instead of
# checking the beginning of the message, but I don't know
# how to define that yet.
msg = str(msg)
if msg[:16] == "Conversion Error":
pass
else:
raise TypeError(msg)
except NameError as msg:
msg = str(msg).strip()
if msg[:16] == "Conversion Error":
pass
else:
raise NameError(msg)
# 3. try persistent catalog
module_dir = global_dict.get('__file__',None)
function_list = function_catalog.get_functions(code,module_dir)
for func in function_list:
try:
results = func(local_dict, global_dict)
function_catalog.fast_cache(code,func)
function_cache[code] = func
return results
except Exception: # should specify argument types here.
pass
# if we get here, the function wasn't found
raise ValueError('function with correct signature not found')
def inline_function_code(code,arg_names,local_dict=None,
global_dict=None,auto_downcast=1,
type_converters=None,compiler=''):
call_frame = sys._getframe().f_back
if local_dict is None:
local_dict = call_frame.f_locals
if global_dict is None:
global_dict = call_frame.f_globals
ext_func = inline_ext_function('compiled_func',code,arg_names,
local_dict,global_dict,auto_downcast,
type_converters=type_converters)
from . import build_tools
compiler = build_tools.choose_compiler(compiler)
ext_func.set_compiler(compiler)
return ext_func.function_code()
def compile_function(code,arg_names,local_dict,global_dict,
module_dir,
compiler='',
verbose=1,
support_code=None,
headers=[],
customize=None,
type_converters=None,
auto_downcast=1,
**kw):
# figure out where to store and what to name the extension module
# that will contain the function.
# storage_dir = catalog.intermediate_dir()
code = ndarray_api_version + '\n' + code
module_path = function_catalog.unique_module_name(code, module_dir)
storage_dir, module_name = os.path.split(module_path)
mod = inline_ext_module(module_name,compiler)
# create the function. This relies on the auto_downcast and
# type factories setting
ext_func = inline_ext_function('compiled_func',code,arg_names,
local_dict,global_dict,auto_downcast,
type_converters=type_converters)
mod.add_function(ext_func)
# if customize (a custom_info object), then set the module customization.
if customize:
mod.customize = customize
# add the extra "support code" needed by the function to the module.
if support_code:
mod.customize.add_support_code(support_code)
# add the extra headers needed by the function to the module.
for header in headers:
mod.customize.add_header(header)
# it's nice to let the users know when anything gets compiled, as the
# slowdown is very noticeable.
if verbose > 0:
print('<weave: compiling>')
# compile code in correct location, with the given compiler and verbosity
# setting. All input keywords are passed through to distutils
mod.compile(location=storage_dir,compiler=compiler,
verbose=verbose, **kw)
# import the module and return the function. Make sure
# the directory where it lives is in the python path.
try:
sys.path.insert(0,storage_dir)
module = __import__(module_name)
func = module.compiled_func
finally:
del sys.path[0]
return func
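# --- Editor's illustrative sketch (not part of the original module) --------
# The docstring above describes `code`, `arg_names` and `return_val`; the
# hypothetical helper below shows how they fit together. It calls this
# module's own `inline` and assumes a working C++ compiler is available.
def _example_inline_sum(a, b):
    # The C++ snippet assigns to `return_val` instead of using `return`.
    code = "return_val = a + b;"
    # 'a' and 'b' are pulled from this function's local scope by name.
    return inline(code, ['a', 'b'])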
| 42.7749
| 93
| 0.593862
| 2,616
| 21,473
| 4.719037
| 0.196865
| 0.019684
| 0.015796
| 0.020008
| 0.304415
| 0.280761
| 0.268449
| 0.246983
| 0.24358
| 0.233293
| 0
| 0.003695
| 0.331998
| 21,473
| 501
| 94
| 42.860279
| 0.856944
| 0.42621
| 0
| 0.496212
| 0
| 0
| 0.150172
| 0.016552
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049242
| false
| 0.034091
| 0.037879
| 0
| 0.151515
| 0.007576
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ee8c1be25a8a7813888c36156d1084e0932af6f
| 22,062
|
py
|
Python
|
trove/guestagent/common/configuration.py
|
sapcc/trove
|
c03ec0827687fba202f72f4d264ab70158604857
|
[
"Apache-2.0"
] | 1
|
2020-04-08T07:42:19.000Z
|
2020-04-08T07:42:19.000Z
|
trove/guestagent/common/configuration.py
|
sapcc/trove
|
c03ec0827687fba202f72f4d264ab70158604857
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
trove/guestagent/common/configuration.py
|
sapcc/trove
|
c03ec0827687fba202f72f4d264ab70158604857
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import os
import re
import six
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
class ConfigurationManager(object):
"""
ConfigurationManager is responsible for management of
datastore configuration.
Its base functionality includes reading and writing configuration files.
It is responsible for validating user inputs and requests.
When supplied an override strategy it allows the user to manage
configuration overrides as well.
"""
# Configuration group names. The names determine the order in which the
# groups get applied. System groups are divided into two camps; pre-user
# and post-user. In general system overrides will get applied over the
# user group, unless specified otherwise (i.e. SYSTEM_POST_USER_GROUP
# will be used).
SYSTEM_PRE_USER_GROUP = '10-system'
USER_GROUP = '20-user'
SYSTEM_POST_USER_GROUP = '50-system'
DEFAULT_STRATEGY_OVERRIDES_SUB_DIR = 'overrides'
DEFAULT_CHANGE_ID = 'common'
def __init__(self, base_config_path, owner, group, codec,
requires_root=False, override_strategy=None):
"""
:param base_config_path Path to the configuration file.
:type base_config_path string
:param owner Owner of the configuration files.
:type owner string
:param group Group of the configuration files.
:type group string
:param codec Codec for reading/writing of the particular
configuration format.
:type codec StreamCodec
:param requires_root Whether the manager requires superuser
privileges.
:type requires_root boolean
:param override_strategy Strategy used to manage configuration
overrides (e.g. ImportOverrideStrategy).
Defaults to OneFileOverrideStrategy
if None. This strategy should be
compatible with virtually any datastore.
It is recommended each datastore defines
its strategy explicitly to avoid upgrade
compatibility issues in case the default
implementation changes in the future.
:type override_strategy ConfigurationOverrideStrategy
"""
self._base_config_path = base_config_path
self._owner = owner
self._group = group
self._codec = codec
self._requires_root = requires_root
self._value_cache = None
if not override_strategy:
# Use OneFile strategy by default. Store the revisions in a
# sub-directory at the location of the configuration file.
revision_dir = guestagent_utils.build_file_path(
os.path.dirname(base_config_path),
self.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
self._override_strategy = OneFileOverrideStrategy(revision_dir)
else:
self._override_strategy = override_strategy
self._override_strategy.configure(
base_config_path, owner, group, codec, requires_root)
def get_value(self, key, default=None):
"""Return the current value at a given key or 'default'.
"""
if self._value_cache is None:
self.refresh_cache()
return self._value_cache.get(key, default)
def parse_configuration(self):
"""Read contents of the configuration file (applying overrides if any)
and parse it into a dict.
:returns: Configuration file as a Python dict.
"""
base_options = operating_system.read_file(
self._base_config_path, codec=self._codec,
as_root=self._requires_root)
updates = self._override_strategy.parse_updates()
guestagent_utils.update_dict(updates, base_options)
return base_options
def save_configuration(self, options):
"""Write given contents to the base configuration file.
Remove all existing overrides (both system and user).
:param contents Contents of the configuration file.
:type contents string or dict
"""
if isinstance(options, dict):
# Serialize a dict of options for writing.
self.save_configuration(self._codec.serialize(options))
else:
self._override_strategy.remove(self.USER_GROUP)
self._override_strategy.remove(self.SYSTEM_PRE_USER_GROUP)
self._override_strategy.remove(self.SYSTEM_POST_USER_GROUP)
operating_system.write_file(
self._base_config_path, options, as_root=self._requires_root)
operating_system.chown(
self._base_config_path, self._owner, self._group,
as_root=self._requires_root)
operating_system.chmod(
self._base_config_path, FileMode.ADD_READ_ALL,
as_root=self._requires_root)
self.refresh_cache()
def has_system_override(self, change_id):
"""Return whether a given 'system' change exists.
"""
return (self._override_strategy.exists(self.SYSTEM_POST_USER_GROUP,
change_id) or
self._override_strategy.exists(self.SYSTEM_PRE_USER_GROUP,
change_id))
def apply_system_override(self, options, change_id=DEFAULT_CHANGE_ID,
pre_user=False):
"""Apply a 'system' change to the configuration.
System overrides are always applied after all user changes so that
they override any user-defined setting.
:param options Configuration changes.
:type options string or dict
"""
group_name = (
self.SYSTEM_PRE_USER_GROUP if pre_user else
self.SYSTEM_POST_USER_GROUP)
self._apply_override(group_name, change_id, options)
def apply_user_override(self, options, change_id=DEFAULT_CHANGE_ID):
"""Apply a 'user' change to the configuration.
The 'system' values will be re-applied over this override.
:param options Configuration changes.
:type options string or dict
"""
self._apply_override(self.USER_GROUP, change_id, options)
def get_user_override(self, change_id=DEFAULT_CHANGE_ID):
"""Get the user overrides"""
return self._override_strategy.get(self.USER_GROUP, change_id)
def _apply_override(self, group_name, change_id, options):
if not isinstance(options, dict):
# Deserialize the options into a dict if not already.
self._apply_override(
group_name, change_id, self._codec.deserialize(options))
else:
self._override_strategy.apply(group_name, change_id, options)
self.refresh_cache()
def remove_system_override(self, change_id=DEFAULT_CHANGE_ID):
"""Revert a 'system' configuration change.
"""
self._remove_override(self.SYSTEM_POST_USER_GROUP, change_id)
self._remove_override(self.SYSTEM_PRE_USER_GROUP, change_id)
def remove_user_override(self, change_id=DEFAULT_CHANGE_ID):
"""Revert a 'user' configuration change.
"""
self._remove_override(self.USER_GROUP, change_id)
def _remove_override(self, group_name, change_id):
self._override_strategy.remove(group_name, change_id)
self.refresh_cache()
def refresh_cache(self):
self._value_cache = self.parse_configuration()
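# --- Editor's illustrative sketch (not part of the original module) --------
# A typical wiring of ConfigurationManager. The path, owner/group and option
# names below are hypothetical; IniCodec is assumed to live in
# trove.common.stream_codecs, as elsewhere in Trove.
def _example_manager_usage():
    from trove.common.stream_codecs import IniCodec
    manager = ConfigurationManager(
        '/etc/mydb/mydb.cnf', 'mydb', 'mydb', IniCodec(),
        requires_root=True)
    # A user change, then a system change that is applied over it.
    manager.apply_user_override({'mysqld': {'max_connections': 100}})
    manager.apply_system_override({'mysqld': {'bind_address': '0.0.0.0'}})
    return manager.get_value('mysqld')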
@six.add_metaclass(abc.ABCMeta)
class ConfigurationOverrideStrategy(object):
"""ConfigurationOverrideStrategy handles configuration files.
The strategy provides functionality to enumerate, apply and remove
configuration overrides.
"""
@abc.abstractmethod
def configure(self, *args, **kwargs):
"""Configure this strategy.
A strategy needs to be configured before it can be used.
It would typically be configured by the ConfigurationManager.
"""
@abc.abstractmethod
def exists(self, group_name, change_id):
"""Return whether a given revision exists.
"""
@abc.abstractmethod
def apply(self, group_name, change_id, options):
"""Apply given options on the most current configuration revision.
Update if a file with the same id already exists.
:param group_name The group the override belongs to.
:type group_name string
:param change_id The name of the override within the group.
:type change_id string
:param options Configuration changes.
:type options dict
"""
@abc.abstractmethod
def remove(self, group_name, change_id=None):
"""Rollback a given configuration override.
Remove the whole group if 'change_id' is None.
:param group_name The group the override belongs to.
:type group_name string
:param change_id The name of the override within the group.
:type change_id string
"""
@abc.abstractmethod
def get(self, group_name, change_id=None):
"""Return the contents of a given configuration override
:param group_name The group the override belongs to.
:type group_name string
:param change_id The name of the override within the group.
:type change_id string
"""
def parse_updates(self):
"""Return all updates applied to the base revision as a single dict.
Return an empty dict if the base file is always the most current
version of configuration.
:returns: Updates to the base revision as a Python dict.
"""
return {}
class ImportOverrideStrategy(ConfigurationOverrideStrategy):
"""Import strategy keeps overrides in separate files that get imported
into the base configuration file which never changes itself.
An override file is simply deleted when the override is removed.
We keep two sets of override files in a separate directory.
- User overrides - configuration overrides applied by the user via the
Trove API.
- System overrides - 'internal' configuration changes applied by the
guestagent.
The name format of override files is: '<set prefix>-<n>-<group name>.<ext>'
where 'set prefix' is used to order the user/system sets,
'n' is an index used to keep track of the order in which overrides
within their set got applied.
"""
FILE_NAME_PATTERN = r'%s-([0-9]+)-%s\.%s$'
def __init__(self, revision_dir, revision_ext):
"""
:param revision_dir Path to the directory for import files.
:type revision_dir string
:param revision_ext Extension of revision files.
:type revision_ext string
"""
self._revision_dir = revision_dir
self._revision_ext = revision_ext
def configure(self, base_config_path, owner, group, codec, requires_root):
"""
:param base_config_path Path to the configuration file.
:type base_config_path string
:param owner Owner of the configuration and
revision files.
:type owner string
:param group Group of the configuration and
revision files.
:type group string
:param codec Codec for reading/writing of the particular
configuration format.
:type codec StreamCodec
:param requires_root Whether the strategy requires superuser
privileges.
:type requires_root boolean
"""
self._base_config_path = base_config_path
self._owner = owner
self._group = group
self._codec = codec
self._requires_root = requires_root
def exists(self, group_name, change_id):
return self._find_revision_file(group_name, change_id) is not None
def apply(self, group_name, change_id, options):
self._initialize_import_directory()
revision_file = self._find_revision_file(group_name, change_id)
if revision_file is None:
# Create a new file.
last_revision_index = self._get_last_file_index(group_name)
revision_file = guestagent_utils.build_file_path(
self._revision_dir,
'%s-%03d-%s' % (group_name, last_revision_index + 1,
change_id),
self._revision_ext)
else:
# Update the existing file.
current = operating_system.read_file(
revision_file, codec=self._codec, as_root=self._requires_root)
options = guestagent_utils.update_dict(options, current)
operating_system.write_file(
revision_file, options, codec=self._codec,
as_root=self._requires_root)
operating_system.chown(
revision_file, self._owner, self._group,
as_root=self._requires_root)
operating_system.chmod(
revision_file, FileMode.ADD_READ_ALL, as_root=self._requires_root)
def _initialize_import_directory(self):
"""Lazy-initialize the directory for imported revision files.
"""
if not os.path.exists(self._revision_dir):
operating_system.create_directory(
self._revision_dir, user=self._owner, group=self._group,
force=True, as_root=self._requires_root)
def remove(self, group_name, change_id=None):
removed = set()
if change_id:
# Remove a given file.
revision_file = self._find_revision_file(group_name, change_id)
if revision_file:
removed.add(revision_file)
else:
# Remove the entire group.
removed = self._collect_revision_files(group_name)
for path in removed:
operating_system.remove(path, force=True,
as_root=self._requires_root)
def get(self, group_name, change_id):
revision_file = self._find_revision_file(group_name, change_id)
return operating_system.read_file(revision_file,
codec=self._codec,
as_root=self._requires_root)
def parse_updates(self):
parsed_options = {}
for path in self._collect_revision_files():
options = operating_system.read_file(path, codec=self._codec,
as_root=self._requires_root)
guestagent_utils.update_dict(options, parsed_options)
return parsed_options
@property
def has_revisions(self):
"""Return True if there currently are any revision files.
"""
return (operating_system.exists(
self._revision_dir, is_directory=True,
as_root=self._requires_root) and
(len(self._collect_revision_files()) > 0))
def _get_last_file_index(self, group_name):
"""Get the index of the most current file in a given group.
"""
current_files = self._collect_revision_files(group_name)
if current_files:
name_pattern = self._build_rev_name_pattern(group_name=group_name)
last_file_name = os.path.basename(current_files[-1])
last_index_match = re.match(name_pattern, last_file_name)
if last_index_match:
return int(last_index_match.group(1))
return 0
def _collect_revision_files(self, group_name='.+'):
"""Collect and return a sorted list of paths to existing revision
files. The files should be sorted in the same order in which
they were applied.
"""
name_pattern = self._build_rev_name_pattern(group_name=group_name)
return sorted(operating_system.list_files_in_directory(
self._revision_dir, recursive=True, pattern=name_pattern,
as_root=self._requires_root))
def _find_revision_file(self, group_name, change_id):
name_pattern = self._build_rev_name_pattern(group_name, change_id)
found = operating_system.list_files_in_directory(
self._revision_dir, recursive=True, pattern=name_pattern,
as_root=self._requires_root)
return next(iter(found), None)
def _build_rev_name_pattern(self, group_name='.+', change_id='.+'):
return self.FILE_NAME_PATTERN % (group_name, change_id,
self._revision_ext)
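# Editor's sketch (illustrative, not part of the original module): the path
# that apply() builds for the first 'common' change in the user group, given
# revision_ext='rev'.
#
#     guestagent_utils.build_file_path(
#         revision_dir, '20-user-001-common', 'rev')
#     # -> <revision_dir>/20-user-001-common.rev
#
# The zero-padded index tracked by _get_last_file_index() preserves the
# order in which overrides were applied.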
class OneFileOverrideStrategy(ConfigurationOverrideStrategy):
"""This is a strategy for datastores that do not support multiple
configuration files.
It uses the Import Strategy to keep the overrides internally.
When an override is applied or removed a new configuration file is
generated by applying all changes on a saved-off base revision.
"""
BASE_REVISION_NAME = 'base'
REVISION_EXT = 'rev'
def __init__(self, revision_dir):
"""
:param revision_dir Path to the directory for import files.
:type revision_dir string
"""
self._revision_dir = revision_dir
self._import_strategy = ImportOverrideStrategy(revision_dir,
self.REVISION_EXT)
def configure(self, base_config_path, owner, group, codec, requires_root):
"""
:param base_config_path Path to the configuration file.
:type base_config_path string
:param owner Owner of the configuration and
revision files.
:type owner string
:param group Group of the configuration and
revision files.
:type group string
:param codec Codec for reading/writing of the particular
configuration format.
:type codec StreamCodec
:param requires_root Whether the strategy requires superuser
privileges.
:type requires_root boolean
"""
self._base_config_path = base_config_path
self._owner = owner
self._group = group
self._codec = codec
self._requires_root = requires_root
self._base_revision_file = guestagent_utils.build_file_path(
self._revision_dir, self.BASE_REVISION_NAME, self.REVISION_EXT)
self._import_strategy.configure(
base_config_path, owner, group, codec, requires_root)
def exists(self, group_name, change_id):
return self._import_strategy.exists(group_name, change_id)
def apply(self, group_name, change_id, options):
self._import_strategy.apply(group_name, change_id, options)
self._regenerate_base_configuration()
def remove(self, group_name, change_id=None):
if self._import_strategy.has_revisions:
self._import_strategy.remove(group_name, change_id=change_id)
self._regenerate_base_configuration()
if not self._import_strategy.has_revisions:
# The base revision file is no longer needed if there are no
# overrides. It will be regenerated based on the current
# configuration file on the first 'apply()'.
operating_system.remove(self._base_revision_file, force=True,
as_root=self._requires_root)
def get(self, group_name, change_id):
return self._import_strategy.get(group_name, change_id)
def _regenerate_base_configuration(self):
"""Gather all configuration changes and apply them in order on the base
revision. Write the results to the configuration file.
"""
if not os.path.exists(self._base_revision_file):
# Initialize the file with the current configuration contents if it
# does not exist.
operating_system.copy(
self._base_config_path, self._base_revision_file,
force=True, preserve=True, as_root=self._requires_root)
base_revision = operating_system.read_file(
self._base_revision_file, codec=self._codec,
as_root=self._requires_root)
changes = self._import_strategy.parse_updates()
updated_revision = guestagent_utils.update_dict(changes, base_revision)
operating_system.write_file(
self._base_config_path, updated_revision, codec=self._codec,
as_root=self._requires_root)
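# Editor's note (illustrative, not part of the original module): for a base
# file '/etc/mydb/mydb.cnf' managed by the default OneFileOverrideStrategy,
# the adjacent 'overrides' directory would hold
#     overrides/base.rev                  (saved-off copy of the base file)
#     overrides/20-user-001-common.rev    (an applied user override)
# and every apply()/remove() rewrites the base file from 'base.rev' plus the
# sorted revision files.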
| 40.629834
| 79
| 0.631629
| 2,555
| 22,062
| 5.192172
| 0.131115
| 0.03558
| 0.033921
| 0.038444
| 0.487035
| 0.428916
| 0.368988
| 0.346073
| 0.284185
| 0.241972
| 0
| 0.001503
| 0.306228
| 22,062
| 542
| 80
| 40.704797
| 0.865216
| 0.385912
| 0
| 0.32636
| 0
| 0
| 0.006627
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16318
| false
| 0
| 0.079498
| 0.016736
| 0.359833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ee8cb45529200e0a449b9203826ebdcb7530c60
| 18,018
|
py
|
Python
|
API-Reference-Code-Generator.py
|
sawyercade/Documentation
|
257b68c8ca2928e8a730ea44196297a400587437
|
[
"Apache-2.0"
] | 116
|
2017-09-13T17:11:07.000Z
|
2022-03-13T00:33:03.000Z
|
API-Reference-Code-Generator.py
|
sawyercade/Documentation
|
257b68c8ca2928e8a730ea44196297a400587437
|
[
"Apache-2.0"
] | 148
|
2017-09-14T01:07:09.000Z
|
2022-03-28T21:47:55.000Z
|
API-Reference-Code-Generator.py
|
sawyercade/Documentation
|
257b68c8ca2928e8a730ea44196297a400587437
|
[
"Apache-2.0"
] | 124
|
2017-09-07T22:05:43.000Z
|
2022-03-26T05:44:32.000Z
|
import pathlib
import yaml
documentations = {"Our Platform": "QuantConnect-Platform-2.0.0.yaml",
"Alpha Streams": "QuantConnect-Alpha-0.8.yaml"}
def RequestTable(api_call, params):
writeUp = '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="2"><code>{api_call}</code> Method</th>\n</tr>\n</thead>'
example = '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n{\n'
for item in params:
example_ = "/"
description_ = "Optional. " if "required" not in item or not item["required"] else ""
description_ += item["description"]
if description_[-1] != ".":
description_ += "."
if "type" in item["schema"]:
type_ = item["schema"]["type"]
else:
type_ = item["schema"]["$ref"].split("/")[-1]
if "minimum" in item["schema"]:
description_ += f' Minimum: {item["schema"]["minimum"]}'
example_ = item["schema"]["minimum"]
elif "maximum" in item["schema"]:
description_ += f' Maximum: {item["schema"]["maximum"]}'
example_ = item["schema"]["maximum"]
elif "default" in item["schema"]:
description_ += f' Default: {item["schema"]["default"]}'
example_ = item["schema"]["default"]
if type_ == "array":
array_obj = item["schema"]["items"]
if "$ref" in array_obj:
ref = array_obj["$ref"].split("/")[1:]
type_ = ref[-1] + " Array"
request_object_ = doc
for path in ref:
request_object_ = request_object_[path]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
example_, __, __ = ExampleWriting(request_object_properties_, [], 1)
if "type" in array_obj:
type_ = array_obj["type"] + " Array"
if "enum" in array_obj:
type_ = type_ + " Enum"
description_ += f' Options: {str(array_obj["enum"])}'
example_ = f'"{array_obj["enum"][0]}"'
if "Enum" not in type_:
if "string" in type_:
example_ = '"string"'
elif "number" in type_ or "integer" in type_:
example_ = '0'
elif "boolean" in type_:
example_ = 'true'
writeUp += f'\n<tr>\n<td width="20%">{item["name"]}</td> <td> <code>{type_}</code><br/>{description_}</td>\n</tr>'
example += f' "{item["name"]}": {example_},\n'
return writeUp + example + "\b}</pre>\n</div>\n</td>\n</tr>\n</table>"
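# Editor's sketch (illustrative, not part of the original script): a minimal
# `params` entry and what RequestTable renders for it. The field values are
# hypothetical.
#
#     params = [{
#         "name": "name",
#         "required": True,
#         "description": "Name of the project",
#         "schema": {"type": "string"},
#     }]
#     RequestTable("/projects/create", params)
#     # renders a row roughly like:
#     #   <tr><td width="20%">name</td> <td> <code>string</code><br/>
#     #   Name of the project.</td></tr>
#     # and adds '  "name": "string",' to the JSON example body.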
def ResponseTable(requestBody):
writeUp = ""
array = False
order = 0
if "content" in requestBody:
component = requestBody["content"]["application/json"]["schema"]
if "$ref" in component:
component = component["$ref"].split("/")[1:]
elif "items" in component and "$ref" in component["items"]:
component = component["items"]["$ref"].split("/")[1:]
array = True
order += 1
else:
writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="2">{requestBody["description"]}</th>\n'
writeUp += '</tr>\n</thead>\n'
writeUp += f'<tr>\n<td width="20%">value</td> <td> <code>{component["items"]["type"]}</code> <br/>/</td>\n</tr>\n'
writeUp += '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n'
writeUp += f'[\n "{component["items"]["example"]}"\n]'
writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>'
return writeUp
else:
component = requestBody["$ref"].split("/")[1:]
item_list = [component]
i = 0
while i < len(item_list):
request_object = doc
for item in item_list[i]:
request_object = request_object[item]
if "items" in request_object and "oneOf" in request_object["items"]:
prop = request_object["items"]["oneOf"]
example = '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n[\n ['
writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="2"><code>{item}</code> Model - {request_object["description"]}</th>\n'
writeUp += '</tr>\n</thead>'
for y in prop:
path = y["$ref"].split("/")[1:]
name = path[-1]
enum = ""
item_list.append(path)
request_object = doc
for item in path:
request_object = request_object[item]
if "enum" in request_object:
enum = " Options: " + str(request_object["enum"])
description_ = request_object["description"]
if description_[-1] != ".":
description_ += "."
writeUp += f'\n<tr>\n<td width="20%">{name}</td> <td> <code>{request_object["type"]}</code> <br/> {description_ + enum}</td>\n</tr>\n'
if "example" in request_object:
text = request_object["example"]
elif "enum" in request_object:
text = '"' + request_object["enum"][0] + '"'
example += f'\n {text},'
example += '\b\n ]\n]'
writeUp += example
writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>'
i += 1
continue
elif "oneOf" in request_object:
for y in request_object["oneOf"]:
item_list.append(y["$ref"].split("/")[1:])
i += 1
continue
elif "properties" in request_object:
request_object_properties = request_object["properties"]
elif "content" in request_object:
item_list.append(request_object["content"]["application/json"]["schema"]["$ref"].split("/")[1:])
i += 1
continue
elif "type" in request_object and "properties" not in request_object:
request_object_properties = {item: request_object}
writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n'
if "description" in request_object:
writeUp += f'<th colspan="2"><code>{item_list[i][-1]}</code> Model - {request_object["description"]}</th>\n'
else:
writeUp += f'<th colspan="2"><code>{item_list[i][-1]}</code> Model</th>\n'
writeUp += '</tr>\n</thead>\n'
example, html_property, item_list = ExampleWriting(request_object_properties, item_list, array, order)
if array:
array = False
order -= 1
for line in html_property:
writeUp += line
writeUp += '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n'
writeUp += example
writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>'
i += 1
return writeUp
def ExampleWriting(request_object_properties, item_list, array=False, order=0):
tab = " " * order
if array:
example = "[\n {\n"
else:
example = "{\n"
line = []
for name, properties in request_object_properties.items():
type_ = properties["type"] if "type" in properties else "object"
description_ = properties["description"] if "description" in properties else "/"
if (example != "{\n" and not array) or (example != "[\n {\n" and array):
example += ",\n"
example_ = tab + f' "{name}": '
if type_ == "array":
example_ += '[\n'
if "type" in properties["items"]:
type_ = properties["items"]["type"] + " Array"
example_ += tab + f' "{properties["items"]["type"]}"'
elif "$ref" in properties["items"]:
ref = properties["items"]["$ref"].split("/")[1:]
type_ = ref[-1] + " Array"
if ref not in item_list:
item_list.append(ref)
request_object_ = doc
for item in ref:
request_object_ = request_object_[item]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+2)
example_ += tab + " " * 2 + write_up
elif type_ == "object":
if "additionalProperties" in properties:
add_prop = properties["additionalProperties"]
if "type" in add_prop:
prop_type = add_prop["type"]
if "format" in prop_type:
type_ = prop_type + f'$({prop_type["format"]})' + " object"
if prop_type["format"] == "date-time":
example_ += "2021-11-26T15:18:27.693Z"
else:
example_ += "0"
else:
type_ = prop_type + " object"
example_ += f'"{prop_type}"'
elif "$ref" in add_prop:
ref = add_prop["$ref"].split("/")[1:]
type_ = ref[-1] + " object"
if ref not in item_list:
item_list.append(ref)
request_object_ = doc
for item in ref:
request_object_ = request_object_[item]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+1)
example_ += write_up
elif "$ref" in properties:
ref = properties["$ref"].split("/")[1:]
type_ = ref[-1] + " object"
if ref not in item_list:
item_list.append(ref)
request_object_ = doc
for item in ref:
request_object_ = request_object_[item]
if "properties" in request_object_:
request_object_properties_ = request_object_["properties"]
description_ = request_object_["description"] if "description" in request_object_ else "/"
write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+1)
example_ += write_up
elif "type" in request_object_:
properties = request_object_properties_ = request_object_
type_ = request_object_["type"]
description_ = request_object_["description"] if "description" in request_object_ else "/"
elif type_ == "integer" or type_ == "number":
example_ += "0"
elif type_ == "boolean":
example_ += "true"
elif type_ == "string":
if "format" in properties:
type_ += f'(${properties["format"]})'
example_ += "2021-11-26T15:18:27.693Z"
else:
example_ += '"string"'
if description_[-1] != ".":
description_ += "."
if "enum" in properties:
type_ += " Enum"
description_ += f' Options : {properties["enum"]}'
if "string" in type_:
example_ = tab + f' "{name}": "{properties["enum"][0]}"'
else:
example_ = tab + f' "{name}": {properties["enum"][0]}'
if "example" in properties:
eg = properties["example"]
type_ += f'<br/><i><sub>example: {eg}</sub></i>'
if isinstance(eg, str):
eg = '"' + eg + '"'
example_ = tab + f' "{name}": {eg}'
if "Array" in type_:
example_ += "\n" + tab + " ]"
if order == 0 or array:
line.append(f'<tr>\n<td width="20%">{name}</td> <td> <code>{type_}</code> <br/> {description_}</td>\n</tr>\n')
example += example_
if not array:
return example + "\n" + tab + "}", line, item_list
return example + "\n" + tab + "}\n" + " " * (order-1) + "]", line, item_list
for section, source in documentations.items():
yaml_file = open(source)
doc = yaml.load(yaml_file, Loader=yaml.Loader)
paths = doc["paths"]
for api_call, result in paths.items():
j = 1
content = result["post"] if "post" in result else result["get"]
# Create the path if it does not exist
destination_folder = pathlib.Path("/".join(content["tags"]))
destination_folder.mkdir(parents=True, exist_ok=True)
# Create Introduction part
with open(destination_folder / f'{j:02} Introduction.html', "w") as html_file:
html_file.write("<p>\n")
html_file.write(f"{content['summary']}\n")
html_file.write("</p>\n")
j += 1
# Create the Description part if there is one
if "description" in content:
with open(destination_folder / f'{j:02} Description.html', "w") as html_file:
html_file.write('<p>\n')
html_file.write(f'{content["description"]}\n')
html_file.write('</p>\n')
j += 1
# Create Request part
with open(destination_folder / f'{j:02} Request.html', "w") as html_file:
description_ = ""
if "parameters" in content:
writeUp = RequestTable(api_call, content["parameters"])
elif "requestBody" in content:
if "description" in content["requestBody"]:
description_ = str(content["requestBody"]["description"])
if description_[-1] != ".":
description_ += "."
description_ += " "
writeUp = ResponseTable(content["requestBody"])
else:
writeUp = '<table class="table qc-table">\n<thead>\n<tr>\n'
writeUp += f'<th colspan="1"><code>{api_call}</code> Method</th>\n</tr>\n</thead>\n'
writeUp += f'</tr>\n<td><code>{api_call}</code> method takes no parameters.</td>\n</tr>\n</table>'
description_ += f'The <code>{api_call}</code> API accepts requests in the following format:\n'
html_file.write("<p>\n" + description_ + "</p>\n")
html_file.write(writeUp)
j += 1
# Create Response part
with open(destination_folder / f'{j:02} Responses.html', "w") as html_file:
html_file.write('<p>\n')
html_file.write(f'The <code>{api_call}</code> API provides a response in the following format:\n')
html_file.write('</p>\n')
request_body = content["responses"]
for code, properties in request_body.items():
if code == "200":
html_file.write('<h4>200 Success</h4>\n')
elif code == "401":
html_file.write('<h4>401 Authentication Error</h4>\n<table class="table qc-table">\n<thead>\n<tr>\n')
html_file.write('<th colspan="2"><code>UnauthorizedError</code> Model - Unauthorized response from the API. Key is missing, invalid, or timestamp is too old for hash.</th>\n')
html_file.write('</tr>\n</thead>\n<tr>\n<td width="20%">www_authenticate</td> <td> <code>string</code> <br/> Header</td>\n</tr>\n</table>\n')
continue
elif code == "404":
html_file.write('<h4>404 Not Found Error</h4>\n')
html_file.write('<p>The requested item, index, page was not found.</p>\n')
continue
elif code == "default":
html_file.write('<h4>Default Generic Error</h4>\n')
writeUp = ResponseTable(properties)
html_file.write(writeUp)
print(f"Documentation of {section} is generated and inplace!")
| 41.04328
| 195
| 0.466811
| 1,816
| 18,018
| 4.457599
| 0.105727
| 0.112415
| 0.009883
| 0.038542
| 0.478196
| 0.422483
| 0.372329
| 0.323039
| 0.300556
| 0.274243
| 0
| 0.013493
| 0.39122
| 18,018
| 439
| 196
| 41.04328
| 0.724496
| 0.007104
| 0
| 0.327974
| 0
| 0.057878
| 0.247638
| 0.108471
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009646
| false
| 0
| 0.006431
| 0
| 0.032154
| 0.003215
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0eea2bc9a6e4ca781595beca55133b3f45fb4b7b
| 551
|
py
|
Python
|
forge_api_client/hubs.py
|
dmh126/forge-python-data-management-api
|
9c33f220021251a0340346065e3dd1998fc49a12
|
[
"MIT"
] | 1
|
2019-07-02T08:32:22.000Z
|
2019-07-02T08:32:22.000Z
|
forge_api_client/hubs.py
|
dmh126/forge-python-data-management-api
|
9c33f220021251a0340346065e3dd1998fc49a12
|
[
"MIT"
] | null | null | null |
forge_api_client/hubs.py
|
dmh126/forge-python-data-management-api
|
9c33f220021251a0340346065e3dd1998fc49a12
|
[
"MIT"
] | 2
|
2019-07-04T05:13:42.000Z
|
2020-05-09T22:15:05.000Z
|
from .utils import get_request, authorized
class Hubs:
@authorized
def getHubs(self):
url = self.api_url + '/project/v1/hubs'
headers = {
'Authorization': '%s %s' % (self.token_type, self.access_token)
}
return get_request(url, headers)
@authorized
def getHub(self, hub_id):
url = self.api_url + '/project/v1/hubs/%s' % hub_id
headers = {
'Authorization': '%s %s' % (self.token_type, self.access_token)
}
return get_request(url, headers)
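# --- Editor's illustrative sketch (not part of the original module) --------
# Hubs is a mixin: the composing client must provide api_url, token_type and
# access_token, which getHubs()/getHub() read when building the
# Authorization header. The ForgeClient class and values below are
# hypothetical.
def _example_hubs_client():
    class ForgeClient(Hubs):
        def __init__(self, api_url, token_type, access_token):
            self.api_url = api_url
            self.token_type = token_type
            self.access_token = access_token

    return ForgeClient('https://developer.api.autodesk.com',
                       'Bearer', '<access-token>')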
| 21.192308
| 75
| 0.575318
| 65
| 551
| 4.707692
| 0.384615
| 0.098039
| 0.065359
| 0.084967
| 0.666667
| 0.666667
| 0.666667
| 0.496732
| 0.496732
| 0.496732
| 0
| 0.005181
| 0.299456
| 551
| 25
| 76
| 22.04
| 0.787565
| 0
| 0
| 0.5
| 0
| 0
| 0.128857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0eebd18c0a711ceedaa9842ae51084a3bb575a36
| 8,841
|
py
|
Python
|
pactman/verifier/pytest_plugin.py
|
piotrantosz/pactman
|
2838e273d79831721da9c1b658b8f9d249efc789
|
[
"MIT"
] | 67
|
2018-08-26T03:39:16.000Z
|
2022-02-24T10:05:18.000Z
|
pactman/verifier/pytest_plugin.py
|
piotrantosz/pactman
|
2838e273d79831721da9c1b658b8f9d249efc789
|
[
"MIT"
] | 82
|
2018-08-29T00:09:32.000Z
|
2022-02-08T02:46:15.000Z
|
pactman/verifier/pytest_plugin.py
|
piotrantosz/pactman
|
2838e273d79831721da9c1b658b8f9d249efc789
|
[
"MIT"
] | 37
|
2018-08-22T04:40:31.000Z
|
2022-02-08T13:31:31.000Z
|
import glob
import logging
import os
import warnings
import pytest
from _pytest.outcomes import Failed
from _pytest.reports import TestReport
from .broker_pact import BrokerPact, BrokerPacts, PactBrokerConfig
from .result import PytestResult, log
def pytest_addoption(parser):
group = parser.getgroup("pact specific options (pactman)")
group.addoption(
"--pact-files", default=None, help="pact JSON files to verify (wildcards allowed)"
)
group.addoption("--pact-broker-url", default="", help="pact broker URL")
group.addoption("--pact-broker-token", default="", help="pact broker bearer token")
group.addoption(
"--pact-provider-name", default=None, help="pact name of provider being verified"
)
group.addoption(
"--pact-consumer-name",
default=None,
help="consumer name to limit pact verification to - "
"DEPRECATED, use --pact-verify-consumer instead",
)
group.addoption(
"--pact-verify-consumer", default=None, help="consumer name to limit pact verification to"
)
group.addoption(
"--pact-verify-consumer-tag",
metavar="TAG",
action="append",
help="limit broker pacts verified to those matching the tag. May be "
"specified multiple times in which case pacts matching any of these "
"tags will be verified.",
)
group.addoption(
"--pact-publish-results",
action="store_true",
default=False,
help="report pact verification results to pact broker",
)
group.addoption(
"--pact-provider-version",
default=None,
help="provider version to use when reporting pact results to pact broker",
)
group.addoption(
"--pact-allow-fail",
default=False,
action="store_true",
help="do not fail the pytest run if any pacts fail verification",
)
# Future options to be implemented. Listing them here so naming consistency can be a thing.
# group.addoption("--pact-publish-pacts", action="store_true", default=False,
# help="publish pacts to pact broker")
# group.addoption("--pact-consumer-version", default=None,
# help="consumer version to use when publishing pacts to the broker")
# group.addoption("--pact-consumer-version-source", default=None,
# help="generate consumer version from source 'git-tag' or 'git-hash'")
# group.addoption("--pact-consumer-version-tag", metavar='TAG', action="append",
# help="tag(s) that should be applied to the consumer version when pacts "
# "are uploaded to the broker; multiple tags may be supplied")
def get_broker_url(config):
return config.getoption("pact_broker_url") or os.environ.get("PACT_BROKER_URL")
def get_provider_name(config):
return config.getoption("pact_provider_name") or os.environ.get("PACT_PROVIDER_NAME")
# add the pact broker URL to the pytest output if running verbose
def pytest_report_header(config):
if config.getoption("verbose") > 0:
location = get_broker_url(config) or config.getoption("pact_files")
return [f"Loading pacts from {location}"]
def pytest_configure(config):
logging.getLogger("pactman").handlers = []
logging.basicConfig(format="%(message)s")
verbosity = config.getoption("verbose")
if verbosity > 0:
log.setLevel(logging.DEBUG)
class PytestPactVerifier:
def __init__(self, publish_results, provider_version, interaction, consumer):
self.publish_results = publish_results
self.provider_version = provider_version
self.interaction = interaction
self.consumer = consumer
def verify(self, provider_url, provider_setup, extra_provider_headers={}):
try:
self.interaction.verify_with_callable_setup(provider_url, provider_setup, extra_provider_headers)
except (Failed, AssertionError) as e:
raise Failed(str(e)) from None
def finish(self):
if self.consumer and self.publish_results and self.provider_version:
self.consumer.publish_result(self.provider_version)
def flatten_pacts(pacts):
for consumer in pacts:
last = consumer.interactions[-1]
for interaction in consumer.interactions:
if interaction is last:
yield (interaction, consumer)
else:
yield (interaction, None)
def load_pact_files(file_location):
for filename in glob.glob(file_location, recursive=True):
yield BrokerPact.load_file(filename, result_factory=PytestResult)
def test_id(identifier):
interaction, _ = identifier
return str(interaction)
def pytest_generate_tests(metafunc):
if "pact_verifier" in metafunc.fixturenames:
broker_url = get_broker_url(metafunc.config)
if not broker_url:
pact_files_location = metafunc.config.getoption("pact_files")
if not pact_files_location:
raise ValueError("need a --pact-broker-url or --pact-files option")
pact_files = load_pact_files(pact_files_location)
metafunc.parametrize(
"pact_verifier", flatten_pacts(pact_files), ids=test_id, indirect=True
)
else:
provider_name = get_provider_name(metafunc.config)
if not provider_name:
raise ValueError("--pact-broker-url requires the --pact-provider-name option")
broker = PactBrokerConfig(
broker_url,
metafunc.config.getoption("pact_broker_token"),
metafunc.config.getoption("pact_verify_consumer_tag", []),
)
broker_pacts = BrokerPacts(
provider_name, pact_broker=broker, result_factory=PytestResult
)
pacts = broker_pacts.consumers()
filter_consumer_name = metafunc.config.getoption("pact_verify_consumer")
if not filter_consumer_name:
filter_consumer_name = metafunc.config.getoption("pact_consumer_name")
if filter_consumer_name:
warnings.warn(
"The --pact-consumer-name command-line option is deprecated "
"and will be removed in the 3.0.0 release.",
DeprecationWarning,
)
if filter_consumer_name:
pacts = [pact for pact in pacts if pact.consumer == filter_consumer_name]
metafunc.parametrize("pact_verifier", flatten_pacts(pacts), ids=test_id, indirect=True)
class PactTestReport(TestReport):
"""Custom TestReport that allows us to attach an interaction to the result, and
then display the interaction's verification result output as well as the traceback
of the failure.
"""
@classmethod
def from_item_and_call(cls, item, call, interaction):
report = super().from_item_and_call(item, call)
report.pact_interaction = interaction
# the toterminal() call can't reasonably get at this config, so we store it here
report.verbosity = item.config.option.verbose
return report
def toterminal(self, out):
out.line("Pact failure details:", bold=True)
for text, kw in self.pact_interaction.result.results_for_terminal():
out.line(text, **kw)
if self.verbosity > 0:
out.line("Traceback:", bold=True)
return super().toterminal(out)
else:
out.line("Traceback not shown, use pytest -v to show it")
def pytest_runtest_makereport(item, call):
if call.when != "call" or "pact_verifier" not in getattr(item, "fixturenames", []):
return
# use our custom TestReport subclass if we're reporting on a pact verification call
interaction = item.funcargs["pact_verifier"].interaction
report = PactTestReport.from_item_and_call(item, call, interaction)
if report.failed and item.config.getoption("pact_allow_fail"):
# convert the fail into an "expected" fail, which allows the run to pass
report.wasxfail = True
report.outcome = "passed"
return report
def pytest_report_teststatus(report, config):
if not hasattr(report, "pact_interaction"):
return
if hasattr(report, "wasxfail"):
# wasxfail usually displays an "X" but since it's not *expected* to fail an "f" is a little clearer
return "ignore fail", "f", "IGNORE_FAIL"
@pytest.fixture()
def pact_verifier(pytestconfig, request):
interaction, consumer = request.param
p = PytestPactVerifier(
pytestconfig.getoption("pact_publish_results"),
pytestconfig.getoption("pact_provider_version"),
interaction,
consumer,
)
yield p
p.finish()
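# --- Editor's illustrative sketch (not part of the original plugin) --------
# How a provider test module typically consumes the fixture. The provider
# URL and the setup callback's exact signature are assumptions, not taken
# from this file; verify() forwards both to the interaction.
def _example_provider_test(pact_verifier):
    def provider_setup(*args, **kwargs):
        pass  # put the provider into the state the interaction requires
    pact_verifier.verify("http://localhost:5000", provider_setup)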
| 38.776316
| 109
| 0.658862
| 1,043
| 8,841
| 5.448706
| 0.239693
| 0.034489
| 0.044343
| 0.023755
| 0.173148
| 0.131445
| 0.062643
| 0.0183
| 0.0183
| 0.0183
| 0
| 0.001052
| 0.247144
| 8,841
| 227
| 110
| 38.947137
| 0.852764
| 0.153376
| 0
| 0.133721
| 0
| 0
| 0.205448
| 0.021471
| 0
| 0
| 0
| 0
| 0.005814
| 1
| 0.098837
| false
| 0.005814
| 0.052326
| 0.011628
| 0.22093
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|